repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
---|---|---|---|---|---|---|
sst-macro
|
sst-macro-master/python/default.py
|
from sst.macro import *
import sys
import os
import sst
# Build the default interconnect via the deprecated parameter setup path.
# NOTE(review): setupDeprecated is presumably exported by the sst.macro
# star import above — confirm against the sst.macro API.
ic = setupDeprecated()
| 81 | 9.25 | 23 |
py
|
sst-macro
|
sst-macro-master/bin/sstcompile.py
|
"""
Copyright 2009-2023 National Technology and Engineering Solutions of Sandia,
LLC (NTESS). Under the terms of Contract DE-NA-0003525, the U.S. Government
retains certain rights in this software.
Sandia National Laboratories is a multimission laboratory managed and operated
by National Technology and Engineering Solutions of Sandia, LLC., a wholly
owned subsidiary of Honeywell International, Inc., for the U.S. Department of
Energy's National Nuclear Security Administration under contract DE-NA0003525.
Copyright (c) 2009-2023, NTESS
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Questions? Contact [email protected]
"""
def addPreprocess(ctx, sourceFile, outputFile, args, cmds):
    """Queue a preprocess-only (-E) compiler invocation for sourceFile.

    The preprocessed output is piped into outputFile via the queued
    entry [outputFile, argv, tempFiles]; no temp files are produced.
    """
    argList = [ctx.compiler]
    for header in ctx.directIncludes:
        argList += ["-include", header]
    argList += ["-D%s" % d for d in ctx.defines]
    argList += ["-D%s" % d for d in args.D]
    argList += ["-I%s" % inc for inc in args.I]
    argList += ctx.cppFlags
    argList += ctx.compilerFlags
    argList += ["-E", sourceFile]
    if args.O:
        argList.append("-O%s" % args.O)
    # first slot = file to pipe stdout into; last slot = extra temporaries
    cmds.append([outputFile, argList, []])
def addEmitLlvm(ctx, sourceFile, outputFile, args, cmds):
    """Queue a compiler invocation emitting LLVM IR (.ll) for sourceFile.

    Appends [None, cmdArr, [outputFile]] to cmds: stdout is not piped,
    and outputFile is registered as a temporary for later cleanup.
    """
    cmdArr = [
        ctx.compiler,
        "-emit-llvm",
        "-S",
        "--no-integrated-cpp",
        sourceFile,
        "-o",
        outputFile
    ]
    if args.O:
        # BUG FIX: the -O flag was previously appended to the cmds queue
        # itself (corrupting the command list) instead of this command's argv.
        cmdArr.append("-O%s" % args.O)
    cmds.append([None, cmdArr, [outputFile]])
def addLlvmOptPass(ctx, llFile, llvmPass, args, cmds):
    """Queue an `opt` run applying the sst-<pass> plugin to llFile in place."""
    import os
    from sstccvars import clangDir, prefix
    optExe = os.path.join(clangDir, "bin", "opt")
    passLib = os.path.join(prefix, "lib", "lib%s.so" % llvmPass)
    # input and output are the same file: the pass rewrites the IR in place
    optCmd = [optExe, "-S", "-load", passLib, llFile,
              "-sst-%s" % llvmPass, "-o", llFile]
    cmds.append([None, optCmd, []])
def addLlvmCompile(ctx, llFile, objFile, args, cmds):
    """Queue compilation of LLVM IR llFile into the object file objFile."""
    compileCmd = [ctx.compiler, "-o", objFile, "-c", llFile]
    compileCmd.extend(ctx.compilerFlags)
    # objFile is tracked as a temporary produced by this step
    cmds.append([None, compileCmd, [objFile]])
def addModeDefines(ctx, args):
    """Append preprocessor defines reflecting the current compilation mode.

    NOTE(review): source indentation was lost in transit; the sstCore
    check is reconstructed as nested under the non-component branch
    (external-skeleton only makes sense for external builds) — confirm
    against upstream.
    """
    if ctx.mode != ctx.COMPONENT:
        # anything that is not a component build is "external" to sstmac
        ctx.defines.append("SSTMAC_EXTERNAL")
        if ctx.sstCore:
            ctx.defines.append("SSTMAC_EXTERNAL_SKELETON")
    if ctx.srcToSrc() or ctx.mode == ctx.COMPONENT:
        ctx.defines.append("SSTMAC_NO_REFACTOR_MAIN")
    if not ctx.simulateMode():
        ctx.defines.append("SSTMAC_NO_REPLACEMENTS")
def addSrc2SrcCompile(ctx, sourceFile, outputFile, args, cmds):
    """Queue the full source-to-source pipeline for one translation unit.

    Steps queued into cmds, in order:
      1. preprocess sourceFile -> pp.<name> (skipped in src2src debug mode)
      2. run sstmac_clang on the preprocessed file, producing the
         rewritten source (sst.pp.<name>) and a globals-registration
         C++ file (sstGlobals.pp.<name>.cpp)
      3. compile the rewritten source -> tmp.<obj>, either directly or
         through emit-llvm + opt passes when --skeletonize lists passes
      4. compile the globals file -> sstGlobals.<obj>
      5. merge both objects with `ld -r` into outputFile
    """
    from sstccvars import prefix
    from sstccvars import defaultIncludePaths, includeDir
    from sstccutils import cleanFlag, swapSuffix, addPrefixAndRebase, addPrefix
    from sstccvars import clangCppFlagsStr, clangLdFlagsStr
    from sstccvars import clangLibtoolingCxxFlagsStr, clangLibtoolingCFlagsStr
    from sstccvars import haveFloat128
    from sstccvars import sstStdFlag
    import os
    #First we must pre-process the file to get it read for source-to-source
    objBaseFolder, objName = os.path.split(outputFile)
    # temp files live next to the object file, not the source file
    ppTmpFile = addPrefixAndRebase("pp.", sourceFile, objBaseFolder)
    if not ctx.src2srcDebug:
        addPreprocess(ctx, sourceFile, ppTmpFile, args, cmds)
    # normalize every system include path to an absolute path
    rawPaths = defaultIncludePaths.split(":")
    cleanPaths = []
    for path in rawPaths:
        cleanPaths.append(os.path.abspath(path))
    defaultIncludePaths = ":".join(cleanPaths)
    defaultIncludePaths += ":" + cleanFlag(includeDir)
    clangDeglobal = os.path.join(prefix, "bin", "sstmac_clang")
    clangCmdArr = [clangDeglobal]
    clangCmdArr.append(ppTmpFile)
    clangCmdArr.extend(ctx.clangArgs)
    clangCmdArr.append("--system-includes=%s" % defaultIncludePaths)
    clangCmdArr.append("--")
    #all of the compiler options go after the -- separator
    #fix intrinsics which might not be known to clang if using a different compiler
    intrinsicsFixerPath = os.path.join(cleanFlag(includeDir), "sstmac", "replacements", "fixIntrinsics.h")
    clangCmdArr.append("-include")
    clangCmdArr.append(intrinsicsFixerPath)
    clangCmdArr.append("-stdlib=libc++")
    if args.std:
        clangCmdArr.append("-std=%s" % args.std)
    elif ctx.typ == "c++": #make sure we have something for C++
        clangCmdArr.append(sstStdFlag)
    if not haveFloat128:
        clangCmdArr.append("-D__float128=clangFloat128Fix")
    if ctx.typ == "c++":
        clangCmdArr.extend(clangLibtoolingCxxFlagsStr.split())
    else:
        clangCmdArr.extend(clangLibtoolingCFlagsStr.split())
    # outputs of the source-to-source tool
    srcRepl = addPrefixAndRebase("sst.pp.",sourceFile,objBaseFolder)
    cxxInitSrcFile = addPrefixAndRebase("sstGlobals.pp.",sourceFile,objBaseFolder) + ".cpp"
    if not ctx.src2srcDebug:
        cmds.append([None,clangCmdArr,[ppTmpFile,srcRepl,cxxInitSrcFile]]) #None -> don't pipe output anywhere
    tmpTarget = addPrefix("tmp.", outputFile)
    llvmPasses = []
    if args.skeletonize:
        # --skeletonize may carry a comma-separated list of LLVM passes
        llvmPasses = args.skeletonize.split(",")
    if llvmPasses:
        # route through LLVM IR so the opt passes can rewrite it
        llvmFile = swapSuffix("ll", tmpTarget)
        addEmitLlvm(ctx, srcRepl, llvmFile, args, cmds)
        for llvmPass in llvmPasses:
            addLlvmOptPass(ctx, llvmFile, llvmPass, args, cmds)
        addLlvmCompile(ctx, llvmFile, tmpTarget, args, cmds)
    else:
        # plain compile of the rewritten source
        cmdArr = [ctx.compiler]
        cmdArr.extend(ctx.compilerFlags)
        cmdArr.append("--no-integrated-cpp")
        cmdArr.append("-o")
        cmdArr.append(tmpTarget)
        cmdArr.append("-c")
        cmdArr.append(srcRepl)
        if args.O:
            cmdArr.append("-O%s" % args.O)
        if args.g:
            cmdArr.append("-g")
        cmds.append([None,cmdArr,[tmpTarget]])
    # compile the generated globals-registration file with the C++ compiler
    cxxInitObjFile = addPrefix("sstGlobals.", outputFile)
    cxxInitCmdArr = [
        ctx.cxx,
        "-o",
        cxxInitObjFile,
        "-I%s/include" % prefix,
        "-c",
        cxxInitSrcFile
    ]
    cxxInitCmdArr.extend(ctx.cxxFlags)
    cxxInitCmdArr.extend(ctx.cppFlags)
    if args.O:
        cxxInitCmdArr.append("-O%s" % args.O)
    if args.g:
        cxxInitCmdArr.append("-g")
    cmds.append([None,cxxInitCmdArr,[cxxInitObjFile]])
    # relocatable merge of the two objects into the final target
    mergeCmdArr = [
        "ld", "-r"
    ]
    import platform
    if not platform.system() == "Darwin":
        # --unique is a GNU ld option; macOS ld does not support it
        mergeCmdArr.append("--unique")
    mergeCmdArr.append(tmpTarget)
    mergeCmdArr.append(cxxInitObjFile)
    mergeCmdArr.append("-o")
    mergeCmdArr.append(outputFile)
    cmds.append([None,mergeCmdArr,[]])
def addComponentCompile(ctx, sourceFile, outputFile, args, cmds):
    """Queue a direct compile of sourceFile into outputFile for an SST
    component build (no source-to-source transformation)."""
    buildArgs = [ctx.compiler]
    for entry in ctx.directIncludes:
        buildArgs.append("-include")
        buildArgs.append(entry)
    buildArgs.extend(map(lambda x: "-D%s" % x, ctx.defines))
    # FIX: also forward command-line -D flags (args.D), matching
    # addPreprocess; previously user-specified defines were dropped here.
    buildArgs.extend(map(lambda x: "-D%s" % x, args.D))
    buildArgs.extend(map(lambda x: "-I%s" % x, args.I))
    buildArgs.extend(ctx.cppFlags)
    buildArgs.extend(ctx.compilerFlags)
    buildArgs.append("-c")
    buildArgs.append(sourceFile)
    buildArgs.append("-o")
    buildArgs.append(outputFile)
    cmds.append([None,buildArgs,[]])
def addCompile(ctx, sourceFile, outputFile, args, cmds):
    """Queue a plain (no source-to-source) compile of sourceFile to outputFile."""
    ppArgs = [ctx.compiler]
    for entry in ctx.directIncludes:
        ppArgs.append("-include")
        ppArgs.append(entry)
    ppArgs.extend(map(lambda x: "-D%s" % x, ctx.defines))
    # FIX: forward command-line -D flags (args.D) as addPreprocess does;
    # they were previously dropped on this path.
    ppArgs.extend(map(lambda x: "-D%s" % x, args.D))
    ppArgs.extend(map(lambda x: "-I%s" % x, args.I))
    ppArgs.extend(ctx.cppFlags)
    ppArgs.extend(ctx.compilerFlags)
    ppArgs.append("-c")
    ppArgs.append(sourceFile)
    ppArgs.append("-o")
    ppArgs.append(outputFile)
    # NOTE(review): stdout is also piped to outputFile (first slot) even
    # though -o is given — preserved from the original; confirm intent.
    cmds.append([outputFile,ppArgs,[]]) #pipe file, no extra temps
| 8,647 | 32.78125 | 106 |
py
|
sst-macro
|
sst-macro-master/bin/configlib.py
|
"""
Copyright 2009-2023 National Technology and Engineering Solutions of Sandia,
LLC (NTESS). Under the terms of Contract DE-NA-0003525, the U.S. Government
retains certain rights in this software.
Sandia National Laboratories is a multimission laboratory managed and operated
by National Technology and Engineering Solutions of Sandia, LLC., a wholly
owned subsidiary of Honeywell International, Inc., for the U.S. Department of
Energy's National Nuclear Security Administration under contract DE-NA0003525.
Copyright (c) 2009-2023, NTESS
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Questions? Contact [email protected]
"""
import sys
def getstatusoutput3(cmd, stdin=None, pipe=None):
    """Python-3 equivalent of commands.getstatusoutput.

    cmd is split on whitespace and run; stdin may be a filename to feed
    as input, or pipe may be an argv list whose stdout is piped in.
    Returns (0, output) on success, (1, "") on any failure.
    No-op (returns None) on Python 2.
    """
    if not (sys.version_info < (3, 0)):
        try:
            from subprocess import check_output, STDOUT, Popen, PIPE
            if stdin:
                stdin = open(stdin)
            elif pipe:
                pipe = Popen(pipe, stdout=PIPE)
                stdin = pipe.stdout
            result = check_output(cmd.split(), stdin=stdin, stderr=STDOUT).decode("utf-8").rstrip("\n")
            if pipe:
                # BUG FIX: was `pipe.wait` (attribute access, never called),
                # leaving the upstream process unreaped.
                pipe.wait()
            return 0, result
        except Exception:
            # narrowed from a bare except: so SystemExit/KeyboardInterrupt propagate
            return 1, ""
def getstatusoutput2(cmd, stdin=None, pipe=None):
    """Python-2 implementation built on the commands module.

    stdin may name a file redirected into cmd; pipe may be an argv list
    whose output is piped in. Returns (status, output) on Python 2 and
    None on Python 3.
    """
    if not (sys.version_info < (3, 0)):
        return None
    import commands
    if stdin:
        cmd = cmd + " < %s" % stdin
    elif pipe:
        quoted = []
        for part in pipe:
            # quote any element containing a space for the shell
            quoted.append("'%s'" % part if " " in part else part)
        cmd = " ".join(quoted) + " | " + cmd
    return commands.getstatusoutput(cmd)
# Bind the version-appropriate implementation under the common name.
getstatusoutput = (
    getstatusoutput2 if sys.version_info < (3, 0) else getstatusoutput3
)
def getoutput(cmd):
    """Run cmd and return just its output text, discarding the status code."""
    return getstatusoutput(cmd)[1]
def read_utf8(path):
    """Read a whole text file; on Python 3, undecodable bytes are ignored.

    FIX: file handles are now closed deterministically via `with` rather
    than leaked to the garbage collector.
    """
    if sys.version_info < (3, 0):
        with open(path) as fh:
            return fh.read()
    with open(path, encoding="utf-8", errors="ignore") as fh:
        return fh.read()
| 3,463 | 35.463158 | 95 |
py
|
sst-macro
|
sst-macro-master/bin/sstlink.py
|
"""
Copyright 2009-2023 National Technology and Engineering Solutions of Sandia,
LLC (NTESS). Under the terms of Contract DE-NA-0003525, the U.S. Government
retains certain rights in this software.
Sandia National Laboratories is a multimission laboratory managed and operated
by National Technology and Engineering Solutions of Sandia, LLC., a wholly
owned subsidiary of Honeywell International, Inc., for the U.S. Department of
Energy's National Nuclear Security Administration under contract DE-NA0003525.
Copyright (c) 2009-2023, NTESS
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Questions? Contact [email protected]
"""
def addLink(ctx, ldTarget, args, cmds, objects, toExe=False):
    """Queue the final link producing ldTarget from the given objects.

    When toExe is False a loadable shared object is built (soFlagsStr);
    an rpath to the SST/macro lib directory is always embedded.
    """
    from sstccvars import prefix, soFlagsStr
    rpathFlag = "-Wl,-rpath,%s/lib" % prefix
    linkCmd = [ctx.ld, rpathFlag]
    linkCmd += ctx.ldFlags
    if not toExe:
        # shared-object build: add the platform's -shared style flags
        linkCmd += soFlagsStr.split()
    linkCmd += ctx.libs
    linkCmd += ctx.compilerFlags
    linkCmd += ["-L%s" % p for p in args.L]   #add all the -L flags for now
    linkCmd += ["-l%s" % lib for lib in args.l]  #add all the -l flags for now
    linkCmd += objects
    linkCmd += ["-o", ldTarget]
    cmds.append([None, linkCmd, []])
def addModeLinks(ctx, args):
    """Add the simulator support libraries when not linking against SST core."""
    if ctx.sstCore:
        return
    ctx.libs.extend(['-lsstmac', '-lundumpi'])
| 2,910 | 42.447761 | 84 |
py
|
sst-macro
|
sst-macro-master/bin/sstccutils.py
|
def swapSuffix(suffix, path):
    """Replace the final dot-extension of path with suffix.

    A path without any dot collapses to just the suffix (matches the
    original split/join behavior).
    """
    pieces = path.rsplit(".", 1)
    pieces[-1] = suffix
    return ".".join(pieces)
def rebaseFolder(path, srcDir, dstDir):
    """Return path with srcDir swapped for dstDir in its directory part.

    The filename component is preserved; only the folder portion is
    rewritten via str.replace.
    """
    import os  # FIX: os was used here without being imported anywhere in scope
    folder, fname = os.path.split(path)
    newBaseFolder = folder.replace(srcDir, dstDir)
    return os.path.join(newBaseFolder, fname)
def addPrefix(prefix, path):
    """Prepend prefix to the filename component of path.

    os.path.split always yields a (head, tail) pair, so joining head
    with the prefixed tail covers bare filenames too (head == "").
    """
    import os
    head, tail = os.path.split(path)
    return os.path.join(head, prefix + tail)
def addPrefixAndRebase(prefix, path, newBase):
    """Prefix the filename of path and relocate the result into newBase."""
    import os
    prefixed = addPrefix(prefix, path)
    filename = os.path.split(prefixed)[1]
    return os.path.join(newBase, filename)
def delete(files):
    """Best-effort removal of the given files via `rm -f` (missing files ignored).

    NOTE(review): filenames are not shell-quoted; callers pass internally
    generated temp paths without spaces or metacharacters.
    """
    import os  # FIX: dropped unused `import traceback`
    os.system("rm -f %s" % (" ".join(files)))
def getProcTreeHelper(mypid, arr):
    """Append the command names of process mypid and its ancestors to arr.

    Walks the parent chain by parsing `ps axo pid,ppid,comm` output;
    recursion terminates when a pid is no longer found in the listing.
    """
    mypid = str(mypid)
    import configlib
    info = configlib.getoutput("ps axo pid,ppid,comm")
    for line in info.splitlines():
        args = line.strip().split()
        if args[0] == mypid:
            # args[1] is the parent pid; args[2:] is the command name
            arr.append(" ".join(args[2:]))
            getProcTreeHelper(args[1], arr)
            break
def getProcTree():
    """Return command names from the current process up through its ancestry."""
    import os
    ancestry = []
    getProcTreeHelper(os.getpid(), ancestry)
    return ancestry
def getProcName():
    """Return the basename of the command that invoked this script.

    Inspects the parent process via `ps -p`; when the parent is a login
    shell, the first argument after the shell is treated as the real
    command. NOTE(review): relies on the platform's `ps` column layout
    (columns 4+ being the command) — confirm on non-typical systems.
    """
    import os
    import configlib
    import sys
    pid = int(os.getppid())
    # last line of the ps output is the process entry itself
    runCmds = configlib.getoutput("ps -p %d" % pid).splitlines()[-1].split()
    runCmds = runCmds[3:]
    # login shells may be listed with a leading '-' (e.g. "-bash")
    firstCmd = runCmds[0].lstrip("-")
    if firstCmd in ("/bin/sh", "sh", "bash", "/bin/bash", "tcsh", "/bin/tcsh", "zsh", "/bin/zsh"):
        if len(runCmds) > 1: #it might just be bash
            firstCmd = runCmds[1]
    cmd = os.path.split(firstCmd)[-1]
    return cmd
def cleanFlag(flag):
    """Expand autoconf-style ${...} placeholders in flag and strip whitespace."""
    from sstccvars import includeDir, execPrefix, prefix
    substitutions = (
        ("${includedir}", includeDir),
        ("${exec_prefix}", execPrefix),
        ("${prefix}", prefix),
    )
    for placeholder, value in substitutions:
        flag = flag.replace(placeholder, value)
    return flag.strip()
| 1,809 | 26.014925 | 124 |
py
|
sst-macro
|
sst-macro-master/bin/sstcclib.py
|
"""
Copyright 2009-2023 National Technology and Engineering Solutions of Sandia,
LLC (NTESS). Under the terms of Contract DE-NA-0003525, the U.S. Government
retains certain rights in this software.
Sandia National Laboratories is a multimission laboratory managed and operated
by National Technology and Engineering Solutions of Sandia, LLC., a wholly
owned subsidiary of Honeywell International, Inc., for the U.S. Department of
Energy's National Nuclear Security Administration under contract DE-NA0003525.
Copyright (c) 2009-2023, NTESS
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Questions? Contact [email protected]
"""
# User-facing help text describing the environment variables understood by
# the SST compiler wrapper (displayed alongside the argparse help).
helpText = """The following environmental variables can be defined for the SST compiler
SSTMAC_VERBOSE=0 or 1: produce verbose output from the SST compiler (default 0)
SSTMAC_DELETE_TEMPS=0 or 1: remove all temp source-to-source files (default 1)
SSTMAC_DELETE_TEMP_OFILES=0 or 1: remove all temporary object files (default 1)
SSTMAC_CONFIG=0: running automake, cmake - skip certain steps to fool build system
"""
def createBashWrapper(compiler, exeName, ldTarget, sstCore, sstmacExe):
    """Write an executable Python wrapper script exeName that runs ldTarget
    under sstmac, embedding the binary's bytes in a trailing docstring.

    Returns 0 on success; returns 1 when ldTarget contains the bogus
    symbol autoconf probes for (configure expects that build to fail).
    """
    import configlib
    import os
    import sys  # FIX: sys.exit was called below without sys being imported
    #there is one scenario in which autoconf actually WANTS
    #this to fail... check for it now
    output = configlib.getoutput("nm %s | grep some_bogus_nonexistent_symbol" % ldTarget)
    if output:
        #crash and burn
        return 1
    if sstCore:
        sys.exit("Do not yet support standalone exe for SST core")
    exeLoad = ldTarget
    if not os.path.isabs(exeLoad):
        exeLoad = os.path.join(os.getcwd(), ldTarget)
    str_arr = ["#! /usr/bin/env python",
               "# -*- coding: utf-8 -*-",
               "import os",
               "import sys",
               "import subprocess as sp",
               'argv = " ".join(sys.argv[1:])',
               "cmd = ['%s', '-a', '-n', '1', '--exe=%s', '--use-app-rc', '--app-argv=%%s' %% argv]" % (sstmacExe, exeLoad),
               "child = sp.Popen(cmd)",
               "streamdata = child.communicate()[0]",
               "rc = child.returncode",
               "sys.exit(rc)",
               "",
               '"""',
               configlib.read_utf8(ldTarget),
               '"""',
              ]
    # FIX: close the output handle deterministically (was leaked)
    with open(exeName, "w", encoding='utf8') as fh:
        fh.write("\n".join(str_arr))
    os.system("chmod +x %s" % exeName)
    return 0
def runCmdArr(cmdArr, verbose):
    """Join cmdArr into one shell command and run it, returning the status.

    An empty or None cmdArr is a no-op that returns 0. When verbose is
    truthy the command line is echoed to stderr first.
    """
    import sys, os
    if not cmdArr:
        return 0
    command = " ".join(cmdArr)
    if verbose:
        sys.stderr.write("%s\n" % command)
    return os.system(command)
class TempFiles:
    """Collects temporary files created during a build and deletes them
    (or clang-formats the survivors) when cleaned up or destructed."""
    def __init__(self, doDeleteSources, doDeleteObjects, verbose, clangBin):
        # whether to delete temporary source-to-source files
        self.doDeleteSources = doDeleteSources
        # whether to delete temporary object files
        self.doDeleteObjects = doDeleteObjects
        # echo cleanup commands to stderr when set
        self.verbose = verbose
        # accumulated temp file paths
        self.files = []
        # clang installation root, used to locate clang-format
        self.clangBin = clangBin
    def append(self, f):
        """Register one temp file path for later cleanup."""
        self.files.append(f)
    def __del__(self):
        # best-effort cleanup if the caller never invoked cleanUp()
        self.cleanUp()
    def cleanUp(self):
        """Delete tracked temp files per the deletion flags; when sources
        are kept, attempt to prettify them with clang-format."""
        import os
        import sys
        import traceback
        objects = [f for f in self.files if f.endswith('.o')]
        sources = [f for f in self.files if not f.endswith('.o')]
        if self.doDeleteSources and sources:
            cmdall = "rm -f %s" % " ".join(sources)
            if self.verbose:
                sys.stderr.write("%s\n" % cmdall)
            os.system(cmdall)
        # clear the list so a later __del__ does not re-delete
        self.files = []
        if self.doDeleteObjects and objects:
            cmdobjects = "rm -f %s" % " ".join(objects)
            if self.verbose:
                sys.stderr.write("%s\n" % cmdobjects)
            os.system(cmdobjects)
        if not self.doDeleteSources: # attempt to format the files with clangformat
            # taken from https://stackoverflow.com/questions/377017/test-if-executable-exists-in-python/12611523
            clang_format_prog = "clang-format"
            clang_format = os.path.join(self.clangBin + "bin/", clang_format_prog)
            has_clang_format = os.path.isfile(clang_format) and os.access(clang_format, os.X_OK)
            if not has_clang_format: # Look for one in the path
                for path in os.environ["PATH"].split(os.pathsep):
                    exe = os.path.join(path, clang_format_prog)
                    if os.path.isfile(exe) and os.access(exe, os.X_OK):
                        has_clang_format = True
                        clang_format = exe
                        break
            if self.verbose:
                if has_clang_format:
                    sys.stderr.write("Attempting to format temp files with %s\n" % clang_format)
                else:
                    sys.stderr.write("Could not find %s\n" % clang_format)
            if has_clang_format:
                for f in sources:
                    cmd = clang_format + " -i -style=llvm " + f
                    if self.verbose:
                        sys.stderr.write(cmd + "\n")
                    os.system(cmd)
def runAllCmds(cmds, verbose, doDeleteSources, doDeleteObjects, clangBin):
    """Execute each queued command in order, stopping at the first failure.

    Each cmds entry is [outfile, cmdArr, tempFiles]: when outfile is set,
    the child's stdout is redirected into it; tempFiles produced by the
    command are registered with a TempFiles tracker (cleaned up by its
    destructor). Returns the first nonzero return code, else 0.
    """
    tmpFiles = TempFiles(doDeleteSources, doDeleteObjects, verbose, clangBin)
    from subprocess import check_output,STDOUT,Popen,PIPE
    import sys
    for outfile, cmdArr, tempFiles in cmds:
        if verbose:
            sys.stderr.write("===============================\n")
            sys.stderr.write(" ".join(cmdArr))
            sys.stderr.write("\n")
        stdout=None
        if outfile:
            stdout = open(outfile,"w")
        child = Popen([x.strip() for x in cmdArr],stdout=stdout)
        result = child.communicate()
        if outfile:
            stdout.close()
        rc = child.returncode
        if not rc == 0:
            # bail out early; temps registered so far are removed by
            # tmpFiles when it is garbage-collected
            return rc
        # only register temps after the command succeeds
        for tmp in tempFiles:
            tmpFiles.append(tmp)
    return 0
class Context:
    """Aggregates compiler choices, flag lists, and the compilation mode
    shared across the SST/macro compiler-wrapper helpers."""
    # compilation modes
    SKELETONIZE = 0
    MEMOIZE = 1
    COMPONENT = 2
    NONE = 3
    def __init__(self):
        self.cxxFlags = []            # flags for C++ compilation
        self.cFlags = []              # flags for C compilation
        self.cppFlags = []            # preprocessor flags
        self.ldFlags = []             # linker flags
        self.libs = []                # libraries to link
        self.typ = ""                 # "c" or "c++"
        self.defines = []             # -D preprocessor defines
        self.directIncludes = []      # headers force-included via -include
        self.ld = None                # linker executable
        self.cc = None                # C compiler executable
        self.cxx = None               # C++ compiler executable
        self.compiler = None          # active compiler (cc or cxx)
        self.clangArgs = []           # args forwarded to sstmac_clang
        self.mode = self.NONE         # current compilation mode
        self.replacementIncludes = []
        self.sstCore = False          # building against SST core?
        self.hasClang = False         # clang libtooling available?
        self.src2srcDebug = False     # skip the preprocess/clang steps?
    def simulateMode(self):
        """True when building for simulation (skeletonization)."""
        return self.mode == self.SKELETONIZE
    def srcToSrc(self):
        """True when source-to-source transformation will run."""
        return self.hasClang and self.simulateMode()
    def modeString(self):
        """Human-readable name of the current mode."""
        if self.mode == self.SKELETONIZE: return "SKELETONIZE"
        if self.mode == self.MEMOIZE: return "MEMOIZE"
        if self.mode == self.COMPONENT: return "COMPONENT"
        if self.mode == self.NONE: return "NONE"
    def setMode(self, mode):
        """Set the mode exactly once; conflicting modes abort the wrapper."""
        import sys  # FIX: sys was referenced without being imported in scope
        if self.mode != self.NONE:
            sys.exit("Mode already set to %s - undefined behavior to also use --sst_component" % self.modeString())
        self.mode = mode
    def setDefaultMode(self, mode):
        """Set the mode only if none has been chosen yet."""
        if self.mode == self.NONE:
            self.mode = mode
def run(typ, extraLibs=""):
import os
import sys
import platform
from configlib import getstatusoutput
from sstccvars import sstLdFlags, sstCppFlags
from sstccvars import prefix, execPrefix, includeDir, cc, cxx, spackcc, spackcxx
from sstccvars import sstCxxFlagsStr, sstCFlagsStr
from sstccvars import includeDir
from sstccvars import sstCore
from sstccvars import soFlagsStr
from sstccvars import clangBin
from sstccvars import clangCppFlagsStr, clangLdFlagsStr
from sstccutils import cleanFlag, getProcTree, swapSuffix
# Probably better to just always compile PIC
#needfPIC = "fPIC" in sstCxxFlagsStr
needfPIC = True
sstmacExe = cleanFlag(os.path.join(prefix, "bin", "sstmac"))
verbose = False #whether to print verbose output
if "SSTMAC_VERBOSE" in os.environ:
flag = int(os.environ["SSTMAC_VERBOSE"])
verbose = verbose or flag
#whether to make a shared object for loading
#or a bash script that emulates an executable
makeBashExe = False
if "SSTMAC_CONFIG" in os.environ:
flag = int(os.environ["SSTMAC_CONFIG"])
makeBashExe = flag
procTree = getProcTree()[1:] #throw out python command
parentProc = procTree[0]
#if parentProc.endswith("configure"):
# makeBashExe = True
# the parent proc here is just launchd - configure vanishes
if parentProc.endswith("cmake"):
numCmakes = 0
for exe in procTree:
if exe.endswith("cmake"):
numCmakes += 1
if numCmakes > 1:
makeBashExe = True
import argparse
parser = argparse.ArgumentParser(description='Process flags for the SST/macro compiler wrapper')
parser.add_argument('-o', '--output', type=str,
help="the linker/compilation target")
parser.add_argument('--keep-exe', default=False, action="store_true",
help="whether to create an executable script or build a loadable shared object")
parser.add_argument('--skeletonize', type=str,
help="whether to activate skeletonization mode, stripping compute and mem allocation. Can take a list of LLVM passes as argument.")
parser.add_argument('--memoize', type=str,
help="whether to activate memoization mode that instruments and records execution. Can take a list of LLVM passes as argument.")
parser.add_argument('-I', action="append", type=str, help="an include path", default=[])
parser.add_argument('-D', action="append", type=str, help="a defines", default=[])
parser.add_argument('-W', action="append", type=str, help="activate a particular warning", default=[])
parser.add_argument('-L', action="append", type=str, help="a library path", default=[])
parser.add_argument('-l', action="append", type=str, help="a library to link against", default=[])
parser.add_argument('-O', type=str)
parser.add_argument('-g', action="store_true", default=False)
parser.add_argument('-c', '--compile', default=False, action="store_true")
parser.add_argument('-E', '--preprocess', default=False, action="store_true")
parser.add_argument('-V', '--version', default=False, action="store_true", help="Print SST and compiler version info")
parser.add_argument('--disable-mpi', default=False, action="store_true", help="Do not include virtual MPI environment")
parser.add_argument('--flags', default=False, action="store_true", help="Print the extra flags SST automatically adds")
parser.add_argument('--prefix', default=False, action="store_true", help="Print the SST installation prefix")
parser.add_argument('--sst-component', default=False, action="store_true",
help="Whether we are building an SST component and should skip all source-to-source")
parser.add_argument('-fPIC', default=False, action="store_true", help="compile for position independent code")
parser.add_argument('-std', type=str, help="specify the standard level for C or C++")
parser.add_argument('--no-integrated-cpp', default=False, action="store_true", help="whether to skip preprocessing")
parser.add_argument("-fvisibility", type=str, help="control the visibility of certain symbols")
parser.add_argument("--host-cxx", type=str, help="override the C++ compiler used underneath from the one used to build SST/macro")
parser.add_argument("--host-cc", type=str, help="override the C compiler used underneath from the one used to build SST/macro")
args, extraArgs = parser.parse_known_args()
ctx = Context()
for entry in sstCppFlags:
clean = cleanFlag(entry)
if clean: #don't add empty flags
ctx.cppFlags.append(clean)
ctx.sstCore = sstCore
ctx.cc = spackcc if spackcc else cc
ctx.cxx = spackcxx if spackcxx else cxx
ctx.typ = typ
ctx.sstCore = sstCore
ctx.hasClang = bool(clangCppFlagsStr)
#it is possible to override the host compilers use to do preprocessing/compilation
if args.host_cxx:
ctx.cxx = args.host_cxx
elif "SSTMAC_CXX" in os.environ:
ctx.cxx = os.environ["SSTMAC_CXX"]
if args.host_cc:
ctx.cc = args.host_cc
elif "SSTMAC_CC" in os.environ:
ctx.cc = os.environ["SSTMAC_CC"]
if args.no_integrated_cpp:
sys.exit("SST compiler wrapper cannot handle --no-integrated-cpp flag")
#keep visibility arg, but not if it causes hidden symbols
if args.fvisibility and args.fvisibility != "hidden":
ctx.compilerFlags.append("-fvisibility=%s" % args.fvisibility)
delTempObjectFiles = True #whether to delete all temporary object files created
delTempSourceFiles = True #whether to delete all temporary source files created
if "SSTMAC_DELETE_TEMPS" in os.environ:
flag = int(os.environ["SSTMAC_DELETE_TEMPS"])
delTempSourceFiles = bool(flag)
delTempObjectFiles = bool(flag)
if "SSTMAC_DELETE_TEMP_OBJECTS" in os.environ:
flag = int(os.environ["SSTMAC_DELETE_TEMP_OBJECTS"])
delTempObjectFiles = bool(flag)
elif args.g: #debug build, don't delete temps
delTempObjectFiles = False
if "SSTMAC_DELETE_TEMP_SOURCES" in os.environ:
flag = int(os.environ["SSTMAC_DELETE_TEMP_SOURCES"])
delTempSourceFiles = bool(flag)
skeletonizing = False
if "SSTMAC_SKELETONIZE" in os.environ:
val = int(os.environ["SSTMAC_SKELETONIZE"])
skeletonizing = bool(val)
if skeletonizing or (not args.skeletonize is None):
ctx.clangArgs.append("--skeletonize")
ctx.setMode(ctx.SKELETONIZE)
memoizing = False
if "SSTMAC_MEMOIZE" in os.environ:
val = int(os.environ["SSTMAC_MEMOIZE"])
memoizing = bool(val)
if memoizing or (not args.memoize is None):
ctx.clangArgs.append("--memoize")
ctx.setMode(ctx.MEMOIZE)
if "SSTMAC_DEBUG_SRC2SRC" in os.environ:
ctx.src2srcDebug = int(os.environ["SSTMAC_DEBUG_SRC2SRC"])
if args.sst_component:
ctx.setMode(ctx.COMPONENT)
#unless told otherwise, I am skeletonizinng
ctx.setDefaultMode(ctx.SKELETONIZE)
from sstcompile import addModeDefines
addModeDefines(ctx, args)
from sstlink import addModeLinks
addModeLinks(ctx, args)
#this is probably cmake being a jack-donkey during configure, overwrite it
if args.std == "c++98": args.std = "c++1y"
#if we are in simulate mode, we have to create the "replacement" environment
#we do this by rerouting all the headers to SST/macro headers
if ctx.simulateMode():
include_root = cleanFlag(includeDir)
repldir = os.path.join(include_root, "sstmac", "replacements")
repldir = cleanFlag(repldir)
args.I.append(os.path.join(include_root, "sumi"))
args.I.insert(0,repldir)
#also force inclusion of wrappers
if typ == "c++":
ctx.directIncludes.append("cstdint")
else:
ctx.directIncludes.append("stdint.h")
ctx.directIncludes.append("sstmac/compute.h")
ctx.directIncludes.append("sstmac/skeleton.h")
if not args.disable_mpi:
args.I.insert(0,os.path.join(repldir, "mpi"))
sysargs = sys.argv[1:]
asmFiles = False
sourceFiles = []
givenObjects = []
unparsedArgs = []
for arg in extraArgs:
sarg = arg.strip().strip("'")
#okay, well, the flags might have literal quotes in them
#which get lost passing into here - restore all " to literal quotes
sarg = sarg.replace("\"",r'\"')
sarg = sarg.replace(" ", r'\ ')
if sarg.endswith('.o'):
givenObjects.append(sarg)
elif sarg.endswith('.cpp') or sarg.endswith('.cc') or sarg.endswith('.c') \
or sarg.endswith(".cxx") or sarg.endswith(".C"):
sourceFiles.append(sarg)
elif sarg.endswith('.S') or sarg.endswith(".s"):
asmFiles = True
elif sarg.startswith("-g"):
unparsedArgs.append("-g") #gstab,g3,etc can appear but mess things up - make them regular -g
else:
unparsedArgs.append(sarg)
ctx.cFlags.extend(unparsedArgs) #add anything I didn't recognize here for now
ctx.cxxFlags.extend(unparsedArgs) #add anything I didn't recognize here for now
# Substitute compiler path when built using Spack
if args.fPIC or needfPIC:
#assume fPIC was given
ctx.cxxFlags.append("-fPIC")
ctx.cFlags.append("-fPIC")
#for now, just assume any time an exe name "conftest"
#is being built, it comes from configure
if args.output == "conftest":
makeBashExe = True
for entry in sstLdFlags:
flag = cleanFlag(entry)
if flag:
ctx.ldFlags.append(flag)
sstparser = argparse.ArgumentParser(description='Process flags for the SST/macro compiler wrapper')
sstparser.add_argument('-std', type=str, help="specify the standard level for C or C++")
sstparser.add_argument('-O', type=str, help="the optimization level for SST/macro - this gets consumed and not forwarded")
sstparser.add_argument('-g', action="store_true", default=False,
help="the debug level for SST/macro = this gets consumed and not forwarded")
sstCxxFlags = cleanFlag(sstCxxFlagsStr).split()
sstCxxArgs, passedThroughSstCxxFlags = sstparser.parse_known_args(sstCxxFlags)
sstCFlags = cleanFlag(sstCFlagsStr).split()
sstCArgs, passedThroughSstCFlags = sstparser.parse_known_args(sstCFlags)
ctx.cxxFlags.extend(passedThroughSstCxxFlags)
ctx.cFlags.extend(passedThroughSstCFlags)
compiler = ""
sstCompilerFlags = []
if typ.lower() == "c++":
ctx.compiler = ctx.cxx
ctx.ld = ctx.cxx
ctx.compilerFlags = ctx.cxxFlags
if args.std:
# let's turn off this warning for now
# if sstCxxArgs.std and args.std != sstCxxArgs.std:
# sys.stderr.write("WARNING: SST compiled with %s, but app compiled with %s - choosing app's version\n" % (sstCxxArgs.std, args.std))
ctx.cxxFlags.append("-std=%s" % args.std)
elif sstCxxArgs.std:
ctx.cxxFlags.append("-std=%s" % sstCxxArgs.std)
else:
pass
ctx.compilerFlags = ctx.cxxFlags[:]
elif typ.lower() == "c":
ctx.compiler = ctx.cc
if ctx.hasClang:
#always use c++ for linking since we are bringing a bunch of sstmac C++ into the game
ctx.ld = ctx.cxx
else:
# this mode doesn't work any more (skeletonization uses code that is invalid with C compiler)
#ctx.ld = ctx.cc
sys.stderr.write("ERROR: Compiling C requires Clang autoskeletonizer\n")
sys.exit(1)
if args.std:
ctx.cFlags.append("-std=%s" % args.std)
elif sstCArgs.std:
ctx.cFlags.append("-std=%s" % sstCArgs.std)
ctx.compilerFlags = ctx.cFlags[:]
if sstCxxArgs.std: #we will still do some C++, make sure we get the right -std flag
ctx.cxxFlags.append("-std=%s" % sstCxxArgs.std)
if args.version:
import inspect, os
pathStr = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
print(pathStr)
cmd = "%s --version" % (ctx.cxx)
os.system(cmd)
sys.exit()
elif args.flags:
sys.stderr.write("LDFLAGS=%s\n" % " ".join(ctx.ldFlags))
sys.stderr.write("CPPFLAGS=%s\n" % " ".join(ctx.cppFlags))
if typ == "c++":
sys.stderr.write("CXXFLAGS=%s\n" % " ".join(ctx.compilerFlags))
else:
sys.stderr.write("CFLAGS=%s\n" % " ".join(ctx.compilerFlags))
sys.exit()
elif args.prefix:
sys.stdout.write("%s\n" % prefix)
sys.exit()
if asmFiles:
#just execute the command as-is with no frills
cmdArr = [
ctx.cc
]
cmdArr.extend(sys.argv[1:])
cmds = [ [None,cmdArr,[]] ]
return runAllCmds(cmds, verbose, delTempSourceFiles, delTempObjectFiles, clangBin)
#this might be an actual library, not an exe wrapper
ldTarget = args.output
if not ldTarget:
ldTarget = "a.out"
if ldTarget.startswith("lib"):
if ".so" in ldTarget or ".dylib" in ldTarget:
makeBashExe = False
exeName = ldTarget #maybe needed later
if makeBashExe:
ldTarget += "_exe"
runLinker = not args.preprocess and not args.compile
from sstcompile import addSrc2SrcCompile, addComponentCompile
from sstcompile import addCompile, addPreprocess
from sstlink import addLink
#the format of the entry in the cmd arr is as follows
# [outfile, [cmds,...], [tmpFiles,...]]
# the outfile can be None or a string saying where stdout should be piped
# if None, stdout is printed to screen
# the cmd arr is directly passed the Popen command to run a subprocess
# the tmp files is a (possibly empty) list of files that get generated
# but should be cleaned up after all commands finish
cmds = []
if args.preprocess:
for srcFile in sourceFiles:
addPreprocess(ctx, srcFile, None, args, cmds)
rc = runAllCmds(cmds, verbose, delTempSourceFiles, delTempObjectFiles, clangBin)
return rc
generatedObjects = []
#this is more complicated - we have to use clang to do a source to source transformation
#then we need to run the compiler on that modified source
for srcFile in sourceFiles:
target = args.output
if not target or len(givenObjects) > 1 or not args.compile:
srcName = os.path.split(srcFile)[-1]
target = swapSuffix("o", srcName)
generatedObjects.append(target)
if ctx.srcToSrc():
addSrc2SrcCompile(ctx, srcFile, target, args, cmds)
elif ctx.mode == ctx.COMPONENT:
addComponentCompile(ctx, srcFile, target, args, cmds)
else:
addCompile(ctx, srcFile, target, args, cmds)
allObjects = generatedObjects[:]
allObjects.extend(givenObjects)
if runLinker:
shouldMakeExe = memoizing
addLink(ctx, ldTarget, args, cmds, allObjects, shouldMakeExe)
if makeBashExe:
objects = allObjects[:]
objects.append("-lsstmac_main")
addLink(ctx, ldTarget + "_validate", args, cmds, objects, toExe=True)
rc = runAllCmds(cmds, verbose, delTempSourceFiles, delTempObjectFiles, clangBin)
if not rc == 0: return rc
if makeBashExe:
rc = createBashWrapper(compiler, exeName, ldTarget, sstCore, sstmacExe)
if not rc == 0: return rc
return 0
| 22,732 | 36.637417 | 151 |
py
|
sst-macro
|
sst-macro-master/bin/tools/configlib.py
|
"""
Copyright 2009-2023 National Technology and Engineering Solutions of Sandia,
LLC (NTESS). Under the terms of Contract DE-NA-0003525, the U.S. Government
retains certain rights in this software.
Sandia National Laboratories is a multimission laboratory managed and operated
by National Technology and Engineering Solutions of Sandia, LLC., a wholly
owned subsidiary of Honeywell International, Inc., for the U.S. Department of
Energy's National Nuclear Security Administration under contract DE-NA0003525.
Copyright (c) 2009-2023, NTESS
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Questions? Contact [email protected]
"""
import sys
def getoutput3(cmd, stdin=None, pipe=None):
    """Run *cmd* and return its combined stdout+stderr as a stripped string.

    Python-3 implementation (returns None on Python 2; getoutput2 handles that
    case, and the module-level dispatch below picks the right one).

    Args:
        cmd: command line as a single string; split on whitespace (no shell).
        stdin: optional path of a file to feed to the command's stdin.
        pipe: optional argv list of a producer command whose stdout is piped
            into *cmd* (mutually exclusive with stdin; stdin wins).

    Returns:
        The command output with the trailing newline stripped, or None on
        Python 2.

    Raises:
        subprocess.CalledProcessError: if the command exits non-zero.
    """
    if sys.version_info < (3, 0):
        return None  # Python 2 callers use getoutput2 instead
    from subprocess import check_output, STDOUT, Popen, PIPE
    stdin_file = None
    producer = None
    try:
        if stdin:
            stdin_file = open(stdin)
        elif pipe:
            producer = Popen(pipe, stdout=PIPE)
            stdin_file = producer.stdout
        result = check_output(cmd.split(), stdin=stdin_file,
                              stderr=STDOUT).decode("utf-8").rstrip("\n")
        if producer:
            producer.wait()
        return result
    finally:
        # BUG FIX: the original opened the stdin file (or pipe stdout) and
        # never closed it, leaking a file descriptor per call.
        if stdin_file is not None:
            stdin_file.close()
def getoutput2(cmd, stdin=None, pipe=None):
    """Python-2 implementation of getoutput using the `commands` module.

    Builds a shell command line with optional `< file` redirection or a
    `producer | cmd` prefix. Returns None when running under Python 3
    (getoutput3 covers that case).
    """
    if not (sys.version_info < (3, 0)):
        return None
    import commands
    if stdin:
        cmd = cmd + " < %s" % stdin
    elif pipe:
        pieces = []
        for item in pipe:
            # Quote any argument containing a space for the shell.
            pieces.append("'%s'" % item if " " in item else item)
        cmd = " ".join(pieces) + " | " + cmd
    return commands.getoutput(cmd)
# Bind the version-appropriate implementation to the public name; both
# share the signature getoutput(cmd, stdin=None, pipe=None).
getoutput = None
if sys.version_info < (3,0):
    getoutput = getoutput2
else:
    getoutput = getoutput3
| 3,124 | 36.650602 | 93 |
py
|
sst-macro
|
sst-macro-master/bin/tools/opa_snapshot_route_tracer.py
|
#!/usr/bin/env python3
__license__ = """
Copyright 2009-2023 National Technology and Engineering Solutions of Sandia,
LLC (NTESS). Under the terms of Contract DE-NA-0003525, the U.S. Government
retains certain rights in this software.
Sandia National Laboratories is a multimission laboratory managed and operated
by National Technology and Engineering Solutions of Sandia, LLC., a wholly
owned subsidiary of Honeywell International, Inc., for the U.S. Department of
Energy's National Nuclear Security Administration under contract DE-NA0003525.
Copyright (c) 2009-2023, NTESS
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Questions? Contact [email protected]
"""
import argparse, json
def parse_args():
    """Define the route-tracer CLI and parse sys.argv into a namespace."""
    cli = argparse.ArgumentParser(
        description='Traces network routes between nodes using json output from opareport_snapshot_parser.py')
    cli.add_argument('-l', '--link_delim', default='\n',
                     help='Delimiter to use between fabric hops (including intraswitch)')
    cli.add_argument('-p', '--port_delim', default=':',
                     help='Delimiter to use between port and hostname')
    cli.add_argument('FILE', help='JSON output from opareport_snapshot_parser.py')
    cli.add_argument('START', help='Beginning of the route to trace')
    cli.add_argument('END', help='End of the route to trace')
    return cli.parse_args()
def gen_global_tables(f_name):
    """Load the snapshot-parser JSON file and publish its six lookup tables
    as module globals used by the tracing functions below."""
    global routing_table, name_guid_map, guid_name_map
    global guid_lid_map, lid_guid_map, link_table
    with open(f_name) as handle:
        snapshot = json.load(handle)
    routing_table = snapshot['routing_table']
    name_guid_map = snapshot['name_guid_map']
    guid_name_map = snapshot['guid_name_map']
    guid_lid_map = snapshot['guid_lid_map']
    lid_guid_map = snapshot['lid_guid_map']
    link_table = snapshot['link_table']
def get_link(guid, port):
    """Follow a cable: (guid, port) -> (peer guid, peer port), or False if the
    GUID is not present in the link table (JSON keys are strings)."""
    key = str(guid)
    if key in link_table:
        return link_table[key][str(port)]
    return False
def trace_route(start_lid, dest_lid):
    """Walk the fabric from start_lid toward dest_lid.

    Returns the hop list [(guid, egress_port), ...] ending at the destination.
    Nodes without a routing-table entry (endpoints) default to port 1.
    """
    destination = lid_guid_map[str(dest_lid)]
    current = lid_guid_map[str(start_lid)]
    hops = []
    while current != destination:
        key = str(current)
        # Endpoints do not appear in the switch routing table; they have a
        # single fabric port, conventionally port 1.
        port = routing_table[key][str(dest_lid)] if key in routing_table else 1
        hops.append((current, port))
        current, port = get_link(current, port)
    hops.append((current, port))
    return hops
def trace_route_from_name(start_name, dest_name):
    """Resolve both endpoint names to LIDs, then trace the route between them."""
    start_lid = guid_lid_map[str(name_guid_map[start_name])]
    dest_lid = guid_lid_map[str(name_guid_map[dest_name])]
    return trace_route(start_lid, dest_lid)
def print_route(start_name, dest_name, port_delim=':', link_delim='\n', padding=0):
    """Print the route as '<name><port_delim><port>' hops joined by link_delim.

    NOTE(review): `padding` is accepted but unused, preserved for interface
    compatibility with existing callers.
    """
    pieces = []
    for hop_guid, hop_port in trace_route_from_name(start_name, dest_name):
        pieces.append('%s%s%i' % (guid_name_map[str(hop_guid)], port_delim, hop_port))
    print(link_delim.join(pieces))
if __name__ == '__main__':
    # Parse CLI arguments, load the snapshot tables into module globals,
    # then print the traced route between the two named endpoints.
    args = parse_args()
    gen_global_tables(args.FILE)
    print_route(args.START,
                args.END,
                link_delim=args.link_delim,
                port_delim=args.port_delim)
| 4,636 | 37.966387 | 139 |
py
|
sst-macro
|
sst-macro-master/bin/tools/ref.py
|
#! /usr/bin/env python
__license__ = """
Copyright 2009-2023 National Technology and Engineering Solutions of Sandia,
LLC (NTESS). Under the terms of Contract DE-NA-0003525, the U.S. Government
retains certain rights in this software.
Sandia National Laboratories is a multimission laboratory managed and operated
by National Technology and Engineering Solutions of Sandia, LLC., a wholly
owned subsidiary of Honeywell International, Inc., for the U.S. Department of
Energy's National Nuclear Security Administration under contract DE-NA0003525.
Copyright (c) 2009-2023, NTESS
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Questions? Contact [email protected]
"""
import os

# Whitespace-separated listing, one entry per line; each line's first token
# is a path to a .chk output file. Empty here — presumably populated
# externally when this template is instantiated (TODO confirm).
files = """
"""
def check_files(fxn):
    """Locate the reference directory from ../config.status and invoke
    fxn(tmp_name, ref_path) for every file listed in the module-level
    `files` template.

    Args:
        fxn: callable taking (generated .tmp file name, reference file path).

    Exits the process if config.status is missing or lacks a srcdir entry.
    """
    # BUG FIX: `sys` is used below but this module only imports `os`;
    # without this import every error path raised NameError instead of
    # exiting with a message.
    import sys
    import re
    configStatus = ""
    try:
        configStatus = open("../config.status").read()
    except (IOError, OSError):
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed.
        sys.exit("could not find valid config.status file")
    match = re.compile("srcdir=(.*)").search(configStatus)
    if not match:
        sys.exit("could not locate srcdir in config.status")
    srcdir = match.groups()[0].strip().strip('"').strip("'")
    refdir = os.path.join(srcdir, "tests", "reference")
    for f in files.strip().splitlines():
        path = f.strip().split()[0]
        # chk outputs become tmp files; the matching reference is the .ref name.
        fname = os.path.split(path)[-1].replace("chk", "tmp")
        ref = os.path.join(refdir, fname.replace("tmp", "ref"))
        fxn(fname, ref)
| 2,790 | 36.716216 | 81 |
py
|
sst-macro
|
sst-macro-master/bin/tools/opareport_snapshot_parser.py
|
#!/usr/bin/env python3
__license__ = """
Copyright 2009-2023 National Technology and Engineering Solutions of Sandia,
LLC (NTESS). Under the terms of Contract DE-NA-0003525, the U.S. Government
retains certain rights in this software.
Sandia National Laboratories is a multimission laboratory managed and operated
by National Technology and Engineering Solutions of Sandia, LLC., a wholly
owned subsidiary of Honeywell International, Inc., for the U.S. Department of
Energy's National Nuclear Security Administration under contract DE-NA0003525.
Copyright (c) 2009-2023, NTESS
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Questions? Contact [email protected]
"""
'''
Loads a snapshot and parses parameters out of the fabric.
Snapshots should be generated with the following
```bash
opareport -o snapshot --route > snapshot-$(date -Iminutes).xml
```
Terminology
- LID (Local ID)
- Represented as hex, but converted into integers in this script
- Nodes will have one LID, and switches will have one per port
- GUID (Global Unique ID)
- Represented as hex, but converted into integers in this script
- Name
- A string identifier, similar to a hostname.
- Switch names tend to be "{hostname}{GUID}"
- Node names tend to be "{hostname} {Interface}" (note the space)
- In every example I've seen, Names have a unique mapping to GUIDs
- Link
- Snapshot enumerates connections between fabric nodes
- Each consist of two (GUID, Port) pairs
'''
import argparse
import collections
import collections.abc
import sys
import xml.etree.ElementTree as ET

from collections import defaultdict
def parse_args():
    """Build the snapshot-parser CLI and parse sys.argv.

    Each flag selects one table to include in the output dictionary; -a/--all
    enables the main lookup tables at once.
    """
    parser = argparse.ArgumentParser(description='Parses reports generated by "opareport -o snapshot --route" into a JSON or YAML dictionary. Each enabled flag will populate out a key')
    parser.add_argument('-r', '--routing_table', action='store_true', help='Switch routing tables: GUID -> (lid -> port)')
    parser.add_argument('--named_routing_table', action='store_true', help='Switch routing tables with Names: Name -> (Name -> port)')
    parser.add_argument('-l', '--link_table', action='store_true', help='Fabric link table: GUID -> Port -> (GUID, Port)')
    parser.add_argument('-n', '--name_guid_map', action='store_true', help='Name to GUID map: Name -> GUID')
    parser.add_argument('-g', '--guid_name_map', action='store_true', help='GUID to Name map: GUID -> Name')
    parser.add_argument('-N', '--guid_lid_map', action='store_true', help='Node to LID map: GUID -> LID')
    parser.add_argument('-L', '--lid_guid_map', action='store_true', help='LID to Node map: LID -> GUID')
    parser.add_argument('-s', '--shorten_name', action='store_true', help='Shortens string names')
    parser.add_argument('--switch_links', metavar='SWITCH_NAME', help='Print switch links')
    parser.add_argument('--node_links', metavar='NODE_NAME', help='Print node links')
    parser.add_argument('-a', '--all', action='store_true', help='The kitchen sink!')
    parser.add_argument('-o', choices={'json', 'yaml'}, default='json', help='Output format')
    parser.add_argument('FILE', help='opareport xml file name.')
    return parser.parse_args()
def parse_xml_file(fname):
    """Parse an opareport snapshot XML file and return its root element."""
    tree = ET.parse(fname)
    return tree.getroot()
def parse_hex(lid):
    """Convert an opareport hex string like '0x2a' (or a decimal string)
    to an int; base 0 auto-detects the prefix."""
    value = int(lid, 0)
    return value
def get_guid(xml_element):
    """Return the element's 'id' attribute parsed as an integer GUID."""
    # base 0 auto-detects the 0x prefix used by opareport
    return int(xml_element.attrib['id'], 0)
def xml_is_switch(xml_element):
    """True when the node's NodeType_Int marks it as a switch (type 2)."""
    node_type = xml_element.find('./NodeType_Int')
    return node_type.text == '2'
def get_nodes(xml_root):
    """Yield every non-switch <Node> element (endpoints/HFIs)."""
    for node in xml_root.findall('./Nodes/Node'):
        # NodeType_Int == '2' identifies switches; everything else is a node.
        if node.find('./NodeType_Int').text != '2':
            yield node
def get_switches(xml_root):
    """Yield every switch <Node> element (NodeType_Int == '2')."""
    for node in xml_root.findall('./Nodes/Node'):
        if node.find('./NodeType_Int').text == '2':
            yield node
def get_links(xml_root):
    """Yield every <Link> element under <Links>."""
    yield from xml_root.findall('./Links/Link')
def gen_link_table(xml_root):
    '''guid -> port -> (guid, port)

    Builds a bidirectional cable map from the snapshot's <Links> section so
    either endpoint of each cable can be looked up.
    '''
    table = defaultdict(dict)
    for link in xml_root.findall('./Links/Link'):
        guid_a = int(link.find('./From/NodeGUID').text, 0)
        port_a = int(link.find('./From/PortNum').text)
        guid_b = int(link.find('./To/NodeGUID').text, 0)
        port_b = int(link.find('./To/PortNum').text)
        # Record both directions of the cable.
        table[guid_a][port_a] = (guid_b, port_b)
        table[guid_b][port_b] = (guid_a, port_a)
    # convert to a plain dict to play more nicely with the serializer
    return dict(table)
def get_link(guid, port):
    '''link table wrapper function for simplicity

    Returns the (peer guid, peer port) tuple for the given endpoint, or
    False when the GUID is not in the module-level link_table.
    '''
    # BUG FIX: the original tested the undefined name `key`, which raised
    # NameError on every call; the intended membership test is on `guid`.
    return link_table[guid][port] if guid in link_table else False
def gen_routing_tables(xml_root):
    '''Switch GUID -> (lid -> port) Map, read from each switch's LinearFDB.'''
    tables = {}
    for node in xml_root.findall('./Nodes/Node'):
        # Only switches (NodeType_Int == '2') carry forwarding tables.
        if node.find('./NodeType_Int').text != '2':
            continue
        guid = int(node.attrib['id'], 0)
        fdb = {}
        for entry in node.findall('./SwitchData/LinearFDB/'):
            fdb[int(entry.attrib['LID'], 0)] = int(entry.text)
        tables[guid] = fdb
    return tables
def iter_node_lids(xml_root):
    '''(node GUID, LID) generator over every non-switch node.'''
    for node in xml_root.findall('./Nodes/Node'):
        if node.find('./NodeType_Int').text == '2':
            continue  # switches are handled elsewhere
        lid_text = node.find('./PortInfo/LID').text
        yield (int(node.attrib['id'], 0), int(lid_text, 0))
def gen_node_lid_map(xml_root):
    '''node GUID -> LID'''
    return dict(iter_node_lids(xml_root))
def gen_lid_node_map(xml_root):
    '''LID -> node GUID'''
    return {lid: guid for guid, lid in iter_node_lids(xml_root)}
def iter_node_name_guids(xml_root):
    '''(name, GUID) generator over every fabric node (switches included).'''
    for node in xml_root.findall('./Nodes/Node'):
        yield (node.find('./NodeDesc').text, int(node.attrib['id'], 0))
def parse_if(true, name):
    '''Parses a node name when the first argument is true (keeps only the
    first whitespace-separated token); returns it unchanged otherwise.'''
    if true:
        return name.split()[0]
    return name
def gen_guid_name_map(xml_root, parse_name=False):
    '''GUID -> name; with parse_name, names are shortened to the first token.'''
    mapping = {}
    for node in xml_root.findall('./Nodes/Node'):
        name = node.find('./NodeDesc').text
        if parse_name:
            name = name.split()[0]
        mapping[int(node.attrib['id'], 0)] = name
    return mapping
def gen_name_guid_map(xml_root, parse_name=False):
    '''name -> GUID; with parse_name, names are shortened to the first token.'''
    mapping = {}
    for node in xml_root.findall('./Nodes/Node'):
        name = node.find('./NodeDesc').text
        if parse_name:
            name = name.split()[0]
        mapping[name] = int(node.attrib['id'], 0)
    return mapping
def switch_port_for_lid(switch_id, target_lid):
    '''For a given named switch and target LID, find the port to route to.

    Relies on the module-level routing_table being populated.
    '''
    lid_to_port = routing_table[switch_id]
    return lid_to_port[target_lid]
def sst_map(name_prefix, guid_name_map, link_table):
    '''Generates a dictionary that can be parsed by SST.

    Returns (name_map, unmapped): name_map maps every fabric node whose name
    contains name_prefix to its 0-based outport connections; unmapped is the
    set of names that did not match the prefix.
    '''
    name_map = {}
    unmapped = set()
    for node_guid, name in guid_name_map.items():
        # Names lacking the prefix belong to the other category (node/switch).
        if name_prefix not in name:
            unmapped.add(name)
            continue
        outports = {}
        name_map[name] = {'outports': outports}
        for outport, (peer_guid, peer_inport) in link_table[node_guid].items():
            # opareport ports are 1-based; SST expects 0-based indices.
            outports[str(outport - 1)] = {
                'destination': guid_name_map[peer_guid],
                'inport': peer_inport - 1}
    return (name_map, unmapped)
def named_routing_tables(routing_table, guid_name_map, lid_guid_map):
    '''Generates a routing table with names instead of GUIDs.

    Output shape: switch name -> {'routes': {destination name: 0-based port}}.
    '''
    named = {}
    for switch_guid, lid_port_map in routing_table.items():
        routes = {}
        named[guid_name_map[switch_guid]] = {'routes': routes}
        for lid, port in lid_port_map.items():
            # Switches don't have LIDs, so skip entries with no endpoint GUID.
            if lid in lid_guid_map:
                routes[guid_name_map[lid_guid_map[lid]]] = port - 1
    return named
# https://gist.github.com/angstwad/bf22d1822c38a92ec0a9#gistcomment-1986197
def dict_merge(dct, merge_dct, add_keys=True):
    '''Recursively merge merge_dct into a shallow copy of dct and return it.

    Nested dicts are merged key by key; scalar values in merge_dct overwrite
    those in dct. When add_keys is False, keys present only in merge_dct are
    dropped (at every nesting level). Neither input is mutated (top level).
    '''
    dct = dct.copy()
    if not add_keys:
        merge_dct = {
            k: merge_dct[k]
            for k in set(dct).intersection(set(merge_dct))
        }
    for k, v in merge_dct.items():
        # BUG FIX: collections.Mapping was removed in Python 3.10 (deprecated
        # since 3.3); the ABC lives in collections.abc.
        if (k in dct and isinstance(dct[k], dict)
                and isinstance(merge_dct[k], collections.abc.Mapping)):
            dct[k] = dict_merge(dct[k], merge_dct[k], add_keys=add_keys)
        else:
            dct[k] = merge_dct[k]
    return dct
if __name__ == '__main__':
    # Each CLI flag selects one table to compute from the snapshot XML;
    # the resulting dictionary is serialized as JSON or YAML at the end.
    args = parse_args()
    xml_root = parse_xml_file(args.FILE)
    out_dict = {}
    if args.routing_table or args.all:
        out_dict['routing_table'] = gen_routing_tables(xml_root)
    if args.link_table or args.all:
        out_dict['link_table'] = gen_link_table(xml_root)
    if args.name_guid_map or args.all:
        out_dict['name_guid_map'] = gen_name_guid_map(xml_root, args.shorten_name)
    if args.guid_name_map or args.all:
        out_dict['guid_name_map'] = gen_guid_name_map(xml_root, args.shorten_name)
    if args.guid_lid_map or args.all:
        out_dict['guid_lid_map'] = gen_node_lid_map(xml_root)
    if args.lid_guid_map or args.all:
        out_dict['lid_guid_map'] = gen_lid_node_map(xml_root)
    # switch_links / node_links take a name prefix that splits the fabric
    # into switches vs. nodes; names matching neither are reported below.
    if args.switch_links:
        (out_dict['switches'], unmapped_sw) = sst_map(args.switch_links,
            gen_guid_name_map(xml_root, args.shorten_name),
            gen_link_table(xml_root))
    if args.node_links:
        (out_dict['nodes'], unmapped_nd) = sst_map(args.node_links,
            gen_guid_name_map(xml_root, args.shorten_name),
            gen_link_table(xml_root))
    if args.named_routing_table:
        output = named_routing_tables(gen_routing_tables(xml_root),
            gen_guid_name_map(xml_root, args.shorten_name),
            gen_lid_node_map(xml_root))
        # If switch_links are used, merge the routes into the existing
        # per-switch entries so each switch carries both links and routes.
        out_dict['switches'] = dict_merge(out_dict['switches'], output) if args.switch_links else output
    # List fabric nodes that did not match either prefix
    if args.node_links and args.switch_links:
        unmapped = unmapped_sw.intersection(unmapped_nd)
        if len(unmapped) > 0:
            print('Warning some fabric nodes unmapped:', file=sys.stderr)
            [print(' %s' % name, file=sys.stderr) for name in unmapped]
    if args.o == 'json':
        from json import dumps
        print(dumps(out_dict, sort_keys=True, indent=2))
    elif args.o == 'yaml':
        try:
            from yaml import safe_dump # safe_dump drops the annoying type tags
        except:
            print('ERROR: please install PyYAML for yaml output', file=sys.stderr)
            exit()
        print(safe_dump(out_dict, default_flow_style=False))
| 11,770 | 38.106312 | 185 |
py
|
sst-macro
|
sst-macro-master/bin/config_tools/configlib.py
|
"""
Copyright 2009-2023 National Technology and Engineering Solutions of Sandia,
LLC (NTESS). Under the terms of Contract DE-NA-0003525, the U.S. Government
retains certain rights in this software.
Sandia National Laboratories is a multimission laboratory managed and operated
by National Technology and Engineering Solutions of Sandia, LLC., a wholly
owned subsidiary of Honeywell International, Inc., for the U.S. Department of
Energy's National Nuclear Security Administration under contract DE-NA0003525.
Copyright (c) 2009-2023, NTESS
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Questions? Contact [email protected]
"""
import sys
def getstatusoutput3(cmd,stdin=None,pipe=None):
    """Run *cmd* and return (status, combined stdout+stderr) on Python 3.

    stdin: optional path of a file fed to the command's stdin.
    pipe: optional argv list of a producer command piped into *cmd*.
    Returns None implicitly on Python 2 (getstatusoutput2 covers that case).

    NOTE(review): when the command produces no output at all this returns
    (1, "") regardless of the real exit status — looks intentional but
    confirm before relying on the status for empty-output commands.
    NOTE(review): a file opened for stdin is never closed here.
    """
    if not (sys.version_info < (3,0)):
        try:
            # Python-3-only subprocess-based implementation.
            from subprocess import check_output,STDOUT,Popen,PIPE
            if stdin:
                stdin = open(stdin)
            result = None
            child = None
            if pipe:
                p1 = Popen(pipe, stdout=PIPE)
                child = Popen(cmd.split(), stdin=p1.stdout, stdout=PIPE, stderr=STDOUT)
                p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
                result, stderr = child.communicate()
            else:
                #result = check_output(cmd.split(),stdin=stdin,stderr=STDOUT)
                child = Popen(cmd.split(), stdin=stdin, stdout=PIPE, stderr=STDOUT)
                result, stderr = child.communicate()
            # communicate() returns bytes; decode and strip trailing newline.
            if not result:
                return 1, ""
            else:
                return child.returncode, result.decode("utf-8").rstrip("\n")
        except Exception as e:
            # Report the failing command (and its stdin/pipe) before re-raising.
            sys.stderr.write("FAILED: %s" % cmd)
            if stdin:
                sys.stderr.write(" stdin=%s" % stdin)
            elif pipe:
                sys.stderr.write(" pipe=%s" % pipe)
            sys.stderr.write("\n")
            raise e
def getoutput3(cmd, stdin=None, pipe=None):
    """Output-only wrapper around getstatusoutput3; discards the status."""
    return getstatusoutput3(cmd, stdin, pipe)[1]
def get_cmd_from_pipe2(cmd, stdin, pipe):
    """Render a shell command line: cmd plus optional '< file' redirection,
    or a 'producer | cmd' prefix built from the pipe argv list."""
    if stdin:
        return cmd + " < %s" % stdin
    if pipe:
        pieces = []
        for item in pipe:
            # Quote any argument containing a space for the shell.
            pieces.append("'%s'" % item if " " in item else item)
        return " ".join(pieces) + " | " + cmd
    return cmd
def getoutput2(cmd, stdin=None, pipe=None):
    """Python-2 implementation using the `commands` module; returns None
    when running under Python 3 (getoutput3 covers that case)."""
    if not (sys.version_info < (3, 0)):
        return None
    import commands
    return commands.getoutput(get_cmd_from_pipe2(cmd, stdin, pipe))
def getstatusoutput2(cmd, stdin=None, pipe=None):
    """Python-2 implementation returning (status, output) via the `commands`
    module; returns None when running under Python 3."""
    if not (sys.version_info < (3, 0)):
        return None
    import commands
    return commands.getstatusoutput(get_cmd_from_pipe2(cmd, stdin, pipe))
# Bind the version-appropriate implementations to the public names; all
# share the signature f(cmd, stdin=None, pipe=None).
getoutput = None
getstatusoutput = None
if sys.version_info < (3,0):
    getoutput = getoutput2
    getstatusoutput = getstatusoutput2
else:
    getstatusoutput = getstatusoutput3
    getoutput = getoutput3
| 4,253 | 35.672414 | 81 |
py
|
sst-macro
|
sst-macro-master/bin/plots/syncplot.py
|
import re
import os
import matplotlib.pyplot as plt
import numpy as np
import re
import sys
# Collapse raw call-graph labels into the coarse categories used for
# plotting. NOTE: several keys are repeated; duplicate literal dict keys
# are harmless here because every duplicate maps to the same value.
remap = {
    "MPI_Waitsome" : "MPI_Wait",
    "ComputeTime" : "Compute",
    "ComputeInstructions" : "Compute",
    "MPIEager1Protocol_Handle_RDMA_Header" : "MPI Compute",
    "MPIEager1Protocol_Handle_RDMA_Payload" : "MPI Compute",
    "MPIEager0Protocol_Handle_Header" : "MPI Compute",
    "MPIRendezvousProtocol_RDMA_Configure_Buffer" : "MPI Compute",
    "MPIEager1Protocol_Handle_RDMA_Header" : "MPI Compute",
    "MPIEager1Protocol_Handle_RDMA_Payload" : "MPI Compute",
    "MPIEager1Protocol_Handle_RDMA_Payload" : "MPI Compute",
    "MPIEager0Protocol_Handle_Header" : "MPI Compute",
    "MPIRendezvousProtocol_RDMA_Configure_Buffer" : "MPI Compute",
    "MPIRendezvousProtocol_RDMA_Send_Header" : "MPI Compute",
    "MPIRendezvousProtocol_RDMA_Handle_Header" : "MPI Compute",
    "MPIRendezvousProtocol_RDMA_Handle_Payload" : "MPI Compute",
    "MPIEager1Protocol_Send_RDMA_Header" : "MPI Compute",
    "MPIEager0Protocol_Send_Header" : "MPI Compute",
    "MPIRendezvousProtocol_RDMA_Send_Header" : "MPI Compute",
    "MPIQueuePostRDMARequest" : "MPI Compute",
    "MPIQueuePostHeader" : "MPI Compute",
}
class Rank:
    """Accumulated per-rank timing totals parsed from a call-graph summary.

    All *Fraction methods accept an optional normalization value; any falsy
    norm (None or 0) falls back to the rank's own total time.
    """

    def __init__(self):
        self.bars = {}       # MPI function name -> Bar
        self.totalMPI = 0    # time spent inside all MPI calls
        self.totalSync = 0   # synchronization portion of the MPI time
        self.comp = 0        # compute time recorded in main
        self.compMPI = 0     # compute time attributed to MPI internals

    def total(self):
        """Overall time: MPI plus main compute, as a float."""
        return float(self.totalMPI + self.comp)

    def mpiFraction(self, norm=None):
        """Fraction of time spent in MPI calls."""
        denom = norm if norm else float(self.totalMPI + self.comp)
        return float(self.totalMPI) / denom

    def commFraction(self, norm=None):
        """Fraction of time spent communicating (MPI minus sync)."""
        denom = norm if norm else float(self.totalMPI + self.comp)
        return float(self.totalMPI - self.totalSync) / denom

    def syncFraction(self, norm=None):
        """Fraction of time spent synchronizing."""
        denom = norm if norm else float(self.totalMPI + self.comp)
        return float(self.totalSync) / denom

    def compFraction(self, norm=None):
        """Fraction of time spent computing in main."""
        denom = norm if norm else float(self.totalMPI + self.comp)
        return float(self.comp) / denom

    def __bool__(self):
        # Truthy once at least one MPI bar has been recorded.
        return len(self.bars) > 0
class Bar:
    """Time breakdown for a single MPI call in the stacked-bar plot."""

    def __init__(self, name, total):
        self.name = name    # MPI function name
        self.total = total  # total time attributed to this call
        self.comp = 0       # compute time inside the call
        self.comm = 0       # communication time inside the call
        self.sync = 0       # synchronization time inside the call

    def __repr__(self):
        # "<total>:<sync>" summary used when debugging parses
        return "%d:%d" % (self.total, self.sync)
def parse(fname):
print "parsing", fname
folder, ignore = os.path.split(fname)
text = open(fname).read()
rank = Rank()
currentBar = None
redundantCompute = 0
started = False
inMain = False
for line in text.strip().splitlines():
if "Call Graph Summary" in line:
started = True
continue
if not started:
continue
if "Estimated total" in line:
started = False
continue
if line.startswith(" "):
name, count = line.strip().split()
name = name.strip()
if name in remap:
name = remap[name]
count = int(count)
if inMain:
if name == "Compute":
rank.comp += count
else:
if "Finalize" in name:
continue
if "MPI_" in name:
currentBar = Bar(name,count)
rank.bars[name] = currentBar
rank.totalMPI += count
elif currentBar: #not in main, but I guess another MPI call
if name == "MPI Compute":
currentBar.comp += count
rank.compMPI += count
elif name == "Compute":
currentBar.comp += count
rank.compMPI += count
elif name == "memcopy":
currentBar.comp += count
rank.compMPI += count
elif name == "sync":
currentBar.sync += count
else:
inMain = False
entries = line.strip().split()
fxn = " ".join(entries[:-2])
if fxn in remap:
fxn = remap[fxn]
self = eval(entries[-1])
total = eval(entries[-2])
if "MPI_" in fxn:
if rank.bars.has_key(fxn):
currentBar = rank.bars[fxn]
#elif fxn == "MPI Compute":
# rank.compMPI += total
elif fxn == "sync":
rank.totalSync += total
elif fxn == "main":
inMain = True
else:
currentBar = None
rank.totalComm = rank.totalMPI - rank.totalSync - rank.compMPI
cwd = os.getcwd()
if folder:
os.chdir(folder)
if folder:
os.chdir(cwd)
return rank
def plotBars(data, title=None, output=None):
  """Render a stacked-bar breakdown of where a Rank spent its time.

  The first bar ("Total") shows the whole run split into network, sync and
  MPI-stack compute fractions (left axis), plus an application-compute bar
  on a twin axis; one bar follows for each of the (up to) five most
  expensive MPI functions, normalized to total MPI time.

  data   -- a Rank produced by parse()
  title  -- optional plot title
  output -- optional filename; if given the figure is saved, otherwise shown

  Fixes vs. the original: dict.keys() has no .sort() and range() has no
  .insert() on Python 3; ecolor='block' was a typo for 'black'; removed
  the unused thk/idx/colorIdx locals.
  """
  main = data
  fig = plt.figure()
  ax = fig.add_subplot(111)
  colors = [
    "#afeeee", #pale turquoise
    '#f5deb3', #pale wheat
    "#cc99ff", #purple
    "green",
    'red',
    "#ffcc99", #orange
    'cyan',
    'yellow',
    'magenta',
  ]
  barWidth = 0.25
  fxns = sorted(main.bars)  # py3: dict views cannot be sorted in place
  mainTotal = float(main.totalMPI + main.comp)
  mainMPI = float(main.totalMPI)
  totalSync = main.totalSync / mainTotal
  totalComm = main.totalComm / mainTotal
  totalMPIComp = main.compMPI / mainTotal
  totalMPI = main.totalMPI / mainTotal
  totalComp = main.comp / mainTotal
  comms = [totalComm]
  syncs = [totalSync]
  comps = [totalMPIComp]
  xlabels = ["Total"]
  #just take the 5 top functions
  maxFxns = min(5, len(fxns))
  sorter = [(main.bars[f].total, f) for f in fxns]
  sorter.sort()
  sorter.reverse()
  for ignore, f in sorter[:maxFxns]:
    b = main.bars[f]
    # Per-function fractions are normalized to MPI time, not total time.
    comms.append((b.total - b.sync - b.comp) / mainMPI)
    syncs.append(b.sync / mainMPI)
    comps.append(b.comp / mainMPI)
    xlabels.append(f.replace("MPI_", ""))
  # py3: range() is immutable; build the x positions as a plain list with
  # the "Total" bar offset to the left of the divider line.
  xs = [0.6] + list(range(2, maxFxns + 2))
  comms = np.array(comms)
  syncs = np.array(syncs)
  comps = np.array(comps)
  # 'black' (the original said 'block', which is not a color)
  commBar = ax.bar(xs, comms, barWidth, color=colors[0], ecolor='black')
  syncBar = ax.bar(xs, syncs, barWidth, color=colors[1], ecolor='black', hatch='///', bottom=comms)
  mpiStackBar = ax.bar(xs, comps, barWidth, color=colors[2], ecolor='black', bottom=comms+syncs)
  ax.set_ylabel("Fraction Total Time")
  #now add the bars for actual computation, not MPI stack
  ax2 = ax.twinx()
  compBar = ax2.bar([0.6], [totalComp], barWidth, color=colors[3], ecolor='black', bottom=[totalMPI])
  ax2.set_ylabel("Fraction MPI Time")
  ax2.tick_params('y', length=0, labelright=False)
  ax2.set_ylim([0, 1])
  plt.xticks(xs, xlabels)
  # Divider between the "Total" bar and the per-function bars.
  plt.axvline(1.5, color='black', lw=5, ls='dashed')
  ax.legend([commBar, syncBar, mpiStackBar, compBar], ["Network", "Sync", "MPI Stack", "Compute"])
  ax.set_ylim([0, 1])
  plt.xlim([0, maxFxns + 2])
  if title:
    ax.set_title(title)
  if output:
    plt.savefig(output)
  else:
    plt.show()
| 6,559 | 24.038168 | 99 |
py
|
sst-macro
|
sst-macro-master/tests/testsuite_default_sst_macro.py
|
# -*- coding: utf-8 -*-
from sst_unittest import *
from sst_unittest_support import *
################################################################################
# Code to support a single instance module initialize, must be called setUp method
module_init = 0
module_sema = threading.Semaphore()

def initializeTestModule_SingleInstance(class_inst):
    """Run one-time module-level test initialization.

    Safe to call from every test's setUp(): the semaphore serializes
    concurrent callers and module_init ensures the init code runs once.
    """
    global module_init
    # "with" releases the semaphore even if the init code raises; the
    # original manual acquire()/release() pair would deadlock later
    # callers after an exception.
    with module_sema:
        if module_init != 1:
            # Put your single instance Init Code Here
            module_init = 1
################################################################################
class testcase_sst_macro(SSTTestCase):
    """Runs SST-Macro's `make check` / `make installcheck` as SST unit tests."""

    def initializeClass(self, testName):
        # super(type(self), self) recurses forever if this class is ever
        # subclassed; zero-argument super() resolves correctly.
        super().initializeClass(testName)
        # Put test based setup code here. it is called before testing starts
        # NOTE: This method is called once for every test

    def setUp(self):
        super().setUp()
        initializeTestModule_SingleInstance(self)
        # Put test based setup code here. it is called once before every test

    def tearDown(self):
        # Put test based teardown code here. it is called once after every test
        super().tearDown()

#####

    def test_sst_macro_make_check(self):
        self.sst_macro_test_template("check")

    def test_sst_macro_make_installcheck(self):
        self.sst_macro_test_template("installcheck")

#####

    def sst_macro_test_template(self, testcase, testtimeout = 120):
        """Run `make <testcase>` in the Macro element dir and assert success.

        testcase    -- the make target to run ("check" or "installcheck")
        testtimeout -- seconds before the build is considered hung
        """
        # Get the path to the test files
        test_path = self.get_testsuite_dir()
        outdir = self.get_test_output_run_dir()
        tmpdir = self.get_test_output_tmp_dir()

        # The element lives one directory above the testsuite directory.
        MacroElementDir = os.path.abspath("{0}/../".format(test_path))

        # Set the various file paths
        testDataFileName="test_sst_macro_{0}".format(testcase)

        outfile = "{0}/{1}.out".format(outdir, testDataFileName)
        errfile = "{0}/{1}.err".format(outdir, testDataFileName)

        # Launch SST-Macro Test
        oscmd = "make {0}".format(testcase)
        rtn = OSCommand(oscmd, output_file_path = outfile,
                        error_file_path = errfile,
                        set_cwd = MacroElementDir).run(timeout_sec=testtimeout)

        # Look for runtime error conditions
        err_str = "SST-Macro Timed-Out ({0} secs) while running {1}".format(testtimeout, oscmd)
        self.assertFalse(rtn.timeout(), err_str)
        err_str = "SST-Macro returned {0}; while running {1}".format(rtn.result(), oscmd)
        self.assertEqual(rtn.result(), 0, err_str)
| 2,610 | 33.355263 | 95 |
py
|
sst-macro
|
sst-macro-master/tests/api/globals/config.py
|
# Test driver config: load the external skeleton library that provides the
# globals-API test app, then configure the run through the deprecated
# parameter-driven setup path.
import sst.macro
sst.macro.loadLibrary("libsstmac_api_globals_test.so")
sst.macro.setupDeprecated()
| 100 | 24.25 | 54 |
py
|
sst-macro
|
sst-macro-master/tests/api/mpi/config.py
|
# Test driver config: set up the MPI API test through sst.macro's
# deprecated parameter-driven setup path.
import sst.macro
sst.macro.setupDeprecated()
| 45 | 14.333333 | 27 |
py
|
sst-macro
|
sst-macro-master/share/sstlldb.py
|
import lldb
import commands
import optparse
import shlex
def start(debugger, command, result, internal_dict):
    """Arm SST debugging: ignore SIGUSR1, break on sst_gdb_swap (dumping a
    backtrace at each hit), then mark the simulation as debugger-active."""
    setup_commands = (
        "pro handle SIGUSR1 -p true -s false -n false",
        "br set -b sst_gdb_swap",
        "br com add -o bt",
        "expr --ignore-breakpoints false -- sst_gdbSetActive(1)",
    )
    for lldb_cmd in setup_commands:
        debugger.HandleCommand(lldb_cmd)
def select(debugger, command, result, internal_dict):
    """Switch the simulator's active logical rank to the one named by
    *command* (the argument text typed after sst_select)."""
    expr = "expr --ignore-breakpoints false -- sst_gdb_select_rank(%s)"
    debugger.HandleCommand(expr % command)
# And the initialization code to add your commands
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand('command script add -f sst.start sst_start')
debugger.HandleCommand('command script add -f sst.select sst_select')
debugger.HandleCommand('br set -b main')
print('The sst commands have been installed and are ready for use')
| 911 | 37 | 82 |
py
|
sst-macro
|
sst-macro-master/docs/manual/figures/matplotlib/ftq/output_app1.py
|
#!/usr/bin/env python3
# Import the whole plotting stack inside one guard so a missing dependency
# produces a friendly message instead of a traceback.  The original script
# re-imported numpy/matplotlib/argparse unconditionally *after* this guard,
# which defeated its purpose; those duplicate imports are removed, and the
# message now names every third-party dependency.
try:
    import sys
    import numpy as np
    import matplotlib.pyplot as plt
    import argparse
except ImportError:
    print('ImportError caught. Please install numpy and matplotlib')
    exit()
# Getting CLI args
parser = argparse.ArgumentParser()
parser.add_argument('--show', action='store_true', help='display the plot on screen')
parser.add_argument('--title', default='Histogram plot', help='set the title')
parser.add_argument('--eps', action='store_true', help='output .eps file')
parser.add_argument('--pdf', action='store_true', help='output .pdf file')
parser.add_argument('--png', action='store_true', help='output .png file')
parser.add_argument('--svg', action='store_true', help='output .svg file')
args = parser.parse_args()

# Parsing the data file
# NOTE(review): this absolute path is machine-specific; presumably the .dat
# file is generated into the build tree by the simulator -- confirm before use.
file_name='/home/sknigh/code/github/sst-macro/build/output_app1'
with open(file_name + '.dat') as f:
    # First line holds the column names; the rest is a numeric table.
    names = f.readline().split()
    data = np.loadtxt(f, dtype=float).transpose()
    # Row 0 is the time axis; rows 1..-2 are per-category values, normalized
    # by the last row (assumed to be the total -- TODO confirm).
    time, normalized = data[0], np.divide(data[1:-1],data[-1])

# Plot formatting
plt.xlabel('Time (us)')
plt.xlim(time[0], time[-1])
plt.ylim(0,1)
plt.yticks([])
plt.title(args.title)
plt.legend(plt.stackplot(time, normalized), names[1:])

# Saving: each flag is independent, so several formats may be written.
if args.eps: plt.savefig(file_name + '.eps')
if args.pdf: plt.savefig(file_name + '.pdf')
if args.png: plt.savefig(file_name + '.png')
if args.svg: plt.savefig(file_name + '.svg')
if args.show:
    plt.show()
py
|
sst-macro
|
sst-macro-master/skeletons/sst_component_example/run.py
|
import sst
from sst.macro import *
import sst.test

# Wire two "test.dummy_switch" components back-to-back with a 1us link.
latency="1us"
comp1 = sst.Component("1", "test.dummy_switch")
comp1.addParam("id", 1)
comp1.addParam("latency", latency)

comp2 = sst.Component("2", "test.dummy_switch")
comp2.addParam("id", 2)
comp2.addParam("latency", latency)

# Both components use the same port number on their respective ends.
port=0
comp1Id=1
comp2Id=2
# makeBiNetworkLink comes from the sst.macro star import above.
makeBiNetworkLink(comp1,comp1Id,port,
                  comp2,comp2Id,port,
                  latency)
| 413 | 18.714286 | 47 |
py
|
SimCSE
|
SimCSE-main/setup.py
|
import io
from setuptools import setup, find_packages

# The long description shown on PyPI comes straight from the README.
with io.open('./README.md', encoding='utf-8') as f:
    readme = f.read()

# Package metadata for the simcse sentence-embedding tool.
setup(
    name='simcse',
    packages=['simcse'],
    version='0.4',
    license='MIT',
    description='A sentence embedding tool based on SimCSE',
    author='Tianyu Gao, Xingcheng Yao, Danqi Chen',
    author_email='[email protected]',
    url='https://github.com/princeton-nlp/SimCSE',
    download_url='https://github.com/princeton-nlp/SimCSE/archive/refs/tags/0.4.tar.gz',
    keywords=['sentence', 'embedding', 'simcse', 'nlp'],
    # NOTE(review): numpy/scipy are pinned to narrow ranges; presumably for
    # compatibility with the published checkpoints -- confirm before relaxing.
    install_requires=[
        "tqdm",
        "scikit-learn",
        "scipy>=1.5.4,<1.6",
        "transformers",
        "torch",
        "numpy>=1.19.5,<1.20",
        "setuptools"
    ]
)
| 767 | 26.428571 | 88 |
py
|
SimCSE
|
SimCSE-main/evaluation.py
|
import sys
import io, os
import numpy as np
import logging
import argparse
from prettytable import PrettyTable
import torch
import transformers
from transformers import AutoModel, AutoTokenizer
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)

# Set PATHs
PATH_TO_SENTEVAL = './SentEval'
PATH_TO_DATA = './SentEval/data'

# Import SentEval: it is vendored under ./SentEval, so it must be placed on
# sys.path before the import below can succeed.
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
def print_table(task_names, scores):
    """Pretty-print one row of scores under the given column headers."""
    table = PrettyTable()
    table.field_names = task_names
    table.add_row(scores)
    print(table)
def main():
    """Embed sentences with a Transformers checkpoint and evaluate with SentEval.

    Supports three modes (dev/test/fasttest) and several pooling strategies;
    relies on the module-level PATH_TO_DATA constant and senteval import.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name_or_path", type=str,
            help="Transformers' model name or path")
    parser.add_argument("--pooler", type=str,
            choices=['cls', 'cls_before_pooler', 'avg', 'avg_top2', 'avg_first_last'],
            default='cls',
            help="Which pooler to use")
    parser.add_argument("--mode", type=str,
            choices=['dev', 'test', 'fasttest'],
            default='test',
            help="What evaluation mode to use (dev: fast mode, dev results; test: full mode, test results); fasttest: fast mode, test results")
    parser.add_argument("--task_set", type=str,
            choices=['sts', 'transfer', 'full', 'na'],
            default='sts',
            help="What set of tasks to evaluate on. If not 'na', this will override '--tasks'")
    parser.add_argument("--tasks", type=str, nargs='+',
            default=['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
                     'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'TREC', 'MRPC',
                     'SICKRelatedness', 'STSBenchmark'],
            help="Tasks to evaluate on. If '--task_set' is specified, this will be overridden")

    args = parser.parse_args()

    # Load transformers' model checkpoint
    model = AutoModel.from_pretrained(args.model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    # Set up the tasks: any task_set other than 'na' overrides --tasks.
    if args.task_set == 'sts':
        args.tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16', 'STSBenchmark', 'SICKRelatedness']
    elif args.task_set == 'transfer':
        args.tasks = ['MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'TREC', 'MRPC']
    elif args.task_set == 'full':
        args.tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16', 'STSBenchmark', 'SICKRelatedness']
        args.tasks += ['MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'TREC', 'MRPC']

    # Set params for SentEval
    if args.mode == 'dev' or args.mode == 'fasttest':
        # Fast mode: fewer folds and a cheaper classifier configuration.
        params = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
        params['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
                                'tenacity': 3, 'epoch_size': 2}
    elif args.mode == 'test':
        # Full mode
        params = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 10}
        params['classifier'] = {'nhid': 0, 'optim': 'adam', 'batch_size': 64,
                                'tenacity': 5, 'epoch_size': 4}
    else:
        raise NotImplementedError

    # SentEval prepare and batcher callbacks (closures over model/tokenizer).
    def prepare(params, samples):
        # No task-specific preparation is needed for these embeddings.
        return

    def batcher(params, batch, max_length=None):
        # Handle rare token encoding issues in the dataset
        if len(batch) >= 1 and len(batch[0]) >= 1 and isinstance(batch[0][0], bytes):
            batch = [[word.decode('utf-8') for word in s] for s in batch]

        sentences = [' '.join(s) for s in batch]

        # Tokenization
        if max_length is not None:
            batch = tokenizer.batch_encode_plus(
                sentences,
                return_tensors='pt',
                padding=True,
                max_length=max_length,
                truncation=True
            )
        else:
            batch = tokenizer.batch_encode_plus(
                sentences,
                return_tensors='pt',
                padding=True,
            )

        # Move to the correct device
        for k in batch:
            batch[k] = batch[k].to(device)

        # Get raw embeddings
        with torch.no_grad():
            outputs = model(**batch, output_hidden_states=True, return_dict=True)
            last_hidden = outputs.last_hidden_state
            pooler_output = outputs.pooler_output
            hidden_states = outputs.hidden_states

        # Apply different poolers
        if args.pooler == 'cls':
            # There is a linear+activation layer after CLS representation
            return pooler_output.cpu()
        elif args.pooler == 'cls_before_pooler':
            return last_hidden[:, 0].cpu()
        elif args.pooler == "avg":
            # Mean over non-padding tokens of the last layer.
            return ((last_hidden * batch['attention_mask'].unsqueeze(-1)).sum(1) / batch['attention_mask'].sum(-1).unsqueeze(-1)).cpu()
        elif args.pooler == "avg_first_last":
            # hidden_states[0] is the embedding layer, so [1] is the first
            # transformer layer; average first and last layers, then pool.
            first_hidden = hidden_states[1]
            last_hidden = hidden_states[-1]
            pooled_result = ((first_hidden + last_hidden) / 2.0 * batch['attention_mask'].unsqueeze(-1)).sum(1) / batch['attention_mask'].sum(-1).unsqueeze(-1)
            return pooled_result.cpu()
        elif args.pooler == "avg_top2":
            second_last_hidden = hidden_states[-2]
            last_hidden = hidden_states[-1]
            pooled_result = ((last_hidden + second_last_hidden) / 2.0 * batch['attention_mask'].unsqueeze(-1)).sum(1) / batch['attention_mask'].sum(-1).unsqueeze(-1)
            return pooled_result.cpu()
        else:
            raise NotImplementedError

    results = {}

    for task in args.tasks:
        se = senteval.engine.SE(params, batcher, prepare)
        result = se.eval(task)
        results[task] = result

    # Print evaluation results
    if args.mode == 'dev':
        print("------ %s ------" % (args.mode))

        task_names = []
        scores = []
        for task in ['STSBenchmark', 'SICKRelatedness']:
            task_names.append(task)
            if task in results:
                scores.append("%.2f" % (results[task]['dev']['spearman'][0] * 100))
            else:
                scores.append("0.00")
        print_table(task_names, scores)

        task_names = []
        scores = []
        for task in ['MR', 'CR', 'SUBJ', 'MPQA', 'SST2', 'TREC', 'MRPC']:
            task_names.append(task)
            if task in results:
                scores.append("%.2f" % (results[task]['devacc']))
            else:
                scores.append("0.00")
        task_names.append("Avg.")
        scores.append("%.2f" % (sum([float(score) for score in scores]) / len(scores)))
        print_table(task_names, scores)

    elif args.mode == 'test' or args.mode == 'fasttest':
        print("------ %s ------" % (args.mode))

        task_names = []
        scores = []
        for task in ['STS12', 'STS13', 'STS14', 'STS15', 'STS16', 'STSBenchmark', 'SICKRelatedness']:
            task_names.append(task)
            if task in results:
                # STS12-16 report an aggregated 'all' spearman; the others
                # report a per-split correlation object.
                if task in ['STS12', 'STS13', 'STS14', 'STS15', 'STS16']:
                    scores.append("%.2f" % (results[task]['all']['spearman']['all'] * 100))
                else:
                    scores.append("%.2f" % (results[task]['test']['spearman'].correlation * 100))
            else:
                scores.append("0.00")
        task_names.append("Avg.")
        scores.append("%.2f" % (sum([float(score) for score in scores]) / len(scores)))
        print_table(task_names, scores)

        task_names = []
        scores = []
        for task in ['MR', 'CR', 'SUBJ', 'MPQA', 'SST2', 'TREC', 'MRPC']:
            task_names.append(task)
            if task in results:
                scores.append("%.2f" % (results[task]['acc']))
            else:
                scores.append("0.00")
        task_names.append("Avg.")
        scores.append("%.2f" % (sum([float(score) for score in scores]) / len(scores)))
        print_table(task_names, scores)


if __name__ == "__main__":
    main()
| 8,127 | 38.456311 | 165 |
py
|
SimCSE
|
SimCSE-main/simcse_to_huggingface.py
|
"""
Convert SimCSE's checkpoints to Huggingface style.
"""
import argparse
import torch
import os
import json
def main():
    """Convert a SimCSE checkpoint folder (in place) to Huggingface style.

    * renames "mlp" weights to "pooler"
    * strips the "bert." / "roberta." prefixes from parameter names
    * rewrites "...ForCL" architectures in config.json to "...Model"
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", type=str, help="Path of SimCSE checkpoint folder")
    args = parser.parse_args()

    print("SimCSE checkpoint -> Huggingface checkpoint for {}".format(args.path))

    model_path = os.path.join(args.path, "pytorch_model.bin")
    state_dict = torch.load(model_path, map_location=torch.device("cpu"))
    new_state_dict = {}
    for key, param in state_dict.items():
        # Replace "mlp" to "pooler"
        if "mlp" in key:
            key = key.replace("mlp", "pooler")
        # Delete "bert" or "roberta" prefix
        if "bert." in key:
            key = key.replace("bert.", "")
        if "roberta." in key:
            key = key.replace("roberta.", "")
        new_state_dict[key] = param
    torch.save(new_state_dict, model_path)

    # Change architectures in config.json.  Context managers close the file
    # handles deterministically (the original left both opens unclosed).
    config_path = os.path.join(args.path, "config.json")
    with open(config_path) as f:
        config = json.load(f)
    for i in range(len(config["architectures"])):
        config["architectures"][i] = config["architectures"][i].replace("ForCL", "Model")
    with open(config_path, "w") as f:
        json.dump(config, f, indent=2)

if __name__ == "__main__":
    main()
| 1,327 | 29.181818 | 107 |
py
|
SimCSE
|
SimCSE-main/train.py
|
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional, Union, List, Dict, Tuple
import torch
import collections
import random
from datasets import load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorWithPadding,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
EvalPrediction,
BertModel,
BertForPreTraining,
RobertaModel
)
from transformers.tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTrainedTokenizerBase
from transformers.trainer_utils import is_main_process
from transformers.data.data_collator import DataCollatorForLanguageModeling
from transformers.file_utils import cached_property, torch_required, is_torch_available, is_torch_tpu_available
from simcse.models import RobertaForCL, BertForCL
from simcse.trainers import CLTrainer
logger = logging.getLogger(__name__)

# Configs of every masked-LM-capable model in transformers; used to build
# the list of valid values for --model_type in ModelArguments.
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.

    The metadata "help" strings double as CLI documentation: these fields
    are parsed by HfArgumentParser in main().
    """

    # Huggingface's original arguments
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": "The model checkpoint for weights initialization."
            "Don't set if you want to train a model from scratch."
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )

    # SimCSE's arguments (contrastive-learning specific knobs)
    temp: float = field(
        default=0.05,
        metadata={
            "help": "Temperature for softmax."
        }
    )
    pooler_type: str = field(
        default="cls",
        metadata={
            "help": "What kind of pooler to use (cls, cls_before_pooler, avg, avg_top2, avg_first_last)."
        }
    )
    hard_negative_weight: float = field(
        default=0,
        metadata={
            "help": "The **logit** of weight for hard negatives (only effective if hard negatives are used)."
        }
    )
    do_mlm: bool = field(
        default=False,
        metadata={
            "help": "Whether to use MLM auxiliary objective."
        }
    )
    mlm_weight: float = field(
        default=0.1,
        metadata={
            "help": "Weight for MLM auxiliary objective (only effective if --do_mlm)."
        }
    )
    mlp_only_train: bool = field(
        default=False,
        metadata={
            "help": "Use MLP only during training"
        }
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    # Huggingface's original arguments.
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )

    # SimCSE's arguments
    train_file: Optional[str] = field(
        default=None,
        metadata={"help": "The training data file (.txt or .csv)."}
    )
    max_seq_length: Optional[int] = field(
        default=32,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated."
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch."
        },
    )
    mlm_probability: float = field(
        default=0.15,
        metadata={"help": "Ratio of tokens to mask for MLM (only effective if --do_mlm)"}
    )

    def __post_init__(self):
        # BUG FIX: the original read self.validation_file, a field this
        # dataclass never declares, so reaching this branch raised
        # AttributeError instead of the intended ValueError.  getattr with
        # a None default keeps the check (and stays compatible if a
        # validation_file field is ever added).
        if (self.dataset_name is None and self.train_file is None
                and getattr(self, "validation_file", None) is None):
            raise ValueError("Need either a dataset name or a training/validation file.")
        else:
            if self.train_file is not None:
                extension = self.train_file.split(".")[-1]
                assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
@dataclass
class OurTrainingArguments(TrainingArguments):
    """TrainingArguments extended with SimCSE's evaluation flag and a
    custom device-selection override."""

    # Evaluation
    ## By default, we evaluate STS (dev) during training (for selecting best checkpoints) and evaluate
    ## both STS and transfer tasks (dev) at the end of training. Using --eval_transfer will allow evaluating
    ## both STS and transfer tasks (dev) during training.
    eval_transfer: bool = field(
        default=False,
        metadata={"help": "Evaluate transfer task dev sets (in validation)."}
    )

    @cached_property
    @torch_required
    def _setup_devices(self) -> "torch.device":
        """Choose the torch device and set self._n_gpu.

        Priority: --no_cuda -> CPU; TPU if available; single-process
        (local_rank == -1) -> cuda:0 or CPU; otherwise distributed
        (deepspeed or torch.distributed with the NCCL backend).
        """
        logger.info("PyTorch: setting up devices")
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_torch_tpu_available():
            import torch_xla.core.xla_model as xm
            device = xm.xla_device()
            self._n_gpu = 0
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            #
            # deepspeed performs its own DDP internally, and requires the program to be started with:
            # deepspeed ./program.py
            # rather than:
            # python -m torch.distributed.launch --nproc_per_node=2 ./program.py
            if self.deepspeed:
                from .integrations import is_deepspeed_available

                if not is_deepspeed_available():
                    raise ImportError("--deepspeed requires deepspeed: `pip install deepspeed`.")
                import deepspeed

                deepspeed.init_distributed()
            else:
                torch.distributed.init_process_group(backend="nccl")
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, OurTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty."
"Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN,
)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub
#
# For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this
# behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
if extension == "csv":
datasets = load_dataset(extension, data_files=data_files, cache_dir="./data/", delimiter="\t" if "tsv" in data_args.train_file else ",")
else:
datasets = load_dataset(extension, data_files=data_files, cache_dir="./data/")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
if 'roberta' in model_args.model_name_or_path:
model = RobertaForCL.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
model_args=model_args
)
elif 'bert' in model_args.model_name_or_path:
model = BertForCL.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
model_args=model_args
)
if model_args.do_mlm:
pretrained_model = BertForPreTraining.from_pretrained(model_args.model_name_or_path)
model.lm_head.load_state_dict(pretrained_model.cls.predictions.state_dict())
else:
raise NotImplementedError
else:
raise NotImplementedError
logger.info("Training new model from scratch")
model = AutoModelForMaskedLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Prepare features
column_names = datasets["train"].column_names
sent2_cname = None
if len(column_names) == 2:
# Pair datasets
sent0_cname = column_names[0]
sent1_cname = column_names[1]
elif len(column_names) == 3:
# Pair datasets with hard negatives
sent0_cname = column_names[0]
sent1_cname = column_names[1]
sent2_cname = column_names[2]
elif len(column_names) == 1:
# Unsupervised datasets
sent0_cname = column_names[0]
sent1_cname = column_names[0]
else:
raise NotImplementedError
def prepare_features(examples):
# padding = longest (default)
# If no sentence in the batch exceed the max length, then use
# the max sentence length in the batch, otherwise use the
# max sentence length in the argument and truncate those that
# exceed the max length.
# padding = max_length (when pad_to_max_length, for pressure test)
# All sentences are padded/truncated to data_args.max_seq_length.
total = len(examples[sent0_cname])
# Avoid "None" fields
for idx in range(total):
if examples[sent0_cname][idx] is None:
examples[sent0_cname][idx] = " "
if examples[sent1_cname][idx] is None:
examples[sent1_cname][idx] = " "
sentences = examples[sent0_cname] + examples[sent1_cname]
# If hard negative exists
if sent2_cname is not None:
for idx in range(total):
if examples[sent2_cname][idx] is None:
examples[sent2_cname][idx] = " "
sentences += examples[sent2_cname]
sent_features = tokenizer(
sentences,
max_length=data_args.max_seq_length,
truncation=True,
padding="max_length" if data_args.pad_to_max_length else False,
)
features = {}
if sent2_cname is not None:
for key in sent_features:
features[key] = [[sent_features[key][i], sent_features[key][i+total], sent_features[key][i+total*2]] for i in range(total)]
else:
for key in sent_features:
features[key] = [[sent_features[key][i], sent_features[key][i+total]] for i in range(total)]
return features
if training_args.do_train:
train_dataset = datasets["train"].map(
prepare_features,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
# Data collator
    @dataclass
    class OurDataCollatorWithPadding:
        # Collator for contrastive training: each feature holds `num_sent`
        # tokenized sentences (2, or 3 with hard negatives). The collator
        # flattens them, pads once, then reshapes back to (bs, num_sent, len).
        # Optionally prepares masked-LM inputs when model_args.do_mlm is set.
        tokenizer: PreTrainedTokenizerBase
        padding: Union[bool, str, PaddingStrategy] = True
        max_length: Optional[int] = None
        pad_to_multiple_of: Optional[int] = None
        mlm: bool = True
        mlm_probability: float = data_args.mlm_probability
        def __call__(self, features: List[Dict[str, Union[List[int], List[List[int]], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
            """Pad a batch of multi-sentence features into (bs, num_sent, seq_len) tensors."""
            # Keys whose values are per-sentence and must keep the num_sent axis.
            special_keys = ['input_ids', 'attention_mask', 'token_type_ids', 'mlm_input_ids', 'mlm_labels']
            bs = len(features)
            if bs > 0:
                num_sent = len(features[0]['input_ids'])
            else:
                # Empty batch: nothing to collate.
                return
            # Flatten (bs, num_sent) into bs*num_sent single-sentence features
            # so tokenizer.pad can handle them uniformly.
            flat_features = []
            for feature in features:
                for i in range(num_sent):
                    flat_features.append({k: feature[k][i] if k in special_keys else feature[k] for k in feature})
            batch = self.tokenizer.pad(
                flat_features,
                padding=self.padding,
                max_length=self.max_length,
                pad_to_multiple_of=self.pad_to_multiple_of,
                return_tensors="pt",
            )
            if model_args.do_mlm:
                # Build MLM inputs from the padded ids (closure over model_args).
                batch["mlm_input_ids"], batch["mlm_labels"] = self.mask_tokens(batch["input_ids"])
            # Restore the sentence axis for special keys; non-special keys are
            # identical across the num_sent copies, so keep only the first.
            batch = {k: batch[k].view(bs, num_sent, -1) if k in special_keys else batch[k].view(bs, num_sent, -1)[:, 0] for k in batch}
            # Normalize HF label key names to "labels".
            if "label" in batch:
                batch["labels"] = batch["label"]
                del batch["label"]
            if "label_ids" in batch:
                batch["labels"] = batch["label_ids"]
                del batch["label_ids"]
            return batch
        def mask_tokens(
            self, inputs: torch.Tensor, special_tokens_mask: Optional[torch.Tensor] = None
        ) -> Tuple[torch.Tensor, torch.Tensor]:
            """
            Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
            """
            inputs = inputs.clone()
            labels = inputs.clone()
            # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
            probability_matrix = torch.full(labels.shape, self.mlm_probability)
            if special_tokens_mask is None:
                special_tokens_mask = [
                    self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
                ]
                special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
            else:
                special_tokens_mask = special_tokens_mask.bool()
            # Never mask special tokens ([CLS], [SEP], padding, ...).
            probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
            masked_indices = torch.bernoulli(probability_matrix).bool()
            labels[~masked_indices] = -100  # We only compute loss on masked tokens
            # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
            indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
            inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
            # 10% of the time, we replace masked input tokens with random word
            # (0.5 of the remaining 20% == 10% overall).
            indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
            random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
            inputs[indices_random] = random_words[indices_random]
            # The rest of the time (10% of the time) we keep the masked input tokens unchanged
            return inputs, labels
data_collator = default_data_collator if data_args.pad_to_max_length else OurDataCollatorWithPadding(tokenizer)
trainer = CLTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
tokenizer=tokenizer,
data_collator=data_collator,
)
trainer.model_args = model_args
# Training
if training_args.do_train:
model_path = (
model_args.model_name_or_path
if (model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path))
else None
)
train_result = trainer.train(model_path=model_path)
trainer.save_model() # Saves the tokenizer too for easy upload
output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
if trainer.is_world_process_zero():
with open(output_train_file, "w") as writer:
logger.info("***** Train results *****")
for key, value in sorted(train_result.metrics.items()):
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
results = trainer.evaluate(eval_senteval_transfer=True)
output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
if trainer.is_world_process_zero():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in sorted(results.items()):
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
return results
def _mp_fn(index):
    """Per-process entry point used by ``xla_spawn`` (TPU training).

    ``index`` is the spawned process ordinal and is unused; ``main`` reads its
    own configuration from the command line.
    """
    main()
# Standard CLI entry point.
if __name__ == "__main__":
    main()
| 24,040 | 39.955707 | 144 |
py
|
SimCSE
|
SimCSE-main/simcse/tool.py
|
import logging
from tqdm import tqdm
import numpy as np
from numpy import ndarray
import torch
from torch import Tensor, device
import transformers
from transformers import AutoModel, AutoTokenizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import normalize
from typing import List, Dict, Tuple, Type, Union
# Module-level logging: timestamped INFO-level messages for this module.
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)
class SimCSE(object):
    """
    A class for embedding sentences, calculating similarities, and retrieving sentences by SimCSE.
    """
    def __init__(self, model_name_or_path: str,
                device: str = None,
                num_cells: int = 100,
                num_cells_in_search: int = 10,
                pooler = None):
        """Load tokenizer/model and choose the pooling policy.

        Args:
            model_name_or_path: HuggingFace model id or local checkpoint path.
            device: "cuda" or "cpu"; auto-detected when None.
            num_cells: number of IVF cells used when building a fast faiss index.
            num_cells_in_search: number of IVF cells probed at query time.
            pooler: "cls" or "cls_before_pooler"; when None it is inferred from
                the model name ("unsup" models use `cls_before_pooler`).
        """
        self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
        self.model = AutoModel.from_pretrained(model_name_or_path)
        if device is None:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        self.device = device
        self.index = None
        self.is_faiss_index = False
        self.num_cells = num_cells
        self.num_cells_in_search = num_cells_in_search
        if pooler is not None:
            self.pooler = pooler
        elif "unsup" in model_name_or_path:
            logger.info("Use `cls_before_pooler` for unsupervised models. If you want to use other pooling policy, specify `pooler` argument.")
            self.pooler = "cls_before_pooler"
        else:
            self.pooler = "cls"

    def encode(self, sentence: Union[str, List[str]],
                device: str = None,
                return_numpy: bool = False,
                normalize_to_unit: bool = True,
                keepdim: bool = False,
                batch_size: int = 64,
                max_length: int = 128) -> Union[ndarray, Tensor]:
        """Embed one sentence or a list of sentences in mini-batches.

        Returns a (num_sentences, dim) tensor/array, or a (dim,) vector when a
        single string is passed and `keepdim` is False. Embeddings are
        L2-normalized when `normalize_to_unit` is True.
        """
        target_device = self.device if device is None else device
        self.model = self.model.to(target_device)
        single_sentence = False
        if isinstance(sentence, str):
            sentence = [sentence]
            single_sentence = True
        embedding_list = []
        with torch.no_grad():
            # Ceiling division: one extra batch for the remainder.
            total_batch = len(sentence) // batch_size + (1 if len(sentence) % batch_size > 0 else 0)
            for batch_id in tqdm(range(total_batch)):
                inputs = self.tokenizer(
                    sentence[batch_id*batch_size:(batch_id+1)*batch_size],
                    padding=True,
                    truncation=True,
                    max_length=max_length,
                    return_tensors="pt"
                )
                inputs = {k: v.to(target_device) for k, v in inputs.items()}
                outputs = self.model(**inputs, return_dict=True)
                if self.pooler == "cls":
                    # [CLS] after the model's trained MLP pooler.
                    embeddings = outputs.pooler_output
                elif self.pooler == "cls_before_pooler":
                    # Raw [CLS] hidden state from the last layer.
                    embeddings = outputs.last_hidden_state[:, 0]
                else:
                    raise NotImplementedError
                if normalize_to_unit:
                    embeddings = embeddings / embeddings.norm(dim=1, keepdim=True)
                embedding_list.append(embeddings.cpu())
        embeddings = torch.cat(embedding_list, 0)
        if single_sentence and not keepdim:
            embeddings = embeddings[0]
        if return_numpy and not isinstance(embeddings, ndarray):
            return embeddings.numpy()
        return embeddings

    def similarity(self, queries: Union[str, List[str]],
                    keys: Union[str, List[str], ndarray],
                    device: str = None) -> Union[float, ndarray]:
        """Cosine similarity between queries and keys.

        Returns an N*M array for N queries and M keys; a 1-D array when one
        side is a single sentence; a plain float when both are single.
        `keys` may be a precomputed embedding matrix (ndarray).
        """
        query_vecs = self.encode(queries, device=device, return_numpy=True)  # suppose N queries
        if not isinstance(keys, ndarray):
            key_vecs = self.encode(keys, device=device, return_numpy=True)  # suppose M keys
        else:
            key_vecs = keys
        # check whether N == 1 or M == 1
        single_query, single_key = len(query_vecs.shape) == 1, len(key_vecs.shape) == 1
        if single_query:
            query_vecs = query_vecs.reshape(1, -1)
        if single_key:
            key_vecs = key_vecs.reshape(1, -1)
        # returns an N*M similarity array
        similarities = cosine_similarity(query_vecs, key_vecs)
        if single_query:
            similarities = similarities[0]
            if single_key:
                similarities = float(similarities[0])
        return similarities

    def build_index(self, sentences_or_file_path: Union[str, List[str]],
                        use_faiss: bool = None,
                        faiss_fast: bool = False,
                        device: str = None,
                        batch_size: int = 64):
        """Encode a corpus and build a search index over it.

        `sentences_or_file_path` is either a list of sentences or a path to a
        text file with one sentence per line. Uses faiss when available (or
        when `use_faiss` is True); otherwise falls back to brute-force search
        over the raw embedding matrix.
        """
        if use_faiss is None or use_faiss:
            try:
                import faiss
                assert hasattr(faiss, "IndexFlatIP")
                use_faiss = True
            except (ImportError, AssertionError):
                # FIX: was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit; only faiss-availability
                # failures should trigger the brute-force fallback.
                logger.warning("Fail to import faiss. If you want to use faiss, install faiss through PyPI. Now the program continues with brute force search.")
                use_faiss = False

        # if the input sentence is a string, we assume it's the path of file that stores various sentences
        if isinstance(sentences_or_file_path, str):
            sentences = []
            with open(sentences_or_file_path, "r") as f:
                logging.info("Loading sentences from %s ..." % (sentences_or_file_path))
                for line in tqdm(f):
                    sentences.append(line.rstrip())
            sentences_or_file_path = sentences

        logger.info("Encoding embeddings for sentences...")
        embeddings = self.encode(sentences_or_file_path, device=device, batch_size=batch_size, normalize_to_unit=True, return_numpy=True)

        logger.info("Building index...")
        self.index = {"sentences": sentences_or_file_path}

        if use_faiss:
            # Inner product on unit vectors == cosine similarity.
            quantizer = faiss.IndexFlatIP(embeddings.shape[1])
            if faiss_fast:
                index = faiss.IndexIVFFlat(quantizer, embeddings.shape[1], min(self.num_cells, len(sentences_or_file_path)), faiss.METRIC_INNER_PRODUCT)
            else:
                index = quantizer

            if (self.device == "cuda" and device != "cpu") or device == "cuda":
                if hasattr(faiss, "StandardGpuResources"):
                    logger.info("Use GPU-version faiss")
                    res = faiss.StandardGpuResources()
                    res.setTempMemory(20 * 1024 * 1024 * 1024)
                    index = faiss.index_cpu_to_gpu(res, 0, index)
                else:
                    logger.info("Use CPU-version faiss")
            else:
                logger.info("Use CPU-version faiss")

            if faiss_fast:
                # IVF indexes must be trained before vectors are added.
                index.train(embeddings.astype(np.float32))
            index.add(embeddings.astype(np.float32))
            index.nprobe = min(self.num_cells_in_search, len(sentences_or_file_path))
            self.is_faiss_index = True
        else:
            index = embeddings
            self.is_faiss_index = False
        self.index["index"] = index
        logger.info("Finished")

    def add_to_index(self, sentences_or_file_path: Union[str, List[str]],
                        device: str = None,
                        batch_size: int = 64):
        """Encode additional sentences and append them to the existing index."""
        # if the input sentence is a string, we assume it's the path of file that stores various sentences
        if isinstance(sentences_or_file_path, str):
            sentences = []
            with open(sentences_or_file_path, "r") as f:
                logging.info("Loading sentences from %s ..." % (sentences_or_file_path))
                for line in tqdm(f):
                    sentences.append(line.rstrip())
            sentences_or_file_path = sentences

        logger.info("Encoding embeddings for sentences...")
        embeddings = self.encode(sentences_or_file_path, device=device, batch_size=batch_size, normalize_to_unit=True, return_numpy=True)

        if self.is_faiss_index:
            self.index["index"].add(embeddings.astype(np.float32))
        else:
            self.index["index"] = np.concatenate((self.index["index"], embeddings))
        self.index["sentences"] += sentences_or_file_path
        logger.info("Finished")

    def search(self, queries: Union[str, List[str]],
                device: str = None,
                threshold: float = 0.6,
                top_k: int = 5) -> Union[List[Tuple[str, float]], List[List[Tuple[str, float]]]]:
        """Retrieve up to `top_k` indexed sentences with similarity >= threshold.

        Returns a list of (sentence, score) tuples for a single query, or a
        list of such lists for a list of queries. Requires `build_index` to
        have been called first.
        """
        if not self.is_faiss_index:
            if isinstance(queries, list):
                # Brute-force path handles one query at a time; recurse.
                combined_results = []
                for query in queries:
                    results = self.search(query, device, threshold, top_k)
                    combined_results.append(results)
                return combined_results

            similarities = self.similarity(queries, self.index["index"]).tolist()
            id_and_score = []
            for i, s in enumerate(similarities):
                if s >= threshold:
                    id_and_score.append((i, s))
            id_and_score = sorted(id_and_score, key=lambda x: x[1], reverse=True)[:top_k]
            results = [(self.index["sentences"][idx], score) for idx, score in id_and_score]
            return results
        else:
            query_vecs = self.encode(queries, device=device, normalize_to_unit=True, keepdim=True, return_numpy=True)
            distance, idx = self.index["index"].search(query_vecs.astype(np.float32), top_k)

            def pack_single_result(dist, idx):
                # Filter the faiss top-k hits by the similarity threshold.
                results = [(self.index["sentences"][i], s) for i, s in zip(idx, dist) if s >= threshold]
                return results

            if isinstance(queries, list):
                combined_results = []
                for i in range(len(queries)):
                    results = pack_single_result(distance[i], idx[i])
                    combined_results.append(results)
                return combined_results
            else:
                return pack_single_result(distance[0], idx[0])
if __name__=="__main__":
    # Demo / smoke test. Downloads the pretrained model, so it needs network
    # access on first run.
    example_sentences = [
        'An animal is biting a persons finger.',
        'A woman is reading.',
        'A man is lifting weights in a garage.',
        'A man plays the violin.',
        'A man is eating food.',
        'A man plays the piano.',
        'A panda is climbing.',
        'A man plays a guitar.',
        'A woman is slicing a meat.',
        'A woman is taking a picture.'
    ]
    example_queries = [
        'A man is playing music.',
        'A woman is making a photo.'
    ]
    model_name = "princeton-nlp/sup-simcse-bert-base-uncased"
    simcse = SimCSE(model_name)
    # 1) Pairwise cosine similarities (queries x sentences matrix).
    print("\n=========Calculate cosine similarities between queries and sentences============\n")
    similarities = simcse.similarity(example_queries, example_sentences)
    print(similarities)
    # 2) Brute-force nearest-neighbour search over the raw embedding matrix.
    print("\n=========Naive brute force search============\n")
    simcse.build_index(example_sentences, use_faiss=False)
    results = simcse.search(example_queries)
    for i, result in enumerate(results):
        print("Retrieval results for query: {}".format(example_queries[i]))
        for sentence, score in result:
            print("    {}  (cosine similarity: {:.4f})".format(sentence, score))
        print("")
    # 3) The same retrieval backed by a faiss index.
    print("\n=========Search with Faiss backend============\n")
    simcse.build_index(example_sentences, use_faiss=True)
    results = simcse.search(example_queries)
    for i, result in enumerate(results):
        print("Retrieval results for query: {}".format(example_queries[i]))
        for sentence, score in result:
            print("    {}  (cosine similarity: {:.4f})".format(sentence, score))
        print("")
| 12,092 | 41.135889 | 160 |
py
|
SimCSE
|
SimCSE-main/simcse/trainers.py
|
import collections
import inspect
import math
import sys
import os
import re
import json
import shutil
import time
import warnings
from pathlib import Path
import importlib.util
from packaging import version
from transformers import Trainer
from transformers.modeling_utils import PreTrainedModel
from transformers.training_args import ParallelMode, TrainingArguments
from transformers.utils import logging
from transformers.trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
TrainOutput,
default_compute_objective,
default_hp_space,
set_seed,
speed_metrics,
)
from transformers.file_utils import (
WEIGHTS_NAME,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_torch_tpu_available,
)
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from transformers.trainer_pt_utils import (
reissue_pt_warnings,
)
from transformers.utils import logging
from transformers.data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
import torch
import torch.nn as nn
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_apex_available():
from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_native_amp_available = True
from torch.cuda.amp import autocast
if is_datasets_available():
import datasets
from transformers.trainer import _model_unwrap
from transformers.optimization import Adafactor, AdamW, get_scheduler
import copy
# Set path to SentEval
PATH_TO_SENTEVAL = './SentEval'
PATH_TO_DATA = './SentEval/data'
# Import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
import numpy as np
from datetime import datetime
from filelock import FileLock
logger = logging.get_logger(__name__)
class CLTrainer(Trainer):
    # Trainer subclass for SimCSE contrastive learning. Differences from the
    # stock HF Trainer: (1) evaluation runs SentEval STS tasks instead of a
    # dataloader-based eval loop; (2) checkpoint saving keeps only the
    # best-validation model; (3) `train` reloads the best checkpoint with
    # `model_args` passed through. Tightly coupled to a specific transformers
    # version (uses `_model_unwrap`, `sharded_dpp`, etc.).
    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        eval_senteval_transfer: bool = False,
    ) -> Dict[str, float]:
        """Evaluate with SentEval on STS-B and SICK-R dev sets.

        When `eval_senteval_transfer` or `--eval_transfer` is set, also runs
        the seven transfer tasks and reports their average dev accuracy.
        Returns the metrics dict (also passed to `self.log`).
        """
        # SentEval prepare and batcher
        def prepare(params, samples):
            return
        def batcher(params, batch):
            # SentEval hands over tokenized sentences; re-join and re-tokenize
            # with the model's own tokenizer, then take the pooler output
            # (sent_emb=True puts the model in sentence-embedding mode).
            sentences = [' '.join(s) for s in batch]
            batch = self.tokenizer.batch_encode_plus(
                sentences,
                return_tensors='pt',
                padding=True,
            )
            for k in batch:
                batch[k] = batch[k].to(self.args.device)
            with torch.no_grad():
                outputs = self.model(**batch, output_hidden_states=True, return_dict=True, sent_emb=True)
                pooler_output = outputs.pooler_output
            return pooler_output.cpu()
        # Set params for SentEval (fastmode)
        params = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
        params['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
                                'tenacity': 3, 'epoch_size': 2}
        se = senteval.engine.SE(params, batcher, prepare)
        tasks = ['STSBenchmark', 'SICKRelatedness']
        if eval_senteval_transfer or self.args.eval_transfer:
            tasks = ['STSBenchmark', 'SICKRelatedness', 'MR', 'CR', 'SUBJ', 'MPQA', 'SST2', 'TREC', 'MRPC']
        self.model.eval()
        results = se.eval(tasks)
        # Spearman correlation on the dev split is the model-selection metric.
        stsb_spearman = results['STSBenchmark']['dev']['spearman'][0]
        sickr_spearman = results['SICKRelatedness']['dev']['spearman'][0]
        metrics = {"eval_stsb_spearman": stsb_spearman, "eval_sickr_spearman": sickr_spearman, "eval_avg_sts": (stsb_spearman + sickr_spearman) / 2}
        if eval_senteval_transfer or self.args.eval_transfer:
            avg_transfer = 0
            for task in ['MR', 'CR', 'SUBJ', 'MPQA', 'SST2', 'TREC', 'MRPC']:
                avg_transfer += results[task]['devacc']
                metrics['eval_{}'.format(task)] = results[task]['devacc']
            avg_transfer /= 7
            metrics['eval_avg_transfer'] = avg_transfer
        self.log(metrics)
        return metrics
    def _save_checkpoint(self, model, trial, metrics=None):
        """
        Compared to original implementation, we change the saving policy to
        only save the best-validation checkpoints.
        """
        # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
        # want to save.
        assert _model_unwrap(model) is self.model, "internal model should be a reference to self.model"
        # Determine the new best metric / best model checkpoint
        if metrics is not None and self.args.metric_for_best_model is not None:
            metric_to_check = self.args.metric_for_best_model
            if not metric_to_check.startswith("eval_"):
                metric_to_check = f"eval_{metric_to_check}"
            metric_value = metrics[metric_to_check]
            operator = np.greater if self.args.greater_is_better else np.less
            if (
                self.state.best_metric is None
                or self.state.best_model_checkpoint is None
                or operator(metric_value, self.state.best_metric)
            ):
                # Best-so-far: overwrite output_dir directly instead of
                # creating a numbered checkpoint folder.
                output_dir = self.args.output_dir
                self.state.best_metric = metric_value
                self.state.best_model_checkpoint = output_dir
                # Only save model when it is the best one
                self.save_model(output_dir)
                if self.deepspeed:
                    self.deepspeed.save_checkpoint(output_dir)
                # Save optimizer and scheduler
                if self.sharded_dpp:
                    self.optimizer.consolidate_state_dict()
                if is_torch_tpu_available():
                    xm.rendezvous("saving_optimizer_states")
                    xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    with warnings.catch_warnings(record=True) as caught_warnings:
                        xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    reissue_pt_warnings(caught_warnings)
                elif self.is_world_process_zero() and not self.deepspeed:
                    # deepspeed.save_checkpoint above saves model/optim/sched
                    torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    with warnings.catch_warnings(record=True) as caught_warnings:
                        torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    reissue_pt_warnings(caught_warnings)
                # Save the Trainer state
                if self.is_world_process_zero():
                    self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
        else:
            # No metric available (e.g. eval disabled): fall back to the
            # standard numbered checkpoint folders.
            # Save model checkpoint
            checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
            if self.hp_search_backend is not None and trial is not None:
                if self.hp_search_backend == HPSearchBackend.OPTUNA:
                    run_id = trial.number
                else:
                    from ray import tune
                    run_id = tune.get_trial_id()
                run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
                output_dir = os.path.join(self.args.output_dir, run_name, checkpoint_folder)
            else:
                output_dir = os.path.join(self.args.output_dir, checkpoint_folder)
                self.store_flos()
            self.save_model(output_dir)
            if self.deepspeed:
                self.deepspeed.save_checkpoint(output_dir)
            # Save optimizer and scheduler
            if self.sharded_dpp:
                self.optimizer.consolidate_state_dict()
            if is_torch_tpu_available():
                xm.rendezvous("saving_optimizer_states")
                xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                with warnings.catch_warnings(record=True) as caught_warnings:
                    xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                reissue_pt_warnings(caught_warnings)
            elif self.is_world_process_zero() and not self.deepspeed:
                # deepspeed.save_checkpoint above saves model/optim/sched
                torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                with warnings.catch_warnings(record=True) as caught_warnings:
                    torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                reissue_pt_warnings(caught_warnings)
            # Save the Trainer state
            if self.is_world_process_zero():
                self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
            # Maybe delete some older checkpoints.
            if self.is_world_process_zero():
                self._rotate_checkpoints(use_mtime=True)
    def train(self, model_path: Optional[str] = None, trial: Union["optuna.Trial", Dict[str, Any]] = None):
        """
        Main training entry point.
        Args:
            model_path (:obj:`str`, `optional`):
                Local path to the model if the model to train has been instantiated from a local path. If present,
                training will resume from the optimizer/scheduler states loaded here.
            trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
                The trial run or the hyperparameter dictionary for hyperparameter search.
        The main difference between ours and Huggingface's original implementation is that we
        also load model_args when reloading best checkpoints for evaluation.
        """
        # This might change the seed so needs to run first.
        self._hp_search_setup(trial)
        # Model re-init
        if self.model_init is not None:
            # Seed must be set before instantiating the model when using model_init.
            set_seed(self.args.seed)
            model = self.call_model_init(trial)
            if not self.is_model_parallel:
                model = model.to(self.args.device)
            self.model = model
            self.model_wrapped = model
            # Reinitializes optimizer and scheduler
            self.optimizer, self.lr_scheduler = None, None
        # Keeping track whether we can can len() on the dataset or not
        train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
        # Data loader and number of training steps
        train_dataloader = self.get_train_dataloader()
        # Setting up training control variables:
        # number of training epochs: num_train_epochs
        # number of training steps per epoch: num_update_steps_per_epoch
        # total number of training steps to execute: max_steps
        if train_dataset_is_sized:
            num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
            num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
            if self.args.max_steps > 0:
                max_steps = self.args.max_steps
                num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(
                    self.args.max_steps % num_update_steps_per_epoch > 0
                )
            else:
                max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
                num_train_epochs = math.ceil(self.args.num_train_epochs)
        else:
            # see __init__. max_steps is set when the dataset has no __len__
            max_steps = self.args.max_steps
            num_train_epochs = 1
            num_update_steps_per_epoch = max_steps
        if self.args.deepspeed:
            model, optimizer, lr_scheduler = init_deepspeed(self, num_training_steps=max_steps)
            self.model = model.module
            self.model_wrapped = model  # will get further wrapped in DDP
            self.deepspeed = model  # DeepSpeedEngine object
            self.optimizer = optimizer
            self.lr_scheduler = lr_scheduler
        else:
            self.create_optimizer_and_scheduler(num_training_steps=max_steps)
        self.state = TrainerState()
        self.state.is_hyper_param_search = trial is not None
        # Check if saved optimizer or scheduler states exist
        self._load_optimizer_and_scheduler(model_path)
        model = self.model_wrapped
        # Mixed precision training with apex (torch < 1.6)
        if self.use_apex:
            model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
        # Multi-gpu training (should be after apex fp16 initialization)
        if self.args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        # Distributed training (should be after apex fp16 initialization)
        if self.sharded_dpp:
            model = ShardedDDP(model, self.optimizer)
        elif self.args.local_rank != -1:
            model = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.args.local_rank],
                output_device=self.args.local_rank,
                find_unused_parameters=(
                    not getattr(model.config, "gradient_checkpointing", False)
                    if isinstance(model, PreTrainedModel)
                    else True
                ),
            )
            # find_unused_parameters breaks checkpointing as per
            # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
        # for the rest of this function `model` is the outside model, whether it was wrapped or not
        if model is not self.model:
            self.model_wrapped = model
        # important: at this point:
        # self.model         is the Transformers Model
        # self.model_wrapped is DDP(Transformers Model), DDP(Deepspeed(Transformers Model)), etc.
        # Train!
        if is_torch_tpu_available():
            total_train_batch_size = self.args.train_batch_size * xm.xrt_world_size()
        else:
            total_train_batch_size = (
                self.args.train_batch_size
                * self.args.gradient_accumulation_steps
                * (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1)
            )
        num_examples = (
            self.num_examples(train_dataloader)
            if train_dataset_is_sized
            else total_train_batch_size * self.args.max_steps
        )
        logger.info("***** Running training *****")
        logger.info(f"  Num examples = {num_examples}")
        logger.info(f"  Num Epochs = {num_train_epochs}")
        logger.info(f"  Instantaneous batch size per device = {self.args.per_device_train_batch_size}")
        logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
        logger.info(f"  Gradient Accumulation steps = {self.args.gradient_accumulation_steps}")
        logger.info(f"  Total optimization steps = {max_steps}")
        self.state.epoch = 0
        start_time = time.time()
        epochs_trained = 0
        steps_trained_in_current_epoch = 0
        # Check if continuing training from a checkpoint
        if model_path and os.path.isfile(os.path.join(model_path, "trainer_state.json")):
            self.state = TrainerState.load_from_json(os.path.join(model_path, "trainer_state.json"))
            epochs_trained = self.state.global_step // num_update_steps_per_epoch
            if not self.args.ignore_data_skip:
                steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
                steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps
            else:
                steps_trained_in_current_epoch = 0
            logger.info("  Continuing training from checkpoint, will skip to saved global_step")
            logger.info(f"  Continuing training from epoch {epochs_trained}")
            logger.info(f"  Continuing training from global step {self.state.global_step}")
            if not self.args.ignore_data_skip:
                logger.info(
                    f"  Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
                    "batches in the first epoch."
                )
        # Update the references
        self.callback_handler.model = self.model
        self.callback_handler.optimizer = self.optimizer
        self.callback_handler.lr_scheduler = self.lr_scheduler
        self.callback_handler.train_dataloader = train_dataloader
        self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
        self.state.trial_params = hp_params(trial) if trial is not None else None
        # This should be the same if the state has been saved but in case the training arguments changed, it's safer
        # to set this after the load.
        self.state.max_steps = max_steps
        self.state.num_train_epochs = num_train_epochs
        self.state.is_local_process_zero = self.is_local_process_zero()
        self.state.is_world_process_zero = self.is_world_process_zero()
        # tr_loss is a tensor to avoid synchronization of TPUs through .item()
        tr_loss = torch.tensor(0.0).to(self.args.device)
        # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses
        self._total_loss_scalar = 0.0
        self._globalstep_last_logged = 0
        self._total_flos = self.state.total_flos
        model.zero_grad()
        self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)
        # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
        if not self.args.ignore_data_skip:
            for epoch in range(epochs_trained):
                # We just need to begin an iteration to create the randomization of the sampler.
                for _ in train_dataloader:
                    break
        for epoch in range(epochs_trained, num_train_epochs):
            if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
                train_dataloader.sampler.set_epoch(epoch)
            epoch_iterator = train_dataloader
            # Reset the past mems state at the beginning of each epoch if necessary.
            if self.args.past_index >= 0:
                self._past = None
            steps_in_epoch = len(train_dataloader) if train_dataset_is_sized else self.args.max_steps
            self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)
            assert train_dataset_is_sized, "currently we only support sized dataloader!"
            inputs = None
            last_inputs = None
            for step, inputs in enumerate(epoch_iterator):
                # Skip past any already trained steps if resuming training
                if steps_trained_in_current_epoch > 0:
                    steps_trained_in_current_epoch -= 1
                    continue
                if (step + 1) % self.args.gradient_accumulation_steps == 0:
                    self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)
                if ((step + 1) % self.args.gradient_accumulation_steps != 0) and self.args.local_rank != -1:
                    # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
                    with model.no_sync():
                        tr_loss += self.training_step(model, inputs)
                else:
                    tr_loss += self.training_step(model, inputs)
                self._total_flos += self.floating_point_ops(inputs)
                if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
                    # last step in epoch but step is always smaller than gradient_accumulation_steps
                    steps_in_epoch <= self.args.gradient_accumulation_steps
                    and (step + 1) == steps_in_epoch
                ):
                    # Gradient clipping
                    if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:
                        # deepspeed does its own clipping
                        if self.use_amp:
                            # AMP: gradients need unscaling
                            self.scaler.unscale_(self.optimizer)
                        if hasattr(self.optimizer, "clip_grad_norm"):
                            # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
                            self.optimizer.clip_grad_norm(self.args.max_grad_norm)
                        else:
                            # Revert to normal clipping otherwise, handling Apex or full precision
                            torch.nn.utils.clip_grad_norm_(
                                amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
                                self.args.max_grad_norm,
                            )
                    # Optimizer step
                    if is_torch_tpu_available():
                        xm.optimizer_step(self.optimizer)
                    elif self.use_amp:
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                    else:
                        self.optimizer.step()
                    self.lr_scheduler.step()
                    model.zero_grad()
                    self.state.global_step += 1
                    self.state.epoch = epoch + (step + 1) / steps_in_epoch
                    self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)
                    self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
                if self.control.should_epoch_stop or self.control.should_training_stop:
                    break
            self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
            self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
            if self.args.tpu_metrics_debug or self.args.debug:
                if is_torch_tpu_available():
                    # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
                    xm.master_print(met.metrics_report())
                else:
                    logger.warning(
                        "You enabled PyTorch/XLA debug metrics but you don't have a TPU "
                        "configured. Check your training configuration if this is unexpected."
                    )
            if self.control.should_training_stop:
                break
        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of training
            delattr(self, "_past")
        logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
        if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
            logger.info(
                f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
            )
            if isinstance(self.model, PreTrainedModel):
                # NOTE: `model_args` is forwarded here so the custom CL heads
                # are rebuilt correctly — this is the SimCSE-specific change.
                self.model = self.model.from_pretrained(self.state.best_model_checkpoint, model_args=self.model_args)
                if not self.is_model_parallel:
                    self.model = self.model.to(self.args.device)
            else:
                state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
                self.model.load_state_dict(state_dict)
            if self.deepspeed:
                self.deepspeed.load_checkpoint(
                    self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
                )
        metrics = speed_metrics("train", start_time, self.state.max_steps)
        if self._total_flos is not None:
            self.store_flos()
            metrics["total_flos"] = self.state.total_flos
        self.log(metrics)
        self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)
        # add remaining tr_loss
        self._total_loss_scalar += tr_loss.item()
        return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)
| 25,360 | 44.368515 | 149 |
py
|
SimCSE
|
SimCSE-main/simcse/models.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
import transformers
from transformers import RobertaTokenizer
from transformers.models.roberta.modeling_roberta import RobertaPreTrainedModel, RobertaModel, RobertaLMHead
from transformers.models.bert.modeling_bert import BertPreTrainedModel, BertModel, BertLMPredictionHead
from transformers.activations import gelu
from transformers.file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import SequenceClassifierOutput, BaseModelOutputWithPoolingAndCrossAttentions
class MLPLayer(nn.Module):
    """Projection head over the [CLS] representation.

    A single dense layer followed by tanh, mirroring the design of
    BERT/RoBERTa's original pooler.
    """

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, features, **kwargs):
        return self.activation(self.dense(features))
class Similarity(nn.Module):
    """Cosine similarity scaled by a (softmax) temperature.

    Dividing by a small `temp` sharpens the contrastive distribution.
    """

    def __init__(self, temp):
        super().__init__()
        self.temp = temp
        self.cos = nn.CosineSimilarity(dim=-1)

    def forward(self, x, y):
        similarity = self.cos(x, y)
        return similarity / self.temp
class Pooler(nn.Module):
    """
    Parameter-free poolers to get the sentence embedding
    'cls': [CLS] representation with BERT/RoBERTa's MLP pooler.
    'cls_before_pooler': [CLS] representation without the original MLP pooler.
    'avg': average of the last layers' hidden states at each token.
    'avg_top2': average of the last two layers.
    'avg_first_last': average of the first and the last layers.
    """

    def __init__(self, pooler_type):
        super().__init__()
        self.pooler_type = pooler_type
        assert self.pooler_type in ["cls", "cls_before_pooler", "avg", "avg_top2", "avg_first_last"], "unrecognized pooling type %s" % self.pooler_type

    @staticmethod
    def _masked_mean(states, attention_mask):
        # Mean over the token dimension, ignoring padded positions.
        return (states * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1)

    def forward(self, attention_mask, outputs):
        last_hidden = outputs.last_hidden_state
        pooler_output = outputs.pooler_output  # unused; read kept for parity with the encoder output contract
        hidden_states = outputs.hidden_states

        kind = self.pooler_type
        if kind in ('cls_before_pooler', 'cls'):
            # [CLS] is always the first token.
            return last_hidden[:, 0]
        if kind == "avg":
            return self._masked_mean(last_hidden, attention_mask)
        if kind == "avg_first_last":
            # hidden_states[0] is the embedding layer, so layer 1 is the first block.
            return self._masked_mean((hidden_states[1] + hidden_states[-1]) / 2.0, attention_mask)
        if kind == "avg_top2":
            return self._masked_mean((hidden_states[-1] + hidden_states[-2]) / 2.0, attention_mask)
        raise NotImplementedError
def cl_init(cls, config):
    """
    Contrastive learning class init function.

    Attaches the pooling module, the optional MLP projection head (only for
    "cls" pooling), and the temperature-scaled similarity to the model
    instance, then re-initializes weights.
    """
    cls.pooler_type = cls.model_args.pooler_type
    cls.pooler = Pooler(cls.model_args.pooler_type)
    # Extra MLP head over [CLS], matching BERT's original pooler design.
    if cls.model_args.pooler_type == "cls":
        cls.mlp = MLPLayer(config)
    # Temperature-scaled cosine similarity used by the contrastive loss.
    cls.sim = Similarity(temp=cls.model_args.temp)
    cls.init_weights()
def cl_forward(cls,
    encoder,
    input_ids=None,
    attention_mask=None,
    token_type_ids=None,
    position_ids=None,
    head_mask=None,
    inputs_embeds=None,
    labels=None,
    output_attentions=None,
    output_hidden_states=None,
    return_dict=None,
    mlm_input_ids=None,
    mlm_labels=None,
):
    """Contrastive-training forward pass.

    `input_ids` has shape (bs, num_sent, len): each instance carries 2
    sentences (a positive pair) or 3 (pair plus a hard negative).  The
    in-batch similarity matrix is used as logits for a cross-entropy
    (InfoNCE) loss; an optional MLM loss is added when `mlm_input_ids`
    and `mlm_labels` are given.  The `labels` parameter is unused but
    kept for Trainer compatibility.
    """
    return_dict = return_dict if return_dict is not None else cls.config.use_return_dict
    ori_input_ids = input_ids
    batch_size = input_ids.size(0)
    # Number of sentences in one instance
    # 2: pair instance; 3: pair instance with a hard negative
    num_sent = input_ids.size(1)

    mlm_outputs = None
    # Flatten input for encoding
    input_ids = input_ids.view((-1, input_ids.size(-1))) # (bs * num_sent, len)
    attention_mask = attention_mask.view((-1, attention_mask.size(-1))) # (bs * num_sent len)
    if token_type_ids is not None:
        token_type_ids = token_type_ids.view((-1, token_type_ids.size(-1))) # (bs * num_sent, len)

    # Get raw embeddings
    outputs = encoder(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        # Layer-wise hidden states are only needed for the multi-layer poolers.
        output_hidden_states=True if cls.model_args.pooler_type in ['avg_top2', 'avg_first_last'] else False,
        return_dict=True,
    )

    # MLM auxiliary objective
    if mlm_input_ids is not None:
        mlm_input_ids = mlm_input_ids.view((-1, mlm_input_ids.size(-1)))
        mlm_outputs = encoder(
            mlm_input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=True if cls.model_args.pooler_type in ['avg_top2', 'avg_first_last'] else False,
            return_dict=True,
        )

    # Pooling
    pooler_output = cls.pooler(attention_mask, outputs)
    pooler_output = pooler_output.view((batch_size, num_sent, pooler_output.size(-1))) # (bs, num_sent, hidden)

    # If using "cls", we add an extra MLP layer
    # (same as BERT's original implementation) over the representation.
    if cls.pooler_type == "cls":
        pooler_output = cls.mlp(pooler_output)

    # Separate representation
    z1, z2 = pooler_output[:,0], pooler_output[:,1]

    # Hard negative
    if num_sent == 3:
        z3 = pooler_output[:, 2]

    # Gather all embeddings if using distributed training
    if dist.is_initialized() and cls.training:
        # Gather hard negative
        if num_sent >= 3:
            z3_list = [torch.zeros_like(z3) for _ in range(dist.get_world_size())]
            dist.all_gather(tensor_list=z3_list, tensor=z3.contiguous())
            z3_list[dist.get_rank()] = z3
            z3 = torch.cat(z3_list, 0)

        # Dummy vectors for allgather
        z1_list = [torch.zeros_like(z1) for _ in range(dist.get_world_size())]
        z2_list = [torch.zeros_like(z2) for _ in range(dist.get_world_size())]
        # Allgather
        dist.all_gather(tensor_list=z1_list, tensor=z1.contiguous())
        dist.all_gather(tensor_list=z2_list, tensor=z2.contiguous())

        # Since allgather results do not have gradients, we replace the
        # current process's corresponding embeddings with original tensors
        z1_list[dist.get_rank()] = z1
        z2_list[dist.get_rank()] = z2
        # Get full batch embeddings: (bs x N, hidden)
        z1 = torch.cat(z1_list, 0)
        z2 = torch.cat(z2_list, 0)

    cos_sim = cls.sim(z1.unsqueeze(1), z2.unsqueeze(0))
    # Hard negative
    if num_sent >= 3:
        z1_z3_cos = cls.sim(z1.unsqueeze(1), z3.unsqueeze(0))
        cos_sim = torch.cat([cos_sim, z1_z3_cos], 1)

    # Each z1[i] is labelled with its own positive z2[i] (the diagonal).
    labels = torch.arange(cos_sim.size(0)).long().to(cls.device)
    loss_fct = nn.CrossEntropyLoss()

    # Calculate loss with hard negatives
    if num_sent == 3:
        # Note that weights are actually logits of weights
        z3_weight = cls.model_args.hard_negative_weight
        # Adds z3_weight to the logit of each instance's own hard negative only.
        weights = torch.tensor(
            [[0.0] * (cos_sim.size(-1) - z1_z3_cos.size(-1)) + [0.0] * i + [z3_weight] + [0.0] * (z1_z3_cos.size(-1) - i - 1) for i in range(z1_z3_cos.size(-1))]
        ).to(cls.device)
        cos_sim = cos_sim + weights

    loss = loss_fct(cos_sim, labels)

    # Calculate loss for MLM
    if mlm_outputs is not None and mlm_labels is not None:
        mlm_labels = mlm_labels.view(-1, mlm_labels.size(-1))
        prediction_scores = cls.lm_head(mlm_outputs.last_hidden_state)
        masked_lm_loss = loss_fct(prediction_scores.view(-1, cls.config.vocab_size), mlm_labels.view(-1))
        loss = loss + cls.model_args.mlm_weight * masked_lm_loss

    if not return_dict:
        output = (cos_sim,) + outputs[2:]
        return ((loss,) + output) if loss is not None else output
    return SequenceClassifierOutput(
        loss=loss,
        logits=cos_sim,
        hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
    )
def sentemb_forward(
    cls,
    encoder,
    input_ids=None,
    attention_mask=None,
    token_type_ids=None,
    position_ids=None,
    head_mask=None,
    inputs_embeds=None,
    labels=None,
    output_attentions=None,
    output_hidden_states=None,
    return_dict=None,
):
    """Encode single sentences and return their pooled embeddings.

    Used at evaluation/inference time (sent_emb=True in the model forward).
    The `labels` and `output_hidden_states` parameters are accepted for
    interface compatibility but not used directly.
    """
    if return_dict is None:
        return_dict = cls.config.use_return_dict

    # Layer-wise hidden states are only required by the multi-layer poolers.
    need_all_layers = cls.pooler_type in ['avg_top2', 'avg_first_last']
    outputs = encoder(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=need_all_layers,
        return_dict=True,
    )

    pooler_output = cls.pooler(attention_mask, outputs)
    # Optionally skip the train-only MLP head at inference time.
    if cls.pooler_type == "cls" and not cls.model_args.mlp_only_train:
        pooler_output = cls.mlp(pooler_output)

    if not return_dict:
        return (outputs[0], pooler_output) + outputs[2:]

    return BaseModelOutputWithPoolingAndCrossAttentions(
        pooler_output=pooler_output,
        last_hidden_state=outputs.last_hidden_state,
        hidden_states=outputs.hidden_states,
    )
class BertForCL(BertPreTrainedModel):
    """BERT encoder wrapped for contrastive sentence-embedding learning.

    Dispatches to `sentemb_forward` for plain sentence encoding
    (sent_emb=True) and to `cl_forward` for contrastive training.
    """
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def __init__(self, config, *model_args, **model_kargs):
        super().__init__(config)
        self.model_args = model_kargs["model_args"]
        self.bert = BertModel(config, add_pooling_layer=False)

        # Optional masked-LM head for the auxiliary MLM objective.
        if self.model_args.do_mlm:
            self.lm_head = BertLMPredictionHead(config)

        cl_init(self, config)

    def forward(self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        sent_emb=False,
        mlm_input_ids=None,
        mlm_labels=None,
    ):
        """Route to the sentence-embedding or contrastive forward pass."""
        common = dict(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            labels=labels,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if sent_emb:
            return sentemb_forward(self, self.bert, **common)
        return cl_forward(self, self.bert,
                          mlm_input_ids=mlm_input_ids,
                          mlm_labels=mlm_labels,
                          **common)
class RobertaForCL(RobertaPreTrainedModel):
    """RoBERTa encoder wrapped for contrastive sentence-embedding learning.

    Dispatches to `sentemb_forward` for plain sentence encoding
    (sent_emb=True) and to `cl_forward` for contrastive training.
    """
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def __init__(self, config, *model_args, **model_kargs):
        super().__init__(config)
        self.model_args = model_kargs["model_args"]
        self.roberta = RobertaModel(config, add_pooling_layer=False)

        # Optional masked-LM head for the auxiliary MLM objective.
        if self.model_args.do_mlm:
            self.lm_head = RobertaLMHead(config)

        cl_init(self, config)

    def forward(self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        sent_emb=False,
        mlm_input_ids=None,
        mlm_labels=None,
    ):
        """Route to the sentence-embedding or contrastive forward pass."""
        common = dict(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            labels=labels,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if sent_emb:
            return sentemb_forward(self, self.roberta, **common)
        return cl_forward(self, self.roberta,
                          mlm_input_ids=mlm_input_ids,
                          mlm_labels=mlm_labels,
                          **common)
| 13,807 | 34.405128 | 161 |
py
|
SimCSE
|
SimCSE-main/simcse/__init__.py
|
from .tool import SimCSE
| 25 | 12 | 24 |
py
|
SimCSE
|
SimCSE-main/demo/gradiodemo.py
|
import torch
from scipy.spatial.distance import cosine
from transformers import AutoModel, AutoTokenizer
import gradio as gr
# Import our models. The package will take care of downloading the models automatically
# Supervised SimCSE checkpoint; the first call downloads weights from the HF hub.
tokenizer = AutoTokenizer.from_pretrained("princeton-nlp/sup-simcse-bert-base-uncased")
model = AutoModel.from_pretrained("princeton-nlp/sup-simcse-bert-base-uncased")
def simcse(text1, text2, text3):
    """Embed three sentences and return cosine similarities of (1,2) and (1,3)."""
    batch = tokenizer([text1, text2, text3], padding=True, truncation=True, return_tensors="pt")

    # Inference only; no gradients needed.
    with torch.no_grad():
        embeddings = model(**batch, output_hidden_states=True, return_dict=True).pooler_output

    # Cosine similarities are in [-1, 1]. Higher means more similar.
    sim_0_1 = 1 - cosine(embeddings[0], embeddings[1])
    sim_0_2 = 1 - cosine(embeddings[0], embeddings[2])
    return {"cosine similarity": sim_0_1}, {"cosine similarity": sim_0_2}
# Gradio widgets: three free-text inputs, two similarity outputs.
inputs = [
    gr.inputs.Textbox(lines=5, label="Input Text One"),
    gr.inputs.Textbox(lines=5, label="Input Text Two"),
    gr.inputs.Textbox(lines=5, label="Input Text Three")
]
outputs = [
    gr.outputs.Label(type="confidences",label="Cosine similarity between text one and two"),
    gr.outputs.Label(type="confidences", label="Cosine similarity between text one and three")
]
title = "SimCSE"
description = "demo for Princeton-NLP SimCSE. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2104.08821'>SimCSE: Simple Contrastive Learning of Sentence Embeddings</a> | <a href='https://github.com/princeton-nlp/SimCSE'>Github Repo</a></p>"
examples = [
    ["There's a kid on a skateboard.",
    "A kid is skateboarding.",
    "A kid is inside the house."]
]
# Build and launch the demo UI.
gr.Interface(simcse, inputs, outputs, title=title, description=description, article=article, examples=examples).launch()
| 2,105 | 40.294118 | 219 |
py
|
SimCSE
|
SimCSE-main/demo/flaskdemo.py
|
import json
import argparse
import torch
import os
import random
import numpy as np
import requests
import logging
import math
import copy
import string
from tqdm import tqdm
from time import time
from flask import Flask, request, jsonify
from flask_cors import CORS
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from simcse import SimCSE
# Timestamped log format for the demo server.
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)
def run_simcse_demo(port, args):
    """Serve the SimCSE retrieval demo: build a sentence index, then run a
    Flask app (static UI + search API) behind a Tornado HTTP server.

    `port` is taken from `args.port` by the caller and is a string here;
    Tornado resolves it via getaddrinfo. Blocks forever on the IOLoop.
    """
    app = Flask(__name__, static_folder='./static')
    app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
    CORS(app)

    sentence_path = os.path.join(args.sentences_dir, args.example_sentences)
    query_path = os.path.join(args.sentences_dir, args.example_query)
    # Build the similarity index once at startup; queries reuse it.
    embedder = SimCSE(args.model_name_or_path)
    embedder.build_index(sentence_path)

    @app.route('/')
    def index():
        # Demo front page.
        return app.send_static_file('index.html')

    @app.route('/api', methods=['GET'])
    def api():
        # Retrieval endpoint: ?query=...&topk=...&threshold=...
        query = request.args['query']
        top_k = int(request.args['topk'])
        threshold = float(request.args['threshold'])
        start = time()
        results = embedder.search(query, top_k=top_k, threshold=threshold)
        ret = []
        out = {}
        for sentence, score in results:
            ret.append({"sentence": sentence, "score": score})
        span = time() - start
        out['ret'] = ret
        out['time'] = "{:.4f}".format(span)
        return jsonify(out)

    @app.route('/files/<path:path>')
    def static_files(path):
        # Static assets referenced by the front page.
        return app.send_static_file('files/' + path)

    @app.route('/get_examples', methods=['GET'])
    def get_examples():
        # Example queries shown in the UI, one per line in the query file.
        with open(query_path, 'r') as fp:
            examples = [line.strip() for line in fp.readlines()]
        return jsonify(examples)

    addr = args.ip + ":" + args.port
    logger.info(f'Starting Index server at {addr}')
    http_server = HTTPServer(WSGIContainer(app))
    http_server.listen(port)
    IOLoop.instance().start()
if __name__=="__main__":
    # CLI for the demo server. NOTE(review): --device and --load_light are
    # parsed but not consumed by run_simcse_demo — kept for compatibility.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name_or_path', default=None, type=str)
    parser.add_argument('--device', default='cpu', type=str)
    parser.add_argument('--sentences_dir', default=None, type=str)
    parser.add_argument('--example_query', default=None, type=str)
    parser.add_argument('--example_sentences', default=None, type=str)
    parser.add_argument('--port', default='8888', type=str)
    parser.add_argument('--ip', default='http://127.0.0.1')
    parser.add_argument('--load_light', default=False, action='store_true')
    args = parser.parse_args()

    run_simcse_demo(args.port, args)
| 2,839 | 32.809524 | 113 |
py
|
SimCSE
|
SimCSE-main/SentEval/setup.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import io
from setuptools import setup, find_packages
# Use the README as the package's long description.
with io.open('./README.md', encoding='utf-8') as f:
    readme = f.read()

setup(
    name='SentEval',
    version='0.1.0',
    url='https://github.com/facebookresearch/SentEval',
    packages=find_packages(exclude=['examples']),
    license='Attribution-NonCommercial 4.0 International',
    long_description=readme,
)
| 568 | 24.863636 | 61 |
py
|
SimCSE
|
SimCSE-main/SentEval/examples/infersent.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
InferSent models. See https://github.com/facebookresearch/InferSent.
"""
from __future__ import absolute_import, division, unicode_literals
import sys
import os
import torch
import logging
# get models.py from InferSent repo
from models import InferSent
# Set PATHs
# NOTE(review): fill in the word-vector path below before running.
PATH_SENTEVAL = '../'
PATH_TO_DATA = '../data'
PATH_TO_W2V = 'PATH/TO/glove.840B.300d.txt'  # or crawl-300d-2M.vec for V2
MODEL_PATH = 'infersent1.pkl'
V = 1 # version of InferSent

# Fail fast if the model checkpoint / embeddings are missing.
assert os.path.isfile(MODEL_PATH) and os.path.isfile(PATH_TO_W2V), \
    'Set MODEL and GloVe PATHs'

# import senteval
sys.path.insert(0, PATH_SENTEVAL)
import senteval
def prepare(params, samples):
    """SentEval prepare hook: build the InferSent vocab from all samples."""
    sentences = [' '.join(tokens) for tokens in samples]
    params.infersent.build_vocab(sentences, tokenize=False)
def batcher(params, batch):
    """SentEval batcher hook: encode one batch of token lists with InferSent."""
    joined = [' '.join(tokens) for tokens in batch]
    return params.infersent.encode(joined, bsize=params.batch_size, tokenize=False)
"""
Evaluation of trained model on Transfer Tasks (SentEval)
"""
# define senteval params
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
'tenacity': 3, 'epoch_size': 2}
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
# Load InferSent model
params_model = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,
'pool_type': 'max', 'dpout_model': 0.0, 'version': V}
model = InferSent(params_model)
model.load_state_dict(torch.load(MODEL_PATH))
model.set_w2v_path(PATH_TO_W2V)
params_senteval['infersent'] = model.cuda()
se = senteval.engine.SE(params_senteval, batcher, prepare)
transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
'Length', 'WordContent', 'Depth', 'TopConstituents',
'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
'OddManOut', 'CoordinationInversion']
results = se.eval(transfer_tasks)
print(results)
| 2,463 | 31 | 92 |
py
|
SimCSE
|
SimCSE-main/SentEval/examples/bow.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division, unicode_literals
import sys
import io
import numpy as np
import logging
# Set PATHs
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data'
# PATH_TO_VEC = 'glove/glove.840B.300d.txt'
# Word-vector file used for the bag-of-words baseline.
PATH_TO_VEC = 'fasttext/crawl-300d-2M.vec'

# import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
# Create dictionary
def create_dictionary(sentences, threshold=0):
    """Build (id2word, word2id) from tokenized sentences, most frequent first.

    Words with fewer than `threshold` occurrences are dropped (threshold<=0
    keeps everything).  The special tokens <s>, </s>, <p> are pinned to the
    top of the vocabulary via huge pseudo-counts.
    """
    counts = {}
    for sentence in sentences:
        for token in sentence:
            counts[token] = counts.get(token, 0) + 1

    if threshold > 0:
        counts = {tok: c for tok, c in counts.items() if c >= threshold}

    # Pin the special tokens ahead of every real word.
    counts['<s>'] = 1e9 + 4
    counts['</s>'] = 1e9 + 3
    counts['<p>'] = 1e9 + 2

    ranked = sorted(counts.items(), key=lambda item: -item[1])  # inverse sort
    id2word = [tok for tok, _ in ranked]
    word2id = {tok: i for i, tok in enumerate(id2word)}
    return id2word, word2id
# Get word vectors from vocabulary (glove, word2vec, fasttext ..)
def get_wordvec(path_to_vec, word2id):
    """Load vectors for in-vocabulary words from a text embedding file.

    Each line is "<word> <v1> <v2> ..."; only words present in `word2id`
    are parsed into numpy arrays.
    """
    word_vec = {}

    with io.open(path_to_vec, 'r', encoding='utf-8') as f:
        # if word2vec or fasttext file : skip first line "next(f)"
        for line in f:
            word, vec = line.split(' ', 1)
            if word in word2id:
                word_vec[word] = np.fromstring(vec, sep=' ')

    logging.info('Found {0} words with word vectors, out of \
                        {1} words'.format(len(word_vec), len(word2id)))
    return word_vec
# SentEval prepare and batcher
def prepare(params, samples):
    """SentEval prepare hook: build the vocab and load its word vectors."""
    _, params.word2id = create_dictionary(samples)
    params.word_vec = get_wordvec(PATH_TO_VEC, params.word2id)
    # Dimensionality of the embedding file above (fastText crawl vectors).
    params.wvec_dim = 300
    return
def batcher(params, batch):
    """Average word vectors per sentence; OOV-only sentences map to zeros."""
    # SentEval may hand us empty token lists; treat them as a single period.
    batch = [sent if sent != [] else ['.'] for sent in batch]
    embeddings = []

    for sent in batch:
        vectors = [params.word_vec[w] for w in sent if w in params.word_vec]
        if not vectors:
            vectors = [np.zeros(params.wvec_dim)]
        embeddings.append(np.mean(vectors, 0))

    return np.vstack(embeddings)
# Set params for SentEval
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
# Logistic-regression probe settings used by SentEval's classifier tasks.
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
                                 'tenacity': 3, 'epoch_size': 2}

# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)

if __name__ == "__main__":
    se = senteval.engine.SE(params_senteval, batcher, prepare)
    # Downstream + probing task suite evaluated by SentEval.
    transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
                      'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
                      'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
                      'Length', 'WordContent', 'Depth', 'TopConstituents',
                      'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
                      'OddManOut', 'CoordinationInversion']
    results = se.eval(transfer_tasks)
    print(results)
| 3,423 | 29.300885 | 82 |
py
|
SimCSE
|
SimCSE-main/SentEval/examples/googleuse.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division
import os
import sys
import logging
import tensorflow as tf
import tensorflow_hub as hub
tf.logging.set_verbosity(0)
# Set PATHs
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data'
# import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
# tensorflow session
session = tf.Session()
# Silence most TensorFlow C++ log output.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# SentEval prepare and batcher
def prepare(params, samples):
    """SentEval prepare hook: no corpus-level setup needed for USE."""
    return
def batcher(params, batch):
    """Join token lists into sentences and embed them with the USE encoder."""
    # Empty token lists become a single period so the encoder gets valid input.
    sentences = [' '.join(tokens) if tokens != [] else '.' for tokens in batch]
    return params['google_use'](sentences)
def make_embed_fn(module):
    """Build an embedding function for a TF-Hub module in its own graph.

    Returns a closure mapping a list of strings to their embeddings; the
    MonitoredSession stays alive inside the closure for repeated calls.
    """
    with tf.Graph().as_default():
        sentences = tf.placeholder(tf.string)
        embed = hub.Module(module)
        embeddings = embed(sentences)
        session = tf.train.MonitoredSession()
    return lambda x: session.run(embeddings, {sentences: x})
# Start TF session and load Google Universal Sentence Encoder
encoder = make_embed_fn("https://tfhub.dev/google/universal-sentence-encoder-large/2")

# Set params for SentEval
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
# Logistic-regression probe settings used by SentEval's classifier tasks.
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
                                 'tenacity': 3, 'epoch_size': 2}
# Shared with batcher via the params dict.
params_senteval['google_use'] = encoder

# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
se = senteval.engine.SE(params_senteval, batcher, prepare)
transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
'Length', 'WordContent', 'Depth', 'TopConstituents',
'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
'OddManOut', 'CoordinationInversion']
results = se.eval(transfer_tasks)
print(results)
| 2,205 | 31.441176 | 86 |
py
|
SimCSE
|
SimCSE-main/SentEval/examples/models.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
This file contains the definition of encoders used in https://arxiv.org/pdf/1705.02364.pdf
"""
import numpy as np
import time
import torch
import torch.nn as nn
class InferSent(nn.Module):
    """BiLSTM sentence encoder with max/mean pooling (InferSent).

    Sentences are embedded word-by-word with pretrained vectors, run through
    a bidirectional LSTM, and pooled into a fixed-size vector.
    """

    def __init__(self, config):
        """config keys: bsize, word_emb_dim, enc_lstm_dim, pool_type,
        dpout_model, and optionally version (1: GloVe, 2: fastText)."""
        super(InferSent, self).__init__()
        self.bsize = config['bsize']
        self.word_emb_dim = config['word_emb_dim']
        self.enc_lstm_dim = config['enc_lstm_dim']
        self.pool_type = config['pool_type']
        self.dpout_model = config['dpout_model']
        self.version = 1 if 'version' not in config else config['version']
        self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim, 1,
                                bidirectional=True, dropout=self.dpout_model)
        assert self.version in [1, 2]
        # Version controls the sentence boundary tokens and tokenizer flavor.
        if self.version == 1:
            self.bos = '<s>'
            self.eos = '</s>'
            self.max_pad = True
            self.moses_tok = False
        elif self.version == 2:
            self.bos = '<p>'
            self.eos = '</p>'
            self.max_pad = False
            self.moses_tok = True

    def is_cuda(self):
        """Whether the model parameters live on GPU."""
        # either all weights are on cpu or they are on gpu
        return self.enc_lstm.bias_hh_l0.data.is_cuda

    def forward(self, sent_tuple):
        """Encode a padded batch; returns (bsize, 2*enc_lstm_dim) embeddings."""
        # sent_len: [max_len, ..., min_len] (bsize)
        # sent: (seqlen x bsize x worddim)
        sent, sent_len = sent_tuple

        # Sort by length (keep idx)
        sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
        sent_len_sorted = sent_len_sorted.copy()
        idx_unsort = np.argsort(idx_sort)

        idx_sort = torch.from_numpy(idx_sort).cuda() if self.is_cuda() \
            else torch.from_numpy(idx_sort)
        sent = sent.index_select(1, idx_sort)

        # Handling padding in Recurrent Networks
        sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len_sorted)
        sent_output = self.enc_lstm(sent_packed)[0]  # seqlen x batch x 2*nhid
        sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]

        # Un-sort by length
        idx_unsort = torch.from_numpy(idx_unsort).cuda() if self.is_cuda() \
            else torch.from_numpy(idx_unsort)
        sent_output = sent_output.index_select(1, idx_unsort)

        # Pooling
        if self.pool_type == "mean":
            sent_len = torch.FloatTensor(sent_len.copy()).unsqueeze(1).cuda()
            emb = torch.sum(sent_output, 0).squeeze(0)
            emb = emb / sent_len.expand_as(emb)
        elif self.pool_type == "max":
            # Exclude zero padding from the max unless max_pad is set (v1).
            if not self.max_pad:
                sent_output[sent_output == 0] = -1e9
            emb = torch.max(sent_output, 0)[0]
            if emb.ndimension() == 3:
                emb = emb.squeeze(0)
                assert emb.ndimension() == 2

        return emb

    def set_w2v_path(self, w2v_path):
        """Remember the path of the word-vector text file."""
        self.w2v_path = w2v_path

    def get_word_dict(self, sentences, tokenize=True):
        """Collect the set of words occurring in `sentences` (+ bos/eos)."""
        # create vocab of words
        word_dict = {}
        sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
        for sent in sentences:
            for word in sent:
                if word not in word_dict:
                    word_dict[word] = ''
        word_dict[self.bos] = ''
        word_dict[self.eos] = ''
        return word_dict

    def get_w2v(self, word_dict):
        """Load vectors from w2v_path for the words in `word_dict`."""
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        # create word_vec with w2v vectors
        word_vec = {}
        with open(self.w2v_path, encoding='utf-8') as f:
            for line in f:
                word, vec = line.split(' ', 1)
                if word in word_dict:
                    word_vec[word] = np.fromstring(vec, sep=' ')
        print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
        return word_vec

    def get_w2v_k(self, K):
        """Load the first K vectors from the file (plus bos/eos if later)."""
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        # create word_vec with k first w2v vectors
        k = 0
        word_vec = {}
        with open(self.w2v_path, encoding='utf-8') as f:
            for line in f:
                word, vec = line.split(' ', 1)
                if k <= K:
                    word_vec[word] = np.fromstring(vec, sep=' ')
                    k += 1
                if k > K:
                    # Keep scanning past K only to pick up the boundary tokens.
                    if word in [self.bos, self.eos]:
                        word_vec[word] = np.fromstring(vec, sep=' ')
                if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
                    break
        return word_vec

    def build_vocab(self, sentences, tokenize=True):
        """Build self.word_vec from the words used in `sentences`."""
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        word_dict = self.get_word_dict(sentences, tokenize)
        self.word_vec = self.get_w2v(word_dict)
        print('Vocab size : %s' % (len(self.word_vec)))

    # build w2v vocab with k most frequent words
    def build_vocab_k_words(self, K):
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        self.word_vec = self.get_w2v_k(K)
        print('Vocab size : %s' % (K))

    def update_vocab(self, sentences, tokenize=True):
        """Add vectors for words in `sentences` not yet in self.word_vec."""
        assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
        assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
        word_dict = self.get_word_dict(sentences, tokenize)

        # keep only new words
        for word in self.word_vec:
            if word in word_dict:
                del word_dict[word]

        # udpate vocabulary
        if word_dict:
            new_word_vec = self.get_w2v(word_dict)
            self.word_vec.update(new_word_vec)
        else:
            new_word_vec = []
        print('New vocab size : %s (added %s words)'% (len(self.word_vec), len(new_word_vec)))

    def get_batch(self, batch):
        """Turn a list of token lists into a (seqlen, bsize, dim) tensor."""
        # sent in batch in decreasing order of lengths
        # batch: (bsize, max_len, word_dim)
        embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))

        for i in range(len(batch)):
            for j in range(len(batch[i])):
                embed[j, i, :] = self.word_vec[batch[i][j]]

        return torch.FloatTensor(embed)

    def tokenize(self, s):
        """Tokenize with NLTK; approximate Moses tokenization for v2."""
        from nltk.tokenize import word_tokenize
        if self.moses_tok:
            s = ' '.join(word_tokenize(s))
            s = s.replace(" n't ", "n 't ")  # HACK to get ~MOSES tokenization
            return s.split()
        else:
            return word_tokenize(s)

    def prepare_samples(self, sentences, bsize, tokenize, verbose):
        """Tokenize, filter OOV words, and sort sentences by length.

        Returns (sentences, lengths, idx_sort); callers must unsort with
        np.argsort(idx_sort). `bsize` is accepted but unused here.
        """
        sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
                     [self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
        n_w = np.sum([len(x) for x in sentences])

        # filters words without w2v vectors
        for i in range(len(sentences)):
            s_f = [word for word in sentences[i] if word in self.word_vec]
            if not s_f:
                import warnings
                warnings.warn('No words in "%s" (idx=%s) have w2v vectors. \
                               Replacing by "</s>"..' % (sentences[i], i))
                s_f = [self.eos]
            sentences[i] = s_f

        lengths = np.array([len(s) for s in sentences])
        n_wk = np.sum(lengths)
        if verbose:
            print('Nb words kept : %s/%s (%.1f%s)' % (
                        n_wk, n_w, 100.0 * n_wk / n_w, '%'))

        # sort by decreasing length
        lengths, idx_sort = np.sort(lengths)[::-1], np.argsort(-lengths)
        sentences = np.array(sentences)[idx_sort]

        return sentences, lengths, idx_sort

    def encode(self, sentences, bsize=64, tokenize=True, verbose=False):
        """Encode raw sentences into a (n, 2*enc_lstm_dim) numpy array."""
        tic = time.time()
        sentences, lengths, idx_sort = self.prepare_samples(
                        sentences, bsize, tokenize, verbose)

        embeddings = []
        for stidx in range(0, len(sentences), bsize):
            batch = self.get_batch(sentences[stidx:stidx + bsize])
            if self.is_cuda():
                batch = batch.cuda()
            with torch.no_grad():
                batch = self.forward((batch, lengths[stidx:stidx + bsize])).data.cpu().numpy()
            embeddings.append(batch)
        embeddings = np.vstack(embeddings)

        # unsort
        idx_unsort = np.argsort(idx_sort)
        embeddings = embeddings[idx_unsort]

        if verbose:
            print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
                    len(embeddings)/(time.time()-tic),
                    'gpu' if self.is_cuda() else 'cpu', bsize))
        return embeddings

    def visualize(self, sent, tokenize=True):
        """Plot, per word, how often it wins the max-pool; returns (output, idxs)."""
        sent = sent.split() if not tokenize else self.tokenize(sent)
        sent = [[self.bos] + [word for word in sent if word in self.word_vec] + [self.eos]]

        if ' '.join(sent[0]) == '%s %s' % (self.bos, self.eos):
            import warnings
            warnings.warn('No words in "%s" have w2v vectors. Replacing \
                           by "%s %s"..' % (sent, self.bos, self.eos))
        batch = self.get_batch(sent)

        if self.is_cuda():
            batch = batch.cuda()
        output = self.enc_lstm(batch)[0]
        output, idxs = torch.max(output, 0)
        # output, idxs = output.squeeze(), idxs.squeeze()
        idxs = idxs.data.cpu().numpy()
        # Count how many hidden units selected each position in the max-pool.
        argmaxs = [np.sum((idxs == k)) for k in range(len(sent[0]))]

        # visualize model
        import matplotlib.pyplot as plt
        x = range(len(sent[0]))
        y = [100.0 * n / np.sum(argmaxs) for n in argmaxs]
        plt.xticks(x, sent[0], rotation=45)
        plt.bar(x, y)
        plt.ylabel('%')
        plt.title('Visualisation of words importance')
        plt.show()

        return output, idxs
| 9,875 | 36.12782 | 94 |
py
|
SimCSE
|
SimCSE-main/SentEval/examples/gensen.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Clone GenSen repo here: https://github.com/Maluuba/gensen.git
And follow instructions for loading the model used in batcher
"""
from __future__ import absolute_import, division, unicode_literals
import sys
import logging
# import GenSen package
from gensen import GenSen, GenSenSingle
# Set PATHs
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data'
# import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
# SentEval prepare and batcher
def prepare(params, samples):
    """SentEval hook, called once with every sample of the current task;
    GenSen needs no per-task preparation, so this is a no-op."""
    pass
def batcher(params, batch):
    """SentEval hook: embed a batch of tokenized sentences with GenSen.

    Empty sentences are replaced by '.' so the encoder always receives a
    token. Returns the 'last' pooled representations for the batch.
    """
    batch = [' '.join(sent) if sent != [] else '.' for sent in batch]
    # Bug fix: the original referenced the undefined names `gensen` and
    # `sentences` (NameError at the first call). Use the encoder stored in
    # params (see params_senteval['gensen'] below) and the actual batch.
    _, reps_h_t = params['gensen'].get_representation(
        batch, pool='last', return_numpy=True, tokenize=True
    )
    return reps_h_t
# Load GenSen model (two single-model encoders combined into an ensemble)
gensen_1 = GenSenSingle(
    model_folder='../data/models',
    filename_prefix='nli_large_bothskip',
    pretrained_emb='../data/embedding/glove.840B.300d.h5'
)
gensen_2 = GenSenSingle(
    model_folder='../data/models',
    filename_prefix='nli_large_bothskip_parse',
    pretrained_emb='../data/embedding/glove.840B.300d.h5'
)
gensen_encoder = GenSen(gensen_1, gensen_2)
# Bug fix: a stray `reps_h, reps_h_t = gensen.get_representation(sentences, ...)`
# call stood here; both `gensen` and `sentences` were undefined, so importing
# this module raised NameError. The encoder is exercised through batcher().

# Set params for SentEval
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
                                 'tenacity': 3, 'epoch_size': 2}
# make the encoder reachable from batcher() via params
params_senteval['gensen'] = gensen_encoder

# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)

if __name__ == "__main__":
    se = senteval.engine.SE(params_senteval, batcher, prepare)
    transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
                      'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
                      'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
                      'Length', 'WordContent', 'Depth', 'TopConstituents',
                      'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
                      'OddManOut', 'CoordinationInversion']
    results = se.eval(transfer_tasks)
    print(results)
| 2,429 | 31.4 | 82 |
py
|
SimCSE
|
SimCSE-main/SentEval/examples/skipthought.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division, unicode_literals
"""
Example of file for SkipThought in SentEval
"""
import logging
import sys
# Bug fix: sys.setdefaultencoding is hidden by site.py on Python 2 (it must
# be restored with reload(sys) first) and does not exist at all on Python 3,
# where str is already unicode — so guard the call instead of crashing.
try:
    reload(sys)  # noqa: F821 -- Python 2 builtin only
    sys.setdefaultencoding('utf8')
except NameError:
    pass  # Python 3: default encoding is already utf-8
# Set PATHs
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data/senteval_data/'
PATH_TO_SKIPTHOUGHT = ''
assert PATH_TO_SKIPTHOUGHT != '', 'Download skipthought and set correct PATH'
# import skipthought and Senteval
sys.path.insert(0, PATH_TO_SKIPTHOUGHT)
import skipthoughts
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
def prepare(params, samples):
    """SentEval hook, called once per task with all samples; SkipThought
    needs no per-task preparation, so this is a no-op."""
    pass
def batcher(params, batch):
    """SentEval hook: encode a batch of tokenized sentences with SkipThought.

    Empty sentences are replaced by '.' so the encoder always gets a token.
    """
    # Bug fix: the original called str(' '.join(sent), errors="ignore"),
    # which is a TypeError on both Python 2 (str() takes no `errors`) and
    # Python 3 (the first argument would have to be bytes). The join alone
    # already yields the unicode string we want.
    batch = [' '.join(sent) if sent != [] else '.' for sent in batch]
    embeddings = skipthoughts.encode(params['encoder'], batch,
                                     verbose=False, use_eos=True)
    return embeddings
# Set params for SentEval
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 10, 'batch_size': 512}
# config for the downstream classifier trained on top of the embeddings
params_senteval['classifier'] = {'nhid': 0, 'optim': 'adam', 'batch_size': 64,
                                 'tenacity': 5, 'epoch_size': 4}
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
    # Load SkipThought model
    params_senteval['encoder'] = skipthoughts.load_model()
    se = senteval.engine.SE(params_senteval, batcher, prepare)
    # downstream + probing task names understood by senteval.engine.SE
    transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
                      'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
                      'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
                      'Length', 'WordContent', 'Depth', 'TopConstituents',
                      'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
                      'OddManOut', 'CoordinationInversion']
    results = se.eval(transfer_tasks)
    print(results)
| 2,048 | 32.048387 | 97 |
py
|
SimCSE
|
SimCSE-main/SentEval/senteval/engine.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
Generic sentence evaluation scripts wrapper
'''
from __future__ import absolute_import, division, unicode_literals
from senteval import utils
from senteval.binary import CREval, MREval, MPQAEval, SUBJEval
from senteval.snli import SNLIEval
from senteval.trec import TRECEval
from senteval.sick import SICKEntailmentEval, SICKEval
from senteval.mrpc import MRPCEval
from senteval.sts import STS12Eval, STS13Eval, STS14Eval, STS15Eval, STS16Eval, STSBenchmarkEval, SICKRelatednessEval, STSBenchmarkFinetune
from senteval.sst import SSTEval
from senteval.rank import ImageCaptionRetrievalEval
from senteval.probing import *
class SE(object):
    """SentEval driver: wires the user's `batcher`/`prepare` callbacks to
    the task evaluation classes and dispatches eval(name) to the right one."""
    def __init__(self, params, batcher, prepare=None):
        """
        params  : dict of global settings; missing keys get the defaults
                  filled in below (usepytorch, seed, batch_size, nhid, kfold).
        batcher : callable(params, batch_of_token_lists) -> embeddings.
        prepare : optional callable(params, all_samples), run once per task.
        """
        # parameters
        params = utils.dotdict(params)
        params.usepytorch = True if 'usepytorch' not in params else params.usepytorch
        params.seed = 1111 if 'seed' not in params else params.seed
        params.batch_size = 128 if 'batch_size' not in params else params.batch_size
        params.nhid = 0 if 'nhid' not in params else params.nhid
        params.kfold = 5 if 'kfold' not in params else params.kfold
        if 'classifier' not in params or not params['classifier']:
            params.classifier = {'nhid': 0}
        assert 'nhid' in params.classifier, 'Set number of hidden units in classifier config!!'
        self.params = params
        # batcher and prepare
        self.batcher = batcher
        # default prepare is a no-op with the same (params, samples) arity
        self.prepare = prepare if prepare else lambda x, y: None
        # every task name eval() accepts
        self.list_tasks = ['CR', 'MR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
                           'SICKRelatedness', 'SICKEntailment', 'STSBenchmark',
                           'SNLI', 'ImageCaptionRetrieval', 'STS12', 'STS13',
                           'STS14', 'STS15', 'STS16',
                           'Length', 'WordContent', 'Depth', 'TopConstituents',
                           'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
                           'OddManOut', 'CoordinationInversion', 'SICKRelatedness-finetune', 'STSBenchmark-finetune', 'STSBenchmark-fix']
    def eval(self, name):
        """Run one task (string) or several (list of strings).

        Returns the task's result dict, or a dict of results keyed by task
        name when given a list."""
        # evaluate on evaluation [name], either takes string or list of strings
        if (isinstance(name, list)):
            self.results = {x: self.eval(x) for x in name}
            return self.results
        tpath = self.params.task_path
        assert name in self.list_tasks, str(name) + ' not in ' + str(self.list_tasks)
        # Original SentEval tasks
        if name == 'CR':
            self.evaluation = CREval(tpath + '/downstream/CR', seed=self.params.seed)
        elif name == 'MR':
            self.evaluation = MREval(tpath + '/downstream/MR', seed=self.params.seed)
        elif name == 'MPQA':
            self.evaluation = MPQAEval(tpath + '/downstream/MPQA', seed=self.params.seed)
        elif name == 'SUBJ':
            self.evaluation = SUBJEval(tpath + '/downstream/SUBJ', seed=self.params.seed)
        elif name == 'SST2':
            self.evaluation = SSTEval(tpath + '/downstream/SST/binary', nclasses=2, seed=self.params.seed)
        elif name == 'SST5':
            self.evaluation = SSTEval(tpath + '/downstream/SST/fine', nclasses=5, seed=self.params.seed)
        elif name == 'TREC':
            self.evaluation = TRECEval(tpath + '/downstream/TREC', seed=self.params.seed)
        elif name == 'MRPC':
            self.evaluation = MRPCEval(tpath + '/downstream/MRPC', seed=self.params.seed)
        elif name == 'SICKRelatedness':
            self.evaluation = SICKRelatednessEval(tpath + '/downstream/SICK', seed=self.params.seed)
        elif name == 'STSBenchmark':
            self.evaluation = STSBenchmarkEval(tpath + '/downstream/STS/STSBenchmark', seed=self.params.seed)
        elif name == 'STSBenchmark-fix':
            self.evaluation = STSBenchmarkEval(tpath + '/downstream/STS/STSBenchmark-fix', seed=self.params.seed)
        elif name == 'STSBenchmark-finetune':
            self.evaluation = STSBenchmarkFinetune(tpath + '/downstream/STS/STSBenchmark', seed=self.params.seed)
        elif name == 'SICKRelatedness-finetune':
            self.evaluation = SICKEval(tpath + '/downstream/SICK', seed=self.params.seed)
        elif name == 'SICKEntailment':
            self.evaluation = SICKEntailmentEval(tpath + '/downstream/SICK', seed=self.params.seed)
        elif name == 'SNLI':
            self.evaluation = SNLIEval(tpath + '/downstream/SNLI', seed=self.params.seed)
        elif name in ['STS12', 'STS13', 'STS14', 'STS15', 'STS16']:
            fpath = name + '-en-test'
            # resolves e.g. 'STS12' to the STS12Eval class imported above
            self.evaluation = eval(name + 'Eval')(tpath + '/downstream/STS/' + fpath, seed=self.params.seed)
        elif name == 'ImageCaptionRetrieval':
            self.evaluation = ImageCaptionRetrievalEval(tpath + '/downstream/COCO', seed=self.params.seed)
        # Probing Tasks
        elif name == 'Length':
            self.evaluation = LengthEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'WordContent':
            self.evaluation = WordContentEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'Depth':
            self.evaluation = DepthEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'TopConstituents':
            self.evaluation = TopConstituentsEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'BigramShift':
            self.evaluation = BigramShiftEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'Tense':
            self.evaluation = TenseEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'SubjNumber':
            self.evaluation = SubjNumberEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'ObjNumber':
            self.evaluation = ObjNumberEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'OddManOut':
            self.evaluation = OddManOutEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'CoordinationInversion':
            self.evaluation = CoordinationInversionEval(tpath + '/probing', seed=self.params.seed)
        self.params.current_task = name
        self.evaluation.do_prepare(self.params, self.prepare)
        self.results = self.evaluation.run(self.params, self.batcher)
        return self.results
| 6,525 | 49.2 | 139 |
py
|
SimCSE
|
SimCSE-main/SentEval/senteval/rank.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
Image-Caption Retrieval with COCO dataset
'''
from __future__ import absolute_import, division, unicode_literals
import os
import sys
import logging
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
from senteval.tools.ranking import ImageSentenceRankingPytorch
class ImageCaptionRetrievalEval(object):
    """COCO image-caption retrieval: embeds the 5 captions per image and
    trains a ranking model against precomputed image features."""
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task: Image Caption Retrieval *****\n\n')
        # Get captions and image features
        self.seed = seed
        train, dev, test = self.loadFile(task_path)
        self.coco_data = {'train': train, 'dev': dev, 'test': test}
    def do_prepare(self, params, prepare):
        # hand every caption of all three splits to the user's prepare hook
        samples = self.coco_data['train']['sent'] + \
                  self.coco_data['dev']['sent'] + \
                  self.coco_data['test']['sent']
        prepare(params, samples)
    def loadFile(self, fpath):
        """Load {split}.pkl files; each yields exactly 5 captions per image,
        duplicated image features so captions and features align 1:1."""
        coco = {}
        for split in ['train', 'valid', 'test']:
            list_sent = []
            list_img_feat = []
            # pickles were written under Python 2; latin1 decoding is needed on Python 3
            if sys.version_info < (3, 0):
                with open(os.path.join(fpath, split + '.pkl')) as f:
                    cocodata = pickle.load(f)
            else:
                with open(os.path.join(fpath, split + '.pkl'), 'rb') as f:
                    cocodata = pickle.load(f, encoding='latin1')
            for imgkey in range(len(cocodata['features'])):
                assert len(cocodata['image_to_caption_ids'][imgkey]) >= 5, \
                       cocodata['image_to_caption_ids'][imgkey]
                for captkey in cocodata['image_to_caption_ids'][imgkey][0:5]:
                    sent = cocodata['captions'][captkey]['cleaned_caption']
                    sent += ' .'  # add punctuation to end of sentence in COCO
                    # NOTE(review): .encode('utf-8').split() yields byte tokens on
                    # Python 3 — presumably a Python 2 leftover; verify downstream.
                    list_sent.append(sent.encode('utf-8').split())
                    list_img_feat.append(cocodata['features'][imgkey])
            assert len(list_sent) == len(list_img_feat) and \
                   len(list_sent) % 5 == 0
            list_img_feat = np.array(list_img_feat).astype('float32')
            coco[split] = {'sent': list_sent, 'imgfeat': list_img_feat}
        return coco['train'], coco['valid'], coco['test']
    def run(self, params, batcher):
        """Embed all captions with `batcher`, then train/evaluate the
        image-sentence ranking model; returns r@1/5/10 and median rank
        in both retrieval directions."""
        coco_embed = {'train': {'sentfeat': [], 'imgfeat': []},
                      'dev': {'sentfeat': [], 'imgfeat': []},
                      'test': {'sentfeat': [], 'imgfeat': []}}
        for key in self.coco_data:
            logging.info('Computing embedding for {0}'.format(key))
            # Sort to reduce padding
            self.coco_data[key]['sent'] = np.array(self.coco_data[key]['sent'])
            # NOTE(review): this sorts the object array lexicographically and takes
            # argsort of the same array — presumably meant to sort by length as the
            # other tasks do; confirm the intended ordering.
            self.coco_data[key]['sent'], idx_sort = np.sort(self.coco_data[key]['sent']), np.argsort(self.coco_data[key]['sent'])
            idx_unsort = np.argsort(idx_sort)
            coco_embed[key]['X'] = []
            nsent = len(self.coco_data[key]['sent'])
            for ii in range(0, nsent, params.batch_size):
                batch = self.coco_data[key]['sent'][ii:ii + params.batch_size]
                embeddings = batcher(params, batch)
                coco_embed[key]['sentfeat'].append(embeddings)
            # stack batch outputs, then restore the original caption order
            coco_embed[key]['sentfeat'] = np.vstack(coco_embed[key]['sentfeat'])[idx_unsort]
            coco_embed[key]['imgfeat'] = np.array(self.coco_data[key]['imgfeat'])
            logging.info('Computed {0} embeddings'.format(key))
        config = {'seed': self.seed, 'projdim': 1000, 'margin': 0.2}
        clf = ImageSentenceRankingPytorch(train=coco_embed['train'],
                                          valid=coco_embed['dev'],
                                          test=coco_embed['test'],
                                          config=config)
        bestdevscore, r1_i2t, r5_i2t, r10_i2t, medr_i2t, \
            r1_t2i, r5_t2i, r10_t2i, medr_t2i = clf.run()
        logging.debug("\nTest scores | Image to text: \
                      {0}, {1}, {2}, {3}".format(r1_i2t, r5_i2t, r10_i2t, medr_i2t))
        logging.debug("Test scores | Text to image: \
                      {0}, {1}, {2}, {3}\n".format(r1_t2i, r5_t2i, r10_t2i, medr_t2i))
        return {'devacc': bestdevscore,
                'acc': [(r1_i2t, r5_i2t, r10_i2t, medr_i2t),
                        (r1_t2i, r5_t2i, r10_t2i, medr_t2i)],
                'ndev': len(coco_embed['dev']['sentfeat']),
                'ntest': len(coco_embed['test']['sentfeat'])}
| 4,643 | 41.605505 | 129 |
py
|
SimCSE
|
SimCSE-main/SentEval/senteval/snli.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
SNLI - Entailment
'''
from __future__ import absolute_import, division, unicode_literals
import codecs
import os
import io
import copy
import logging
import numpy as np
from senteval.tools.validation import SplitClassifier
class SNLIEval(object):
    """SNLI 3-way entailment: embeds both sentences of each pair and trains
    a classifier on [u, v, u*v, |u-v|] pair features."""
    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : SNLI Entailment*****\n\n')
        self.seed = seed
        train1 = self.loadFile(os.path.join(taskpath, 's1.train'))
        train2 = self.loadFile(os.path.join(taskpath, 's2.train'))
        trainlabels = io.open(os.path.join(taskpath, 'labels.train'),
                              encoding='utf-8').read().splitlines()
        valid1 = self.loadFile(os.path.join(taskpath, 's1.dev'))
        valid2 = self.loadFile(os.path.join(taskpath, 's2.dev'))
        validlabels = io.open(os.path.join(taskpath, 'labels.dev'),
                              encoding='utf-8').read().splitlines()
        test1 = self.loadFile(os.path.join(taskpath, 's1.test'))
        test2 = self.loadFile(os.path.join(taskpath, 's2.test'))
        testlabels = io.open(os.path.join(taskpath, 'labels.test'),
                             encoding='utf-8').read().splitlines()
        # sort data (by s2 first) to reduce padding
        sorted_train = sorted(zip(train2, train1, trainlabels),
                              key=lambda z: (len(z[0]), len(z[1]), z[2]))
        train2, train1, trainlabels = map(list, zip(*sorted_train))
        sorted_valid = sorted(zip(valid2, valid1, validlabels),
                              key=lambda z: (len(z[0]), len(z[1]), z[2]))
        valid2, valid1, validlabels = map(list, zip(*sorted_valid))
        sorted_test = sorted(zip(test2, test1, testlabels),
                             key=lambda z: (len(z[0]), len(z[1]), z[2]))
        test2, test1, testlabels = map(list, zip(*sorted_test))
        self.samples = train1 + train2 + valid1 + valid2 + test1 + test2
        self.data = {'train': (train1, train2, trainlabels),
                     'valid': (valid1, valid2, validlabels),
                     'test': (test1, test2, testlabels)
                     }
    def do_prepare(self, params, prepare):
        return prepare(params, self.samples)
    def loadFile(self, fpath):
        # one tokenized sentence per line
        with codecs.open(fpath, 'rb', 'latin-1') as f:
            return [line.split() for line in
                    f.read().splitlines()]
    def run(self, params, batcher):
        """Encode all pairs with `batcher`, build pair features and run the
        train/valid/test SplitClassifier."""
        self.X, self.y = {}, {}
        dico_label = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        for key in self.data:
            if key not in self.X:
                self.X[key] = []
            if key not in self.y:
                self.y[key] = []
            input1, input2, mylabels = self.data[key]
            enc_input = []
            n_labels = len(mylabels)
            for ii in range(0, n_labels, params.batch_size):
                batch1 = input1[ii:ii + params.batch_size]
                batch2 = input2[ii:ii + params.batch_size]
                if len(batch1) == len(batch2) and len(batch1) > 0:
                    enc1 = batcher(params, batch1)
                    enc2 = batcher(params, batch2)
                    # pair features: [u, v, u*v, |u-v|]
                    enc_input.append(np.hstack((enc1, enc2, enc1 * enc2,
                                                np.abs(enc1 - enc2))))
                # NOTE(review): ii already advances in steps of batch_size, so this
                # modulus only fires at ii == 0 — presumably meant to log every
                # 20000 batches; confirm the intended cadence.
                if (ii*params.batch_size) % (20000*params.batch_size) == 0:
                    logging.info("PROGRESS (encoding): %.2f%%" %
                                 (100 * ii / n_labels))
            self.X[key] = np.vstack(enc_input)
            self.y[key] = [dico_label[y] for y in mylabels]
        config = {'nclasses': 3, 'seed': self.seed,
                  'usepytorch': params.usepytorch,
                  'cudaEfficient': True,
                  'nhid': params.nhid, 'noreg': True}
        config_classifier = copy.deepcopy(params.classifier)
        config_classifier['max_epoch'] = 15
        config_classifier['epoch_size'] = 1
        config['classifier'] = config_classifier
        clf = SplitClassifier(self.X, self.y, config)
        devacc, testacc = clf.run()
        logging.debug('Dev acc : {0} Test acc : {1} for SNLI\n'
                      .format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(self.data['valid'][0]),
                'ntest': len(self.data['test'][0])}
| 4,577 | 39.157895 | 75 |
py
|
SimCSE
|
SimCSE-main/SentEval/senteval/utils.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division, unicode_literals
import numpy as np
import re
import inspect
from torch import optim
def create_dictionary(sentences):
    """Build (id2word, word2id) from tokenized sentences, most frequent first.

    The special tokens <s>, </s> and <p> are pinned to ids 0, 1 and 2 by
    giving them artificially huge counts.
    """
    counts = {}
    for sentence in sentences:
        for token in sentence:
            counts[token] = counts.get(token, 0) + 1
    counts['<s>'] = 1e9 + 4
    counts['</s>'] = 1e9 + 3
    counts['<p>'] = 1e9 + 2
    # words['<UNK>'] = 1e9 + 1
    ranked = sorted(counts.items(), key=lambda item: -item[1])  # inverse sort
    id2word = [token for token, _ in ranked]
    word2id = {token: idx for idx, token in enumerate(id2word)}
    return id2word, word2id
def cosine(u, v):
    """Cosine similarity between two 1-d vectors."""
    denom = np.linalg.norm(u) * np.linalg.norm(v)
    return np.dot(u, v) / denom
class dotdict(dict):
    """dict subclass exposing keys as attributes.

    Reading a missing attribute yields None (dict.get semantics) rather
    than raising AttributeError; setting/deleting attributes writes through
    to the underlying dict entries.
    """
    def __getattr__(self, key):
        # only invoked when normal attribute lookup fails
        return self.get(key)

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        del self[key]
def get_optimizer(s):
    """
    Parse optimizer parameters.
    Input should be of the form:
        - "sgd,lr=0.01"
        - "adagrad,lr=0.1,lr_decay=0.05"

    Returns (optimizer_class, kwargs) where kwargs maps each parsed
    hyper-parameter name to a float. Raises for unknown methods or
    hyper-parameters the optimizer's __init__ does not accept.
    """
    if "," in s:
        method = s[:s.find(',')]
        optim_params = {}
        for x in s[s.find(',') + 1:].split(','):
            split = x.split('=')
            assert len(split) == 2
            # every hyper-parameter value must be a plain decimal number
            assert re.match(r"^[+-]?(\d+(\.\d*)?|\.\d+)$", split[1]) is not None
            optim_params[split[0]] = float(split[1])
    else:
        method = s
        optim_params = {}

    if method == 'adadelta':
        optim_fn = optim.Adadelta
    elif method == 'adagrad':
        optim_fn = optim.Adagrad
    elif method == 'adam':
        optim_fn = optim.Adam
    elif method == 'adamax':
        optim_fn = optim.Adamax
    elif method == 'asgd':
        optim_fn = optim.ASGD
    elif method == 'rmsprop':
        optim_fn = optim.RMSprop
    elif method == 'rprop':
        optim_fn = optim.Rprop
    elif method == 'sgd':
        optim_fn = optim.SGD
        assert 'lr' in optim_params
    else:
        raise Exception('Unknown optimization method: "%s"' % method)

    # check that we give good parameters to the optimizer
    # Bug fix: inspect.getargspec was removed in Python 3.11 and raised
    # ValueError for callables with keyword-only arguments (which modern
    # torch optimizers have); getfullargspec handles both.
    expected_args = inspect.getfullargspec(optim_fn.__init__)[0]
    assert expected_args[:2] == ['self', 'params']
    if not all(k in expected_args[2:] for k in optim_params.keys()):
        raise Exception('Unexpected parameters: expected "%s", got "%s"' % (
            str(expected_args[2:]), str(optim_params.keys())))

    return optim_fn, optim_params
| 2,713 | 27.270833 | 79 |
py
|
SimCSE
|
SimCSE-main/SentEval/senteval/binary.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
Binary classifier and corresponding datasets : MR, CR, SUBJ, MPQA
'''
from __future__ import absolute_import, division, unicode_literals
import io
import os
import numpy as np
import logging
from senteval.tools.validation import InnerKFoldClassifier
class BinaryClassifierEval(object):
    """Shared driver for the binary sentence-classification tasks
    (MR, CR, SUBJ, MPQA): embeds every sample with the user's batcher and
    runs an inner-k-fold classifier on the embeddings."""

    def __init__(self, pos, neg, seed=1111):
        self.seed = seed
        # positive samples are labeled 1, negative samples 0
        self.samples = pos + neg
        self.labels = [1] * len(pos) + [0] * len(neg)
        self.n_samples = len(self.samples)

    def do_prepare(self, params, prepare):
        # prepare is given the whole text; whatever it stores on `params`
        # (word2id, vectors, ...) is later consumed by `batcher`.
        return prepare(params, self.samples)

    def loadFile(self, fpath):
        """Read one sentence per line, returning a list of token lists."""
        with io.open(fpath, 'r', encoding='latin-1') as fin:
            return [line.split() for line in fin.read().splitlines()]

    def run(self, params, batcher):
        """Encode every sample and evaluate with InnerKFoldClassifier."""
        # Sort by (length, label) to reduce padding within each batch.
        paired = sorted(zip(self.samples, self.labels),
                        key=lambda z: (len(z[0]), z[1]))
        ordered_samples = [sample for (sample, _) in paired]
        ordered_labels = [label for (_, label) in paired]

        logging.info('Generating sentence embeddings')
        chunks = []
        for start in range(0, self.n_samples, params.batch_size):
            chunk = batcher(params, ordered_samples[start:start + params.batch_size])
            chunks.append(chunk)
        enc_input = np.vstack(chunks)
        logging.info('Generated sentence embeddings')

        config = {'nclasses': 2, 'seed': self.seed,
                  'usepytorch': params.usepytorch,
                  'classifier': params.classifier,
                  'nhid': params.nhid, 'kfold': params.kfold}
        clf = InnerKFoldClassifier(enc_input, np.array(ordered_labels), config)
        devacc, testacc = clf.run()
        logging.debug('Dev acc : {0} Test acc : {1}\n'.format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc, 'ndev': self.n_samples,
                'ntest': self.n_samples}
class CREval(BinaryClassifierEval):
    """Customer Reviews (CR): product-review polarity."""
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : CR *****\n\n')
        pos = self.loadFile(os.path.join(task_path, 'custrev.pos'))
        neg = self.loadFile(os.path.join(task_path, 'custrev.neg'))
        # Bug fix: super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; name the class explicitly.
        super(CREval, self).__init__(pos, neg, seed)
class MREval(BinaryClassifierEval):
    """Movie Reviews (MR): sentence-level sentiment polarity."""
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : MR *****\n\n')
        pos = self.loadFile(os.path.join(task_path, 'rt-polarity.pos'))
        neg = self.loadFile(os.path.join(task_path, 'rt-polarity.neg'))
        # Bug fix: super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; name the class explicitly.
        super(MREval, self).__init__(pos, neg, seed)
class SUBJEval(BinaryClassifierEval):
    """SUBJ: subjective vs objective sentences (objective sentences carry
    the positive label, matching the original argument order)."""
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : SUBJ *****\n\n')
        obj = self.loadFile(os.path.join(task_path, 'subj.objective'))
        subj = self.loadFile(os.path.join(task_path, 'subj.subjective'))
        # Bug fix: super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; name the class explicitly.
        super(SUBJEval, self).__init__(obj, subj, seed)
class MPQAEval(BinaryClassifierEval):
    """MPQA: opinion-polarity classification."""
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : MPQA *****\n\n')
        pos = self.loadFile(os.path.join(task_path, 'mpqa.pos'))
        neg = self.loadFile(os.path.join(task_path, 'mpqa.neg'))
        # Bug fix: super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; name the class explicitly.
        super(MPQAEval, self).__init__(pos, neg, seed)
| 3,712 | 38.924731 | 79 |
py
|
SimCSE
|
SimCSE-main/SentEval/senteval/mrpc.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
MRPC : Microsoft Research Paraphrase (detection) Corpus
'''
from __future__ import absolute_import, division, unicode_literals
import os
import logging
import numpy as np
import io
from senteval.tools.validation import KFoldClassifier
from sklearn.metrics import f1_score
class MRPCEval(object):
    """MRPC paraphrase detection: embeds both sentences of each pair and
    trains a k-fold classifier on [|u-v|, u*v] pair features."""
    def __init__(self, task_path, seed=1111):
        logging.info('***** Transfer task : MRPC *****\n\n')
        self.seed = seed
        train = self.loadFile(os.path.join(task_path,
                              'msr_paraphrase_train.txt'))
        test = self.loadFile(os.path.join(task_path,
                             'msr_paraphrase_test.txt'))
        self.mrpc_data = {'train': train, 'test': test}
    def do_prepare(self, params, prepare):
        # TODO : Should we separate samples in "train, test"?
        samples = self.mrpc_data['train']['X_A'] + \
                  self.mrpc_data['train']['X_B'] + \
                  self.mrpc_data['test']['X_A'] + self.mrpc_data['test']['X_B']
        return prepare(params, samples)
    def loadFile(self, fpath):
        """Parse one MRPC tsv: label in column 0, the sentence pair in
        columns 3 and 4; the [1:] slices drop the header row."""
        mrpc_data = {'X_A': [], 'X_B': [], 'y': []}
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                text = line.strip().split('\t')
                mrpc_data['X_A'].append(text[3].split())
                mrpc_data['X_B'].append(text[4].split())
                mrpc_data['y'].append(text[0])
        mrpc_data['X_A'] = mrpc_data['X_A'][1:]
        mrpc_data['X_B'] = mrpc_data['X_B'][1:]
        mrpc_data['y'] = [int(s) for s in mrpc_data['y'][1:]]
        return mrpc_data
    def run(self, params, batcher):
        """Encode both sides of every pair, build pair features and run
        the KFoldClassifier; reports accuracy and F1."""
        mrpc_embed = {'train': {}, 'test': {}}
        for key in self.mrpc_data:
            logging.info('Computing embedding for {0}'.format(key))
            # Sort to reduce padding
            text_data = {}
            sorted_corpus = sorted(zip(self.mrpc_data[key]['X_A'],
                                       self.mrpc_data[key]['X_B'],
                                       self.mrpc_data[key]['y']),
                                   key=lambda z: (len(z[0]), len(z[1]), z[2]))
            text_data['A'] = [x for (x, y, z) in sorted_corpus]
            text_data['B'] = [y for (x, y, z) in sorted_corpus]
            text_data['y'] = [z for (x, y, z) in sorted_corpus]
            for txt_type in ['A', 'B']:
                mrpc_embed[key][txt_type] = []
                for ii in range(0, len(text_data['y']), params.batch_size):
                    batch = text_data[txt_type][ii:ii + params.batch_size]
                    embeddings = batcher(params, batch)
                    mrpc_embed[key][txt_type].append(embeddings)
                mrpc_embed[key][txt_type] = np.vstack(mrpc_embed[key][txt_type])
            mrpc_embed[key]['y'] = np.array(text_data['y'])
            logging.info('Computed {0} embeddings'.format(key))
        # Train
        trainA = mrpc_embed['train']['A']
        trainB = mrpc_embed['train']['B']
        # pair features: [|u-v|, u*v]
        trainF = np.c_[np.abs(trainA - trainB), trainA * trainB]
        trainY = mrpc_embed['train']['y']
        # Test
        testA = mrpc_embed['test']['A']
        testB = mrpc_embed['test']['B']
        testF = np.c_[np.abs(testA - testB), testA * testB]
        testY = mrpc_embed['test']['y']
        config = {'nclasses': 2, 'seed': self.seed,
                  'usepytorch': params.usepytorch,
                  'classifier': params.classifier,
                  'nhid': params.nhid, 'kfold': params.kfold}
        clf = KFoldClassifier(train={'X': trainF, 'y': trainY},
                              test={'X': testF, 'y': testY}, config=config)
        devacc, testacc, yhat = clf.run()
        testf1 = round(100*f1_score(testY, yhat), 2)
        logging.debug('Dev acc : {0} Test acc {1}; Test F1 {2} for MRPC.\n'
                      .format(devacc, testacc, testf1))
        return {'devacc': devacc, 'acc': testacc, 'f1': testf1,
                'ndev': len(trainA), 'ntest': len(testA)}
| 4,202 | 39.028571 | 80 |
py
|
SimCSE
|
SimCSE-main/SentEval/senteval/sts.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
STS-{2012,2013,2014,2015,2016} (unsupervised) and
STS-benchmark (supervised) tasks
'''
from __future__ import absolute_import, division, unicode_literals
import os
import io
import numpy as np
import logging
from scipy.stats import spearmanr, pearsonr
from senteval.utils import cosine
from senteval.sick import SICKEval
class STSEval(object):
    """Base class for the unsupervised STS tasks: loads sentence pairs with
    gold similarity scores, embeds both sides, and reports Pearson/Spearman
    correlation between embedding similarity and the gold scores.
    Subclasses must set self.datasets and call loadFile."""
    def loadFile(self, fpath):
        """Load STS.input.<dataset>.txt / STS.gs.<dataset>.txt pairs for
        every name in self.datasets; pairs whose gold score is empty are
        dropped."""
        self.data = {}
        self.samples = []
        for dataset in self.datasets:
            sent1, sent2 = zip(*[l.split("\t") for l in
                                 io.open(fpath + '/STS.input.%s.txt' % dataset,
                                         encoding='utf8').read().splitlines()])
            raw_scores = np.array([x for x in
                                   io.open(fpath + '/STS.gs.%s.txt' % dataset,
                                           encoding='utf8')
                                   .read().splitlines()])
            not_empty_idx = raw_scores != ''
            gs_scores = [float(x) for x in raw_scores[not_empty_idx]]
            sent1 = np.array([s.split() for s in sent1])[not_empty_idx]
            sent2 = np.array([s.split() for s in sent2])[not_empty_idx]
            # sort data by length to minimize padding in batcher
            sorted_data = sorted(zip(sent1, sent2, gs_scores),
                                 key=lambda z: (len(z[0]), len(z[1]), z[2]))
            sent1, sent2, gs_scores = map(list, zip(*sorted_data))
            self.data[dataset] = (sent1, sent2, gs_scores)
            self.samples += sent1 + sent2
    def do_prepare(self, params, prepare):
        if 'similarity' in params:
            self.similarity = params.similarity
        else:  # Default similarity is cosine
            self.similarity = lambda s1, s2: np.nan_to_num(cosine(np.nan_to_num(s1), np.nan_to_num(s2)))
        return prepare(params, self.samples)
    def run(self, params, batcher):
        """Embed each pair batch-wise and correlate similarity scores with
        the gold scores, per dataset and aggregated ('all' = concatenated,
        mean, and sample-weighted mean)."""
        results = {}
        all_sys_scores = []
        all_gs_scores = []
        for dataset in self.datasets:
            sys_scores = []
            input1, input2, gs_scores = self.data[dataset]
            for ii in range(0, len(gs_scores), params.batch_size):
                batch1 = input1[ii:ii + params.batch_size]
                batch2 = input2[ii:ii + params.batch_size]
                # we assume get_batch already throws out the faulty ones
                if len(batch1) == len(batch2) and len(batch1) > 0:
                    enc1 = batcher(params, batch1)
                    enc2 = batcher(params, batch2)
                    for kk in range(enc2.shape[0]):
                        sys_score = self.similarity(enc1[kk], enc2[kk])
                        sys_scores.append(sys_score)
            all_sys_scores.extend(sys_scores)
            all_gs_scores.extend(gs_scores)
            results[dataset] = {'pearson': pearsonr(sys_scores, gs_scores),
                                'spearman': spearmanr(sys_scores, gs_scores),
                                'nsamples': len(sys_scores)}
            logging.debug('%s : pearson = %.4f, spearman = %.4f' %
                          (dataset, results[dataset]['pearson'][0],
                           results[dataset]['spearman'][0]))
        weights = [results[dset]['nsamples'] for dset in results.keys()]
        list_prs = np.array([results[dset]['pearson'][0] for
                            dset in results.keys()])
        list_spr = np.array([results[dset]['spearman'][0] for
                            dset in results.keys()])
        avg_pearson = np.average(list_prs)
        avg_spearman = np.average(list_spr)
        wavg_pearson = np.average(list_prs, weights=weights)
        wavg_spearman = np.average(list_spr, weights=weights)
        all_pearson = pearsonr(all_sys_scores, all_gs_scores)
        all_spearman = spearmanr(all_sys_scores, all_gs_scores)
        results['all'] = {'pearson': {'all': all_pearson[0],
                                      'mean': avg_pearson,
                                      'wmean': wavg_pearson},
                          'spearman': {'all': all_spearman[0],
                                       'mean': avg_spearman,
                                       'wmean': wavg_spearman}}
        logging.debug('ALL : Pearson = %.4f, \
            Spearman = %.4f' % (all_pearson[0], all_spearman[0]))
        logging.debug('ALL (weighted average) : Pearson = %.4f, \
            Spearman = %.4f' % (wavg_pearson, wavg_spearman))
        logging.debug('ALL (average) : Pearson = %.4f, \
            Spearman = %.4f\n' % (avg_pearson, avg_spearman))
        return results
class STS12Eval(STSEval):
    """SemEval-2012 STS: five unsupervised similarity subsets."""
    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : STS12 *****\n\n')
        self.seed = seed
        self.datasets = [
            'MSRpar',
            'MSRvid',
            'SMTeuroparl',
            'surprise.OnWN',
            'surprise.SMTnews',
        ]
        self.loadFile(taskpath)
class STS13Eval(STSEval):
    """SemEval-2013 STS. The "SMT" subtask is excluded here because of its
    LICENSE restrictions."""
    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : STS13 (-SMT) *****\n\n')
        self.seed = seed
        self.datasets = ['FNWN', 'headlines', 'OnWN']
        self.loadFile(taskpath)
class STS14Eval(STSEval):
    """SemEval-2014 STS: six unsupervised similarity subsets."""
    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : STS14 *****\n\n')
        self.seed = seed
        self.datasets = [
            'deft-forum',
            'deft-news',
            'headlines',
            'images',
            'OnWN',
            'tweet-news',
        ]
        self.loadFile(taskpath)
class STS15Eval(STSEval):
    """SemEval-2015 STS: five unsupervised similarity subsets."""
    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : STS15 *****\n\n')
        self.seed = seed
        self.datasets = [
            'answers-forums',
            'answers-students',
            'belief',
            'headlines',
            'images',
        ]
        self.loadFile(taskpath)
class STS16Eval(STSEval):
    """SemEval-2016 STS: five unsupervised similarity subsets."""
    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : STS16 *****\n\n')
        self.seed = seed
        self.datasets = [
            'answer-answer',
            'headlines',
            'plagiarism',
            'postediting',
            'question-question',
        ]
        self.loadFile(taskpath)
class STSBenchmarkEval(STSEval):
    """STS-Benchmark evaluated as three unsupervised STS splits."""

    def __init__(self, task_path, seed=1111):
        logging.debug('\n\n***** Transfer task : STSBenchmark*****\n\n')
        self.seed = seed
        self.samples = []
        splits = {}
        for split in ('train', 'dev', 'test'):
            splits[split] = self.loadFile(
                os.path.join(task_path, 'sts-%s.csv' % split))
        self.datasets = ['train', 'dev', 'test']
        self.data = splits

    def loadFile(self, fpath):
        """Parse one sts-*.csv split: tab-separated, sentence pair in
        columns 5 and 6, gold score in column 4."""
        sents_a, sents_b, scores = [], [], []
        with io.open(fpath, 'r', encoding='utf-8') as fin:
            for raw in fin:
                fields = raw.strip().split('\t')
                sents_a.append(fields[5].split())
                sents_b.append(fields[6].split())
                scores.append(float(fields[4]))
        self.samples += sents_a + sents_b
        return (sents_a, sents_b, scores)
class STSBenchmarkFinetune(SICKEval):
    """STS-Benchmark run through the supervised SICK-style regression path."""

    def __init__(self, task_path, seed=1111):
        logging.debug('\n\n***** Transfer task : STSBenchmark*****\n\n')
        self.seed = seed
        self.sick_data = {
            split: self.loadFile(os.path.join(task_path, 'sts-%s.csv' % split))
            for split in ('train', 'dev', 'test')
        }

    def loadFile(self, fpath):
        """Parse one sts-*.csv split into a SICK-shaped dict: sentence pair
        in columns 5 and 6, gold score in column 4."""
        data = {'X_A': [], 'X_B': [], 'y': []}
        with io.open(fpath, 'r', encoding='utf-8') as fin:
            for raw in fin:
                fields = raw.strip().split('\t')
                data['X_A'].append(fields[5].split())
                data['X_B'].append(fields[6].split())
                data['y'].append(fields[4])
        data['y'] = [float(score) for score in data['y']]
        return data
class SICKRelatednessEval(STSEval):
    """SICK-Relatedness scored with the unsupervised STSEval protocol."""

    def __init__(self, task_path, seed=1111):
        logging.debug('\n\n***** Transfer task : SICKRelatedness*****\n\n')
        self.seed = seed
        self.samples = []
        split_files = {'train': 'SICK_train.txt',
                       'dev': 'SICK_trial.txt',
                       'test': 'SICK_test_annotated.txt'}
        self.datasets = ['train', 'dev', 'test']
        self.data = {split: self.loadFile(os.path.join(task_path,
                                                       split_files[split]))
                     for split in self.datasets}

    def loadFile(self, fpath):
        """Parse one SICK tsv (header row skipped); extends self.samples."""
        pairs_a, pairs_b, scores = [], [], []
        with io.open(fpath, 'r', encoding='utf-8') as f:
            next(f, None)  # skip the header row
            for line in f:
                fields = line.strip().split('\t')
                pairs_a.append(fields[1].split())
                pairs_b.append(fields[2].split())
                scores.append(float(fields[3]))
        self.samples += pairs_a + pairs_b
        return (pairs_a, pairs_b, scores)
| 9,696 | 40.797414 | 104 |
py
|
SimCSE
|
SimCSE-main/SentEval/senteval/probing.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
probing tasks
'''
from __future__ import absolute_import, division, unicode_literals
import os
import io
import copy
import logging
import numpy as np
from senteval.tools.validation import SplitClassifier
class PROBINGEval(object):
    """Generic probing-task evaluator.

    Loads a tab-separated file whose lines are "<split>\t<label>\t<sentence>"
    (split tags 'tr'/'va'/'te'), embeds the sentences with a caller-supplied
    `batcher`, and trains a SplitClassifier on the embeddings.
    """

    def __init__(self, task, task_path, seed=1111):
        self.seed = seed
        self.task = task
        logging.debug('***** (Probing) Transfer task : %s classification *****', self.task.upper())
        self.task_data = {'train': {'X': [], 'y': []},
                          'dev': {'X': [], 'y': []},
                          'test': {'X': [], 'y': []}}
        self.loadFile(task_path)
        logging.info('Loaded %s train - %s dev - %s test for %s' %
                     (len(self.task_data['train']['y']), len(self.task_data['dev']['y']),
                      len(self.task_data['test']['y']), self.task))

    def do_prepare(self, params, prepare):
        # Hand every sentence from every split to `prepare` (e.g. vocab build).
        samples = self.task_data['train']['X'] + self.task_data['dev']['X'] + \
                  self.task_data['test']['X']
        return prepare(params, samples)

    def loadFile(self, fpath):
        """Populate self.task_data from `fpath` and map labels to int ids."""
        self.tok2split = {'tr': 'train', 'va': 'dev', 'te': 'test'}
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.rstrip().split('\t')
                self.task_data[self.tok2split[line[0]]]['X'].append(line[-1].split())
                self.task_data[self.tok2split[line[0]]]['y'].append(line[1])

        # Label ids come from the sorted set of *training* labels, so every
        # dev/test label must also occur in the training split.
        labels = sorted(np.unique(self.task_data['train']['y']))
        self.tok2label = dict(zip(labels, range(len(labels))))
        self.nclasses = len(self.tok2label)
        for split in self.task_data:
            for i, y in enumerate(self.task_data[split]['y']):
                self.task_data[split]['y'][i] = self.tok2label[y]

    def run(self, params, batcher):
        """Embed all splits with `batcher`, train and evaluate a classifier.

        Returns dev/test accuracies (percent) and the dev/test split sizes.
        """
        task_embed = {'train': {}, 'dev': {}, 'test': {}}
        bsize = params.batch_size
        logging.info('Computing embeddings for train/dev/test')
        for key in self.task_data:
            # Sort examples by length to reduce padding within a batch.
            sorted_data = sorted(zip(self.task_data[key]['X'],
                                     self.task_data[key]['y']),
                                 key=lambda z: (len(z[0]), z[1]))
            self.task_data[key]['X'], self.task_data[key]['y'] = map(list, zip(*sorted_data))

            task_embed[key]['X'] = []
            for ii in range(0, len(self.task_data[key]['y']), bsize):
                batch = self.task_data[key]['X'][ii:ii + bsize]
                embeddings = batcher(params, batch)
                task_embed[key]['X'].append(embeddings)
            task_embed[key]['X'] = np.vstack(task_embed[key]['X'])
            task_embed[key]['y'] = np.array(self.task_data[key]['y'])
        logging.info('Computed embeddings')

        config_classifier = {'nclasses': self.nclasses, 'seed': self.seed,
                             'usepytorch': params.usepytorch,
                             'classifier': params.classifier}

        if self.task == "WordContent" and params.classifier['nhid'] > 0:
            # WordContent is always probed with a linear classifier; deep-copy
            # the config so the caller's params.classifier is not mutated.
            config_classifier = copy.deepcopy(config_classifier)
            config_classifier['classifier']['nhid'] = 0
            # Was a stray debug print(); keep the information at debug level.
            logging.debug('Requested nhid %s overridden to 0 for WordContent',
                          params.classifier['nhid'])

        clf = SplitClassifier(X={'train': task_embed['train']['X'],
                                 'valid': task_embed['dev']['X'],
                                 'test': task_embed['test']['X']},
                              y={'train': task_embed['train']['y'],
                                 'valid': task_embed['dev']['y'],
                                 'test': task_embed['test']['y']},
                              config=config_classifier)

        devacc, testacc = clf.run()
        logging.debug('\nDev acc : %.1f Test acc : %.1f for %s classification\n' % (devacc, testacc, self.task.upper()))

        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(task_embed['dev']['X']),
                'ntest': len(task_embed['test']['X'])}
"""
Surface Information
"""
class LengthEval(PROBINGEval):
    """Surface probe: predict the (binned) sentence length."""

    def __init__(self, task_path, seed=1111):
        # labels: bins
        data_file = os.path.join(task_path, 'sentence_length.txt')
        super(LengthEval, self).__init__('Length', data_file, seed)
class WordContentEval(PROBINGEval):
    """Surface probe: which of 200 target words the sentence contains."""

    def __init__(self, task_path, seed=1111):
        # labels: 200 target words
        data_file = os.path.join(task_path, 'word_content.txt')
        super(WordContentEval, self).__init__('WordContent', data_file, seed)
"""
Latent Structural Information
"""
class DepthEval(PROBINGEval):
    """Syntactic probe: predict the (binned) parse-tree depth."""

    def __init__(self, task_path, seed=1111):
        # labels: bins
        data_file = os.path.join(task_path, 'tree_depth.txt')
        super(DepthEval, self).__init__('Depth', data_file, seed)
class TopConstituentsEval(PROBINGEval):
    """Syntactic probe: top-constituent sequence of the parse tree."""

    def __init__(self, task_path, seed=1111):
        # labels: 'PP_NP_VP_.' .. (20 classes)
        data_file = os.path.join(task_path, 'top_constituents.txt')
        super(TopConstituentsEval, self).__init__('TopConstituents', data_file, seed)
class BigramShiftEval(PROBINGEval):
    """Syntactic probe: detect whether two adjacent words were swapped."""

    def __init__(self, task_path, seed=1111):
        # labels: 0 or 1
        data_file = os.path.join(task_path, 'bigram_shift.txt')
        super(BigramShiftEval, self).__init__('BigramShift', data_file, seed)
# TODO: Voice?
"""
Latent Semantic Information
"""
class TenseEval(PROBINGEval):
    """Semantic probe: main-verb tense."""

    def __init__(self, task_path, seed=1111):
        # labels: 'PRES', 'PAST'
        data_file = os.path.join(task_path, 'past_present.txt')
        super(TenseEval, self).__init__('Tense', data_file, seed)
class SubjNumberEval(PROBINGEval):
    """Semantic probe: grammatical number of the subject."""

    def __init__(self, task_path, seed=1111):
        # labels: 'NN', 'NNS'
        data_file = os.path.join(task_path, 'subj_number.txt')
        super(SubjNumberEval, self).__init__('SubjNumber', data_file, seed)
class ObjNumberEval(PROBINGEval):
    """Semantic probe: grammatical number of the object."""

    def __init__(self, task_path, seed=1111):
        # labels: 'NN', 'NNS'
        data_file = os.path.join(task_path, 'obj_number.txt')
        super(ObjNumberEval, self).__init__('ObjNumber', data_file, seed)
class OddManOutEval(PROBINGEval):
    """Semantic probe: detect a randomly substituted word."""

    def __init__(self, task_path, seed=1111):
        # labels: 'O', 'C'
        data_file = os.path.join(task_path, 'odd_man_out.txt')
        super(OddManOutEval, self).__init__('OddManOut', data_file, seed)
class CoordinationInversionEval(PROBINGEval):
    """Semantic probe: detect inverted coordinated clauses."""

    def __init__(self, task_path, seed=1111):
        # labels: 'O', 'I'
        data_file = os.path.join(task_path, 'coordination_inversion.txt')
        super(CoordinationInversionEval, self).__init__('CoordinationInversion', data_file, seed)
| 6,786 | 38.459302 | 120 |
py
|
SimCSE
|
SimCSE-main/SentEval/senteval/sick.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
SICK Relatedness and Entailment
'''
from __future__ import absolute_import, division, unicode_literals
import os
import io
import logging
import numpy as np
from sklearn.metrics import mean_squared_error
from scipy.stats import pearsonr, spearmanr
from senteval.tools.relatedness import RelatednessPytorch
from senteval.tools.validation import SplitClassifier
class SICKEval(object):
    """Supervised SICK-Relatedness evaluation.

    Loads the SICK train/trial/test files, embeds sentence pairs with a
    caller-supplied `batcher`, and trains a RelatednessPytorch regressor on
    pair features built from the embeddings.
    """

    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : SICK-Relatedness*****\n\n')
        self.seed = seed
        train = self.loadFile(os.path.join(task_path, 'SICK_train.txt'))
        dev = self.loadFile(os.path.join(task_path, 'SICK_trial.txt'))
        test = self.loadFile(os.path.join(task_path, 'SICK_test_annotated.txt'))
        self.sick_data = {'train': train, 'dev': dev, 'test': test}

    def do_prepare(self, params, prepare):
        # Hand every sentence from every split to `prepare` (e.g. vocab build).
        samples = self.sick_data['train']['X_A'] + \
                  self.sick_data['train']['X_B'] + \
                  self.sick_data['dev']['X_A'] + \
                  self.sick_data['dev']['X_B'] + \
                  self.sick_data['test']['X_A'] + self.sick_data['test']['X_B']
        return prepare(params, samples)

    def loadFile(self, fpath):
        """Parse one SICK tsv (header skipped) into tokenized pairs + scores."""
        skipFirstLine = True
        sick_data = {'X_A': [], 'X_B': [], 'y': []}
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                if skipFirstLine:
                    skipFirstLine = False
                else:
                    text = line.strip().split('\t')
                    sick_data['X_A'].append(text[1].split())
                    sick_data['X_B'].append(text[2].split())
                    sick_data['y'].append(text[3])

        # Relatedness scores are real-valued ratings.
        sick_data['y'] = [float(s) for s in sick_data['y']]
        return sick_data

    def run(self, params, batcher):
        """Embed all pairs, train the regressor, and report dev Spearman
        plus test Pearson/Spearman/MSE."""
        sick_embed = {'train': {}, 'dev': {}, 'test': {}}
        bsize = params.batch_size

        for key in self.sick_data:
            logging.info('Computing embedding for {0}'.format(key))
            # Sort to reduce padding
            sorted_corpus = sorted(zip(self.sick_data[key]['X_A'],
                                       self.sick_data[key]['X_B'],
                                       self.sick_data[key]['y']),
                                   key=lambda z: (len(z[0]), len(z[1]), z[2]))

            self.sick_data[key]['X_A'] = [x for (x, y, z) in sorted_corpus]
            self.sick_data[key]['X_B'] = [y for (x, y, z) in sorted_corpus]
            self.sick_data[key]['y'] = [z for (x, y, z) in sorted_corpus]

            # Embed each side of the pair separately, in batches.
            for txt_type in ['X_A', 'X_B']:
                sick_embed[key][txt_type] = []
                for ii in range(0, len(self.sick_data[key]['y']), bsize):
                    batch = self.sick_data[key][txt_type][ii:ii + bsize]
                    embeddings = batcher(params, batch)
                    sick_embed[key][txt_type].append(embeddings)
                sick_embed[key][txt_type] = np.vstack(sick_embed[key][txt_type])
            sick_embed[key]['y'] = np.array(self.sick_data[key]['y'])
            logging.info('Computed {0} embeddings'.format(key))

        # Pair features: |a - b| concatenated with a * b.
        # Train
        trainA = sick_embed['train']['X_A']
        trainB = sick_embed['train']['X_B']
        trainF = np.c_[np.abs(trainA - trainB), trainA * trainB]
        trainY = self.encode_labels(self.sick_data['train']['y'])

        # Dev
        devA = sick_embed['dev']['X_A']
        devB = sick_embed['dev']['X_B']
        devF = np.c_[np.abs(devA - devB), devA * devB]
        devY = self.encode_labels(self.sick_data['dev']['y'])

        # Test
        testA = sick_embed['test']['X_A']
        testB = sick_embed['test']['X_B']
        testF = np.c_[np.abs(testA - testB), testA * testB]
        testY = self.encode_labels(self.sick_data['test']['y'])

        config = {'seed': self.seed, 'nclasses': 5}
        clf = RelatednessPytorch(train={'X': trainF, 'y': trainY},
                                 valid={'X': devF, 'y': devY},
                                 test={'X': testF, 'y': testY},
                                 devscores=self.sick_data['dev']['y'],
                                 config=config)

        devspr, yhat = clf.run()

        pr = pearsonr(yhat, self.sick_data['test']['y'])[0]
        sr = spearmanr(yhat, self.sick_data['test']['y'])[0]
        # x != x is the NaN test: map NaN correlations (zero variance) to 0.
        pr = 0 if pr != pr else pr
        sr = 0 if sr != sr else sr
        se = mean_squared_error(yhat, self.sick_data['test']['y'])
        logging.debug('Dev : Spearman {0}'.format(devspr))
        logging.debug('Test : Pearson {0} Spearman {1} MSE {2} \
            for SICK Relatedness\n'.format(pr, sr, se))

        return {'devspearman': devspr, 'pearson': pr, 'spearman': sr, 'mse': se,
                'yhat': yhat, 'ndev': len(devA), 'ntest': len(testA)}

    def encode_labels(self, labels, nclass=5):
        """
        Label encoding from Tree LSTM paper (Tai, Socher, Manning)
        """
        # Each real score y is spread over the two adjacent integer bins so
        # that the expectation of the encoded distribution equals y.
        Y = np.zeros((len(labels), nclass)).astype('float32')
        for j, y in enumerate(labels):
            for i in range(nclass):
                if i+1 == np.floor(y) + 1:
                    Y[j, i] = y - np.floor(y)
                if i+1 == np.floor(y):
                    Y[j, i] = np.floor(y) - y + 1
        return Y
class SICKEntailmentEval(SICKEval):
    """SICK-Entailment: 3-way classification (contradiction/neutral/entailment)
    on the same pair features as SICK-Relatedness, trained with a
    SplitClassifier instead of the relatedness regressor."""

    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : SICK-Entailment*****\n\n')
        self.seed = seed
        train = self.loadFile(os.path.join(task_path, 'SICK_train.txt'))
        dev = self.loadFile(os.path.join(task_path, 'SICK_trial.txt'))
        test = self.loadFile(os.path.join(task_path, 'SICK_test_annotated.txt'))
        self.sick_data = {'train': train, 'dev': dev, 'test': test}

    def loadFile(self, fpath):
        """Parse one SICK tsv; entailment labels (column 4) mapped to ints."""
        label2id = {'CONTRADICTION': 0, 'NEUTRAL': 1, 'ENTAILMENT': 2}
        skipFirstLine = True
        sick_data = {'X_A': [], 'X_B': [], 'y': []}
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                if skipFirstLine:
                    skipFirstLine = False
                else:
                    text = line.strip().split('\t')
                    sick_data['X_A'].append(text[1].split())
                    sick_data['X_B'].append(text[2].split())
                    sick_data['y'].append(text[4])
        sick_data['y'] = [label2id[s] for s in sick_data['y']]
        return sick_data

    def run(self, params, batcher):
        """Embed all pairs, train a 3-way classifier, and report accuracies."""
        sick_embed = {'train': {}, 'dev': {}, 'test': {}}
        bsize = params.batch_size

        for key in self.sick_data:
            logging.info('Computing embedding for {0}'.format(key))
            # Sort to reduce padding
            sorted_corpus = sorted(zip(self.sick_data[key]['X_A'],
                                       self.sick_data[key]['X_B'],
                                       self.sick_data[key]['y']),
                                   key=lambda z: (len(z[0]), len(z[1]), z[2]))

            self.sick_data[key]['X_A'] = [x for (x, y, z) in sorted_corpus]
            self.sick_data[key]['X_B'] = [y for (x, y, z) in sorted_corpus]
            self.sick_data[key]['y'] = [z for (x, y, z) in sorted_corpus]

            # Embed each side of the pair separately, in batches.
            for txt_type in ['X_A', 'X_B']:
                sick_embed[key][txt_type] = []
                for ii in range(0, len(self.sick_data[key]['y']), bsize):
                    batch = self.sick_data[key][txt_type][ii:ii + bsize]
                    embeddings = batcher(params, batch)
                    sick_embed[key][txt_type].append(embeddings)
                sick_embed[key][txt_type] = np.vstack(sick_embed[key][txt_type])
            logging.info('Computed {0} embeddings'.format(key))

        # Pair features: |a - b| concatenated with a * b.
        # Train
        trainA = sick_embed['train']['X_A']
        trainB = sick_embed['train']['X_B']
        trainF = np.c_[np.abs(trainA - trainB), trainA * trainB]
        trainY = np.array(self.sick_data['train']['y'])

        # Dev
        devA = sick_embed['dev']['X_A']
        devB = sick_embed['dev']['X_B']
        devF = np.c_[np.abs(devA - devB), devA * devB]
        devY = np.array(self.sick_data['dev']['y'])

        # Test
        testA = sick_embed['test']['X_A']
        testB = sick_embed['test']['X_B']
        testF = np.c_[np.abs(testA - testB), testA * testB]
        testY = np.array(self.sick_data['test']['y'])

        config = {'nclasses': 3, 'seed': self.seed,
                  'usepytorch': params.usepytorch,
                  'classifier': params.classifier,
                  'nhid': params.nhid}
        clf = SplitClassifier(X={'train': trainF, 'valid': devF, 'test': testF},
                              y={'train': trainY, 'valid': devY, 'test': testY},
                              config=config)

        devacc, testacc = clf.run()
        logging.debug('\nDev acc : {0} Test acc : {1} for \
            SICK entailment\n'.format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(devA), 'ntest': len(testA)}
| 9,243 | 41.599078 | 80 |
py
|
SimCSE
|
SimCSE-main/SentEval/senteval/__init__.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import
from senteval.engine import SE
| 264 | 23.090909 | 61 |
py
|
SimCSE
|
SimCSE-main/SentEval/senteval/trec.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
TREC question-type classification
'''
from __future__ import absolute_import, division, unicode_literals
import os
import io
import logging
import numpy as np
from senteval.tools.validation import KFoldClassifier
class TRECEval(object):
    """TREC question-type classification (6 classes), evaluated with k-fold
    cross-validation on the training embeddings via KFoldClassifier."""

    def __init__(self, task_path, seed=1111):
        logging.info('***** Transfer task : TREC *****\n\n')
        self.seed = seed
        self.train = self.loadFile(os.path.join(task_path, 'train_5500.label'))
        self.test = self.loadFile(os.path.join(task_path, 'TREC_10.label'))

    def do_prepare(self, params, prepare):
        # Hand all sentences (train + test) to `prepare` (e.g. vocab build).
        samples = self.train['X'] + self.test['X']
        return prepare(params, samples)

    def loadFile(self, fpath):
        """Parse a TREC label file: lines look like 'HUM:ind Who ... ?'."""
        trec_data = {'X': [], 'y': []}
        tgt2idx = {'ABBR': 0, 'DESC': 1, 'ENTY': 2,
                   'HUM': 3, 'LOC': 4, 'NUM': 5}
        with io.open(fpath, 'r', encoding='latin-1') as f:
            for line in f:
                target, sample = line.strip().split(':', 1)
                # Drop the fine-grained sub-label before the first space.
                sample = sample.split(' ', 1)[1].split()
                assert target in tgt2idx, target
                trec_data['X'].append(sample)
                trec_data['y'].append(tgt2idx[target])
        return trec_data

    def run(self, params, batcher):
        """Embed train/test with `batcher` and run KFoldClassifier."""
        train_embeddings, test_embeddings = [], []

        # Sort to reduce padding
        sorted_corpus_train = sorted(zip(self.train['X'], self.train['y']),
                                     key=lambda z: (len(z[0]), z[1]))
        train_samples = [x for (x, y) in sorted_corpus_train]
        train_labels = [y for (x, y) in sorted_corpus_train]

        sorted_corpus_test = sorted(zip(self.test['X'], self.test['y']),
                                    key=lambda z: (len(z[0]), z[1]))
        test_samples = [x for (x, y) in sorted_corpus_test]
        test_labels = [y for (x, y) in sorted_corpus_test]

        # Get train embeddings
        for ii in range(0, len(train_labels), params.batch_size):
            batch = train_samples[ii:ii + params.batch_size]
            embeddings = batcher(params, batch)
            train_embeddings.append(embeddings)
        train_embeddings = np.vstack(train_embeddings)
        logging.info('Computed train embeddings')

        # Get test embeddings
        for ii in range(0, len(test_labels), params.batch_size):
            batch = test_samples[ii:ii + params.batch_size]
            embeddings = batcher(params, batch)
            test_embeddings.append(embeddings)
        test_embeddings = np.vstack(test_embeddings)
        logging.info('Computed test embeddings')

        config_classifier = {'nclasses': 6, 'seed': self.seed,
                             'usepytorch': params.usepytorch,
                             'classifier': params.classifier,
                             'kfold': params.kfold}
        clf = KFoldClassifier({'X': train_embeddings,
                               'y': np.array(train_labels)},
                              {'X': test_embeddings,
                               'y': np.array(test_labels)},
                              config_classifier)
        devacc, testacc, _ = clf.run()
        logging.debug('\nDev acc : {0} Test acc : {1} \
            for TREC\n'.format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(self.train['X']), 'ntest': len(self.test['X'])}
| 3,565 | 38.622222 | 79 |
py
|
SimCSE
|
SimCSE-main/SentEval/senteval/sst.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
SST - binary classification
'''
from __future__ import absolute_import, division, unicode_literals
import os
import io
import logging
import numpy as np
from senteval.tools.validation import SplitClassifier
class SSTEval(object):
    """Stanford Sentiment Treebank evaluation, binary (2-class) or
    fine-grained (5-class), using a SplitClassifier on sentence embeddings."""

    def __init__(self, task_path, nclasses=2, seed=1111):
        self.seed = seed

        # binary (2) or fine-grained (5)
        assert nclasses in [2, 5]
        self.nclasses = nclasses
        self.task_name = 'Binary' if self.nclasses == 2 else 'Fine-Grained'
        logging.debug('***** Transfer task : SST %s classification *****\n\n', self.task_name)

        train = self.loadFile(os.path.join(task_path, 'sentiment-train'))
        dev = self.loadFile(os.path.join(task_path, 'sentiment-dev'))
        test = self.loadFile(os.path.join(task_path, 'sentiment-test'))
        self.sst_data = {'train': train, 'dev': dev, 'test': test}

    def do_prepare(self, params, prepare):
        # Hand every sentence from every split to `prepare` (e.g. vocab build).
        samples = self.sst_data['train']['X'] + self.sst_data['dev']['X'] + \
                  self.sst_data['test']['X']
        return prepare(params, samples)

    def loadFile(self, fpath):
        """Parse one SST split. Binary files are '<sentence>\t<label>';
        fine-grained files are '<label> <sentence>'."""
        sst_data = {'X': [], 'y': []}
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                if self.nclasses == 2:
                    sample = line.strip().split('\t')
                    sst_data['y'].append(int(sample[1]))
                    sst_data['X'].append(sample[0].split())
                elif self.nclasses == 5:
                    sample = line.strip().split(' ', 1)
                    sst_data['y'].append(int(sample[0]))
                    sst_data['X'].append(sample[1].split())
        # Sanity check: labels must span 0 .. nclasses-1.
        assert max(sst_data['y']) == self.nclasses - 1
        return sst_data

    def run(self, params, batcher):
        """Embed all splits with `batcher`, train and evaluate a classifier."""
        sst_embed = {'train': {}, 'dev': {}, 'test': {}}
        bsize = params.batch_size

        for key in self.sst_data:
            logging.info('Computing embedding for {0}'.format(key))
            # Sort to reduce padding
            sorted_data = sorted(zip(self.sst_data[key]['X'],
                                     self.sst_data[key]['y']),
                                 key=lambda z: (len(z[0]), z[1]))
            self.sst_data[key]['X'], self.sst_data[key]['y'] = map(list, zip(*sorted_data))

            sst_embed[key]['X'] = []
            for ii in range(0, len(self.sst_data[key]['y']), bsize):
                batch = self.sst_data[key]['X'][ii:ii + bsize]
                embeddings = batcher(params, batch)
                sst_embed[key]['X'].append(embeddings)
            sst_embed[key]['X'] = np.vstack(sst_embed[key]['X'])
            sst_embed[key]['y'] = np.array(self.sst_data[key]['y'])
            logging.info('Computed {0} embeddings'.format(key))

        config_classifier = {'nclasses': self.nclasses, 'seed': self.seed,
                             'usepytorch': params.usepytorch,
                             'classifier': params.classifier}

        clf = SplitClassifier(X={'train': sst_embed['train']['X'],
                                 'valid': sst_embed['dev']['X'],
                                 'test': sst_embed['test']['X']},
                              y={'train': sst_embed['train']['y'],
                                 'valid': sst_embed['dev']['y'],
                                 'test': sst_embed['test']['y']},
                              config=config_classifier)

        devacc, testacc = clf.run()
        logging.debug('\nDev acc : {0} Test acc : {1} for \
            SST {2} classification\n'.format(devacc, testacc, self.task_name))

        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(sst_embed['dev']['X']),
                'ntest': len(sst_embed['test']['X'])}
| 3,946 | 39.690722 | 94 |
py
|
SimCSE
|
SimCSE-main/SentEval/senteval/tools/relatedness.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Semantic Relatedness (supervised) with Pytorch
"""
from __future__ import absolute_import, division, unicode_literals
import copy
import numpy as np
import torch
from torch import nn
import torch.optim as optim
from scipy.stats import pearsonr, spearmanr
class RelatednessPytorch(object):
    # Can be used for SICK-Relatedness, and STS14
    """Supervised relatedness regressor.

    A single softmax layer predicts a distribution over the 1..5 rating bins
    (Tai et al. label encoding) and is trained with an MSE loss; model
    selection uses Spearman correlation on the dev scores. Requires CUDA.
    """

    def __init__(self, train, valid, test, devscores, config):
        # fix seed for reproducibility
        np.random.seed(config['seed'])
        torch.manual_seed(config['seed'])
        assert torch.cuda.is_available(), 'torch.cuda required for Relatedness'
        torch.cuda.manual_seed(config['seed'])

        self.train = train
        self.valid = valid
        self.test = test
        self.devscores = devscores

        self.inputdim = train['X'].shape[1]
        self.nclasses = config['nclasses']
        self.seed = config['seed']
        self.l2reg = 0.
        self.batch_size = 64
        self.maxepoch = 1000
        self.early_stop = True

        self.model = nn.Sequential(
            nn.Linear(self.inputdim, self.nclasses),
            nn.Softmax(dim=-1),
        )
        self.loss_fn = nn.MSELoss()

        if torch.cuda.is_available():
            self.model = self.model.cuda()
            self.loss_fn = self.loss_fn.cuda()

        # Sum (rather than average) the loss over the batch.
        self.loss_fn.size_average = False
        self.optimizer = optim.Adam(self.model.parameters(),
                                    weight_decay=self.l2reg)

    def prepare_data(self, trainX, trainy, devX, devy, testX, testy):
        # Move every numpy array onto the GPU as a float tensor.
        trainX = torch.from_numpy(trainX).float().cuda()
        trainy = torch.from_numpy(trainy).float().cuda()
        devX = torch.from_numpy(devX).float().cuda()
        devy = torch.from_numpy(devy).float().cuda()
        testX = torch.from_numpy(testX).float().cuda()
        # Bug fix: the converted tensor used to be bound to `testY` while the
        # raw numpy `testy` was returned; convert `testy` itself instead.
        testy = torch.from_numpy(testy).float().cuda()

        return trainX, trainy, devX, devy, testX, testy

    def run(self):
        """Train with early stopping on dev Spearman; return (best dev
        Spearman, test predictions)."""
        self.nepoch = 0
        bestpr = -1
        early_stop_count = 0
        # Rating bins 1..5: expected rating = probs . r
        r = np.arange(1, 6)
        stop_train = False

        # Preparing data
        trainX, trainy, devX, devy, testX, testy = self.prepare_data(
            self.train['X'], self.train['y'],
            self.valid['X'], self.valid['y'],
            self.test['X'], self.test['y'])

        # Training
        while not stop_train and self.nepoch <= self.maxepoch:
            self.trainepoch(trainX, trainy, nepoches=50)
            yhat = np.dot(self.predict_proba(devX), r)
            pr = spearmanr(yhat, self.devscores)[0]
            pr = 0 if pr != pr else pr  # if NaN bc std=0
            # early stop on dev Spearman
            if pr > bestpr:
                bestpr = pr
                bestmodel = copy.deepcopy(self.model)
            elif self.early_stop:
                if early_stop_count >= 3:
                    stop_train = True
                early_stop_count += 1
        self.model = bestmodel

        yhat = np.dot(self.predict_proba(testX), r)

        return bestpr, yhat

    def trainepoch(self, X, y, nepoches=1):
        """Run `nepoches` passes of minibatch SGD over (X, y)."""
        self.model.train()
        for _ in range(self.nepoch, self.nepoch + nepoches):
            permutation = np.random.permutation(len(X))
            all_costs = []
            for i in range(0, len(X), self.batch_size):
                # forward
                idx = torch.from_numpy(permutation[i:i + self.batch_size]).long().cuda()
                Xbatch = X[idx]
                ybatch = y[idx]
                output = self.model(Xbatch)
                # loss
                loss = self.loss_fn(output, ybatch)
                all_costs.append(loss.item())
                # backward
                self.optimizer.zero_grad()
                loss.backward()
                # Update parameters
                self.optimizer.step()
        self.nepoch += nepoches

    def predict_proba(self, devX):
        """Return the predicted bin distributions for devX as a numpy array."""
        self.model.eval()
        probas = []
        with torch.no_grad():
            for i in range(0, len(devX), self.batch_size):
                Xbatch = devX[i:i + self.batch_size]
                if len(probas) == 0:
                    probas = self.model(Xbatch).data.cpu().numpy()
                else:
                    probas = np.concatenate((probas, self.model(Xbatch).data.cpu().numpy()), axis=0)
        return probas
| 4,552 | 32.725926 | 100 |
py
|
SimCSE
|
SimCSE-main/SentEval/senteval/tools/validation.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Validation and classification
(train) : inner-kfold classifier
(train, test) : kfold classifier
(train, dev, test) : split classifier
"""
from __future__ import absolute_import, division, unicode_literals
import logging
import numpy as np
from senteval.tools.classifier import MLP
import sklearn
assert(sklearn.__version__ >= "0.18.0"), \
"need to update sklearn to version >= 0.18.0"
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
def get_classif_name(classifier_config, usepytorch):
    """Return a short human-readable name for the classifier configuration."""
    if not usepytorch:
        return 'sklearn-LogReg'
    # Pytorch MLP: encode hidden size, optimizer and batch size in the name.
    nhid = classifier_config['nhid']
    optim = classifier_config.get('optim', 'adam')
    bs = classifier_config.get('batch_size', 64)
    return 'pytorch-MLP-nhid%s-%s-bs%s' % (nhid, optim, bs)
# Pytorch version
class InnerKFoldClassifier(object):
    """
    (train) split classifier : InnerKfold.

    Outer k-fold gives the test folds; an inner k-fold on each outer training
    portion selects the l2 regularisation strength.
    """
    def __init__(self, X, y, config):
        self.X = X
        self.y = y
        self.featdim = X.shape[1]
        self.nclasses = config['nclasses']
        self.seed = config['seed']
        self.devresults = []
        self.testresults = []
        self.usepytorch = config['usepytorch']
        self.classifier_config = config['classifier']
        self.modelname = get_classif_name(self.classifier_config, self.usepytorch)

        self.k = 5 if 'kfold' not in config else config['kfold']

    def run(self):
        """Nested cross-validation; returns (dev, test) mean accuracies (%)."""
        logging.info('Training {0} with (inner) {1}-fold cross-validation'
                     .format(self.modelname, self.k))

        # Regularisation grid: l2reg values for the MLP, C values for sklearn.
        regs = [10**t for t in range(-5, -1)] if self.usepytorch else \
               [2**t for t in range(-2, 4, 1)]
        skf = StratifiedKFold(n_splits=self.k, shuffle=True, random_state=1111)
        innerskf = StratifiedKFold(n_splits=self.k, shuffle=True,
                                   random_state=1111)
        count = 0
        for train_idx, test_idx in skf.split(self.X, self.y):
            count += 1
            X_train, X_test = self.X[train_idx], self.X[test_idx]
            y_train, y_test = self.y[train_idx], self.y[test_idx]
            scores = []
            for reg in regs:
                regscores = []
                for inner_train_idx, inner_test_idx in innerskf.split(X_train, y_train):
                    X_in_train, X_in_test = X_train[inner_train_idx], X_train[inner_test_idx]
                    y_in_train, y_in_test = y_train[inner_train_idx], y_train[inner_test_idx]
                    if self.usepytorch:
                        clf = MLP(self.classifier_config, inputdim=self.featdim,
                                  nclasses=self.nclasses, l2reg=reg,
                                  seed=self.seed)
                        clf.fit(X_in_train, y_in_train,
                                validation_data=(X_in_test, y_in_test))
                    else:
                        clf = LogisticRegression(C=reg, random_state=self.seed)
                        clf.fit(X_in_train, y_in_train)
                    regscores.append(clf.score(X_in_test, y_in_test))
                scores.append(round(100*np.mean(regscores), 2))
            # Pick the regulariser with the best mean inner-fold accuracy.
            optreg = regs[np.argmax(scores)]
            logging.info('Best param found at split {0}: l2reg = {1} \
                with score {2}'.format(count, optreg, np.max(scores)))
            self.devresults.append(np.max(scores))

            # Retrain on the full outer training portion with the best reg.
            if self.usepytorch:
                clf = MLP(self.classifier_config, inputdim=self.featdim,
                          nclasses=self.nclasses, l2reg=optreg,
                          seed=self.seed)

                clf.fit(X_train, y_train, validation_split=0.05)
            else:
                clf = LogisticRegression(C=optreg, random_state=self.seed)
                clf.fit(X_train, y_train)

            self.testresults.append(round(100*clf.score(X_test, y_test), 2))

        devaccuracy = round(np.mean(self.devresults), 2)
        testaccuracy = round(np.mean(self.testresults), 2)
        return devaccuracy, testaccuracy
class KFoldClassifier(object):
    """
    (train, test) split classifier : cross-validation on train.

    The regulariser is chosen by k-fold CV on the training set; the final
    model is retrained on all training data and scored on the test set.
    """
    def __init__(self, train, test, config):
        self.train = train
        self.test = test
        self.featdim = self.train['X'].shape[1]
        self.nclasses = config['nclasses']
        self.seed = config['seed']
        self.usepytorch = config['usepytorch']
        self.classifier_config = config['classifier']
        self.modelname = get_classif_name(self.classifier_config, self.usepytorch)

        self.k = 5 if 'kfold' not in config else config['kfold']

    def run(self):
        """Returns (dev accuracy, test accuracy, test predictions)."""
        # cross-validation
        logging.info('Training {0} with {1}-fold cross-validation'
                     .format(self.modelname, self.k))
        # Regularisation grid: l2reg values for the MLP, C values for sklearn.
        regs = [10**t for t in range(-5, -1)] if self.usepytorch else \
               [2**t for t in range(-1, 6, 1)]
        skf = StratifiedKFold(n_splits=self.k, shuffle=True,
                              random_state=self.seed)
        scores = []
        for reg in regs:
            scanscores = []
            for train_idx, test_idx in skf.split(self.train['X'],
                                                 self.train['y']):
                # Split data
                X_train, y_train = self.train['X'][train_idx], self.train['y'][train_idx]

                X_test, y_test = self.train['X'][test_idx], self.train['y'][test_idx]

                # Train classifier
                if self.usepytorch:
                    clf = MLP(self.classifier_config, inputdim=self.featdim,
                              nclasses=self.nclasses, l2reg=reg,
                              seed=self.seed)
                    clf.fit(X_train, y_train, validation_data=(X_test, y_test))
                else:
                    clf = LogisticRegression(C=reg, random_state=self.seed)
                    clf.fit(X_train, y_train)
                score = clf.score(X_test, y_test)
                scanscores.append(score)
            # Append mean score
            scores.append(round(100*np.mean(scanscores), 2))

        # evaluation
        logging.info([('reg:' + str(regs[idx]), scores[idx])
                      for idx in range(len(scores))])
        optreg = regs[np.argmax(scores)]
        devaccuracy = np.max(scores)
        logging.info('Cross-validation : best param found is reg = {0} \
            with score {1}'.format(optreg, devaccuracy))

        logging.info('Evaluating...')
        # Retrain on the full training set with the best regulariser.
        if self.usepytorch:
            clf = MLP(self.classifier_config, inputdim=self.featdim,
                      nclasses=self.nclasses, l2reg=optreg,
                      seed=self.seed)
            clf.fit(self.train['X'], self.train['y'], validation_split=0.05)
        else:
            clf = LogisticRegression(C=optreg, random_state=self.seed)
            clf.fit(self.train['X'], self.train['y'])
        yhat = clf.predict(self.test['X'])

        testaccuracy = clf.score(self.test['X'], self.test['y'])
        testaccuracy = round(100*testaccuracy, 2)

        return devaccuracy, testaccuracy, yhat
class SplitClassifier(object):
    """
    (train, valid, test) split classifier.

    The l2 regularisation strength is selected on the validation split; the
    model is then retrained with the best value and scored on the test split.
    """
    def __init__(self, X, y, config):
        self.X = X
        self.y = y
        self.nclasses = config['nclasses']
        self.featdim = self.X['train'].shape[1]
        self.seed = config['seed']
        self.usepytorch = config['usepytorch']
        self.classifier_config = config['classifier']
        # cudaEfficient keeps tensors on CPU inside the pytorch classifier.
        self.cudaEfficient = False if 'cudaEfficient' not in config else \
            config['cudaEfficient']
        self.modelname = get_classif_name(self.classifier_config, self.usepytorch)
        # noreg: evaluate with a single, effectively disabled regulariser.
        self.noreg = False if 'noreg' not in config else config['noreg']
        self.config = config

    def run(self):
        """Returns (dev accuracy, test accuracy), both in percent."""
        logging.info('Training {0} with standard validation..'
                     .format(self.modelname))
        # Regularisation grid: l2reg values for the MLP, C values for sklearn.
        regs = [10**t for t in range(-5, -1)] if self.usepytorch else \
               [2**t for t in range(-2, 4, 1)]
        if self.noreg:
            # Near-zero l2reg (pytorch) / near-infinite C (sklearn) turns
            # regularisation off.
            regs = [1e-9 if self.usepytorch else 1e9]
        scores = []
        for reg in regs:
            if self.usepytorch:
                clf = MLP(self.classifier_config, inputdim=self.featdim,
                          nclasses=self.nclasses, l2reg=reg,
                          seed=self.seed, cudaEfficient=self.cudaEfficient)

                # TODO: Find a hack for reducing nb epoches in SNLI
                clf.fit(self.X['train'], self.y['train'],
                        validation_data=(self.X['valid'], self.y['valid']))
            else:
                clf = LogisticRegression(C=reg, random_state=self.seed)
                clf.fit(self.X['train'], self.y['train'])
            scores.append(round(100*clf.score(self.X['valid'],
                          self.y['valid']), 2))
        logging.info([('reg:'+str(regs[idx]), scores[idx])
                      for idx in range(len(scores))])
        optreg = regs[np.argmax(scores)]
        devaccuracy = np.max(scores)
        logging.info('Validation : best param found is reg = {0} with score \
            {1}'.format(optreg, devaccuracy))
        logging.info('Evaluating...')
        # Retrain with the best regulariser and score the test split.
        # (Removed a dead `clf = LogisticRegression(...)` assignment that was
        # immediately overwritten in both branches below.)
        if self.usepytorch:
            clf = MLP(self.classifier_config, inputdim=self.featdim,
                      nclasses=self.nclasses, l2reg=optreg,
                      seed=self.seed, cudaEfficient=self.cudaEfficient)

            # TODO: Find a hack for reducing nb epoches in SNLI
            clf.fit(self.X['train'], self.y['train'],
                    validation_data=(self.X['valid'], self.y['valid']))
        else:
            clf = LogisticRegression(C=optreg, random_state=self.seed)
            clf.fit(self.X['train'], self.y['train'])

        testaccuracy = clf.score(self.X['test'], self.y['test'])
        testaccuracy = round(100*testaccuracy, 2)
        return devaccuracy, testaccuracy
| 10,358 | 40.939271 | 93 |
py
|
SimCSE
|
SimCSE-main/SentEval/senteval/tools/classifier.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Pytorch Classifier class in the style of scikit-learn
Classifiers include Logistic Regression and MLP
"""
from __future__ import absolute_import, division, unicode_literals
import numpy as np
import copy
from senteval import utils
import torch
from torch import nn
import torch.nn.functional as F
class PyTorchClassifier(object):
    """Base class giving PyTorch classifiers a scikit-learn-style API.

    Subclasses must set ``self.model``, ``self.loss_fn``, ``self.optimizer``,
    ``self.tenacity``, ``self.epoch_size`` and ``self.max_epoch`` before
    ``fit`` is called.
    """

    def __init__(self, inputdim, nclasses, l2reg=0., batch_size=64, seed=1111,
                 cudaEfficient=False):
        # fix seeds (numpy + torch CPU/GPU) for reproducibility
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        self.inputdim = inputdim
        self.nclasses = nclasses
        self.l2reg = l2reg
        self.batch_size = batch_size
        # cudaEfficient: keep full tensors on CPU and move minibatches to GPU
        self.cudaEfficient = cudaEfficient

    def prepare_split(self, X, y, validation_data=None, validation_split=None):
        """Split numpy arrays (X, y) into train/dev torch tensors.

        Exactly one of ``validation_data`` (explicit dev set) or
        ``validation_split`` (held-out fraction) must be provided.
        Returns (trainX, trainy, devX, devy) as float32 / int64 tensors.
        """
        # Preparing validation data
        assert validation_split or validation_data
        if validation_data is not None:
            trainX, trainy = X, y
            devX, devy = validation_data
        else:
            permutation = np.random.permutation(len(X))
            trainidx = permutation[int(validation_split * len(X)):]
            devidx = permutation[0:int(validation_split * len(X))]
            trainX, trainy = X[trainidx], y[trainidx]
            devX, devy = X[devidx], y[devidx]
        # with cudaEfficient the data stays on CPU; batches are moved in
        # trainepoch/score instead
        device = torch.device('cpu') if self.cudaEfficient else torch.device('cuda')
        trainX = torch.from_numpy(trainX).to(device, dtype=torch.float32)
        trainy = torch.from_numpy(trainy).to(device, dtype=torch.int64)
        devX = torch.from_numpy(devX).to(device, dtype=torch.float32)
        devy = torch.from_numpy(devy).to(device, dtype=torch.int64)
        return trainX, trainy, devX, devy

    def fit(self, X, y, validation_data=None, validation_split=None,
            early_stop=True):
        """Train the model; returns the best dev accuracy reached.

        Keeps a deep copy of the best-scoring model and restores it at the
        end.  With ``early_stop`` training halts after ``self.tenacity``
        consecutive epochs without dev improvement.
        """
        self.nepoch = 0
        bestaccuracy = -1
        stop_train = False
        early_stop_count = 0
        # Preparing validation data
        trainX, trainy, devX, devy = self.prepare_split(X, y, validation_data,
                                                        validation_split)
        # Training loop
        while not stop_train and self.nepoch <= self.max_epoch:
            self.trainepoch(trainX, trainy, epoch_size=self.epoch_size)
            accuracy = self.score(devX, devy)
            if accuracy > bestaccuracy:
                bestaccuracy = accuracy
                bestmodel = copy.deepcopy(self.model)
            elif early_stop:
                if early_stop_count >= self.tenacity:
                    stop_train = True
                early_stop_count += 1
        self.model = bestmodel
        return bestaccuracy

    def trainepoch(self, X, y, epoch_size=1):
        """Run ``epoch_size`` passes of minibatch SGD over (X, y)."""
        self.model.train()
        for _ in range(self.nepoch, self.nepoch + epoch_size):
            permutation = np.random.permutation(len(X))
            all_costs = []
            for i in range(0, len(X), self.batch_size):
                # forward
                idx = torch.from_numpy(permutation[i:i + self.batch_size]).long().to(X.device)
                Xbatch = X[idx]
                ybatch = y[idx]
                if self.cudaEfficient:
                    Xbatch = Xbatch.cuda()
                    ybatch = ybatch.cuda()
                output = self.model(Xbatch)
                # loss
                loss = self.loss_fn(output, ybatch)
                all_costs.append(loss.data.item())
                # backward
                self.optimizer.zero_grad()
                loss.backward()
                # Update parameters
                self.optimizer.step()
        self.nepoch += epoch_size

    def score(self, devX, devy):
        """Return classification accuracy (fraction in [0, 1]) on (devX, devy)."""
        self.model.eval()
        correct = 0
        # move everything to GPU unless it is already there
        if not isinstance(devX, torch.cuda.FloatTensor) or self.cudaEfficient:
            devX = torch.FloatTensor(devX).cuda()
            devy = torch.LongTensor(devy).cuda()
        with torch.no_grad():
            for i in range(0, len(devX), self.batch_size):
                Xbatch = devX[i:i + self.batch_size]
                ybatch = devy[i:i + self.batch_size]
                if self.cudaEfficient:
                    Xbatch = Xbatch.cuda()
                    ybatch = ybatch.cuda()
                output = self.model(Xbatch)
                pred = output.data.max(1)[1]
                correct += pred.long().eq(ybatch.data.long()).sum().item()
            accuracy = 1.0 * correct / len(devX)
        return accuracy

    def predict(self, devX):
        """Return predicted class indices for devX (stacked numpy array)."""
        self.model.eval()
        if not isinstance(devX, torch.cuda.FloatTensor):
            devX = torch.FloatTensor(devX).cuda()
        yhat = np.array([])
        with torch.no_grad():
            for i in range(0, len(devX), self.batch_size):
                Xbatch = devX[i:i + self.batch_size]
                output = self.model(Xbatch)
                yhat = np.append(yhat,
                                 output.data.max(1)[1].cpu().numpy())
        yhat = np.vstack(yhat)
        return yhat

    def predict_proba(self, devX):
        """Return class probabilities as an (n_samples, nclasses) numpy array.

        Fixes three bugs in the original implementation: softmax was applied
        to a numpy array instead of the tensor (TypeError), ``if not probas``
        was evaluated on an ndarray (ValueError after the first batch), and
        ``np.concatenate`` was called with the wrong signature.
        """
        self.model.eval()
        probas = []
        with torch.no_grad():
            for i in range(0, len(devX), self.batch_size):
                Xbatch = devX[i:i + self.batch_size]
                # softmax on the tensor, with an explicit dimension
                vals = F.softmax(self.model(Xbatch), dim=-1).data.cpu().numpy()
                probas.append(vals)
        # one concatenation over a list of per-batch arrays
        return np.concatenate(probas, axis=0)
"""
MLP with Pytorch (nhid=0 --> Logistic Regression)
"""
class MLP(PyTorchClassifier):
    """MLP with PyTorch (nhid=0 --> Logistic Regression).

    ``params`` keys (all optional):
      - nhid: number of hidden units (0: Logistic Regression)
      - optim: optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..)
      - tenacity: how many times dev acc does not increase before stopping
      - epoch_size: each epoch corresponds to epoch_size pass on the train set
      - max_epoch: max number of epoches
      - dropout: dropout for MLP
      - batch_size: minibatch size
    """

    def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64,
                 seed=1111, cudaEfficient=False):
        # super(MLP, ...) rather than super(self.__class__, ...): the latter
        # recurses infinitely if MLP is ever subclassed.
        super(MLP, self).__init__(inputdim, nclasses, l2reg,
                                  batch_size, seed, cudaEfficient)
        self.nhid = 0 if "nhid" not in params else params["nhid"]
        self.optim = "adam" if "optim" not in params else params["optim"]
        self.tenacity = 5 if "tenacity" not in params else params["tenacity"]
        self.epoch_size = 4 if "epoch_size" not in params else params["epoch_size"]
        self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"]
        self.dropout = 0. if "dropout" not in params else params["dropout"]
        self.batch_size = 64 if "batch_size" not in params else params["batch_size"]
        # Use the defaulted self.nhid below: the original indexed
        # params["nhid"] directly, which raises KeyError when "nhid" is
        # absent even though a default of 0 was just computed above.
        if self.nhid == 0:
            self.model = nn.Sequential(
                nn.Linear(self.inputdim, self.nclasses),
            ).cuda()
        else:
            self.model = nn.Sequential(
                nn.Linear(self.inputdim, self.nhid),
                nn.Dropout(p=self.dropout),
                nn.Sigmoid(),
                nn.Linear(self.nhid, self.nclasses),
            ).cuda()
        self.loss_fn = nn.CrossEntropyLoss().cuda()
        self.loss_fn.size_average = False
        optim_fn, optim_params = utils.get_optimizer(self.optim)
        self.optimizer = optim_fn(self.model.parameters(), **optim_params)
        self.optimizer.param_groups[0]['weight_decay'] = self.l2reg
| 7,737 | 37.118227 | 94 |
py
|
SimCSE
|
SimCSE-main/SentEval/senteval/tools/__init__.py
| 0 | 0 | 0 |
py
|
|
SimCSE
|
SimCSE-main/SentEval/senteval/tools/ranking.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Image Annotation/Search for COCO with Pytorch
"""
from __future__ import absolute_import, division, unicode_literals
import logging
import copy
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
import torch.optim as optim
class COCOProjNet(nn.Module):
    """Two linear heads projecting image and sentence features into a common
    space; every projection is L2-normalised row-wise."""

    def __init__(self, config):
        super(COCOProjNet, self).__init__()
        self.imgdim = config['imgdim']
        self.sentdim = config['sentdim']
        self.projdim = config['projdim']
        self.imgproj = nn.Sequential(nn.Linear(self.imgdim, self.projdim))
        self.sentproj = nn.Sequential(nn.Linear(self.sentdim, self.projdim))

    @staticmethod
    def _unit(t):
        # Scale each row of t to unit Euclidean length.
        return t / torch.sqrt(torch.pow(t, 2).sum(1, keepdim=True)).expand_as(t)

    def forward(self, img, sent, imgc, sentc):
        """Score anchors against their contrastive counterparts.

        img: (bsize, imgdim), sent: (bsize, sentdim),
        imgc: (bsize, ncontrast, imgdim), sentc: (bsize, ncontrast, sentdim).
        Returns four (bsize*ncontrast,) similarity vectors.
        """
        # Repeat each anchor once per contrastive sample, then flatten.
        flat_img = img.unsqueeze(1).expand_as(imgc).contiguous().view(-1, self.imgdim)
        flat_imgc = imgc.view(-1, self.imgdim)
        flat_sent = sent.unsqueeze(1).expand_as(sentc).contiguous().view(-1, self.sentdim)
        flat_sentc = sentc.view(-1, self.sentdim)

        imgproj = self._unit(self.imgproj(flat_img))
        imgcproj = self._unit(self.imgproj(flat_imgc))
        sentproj = self._unit(self.sentproj(flat_sent))
        sentcproj = self._unit(self.sentproj(flat_sentc))

        # Cosine similarities (rows are unit vectors, so a dot product).
        anchor1 = torch.sum(imgproj * sentproj, 1)
        anchor2 = torch.sum(sentproj * imgproj, 1)
        img_sentc = torch.sum(imgproj * sentcproj, 1)
        sent_imgc = torch.sum(sentproj * imgcproj, 1)
        return anchor1, anchor2, img_sentc, sent_imgc

    def proj_sentence(self, sent):
        """Project sentences to the shared space; rows are unit-norm."""
        return self._unit(self.sentproj(sent))  # (bsize, projdim)

    def proj_image(self, img):
        """Project images to the shared space; rows are unit-norm."""
        return self._unit(self.imgproj(img))  # (bsize, projdim)
class PairwiseRankingLoss(nn.Module):
    """Margin-based pairwise ranking loss, summed over both retrieval
    directions (image->text and text->image)."""

    def __init__(self, margin):
        super(PairwiseRankingLoss, self).__init__()
        self.margin = margin

    def forward(self, anchor1, anchor2, img_sentc, sent_imgc):
        # Hinge on each contrastive pair: penalise whenever a contrastive
        # score comes within `margin` of the anchor score.
        sent_term = (self.margin - anchor1 + img_sentc).clamp(min=0.0).sum()
        img_term = (self.margin - anchor2 + sent_imgc).clamp(min=0.0).sum()
        return sent_term + img_term
class ImageSentenceRankingPytorch(object):
    # Image Sentence Ranking on COCO with Pytorch
    # Trains COCOProjNet with PairwiseRankingLoss and reports recall@k /
    # median rank in both directions. Requires CUDA (.cuda() throughout).
    def __init__(self, train, valid, test, config):
        # fix seed
        self.seed = config['seed']
        np.random.seed(self.seed)
        torch.manual_seed(self.seed)
        torch.cuda.manual_seed(self.seed)
        # train/valid/test: dicts with 'imgfeat' and 'sentfeat' arrays;
        # assumes 5 captions per image (see the hard-coded 5s below).
        self.train = train
        self.valid = valid
        self.test = test
        self.imgdim = len(train['imgfeat'][0])
        self.sentdim = len(train['sentfeat'][0])
        self.projdim = config['projdim']
        self.margin = config['margin']
        self.batch_size = 128
        self.ncontrast = 30  # contrastive samples per anchor
        self.maxepoch = 20
        self.early_stop = True
        config_model = {'imgdim': self.imgdim,'sentdim': self.sentdim,
                        'projdim': self.projdim}
        self.model = COCOProjNet(config_model).cuda()
        self.loss_fn = PairwiseRankingLoss(margin=self.margin).cuda()
        self.optimizer = optim.Adam(self.model.parameters())
    def prepare_data(self, trainTxt, trainImg, devTxt, devImg,
                     testTxt, testImg):
        # Train tensors stay on CPU (indexed batch by batch); dev/test go
        # straight to GPU since they are only used for scoring.
        trainTxt = torch.FloatTensor(trainTxt)
        trainImg = torch.FloatTensor(trainImg)
        devTxt = torch.FloatTensor(devTxt).cuda()
        devImg = torch.FloatTensor(devImg).cuda()
        testTxt = torch.FloatTensor(testTxt).cuda()
        testImg = torch.FloatTensor(testImg).cuda()
        return trainTxt, trainImg, devTxt, devImg, testTxt, testImg
    def run(self):
        """Train with early stopping on dev recall, then evaluate on test.

        Returns (bestdevscore, i2t r1/r5/r10/medr, t2i r1/r5/r10/medr),
        each metric averaged over 5 splits of 5000 pairs.
        """
        self.nepoch = 0
        bestdevscore = -1
        early_stop_count = 0
        stop_train = False
        # Preparing data
        logging.info('prepare data')
        trainTxt, trainImg, devTxt, devImg, testTxt, testImg = \
            self.prepare_data(self.train['sentfeat'], self.train['imgfeat'],
                              self.valid['sentfeat'], self.valid['imgfeat'],
                              self.test['sentfeat'], self.test['imgfeat'])
        # Training
        while not stop_train and self.nepoch <= self.maxepoch:
            logging.info('start epoch')
            self.trainepoch(trainTxt, trainImg, devTxt, devImg, nepoches=1)
            logging.info('Epoch {0} finished'.format(self.nepoch))
            # dev metrics accumulated as a mean over 5 fixed 5000-item splits
            results = {'i2t': {'r1': 0, 'r5': 0, 'r10': 0, 'medr': 0},
                       't2i': {'r1': 0, 'r5': 0, 'r10': 0, 'medr': 0},
                       'dev': bestdevscore}
            score = 0
            for i in range(5):
                devTxt_i = devTxt[i*5000:(i+1)*5000]
                devImg_i = devImg[i*5000:(i+1)*5000]
                # Compute dev ranks img2txt
                r1_i2t, r5_i2t, r10_i2t, medr_i2t = self.i2t(devImg_i,
                                                             devTxt_i)
                results['i2t']['r1'] += r1_i2t / 5
                results['i2t']['r5'] += r5_i2t / 5
                results['i2t']['r10'] += r10_i2t / 5
                results['i2t']['medr'] += medr_i2t / 5
                logging.info("Image to text: {0}, {1}, {2}, {3}"
                             .format(r1_i2t, r5_i2t, r10_i2t, medr_i2t))
                # Compute dev ranks txt2img
                r1_t2i, r5_t2i, r10_t2i, medr_t2i = self.t2i(devImg_i,
                                                             devTxt_i)
                results['t2i']['r1'] += r1_t2i / 5
                results['t2i']['r5'] += r5_t2i / 5
                results['t2i']['r10'] += r10_t2i / 5
                results['t2i']['medr'] += medr_t2i / 5
                logging.info("Text to Image: {0}, {1}, {2}, {3}"
                             .format(r1_t2i, r5_t2i, r10_t2i, medr_t2i))
                score += (r1_i2t + r5_i2t + r10_i2t +
                          r1_t2i + r5_t2i + r10_t2i) / 5
            logging.info("Dev mean Text to Image: {0}, {1}, {2}, {3}".format(
                results['t2i']['r1'], results['t2i']['r5'],
                results['t2i']['r10'], results['t2i']['medr']))
            logging.info("Dev mean Image to text: {0}, {1}, {2}, {3}".format(
                results['i2t']['r1'], results['i2t']['r5'],
                results['i2t']['r10'], results['i2t']['medr']))
            # early stop on Pearson
            # (NOTE(review): comment inherited from sibling code — the score
            # here is actually summed recall, not Pearson)
            if score > bestdevscore:
                bestdevscore = score
                bestmodel = copy.deepcopy(self.model)
            elif self.early_stop:
                if early_stop_count >= 3:
                    stop_train = True
                early_stop_count += 1
        self.model = bestmodel
        # Compute test for the 5 splits
        results = {'i2t': {'r1': 0, 'r5': 0, 'r10': 0, 'medr': 0},
                   't2i': {'r1': 0, 'r5': 0, 'r10': 0, 'medr': 0},
                   'dev': bestdevscore}
        for i in range(5):
            testTxt_i = testTxt[i*5000:(i+1)*5000]
            testImg_i = testImg[i*5000:(i+1)*5000]
            # Compute test ranks img2txt
            r1_i2t, r5_i2t, r10_i2t, medr_i2t = self.i2t(testImg_i, testTxt_i)
            results['i2t']['r1'] += r1_i2t / 5
            results['i2t']['r5'] += r5_i2t / 5
            results['i2t']['r10'] += r10_i2t / 5
            results['i2t']['medr'] += medr_i2t / 5
            # Compute test ranks txt2img
            r1_t2i, r5_t2i, r10_t2i, medr_t2i = self.t2i(testImg_i, testTxt_i)
            results['t2i']['r1'] += r1_t2i / 5
            results['t2i']['r5'] += r5_t2i / 5
            results['t2i']['r10'] += r10_t2i / 5
            results['t2i']['medr'] += medr_t2i / 5
        return bestdevscore, results['i2t']['r1'], results['i2t']['r5'], \
            results['i2t']['r10'], results['i2t']['medr'], \
            results['t2i']['r1'], results['t2i']['r5'], \
            results['t2i']['r10'], results['t2i']['medr']
    def trainepoch(self, trainTxt, trainImg, devTxt, devImg, nepoches=1):
        # One (or more) passes of contrastive minibatch training; logs
        # intermediate dev ranks every 500 batches.
        self.model.train()
        for _ in range(self.nepoch, self.nepoch + nepoches):
            permutation = list(np.random.permutation(len(trainTxt)))
            all_costs = []
            for i in range(0, len(trainTxt), self.batch_size):
                # forward
                if i % (self.batch_size*500) == 0 and i > 0:
                    logging.info('samples : {0}'.format(i))
                    r1_i2t, r5_i2t, r10_i2t, medr_i2t = self.i2t(devImg,
                                                                 devTxt)
                    logging.info("Image to text: {0}, {1}, {2}, {3}".format(
                        r1_i2t, r5_i2t, r10_i2t, medr_i2t))
                    # Compute test ranks txt2img
                    r1_t2i, r5_t2i, r10_t2i, medr_t2i = self.t2i(devImg,
                                                                 devTxt)
                    logging.info("Text to Image: {0}, {1}, {2}, {3}".format(
                        r1_t2i, r5_t2i, r10_t2i, medr_t2i))
                idx = torch.LongTensor(permutation[i:i + self.batch_size])
                imgbatch = Variable(trainImg.index_select(0, idx)).cuda()
                sentbatch = Variable(trainTxt.index_select(0, idx)).cuda()
                # contrastive samples drawn from everything outside this batch
                idximgc = np.random.choice(permutation[:i] +
                                           permutation[i + self.batch_size:],
                                           self.ncontrast*idx.size(0))
                idxsentc = np.random.choice(permutation[:i] +
                                            permutation[i + self.batch_size:],
                                            self.ncontrast*idx.size(0))
                idximgc = torch.LongTensor(idximgc)
                idxsentc = torch.LongTensor(idxsentc)
                # Get indexes for contrastive images and sentences
                imgcbatch = Variable(trainImg.index_select(0, idximgc)).view(
                    -1, self.ncontrast, self.imgdim).cuda()
                sentcbatch = Variable(trainTxt.index_select(0, idxsentc)).view(
                    -1, self.ncontrast, self.sentdim).cuda()
                anchor1, anchor2, img_sentc, sent_imgc = self.model(
                    imgbatch, sentbatch, imgcbatch, sentcbatch)
                # loss
                loss = self.loss_fn(anchor1, anchor2, img_sentc, sent_imgc)
                all_costs.append(loss.data.item())
                # backward
                self.optimizer.zero_grad()
                loss.backward()
                # Update parameters
                self.optimizer.step()
        self.nepoch += nepoches
    def t2i(self, images, captions):
        """
        Images: (5N, imgdim) matrix of images
        Captions: (5N, sentdim) matrix of captions
        Returns (r1, r5, r10, medr) for text->image retrieval.
        """
        with torch.no_grad():
            # Project images and captions
            img_embed, sent_embed = [], []
            for i in range(0, len(images), self.batch_size):
                img_embed.append(self.model.proj_image(
                    Variable(images[i:i + self.batch_size])))
                sent_embed.append(self.model.proj_sentence(
                    Variable(captions[i:i + self.batch_size])))
            img_embed = torch.cat(img_embed, 0).data
            sent_embed = torch.cat(sent_embed, 0).data
            # keep one image per group of 5 captions
            npts = int(img_embed.size(0) / 5)
            idxs = torch.cuda.LongTensor(range(0, len(img_embed), 5))
            ims = img_embed.index_select(0, idxs)
            ranks = np.zeros(5 * npts)
            for index in range(npts):
                # Get query captions
                queries = sent_embed[5*index: 5*index + 5]
                # Compute scores
                scores = torch.mm(queries, ims.transpose(0, 1)).cpu().numpy()
                inds = np.zeros(scores.shape)
                for i in range(len(inds)):
                    inds[i] = np.argsort(scores[i])[::-1]
                    ranks[5 * index + i] = np.where(inds[i] == index)[0][0]
            # Compute metrics
            r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
            r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
            r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
            medr = np.floor(np.median(ranks)) + 1
            return (r1, r5, r10, medr)
    def i2t(self, images, captions):
        """
        Images: (5N, imgdim) matrix of images
        Captions: (5N, sentdim) matrix of captions
        Returns (r1, r5, r10, medr) for image->text retrieval; an image's
        rank is the best rank among its 5 ground-truth captions.
        """
        with torch.no_grad():
            # Project images and captions
            img_embed, sent_embed = [], []
            for i in range(0, len(images), self.batch_size):
                img_embed.append(self.model.proj_image(
                    Variable(images[i:i + self.batch_size])))
                sent_embed.append(self.model.proj_sentence(
                    Variable(captions[i:i + self.batch_size])))
            img_embed = torch.cat(img_embed, 0).data
            sent_embed = torch.cat(sent_embed, 0).data
            npts = int(img_embed.size(0) / 5)
            index_list = []
            ranks = np.zeros(npts)
            for index in range(npts):
                # Get query image
                query_img = img_embed[5 * index]
                # Compute scores
                scores = torch.mm(query_img.view(1, -1),
                                  sent_embed.transpose(0, 1)).view(-1)
                scores = scores.cpu().numpy()
                inds = np.argsort(scores)[::-1]
                index_list.append(inds[0])
                # Score
                rank = 1e20
                for i in range(5*index, 5*index + 5, 1):
                    tmp = np.where(inds == i)[0][0]
                    if tmp < rank:
                        rank = tmp
                ranks[index] = rank
            # Compute metrics
            r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
            r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
            r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
            medr = np.floor(np.median(ranks)) + 1
            return (r1, r5, r10, medr)
| 15,275 | 41.433333 | 109 |
py
|
RISS19
|
RISS19-master/varcalc.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division
from astropy.coordinates import SkyCoord
from astropy.table import Table, Column
import astropy.units as u
from astropy.utils.exceptions import AstropyWarning
from lib.SM2017 import SM, kpc
import logging
import os
import sys
import argparse
import numpy as np
# Turn off the stupid warnings that Astropy emits when loading just about any fits file.
import warnings
warnings.simplefilter("ignore", category=AstropyWarning)
# configure logging
logging.basicConfig(format="%(module)s:%(levelname)s %(message)s")
log = logging.getLogger("varcalc")
log.setLevel(logging.INFO)
if __name__ == "__main__":
    # CLI: compute interstellar-scintillation observables (via lib.SM2017.SM)
    # for a single sky position (--pos) or a table of positions (--in/--out).
    parser = argparse.ArgumentParser()
    # --- flags selecting which quantities to compute ---
    group1 = parser.add_argument_group("Output parameter selection")
    group1.add_argument(
        "--Halpha",
        dest="halpha",
        action="store_true",
        default=False,
        help="Calculate Hα intensity (Rayleighs)",
    )
    group1.add_argument(
        "--xi",
        dest="xi",
        action="store_true",
        default=False,
        help="Calculate ξ (dimensionless)",
    )
    group1.add_argument(
        "--mod",
        dest="m",
        action="store_true",
        default=False,
        help="Calculate modulation index (fraction)",
    )
    group1.add_argument(
        "--sm",
        dest="sm",
        action="store_true",
        default=False,
        help="Calculate scintillation measure (kpc m^{-20/3})",
    )
    group1.add_argument(
        "--timescale",
        dest="t0",
        action="store_true",
        default=False,
        help="Calculate timescale of variability (years)",
    )
    group1.add_argument(
        "--rms1y",
        dest="rms",
        action="store_true",
        default=False,
        help="Calculate rms variability over 1 year (fraction/year)",
    )
    group1.add_argument(
        "--theta",
        dest="theta",
        action="store_true",
        default=False,
        help="Calculate the scattering disk size (deg)",
    )
    group1.add_argument(
        "--nuzero",
        dest="nuzero",
        action="store_true",
        default=False,
        help="Calculate the transition frequency (GHz)",
    )
    group1.add_argument(
        "--fzero",
        dest="fzero",
        action="store_true",
        default=False,
        help="Calculate the Fresnel zone (m)",
    )
    group1.add_argument(
        "--dist",
        dest="dist",
        action="store_true",
        default=False,
        help="Calculate the model distance",
    )
    group1.add_argument(
        "--all",
        dest="do_all",
        action="store_true",
        default=False,
        help="Include all of the above parameter calculations",
    )
    # --- input/output selection ---
    group2 = parser.add_argument_group("Input and output data")
    group2.add_argument(
        "--in", dest="infile", default=None, type=str, help="Table of coordinates"
    )
    group2.add_argument(
        "--incol",
        dest="cols",
        default=("ra", "dec"),
        nargs=2,
        type=str,
        help="Column names to read from input. [ra,dec]",
    )
    group2.add_argument(
        "--out", dest="outfile", default=None, type=str, help="Table of results"
    )
    group2.add_argument(
        "--append",
        dest="append",
        action="store_true",
        default=False,
        help="Append the data to the input data (write a new file)",
    )
    group2.add_argument(
        "--pos",
        dest="pos",
        default=None,
        nargs=2,
        type=float,
        help="Single coordinates in ra/dec degrees",
    )
    group2.add_argument(
        "-g",
        "--galactic",
        dest="galactic",
        action="store_true",
        default=False,
        help="Interpret input coordinates as l/b instead of ra/dec (default False)",
    )
    group2.add_argument(
        "--debug",
        dest="debug",
        action="store_true",
        default=False,
        help="Debug mode (default False)",
    )
    # --- physical model parameters ---
    group3 = parser.add_argument_group("Input parameter settings")
    group3.add_argument(
        "--freq", dest="frequency", default=185, type=float, help="Frequency in MHz"
    )
    group3.add_argument(
        "--dist_in",
        dest="dist_in",
        type=float,
        default=None,
        help="Distance to scattering screen in kpc",
    )
    group3.add_argument(
        "--vel",
        dest="velocity",
        default=10,
        type=float,
        help="Relative motion of screen and observer in km/s",
    )
    results = parser.parse_args()
    if results.debug:
        log.setLevel(logging.DEBUG)
    if results.do_all:
        # --all switches every output flag on
        results.halpha = results.sm = results.m = results.rms = True
        results.xi = results.t0 = results.theta = results.nuzero = results.fzero = True
        results.dist = True
    # data is stored in the data dir, relative to *this* file
    datadir = os.path.join(os.path.dirname(__file__), "data")
    nu = results.frequency * 1e6  # Hz (input is in MHz)
    v = results.velocity * 1e3  # m/s (input is in km/s)
    d = results.dist_in  # kpc
    # For doing a one off position calculation
    if results.pos is None and results.infile is None:
        parser.print_usage()
        sys.exit(0)
    if results.galactic:
        log.info("Using galactic coordinates")
        frame = "galactic"
    else:
        log.info("Using fk5 coordinates")
        frame = "fk5"
    if results.pos:
        # single-position mode: print each requested quantity to stdout
        ra, dec = results.pos
        pos = SkyCoord([ra] * u.degree, [dec] * u.degree, frame=frame)
        log.info(os.path.join(datadir, "Halpha_error.fits"))
        sm = SM(
            ha_file=os.path.join(datadir, "Halpha_map.fits"),
            err_file=os.path.join(datadir, "Halpha_error.fits"),
            nu=nu,
            log=log,
            d=d,
            v=v,
        )
        if results.halpha:
            # NOTE(review): root logger here, not the module 'log' logger
            logging.debug(sm.get_halpha(pos))
            val, err = sm.get_halpha(pos)
            print("Halpha: ", val, "(Rayleighs)")
            print("err_Halpha: ", err, "(Rayleighs)")
        if results.xi:
            val, err = sm.get_xi(pos)
            print("xi: ", val)
            print("err_xi: ", err)
        if results.sm:
            val, err = sm.get_sm(pos)
            print("sm: ", val, "kpc m^{-20/3}")
            print("err_sm: ", err, "kpc m^{-20/3}")
        if results.m:
            val, err = sm.get_m(pos)
            print("m: ", val * 100, "%")
            print("err_m: ", err * 100, "%")
        if results.t0:
            val, err = sm.get_timescale(pos)
            print("t0: ", val, "years")
            print("err_t0: ", err, "years")
        if results.rms:
            val, err = sm.get_rms_var(pos)
            print("rms: ", val * 100, "%/1year")
            print("err_rms: ", err * 100, "%/1year")
        if results.theta:
            val, err = sm.get_theta(pos)
            print("theta: ", val, "deg")
            print("err_theta: ", err, "deg")
        if results.nuzero:
            val = sm.get_vo(pos)
            print("nu0: ", val, "GHz")
        if results.dist:
            val = sm.get_distance(pos)
            print("distance: ", val, "kpc")
        if results.fzero:
            # Fresnel scale converted to an angle at the screen distance
            val = np.degrees(sm.get_rf(pos) / (sm.get_distance(pos) * kpc.value))
            print("fzero: ", val, "deg")
        sys.exit(0)
    if results.infile:
        # table mode: same quantities, written as new columns
        if not results.outfile:
            print("Output file is required")
            sys.exit(1)
        # read the input data
        tab = Table.read(results.infile)
        ra = tab[results.cols[0]]
        dec = tab[results.cols[1]]
        # create the sky coordinate
        pos = SkyCoord(ra, dec, unit=(u.deg, u.deg), frame=frame)
        # make the SM object
        sm = SM(
            ha_file=os.path.join(datadir, "Halpha_map.fits"),
            err_file=os.path.join(datadir, "Halpha_error.fits"),
            nu=nu,
            log=log,
            d=d,
            v=v,
        )
        # make a new table for writing and copy the ra/dec unless we are appending to the old file
        if not results.append:
            tab = Table()
            tab.add_column(ra)
            tab.add_column(dec)
        else:
            print("Appending results to existing table")
        if results.halpha:
            val, err = sm.get_halpha(pos)
            tab.add_column(Column(data=val, name="Halpha"))
            tab.add_column(Column(data=err, name="err_Halpha"))
        if results.dist:
            val = sm.get_distance(pos)
            tab.add_column(Column(data=val, name="Distance"))
        if results.xi:
            val, err = sm.get_xi(pos)
            tab.add_column(Column(data=val, name="xi"))
            tab.add_column(Column(data=err, name="err_xi"))
        if results.sm:
            val, err = sm.get_sm(pos)
            tab.add_column(Column(data=val, name="sm"))
            tab.add_column(Column(data=err, name="err_sm"))
        if results.m:
            val, err = sm.get_m(pos)
            tab.add_column(Column(data=val, name="m"))
            tab.add_column(Column(data=err, name="err_m"))
        if results.t0:
            val, err = sm.get_timescale(pos)
            tab.add_column(Column(data=val, name="t0"))
            tab.add_column(Column(data=err, name="err_t0"))
        if results.rms:
            val, err = sm.get_rms_var(pos)
            tab.add_column(Column(data=val, name="rms1yr"))
            tab.add_column(Column(data=err, name="err_rms1yr"))
        if results.theta:
            val, err = sm.get_theta(pos)
            tab.add_column(Column(data=val, name="theta_r"))
            tab.add_column(Column(data=err, name="err_theta_r"))
        if results.nuzero:
            val = sm.get_vo(pos)
            tab.add_column(Column(data=val, name="nu0"))
        if results.fzero:
            val = np.degrees(sm.get_rf(pos) / (sm.get_distance(pos) * kpc.value))
            tab.add_column(Column(data=val, name="fzero"))
        print("Writing to {0}".format(results.outfile))
        tab.write(results.outfile, overwrite=True)
| 10,052 | 30.317757 | 98 |
py
|
RISS19
|
RISS19-master/HaVS.py
|
from __future__ import print_function, division
import os
import logging
import cPickle
import argparse
import numpy as np
import numpy.polynomial.polynomial as poly
from astropy.coordinates import SkyCoord
from astropy.table import Table, Column
import astropy.units as u
from lib.new_SM17 import SM
from astropy.utils.exceptions import AstropyWarning
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
#warnings.filterwarnings("always")
#warnings.simplefilter('ignore', category=AstropyWarning)
# Module-level configuration: source-type constants, class probabilities and
# the command-line interface for the Hα variability simulation.
datadir = os.path.join(os.path.dirname(__file__), 'data')
# source-type labels
SFG=0
AGN=1
stypes=[SFG, AGN]
#sprobs=[0.84839, 1-0.84839] #0.15161
#sprobs=[1-0.84839,0.84839]
#Chetri2017 Strong Scint
#sprobs=[1/-37./347.,37./347.]
#Chetri2017 Strong + Mod Scint
# probability of [non-scintillating, scintillating] sources
# (37 strong + 91 moderate out of 347, per the Chetri 2017 note above)
sprobs=[1.-(37.+91.)/347.,(37.+91.)/347.]
parser = argparse.ArgumentParser()
parser.add_argument('-FUL', action='store', dest='FUL', default=1.,
                    help='Store upper flux limit (Jy)')
parser.add_argument('-FLL', action='store', dest='FLL', default=50e-3,
                    help='Store lower flux limit (Jy)')
parser.add_argument('-mc', action='store', dest='mc', default=0.05,
                    help='Store modulation cut off value')
parser.add_argument('-t', action='store', dest='obs_time', default=365.,
                    help='observation time in days')
parser.add_argument('-a', action='store', dest='a', default=3300.,
                    help='Scaling Constant for source counts')
#parser.add_argument('-scount', action='store', dest='scount', default=False, help='Number of sources')
parser.add_argument('-f', action='store', dest='nu', default=185.,
                    help='Frequency in MHz')
parser.add_argument('-i', action='store', dest='loops', default=20,
                    help='Number of iterations to run program through (30+ recommended)')
parser.add_argument('-reg', action='store', dest='region_name',
                    help='read in region file')
parser.add_argument('-map', action='store', dest='map', default=0,
                    help='Select old (0) or new (1) Ha maps')
parser.add_argument('--out', dest='outfile', default=False, type=str,
                    help="Output file name for results including file type (.csv)")
parser.add_argument('--fig', dest='figure', default=False,
                    help="Save Figure?")
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
# parsed at import time; SIM reads this module-level object directly
results = parser.parse_args()
outfile=results.outfile
class SIM(object):
    def __init__(self, log=None):
        """Set up the simulation from the module-level argparse `results`.

        log: optional logging.Logger; a DEBUG-level one is created if None.
        """
        if log is None:
            logging.basicConfig(format="%(module)s:%(levelname)s %(message)s")
            self.log = logging.getLogger("SIM_new")
            self.log.setLevel(logging.DEBUG)
        else:
            self.log=log
        #Variables (all pulled from the command-line `results` namespace)
        self.figure=results.figure
        self.nu = np.float(results.nu) * 1e6 #Hz, Default 185 MHz
        self.arcsec = np.pi / (180. * 3600.)  # one arcsecond in radians
        self.mod_cutoff = np.float(results.mc) #Default 0.05
        self.low_Flim = np.float(results.FLL) # Jy, Default 50e-3 Jy
        self.upp_Flim = np.float(results.FUL) # Jy, Default 1 Jy
        self.region_name = results.region_name
        # MIMAS region pickle defining the simulated sky area
        region=cPickle.load(open(self.region_name, 'rb'))
        self.area = region.get_area(degrees=True)
        self.obs_time = np.float(results.obs_time) * 24. * 60. * 60. # seconds, Default 183 days
        self.loops=np.int(results.loops) #Default 20
        self.num_scale=40
        self.a=np.float(results.a) #Default 3300
        self.map=float(results.map)
        self.alpha=-0.8  # assumed spectral index for frequency scaling
        # pick the Halpha intensity/error maps according to -map
        if self.map==1:
            self.ha_file = 'Ha_map_new.fits'
            self.err_file = 'Ha_err_new.fits'
        elif self.map==0:
            self.ha_file = 'Halpha_map.fits'
            self.err_file = 'Halpha_error.fits'
        #self.scount=float(results.scount)
    def flux_gen(self):
        """
        Function to distribute flux across all points
        Input: Flux Density limit, RA/DEC positions, source distribution function
        Output: Flux for each RA/DEC point

        Builds a differential source-count model by blending the Franzen
        (154 MHz) and Hopkins (1.4 GHz) polynomial counts, scales it to the
        simulated area, and draws one flux per expected source.
        Returns (flux_array, number_of_sources).
        """
        def franz_counts(mids):
            # Franzen et al. 6th-order log-polynomial source counts (154 MHz)
            a = [3.52, 0.307, -0.388, -0.0404, 0.0351, 0.006]
            source_counts = []
            for ii in range(0, len(mids)):
                x = (mids[ii])
                sum_counts = 0.
                for i in range(0, 6):
                    sum_counts = sum_counts + (a[i] * (np.log10(x)) ** i)
                sum_counts = 10 ** (sum_counts)
                source_counts.append(sum_counts)
            return (source_counts)
        def hopkins_counts(mids):
            # Hopkins et al. 7th-order log-polynomial source counts (1.4 GHz);
            # expects flux in mJy, hence the 1e3 scaling
            a = [0.859, 0.508, 0.376, -0.049, -0.121, 0.057, -0.008]
            source_counts = []
            mids = mids * 1e3
            for ii in range(0, len(mids)):
                x = (mids[ii])
                sum_counts = 0.
                for i in range(0, 7):
                    sum_counts = sum_counts + (a[i] * (np.log10(x)) ** i)
                sum_counts = 10 ** (sum_counts)
                source_counts.append(sum_counts)
            return (source_counts)
        def linscale(freq, f0, low=50e-3, upp=1., alpha=-0.8, inc=1e-4):
            # linear flux bins: centres, widths and edges
            edges = np.arange(low, upp, inc)
            mids = []
            ds = []
            for i in range(0, len(edges) - 1):
                mids.append((edges[i + 1] + edges[i]) / 2.)
                ds.append(edges[i + 1] - edges[i])
            mids = np.array(mids)
            ds = np.array(ds)
            return mids, ds, edges
        def fran_gen(freq=185e6, alpha=-0.8, inc=1e-4):
            # Franzen counts scaled from 154 MHz to `freq` with index alpha
            f0 = 154e6
            low = 1e-3
            upp = 75.
            fr = ((freq * 1.0) / f0) ** (alpha)
            stats = linscale(freq, f0, low, upp, alpha, inc / fr)
            mid, ds, edg = stats
            counts = franz_counts(mid)
            numcounts = counts * ds * mid ** (-2.5)
            return mid * fr, numcounts, np.sum(numcounts), fr
        def hop_gen(freq=185e6, alpha=-0.8, inc=1e-4):
            # Hopkins counts scaled from 1.4 GHz to `freq` with index alpha
            f0 = 1400e6
            low = 0.05e-3
            upp = 1.
            fr = ((freq * 1.0) / f0) ** (alpha)
            stats = linscale(freq, f0, low, upp, alpha, inc / fr)
            mid, ds, edg = stats
            counts = hopkins_counts(mid)
            numcounts = counts * ds * mid ** (-2.5)
            return mid * fr, numcounts, np.sum(numcounts), fr
        def weight(freq=185e6, alpha=-0.8, inc=1e-4):
            # Blend the two count models, weighting by distance in frequency
            # from each model's reference frequency
            fmid, fcounts, ftotal, fratio = fran_gen(freq, alpha=alpha, inc=inc)
            hmid, hcounts, htotal, hratio = hop_gen(freq, alpha=alpha, inc=inc)
            f0_f = 154e6
            f0_h = 1400e6
            # WEIGHTING
            dF = np.abs(freq - f0_f)
            dH = np.abs(freq - f0_h)
            fw1 = 1. - (np.abs(dF) / (dF + dH))
            fw2 = 1. - (np.abs(dH) / (dF + dH))
            if freq <= 154e6:
                mids = np.array(fmid)
                ncounts = np.array(fcounts)
            elif freq >= 1400e6:
                mids = np.array(hmid)
                ncounts = np.array(hcounts)
            else:
                franz_upp = np.max(fmid)
                franz_low = np.min(fmid)
                hop_low = np.min(hmid)
                hop_upp = np.max(hmid)
                # OVERLAP: weighted average where both models cover the flux
                maskff = np.where((fmid >= hop_low) & (fmid <= hop_upp))
                maskhh = np.where((hmid >= franz_low) & (hmid <= franz_upp))
                m1 = (fmid[maskff] * fw1) + (hmid[maskhh] * fw2)
                n1 = (fcounts[maskff] * fw1) + (hcounts[maskhh] * fw2)
                # OUTER EDGES: take whichever single model covers the range
                maskf1 = np.where(fmid < hop_low)
                m2 = fmid[maskf1]
                n2 = fcounts[maskf1]
                maskf2 = np.where(fmid > hop_upp)
                m4 = fmid[maskf2]
                n4 = fcounts[maskf2]
                maskh1 = np.where(hmid < franz_low)
                m3 = hmid[maskh1]
                n3 = hcounts[maskh1]
                maskh2 = np.where(hmid > franz_upp)
                m5 = hmid[maskh2]
                n5 = hcounts[maskh2]
                ncounts = np.concatenate([n2, n3, n1, n4, n5])
                mids = np.concatenate([m2, m3, m1, m4, m5])
            mids = np.array(mids)
            ncounts = np.array(ncounts)
            return mids, ncounts, np.sum(ncounts), fmid, hmid, fcounts, hcounts, ftotal, htotal
        def limit(low, upp, freq=185e6, alpha=-0.8, inc=1e-4):
            # Fit a degree-15 log-log polynomial to the blended counts and
            # re-evaluate it on the requested [low, upp) flux range
            mids, ncounts, tcounts, fmid, hmid, fcounts, hcounts, ftotal, htotal = weight(freq, alpha, inc)
            x = np.log10(mids)
            y = np.log10(ncounts)
            xf = np.log10(fmid)
            yf = np.log10(fcounts)
            xh = np.log10(hmid)
            yh = np.log10(hcounts)
            deg = 15
            z = poly.polyfit(x, y, deg=deg)
            zf = poly.polyfit(xf, yf, deg=deg)
            zh = poly.polyfit(xh, yh, deg=deg)
            x0 = np.arange(low, upp, inc)
            x0 = np.log10(x0)
            p = 10 ** poly.polyval(x0, z)
            pf = 10 ** poly.polyval(x0, zf)
            ph = 10 ** poly.polyval(x0, zh)
            x0 = 10 ** x0
            mids, edges, fmid, hmid = x0[:-1] + inc, x0, x0[:-1] + inc, x0[:-1] + inc
            ncounts, fcounts, hcounts = p[:-1], pf[:-1], ph[:-1]
            return mids, ncounts, np.sum(ncounts), edges
        mids, norm_counts, total_counts, edges= limit(self.low_Flim, self.upp_Flim, self.nu, self.alpha)
        # scale counts per steradian to the simulated region's solid angle
        Area = self.area * (np.pi ** 2.) / (180. ** 2.)
        FLUX = []
        num_sources = norm_counts * Area
        tcounts = total_counts * Area
        for i in range(0, len(edges) - 1):
            count = num_sources[i]
            # probabilistically round the fractional expected count
            p = count - int(count)
            leftover_count = np.random.choice([0, 1], p=[1 - p, p])
            count = int(count) + leftover_count
            # draw `count` fluxes uniformly within this bin
            FLUX.extend(np.random.uniform(edges[i], edges[i + 1], size=int(count)))
        flux_arr = np.random.permutation(np.array(FLUX))
        return flux_arr, len(flux_arr)
def pos_gen(self):
"""
A function to generate a number of random points in RA/DEC
Input: Number of points to generate from flux_gen function
Output: List of RA/DEC (2,1) array in (2D) Cartesian coordiantes.
"""
num_sources=self.flux_gen()[1]
num=num_sources*200.
lim = int(num)
x = []
y = []
z = []
r = []
i = 0
#Generating cube
x1 = np.array(np.random.uniform(-1.0, 1.0, lim))
y1 = np.array(np.random.uniform(-1.0, 1.0, lim))
z1 = np.array(np.random.uniform(-1.0, 1.0, lim))
rad = (x1 ** 2.0 + y1 ** 2.0 + z1 ** 2.0) ** (0.5)
#Getting points inside sphere of radius 1
for i in range(0, len(rad)):
if rad[i] <= 1.:
x.append(x1[i])
y.append(y1[i])
z.append(z1[i])
r.append(rad[i])
x, y, z = np.array(x) / np.array(r), np.array(y) / np.array(r), np.array(z) / np.array(r)
r0 = (x ** 2.0 + y ** 2.0 + z ** 2.0) ** 0.5
#converting back to cartesian cooridantes
theta = np.arccos(z / r0) * 180 / np.pi
theta = theta - 90.
theta = theta
phi = np.arctan2(y, x) * 180. / (np.pi)
phi = phi + 180.
phi = phi
return np.array(phi), np.array(theta)
def region_gen(self, reg_file):
"""
Takes in a list of positions and removes points outside the MIMAS region
Input: RA/DEC positions and MIMAS region file.
Output: List of RA/DEC inside the correct region.
"""
# reg_ra = []
# reg_dec = []
# region = cPickle.load(open(reg_file, 'rb'))
# flux, num=self.flux_gen()
# while len(reg_ra) < num:
# RA, DEC = self.pos_gen()
# reg_arr = region.sky_within(RA, DEC, degin=True)
# reg_ra.extend(RA[reg_arr])
# reg_dec.extend(DEC[reg_arr])
reg_ra = []
reg_dec = []
region = cPickle.load(open(reg_file, 'rb'))
flux, num = self.flux_gen()
while len(reg_ra) < num:
RA, DEC = self.pos_gen()
c = SkyCoord(l=RA*u.degree, b=DEC*u.degree, frame='galactic')
ra=c.fk5.ra.deg
dec=c.fk5.dec.deg
reg_arr = region.sky_within(ra, dec, degin=True)
print(max(RA), max(DEC))
#for i in range(0, len(reg_arr)):
reg_ra.extend(RA[reg_arr])
reg_dec.extend(DEC[reg_arr])
print(max(reg_ra), max(reg_dec))
reg_dec= np.array(reg_dec[:num])
reg_ra = np.array(reg_ra[:num])
return reg_ra, reg_dec, flux, num
    def stype_gen(self):
        """
        Assign each source a type (compact AGN vs extended SFG) at random.

        Output: (stype_arr, ra, dec, flux) -- one type label per source plus
        the pass-through position and flux arrays from region_gen().
        """
        ra, dec, flux = self.region_gen(self.region_name)[0:3]
        arr = ra
        # `stypes` / `sprobs` are module-level arrays (type labels and their
        # probabilities) -- presumably defined earlier in this file; confirm
        stype_arr = (np.random.choice(stypes, p=sprobs, size=len(arr)))
        return stype_arr, ra, dec, flux
def ssize_gen(self):
"""
Generates source size based stype given.
Input: Flux and source type
Output: Source size
"""
stype,ra, dec, flux=self.stype_gen()
def ang_size(flux, freq, alpha=-0.8):
f0 = 1400e6
flux = np.array(flux)
fr = ((freq * 1.0) / f0) ** (alpha)
Sn = (flux) * fr
a = 2. * Sn ** 0.3
return a/3600., Sn
ssize_arr=ang_size(flux,freq=self.nu)[0]
ssize_arr=np.array(ssize_arr)
agn_mask = np.where(stype == 1)
if len(agn_mask[0])>=1:
agn_ssize=(1e-3) / 3600.
ssize_arr[agn_mask]=agn_ssize
return ssize_arr, stype, ra, dec ,flux
def output_gen(self):
"""
Function to use SM2017 to get Modulation, Timescale, Halpha, Theta and other values.
Input: RA, DEC, Source Size
Output: Modulation, Timescale, Halpha, Theta
"""
ssize, stype, ra, dec ,flux=self.ssize_gen()
nu = np.float(self.nu)
frame = 'galactic'
#frame='fk5'
tab = Table()
# create the sky coordinate
pos = SkyCoord(ra * u.degree, dec * u.degree, frame=frame)
# make the SM object
sm = SM(ha_file=os.path.join(datadir, self.ha_file),
err_file=os.path.join(datadir, self.err_file),
nu=nu)
# Halpha
Ha, err_Ha = sm.get_halpha(pos)
# xi
#xi, err_xi = sm.get_xi(pos)
# theta
theta, err_theta = sm.get_theta(pos)
#sm
#sm, err_sm = sm.get_sm(pos)
# mod
m, err_m = sm.get_m(pos, ssize)
# t0
t0, err_t0 = sm.get_timescale(pos,ssize)
# rms
#val6, err6 = sm.get_rms_var(pos, stype, ssize)
#tau
#tau, err_tau=sm.get_tau(pos)
tau=1
err_tau=1
print(max(m))
return m, err_m, t0, err_t0, Ha, err_Ha , theta, err_theta, tau, err_tau,ssize, stype, ra, dec ,flux
def areal_gen(self):
"""
Function to generate the areal sky density (ASD) values
Uses: Flux, Region, Stype, Ssize, Output (Ha, mod, t0, theta), Obs_Yrs
Output: ASD, modulation, timescale, Ha, Theta
"""
#stype = self.stype_gen()
#ssize = self.ssize_gen()
mod, err_m, t0, err_t0, Ha, err_Ha, theta, err_theta, tau, err_tau,ssize, stype, RA, DEC ,flux= self.output_gen()
obs_yrs = self.obs_time / (3600. * 24. * 365.25)
t_mask=np.where(np.float(obs_yrs)<=t0)
mod[t_mask] = mod[t_mask] * (np.float(obs_yrs)/ t0[t_mask])
err_m[t_mask] = err_m[t_mask] * (np.float(obs_yrs) / t0[t_mask])
#mp = np.random.normal(loc=mod, scale=err_m)
print(np.max(mod))
mp= np.random.uniform(low=mod-err_m, high= mod+err_m)
print(np.max(mp))
v_mask=np.where(mp*flux>=self.low_Flim*3.)
m_mask=np.where(mp>=self.mod_cutoff)
var_mask=np.where((mp*flux>=self.low_Flim*3.) & (mp>=self.mod_cutoff))
vcount = len(v_mask[0])
mcount = len(m_mask[0])
varcount = len(var_mask[0])
#print(vcount,mcount)
#print(np.nanmean(theta*3600))
mareal = float(mcount) / self.area
vareal = float(vcount) / self.area
varareal=float(varcount)/ self.area
print(mcount, vcount, varcount)
datatab1 = Table()
mvar=int(self.map)
datafile = self.region_name[8:-4] + '_test_19' +'_m{0}_data.csv'.format(mvar)
#print('mod_mean',np.mean(mod))
### DATA FILE
datatab1.add_column(Column(data=RA, name='RA'))
datatab1.add_column(Column(data=DEC, name='DEC'))
datatab1.add_column(Column(data=flux, name='flux'))
datatab1.add_column(Column(data=Ha, name='H_Alpha'))
datatab1.add_column(Column(data=err_Ha, name='H_Alpha err'))
datatab1.add_column(Column(data=mod, name='Modulation'))
datatab1.add_column(Column(data=err_m, name='Modulation err'))
datatab1.add_column(Column(data=t0, name='Timescale'))
datatab1.add_column(Column(data=err_t0, name='Timescale err'))
datatab1.add_column(Column(data=theta, name='Theta'))
datatab1.add_column(Column(data=err_theta, name='Theta err'))
#datatab1.add_column(Column(data=tau, name='Tau'))
#datatab1.add_column(Column(data=err_tau, name='Tau err'))
datatab1.write(datafile, overwrite=True)
return varareal, mp, t0, Ha, theta, flux, mareal, vareal, varareal,ssize, stype, RA, DEC ,flux
    def repeat(self):
        """
        Repeat the ASD calculation `self.loops` times and collect statistics.

        Output: per-iteration ASD array; (mean, std) pairs for modulation,
        timescale, H-alpha, and theta; iteration count; per-iteration source
        counts; the run parameters; and the per-source arrays from the LAST
        iteration (ssize, stype, RA, DEC, flux).
        """
        areal_arr = []
        # per-iteration (mean, std) pairs for each observable
        mod_arr = np.empty((self.loops, 2))
        t0_arr = np.empty((self.loops, 2))
        Ha_arr = np.empty((self.loops, 2))
        theta_arr = np.empty((self.loops, 2))
        NSources = []
        count = 0
        for i in range(0, self.loops):
            INPUT = self.areal_gen()
            # unpacked names are used below; note that ssize/stype/RA/DEC/flux
            # retain the values of the final loop iteration in the return
            varareal, mp, t0, Ha, theta, flux, mareal, vareal, varareal, ssize, stype, RA, DEC, flux = INPUT
            areal_arr.append(INPUT[0])
            mod_arr[i, :] = [np.mean(INPUT[1]), np.std(INPUT[1])]
            t0_arr[i, :] = [np.mean(INPUT[2]), np.std(INPUT[2])]
            Ha_arr[i, :] = [np.mean(INPUT[3]), np.std(INPUT[3])]
            theta_arr[i, :] = [np.mean(INPUT[4]), np.std(INPUT[4])]
            count = count + 1
            NSources.append(len(INPUT[1]))
        areal_arr = np.array(areal_arr)
        NSources = np.array(NSources)
        return areal_arr, mod_arr, t0_arr, Ha_arr, theta_arr, count, NSources, self.area, self.low_Flim, self.upp_Flim, self.obs_time, self.nu, self.mod_cutoff, ssize, stype, RA, DEC, flux
def test():
    """
    Run the full simulation and write the results to two files.

    Data file: raw per-iteration statistics.
    Results file: averaged results plus the run parameters.
    If `outfile` is False, a summary is printed to stdout instead.
    """
    sim = SIM()
    areal_arr, mod_arr, t0_arr, Ha_arr, theta_arr, count, NSources, area, low_Flim, upp_Flim, obs_time, nu, mod_cutoff, ssize, stype, RA, DEC, flux = sim.repeat()
    datatab = Table()
    resultstab = Table()
    # `outfile` is a module-level setting -- presumably a filename string or
    # False; TODO confirm where it is defined earlier in this file
    if outfile != False:
        datafile = outfile[:-4] + '_data' + outfile[-4:]
        ### DATA FILE: one row per iteration
        datatab.add_column(Column(data=np.arange(1, len(areal_arr) + 1, 1), name='Interations'))
        datatab.add_column(Column(data=Ha_arr[:, 0], name='H_Alpha Mean'))
        datatab.add_column(Column(data=Ha_arr[:, 1], name='H_Alpha STD'))
        datatab.add_column(Column(data=mod_arr[:, 0], name='Modulation Mean'))
        datatab.add_column(Column(data=mod_arr[:, 1], name='Modulation STD'))
        datatab.add_column(Column(data=t0_arr[:, 0], name='Timescale Mean'))
        datatab.add_column(Column(data=t0_arr[:, 1], name='Timescale STD'))
        datatab.add_column(Column(data=theta_arr[:, 0], name='Theta Mean'))
        datatab.add_column(Column(data=theta_arr[:, 1], name='Theta STD'))
        datatab.add_column(Column(data=areal_arr, name='Areal Sky Density'))
        datatab.write(datafile, overwrite=True)
        ##RESUTLS FILE: averaged statistics alongside the run parameters
        resultsfile = outfile[:-4] + '_results' + outfile[-4:]
        Stats = ['H_Alpha Mean', 'H_Alpha STD', 'Modulation Mean', 'Modulation STD', 'Timescale Mean (yrs)', 'Timescale STD (yrs)',
                 'Theta Mean (deg)', 'Theta STD (deg)', 'Areal Sky Desnity Mean', 'Areal Sky Desnity STD']
        Stats_vals = [np.mean(Ha_arr[:, 0]), np.std(Ha_arr[:, 0]), np.mean(mod_arr[:, 0]), np.std(mod_arr[:, 0]),
                      np.mean(t0_arr[:, 0]), np.std(t0_arr[:, 0]), np.mean(theta_arr[:, 0]), np.std(theta_arr[:, 0]),
                      np.mean(areal_arr), np.std(areal_arr)]
        Params = ['Avg # Sources', 'Avg Variables', 'Area (deg^2)', 'Lower Flux Limit (Jy)', 'Upper Flux Limit (Jy)', 'Observation time (days)', 'Frequency (MHz)', 'Modulation Cutoff']
        # pad the parameter column so both columns have equal length
        Params.extend(["", ""])
        Param_vals = [np.mean(NSources), area*np.mean(areal_arr), area, low_Flim, upp_Flim, obs_time/(24.*3600.), nu/(1E6), mod_cutoff]
        Param_vals.extend(["", ""])
        resultstab.add_column(Column(data=Stats, name='Statistics'))
        resultstab.add_column(Column(data=Stats_vals, name='Results'))
        resultstab.add_column(Column(data=Params, name='Parameters'))
        resultstab.add_column(Column(data=Param_vals, name='Values'))
        resultstab.write(resultsfile, overwrite=True)
    if outfile == False:
        # no output file: print a human-readable summary instead
        print("Array: {0}".format(areal_arr))
        print("Avg Areal: {0}".format(np.mean(areal_arr)))
        print("Iterations: {0}".format(len(areal_arr)))
        print("Num Sources: {0}".format(np.mean(NSources)))
        print("Area: {0}".format(np.round(area, 2)))
        print("Num Variable: {0}".format(np.mean(areal_arr)*area))
        print("% Variable: {0}".format(np.mean(areal_arr) * area*100./np.mean(NSources)))
        print("Avg Modulation: {0}".format(np.round(np.mean(mod_arr), 5)))
        print("Avg TScatt: {0}".format(np.round(np.mean(theta_arr), 5)))
        print("Avg Source Size: {0}".format(np.round(np.mean(ssize), 5)))
test()
| 22,316 | 39.948624 | 186 |
py
|
RISS19
|
RISS19-master/lib/__init__.py
|
__author__ = ['Paul Hancock', 'Elliott Charlton']
__version__ = 0.9
| 67 | 33 | 49 |
py
|
RISS19
|
RISS19-master/lib/Tau.py
|
from astropy.constants import kpc, c
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.wcs import WCS
import astropy.units as u
import numpy as np
import os
import logging
from scipy.special import gamma
class SM(object):
    """
    Map-lookup helper: H-alpha (or tau) values and derived quantities at
    arbitrary sky positions.

    :param ha_file: FITS image used for sky lookups
    :param err_file: FITS image with the uncertainty map (optional)
    :param nu: freq in Hz
    :param d: distance to the scattering screen in kpc
    :param v: relative source/observer velocity in m/s
    :param log: optional logging.Logger
    """
    def __init__(self, ha_file, err_file=None, nu=185e6, log=None, d=1, v=10e3):
        if log is None:
            logging.basicConfig(format="%(module)s:%(levelname)s %(message)s")
            self.log = logging.getLogger("SM2017")
            self.log.setLevel(logging.DEBUG)
        else:
            self.log = log
        # constants stored per instance so different instances can differ
        self.nu = nu  # Hz
        self.kpc = kpc.value  # in m
        self.t4 = 0.8  # t/1e4 K
        self.lo = 1e18/(self.kpc*1e-3)  # pc
        self.eps = 1
        self.D = d  # kpc - distance to the screen
        self.c = c.value  # speed of light, m/s
        # bugfix: was `11/3`, which is integer 3 under Python 2 (this module
        # has no `from __future__ import division`); sibling modules use 11/3.
        self.beta = 11 / 3.  # Kolmogorov spectral index
        self.re = 2.817e-15  # classical electron radius, m
        self.rf = np.sqrt(self.c * self.D * self.kpc / (2*np.pi*self.nu))  # Fresnel scale, m
        self.v = v  # relative velocity of source/observer in m/s
        self.file = ha_file
        self.err_file = err_file
        self._load_file()
    def _load_file(self):
        """Load the map (and optional error map) plus their WCS headers."""
        self.hdu = fits.getheader(self.file, ignore_missing_end=True)
        self.wcs = WCS(self.hdu)
        self.data = fits.open(self.file, memmap=True, ignore_missing_end=True)[0].data
        if self.err_file:
            self.err_hdu = fits.getheader(self.err_file, ignore_missing_end=True)
            self.err_wcs = WCS(self.err_hdu)
            self.err_data = fits.open(self.err_file, memmap=True, ignore_missing_end=True)[0].data
        else:
            # bugfix: attribute was misspelled `err_hud`, which left err_hdu
            # undefined when no error file was supplied
            self.err_hdu = self.err_wcs = self.err_data = None
        return
    def get_distance(self, position):
        """
        Distance to the scattering screen: half way to where the line of
        sight leaves a simple disk model of the Galaxy.
        :param position: sky position (array-valued SkyCoord)
        :return: distance to scattering screen in kpc
        """
        gal_r = 40.  # kpc - disk radius
        sun_r = 8.   # kpc - Sun's galactocentric radius
        gal_h = 1.   # kpc - disk thickness
        theta = position.galactic.l.radian  # angle from the GC along the plane
        phi = position.galactic.b.radian  # angle from the GC perp to the plane
        # path length to the disk's far edge within the plane
        far_edge = sun_r*np.cos(theta) + np.sqrt(gal_r**2. - sun_r**2.*np.sin(theta)**2.)
        # path length to the top/bottom of the disk
        top = (gal_h/2. / np.abs(np.sin(phi)))
        # line of sight exits through whichever boundary is closer; np.where
        # also avoids mutating `top` through an alias
        screen_dist = np.where(top > far_edge, far_edge, top)
        return screen_dist/2.
    def get_halpha(self, position):
        """
        Return the Halpha for a given location on the sky.
        :param position: astropy.coordinates.SkyCoord
        :return: (iha, err_iha)
        """
        # The coordinates we request need to be the same as that in the WCS header
        # for the files in this repo, this currently means galactic coordinates.
        # bugfix: wrap the inner zip in list() -- on Python 3 zip is a lazy
        # iterator and cannot be passed to all_world2pix directly
        x, y = zip(*self.wcs.all_world2pix(list(zip(position.galactic.l.degree, position.galactic.b.degree)), 0))
        x = np.int64(np.floor(x))
        x = np.clip(x, 0, self.hdu['NAXIS1'])
        y = np.int64(np.floor(y))
        y = np.clip(y, 0, self.hdu['NAXIS2'])
        iha = self.data[y, x]
        # NOTE(review): assumes err_file was supplied; err_data is None otherwise
        err_iha = self.err_data[y, x]
        return iha, err_iha
    def get_tau(self, position):
        """
        Return the map value (used as tau) at the given sky positions.
        NOTE(review): this reads the map loaded from `ha_file` -- construct
        the instance with a tau map for this to be meaningful.
        :param position: astropy.coordinates.SkyCoord
        :return: tau values
        """
        # bugfix: list() around zip for Python 3 compatibility, as above
        x, y = zip(*self.wcs.all_world2pix(list(zip(position.galactic.l.degree, position.galactic.b.degree)), 0))
        x = np.int64(np.floor(x))
        x = np.clip(x, 0, self.hdu['NAXIS1'])
        y = np.int64(np.floor(y))
        y = np.clip(y, 0, self.hdu['NAXIS2'])
        tau = self.data[y, x]
        return tau
    def get_m(self, position):
        """Modulation index derived from the tau map: m = tau^(3/2)."""
        tau = self.get_tau(position)
        m = tau**(3./2.)
        return m
# Module-level smoke test: look up tau at a handful of positions.
sm = SM(os.path.join('data', 'Ha_map_new.fits'), os.path.join('data', 'Ha_err_new.fits'), nu=1e8)
pos = SkyCoord([0, 4, 8, 12, 16, 20]*u.hour, [-90, -45, 0, 45, 90, -26]*u.degree)
# bugfix: `print pos` is a Python 2 print statement and a SyntaxError on
# Python 3; the function-call form works on both interpreters
print(pos)
print(sm.get_tau(pos))
| 4,586 | 32.977778 | 107 |
py
|
RISS19
|
RISS19-master/lib/tau.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division
from astropy.constants import kpc, c
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.wcs import WCS
import astropy.units as u
from astropy.table import Table, Column
import numpy as np
import os
import logging
from scipy.special import gamma
import matplotlib.pyplot as plt
SFG = 0
AGN = 1
seconds_per_year = 3600 * 24 * 365.25
alpha = 3.86
import warnings
warnings.filterwarnings("ignore")
class SM(object):
    """
    Scintillation model: H-alpha-driven scattering observables, with the
    Fresnel scale derived from a pulsar tau (scattering delay) map.

    :param ha_file: FITS map of H-alpha intensity
    :param err_file: FITS map of the H-alpha uncertainty (optional)
    :param nu: freq in Hz
    :param v: relative source/observer velocity in m/s
    :param log: optional logging.Logger
    """
    def __init__(self, ha_file, err_file=None, nu=185e6, log=None, v=10e3):
        if log is None:
            logging.basicConfig(format="%(module)s:%(levelname)s %(message)s")
            self.log = logging.getLogger("SM2017")
            self.log.setLevel(logging.DEBUG)
        else:
            self.log = log
        # constants stored per instance so different instances can differ
        self.nu = nu  # Hz
        self.kpc = kpc.value  # in m
        self.t4 = 0.8  # t/1e4 K
        self.lo = 1e18 / (self.kpc * 1e-3)  # pc
        self.eps = 1
        self.c = c.value  # speed of light, m/s
        self.beta = 11 / 3.  # Kolmogorov spectral index
        self.seconds_per_year = 3600 * 24 * 365.25
        self.re = 2.817e-15  # classical electron radius, m
        self.v = v  # relative velocity of source/observer in m/s
        self.file = ha_file
        self.err_file = err_file
        self.tau_file = "data/tau_map_near.fits"
        #self.tau_file = "data/ymw16tau_map_lin.fits"
        self._load_file()
    def _load_file(self):
        """Load the H-alpha map, its optional error map, and the tau map."""
        self.hdu = fits.getheader(self.file, ignore_missing_end=True)
        self.wcs = WCS(self.hdu)
        self.data = fits.open(self.file, memmap=True, ignore_missing_end=True)[0].data
        self.tau = fits.open(self.tau_file, memmap=True, ignore_missing_end=True)[0].data
        if self.err_file:
            self.err_hdu = fits.getheader(self.err_file, ignore_missing_end=True)
            self.err_wcs = WCS(self.err_hdu)
            self.err_data = fits.open(self.err_file, memmap=True, ignore_missing_end=True)[0].data
        else:
            # bugfix: attribute was misspelled `err_hud`, which left err_hdu
            # undefined when no error file was supplied
            self.err_hdu = self.err_wcs = self.err_data = None
        return
    def _pix(self, position):
        """Convert sky positions to clipped integer pixel coords of the maps."""
        # The coordinates we request need to be the same as that in the WCS header
        # for the files in this repo, this currently means galactic coordinates.
        # list() around the inner zip is needed on Python 3, where zip is lazy.
        x, y = zip(*self.wcs.all_world2pix(list(zip(position.galactic.l.degree,
                                                    position.galactic.b.degree)), 0))
        x = np.clip(np.int64(np.floor(x)), 0, self.hdu['NAXIS1'])
        y = np.clip(np.int64(np.floor(y)), 0, self.hdu['NAXIS2'])
        return x, y
    def get_tau(self, position):
        """
        Scattering delay tau at the observing frequency, scaled from the map.
        :param position: astropy.coordinates.SkyCoord
        :return: (tau, err_tau) in seconds; the error is a fixed 10% of tau
        """
        x, y = self._pix(position)
        tau = self.tau[y, x]
        alpha = 3.86  # frequency scaling index of the scattering delay
        tau = (tau/1e3) * ((self.nu / 1e9) ** (-alpha))  # In seconds
        err_tau = 0.1*tau
        return tau, err_tau
    def get_halpha(self, position):
        """
        Return the Halpha (and its map error) for given sky locations.
        :param position: astropy.coordinates.SkyCoord
        :return: (iha, err_iha)
        """
        x, y = self._pix(position)
        iha = self.data[y, x]
        # NOTE(review): assumes err_file was supplied; err_data is None otherwise
        err_iha = self.err_data[y, x]
        return iha, err_iha
    def get_sm(self, position):
        """
        Return the scintillation measure for a given location on the sky.
        Units are kpc m^{-20/3}
        :param position: astropy.coordinates.SkyCoord
        :return: (sm2, err_sm2)
        """
        iha, err_iha = self.get_halpha(position)
        # Cordes2002 conversion from H-alpha intensity to SM
        sm2 = iha / 198 * self.t4 ** 0.9 * self.eps ** 2 / (1 + self.eps ** 2) * self.lo ** (-2 / 3)
        err_sm2 = (err_iha / iha) * sm2
        return sm2, err_sm2
    def get_rdiff(self, position):
        """
        Calculate the diffractive scale at the given sky coord
        :param position: astropy.coordinates.SkyCoord
        :return: (r_diff, err) in m
        """
        sm2, err_sm2 = self.get_sm(position)
        # ^ units are kpc m^{-20/3}, but we want m^{-17/3} so we have to multiply by kpc below
        # r_diff as per Mcquart & Koay 2013, eq 7a.
        rdiff = (2 ** (2 - self.beta) * (
                np.pi * self.re ** 2 * (self.c / self.nu) ** 2 * self.beta) * sm2 * self.kpc *
                gamma(-self.beta / 2) / gamma(self.beta / 2)) ** (1 / (2 - self.beta))
        err_rdiff = abs((1 / (2 - self.beta)) * (err_sm2 / sm2) * rdiff)
        return rdiff, err_rdiff
    def get_rf(self, position):
        """
        Fresnel scale rf = rdiff * sqrt(4 pi nu tau), in m.
        :param position: astropy.coordinates.SkyCoord
        :return: (rf, err_rf)
        """
        rdiff, err_rdiff = self.get_rdiff(position)
        tau, err_tau = self.get_tau(position)
        rf = rdiff * (4.*np.pi*self.nu*tau) ** (1. / 2.)
        # bugfix: the quadrature sum of fractional errors was missing its
        # square root (compare get_rref/get_xi, which use np.sqrt)
        err_rf = np.sqrt((err_rdiff / rdiff)**2.0 + (1. / 2. * (err_tau / tau))**2.0) * rf
        return rf, err_rf
    def get_rref(self, position):
        """
        Calculate the refractive scale at the given sky coord
        (Narayan 1992 eq 4.2): rref = rf^2 / rdiff.
        :param position: astropy.coordinates.SkyCoord
        :return: (r_ref, err) in m
        """
        rdiff, err_rdiff = self.get_rdiff(position)
        rf, err_rf = self.get_rf(position)
        rref = (rf ** 2.0) / rdiff
        err_rref = np.sqrt((err_rdiff / rdiff)**2.0 + (2.0 * (err_rf / rf))**2.0) * rref
        return rref, err_rref
    def get_xi(self, position):
        """
        calculate the dimensionless scattering strength ξ = r_F / r_diff
        (compare Narayan 1992 eq 3.5 with Walker 1998 eq 6).
        :param position: astropy.coordinates.SkyCoord
        :return: (xi, err_xi)
        """
        rdiff, err_rdiff = self.get_rdiff(position)
        rf, err_rf = self.get_rf(position)
        xi = rf / rdiff
        err_xi = np.sqrt((err_rdiff / rdiff)**2.0 + (err_rf / rf)**2.0) * xi
        return xi, err_xi
    def get_theta(self, position):
        """
        calculate the size of the scattering disk for a given sky coord
        (Narayan 1992 eq 4.10 and discussion immediately prior).
        :param position: astropy.coordinates.SkyCoord
        :return: (theta, err_theta) in degrees
        """
        rdiff, err_rdiff = self.get_rdiff(position)
        theta = np.degrees((self.c / (2.*np.pi*self.nu)) / rdiff)
        err_theta = theta * np.degrees(err_rdiff / rdiff)
        return theta, err_theta
    def get_mold(self, position, ssize=0.):
        """
        Older form of the modulation-index calculation, kept for reference.
        :param position: astropy.coordinates.SkyCoord
        :param ssize: source size in deg
        :return: (m, err_m)
        """
        xi, err_xi = self.get_xi(position)
        m = xi ** (-1. / 3.)
        err_m = (1. / 3.) * (err_xi / xi) * m
        theta, err_theta = self.get_theta(position)
        large = np.where(ssize > theta)
        if len(large[0]) >= 1:
            m[large] *= (theta[large] / ssize[large]) ** (7. / 6.)
            err_m[large] = np.sqrt(err_m[large]**2.0 + ((7. / 6.) * (err_theta[large] / theta[large]))**2.0) * m[large]
        return m, err_m
    def get_m(self, position, ssize=0.):
        """
        Modulation index, suppressed for sources larger than the scattering disk.
        :param position: astropy.coordinates.SkyCoord
        :param ssize: source size in deg (scalar or per-position array)
        :return: (m, err_m), dimensionless
        """
        xi, err_xi = self.get_xi(position)
        m = xi ** (-1. / 3.)
        err_m = (1. / 3.) * (err_xi / xi) * m
        theta, err_theta = self.get_theta(position)
        large = np.where(ssize > theta)
        # bugfix: was `> 1`, which skipped the correction when exactly one
        # source was resolved (get_mold already used >= 1)
        if len(large[0]) >= 1:
            m[large] = m[large] * (theta[large] / ssize[large]) ** (7. / 6.)
            err_m[large] = np.sqrt((err_m[large]/m[large])**(2.0) + ((7. / 6.) * (err_theta[large] / theta[large]))**2.) * m[large]
        return m, err_m
    def get_timescale(self, position, ssize=0.):
        """
        calculate the refractive timescale using parameter ξ for a given sky coord
        timescale is in years
        :param position: astropy.coordinates.SkyCoord
        :param ssize: source size in deg (scalar or per-position array)
        :return: (tref, err_tref) in years
        """
        xi, err_xi = self.get_xi(position)
        rf, err_rf = self.get_rf(position)
        tref = rf * xi / self.v / self.seconds_per_year
        err_tref = np.sqrt((err_xi / xi)**2.0 + (err_rf / rf)**2.0) * tref
        # timescale is longer for 'large' sources
        theta, err_theta = self.get_theta(position)
        # broadcast so scalar and per-source array sizes are handled alike
        ssize = np.zeros(len(tref)) + ssize
        large = np.where(ssize > theta)
        # bugfix: was `> 1` (single-source case skipped) and scaled by the
        # whole ssize array instead of ssize[large]
        if len(large[0]) >= 1:
            tref[large] *= ssize[large] / theta[large]
            err_tref[large] = np.sqrt((err_tref[large]/tref[large])**2. + (err_theta[large] / theta[large])**2.0) * tref[large]
        return tref, err_tref
    def get_rms_var(self, position, ssize=0., nyears=1):
        """
        calculate the expected modulation index observed when measured on nyears timescales
        at a given sky coord
        :param position: astropy.coordinates.SkyCoord
        :param nyears: timescale of interest
        :param ssize: source size in deg
        :return: (m, err_m)
        """
        tref, err_tref = self.get_timescale(position, ssize=ssize)
        m, err_m = self.get_m(position, ssize=ssize)
        # observations shorter than the refractive timescale only see a
        # fraction of the full modulation
        short = np.where(nyears < tref)
        # bugfix: was `> 1`, which skipped the single-source case
        if len(short[0]) >= 1:
            m[short] *= (nyears / tref[short])
            err_m[short] = np.sqrt((err_m[short]/m[short])**2. + (err_tref[short] / tref[short])**2.) * m[short]
        return m, err_m
    def get_all(self, position, ssize=0):
        """Convenience wrapper returning every quantity (with errors) at once."""
        Ha, err_Ha = self.get_halpha(position)
        xi, err_xi = self.get_xi(position)
        theta, err_theta = self.get_theta(position)
        sm, err_sm = self.get_sm(position)
        m, err_m = self.get_m(position, ssize)
        t0, err_t0 = self.get_timescale(position)
        rms, err_rms = self.get_rms_var(position, ssize)
        tau, err_tau = self.get_tau(position)
        return Ha, err_Ha, xi, err_xi, theta, err_theta, sm, err_sm, m, err_m, t0, err_t0, rms, err_rms, tau, err_tau
def test_all_params():
    """Smoke-test every SM quantity for a single sky position."""
    print("Testing with single positions")
    # original map
    #sm = SM(os.path.join('data', 'Halpha_map.fits'), os.path.join('data', 'Halpha_error.fits'), nu=1e8)
    # new map
    sm = SM(os.path.join('data', 'Ha_map_new.fits'), os.path.join('data', 'Ha_err_new.fits'), nu=1e8)
    pos = SkyCoord([0], [0], unit=(u.hour, u.degree))
    print("Hα = {0}".format(sm.get_halpha(pos)))
    print("Tau = {0}".format(sm.get_tau(pos)))
    print("ξ = {0}".format(sm.get_xi(pos)))
    print("m = {0}".format(sm.get_m(pos)))
    print("sm = {0} (m^-17/3)".format(sm.get_sm(pos)[0]*sm.kpc))
    print("t0 = {0} (sec)".format(sm.get_timescale(pos)))
    print("r_diff = {0} (m)".format(sm.get_rdiff(pos)))
    print("r_ref = {0} (m)".format(sm.get_rref(pos)))
    # bugfix: get_rf was printed as a bound-method object instead of being called
    print("r_F = {0} (m)".format(sm.get_rf(pos)))
    print("rms = {0}".format(sm.get_rms_var(pos)))
    print("theta = {0} (rad)".format(np.radians(sm.get_theta(pos))))
gl=np.arange(0,360,1)
def test_multi_pos():
    """Smoke-test the SM quantities with an array-valued SkyCoord."""
    print("Testing with list of positions")
    # original map
    # sm = SM(os.path.join('data', 'Halpha_map.fits'), os.path.join('data', 'Halpha_error.fits'), nu=1e8)
    # new map
    sm = SM(os.path.join('data', 'Ha_map_new.fits'), os.path.join('data', 'Ha_err_new.fits'), nu=1e8)
    pos = SkyCoord([0, 4, 8, 12, 16, 20]*u.hour, [-90, -45, 0, 45, 90, -26]*u.degree)
    print("Hα = {0}".format(sm.get_halpha(pos)))
    print("Tau = {0}".format(sm.get_tau(pos)))
    print("ξ = {0}".format(sm.get_xi(pos)))
    print("m = {0}".format(sm.get_m(pos)))
    print("sm = {0}".format(sm.get_sm(pos)))
    print("t0 = {0}".format(sm.get_timescale(pos)))
    print("rms = {0}".format(sm.get_rms_var(pos)))
    print("theta = {0}".format(sm.get_theta(pos)))
def write_multi_pos():
    """Evaluate the SM quantities on a coarse all-sky grid and write a CSV."""
    RA = []
    DEC = []
    mult = 1./10.  # grid step of 10 degrees
    for i in range(0, int(360*mult)):
        for j in range(int(-90*mult), int(90*mult)):
            # bugfix: np.float was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin float is the documented replacement
            RA.append(float(i*(1./mult)))
            DEC.append(float(j*(1./mult)))
    # Original map
    #sm = SM(os.path.join('data', 'Halpha_map.fits'), os.path.join('data', 'Halpha_error.fits'), nu=1e8)
    # new map
    sm = SM(os.path.join('data', 'Ha_map_new.fits'), os.path.join('data', 'Ha_err_new.fits'), nu=1e8)
    pos = SkyCoord(RA * u.degree, DEC * u.degree)
    mvar = int(1)
    Ha, err_Ha = sm.get_halpha(pos)
    mod, err_m = sm.get_m(pos)
    t0, err_t0 = sm.get_timescale(pos)
    theta, err_theta = sm.get_theta(pos)
    tau, err_tau = sm.get_tau(pos)
    datatab1 = Table()
    datafile = 'tau_skytest_m{0}.csv'.format(mvar)
    ### DATA FILE: one row per grid point
    datatab1.add_column(Column(data=RA, name='RA'))
    datatab1.add_column(Column(data=DEC, name='DEC'))
    datatab1.add_column(Column(data=Ha, name='H_Alpha'))
    datatab1.add_column(Column(data=err_Ha, name='H_Alpha err'))
    datatab1.add_column(Column(data=tau, name='Tau'))
    datatab1.add_column(Column(data=err_tau, name='Tau err'))
    datatab1.add_column(Column(data=mod, name='Modulation'))
    datatab1.add_column(Column(data=err_m, name='Modulation err'))
    datatab1.add_column(Column(data=t0, name='Timescale'))
    datatab1.add_column(Column(data=err_t0, name='Timescale err'))
    datatab1.add_column(Column(data=theta, name='Theta'))
    datatab1.add_column(Column(data=err_theta, name='Theta err'))
    datatab1.write(datafile, overwrite=True)
def test_poss():
    """Spot-check for NaNs in r_diff over many random sky positions."""
    r = np.arange(0, 360, 1)
    d = np.arange(-90, 90, 1)
    # 10000 random (RA, DEC) draws on a 1-degree grid
    RA = np.random.choice(r, size=10000)
    DEC = np.random.choice(d, size=10000)
    sm = SM(os.path.join('data', 'Ha_map_new.fits'), os.path.join('data', 'Ha_err_new.fits'), nu=1e8)
    pos = SkyCoord(RA * u.degree, DEC * u.degree)
    print('rdiff', np.where(np.isnan(sm.get_rdiff(pos)) == True))
    # the string below is disabled code kept for reference (a no-op statement)
    """ Ha, err_Ha, xi, err_xi, theta, err_theta, sm,err_sm, m, err_m, t0, err_t0, rms, err_rms, tau, err_tau= sm.get_all(pos)
    print('ha',np.where(np.isnan(Ha)==True))
    print('tau',np.where(np.isnan(tau)==True))
    print('xi',np.where(np.isnan(xi)==True))
    print('m',np.where(np.isnan(m)==True))
    #print('sm',np.where(np.isnan(theta)==True))
    print('time',np.where(np.isnan(t0)==True))
    print('rms',np.where(np.isnan(rms)==True))
    print('theta',np.where(np.isnan(theta)==True))
    """
if __name__ == "__main__":
#test_all_params()
#test_multi_pos()
#write_multi_pos()
test_poss()
#sm = SM(os.path.join('data', 'Ha_map_new.fits'), os.path.join('data', 'Ha_err_new.fits'), nu=1e8)
#sm2 = SM(os.path.join('data', 'Halpha_map.fits'), os.path.join('data', 'Halpha_error.fits'), nu=1e8)
#pos = SkyCoord([0, 4, 8, 12, 16, 20]*u.hour, [-90, -45, 0, 45, 90, -26]*u.degree)
#pos = SkyCoord([0, 4, 8, 12, 16, 20]*u.hour, [-90, -45, 0, 45, 90, -26]*u.degree)
#pos2 = SkyCoord([0, 4, 8, 12, 16, 20]*u.degree, [-90, -45, 0, 45, 90, -26]*u.degree)
| 15,788 | 38.972152 | 131 |
py
|
RISS19
|
RISS19-master/lib/SM2017.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division
"""
NE2001 for extragalactic work.
"""
from astropy.constants import kpc, c, au
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.wcs import WCS
import astropy.units as u
import numpy as np
import os
import logging
from scipy.special import gamma
__author__ = ['Paul Hancock', 'Elliott Charlton']
__date__ = '2020-03-06'
SECONDS_PER_YEAR = 3600 * 24 * 365.25
class SM(object):
"""
:param ha_file:
:param err_file:
:param nu: freq in Hz
:param d: distance in kpc
:param v: in m/s
:param log:
"""
    def __init__(self, ha_file, err_file=None, nu=185e6, log=None, d=None, v=10e3):
        """Set up constants, store the file paths, and load the maps."""
        if log is None:
            logging.basicConfig(format="%(module)s:%(levelname)s %(message)s")
            self.log = logging.getLogger("SM2017")
            self.log.setLevel(logging.DEBUG)
        else:
            self.log = log
        # define some of the constants that we need
        # i'm saving these here to allow for different instances to have different values
        self.nu = nu  # Hz
        self.kpc = kpc.value  # in m
        self.t4 = 0.8  # t/1e4 K
        self.lo = 1e18/(self.kpc*1e-3)  # 1e18m expressed in pc (also armstrong_electron_1985 !)
        self.eps = 1
        self.D = d  # kpc - distance to the screen; None => compute per sight line
        self.c = c.value  # speed of light, m/s
        self.beta = 11/3  # Kolmogorov spectral index (true division via __future__ import)
        self.re = 2.817e-15  # classical electron radius, m
        self.v = v  # relative velocity of source/observer in m/s
        self.file = ha_file
        self.err_file = err_file
        self._load_file()
def _load_file(self):
self.hdu = fits.getheader(self.file, ignore_missing_end=True)
self.wcs = WCS(self.hdu)
self.data = fits.open(self.file, memmap=True, ignore_missing_end=True)[0].data
if self.err_file:
self.err_hdu = fits.getheader(self.err_file, ignore_missing_end=True)
self.err_wcs = WCS(self.err_hdu)
self.err_data = fits.open(self.err_file, memmap=True, ignore_missing_end=True)[0].data
else:
self.err_hud = self.err_wcs = self.err_data = None
return
def get_distance(self, position):
"""
:param position: sky position
:return: Distance to scattering screen in kpc
"""
if self.D is not None:
return np.ones(np.shape(position))*self.D
# TODO: sort out gal_r and find a reference for it
gal_r = 16.2 # kpc
sun_r = 8.09 # kpc
gal_h = 1. # kpc
theta = position.galactic.l.radian # angle from the GC along the plane
phi = position.galactic.b.radian # angle from the GC perp to the plane
far_edge = sun_r*np.cos(theta) + np.sqrt(gal_r**2 - (sun_r*np.sin(theta))**2)
top = (gal_h/2.) / np.abs(np.sin(phi))
mask = np.where(top > far_edge)
screen_dist = top
screen_dist[mask] = far_edge[mask]
return screen_dist/2.
def get_rf(self, position):
"""
:param position: Sky position
:return: Fresnel scale in m
"""
return np.sqrt(self.c * self.get_distance(position) * self.kpc / (2 * np.pi * self.nu))
    def get_halpha(self, position):
        """
        Return the Halpha for a given location on the sky.
        :param position: astropy.coordinates.SkyCoord (array-valued)
        :return: (iha, err_iha) -- map value and map error at each position
        """
        # The coordinates we request need to be the same as that in the WCS header
        # for the files in this repo, this currently means galactic coordinates.
        x, y = zip(*self.wcs.all_world2pix(list(zip(position.galactic.l.degree, position.galactic.b.degree)), 0))
        # floor to integer pixel indices and clip to the map bounds
        x = np.int64(np.floor(x))
        x = np.clip(x, 0, self.hdu['NAXIS1'])
        y = np.int64(np.floor(y))
        y = np.clip(y, 0, self.hdu['NAXIS2'])
        iha = self.data[y, x]
        # NOTE(review): assumes err_file was supplied; err_data is None otherwise
        err_iha = self.err_data[y, x]
        return iha, err_iha
def get_sm(self, position):
"""
Return the scintillation measure for a given location on the sky.
Units are kpc m^{-20/3}
:param position: astropy.coordinates.SkyCoord
:return:
"""
iha, err_iha = self.get_halpha(position)
# Cordes2002
sm2 = iha / 198 * self.t4 ** 0.9 * self.eps ** 2 / (1 + self.eps ** 2) * self.lo ** (-2 / 3)
err_sm2 = (err_iha / iha) * sm2
return sm2, err_sm2
    def get_rdiff(self, position):
        """
        Calculate the diffractive scale at the given sky coord
        :param position: astropy.coordinates.SkyCoord
        :return: (r_diff, err_r_diff) in m
        """
        sm2, err_sm2 = self.get_sm(position)
        # ^ units are kpc m^{-20/3}, but we want m^{-17/3} so we have to multiply by kpc below
        # r_diff as per Mcquart & Koay 2013, eq 7a.
        rdiff = (2 ** (2 - self.beta) * (
                np.pi * self.re ** 2 * (self.c / self.nu) ** 2 * self.beta) * sm2 * self.kpc *
                gamma(-self.beta / 2) / gamma(self.beta / 2)) ** (1 / (2 - self.beta))
        # propagate the fractional SM error through the power law
        err_rdiff = abs((1 / (2 - self.beta)) * (err_sm2 / sm2) * rdiff)
        return rdiff, err_rdiff
def get_xi(self, position):
"""
calculate the parameter ξ for a given sky coord
Parameter is dimensionless
:param position: astropy.coordinates.SkyCoord
:return: parameter ξ
"""
rdiff, err_rdiff = self.get_rdiff(position)
# Narayan 1992, uses r_F/r_diff = \xi without explicitly stating that this is being done
# Compare Narayan 1992 eq 3.5 with Walker 1998 eq 6
rf = self.get_rf(position)
xi = rf / rdiff
err_xi = (err_rdiff/rdiff)*xi
return xi, err_xi
def get_theta(self, position):
"""
calculate the size of the scattering disk for a given sky coord
:param position: astropy.coordinates.SkyCoord
:return: scattering disk in degrees
"""
# See Narayan 1992 eq 4.10 and discussion immediately prior
rdiff, err_rdiff = self.get_rdiff(position)
theta = np.degrees((self.c/self.nu)/(2.* np.pi*rdiff))
err_theta = np.degrees(err_rdiff / rdiff)*theta
return theta, err_theta
    def get_m(self, position, ssize=0):
        """
        calculate the modulation index using parameter ξ for a given sky coord
        :param position: astropy.coordinates.SkyCoord
        :param ssize: source size in deg
        :return: (m, err_m) modulation index and its uncertainty
        """
        # Broadcast the (possibly scalar) source size to one value per position.
        ssize = np.zeros(len(position)) + ssize
        xi, err_xi = self.get_xi(position)
        # Point-source modulation index m = xi^(-1/3)
        # (cf. the Narayan 1992 / Walker 1998 references cited in get_xi).
        m = xi ** (-1. / 3.)
        err_m = (1. / 3.) * (err_xi / xi) * m
        theta, err_theta = self.get_theta(position)
        # Sources larger than the scattering disk have their modulation quenched.
        mask = np.where(ssize > theta)
        m[mask] = m[mask] * (theta[mask] / ssize[mask]) ** (7. / 6.)
        err_m[mask] = np.sqrt((err_m[mask]/m[mask]) ** (2.0) + ((7. / 6.) * (err_theta[mask] / theta[mask])) ** 2.) * m[mask]
        return m, err_m
def get_timescale(self, position, ssize=0):
"""
calculate the refractive timescale using parameter ξ for a given sky coord
timescale is in years
:param position: astropy.coordinates.SkyCoord
:param ssize: source size in deg
:return: timescale in years
"""
xi, err_xi = self.get_xi(position)
ssize = np.zeros(len(position)) + ssize
rf = self.get_rf(position)
tref = rf * xi / self.v / SECONDS_PER_YEAR
err_tref = (err_xi/xi)*tref
ssize = np.zeros(len(xi)) + ssize
# timescale is longer for 'large' sources
theta, err_theta = self.get_theta(position)
large = np.where(ssize > theta)
tref[large] = tref[large] * ssize[large] / theta[large]
err_tref[large] = tref[large] * np.sqrt((err_tref[large]/tref[large])**2. + (err_theta[large]/theta[large])**2.)
return tref, err_tref
    def get_rms_var(self, position, ssize=0, nyears=1):
        """
        calculate the expected modulation index observed when measured on nyears timescales
        at a given sky coord
        :param position: astropy.coordinates.SkyCoord
        :param ssize: source size in deg
        :param nyears: timescale of interest
        :return: (m, err_m) fractional variability and its uncertainty
        """
        # Broadcast the (possibly scalar) source size to one value per position.
        ssize = np.zeros(len(position)) + ssize
        tref, err_tref = self.get_timescale(position, ssize=ssize)
        m, err_m = self.get_m(position, ssize=ssize)
        # When the refractive timescale exceeds the observing span, only a
        # fraction of the full modulation is seen; scale m down linearly.
        short = np.where(nyears < tref)
        m[short] *= (nyears / tref[short])
        # NOTE(review): the relative error below uses m *after* rescaling —
        # confirm this is the intended propagation.
        err_m[short] = np.sqrt((err_m[short]/m[short]) ** 2. + (err_tref[short] / tref[short]) ** 2.) * m[short]
        return m, err_m
def get_vo(self, position):
"""
Calculate the transition frequency at a given sky location
:param position:
:return: Transition frequency in GHz
"""
sm2, _ = self.get_sm(position)
pow = (1 / (2 - self.beta))
A = (2 ** (2 - self.beta) * (np.pi * self.re ** 2 * self.beta) * sm2 * self.kpc *
gamma(-self.beta / 2) / gamma(self.beta / 2)) ** pow
D = self.get_distance(position)
vo = self.c * (np.sqrt(D*self.kpc/(2*np.pi)) / A)**(1/(0.5 - 2*pow))
return vo/1e9
def test_all_params():
    """Smoke-test every SM quantity at a single sky position, printing results."""
    print("Testing with single positions")
    #original map
    #sm = SM(os.path.join('data', 'Halpha_map.fits'), os.path.join('data', 'Halpha_error.fits'), nu=1e8)
    #new map
    sm = SM(os.path.join('data', 'Ha_map_new.fits'), os.path.join('data', 'Ha_err_new.fits'), nu=1e8)
    pos = SkyCoord([0], [0], unit=(u.hour, u.degree))
    print("Hα = {0}".format(sm.get_halpha(pos)))
    print("ξ = {0}".format(sm.get_xi(pos)))
    print("m = {0}".format(sm.get_m(pos)))
    print("sm = {0} (m^-17/3)".format(sm.get_sm(pos)[0]*sm.kpc))
    print("t0 = {0} (sec)".format(sm.get_timescale(pos)))
    print("r_diff = {0} (m)".format(sm.get_rdiff(pos)))
    # NOTE(review): get_rref is not among the SM methods visible in this chunk —
    # confirm it is defined elsewhere on the class.
    print("r_ref = {0} (m)".format(sm.get_rref(pos)))
    print("r_F = {0} (m)".format(sm.get_rf(pos)))
    print("rms = {0}".format(sm.get_rms_var(pos)))
    print("theta = {0} (rad)".format(np.radians(sm.get_theta(pos))))
    print("nu_0 = {0} (GHz)".format(sm.get_vo(pos)))
    print("Distance = {0}".format(sm.get_distance(pos)))
def test_multi_pos():
    """Smoke-test the SM quantities with a vector of sky positions."""
    print("Testing with list of positions")
    # original map
    # sm = SM(os.path.join('data', 'Halpha_map.fits'), os.path.join('data', 'Halpha_error.fits'), nu=1e8)
    # new map
    sm = SM(os.path.join('data', 'Ha_map_new.fits'), os.path.join('data', 'Ha_err_new.fits'), nu=1e8)
    # Positions chosen to span both celestial poles and the equator.
    pos = SkyCoord([0, 4, 8, 12, 16, 20]*u.hour, [-90, -45, 0, 45, 90, -26]*u.degree)
    print("Hα = {0}".format(sm.get_halpha(pos)))
    print("ξ = {0}".format(sm.get_xi(pos)))
    print("m = {0}".format(sm.get_m(pos)))
    print("sm = {0}".format(sm.get_sm(pos)))
    print("t0 = {0}".format(sm.get_timescale(pos)))
    print("rms = {0}".format(sm.get_rms_var(pos)))
    print("theta = {0}".format(sm.get_theta(pos)))
    print("Distance = {0}".format(sm.get_distance(pos)))
def write_multi_pos():
    """Evaluate SM quantities on a coarse RA/Dec grid and write them to a CSV table."""
    from astropy.table import Table, Column
    # Grid of positions: RA 0-359 (twice) paired with four sweeps of Dec -90..89.
    RA=np.append(np.arange(0,360),np.arange(0,360))
    DEC=np.append(np.append(np.append(np.arange(-90,90),np.arange(-90,90)),np.arange(-90,90)),np.arange(-90,90))
    #Original map
    #sm = SM(os.path.join('data', 'Halpha_map.fits'), os.path.join('data', 'Halpha_error.fits'), nu=1e8)
    sm = SM(os.path.join('data', 'Ha_map_new.fits'), os.path.join('data', 'Ha_err_new.fits'), nu=1e8)
    pos = SkyCoord(RA * u.degree, DEC * u.degree)
    # mvar only tags the output filename.
    mvar=int(1)
    Ha,err_Ha=sm.get_halpha(pos)
    mod,err_m=sm.get_m(pos)
    t0,err_t0=sm.get_timescale(pos)
    theta,err_theta=sm.get_theta(pos)
    #tau,err_tau=sm.get_tau(pos)
    datatab1 = Table()
    datafile ='SM2017_test_m{0}.csv'.format(mvar)
    ### DATA FILE
    datatab1.add_column(Column(data=RA, name='RA'))
    datatab1.add_column(Column(data=DEC, name='DEC'))
    datatab1.add_column(Column(data=Ha, name='H_Alpha'))
    datatab1.add_column(Column(data=err_Ha, name='H_Alpha err'))
    #datatab1.add_column(Column(data=tau, name='Tau'))
    #datatab1.add_column(Column(data=err_tau, name='Tau err'))
    datatab1.add_column(Column(data=mod, name='Modulation'))
    datatab1.add_column(Column(data=err_m, name='Modulation err'))
    datatab1.add_column(Column(data=t0, name='Timescale'))
    datatab1.add_column(Column(data=err_t0, name='Timescale err'))
    datatab1.add_column(Column(data=theta, name='Theta'))
    datatab1.add_column(Column(data=err_theta, name='Theta err'))
    datatab1.write(datafile, overwrite=True)
def test_get_distance_empty_mask():
    """Exercise get_distance with galactic-frame positions chosen so its internal mask is empty."""
    print("Testing get_distance where the mask is empty")
    sm = SM(os.path.join('data', 'Halpha_map.fits'), os.path.join('data', 'Halpha_error.fits'))
    # Galactic coordinates near the plane and at the poles.
    pos = SkyCoord([0, 0, 0, 12, 16, 20]*u.degree, [0.5, 1, 1.2, 90, 90, -90]*u.degree, frame='galactic')
    print("Hα = {0}".format(sm.get_halpha(pos)))
    print("ξ = {0}".format(sm.get_xi(pos)))
    print("m = {0}".format(sm.get_m(pos)))
    print("sm = {0}".format(sm.get_sm(pos)))
    print("t0 = {0}".format(sm.get_timescale(pos)))
    print("rms = {0}".format(sm.get_rms_var(pos)))
    print("theta = {0}".format(sm.get_theta(pos)))
    print("Distance = {0}".format(sm.get_distance(pos)))
# Run the smoke tests when executed as a script.
if __name__ == "__main__":
    test_all_params()
    test_multi_pos()
    test_get_distance_empty_mask()
| 13,420 | 39.303303 | 125 |
py
|
qfactor
|
qfactor-master/setup.py
|
from os import path
from setuptools import setup, find_namespace_packages

# Use README for long description.
root_dir = path.abspath( path.dirname( __file__ ) )
readme_path = path.join( root_dir, "README.md" )
with open( readme_path, encoding = "utf-8" ) as f:
    long_description = f.read()

# use requirements.txt for requirements.
req_path = path.join( root_dir, "requirements.txt" )
with open( req_path ) as f:
    requirements = f.read().split( '\n' )
    # Drop the empty entry produced by the trailing newline.
    requirements.remove( '' )

# Standard setuptools packaging metadata for the qfactor distribution.
setup( name = "qfactor",
       version = "1.0.1",
       description = "Quantum Fast Circuit Optimizer",
       long_description = long_description,
       long_description_content_type = "text/markdown",
       url = "https://github.com/edyounis/qfactor",
       author = "Ed Younis",
       author_email = "[email protected]",
       classifiers = [
           "Development Status :: 5 - Production/Stable",
           "Environment :: Console",
           "Intended Audience :: Developers",
           "Intended Audience :: Science/Research",
           "Operating System :: OS Independent",
           "Programming Language :: Python :: 3 :: Only",
           "Programming Language :: Python :: 3.5",
           "Programming Language :: Python :: 3.6",
           "Programming Language :: Python :: 3.7",
           "Topic :: Scientific/Engineering",
           "Topic :: Scientific/Engineering :: Mathematics",
           "Topic :: Scientific/Engineering :: Physics",
           "Topic :: Software Development :: Compilers"
       ],
       keywords = "quantum circuit optimization optimizer",
       project_urls = {
           "Bug Tracker": "https://github.com/edyounis/qfactor/issues",
           "Source Code": "https://github.com/edyounis/qfactor"
       },
       # Exclude test and example trees from the installed distribution.
       packages = find_namespace_packages( exclude = [ "tests*",
                                                       "examples*" ] ),
       install_requires = requirements,
       python_requires = ">=3.5, <4",
     )
| 1,980 | 36.377358 | 71 |
py
|
qfactor
|
qfactor-master/benchmarks/fixed_time/param_problem.py
|
import numpy as np
import itertools as it
from scipy.stats import unitary_group
import qfactor
from qfactor import CnotGate, Gate, optimize
from qfactor.tensors import CircuitTensor
import qsearch
import qfast
from qfast.decomposition.models.fixedmodel import FixedModel
from qfast.decomposition.optimizers.lbfgs import LBFGSOptimizer
class ParamOptimizationProblem():
    """
    A randomly-generated circuit-instantiation problem, expressible in the
    input formats of qfactor, qsearch, and qfast for head-to-head benchmarks.
    """

    def __init__ ( self, gate_size, num_qubits, locations, native ):
        """
        Args:
            gate_size (int): Number of qubits each generic gate acts on.
            num_qubits (int): Total number of qubits in the circuit.
            locations: Iterable of qubit tuples, one per gate in the ansatz.
            native (bool): If True, use a CNOT + single-qubit-gate structure.
        """
        self.gate_size = gate_size
        self.num_qubits = num_qubits
        self.locations = [ tuple( [ int( x ) for x in location ] )
                           for location in locations ]
        self.native = native
        # The target is the unitary implemented by a randomly-initialized
        # instance of the ansatz, so an exact solution always exists.
        self.target = CircuitTensor( np.identity( 2 ** self.num_qubits ),
                                     self.get_qfactor() ).utry
        # Scratch space for benchmark results (retry counts, timings, ...).
        self.data = {}

    @staticmethod
    def generate_circuit ( gate_size, num_qubits, length ):
        """Randomly sample `length` gate locations and build a problem instance."""
        native = gate_size <= 1
        # Native circuits are built from two-qubit (CNOT) blocks.
        gate_size = 2 if native else gate_size
        locations = list( it.combinations( range( num_qubits ), gate_size ) )
        locations = np.array( locations )
        idxs = np.random.choice( len( locations ), length, replace = True )
        locations = locations[ idxs ]
        return ParamOptimizationProblem( gate_size, num_qubits, locations, native )

    def get_qfactor ( self ):
        """Build the circuit in qfactor's format with fresh random unitaries."""
        circuit = []

        if self.native:
            # CNOT followed by a random single-qubit gate on each operand.
            for pair in self.locations:
                circuit.append( CnotGate( pair[0], pair[1] ) )
                circuit.append( Gate( unitary_group.rvs( 2 ), ( pair[0], ) ) )
                circuit.append( Gate( unitary_group.rvs( 2 ), ( pair[1], ) ) )
            return circuit

        for location in self.locations:
            circuit.append( Gate( unitary_group.rvs( 2 ** self.gate_size ),
                                  location ) )
        return circuit

    def get_qsearch ( self ):
        """Build the equivalent qsearch circuit; returns None unless native."""
        if not self.native:
            return None

        steps = []
        u30 = qsearch.gates.U3Gate()
        u31 = qsearch.gates.U3Gate()
        for pair in self.locations:
            min_idx = min( pair )
            max_idx = max( pair )
            # CNOT acting inside the [min_idx, max_idx] window.
            cnot = qsearch.gates.NonadjacentCNOTGate( max_idx - min_idx + 1,
                                                      pair[0] - min_idx,
                                                      pair[1] - min_idx )
            if max_idx - min_idx == 1:
                u_layer = qsearch.gates.KroneckerGate( u30, u31 )
            else:
                # Pad the qubits between the two operands with identities.
                mid_layer = qsearch.gates.IdentityGate( max_idx - min_idx - 1 )
                u_layer = qsearch.gates.KroneckerGate( u30, mid_layer, u31 )
            p_layer = qsearch.gates.ProductGate( cnot, u_layer )
            # Pad above and below the active window to full circuit width.
            up = qsearch.gates.IdentityGate( min_idx )
            down = qsearch.gates.IdentityGate( self.num_qubits - max_idx - 1 )
            steps.append( qsearch.gates.KroneckerGate( up, p_layer, down ) )
        return qsearch.gates.ProductGate( *steps )

    def get_qfast ( self ):
        """Build a qfast FixedModel over the same gate structure."""
        return FixedModel( self.target, self.gate_size, [],
                           LBFGSOptimizer(), success_threshold = 1e-8,
                           structure = self.locations )
| 3,199 | 36.647059 | 83 |
py
|
qfactor
|
qfactor-master/benchmarks/fixed_time/gen_scripts.py
|
import os
import csv

# Template for the generated driver scripts.  The command line must match the
# argparse interface of fixed_time_exp.py:
#   fixed_time_exp.py numqubits gatesize length timeout [--testqfactor]
script_fmt = ( "NUMQUBITS=%d\n"
               "GATESIZE=%d\n"
               "DEPTH=%d\n"
               "TIMEOUT=%d\n"
               "TESTQFACTOR=%s\n"
               # BUG FIX: the original command referenced an undefined $POINTS
               # variable and never passed $TESTQFACTOR; pass the variables the
               # experiment script actually expects, in the order it expects.
               "python fixed_time_exp.py $NUMQUBITS $GATESIZE $DEPTH $TIMEOUT $TESTQFACTOR\n" )

script_name_fmt = "run_%s_%dq_%dg_%dd_%ds.sh"

timeout = 7200

def _write_script ( script_name, script ):
    """Write a script to disk and mirror its read bits into execute bits."""
    with open( script_name, "w" ) as f:
        f.write( script )
    mode = os.stat( script_name ).st_mode
    mode |= (mode & 0o444) >> 2
    os.chmod( script_name, mode )

for numqubits in [4, 5, 6, 7, 8]:
    for gatesize in [1, 2, 3]:
        for depth in [5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]:
            # qfactor variant of the experiment.
            script = script_fmt % ( numqubits, gatesize, depth, timeout, "--testqfactor" )
            script_name = script_name_fmt % ( "qfactor", numqubits, gatesize, depth, timeout )
            _write_script( script_name, script )

            # Comparison variant (qsearch for native gates, qfast otherwise).
            script = script_fmt % ( numqubits, gatesize, depth, timeout, "\"\"" )
            programname = "qfast" if gatesize > 1 else "qsearch"
            script_name = script_name_fmt % ( programname, numqubits, gatesize, depth, timeout )
            _write_script( script_name, script )
| 1,526 | 35.357143 | 96 |
py
|
qfactor
|
qfactor-master/benchmarks/fixed_time/fixed_time_exp.py
|
import pickle
import signal
import argparse
import numpy as np
import itertools as it
from timeit import default_timer as timer
from scipy.stats import unitary_group
import qfactor
from qfactor import CnotGate, Gate, optimize
from qfactor.tensors import CircuitTensor
import qsearch
import qfast
from qfast.decomposition.models.fixedmodel import FixedModel
from qfast.decomposition.optimizers.lbfgs import LBFGSOptimizer
from param_problem import ParamOptimizationProblem
def qfactor_solve ( pt ):
    """Repeatedly run qfactor on `pt` until the distance drops below 1e-8,
    recording the number of retries and the wall-clock time of each attempt
    in pt.data.  NOTE(review): loops forever if the threshold is never met;
    the caller is expected to bound it with a signal-based timeout."""
    dist = 1
    pt.data[ "retries" ] = 0
    pt.data[ "retry_times" ] = []
    while dist > 1e-8:
        start = timer()
        pt.data[ "retries" ] += 1
        # Each call re-randomizes the circuit's unitaries via get_qfactor().
        res = optimize( pt.get_qfactor(), pt.target, min_iters = 0 )
        dist = qfactor.get_distance( res, pt.target )
        end = timer()
        pt.data[ "retry_times" ].append( end - start )
def qfast_solve ( pt ):
    """Repeatedly run qfast's FixedModel on `pt` until the distance drops
    below 1e-8, recording retries and per-attempt wall-clock times in pt.data.
    NOTE(review): unbounded loop; caller bounds it with a signal timeout."""
    dist = 1
    pt.data[ "retries" ] = 0
    pt.data[ "retry_times" ] = []
    while dist > 1e-8:
        start = timer()
        pt.data[ "retries" ] += 1
        model = pt.get_qfast()
        model.optimize( fine = True )
        dist = model.distance()
        end = timer()
        pt.data[ "retry_times" ].append( end - start )
def qsearch_solve ( pt ):
    """Repeatedly run qsearch's least-squares solver on `pt` until the
    distance drops below 1e-8, recording retries and per-attempt wall-clock
    times in pt.data.  NOTE(review): unbounded loop; caller bounds it with a
    signal timeout."""
    solver = qsearch.solvers.LeastSquares_Jac_SolverNative()
    options = qsearch.options.Options()
    options.target = pt.target
    pt.data[ "retries" ] = 0
    dist = 1
    circ = pt.get_qsearch()
    pt.data[ "retry_times" ] = []
    while dist > 1e-8:
        start = timer()
        pt.data[ "retries" ] += 1
        U, xopts = solver.solve_for_unitary( circ, options )
        # Same normalized trace distance used elsewhere in these benchmarks.
        dist = 1 - ( np.abs( np.trace( pt.target.conj().T @ U ) ) / U.shape[0] )
        end = timer()
        pt.data[ "retry_times" ].append( end - start )
class TrialTerminatedException ( Exception ):
    """Raised by the signal handler to abort a trial on timeout or interrupt."""
def term_trial ( signal_number, frame ):
    """Signal handler: report why the trial ended, then abort it."""
    if signal_number == signal.SIGINT:
        msg = "Manually Interrupted"
    elif signal_number == signal.SIGALRM:
        msg = "Timed-out"
    else:
        msg = "Error"
    print( msg )
    raise TrialTerminatedException()
def run_benchmark ( num_qubits, gate_size, length, test_qfactor, timeout = 120*60 ):
    """Solve as many random problems as possible within `timeout` seconds
    using qfactor (if test_qfactor), qsearch (gate_size == 1) or qfast,
    then pickle the count and the problem objects to disk."""
    # Register Signal Handlers
    signal.signal( signal.SIGALRM, term_trial )
    signal.signal( signal.SIGINT, term_trial )

    # Set Seed
    np.random.seed( 21211411 )

    pts = []

    # Solve Param Problems until the alarm fires.
    num_solved = 0
    signal.alarm( timeout )
    try:
        while True:
            pt = ParamOptimizationProblem.generate_circuit( gate_size = gate_size,
                                                            num_qubits = num_qubits,
                                                            length = length )
            pts.append( pt )

            if test_qfactor:
                qfactor_solve( pt )
            elif gate_size == 1:
                qsearch_solve( pt )
            else:
                qfast_solve( pt )

            num_solved += 1
    except TrialTerminatedException:
        print( "Times Up! Solved: %d " % num_solved )

    # Save results; filename encodes the program and the problem parameters.
    filename = "%dq_%dg_%dd_%ds.dat" % ( num_qubits, gate_size, length, timeout )
    if test_qfactor:
        filename = "qfactor_" + filename
    elif gate_size == 1:
        filename = "qsearch_" + filename
    else:
        filename = "qfast_" + filename
    with open( filename, "wb" ) as f:
        pickle.dump( num_solved, f )
        pickle.dump( pts, f )
# Command-line entry point; argument order matches run_benchmark's signature.
if __name__ == "__main__":
    description_info = "Generate and optimize random circuits."
    parser = argparse.ArgumentParser( description = description_info )
    parser.add_argument( "numqubits", type = int,
                         help = "Total number of qubits." )
    parser.add_argument( "gatesize", type = int,
                         help = "Gate Size (1 for native gates)." )
    parser.add_argument( "length", type = int,
                         help = "Length of ansatz." )
    parser.add_argument( "timeout", type = int,
                         help = "Timeout in seconds for each trial." )
    parser.add_argument( "--testqfactor", action = "store_true",
                         help = "Test Qfactor or the other stuff." )
    args = parser.parse_args()
    run_benchmark( args.numqubits, args.gatesize, args.length, args.testqfactor, args.timeout )
| 4,407 | 28.386667 | 95 |
py
|
qfactor
|
qfactor-master/benchmarks/fixed_num_problems/gen_scripts.py
|
import os
import csv

# Template for the generated driver scripts; the placeholders are filled from
# the numeric columns of exp.csv in order.
script_fmt = ( "NUMQUBITS=%d\n"
               "GATESIZE=%d\n"
               "DEPTH=%d\n"
               "POINTS=%d\n"
               "TIMEOUT=%d\n"
               "python rand_seq.py $NUMQUBITS $GATESIZE $DEPTH $POINTS $TIMEOUT\n" )

script_name_fmt = "run_%dq_%dg_%dd_%dp.sh"

# Each data row of exp.csv describes one experiment configuration.
with open( "exp.csv", newline='' ) as csvfile:
    r = csv.reader( csvfile )
    for i, row in enumerate( r ):
        # Skip Header
        if i == 0:
            continue

        # Write script to file; the name omits the last column (timeout).
        script = script_fmt % tuple( [ int( x ) for x in row ] )
        script_name = script_name_fmt % tuple( [ int( x ) for x in row[:-1] ] )
        with open( script_name, "w" ) as f:
            f.write( script )

        # Make script executable (mirror read bits into execute bits).
        mode = os.stat( script_name ).st_mode
        mode |= (mode & 0o444) >> 2
        os.chmod( script_name, mode )
| 950 | 28.71875 | 84 |
py
|
qfactor
|
qfactor-master/benchmarks/fixed_num_problems/rand_seq.py
|
import pickle
import signal
import argparse
import numpy as np
import itertools as it
from timeit import default_timer as timer
from scipy.stats import unitary_group
import qfactor
from qfactor import CnotGate, Gate, optimize
from qfactor.tensors import CircuitTensor
import qsearch
import qfast
from qfast.decomposition.models.fixedmodel import FixedModel
from qfast.decomposition.optimizers.lbfgs import LBFGSOptimizer
np.random.seed( 21211411 )
class TrialTerminatedException ( Exception ):
    """Raised by the signal handler to abort a trial on timeout or interrupt."""
def term_trial ( signal_number, frame ):
    """Signal handler: report why the trial ended, then abort it."""
    if signal_number == signal.SIGINT:
        msg = "Manually Interrupted"
    elif signal_number == signal.SIGALRM:
        msg = "Timed-out"
    else:
        msg = "Error"
    print( msg )
    raise TrialTerminatedException()
class CircuitDataPoint():
    """
    A randomly-generated circuit-instantiation problem together with the
    retry counts/timings measured for qfactor, qsearch, and qfast.
    """

    def __init__ ( self, gate_size, num_qubits, locations, native ):
        """
        Args:
            gate_size (int): Number of qubits each generic gate acts on.
            num_qubits (int): Total number of qubits in the circuit.
            locations: Iterable of qubit tuples, one per gate in the ansatz.
            native (bool): If True, use a CNOT + single-qubit-gate structure.
        """
        self.gate_size = gate_size
        self.num_qubits = num_qubits
        self.locations = [ tuple( [ int( x ) for x in location ] )
                           for location in locations ]
        self.native = native
        # The target is the unitary implemented by a randomly-initialized
        # instance of the ansatz, so an exact solution always exists.
        self.target = CircuitTensor( np.identity( 2 ** self.num_qubits ),
                                     self.get_qfactor() ).utry
        # Scratch space for benchmark results (retry counts, timings, ...).
        self.data = {}

    @staticmethod
    def generate_circuit ( gate_size, num_qubits, length ):
        """Randomly sample `length` gate locations and build a data point."""
        native = gate_size <= 1
        gate_size = 2 if native else gate_size
        # NOTE(review): locations are always drawn as pairs (size 2) here even
        # when gate_size > 2; the fixed_time ParamOptimizationProblem variant
        # uses it.combinations( range( num_qubits ), gate_size ) — confirm
        # which behavior is intended.
        locations = list( it.combinations( range( num_qubits ), 2 ) )
        locations = np.array( locations )
        idxs = np.random.choice( len( locations ), length, replace = True )
        locations = locations[ idxs ]
        return CircuitDataPoint( gate_size, num_qubits, locations, native )

    def get_qfactor ( self ):
        """Build the circuit in qfactor's format with fresh random unitaries."""
        circuit = []

        if self.native:
            # CNOT followed by a random single-qubit gate on each operand.
            for pair in self.locations:
                circuit.append( CnotGate( pair[0], pair[1] ) )
                circuit.append( Gate( unitary_group.rvs( 2 ), ( pair[0], ) ) )
                circuit.append( Gate( unitary_group.rvs( 2 ), ( pair[1], ) ) )
            return circuit

        for location in self.locations:
            circuit.append( Gate( unitary_group.rvs( 2 ** self.gate_size ),
                                  location ) )
        return circuit

    def get_qsearch ( self ):
        """Build the equivalent qsearch circuit; returns None unless native."""
        if not self.native:
            return None

        steps = []
        u30 = qsearch.gates.U3Gate()
        u31 = qsearch.gates.U3Gate()
        for pair in self.locations:
            min_idx = min( pair )
            max_idx = max( pair )
            # CNOT acting inside the [min_idx, max_idx] window.
            cnot = qsearch.gates.NonadjacentCNOTGate( max_idx - min_idx + 1,
                                                      pair[0] - min_idx,
                                                      pair[1] - min_idx )
            if max_idx - min_idx == 1:
                u_layer = qsearch.gates.KroneckerGate( u30, u31 )
            else:
                # Pad the qubits between the two operands with identities.
                mid_layer = qsearch.gates.IdentityGate( max_idx - min_idx - 1 )
                u_layer = qsearch.gates.KroneckerGate( u30, mid_layer, u31 )
            p_layer = qsearch.gates.ProductGate( cnot, u_layer )
            # Pad above and below the active window to full circuit width.
            up = qsearch.gates.IdentityGate( min_idx )
            down = qsearch.gates.IdentityGate( self.num_qubits - max_idx - 1 )
            steps.append( qsearch.gates.KroneckerGate( up, p_layer, down ) )
        return qsearch.gates.ProductGate( *steps )

    def get_qfast ( self ):
        """Build a qfast FixedModel over the same gate structure."""
        return FixedModel( self.target, self.gate_size, [],
                           LBFGSOptimizer(), success_threshold = 1e-8,
                           structure = self.locations )

    def count_qfactor_tries ( self ):
        """Re-run qfactor until distance <= 1e-8, recording tries and times.
        NOTE(review): unbounded loop; caller bounds it with a signal timeout."""
        dist = 1
        tries = 0
        self.data[ "qfactor_retry_times" ] = []
        while dist > 1e-8:
            start = timer()
            tries += 1
            res = optimize( self.get_qfactor(), self.target, min_iters = 0 )
            dist = qfactor.get_distance( res, self.target )
            end = timer()
            self.data[ "qfactor_retry_times" ].append( end - start )
        self.data[ "qfactor_retries" ] = tries

    def count_qfast_tries ( self ):
        """Re-run qfast until distance <= 1e-8, recording tries and times."""
        dist = 1
        tries = 0
        self.data[ "qfast_retry_times" ] = []
        while dist > 1e-8:
            start = timer()
            tries += 1
            model = self.get_qfast()
            model.optimize( fine = True )
            dist = model.distance()
            end = timer()
            self.data[ "qfast_retry_times" ].append( end - start )
        self.data[ "qfast_retries" ] = tries

    def count_qsearch_tries ( self ):
        """Re-run qsearch until distance <= 1e-8, recording tries and times."""
        solver = qsearch.solvers.LeastSquares_Jac_SolverNative()
        options = qsearch.options.Options()
        options.target = self.target
        tries = 0
        dist = 1
        circ = self.get_qsearch()
        self.data[ "qsearch_retry_times" ] = []
        while dist > 1e-8:
            start = timer()
            tries += 1
            U, xopts = solver.solve_for_unitary( circ, options )
            # Same normalized trace distance used elsewhere in the benchmarks.
            dist = 1 - ( np.abs( np.trace( self.target.conj().T @ U ) ) / U.shape[0] )
            end = timer()
            self.data[ "qsearch_retry_times" ].append( end - start )
        self.data[ "qsearch_retries" ] = tries
def run_benchmark ( gate_size, length, num_qubits, num_circs, timeout = 60*60 ):
    """Generate `num_circs` random circuits, count solver retries for qfactor
    and its competitor (qsearch for native gates, qfast otherwise) with a
    per-phase timeout, and pickle the data points to disk."""
    # Register Signal Handlers
    signal.signal( signal.SIGALRM, term_trial )
    signal.signal( signal.SIGINT, term_trial )

    # Generate Circuits
    pts = [ CircuitDataPoint.generate_circuit( gate_size = gate_size,
                                               num_qubits = num_qubits,
                                               length = length )
            for x in range( num_circs ) ]

    # Optimize Circuits
    for pt in pts:
        try:
            # Count Qfactor Tries (alarm bounds this phase)
            signal.alarm( timeout )
            pt.count_qfactor_tries()

            # Count either QFAST or QSearch (fresh alarm for this phase)
            signal.alarm( timeout )
            if gate_size == 1:
                pt.count_qsearch_tries()
            else:
                pt.count_qfast_tries()
        except TrialTerminatedException:
            pt.data[ "failed" ] = True
        print( pt.data )

    filename = "%dq_%dg_%dd_%dp.dat" % ( num_qubits, gate_size,
                                         length, num_circs )
    with open( filename, "wb" ) as f:
        pickle.dump( pts, f )
# Command-line entry point; note run_benchmark takes (gate_size, length,
# num_qubits, num_circs, timeout) in that order.
if __name__ == "__main__":
    description_info = "Generate and optimize random circuits."
    parser = argparse.ArgumentParser( description = description_info )
    parser.add_argument( "numqubits", type = int,
                         help = "Total number of qubits." )
    parser.add_argument( "gatesize", type = int,
                         help = "Gate Size (1 for native gates)." )
    parser.add_argument( "length", type = int,
                         help = "Length of ansatz." )
    parser.add_argument( "numcircs", type = int,
                         help = "Total number of circuits or data points." )
    parser.add_argument( "timeout", type = int,
                         help = "Timeout in seconds for each trial." )
    args = parser.parse_args()
    run_benchmark( args.gatesize, args.length, args.numqubits, args.numcircs, args.timeout )
| 7,311 | 32.695853 | 92 |
py
|
qfactor
|
qfactor-master/examples/param_extraction.py
|
"""
Optimize a 3-qubit circuit to be a toffoli gate.
In this example, we use a more native structure and extract
angles for RzGates from the optimized result.
"""
import numpy as np
from qfactor import Gate, optimize, CnotGate, RzGate
# The next two lines start qfactor's logger.
import logging
logging.getLogger( "qfactor" ).setLevel( logging.INFO )
# We will optimize towards the toffoli unitary.
toffoli = np.array( [ [ 1, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 1, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 1, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 1, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 1, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 1, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 1 ],
[ 0, 0, 0, 0, 0, 0, 1, 0 ] ] )
# Start with the circuit structure
# and an initial guess for the gate's parameters.
# Here, the hadamards and cnots are fixed
# and the RzGates start with theta equal to a random number.
H = np.sqrt(1/2) * np.array( [ [ 1, 1 ],
[ 1, -1 ] ] )
circuit = [ Gate( H, (2,), fixed = True ),
CnotGate( 1, 2 ),
RzGate( np.random.random(), 2 ),
CnotGate( 0, 2 ),
RzGate( np.random.random(), 2 ),
CnotGate( 1, 2 ),
RzGate( np.random.random(), 2 ),
CnotGate( 0, 2 ),
RzGate( np.random.random(), 1 ),
RzGate( np.random.random(), 2 ),
CnotGate( 0, 1 ),
Gate( H, (2,), fixed = True ),
RzGate( np.random.random(), 0 ),
RzGate( np.random.random(), 1 ),
CnotGate( 0, 1 ) ]
# Call the optimize function
ans = optimize( circuit, toffoli, # <--- These are the only required args
diff_tol_a = 1e-12, # Stopping criteria for distance change
diff_tol_r = 1e-6, # Relative criteria for distance change
dist_tol = 1e-12, # Stopping criteria for distance
max_iters = 100000, # Maximum number of iterations
min_iters = 1000, # Minimum number of iterations
slowdown_factor = 0 ) # Larger numbers slowdown optimization
# to avoid local minima
# The result "ans" is another circuit object (list[Gate])
# with the gate's unitaries changed from the input circuit.
print( ans )
# If you would like to convert the unitary operations to native gates,
# you should use the KAK decomposition for 2 qubit unitaries, or
# qsearch or qfast for 3+ qubit unitaries.
| 2,589 | 36 | 77 |
py
|
qfactor
|
qfactor-master/examples/toffoli_synthesis.py
|
"""Optimize a 3-qubit circuit to be a toffoli gate."""
import numpy as np
from scipy.stats import unitary_group
from qfactor import Gate, optimize, get_distance
# The next two lines start qfactor's logger.
import logging
logging.getLogger( "qfactor" ).setLevel( logging.INFO )
# We will optimize towards the toffoli unitary.
toffoli = np.array( [ [ 1, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 1, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 1, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 1, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 1, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 1, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 1 ],
[ 0, 0, 0, 0, 0, 0, 1, 0 ] ] )
# Start with the circuit structure
# and an initial guess for the gate's unitaries.
# Here we use randomly generated unitaries for initial guess.
circuit = [ Gate( unitary_group.rvs(4), (1, 2) ),
Gate( unitary_group.rvs(4), (0, 2) ),
Gate( unitary_group.rvs(4), (1, 2) ),
Gate( unitary_group.rvs(4), (0, 2) ),
Gate( unitary_group.rvs(4), (0, 1) ) ]
# Note: the Gate object also has an optional boolean parameter "fixed"
# If "fixed" is set to true, that gate's unitary will not change.
# Call the optimize function
ans = optimize( circuit, toffoli, # <--- These are the only required args
diff_tol_a = 1e-12, # Stopping criteria for distance change
diff_tol_r = 1e-6, # Relative criteria for distance change
dist_tol = 1e-12, # Stopping criteria for distance
max_iters = 100000, # Maximum number of iterations
min_iters = 1000, # Minimum number of iterations
slowdown_factor = 0 ) # Larger numbers slowdown optimization
# to avoid local minima
# The result "ans" is another circuit object (list[Gate])
# with the gate's unitaries changed from the input circuit.
print( "Circuit: ", ans )
print( "Final Distance: ", get_distance( ans, toffoli ) )
# If you would like to convert the unitary operations to native gates,
# you should use the KAK decomposition for 2 qubit unitaries, or
# qsearch or qfast for 3+ qubit unitaries.
| 2,262 | 40.145455 | 77 |
py
|
qfactor
|
qfactor-master/qfactor/utils.py
|
"""This module contains various utility functions."""
import logging
import numpy as np
logger = logging.getLogger( "qfactor" )
def get_num_qubits ( M ):
    """Returns the size of the square matrix, M, in qubits."""

    # Validate inline (square numpy matrix); raises for anything else.
    if not isinstance( M, np.ndarray ):
        logger.debug( "M is not an numpy array." )
        raise TypeError( "Invalid matrix." )

    if len( M.shape ) != 2:
        logger.debug( "M is not an 2-dimensional array." )
        raise TypeError( "Invalid matrix." )

    if M.shape[0] != M.shape[1]:
        raise TypeError( "Invalid matrix." )

    return int( np.log2( M.shape[0] ) )
def is_valid_location ( location, num_qubits = None ):
    """
    Checks if the location is valid.

    Args:
        location (Tuple[int]): The location to check.

        num_qubits (int or None): The total number of qubits. All qubits
            should be less than this. If None, don't check.

    Returns:
        (bool): Valid or not
    """

    if not isinstance( location, tuple ):
        logger.debug( "Location is not a tuple." )
        return False

    if any( not isinstance( qubit, int ) for qubit in location ):
        logger.debug( "Location is not a tuple of ints." )
        return False

    if len( set( location ) ) != len( location ):
        logger.debug( "Location has duplicates." )
        return False

    if list( location ) != sorted( location ):
        logger.debug( "Location not sorted." )
        return False

    if num_qubits is not None:
        if any( qubit >= num_qubits for qubit in location ):
            logger.debug( "Location has an invalid qubit." )
            return False

    return True
def is_matrix ( M ):
    """Checks if M is a matrix."""

    if isinstance( M, np.ndarray ):
        if len( M.shape ) == 2:
            return True
        logger.debug( "M is not an 2-dimensional array." )
        return False

    logger.debug( "M is not an numpy array." )
    return False
def is_square_matrix ( M ):
    """Checks if M is a square matrix."""

    # Matrix check inlined: a 2-dimensional numpy array.
    if not isinstance( M, np.ndarray ):
        logger.debug( "M is not an numpy array." )
        return False

    if len( M.shape ) != 2:
        logger.debug( "M is not an 2-dimensional array." )
        return False

    return M.shape[0] == M.shape[1]
def is_unitary ( U, tol = 1e-12 ):
    """Checks if U is a unitary matrix."""

    # Square-matrix check inlined: a 2-dimensional, square numpy array.
    if not isinstance( U, np.ndarray ):
        logger.debug( "M is not an numpy array." )
        return False

    if len( U.shape ) != 2:
        logger.debug( "M is not an 2-dimensional array." )
        return False

    if U.shape[0] != U.shape[1]:
        return False

    X = U @ U.conj().T
    Y = U.conj().T @ U
    I = np.identity( X.shape[0] )

    # Both products must be the identity to within tol.
    for prod, label in ( ( X, "UU^d" ), ( Y, "U^dU" ) ):
        if not np.allclose( prod, I, rtol = 0, atol = tol ):
            if logger.isEnabledFor( logging.DEBUG ):
                norm = np.linalg.norm( prod - I )
                logger.debug( "Failed unitary condition, ||%s - I|| = %e"
                              % ( label, norm ) )
            return False

    return True
| 2,665 | 23.685185 | 80 |
py
|
qfactor
|
qfactor-master/qfactor/optimize.py
|
"""This module implements the main optimize function."""
import logging
import numpy as np
from qfactor import utils
from qfactor.gates import Gate
from qfactor.tensors import CircuitTensor
logger = logging.getLogger( "qfactor" )
def optimize ( circuit, target, diff_tol_a = 1e-12, diff_tol_r = 1e-6,
               dist_tol = 1e-10, max_iters = 100000, min_iters = 1000,
               slowdown_factor = 0.0 ):
    """
    Optimize distance between circuit and target unitary.

    Alternating sweeps (right-to-left, then left-to-right) update each
    non-fixed gate from its environment matrix until a termination
    condition is met.

    Args:
        circuit (list[Gate]): The circuit to optimize.

        target (np.ndarray): The target unitary matrix.

        diff_tol_a (float): Terminate when the difference in distance
            between iterations is less than this threshold.

        diff_tol_r (float): Terminate when the relative difference in
            distance between iterations is less than this threshold:
            |c1 - c2| <= diff_tol_a + diff_tol_r * abs( c1 )

        dist_tol (float): Terminate when the distance is less than
            this threshold.

        max_iters (int): Maximum number of iterations.

        min_iters (int): Minimum number of iterations.

        slowdown_factor (float): A positive number less than 1.
            The larger this factor, the slower the optimization.

    Returns:
        (list[Gate]): The optimized circuit.

    Raises:
        TypeError: For invalid arguments.
    """

    # Argument validation.  NOTE(review): the tolerance checks reject int
    # inputs (isinstance float) and only bound them above by 0.5.
    if not isinstance( circuit, list ):
        raise TypeError( "The circuit argument is not a list of gates." )

    if not all( [ isinstance( g, Gate ) for g in circuit ] ):
        raise TypeError( "The circuit argument is not a list of gates." )

    if not utils.is_unitary( target ):
        raise TypeError( "The target matrix is not unitary." )

    if not isinstance( diff_tol_a, float ) or diff_tol_a > 0.5:
        raise TypeError( "Invalid absolute difference threshold." )

    if not isinstance( diff_tol_r, float ) or diff_tol_r > 0.5:
        raise TypeError( "Invalid relative difference threshold." )

    if not isinstance( dist_tol, float ) or dist_tol > 0.5:
        raise TypeError( "Invalid distance threshold." )

    if not isinstance( max_iters, int ) or max_iters < 0:
        raise TypeError( "Invalid maximum number of iterations." )

    if not isinstance( min_iters, int ) or min_iters < 0:
        raise TypeError( "Invalid minimum number of iterations." )

    if slowdown_factor < 0 or slowdown_factor >= 1:
        raise TypeError( "Slowdown factor is a positive number less than 1." )

    ct = CircuitTensor( target, circuit )

    # c1/c2 track the current and previous distance; it counts iterations.
    c1 = 0
    c2 = 1
    it = 0

    while True:

        # Termination conditions
        if it > min_iters:
            if np.abs(c1 - c2) <= diff_tol_a + diff_tol_r * np.abs( c1 ):
                diff = np.abs(c1 - c2)
                logger.info( f"Terminated: |c1 - c2| = {diff}"
                             " <= diff_tol_a + diff_tol_r * |c1|." )
                break;

        if it > max_iters:
            logger.info( "Terminated: iteration limit reached." )
            break;

        it += 1

        # from right to left
        for k in range( len( circuit ) ):
            rk = len( circuit ) - 1 - k

            # Remove current gate from right of circuit tensor
            ct.apply_right( circuit[rk], inverse = True )

            # Update current gate
            if not circuit[rk].fixed:
                env = ct.calc_env_matrix( circuit[rk].location )
                circuit[rk].update( env, slowdown_factor )

            # Add updated gate to left of circuit tensor
            ct.apply_left( circuit[rk] )

        # from left to right
        for k in range( len( circuit ) ):
            # Remove current gate from left of circuit tensor
            ct.apply_left( circuit[k], inverse = True )

            # Update current gate
            if not circuit[k].fixed:
                env = ct.calc_env_matrix( circuit[k].location )
                circuit[k].update( env, slowdown_factor )

            # Add updated gate to right of circuit tensor
            ct.apply_right( circuit[k] )

        # Distance is 1 - |trace| / dimension.
        c2 = c1
        c1 = np.abs( np.trace( ct.utry ) )
        c1 = 1 - ( c1 / ( 2 ** ct.num_qubits ) )

        if c1 <= dist_tol:
            logger.info( f"Terminated: c1 = {c1} <= dist_tol." )
            return circuit

        if it % 100 == 0:
            logger.info( f"iteration: {it}, cost: {c1}" )

        # Periodically rebuild the tensor to limit accumulated numerical error.
        if it % 40 == 0:
            ct.reinitialize()

    return circuit
def get_distance ( circuit, target ):
    """
    Compute the distance between a circuit and a target unitary.

    Args:
        circuit (list[Gate]): The circuit's gate list.

        target (np.ndarray): The unitary target.

    Returns:
        (float): 1 - |Tr( target^dagger U_circuit )| / 2^n; zero when the
            circuit implements the target exactly (up to global phase).
    """
    # Contracting the circuit into target^dagger leaves (near-)identity
    # for a (near-)exact circuit, so the trace magnitude measures overlap.
    tensor = CircuitTensor( target, circuit )
    dim = 2 ** utils.get_num_qubits( target )
    overlap = np.abs( np.trace( tensor.utry ) )
    return 1 - ( overlap / dim )
| 4,932 | 29.83125 | 78 |
py
|
qfactor
|
qfactor-master/qfactor/__init__.py
|
"""
Quantum Fast Circuit Optimizer (qFactor)
This package provides functions to optimize the unitaries in a circuit
with respect to some target unitary matrix.
"""
# Initialize Logging
import logging
_logger = logging.getLogger( "qfactor" )
_logger.setLevel(logging.CRITICAL)
_handler = logging.StreamHandler()
_handler.setLevel( logging.DEBUG )
_fmt = "%(levelname)-8s | %(message)s"
_formatter = logging.Formatter( _fmt )
_handler.setFormatter( _formatter )
_logger.addHandler( _handler )
# Main API
from .gates import Gate, RxGate, RyGate, RzGate, CnotGate
from .optimize import optimize, get_distance
| 609 | 25.521739 | 70 |
py
|
qfactor
|
qfactor-master/qfactor/tensors.py
|
"""This module implements the CircuitTensor class."""
import logging
import numpy as np
from qfactor import utils
from qfactor.gates import Gate
logger = logging.getLogger( "qfactor" )
class CircuitTensor():
    """A CircuitTensor tracks an entire circuit as a tensor.

    The tensor starts as the conjugate-transpose of the target unitary,
    reshaped to one size-2 axis per qubit wire on each side, and circuit
    gates are contracted into it from either end.
    """

    def __init__ ( self, utry_target, gate_list ):
        """
        CircuitTensor Constructor

        Args:
            utry_target (np.ndarray): Unitary target matrix

            gate_list (list[Gate]): The circuit's gate list.

        Raises:
            TypeError: If the target is not unitary or gate_list is
                not a list of Gate objects.

            ValueError: If a gate acts outside the target's qubit range.
        """
        if not utils.is_unitary( utry_target ):
            raise TypeError( "Specified target matrix is not unitary." )
        if not isinstance( gate_list, list ):
            raise TypeError( "Gate list is not a list." )
        if not all( [ isinstance( gate, Gate ) for gate in gate_list ] ):
            raise TypeError( "Gate list contains non-gate objects." )
        self.utry_target = utry_target
        self.num_qubits = utils.get_num_qubits( self.utry_target )
        # Every gate must fit within this tensor's qubit range.
        if not all( [ utils.is_valid_location( gate.location, self.num_qubits )
                      for gate in gate_list ] ):
            raise ValueError( "Gate location mismatch with circuit tensor." )
        self.gate_list = gate_list
        self.reinitialize()

    def reinitialize ( self ):
        """Reconstruct the circuit tensor from scratch.

        Rebuilding washes out numerical error accumulated by repeated
        apply/remove contractions during optimization.
        """
        logger.debug( "Reinitializing CircuitTensor" )
        # Start from target^dagger so that a perfect circuit contracts to
        # the identity, giving |trace| == 2^num_qubits.
        self.tensor = self.utry_target.conj().T
        self.tensor = self.tensor.reshape( [2] * 2 * self.num_qubits )
        for gate in self.gate_list:
            self.apply_right( gate )

    @property
    def utry ( self ):
        """Calculates this circuit tensor's unitary representation."""
        num_elems = 2 ** self.num_qubits
        # Fold the 2n size-2 axes back into a square matrix.
        utry = self.tensor.reshape( ( num_elems, num_elems ) )
        # paulis = pauli_expansion( unitary_log_no_i( utry, tol = 1e-12 ) )
        # print( paulis[0] )
        return utry

    def apply_right ( self, gate, inverse = False ):
        """
        Apply the specified gate on the right of the circuit.

                 .-----.   .------.
             0 -|     |---|      |-
             1 -|     |---| gate |-
              . |     |   '------'
              . |     |
         n-1  -|     |------------
                 '-----'

        Note that applying the gate on the right is equivalent to
        multiplying the gate on the left of the tensor.
        This operation is performed using tensor contraction.

        Args:
            gate (Gate): The gate to apply.

            inverse (bool): If true, apply the inverse of gate.
        """
        # Permute the gate's qubit axes to the front, flatten the rest,
        # matrix-multiply by the gate, then undo the permutation.
        left_perm = list( gate.location )
        mid_perm = [ x for x in range( self.num_qubits ) if x not in gate.location ]
        right_perm = [ x + self.num_qubits for x in range( self.num_qubits ) ]
        utry = gate.utry.conj().T if inverse else gate.utry
        perm = left_perm + mid_perm + right_perm
        self.tensor = self.tensor.transpose( perm )
        self.tensor = self.tensor.reshape( ( 2 ** len( left_perm ), -1 ) )
        self.tensor = utry @ self.tensor
        self.tensor = self.tensor.reshape( [2] * 2 * self.num_qubits )
        # argsort of a permutation yields its inverse permutation.
        inv_perm = np.argsort( perm )
        self.tensor = self.tensor.transpose( inv_perm )

    def apply_left ( self, gate, inverse = False ):
        """
        Apply the specified gate on the left of the circuit.

             .------.   .-----.
         0 -|      |---|     |-
         1 -| gate |---|     |-
             '------'   |     |
              .         |     |
         n-1 ----------|     |-
                         '-----'

        Note that applying the gate on the left is equivalent to
        multiplying the gate on the right of the tensor.
        This operation is performed using tensor contraction.

        Args:
            gate (Gate): The gate to apply.

            inverse (bool): If true, apply the inverse of gate.
        """
        # Permute the gate's qubit axes (right side) to the back, flatten
        # the rest, matrix-multiply on the right, then undo.
        left_perm = list( range( self.num_qubits ) )
        mid_perm = [ x + self.num_qubits for x in left_perm if x not in gate.location ]
        right_perm = [ x + self.num_qubits for x in gate.location ]
        utry = gate.utry.conj().T if inverse else gate.utry
        perm = left_perm + mid_perm + right_perm
        self.tensor = self.tensor.transpose( perm )
        self.tensor = self.tensor.reshape( ( -1, 2 ** len( right_perm ) ) )
        self.tensor = self.tensor @ utry
        self.tensor = self.tensor.reshape( [2] * 2 * self.num_qubits )
        inv_perm = np.argsort( perm )
        self.tensor = self.tensor.transpose( inv_perm )

    def calc_env_matrix ( self, location ):
        """
        Calculates the environmental matrix of the tensor with
        respect to the specified location.

        Args:
            location (iterable): Calculate the environment for this
                set of qubits.

        Returns:
            (np.ndarray): The environmental matrix.
        """
        # Group the non-location axes (paired left/right) first and the
        # location axes last.
        left_perm = list( range( self.num_qubits ) )
        left_perm = [ x for x in left_perm if x not in location ]
        left_perm = left_perm + [ x + self.num_qubits for x in left_perm ]
        right_perm = list( location ) + [ x + self.num_qubits
                                          for x in location ]
        perm = left_perm + right_perm
        a = np.transpose( self.tensor, perm )
        a = np.reshape( a, ( 2 ** ( self.num_qubits - len( location ) ),
                             2 ** ( self.num_qubits - len( location ) ),
                             2 ** len( location ),
                             2 ** len( location ) ) )
        # np.trace sums over the first two axes by default, tracing out
        # the non-location qubits and leaving the environment matrix.
        return np.trace( a )
| 5,693 | 32.892857 | 87 |
py
|
qfactor
|
qfactor-master/qfactor/gates/xx.py
|
"""This module implements the XXGate class."""
import numpy as np
from qfactor import utils
from qfactor.gates import Gate
class XXGate ( Gate ):
    """A XXGate is a Quantum XX-rotation, exp(-i theta/2 X(x)X),
    applied to a pair of qubits."""

    def __init__ ( self, theta, location, fixed = False, check_params = True ):
        """
        Gate Constructor

        Args:
            theta (float): The gate's angle of rotation.

            location (tuple[int]): The qubits this gate is applied to.

            fixed (bool): True if the gate's unitary operation is immutable.

            check_params (bool): True implies parameters are checked for
                correctness.

        Raises:
            TypeError: If theta is not a float, location is not a valid
                2-qubit tuple, or fixed is not a bool.
        """
        if check_params:
            if not isinstance( theta, float ):
                raise TypeError( "Invalid theta angle, not a float." )
            if not isinstance( location, tuple ):
                raise TypeError( "Specified location is not valid." )
            if not utils.is_valid_location( location ):
                raise TypeError( "Specified location is not valid." )
            if len( location ) != 2:
                raise TypeError( "Specified location is not valid." )
            if not isinstance( fixed, bool ):
                raise TypeError( "Invalid fixed parameter." )
        self.theta = theta
        self.location = location
        self.gate_size = len( self.location )
        self.fixed = fixed

    @property
    def utry ( self ):
        # exp(-i theta/2 X(x)X): cosine on the diagonal, -i*sine on the
        # anti-diagonal.
        cos = np.cos( self.theta / 2 )
        isin = -1j * np.sin( self.theta / 2 )
        return np.array( [ [ cos, 0, 0, isin ],
                           [ 0, cos, isin, 0 ],
                           [ 0, isin, cos, 0 ],
                           [ isin, 0, 0, cos ] ] )

    def update ( self, env, slowdown_factor ):
        """
        Update this gate with respect to an enviroment.

        This method updates this gate's unitary to maximize:
            Re( Tr( env * self.utry ) )

        Args:
            env (np.ndarray): The enviromental matrix.

            slowdown_factor (float): A positive number less than 1.
                The larger this factor, the slower the optimization.
        """
        if self.fixed:
            return
        # Given utry above, Re(Tr(env @ utry)) = a*cos(theta/2)
        # + b*sin(theta/2); pick theta at the maximum of that expression.
        a = np.real( env[0, 0] + env[1, 1] + env[2, 2] + env[3, 3] )
        b = np.imag( env[0, 3] + env[1, 2] + env[2, 1] + env[3, 0] )
        new_theta = np.arccos( a / np.sqrt( a ** 2 + b ** 2 ) )
        # arccos gives |theta/2|; the sign of b selects the branch.
        new_theta *= -2 if b < 0 else 2
        # Damped step: blend the new angle with the current one.
        self.theta = ( ( 1 - slowdown_factor ) * new_theta
                       + slowdown_factor * self.theta )

    def __repr__ ( self ):
        """Gets a simple gate string representation."""
        return str( self.location ) \
               + ": XX(" \
               + str( self.theta ) \
               + ")"
| 2,813 | 30.266667 | 79 |
py
|
qfactor
|
qfactor-master/qfactor/gates/rz.py
|
"""This module implements the RzGate class."""
import numpy as np
from qfactor import utils
from qfactor.gates import Gate
class RzGate ( Gate ):
    """A RzGate is a Quantum Z-rotation applied to a qubit."""

    def __init__ ( self, theta, location, fixed = False, check_params = True ):
        """
        Gate Constructor

        Args:
            theta (float): The gate's angle of rotation.

            location (int or tuple[int]): The qubit this gate is applied to.

            fixed (bool): True if the gate's unitary operation is immutable.

            check_params (bool): True implies parameters are checked for
                correctness.

        Raises:
            TypeError: If theta, location, or fixed have the wrong type.

            ValueError: If an integer location is negative.
        """
        if check_params:
            if not isinstance( theta, float ):
                raise TypeError( "Invalid theta angle, not a float." )
            if not isinstance( location, int ):
                if ( not utils.is_valid_location( location )
                     or len( location ) != 1 ):
                    raise TypeError( "Specified location is not valid." )
            elif location < 0:
                raise ValueError( "Invalid location value." )
            if not isinstance( fixed, bool ):
                raise TypeError( "Invalid fixed parameter." )

        self.theta = theta

        # Normalize an int location to a 1-tuple.
        if isinstance( location, int ):
            self.location = tuple( [ location ] )
        else:
            self.location = location

        self.gate_size = len( self.location )
        self.fixed = fixed

    @property
    def utry ( self ):
        # diag(1, e^{i theta}): Rz(theta) up to a global phase.
        return np.array( [ [ 1, 0 ],
                           [ 0, np.exp( 1j * self.theta ) ] ] )

    def update ( self, env, slowdown_factor ):
        """
        Update this gate with respect to an enviroment.

        This method updates this gate's unitary to maximize:
            Re( Tr( env * self.utry ) )

        Args:
            env (np.ndarray): The enviromental matrix.

            slowdown_factor (float): A positive number less than 1.
                The larger this factor, the slower the optimization.
        """
        if self.fixed:
            return

        # Re(Tr(env @ utry)) = Re(env[0,0]) + a*cos(theta) - b*sin(theta),
        # which is maximized at theta = -atan2(b, a).
        a = np.real( env[1, 1] )
        b = np.imag( env[1, 1] )

        # arctan2 replaces the previous arctan(b / a) plus manual quadrant
        # fix-up: identical for a != 0, and well-defined when a == 0,
        # where b / a divided by zero.
        new_theta = -np.arctan2( b, a )

        # Damped step: blend the new angle with the current one.
        self.theta = ( ( 1 - slowdown_factor ) * new_theta
                       + slowdown_factor * self.theta )

    def __repr__ ( self ):
        """Gets a simple gate string representation."""
        return str( self.location ) \
               + ": Rz(" \
               + str( self.theta ) \
               + ")"
| 2,718 | 28.236559 | 79 |
py
|
qfactor
|
qfactor-master/qfactor/gates/rx.py
|
"""This module implements the RxGate class."""
import numpy as np
from qfactor import utils
from qfactor.gates import Gate
class RxGate ( Gate ):
    """A RxGate is a Quantum X-rotation applied to a qubit."""

    def __init__ ( self, theta, location, fixed = False, check_params = True ):
        """
        Gate Constructor

        Args:
            theta (float): The gate's angle of rotation.

            location (int or tuple[int]): The qubit this gate is applied to.

            fixed (bool): True if the gate's unitary operation is immutable.

            check_params (bool): True implies parameters are checked for
                correctness.

        Raises:
            TypeError: If theta, location, or fixed have the wrong type.

            ValueError: If an integer location is negative.
        """
        if check_params:
            if not isinstance( theta, float ):
                raise TypeError( "Invalid theta angle, not a float." )
            if isinstance( location, int ):
                if location < 0:
                    raise ValueError( "Invalid location value." )
            elif ( not utils.is_valid_location( location )
                   or len( location ) != 1 ):
                raise TypeError( "Specified location is not valid." )
            if not isinstance( fixed, bool ):
                raise TypeError( "Invalid fixed parameter." )

        self.theta = theta
        # Normalize an int location to a 1-tuple.
        self.location = ( location, ) if isinstance( location, int ) \
                        else location
        self.gate_size = len( self.location )
        self.fixed = fixed

    @property
    def utry ( self ):
        # Standard Rx(theta): cosine on the diagonal, -i*sine off it.
        half = self.theta / 2
        cos, sin = np.cos( half ), np.sin( half )
        return np.array( [ [ cos, -1j * sin ],
                           [ -1j * sin, cos ] ] )

    def update ( self, env, slowdown_factor ):
        """
        Update this gate with respect to an enviroment.

        This method updates this gate's unitary to maximize:
            Re( Tr( env * self.utry ) )

        Args:
            env (np.ndarray): The enviromental matrix.

            slowdown_factor (float): A positive number less than 1.
                The larger this factor, the slower the optimization.
        """
        if self.fixed:
            return

        # Re(Tr(env @ utry)) = a*cos(theta/2) + b*sin(theta/2); arccos
        # gives |theta|, and the sign of b selects the branch.
        a = np.real( env[0, 0] + env[1, 1] )
        b = np.imag( env[0, 1] + env[1, 0] )
        new_theta = 2 * np.arccos( a / np.sqrt( a ** 2 + b ** 2 ) )
        if b < 0:
            new_theta = -new_theta

        # Damped step: blend the new angle with the current one.
        self.theta = ( ( 1 - slowdown_factor ) * new_theta
                       + slowdown_factor * self.theta )

    def __repr__ ( self ):
        """Gets a simple gate string representation."""
        return f"{self.location}: Rx({self.theta})"
| 2,744 | 29.842697 | 79 |
py
|
qfactor
|
qfactor-master/qfactor/gates/cnot.py
|
"""This module implements the CnotGate class."""
import numpy as np
from qfactor import utils
from qfactor.gates import Gate
class CnotGate ( Gate ):
    """A CnotGate is a controlled-not applied to a pair of qubit."""

    def __init__ ( self, control, target, check_params = True ):
        """
        Gate Constructor

        Args:
            control (int): The index of the control qubit.

            target (int): The index of the target qubit.

            check_params (bool): True implies parameters are checked for
                correctness.

        Raises:
            TypeError: If either qubit index is not a non-negative int.
        """
        if check_params:
            if not isinstance( control, int ) or control < 0:
                raise TypeError( "Invalid control qubit." )
            if not isinstance( target, int ) or target < 0:
                raise TypeError( "Invalid target qubit." )

        # CNOT flips the target qubit when the control qubit is set.
        self.utry = np.array( [ [ 1, 0, 0, 0 ],
                                [ 0, 1, 0, 0 ],
                                [ 0, 0, 0, 1 ],
                                [ 0, 0, 1, 0 ] ] )
        self.location = ( control, target )
        self.gate_size = 2
        # Constant gate: it never participates in optimization updates.
        self.fixed = True

    def update ( self, env, slowdown_factor ):
        """
        Update this gate with respect to an enviroment.

        Note this gate is fixed and never updates. This
        function is implemented to satisfy the API.

        Args:
            env (np.ndarray): The enviromental matrix.

            slowdown_factor (float): A positive number less than 1.
                The larger this factor, the slower the optimization.
        """
        return

    def __repr__ ( self ):
        """Gets a simple gate string representation."""
        return f"{self.location}: CNOT"
| 1,760 | 27.868852 | 72 |
py
|
qfactor
|
qfactor-master/qfactor/gates/__init__.py
|
from .gate import Gate
from .rx import RxGate
from .ry import RyGate
from .rz import RzGate
from .xx import XXGate
from .cnot import CnotGate
| 143 | 17 | 26 |
py
|
qfactor
|
qfactor-master/qfactor/gates/gate.py
|
"""This module implements the Gate class."""
import numpy as np
import scipy.linalg as la
from qfactor import utils
class Gate():
    """A Gate is a unitary operation applied to a set of qubits."""

    def __init__ ( self, utry, location, fixed = False, check_params = True ):
        """
        Gate Constructor

        Args:
            utry (np.ndarray): The gate's unitary operation.

            location (tuple[int]): Set of qubits this gate is applied to.

            fixed (bool): True if the gate's unitary operation is immutable.

            check_params (bool): True implies parameters are checked for
                correctness.

        Raises:
            TypeError: On a non-unitary matrix, bad location, or bad flag.

            ValueError: When the location size disagrees with the matrix.
        """
        if check_params:
            if not utils.is_unitary( utry ):
                raise TypeError( "Specified matrix is not unitary." )
            if not utils.is_valid_location( location ):
                raise TypeError( "Specified location is not valid." )
            if len( location ) != utils.get_num_qubits( utry ):
                raise ValueError( "Location size does not match unitary." )
            if not isinstance( fixed, bool ):
                raise TypeError( "Invalid fixed parameter." )

        self.utry = utry
        self.location = location
        self.gate_size = len( location )
        self.fixed = fixed

    def get_inverse ( self ):
        """Returns a new Gate holding this gate's inverse (dagger)."""
        return Gate( self.utry.conj().T, self.location, self.fixed, False )

    def update ( self, env, slowdown_factor ):
        """
        Update this gate with respect to an enviroment.

        This method updates this gate's unitary to maximize:
            Re( Tr( env * self.utry ) )

        Args:
            env (np.ndarray): The enviromental matrix.

            slowdown_factor (float): A positive number less than 1.
                The larger this factor, the slower the optimization.
        """
        # Blend the environment with the current inverse, then project the
        # result back onto the unitary group through its SVD.
        blend = ( 1 - slowdown_factor ) * env \
                + slowdown_factor * self.utry.conj().T
        u, _, v = la.svd( blend )
        self.utry = v.conj().T @ u.conj().T

    def get_tensor_format ( self, compress_left = False,
                            compress_right = False ):
        """
        Converts the gate's operation into a tensor network format.

        Indices are counted top to bottom, left to right:

                 .-----.
             0 -|     |- n
             1 -|     |- n+1
              . |     | .
              . |     | .
         n-1  -|     |- 2n-1
                 '-----'

        Args:
            compress_left (bool): Compress the left indices into one.

            compress_right (bool): Compress the right indices into one.

        Returns
            (np.ndarray): A tensor representing this gate.
        """
        if not isinstance( compress_left, bool ):
            raise TypeError( "Invalid compress_left parameter." )
        if not isinstance( compress_right, bool ):
            raise TypeError( "Invalid compress_right parameter." )

        dim = len( self.utry )
        # Each uncompressed side contributes one size-2 axis per qubit.
        shape = [ dim ] if compress_left else [ 2 ] * self.gate_size
        shape += [ dim ] if compress_right else [ 2 ] * self.gate_size
        return self.utry.reshape( shape )

    def __str__ ( self ):
        """Gets the gate's string representation."""
        return f"{self.location}:{self.utry}"

    def __repr__ ( self ):
        """Gets a simple gate string representation."""
        return ( f"{self.location}"
                 f": [[{self.utry[0][0]}"
                 f" ... {self.utry[-1][-1]}]]" )
py
|
qfactor
|
qfactor-master/qfactor/gates/ry.py
|
"""This module implements the RyGate class."""
import numpy as np
from qfactor import utils
from qfactor.gates import Gate
class RyGate ( Gate ):
    """A RyGate is a Quantum Y-rotation applied to a qubit."""

    def __init__ ( self, theta, location, fixed = False, check_params = True ):
        """
        Gate Constructor

        Args:
            theta (float): The gate's angle of rotation.

            location (int or tuple[int]): The qubit this gate is applied to.

            fixed (bool): True if the gate's unitary operation is immutable.

            check_params (bool): True implies parameters are checked for
                correctness.

        Raises:
            TypeError: If theta, location, or fixed have the wrong type.

            ValueError: If an integer location is negative.
        """
        if check_params:
            if not isinstance( theta, float ):
                raise TypeError( "Invalid theta angle, not a float." )
            if not isinstance( location, int ):
                if ( not utils.is_valid_location( location )
                     or len( location ) != 1 ):
                    raise TypeError( "Specified location is not valid." )
            elif location < 0:
                raise ValueError( "Invalid location value." )
            if not isinstance( fixed, bool ):
                raise TypeError( "Invalid fixed parameter." )
        self.theta = theta
        # Normalize an int location to a 1-tuple.
        if isinstance( location, int ):
            self.location = tuple( [ location ] )
        else:
            self.location = location
        self.gate_size = len( self.location )
        self.fixed = fixed

    @property
    def utry ( self ):
        # Standard Ry(theta) rotation matrix (real-valued).
        cos = np.cos( self.theta / 2 )
        sin = np.sin( self.theta / 2 )
        return np.array( [ [ cos, -sin ],
                           [ sin, cos ] ] )

    def update ( self, env, slowdown_factor ):
        """
        Update this gate with respect to an enviroment.

        This method updates this gate's unitary to maximize:
            Re( Tr( env * self.utry ) )

        Args:
            env (np.ndarray): The enviromental matrix.

            slowdown_factor (float): A positive number less than 1.
                The larger this factor, the slower the optimization.
        """
        if self.fixed:
            return
        # Re(Tr(env @ utry)) = a*cos(theta/2) - b*sin(theta/2), so unlike
        # RxGate the sign test on b is inverted (Ry's off-diagonal terms
        # are real, not imaginary).
        a = np.real( env[0, 0] + env[1, 1] )
        b = np.real( env[1, 0] - env[0, 1] )
        new_theta = 2 * np.arccos( a / np.sqrt( a ** 2 + b ** 2 ) )
        new_theta *= -1 if b > 0 else 1
        # Damped step: blend the new angle with the current one.
        self.theta = ( ( 1 - slowdown_factor ) * new_theta
                       + slowdown_factor * self.theta )

    def __repr__ ( self ):
        """Gets a simple gate string representation."""
        return str( self.location ) \
               + ": Ry(" \
               + str( self.theta ) \
               + ")"
| 2,733 | 29.719101 | 79 |
py
|
qfactor
|
qfactor-master/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
qfactor
|
qfactor-master/tests/optimize/test_correct.py
|
import numpy as np
import unittest as ut
from scipy.stats import unitary_group
from qfactor.optimize import optimize, Gate
class TestOptimizeFixed ( ut.TestCase ):
def test_optimize_fixed ( self ):
u1 = unitary_group.rvs( 8 )
g1 = Gate( u1, (0, 1, 2) )
circ = optimize( [ g1 ], u1 )
self.assertTrue( np.allclose( circ[0].utry, g1.utry ) )
if __name__ == "__main__":
ut.main()
| 425 | 18.363636 | 63 |
py
|
qfactor
|
qfactor-master/tests/optimize/__init__.py
| 0 | 0 | 0 |
py
|
|
qfactor
|
qfactor-master/tests/optimize/test_fixed.py
|
import numpy as np
import unittest as ut
from scipy.stats import unitary_group
from qfactor.optimize import optimize, Gate
class TestOptimizeFixed ( ut.TestCase ):
def test_optimize_fixed ( self ):
g1 = Gate( unitary_group.rvs( 4 ), (0, 1), True )
g2 = Gate( unitary_group.rvs( 8 ), (1, 2, 3), True )
circ = optimize( [ g1, g2 ], unitary_group.rvs( 16 ) )
self.assertTrue( np.allclose( circ[0].utry, g1.utry ) )
self.assertTrue( np.allclose( circ[1].utry, g2.utry ) )
if __name__ == "__main__":
ut.main()
| 562 | 23.478261 | 63 |
py
|
qfactor
|
qfactor-master/tests/tensors/test_constructor.py
|
import numpy as np
import unittest as ut
from qfactor.gates import Gate
from qfactor.tensors import CircuitTensor
class TestCircuitTensorConstructor ( ut.TestCase ):
    """Tests for CircuitTensor's constructor validation."""

    # 3-qubit Toffoli matrix: a valid unitary fixture.
    TOFFOLI = np.asarray(
            [[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
             [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
             [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
             [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
             [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
             [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
             [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
             [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j]] )

    # 9x8 (non-square) matrix: must be rejected as non-unitary.
    INVALID = np.asarray(
            [[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
             [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
             [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
             [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
             [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
             [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
             [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
             [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
             [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j]] )

    def test_gate_constructor_invalid ( self ):
        gate = Gate( self.TOFFOLI, (0, 1, 2) )
        # Non-unitary target, non-Gate list entries, and a non-list all
        # raise TypeError.
        self.assertRaises( TypeError, CircuitTensor, self.INVALID, [ gate ] )
        self.assertRaises( TypeError, CircuitTensor, self.TOFFOLI, [ "a", gate ] )
        self.assertRaises( TypeError, CircuitTensor, self.TOFFOLI, [ gate, "a" ] )
        self.assertRaises( TypeError, CircuitTensor, self.TOFFOLI, [ "a" ] )
        self.assertRaises( TypeError, CircuitTensor, self.TOFFOLI, "a" )
        # Gate on qubit 7 cannot fit in a 3-qubit tensor.
        gate = Gate( self.TOFFOLI, (0, 1, 7) )
        self.assertRaises( ValueError, CircuitTensor, self.TOFFOLI, [ gate ] )

    def test_gate_constructor_valid ( self ):
        gate = Gate( self.TOFFOLI, (0, 1, 2) )
        ct = CircuitTensor( self.TOFFOLI, [ gate ] )
        self.assertTrue( np.array_equal( gate.utry, ct.gate_list[0].utry ) )
        self.assertTrue( len( ct.gate_list ) == 1 )
        # Toffoli applied to Toffoli^dagger contracts to the identity.
        self.assertTrue( np.allclose( ct.utry, np.identity( 8 ) ) )

if __name__ == '__main__':
    ut.main()
| 2,562 | 48.288462 | 82 |
py
|
qfactor
|
qfactor-master/tests/tensors/test_apply_left.py
|
import numpy as np
import unittest as ut
from scipy.stats import unitary_group
from qfactor.gates import Gate
from qfactor.tensors import CircuitTensor
class TestApplyLeft ( ut.TestCase ):
    """Tests for CircuitTensor.apply_left."""

    def test_apply_left ( self ):
        u1 = unitary_group.rvs( 8 )
        u2 = unitary_group.rvs( 4 )
        g = Gate( u2, (0, 1) )
        ct = CircuitTensor( u1, [] )
        ct.apply_left( g )
        # Applying on the circuit's left multiplies the tensor on the
        # right: u1^dagger @ (u2 (x) I).
        prod = u1.conj().T @ np.kron( u2, np.identity( 2 ) )
        prod_test = ct.utry
        self.assertTrue( np.allclose( prod, prod_test ) )
        # A second application appends another (u2 (x) I) factor.
        ct.apply_left( g )
        prod = prod @ np.kron( u2, np.identity( 2 ) )
        prod_test = ct.utry
        self.assertTrue( np.allclose( prod, prod_test ) )

    def test_apply_left_invalid ( self ):
        u1 = unitary_group.rvs( 8 )
        ct = CircuitTensor( u1, [] )
        # Non-gate input must be rejected.
        self.assertRaises( Exception, ct.apply_left, "a" )

if __name__ == "__main__":
    ut.main()
| 931 | 24.189189 | 60 |
py
|
qfactor
|
qfactor-master/tests/tensors/test_calc_env_matrix.py
|
import numpy as np
import unittest as ut
from scipy.stats import unitary_group
from qfactor.gates import Gate
from qfactor.tensors import CircuitTensor
class TestCalcEnvMatrix ( ut.TestCase ):
def test_calc_env_matrix ( self ):
u1 = unitary_group.rvs( 8 )
u2 = u1.conj().T
ct = CircuitTensor( u1, [] )
env = ct.calc_env_matrix( [0, 1, 2] )
self.assertTrue( np.allclose( env, u2 ) )
def test_calc_env_matrix_invalid ( self ):
u1 = unitary_group.rvs( 8 )
u2 = u1.conj().T
ct = CircuitTensor( u1, [] )
self.assertRaises( ValueError, ct.calc_env_matrix, [0, 1, 2, 3] )
self.assertRaises( TypeError, ct.calc_env_matrix, "a" )
if __name__ == "__main__":
ut.main()
| 765 | 20.885714 | 73 |
py
|
qfactor
|
qfactor-master/tests/tensors/test_apply_right.py
|
import numpy as np
import unittest as ut
from scipy.stats import unitary_group
from qfactor.gates import Gate
from qfactor.tensors import CircuitTensor
class TestApplyRight ( ut.TestCase ):
    """Tests for CircuitTensor.apply_right."""

    def test_apply_right ( self ):
        u1 = unitary_group.rvs( 8 )
        u2 = unitary_group.rvs( 4 )
        g = Gate( u2, (0, 1) )
        ct = CircuitTensor( u1, [] )
        ct.apply_right( g )
        # Applying on the circuit's right multiplies the tensor on the
        # left: (u2 (x) I) @ u1^dagger.
        prod = np.kron( u2, np.identity( 2 ) ) @ u1.conj().T
        prod_test = ct.utry
        self.assertTrue( np.allclose( prod, prod_test ) )
        # A second application prepends another (u2 (x) I) factor.
        ct.apply_right( g )
        prod = np.kron( u2, np.identity( 2 ) ) @ prod
        prod_test = ct.utry
        self.assertTrue( np.allclose( prod, prod_test ) )

    def test_apply_right_invalid ( self ):
        u1 = unitary_group.rvs( 8 )
        ct = CircuitTensor( u1, [] )
        # Non-gate input must be rejected.
        self.assertRaises( Exception, ct.apply_right, "a" )

if __name__ == "__main__":
    ut.main()
| 937 | 24.351351 | 60 |
py
|
qfactor
|
qfactor-master/tests/tensors/__init__.py
| 0 | 0 | 0 |
py
|
|
qfactor
|
qfactor-master/tests/tensors/test_toffoli_tensor.py
|
import numpy as np
import unittest as ut
from qfast.perm import calc_permutation_matrix
from qfactor import Gate, optimize
from qfactor.tensors import CircuitTensor
class TestToffoliTensor ( ut.TestCase ):
    """End-to-end check of CircuitTensor on a Toffoli decomposition."""

    def test_toffoli_tensor ( self ):
        toffoli = np.array( [ [ 1, 0, 0, 0, 0, 0, 0, 0 ],
                              [ 0, 1, 0, 0, 0, 0, 0, 0 ],
                              [ 0, 0, 1, 0, 0, 0, 0, 0 ],
                              [ 0, 0, 0, 1, 0, 0, 0, 0 ],
                              [ 0, 0, 0, 0, 1, 0, 0, 0 ],
                              [ 0, 0, 0, 0, 0, 1, 0, 0 ],
                              [ 0, 0, 0, 0, 0, 0, 0, 1 ],
                              [ 0, 0, 0, 0, 0, 0, 1, 0 ] ] )
        # Permutations lifting 2-qubit ops onto qubit pairs (1,2)/(0,2).
        # NOTE(review): calc_permutation_matrix comes from the external
        # qfast package, not from qfactor — this test needs qfast installed.
        p12 = calc_permutation_matrix( 3, (1, 2) )
        p02 = calc_permutation_matrix( 3, (0, 2) )
        cnot = np.array( [ [ 1, 0, 0, 0 ],
                           [ 0, 1, 0, 0 ],
                           [ 0, 0, 0, 1 ],
                           [ 0, 0, 1, 0 ] ] )
        H = (np.sqrt(2)/2) * np.array( [ [ 1, 1 ],
                                         [ 1, -1 ] ] )
        T = np.array( [ [ 1, 0 ], [ 0, np.exp( 1j * np.pi/4 ) ] ] )
        I = np.identity( 2 )
        # Five 2-qubit blocks of a CNOT+H+T Toffoli decomposition.
        u1 = np.kron( I, T.conj().T ) @ cnot @ np.kron( I, H )
        u2 = np.kron( I, T ) @ cnot
        u3 = np.kron( I, T.conj().T ) @ cnot
        u4 = np.kron( I, H @ T ) @ cnot
        u5 = cnot @ np.kron( T, T.conj().T ) @ cnot @ np.kron( I, T )
        circuit = [ Gate( u1, (1, 2) ),
                    Gate( u2, (0, 2) ),
                    Gate( u3, (1, 2) ),
                    Gate( u4, (0, 2) ),
                    Gate( u5, (0, 1) ) ]
        # Lift each block to 3 qubits and verify the decomposition.
        c1 = p12 @ np.kron( u1, I ) @ p12.T
        c2 = p02 @ np.kron( u2, I ) @ p02.T
        c3 = p12 @ np.kron( u3, I ) @ p12.T
        c4 = p02 @ np.kron( u4, I ) @ p02.T
        c5 = np.kron( u5, I )
        self.assertTrue( np.allclose( toffoli, c5 @ c4 @ c3 @ c2 @ c1 ) )
        # Each apply_right must left-multiply the lifted factor onto the
        # running product, starting from toffoli^dagger ...
        ct = CircuitTensor( toffoli, [] )
        self.assertTrue( np.allclose( ct.utry, toffoli.conj().T ) )
        ct.apply_right( circuit[0] )
        self.assertTrue( np.allclose( ct.utry, c1 @ toffoli.conj().T ) )
        ct.apply_right( circuit[1] )
        self.assertTrue( np.allclose( ct.utry, c2 @ c1 @ toffoli.conj().T ) )
        ct.apply_right( circuit[2] )
        self.assertTrue( np.allclose( ct.utry, c3 @ c2 @ c1 @ toffoli.conj().T ) )
        ct.apply_right( circuit[3] )
        self.assertTrue( np.allclose( ct.utry, c4 @ c3 @ c2 @ c1 @ toffoli.conj().T ) )
        ct.apply_right( circuit[4] )
        self.assertTrue( np.allclose( ct.utry, c5 @ c4 @ c3 @ c2 @ c1 @ toffoli.conj().T ) )
        # ... so an exact circuit contracts to the identity,
        self.assertTrue( np.allclose( ct.utry, np.identity( 8 ) ) )
        # and constructing the tensor with the circuit gives the same.
        ct = CircuitTensor( toffoli, circuit )
        self.assertTrue( np.allclose( ct.utry, np.identity( 8 ) ) )

if __name__ == "__main__":
    ut.main()
| 2,881 | 36.921053 | 92 |
py
|
qfactor
|
qfactor-master/tests/gates/__init__.py
| 0 | 0 | 0 |
py
|
|
qfactor
|
qfactor-master/tests/gates/xx/test_constructor.py
|
import scipy
import numpy as np
import unittest as ut
from qfactor import get_distance
from qfactor.gates import XXGate
class TestXXGateConstructor ( ut.TestCase ):
    """Tests for XXGate's constructor validation."""

    def test_xxgate_constructor_invalid ( self ):
        # theta must be a float ...
        self.assertRaises( TypeError, XXGate, 1, 0 )
        self.assertRaises( TypeError, XXGate, "a", 0 )
        self.assertRaises( TypeError, XXGate, [0, 1], 0 )
        # ... and location a valid 2-qubit tuple ...
        self.assertRaises( TypeError, XXGate, np.pi/2, -1 )
        self.assertRaises( TypeError, XXGate, np.pi/2, [0, 1] )
        self.assertRaises( TypeError, XXGate, np.pi/2, (0, 1, 2) )
        self.assertRaises( TypeError, XXGate, np.pi/2, ("a") )
        # location 0 is rejected before fixed=0 would ever be checked.
        self.assertRaises( TypeError, XXGate, np.pi/2, 0, 0 )

    def test_xxgate_constructor_valid ( self ):
        gate = XXGate( np.pi, (0, 1), True )
        X = np.array( [ [ 0, 1 ], [ 1, 0 ] ] )
        XX = np.kron( X, X )
        # XXGate(theta) must equal exp(-i theta/2 X(x)X).
        RXX = scipy.linalg.expm( -1j * np.pi/2 * XX )
        self.assertTrue( get_distance( [ gate ], RXX ) < 1e-15 )
        self.assertTrue( np.array_equal( gate.location, (0,1) ) )
        self.assertEqual( gate.gate_size, 2 )
        self.assertTrue( gate.fixed )

if __name__ == '__main__':
    ut.main()
| 1,188 | 32.027778 | 66 |
py
|
qfactor
|
qfactor-master/tests/gates/xx/test_repr.py
|
import numpy as np
import unittest as ut
from qfactor.gates import XXGate
class TestXXGateRepr ( ut.TestCase ):
def test_xxgate_repr_1 ( self ):
gate = XXGate( 0., (0,1) )
self.assertEqual( repr( gate ), "(0, 1): XX(0.0)" )
def test_xxgate_repr_2 ( self ):
gate = XXGate( 2., (1,3) )
self.assertEqual( repr( gate ), "(1, 3): XX(2.0)" )
if __name__ == '__main__':
ut.main()
| 428 | 19.428571 | 59 |
py
|
qfactor
|
qfactor-master/tests/gates/xx/test_update.py
|
import numpy as np
import unittest as ut
from qfactor.gates import XXGate
class TestXXGateUpdate ( ut.TestCase ):
    """Tests for XXGate.update.

    With slowdown_factor 0, updating against the environment XX(phi)
    must drive theta to exactly -phi, in all four quadrants.
    """

    def test_xxgate_update_1 ( self ):
        env = XXGate( np.pi/3, (0,1) ).utry
        gate = XXGate( 0., (0,1) )
        gate.update( env, 0 )
        self.assertTrue( np.allclose( gate.theta, -np.pi/3 ) )

    def test_xxgate_update_2 ( self ):
        env = XXGate( 2*np.pi/3, (0,1) ).utry
        gate = XXGate( 0., (0,1) )
        gate.update( env, 0 )
        self.assertTrue( np.allclose( gate.theta, -2*np.pi/3 ) )

    def test_xxgate_update_3 ( self ):
        env = XXGate( -2*np.pi/3, (0,1) ).utry
        gate = XXGate( 0., (0,1) )
        gate.update( env, 0 )
        self.assertTrue( np.allclose( gate.theta, 2*np.pi/3 ) )

    def test_xxgate_update_4 ( self ):
        env = XXGate( -np.pi/3, (0,1) ).utry
        gate = XXGate( 0., (0,1) )
        gate.update( env, 0 )
        self.assertTrue( np.allclose( gate.theta, np.pi/3 ) )

if __name__ == '__main__':
    ut.main()
| 1,020 | 26.594595 | 64 |
py
|
qfactor
|
qfactor-master/tests/gates/xx/__init__.py
| 0 | 0 | 0 |
py
|
|
qfactor
|
qfactor-master/tests/gates/rx/test_constructor.py
|
import scipy
import numpy as np
import unittest as ut
from qfactor import get_distance
from qfactor.gates import RxGate
class TestRxGateConstructor ( ut.TestCase ):
    """Tests for RxGate's constructor validation."""

    def test_rxgate_constructor_invalid ( self ):
        # theta must be a float ...
        self.assertRaises( TypeError, RxGate, 1, 0 )
        self.assertRaises( TypeError, RxGate, "a", 0 )
        self.assertRaises( TypeError, RxGate, [0, 1], 0 )
        # ... a negative int location raises ValueError, while
        # multi-qubit or malformed locations raise TypeError ...
        self.assertRaises( ValueError, RxGate, np.pi/2, -1 )
        self.assertRaises( TypeError, RxGate, np.pi/2, [0, 1] )
        self.assertRaises( TypeError, RxGate, np.pi/2, (0, 1) )
        self.assertRaises( TypeError, RxGate, np.pi/2, ("a") )
        # ... and fixed must be a bool.
        self.assertRaises( TypeError, RxGate, np.pi/2, 0, 0 )

    def test_rxgate_constructor_valid ( self ):
        gate = RxGate( np.pi, 0, True )
        X = np.array( [ [ 0, 1 ], [ 1, 0 ] ] )
        # RxGate(theta) must equal exp(-i theta/2 X).
        Rx = scipy.linalg.expm( -1j * np.pi/2 * X )
        self.assertTrue( get_distance( [ gate ], Rx ) < 1e-15 )
        self.assertTrue( np.array_equal( gate.location, (0,) ) )
        self.assertEqual( gate.gate_size, 1 )
        self.assertTrue( gate.fixed )

if __name__ == '__main__':
    ut.main()
| 1,148 | 31.828571 | 64 |
py
|
qfactor
|
qfactor-master/tests/gates/rx/test_repr.py
|
import numpy as np
import unittest as ut
from qfactor.gates import RxGate
class TestRxGateRepr ( ut.TestCase ):
def test_rxgate_repr_1 ( self ):
gate = RxGate( 0., 0 )
self.assertEqual( repr( gate ), "(0,): Rx(0.0)" )
def test_rxgate_repr_2 ( self ):
gate = RxGate( 2., 1 )
self.assertEqual( repr( gate ), "(1,): Rx(2.0)" )
if __name__ == '__main__':
ut.main()
| 416 | 18.857143 | 57 |
py
|
qfactor
|
qfactor-master/tests/gates/rx/test_update.py
|
import numpy as np
import unittest as ut
from qfactor.gates import RxGate
class TestRxGateUpdate ( ut.TestCase ):
    """Tests for RxGate.update.

    With slowdown_factor 0, updating against the environment Rx(phi)
    must drive theta to exactly -phi, in all four quadrants.
    """

    def test_rxgate_update_1 ( self ):
        env = RxGate( np.pi/3, 0 ).utry
        gate = RxGate( 0., 0 )
        gate.update( env, 0 )
        self.assertTrue( np.allclose( gate.theta, -np.pi/3 ) )

    def test_rxgate_update_2 ( self ):
        env = RxGate( 2*np.pi/3, 0 ).utry
        gate = RxGate( 0., 0 )
        gate.update( env, 0 )
        self.assertTrue( np.allclose( gate.theta, -2*np.pi/3 ) )

    def test_rxgate_update_3 ( self ):
        env = RxGate( -2*np.pi/3, 0 ).utry
        gate = RxGate( 0., 0 )
        gate.update( env, 0 )
        self.assertTrue( np.allclose( gate.theta, 2*np.pi/3 ) )

    def test_rxgate_update_4 ( self ):
        env = RxGate( -np.pi/3, 0 ).utry
        gate = RxGate( 0., 0 )
        gate.update( env, 0 )
        self.assertTrue( np.allclose( gate.theta, np.pi/3 ) )

if __name__ == '__main__':
    ut.main()
| 988 | 25.72973 | 64 |
py
|
qfactor
|
qfactor-master/tests/gates/rx/__init__.py
| 0 | 0 | 0 |
py
|
|
qfactor
|
qfactor-master/tests/gates/gate/test_str.py
|
import numpy as np
import unittest as ut
from qfactor.gates import Gate
class TestGateStr ( ut.TestCase ):
    """Tests for Gate's str: "location:utry"."""

    # Toffoli unitary: the 8x8 identity with its last two rows swapped.
    TOFFOLI = np.identity( 8, dtype = np.complex128 )
    TOFFOLI[[6, 7]] = TOFFOLI[[7, 6]]

    def test_gate_str_1 ( self ):
        loc = (0, 1, 2)
        gate = Gate( self.TOFFOLI, loc )
        expected = str( loc ) + ":" + str( self.TOFFOLI )
        self.assertEqual( str( gate ), expected )

    def test_gate_str_2 ( self ):
        loc = (4, 5, 7)
        gate = Gate( self.TOFFOLI, loc )
        expected = str( loc ) + ":" + str( self.TOFFOLI )
        self.assertEqual( str( gate ), expected )

if __name__ == '__main__':
    ut.main()
| 1,198 | 36.46875 | 79 |
py
|
qfactor
|
qfactor-master/tests/gates/gate/test_get_inverse.py
|
import numpy as np
import unittest as ut
from scipy.stats import unitary_group
from qfactor.gates import Gate
class TestGetInverse ( ut.TestCase ):
    """Tests for Gate.get_inverse on a random 3-qubit unitary."""

    def test_get_inverse ( self ):
        gate = Gate( unitary_group.rvs( 8 ), (0, 1, 2) )
        inverse = gate.get_inverse()
        eye = np.identity( 8 )
        # The inverse must cancel the gate from either side.
        for product in ( inverse.utry @ gate.utry, gate.utry @ inverse.utry ):
            self.assertTrue( np.allclose( product, eye ) )

if __name__ == "__main__":
    ut.main()
| 594 | 24.869565 | 64 |
py
|
qfactor
|
qfactor-master/tests/gates/gate/test_get_tensor_format.py
|
import numpy as np
import unittest as ut
from scipy.stats import unitary_group
from qfactor.gates import Gate
class TestGetTensorFormat ( ut.TestCase ):
    """Tests for Gate.get_tensor_format under every compression option.

    A 3-qubit (8x8) unitary reshapes to a rank-6 tensor of 2s; compressing
    the left or right side folds three qubit indices into one axis of 8.
    """

    def test_get_tensor_format ( self ):
        utry = unitary_group.rvs( 8 )
        tensor = Gate( utry, (0, 1, 2) ).get_tensor_format()
        self.assertEqual( len( tensor.shape ), 6 )
        for dim in tensor.shape:
            self.assertEqual( dim, 2 )
        self.assertTrue( np.allclose( np.reshape( tensor, (8, 8) ), utry ) )

    def test_get_tensor_format_left ( self ):
        utry = unitary_group.rvs( 8 )
        gate = Gate( utry, (0, 1, 2) )
        tensor = gate.get_tensor_format( compress_left = True )
        self.assertEqual( len( tensor.shape ), 4 )
        # First axis holds the folded input side; the rest stay qubit-sized.
        self.assertEqual( tensor.shape[0], 8 )
        for dim in tensor.shape[1:]:
            self.assertEqual( dim, 2 )
        self.assertTrue( np.allclose( np.reshape( tensor, (8, 8) ), utry ) )

    def test_get_tensor_format_right ( self ):
        utry = unitary_group.rvs( 8 )
        gate = Gate( utry, (0, 1, 2) )
        tensor = gate.get_tensor_format( compress_right = True )
        self.assertEqual( len( tensor.shape ), 4 )
        # Last axis holds the folded output side; the rest stay qubit-sized.
        self.assertEqual( tensor.shape[-1], 8 )
        for dim in tensor.shape[:-1]:
            self.assertEqual( dim, 2 )
        self.assertTrue( np.allclose( np.reshape( tensor, (8, 8) ), utry ) )

    def test_get_tensor_format_all ( self ):
        utry = unitary_group.rvs( 8 )
        gate = Gate( utry, (0, 1, 2) )
        tensor = gate.get_tensor_format( compress_left = True,
                                         compress_right = True )
        # Fully compressed: back to the plain 8x8 matrix.
        self.assertEqual( tensor.shape, (8, 8) )
        self.assertTrue( np.allclose( tensor, utry ) )

if __name__ == "__main__":
    ut.main()
| 1,887 | 36.76 | 76 |
py
|
qfactor
|
qfactor-master/tests/gates/gate/test_constructor.py
|
import numpy as np
import unittest as ut
from qfactor.gates import Gate
class TestGateConstructor ( ut.TestCase ):
    """Tests for the Gate constructor's argument validation and state."""

    # Toffoli unitary: the 8x8 identity with its last two rows swapped.
    TOFFOLI = np.identity( 8, dtype = np.complex128 )
    TOFFOLI[[6, 7]] = TOFFOLI[[7, 6]]

    # Non-square (9x8) matrix: the Toffoli with its last row repeated.
    INVALID = np.vstack( [ TOFFOLI, TOFFOLI[-1:] ] )

    def test_gate_constructor_invalid ( self ):
        # Each entry pairs the expected exception with the bad arguments.
        bad_calls = [
            ( TypeError, ( 1, (0, 1) ) ),
            ( TypeError, ( np.array( [ 0, 1 ] ), (0, 1) ) ),
            ( TypeError, ( np.array( [ [ [ 0 ] ] ] ), (0, 1) ) ),
            ( TypeError, ( self.TOFFOLI, 1 ) ),
            ( TypeError, ( self.TOFFOLI, ("a", "b") ) ),
            ( TypeError, ( self.TOFFOLI, (1, 1) ) ),
            ( ValueError, ( self.TOFFOLI, (0, 1) ) ),
            ( ValueError, ( self.TOFFOLI, (0, 1, 2, 3) ) ),
            ( TypeError, ( self.TOFFOLI, (0, 1, 2), "a" ) ),
        ]
        for exc, args in bad_calls:
            self.assertRaises( exc, Gate, *args )

        # Right shape, but not unitary.
        non_unitary = np.copy( self.TOFFOLI )
        non_unitary[4][4] = 2112.+0.j
        self.assertRaises( TypeError, Gate, non_unitary, (0, 1, 2) )

        # Not square.
        self.assertRaises( TypeError, Gate, self.INVALID, (0, 1, 2) )

    def test_gate_constructor_valid ( self ):
        gate = Gate( self.TOFFOLI, (0, 1, 2), True )
        self.assertTrue( np.array_equal( gate.utry, self.TOFFOLI ) )
        self.assertTrue( np.array_equal( gate.location, (0, 1, 2) ) )
        self.assertEqual( gate.gate_size, 3 )
        self.assertTrue( gate.fixed )

if __name__ == '__main__':
    ut.main()
| 2,802 | 45.716667 | 79 |
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.