# Source: z3 repository, z3-master/scripts/mk_util.py
############################################
# Copyright (c) 2012 Microsoft Corporation
#
# Auxiliary scripts for generating Makefiles
# and Visual Studio project files.
#
# Author: Leonardo de Moura (leonardo)
############################################
import io
import sys
import os
import re
import getopt
import shutil
from mk_exception import *
import mk_genfile_common
from fnmatch import fnmatch
import sysconfig
import compileall
import subprocess
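# Small helper note: getenv below strips surrounding spaces and quote characters
# from the value, so setting CXX='"g++"' in the environment behaves the same as
# CXX=g++ (illustrative; the stripped character set is exactly ' "\'' below).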
def getenv(name, default):
try:
return os.environ[name].strip(' "\'')
except:
return default
CXX=getenv("CXX", None)
CC=getenv("CC", None)
CPPFLAGS=getenv("CPPFLAGS", "")
CXXFLAGS=getenv("CXXFLAGS", "")
AR=getenv("AR", "ar")
EXAMP_DEBUG_FLAG=''
LDFLAGS=getenv("LDFLAGS", "")
JNI_HOME=getenv("JNI_HOME", None)
OCAMLC=getenv("OCAMLC", "ocamlc")
OCAMLOPT=getenv("OCAMLOPT", "ocamlopt")
OCAML_LIB=getenv("OCAML_LIB", None)
OCAMLFIND=getenv("OCAMLFIND", "ocamlfind")
DOTNET="dotnet"
# Standard install directories relative to PREFIX
INSTALL_BIN_DIR=getenv("Z3_INSTALL_BIN_DIR", "bin")
INSTALL_LIB_DIR=getenv("Z3_INSTALL_LIB_DIR", "lib")
INSTALL_INCLUDE_DIR=getenv("Z3_INSTALL_INCLUDE_DIR", "include")
INSTALL_PKGCONFIG_DIR=getenv("Z3_INSTALL_PKGCONFIG_DIR", os.path.join(INSTALL_LIB_DIR, 'pkgconfig'))
CXX_COMPILERS=['g++', 'clang++']
C_COMPILERS=['gcc', 'clang']
JAVAC=None
JAR=None
PYTHON_PACKAGE_DIR=sysconfig.get_path('purelib')
BUILD_DIR='build'
REV_BUILD_DIR='..'
SRC_DIR='src'
EXAMPLE_DIR='examples'
# Required Components
Z3_DLL_COMPONENT='api_dll'
PATTERN_COMPONENT='pattern'
UTIL_COMPONENT='util'
API_COMPONENT='api'
DOTNET_COMPONENT='dotnet'
DOTNET_CORE_COMPONENT='dotnet'
JAVA_COMPONENT='java'
ML_COMPONENT='ml'
CPP_COMPONENT='cpp'
PYTHON_COMPONENT='python'
#####################
IS_WINDOWS=False
IS_LINUX=False
IS_HURD=False
IS_OSX=False
IS_ARCH_ARM64=False
IS_FREEBSD=False
IS_NETBSD=False
IS_OPENBSD=False
IS_SUNOS=False
IS_CYGWIN=False
IS_CYGWIN_MINGW=False
IS_MSYS2=False
VERBOSE=True
DEBUG_MODE=False
SHOW_CPPS = True
VS_X64 = False
VS_ARM = False
LINUX_X64 = True
ONLY_MAKEFILES = False
Z3PY_SRC_DIR=None
Z3JS_SRC_DIR=None
VS_PROJ = False
TRACE = False
PYTHON_ENABLED=False
DOTNET_CORE_ENABLED=False
DOTNET_KEY_FILE=getenv("Z3_DOTNET_KEY_FILE", None)
ASSEMBLY_VERSION=getenv("Z3_ASSEMBLY_VERSION", None)
JAVA_ENABLED=False
ML_ENABLED=False
PYTHON_INSTALL_ENABLED=False
STATIC_LIB=False
STATIC_BIN=False
VER_MAJOR=None
VER_MINOR=None
VER_BUILD=None
VER_TWEAK=None
PREFIX=sys.prefix
GMP=False
VS_PAR=False
VS_PAR_NUM=8
GPROF=False
GIT_HASH=False
GIT_DESCRIBE=False
SLOW_OPTIMIZE=False
LOG_SYNC=False
SINGLE_THREADED=False
GUARD_CF=False
ALWAYS_DYNAMIC_BASE=False
FPMATH="Default"
FPMATH_FLAGS="-mfpmath=sse -msse -msse2"
FPMATH_ENABLED=getenv("FPMATH_ENABLED", "True")
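# check_output below is a thin wrapper around subprocess: it returns the
# command's stdout decoded with the default encoding and with trailing CR/LF
# stripped, or "" when the command produced no output. Illustrative use
# (hypothetical command):
#   rev = check_output(['git', 'rev-parse', 'HEAD'])  # e.g. '1a2b3c...' or ''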
def check_output(cmd):
out = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
if out != None:
enc = sys.getdefaultencoding()
if enc != None: return out.decode(enc).rstrip('\r\n')
else: return out.rstrip('\r\n')
else:
return ""
def git_hash():
try:
branch = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
r = check_output(['git', 'show-ref', '--abbrev=12', 'refs/heads/%s' % branch])
except:
raise MKException("Failed to retrieve git hash")
ls = r.split(' ')
if len(ls) != 2:
raise MKException("Unexpected git output " + r)
return ls[0]
def is_windows():
return IS_WINDOWS
def is_linux():
return IS_LINUX
def is_hurd():
return IS_HURD
def is_freebsd():
return IS_FREEBSD
def is_netbsd():
return IS_NETBSD
def is_openbsd():
return IS_OPENBSD
def is_sunos():
return IS_SUNOS
def is_osx():
return IS_OSX
def is_cygwin():
return IS_CYGWIN
def is_cygwin_mingw():
return IS_CYGWIN_MINGW
def is_msys2():
return IS_MSYS2
def norm_path(p):
return os.path.expanduser(os.path.normpath(p))
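# which below is a minimal stand-in for the shell's `which`: an argument with a
# directory component is checked directly, otherwise every entry of PATH is
# searched for an executable file. It mirrors the behaviour of shutil.which
# (Python 3.3+) closely enough for the checks done in this script.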
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in getenv("PATH", "").split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
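# TempFile below is a small helper used by the compiler/feature probes further
# down. Typical (illustrative) usage:
#   t = TempFile('tst.cpp'); t.add('int main() { return 0; }\n'); t.commit()
# The file can then be fed to a compiler; __del__ removes it once the TempFile
# object goes out of scope.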
class TempFile:
def __init__(self, name):
try:
self.name = name
self.fname = open(name, 'w')
except:
raise MKException("Failed to create temporary file '%s'" % self.name)
def add(self, s):
self.fname.write(s)
def commit(self):
self.fname.close()
def __del__(self):
self.fname.close()
try:
os.remove(self.name)
except:
pass
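# exec_cmd below runs a command with stdout and stderr silenced and returns its
# exit code, or 1 if the process could not be created. Arguments after the first
# are re-split on spaces, so a flag string such as "-O2 -g" expands into separate
# argv entries, e.g. (hypothetical) exec_cmd(['g++', '-O2 -g', 'tst.cpp']).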
def exec_cmd(cmd):
if isinstance(cmd, str):
cmd = cmd.split(' ')
new_cmd = []
first = True
for e in cmd:
if first:
first = False
new_cmd.append(e)
else:
if e != "":
se = e.split(' ')
if len(se) > 1:
for e2 in se:
if e2 != "":
new_cmd.append(e2)
else:
new_cmd.append(e)
cmd = new_cmd
null = open(os.devnull, 'wb')
try:
return subprocess.call(cmd, stdout=null, stderr=null)
except:
# Failed to create process
return 1
finally:
null.close()
# rm -f fname
def rmf(fname):
if os.path.exists(fname):
os.remove(fname)
def exec_compiler_cmd(cmd):
r = exec_cmd(cmd)
# Windows
rmf('a.exe')
# Unix
rmf('a.out')
# Emscripten
rmf('a.wasm')
rmf('a.worker.js')
return r
def test_cxx_compiler(cc):
if is_verbose():
print("Testing %s..." % cc)
t = TempFile('tst.cpp')
t.add('#include<iostream>\nint main() { return 0; }\n')
t.commit()
return exec_compiler_cmd([cc, CPPFLAGS, CXXFLAGS, 'tst.cpp', LDFLAGS]) == 0
def test_c_compiler(cc):
if is_verbose():
print("Testing %s..." % cc)
t = TempFile('tst.c')
t.add('#include<stdio.h>\nint main() { return 0; }\n')
t.commit()
return exec_compiler_cmd([cc, CPPFLAGS, 'tst.c', LDFLAGS]) == 0
def test_gmp(cc):
if is_verbose():
print("Testing GMP...")
t = TempFile('tstgmp.cpp')
t.add('#include<gmp.h>\nint main() { mpz_t t; mpz_init(t); mpz_clear(t); return 0; }\n')
t.commit()
return exec_compiler_cmd([cc, CPPFLAGS, 'tstgmp.cpp', LDFLAGS, '-lgmp']) == 0
def test_fpmath(cc):
global FPMATH_FLAGS, IS_ARCH_ARM64, IS_OSX
if FPMATH_ENABLED == "False":
FPMATH_FLAGS=""
return "Disabled"
if IS_ARCH_ARM64 and IS_OSX:
FPMATH_FLAGS = ""
return "Disabled-ARM64"
if is_verbose():
print("Testing floating point support...")
t = TempFile('tstsse.cpp')
t.add('int main() { return 42; }\n')
t.commit()
# -Werror is needed because some versions of clang warn about unrecognized
# -m flags.
# TODO(ritave): Safari doesn't allow SIMD WebAssembly extension, add a flag to build script
if exec_compiler_cmd([cc, CPPFLAGS, '-Werror', 'tstsse.cpp', LDFLAGS, '-msse -msse2 -msimd128']) == 0:
FPMATH_FLAGS='-msse -msse2 -msimd128'
return 'SSE2-EMSCRIPTEN'
if exec_compiler_cmd([cc, CPPFLAGS, '-Werror', 'tstsse.cpp', LDFLAGS, '-mfpmath=sse -msse -msse2']) == 0:
FPMATH_FLAGS="-mfpmath=sse -msse -msse2"
return "SSE2-GCC"
elif exec_compiler_cmd([cc, CPPFLAGS, '-Werror', 'tstsse.cpp', LDFLAGS, '-msse -msse2']) == 0:
FPMATH_FLAGS="-msse -msse2"
return "SSE2-CLANG"
elif exec_compiler_cmd([cc, CPPFLAGS, '-Werror', 'tstsse.cpp', LDFLAGS, '-mfpu=vfp -mfloat-abi=hard']) == 0:
FPMATH_FLAGS="-mfpu=vfp -mfloat-abi=hard"
return "ARM-VFP"
else:
FPMATH_FLAGS=""
return "UNKNOWN"
def find_jni_h(path):
for root, dirs, files in os.walk(path):
for f in files:
if f == 'jni.h':
return root
return False
def check_java():
global JNI_HOME
global JAVAC
global JAR
JDK_HOME = getenv('JDK_HOME', None) # we only need to check this locally.
if is_verbose():
print("Finding javac ...")
if JDK_HOME is not None:
if IS_WINDOWS:
JAVAC = os.path.join(JDK_HOME, 'bin', 'javac.exe')
else:
JAVAC = os.path.join(JDK_HOME, 'bin', 'javac')
if not os.path.exists(JAVAC):
raise MKException("Failed to detect javac at '%s/bin'; the environment variable JDK_HOME is probably set to the wrong path." % os.path.join(JDK_HOME))
else:
# Search for javac in the path.
ind = 'javac'
if IS_WINDOWS:
ind = ind + '.exe'
paths = os.getenv('PATH', None)
if paths:
spaths = paths.split(os.pathsep)
for i in range(0, len(spaths)):
cmb = os.path.join(spaths[i], ind)
if os.path.exists(cmb):
JAVAC = cmb
break
if JAVAC is None:
raise MKException('No java compiler in the path, please adjust your PATH or set JDK_HOME to the location of the JDK.')
if is_verbose():
print("Finding jar ...")
if IS_WINDOWS:
JAR = os.path.join(os.path.dirname(JAVAC), 'jar.exe')
else:
JAR = os.path.join(os.path.dirname(JAVAC), 'jar')
if not os.path.exists(JAR):
raise MKException("Failed to detect jar at '%s'; the environment variable JDK_HOME is probably set to the wrong path." % os.path.join(JDK_HOME))
if is_verbose():
print("Testing %s..." % JAVAC)
t = TempFile('Hello.java')
t.add('public class Hello { public static void main(String[] args) { System.out.println("Hello, World"); }}\n')
t.commit()
oo = TempFile('output')
eo = TempFile('errout')
try:
subprocess.call([JAVAC, 'Hello.java', '-verbose', '-source', '1.8', '-target', '1.8' ], stdout=oo.fname, stderr=eo.fname)
oo.commit()
eo.commit()
except:
raise MKException('Found, but failed to run Java compiler at %s' % (JAVAC))
os.remove('Hello.class')
if is_verbose():
print("Finding jni.h...")
if JNI_HOME is not None:
if not os.path.exists(os.path.join(JNI_HOME, 'jni.h')):
raise MKException("Failed to detect jni.h '%s'; the environment variable JNI_HOME is probably set to the wrong path." % os.path.join(JNI_HOME))
else:
# Search for jni.h in the library directories...
t = open('errout', 'r')
open_pat = re.compile(r"\[search path for class files: (.*)\]")
cdirs = []
for line in t:
m = open_pat.match(line)
if m:
libdirs = m.group(1).split(',')
for libdir in libdirs:
q = os.path.dirname(libdir)
if cdirs.count(q) == 0 and len(q) > 0:
cdirs.append(q)
t.close()
# ... plus some heuristic ones.
extra_dirs = []
# The class-library directories usually point into the JRE bundled with the JDK;
# to find the headers we have to go one directory level higher.
for dir in cdirs:
extra_dirs.append(os.path.abspath(os.path.join(dir, '..')))
if IS_OSX: # Apparently Apple knows best where to put stuff...
extra_dirs.append('/System/Library/Frameworks/JavaVM.framework/Headers/')
cdirs[len(cdirs):] = extra_dirs
for dir in cdirs:
q = find_jni_h(dir)
if q is not False:
JNI_HOME = q
if JNI_HOME is None:
raise MKException("Failed to detect jni.h. Possible solution: set JNI_HOME with the path to JDK.")
def test_csc_compiler(c):
t = TempFile('hello.cs')
t.add('public class hello { public static void Main() {} }')
t.commit()
if is_verbose():
print ('Testing %s...' % c)
r = exec_cmd([c, 'hello.cs'])
try:
rmf('hello.cs')
rmf('hello.exe')
except:
pass
return r == 0
def check_dotnet_core():
if not IS_WINDOWS:
return
r = exec_cmd([DOTNET, '--help'])
if r != 0:
raise MKException('Failed testing dotnet. Make sure to install and configure dotnet core utilities')
def check_ml():
t = TempFile('hello.ml')
t.add('print_string "Hello world!\n";;')
t.commit()
if is_verbose():
print ('Testing %s...' % OCAMLC)
r = exec_cmd([OCAMLC, '-o', 'a.out', 'hello.ml'])
if r != 0:
raise MKException('Failed testing ocamlc compiler. Set environment variable OCAMLC with the path to the Ocaml compiler')
if is_verbose():
print ('Testing %s...' % OCAMLOPT)
r = exec_cmd([OCAMLOPT, '-o', 'a.out', 'hello.ml'])
if r != 0:
raise MKException('Failed testing ocamlopt compiler. Set environment variable OCAMLOPT with the path to the Ocaml native compiler. Note that ocamlopt may require flexlink to be in your path.')
try:
rmf('hello.cmi')
rmf('hello.cmo')
rmf('hello.cmx')
rmf('a.out')
rmf('hello.o')
except:
pass
find_ml_lib()
find_ocaml_find()
def find_ocaml_find():
global OCAMLFIND
if is_verbose():
print ("Testing %s..." % OCAMLFIND)
r = exec_cmd([OCAMLFIND, 'printconf'])
if r != 0:
OCAMLFIND = ''
def find_ml_lib():
global OCAML_LIB
if is_verbose():
print ('Finding OCAML_LIB...')
t = TempFile('output')
null = open(os.devnull, 'wb')
try:
subprocess.call([OCAMLC, '-where'], stdout=t.fname, stderr=null)
t.commit()
except:
raise MKException('Failed to find Ocaml library; please set OCAML_LIB')
finally:
null.close()
t = open('output', 'r')
for line in t:
OCAML_LIB = line[:-1]
if is_verbose():
print ('OCAML_LIB=%s' % OCAML_LIB)
t.close()
rmf('output')
return
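# is64 below reports whether we are targeting a 64-bit build: it combines the
# LINUX_X64 flag derived from os.uname() with a check that the running Python
# interpreter itself is 64-bit (sys.maxsize >= 2**32); on SunOS under Python 2
# the interpreter check is skipped.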
def is64():
global LINUX_X64
if is_sunos() and sys.version_info.major < 3:
return LINUX_X64
else:
return LINUX_X64 and sys.maxsize >= 2**32
def check_ar():
if is_verbose():
print("Testing ar...")
if which(AR) is None:
raise MKException('%s (archive tool) was not found' % AR)
def find_cxx_compiler():
global CXX, CXX_COMPILERS
if CXX is not None:
if test_cxx_compiler(CXX):
return CXX
for cxx in CXX_COMPILERS:
if test_cxx_compiler(cxx):
CXX = cxx
return CXX
raise MKException('C++ compiler was not found. Try to set the environment variable CXX with the C++ compiler available in your system.')
def find_c_compiler():
global CC, C_COMPILERS
if CC is not None:
if test_c_compiler(CC):
return CC
for c in C_COMPILERS:
if test_c_compiler(c):
CC = c
return CC
raise MKException('C compiler was not found. Try to set the environment variable CC with the C compiler available in your system.')
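# set_version below fills in VER_MAJOR..VER_TWEAK. When the Z3_ASSEMBLY_VERSION
# environment variable holds a four-component "x.x.x.x" string it overrides the
# arguments; otherwise the arguments are used and, if --git-describe was given,
# VER_TWEAK is replaced by the commit count from `git rev-list --count HEAD`.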
def set_version(major, minor, build, revision):
global ASSEMBLY_VERSION, VER_MAJOR, VER_MINOR, VER_BUILD, VER_TWEAK, GIT_DESCRIBE
# We need to give the assembly a build specific version
# global version overrides local default expression
if ASSEMBLY_VERSION is not None:
versionSplits = ASSEMBLY_VERSION.split('.')
if len(versionSplits) > 3:
VER_MAJOR = versionSplits[0]
VER_MINOR = versionSplits[1]
VER_BUILD = versionSplits[2]
VER_TWEAK = versionSplits[3]
print("Set Assembly Version (BUILD):", VER_MAJOR, VER_MINOR, VER_BUILD, VER_TWEAK)
return
# use parameters to set up version if not provided by script args
VER_MAJOR = major
VER_MINOR = minor
VER_BUILD = build
VER_TWEAK = revision
# update VER_TWEAK based on the git history when --git-describe is used
if GIT_DESCRIBE:
branch = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
VER_TWEAK = int(check_output(['git', 'rev-list', '--count', 'HEAD']))
print("Set Assembly Version (DEFAULT):", VER_MAJOR, VER_MINOR, VER_BUILD, VER_TWEAK)
def get_version():
return (VER_MAJOR, VER_MINOR, VER_BUILD, VER_TWEAK)
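# get_version_string(3) yields "major.minor.build"; any other argument yields
# the full four-component string, e.g. (hypothetical numbers) "4.8.12" versus
# "4.8.12.0".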
def get_version_string(n):
if n == 3:
return "{}.{}.{}".format(VER_MAJOR,VER_MINOR,VER_BUILD)
return "{}.{}.{}.{}".format(VER_MAJOR,VER_MINOR,VER_BUILD,VER_TWEAK)
def build_static_lib():
return STATIC_LIB
def build_static_bin():
return STATIC_BIN
def is_cr_lf(fname):
# Check whether text files use cr/lf
f = open(fname, 'r')
line = f.readline()
f.close()
sz = len(line)
return sz >= 2 and line[sz-2] == '\r' and line[sz-1] == '\n'
# dos2unix in python
# cr/lf --> lf
def dos2unix(fname):
if is_cr_lf(fname):
fin = open(fname, 'r')
fname_new = '%s.new' % fname
fout = open(fname_new, 'w')
for line in fin:
line = line.rstrip('\r\n')
fout.write(line)
fout.write('\n')
fin.close()
fout.close()
shutil.move(fname_new, fname)
if is_verbose():
print("dos2unix '%s'" % fname)
def dos2unix_tree():
for root, dirs, files in os.walk('src'):
for f in files:
dos2unix(os.path.join(root, f))
def check_eol():
if not IS_WINDOWS:
# Linux/OSX/BSD check if the end-of-line is cr/lf
if is_cr_lf('LICENSE.txt'):
if is_verbose():
print("Fixing end of line...")
dos2unix_tree()
if os.name == 'nt':
IS_WINDOWS=True
# Visual Studio already displays the files being compiled
SHOW_CPPS=False
elif os.name == 'posix':
if os.uname()[0] == 'Darwin':
IS_OSX=True
elif os.uname()[0] == 'Linux':
IS_LINUX=True
elif os.uname()[0] == 'GNU':
IS_HURD=True
elif os.uname()[0] == 'FreeBSD':
IS_FREEBSD=True
elif os.uname()[0] == 'NetBSD':
IS_NETBSD=True
elif os.uname()[0] == 'OpenBSD':
IS_OPENBSD=True
elif os.uname()[0] == 'SunOS':
IS_SUNOS=True
elif os.uname()[0][:6] == 'CYGWIN':
IS_CYGWIN=True
if (CC != None and "mingw" in CC):
IS_CYGWIN_MINGW=True
elif os.uname()[0].startswith('MSYS_NT') or os.uname()[0].startswith('MINGW'):
IS_MSYS2=True
if os.uname()[4] == 'x86_64':
LINUX_X64=True
else:
LINUX_X64=False
if os.name == 'posix' and os.uname()[4] == 'arm64':
IS_ARCH_ARM64 = True
def display_help(exit_code):
print("mk_make.py: Z3 Makefile generator\n")
print("This script generates the Makefile for the Z3 theorem prover.")
print("It must be executed from the Z3 root directory.")
print("\nOptions:")
print(" -h, --help display this message.")
print(" -s, --silent do not print verbose messages.")
if not IS_WINDOWS:
print(" -p <dir>, --prefix=<dir> installation prefix (default: %s)." % PREFIX)
else:
print(" --parallel=num use cl option /MP with 'num' parallel processes")
print(" --pypkgdir=<dir> Force a particular Python package directory (default %s)" % PYTHON_PACKAGE_DIR)
print(" -b <subdir>, --build=<subdir> subdirectory where Z3 will be built (default: %s)." % BUILD_DIR)
print(" --githash=hash include the given hash in the binaries.")
print(" --git-describe include the output of 'git describe' in the version information.")
print(" -d, --debug compile Z3 in debug mode.")
print(" -t, --trace enable tracing in release mode.")
if IS_WINDOWS:
print(" --guardcf enable Control Flow Guard runtime checks.")
print(" -x, --x64 create 64 binary when using Visual Studio.")
else:
print(" --x86 force 32-bit x86 build on x64 systems.")
print(" --arm64=<bool> forcearm64 bit build on/off (supported for Darwin).")
print(" -m, --makefiles generate only makefiles.")
if IS_WINDOWS:
print(" -v, --vsproj generate Visual Studio Project Files.")
print(" --optimize generate optimized code during linking.")
print(" --dotnet generate .NET platform bindings.")
print(" --dotnet-key=<file> sign the .NET assembly using the private key in <file>.")
print(" --assembly-version=<x.x.x.x> provide version number for build")
print(" --java generate Java bindings.")
print(" --ml generate OCaml bindings.")
print(" --js generate JScript bindings.")
print(" --python generate Python bindings.")
print(" --staticlib build Z3 static library.")
print(" --staticbin build a statically linked Z3 binary.")
if not IS_WINDOWS:
print(" -g, --gmp use GMP.")
print(" --gprof enable gprof")
print(" --log-sync synchronize access to API log files to enable multi-thread API logging.")
print(" --single-threaded non-thread-safe build")
print("")
print("Some influential environment variables:")
if not IS_WINDOWS:
print(" CXX C++ compiler")
print(" CC C compiler")
print(" LDFLAGS Linker flags, e.g., -L<lib dir> if you have libraries in a non-standard directory")
print(" CPPFLAGS Preprocessor flags, e.g., -I<include dir> if you have header files in a non-standard directory")
print(" CXXFLAGS C++ compiler flags")
print(" JDK_HOME JDK installation directory (only relevant if -j or --java option is provided)")
print(" JNI_HOME JNI bindings directory (only relevant if -j or --java option is provided)")
print(" OCAMLC Ocaml byte-code compiler (only relevant with --ml)")
print(" OCAMLFIND Ocaml find tool (only relevant with --ml)")
print(" OCAMLOPT Ocaml native compiler (only relevant with --ml)")
print(" OCAML_LIB Ocaml library directory (only relevant with --ml)")
print(" Z3_INSTALL_BIN_DIR Install directory for binaries relative to install prefix")
print(" Z3_INSTALL_LIB_DIR Install directory for libraries relative to install prefix")
print(" Z3_INSTALL_INCLUDE_DIR Install directory for header files relative to install prefix")
print(" Z3_INSTALL_PKGCONFIG_DIR Install directory for pkgconfig files relative to install prefix")
exit(exit_code)
# Parse configuration option for mk_make script
def parse_options():
global VERBOSE, DEBUG_MODE, IS_WINDOWS, VS_X64, ONLY_MAKEFILES, SHOW_CPPS, VS_PROJ, TRACE, VS_PAR, VS_PAR_NUM
global DOTNET_CORE_ENABLED, DOTNET_KEY_FILE, ASSEMBLY_VERSION, JAVA_ENABLED, ML_ENABLED, STATIC_LIB, STATIC_BIN, PREFIX, GMP, PYTHON_PACKAGE_DIR, GPROF, GIT_HASH, GIT_DESCRIBE, PYTHON_INSTALL_ENABLED, PYTHON_ENABLED
global LINUX_X64, SLOW_OPTIMIZE, LOG_SYNC, SINGLE_THREADED
global GUARD_CF, ALWAYS_DYNAMIC_BASE, IS_ARCH_ARM64
try:
options, remainder = getopt.gnu_getopt(sys.argv[1:],
'b:df:sxa:hmcvtnp:gj',
['build=', 'debug', 'silent', 'x64', 'arm64=', 'help', 'makefiles', 'showcpp', 'vsproj', 'guardcf',
'trace', 'dotnet', 'dotnet-key=', 'assembly-version=', 'staticlib', 'prefix=', 'gmp', 'java', 'parallel=', 'gprof', 'js',
'githash=', 'git-describe', 'x86', 'ml', 'optimize', 'pypkgdir=', 'python', 'staticbin', 'log-sync', 'single-threaded'])
except:
print("ERROR: Invalid command line option")
display_help(1)
for opt, arg in options:
print('opt = %s, arg = %s' % (opt, arg))
if opt in ('-b', '--build'):
if arg == 'src':
raise MKException('The src directory should not be used to host the Makefile')
set_build_dir(arg)
elif opt in ('-s', '--silent'):
VERBOSE = False
elif opt in ('-d', '--debug'):
DEBUG_MODE = True
elif opt in ('-x', '--x64'):
if not IS_WINDOWS:
raise MKException('x64 compilation mode can only be specified when using Visual Studio')
VS_X64 = True
elif opt in ('--x86'):
LINUX_X64=False
elif opt in ('--arm64'):
IS_ARCH_ARM64 = arg in ('true','on','True','TRUE')
elif opt in ('-h', '--help'):
display_help(0)
elif opt in ('-m', '--makefiles'):
ONLY_MAKEFILES = True
elif opt in ('-c', '--showcpp'):
SHOW_CPPS = True
elif opt in ('-v', '--vsproj'):
VS_PROJ = True
elif opt in ('-t', '--trace'):
TRACE = True
elif opt in ('--dotnet',):
DOTNET_CORE_ENABLED = True
elif opt in ('--dotnet-key'):
DOTNET_KEY_FILE = arg
elif opt in ('--assembly-version'):
ASSEMBLY_VERSION = arg
elif opt in ('--staticlib'):
STATIC_LIB = True
elif opt in ('--staticbin'):
STATIC_BIN = True
elif opt in ('--optimize'):
SLOW_OPTIMIZE = True
elif not IS_WINDOWS and opt in ('-p', '--prefix'):
PREFIX = arg
elif opt in ('--pypkgdir'):
PYTHON_PACKAGE_DIR = arg
elif IS_WINDOWS and opt == '--parallel':
VS_PAR = True
VS_PAR_NUM = int(arg)
elif opt in ('-g', '--gmp'):
GMP = True
elif opt in ('-j', '--java'):
JAVA_ENABLED = True
elif opt == '--gprof':
GPROF = True
elif opt == '--githash':
GIT_HASH=arg
elif opt == '--git-describe':
GIT_DESCRIBE = True
elif opt in ('', '--ml'):
ML_ENABLED = True
elif opt in ('', '--log-sync'):
LOG_SYNC = True
elif opt == '--single-threaded':
SINGLE_THREADED = True
elif opt in ('--python'):
PYTHON_ENABLED = True
PYTHON_INSTALL_ENABLED = True
elif opt == '--guardcf':
GUARD_CF = True
ALWAYS_DYNAMIC_BASE = True # /GUARD:CF requires /DYNAMICBASE
else:
print("ERROR: Invalid command line option '%s'" % opt)
display_help(1)
# Return a dict mapping the base name of each file included via '#include "..."'
# in the given C/C++ file named fname to its original include path.
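# For example (hypothetical line), '#include "util/vector.h"' produces the
# entry { 'vector.h': 'util/vector.h' }, while system includes such as
# '#include <string>' are ignored.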
def extract_c_includes(fname):
result = {}
# We look for well behaved #include directives
std_inc_pat = re.compile(r"[ \t]*#include[ \t]*\"(.*)\"[ \t]*")
system_inc_pat = re.compile(r"[ \t]*#include[ \t]*\<.*\>[ \t]*")
# We should generate an error for any occurrence of #include that does not match the previous pattern.
non_std_inc_pat = re.compile(".*#include.*")
f = io.open(fname, encoding='utf-8', mode='r')
linenum = 1
for line in f:
m1 = std_inc_pat.match(line)
if m1:
root_file_name = m1.group(1)
slash_pos = root_file_name.rfind('/')
if slash_pos >= 0 and root_file_name.find("..") < 0 : #it is a hack for lp include files that behave as continued from "src"
# print(root_file_name)
root_file_name = root_file_name[slash_pos+1:]
result[root_file_name] = m1.group(1)
elif not system_inc_pat.match(line) and non_std_inc_pat.match(line):
raise MKException("Invalid #include directive at '%s':%s" % (fname, line))
linenum = linenum + 1
f.close()
return result
# Given a path dir1/subdir2/subdir3 returns ../../..
def reverse_path(p):
# Filter out empty components (e.g. will have one if path ends in a slash)
l = list(filter(lambda x: len(x) > 0, p.split(os.sep)))
n = len(l)
r = '..'
for i in range(1, n):
r = os.path.join(r, '..')
return r
def mk_dir(d):
if not os.path.exists(d):
os.makedirs(d)
def set_build_dir(d):
global BUILD_DIR, REV_BUILD_DIR
BUILD_DIR = norm_path(d)
REV_BUILD_DIR = reverse_path(d)
def set_z3py_dir(p):
global SRC_DIR, Z3PY_SRC_DIR
p = norm_path(p)
full = os.path.join(SRC_DIR, p)
if not os.path.exists(full):
raise MKException("Python bindings directory '%s' does not exist" % full)
Z3PY_SRC_DIR = full
if VERBOSE:
print("Python bindings directory was detected.")
_UNIQ_ID = 0
def mk_fresh_name(prefix):
global _UNIQ_ID
r = '%s_%s' % (prefix, _UNIQ_ID)
_UNIQ_ID = _UNIQ_ID + 1
return r
_Id = 0
_Components = []
_ComponentNames = set()
_Name2Component = {}
_Processed_Headers = set()
# Return the Component object named name
def get_component(name):
return _Name2Component[name]
def get_components():
return _Components
# Return the directory where the python bindings are located.
def get_z3py_dir():
return Z3PY_SRC_DIR
# Return true if in verbose mode
def is_verbose():
return VERBOSE
def is_java_enabled():
return JAVA_ENABLED
def is_ml_enabled():
return ML_ENABLED
def is_dotnet_core_enabled():
return DOTNET_CORE_ENABLED
def is_python_enabled():
return PYTHON_ENABLED
def is_python_install_enabled():
return PYTHON_INSTALL_ENABLED
def is_compiler(given, expected):
"""
Return True if the 'given' compiler is the expected one.
>>> is_compiler('g++', 'g++')
True
>>> is_compiler('/home/g++', 'g++')
True
>>> is_compiler(os.path.join('home', 'g++'), 'g++')
True
>>> is_compiler('clang++', 'g++')
False
>>> is_compiler(os.path.join('home', 'clang++'), 'clang++')
True
"""
if given == expected:
return True
if len(expected) < len(given):
return given[len(given) - len(expected) - 1] == os.sep and given[len(given) - len(expected):] == expected
return False
def is_CXX_gpp():
return is_compiler(CXX, 'g++')
def is_clang_in_gpp_form(cc):
str = check_output([cc, '--version'])
try:
version_string = str.encode('utf-8')
except:
version_string = str
clang = 'clang'.encode('utf-8')
return version_string.find(clang) != -1
def is_CXX_clangpp():
if is_compiler(CXX, 'g++'):
return is_clang_in_gpp_form(CXX)
return is_compiler(CXX, 'clang++')
def get_files_with_ext(path, ext):
return filter(lambda f: f.endswith(ext), os.listdir(path))
def get_cpp_files(path):
return get_files_with_ext(path,'.cpp')
def get_c_files(path):
return get_files_with_ext(path,'.c')
def get_cs_files(path):
return get_files_with_ext(path,'.cs')
def get_java_files(path):
return get_files_with_ext(path,'.java')
def get_ml_files(path):
return get_files_with_ext(path,'.ml')
def find_all_deps(name, deps):
new_deps = []
for dep in deps:
if dep in _ComponentNames:
if not (dep in new_deps):
new_deps.append(dep)
for dep_dep in get_component(dep).deps:
if not (dep_dep in new_deps):
new_deps.append(dep_dep)
else:
raise MKException("Unknown component '%s' at '%s'." % (dep, name))
return new_deps
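# Component below is the base class for everything the Makefile generator knows
# how to build: each component lives in a subdirectory of src/, stores the
# transitive closure of its dependencies (computed by find_all_deps above), and
# has its source directory mirrored under the build directory.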
class Component:
def __init__(self, name, path, deps):
global BUILD_DIR, SRC_DIR, REV_BUILD_DIR
if name in _ComponentNames:
raise MKException("Component '%s' was already defined." % name)
if path is None:
path = name
self.name = name
path = norm_path(path)
self.path = path
self.deps = find_all_deps(name, deps)
self.build_dir = path
self.src_dir = os.path.join(SRC_DIR, path)
self.to_src_dir = os.path.join(REV_BUILD_DIR, self.src_dir)
def get_link_name(self):
return os.path.join(self.build_dir, self.name) + '$(LIB_EXT)'
# Find fname in the include paths for the given component.
# ownerfile is only used for creating error messages.
# That is, we were looking for fname when processing ownerfile
def find_file(self, fname, ownerfile, orig_include=None):
full_fname = os.path.join(self.src_dir, fname)
# Store all our possible locations
possibilities = set()
# If our file exists in the current directory, then we store it
if os.path.exists(full_fname):
# We cannot return here, as we might have files with the same
# basename, but different include paths
possibilities.add(self)
for dep in self.deps:
c_dep = get_component(dep)
full_fname = os.path.join(c_dep.src_dir, fname)
if os.path.exists(full_fname):
possibilities.add(c_dep)
if possibilities:
# We have ambiguity
if len(possibilities) > 1:
# We expect orig_include to be non-None here, so we can disambiguate
assert orig_include is not None
# Get the original directory name
orig_dir = os.path.dirname(orig_include)
# Iterate through all of the possibilities
for possibility in possibilities:
path = possibility.path.replace("\\","/")
# If we match the suffix of the path ...
if path.endswith(orig_dir):
# ... use our new match
return possibility
# This means we didn't make an exact match ...
#
# We return any one possibility, just to ensure we don't break Z3's
# builds
return possibilities.pop()
raise MKException("Failed to find include file '%s' for '%s' when processing '%s'." % (fname, ownerfile, self.name))
# Display all dependencies of file basename located in the given component directory.
# The result is displayed at out
def add_cpp_h_deps(self, out, basename):
includes = extract_c_includes(os.path.join(self.src_dir, basename))
out.write(os.path.join(self.to_src_dir, basename))
for include, orig_include in includes.items():
owner = self.find_file(include, basename, orig_include)
out.write(' %s.node' % os.path.join(owner.build_dir, include))
# Add a rule for each #include directive in the file basename located at the current component.
def add_rule_for_each_include(self, out, basename):
fullname = os.path.join(self.src_dir, basename)
includes = extract_c_includes(fullname)
for include, orig_include in includes.items():
owner = self.find_file(include, fullname, orig_include)
owner.add_h_rule(out, include)
# Display a Makefile rule for an include file located in the given component directory.
# 'include' is something of the form: ast.h, polynomial.h
# The rule displayed at out is of the form
# ast/ast_pp.h.node : ../src/util/ast_pp.h util/util.h.node ast/ast.h.node
# @echo "done" > ast/ast_pp.h.node
def add_h_rule(self, out, include):
include_src_path = os.path.join(self.to_src_dir, include)
if include_src_path in _Processed_Headers:
return
_Processed_Headers.add(include_src_path)
self.add_rule_for_each_include(out, include)
include_node = '%s.node' % os.path.join(self.build_dir, include)
out.write('%s: ' % include_node)
self.add_cpp_h_deps(out, include)
out.write('\n')
out.write('\t@echo done > %s\n' % include_node)
def add_cpp_rules(self, out, include_defs, cppfile):
self.add_rule_for_each_include(out, cppfile)
objfile = '%s$(OBJ_EXT)' % os.path.join(self.build_dir, os.path.splitext(cppfile)[0])
srcfile = os.path.join(self.to_src_dir, cppfile)
out.write('%s: ' % objfile)
self.add_cpp_h_deps(out, cppfile)
out.write('\n')
if SHOW_CPPS:
out.write('\t@echo %s\n' % os.path.join(self.src_dir, cppfile))
out.write('\t@$(CXX) $(CXXFLAGS) $(%s) $(CXX_OUT_FLAG)%s %s\n' % (include_defs, objfile, srcfile))
def mk_makefile(self, out):
include_defs = mk_fresh_name('includes')
out.write('%s =' % include_defs)
for dep in self.deps:
out.write(' -I%s' % get_component(dep).to_src_dir)
out.write(' -I%s' % os.path.join(REV_BUILD_DIR,"src"))
out.write('\n')
mk_dir(os.path.join(BUILD_DIR, self.build_dir))
if VS_PAR and IS_WINDOWS:
cppfiles = list(get_cpp_files(self.src_dir))
dependencies = set()
for cppfile in cppfiles:
dependencies.add(os.path.join(self.to_src_dir, cppfile))
self.add_rule_for_each_include(out, cppfile)
includes = extract_c_includes(os.path.join(self.src_dir, cppfile))
for include, orig_include in includes.items():
owner = self.find_file(include, cppfile, orig_include)
dependencies.add('%s.node' % os.path.join(owner.build_dir, include))
for cppfile in cppfiles:
out.write('%s$(OBJ_EXT) ' % os.path.join(self.build_dir, os.path.splitext(cppfile)[0]))
out.write(': ')
for dep in dependencies:
out.write(dep)
out.write(' ')
out.write('\n')
out.write('\t@$(CXX) $(CXXFLAGS) /MP%s $(%s)' % (VS_PAR_NUM, include_defs))
for cppfile in cppfiles:
out.write(' ')
out.write(os.path.join(self.to_src_dir, cppfile))
out.write('\n')
out.write('\tmove *.obj %s\n' % self.build_dir)
else:
for cppfile in get_cpp_files(self.src_dir):
self.add_cpp_rules(out, include_defs, cppfile)
# Return true if the component should be included in the all: rule
def main_component(self):
return False
# Return true if the component contains an AssemblyInfo.cs file that needs to be updated.
def has_assembly_info(self):
return False
# Return true if the component needs builder to generate an install_tactics.cpp file
def require_install_tactics(self):
return False
# Return true if the component needs a def file
def require_def_file(self):
return False
# Return true if the component needs builder to generate a mem_initializer.cpp file with mem_initialize() and mem_finalize() functions.
def require_mem_initializer(self):
return False
def mk_install_deps(self, out):
return
def mk_install(self, out):
return
def mk_uninstall(self, out):
return
def is_example(self):
return False
# Invoked when creating a (windows) distribution package using components at build_path, and
# storing them at dist_path
def mk_win_dist(self, build_path, dist_path):
return
def mk_unix_dist(self, build_path, dist_path):
return
# Used to print warnings or errors after mk_make.py is done, so that they
# are not quite as easy to miss.
def final_info(self):
pass
class LibComponent(Component):
def __init__(self, name, path, deps, includes2install):
Component.__init__(self, name, path, deps)
self.includes2install = includes2install
def mk_makefile(self, out):
Component.mk_makefile(self, out)
# generate rule for lib
objs = []
for cppfile in get_cpp_files(self.src_dir):
objfile = '%s$(OBJ_EXT)' % os.path.join(self.build_dir, os.path.splitext(cppfile)[0])
objs.append(objfile)
libfile = '%s$(LIB_EXT)' % os.path.join(self.build_dir, self.name)
out.write('%s:' % libfile)
for obj in objs:
out.write(' ')
out.write(obj)
out.write('\n')
out.write('\t@$(AR) $(AR_FLAGS) $(AR_OUTFLAG)%s' % libfile)
for obj in objs:
out.write(' ')
out.write(obj)
out.write('\n')
out.write('%s: %s\n\n' % (self.name, libfile))
def mk_install_deps(self, out):
return
def mk_install(self, out):
for include in self.includes2install:
MakeRuleCmd.install_files(
out,
os.path.join(self.to_src_dir, include),
os.path.join(INSTALL_INCLUDE_DIR, include)
)
def mk_uninstall(self, out):
for include in self.includes2install:
MakeRuleCmd.remove_installed_files(out, os.path.join(INSTALL_INCLUDE_DIR, include))
def mk_win_dist(self, build_path, dist_path):
mk_dir(os.path.join(dist_path, INSTALL_INCLUDE_DIR))
for include in self.includes2install:
shutil.copy(os.path.join(self.src_dir, include),
os.path.join(dist_path, INSTALL_INCLUDE_DIR, include))
def mk_unix_dist(self, build_path, dist_path):
self.mk_win_dist(build_path, dist_path)
# "Library" containing only .h files. This is just a placeholder for includes files to be installed.
class HLibComponent(LibComponent):
def __init__(self, name, path, includes2install):
LibComponent.__init__(self, name, path, [], includes2install)
def mk_makefile(self, out):
return
# Auxiliary function for sort_components
def comp_components(c1, c2):
id1 = get_component(c1).id
id2 = get_component(c2).id
return id2 - id1
# Sort components based on (reverse) definition time
def sort_components(cnames):
return sorted(cnames, key=lambda c: get_component(c).id, reverse=True)
class ExeComponent(Component):
def __init__(self, name, exe_name, path, deps, install):
Component.__init__(self, name, path, deps)
if exe_name is None:
exe_name = name
self.exe_name = exe_name
self.install = install
def mk_makefile(self, out):
Component.mk_makefile(self, out)
# generate rule for exe
exefile = '%s$(EXE_EXT)' % self.exe_name
out.write('%s:' % exefile)
deps = sort_components(self.deps)
objs = []
for cppfile in get_cpp_files(self.src_dir):
objfile = '%s$(OBJ_EXT)' % os.path.join(self.build_dir, os.path.splitext(cppfile)[0])
objs.append(objfile)
for obj in objs:
out.write(' ')
out.write(obj)
for dep in deps:
c_dep = get_component(dep)
out.write(' ' + c_dep.get_link_name())
out.write('\n')
extra_opt = '-static' if not IS_WINDOWS and STATIC_BIN else ''
out.write('\t$(LINK) %s $(LINK_OUT_FLAG)%s $(LINK_FLAGS)' % (extra_opt, exefile))
for obj in objs:
out.write(' ')
out.write(obj)
for dep in deps:
c_dep = get_component(dep)
out.write(' ' + c_dep.get_link_name())
out.write(' $(LINK_EXTRA_FLAGS)\n')
out.write('%s: %s\n\n' % (self.name, exefile))
def require_install_tactics(self):
return ('tactic' in self.deps) and ('cmd_context' in self.deps)
def require_mem_initializer(self):
return True
# All executables (to be installed) are included in the all: rule
def main_component(self):
return self.install
def mk_install_deps(self, out):
if self.install:
exefile = '%s$(EXE_EXT)' % self.exe_name
out.write('%s' % exefile)
def mk_install(self, out):
if self.install:
exefile = '%s$(EXE_EXT)' % self.exe_name
MakeRuleCmd.install_files(out, exefile, os.path.join(INSTALL_BIN_DIR, exefile))
def mk_uninstall(self, out):
if self.install:
exefile = '%s$(EXE_EXT)' % self.exe_name
MakeRuleCmd.remove_installed_files(out, os.path.join(INSTALL_BIN_DIR, exefile))
def mk_win_dist(self, build_path, dist_path):
if self.install:
mk_dir(os.path.join(dist_path, INSTALL_BIN_DIR))
shutil.copy('%s.exe' % os.path.join(build_path, self.exe_name),
'%s.exe' % os.path.join(dist_path, INSTALL_BIN_DIR, self.exe_name))
def mk_unix_dist(self, build_path, dist_path):
if self.install:
mk_dir(os.path.join(dist_path, INSTALL_BIN_DIR))
shutil.copy(os.path.join(build_path, self.exe_name),
os.path.join(dist_path, INSTALL_BIN_DIR, self.exe_name))
class ExtraExeComponent(ExeComponent):
def __init__(self, name, exe_name, path, deps, install):
ExeComponent.__init__(self, name, exe_name, path, deps, install)
def main_component(self):
return False
def require_mem_initializer(self):
return False
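# get_so_ext below maps the host OS reported by os.uname() to the shared-library
# suffix used in the generated rules: 'dylib' on macOS, 'so' on Linux/GNU and the
# BSDs, and 'dll' on Cygwin/MSYS2; unrecognised systems fall back to 'dll'.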
def get_so_ext():
sysname = os.uname()[0]
if sysname == 'Darwin':
return 'dylib'
elif sysname == 'Linux' or sysname == 'GNU' or sysname == 'FreeBSD' or sysname == 'NetBSD' or sysname == 'OpenBSD':
return 'so'
elif sysname == 'CYGWIN' or sysname.startswith('MSYS_NT') or sysname.startswith('MINGW'):
return 'dll'
else:
assert(False)
return 'dll'
class DLLComponent(Component):
def __init__(self, name, dll_name, path, deps, export_files, reexports, install, static, staging_link=None):
Component.__init__(self, name, path, deps)
if dll_name is None:
dll_name = name
self.dll_name = dll_name
self.export_files = export_files
self.reexports = reexports
self.install = install
self.static = static
self.staging_link = staging_link # link a copy of the shared object into this directory on build
def get_link_name(self):
if self.static:
return os.path.join(self.build_dir, self.name) + '$(LIB_EXT)'
else:
return self.name + '$(SO_EXT)'
def dll_file(self):
"""
Return file name of component suitable for use in a Makefile
"""
return '%s$(SO_EXT)' % self.dll_name
def install_path(self):
"""
Return install location of component (relative to prefix)
suitable for use in a Makefile
"""
return os.path.join(INSTALL_LIB_DIR, self.dll_file())
def mk_makefile(self, out):
Component.mk_makefile(self, out)
# generate rule for $(SO_EXT)
out.write('%s:' % self.dll_file())
deps = sort_components(self.deps)
objs = []
for cppfile in get_cpp_files(self.src_dir):
objfile = '%s$(OBJ_EXT)' % os.path.join(self.build_dir, os.path.splitext(cppfile)[0])
objs.append(objfile)
# Explicitly include obj files of reexport. This fixes problems with exported symbols on Linux and OSX.
for reexport in self.reexports:
reexport = get_component(reexport)
for cppfile in get_cpp_files(reexport.src_dir):
objfile = '%s$(OBJ_EXT)' % os.path.join(reexport.build_dir, os.path.splitext(cppfile)[0])
objs.append(objfile)
for obj in objs:
out.write(' ')
out.write(obj)
for dep in deps:
if dep not in self.reexports:
c_dep = get_component(dep)
out.write(' ' + c_dep.get_link_name())
out.write('\n')
out.write('\t$(LINK) $(SLINK_OUT_FLAG)%s $(SLINK_FLAGS)' % self.dll_file())
for obj in objs:
out.write(' ')
out.write(obj)
for dep in deps:
if dep not in self.reexports:
c_dep = get_component(dep)
out.write(' ' + c_dep.get_link_name())
out.write(' $(SLINK_EXTRA_FLAGS)')
if IS_WINDOWS:
out.write(' /DEF:%s.def' % os.path.join(self.to_src_dir, self.name))
if self.staging_link:
if IS_WINDOWS:
out.write('\n\tcopy %s %s' % (self.dll_file(), self.staging_link))
elif IS_OSX:
out.write('\n\tcp %s %s' % (self.dll_file(), self.staging_link))
else:
out.write('\n\tln -f -s %s %s' % (os.path.join(reverse_path(self.staging_link), self.dll_file()), self.staging_link))
out.write('\n')
if self.static:
if IS_WINDOWS:
libfile = '%s-static$(LIB_EXT)' % self.dll_name
else:
libfile = '%s$(LIB_EXT)' % self.dll_name
self.mk_static(out, libfile)
out.write('%s: %s %s\n\n' % (self.name, self.dll_file(), libfile))
else:
out.write('%s: %s\n\n' % (self.name, self.dll_file()))
def mk_static(self, out, libfile):
# generate rule for lib
objs = []
for cppfile in get_cpp_files(self.src_dir):
objfile = '%s$(OBJ_EXT)' % os.path.join(self.build_dir, os.path.splitext(cppfile)[0])
objs.append(objfile)
# we have to "reexport" all object files
for dep in self.deps:
dep = get_component(dep)
for cppfile in get_cpp_files(dep.src_dir):
objfile = '%s$(OBJ_EXT)' % os.path.join(dep.build_dir, os.path.splitext(cppfile)[0])
objs.append(objfile)
out.write('%s:' % libfile)
for obj in objs:
out.write(' ')
out.write(obj)
out.write('\n')
out.write('\t@$(AR) $(AR_FLAGS) $(AR_OUTFLAG)%s' % libfile)
for obj in objs:
out.write(' ')
out.write(obj)
out.write('\n')
def main_component(self):
return self.install
def require_install_tactics(self):
return ('tactic' in self.deps) and ('cmd_context' in self.deps)
def require_mem_initializer(self):
return True
def require_def_file(self):
return IS_WINDOWS and self.export_files
def mk_install_deps(self, out):
out.write('%s$(SO_EXT)' % self.dll_name)
if self.static:
out.write(' %s$(LIB_EXT)' % self.dll_name)
def mk_install(self, out):
if self.install:
MakeRuleCmd.install_files(out, self.dll_file(), self.install_path())
if self.static:
libfile = '%s$(LIB_EXT)' % self.dll_name
MakeRuleCmd.install_files(out, libfile, os.path.join(INSTALL_LIB_DIR, libfile))
def mk_uninstall(self, out):
MakeRuleCmd.remove_installed_files(out, self.install_path())
libfile = '%s$(LIB_EXT)' % self.dll_name
MakeRuleCmd.remove_installed_files(out, os.path.join(INSTALL_LIB_DIR, libfile))
def mk_win_dist(self, build_path, dist_path):
if self.install:
mk_dir(os.path.join(dist_path, INSTALL_BIN_DIR))
shutil.copy('%s.dll' % os.path.join(build_path, self.dll_name),
'%s.dll' % os.path.join(dist_path, INSTALL_BIN_DIR, self.dll_name))
shutil.copy('%s.pdb' % os.path.join(build_path, self.dll_name),
'%s.pdb' % os.path.join(dist_path, INSTALL_BIN_DIR, self.dll_name))
shutil.copy('%s.lib' % os.path.join(build_path, self.dll_name),
'%s.lib' % os.path.join(dist_path, INSTALL_BIN_DIR, self.dll_name))
def mk_unix_dist(self, build_path, dist_path):
if self.install:
mk_dir(os.path.join(dist_path, INSTALL_BIN_DIR))
so = get_so_ext()
shutil.copy('%s.%s' % (os.path.join(build_path, self.dll_name), so),
'%s.%s' % (os.path.join(dist_path, INSTALL_BIN_DIR, self.dll_name), so))
shutil.copy('%s.a' % os.path.join(build_path, self.dll_name),
'%s.a' % os.path.join(dist_path, INSTALL_BIN_DIR, self.dll_name))
class JsComponent(Component):
def __init__(self):
Component.__init__(self, "js", None, [])
def main_component(self):
return False
def mk_win_dist(self, build_path, dist_path):
return
def mk_unix_dist(self, build_path, dist_path):
return
def mk_makefile(self, out):
return
class PythonComponent(Component):
def __init__(self, name, libz3Component):
assert isinstance(libz3Component, DLLComponent)
global PYTHON_ENABLED
Component.__init__(self, name, None, [])
self.libz3Component = libz3Component
def main_component(self):
return False
def mk_win_dist(self, build_path, dist_path):
if not is_python_enabled():
return
src = os.path.join(build_path, 'python', 'z3')
dst = os.path.join(dist_path, INSTALL_BIN_DIR, 'python', 'z3')
if os.path.exists(dst):
shutil.rmtree(dst)
shutil.copytree(src, dst)
def mk_unix_dist(self, build_path, dist_path):
self.mk_win_dist(build_path, dist_path)
def mk_makefile(self, out):
return
class PythonInstallComponent(Component):
def __init__(self, name, libz3Component):
assert isinstance(libz3Component, DLLComponent)
global PYTHON_INSTALL_ENABLED
Component.__init__(self, name, None, [])
self.pythonPkgDir = None
self.in_prefix_install = True
self.libz3Component = libz3Component
if not PYTHON_INSTALL_ENABLED:
return
if IS_WINDOWS:
# Installing under Windows doesn't make sense here, because the install-prefix
# mechanism is not used on Windows.
# CMW: It makes perfectly good sense; the prefix is Python's sys.prefix,
# i.e., something along the lines of C:\Python\... At the moment we are not
# sure whether we would want to install libz3.dll into that directory though.
PYTHON_INSTALL_ENABLED = False
return
else:
PYTHON_INSTALL_ENABLED = True
if IS_WINDOWS or IS_OSX:
# Use full path that is possibly outside of install prefix
self.in_prefix_install = PYTHON_PACKAGE_DIR.startswith(PREFIX)
self.pythonPkgDir = strip_path_prefix(PYTHON_PACKAGE_DIR, PREFIX)
else:
# Use path inside the prefix (should be the normal case on Linux)
# CMW: Also normal on *BSD?
if not PYTHON_PACKAGE_DIR.startswith(PREFIX):
raise MKException(('The python package directory ({}) must live ' +
'under the install prefix ({}) to install the python bindings.' +
'Use --pypkgdir and --prefix to set the python package directory ' +
'and install prefix respectively. Note that the python package ' +
'directory does not need to exist and will be created if ' +
'necessary during install.').format(
PYTHON_PACKAGE_DIR,
PREFIX))
self.pythonPkgDir = strip_path_prefix(PYTHON_PACKAGE_DIR, PREFIX)
self.in_prefix_install = True
if self.in_prefix_install:
assert not os.path.isabs(self.pythonPkgDir)
def final_info(self):
if not PYTHON_PACKAGE_DIR.startswith(PREFIX) and PYTHON_INSTALL_ENABLED:
print("Warning: The detected Python package directory (%s) is not "
"in the installation prefix (%s). This can lead to a broken "
"Python API installation. Use --pypkgdir= to change the "
"Python package directory." % (PYTHON_PACKAGE_DIR, PREFIX))
def main_component(self):
return False
def mk_install(self, out):
if not is_python_install_enabled():
return
MakeRuleCmd.make_install_directory(out,
os.path.join(self.pythonPkgDir, 'z3'),
in_prefix=self.in_prefix_install)
MakeRuleCmd.make_install_directory(out,
os.path.join(self.pythonPkgDir, 'z3', 'lib'),
in_prefix=self.in_prefix_install)
# Sym-link or copy libz3 into python package directory
if IS_WINDOWS or IS_OSX:
MakeRuleCmd.install_files(out,
self.libz3Component.dll_file(),
os.path.join(self.pythonPkgDir, 'z3', 'lib',
self.libz3Component.dll_file()),
in_prefix=self.in_prefix_install
)
else:
# Create symbolic link to save space.
# It's important that this symbolic link be relative (rather
# than absolute) so that the install is relocatable (needed for
# staged installs that use DESTDIR).
MakeRuleCmd.create_relative_symbolic_link(out,
self.libz3Component.install_path(),
os.path.join(self.pythonPkgDir, 'z3', 'lib',
self.libz3Component.dll_file()
),
)
MakeRuleCmd.install_files(out, os.path.join('python', 'z3', '*.py'),
os.path.join(self.pythonPkgDir, 'z3'),
in_prefix=self.in_prefix_install)
if sys.version >= "3":
pythonPycacheDir = os.path.join(self.pythonPkgDir, 'z3', '__pycache__')
MakeRuleCmd.make_install_directory(out,
pythonPycacheDir,
in_prefix=self.in_prefix_install)
MakeRuleCmd.install_files(out,
os.path.join('python', 'z3', '__pycache__', '*.pyc'),
pythonPycacheDir,
in_prefix=self.in_prefix_install)
else:
MakeRuleCmd.install_files(out,
os.path.join('python', 'z3', '*.pyc'),
os.path.join(self.pythonPkgDir,'z3'),
in_prefix=self.in_prefix_install)
if PYTHON_PACKAGE_DIR != sysconfig.get_path('purelib'):
out.write('\t@echo Z3Py was installed at \'%s\', make sure this directory is in your PYTHONPATH environment variable.' % PYTHON_PACKAGE_DIR)
def mk_uninstall(self, out):
if not is_python_install_enabled():
return
MakeRuleCmd.remove_installed_files(out,
os.path.join(self.pythonPkgDir,
self.libz3Component.dll_file()),
in_prefix=self.in_prefix_install
)
MakeRuleCmd.remove_installed_files(out,
os.path.join(self.pythonPkgDir, 'z3', '*.py'),
in_prefix=self.in_prefix_install)
MakeRuleCmd.remove_installed_files(out,
os.path.join(self.pythonPkgDir, 'z3', '*.pyc'),
in_prefix=self.in_prefix_install)
MakeRuleCmd.remove_installed_files(out,
os.path.join(self.pythonPkgDir, 'z3', '__pycache__', '*.pyc'),
in_prefix=self.in_prefix_install
)
MakeRuleCmd.remove_installed_files(out,
os.path.join(self.pythonPkgDir, 'z3', 'lib',
self.libz3Component.dll_file()))
def mk_makefile(self, out):
return
def set_key_file(self):
global DOTNET_KEY_FILE
# We need to give the assembly a strong name so that it
# can be installed into the GAC with ``make install``
if not DOTNET_KEY_FILE is None:
self.key_file = DOTNET_KEY_FILE
if not self.key_file is None:
if os.path.isfile(self.key_file):
self.key_file = os.path.abspath(self.key_file)
elif os.path.isfile(os.path.join(self.src_dir, self.key_file)):
self.key_file = os.path.abspath(os.path.join(self.src_dir, self.key_file))
else:
print("Keyfile '%s' could not be found; %s.dll will be unsigned." % (self.key_file, self.dll_name))
self.key_file = None
# build for dotnet core
class DotNetDLLComponent(Component):
def __init__(self, name, dll_name, path, deps, assembly_info_dir, default_key_file):
Component.__init__(self, name, path, deps)
if dll_name is None:
dll_name = name
if assembly_info_dir is None:
assembly_info_dir = "."
self.dll_name = dll_name
self.assembly_info_dir = assembly_info_dir
self.key_file = default_key_file
def mk_makefile(self, out):
if not is_dotnet_core_enabled():
return
cs_fp_files = []
for cs_file in get_cs_files(self.src_dir):
cs_fp_files.append(os.path.join(self.to_src_dir, cs_file))
if self.assembly_info_dir != '.':
for cs_file in get_cs_files(os.path.join(self.src_dir, self.assembly_info_dir)):
cs_fp_files.append(os.path.join(self.to_src_dir, self.assembly_info_dir, cs_file))
dllfile = '%s.dll' % self.dll_name
out.write('%s: %s$(SO_EXT)' % (dllfile, get_component(Z3_DLL_COMPONENT).dll_name))
for cs_file in cs_fp_files:
out.write(' ')
out.write(cs_file)
out.write('\n')
set_key_file(self)
key = ""
if not self.key_file is None:
key = "<AssemblyOriginatorKeyFile>%s</AssemblyOriginatorKeyFile>" % self.key_file
key += "\n<SignAssembly>true</SignAssembly>"
version = get_version_string(4)
print("Version output to csproj:", version)
core_csproj_str = r"""<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>netstandard1.4</TargetFramework>
<LangVersion>8.0</LangVersion>
<DefineConstants>$(DefineConstants);DOTNET_CORE</DefineConstants>
<DebugType>full</DebugType>
<AssemblyName>Microsoft.Z3</AssemblyName>
<OutputType>Library</OutputType>
<PackageId>Microsoft.Z3</PackageId>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<RuntimeFrameworkVersion>1.0.4</RuntimeFrameworkVersion>
<Version>%s</Version>
<GeneratePackageOnBuild>true</GeneratePackageOnBuild>
<Authors>Microsoft</Authors>
<Company>Microsoft</Company>
<EnableDefaultCompileItems>false</EnableDefaultCompileItems>
<Description>Z3 is a satisfiability modulo theories solver from Microsoft Research.</Description>
<Copyright>Copyright Microsoft Corporation. All rights reserved.</Copyright>
<PackageTags>smt constraint solver theorem prover</PackageTags>
%s
</PropertyGroup>
<ItemGroup>
<Compile Include="..\%s\*.cs;*.cs" Exclude="bin\**;obj\**;**\*.xproj;packages\**" />
</ItemGroup>
</Project>""" % (version, key, self.to_src_dir)
mk_dir(os.path.join(BUILD_DIR, 'dotnet'))
csproj = os.path.join('dotnet', 'z3.csproj')
with open(os.path.join(BUILD_DIR, csproj), 'w') as ous:
ous.write(core_csproj_str)
dotnetCmdLine = [DOTNET, "build", csproj]
dotnetCmdLine.extend(['-c'])
if DEBUG_MODE:
dotnetCmdLine.extend(['Debug'])
else:
dotnetCmdLine.extend(['Release'])
path = os.path.join(os.path.abspath(BUILD_DIR), ".")
dotnetCmdLine.extend(['-o', "\"%s\"" % path])
MakeRuleCmd.write_cmd(out, ' '.join(dotnetCmdLine))
out.write('\n')
out.write('%s: %s\n\n' % (self.name, dllfile))
def main_component(self):
return is_dotnet_core_enabled()
def has_assembly_info(self):
# TBD: is this required for dotnet core given that version numbers are in z3.csproj file?
return False
def mk_win_dist(self, build_path, dist_path):
if is_dotnet_core_enabled():
mk_dir(os.path.join(dist_path, INSTALL_BIN_DIR))
shutil.copy('%s.dll' % os.path.join(build_path, self.dll_name),
'%s.dll' % os.path.join(dist_path, INSTALL_BIN_DIR, self.dll_name))
shutil.copy('%s.pdb' % os.path.join(build_path, self.dll_name),
'%s.pdb' % os.path.join(dist_path, INSTALL_BIN_DIR, self.dll_name))
shutil.copy('%s.xml' % os.path.join(build_path, self.dll_name),
'%s.xml' % os.path.join(dist_path, INSTALL_BIN_DIR, self.dll_name))
shutil.copy('%s.deps.json' % os.path.join(build_path, self.dll_name),
'%s.deps.json' % os.path.join(dist_path, INSTALL_BIN_DIR, self.dll_name))
if DEBUG_MODE:
shutil.copy('%s.pdb' % os.path.join(build_path, self.dll_name),
'%s.pdb' % os.path.join(dist_path, INSTALL_BIN_DIR, self.dll_name))
def mk_unix_dist(self, build_path, dist_path):
if is_dotnet_core_enabled():
mk_dir(os.path.join(dist_path, INSTALL_BIN_DIR))
shutil.copy('%s.dll' % os.path.join(build_path, self.dll_name),
'%s.dll' % os.path.join(dist_path, INSTALL_BIN_DIR, self.dll_name))
shutil.copy('%s.xml' % os.path.join(build_path, self.dll_name),
'%s.xml' % os.path.join(dist_path, INSTALL_BIN_DIR, self.dll_name))
shutil.copy('%s.deps.json' % os.path.join(build_path, self.dll_name),
'%s.deps.json' % os.path.join(dist_path, INSTALL_BIN_DIR, self.dll_name))
def mk_install_deps(self, out):
pass
def mk_install(self, out):
pass
def mk_uninstall(self, out):
pass
class JavaDLLComponent(Component):
def __init__(self, name, dll_name, package_name, manifest_file, path, deps):
Component.__init__(self, name, path, deps)
if dll_name is None:
dll_name = name
self.dll_name = dll_name
self.package_name = package_name
self.manifest_file = manifest_file
self.install = not is_windows()
def mk_makefile(self, out):
global JAVAC
global JAR
if is_java_enabled():
mk_dir(os.path.join(BUILD_DIR, 'api', 'java', 'classes'))
dllfile = '%s$(SO_EXT)' % self.dll_name
out.write('libz3java$(SO_EXT): libz3$(SO_EXT) %s\n' % os.path.join(self.to_src_dir, 'Native.cpp'))
t = '\t$(CXX) $(CXXFLAGS) $(CXX_OUT_FLAG)api/java/Native$(OBJ_EXT) -I"%s" -I"%s/PLATFORM" -I%s %s/Native.cpp\n' % (JNI_HOME, JNI_HOME, get_component('api').to_src_dir, self.to_src_dir)
if IS_OSX:
t = t.replace('PLATFORM', 'darwin')
elif is_linux():
t = t.replace('PLATFORM', 'linux')
elif is_hurd():
t = t.replace('PLATFORM', 'hurd')
elif IS_FREEBSD:
t = t.replace('PLATFORM', 'freebsd')
elif IS_NETBSD:
t = t.replace('PLATFORM', 'netbsd')
elif IS_OPENBSD:
t = t.replace('PLATFORM', 'openbsd')
elif IS_SUNOS:
t = t.replace('PLATFORM', 'SunOS')
elif IS_CYGWIN:
t = t.replace('PLATFORM', 'cygwin')
elif IS_MSYS2:
t = t.replace('PLATFORM', 'win32')
else:
t = t.replace('PLATFORM', 'win32')
out.write(t)
if IS_WINDOWS: # On Windows, CL creates a .lib file to link against.
out.write('\t$(SLINK) $(SLINK_OUT_FLAG)libz3java$(SO_EXT) $(SLINK_FLAGS) %s$(OBJ_EXT) libz3$(LIB_EXT)\n' %
os.path.join('api', 'java', 'Native'))
elif IS_OSX and IS_ARCH_ARM64:
out.write('\t$(SLINK) $(SLINK_OUT_FLAG)libz3java$(SO_EXT) $(SLINK_FLAGS) -arch arm64 %s$(OBJ_EXT) libz3$(SO_EXT)\n' %
os.path.join('api', 'java', 'Native'))
else:
out.write('\t$(SLINK) $(SLINK_OUT_FLAG)libz3java$(SO_EXT) $(SLINK_FLAGS) %s$(OBJ_EXT) libz3$(SO_EXT)\n' %
os.path.join('api', 'java', 'Native'))
out.write('%s.jar: libz3java$(SO_EXT) ' % self.package_name)
deps = ''
for jfile in get_java_files(self.src_dir):
deps += ('%s ' % os.path.join(self.to_src_dir, jfile))
for jfile in get_java_files(os.path.join(self.src_dir, "enumerations")):
deps += '%s ' % os.path.join(self.to_src_dir, 'enumerations', jfile)
out.write(deps)
out.write('\n')
#if IS_WINDOWS:
JAVAC = '"%s"' % JAVAC
JAR = '"%s"' % JAR
t = ('\t%s -source 1.8 -target 1.8 %s.java -d %s\n' % (JAVAC, os.path.join(self.to_src_dir, 'enumerations', '*'), os.path.join('api', 'java', 'classes')))
out.write(t)
t = ('\t%s -source 1.8 -target 1.8 -cp %s %s.java -d %s\n' % (JAVAC,
os.path.join('api', 'java', 'classes'),
os.path.join(self.to_src_dir, '*'),
os.path.join('api', 'java', 'classes')))
out.write(t)
out.write('\t%s cfm %s.jar %s -C %s .\n' % (JAR, self.package_name,
os.path.join(self.to_src_dir, 'manifest'),
os.path.join('api', 'java', 'classes')))
out.write('java: %s.jar\n\n' % self.package_name)
def main_component(self):
return is_java_enabled()
def mk_win_dist(self, build_path, dist_path):
if JAVA_ENABLED:
mk_dir(os.path.join(dist_path, INSTALL_BIN_DIR))
shutil.copy('%s.jar' % os.path.join(build_path, self.package_name),
'%s.jar' % os.path.join(dist_path, INSTALL_BIN_DIR, self.package_name))
shutil.copy(os.path.join(build_path, 'libz3java.dll'),
os.path.join(dist_path, INSTALL_BIN_DIR, 'libz3java.dll'))
shutil.copy(os.path.join(build_path, 'libz3java.lib'),
os.path.join(dist_path, INSTALL_BIN_DIR, 'libz3java.lib'))
def mk_unix_dist(self, build_path, dist_path):
if JAVA_ENABLED:
mk_dir(os.path.join(dist_path, INSTALL_BIN_DIR))
shutil.copy('%s.jar' % os.path.join(build_path, self.package_name),
'%s.jar' % os.path.join(dist_path, INSTALL_BIN_DIR, self.package_name))
so = get_so_ext()
shutil.copy(os.path.join(build_path, 'libz3java.%s' % so),
os.path.join(dist_path, INSTALL_BIN_DIR, 'libz3java.%s' % so))
def mk_install(self, out):
if is_java_enabled() and self.install:
dllfile = '%s$(SO_EXT)' % self.dll_name
MakeRuleCmd.install_files(out, dllfile, os.path.join(INSTALL_LIB_DIR, dllfile))
jarfile = '{}.jar'.format(self.package_name)
MakeRuleCmd.install_files(out, jarfile, os.path.join(INSTALL_LIB_DIR, jarfile))
def mk_uninstall(self, out):
if is_java_enabled() and self.install:
dllfile = '%s$(SO_EXT)' % self.dll_name
MakeRuleCmd.remove_installed_files(out, os.path.join(INSTALL_LIB_DIR, dllfile))
jarfile = '{}.jar'.format(self.package_name)
MakeRuleCmd.remove_installed_files(out, os.path.join(INSTALL_LIB_DIR, jarfile))
class MLComponent(Component):
def __init__(self, name, lib_name, path, deps):
Component.__init__(self, name, path, deps)
if lib_name is None:
lib_name = name
self.lib_name = lib_name
self.modules = ["z3enums", "z3native", "z3"] # dependencies in this order!
self.stubs = "z3native_stubs"
self.sub_dir = os.path.join('api', 'ml')
self.destdir = ""
self.ldconf = ""
# Calling _init_ocamlfind_paths() is postponed to later because
# OCAMLFIND hasn't been checked yet.
def _install_bindings(self):
# FIXME: Depending on global state is gross. We can't pre-compute this
# in the constructor because we haven't tested for ocamlfind yet
return OCAMLFIND != ''
def _init_ocamlfind_paths(self):
"""
Initialises self.destdir and self.ldconf
Do not call this from the MLComponent constructor because OCAMLFIND
has not been checked at that point
"""
if self.destdir != "" and self.ldconf != "":
# Initialisation already done
return
# Use Ocamlfind to get the default destdir and ldconf path
self.destdir = check_output([OCAMLFIND, 'printconf', 'destdir'])
if self.destdir == "":
raise MKException('Failed to get OCaml destdir')
if not os.path.isdir(self.destdir):
raise MKException('The destdir reported by {ocamlfind} ({destdir}) does not exist'.format(ocamlfind=OCAMLFIND, destdir=self.destdir))
self.ldconf = check_output([OCAMLFIND, 'printconf', 'ldconf'])
if self.ldconf == "":
raise MKException('Failed to get OCaml ldconf path')
def final_info(self):
if not self._install_bindings():
print("WARNING: Could not find ocamlfind utility. OCaml bindings will not be installed")
def mk_makefile(self, out):
if is_ml_enabled():
CP_CMD = 'cp'
if IS_WINDOWS:
CP_CMD='copy'
OCAML_FLAGS = ''
if DEBUG_MODE:
OCAML_FLAGS += '-g'
if OCAMLFIND:
OCAMLCF = OCAMLFIND + ' ' + 'ocamlc -package zarith' + ' ' + OCAML_FLAGS
OCAMLOPTF = OCAMLFIND + ' ' + 'ocamlopt -package zarith' + ' ' + OCAML_FLAGS
else:
OCAMLCF = OCAMLC + ' ' + OCAML_FLAGS
OCAMLOPTF = OCAMLOPT + ' ' + OCAML_FLAGS
src_dir = self.to_src_dir
mk_dir(os.path.join(BUILD_DIR, self.sub_dir))
api_src = get_component(API_COMPONENT).to_src_dir
# remove /GL and -std=c++17; the ocaml tools don't like them.
if IS_WINDOWS:
out.write('CXXFLAGS_OCAML=$(CXXFLAGS:/GL=)\n')
else:
out.write('CXXFLAGS_OCAML=$(subst -std=c++17,,$(CXXFLAGS))\n')
substitutions = { 'VERSION': "{}.{}.{}.{}".format(VER_MAJOR, VER_MINOR, VER_BUILD, VER_TWEAK) }
configure_file(os.path.join(self.src_dir, 'META.in'),
os.path.join(BUILD_DIR, self.sub_dir, 'META'),
substitutions)
stubsc = os.path.join(src_dir, self.stubs + '.c')
stubso = os.path.join(self.sub_dir, self.stubs) + '$(OBJ_EXT)'
base_dll_name = get_component(Z3_DLL_COMPONENT).dll_name
if STATIC_LIB:
z3link = 'z3-static'
z3linkdep = base_dll_name + '-static$(LIB_EXT)'
out.write('%s: %s\n' % (z3linkdep, base_dll_name + '$(LIB_EXT)'))
out.write('\tcp $< $@\n')
else:
z3link = 'z3'
z3linkdep = base_dll_name + '$(SO_EXT)'
out.write('%s: %s %s\n' % (stubso, stubsc, z3linkdep))
out.write('\t%s -ccopt "$(CXXFLAGS_OCAML) -I %s -I %s -I %s $(CXX_OUT_FLAG)%s" -c %s\n' %
(OCAMLCF, OCAML_LIB, api_src, src_dir, stubso, stubsc))
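            # Compile the OCaml modules in dependency order (z3enums, z3native, z3).
            # When a module has no hand-written .mli, an interface is inferred first
            # with 'ocamlc -i' and compiled before the implementation.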
cmos = ''
for m in self.modules:
ml = os.path.join(src_dir, m + '.ml')
cmo = os.path.join(self.sub_dir, m + '.cmo')
existing_mli = os.path.join(src_dir, m + '.mli')
mli = os.path.join(self.sub_dir, m + '.mli')
cmi = os.path.join(self.sub_dir, m + '.cmi')
out.write('%s: %s %s\n' % (cmo, ml, cmos))
if (os.path.exists(existing_mli[3:])):
out.write('\t%s %s %s\n' % (CP_CMD, existing_mli, mli))
else:
out.write('\t%s -i -I %s -c %s > %s\n' % (OCAMLCF, self.sub_dir, ml, mli))
out.write('\t%s -I %s -o %s -c %s\n' % (OCAMLCF, self.sub_dir, cmi, mli))
out.write('\t%s -I %s -o %s -c %s\n' % (OCAMLCF, self.sub_dir, cmo, ml))
cmos = cmos + cmo + ' '
cmxs = ''
for m in self.modules:
ff = os.path.join(src_dir, m + '.ml')
ft = os.path.join(self.sub_dir, m + '.cmx')
out.write('%s: %s %s %s\n' % (ft, ff, cmos, cmxs))
out.write('\t%s -I %s -o %s -c %s\n' % (OCAMLOPTF, self.sub_dir, ft, ff))
cmxs = cmxs + ' ' + ft
OCAMLMKLIB = 'ocamlmklib'
LIBZ3 = '-l' + z3link + ' -lstdc++'
if is_cygwin() and not(is_cygwin_mingw()):
LIBZ3 = z3linkdep
LIBZ3 = LIBZ3 + ' ' + ' '.join(map(lambda x: '-cclib ' + x, LDFLAGS.split()))
stubs_install_path = '$$(%s printconf destdir)/stublibs' % OCAMLFIND
if not STATIC_LIB:
loadpath = '-ccopt -L' + stubs_install_path
dllpath = '-dllpath ' + stubs_install_path
LIBZ3 = LIBZ3 + ' ' + loadpath + ' ' + dllpath
if DEBUG_MODE and not(is_cygwin()):
# Some ocamlmklib's don't like -g; observed on cygwin, but may be others as well.
OCAMLMKLIB += ' -g'
z3mls = os.path.join(self.sub_dir, 'z3ml')
LIBZ3ML = ''
if STATIC_LIB:
LIBZ3ML = '-oc ' + os.path.join(self.sub_dir, 'z3ml-static')
out.write('%s.cma: %s %s %s\n' % (z3mls, cmos, stubso, z3linkdep))
out.write('\t%s -o %s %s -I %s -L. %s %s %s\n' % (OCAMLMKLIB, z3mls, LIBZ3ML, self.sub_dir, stubso, cmos, LIBZ3))
out.write('%s.cmxa: %s %s %s %s.cma\n' % (z3mls, cmxs, stubso, z3linkdep, z3mls))
out.write('\t%s -o %s %s -I %s -L. %s %s %s\n' % (OCAMLMKLIB, z3mls, LIBZ3ML, self.sub_dir, stubso, cmxs, LIBZ3))
out.write('%s.cmxs: %s.cmxa\n' % (z3mls, z3mls))
out.write('\t%s -linkall -shared -o %s.cmxs -I . -I %s %s.cmxa\n' % (OCAMLOPTF, z3mls, self.sub_dir, z3mls))
out.write('\n')
out.write('ml: %s.cma %s.cmxa %s.cmxs\n' % (z3mls, z3mls, z3mls))
if IS_OSX:
out.write('\tinstall_name_tool -id %s/libz3.dylib libz3.dylib\n' % (stubs_install_path))
out.write('\tinstall_name_tool -change libz3.dylib %s/libz3.dylib api/ml/dllz3ml.so\n' % (stubs_install_path))
out.write('\n')
if IS_WINDOWS:
out.write('ocamlfind_install: ')
self.mk_install_deps(out)
out.write('\n')
self.mk_install(out)
out.write('\n')
out.write('ocamlfind_uninstall:\n')
self.mk_uninstall(out)
out.write('\n')
# The following three functions may be out of date.
def mk_install_deps(self, out):
if is_ml_enabled() and self._install_bindings():
out.write(get_component(Z3_DLL_COMPONENT).dll_name + '$(SO_EXT) ')
out.write(os.path.join(self.sub_dir, 'META '))
out.write(os.path.join(self.sub_dir, 'z3ml.cma '))
out.write(os.path.join(self.sub_dir, 'z3ml.cmxa '))
out.write(os.path.join(self.sub_dir, 'z3ml.cmxs '))
def mk_install(self, out):
if is_ml_enabled() and self._install_bindings():
self._init_ocamlfind_paths()
in_prefix = self.destdir.startswith(PREFIX)
maybe_stripped_destdir = strip_path_prefix(self.destdir, PREFIX)
# Note that when doing a staged install with DESTDIR that modifying
# OCaml's ``ld.conf`` may fail. Therefore packagers will need to
# make their packages modify it manually at package install time
# as opposed to ``make install`` time.
MakeRuleCmd.make_install_directory(out,
maybe_stripped_destdir,
in_prefix=in_prefix)
out.write('\t@{ocamlfind} install -ldconf $(DESTDIR){ldconf} -destdir $(DESTDIR){ocaml_destdir} Z3 {metafile}'.format(
ldconf=self.ldconf,
ocamlfind=OCAMLFIND,
ocaml_destdir=self.destdir,
metafile=os.path.join(self.sub_dir, 'META')))
for m in self.modules:
mli = os.path.join(self.src_dir, m) + '.mli'
if os.path.exists(mli):
out.write(' ' + os.path.join(self.to_src_dir, m) + '.mli')
else:
out.write(' ' + os.path.join(self.sub_dir, m) + '.mli')
out.write(' ' + os.path.join(self.sub_dir, m) + '.cmi')
out.write(' ' + os.path.join(self.sub_dir, m) + '.cmx')
out.write(' %s' % ((os.path.join(self.sub_dir, 'libz3ml$(LIB_EXT)'))))
out.write(' %s' % ((os.path.join(self.sub_dir, 'z3ml$(LIB_EXT)'))))
out.write(' %s' % ((os.path.join(self.sub_dir, 'z3ml.cma'))))
out.write(' %s' % ((os.path.join(self.sub_dir, 'z3ml.cmxa'))))
out.write(' %s' % ((os.path.join(self.sub_dir, 'z3ml.cmxs'))))
out.write(' %s' % ((os.path.join(self.sub_dir, 'dllz3ml'))))
if is_windows() or is_cygwin_mingw() or is_msys2():
out.write('.dll')
else:
out.write('.so') # .so also on OSX!
out.write('\n')
def mk_uninstall(self, out):
if is_ml_enabled() and self._install_bindings():
self._init_ocamlfind_paths()
out.write('\t@{ocamlfind} remove -ldconf $(DESTDIR){ldconf} -destdir $(DESTDIR){ocaml_destdir} Z3\n'.format(
ldconf=self.ldconf,
ocamlfind=OCAMLFIND,
ocaml_destdir=self.destdir))
def main_component(self):
return is_ml_enabled()
class ExampleComponent(Component):
def __init__(self, name, path):
Component.__init__(self, name, path, [])
self.ex_dir = os.path.join(EXAMPLE_DIR, self.path)
self.to_ex_dir = os.path.join(REV_BUILD_DIR, self.ex_dir)
def is_example(self):
return True
class CppExampleComponent(ExampleComponent):
def __init__(self, name, path):
ExampleComponent.__init__(self, name, path)
def compiler(self):
return "$(CXX)"
def src_files(self):
return get_cpp_files(self.ex_dir)
def mk_makefile(self, out):
dll_name = get_component(Z3_DLL_COMPONENT).dll_name
dll = '%s$(SO_EXT)' % dll_name
objfiles = ''
for cppfile in self.src_files():
objfile = '%s$(OBJ_EXT)' % (cppfile[:cppfile.rfind('.')])
objfiles = objfiles + ('%s ' % objfile)
            out.write('%s: %s\n' % (objfile, os.path.join(self.to_ex_dir, cppfile)))
out.write('\t%s $(CXXFLAGS) $(OS_DEFINES) $(EXAMP_DEBUG_FLAG) $(CXX_OUT_FLAG)%s $(LINK_FLAGS)' % (self.compiler(), objfile))
# Add include dir components
out.write(' -I%s' % get_component(API_COMPONENT).to_src_dir)
out.write(' -I%s' % get_component(CPP_COMPONENT).to_src_dir)
out.write(' %s' % os.path.join(self.to_ex_dir, cppfile))
out.write('\n')
exefile = '%s$(EXE_EXT)' % self.name
out.write('%s: %s %s\n' % (exefile, dll, objfiles))
out.write('\t$(LINK) $(LINK_OUT_FLAG)%s $(LINK_FLAGS) %s ' % (exefile, objfiles))
if IS_WINDOWS:
out.write('%s.lib' % dll_name)
else:
out.write(dll)
out.write(' $(LINK_EXTRA_FLAGS)\n')
out.write('_ex_%s: %s\n\n' % (self.name, exefile))
class CExampleComponent(CppExampleComponent):
def __init__(self, name, path):
CppExampleComponent.__init__(self, name, path)
def compiler(self):
return "$(CC)"
def src_files(self):
return get_c_files(self.ex_dir)
def mk_makefile(self, out):
dll_name = get_component(Z3_DLL_COMPONENT).dll_name
dll = '%s$(SO_EXT)' % dll_name
objfiles = ''
for cfile in self.src_files():
objfile = '%s$(OBJ_EXT)' % (cfile[:cfile.rfind('.')])
objfiles = objfiles + ('%s ' % objfile)
            out.write('%s: %s\n' % (objfile, os.path.join(self.to_ex_dir, cfile)))
out.write('\t%s $(CFLAGS) $(OS_DEFINES) $(EXAMP_DEBUG_FLAG) $(C_OUT_FLAG)%s $(LINK_FLAGS)' % (self.compiler(), objfile))
out.write(' -I%s' % get_component(API_COMPONENT).to_src_dir)
out.write(' %s' % os.path.join(self.to_ex_dir, cfile))
out.write('\n')
exefile = '%s$(EXE_EXT)' % self.name
out.write('%s: %s %s\n' % (exefile, dll, objfiles))
out.write('\t$(LINK) $(LINK_OUT_FLAG)%s $(LINK_FLAGS) %s ' % (exefile, objfiles))
if IS_WINDOWS:
out.write('%s.lib' % dll_name)
else:
out.write(dll)
out.write(' $(LINK_EXTRA_FLAGS)\n')
out.write('_ex_%s: %s\n\n' % (self.name, exefile))
class DotNetExampleComponent(ExampleComponent):
def __init__(self, name, path):
ExampleComponent.__init__(self, name, path)
def is_example(self):
return is_dotnet_core_enabled()
def mk_makefile(self, out):
if is_dotnet_core_enabled():
proj_name = 'dotnet_example.csproj'
out.write('_ex_%s:' % self.name)
for csfile in get_cs_files(self.ex_dir):
out.write(' ')
out.write(os.path.join(self.to_ex_dir, csfile))
mk_dir(os.path.join(BUILD_DIR, 'dotnet_example'))
csproj = os.path.join('dotnet_example', proj_name)
if VS_X64:
platform = 'x64'
elif VS_ARM:
platform = 'ARM'
else:
platform = 'x86'
dotnet_proj_str = r"""<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>netcoreapp2.0</TargetFramework>
<PlatformTarget>%s</PlatformTarget>
</PropertyGroup>
<ItemGroup>
<Compile Include="..\%s/*.cs" />
<Reference Include="Microsoft.Z3">
<HintPath>..\Microsoft.Z3.dll</HintPath>
</Reference>
</ItemGroup>
</Project>""" % (platform, self.to_ex_dir)
with open(os.path.join(BUILD_DIR, csproj), 'w') as ous:
ous.write(dotnet_proj_str)
out.write('\n')
dotnetCmdLine = [DOTNET, "build", csproj]
dotnetCmdLine.extend(['-c'])
if DEBUG_MODE:
dotnetCmdLine.extend(['Debug'])
else:
dotnetCmdLine.extend(['Release'])
MakeRuleCmd.write_cmd(out, ' '.join(dotnetCmdLine))
out.write('\n')
class JavaExampleComponent(ExampleComponent):
def __init__(self, name, path):
ExampleComponent.__init__(self, name, path)
def is_example(self):
return JAVA_ENABLED
def mk_makefile(self, out):
if JAVA_ENABLED:
pkg = get_component(JAVA_COMPONENT).package_name + '.jar'
out.write('JavaExample.class: %s' % (pkg))
            for jfile in get_java_files(self.ex_dir):
                out.write(' %s' % os.path.join(self.to_ex_dir, jfile))
            out.write('\n')
out.write('\t%s -cp %s ' % (JAVAC, pkg))
win_ex_dir = self.to_ex_dir
for javafile in get_java_files(self.ex_dir):
out.write(' ')
out.write(os.path.join(win_ex_dir, javafile))
out.write(' -d .\n')
out.write('_ex_%s: JavaExample.class\n\n' % (self.name))
class MLExampleComponent(ExampleComponent):
def __init__(self, name, path):
ExampleComponent.__init__(self, name, path)
def is_example(self):
return ML_ENABLED
def mk_makefile(self, out):
if ML_ENABLED:
out.write('ml_example.byte: api/ml/z3ml.cma')
for mlfile in get_ml_files(self.ex_dir):
out.write(' %s' % os.path.join(self.to_ex_dir, mlfile))
out.write('\n')
out.write('\tocamlfind %s ' % OCAMLC)
if DEBUG_MODE:
out.write('-g ')
out.write('-custom -o ml_example.byte -package zarith -I api/ml -cclib "-L. -lpthread -lstdc++ -lz3" -linkpkg z3ml.cma')
for mlfile in get_ml_files(self.ex_dir):
out.write(' %s/%s' % (self.to_ex_dir, mlfile))
out.write('\n')
out.write('ml_example$(EXE_EXT): api/ml/z3ml.cmxa')
for mlfile in get_ml_files(self.ex_dir):
out.write(' %s' % os.path.join(self.to_ex_dir, mlfile))
out.write('\n')
out.write('\tocamlfind %s ' % OCAMLOPT)
if DEBUG_MODE:
out.write('-g ')
out.write('-o ml_example$(EXE_EXT) -package zarith -I api/ml -cclib "-L. -lpthread -lstdc++ -lz3" -linkpkg z3ml.cmxa')
for mlfile in get_ml_files(self.ex_dir):
out.write(' %s/%s' % (self.to_ex_dir, mlfile))
out.write('\n')
out.write('_ex_%s: ml_example.byte ml_example$(EXE_EXT)\n\n' % self.name)
debug_opt = '-g ' if DEBUG_MODE else ''
if STATIC_LIB:
opam_z3_opts = '-thread -package z3-static -linkpkg'
ml_post_install_tests = [
(OCAMLC, 'ml_example_static.byte'),
(OCAMLC + ' -custom', 'ml_example_static_custom.byte'),
(OCAMLOPT, 'ml_example_static$(EXE_EXT)')
]
else:
opam_z3_opts = '-thread -package z3 -linkpkg'
ml_post_install_tests = [
(OCAMLC, 'ml_example_shared.byte'),
(OCAMLC + ' -custom', 'ml_example_shared_custom.byte'),
(OCAMLOPT, 'ml_example_shared$(EXE_EXT)')
]
for ocaml_compiler, testname in ml_post_install_tests:
out.write(testname + ':')
for mlfile in get_ml_files(self.ex_dir):
out.write(' %s' % os.path.join(self.to_ex_dir, mlfile))
out.write('\n')
out.write('\tocamlfind %s -o %s %s %s ' % (ocaml_compiler, debug_opt, testname, opam_z3_opts))
for mlfile in get_ml_files(self.ex_dir):
out.write(' %s/%s' % (self.to_ex_dir, mlfile))
out.write('\n')
if STATIC_LIB:
out.write('_ex_ml_example_post_install: ml_example_static.byte ml_example_static_custom.byte ml_example_static$(EXE_EXT)\n')
else:
out.write('_ex_ml_example_post_install: ml_example_shared.byte ml_example_shared_custom.byte ml_example_shared$(EXE_EXT)\n')
out.write('\n')
class PythonExampleComponent(ExampleComponent):
def __init__(self, name, path):
ExampleComponent.__init__(self, name, path)
    # Python examples are just placeholders; we simply copy the *.py files when mk_makefile is invoked.
    # We don't need to include them in the :examples rule.
def mk_makefile(self, out):
full = os.path.join(EXAMPLE_DIR, self.path)
for py in filter(lambda f: f.endswith('.py'), os.listdir(full)):
shutil.copyfile(os.path.join(full, py), os.path.join(BUILD_DIR, 'python', py))
if is_verbose():
print("Copied Z3Py example '%s' to '%s'" % (py, os.path.join(BUILD_DIR, 'python')))
out.write('_ex_%s: \n\n' % self.name)
def mk_win_dist(self, build_path, dist_path):
full = os.path.join(EXAMPLE_DIR, self.path)
py = 'example.py'
shutil.copyfile(os.path.join(full, py),
os.path.join(dist_path, INSTALL_BIN_DIR, 'python', py))
def mk_unix_dist(self, build_path, dist_path):
self.mk_win_dist(build_path, dist_path)
def reg_component(name, c):
global _Id, _Components, _ComponentNames, _Name2Component
c.id = _Id
_Id = _Id + 1
_Components.append(c)
_ComponentNames.add(name)
_Name2Component[name] = c
if VERBOSE:
print("New component: '%s'" % name)
def add_lib(name, deps=[], path=None, includes2install=[]):
c = LibComponent(name, path, deps, includes2install)
reg_component(name, c)
def add_clib(name, deps=[], path=None, includes2install=[]):
c = CLibComponent(name, path, deps, includes2install)
reg_component(name, c)
def add_hlib(name, path=None, includes2install=[]):
c = HLibComponent(name, path, includes2install)
reg_component(name, c)
def add_exe(name, deps=[], path=None, exe_name=None, install=True):
c = ExeComponent(name, exe_name, path, deps, install)
reg_component(name, c)
def add_extra_exe(name, deps=[], path=None, exe_name=None, install=True):
c = ExtraExeComponent(name, exe_name, path, deps, install)
reg_component(name, c)
def add_dll(name, deps=[], path=None, dll_name=None, export_files=[], reexports=[], install=True, static=False, staging_link=None):
c = DLLComponent(name, dll_name, path, deps, export_files, reexports, install, static, staging_link)
reg_component(name, c)
return c
def add_dot_net_core_dll(name, deps=[], path=None, dll_name=None, assembly_info_dir=None, default_key_file=None):
c = DotNetDLLComponent(name, dll_name, path, deps, assembly_info_dir, default_key_file)
reg_component(name, c)
def add_java_dll(name, deps=[], path=None, dll_name=None, package_name=None, manifest_file=None):
c = JavaDLLComponent(name, dll_name, package_name, manifest_file, path, deps)
reg_component(name, c)
def add_python(libz3Component):
name = 'python'
reg_component(name, PythonComponent(name, libz3Component))
def add_js():
reg_component('js', JsComponent())
def add_python_install(libz3Component):
name = 'python_install'
reg_component(name, PythonInstallComponent(name, libz3Component))
def add_ml_lib(name, deps=[], path=None, lib_name=None):
c = MLComponent(name, lib_name, path, deps)
reg_component(name, c)
def add_cpp_example(name, path=None):
c = CppExampleComponent(name, path)
reg_component(name, c)
def add_c_example(name, path=None):
c = CExampleComponent(name, path)
reg_component(name, c)
def add_dotnet_example(name, path=None):
c = DotNetExampleComponent(name, path)
reg_component(name, c)
def add_java_example(name, path=None):
c = JavaExampleComponent(name, path)
reg_component(name, c)
def add_ml_example(name, path=None):
c = MLExampleComponent(name, path)
reg_component(name, c)
def add_z3py_example(name, path=None):
c = PythonExampleComponent(name, path)
reg_component(name, c)
def mk_config():
if ONLY_MAKEFILES:
return
config = open(os.path.join(BUILD_DIR, 'config.mk'), 'w')
global CXX, CC, GMP, GUARD_CF, STATIC_BIN, GIT_HASH, CPPFLAGS, CXXFLAGS, LDFLAGS, EXAMP_DEBUG_FLAG, FPMATH_FLAGS, LOG_SYNC, SINGLE_THREADED, IS_ARCH_ARM64
if IS_WINDOWS:
CXXFLAGS = '/nologo /Zi /D WIN32 /D _WINDOWS /EHsc /GS /Gd /std:c++17'
config.write(
'CC=cl\n'
'CXX=cl\n'
'CXX_OUT_FLAG=/Fo\n'
'C_OUT_FLAG=/Fo\n'
'OBJ_EXT=.obj\n'
'LIB_EXT=.lib\n'
'AR=lib\n'
'AR_OUTFLAG=/OUT:\n'
'EXE_EXT=.exe\n'
'LINK=cl\n'
'LINK_OUT_FLAG=/Fe\n'
'SO_EXT=.dll\n'
'SLINK=cl\n'
'SLINK_OUT_FLAG=/Fe\n'
'OS_DEFINES=/D _WINDOWS\n')
extra_opt = ''
link_extra_opt = ''
if LOG_SYNC:
extra_opt = '%s /DZ3_LOG_SYNC' % extra_opt
if SINGLE_THREADED:
extra_opt = '%s /DSINGLE_THREAD' % extra_opt
if GIT_HASH:
extra_opt = ' %s /D Z3GITHASH=%s' % (extra_opt, GIT_HASH)
if GUARD_CF:
extra_opt = ' %s /guard:cf' % extra_opt
link_extra_opt = ' %s /GUARD:CF' % link_extra_opt
if STATIC_BIN:
static_opt = '/MT'
else:
static_opt = '/MD'
maybe_disable_dynamic_base = '/DYNAMICBASE' if ALWAYS_DYNAMIC_BASE else '/DYNAMICBASE:NO'
if DEBUG_MODE:
static_opt = static_opt + 'd'
config.write(
'AR_FLAGS=/nologo\n'
'LINK_FLAGS=/nologo %s\n'
'SLINK_FLAGS=/nologo /LDd\n' % static_opt)
if VS_X64:
config.write(
'CXXFLAGS=/c %s /W3 /WX- /Od /Oy- /D _DEBUG /D Z3DEBUG /D _CONSOLE /D _TRACE /Gm- /RTC1 %s %s\n' % (CXXFLAGS, extra_opt, static_opt))
config.write(
'LINK_EXTRA_FLAGS=/link /DEBUG /MACHINE:X64 /SUBSYSTEM:CONSOLE /INCREMENTAL:NO /STACK:8388608 /OPT:REF /OPT:ICF /TLBID:1 /DYNAMICBASE /NXCOMPAT %s\n'
'SLINK_EXTRA_FLAGS=/link /DEBUG /MACHINE:X64 /SUBSYSTEM:WINDOWS /INCREMENTAL:NO /STACK:8388608 /OPT:REF /OPT:ICF /TLBID:1 %s %s\n' % (link_extra_opt, maybe_disable_dynamic_base, link_extra_opt))
elif VS_ARM:
print("ARM on VS is unsupported")
exit(1)
else:
config.write(
'CXXFLAGS=/c %s /W3 /WX- /Od /Oy- /D _DEBUG /D Z3DEBUG /D _CONSOLE /D _TRACE /Gm- /RTC1 /arch:SSE2 %s %s\n' % (CXXFLAGS, extra_opt, static_opt))
config.write(
'LINK_EXTRA_FLAGS=/link /DEBUG /MACHINE:X86 /SUBSYSTEM:CONSOLE /INCREMENTAL:NO /STACK:8388608 /OPT:REF /OPT:ICF /TLBID:1 /DYNAMICBASE /NXCOMPAT %s\n'
'SLINK_EXTRA_FLAGS=/link /DEBUG /MACHINE:X86 /SUBSYSTEM:WINDOWS /INCREMENTAL:NO /STACK:8388608 /OPT:REF /OPT:ICF /TLBID:1 %s %s\n' % (link_extra_opt, maybe_disable_dynamic_base, link_extra_opt))
else:
# Windows Release mode
LTCG=' /LTCG' if SLOW_OPTIMIZE else ''
GL = ' /GL' if SLOW_OPTIMIZE else ''
config.write(
'AR_FLAGS=/nologo %s\n'
'LINK_FLAGS=/nologo %s\n'
'SLINK_FLAGS=/nologo /LD\n' % (LTCG, static_opt))
if TRACE:
extra_opt = '%s /D _TRACE ' % extra_opt
if VS_X64:
config.write(
'CXXFLAGS=/c%s %s /W3 /WX- /O2 /D _EXTERNAL_RELEASE /D NDEBUG /D _LIB /D UNICODE /Gm- /GF /Gy /TP %s %s\n' % (GL, CXXFLAGS, extra_opt, static_opt))
config.write(
'LINK_EXTRA_FLAGS=/link%s /profile /MACHINE:X64 /SUBSYSTEM:CONSOLE /STACK:8388608 %s\n'
'SLINK_EXTRA_FLAGS=/link%s /profile /MACHINE:X64 /SUBSYSTEM:WINDOWS /STACK:8388608 %s\n' % (LTCG, link_extra_opt, LTCG, link_extra_opt))
elif VS_ARM:
print("ARM on VS is unsupported")
exit(1)
else:
config.write(
'CXXFLAGS=/c%s %s /WX- /O2 /Oy- /D _EXTERNAL_RELEASE /D NDEBUG /D _CONSOLE /D ASYNC_COMMANDS /Gm- /arch:SSE2 %s %s\n' % (GL, CXXFLAGS, extra_opt, static_opt))
config.write(
'LINK_EXTRA_FLAGS=/link%s /DEBUG /MACHINE:X86 /SUBSYSTEM:CONSOLE /INCREMENTAL:NO /STACK:8388608 /OPT:REF /OPT:ICF /TLBID:1 /DYNAMICBASE /NXCOMPAT %s\n'
'SLINK_EXTRA_FLAGS=/link%s /DEBUG /MACHINE:X86 /SUBSYSTEM:WINDOWS /INCREMENTAL:NO /STACK:8388608 /OPT:REF /OPT:ICF /TLBID:1 %s %s\n' % (LTCG, link_extra_opt, LTCG, maybe_disable_dynamic_base, link_extra_opt))
config.write('CFLAGS=$(CXXFLAGS)\n')
# End of Windows VS config.mk
if is_verbose():
print('64-bit: %s' % is64())
if is_java_enabled():
print('JNI Bindings: %s' % JNI_HOME)
print('Java Compiler: %s' % JAVAC)
if is_ml_enabled():
print('OCaml Compiler: %s' % OCAMLC)
print('OCaml Find tool: %s' % OCAMLFIND)
print('OCaml Native: %s' % OCAMLOPT)
print('OCaml Library: %s' % OCAML_LIB)
else:
OS_DEFINES = ""
ARITH = "internal"
check_ar()
CXX = find_cxx_compiler()
CC = find_c_compiler()
SLIBEXTRAFLAGS = ''
# SLIBEXTRAFLAGS = '%s -Wl,-soname,libz3.so.0' % LDFLAGS
EXE_EXT = ''
LIB_EXT = '.a'
if GPROF:
CXXFLAGS = '%s -pg' % CXXFLAGS
LDFLAGS = '%s -pg' % LDFLAGS
if GMP:
test_gmp(CXX)
ARITH = "gmp"
CPPFLAGS = '%s -D_MP_GMP' % CPPFLAGS
LDFLAGS = '%s -lgmp' % LDFLAGS
SLIBEXTRAFLAGS = '%s -lgmp' % SLIBEXTRAFLAGS
else:
CPPFLAGS = '%s -D_MP_INTERNAL' % CPPFLAGS
if GIT_HASH:
CPPFLAGS = '%s -DZ3GITHASH=%s' % (CPPFLAGS, GIT_HASH)
CXXFLAGS = '%s -std=c++17' % CXXFLAGS
CXXFLAGS = '%s -fvisibility=hidden -fvisibility-inlines-hidden -c' % CXXFLAGS
FPMATH = test_fpmath(CXX)
CXXFLAGS = '%s %s' % (CXXFLAGS, FPMATH_FLAGS)
if LOG_SYNC:
CXXFLAGS = '%s -DZ3_LOG_SYNC' % CXXFLAGS
if SINGLE_THREADED:
CXXFLAGS = '%s -DSINGLE_THREAD' % CXXFLAGS
if DEBUG_MODE:
CXXFLAGS = '%s -g -Wall' % CXXFLAGS
EXAMP_DEBUG_FLAG = '-g'
CPPFLAGS = '%s -DZ3DEBUG -D_DEBUG' % CPPFLAGS
else:
CXXFLAGS = '%s -O3' % CXXFLAGS
if GPROF:
                CXXFLAGS += ' -fomit-frame-pointer'
CPPFLAGS = '%s -DNDEBUG -D_EXTERNAL_RELEASE' % CPPFLAGS
if is_CXX_clangpp():
CXXFLAGS = '%s -Wno-unknown-pragmas -Wno-overloaded-virtual -Wno-unused-value' % CXXFLAGS
sysname, _, _, _, machine = os.uname()
if sysname == 'Darwin':
SO_EXT = '.dylib'
SLIBFLAGS = '-dynamiclib'
elif sysname == 'Linux':
SO_EXT = '.so'
SLIBFLAGS = '-shared'
SLIBEXTRAFLAGS = '%s -Wl,-soname,libz3.so' % SLIBEXTRAFLAGS
elif sysname == 'GNU':
SO_EXT = '.so'
SLIBFLAGS = '-shared'
elif sysname == 'FreeBSD':
SO_EXT = '.so'
SLIBFLAGS = '-shared'
SLIBEXTRAFLAGS = '%s -Wl,-soname,libz3.so' % SLIBEXTRAFLAGS
elif sysname == 'NetBSD':
SO_EXT = '.so'
SLIBFLAGS = '-shared'
elif sysname == 'OpenBSD':
SO_EXT = '.so'
SLIBFLAGS = '-shared'
elif sysname == 'SunOS':
SO_EXT = '.so'
SLIBFLAGS = '-shared'
SLIBEXTRAFLAGS = '%s -mimpure-text' % SLIBEXTRAFLAGS
elif sysname.startswith('CYGWIN'):
SO_EXT = '.dll'
SLIBFLAGS = '-shared'
elif sysname.startswith('MSYS_NT') or sysname.startswith('MINGW'):
SO_EXT = '.dll'
SLIBFLAGS = '-shared'
EXE_EXT = '.exe'
LIB_EXT = '.lib'
else:
raise MKException('Unsupported platform: %s' % sysname)
if is64():
if not sysname.startswith('CYGWIN') and not sysname.startswith('MSYS') and not sysname.startswith('MINGW'):
CXXFLAGS = '%s -fPIC' % CXXFLAGS
elif not LINUX_X64:
CXXFLAGS = '%s -m32' % CXXFLAGS
LDFLAGS = '%s -m32' % LDFLAGS
SLIBFLAGS = '%s -m32' % SLIBFLAGS
if TRACE or DEBUG_MODE:
CPPFLAGS = '%s -D_TRACE' % CPPFLAGS
if is_cygwin_mingw() or is_msys2():
# when cross-compiling with MinGW, we need to statically link its standard libraries
# and to make it create an import library.
SLIBEXTRAFLAGS = '%s -static-libgcc -static-libstdc++ -Wl,--out-implib,libz3.dll.a' % SLIBEXTRAFLAGS
LDFLAGS = '%s -static-libgcc -static-libstdc++' % LDFLAGS
        if sysname == 'Linux' and (machine.startswith('armv7') or machine.startswith('armv8')):
CXXFLAGS = '%s -fpic' % CXXFLAGS
if IS_OSX and IS_ARCH_ARM64:
print("Setting arm64")
CXXFLAGS = '%s -arch arm64' % CXXFLAGS
LDFLAGS = '%s -arch arm64' % LDFLAGS
SLIBEXTRAFLAGS = '%s -arch arm64' % SLIBEXTRAFLAGS
config.write('PREFIX=%s\n' % PREFIX)
config.write('CC=%s\n' % CC)
config.write('CXX=%s\n' % CXX)
config.write('CXXFLAGS=%s %s\n' % (CPPFLAGS, CXXFLAGS))
config.write('CFLAGS=%s %s\n' % (CPPFLAGS, CXXFLAGS.replace('-std=c++17', '')))
config.write('EXAMP_DEBUG_FLAG=%s\n' % EXAMP_DEBUG_FLAG)
config.write('CXX_OUT_FLAG=-o \n')
config.write('C_OUT_FLAG=-o \n')
config.write('OBJ_EXT=.o\n')
config.write('LIB_EXT=%s\n' % LIB_EXT)
config.write('AR=%s\n' % AR)
config.write('AR_FLAGS=rcs\n')
config.write('AR_OUTFLAG=\n')
config.write('EXE_EXT=%s\n' % EXE_EXT)
config.write('LINK=%s\n' % CXX)
config.write('LINK_FLAGS=\n')
config.write('LINK_OUT_FLAG=-o \n')
if is_linux() and (build_static_lib() or build_static_bin()):
config.write('LINK_EXTRA_FLAGS=-Wl,--whole-archive -lrt -lpthread -Wl,--no-whole-archive %s\n' % LDFLAGS)
else:
config.write('LINK_EXTRA_FLAGS=-lpthread %s\n' % LDFLAGS)
config.write('SO_EXT=%s\n' % SO_EXT)
config.write('SLINK=%s\n' % CXX)
config.write('SLINK_FLAGS=%s\n' % SLIBFLAGS)
config.write('SLINK_EXTRA_FLAGS=-lpthread %s\n' % SLIBEXTRAFLAGS)
config.write('SLINK_OUT_FLAG=-o \n')
config.write('OS_DEFINES=%s\n' % OS_DEFINES)
if is_verbose():
print('Host platform: %s' % sysname)
print('C++ Compiler: %s' % CXX)
print('C Compiler : %s' % CC)
if is_cygwin_mingw():
print('MinGW32 cross: %s' % (is_cygwin_mingw()))
print('Archive Tool: %s' % AR)
print('Arithmetic: %s' % ARITH)
print('Prefix: %s' % PREFIX)
print('64-bit: %s' % is64())
print('FP math: %s' % FPMATH)
print("Python pkg dir: %s" % PYTHON_PACKAGE_DIR)
if GPROF:
print('gprof: enabled')
print('Python version: %s' % sysconfig.get_python_version())
if is_java_enabled():
print('JNI Bindings: %s' % JNI_HOME)
print('Java Compiler: %s' % JAVAC)
if is_ml_enabled():
print('OCaml Compiler: %s' % OCAMLC)
print('OCaml Find tool: %s' % OCAMLFIND)
print('OCaml Native: %s' % OCAMLOPT)
print('OCaml Library: %s' % OCAML_LIB)
if is_dotnet_core_enabled():
print('C# Compiler: %s' % DOTNET)
config.close()
def mk_install(out):
out.write('install: ')
for c in get_components():
c.mk_install_deps(out)
out.write(' ')
out.write('\n')
MakeRuleCmd.make_install_directory(out, INSTALL_BIN_DIR)
MakeRuleCmd.make_install_directory(out, INSTALL_INCLUDE_DIR)
MakeRuleCmd.make_install_directory(out, INSTALL_LIB_DIR)
for c in get_components():
c.mk_install(out)
out.write('\t@echo Z3 was successfully installed.\n')
out.write('\n')
def mk_uninstall(out):
out.write('uninstall:\n')
for c in get_components():
c.mk_uninstall(out)
out.write('\t@echo Z3 was successfully uninstalled.\n')
out.write('\n')
# Generate the Z3 makefile
def mk_makefile():
mk_dir(BUILD_DIR)
mk_config()
if VERBOSE:
print("Writing %s" % os.path.join(BUILD_DIR, 'Makefile'))
out = open(os.path.join(BUILD_DIR, 'Makefile'), 'w')
out.write('# Automatically generated file.\n')
out.write('include config.mk\n')
# Generate :all rule
out.write('all:')
for c in get_components():
if c.main_component():
out.write(' %s' % c.name)
out.write('\n\t@echo Z3 was successfully built.\n')
out.write("\t@echo \"Z3Py scripts can already be executed in the \'%s\' directory.\"\n" % os.path.join(BUILD_DIR, 'python'))
pathvar = "DYLD_LIBRARY_PATH" if IS_OSX else "PATH" if IS_WINDOWS else "LD_LIBRARY_PATH"
out.write("\t@echo \"Z3Py scripts stored in arbitrary directories can be executed if the \'%s\' directory is added to the PYTHONPATH environment variable and the \'%s\' directory is added to the %s environment variable.\"\n" % (os.path.join(BUILD_DIR, 'python'), BUILD_DIR, pathvar))
if not IS_WINDOWS:
out.write("\t@echo Use the following command to install Z3 at prefix $(PREFIX).\n")
out.write('\t@echo " sudo make install"\n\n')
# out.write("\t@echo If you are doing a staged install you can use DESTDIR.\n")
# out.write('\t@echo " make DESTDIR=/some/temp/directory install"\n')
# Generate :examples rule
out.write('examples:')
for c in get_components():
if c.is_example():
out.write(' _ex_%s' % c.name)
out.write('\n\t@echo Z3 examples were successfully built.\n')
# Generate components
for c in get_components():
c.mk_makefile(out)
# Generate install/uninstall rules if not WINDOWS
if not IS_WINDOWS:
mk_install(out)
mk_uninstall(out)
for c in get_components():
c.final_info()
out.close()
# Finalize
if VERBOSE:
print("Makefile was successfully generated.")
if DEBUG_MODE:
print(" compilation mode: Debug")
else:
print(" compilation mode: Release")
if IS_WINDOWS:
if VS_X64:
print(" platform: x64\n")
print("To build Z3, open a [Visual Studio x64 Command Prompt], then")
elif VS_ARM:
print(" platform: ARM\n")
print("To build Z3, open a [Visual Studio ARM Command Prompt], then")
else:
print(" platform: x86")
print("To build Z3, open a [Visual Studio Command Prompt], then")
print("type 'cd %s && nmake'\n" % os.path.join(os.getcwd(), BUILD_DIR))
print('Remark: to open a Visual Studio Command Prompt, go to: "Start > All Programs > Visual Studio > Visual Studio Tools"')
else:
print("Type 'cd %s; make' to build Z3" % BUILD_DIR)
# Generate automatically generated source code
def mk_auto_src():
if not ONLY_MAKEFILES:
exec_pyg_scripts()
mk_pat_db()
mk_all_install_tactic_cpps()
mk_all_mem_initializer_cpps()
mk_all_gparams_register_modules()
def _execfile(file, globals=globals(), locals=locals()):
if sys.version < "2.7":
execfile(file, globals, locals)
else:
with open(file, "r") as fh:
exec(fh.read()+"\n", globals, locals)
# Execute python auxiliary scripts that generate extra code for Z3.
def exec_pyg_scripts():
for root, dirs, files in os.walk('src'):
for f in files:
if f.endswith('.pyg'):
script = os.path.join(root, f)
generated_file = mk_genfile_common.mk_hpp_from_pyg(script, root)
if is_verbose():
print("Generated '{}'".format(generated_file))
# TODO: delete after src/ast/pattern/expr_pattern_match
# database.smt2 ==> database.h
def mk_pat_db():
c = get_component(PATTERN_COMPONENT)
fin = os.path.join(c.src_dir, 'database.smt2')
fout = os.path.join(c.src_dir, 'database.h')
mk_genfile_common.mk_pat_db_internal(fin, fout)
if VERBOSE:
print("Generated '{}'".format(fout))
# Update version numbers
def update_version():
major = VER_MAJOR
minor = VER_MINOR
build = VER_BUILD
revision = VER_TWEAK
print("UpdateVersion:", get_full_version_string(major, minor, build, revision))
if major is None or minor is None or build is None or revision is None:
raise MKException("set_version(major, minor, build, revision) must be used before invoking update_version()")
if not ONLY_MAKEFILES:
mk_version_dot_h(major, minor, build, revision)
mk_all_assembly_infos(major, minor, build, revision)
mk_def_files()
def get_full_version_string(major, minor, build, revision):
global GIT_HASH, GIT_DESCRIBE
res = "Z3 %s.%s.%s.%s" % (major, minor, build, revision)
if GIT_HASH:
res += " " + GIT_HASH
if GIT_DESCRIBE:
branch = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
res += " " + branch + " " + check_output(['git', 'describe'])
return '"' + res + '"'
# Update files with the version number
def mk_version_dot_h(major, minor, build, revision):
c = get_component(UTIL_COMPONENT)
version_template = os.path.join(c.src_dir, 'z3_version.h.in')
version_header_output = os.path.join(c.src_dir, 'z3_version.h')
    # Note: the substitution names are the ones used by the CMake
    # build system. If you change these you should change them
    # in the CMake build too.
configure_file(version_template, version_header_output,
{ 'Z3_VERSION_MAJOR': str(major),
'Z3_VERSION_MINOR': str(minor),
'Z3_VERSION_PATCH': str(build),
'Z3_VERSION_TWEAK': str(revision),
'Z3_FULL_VERSION': get_full_version_string(major, minor, build, revision)
}
)
if VERBOSE:
print("Generated '%s'" % version_header_output)
# Generate AssemblyInfo.cs files with the right version numbers by using ``AssemblyInfo.cs.in`` files as a template
def mk_all_assembly_infos(major, minor, build, revision):
for c in get_components():
if c.has_assembly_info():
c.make_assembly_info(major, minor, build, revision)
def get_header_files_for_components(component_src_dirs):
assert isinstance(component_src_dirs, list)
h_files_full_path = []
for component_src_dir in sorted(component_src_dirs):
h_files = filter(lambda f: f.endswith('.h') or f.endswith('.hpp'), os.listdir(component_src_dir))
h_files = list(map(lambda p: os.path.join(component_src_dir, p), h_files))
h_files_full_path.extend(h_files)
return h_files_full_path
def mk_install_tactic_cpp(cnames, path):
component_src_dirs = []
for cname in cnames:
print("Component %s" % cname)
c = get_component(cname)
component_src_dirs.append(c.src_dir)
h_files_full_path = get_header_files_for_components(component_src_dirs)
generated_file = mk_genfile_common.mk_install_tactic_cpp_internal(h_files_full_path, path)
if VERBOSE:
print("Generated '{}'".format(generated_file))
def mk_all_install_tactic_cpps():
if not ONLY_MAKEFILES:
for c in get_components():
if c.require_install_tactics():
cnames = []
cnames.extend(c.deps)
cnames.append(c.name)
mk_install_tactic_cpp(cnames, c.src_dir)
def mk_mem_initializer_cpp(cnames, path):
component_src_dirs = []
for cname in cnames:
c = get_component(cname)
component_src_dirs.append(c.src_dir)
h_files_full_path = get_header_files_for_components(component_src_dirs)
generated_file = mk_genfile_common.mk_mem_initializer_cpp_internal(h_files_full_path, path)
if VERBOSE:
print("Generated '{}'".format(generated_file))
def mk_all_mem_initializer_cpps():
if not ONLY_MAKEFILES:
for c in get_components():
if c.require_mem_initializer():
cnames = []
cnames.extend(c.deps)
cnames.append(c.name)
mk_mem_initializer_cpp(cnames, c.src_dir)
def mk_gparams_register_modules(cnames, path):
component_src_dirs = []
for cname in cnames:
c = get_component(cname)
component_src_dirs.append(c.src_dir)
h_files_full_path = get_header_files_for_components(component_src_dirs)
generated_file = mk_genfile_common.mk_gparams_register_modules_internal(h_files_full_path, path)
if VERBOSE:
print("Generated '{}'".format(generated_file))
def mk_all_gparams_register_modules():
if not ONLY_MAKEFILES:
for c in get_components():
if c.require_mem_initializer():
cnames = []
cnames.extend(c.deps)
cnames.append(c.name)
mk_gparams_register_modules(cnames, c.src_dir)
# Generate a .def based on the files at c.export_files slot.
def mk_def_file(c):
defname = '%s.def' % os.path.join(c.src_dir, c.name)
dll_name = c.dll_name
export_header_files = []
for dot_h in c.export_files:
dot_h_c = c.find_file(dot_h, c.name)
api = os.path.join(dot_h_c.src_dir, dot_h)
export_header_files.append(api)
mk_genfile_common.mk_def_file_internal(defname, dll_name, export_header_files)
if VERBOSE:
print("Generated '%s'" % defname)
def mk_def_files():
if not ONLY_MAKEFILES:
for c in get_components():
if c.require_def_file():
mk_def_file(c)
def cp_z3py_to_build():
mk_dir(BUILD_DIR)
mk_dir(os.path.join(BUILD_DIR, 'python'))
z3py_dest = os.path.join(BUILD_DIR, 'python', 'z3')
z3py_src = os.path.join(Z3PY_SRC_DIR, 'z3')
# Erase existing .pyc files
for root, dirs, files in os.walk(Z3PY_SRC_DIR):
for f in files:
if f.endswith('.pyc'):
rmf(os.path.join(root, f))
# Compile Z3Py files
if compileall.compile_dir(z3py_src, force=1) != 1:
raise MKException("failed to compile Z3Py sources")
    if is_verbose():
print("Generated python bytecode")
# Copy sources to build
mk_dir(z3py_dest)
for py in filter(lambda f: f.endswith('.py'), os.listdir(z3py_src)):
shutil.copyfile(os.path.join(z3py_src, py), os.path.join(z3py_dest, py))
if is_verbose():
print("Copied '%s'" % py)
# Python 2.x support
for pyc in filter(lambda f: f.endswith('.pyc'), os.listdir(z3py_src)):
shutil.copyfile(os.path.join(z3py_src, pyc), os.path.join(z3py_dest, pyc))
if is_verbose():
print("Copied '%s'" % pyc)
# Python 3.x support
src_pycache = os.path.join(z3py_src, '__pycache__')
target_pycache = os.path.join(z3py_dest, '__pycache__')
if os.path.exists(src_pycache):
for pyc in filter(lambda f: f.endswith('.pyc'), os.listdir(src_pycache)):
mk_dir(target_pycache)
shutil.copyfile(os.path.join(src_pycache, pyc), os.path.join(target_pycache, pyc))
if is_verbose():
print("Copied '%s'" % pyc)
# Copy z3test.py
shutil.copyfile(os.path.join(Z3PY_SRC_DIR, 'z3test.py'), os.path.join(BUILD_DIR, 'python', 'z3test.py'))
def mk_bindings(api_files):
if not ONLY_MAKEFILES:
mk_z3consts_py(api_files)
new_api_files = []
api = get_component(API_COMPONENT)
for api_file in api_files:
api_file_path = api.find_file(api_file, api.name)
new_api_files.append(os.path.join(api_file_path.src_dir, api_file))
g = globals()
g["API_FILES"] = new_api_files
if is_java_enabled():
check_java()
mk_z3consts_java(api_files)
# Generate some of the bindings and "api" module files
import update_api
dotnet_output_dir = None
if is_dotnet_core_enabled():
dotnet_output_dir = os.path.join(BUILD_DIR, 'dotnet')
mk_dir(dotnet_output_dir)
java_input_dir = None
java_output_dir = None
java_package_name = None
if is_java_enabled():
java_output_dir = get_component('java').src_dir
java_input_dir = get_component('java').src_dir
java_package_name = get_component('java').package_name
ml_output_dir = None
if is_ml_enabled():
ml_output_dir = get_component('ml').src_dir
# Get the update_api module to do the work for us
update_api.VERBOSE = is_verbose()
update_api.generate_files(api_files=new_api_files,
api_output_dir=get_component('api').src_dir,
z3py_output_dir=get_z3py_dir(),
dotnet_output_dir=dotnet_output_dir,
java_input_dir=java_input_dir,
java_output_dir=java_output_dir,
java_package_name=java_package_name,
ml_output_dir=ml_output_dir,
ml_src_dir=ml_output_dir
)
cp_z3py_to_build()
if is_ml_enabled():
check_ml()
mk_z3consts_ml(api_files)
if is_dotnet_core_enabled():
check_dotnet_core()
mk_z3consts_dotnet(api_files, dotnet_output_dir)
# Extract enumeration types from API files, and add python definitions.
def mk_z3consts_py(api_files):
if Z3PY_SRC_DIR is None:
raise MKException("You must invoke set_z3py_dir(path):")
full_path_api_files = []
api_dll = get_component(Z3_DLL_COMPONENT)
for api_file in api_files:
api_file_c = api_dll.find_file(api_file, api_dll.name)
api_file = os.path.join(api_file_c.src_dir, api_file)
full_path_api_files.append(api_file)
generated_file = mk_genfile_common.mk_z3consts_py_internal(full_path_api_files, Z3PY_SRC_DIR)
if VERBOSE:
print("Generated '{}".format(generated_file))
# Extract enumeration types from z3_api.h, and add .Net definitions
def mk_z3consts_dotnet(api_files, output_dir):
dotnet = get_component(DOTNET_COMPONENT)
if not dotnet:
dotnet = get_component(DOTNET_CORE_COMPONENT)
full_path_api_files = []
for api_file in api_files:
api_file_c = dotnet.find_file(api_file, dotnet.name)
api_file = os.path.join(api_file_c.src_dir, api_file)
full_path_api_files.append(api_file)
generated_file = mk_genfile_common.mk_z3consts_dotnet_internal(full_path_api_files, output_dir)
if VERBOSE:
print("Generated '{}".format(generated_file))
# Extract enumeration types from z3_api.h, and add Java definitions
def mk_z3consts_java(api_files):
java = get_component(JAVA_COMPONENT)
full_path_api_files = []
for api_file in api_files:
api_file_c = java.find_file(api_file, java.name)
api_file = os.path.join(api_file_c.src_dir, api_file)
full_path_api_files.append(api_file)
generated_files = mk_genfile_common.mk_z3consts_java_internal(
full_path_api_files,
java.package_name,
java.src_dir)
if VERBOSE:
for generated_file in generated_files:
print("Generated '{}'".format(generated_file))
# Extract enumeration types from z3_api.h, and add ML definitions
def mk_z3consts_ml(api_files):
ml = get_component(ML_COMPONENT)
full_path_api_files = []
for api_file in api_files:
api_file_c = ml.find_file(api_file, ml.name)
api_file = os.path.join(api_file_c.src_dir, api_file)
full_path_api_files.append(api_file)
generated_file = mk_genfile_common.mk_z3consts_ml_internal(
full_path_api_files,
ml.src_dir)
if VERBOSE:
print ('Generated "%s"' % generated_file)
def mk_gui_str(id):
return '4D2F40D8-E5F9-473B-B548-%012d' % id
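# Note: mk_vs_proj_property_groups below always calls mk_gui_str(0), so the
# generated .vcxproj files share one deterministic ProjectGuid.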
def get_platform_toolset_str():
    default = 'v110'
vstr = check_output(['msbuild', '/ver'])
lines = vstr.split('\n')
lline = lines[-1]
tokens = lline.split('.')
if len(tokens) < 2:
return default
else:
if tokens[0] == "15":
# Visual Studio 2017 reports 15.* but the PlatformToolsetVersion is 141
return "v141"
else:
return 'v' + tokens[0] + tokens[1]
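# Illustrative example: an 'msbuild /ver' report whose last line is 14.0.25420.1
# yields 'v140'; a 15.x toolchain (Visual Studio 2017) is special-cased to 'v141'.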
def mk_vs_proj_property_groups(f, name, target_ext, type):
f.write(' <ItemGroup Label="ProjectConfigurations">\n')
f.write(' <ProjectConfiguration Include="Debug|Win32">\n')
f.write(' <Configuration>Debug</Configuration>\n')
f.write(' <Platform>Win32</Platform>\n')
f.write(' </ProjectConfiguration>\n')
f.write(' <ProjectConfiguration Include="Release|Win32">\n')
f.write(' <Configuration>Release</Configuration>\n')
f.write(' <Platform>Win32</Platform>\n')
f.write(' </ProjectConfiguration>\n')
f.write(' </ItemGroup>\n')
f.write(' <PropertyGroup Label="Globals">\n')
f.write(' <ProjectGuid>{%s}</ProjectGuid>\n' % mk_gui_str(0))
f.write(' <ProjectName>%s</ProjectName>\n' % name)
f.write(' <Keyword>Win32Proj</Keyword>\n')
f.write(' <PlatformToolset>%s</PlatformToolset>\n' % get_platform_toolset_str())
f.write(' </PropertyGroup>\n')
f.write(' <Import Project="$(VCTargetsPath)\\Microsoft.Cpp.Default.props" />\n')
f.write(' <PropertyGroup Condition="\'$(Configuration)|$(Platform)\'==\'Debug|Win32\'" Label="Configuration">\n')
f.write(' <ConfigurationType>%s</ConfigurationType>\n' % type)
f.write(' <CharacterSet>Unicode</CharacterSet>\n')
f.write(' <UseOfMfc>false</UseOfMfc>\n')
f.write(' </PropertyGroup>\n')
f.write(' <PropertyGroup Condition="\'$(Configuration)|$(Platform)\'==\'Release|Win32\'" Label="Configuration">\n')
f.write(' <ConfigurationType>%s</ConfigurationType>\n' % type)
f.write(' <CharacterSet>Unicode</CharacterSet>\n')
f.write(' <UseOfMfc>false</UseOfMfc>\n')
f.write(' </PropertyGroup>\n')
f.write(' <Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props" />\n')
f.write(' <ImportGroup Label="ExtensionSettings" />\n')
f.write(' <ImportGroup Label="PropertySheets">\n')
f.write(' <Import Project="$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props" Condition="exists(\'$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props\')" Label="LocalAppDataPlatform" /> </ImportGroup>\n')
f.write(' <PropertyGroup Label="UserMacros" />\n')
f.write(' <PropertyGroup>\n')
f.write(' <OutDir Condition="\'$(Configuration)|$(Platform)\'==\'Debug|Win32\'">$(SolutionDir)\\$(ProjectName)\\$(Configuration)\\</OutDir>\n')
f.write(' <TargetName Condition="\'$(Configuration)|$(Platform)\'==\'Debug|Win32\'">%s</TargetName>\n' % name)
f.write(' <TargetExt Condition="\'$(Configuration)|$(Platform)\'==\'Debug|Win32\'">.%s</TargetExt>\n' % target_ext)
f.write(' <OutDir Condition="\'$(Configuration)|$(Platform)\'==\'Release|Win32\'">$(SolutionDir)\\$(ProjectName)\\$(Configuration)\\</OutDir>\n')
f.write(' <TargetName Condition="\'$(Configuration)|$(Platform)\'==\'Release|Win32\'">%s</TargetName>\n' % name)
f.write(' <TargetExt Condition="\'$(Configuration)|$(Platform)\'==\'Release|Win32\'">.%s</TargetExt>\n' % target_ext)
f.write(' </PropertyGroup>\n')
f.write(' <PropertyGroup Condition="\'$(Configuration)|$(Platform)\'==\'Debug|Win32\'">\n')
f.write(' <IntDir>$(ProjectName)\\$(Configuration)\\</IntDir>\n')
f.write(' </PropertyGroup>\n')
f.write(' <PropertyGroup Condition="\'$(Configuration)|$(Platform)\'==\'Release|Win32\'">\n')
f.write(' <IntDir>$(ProjectName)\\$(Configuration)\\</IntDir>\n')
f.write(' </PropertyGroup>\n')
def mk_vs_proj_cl_compile(f, name, components, debug):
f.write(' <ClCompile>\n')
f.write(' <Optimization>Disabled</Optimization>\n')
if debug:
f.write(' <PreprocessorDefinitions>WIN32;_DEBUG;Z3DEBUG;_TRACE;_MP_INTERNAL;_WINDOWS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\n')
else:
f.write(' <PreprocessorDefinitions>WIN32;NDEBUG;_MP_INTERNAL;_WINDOWS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\n')
if VS_PAR:
f.write(' <MinimalRebuild>false</MinimalRebuild>\n')
f.write(' <MultiProcessorCompilation>true</MultiProcessorCompilation>\n')
else:
f.write(' <MinimalRebuild>true</MinimalRebuild>\n')
f.write(' <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>\n')
f.write(' <WarningLevel>Level3</WarningLevel>\n')
if debug:
f.write(' <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>\n')
else:
f.write(' <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>\n')
f.write(' <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\n')
f.write(' <AdditionalIncludeDirectories>')
deps = find_all_deps(name, components)
first = True
for dep in deps:
if first:
first = False
else:
f.write(';')
f.write(get_component(dep).to_src_dir)
f.write(';%s\n' % os.path.join(REV_BUILD_DIR, SRC_DIR))
f.write('</AdditionalIncludeDirectories>\n')
f.write(' </ClCompile>\n')
def mk_vs_proj_dep_groups(f, name, components):
f.write(' <ItemGroup>\n')
deps = find_all_deps(name, components)
for dep in deps:
dep = get_component(dep)
for cpp in filter(lambda f: f.endswith('.cpp'), os.listdir(dep.src_dir)):
f.write(' <ClCompile Include="%s" />\n' % os.path.join(dep.to_src_dir, cpp))
f.write(' </ItemGroup>\n')
def mk_vs_proj_link_exe(f, name, debug):
f.write(' <Link>\n')
f.write(' <OutputFile>$(OutDir)%s.exe</OutputFile>\n' % name)
f.write(' <GenerateDebugInformation>true</GenerateDebugInformation>\n')
f.write(' <SubSystem>Console</SubSystem>\n')
f.write(' <StackReserveSize>8388608</StackReserveSize>\n')
f.write(' <RandomizedBaseAddress>false</RandomizedBaseAddress>\n')
f.write(' <DataExecutionPrevention/>\n')
f.write(' <TargetMachine>MachineX86</TargetMachine>\n')
f.write(' <AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n')
f.write(' <AdditionalDependencies>psapi.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\n')
f.write(' </Link>\n')
def mk_vs_proj(name, components):
if not VS_PROJ:
return
proj_name = '%s.vcxproj' % os.path.join(BUILD_DIR, name)
modes=['Debug', 'Release']
PLATFORMS=['Win32']
f = open(proj_name, 'w')
f.write('<?xml version="1.0" encoding="utf-8"?>\n')
f.write('<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\n')
mk_vs_proj_property_groups(f, name, 'exe', 'Application')
f.write(' <ItemDefinitionGroup Condition="\'$(Configuration)|$(Platform)\'==\'Debug|Win32\'">\n')
mk_vs_proj_cl_compile(f, name, components, debug=True)
mk_vs_proj_link_exe(f, name, debug=True)
f.write(' </ItemDefinitionGroup>\n')
f.write(' <ItemDefinitionGroup Condition="\'$(Configuration)|$(Platform)\'==\'Release|Win32\'">\n')
mk_vs_proj_cl_compile(f, name, components, debug=False)
mk_vs_proj_link_exe(f, name, debug=False)
f.write(' </ItemDefinitionGroup>\n')
mk_vs_proj_dep_groups(f, name, components)
f.write(' <Import Project="$(VCTargetsPath)\\Microsoft.Cpp.targets" />\n')
f.write(' <ImportGroup Label="ExtensionTargets">\n')
f.write(' </ImportGroup>\n')
f.write('</Project>\n')
f.close()
if is_verbose():
print("Generated '%s'" % proj_name)
def mk_vs_proj_link_dll(f, name, debug):
f.write(' <Link>\n')
f.write(' <OutputFile>$(OutDir)%s.dll</OutputFile>\n' % name)
f.write(' <GenerateDebugInformation>true</GenerateDebugInformation>\n')
f.write(' <SubSystem>Console</SubSystem>\n')
f.write(' <StackReserveSize>8388608</StackReserveSize>\n')
f.write(' <RandomizedBaseAddress>false</RandomizedBaseAddress>\n')
f.write(' <DataExecutionPrevention/>\n')
f.write(' <TargetMachine>MachineX86</TargetMachine>\n')
f.write(' <AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n')
f.write(' <AdditionalDependencies>psapi.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\n')
f.write(' <ModuleDefinitionFile>%s</ModuleDefinitionFile>' % os.path.join(get_component('api_dll').to_src_dir, 'api_dll.def'))
f.write(' </Link>\n')
def mk_vs_proj_dll(name, components):
if not VS_PROJ:
return
proj_name = '%s.vcxproj' % os.path.join(BUILD_DIR, name)
modes=['Debug', 'Release']
PLATFORMS=['Win32']
f = open(proj_name, 'w')
f.write('<?xml version="1.0" encoding="utf-8"?>\n')
f.write('<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\n')
mk_vs_proj_property_groups(f, name, 'dll', 'DynamicLibrary')
f.write(' <ItemDefinitionGroup Condition="\'$(Configuration)|$(Platform)\'==\'Debug|Win32\'">\n')
mk_vs_proj_cl_compile(f, name, components, debug=True)
mk_vs_proj_link_dll(f, name, debug=True)
f.write(' </ItemDefinitionGroup>\n')
f.write(' <ItemDefinitionGroup Condition="\'$(Configuration)|$(Platform)\'==\'Release|Win32\'">\n')
mk_vs_proj_cl_compile(f, name, components, debug=False)
mk_vs_proj_link_dll(f, name, debug=False)
f.write(' </ItemDefinitionGroup>\n')
mk_vs_proj_dep_groups(f, name, components)
f.write(' <Import Project="$(VCTargetsPath)\\Microsoft.Cpp.targets" />\n')
f.write(' <ImportGroup Label="ExtensionTargets">\n')
f.write(' </ImportGroup>\n')
f.write('</Project>\n')
f.close()
if is_verbose():
print("Generated '%s'" % proj_name)
def mk_win_dist(build_path, dist_path):
for c in get_components():
c.mk_win_dist(build_path, dist_path)
def mk_unix_dist(build_path, dist_path):
for c in get_components():
c.mk_unix_dist(build_path, dist_path)
# Add Z3Py to bin directory
for pyc in filter(lambda f: f.endswith('.pyc') or f.endswith('.py'), os.listdir(build_path)):
shutil.copy(os.path.join(build_path, pyc),
os.path.join(dist_path, INSTALL_BIN_DIR, pyc))
class MakeRuleCmd(object):
"""
These class methods provide a convenient way to emit frequently
    needed commands used in Makefile rules.
    Note that several of the methods are meant for use during ``make
install`` and ``make uninstall``. These methods correctly use
``$(PREFIX)`` and ``$(DESTDIR)`` and therefore are preferable
to writing commands manually which can be error prone.
"""
@classmethod
def install_root(cls):
"""
Returns a string that will expand to the
install location when used in a makefile rule.
"""
# Note: DESTDIR is to support staged installs
return "$(DESTDIR)$(PREFIX)/"
@classmethod
def _is_str(cls, obj):
if sys.version_info.major > 2:
# Python 3 or newer. Strings are always unicode and of type str
return isinstance(obj, str)
else:
# Python 2. Has byte-string and unicode representation, allow both
return isinstance(obj, str) or isinstance(obj, unicode)
@classmethod
def _install_root(cls, path, in_prefix, out, is_install=True):
if not in_prefix:
# The Python bindings on OSX are sometimes not installed inside the prefix.
install_root = "$(DESTDIR)"
action_string = 'install' if is_install else 'uninstall'
cls.write_cmd(out, 'echo "WARNING: {}ing files/directories ({}) that are not in the install prefix ($(PREFIX))."'.format(
action_string, path))
#print("WARNING: Generating makefile rule that {}s {} '{}' which is outside the installation prefix '{}'.".format(
# action_string, 'to' if is_install else 'from', path, PREFIX))
else:
# assert not os.path.isabs(path)
install_root = cls.install_root()
return install_root
@classmethod
def install_files(cls, out, src_pattern, dest, in_prefix=True):
assert len(dest) > 0
assert cls._is_str(src_pattern)
assert not ' ' in src_pattern
assert cls._is_str(dest)
assert not ' ' in dest
assert not os.path.isabs(src_pattern)
install_root = cls._install_root(dest, in_prefix, out)
cls.write_cmd(out, "cp {src_pattern} {install_root}{dest}".format(
src_pattern=src_pattern,
install_root=install_root,
dest=dest))
@classmethod
def remove_installed_files(cls, out, pattern, in_prefix=True):
assert len(pattern) > 0
assert cls._is_str(pattern)
assert not ' ' in pattern
install_root = cls._install_root(pattern, in_prefix, out, is_install=False)
cls.write_cmd(out, "rm -f {install_root}{pattern}".format(
install_root=install_root,
pattern=pattern))
@classmethod
def make_install_directory(cls, out, dir, in_prefix=True):
assert len(dir) > 0
assert cls._is_str(dir)
assert not ' ' in dir
install_root = cls._install_root(dir, in_prefix, out)
if is_windows():
cls.write_cmd(out, "IF NOT EXIST {dir} (mkdir {dir})".format(
install_root=install_root,
dir=dir))
else:
cls.write_cmd(out, "mkdir -p {install_root}{dir}".format(
install_root=install_root,
dir=dir))
@classmethod
def _is_path_prefix_of(cls, temp_path, target_as_abs):
"""
Returns True iff ``temp_path`` is a path prefix
of ``target_as_abs``
"""
assert cls._is_str(temp_path)
assert cls._is_str(target_as_abs)
assert len(temp_path) > 0
assert len(target_as_abs) > 0
assert os.path.isabs(temp_path)
assert os.path.isabs(target_as_abs)
        # Append a trailing separator, otherwise we might wrongly conclude that
        # ``/lib`` is a prefix of ``/lib64``. Skip this when ``temp_path == '/'``,
        # else we would check whether ``//`` (rather than ``/``) is a prefix of
        # ``/lib64``, which would fail.
if len(temp_path) > 1:
temp_path += os.sep
return target_as_abs.startswith(temp_path)
@classmethod
def create_relative_symbolic_link(cls, out, target, link_name):
assert cls._is_str(target)
assert cls._is_str(link_name)
assert len(target) > 0
assert len(link_name) > 0
assert not os.path.isabs(target)
assert not os.path.isabs(link_name)
        # We can't test whether link_name is a file or a directory because it
        # may not exist yet. Instead follow the convention that a trailing
        # slash means link_name is a directory; otherwise it is a file.
if link_name[-1] != '/':
# link_name is a file
temp_path = os.path.dirname(link_name)
else:
# link_name is a directory
temp_path = link_name[:-1]
temp_path = '/' + temp_path
relative_path = ""
targetAsAbs = '/' + target
assert os.path.isabs(targetAsAbs)
assert os.path.isabs(temp_path)
# Keep walking up the directory tree until temp_path
# is a prefix of targetAsAbs
while not cls._is_path_prefix_of(temp_path, targetAsAbs):
assert temp_path != '/'
temp_path = os.path.dirname(temp_path)
relative_path += '../'
# Now get the path from the common prefix directory to the target
target_from_prefix = targetAsAbs[len(temp_path):]
relative_path += target_from_prefix
# Remove any double slashes
relative_path = relative_path.replace('//','/')
cls.create_symbolic_link(out, relative_path, link_name)
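        # Illustrative sketch: target 'lib/libz3.so' with link_name
        # 'lib/python/z3/libz3.so' walks up to the common prefix and emits
        # 'ln -s ../../libz3.so ...', keeping the link relative within PREFIX.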
@classmethod
def create_symbolic_link(cls, out, target, link_name):
assert cls._is_str(target)
assert cls._is_str(link_name)
assert not os.path.isabs(target)
cls.write_cmd(out, 'ln -s {target} {install_root}{link_name}'.format(
target=target,
install_root=cls.install_root(),
link_name=link_name))
# TODO: Refactor all of the build system to emit commands using this
# helper to simplify code. This will also let us replace ``@`` with
# ``$(Verb)`` and have it set to ``@`` or empty at build time depending on
# a variable (e.g. ``VERBOSE``) passed to the ``make`` invocation. This
# would be very helpful for debugging.
@classmethod
def write_cmd(cls, out, line):
out.write("\t@{}\n".format(line))
def strip_path_prefix(path, prefix):
if path.startswith(prefix):
stripped_path = path[len(prefix):]
        stripped_path = stripped_path.replace('//','/')
if stripped_path[0] == '/':
stripped_path = stripped_path[1:]
assert not os.path.isabs(stripped_path)
return stripped_path
else:
return path
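# Illustrative behaviour (paths are hypothetical):
#   strip_path_prefix('/usr/local/lib/z3', '/usr/local') -> 'lib/z3'
#   strip_path_prefix('/opt/z3', '/usr/local')           -> '/opt/z3' (prefix not matched)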
def configure_file(template_file_path, output_file_path, substitutions):
"""
Read a template file ``template_file_path``, perform substitutions
found in the ``substitutions`` dictionary and write the result to
the output file ``output_file_path``.
The template file should contain zero or more template strings of the
form ``@NAME@``.
The substitutions dictionary maps old strings (without the ``@``
symbols) to their replacements.
"""
assert isinstance(template_file_path, str)
assert isinstance(output_file_path, str)
assert isinstance(substitutions, dict)
assert len(template_file_path) > 0
assert len(output_file_path) > 0
print("Generating {} from {}".format(output_file_path, template_file_path))
if not os.path.exists(template_file_path):
raise MKException('Could not find template file "{}"'.format(template_file_path))
# Read whole template file into string
template_string = None
with open(template_file_path, 'r') as f:
template_string = f.read()
# Do replacements
for (old_string, replacement) in substitutions.items():
template_string = template_string.replace('@{}@'.format(old_string), replacement)
# Write the string to the file
with open(output_file_path, 'w') as f:
f.write(template_string)
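# A minimal usage sketch (file names and keys are hypothetical):
#   configure_file('z3.pc.in', 'z3.pc', {'PREFIX': '/usr', 'VERSION': '4.12.3'})
# replaces every occurrence of @PREFIX@ and @VERSION@ in the template.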
if __name__ == '__main__':
import doctest
doctest.testmod()
| 143,588 | 39.550409 | 287 |
py
|
z3
|
z3-master/scripts/mk_make.py
|
############################################
# Copyright (c) 2012 Microsoft Corporation
#
# Scripts for generating Makefiles and Visual
# Studio project files.
#
# Author: Leonardo de Moura (leonardo)
############################################
from mk_util import *
from mk_project import *
parse_options()
check_eol()
API_files = init_project_def()
update_version()
mk_auto_src()
mk_bindings(API_files)
mk_vs_proj('z3', ['shell'])
mk_vs_proj_dll('libz3', ['api_dll'])
mk_makefile()
| 487 | 21.181818 | 45 |
py
|
z3
|
z3-master/scripts/update_header_guards.py
|
# Copyright (c) 2015 Microsoft Corporation
import os
import re
ifndef = re.compile(r"#ifndef \_(.*)\_H\_")
doubleu = re.compile(r"#(.*) (.*)\_\_H\_")
defn = re.compile(r"#define \_(.*)\_H\_")
endif = re.compile(r"#endif /\* \_(.*)\_H\_")
def fix_hdr(file):
print(file)
tmp = "%s.tmp" % file
ins = open(file)
ous = open(tmp,'w')
line = ins.readline()
found = False
while line:
m = doubleu.search(line)
if m:
ous.write("#")
ous.write(m.group(1))
ous.write(" ")
ous.write(m.group(2))
ous.write("_H_\n")
line = ins.readline()
found = True
continue
m = ifndef.search(line)
if m:
print(m.group(1))
ous.write("#ifndef ")
ous.write(m.group(1))
ous.write("_H_\n")
line = ins.readline()
found = True
continue
m = defn.search(line)
if m:
ous.write("#define ")
ous.write(m.group(1))
ous.write("_H_\n")
line = ins.readline()
found = True
continue
m = endif.search(line)
if m:
ous.write("#endif /* ")
ous.write(m.group(1))
ous.write("_H_ */\n")
line = ins.readline()
found = True
continue
ous.write(line)
line = ins.readline()
ins.close()
ous.close()
if found:
os.system("move %s %s" % (tmp, file))
else:
os.system("del %s" % tmp)
def fixup(dir):
for root, dirs, files in os.walk(dir):
for f in files:
if f.endswith('.h'):
path = "%s\\%s" % (root, f)
fix_hdr(path)
fixup('src')
| 1,792 | 23.902778 | 46 |
py
|
z3
|
z3-master/scripts/mk_project.py
|
############################################
# Copyright (c) 2012 Microsoft Corporation
#
# Z3 project configuration files
#
# Author: Leonardo de Moura (leonardo)
############################################
from mk_util import *
def init_version():
set_version(4, 12, 3, 0) # express a default build version or pick up ci build version
# Z3 Project definition
def init_project_def():
init_version()
add_lib('util', [], includes2install = ['z3_version.h'])
add_lib('polynomial', ['util'], 'math/polynomial')
add_lib('interval', ['util'], 'math/interval')
add_lib('dd', ['util', 'interval'], 'math/dd')
add_lib('simplex', ['util'], 'math/simplex')
add_lib('hilbert', ['util'], 'math/hilbert')
add_lib('automata', ['util'], 'math/automata')
add_lib('params', ['util'])
add_lib('realclosure', ['interval'], 'math/realclosure')
add_lib('subpaving', ['interval'], 'math/subpaving')
add_lib('ast', ['util', 'polynomial'])
add_lib('smt_params', ['ast', 'params'], 'smt/params')
add_lib('parser_util', ['ast'], 'parsers/util')
add_lib('euf', ['ast'], 'ast/euf')
add_lib('grobner', ['ast', 'dd', 'simplex'], 'math/grobner')
add_lib('sat', ['params', 'util', 'dd', 'grobner'])
add_lib('nlsat', ['polynomial', 'sat'])
add_lib('lp', ['util', 'nlsat', 'grobner', 'interval', 'smt_params'], 'math/lp')
add_lib('rewriter', ['ast', 'polynomial', 'interval', 'automata', 'params'], 'ast/rewriter')
add_lib('bit_blaster', ['rewriter'], 'ast/rewriter/bit_blaster')
add_lib('normal_forms', ['rewriter'], 'ast/normal_forms')
add_lib('substitution', ['rewriter'], 'ast/substitution')
add_lib('proofs', ['rewriter'], 'ast/proofs')
add_lib('macros', ['rewriter'], 'ast/macros')
add_lib('model', ['macros'])
add_lib('converters', ['model'], 'ast/converters')
add_lib('simplifiers', ['euf', 'normal_forms', 'bit_blaster', 'converters', 'substitution'], 'ast/simplifiers')
add_lib('tactic', ['simplifiers'])
add_lib('mbp', ['model', 'simplex'], 'qe/mbp')
add_lib('qe_lite', ['tactic', 'mbp'], 'qe/lite')
add_lib('solver', ['params', 'smt_params', 'model', 'tactic', 'qe_lite', 'proofs'])
add_lib('cmd_context', ['solver', 'rewriter', 'params'])
add_lib('smt2parser', ['cmd_context', 'parser_util'], 'parsers/smt2')
add_lib('pattern', ['normal_forms', 'smt2parser', 'rewriter'], 'ast/pattern')
add_lib('aig_tactic', ['tactic'], 'tactic/aig')
add_lib('ackermannization', ['model', 'rewriter', 'ast', 'solver', 'tactic'], 'ackermannization')
add_lib('fpa', ['ast', 'util', 'rewriter', 'model'], 'ast/fpa')
add_lib('core_tactics', ['tactic', 'macros', 'normal_forms', 'rewriter', 'pattern'], 'tactic/core')
add_lib('arith_tactics', ['core_tactics', 'sat'], 'tactic/arith')
add_lib('solver_assertions', ['pattern','smt_params','cmd_context','qe_lite'], 'solver/assertions')
add_lib('subpaving_tactic', ['core_tactics', 'subpaving'], 'math/subpaving/tactic')
add_lib('proto_model', ['model', 'rewriter', 'smt_params'], 'smt/proto_model')
add_lib('smt', ['bit_blaster', 'macros', 'normal_forms', 'cmd_context', 'proto_model', 'solver_assertions',
'substitution', 'grobner', 'simplex', 'proofs', 'pattern', 'parser_util', 'fpa', 'lp'])
add_lib('sat_smt', ['sat', 'euf', 'smt', 'tactic', 'solver', 'smt_params', 'bit_blaster', 'fpa', 'mbp', 'normal_forms', 'lp', 'pattern', 'qe_lite'], 'sat/smt')
add_lib('sat_tactic', ['tactic', 'sat', 'solver', 'sat_smt'], 'sat/tactic')
add_lib('nlsat_tactic', ['nlsat', 'sat_tactic', 'arith_tactics'], 'nlsat/tactic')
add_lib('bv_tactics', ['tactic', 'bit_blaster', 'core_tactics'], 'tactic/bv')
add_lib('fuzzing', ['ast'], 'test/fuzzing')
add_lib('smt_tactic', ['smt'], 'smt/tactic')
add_lib('sls_tactic', ['tactic', 'normal_forms', 'core_tactics', 'bv_tactics'], 'tactic/sls')
add_lib('qe', ['smt', 'mbp', 'qe_lite', 'nlsat', 'tactic', 'nlsat_tactic'], 'qe')
add_lib('sat_solver', ['solver', 'core_tactics', 'aig_tactic', 'bv_tactics', 'arith_tactics', 'sat_tactic'], 'sat/sat_solver')
add_lib('fd_solver', ['core_tactics', 'arith_tactics', 'sat_solver', 'smt'], 'tactic/fd_solver')
add_lib('muz', ['smt', 'sat', 'smt2parser', 'aig_tactic', 'qe'], 'muz/base')
add_lib('dataflow', ['muz'], 'muz/dataflow')
add_lib('transforms', ['muz', 'hilbert', 'dataflow'], 'muz/transforms')
add_lib('rel', ['muz', 'transforms'], 'muz/rel')
add_lib('spacer', ['muz', 'transforms', 'arith_tactics', 'smt_tactic'], 'muz/spacer')
add_lib('clp', ['muz', 'transforms'], 'muz/clp')
add_lib('tab', ['muz', 'transforms'], 'muz/tab')
add_lib('ddnf', ['muz', 'transforms', 'rel'], 'muz/ddnf')
add_lib('bmc', ['muz', 'transforms', 'fd_solver'], 'muz/bmc')
add_lib('fp', ['muz', 'clp', 'tab', 'rel', 'bmc', 'ddnf', 'spacer'], 'muz/fp')
add_lib('smtlogic_tactics', ['ackermannization', 'sat_solver', 'arith_tactics', 'bv_tactics', 'nlsat_tactic', 'smt_tactic', 'aig_tactic', 'fp', 'muz', 'qe'], 'tactic/smtlogics')
add_lib('ufbv_tactic', ['normal_forms', 'core_tactics', 'macros', 'smt_tactic', 'rewriter', 'smtlogic_tactics'], 'tactic/ufbv')
add_lib('fpa_tactics', ['fpa', 'core_tactics', 'bv_tactics', 'sat_tactic', 'smt_tactic', 'arith_tactics', 'smtlogic_tactics'], 'tactic/fpa')
add_lib('portfolio', ['smtlogic_tactics', 'sat_solver', 'ufbv_tactic', 'fpa_tactics', 'aig_tactic', 'fp', 'fd_solver', 'qe', 'sls_tactic', 'subpaving_tactic'], 'tactic/portfolio')
add_lib('opt', ['smt', 'smtlogic_tactics', 'sls_tactic', 'sat_solver'], 'opt')
API_files = ['z3_api.h', 'z3_ast_containers.h', 'z3_algebraic.h', 'z3_polynomial.h', 'z3_rcf.h', 'z3_fixedpoint.h', 'z3_optimization.h', 'z3_fpa.h', 'z3_spacer.h']
add_lib('extra_cmds', ['cmd_context', 'subpaving_tactic', 'qe', 'euf', 'arith_tactics'], 'cmd_context/extra_cmds')
add_lib('api', ['portfolio', 'realclosure', 'opt', 'extra_cmds'],
includes2install=['z3.h', 'z3_v1.h', 'z3_macros.h'] + API_files)
add_exe('shell', ['api', 'sat', 'extra_cmds', 'opt'], exe_name='z3')
add_exe('test', ['api', 'fuzzing', 'simplex', 'sat_smt'], exe_name='test-z3', install=False)
_libz3Component = add_dll('api_dll', ['api', 'sat', 'extra_cmds'], 'api/dll',
reexports=['api'],
dll_name='libz3',
static=build_static_lib(),
export_files=API_files,
staging_link='python')
add_dot_net_core_dll('dotnet', ['api_dll'], 'api/dotnet', dll_name='Microsoft.Z3', default_key_file='src/api/dotnet/Microsoft.Z3.snk')
add_java_dll('java', ['api_dll'], 'api/java', dll_name='libz3java', package_name="com.microsoft.z3", manifest_file='manifest')
add_ml_lib('ml', ['api_dll'], 'api/ml', lib_name='libz3ml')
add_hlib('cpp', 'api/c++', includes2install=['z3++.h'])
set_z3py_dir('api/python')
add_python(_libz3Component)
add_python_install(_libz3Component)
add_js()
# Examples
add_cpp_example('cpp_example', 'c++')
add_cpp_example('z3_tptp', 'tptp')
add_c_example('c_example', 'c')
add_c_example('maxsat')
add_dotnet_example('dotnet_example', 'dotnet')
add_java_example('java_example', 'java')
add_ml_example('ml_example', 'ml')
add_z3py_example('py_example', 'python')
return API_files
| 7,467 | 62.288136 | 184 |
py
|
z3
|
z3-master/scripts/mk_def_file.py
|
#!/usr/bin/env python
"""
Reads a list of Z3 API header files and
generates a ``.def`` file defining the
exported symbols of a DLL. This file
can be passed via ``/DEF`` to the
linker used by MSVC.
"""
import mk_genfile_common
import argparse
import logging
import os
import sys
def main(args):
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("output_file", help="output def file path")
parser.add_argument("dllname", help="dllname to use in def file")
parser.add_argument("api_files", nargs="+")
pargs = parser.parse_args(args)
if not mk_genfile_common.check_files_exist(pargs.api_files):
logging.error('One or more api files do not exist')
return 1
mk_genfile_common.mk_def_file_internal(
pargs.output_file,
pargs.dllname,
pargs.api_files)
logging.info('Generated "{}"'.format(pargs.output_file))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 1,024 | 26.702703 | 69 |
py
|
z3
|
z3-master/scripts/mk_mem_initializer_cpp.py
|
#!/usr/bin/env python
"""
Scans the listed header files for
memory initializers and finalizers and
emits an implementation of
``void mem_initialize()`` and
``void mem_finalize()`` into ``mem_initializer.cpp``
in the destination directory.
"""
import mk_genfile_common
import argparse
import logging
import os
import sys
def main(args):
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("destination_dir", help="destination directory")
parser.add_argument("header_files", nargs="+",
help="One or more header files to parse")
pargs = parser.parse_args(args)
if not mk_genfile_common.check_dir_exists(pargs.destination_dir):
return 1
h_files_full_path = []
for header_file in pargs.header_files:
h_files_full_path.append(os.path.abspath(header_file))
output = mk_genfile_common.mk_mem_initializer_cpp_internal(
h_files_full_path,
pargs.destination_dir
)
logging.info('Generated "{}"'.format(output))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 1,144 | 26.926829 | 72 |
py
|
z3
|
z3-master/scripts/mk_unix_dist.py
|
############################################
# Copyright (c) 2013 Microsoft Corporation
#
# Scripts for automatically generating
# Linux/OSX/BSD distribution zip files.
#
# Author: Leonardo de Moura (leonardo)
############################################
import os
import glob
import re
import getopt
import sys
import shutil
import subprocess
import zipfile
from mk_exception import *
from mk_project import *
import mk_util
BUILD_DIR='build-dist'
VERBOSE=True
DIST_DIR='dist'
FORCE_MK=False
DOTNET_CORE_ENABLED=True
DOTNET_KEY_FILE=None
JAVA_ENABLED=True
GIT_HASH=False
PYTHON_ENABLED=True
MAKEJOBS=getenv("MAKEJOBS", '8')
OS_NAME=None
def set_verbose(flag):
global VERBOSE
VERBOSE = flag
def is_verbose():
return VERBOSE
def mk_dir(d):
if not os.path.exists(d):
os.makedirs(d)
def set_build_dir(path):
global BUILD_DIR
BUILD_DIR = mk_util.norm_path(path)
mk_dir(BUILD_DIR)
def display_help():
print("mk_unix_dist.py: Z3 Linux/OSX/BSD distribution generator\n")
print("This script generates the zip files containing executables, shared objects, header files for Linux/OSX/BSD.")
print("It must be executed from the Z3 root directory.")
print("\nOptions:")
print(" -h, --help display this message.")
print(" -s, --silent do not print verbose messages.")
print(" -b <sudir>, --build=<subdir> subdirectory where x86 and x64 Z3 versions will be built (default: build-dist).")
print(" -f, --force force script to regenerate Makefiles.")
print(" --nodotnet do not include .NET bindings in the binary distribution files.")
print(" --dotnet-key=<file> sign the .NET assembly with the private key in <file>.")
print(" --arch=<arch> set architecture (to arm64) to force arm64 build")
print(" --nojava do not include Java bindings in the binary distribution files.")
print(" --os=<os> set OS version.")
print(" --nopython do not include Python bindings in the binary distribution files.")
print(" --githash include git hash in the Zip file.")
exit(0)
# Parse configuration option for mk_make script
def parse_options():
global FORCE_MK, JAVA_ENABLED, GIT_HASH, DOTNET_CORE_ENABLED, DOTNET_KEY_FILE, PYTHON_ENABLED, OS_NAME
path = BUILD_DIR
options, remainder = getopt.gnu_getopt(sys.argv[1:], 'b:hsf', ['build=',
'help',
'silent',
'force',
'nojava',
'nodotnet',
'dotnet-key=',
'arch=',
'os=',
'githash',
'nopython'
])
for opt, arg in options:
if opt in ('-b', '--build'):
if arg == 'src':
raise MKException('The src directory should not be used to host the Makefile')
path = arg
elif opt in ('-s', '--silent'):
set_verbose(False)
elif opt in ('-h', '--help'):
display_help()
elif opt in ('-f', '--force'):
FORCE_MK = True
elif opt == '--nodotnet':
DOTNET_CORE_ENABLED = False
elif opt == '--nopython':
PYTHON_ENABLED = False
elif opt == '--dotnet-key':
DOTNET_KEY_FILE = arg
elif opt == '--nojava':
JAVA_ENABLED = False
elif opt == '--githash':
GIT_HASH = True
elif opt == '--arch':
if arg == "arm64":
mk_util.IS_ARCH_ARM64 = True
else:
raise MKException("Invalid architecture directive '%s'. Legal directives: arm64" % arg)
elif opt == '--os':
OS_NAME = arg
else:
raise MKException("Invalid command line option '%s'" % opt)
set_build_dir(path)
# Check whether build directory already exists or not
def check_build_dir(path):
return os.path.exists(path) and os.path.exists(os.path.join(path, 'Makefile'))
# Create a build directory using mk_make.py
def mk_build_dir(path):
if not check_build_dir(path) or FORCE_MK:
opts = [sys.executable, os.path.join('scripts', 'mk_make.py'), "-b", path, "--staticlib"]
if DOTNET_CORE_ENABLED:
opts.append('--dotnet')
if not DOTNET_KEY_FILE is None:
opts.append('--dotnet-key=' + DOTNET_KEY_FILE)
if JAVA_ENABLED:
opts.append('--java')
if GIT_HASH:
opts.append('--githash=%s' % mk_util.git_hash())
opts.append('--git-describe')
if PYTHON_ENABLED:
opts.append('--python')
if mk_util.IS_ARCH_ARM64:
opts.append('--arm64=true')
if subprocess.call(opts) != 0:
raise MKException("Failed to generate build directory at '%s'" % path)
# Create build directories
def mk_build_dirs():
mk_build_dir(BUILD_DIR)
class cd:
def __init__(self, newPath):
self.newPath = newPath
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
def mk_z3():
with cd(BUILD_DIR):
try:
return subprocess.call(['make', '-j', MAKEJOBS])
except:
return 1
def get_os_name():
if OS_NAME is not None:
return OS_NAME
import platform
basic = os.uname()[0].lower()
if basic == 'linux':
dist = platform.libc_ver()
if len(dist) == 2 and len(dist[0]) > 0 and len(dist[1]) > 0:
return '%s-%s' % (dist[0].lower(), dist[1].lower())
else:
return basic
elif basic == 'darwin':
ver = platform.mac_ver()
if len(ver) == 3 and len(ver[0]) > 0:
return 'osx-%s' % ver[0]
else:
return 'osx'
elif basic == 'freebsd':
ver = platform.version()
idx1 = ver.find(' ')
idx2 = ver.find('-')
if idx1 < 0 or idx2 < 0 or idx1 >= idx2:
return basic
else:
return 'freebsd-%s' % ver[(idx1+1):idx2]
else:
return basic
def get_z3_name():
major, minor, build, revision = get_version()
if mk_util.IS_ARCH_ARM64:
platform = "arm64"
elif sys.maxsize >= 2**32:
platform = "x64"
else:
platform = "x86"
osname = get_os_name()
if GIT_HASH:
return 'z3-%s.%s.%s.%s-%s-%s' % (major, minor, build, mk_util.git_hash(), platform, osname)
else:
return 'z3-%s.%s.%s-%s-%s' % (major, minor, build, platform, osname)
def mk_dist_dir():
build_path = BUILD_DIR
dist_path = os.path.join(DIST_DIR, get_z3_name())
mk_dir(dist_path)
mk_util.DOTNET_CORE_ENABLED = DOTNET_CORE_ENABLED
mk_util.DOTNET_ENABLED = False
mk_util.DOTNET_KEY_FILE = DOTNET_KEY_FILE
mk_util.JAVA_ENABLED = JAVA_ENABLED
mk_util.PYTHON_ENABLED = PYTHON_ENABLED
mk_unix_dist(build_path, dist_path)
if is_verbose():
print("Generated distribution folder at '%s'" % dist_path)
def get_dist_path():
return get_z3_name()
def mk_zip():
dist_path = get_dist_path()
old = os.getcwd()
try:
os.chdir(DIST_DIR)
zfname = '%s.zip' % dist_path
zipout = zipfile.ZipFile(zfname, 'w', zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(dist_path):
for f in files:
zipout.write(os.path.join(root, f))
if is_verbose():
print("Generated '%s'" % zfname)
except:
pass
os.chdir(old)
def cp_license():
shutil.copy("LICENSE.txt", os.path.join(DIST_DIR, get_dist_path()))
# Entry point
def main():
parse_options()
mk_build_dirs()
mk_z3()
init_project_def()
mk_dist_dir()
cp_license()
mk_zip()
main()
| 8,497 | 32.992 | 124 |
py
|
z3
|
z3-master/scripts/update_api.py
|
#!/usr/bin/env python
############################################
# Copyright (c) 2012 Microsoft Corporation
#
# Scripts for generating Makefiles and Visual
# Studio project files.
#
# Author: Leonardo de Moura (leonardo)
############################################
"""
This script generates the ``api_log_macros.h``,
``api_log_macros.cpp`` and ``api_commands.cpp``
files for the "api" module based on parsing
several API header files. It can also optionally
emit some of the files required for Z3's different
language bindings.
"""
import argparse
import logging
import re
import os
import sys
VERBOSE = True
def is_verbose():
return VERBOSE
##########################################################
# TODO: rewrite this file without using global variables.
# This file is a big HACK.
# It started as a small, simple script.
# Now it is too big, and it is invoked from mk_make.py.
#
##########################################################
IN = 0
OUT = 1
INOUT = 2
IN_ARRAY = 3
OUT_ARRAY = 4
INOUT_ARRAY = 5
OUT_MANAGED_ARRAY = 6
FN_PTR = 7
# Primitive Types
VOID = 0
VOID_PTR = 1
INT = 2
UINT = 3
INT64 = 4
UINT64 = 5
STRING = 6
STRING_PTR = 7
BOOL = 8
SYMBOL = 9
PRINT_MODE = 10
ERROR_CODE = 11
DOUBLE = 12
FLOAT = 13
CHAR = 14
CHAR_PTR = 15
LBOOL = 16
FIRST_FN_ID = 50
FIRST_OBJ_ID = 100
def is_obj(ty):
return ty >= FIRST_OBJ_ID
def is_fn(ty):
return FIRST_FN_ID <= ty and ty < FIRST_OBJ_ID
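# Id layout used below (a brief summary of the constants above): ids below FIRST_FN_ID
# denote primitive types, ids in [FIRST_FN_ID, FIRST_OBJ_ID) denote closure/function-pointer
# types registered via fun_Type, and ids >= FIRST_OBJ_ID denote Z3 object types
# registered via def_Type.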
Type2Str = { VOID : 'void', VOID_PTR : 'void*', INT : 'int', UINT : 'unsigned', INT64 : 'int64_t', UINT64 : 'uint64_t', DOUBLE : 'double',
FLOAT : 'float', STRING : 'Z3_string', STRING_PTR : 'Z3_string_ptr', BOOL : 'bool', SYMBOL : 'Z3_symbol',
PRINT_MODE : 'Z3_ast_print_mode', ERROR_CODE : 'Z3_error_code', CHAR: 'char', CHAR_PTR: 'Z3_char_ptr', LBOOL : 'Z3_lbool'
}
Type2PyStr = { VOID_PTR : 'ctypes.c_void_p', INT : 'ctypes.c_int', UINT : 'ctypes.c_uint', INT64 : 'ctypes.c_longlong',
UINT64 : 'ctypes.c_ulonglong', DOUBLE : 'ctypes.c_double', FLOAT : 'ctypes.c_float',
STRING : 'ctypes.c_char_p', STRING_PTR : 'ctypes.POINTER(ctypes.c_char_p)', BOOL : 'ctypes.c_bool', SYMBOL : 'Symbol',
PRINT_MODE : 'ctypes.c_uint', ERROR_CODE : 'ctypes.c_uint', CHAR : 'ctypes.c_char', CHAR_PTR: 'ctypes.POINTER(ctypes.c_char)', LBOOL : 'ctypes.c_int'
}
# Mapping to .NET types
Type2Dotnet = { VOID : 'void', VOID_PTR : 'IntPtr', INT : 'int', UINT : 'uint', INT64 : 'Int64', UINT64 : 'UInt64', DOUBLE : 'double',
FLOAT : 'float', STRING : 'string', STRING_PTR : 'byte**', BOOL : 'byte', SYMBOL : 'IntPtr',
PRINT_MODE : 'uint', ERROR_CODE : 'uint', CHAR : 'char', CHAR_PTR : 'IntPtr', LBOOL : 'int' }
# Mapping to ML types
Type2ML = { VOID : 'unit', VOID_PTR : 'ptr', INT : 'int', UINT : 'int', INT64 : 'int64', UINT64 : 'int64', DOUBLE : 'float',
FLOAT : 'float', STRING : 'string', STRING_PTR : 'char**',
BOOL : 'bool', SYMBOL : 'z3_symbol', PRINT_MODE : 'int', ERROR_CODE : 'int', CHAR : 'char', CHAR_PTR : 'string', LBOOL : 'int' }
Closures = []
class APITypes:
def __init__(self):
self.next_type_id = FIRST_OBJ_ID
self.next_fntype_id = FIRST_FN_ID
def def_Type(self, var, c_type, py_type):
"""Process type definitions of the form def_Type(var, c_type, py_type)
The variable 'var' is set to a unique number and recorded globally using exec
It is used by 'def_APIs' to that uses the unique numbers to access the
corresponding C and Python types.
"""
id = self.next_type_id
exec('%s = %s' % (var, id), globals())
Type2Str[id] = c_type
Type2PyStr[id] = py_type
self.next_type_id += 1
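        # Illustrative example (the real entries live in the API headers):
        #   self.def_Type('CONTEXT', 'Z3_context', 'ContextObj')
        # defines a global integer id CONTEXT with Type2Str[CONTEXT] == 'Z3_context'
        # and Type2PyStr[CONTEXT] == 'ContextObj'.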
def def_Types(self, api_files):
global Closures
pat1 = re.compile(r" *def_Type\(\'(.*)\',[^\']*\'(.*)\',[^\']*\'(.*)\'\)[ \t]*")
pat2 = re.compile(r"Z3_DECLARE_CLOSURE\((.*),(.*), \((.*)\)\)")
for api_file in api_files:
with open(api_file, 'r') as api:
for line in api:
m = pat1.match(line)
if m:
self.def_Type(m.group(1), m.group(2), m.group(3))
continue
m = pat2.match(line)
if m:
self.fun_Type(m.group(1))
Closures += [(m.group(1), m.group(2), m.group(3))]
continue
#
# Populate object type entries in dotnet and ML bindings.
#
for k in Type2Str:
v = Type2Str[k]
if is_obj(k) or is_fn(k):
Type2Dotnet[k] = v
Type2ML[k] = v.lower()
def fun_Type(self, var):
"""Process function type definitions"""
id = self.next_fntype_id
exec('%s = %s' % (var, id), globals())
Type2Str[id] = var
Type2PyStr[id] = var
self.next_fntype_id += 1
def type2str(ty):
global Type2Str
return Type2Str[ty]
def type2pystr(ty):
global Type2PyStr
return Type2PyStr[ty]
def type2dotnet(ty):
global Type2Dotnet
return Type2Dotnet[ty]
def type2ml(ty):
global Type2ML
q = Type2ML[ty]
if q[0:3] == 'z3_':
return q[3:]
else:
        return q
def _in(ty):
return (IN, ty)
def _in_array(sz, ty):
return (IN_ARRAY, ty, sz)
def _fnptr(ty):
return (FN_PTR, ty)
def _out(ty):
return (OUT, ty)
def _out_array(sz, ty):
return (OUT_ARRAY, ty, sz, sz)
# cap contains the position of the argument that stores the capacity of the array
# sz contains the position of the output argument that stores the (real) size of the array
def _out_array2(cap, sz, ty):
return (OUT_ARRAY, ty, cap, sz)
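# Illustrative use: _out_array2(0, 1, UINT) describes an output array whose capacity is
# given by argument 0 and whose actual size is written to (output) argument 1.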
def _inout_array(sz, ty):
return (INOUT_ARRAY, ty, sz, sz)
def _out_managed_array(sz,ty):
return (OUT_MANAGED_ARRAY, ty, 0, sz)
def param_kind(p):
return p[0]
def param_type(p):
return p[1]
def param_array_capacity_pos(p):
return p[2]
def param_array_size_pos(p):
return p[3]
def param2str(p):
if param_kind(p) == IN_ARRAY:
return "%s const *" % (type2str(param_type(p)))
elif param_kind(p) == OUT_ARRAY or param_kind(p) == IN_ARRAY or param_kind(p) == INOUT_ARRAY:
return "%s*" % (type2str(param_type(p)))
elif param_kind(p) == OUT:
return "%s*" % (type2str(param_type(p)))
elif param_kind(p) == FN_PTR:
return "%s*" % (type2str(param_type(p)))
else:
return type2str(param_type(p))
def param2dotnet(p):
k = param_kind(p)
if k == OUT:
if param_type(p) == STRING:
return "out IntPtr"
else:
return "[In, Out] ref %s" % type2dotnet(param_type(p))
elif k == IN_ARRAY:
return "[In] %s[]" % type2dotnet(param_type(p))
elif k == INOUT_ARRAY:
return "[In, Out] %s[]" % type2dotnet(param_type(p))
elif k == OUT_ARRAY:
return "[Out] %s[]" % type2dotnet(param_type(p))
elif k == OUT_MANAGED_ARRAY:
return "[Out] out %s[]" % type2dotnet(param_type(p))
else:
return type2dotnet(param_type(p))
# --------------
def param2pystr(p):
if param_kind(p) == IN_ARRAY or param_kind(p) == OUT_ARRAY or param_kind(p) == IN_ARRAY or param_kind(p) == INOUT_ARRAY or param_kind(p) == OUT:
return "ctypes.POINTER(%s)" % type2pystr(param_type(p))
else:
return type2pystr(param_type(p))
# --------------
# ML
def param2ml(p):
k = param_kind(p)
if k == OUT:
if param_type(p) == INT or param_type(p) == UINT or param_type(p) == BOOL:
return "int"
elif param_type(p) == INT64 or param_type(p) == UINT64:
return "int64"
elif param_type(p) == STRING:
return "string"
else:
return "ptr"
elif k == IN_ARRAY or k == INOUT_ARRAY or k == OUT_ARRAY:
return "%s list" % type2ml(param_type(p))
elif k == OUT_MANAGED_ARRAY:
return "%s list" % type2ml(param_type(p))
else:
return type2ml(param_type(p))
# Save name, result, params to generate wrapper
_API2PY = []
def mk_py_binding(name, result, params):
global core_py
global _API2PY
_API2PY.append((name, result, params))
if result != VOID:
core_py.write("_lib.%s.restype = %s\n" % (name, type2pystr(result)))
core_py.write("_lib.%s.argtypes = [" % name)
first = True
for p in params:
if first:
first = False
else:
core_py.write(", ")
core_py.write(param2pystr(p))
core_py.write("]\n")
def extra_API(name, result, params):
mk_py_binding(name, result, params)
reg_dotnet(name, result, params)
def display_args(num):
for i in range(num):
if i > 0:
core_py.write(", ")
core_py.write("a%s" % i)
def display_args_to_z3(params):
i = 0
for p in params:
if i > 0:
core_py.write(", ")
if param_type(p) == STRING:
core_py.write("_str_to_bytes(a%s)" % i)
else:
core_py.write("a%s" % i)
i = i + 1
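# A sketch of how the three lists below are used by the generated wrappers:
#   NULLWrapped - constructors whose raw result is checked against NULL (allocation failure),
#   Unwrapped   - functions that never receive the automatic Z3_get_error_code check,
#   Unchecked   - dec-ref style functions for which the error check is also skipped.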
NULLWrapped = [ 'Z3_mk_context', 'Z3_mk_context_rc' ]
Unwrapped = [ 'Z3_del_context', 'Z3_get_error_code' ]
Unchecked = frozenset([ 'Z3_dec_ref', 'Z3_params_dec_ref', 'Z3_model_dec_ref',
'Z3_func_interp_dec_ref', 'Z3_func_entry_dec_ref',
'Z3_goal_dec_ref', 'Z3_tactic_dec_ref', 'Z3_simplifier_dec_ref', 'Z3_probe_dec_ref',
'Z3_fixedpoint_dec_ref', 'Z3_param_descrs_dec_ref',
'Z3_ast_vector_dec_ref', 'Z3_ast_map_dec_ref',
'Z3_apply_result_dec_ref', 'Z3_solver_dec_ref',
'Z3_stats_dec_ref', 'Z3_optimize_dec_ref'])
def mk_py_wrappers():
core_py.write("""
class Elementaries:
def __init__(self, f):
self.f = f
self.get_error_code = _lib.Z3_get_error_code
self.get_error_message = _lib.Z3_get_error_msg
self.OK = Z3_OK
self.Exception = Z3Exception
def Check(self, ctx):
err = self.get_error_code(ctx)
if err != self.OK:
raise self.Exception(self.get_error_message(ctx, err))
def Z3_set_error_handler(ctx, hndlr, _elems=Elementaries(_lib.Z3_set_error_handler)):
ceh = _error_handler_type(hndlr)
_elems.f(ctx, ceh)
_elems.Check(ctx)
return ceh
def Z3_solver_register_on_clause(ctx, s, user_ctx, on_clause_eh, _elems = Elementaries(_lib.Z3_solver_register_on_clause)):
_elems.f(ctx, s, user_ctx, on_clause_eh)
_elems.Check(ctx)
def Z3_solver_propagate_init(ctx, s, user_ctx, push_eh, pop_eh, fresh_eh, _elems = Elementaries(_lib.Z3_solver_propagate_init)):
_elems.f(ctx, s, user_ctx, push_eh, pop_eh, fresh_eh)
_elems.Check(ctx)
def Z3_solver_propagate_final(ctx, s, final_eh, _elems = Elementaries(_lib.Z3_solver_propagate_final)):
_elems.f(ctx, s, final_eh)
_elems.Check(ctx)
def Z3_solver_propagate_fixed(ctx, s, fixed_eh, _elems = Elementaries(_lib.Z3_solver_propagate_fixed)):
_elems.f(ctx, s, fixed_eh)
_elems.Check(ctx)
def Z3_solver_propagate_eq(ctx, s, eq_eh, _elems = Elementaries(_lib.Z3_solver_propagate_eq)):
_elems.f(ctx, s, eq_eh)
_elems.Check(ctx)
def Z3_solver_propagate_diseq(ctx, s, diseq_eh, _elems = Elementaries(_lib.Z3_solver_propagate_diseq)):
_elems.f(ctx, s, diseq_eh)
_elems.Check(ctx)
def Z3_optimize_register_model_eh(ctx, o, m, user_ctx, on_model_eh, _elems = Elementaries(_lib.Z3_optimize_register_model_eh)):
_elems.f(ctx, o, m, user_ctx, on_model_eh)
_elems.Check(ctx)
""")
for sig in _API2PY:
mk_py_wrapper_single(sig)
if sig[1] == STRING:
mk_py_wrapper_single(sig, decode_string=False)
def mk_py_wrapper_single(sig, decode_string=True):
name = sig[0]
result = sig[1]
params = sig[2]
num = len(params)
def_name = name
if not decode_string:
def_name += '_bytes'
core_py.write("def %s(" % def_name)
display_args(num)
comma = ", " if num != 0 else ""
core_py.write("%s_elems=Elementaries(_lib.%s)):\n" % (comma, name))
lval = "r = " if result != VOID else ""
core_py.write(" %s_elems.f(" % lval)
display_args_to_z3(params)
core_py.write(")\n")
if len(params) > 0 and param_type(params[0]) == CONTEXT and not name in Unwrapped and not name in Unchecked:
core_py.write(" _elems.Check(a0)\n")
if result == STRING and decode_string:
core_py.write(" return _to_pystr(r)\n")
elif result != VOID:
core_py.write(" return r\n")
core_py.write("\n")
## .NET API native interface
_dotnet_decls = []
def reg_dotnet(name, result, params):
global _dotnet_decls
_dotnet_decls.append((name, result, params))
def mk_dotnet(dotnet):
global Type2Str
dotnet.write('// Automatically generated file\n')
dotnet.write('using System;\n')
dotnet.write('using System.Collections.Generic;\n')
dotnet.write('using System.Text;\n')
dotnet.write('using System.Runtime.InteropServices;\n\n')
dotnet.write('#pragma warning disable 1591\n\n')
dotnet.write('namespace Microsoft.Z3\n')
dotnet.write('{\n')
for k in Type2Str:
v = Type2Str[k]
if is_obj(k):
dotnet.write(' using %s = System.IntPtr;\n' % v)
dotnet.write(' using voidp = System.IntPtr;\n')
dotnet.write('\n')
dotnet.write(' public class Native\n')
dotnet.write(' {\n\n')
for name, ret, sig in Closures:
sig = sig.replace("void*","voidp").replace("unsigned","uint")
sig = sig.replace("Z3_ast*","ref IntPtr").replace("uint*","ref uint").replace("Z3_lbool*","ref int")
ret = ret.replace("void*","voidp").replace("unsigned","uint")
if "*" in sig or "*" in ret:
continue
dotnet.write(' [UnmanagedFunctionPointer(CallingConvention.Cdecl)]\n')
dotnet.write(' public delegate %s %s(%s);\n' % (ret,name,sig))
dotnet.write(' public class LIB\n')
dotnet.write(' {\n')
dotnet.write(' const string Z3_DLL_NAME = \"libz3\";\n'
' \n')
dotnet.write(' [DllImport(Z3_DLL_NAME, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]\n')
dotnet.write(' public extern static void Z3_set_error_handler(Z3_context a0, Z3_error_handler a1);\n\n')
for name, result, params in _dotnet_decls:
dotnet.write(' [DllImport(Z3_DLL_NAME, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]\n')
dotnet.write(' ')
if result == STRING:
dotnet.write('public extern static IntPtr %s(' % (name))
else:
dotnet.write('public extern static %s %s(' % (type2dotnet(result), name))
first = True
i = 0
for param in params:
if first:
first = False
else:
dotnet.write(', ')
dotnet.write('%s a%d' % (param2dotnet(param), i))
i = i + 1
dotnet.write(');\n\n')
dotnet.write(' }\n')
def mk_dotnet_wrappers(dotnet):
global Type2Str
dotnet.write("\n")
dotnet.write(" public static void Z3_set_error_handler(Z3_context a0, Z3_error_handler a1) {\n")
dotnet.write(" LIB.Z3_set_error_handler(a0, a1);\n")
dotnet.write(" Z3_error_code err = (Z3_error_code)LIB.Z3_get_error_code(a0);\n")
dotnet.write(" if (err != Z3_error_code.Z3_OK)\n")
dotnet.write(" throw new Z3Exception(Marshal.PtrToStringAnsi(LIB.Z3_get_error_msg(a0, (uint)err)));\n")
dotnet.write(" }\n\n")
for name, result, params in _dotnet_decls:
if result == STRING:
dotnet.write(' public static string %s(' % (name))
else:
dotnet.write(' public static %s %s(' % (type2dotnet(result), name))
first = True
i = 0
for param in params:
if first:
first = False
else:
dotnet.write(', ')
dotnet.write('%s a%d' % (param2dotnet(param), i))
i = i + 1
dotnet.write(') {\n')
dotnet.write(' ')
if result == STRING:
dotnet.write('IntPtr r = ')
elif result != VOID:
dotnet.write('%s r = ' % type2dotnet(result))
dotnet.write('LIB.%s(' % (name))
first = True
i = 0
for param in params:
if first:
first = False
else:
dotnet.write(', ')
if param_kind(param) == OUT:
if param_type(param) == STRING:
dotnet.write('out ')
else:
dotnet.write('ref ')
elif param_kind(param) == OUT_MANAGED_ARRAY:
dotnet.write('out ')
dotnet.write('a%d' % i)
i = i + 1
dotnet.write(');\n')
if name not in Unwrapped:
if name in NULLWrapped:
dotnet.write(" if (r == IntPtr.Zero)\n")
dotnet.write(" throw new Z3Exception(\"Object allocation failed.\");\n")
else:
if len(params) > 0 and param_type(params[0]) == CONTEXT and name not in Unchecked:
dotnet.write(" Z3_error_code err = (Z3_error_code)LIB.Z3_get_error_code(a0);\n")
dotnet.write(" if (err != Z3_error_code.Z3_OK)\n")
dotnet.write(" throw new Z3Exception(Marshal.PtrToStringAnsi(LIB.Z3_get_error_msg(a0, (uint)err)));\n")
if result == STRING:
dotnet.write(" return Marshal.PtrToStringAnsi(r);\n")
elif result != VOID:
dotnet.write(" return r;\n")
dotnet.write(" }\n\n")
dotnet.write(" }\n\n")
dotnet.write("}\n\n")
# ----------------------
# Java
Type2Java = { VOID : 'void', VOID_PTR : 'long', INT : 'int', UINT : 'int', INT64 : 'long', UINT64 : 'long', DOUBLE : 'double',
FLOAT : 'float', STRING : 'String', STRING_PTR : 'StringPtr',
BOOL : 'boolean', SYMBOL : 'long', PRINT_MODE : 'int', ERROR_CODE : 'int', CHAR : 'char', CHAR_PTR : 'long', LBOOL : 'int' }
Type2JavaW = { VOID : 'void', VOID_PTR : 'jlong', INT : 'jint', UINT : 'jint', INT64 : 'jlong', UINT64 : 'jlong', DOUBLE : 'jdouble',
FLOAT : 'jfloat', STRING : 'jstring', STRING_PTR : 'jobject',
BOOL : 'jboolean', SYMBOL : 'jlong', PRINT_MODE : 'jint', ERROR_CODE : 'jint', CHAR : 'jchar', CHAR_PTR : 'jlong', LBOOL : 'jint'}
def type2java(ty):
global Type2Java
if (ty >= FIRST_FN_ID):
return 'long'
else:
return Type2Java[ty]
def type2javaw(ty):
global Type2JavaW
if (ty >= FIRST_FN_ID):
return 'jlong'
else:
return Type2JavaW[ty]
def param2java(p):
k = param_kind(p)
if k == OUT:
if param_type(p) == INT or param_type(p) == UINT:
return "IntPtr"
elif param_type(p) == INT64 or param_type(p) == UINT64 or param_type(p) == VOID_PTR or param_type(p) >= FIRST_OBJ_ID:
return "LongPtr"
elif param_type(p) == STRING:
return "StringPtr"
else:
print("ERROR: unreachable code")
assert(False)
exit(1)
elif k == IN_ARRAY or k == INOUT_ARRAY or k == OUT_ARRAY:
return "%s[]" % type2java(param_type(p))
elif k == OUT_MANAGED_ARRAY:
if param_type(p) == UINT:
return "UIntArrayPtr"
else:
return "ObjArrayPtr"
elif k == FN_PTR:
return "LongPtr"
else:
return type2java(param_type(p))
def param2javaw(p):
k = param_kind(p)
if k == OUT:
return "jobject"
elif k == IN_ARRAY or k == INOUT_ARRAY or k == OUT_ARRAY:
if param_type(p) == INT or param_type(p) == UINT or param_type(p) == BOOL:
return "jintArray"
else:
return "jlongArray"
elif k == OUT_MANAGED_ARRAY:
return "jlong"
else:
return type2javaw(param_type(p))
def java_method_name(name):
result = ''
name = name[3:] # Remove Z3_
n = len(name)
i = 0
while i < n:
if name[i] == '_':
i = i + 1
if i < n:
result += name[i].upper()
else:
result += name[i]
i = i + 1
return result
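# Illustrative: java_method_name('Z3_mk_bool_sort') -> 'mkBoolSort'.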
# Return the type of the java array elements
def java_array_element_type(p):
if param_type(p) == INT or param_type(p) == UINT or param_type(p) == BOOL:
return 'jint'
else:
return 'jlong'
def mk_java(java_src, java_dir, package_name):
java_nativef = os.path.join(java_dir, 'Native.java')
java_wrapperf = os.path.join(java_dir, 'Native.cpp')
java_native = open(java_nativef, 'w')
java_native.write('// Automatically generated file\n')
java_native.write('package %s;\n' % package_name)
java_native.write('import %s.enumerations.*;\n' % package_name)
java_native.write('public final class Native {\n')
java_native.write(' public static class IntPtr { public int value; }\n')
java_native.write(' public static class LongPtr { public long value; }\n')
java_native.write(' public static class StringPtr { public String value; }\n')
java_native.write(' public static class ObjArrayPtr { public long[] value; }\n')
java_native.write(' public static class UIntArrayPtr { public int[] value; }\n')
java_native.write(' public static native void setInternalErrorHandler(long ctx);\n\n')
java_native.write(' static {\n')
java_native.write(' if (!Boolean.parseBoolean(System.getProperty("z3.skipLibraryLoad"))) {\n')
java_native.write(' try {\n')
java_native.write(' System.loadLibrary("z3java");\n')
java_native.write(' } catch (UnsatisfiedLinkError ex) {\n')
java_native.write(' System.loadLibrary("libz3java");\n')
java_native.write(' }\n')
java_native.write(' }\n')
java_native.write(' }\n')
java_native.write("""
public static native long propagateInit(Object o, long ctx, long solver);
public static native void propagateRegisterCreated(Object o, long ctx, long solver);
public static native void propagateRegisterFixed(Object o, long ctx, long solver);
public static native void propagateRegisterEq(Object o, long ctx, long solver);
public static native void propagateRegisterDecide(Object o, long ctx, long solver);
public static native void propagateRegisterFinal(Object o, long ctx, long solver);
public static native void propagateConflict(Object o, long ctx, long solver, long javainfo, int num_fixed, long[] fixed, long num_eqs, long[] eq_lhs, long[] eq_rhs, long conseq);
public static native void propagateAdd(Object o, long ctx, long solver, long javainfo, long e);
public static native boolean propagateNextSplit(Object o, long ctx, long solver, long javainfo, long e, long idx, int phase);
public static native void propagateDestroy(Object o, long ctx, long solver, long javainfo);
public static abstract class UserPropagatorBase implements AutoCloseable {
protected long ctx;
protected long solver;
protected long javainfo;
public UserPropagatorBase(long _ctx, long _solver) {
ctx = _ctx;
solver = _solver;
javainfo = propagateInit(this, ctx, solver);
}
@Override
public void close() {
Native.propagateDestroy(this, ctx, solver, javainfo);
javainfo = 0;
solver = 0;
ctx = 0;
}
protected final void registerCreated() {
Native.propagateRegisterCreated(this, ctx, solver);
}
protected final void registerFixed() {
Native.propagateRegisterFixed(this, ctx, solver);
}
protected final void registerEq() {
Native.propagateRegisterEq(this, ctx, solver);
}
protected final void registerDecide() {
Native.propagateRegisterDecide(this, ctx, solver);
}
protected final void registerFinal() {
Native.propagateRegisterFinal(this, ctx, solver);
}
protected abstract void pushWrapper();
protected abstract void popWrapper(int number);
protected abstract void finWrapper();
protected abstract void eqWrapper(long lx, long ly);
protected abstract UserPropagatorBase freshWrapper(long lctx);
protected abstract void createdWrapper(long le);
protected abstract void fixedWrapper(long lvar, long lvalue);
}
""")
java_native.write('\n')
for name, result, params in _dotnet_decls:
java_native.write(' protected static native %s INTERNAL%s(' % (type2java(result), java_method_name(name)))
first = True
i = 0
for param in params:
if first:
first = False
else:
java_native.write(', ')
java_native.write('%s a%d' % (param2java(param), i))
i = i + 1
java_native.write(');\n')
java_native.write('\n\n')
# Exception wrappers
for name, result, params in _dotnet_decls:
java_native.write(' public static %s %s(' % (type2java(result), java_method_name(name)))
first = True
i = 0
for param in params:
if first:
first = False
else:
java_native.write(', ')
java_native.write('%s a%d' % (param2java(param), i))
i = i + 1
java_native.write(')')
if (len(params) > 0 and param_type(params[0]) == CONTEXT) or name in NULLWrapped:
java_native.write(' throws Z3Exception')
java_native.write('\n')
java_native.write(' {\n')
java_native.write(' ')
if result != VOID:
java_native.write('%s res = ' % type2java(result))
java_native.write('INTERNAL%s(' % (java_method_name(name)))
first = True
i = 0
for param in params:
if first:
first = False
else:
java_native.write(', ')
java_native.write('a%d' % i)
i = i + 1
java_native.write(');\n')
if name not in Unwrapped:
if name in NULLWrapped:
java_native.write(" if (res == 0)\n")
java_native.write(" throw new Z3Exception(\"Object allocation failed.\");\n")
else:
if len(params) > 0 and param_type(params[0]) == CONTEXT and name not in Unchecked:
java_native.write(' Z3_error_code err = Z3_error_code.fromInt(INTERNALgetErrorCode(a0));\n')
java_native.write(' if (err != Z3_error_code.Z3_OK)\n')
java_native.write(' throw new Z3Exception(INTERNALgetErrorMsg(a0, err.toInt()));\n')
if result != VOID:
java_native.write(' return res;\n')
java_native.write(' }\n\n')
java_native.write('}\n')
java_wrapper = open(java_wrapperf, 'w')
pkg_str = package_name.replace('.', '_')
java_wrapper.write("// Automatically generated file\n")
with open(java_src + "/NativeStatic.txt") as ins:
for line in ins:
java_wrapper.write(line)
for name, result, params in _dotnet_decls:
java_wrapper.write('DLL_VIS JNIEXPORT %s JNICALL Java_%s_Native_INTERNAL%s(JNIEnv * jenv, jclass cls' % (type2javaw(result), pkg_str, java_method_name(name)))
i = 0
for param in params:
java_wrapper.write(', ')
java_wrapper.write('%s a%d' % (param2javaw(param), i))
i = i + 1
java_wrapper.write(') {\n')
# preprocess arrays, strings, in/out arguments
i = 0
for param in params:
k = param_kind(param)
if k == OUT or k == INOUT:
java_wrapper.write(' %s _a%s;\n' % (type2str(param_type(param)), i))
elif k == IN_ARRAY or k == INOUT_ARRAY:
if param_type(param) == INT or param_type(param) == UINT or param_type(param) == BOOL:
java_wrapper.write(' %s * _a%s = (%s*) jenv->GetIntArrayElements(a%s, NULL);\n' % (type2str(param_type(param)), i, type2str(param_type(param)), i))
else:
java_wrapper.write(' GETLONGAELEMS(%s, a%s, _a%s);\n' % (type2str(param_type(param)), i, i))
elif k == OUT_ARRAY:
java_wrapper.write(' %s * _a%s = (%s *) malloc(((unsigned)a%s) * sizeof(%s));\n' % (type2str(param_type(param)),
i,
type2str(param_type(param)),
param_array_capacity_pos(param),
type2str(param_type(param))))
if param_type(param) == INT or param_type(param) == UINT or param_type(param) == BOOL:
java_wrapper.write(' jenv->GetIntArrayRegion(a%s, 0, (jsize)a%s, (jint*)_a%s);\n' % (i, param_array_capacity_pos(param), i))
else:
java_wrapper.write(' GETLONGAREGION(%s, a%s, 0, a%s, _a%s);\n' % (type2str(param_type(param)), i, param_array_capacity_pos(param), i))
elif k == IN and param_type(param) == STRING:
java_wrapper.write(' Z3_string _a%s = (Z3_string) jenv->GetStringUTFChars(a%s, NULL);\n' % (i, i))
elif k == OUT_MANAGED_ARRAY:
java_wrapper.write(' %s * _a%s = 0;\n' % (type2str(param_type(param)), i))
i = i + 1
# invoke procedure
java_wrapper.write(' ')
if result != VOID:
java_wrapper.write('%s result = ' % type2str(result))
java_wrapper.write('%s(' % name)
i = 0
first = True
for param in params:
if first:
first = False
else:
java_wrapper.write(', ')
k = param_kind(param)
if k == OUT or k == INOUT:
java_wrapper.write('&_a%s' % i)
elif k == OUT_ARRAY or k == IN_ARRAY or k == INOUT_ARRAY:
java_wrapper.write('_a%s' % i)
elif k == OUT_MANAGED_ARRAY:
java_wrapper.write('&_a%s' % i)
elif k == IN and param_type(param) == STRING:
java_wrapper.write('_a%s' % i)
else:
java_wrapper.write('(%s)a%i' % (param2str(param), i))
i = i + 1
java_wrapper.write(');\n')
# cleanup
i = 0
for param in params:
k = param_kind(param)
if k == OUT_ARRAY:
if param_type(param) == INT or param_type(param) == UINT or param_type(param) == BOOL:
java_wrapper.write(' jenv->SetIntArrayRegion(a%s, 0, (jsize)a%s, (jint*)_a%s);\n' % (i, param_array_capacity_pos(param), i))
else:
java_wrapper.write(' SETLONGAREGION(a%s, 0, a%s, _a%s);\n' % (i, param_array_capacity_pos(param), i))
java_wrapper.write(' free(_a%s);\n' % i)
elif k == IN_ARRAY or k == OUT_ARRAY:
if param_type(param) == INT or param_type(param) == UINT or param_type(param) == BOOL:
java_wrapper.write(' jenv->ReleaseIntArrayElements(a%s, (jint*)_a%s, JNI_ABORT);\n' % (i, i))
else:
java_wrapper.write(' RELEASELONGAELEMS(a%s, _a%s);\n' % (i, i))
elif k == OUT or k == INOUT:
if param_type(param) == INT or param_type(param) == UINT or param_type(param) == BOOL:
java_wrapper.write(' {\n')
java_wrapper.write(' jclass mc = jenv->GetObjectClass(a%s);\n' % i)
java_wrapper.write(' jfieldID fid = jenv->GetFieldID(mc, "value", "I");\n')
java_wrapper.write(' jenv->SetIntField(a%s, fid, (jint) _a%s);\n' % (i, i))
java_wrapper.write(' }\n')
elif param_type(param) == STRING:
java_wrapper.write(' {\n')
java_wrapper.write(' jclass mc = jenv->GetObjectClass(a%s);\n' % i)
java_wrapper.write(' jfieldID fid = jenv->GetFieldID(mc, "value", "Ljava/lang/String;");')
java_wrapper.write(' jstring fval = jenv->NewStringUTF(_a%s);\n' % i)
java_wrapper.write(' jenv->SetObjectField(a%s, fid, fval);\n' % i)
java_wrapper.write(' }\n')
else:
java_wrapper.write(' {\n')
java_wrapper.write(' jclass mc = jenv->GetObjectClass(a%s);\n' % i)
java_wrapper.write(' jfieldID fid = jenv->GetFieldID(mc, "value", "J");\n')
java_wrapper.write(' jenv->SetLongField(a%s, fid, (jlong) _a%s);\n' % (i, i))
java_wrapper.write(' }\n')
elif k == OUT_MANAGED_ARRAY:
java_wrapper.write(' *(jlong**)a%s = (jlong*)_a%s;\n' % (i, i))
elif k == IN and param_type(param) == STRING:
java_wrapper.write(' jenv->ReleaseStringUTFChars(a%s, _a%s);\n' % (i, i));
i = i + 1
# return
if result == STRING:
java_wrapper.write(' return jenv->NewStringUTF(result);\n')
elif result != VOID:
java_wrapper.write(' return (%s) result;\n' % type2javaw(result))
java_wrapper.write('}\n')
java_wrapper.write('#ifdef __cplusplus\n')
java_wrapper.write('}\n')
java_wrapper.write('#endif\n')
if is_verbose():
print("Generated '%s'" % java_nativef)
def mk_log_header(file, name, params):
file.write("void log_%s(" % name)
i = 0
for p in params:
if i > 0:
file.write(", ")
file.write("%s a%s" % (param2str(p), i))
i = i + 1
file.write(")")
# ---------------------------------
# Logging
def log_param(p):
kind = param_kind(p)
ty = param_type(p)
return is_obj(ty) and (kind == OUT or kind == INOUT or kind == OUT_ARRAY or kind == INOUT_ARRAY)
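# That is, a parameter needs logging on return exactly when it is an object-valued
# output (plain out, inout, or out-array) parameter.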
def log_result(result, params):
for p in params:
if log_param(p):
return True
return False
def mk_log_macro(file, name, params):
file.write("#define LOG_%s(" % name)
i = 0
for p in params:
if i > 0:
file.write(", ")
file.write("_ARG%s" % i)
i = i + 1
file.write(") z3_log_ctx _LOG_CTX; ")
auxs = set()
i = 0
for p in params:
if log_param(p):
kind = param_kind(p)
if kind == OUT_ARRAY or kind == INOUT_ARRAY:
cap = param_array_capacity_pos(p)
if cap not in auxs:
auxs.add(cap)
file.write("unsigned _Z3_UNUSED Z3ARG%s = 0; " % cap)
sz = param_array_size_pos(p)
if sz not in auxs:
auxs.add(sz)
file.write("unsigned * _Z3_UNUSED Z3ARG%s = 0; " % sz)
file.write("%s _Z3_UNUSED Z3ARG%s = 0; " % (param2str(p), i))
i = i + 1
file.write("if (_LOG_CTX.enabled()) { log_%s(" % name)
i = 0
for p in params:
if (i > 0):
file.write(', ')
file.write("_ARG%s" %i)
i = i + 1
file.write("); ")
auxs = set()
i = 0
for p in params:
if log_param(p):
kind = param_kind(p)
if kind == OUT_ARRAY or kind == INOUT_ARRAY:
cap = param_array_capacity_pos(p)
if cap not in auxs:
auxs.add(cap)
file.write("Z3ARG%s = _ARG%s; " % (cap, cap))
sz = param_array_size_pos(p)
if sz not in auxs:
auxs.add(sz)
file.write("Z3ARG%s = _ARG%s; " % (sz, sz))
file.write("Z3ARG%s = _ARG%s; " % (i, i))
i = i + 1
file.write("}\n")
def mk_log_result_macro(file, name, result, params):
file.write("#define RETURN_%s" % name)
if is_obj(result):
file.write("(Z3RES)")
file.write(" ")
file.write("if (_LOG_CTX.enabled()) { ")
if is_obj(result):
file.write("SetR(Z3RES); ")
i = 0
for p in params:
if log_param(p):
kind = param_kind(p)
if kind == OUT_ARRAY or kind == INOUT_ARRAY:
cap = param_array_capacity_pos(p)
sz = param_array_size_pos(p)
if cap == sz:
file.write("for (unsigned i = 0; i < Z3ARG%s; i++) { SetAO(Z3ARG%s[i], %s, i); } " % (sz, i, i))
else:
file.write("for (unsigned i = 0; Z3ARG%s && i < *Z3ARG%s; i++) { SetAO(Z3ARG%s[i], %s, i); } " % (sz, sz, i, i))
if kind == OUT or kind == INOUT:
file.write("SetO((Z3ARG%s == 0 ? 0 : *Z3ARG%s), %s); " % (i, i, i))
i = i + 1
file.write("} ")
if is_obj(result):
file.write("return Z3RES\n")
else:
file.write("return\n")
def mk_exec_header(file, name):
file.write("void exec_%s(z3_replayer & in)" % name)
def error(msg):
sys.stderr.write(msg)
exit(-1)
next_id = 0
API2Id = {}
def def_API(name, result, params):
global API2Id, next_id
global log_h, log_c
mk_py_binding(name, result, params)
reg_dotnet(name, result, params)
API2Id[next_id] = name
mk_log_header(log_h, name, params)
log_h.write(';\n')
mk_log_header(log_c, name, params)
log_c.write(' {\n R();\n')
mk_exec_header(exe_c, name)
exe_c.write(' {\n')
# Create Log function & Function call
i = 0
exe_c.write(" ")
if is_obj(result):
exe_c.write("%s result = " % type2str(result))
exe_c.write("%s(\n " % name)
for p in params:
kind = param_kind(p)
ty = param_type(p)
if (i > 0):
exe_c.write(",\n ")
if kind == IN:
if is_obj(ty):
log_c.write(" P(a%s);\n" % i)
exe_c.write("reinterpret_cast<%s>(in.get_obj(%s))" % (param2str(p), i))
elif ty == STRING:
log_c.write(" S(a%s);\n" % i)
exe_c.write("in.get_str(%s)" % i)
elif ty == SYMBOL:
log_c.write(" Sy(a%s);\n" % i)
exe_c.write("in.get_symbol(%s)" % i)
elif ty == UINT:
log_c.write(" U(a%s);\n" % i)
exe_c.write("in.get_uint(%s)" % i)
elif ty == UINT64:
log_c.write(" U(a%s);\n" % i)
exe_c.write("in.get_uint64(%s)" % i)
elif ty == INT:
log_c.write(" I(a%s);\n" % i)
exe_c.write("in.get_int(%s)" % i)
elif ty == INT64:
log_c.write(" I(a%s);\n" % i)
exe_c.write("in.get_int64(%s)" % i)
elif ty == DOUBLE:
log_c.write(" D(a%s);\n" % i)
exe_c.write("in.get_double(%s)" % i)
elif ty == FLOAT:
log_c.write(" D(a%s);\n" % i)
exe_c.write("in.get_float(%s)" % i)
elif ty == BOOL:
log_c.write(" I(a%s);\n" % i)
exe_c.write("in.get_bool(%s)" % i)
elif ty == VOID_PTR:
log_c.write(" P(0);\n")
exe_c.write("in.get_obj_addr(%s)" % i)
elif ty == LBOOL:
log_c.write(" I(static_cast<signed>(a%s));\n" % i)
exe_c.write("static_cast<%s>(in.get_int(%s))" % (type2str(ty), i))
elif ty == PRINT_MODE or ty == ERROR_CODE:
log_c.write(" U(static_cast<unsigned>(a%s));\n" % i)
exe_c.write("static_cast<%s>(in.get_uint(%s))" % (type2str(ty), i))
else:
error("unsupported parameter for %s, %s" % (name, p))
elif kind == INOUT:
error("unsupported parameter for %s, %s" % (name, p))
elif kind == OUT:
if is_obj(ty):
log_c.write(" P(0);\n")
exe_c.write("reinterpret_cast<%s>(in.get_obj_addr(%s))" % (param2str(p), i))
elif ty == STRING:
log_c.write(" S(\"\");\n")
exe_c.write("in.get_str_addr(%s)" % i)
elif ty == UINT:
log_c.write(" U(0);\n")
exe_c.write("in.get_uint_addr(%s)" % i)
elif ty == UINT64:
log_c.write(" U(0);\n")
exe_c.write("in.get_uint64_addr(%s)" % i)
elif ty == INT:
log_c.write(" I(0);\n")
exe_c.write("in.get_int_addr(%s)" % i)
elif ty == INT64:
log_c.write(" I(0);\n")
exe_c.write("in.get_int64_addr(%s)" % i)
elif ty == VOID_PTR:
log_c.write(" P(0);\n")
exe_c.write("in.get_obj_addr(%s)" % i)
else:
error("unsupported parameter for %s, %s" % (name, p))
elif kind == IN_ARRAY or kind == INOUT_ARRAY:
sz = param_array_capacity_pos(p)
log_c.write(" for (unsigned i = 0; i < a%s; i++) { " % sz)
if is_obj(ty):
log_c.write("P(a%s[i]);" % i)
log_c.write(" }\n")
log_c.write(" Ap(a%s);\n" % sz)
exe_c.write("reinterpret_cast<%s*>(in.get_obj_array(%s))" % (type2str(ty), i))
elif ty == SYMBOL:
log_c.write("Sy(a%s[i]);" % i)
log_c.write(" }\n")
log_c.write(" Asy(a%s);\n" % sz)
exe_c.write("in.get_symbol_array(%s)" % i)
elif ty == UINT:
log_c.write("U(a%s[i]);" % i)
log_c.write(" }\n")
log_c.write(" Au(a%s);\n" % sz)
exe_c.write("in.get_uint_array(%s)" % i)
elif ty == INT:
log_c.write("I(a%s[i]);" % i)
log_c.write(" }\n")
log_c.write(" Ai(a%s);\n" % sz)
exe_c.write("in.get_int_array(%s)" % i)
elif ty == BOOL:
log_c.write("U(a%s[i]);" % i)
log_c.write(" }\n")
log_c.write(" Au(a%s);\n" % sz)
exe_c.write("in.get_bool_array(%s)" % i)
else:
error ("unsupported parameter for %s, %s, %s" % (ty, name, p))
elif kind == OUT_ARRAY:
sz = param_array_capacity_pos(p)
sz_p = params[sz]
sz_p_k = param_kind(sz_p)
tstr = type2str(ty)
if sz_p_k == OUT or sz_p_k == INOUT:
sz_e = ("(*a%s)" % sz)
else:
sz_e = ("a%s" % sz)
log_c.write(" for (unsigned i = 0; i < %s; i++) { " % sz_e)
if is_obj(ty):
log_c.write("P(0);")
log_c.write(" }\n")
log_c.write(" Ap(%s);\n" % sz_e)
exe_c.write("reinterpret_cast<%s*>(in.get_obj_array(%s))" % (tstr, i))
elif ty == UINT:
log_c.write("U(0);")
log_c.write(" }\n")
log_c.write(" Au(%s);\n" % sz_e)
exe_c.write("in.get_uint_array(%s)" % i)
else:
error ("unsupported parameter for %s, %s" % (name, p))
elif kind == OUT_MANAGED_ARRAY:
sz = param_array_size_pos(p)
sz_p = params[sz]
sz_p_k = param_kind(sz_p)
tstr = type2str(ty)
if sz_p_k == OUT or sz_p_k == INOUT:
sz_e = ("(*a%s)" % sz)
else:
sz_e = ("a%s" % sz)
log_c.write(" for (unsigned i = 0; i < %s; i++) { " % sz_e)
log_c.write("P(0);")
log_c.write(" }\n")
log_c.write(" Ap(%s);\n" % sz_e)
exe_c.write("reinterpret_cast<%s**>(in.get_obj_array(%s))" % (tstr, i))
elif kind == FN_PTR:
log_c.write("// P(a%s);\n" % i)
exe_c.write("reinterpret_cast<%s>(in.get_obj(%s))" % (param2str(p), i))
else:
error ("unsupported parameter for %s, %s" % (name, p))
i = i + 1
log_c.write(" C(%s);\n" % next_id)
exe_c.write(");\n")
if is_obj(result):
exe_c.write(" in.store_result(result);\n")
if name == 'Z3_mk_context' or name == 'Z3_mk_context_rc':
exe_c.write(" Z3_set_error_handler(result, Z3_replayer_error_handler);")
log_c.write('}\n')
exe_c.write('}\n')
mk_log_macro(log_h, name, params)
if log_result(result, params):
mk_log_result_macro(log_h, name, result, params)
next_id = next_id + 1
def mk_bindings(exe_c):
exe_c.write("void register_z3_replayer_cmds(z3_replayer & in) {\n")
for key, val in API2Id.items():
exe_c.write(" in.register_cmd(%s, exec_%s, \"%s\");\n" % (key, val, val))
exe_c.write("}\n")
def ml_method_name(name):
return name[3:] # Remove Z3_
def is_out_param(p):
if param_kind(p) == OUT or param_kind(p) == INOUT or param_kind(p) == OUT_ARRAY or param_kind(p) == INOUT_ARRAY or param_kind(p) == OUT_MANAGED_ARRAY:
return True
else:
return False
def outparams(params):
op = []
for param in params:
if is_out_param(param):
op.append(param)
return op
def is_in_param(p):
if param_kind(p) == IN or param_kind(p) == INOUT or param_kind(p) == IN_ARRAY or param_kind(p) == INOUT_ARRAY:
return True
else:
return False
def inparams(params):
ip = []
for param in params:
if is_in_param(param):
ip.append(param)
return ip
def is_array_param(p):
if param_kind(p) == IN_ARRAY or param_kind(p) == INOUT_ARRAY or param_kind(p) == OUT_ARRAY:
return True
else:
return False
def arrayparams(params):
op = []
for param in params:
if is_array_param(param):
op.append(param)
return op
def ml_plus_type(ts):
if ts == 'Z3_context':
return 'Z3_context_plus'
elif ts == 'Z3_ast' or ts == 'Z3_sort' or ts == 'Z3_func_decl' or ts == 'Z3_app' or ts == 'Z3_pattern':
return 'Z3_ast_plus'
elif ts == 'Z3_symbol':
return 'Z3_symbol_plus'
elif ts == 'Z3_constructor':
return 'Z3_constructor_plus'
elif ts == 'Z3_constructor_list':
return 'Z3_constructor_list_plus'
elif ts == 'Z3_rcf_num':
return 'Z3_rcf_num_plus'
elif ts == 'Z3_params':
return 'Z3_params_plus'
elif ts == 'Z3_param_descrs':
return 'Z3_param_descrs_plus'
elif ts == 'Z3_model':
return 'Z3_model_plus'
elif ts == 'Z3_func_interp':
return 'Z3_func_interp_plus'
elif ts == 'Z3_func_entry':
return 'Z3_func_entry_plus'
elif ts == 'Z3_goal':
return 'Z3_goal_plus'
elif ts == 'Z3_tactic':
return 'Z3_tactic_plus'
elif ts == 'Z3_simplifier':
return 'Z3_simplifier_plus'
elif ts == 'Z3_probe':
return 'Z3_probe_plus'
elif ts == 'Z3_apply_result':
return 'Z3_apply_result_plus'
elif ts == 'Z3_solver':
return 'Z3_solver_plus'
elif ts == 'Z3_stats':
return 'Z3_stats_plus'
elif ts == 'Z3_ast_vector':
return 'Z3_ast_vector_plus'
elif ts == 'Z3_ast_map':
return 'Z3_ast_map_plus'
elif ts == 'Z3_fixedpoint':
return 'Z3_fixedpoint_plus'
elif ts == 'Z3_optimize':
return 'Z3_optimize_plus'
else:
return ts
def ml_minus_type(ts):
if ts == 'Z3_ast' or ts == 'Z3_sort' or ts == 'Z3_func_decl' or ts == 'Z3_app' or ts == 'Z3_pattern':
return 'Z3_ast'
if ts == 'Z3_ast_plus' or ts == 'Z3_sort_plus' or ts == 'Z3_func_decl_plus' or ts == 'Z3_app_plus' or ts == 'Z3_pattern_plus':
return 'Z3_ast'
elif ts == 'Z3_constructor_plus':
return 'Z3_constructor'
elif ts == 'Z3_constructor_list_plus':
return 'Z3_constructor_list'
elif ts == 'Z3_rcf_num_plus':
return 'Z3_rcf_num'
elif ts == 'Z3_params_plus':
return 'Z3_params'
elif ts == 'Z3_param_descrs_plus':
return 'Z3_param_descrs'
elif ts == 'Z3_model_plus':
return 'Z3_model'
elif ts == 'Z3_func_interp_plus':
return 'Z3_func_interp'
elif ts == 'Z3_func_entry_plus':
return 'Z3_func_entry'
elif ts == 'Z3_goal_plus':
return 'Z3_goal'
elif ts == 'Z3_tactic_plus':
return 'Z3_tactic'
elif ts == 'Z3_simplifier_plus':
return 'Z3_simplifier'
elif ts == 'Z3_probe_plus':
return 'Z3_probe'
elif ts == 'Z3_apply_result_plus':
return 'Z3_apply_result'
elif ts == 'Z3_solver_plus':
return 'Z3_solver'
elif ts == 'Z3_stats_plus':
return 'Z3_stats'
elif ts == 'Z3_ast_vector_plus':
return 'Z3_ast_vector'
elif ts == 'Z3_ast_map_plus':
return 'Z3_ast_map'
elif ts == 'Z3_fixedpoint_plus':
return 'Z3_fixedpoint'
elif ts == 'Z3_optimize_plus':
return 'Z3_optimize'
else:
return ts
def ml_plus_type_raw(ts):
if ml_has_plus_type(ts):
return ml_plus_type(ts) + '_raw';
else:
return ts
def ml_plus_ops_type(ts):
if ml_has_plus_type(ts):
return ml_plus_type(ts) + '_custom_ops'
else:
return 'default_custom_ops'
def ml_has_plus_type(ts):
return ts != ml_plus_type(ts)
def ml_unwrap(t, ts, s):
if t == STRING:
return '(' + ts + ') String_val(' + s + ')'
elif t == BOOL or (type2str(t) == 'bool'):
return '(' + ts + ') Bool_val(' + s + ')'
elif t == INT or t == PRINT_MODE or t == ERROR_CODE or t == LBOOL:
return '(' + ts + ') Int_val(' + s + ')'
elif t == UINT:
return '(' + ts + ') Unsigned_int_val(' + s + ')'
elif t == INT64:
return '(' + ts + ') Int64_val(' + s + ')'
elif t == UINT64:
return '(' + ts + ') Int64_val(' + s + ')'
elif t == DOUBLE:
return '(' + ts + ') Double_val(' + s + ')'
elif ml_has_plus_type(ts):
pts = ml_plus_type(ts)
return '(' + ts + ') ' + ml_plus_type_raw(ts) + '((' + pts + '*) Data_custom_val(' + s + '))'
else:
return '* ((' + ts + '*) Data_custom_val(' + s + '))'
def ml_set_wrap(t, d, n):
if t == VOID:
return d + ' = Val_unit;'
elif t == BOOL or (type2str(t) == 'bool'):
return d + ' = Val_bool(' + n + ');'
elif t == INT or t == UINT or t == PRINT_MODE or t == ERROR_CODE or t == LBOOL:
return d + ' = Val_int(' + n + ');'
elif t == INT64 or t == UINT64:
return d + ' = caml_copy_int64(' + n + ');'
elif t == DOUBLE:
        return d + ' = caml_copy_double(' + n + ');'
elif t == STRING:
return d + ' = caml_copy_string((const char*) ' + n + ');'
else:
pts = ml_plus_type(type2str(t))
return '*(' + pts + '*)Data_custom_val(' + d + ') = ' + n + ';'
def ml_alloc_and_store(t, lhs, rhs):
if t == VOID or t == BOOL or t == INT or t == UINT or t == PRINT_MODE or t == ERROR_CODE or t == INT64 or t == UINT64 or t == DOUBLE or t == STRING or t == LBOOL or (type2str(t) == 'bool'):
return ml_set_wrap(t, lhs, rhs)
else:
pts = ml_plus_type(type2str(t))
pops = ml_plus_ops_type(type2str(t))
alloc_str = '%s = caml_alloc_custom(&%s, sizeof(%s), 0, 1); ' % (lhs, pops, pts)
return alloc_str + ml_set_wrap(t, lhs, rhs)
z3_long_funs = frozenset([
'Z3_solver_check',
'Z3_solver_check_assumptions',
'Z3_simplify',
'Z3_simplify_ex',
])
z3_ml_overrides = frozenset([
'Z3_mk_config'])
z3_ml_callbacks = frozenset([
'Z3_solver_propagate_init',
'Z3_solver_propagate_fixed',
'Z3_solver_propagate_final',
'Z3_solver_propagate_eq',
'Z3_solver_propagate_diseq',
'Z3_solver_propagate_created',
'Z3_solver_propagate_decide',
'Z3_solver_register_on_clause'
])
def mk_ml(ml_src_dir, ml_output_dir):
global Type2Str
ml_nativef = os.path.join(ml_output_dir, 'z3native.ml')
ml_native = open(ml_nativef, 'w')
ml_native.write('(* Automatically generated file *)\n\n')
ml_pref = open(os.path.join(ml_src_dir, 'z3native.ml.pre'), 'r')
for s in ml_pref:
ml_native.write(s);
ml_pref.close()
ml_native.write('\n')
for name, result, params in _dotnet_decls:
if name in z3_ml_callbacks:
continue
ml_native.write('external %s : ' % ml_method_name(name))
ip = inparams(params)
op = outparams(params)
if len(ip) == 0:
ml_native.write(' unit -> ')
for p in ip:
ml_native.write('%s -> ' % param2ml(p))
if len(op) > 0:
ml_native.write('(')
first = True
if result != VOID or len(op) == 0:
ml_native.write('%s' % type2ml(result))
first = False
for p in op:
if first:
first = False
else:
ml_native.write(' * ')
ml_native.write('%s' % param2ml(p))
if len(op) > 0:
ml_native.write(')')
if len(ip) > 5:
ml_native.write(' = "n_%s_bytecode" "n_%s"\n' % (ml_method_name(name), ml_method_name(name)))
else:
ml_native.write(' = "n_%s"\n' % ml_method_name(name))
ml_native.write('\n')
# null pointer helpers
for type_id in Type2Str:
type_name = Type2Str[type_id]
if ml_has_plus_type(type_name) and not type_name in ['Z3_context', 'Z3_sort', 'Z3_func_decl', 'Z3_app', 'Z3_pattern']:
ml_name = type2ml(type_id)
ml_native.write('external context_of_%s : %s -> context = "n_context_of_%s"\n' % (ml_name, ml_name, ml_name))
ml_native.write('external is_null_%s : %s -> bool = "n_is_null_%s"\n' % (ml_name, ml_name, ml_name))
ml_native.write('external mk_null_%s : context -> %s = "n_mk_null_%s"\n\n' % (ml_name, ml_name, ml_name))
ml_native.write('(**/**)\n')
ml_native.close()
if is_verbose():
print ('Generated "%s"' % ml_nativef)
mk_z3native_stubs_c(ml_src_dir, ml_output_dir)
def mk_z3native_stubs_c(ml_src_dir, ml_output_dir): # C interface
ml_wrapperf = os.path.join(ml_output_dir, 'z3native_stubs.c')
ml_wrapper = open(ml_wrapperf, 'w')
ml_wrapper.write('// Automatically generated file\n\n')
ml_pref = open(os.path.join(ml_src_dir, 'z3native_stubs.c.pre'), 'r')
for s in ml_pref:
ml_wrapper.write(s);
ml_pref.close()
for name, result, params in _dotnet_decls:
if name in z3_ml_overrides:
continue
if name in z3_ml_callbacks:
continue
ip = inparams(params)
op = outparams(params)
ap = arrayparams(params)
ret_size = len(op)
if result != VOID:
ret_size = ret_size + 1
# Setup frame
ml_wrapper.write('CAMLprim DLL_PUBLIC value n_%s(' % ml_method_name(name))
first = True
i = 0
for p in params:
if is_in_param(p):
if first:
first = False
else:
ml_wrapper.write(', ')
ml_wrapper.write('value a%d' % i)
i = i + 1
ml_wrapper.write(') {\n')
ml_wrapper.write(' CAMLparam%d(' % len(ip))
i = 0
first = True
for p in params:
if is_in_param(p):
if first:
first = False
else:
ml_wrapper.write(', ')
ml_wrapper.write('a%d' % i)
i = i + 1
ml_wrapper.write(');\n')
i = 0
if len(op) + len(ap) == 0:
ml_wrapper.write(' CAMLlocal1(result);\n')
else:
c = 0
needs_tmp_value = False
for p in params:
if is_out_param(p) or is_array_param(p):
c = c + 1
needs_tmp_value = needs_tmp_value or param_kind(p) == OUT_ARRAY or param_kind(p) == INOUT_ARRAY
if needs_tmp_value:
c = c + 1
if len(ap) > 0:
c = c + 1
ml_wrapper.write(' CAMLlocal%s(result, z3rv_val' % (c+2))
for p in params:
if is_out_param(p) or is_array_param(p):
ml_wrapper.write(', _a%s_val' % i)
i = i + 1
if needs_tmp_value:
ml_wrapper.write(', tmp_val')
if len(ap) != 0:
ml_wrapper.write(', _iter');
ml_wrapper.write(');\n')
if len(ap) > 0:
ml_wrapper.write(' unsigned _i;\n')
# determine if the function has a context as parameter.
have_context = (len(params) > 0) and (param_type(params[0]) == CONTEXT)
if have_context and name not in Unwrapped and name not in Unchecked:
ml_wrapper.write(' Z3_error_code ec;\n')
if result != VOID:
ts = type2str(result)
if ml_has_plus_type(ts):
pts = ml_plus_type(ts)
ml_wrapper.write(' %s z3rv_m;\n' % ts)
ml_wrapper.write(' %s z3rv;\n' % pts)
else:
ml_wrapper.write(' %s z3rv;\n' % ts)
# declare all required local variables
# To comply with C89, we need to first declare the variables and initialize them
# only afterwards.
i = 0
for param in params:
if param_type(param) == CONTEXT and i == 0:
ml_wrapper.write(' Z3_context_plus ctx_p;\n')
ml_wrapper.write(' Z3_context _a0;\n')
else:
k = param_kind(param)
if k == OUT_ARRAY:
ml_wrapper.write(' %s * _a%s;\n' % (type2str(param_type(param)), i))
elif k == OUT_MANAGED_ARRAY:
ml_wrapper.write(' %s * _a%s;\n' % (type2str(param_type(param)), i))
elif k == IN_ARRAY or k == INOUT_ARRAY:
t = param_type(param)
ts = type2str(t)
ml_wrapper.write(' %s * _a%s;\n' % (ts, i))
elif k == IN:
t = param_type(param)
ml_wrapper.write(' %s _a%s;\n' % (type2str(t), i))
elif k == OUT or k == INOUT:
t = param_type(param)
ml_wrapper.write(' %s _a%s;\n' % (type2str(t), i))
ts = type2str(t)
if ml_has_plus_type(ts):
pts = ml_plus_type(ts)
ml_wrapper.write(' %s _a%dp;\n' % (pts, i))
i = i + 1
# End of variable declarations in outermost block:
# To comply with C89, no variable declarations may occur in the outermost block
# from that point onwards (breaks builds with at least VC 2012 and prior)
ml_wrapper.write('\n')
# Declare locals, preprocess arrays, strings, in/out arguments
i = 0
for param in params:
if param_type(param) == CONTEXT and i == 0:
ml_wrapper.write(' ctx_p = *(Z3_context_plus*) Data_custom_val(a' + str(i) + ');\n')
ml_wrapper.write(' _a0 = ctx_p->ctx;\n')
else:
k = param_kind(param)
if k == OUT_ARRAY:
ml_wrapper.write(' _a%s = (%s*) malloc(sizeof(%s) * (_a%s));\n' % (
i,
type2str(param_type(param)),
type2str(param_type(param)),
param_array_capacity_pos(param)))
elif k == OUT_MANAGED_ARRAY:
ml_wrapper.write(' _a%s = 0;\n' % i)
elif k == IN_ARRAY or k == INOUT_ARRAY:
t = param_type(param)
ts = type2str(t)
ml_wrapper.write(' _a%s = (%s*) malloc(sizeof(%s) * _a%s);\n' % (i, ts, ts, param_array_capacity_pos(param)))
elif k == IN:
t = param_type(param)
ml_wrapper.write(' _a%s = %s;\n' % (i, ml_unwrap(t, type2str(t), 'a' + str(i))))
i = i + 1
i = 0
for param in params:
k = param_kind(param)
if k == IN_ARRAY or k == INOUT_ARRAY:
t = param_type(param)
ts = type2str(t)
ml_wrapper.write(' _iter = a' + str(i) + ';\n')
ml_wrapper.write(' for (_i = 0; _i < _a%s; _i++) {\n' % param_array_capacity_pos(param))
ml_wrapper.write(' assert(_iter != Val_emptylist);\n')
ml_wrapper.write(' _a%s[_i] = %s;\n' % (i, ml_unwrap(t, ts, 'Field(_iter, 0)')))
ml_wrapper.write(' _iter = Field(_iter, 1);\n')
ml_wrapper.write(' }\n')
ml_wrapper.write(' assert(_iter == Val_emptylist);\n\n')
i = i + 1
release_caml_gc= name in z3_long_funs
if release_caml_gc:
ml_wrapper.write('\n caml_release_runtime_system();\n')
ml_wrapper.write('\n /* invoke Z3 function */\n ')
if result != VOID:
ts = type2str(result)
if ml_has_plus_type(ts):
ml_wrapper.write('z3rv_m = ')
else:
ml_wrapper.write('z3rv = ')
# invoke procedure
ml_wrapper.write('%s(' % name)
i = 0
first = True
for param in params:
if first:
first = False
else:
ml_wrapper.write(', ')
k = param_kind(param)
if k == OUT or k == INOUT or k == OUT_MANAGED_ARRAY:
ml_wrapper.write('&_a%s' % i)
else:
ml_wrapper.write('_a%i' % i)
i = i + 1
ml_wrapper.write(');\n')
if name in NULLWrapped:
ml_wrapper.write(' if (z3rv_m == NULL) {\n')
ml_wrapper.write(' caml_raise_with_string(*caml_named_value("Z3EXCEPTION"), "Object allocation failed");\n')
ml_wrapper.write(' }\n')
if release_caml_gc:
ml_wrapper.write('\n caml_acquire_runtime_system();\n')
if have_context and name not in Unwrapped and name not in Unchecked:
ml_wrapper.write(' ec = Z3_get_error_code(ctx_p->ctx);\n')
ml_wrapper.write(' if (ec != Z3_OK) {\n')
ml_wrapper.write(' const char * msg = Z3_get_error_msg(ctx_p->ctx, ec);\n')
ml_wrapper.write(' caml_raise_with_string(*caml_named_value("Z3EXCEPTION"), msg);\n')
ml_wrapper.write(' }\n')
if result != VOID:
ts = type2str(result)
if ml_has_plus_type(ts):
pts = ml_plus_type(ts)
if name in NULLWrapped:
ml_wrapper.write(' z3rv = %s_mk(z3rv_m);\n' % pts)
else:
ml_wrapper.write(' z3rv = %s_mk(ctx_p, (%s) z3rv_m);\n' % (pts, ml_minus_type(ts)))
# convert output params
if len(op) > 0:
# we have output parameters (i.e. call-by-reference arguments to the Z3 native
# code function). Hence, the value returned by the OCaml native wrapper is a tuple
# which contains the Z3 native function's return value (if it is non-void) in its
# first and the output parameters in the following components.
ml_wrapper.write('\n /* construct return tuple */\n')
ml_wrapper.write(' result = caml_alloc(%s, 0);\n' % ret_size)
i = 0
for p in params:
pt = param_type(p)
ts = type2str(pt)
if param_kind(p) == OUT_ARRAY or param_kind(p) == INOUT_ARRAY:
# convert a C-array into an OCaml list and return it
ml_wrapper.write('\n _a%s_val = Val_emptylist;\n' % i)
ml_wrapper.write(' for (_i = _a%s; _i > 0; _i--) {\n' % param_array_capacity_pos(p))
pts = ml_plus_type(ts)
pops = ml_plus_ops_type(ts)
if ml_has_plus_type(ts):
ml_wrapper.write(' %s _a%dp = %s_mk(ctx_p, (%s) _a%d[_i - 1]);\n' % (pts, i, pts, ml_minus_type(ts), i))
ml_wrapper.write(' %s\n' % ml_alloc_and_store(pt, 'tmp_val', '_a%dp' % i))
else:
ml_wrapper.write(' %s\n' % ml_alloc_and_store(pt, 'tmp_val', '_a%d[_i - 1]' % i))
ml_wrapper.write(' _iter = caml_alloc(2,0);\n')
ml_wrapper.write(' Store_field(_iter, 0, tmp_val);\n')
ml_wrapper.write(' Store_field(_iter, 1, _a%s_val);\n' % i)
ml_wrapper.write(' _a%s_val = _iter;\n' % i)
ml_wrapper.write(' }\n\n')
elif param_kind(p) == OUT_MANAGED_ARRAY:
wrp = ml_set_wrap(pt, '_a%d_val' % i, '_a%d' % i)
wrp = wrp.replace('*)', '**)')
wrp = wrp.replace('_plus', '')
ml_wrapper.write(' %s\n' % wrp)
elif is_out_param(p):
if ml_has_plus_type(ts):
pts = ml_plus_type(ts)
ml_wrapper.write(' _a%dp = %s_mk(ctx_p, (%s) _a%d);\n' % (i, pts, ml_minus_type(ts), i))
ml_wrapper.write(' %s\n' % ml_alloc_and_store(pt, '_a%d_val' % i, '_a%dp' % i))
else:
ml_wrapper.write(' %s\n' % ml_alloc_and_store(pt, '_a%d_val' % i, '_a%d' % i))
i = i + 1
# return tuples
i = j = 0
if result != VOID:
ml_wrapper.write(' %s' % ml_alloc_and_store(result, 'z3rv_val', 'z3rv'))
ml_wrapper.write(' Store_field(result, 0, z3rv_val);\n')
j = j + 1
for p in params:
if is_out_param(p):
ml_wrapper.write(' Store_field(result, %s, _a%s_val);\n' % (j, i))
j = j + 1
i = i + 1
else:
# As we have no output parameters, we simply return the result
ml_wrapper.write('\n /* construct simple return value */\n')
ml_wrapper.write(' %s' % ml_alloc_and_store(result, "result", "z3rv"))
# local array cleanup
ml_wrapper.write('\n /* cleanup and return */\n')
i = 0
for p in params:
k = param_kind(p)
if k == OUT_ARRAY or k == IN_ARRAY or k == INOUT_ARRAY:
ml_wrapper.write(' free(_a%s);\n' % i)
i = i + 1
# return
ml_wrapper.write(' CAMLreturn(result);\n')
ml_wrapper.write('}\n\n')
if len(ip) > 5:
ml_wrapper.write('CAMLprim DLL_PUBLIC value n_%s_bytecode(value * argv, int argn) {\n' % ml_method_name(name))
ml_wrapper.write(' return n_%s(' % ml_method_name(name))
i = 0
while i < len(ip):
if i == 0:
ml_wrapper.write('argv[0]')
else:
ml_wrapper.write(', argv[%s]' % i)
i = i + 1
ml_wrapper.write(');\n}\n')
ml_wrapper.write('\n\n')
ml_wrapper.write('#ifdef __cplusplus\n')
ml_wrapper.write('}\n')
ml_wrapper.write('#endif\n')
if is_verbose():
print ('Generated "%s"' % ml_wrapperf)
# Collect API(...) and extra_API(...) commands from the given API header files.
def def_APIs(api_files):
pat1 = re.compile(" *def_API.*")
pat2 = re.compile(" *extra_API.*")
for api_file in api_files:
api = open(api_file, 'r')
for line in api:
line = line.strip('\r\n\t ')
try:
m = pat1.match(line)
if m:
eval(line)
m = pat2.match(line)
if m:
eval(line)
except Exception as e:
error('ERROR: While processing: %s: %s\n' % (e, line))
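# For orientation: the annotations matched above are Python-syntax declarations
# embedded in the API headers, along the lines of
#   def_API('Z3_mk_config', CONFIG, ())
# (an illustrative example only; the authoritative set of def_API/extra_API lines
# lives in the z3_*.h files passed in as api_files).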
def write_log_h_preamble(log_h):
log_h.write('// Automatically generated file\n')
log_h.write('#include\"api/z3.h\"\n')
log_h.write('#ifdef __GNUC__\n')
log_h.write('#define _Z3_UNUSED __attribute__((unused))\n')
log_h.write('#else\n')
log_h.write('#define _Z3_UNUSED\n')
log_h.write('#endif\n')
#
log_h.write('#include "util/mutex.h"\n')
log_h.write('extern atomic<bool> g_z3_log_enabled;\n')
log_h.write('void ctx_enable_logging();\n')
log_h.write('class z3_log_ctx { bool m_prev; public: z3_log_ctx() { ATOMIC_EXCHANGE(m_prev, g_z3_log_enabled, false); } ~z3_log_ctx() { if (m_prev) g_z3_log_enabled = true; } bool enabled() const { return m_prev; } };\n')
log_h.write('void SetR(void * obj);\nvoid SetO(void * obj, unsigned pos);\nvoid SetAO(void * obj, unsigned pos, unsigned idx);\n')
log_h.write('#define RETURN_Z3(Z3RES) do { auto tmp_ret = Z3RES; if (_LOG_CTX.enabled()) { SetR(tmp_ret); } return tmp_ret; } while (0)\n')
def write_log_c_preamble(log_c):
log_c.write('// Automatically generated file\n')
log_c.write('#include\"api/z3.h\"\n')
log_c.write('#include\"api/api_log_macros.h\"\n')
log_c.write('#include\"api/z3_logger.h\"\n')
def write_exe_c_preamble(exe_c):
exe_c.write('// Automatically generated file\n')
exe_c.write('#include\"api/z3.h\"\n')
exe_c.write('#include\"api/z3_replayer.h\"\n')
#
exe_c.write('void Z3_replayer_error_handler(Z3_context ctx, Z3_error_code c) { printf("[REPLAYER ERROR HANDLER]: %s\\n", Z3_get_error_msg(ctx, c)); }\n')
def write_core_py_post(core_py):
core_py.write("""
# Clean up
del _lib
del _default_dirs
del _all_dirs
del _ext
""")
def write_core_py_preamble(core_py):
core_py.write(
"""
# Automatically generated file
import sys, os
import ctypes
import pkg_resources
from .z3types import *
from .z3consts import *
_ext = 'dll' if sys.platform in ('win32', 'cygwin') else 'dylib' if sys.platform == 'darwin' else 'so'
_lib = None
_default_dirs = ['.',
os.path.dirname(os.path.abspath(__file__)),
pkg_resources.resource_filename('z3', 'lib'),
os.path.join(sys.prefix, 'lib'),
None]
_all_dirs = []
# search the default dirs first
_all_dirs.extend(_default_dirs)
if sys.version < '3':
import __builtin__
if hasattr(__builtin__, "Z3_LIB_DIRS"):
_all_dirs = __builtin__.Z3_LIB_DIRS
else:
import builtins
if hasattr(builtins, "Z3_LIB_DIRS"):
_all_dirs = builtins.Z3_LIB_DIRS
for v in ('Z3_LIBRARY_PATH', 'PATH', 'PYTHONPATH'):
if v in os.environ:
lp = os.environ[v];
lds = lp.split(';') if sys.platform in ('win32') else lp.split(':')
_all_dirs.extend(lds)
_failures = []
for d in _all_dirs:
try:
d = os.path.realpath(d)
if os.path.isdir(d):
d = os.path.join(d, 'libz3.%s' % _ext)
if os.path.isfile(d):
_lib = ctypes.CDLL(d)
break
except Exception as e:
_failures += [e]
pass
if _lib is None:
# If all else failed, ask the system to find it.
try:
_lib = ctypes.CDLL('libz3.%s' % _ext)
except Exception as e:
_failures += [e]
pass
if _lib is None:
print("Could not find libz3.%s; consider adding the directory containing it to" % _ext)
print(" - your system's PATH environment variable,")
print(" - the Z3_LIBRARY_PATH environment variable, or ")
print(" - to the custom Z3_LIB_DIRS Python-builtin before importing the z3 module, e.g. via")
if sys.version < '3':
print(" import __builtin__")
print(" __builtin__.Z3_LIB_DIRS = [ '/path/to/libz3.%s' ] " % _ext)
else:
print(" import builtins")
print(" builtins.Z3_LIB_DIRS = [ '/path/to/libz3.%s' ] " % _ext)
print(_failures)
raise Z3Exception("libz3.%s not found." % _ext)
if sys.version < '3':
def _str_to_bytes(s):
return s
def _to_pystr(s):
return s
else:
def _str_to_bytes(s):
if isinstance(s, str):
enc = sys.getdefaultencoding()
return s.encode(enc if enc != None else 'latin-1')
else:
return s
def _to_pystr(s):
if s != None:
enc = sys.getdefaultencoding()
return s.decode(enc if enc != None else 'latin-1')
else:
return ""
_error_handler_type = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_uint)
_lib.Z3_set_error_handler.restype = None
_lib.Z3_set_error_handler.argtypes = [ContextObj, _error_handler_type]
Z3_on_clause_eh = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)
Z3_push_eh = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
Z3_pop_eh = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint)
Z3_fresh_eh = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)
Z3_fixed_eh = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)
Z3_final_eh = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
Z3_eq_eh = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)
Z3_created_eh = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)
Z3_decide_eh = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint, ctypes.c_int)
_lib.Z3_solver_register_on_clause.restype = None
_lib.Z3_solver_propagate_init.restype = None
_lib.Z3_solver_propagate_final.restype = None
_lib.Z3_solver_propagate_fixed.restype = None
_lib.Z3_solver_propagate_eq.restype = None
_lib.Z3_solver_propagate_diseq.restype = None
_lib.Z3_solver_propagate_decide.restype = None
on_model_eh_type = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
_lib.Z3_optimize_register_model_eh.restype = None
_lib.Z3_optimize_register_model_eh.argtypes = [ContextObj, OptimizeObj, ModelObj, ctypes.c_void_p, on_model_eh_type]
"""
)
log_h = None
log_c = None
exe_c = None
core_py = None
# FIXME: This can only be called once from this module
# due to its use of global state!
def generate_files(api_files,
api_output_dir=None,
z3py_output_dir=None,
dotnet_output_dir=None,
java_input_dir=None,
java_output_dir=None,
java_package_name=None,
ml_output_dir=None,
ml_src_dir=None):
"""
Scan the api files in ``api_files`` and emit the relevant API files into
the output directories specified. If an output directory is set to ``None``
then the files for that language binding or module are not emitted.
The reason for this function interface is:
* The CMake build system needs to control where
files are emitted.
* The CMake build system needs to be able to choose
which API files are emitted.
* This function should be as decoupled from the Python
build system as much as possible but it must be possible
for the Python build system code to use this function.
Therefore we:
* Do not use the ``mk_util.is_*_enabled()`` functions
to determine if certain files should be or should not be emitted.
* Do not use the components declared in the Python build system
to determine the output directory paths.
"""
# FIXME: These should not be global
global log_h, log_c, exe_c, core_py
assert isinstance(api_files, list)
# Hack: Avoid emitting files when we don't want them
# by writing to temporary files that get deleted when
# closed. This allows us to work around the fact that
# existing code is designed to always emit these files.
def mk_file_or_temp(output_dir, file_name, mode='w'):
if output_dir != None:
assert os.path.exists(output_dir) and os.path.isdir(output_dir)
return open(os.path.join(output_dir, file_name), mode)
else:
# Return a file that we can write to without caring
print("Faking emission of '{}'".format(file_name))
import tempfile
return tempfile.TemporaryFile(mode=mode)
apiTypes = APITypes()
with mk_file_or_temp(api_output_dir, 'api_log_macros.h') as log_h:
with mk_file_or_temp(api_output_dir, 'api_log_macros.cpp') as log_c:
with mk_file_or_temp(api_output_dir, 'api_commands.cpp') as exe_c:
with mk_file_or_temp(z3py_output_dir, os.path.join('z3', 'z3core.py')) as core_py:
# Write preambles
write_log_h_preamble(log_h)
write_log_c_preamble(log_c)
write_exe_c_preamble(exe_c)
write_core_py_preamble(core_py)
# FIXME: these functions are awful
apiTypes.def_Types(api_files)
def_APIs(api_files)
mk_bindings(exe_c)
mk_py_wrappers()
write_core_py_post(core_py)
if is_verbose():
print("Generated '{}'".format(log_h.name))
print("Generated '{}'".format(log_c.name))
print("Generated '{}'".format(exe_c.name))
print("Generated '{}'".format(core_py.name))
if dotnet_output_dir:
with open(os.path.join(dotnet_output_dir, 'Native.cs'), 'w') as dotnet_file:
mk_dotnet(dotnet_file)
mk_dotnet_wrappers(dotnet_file)
if is_verbose():
print("Generated '{}'".format(dotnet_file.name))
if java_output_dir:
mk_java(java_input_dir, java_output_dir, java_package_name)
if ml_output_dir:
        assert ml_src_dir is not None
mk_ml(ml_src_dir, ml_output_dir)
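# Minimal invocation sketch (directory names are illustrative and must already exist;
# leaving an output directory as None suppresses the files for that binding):
#   generate_files(['z3_api.h', 'z3_ast_containers.h'],
#                  api_output_dir='build/src/api',
#                  z3py_output_dir='build/python')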
def main(args):
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("api_files",
nargs="+",
help="API header files to generate files from")
parser.add_argument("--api_output_dir",
default=None,
help="Directory to emit files for api module. If not specified no files are emitted.")
parser.add_argument("--z3py-output-dir",
dest="z3py_output_dir",
default=None,
help="Directory to emit z3py files. If not specified no files are emitted.")
parser.add_argument("--dotnet-output-dir",
dest="dotnet_output_dir",
default=None,
help="Directory to emit dotnet files. If not specified no files are emitted.")
parser.add_argument("--java-input-dir",
dest="java_input_dir",
default=None,
help="Directory where Java sources reside.")
parser.add_argument("--java-output-dir",
dest="java_output_dir",
default=None,
help="Directory to emit Java files. If not specified no files are emitted.")
parser.add_argument("--java-package-name",
dest="java_package_name",
default=None,
help="Name to give the Java package (e.g. ``com.microsoft.z3``).")
parser.add_argument("--ml-src-dir",
dest="ml_src_dir",
default=None,
help="Directory containing OCaml source files. If not specified no files are emitted")
parser.add_argument("--ml-output-dir",
dest="ml_output_dir",
default=None,
help="Directory to emit OCaml files. If not specified no files are emitted.")
pargs = parser.parse_args(args)
if pargs.java_output_dir:
if pargs.java_package_name == None:
logging.error('--java-package-name must be specified')
return 1
if pargs.java_input_dir is None:
logging.error('--java-input-dir must be specified')
return 1
if pargs.ml_output_dir:
if pargs.ml_src_dir is None:
logging.error('--ml-src-dir must be specified')
return 1
for api_file in pargs.api_files:
if not os.path.exists(api_file):
logging.error('"{}" does not exist'.format(api_file))
return 1
generate_files(api_files=pargs.api_files,
api_output_dir=pargs.api_output_dir,
z3py_output_dir=pargs.z3py_output_dir,
dotnet_output_dir=pargs.dotnet_output_dir,
java_input_dir=pargs.java_input_dir,
java_output_dir=pargs.java_output_dir,
java_package_name=pargs.java_package_name,
ml_output_dir=pargs.ml_output_dir,
ml_src_dir=pargs.ml_src_dir)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 81,103 | 37.347045 | 223 |
py
|
z3
|
z3-master/scripts/mk_pat_db.py
|
#!/usr/bin/env python
"""
Reads a pattern database and generates the corresponding
header file.
"""
import mk_genfile_common
import argparse
import logging
import os
import sys
def main(args):
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("db_file", help="pattern database file")
parser.add_argument("output_file", help="output header file path")
pargs = parser.parse_args(args)
if not os.path.exists(pargs.db_file):
logging.error('"{}" does not exist'.format(pargs.db_file))
return 1
mk_genfile_common.mk_pat_db_internal(pargs.db_file, pargs.output_file)
logging.info('Generated "{}"'.format(pargs.output_file))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 810 | 26.033333 | 74 |
py
|
z3
|
z3-master/scripts/update_include.py
|
# Copyright (c) 2017 Microsoft Corporation
import os
import re
is_include = re.compile("#include \"(.*)\"")
is_include2 = re.compile("#include\"(.*)\"")
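# Sketch of the rewrite this script performs (file names are illustrative):
# if find_paths() maps 'vector.h' to 'util/vector.h', then a line
#   #include "vector.h"
# is rewritten in place to
#   #include "util/vector.h"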
def fix_include(file, paths):
tmp = "%s.tmp" % file
ins = open(file)
ous = open(tmp,'w')
line = ins.readline()
found = False
while line:
m = is_include.search(line)
if m and m.group(1) in paths:
ous.write("#include \"")
ous.write(paths[m.group(1)])
ous.write("\"\n")
found = True
line = ins.readline()
continue
m = is_include2.search(line)
if m and m.group(1) in paths:
ous.write("#include \"")
ous.write(paths[m.group(1)])
ous.write("\"\n")
found = True
line = ins.readline()
continue
ous.write(line)
line = ins.readline()
ins.close()
ous.close()
if found:
print(file)
os.system("move %s %s" % (tmp, file))
else:
os.system("del %s" % tmp)
def find_paths(dir):
paths = {}
for root, dirs, files in os.walk(dir):
root1 = root.replace("\\","/")[4:]
for f in files:
if f.endswith('.h') or f.endswith('.hpp') or f.endswith('.cpp'):
path = "%s/%s" % (root1, f)
paths[f] = path
if f.endswith('.pyg'):
f = f.replace("pyg","hpp")
path = "%s/%s" % (root1, f)
paths[f] = path
return paths
paths = find_paths('src')
def fixup(dir):
for root, dirs, files in os.walk(dir):
for f in files:
if f == "z3.h":
continue
if f.endswith('.h') or f.endswith('.cpp'):
path = "%s\\%s" % (root, f)
fix_include(path, paths)
fixup('src')
| 1,855 | 25.514286 | 76 |
py
|
z3
|
z3-master/scripts/mk_consts_files.py
|
#!/usr/bin/env python
"""
Reads a list of Z3 API header files and
generates the constant declarations needed
by one or more Z3 language bindings
"""
import mk_genfile_common
import argparse
import logging
import os
import sys
def main(args):
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("api_files", nargs="+")
parser.add_argument("--z3py-output-dir", dest="z3py_output_dir", default=None)
parser.add_argument("--dotnet-output-dir", dest="dotnet_output_dir", default=None)
parser.add_argument("--java-output-dir", dest="java_output_dir", default=None)
parser.add_argument("--java-package-name",
dest="java_package_name",
default=None,
help="Name to give the Java package (e.g. ``com.microsoft.z3``).")
parser.add_argument("--ml-output-dir", dest="ml_output_dir", default=None)
pargs = parser.parse_args(args)
if not mk_genfile_common.check_files_exist(pargs.api_files):
logging.error('One or more API files do not exist')
return 1
count = 0
if pargs.z3py_output_dir:
if not mk_genfile_common.check_dir_exists(pargs.z3py_output_dir):
return 1
output = mk_genfile_common.mk_z3consts_py_internal(pargs.api_files, pargs.z3py_output_dir)
logging.info('Generated "{}"'.format(output))
count += 1
if pargs.dotnet_output_dir:
if not mk_genfile_common.check_dir_exists(pargs.dotnet_output_dir):
return 1
output = mk_genfile_common.mk_z3consts_dotnet_internal(
pargs.api_files,
pargs.dotnet_output_dir)
logging.info('Generated "{}"'.format(output))
count += 1
if pargs.java_output_dir:
if pargs.java_package_name == None:
logging.error('Java package name must be specified')
return 1
if not mk_genfile_common.check_dir_exists(pargs.java_output_dir):
return 1
outputs = mk_genfile_common.mk_z3consts_java_internal(
pargs.api_files,
pargs.java_package_name,
pargs.java_output_dir)
for generated_file in outputs:
logging.info('Generated "{}"'.format(generated_file))
count += 1
if pargs.ml_output_dir:
if not mk_genfile_common.check_dir_exists(pargs.ml_output_dir):
return 1
output = mk_genfile_common.mk_z3consts_ml_internal(
pargs.api_files,
pargs.ml_output_dir)
logging.info('Generated "{}"'.format(output))
count += 1
if count == 0:
        logging.info('No files generated. You need to specify an output directory'
' for the relevant language bindings')
# TODO: Add support for other bindings
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 2,919 | 35.049383 | 98 |
py
|
z3
|
z3-master/scripts/mk_install_tactic_cpp.py
|
#!/usr/bin/env python
"""
Determines the available tactics from a list of header files and generates a
``install_tactic.cpp`` file in the destination directory that defines a
function ``void install_tactics(tactic_manager& ctx)``.
"""
import mk_genfile_common
import argparse
import logging
import os
import sys
def main(args):
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("destination_dir", help="destination directory")
parser.add_argument("deps", help="file with header file names to parse")
pargs = parser.parse_args(args)
if not mk_genfile_common.check_dir_exists(pargs.destination_dir):
return 1
if not mk_genfile_common.check_files_exist([pargs.deps]):
return 1
with open(pargs.deps, 'r') as f:
lines = f.read().split('\n')
h_files_full_path = [os.path.abspath(header_file)
for header_file in lines if header_file]
if not mk_genfile_common.check_files_exist(h_files_full_path):
return 1
output = mk_genfile_common.mk_install_tactic_cpp_internal(
h_files_full_path,
pargs.destination_dir
)
logging.info('Generated "{}"'.format(output))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 1,330 | 29.953488 | 76 |
py
|
z3
|
z3-master/scripts/mk_nuget_task.py
|
#
# Copyright (c) 2018 Microsoft Corporation
#
# 1. copy over dlls
# 2. copy over libz3.dll for the different architectures
# 3. copy over Microsoft.Z3.dll from suitable distribution
# 4. copy nuspec file from packages
# 5. call nuget pack
# 6. sign package
import json
import os
import zipfile
import sys
import os.path
import shutil
import subprocess
def mk_dir(d):
if not os.path.exists(d):
os.makedirs(d)
os_info = { 'ubuntu-latest' : ('so', 'linux-x64'),
'ubuntu-18' : ('so', 'linux-x64'),
'ubuntu-20' : ('so', 'linux-x64'),
'glibc' : ('so', 'linux-x64'),
#'glibc-2.35' : ('so', 'linux-x64'),
'x64-win' : ('dll', 'win-x64'),
'x86-win' : ('dll', 'win-x86'),
'x64-osx' : ('dylib', 'osx-x64'),
'arm64-osx' : ('dylib', 'osx-arm64'),
'debian' : ('so', 'linux-x64') }
def classify_package(f, arch):
for os_name in os_info:
if os_name in f:
ext, dst = os_info[os_name]
return os_name, f[:-4], ext, dst
print("Could not classify", f)
return None
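# For example (hypothetical archive name), classify_package('z3-x64-win.zip', 'x64')
# returns ('x64-win', 'z3-x64-win', 'dll', 'win-x64'): the matching os_info key, the
# archive name without its extension, the shared-library suffix, and the NuGet
# runtime folder.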
def replace(src, dst):
    # Overwrite dst with src: remove any existing file first, then move into place.
    try:
        os.remove(dst)
    except OSError:
        pass
    shutil.move(src, dst)
def unpack(packages, symbols, arch):
# unzip files in packages
# out
# +- runtimes
# +- win-x64
# +- win-x86
# +- linux-x64
# +- osx-x64
# +
tmp = "tmp" if not symbols else "tmpsym"
for f in os.listdir(packages):
print(f)
if f.endswith(".zip") and classify_package(f, arch):
os_name, package_dir, ext, dst = classify_package(f, arch)
path = os.path.abspath(os.path.join(packages, f))
zip_ref = zipfile.ZipFile(path, 'r')
zip_ref.extract(f"{package_dir}/bin/libz3.{ext}", f"{tmp}")
mk_dir(f"out/runtimes/{dst}/native")
replace(f"{tmp}/{package_dir}/bin/libz3.{ext}", f"out/runtimes/{dst}/native/libz3.{ext}")
if "x64-win" in f or "x86-win" in f:
mk_dir("out/lib/netstandard2.0/")
if symbols:
zip_ref.extract(f"{package_dir}/bin/libz3.pdb", f"{tmp}")
replace(f"{tmp}/{package_dir}/bin/libz3.pdb", f"out/runtimes/{dst}/native/libz3.pdb")
files = ["Microsoft.Z3.dll"]
if symbols:
files += ["Microsoft.Z3.pdb", "Microsoft.Z3.xml"]
for b in files:
zip_ref.extract(f"{package_dir}/bin/{b}", f"{tmp}")
replace(f"{tmp}/{package_dir}/bin/{b}", f"out/lib/netstandard2.0/{b}")
def mk_targets(source_root):
mk_dir("out/build")
shutil.copy(f"{source_root}/src/api/dotnet/Microsoft.Z3.targets.in", "out/build/Microsoft.Z3.targets")
def mk_icon(source_root):
mk_dir("out/content")
shutil.copy(f"{source_root}/resources/icon.jpg", "out/content/icon.jpg")
def create_nuget_spec(version, repo, branch, commit, symbols, arch):
arch = f".{arch}" if arch == "x86" else ""
contents = """<?xml version="1.0" encoding="utf-8"?>
<package xmlns="http://schemas.microsoft.com/packaging/2010/07/nuspec.xsd">
<metadata>
<id>Microsoft.Z3{4}</id>
<version>{0}</version>
<authors>Microsoft</authors>
<description>
Z3 is a satisfiability modulo theories solver from Microsoft Research.
Linux Dependencies:
libgomp.so.1 installed
</description>
<copyright>© Microsoft Corporation. All rights reserved.</copyright>
<tags>smt constraint solver theorem prover</tags>
<icon>content/icon.jpg</icon>
<projectUrl>https://github.com/Z3Prover/z3</projectUrl>
<license type="expression">MIT</license>
<repository type="git" url="{1}" branch="{2}" commit="{3}" />
<requireLicenseAcceptance>true</requireLicenseAcceptance>
<language>en</language>
<dependencies>
<group targetFramework=".netstandard2.0" />
</dependencies>
</metadata>
</package>""".format(version, repo, branch, commit, arch)
print(contents)
sym = "sym." if symbols else ""
file = f"out/Microsoft.Z3{arch}.{sym}nuspec"
print(file)
with open(file, 'w') as f:
f.write(contents)
class Env:
def __init__(self, argv):
self.packages = argv[1]
self.version = argv[2]
self.repo = argv[3]
self.branch = argv[4]
self.commit = argv[5]
self.source_root = argv[6]
self.symbols = False
self.arch = "x64"
if len(argv) > 7 and "symbols" == argv[7]:
self.symbols = True
if len(argv) > 8:
self.arch = argv[8]
def create(self):
mk_dir(self.packages)
unpack(self.packages, self.symbols, self.arch)
mk_targets(self.source_root)
mk_icon(self.source_root)
create_nuget_spec(self.version, self.repo, self.branch, self.commit, self.symbols, self.arch)
def main():
env = Env(sys.argv)
print(env.packages)
env.create()
main()
| 5,105 | 32.372549 | 113 |
py
|
z3
|
z3-master/doc/mk_params_doc.py
|
# Copyright (c) Microsoft Corporation 2015
"""
Z3 API documentation for parameters
"""
import argparse
import subprocess
import sys
import re
import os
BUILD_DIR='../build'
OUTPUT_DIRECTORY=os.path.join(os.getcwd(), 'api')
def parse_options():
global BUILD_DIR, OUTPUT_DIRECTORY
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-b',
'--build',
default=BUILD_DIR,
help='Directory where Z3 is built (default: %(default)s)',
)
parser.add_argument('--output-dir',
dest='output_dir',
default=OUTPUT_DIRECTORY,
help='Path to output directory (default: %(default)s)',
)
pargs = parser.parse_args()
BUILD_DIR = pargs.build
OUTPUT_DIRECTORY = pargs.output_dir
def help(ous):
global BUILD_DIR
ous.write("Z3 Options\n")
z3_exe = BUILD_DIR + "/z3"
out = subprocess.Popen([z3_exe, "-pm"],stdout=subprocess.PIPE).communicate()[0]
modules = ["global"]
if out != None:
out = out.decode(sys.getdefaultencoding())
module_re = re.compile(r"\[module\] (.*)\,")
lines = out.split("\n")
for line in lines:
m = module_re.search(line)
if m:
modules += [m.group(1)]
for module in modules:
out = subprocess.Popen([z3_exe, "-pmmd:%s" % module],stdout=subprocess.PIPE).communicate()[0]
if out == None:
continue
out = out.decode(sys.getdefaultencoding())
out = out.replace("\r","")
ous.write(out)
parse_options()
def mk_dir(d):
if not os.path.exists(d):
os.makedirs(d)
mk_dir(os.path.join(OUTPUT_DIRECTORY, 'md'))
with open(OUTPUT_DIRECTORY + "/md/Parameters.md",'w') as ous:
help(ous)
| 1,772 | 26.276923 | 105 |
py
|
z3
|
z3-master/doc/mk_api_doc.py
|
# Copyright (c) Microsoft Corporation 2015
"""
Z3 API documentation generator script
"""
import argparse
import os
import shutil
import re
import getopt
import pydoc
import sys
import subprocess
ML_ENABLED=False
MLD_ENABLED=False
JS_ENABLED=False
BUILD_DIR='../build'
DOXYGEN_EXE='doxygen'
TEMP_DIR=os.path.join(os.getcwd(), 'tmp')
OUTPUT_DIRECTORY=os.path.join(os.getcwd(), 'api')
Z3PY_PACKAGE_PATH='../src/api/python/z3'
JS_API_PATH='../src/api/js'
Z3PY_ENABLED=True
DOTNET_ENABLED=True
JAVA_ENABLED=True
Z3OPTIONS_ENABLED=True
DOTNET_API_SEARCH_PATHS=['../src/api/dotnet']
JAVA_API_SEARCH_PATHS=['../src/api/java']
SCRIPT_DIR=os.path.abspath(os.path.dirname(__file__))
def parse_options():
global ML_ENABLED, MLD_ENABLED, BUILD_DIR, DOXYGEN_EXE, TEMP_DIR, OUTPUT_DIRECTORY
global Z3PY_PACKAGE_PATH, Z3PY_ENABLED, DOTNET_ENABLED, JAVA_ENABLED, JS_ENABLED
global DOTNET_API_SEARCH_PATHS, JAVA_API_SEARCH_PATHS, JS_API_PATH
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-b',
'--build',
default=BUILD_DIR,
help='Directory where Z3 is built (default: %(default)s)',
)
parser.add_argument('--ml',
action='store_true',
default=False,
help='Include ML/OCaml API documentation'
)
parser.add_argument('--mld',
action='store_true',
default=False,
help='Include ML/OCaml API documentation'
)
parser.add_argument('--js',
action='store_true',
default=False,
help='Include JS/TS API documentation'
)
parser.add_argument('--doxygen-executable',
dest='doxygen_executable',
default=DOXYGEN_EXE,
help='Doxygen executable to use (default: %(default)s)',
)
parser.add_argument('--temp-dir',
dest='temp_dir',
default=TEMP_DIR,
help='Path to directory to use as temporary directory. '
'(default: %(default)s)',
)
parser.add_argument('--output-dir',
dest='output_dir',
default=OUTPUT_DIRECTORY,
help='Path to output directory (default: %(default)s)',
)
parser.add_argument('--z3py-package-path',
dest='z3py_package_path',
default=Z3PY_PACKAGE_PATH,
help='Path to directory containing Z3py package (default: %(default)s)',
)
# FIXME: I would prefer not to have negative options (i.e. `--z3py`
# instead of `--no-z3py`) but historically these bindings have been on by
# default so we have options to disable generating documentation for these
# bindings rather than enable them.
parser.add_argument('--no-z3py',
dest='no_z3py',
action='store_true',
default=False,
help='Do not generate documentation for Python bindings',
)
parser.add_argument('--no-dotnet',
dest='no_dotnet',
action='store_true',
default=False,
help='Do not generate documentation for .NET bindings',
)
parser.add_argument('--no-java',
dest='no_java',
action='store_true',
default=False,
help='Do not generate documentation for Java bindings',
)
parser.add_argument('--dotnet-search-paths',
dest='dotnet_search_paths',
nargs='+',
default=DOTNET_API_SEARCH_PATHS,
help='Specify one or more path to look for .NET files (default: %(default)s).',
)
parser.add_argument('--java-search-paths',
dest='java_search_paths',
nargs='+',
default=JAVA_API_SEARCH_PATHS,
help='Specify one or more paths to look for Java files (default: %(default)s).',
)
pargs = parser.parse_args()
ML_ENABLED = pargs.ml
MLD_ENABLED = pargs.mld
JS_ENABLED = pargs.js
BUILD_DIR = pargs.build
DOXYGEN_EXE = pargs.doxygen_executable
TEMP_DIR = pargs.temp_dir
OUTPUT_DIRECTORY = pargs.output_dir
Z3PY_PACKAGE_PATH = pargs.z3py_package_path
Z3PY_ENABLED = not pargs.no_z3py
DOTNET_ENABLED = not pargs.no_dotnet
JAVA_ENABLED = not pargs.no_java
DOTNET_API_SEARCH_PATHS = pargs.dotnet_search_paths
JAVA_API_SEARCH_PATHS = pargs.java_search_paths
if Z3PY_ENABLED:
if not os.path.exists(Z3PY_PACKAGE_PATH):
raise Exception('"{}" does not exist'.format(Z3PY_PACKAGE_PATH))
if not os.path.basename(Z3PY_PACKAGE_PATH) == 'z3':
raise Exception('"{}" does not end with "z3"'.format(Z3PY_PACKAGE_PATH))
return
def mk_dir(d):
if not os.path.exists(d):
os.makedirs(d)
# Eliminate def_API, extra_API, and def_Type directives from file 'inf'.
# The result is stored in 'outf'.
def cleanup_API(inf, outf):
pat1 = re.compile(".*def_API.*")
pat2 = re.compile(".*extra_API.*")
pat3 = re.compile(r".*def_Type\(.*")
pat4 = re.compile("Z3_DECLARE_CLOSURE.*")
pat5 = re.compile("DEFINE_TYPE.*")
_inf = open(inf, 'r')
_outf = open(outf, 'w')
for line in _inf:
if not pat1.match(line) and not pat2.match(line) and not pat3.match(line) and not pat4.match(line) and not pat5.match(line):
_outf.write(line)
def configure_file(template_file_path, output_file_path, substitutions):
"""
Read a template file ``template_file_path``, perform substitutions
found in the ``substitutions`` dictionary and write the result to
the output file ``output_file_path``.
The template file should contain zero or more template strings of the
form ``@NAME@``.
The substitutions dictionary maps old strings (without the ``@``
symbols) to their replacements.
"""
assert isinstance(template_file_path, str)
assert isinstance(output_file_path, str)
assert isinstance(substitutions, dict)
assert len(template_file_path) > 0
assert len(output_file_path) > 0
print("Generating {} from {}".format(output_file_path, template_file_path))
if not os.path.exists(template_file_path):
raise Exception('Could not find template file "{}"'.format(template_file_path))
# Read whole template file into string
template_string = None
with open(template_file_path, 'r') as f:
template_string = f.read()
# Do replacements
for (old_string, replacement) in substitutions.items():
template_string = template_string.replace('@{}@'.format(old_string), replacement)
# Write the string to the file
with open(output_file_path, 'w') as f:
f.write(template_string)
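# Illustrative use (the file names here are made up): given a template 'site.dox.in'
# containing the line "OUTPUT = @OUTPUT_DIRECTORY@",
#   configure_file('site.dox.in', 'site.dox', {'OUTPUT_DIRECTORY': '/tmp/api'})
# writes 'site.dox' containing "OUTPUT = /tmp/api".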
try:
parse_options()
print("Creating temporary directory \"{}\"".format(TEMP_DIR))
mk_dir(TEMP_DIR)
# Short-hand for path to temporary file
def temp_path(path):
return os.path.join(TEMP_DIR, path)
# Short-hand for path to file in `doc` directory
def doc_path(path):
return os.path.join(SCRIPT_DIR, path)
# Create configuration file from template
doxygen_config_substitutions = {
'OUTPUT_DIRECTORY': OUTPUT_DIRECTORY,
'TEMP_DIR': TEMP_DIR,
'CXX_API_SEARCH_PATHS': doc_path('../src/api/c++'),
}
if Z3PY_ENABLED:
print("Z3Py documentation enabled")
doxygen_config_substitutions['PYTHON_API_FILES'] = 'z3*.py'
else:
print("Z3Py documentation disabled")
doxygen_config_substitutions['PYTHON_API_FILES'] = ''
if DOTNET_ENABLED:
print(".NET documentation enabled")
doxygen_config_substitutions['DOTNET_API_FILES'] = '*.cs'
dotnet_api_search_path_str = ""
for p in DOTNET_API_SEARCH_PATHS:
# Quote path so that paths with spaces are handled correctly
dotnet_api_search_path_str += "\"{}\" ".format(p)
doxygen_config_substitutions['DOTNET_API_SEARCH_PATHS'] = dotnet_api_search_path_str
else:
print(".NET documentation disabled")
doxygen_config_substitutions['DOTNET_API_FILES'] = ''
doxygen_config_substitutions['DOTNET_API_SEARCH_PATHS'] = ''
if JAVA_ENABLED:
print("Java documentation enabled")
doxygen_config_substitutions['JAVA_API_FILES'] = '*.java'
java_api_search_path_str = ""
for p in JAVA_API_SEARCH_PATHS:
# Quote path so that paths with spaces are handled correctly
java_api_search_path_str += "\"{}\" ".format(p)
doxygen_config_substitutions['JAVA_API_SEARCH_PATHS'] = java_api_search_path_str
else:
print("Java documentation disabled")
doxygen_config_substitutions['JAVA_API_FILES'] = ''
doxygen_config_substitutions['JAVA_API_SEARCH_PATHS'] = ''
if JS_ENABLED:
print('Javascript documentation enabled')
else:
print('Javascript documentation disabled')
doxygen_config_file = temp_path('z3api.cfg')
configure_file(
doc_path('z3api.cfg.in'),
doxygen_config_file,
doxygen_config_substitutions)
website_dox_substitutions = {}
bullet_point_prefix='\n - '
website_dox_substitutions['CPP_API'] = (
'{prefix}<a class="el" href="namespacez3.html">C++ API</a> '
).format(
prefix=bullet_point_prefix)
website_dox_substitutions['C_API'] = (
'{prefix}<a class="el" href="z3__api_8h.html">C API</a> '
).format(
prefix=bullet_point_prefix)
if Z3PY_ENABLED:
print("Python documentation enabled")
website_dox_substitutions['PYTHON_API'] = (
'{prefix}<a class="el" href="namespacez3py.html">Python API</a> '
'(also available in <a class="el" href="z3.html">pydoc format</a>)'
).format(
prefix=bullet_point_prefix)
else:
print("Python documentation disabled")
website_dox_substitutions['PYTHON_API'] = ''
if DOTNET_ENABLED:
website_dox_substitutions['DOTNET_API'] = (
'{prefix}'
'<a class="el" href="namespace_microsoft_1_1_z3.html">'
'.NET API</a>').format(
prefix=bullet_point_prefix)
else:
website_dox_substitutions['DOTNET_API'] = ''
if JAVA_ENABLED:
website_dox_substitutions['JAVA_API'] = (
'{prefix}<a class="el" href="namespacecom_1_1microsoft_1_1z3.html">'
'Java API</a>').format(
prefix=bullet_point_prefix)
else:
website_dox_substitutions['JAVA_API'] = ''
if ML_ENABLED or MLD_ENABLED:
website_dox_substitutions['OCAML_API'] = (
'{prefix}<a class="el" href="ml/index.html">ML/OCaml API</a>'
).format(
prefix=bullet_point_prefix)
else:
website_dox_substitutions['OCAML_API'] = ''
if JS_ENABLED:
website_dox_substitutions['JS_API'] = (
'{prefix}<a class="el" href="js/index.html">Javascript/Typescript API</a>'
).format(
prefix=bullet_point_prefix)
else:
website_dox_substitutions['JS_API'] = ''
configure_file(
doc_path('website.dox.in'),
temp_path('website.dox'),
website_dox_substitutions)
mk_dir(os.path.join(OUTPUT_DIRECTORY, 'html'))
if Z3PY_ENABLED:
shutil.copyfile(doc_path('../src/api/python/z3/z3.py'), temp_path('z3py.py'))
cleanup_API(doc_path('../src/api/z3_api.h'), temp_path('z3_api.h'))
cleanup_API(doc_path('../src/api/z3_ast_containers.h'), temp_path('z3_ast_containers.h'))
cleanup_API(doc_path('../src/api/z3_algebraic.h'), temp_path('z3_algebraic.h'))
cleanup_API(doc_path('../src/api/z3_polynomial.h'), temp_path('z3_polynomial.h'))
cleanup_API(doc_path('../src/api/z3_rcf.h'), temp_path('z3_rcf.h'))
cleanup_API(doc_path('../src/api/z3_fixedpoint.h'), temp_path('z3_fixedpoint.h'))
cleanup_API(doc_path('../src/api/z3_optimization.h'), temp_path('z3_optimization.h'))
cleanup_API(doc_path('../src/api/z3_fpa.h'), temp_path('z3_fpa.h'))
print("Removed annotations from z3_api.h.")
try:
if subprocess.call([DOXYGEN_EXE, doxygen_config_file]) != 0:
print("ERROR: doxygen returned nonzero return code")
exit(1)
except:
print("ERROR: failed to execute 'doxygen', make sure doxygen (http://www.doxygen.org) is available in your system.")
exit(1)
print("Generated Doxygen based documentation")
shutil.rmtree(os.path.realpath(TEMP_DIR))
print("Removed temporary directory \"{}\"".format(TEMP_DIR))
if Z3PY_ENABLED:
# Put z3py at the beginning of the search path to try to avoid picking up
# an installed copy of Z3py.
sys.path.insert(0, os.path.dirname(Z3PY_PACKAGE_PATH))
if sys.version < '3':
import __builtin__
__builtin__.Z3_LIB_DIRS = [ BUILD_DIR ]
else:
import builtins
builtins.Z3_LIB_DIRS = [ BUILD_DIR ]
for modulename in (
'z3',
'z3.z3',
'z3.z3consts',
'z3.z3core',
'z3.z3num',
'z3.z3poly',
'z3.z3printer',
'z3.z3rcf',
'z3.z3types',
'z3.z3util',
):
pydoc.writedoc(modulename)
doc = modulename + '.html'
shutil.move(doc, os.path.join(OUTPUT_DIRECTORY, 'html', doc))
print("Generated pydoc Z3Py documentation.")
if ML_ENABLED:
ml_output_dir = os.path.join(OUTPUT_DIRECTORY, 'html', 'ml')
mk_dir(ml_output_dir)
if subprocess.call(['ocamldoc', '-html', '-d', ml_output_dir, '-sort', '-hide', 'Z3', '-I', '$(ocamlfind query zarith)', '-I', '%s/api/ml' % BUILD_DIR, '%s/api/ml/z3enums.mli' % BUILD_DIR, '%s/api/ml/z3.mli' % BUILD_DIR]) != 0:
print("ERROR: ocamldoc failed.")
exit(1)
print("Generated ML/OCaml documentation.")
if JS_ENABLED:
try:
subprocess.check_output(['npm', 'run', '--prefix=%s' % JS_API_PATH, 'check-engine'])
except subprocess.CalledProcessError as e:
print("ERROR: node version check failed.")
print(e.output)
exit(1)
if subprocess.call(['npm', 'run', '--prefix=%s' % JS_API_PATH, 'docs']) != 0:
print("ERROR: npm run docs failed.")
exit(1)
print("Generated Javascript documentation.")
print("Documentation was successfully generated at subdirectory '{}'.".format(OUTPUT_DIRECTORY))
except Exception:
exctype, value = sys.exc_info()[:2]
print("ERROR: failed to generate documentation: %s" % value)
exit(1)
| 14,504 | 36.971204 | 235 |
py
|
z3
|
z3-master/doc/mk_tactic_doc.py
|
# Copyright (c) Microsoft Corporation 2015
"""
Tactic documentation generator script
"""
import os
import re
import sys
import subprocess
BUILD_DIR='../build'
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
OUTPUT_DIRECTORY = os.path.join(os.getcwd(), 'api')
def doc_path(path):
return os.path.join(SCRIPT_DIR, path)
is_doc = re.compile("Tactic Documentation")
is_doc_end = re.compile(r"\-\-\*\/")
is_tac_name = re.compile("## Tactic (.*)")
is_simplifier = re.compile(r"ADD_SIMPLIFIER\(.*\"([^\"]*)\".*,.*\"([^\"]*)\".*,.*\"([^\"]*)\"\.*\)")
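# Shape of the documentation blocks these patterns target, inferred from the regexes
# above rather than copied from a specific header: a comment block opened near a
# "Tactic Documentation" line, containing a "## Tactic <name>" heading, and terminated
# by the closing "--*/" of the comment. For simplifiers, the first two quoted arguments
# of an ADD_SIMPLIFIER(...) declaration are taken as its name and description.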
def is_ws(s):
return all([0 for ch in s if ch != ' ' and ch != '\n'])
def extract_params(ous, tac):
z3_exe = BUILD_DIR + "/z3"
out = subprocess.Popen([z3_exe, f"-tacticsmd:{tac}"], stdout=subprocess.PIPE).communicate()[0]
if not out:
return
out = out.decode(sys.getdefaultencoding())
if is_ws(out):
return
ous.write("### Parameters\n\n")
for line in out:
ous.write(line.replace("\r",""))
ous.write("\n")
def generate_tactic_doc(ous, f, ins):
tac_name = None
for line in ins:
m = is_tac_name.search(line)
if m:
tac_name = m.group(1)
if is_doc_end.search(line):
if tac_name:
extract_params(ous, tac_name)
break
ous.write(line)
def extract_tactic_doc(ous, f):
with open(f) as ins:
for line in ins:
if is_doc.search(line):
generate_tactic_doc(ous, f, ins)
def generate_simplifier_doc(ous, name, desc):
ous.write("## Simplifier [" + name + "](https://microsoft.github.io/z3guide/docs/strategies/summary/#tactic-" + name + ")\n")
ous.write("### Description\n" + desc + "\n")
def extract_simplifier_doc(ous, f):
with open(f) as ins:
for line in ins:
m = is_simplifier.search(line)
if m:
generate_simplifier_doc(ous, m.group(1), m.group(2))
def find_tactic_name(path):
with open(path) as ins:
for line in ins:
m = is_tac_name.search(line)
if m:
return m.group(1)
print(f"no tactic in {path}")
return ""
def find_simplifier_name(path):
with open(path) as ins:
for line in ins:
m = is_simplifier.search(line)
if m:
return m.group(1)
print(f"no simplifier in {path}")
return ""
def presort_files(find_fn):
tac_files = []
for root, dirs, files in os.walk(doc_path("../src")):
for f in files:
if f.endswith("~"):
continue
if f.endswith("tactic.h") or "simplifiers" in root:
tac_files += [(f, os.path.join(root, f))]
tac_files = sorted(tac_files, key = lambda x: find_fn(x[1]))
return tac_files
def help(ous):
ous.write("---\n")
ous.write("title: Tactics Summary\n")
ous.write("sidebar_position: 6\n")
ous.write("---\n")
tac_files = presort_files(find_tactic_name)
for file, path in tac_files:
extract_tactic_doc(ous, path)
def help_simplifier(ous):
ous.write("---\n")
ous.write("title: Simplifiers Summary\n")
ous.write("sidebar_position: 7\n")
ous.write("---\n")
tac_files = presort_files(find_simplifier_name)
for file, path in tac_files:
extract_simplifier_doc(ous, path)
def mk_dir(d):
if not os.path.exists(d):
os.makedirs(d)
mk_dir(os.path.join(OUTPUT_DIRECTORY, 'md'))
with open(OUTPUT_DIRECTORY + "/md/tactics-summary.md",'w') as ous:
help(ous)
with open(OUTPUT_DIRECTORY + "/md/simplifier-summary.md",'w') as ous:
help_simplifier(ous)
| 3,668 | 27.223077 | 129 |
py
|
radynpy
|
radynpy-master/setup.py
|
from setuptools import setup, find_packages
def readme():
with open('README.rst') as f:
return f.read()
setup(name='radynpy',
version='0.5.0',
description='Analysis tools for Radyn in Python',
long_description=readme(),
url='http://github.com/Goobley/radynpy',
author='Chris Osborne',
author_email='[email protected]',
license='MIT',
packages=find_packages(),
install_requires=['numpy', 'scikit-image', 'matplotlib', 'scipy', 'colour', 'palettable', 'cdflib'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License'
],
include_package_data=True,
zip_safe=True)
| 772 | 31.208333 | 106 |
py
|
radynpy
|
radynpy-master/radynpy/__init__.py
|
from . import utils
from . import cdf
from . import matsplotlib
| 64 | 15.25 | 25 |
py
|
radynpy
|
radynpy-master/radynpy/cdf/GenRadynCdfHelp.py
|
import pickle
import re
from itertools import permutations
from collections import deque
from radynpy.cdf.auxtypes import Val, Array, Unknown
with open('RadynKeySizes.pickle', 'rb') as p:
keys = pickle.load(p)
# ntime and maxatomlevels added by cmo
# some corrections made where things are inconsistent with docs
# need to keep additions up to date or the next stage won't work
additions = ['ntime', 'maxatomlevels']
pdfDocStr = """Index convention:
i
total range [0:maxatomlevels-1]!!nper species level index [0:nk[iel]-1]!!n[j=upper level, i=lower level]
j
total range [0:maxatomlevels-1]!!nper species level index [0:nk[iel]-1]!!n[j=upper level, i=lower level]
iel
element index [0:nel-1]
kr
transition index [0:nrad-1]
krc
continuum transition index [0:nrad-nline-1]
kfx
fixed transition index [0:nrfix-1]
k
depth index [0:ndep-1]
mu
angle index [0:nmu-1]
nu
frequency index [0:nq[kr]-1] or [1:nq[kr]]
t
time index [0:ntime-1]
Variables that do not change with time:
nel
number of elements
nk[iel]
number of levels including continuum levels for element iel
nk1
number of levels in total over all elements
nrad
number of radiative transitions treated in detail
nline
number of radiative bound-bound transitions
nrfix
number of transitions with fixed rates
nq[kr]
number of frequencies
nmu
number of angles
ndep
number of depth points
ndepm
ndep-1
ntime
number of timesteps
maxatomlevels
maximum number of atomic levels used in any of for any species
atomid[iel]
4 character identification of atom.
abnd[iel]
atomic abundance, log scale with hydrogen=12
awgt[iel]
atomic weight. input in atomic units, converted to cgs
ev[i,iel]
energy above ground state. input in cm^-1, copnverted to eV
g[i,iel]
statistical weight of level
label[i,iel]
20 character identification of level
ion[i,iel]
ionization stage of level, 1=neutral
ielrad[kr]
ielrad[kr]-1 is element number for radiative transition kr
jrad[kr]
jrad[kr]-1 is upper level of radiative transition kr
irad[kr]
irad[kr]-1 is lower level of radiative transition kr
krad[i,j,iel]
krad[i,j,iel]-1=krad[j,i,iel]-1 is the kr index of the transition from level i to level j, element iel
ktrans[kr]
ktrans[kr]-1 is continuum transition nr for transition kr, krc=ktrans[kr]-1, krc=-1 for bb transitions
cont[kr]
is 1 if the transition is bound-free and 0 for bound-bound transitions
f[kr]
oscillator strength
ga[kr]
radiative damping parameter
gw[kr]
van der waals damping parameter
gq[kr]
stark damping parameter
alamb[kr]
vacuum wavelength in angstrom
a[kr]
einstein a coefficient
bij[kr]
einstein b[i,j] coefficient
bji[kr]
einstein b[j,i] coefficient
ielfx[kfx]
ielfx[kfx]-1 is element for fixed transition kfx
jfx[kfx]
jfx[kfx]-1 is upper level of fixed transition kfx
ifx[kfx]
ifx[kfx]-1 is lower level of fixed transition kfx
ipho[kfx]
=1 continuum, =0 line
a0[kfx]
crossection at limit
trad[kfx]
brightness temperature for continua
itrad[kfx]
radiation temperature option. =1 rad.temp=temp,!!n=2 photospheric option, rad.temp=temp out to temp.lt.trad then temp=trad outwards!!n=3 chromospheric option, rad.temp=temp except when temp .gt. trad and temp increasing outwards. then rad.temp=trad
alfac[nu,krc]
photoionization crossection!!nin nu=0 the frequency for the edge is stored!!nin nu=1:nq[kr] the frequency points
qnorm
unit typical doppler width in km per second at line center
hn3c2
h*ny**3/c**2
hny4p
h * ny / (4*pi) ny in units of a typical doppler width
q[nu,kr]
frequency variable, in units of a typical doppler width!!npositive q for increased frequency
wq[nu,kr]
gauss-legendre weights for q
qmax[kr]
maximum frequency, same units as q
q0[kr]
frequency within which quadrature points are distributed!!nlinearly instead of logarithmically
ind[kr]
=1 for one sided quadrature [symmetric profile]!!n=2 for two sided quadrature [asymmetric profile]
frq[nu,krc]
frequency in hz for continua!!nin nu=0 the frequency for the edge is stored!!nin nu=1:nq[kr] the frequency points
zmu[mu]
cosine of angle vs z-axis (xmu and ymu also exist but are not used except for 1D MHD)
wmu[mu]
Gauss-Legendre weights for RT rays
atmoid
72 character identification of atmosphere used
dpid
72 character identification of depth-scale
dptype
=t depth scale is tauscale, =m depth scale is mass scale, see routine atmos
grav
gravitation acceleration
vturb[k]
microturbulence velocity
ee
electron charge
em
electron mass
hh
planck constant
cc
velocity of light
bk
boltzmann constant
amu
universal mass constant
hce
hh*cc/ee*1.e8, lambda[angstrom]=hce/energy[ev]
hc2
2*hh*cc*1.e24, 2*h*ny**3/c**2=hc2/lambda[angstrom]**3
hck
hh*cc/bk*1.e8, h*ny/kt=hck/lambda[angstrom]/t
ek
ee/bk
pi
pi
Variables that change with time:
z1[k,t]
interface heights
--zh[k,t]
--cell center heights
--dz[k,t]
--cell depths
vz1[k,t]
macroscopic velocity
d1[k,t]
density
tg1[k,t]
temperature
n1[k,i,iel,t]
population density in cm-3
ne1[k,t]
electron density
pg1[k,t]
gas pressure
fmj[k,t]
mass flux
gml[k,i,iel,t]
gains minus losses in rate equations
en1[k,t]
internal energy
coolt1[k,t]
total cooling
cool[k,kr,t]
cooling per transition/continuum in erg.cm-3.s-1
c[k,i,j,iel,t]
collisional transition rate
heat1[k,t]
mechanical energy needed to sustain initial atmosphere
eion1[k,t]
ionization+excitation energy
outint[nu,mu,kr,t]
monochromatic surface intensity in cgs units!!nin nu=0 the continuum intensity is stored!!nin nu=1:nq[kr] outint for the nu-points of the transition
cmass1[k,t]
column mass at interface
dnyd[k,iel,t]
doppler width in units of a typical doppler width
adamp[k,kr,t]
voigt damping parameter
z0[k,t]
interface height at previous timestep
tau[k,t]
opacity
vz0[k,t]
velocity at previous timestep
itime[t]
timestep number
time[t]
time
dtn[t]
timestep size from current to next time
dtnm[t]
timestep size from old to current time
iiter[t]
number of iterations needed for convergence
zm[k,t]
height for column masses of original gridpoints
nstar[k,i,iel,t]
lte population density
sl[k,kr,t]
line source function
bp[k,kr,t]
planck function
rij[k,kr,t]
radiative rate from i to j per ni atom
rji[k,kr,t]
radiative rate from j to i per nj atom
bheat1[k,t]
beam heating in erg/s/cm3
"""
docStrs = [x.replace('!!n', '\n') for x in pdfDocStr.splitlines() if not x.startswith('--')]
categories = {}
idx = 0
while True:
s = docStrs[idx]
# This line requires that the first entry in this docstring be a category header
if s.endswith(':'):
sanitised = s[:s.find(':')]
categories[sanitised] = {}
currentCat = categories[sanitised]
idx += 1
continue
if s.endswith(']'):
sanitisedName = s[:s.find('[')]
val = Array(tuple(s[s.find('[')+1:s.find(']')].split(',')))
else:
sanitisedName = s
val = Val(None)
currentCat[sanitisedName] = (val, docStrs[idx+1])
idx += 2
if idx >= len(docStrs):
break
# print(categories)
def extract_idl_range(s):
start = s.find('[')
i = start + 1
end = len(s)
depth = 1
while i < end:
if s[i] == '[':
depth += 1
if s[i] == ']':
depth -= 1
if depth == 0:
return s[start:i+1]
i += 1
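# Quick check of extract_idl_range on a nested example (the string is illustrative): it
# returns the outermost bracketed range even when that range itself contains brackets.
assert extract_idl_range('frequency index [0:nq[kr]-1] or [1:nq[kr]]') == '[0:nq[kr]-1]'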
indices = categories['Index convention']
indexMax = {}
for k, v in indices.items():
assert(type(v[0]) is Val)
    var = re.match(r'\[0:(.*)-1\]$', extract_idl_range(v[1])).group(1)
if var.find('[') != -1:
# Then there's an array index involved, and this data isn't in the pickle
indexMax[k] = Val(Unknown())
else:
        if re.match(r'^\w*$', var) is not None:
            indexMax[k] = keys[var]
        elif re.match(r'^(\w*)-(\w*)$', var):
            matches = re.match(r'^(\w*)-(\w*)$', var)
var1 = matches.group(1)
var2 = matches.group(2)
indexMax[k] = Val(keys[var1].val - keys[var2].val)
# print(indexMax)
# print(indexMax['i'])
# Special case for the grid sims, probably all other modern ones, ignore the size of kfx
print('Ignoring kfx\'s size while generating the reader as it seems to be a relic')
indexMax['kfx'].val = Unknown()
def deque_rotator(deq, reverse=False):
n = len(deq)
v = -1 if reverse else 1
for i in range(n):
deq.rotate(v)
yield list(deq)
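# Sanity sketch of deque_rotator (the index tuple is illustrative, not read from the
# pickle): it yields every cyclic right-rotation of its input exactly once, which is
# what check_and_correct_indices below tests against the array shapes in the sample CDF file.
assert list(deque_rotator(deque(('k', 'i', 'iel')))) == \
    [['iel', 'k', 'i'], ['i', 'iel', 'k'], ['k', 'i', 'iel']]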
noChange = categories['Variables that do not change with time']
change = categories['Variables that change with time']
def check_and_correct_indices(d):
variableNotInFile = []
for k, v in d.items():
try:
if type(v[0]) is Array:
loadedArray = keys[k]
if len(v[0].shape) == len(loadedArray.shape):
idxs = v[0].shape
correctIdxs = idxs
if not all(indexMax[idx].val == loadedArray.shape[j] for j, idx in enumerate(idxs)):
if any(type(indexMax[i].val) is Unknown for i in idxs):
if len(v[0].shape) == 1:
                                # It's only 1D -- has to be correct, move on to the next variable
                                continue
# If the 'all' passes then it still was correct -- just had an unknown in a consistent location
if not all(indexMax[idx].val == loadedArray.shape[j] for j, idx in enumerate(idxs) if type(indexMax[idx]) is not Unknown):
# Test cyclic permutations. I don't think we need anything more than this
# The order of rotation is such that everything shifts 1 to the right
# This looked the most reasonable from looking at a few examples by eye
                                # So we just take the first successful permutation
# if there aren't any, then we need to have a closer look
rot = deque_rotator(deque(idxs))
cyclic = [perm for perm in rot]
success = [all(indexMax[i].val == loadedArray.shape[j] for j, i in enumerate(c) if type(indexMax[i].val) is not Unknown) for c in cyclic]
if not any(success):
print('Cyclic permutation (with unknown) failed for: %s' % k)
exit()
for j, correct in enumerate(success):
if correct:
correctIdxs = tuple(cyclic[j])
break
else:
# First test cyclic permutations -- Should be sufficient
deq = deque(idxs)
rot = deque_rotator(deque(idxs))
cyclic = [perm for perm in rot]
success = [all(indexMax[i].val == loadedArray.shape[j] for j, i in enumerate(c)) for c in cyclic]
if not any(success):
print('Cyclic permutation failed for: %s' % k)
exit()
for j, correct in enumerate(success):
if correct:
correctIdxs = tuple(cyclic[j])
break
if v[0].shape != correctIdxs:
print('Changing indexing string of %s from %s to %s' % (k, v[0].shape.__repr__(), correctIdxs.__repr__()))
v[0].shape = correctIdxs
else:
print('Expected number of dimensions different to CDF File: %s, %dD in Example File (%s), and %dD in Doc'
% (k, len(loadedArray.shape), str(loadedArray), len(v[0].shape)))
exit()
except KeyError:
variableNotInFile.append(k)
print('Need to compute the values for key %s in the generated class' % k)
return variableNotInFile
calculateVar = []
calculateVar += check_and_correct_indices(noChange)
calculateVar += check_and_correct_indices(change)
def check_and_remove_nonexistent_vals(d):
removeList = []
for k, v in d.items():
if type(v[0]) is Val:
try:
loaded = keys[k]
except KeyError:
print('Removing key %s as not present in sample data.\nIf this is an attribute then it will be loaded by the final stage automatically' % k)
removeList.append(k)
for k in removeList:
del d[k]
check_and_remove_nonexistent_vals(noChange)
check_and_remove_nonexistent_vals(change)
def convert_to_help_dict(d):
res = {}
for k, v in d.items():
if k in additions:
continue
if (type(v[0]) is Array):
res[k] = (k+v[0].idl_repr(), v[1])
else:
res[k] = (k, v[1])
return res
def convert_to_type_dict(d):
res = {}
for k, v in d.items():
if k in additions:
continue
if (type(v[0]) is Array):
res[k] = 'Array'
else:
res[k] = 'Val'
return res
constVars = convert_to_help_dict(noChange)
timeVaryingVars = convert_to_help_dict(change)
c = categories['Index convention']
for k in c:
c[k] = (c[k][0], c[k][1].replace('-1', ''))
c['nu'] = (None, 'frequency index [0:nq[kr]] or [1:nq[kr]+1]')
# To fix the strings in varinfo it looks like it's sufficient to just replace instances of nq[kr] with nq[kr]+1
def update_nu_indexing(d):
    for k in d:
        # d[k] is a (name, description) tuple; patch the description string
        if 'nu=1:nq[kr]' in d[k][1]:
            d[k] = (d[k][0], d[k][1].replace('nu=1:nq[kr]', 'nu=1:nq[kr]+1'))
    return d
update_nu_indexing(constVars)
update_nu_indexing(timeVaryingVars)
helpInfo = []
helpInfo.append(categories['Index convention'])
helpInfo.append({'const': constVars, 'timeVary': timeVaryingVars})
typeDict = {**convert_to_type_dict(noChange), **convert_to_type_dict(change)}
helpInfo.append(typeDict)
helpInfo.append(calculateVar)
with open('RadynFormatHelp.pickle', 'wb') as p:
pickle.dump(helpInfo, p)
| 14,115 | 30.299335 | 248 |
py
|
radynpy
|
radynpy-master/radynpy/cdf/CdfLoader.py
|
import pickle
import os
import numpy as np
import cdflib
fileLocation = os.path.dirname(os.path.abspath(__file__)) + '/'
with open(fileLocation + 'RadynFormatHelp.pickle', 'rb') as p:
helpInfo = pickle.load(p)
typeDict = helpInfo[2]
allVars = {**helpInfo[1]['const'], **helpInfo[1]['timeVary']}
def maybe_lookup(d, k):
try:
return d[k]
except KeyError:
return None
def index_convention():
for k, v in helpInfo[0].items():
print(k)
print(v[1], '\n')
def var_info(var):
if type(var) is list:
var_info_list(var)
elif type(var) is str:
if var == '*':
var_info_all()
return
var_info_str(var)
else:
raise ValueError('Unknown type "%s" in argument "%s" to var_info' % (str(type(var)), str(var)))
def var_info_all():
print('Constant in time:')
for k, v in helpInfo[1]['const'].items():
print(v[0])
print(' ', v[1])
print('\nTime-varying:')
for k, v in helpInfo[1]['timeVary'].items():
print(v[0])
print(' ', v[1])
def var_info_str(var):
val = maybe_lookup(helpInfo[1]['const'], var)
if val is None:
val = maybe_lookup(helpInfo[1]['timeVary'], var)
timeNature = 'Time-varying'
if val is None:
raise ValueError('Unknown variable "%s"' % var)
else:
timeNature = 'Constant in time'
print(val[0])
print(' ', val[1].replace('\n', '\n '))
print(' ', timeNature)
def var_info_list(varList):
assert(all(type(x) is str for x in varList))
for v in varList:
var_info_str(v)
class RadynData:
'''
Container for the RADYN simulation data loaded from a CDF file.
All required variables must be specified at construction.
    The loaded variables are added as attributes to the constructed instance.
Parameters
----------
cdfPath : str
The complete path to the CDF file
varList : list of str or str
The variables to be loaded from the file. If '*' is passed then all will be loaded,
a single str will be the only variable loaded, and a list of strs will all be loaded.
parseFilenameParams : bool, optional
If the filename is in the F-CHROMA format then parse the heating parameters (default True)
Attributes
----------
    <varName> : np.ndarray
        Each loaded variable is attached as an attribute under its RADYN name.
'''
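    # Minimal example (the CDF path is hypothetical): RadynData('radyn_out.cdf', ['tg1', 'ne1'])
    # loads just those two arrays, plus every scalar value and the file's global attributes.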
def __init__(self, cdfPath, varList, parseFilenameParams=True):
if type(varList) is str:
if varList == '*':
varList = allVars.keys()
else:
varList = [varList]
assert(all(type(x) is str for x in varList))
for var in varList:
if not var in allVars:
raise ValueError('Non-existent Radyn CDF variable "%s" requested from load_vars' % var)
cdf = cdflib.CDF(cdfPath)
notLoadedVars = []
for var in allVars.keys():
# Just load all the scalar values, they're small enough, and most of them are
# important
if typeDict[var] == 'Val':
setattr(self, var, cdf.varget(var).item())
else:
if var in varList:
setattr(self, var, cdf.varget(var))
else:
notLoadedVars.append(var)
atts = cdf.globalattsget()
if len(atts) > 0:
for k in atts:
setattr(self, k, str(atts[k]))
self.notLoadedVars = notLoadedVars
cdf.close()
cdfFilename = os.path.basename(cdfPath)
if cdfFilename != 'radyn_out.cdf':
if parseFilenameParams and cdfFilename.startswith('radyn_out.'):
p = cdfFilename[cdfFilename.find('.')+1:]
params = p.split('_')
self.filenameParams = params
if len(params) != 6:
                    raise ValueError('FilenameParams should contain 6 underscore separated terms.\n'
'See FCHROMA simulation documentation for examples.\n'
'If you don\'t want to parse these then call with parseFilenameParams=False')
self.startingModelAtmosphere = params[0]
self.beamSpectralIndex = float(params[1][1:])
self.totalBeamEnergy = float(params[2])
self.beamPlulseType = params[3]
self.cutoffEnergy = params[4]
self.beamType = params[5]
def __getattr__(self, name):
if name in self.notLoadedVars:
raise AttributeError('Array "%s" was not requested to be loaded' % name)
else:
raise AttributeError('Unknown attribute "%s" requested' % name)
def index_convention(self):
'''
        Print a description of the RADYN index convention.
'''
return index_convention()
def var_info(self, var):
'''
        Print a description of the specified variable(s) and their axes.
Parameters
----------
var : str or list of str
Specifies the variable to return information about.
'*' will return information on all variables, and a list of variables can also be requested.
'''
return var_info(var)
def __repr__(self):
s = 'RadynData: Variables loaded:\n'
for k in self.__dict__.keys():
if k != 'notLoadedVars':
info = maybe_lookup(allVars, k)
if info is None:
s += (' %s\n' % k)
else:
s += (' %s: %s\n' % (info[0], info[1].replace('\n', '\n ')))
return s
class LazyRadynData:
'''
Container for the RADYN simulation data lazily loaded from a CDF file.
    Variables are loaded as required when accessed as attributes.
    The loaded variables are added as attributes to the constructed instance.
Parameters
----------
cdfPath : str
The complete path to the CDF file
parseFilenameParams : bool, optional
If the filename is in the F-CHROMA format then parse the heating parameters (default True)
Attributes
----------
    <varName> : np.ndarray
        Each loaded variable is attached as an attribute under its RADYN name.
'''
def __init__(self, cdfPath, parseFilenameParams=True):
self.cdf = cdflib.CDF(cdfPath)
for var in allVars.keys():
# Just load all the scalar values, they're small enough, and most of them are
# important
if typeDict[var] == 'Val':
setattr(self, var, self.cdf.varget(var).item())
atts = self.cdf.globalattsget()
if len(atts) > 0:
for k in atts:
setattr(self, k, str(atts[k]))
cdfFilename = os.path.basename(cdfPath)
if cdfFilename != 'radyn_out.cdf':
if parseFilenameParams and cdfFilename.startswith('radyn_out.'):
p = cdfFilename[cdfFilename.find('.')+1:]
params = p.split('_')
self.filenameParams = params
if len(params) != 6:
                    raise ValueError('FilenameParams should contain 6 underscore separated terms.\n'
'See FCHROMA simulation documentation for examples.\n'
'If you don\'t want to parse these then call with parseFilenameParams=False')
self.startingModelAtmosphere = params[0]
self.beamSpectralIndex = float(params[1][1:])
self.totalBeamEnergy = float(params[2])
self.beamPlulseType = params[3]
self.cutoffEnergy = params[4]
self.beamType = params[5]
def __getattr__(self, name):
if name in allVars:
var = self.cdf.varget(name)
setattr(self, name, var)
return getattr(self, name)
else:
raise AttributeError('Unknown attribute "%s" requested' % name)
def load_var(self, name):
'''
Load a variable that may not be present in the help documentation,
and hence not auto-loaded by __getattr__. Variable will be loaded
into self.name.
Raises : ValueError if variable with name not present in CDF.
'''
var = self.cdf.varget(name)
setattr(self, name, var)
def index_convention(self):
'''
        Print a description of the RADYN index convention.
'''
return index_convention()
def var_info(self, var):
'''
        Print a description of the specified variable(s) and their axes.
Parameters
----------
var : str or list of str
Specifies the variable to return information about.
'*' will return information on all variables, and a list of variables can also be requested.
'''
return var_info(var)
def __repr__(self):
s = 'RadynData: Variables loaded:\n'
for k in self.__dict__.keys():
info = maybe_lookup(allVars, k)
if info is None:
s += (' %s\n' % k)
else:
s += (' %s: %s\n' % (info[0], info[1].replace('\n', '\n ')))
return s
def close(self):
'''
Closes the CDF file, if it is open. Should only be called when the object is no longer required.
'''
if self.cdf is not None:
self.cdf.close()
self.cdf = None
def __del__(self):
self.close()
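
# Minimal usage sketch (the CDF path below is hypothetical; point it at a real RADYN
# output file). Arrays are only read from disk on first attribute access.
if __name__ == '__main__':
    sim = LazyRadynData('radyn_out.cdf')
    sim.index_convention()           # print the index naming conventions
    sim.var_info(['tg1', 'ne1'])     # describe a couple of variables
    print(sim.tg1.shape)             # first access triggers the lazy load of tg1
    sim.close()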
| 9,570 | 33.677536 | 113 |
py
|
radynpy
|
radynpy-master/radynpy/cdf/RadynKeyFile.py
|
import os
import cdflib
import pickle
from radynpy.cdf.auxtypes import Val, Array
import numpy as np
cdfFile = '/data/crisp/RadynGrid/radyn_out.val3c_d3_1.0e11_t20s_10kev_fp'
res = {}
cdf = cdflib.CDF(cdfFile)
for k in cdf.cdf_info()['zVariables']:
var = cdf.varget(k)
if len(var.shape) == 0:
# When the shape is () we have a 0-d ndarray in cdf[k][...].
# The only way to get the single value is with .item()
res[k] = Val(var.item())
else:
res[k] = Array(var.shape)
# Add ntime, because it's a useful value
res['ntime'] = Val(cdf.varget('time').shape[0])
# And max number of atomic levels
res['maxatomlevels'] = Val(cdf.varget('nk').max())
cdf.close()
with open('RadynKeySizes.pickle', 'wb') as p:
pickle.dump(res, p)
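
# The resulting pickle maps each CDF variable name to Val(scalar) or Array(shape), e.g.
# (illustrative, exact numbers depend on the simulation) res['ndep'] -> Val(191) and
# res['tg1'] -> Array((105, 191)). GenRadynCdfHelp.py consumes it to cross-check the
# documented index conventions against a real file.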
| 780 | 25.931034 | 73 |
py
|
radynpy
|
radynpy-master/radynpy/cdf/__init__.py
|
from .CdfLoader import RadynData, LazyRadynData
| 47 | 47 | 47 |
py
|
radynpy
|
radynpy-master/radynpy/cdf/auxtypes/AuxTypes.py
|
class Val:
def __init__(self, val):
self.val = val
def __repr__(self):
return 'Val(%s)' % self.val.__repr__()
class Array:
def __init__(self, shape):
self.shape = shape
def __repr__(self):
return 'Array%s' % self.shape.__repr__()
def idl_repr(self):
s = '['
for i, idx in enumerate(self.shape):
assert(type(idx) == str)
s += idx
if i != len(self.shape)-1:
s +=','
s += ']'
return s
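    # For reference: Array(('k', 't')).idl_repr() returns '[k,t]', the bracketed index
    # string used when printing variable help.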
class Unknown:
def __init__(self):
pass
def __repr__(self):
return 'Unknown'
| 626 | 18.59375 | 48 |
py
|
radynpy
|
radynpy-master/radynpy/cdf/auxtypes/__init__.py
|
from .AuxTypes import *
| 23 | 23 | 23 |
py
|
radynpy
|
radynpy-master/radynpy/matsplotlib/ContribFn.py
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import pchip_interpolate, interp1d, griddata
from skimage.exposure import equalize_adapthist, equalize_hist
from palettable.colorbrewer.sequential import Blues_9, PuBu_9
from colour import Color
from matplotlib.ticker import MaxNLocator, LogLocator, ScalarFormatter
import warnings
from radynpy.matsplotlib import xt_calc
def contrib_fn(cdf, kr, tStep=0, yRange=[-0.08, 2.5], vRange=[300.0, -300.0], mu=-1,
heatPerParticle=False, wingOffset=None,
dvMouseover=False, withBackgroundOpacity=True, colors={},
tightenLayout=True, printMiscTopData=True, stdoutInfo=False, returnData=False,
opctabPath=None):
'''
Plots the contribution function of a transition. Based on the original 4
panel Mats Carlsson plot, but using the 6 panel layout of Paulo Simoes.
Gratefully developed from the IDL scripts of M. Carlsson, G. Kerr, and P. Simoes.
This function produces a plot in matplotlib, that will then need to be shown or printed.
Parameters
----------
cdf : LazyRadynData or RadynData
The RadynData object containing the data to compute the contribution function from.
Due to the number of variables required, it is easier to use a LazyRadynData, if possible.
kr : int
The transition index, see `index_convention` and `var_info` on the RadynData object for
more information about what this means.
tStep : int, optional
The time index at which to compute the contribution function see 't' in the `index_convention`
of RadynData for more information. (default: 0)
yRange : list of float, optional
The height range to plot over, in Mm. (default: [-0.08, 2.5])
vRange : list of float, optional
        The velocity range to plot over (converted to a wavelength range around the transition core). (default: [300, -300])
mu : int, optional
Index of the mu ray to be used. Default is the closest to the normal to the atmosphere. (default: -1)
heatPerParticle : bool, optional
Display heating per particle or per unit volume. (default: False)
wingOffset : float, optional
The offset of the slice to take through the wing in Angstrom from the line core.
dvMouseover : bool, optional
Display the mouseover x-values in units of velocity (default: False, angstrom are used).
withBackgroundOpacity : bool, optional
Include the background opacity at this wavelength in the calculation. (default: True)
colors : dict[str, str], optional
Dictionary of line names and the hex code to color them.
Keys and default values:
'tau': Color('OrangeRed').hex,
'vz': Color('Chocolate').hex,
'sv': Color('OrangeRed').hex,
'bb': Color('LightSeaGreen').hex,
'pop1': Blues_9.mpl_colors[3],
'pop2': Blues_9.mpl_colors[6],
'ne': Color('Coral').hex,
'line': Color('LightSeaGreen').hex,
'temp': Color('OrangeRed').hex,
'cf': Blues_9.mpl_colors[6],
'heat': Color('Salmon').hex,
'core': Blues_9.mpl_colors[6],
'wing': Blues_9.mpl_colors[3]
tightenLayout : bool, optional
Tighten the whitespace around the plots. (default: True)
printMiscTopData : bool, optional
Add data describing the plot to the top of the figure (line, frequency, timestep...). (default: True)
stdoutInfo : bool, optional
Print progress information to stdout. (default: False)
returnData : bool, optional
Return a dictionary of the data computed to produce the plots. (default: False)
    opctabPath : str, optional
        Path to a non-standard opctab.dat if needed. (default: None)
'''
with warnings.catch_warnings():
# There are a few log10(0) in here which naturally raise annoying RuntimeWarnings
# They're not a real problem though, so we'll ignore them for this function
warnings.simplefilter('ignore', RuntimeWarning)
# This function works in ~ 2 parts, computation, and then lots and lots of plotting
# debug info
if stdoutInfo:
print('=============================')
print('atom: %s' % cdf.atomid[cdf.ielrad[kr]-1].upper())
print('transition: %d -> %d' % (cdf.jrad[kr], cdf.irad[kr]))
print('angle: %f degrees' % np.rad2deg(np.arccos(cdf.zmu[mu])))
print('=============================')
nDep = cdf.tg1.shape[1]
iTrans = cdf.irad[kr] - 1
jTrans = cdf.jrad[kr] - 1
iel = cdf.ielrad[kr] - 1
# Line intensity data
outMu = cdf.outint[tStep,:,mu,:]
x_ny, tauq_ny = xt_calc(cdf, tStep, iel, kr, withBackgroundOpacity=withBackgroundOpacity, opctabPath=opctabPath)
dtau = np.zeros((nDep, cdf.nq[kr]))
dtau[1:,:] = tauq_ny[1:nDep,:] - tauq_ny[:nDep-1,:]
# Source function from level pops in CDF file
sll = cdf.a[kr] * cdf.n1[tStep, :, jTrans, iel] \
/ (cdf.bij[kr] * cdf.n1[tStep, :, iTrans, iel] - cdf.bji[kr] * cdf.n1[tStep, :, jTrans, iel])
nq = cdf.nq[kr]
wavelength = cdf.alamb[kr] / (cdf.q[:nq, kr] * cdf.qnorm * 1e5 / cdf.cc + 1)
# POSITIVE velocity is blueshift, towards shorter wavelength
# NEGATIVE velocity is downflow, redshift, towards longer wavelength
dVel = cdf.q[:nq, kr] * cdf.qnorm # frequency in km/s
vMax = np.abs(vRange).max()
wlIdxs = np.argwhere(np.abs(dVel) < vMax).ravel()
nFreq = len(wlIdxs)
# some comments courtesy of PJAS!
# x_ny -> opacity (z1 = X*mu)
# tauq_ny -> tau (z2 = tau * exp(-tau / mu))
# z3 = 1 / tau
# z4 = S (source fn)
ny0 = wlIdxs[0]
z1Local = x_ny[:, wlIdxs] / cdf.zmu[mu]
z2 = tauq_ny[:, wlIdxs] * np.exp(-tauq_ny[:, wlIdxs] / cdf.zmu[mu])
z3 = 1.0 / tauq_ny[:, wlIdxs]
z4 = np.tile(sll, (nFreq, 1)).T
y = (cdf.z1[tStep, :nDep-1] + cdf.z1[tStep, 1:nDep]) * 0.5e-8
y = np.insert(y, 0, 2 * y[0] - y[1])
# Find the tau=1 line using a cubic hermite spline interpolation that avoids overshoot
tau1 = np.zeros(nFreq)
for ny in range(nFreq):
tau1[ny] = pchip_interpolate(np.log10(tauq_ny[1:, ny+ny0]), y[1:], 0.0)
# Build a silly 4D matrix to contain the data... this would probably just as well be
# a list of 4 matrices given the way we use it
zTot = np.zeros((nFreq, nDep, 2, 2))
zTot[:, :, 0, 0] = (z1Local * z3).T # Xv / tau
zTot[:, :, 0, 1] = z2.T # tau * exp(-tau)
zTot[:, :, 1, 0] = z4.T # Sv, uniform across wavelength
zTot[:, :, 1, 1] = (z1Local * z2 *z3 *z4).T # contrib fn
x = dVel[wlIdxs] # x-axis shrunk to plotting range
# Find the indices for the range we want to plot
iwy = np.argwhere((y > np.min(yRange)) & (y < np.max(yRange))).flatten()
iwx = np.argwhere((x > np.min(vRange)) & (x < np.max(vRange))).flatten()
vRange = [np.max(x[iwx]), np.min(x[iwx])] # invert x axis
# Find the deltav / frequency indices for the core and wing indices,
# where the wingOffset is specified in Angstrom from the line core.
coreIdx = np.argmin(np.abs(dVel[wlIdxs]))
if wingOffset is None:
wingIdx = np.argmin(np.abs(dVel[wlIdxs] - vMax / 2))
else:
wingIdx = np.argmin(np.abs(wavelength[wlIdxs] - cdf.alamb[kr] - wingOffset))
# Print wing choice if debugging in stdout
if stdoutInfo:
print('Wing index: %d' % wingIdx)
print('Wing wavelength: %f Angstrom' % (wavelength[wlIdxs[wingIdx]] - cdf.alamb[kr]))
# Plotting code starts here
# returns the radiation temperature for arrays of specific intensity and wavelength (Angstrom)
def radiation_temperature(intens, lamb):
c = 2.99792458e10
h = 6.626176e-27
k = 1.380662e-16
l = lamb*1e-8
tRad = h * c / k / l / np.log(2.0 * h * c / intens / (l**3) + 1.0)
return tRad
# Set up our default colours, non-hideous
defaultColors = {
'tau': Color('OrangeRed').hex,
'vz': Color('Chocolate').hex,
'sv': Color('OrangeRed').hex,
'bb': Color('LightSeaGreen').hex,
'pop1': Blues_9.mpl_colors[3],
'pop2': Blues_9.mpl_colors[6],
'ne': Color('Coral').hex,
'line': Color('LightSeaGreen').hex,
'temp': Color('OrangeRed').hex,
'cf': Blues_9.mpl_colors[6],
'heat': Color('Salmon').hex,
'core': Blues_9.mpl_colors[6],
'wing': Blues_9.mpl_colors[3],
}
choose_color = lambda col: colors[col] if col in colors else defaultColors[col]
tauColor = choose_color('tau')
vzColor = choose_color('vz')
svColor = choose_color('sv')
bbColor = choose_color('bb')
pop1Color = choose_color('pop1')
pop2Color = choose_color('pop2')
neColor = choose_color('ne')
lineColor = choose_color('line')
tempColor = choose_color('temp')
cfColor = choose_color('cf')
heatColor = choose_color('heat')
coreColor = choose_color('core')
wingColor = choose_color('wing')
# Code to add the wavelength axis to plots, in addition to the delta-vel axis
def add_wl_axis(ax, label=False, numbers=False):
wlAx = ax.twiny()
lambdaRange = -np.array(ax.get_xlim())*cdf.alamb[kr] / cdf.cc * 1e5
# wlAx.set_xlim(lambdaRange[-1], lambdaRange[0])
wlAx.set_xlim(lambdaRange)
wlAx.tick_params('x', direction='in')
if label:
# wlAx.set_xlabel(r'$\Delta\lambda$ [$\AA$]')
wlAx.set_xlabel(r'$\lambda-\lambda_0$ [$\AA$]')
else:
wlAx.tick_params('x', labeltop=numbers)
if dvMouseover:
axDupe = ax.figure.add_axes(ax.get_position(True), sharex=ax, sharey=ax, frameon=False)
axDupe.xaxis.set_visible(False)
axDupe.yaxis.set_visible(False)
# add the vertical lines for the core and wings to the image plots
def add_core_wing_lines(ax):
ax.axvline(dVel[wlIdxs[coreIdx]], c=coreColor)
ax.axvline(dVel[wlIdxs[wingIdx]], c=wingColor)
# Add a customised legend, with no box, or line segment, but the text with the line's colour
def add_legend(ax, loc='lower right'):
leg = ax.legend(loc=loc, handlelength=0, frameon=False, labelspacing=0.0)
for line, text in zip(leg.get_lines(), leg.get_texts()):
text.set_color(line.get_color())
# Add a simple black label to the upper left corner of an image plot
def add_image_label(ax, label):
ax.annotate(label, xy=(0.04, 0.89), xycoords=ax.transAxes)
# make figure
fig, ax = plt.subplots(3,2, figsize=(8, 10), sharey=True, constrained_layout=True)
## First image plot: emissivity
ix = 0
iy = 0
z = np.copy(zTot[np.ix_(iwx, iwy)][:,:,ix, iy])
z = np.transpose(z, (1,0))
zi = np.log10(z)
ziMax = np.max(zi[np.isfinite(zi)])
ziMin = np.min(zi[np.isfinite(zi)])
zi = np.clip(zi, ziMin, ziMax)
# Use histogram equalisation to improve contrast - thanks Paulo!
zi = equalize_hist(zi)
# These plots are like images on irregular "pixel" centres, pcolormesh can handle this though,
# if we work out where the edges are
xEdges = 0.5 * (x[iwx][:-1] + x[iwx][1:])
xEdges = np.insert(xEdges, 0, x[iwx][0])
xEdges = np.insert(xEdges, -1, x[iwx][-1])
yEdges = 0.5 * (y[iwy][:-1] + y[iwy][1:])
yEdges = np.insert(yEdges, 0, y[iwy][0])
yEdges = np.insert(yEdges, -1, y[iwy][-1])
ax[0,0].pcolormesh(xEdges, yEdges, zi, cmap=PuBu_9.mpl_colormap)
# Link all the subplot axes and invert the x-axis for readability
ax[0,0].invert_xaxis()
ax[0,0].get_shared_x_axes().join(ax[0,0], ax[1,0])
ax[0,0].get_shared_x_axes().join(ax[0,0], ax[2,0])
# Save xlim as it can be clobbered by plotting some of the lines
lim = ax[0,0].get_xlim()
# Add the fluid velocity and tau=1 line
ax[0,0].plot(cdf.vz1[tStep][iwy] * 1e-5, y[iwy], c=vzColor, ls='--', label=r'v$_\mathrm{z}$')
ax[0,0].plot(x, tau1, c=tauColor, label=r'$\tau_\nu$=1')
# Add the extra info
# We now add the wl axis at the end of the image plots, since there are so many
# things that seem to want to mess with the axis scale, and after plotting everything
# we can set it for good, then add these extra axes, as they are computed relative
# to the axis limits at the time of creation
# add_wl_axis(ax[0,0], label=True)
add_legend(ax[0,0], loc='upper right')
add_core_wing_lines(ax[0,0])
# Label and turn ticks in for space
ax[0,0].set_xlabel(r'$\Delta$v [km s$^{-1}$]')
ax[0,0].set_ylabel('Height (Mm)')
ax[0,0].tick_params('both', direction='in')
add_image_label(ax[0,0], r'$\chi_\nu$ / $\tau_\nu$')
# Second image plot: opacity
ix = 0
iy = 1
z = np.copy(zTot[np.ix_(iwx, iwy)][:,:,ix, iy])
z = np.transpose(z, (1,0))
zi = z
ziMax = np.max(zi[np.isfinite(zi)])
ziMin = np.min(zi[np.isfinite(zi)])
zi = np.clip(zi, ziMin, ziMax)
# Same again
ax[1,0].pcolormesh(xEdges, yEdges, zi, cmap=PuBu_9.mpl_colormap)
ax[1,0].plot(cdf.vz1[tStep][iwy] * 1e-5, y[iwy], c=vzColor, ls='--')
ax[1,0].plot(x, tau1, c=tauColor)
# add_wl_axis(ax[1,0], label=True)
add_core_wing_lines(ax[1,0])
ax[1,0].set_xlabel(r'$\Delta$v [km s$^{-1}$]')
ax[1,0].set_ylabel('Height (Mm)')
ax[1,0].tick_params('both', direction='in')
add_image_label(ax[1,0], r'$\tau_\nu$ exp($-\tau_\nu$)')
# Third image plot: contribution functions and line profile
ix = 1
iy = 1
z = np.copy(zTot[np.ix_(iwx, iwy)][:,:,ix, iy])
z = np.transpose(z, (1,0))
zi = np.log10(z)
ziMax = np.max(zi[np.isfinite(zi)])
ziMin = np.min(zi[np.isfinite(zi)])
zi = np.clip(zi, ziMin, ziMax)
# Same again
zi = equalize_hist(zi)
ax[2,0].pcolormesh(xEdges, yEdges, zi, cmap=PuBu_9.mpl_colormap)
ax[2,0].plot(cdf.vz1[tStep][iwy] * 1e-5, y[iwy], c=vzColor, ls='--')
ax[2,0].plot(x, tau1, c=tauColor)
# Adjust line profile to fill plot
lineProfile = cdf.outint[tStep, 1:cdf.nq[kr]+1, mu, kr]
lineProfile -= lineProfile.min()
lineProfile /= lineProfile.max()
lineProfile *= (y[iwy][0] - y[iwy][-1])
lineProfile += y[iwy][-1]
ax[2,0].plot(dVel, lineProfile, c=lineColor, label='Line Profile')
# axes and labelling
# add_wl_axis(ax[2,0], label=True)
add_core_wing_lines(ax[2,0])
ax[2,0].set_xlabel(r'$\Delta$v [km s$^{-1}$]')
ax[2,0].set_ylabel('Height (Mm)')
ax[2,0].tick_params('both', direction='in')
add_image_label(ax[2,0], r'C$_\mathrm{I}$')
add_legend(ax[2,0], loc='upper right')
# restore xlim from ages ago and add the wavelength axes
ax[0,0].set_xlim(lim)
add_wl_axis(ax[0,0], label=True)
add_wl_axis(ax[1,0], numbers=True)
add_wl_axis(ax[2,0], numbers=True)
# The "normal" (line) plots auto-adjust the y range, so we want to preserve it and reapply it at the end
yLim = ax[0,0].get_ylim()
# Plot 4: Level populations
pop1 = np.log10(cdf.n1[tStep, iwy, iTrans, iel])
pop2 = np.log10(cdf.n1[tStep, iwy, jTrans, iel])
ne = np.log10(cdf.ne1[tStep, iwy])
if iel == 0:
def make_label(i):
ionI = 'I' * cdf.ion[i, iel]
ionI = r'$_\mathrm{' + ionI + '}$'
return 'H ' + ionI + ' ' + cdf.label[i, iel].strip()
else:
def make_label(i):
l = cdf.label[i, iel].strip().split(' ')
l[0] = l[0][0].upper() + l[0][1:]
l[1] = r'$_\mathrm{' + l[1].upper() + '}$'
return ' '.join(l)
labelI = make_label(iTrans)
labelJ = make_label(jTrans)
popRange = (lambda v: [np.min(v), np.max(v)])(np.stack((pop1, pop2)))
ax[0,1].plot(pop1, y[iwy], c=pop1Color, label=labelI)
ax[0,1].plot(pop2, y[iwy], c=pop2Color, label=labelJ)
# dummy plot for legend
ax[0,1].plot(pop1[1], y[iwy][1], c=neColor, label='electron')
neAx = ax[0,1].twiny()
neAx.plot(ne, y[iwy], c=neColor)
ax[0,1].tick_params('both', direction='in')
neAx.tick_params('x', direction='in')
ax[0,1].set_xlabel(r'log$_{10}$ ion density [cm$^{-3}$]')
neAx.set_xlabel(r'log$_{10}$ electron density [cm$^{-3}$]')
add_legend(ax[0,1], loc='upper right')
# Plot 5: Radiation Temperatures, and Core and Wing opacities
# Source function
sv0 = sll
# Black-body function
# bb0 = blackbody_nu(Q(cdf.alamb[kr], 'Angstrom'), cdf.tg1[tStep, iwy])
# bbTemp = np.log10(radiation_temperature(bb0.value, cdf.alamb[kr]))
        # The radiation temperature of a blackbody is simply its temperature,
# by definition, so we don't need the blackbody stuff
bbTemp = np.log10(cdf.tg1[tStep])
svTemp = np.log10(radiation_temperature(sv0, cdf.alamb[kr]))
ax[1,1].plot(bbTemp[iwy], y[iwy], c=bbColor, label='T')
ax[1,1].plot(svTemp[iwy], y[iwy], c=svColor, label=r'T(S$_\nu$)')
# dummy plots for legend
ax[1,1].plot(bbTemp[iwy][1], 0, c=coreColor, label=r'$\tau$ core')
ax[1,1].plot(bbTemp[iwy][1], 0, c=wingColor, label=r'$\tau$ wing')
ax[1,1].tick_params('both', direction='in')
tauAx = ax[1,1].twiny()
tauAx.semilogx(tauq_ny[iwy, wlIdxs[coreIdx]], y[iwy], c=coreColor)
tauAx.semilogx(tauq_ny[iwy, wlIdxs[wingIdx]], y[iwy], c=wingColor)
tauAx.set_xlim([0.5e-5, 200])
tauAx.tick_params('x', which='both', direction='in')
tauAx.xaxis.set_major_locator(LogLocator(numticks=4))
ax[1,1].set_xlabel(r'log$_{10}$ Temperature [K]')
tauAx.set_xlabel(r'$\tau$')
add_legend(ax[1,1])
# Plot 6: Heating, and Core and Wing Contribution Functions
heat = cdf.bheat1[tStep, iwy]
if heatPerParticle:
# Only using hydrogen density
heat /= cdf.n1[tStep,:,:6,0].sum(axis=1)[iwy]
contFn = np.copy(z)
contFn = np.log10(contFn / np.max(contFn))
contFnRange = np.maximum([np.nanmin(contFn), np.nanmax(contFn)], np.nanmax(contFn) - 10.2)
ax[2,1].plot(contFn[:,coreIdx], y[iwy], c=coreColor, label=r'Core C$_\mathrm{I}$')
        ax[2,1].plot(contFn[:,wingIdx], y[iwy], c=wingColor, label=r'Wing C$_\mathrm{I}$')
# dummy plot for legend
ax[2,1].plot(0, 0, c=heatColor, label='Heat')
ax[2,1].set_xlim(contFnRange)
        # We have to scale heat manually since matplotlib doesn't realise it needs to put the offset
# (e.g. x1e-8) at the top, and there's no way to move it
heatAx = ax[2,1].twiny()
if 1e-2 <= np.max(heat) <= 1e3:
# Don't need to scale
heatAx.plot(heat, y[iwy], c=heatColor)
offsetStr = ''
else:
power = np.floor(np.log10(np.max(heat)))
scale = 10**power
heat /= scale
heatAx.plot(heat, y[iwy], c=heatColor)
offsetStr = '10$^\mathrm{%d}$ ' % power
ax[2,1].tick_params('both', direction='in')
heatAx.tick_params('x', direction='in')
ax[2,1].set_xlabel(r'log$_{10}$ C$_\mathrm{I}$ (normalised)')
if heatPerParticle:
heatAx.set_xlabel(r'Heating per H particle [%serg s$^{-1}$]' % offsetStr)
else:
heatAx.set_xlabel(r'Heating [%serg s$^{-1}$ cm$^{-3}$]' % offsetStr)
add_legend(ax[2,1])
# Add the timestep, viewing angle and Core Wavelength to the top of the plot
if printMiscTopData:
timeVal = cdf.time[tStep]
muVal = cdf.zmu[mu]
lineCoreVal = cdf.alamb[kr]
ax[0,0].annotate(r't = %.1f s $\mu$ = %.4f''\n'r'Line Core: %.1f$\AA$' % (timeVal, muVal, lineCoreVal),
xy=(0.0,1.1), xycoords=('axes fraction', 'axes fraction'), ha='left', va='bottom')
# reapply clobbered yLim
ax[0,0].set_ylim(yLim)
# Tighten the layout to maximise graph space
if tightenLayout:
fig.set_constrained_layout_pads(h_pad=20.0/72.0, w_pad=0.01,
hspace=-0.18, wspace=0.01)
if returnData:
out = {'atomId': cdf.atomid[iel],
'kr': kr,
'iel': iel,
'levels': [jTrans, iTrans],
'labels': [labelI, labelJ],
'emissivity': zTot[np.ix_(iwx, iwy)][:,:, 0, 0],
'opacity': zTot[np.ix_(iwx, iwy)][:,:, 0, 1],
'contFn': zTot[np.ix_(iwx, iwy)][:,:, 1, 1],
'tau1': tau1,
'dVel': dVel,
'xEdges': xEdges,
'yEdges': yEdges,
'y': y
}
return out
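
# Minimal usage sketch (the CDF path and transition index are hypothetical; pick a
# bound-bound transition that exists in your run). contrib_fn draws the six-panel figure
# on the current matplotlib backend, so follow it with plt.show() or fig.savefig().
if __name__ == '__main__':
    from radynpy.cdf import LazyRadynData
    cdf = LazyRadynData('radyn_out.cdf')
    contrib_fn(cdf, kr=0, tStep=20, stdoutInfo=True)
    plt.show()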
| 21,850 | 43.960905 | 124 |
py
|
radynpy
|
radynpy-master/radynpy/matsplotlib/__init__.py
|
from .Opacity import OpcFile, XtNyCalc, xt_calc
from .ContribFn import contrib_fn
| 81 | 40 | 47 |
py
|
radynpy
|
radynpy-master/radynpy/matsplotlib/Opacity.py
|
import numpy as np
from radynpy.utils import voigt_H, gaunt_bf_h, hydrogen_absorption, hydrogen_bf_profile
import struct
import os
here = os.path.dirname(os.path.abspath(__file__)) + '/'
# Almost everything in here is a verbatim translation of the Radyn IDL routines
# supplied by M. Carlsson, many improvements could be made for pythonic-ness
class OpcFile:
def __init__(self, path=None):
if path is None:
path = here+'opctab.dat'
mtPts = 39
xc = np.zeros((4, mtPts, 20))
with open(path, 'rb') as opc:
opc.seek(0, 2)
fileSize = opc.tell()
opc.seek(0)
sumh = np.fromfile(opc, dtype='float64', count=1).item()
grph = np.fromfile(opc, dtype='float64', count=1).item()
xv = np.fromfile(opc, dtype='float64', count=mtPts)
yv = np.fromfile(opc, dtype='float64', count=20)
nelsr = np.fromfile(opc, dtype='int32', count=1).item()
# text is just padding here, it's empty
text = ''.join([s.decode('UTF-8') for s in struct.unpack('s'*(15*80), opc.read(15*80))])
cel = ''.join([s.decode('UTF-8') for s in struct.unpack('s'*(2 * nelsr), opc.read(2 * nelsr))])
cel = [cel[i:i+2] for i in range(0,len(cel),2)]
abund = np.fromfile(opc, dtype='float64', count=nelsr)
awi = np.fromfile(opc, dtype='float64', count=nelsr)
loopmx = np.fromfile(opc, dtype='int32', count=1).item()
xl = np.fromfile(opc, dtype='float64', count=loopmx)
wavel = xl
recNum = 1
xcs = []
while True:
if fileSize - opc.tell() <= xc.shape[0] * xc.shape[1] * xc.shape[2] * 8:
break
opc.seek(3120*8*recNum)
record = np.fromfile(opc, dtype='float64', count=3120).reshape((xc.shape[2], xc.shape[1], xc.shape[0])).T
xcs.append(record)
recNum += 1
self.sumh = sumh
self.grph = grph
self.xv = xv
self.yv = yv
self.nelsr = nelsr
self.text = text
self.cel = cel
self.abund = abund
self.awi = awi
self.loopmx = loopmx
self.wavel = wavel
        self.xcs = xcs
        # roptab caches these table indices from its first call (irec == 4); initialise the
        # sentinel so the `self.i1 is None` check there never hits an unset attribute.
        self.i1 = None
        self.i2 = None
def roptab(self, tg1, ne1, irec):
kB = 1.380662e-16
alge = 1.0 / np.log(10.0)
nDep = tg1.shape[0]
y = np.zeros((nDep, 4))
y1 = np.zeros((nDep, 4))
y2 = np.zeros((nDep, 4))
y12 = np.zeros((nDep, 4))
v = np.zeros(nDep)
dvdt = np.zeros(nDep)
dvdne = np.zeros(nDep)
xcs = self.xcs
if irec == 4 or self.i1 is None:
i1 = []
i2 = []
theta = 5040.0 / tg1
pelg = np.log10(ne1 * kB * tg1)
for k in range(nDep):
leftIdx = np.searchsorted(self.xv, theta[k])
if leftIdx == 0 or leftIdx == len(self.xv):
print('roptab: T outside range')
i1.append(leftIdx)
leftIdx = np.searchsorted(self.yv, pelg[k])
if leftIdx == 0 or leftIdx == len(self.yv):
print('roptab: pe outside range')
i2.append(leftIdx)
self.i1 = np.array(i1) - 1
self.i2 = np.array(i2) - 1
i1 = self.i1
i2 = self.i2
dx1 = self.xv[i1+1] - self.xv[i1]
dx2 = self.yv[i2+1] - self.yv[i2]
self.dx1 = dx1
self.dx2 = dx2
self.theta = theta
self.pelg = pelg
else:
i1 = self.i1
i2 = self.i2
dx1 = self.dx1
dx2 = self.dx2
pelg = self.pelg
theta = self.theta
# read record
xc = xcs[irec-2]
for k in range(nDep):
y[k,0] = xc[0, i1[k], i2[k]]
y[k,1] = xc[0, i1[k]+1, i2[k]]
y[k,2] = xc[0, i1[k]+1, i2[k]+1]
y[k,3] = xc[0, i1[k], i2[k]+1]
y1[k,0] = xc[1, i1[k], i2[k]]
y1[k,1] = xc[1, i1[k]+1, i2[k]]
y1[k,2] = xc[1, i1[k]+1, i2[k]+1]
y1[k,3] = xc[1, i1[k], i2[k]+1]
y2[k,0] = xc[2, i1[k], i2[k]]
y2[k,1] = xc[2, i1[k]+1, i2[k]]
y2[k,2] = xc[2, i1[k]+1, i2[k]+1]
y2[k,3] = xc[2, i1[k], i2[k]+1]
y12[k,0] = xc[3, i1[k], i2[k]]
y12[k,1] = xc[3, i1[k]+1, i2[k]]
y12[k,2] = xc[3, i1[k]+1, i2[k]+1]
y12[k,3] = xc[3, i1[k], i2[k]+1]
# bicubic interpolation, looks horrible, but copied from original and works
wt = np.zeros((16,16))
wt[:,0] = [ 1.,0.,-3., 2.,0.,0., 0., 0.,-3., 0., 9.,-6., 2., 0.,-6., 4.]
wt[:,1] = [ 0.,0., 0., 0.,0.,0., 0., 0., 3., 0.,-9., 6.,-2., 0., 6.,-4.]
wt[:,2] = [ 0.,0., 0., 0.,0.,0., 0., 0., 0., 0., 9.,-6., 0., 0.,-6., 4.]
wt[:,3] = [ 0.,0., 3.,-2.,0.,0., 0., 0., 0., 0.,-9., 6., 0., 0., 6.,-4.]
wt[:,4] = [ 0.,0., 0., 0.,1.,0.,-3., 2.,-2., 0., 6.,-4., 1., 0.,-3., 2.]
wt[:,5] = [ 0.,0., 0., 0.,0.,0., 0., 0.,-1., 0., 3.,-2., 1., 0.,-3., 2.]
wt[:,6] = [ 0.,0., 0., 0.,0.,0., 0., 0., 0., 0.,-3., 2., 0., 0., 3.,-2.]
wt[:,7] = [ 0.,0., 0., 0.,0.,0., 3.,-2., 0., 0.,-6., 4., 0., 0., 3.,-2.]
wt[:,8] = [ 0.,1.,-2., 1.,0.,0., 0., 0., 0.,-3., 6.,-3., 0., 2.,-4., 2.]
wt[:,9] = [ 0.,0., 0., 0.,0.,0., 0., 0., 0., 3.,-6., 3., 0.,-2., 4.,-2.]
wt[:,10]= [ 0.,0., 0., 0.,0.,0., 0., 0., 0., 0.,-3., 3., 0., 0., 2.,-2.]
wt[:,11]= [ 0.,0.,-1., 1.,0.,0., 0., 0., 0., 0., 3.,-3., 0., 0.,-2., 2.]
wt[:,12]= [ 0.,0., 0., 0.,0.,1.,-2., 1., 0.,-2., 4.,-2., 0., 1.,-2., 1.]
wt[:,13]= [ 0.,0., 0., 0.,0.,0., 0., 0., 0.,-1., 2.,-1., 0., 1.,-2., 1.]
wt[:,14]= [ 0.,0., 0., 0.,0.,0., 0., 0., 0., 0., 1.,-1., 0., 0.,-1., 1.]
wt[:,15]= [ 0.,0., 0., 0.,0.,0.,-1., 1., 0., 0., 2.,-2., 0., 0.,-1., 1.]
d1 = dx1
d2 = dx2
# pack temporary x
x = np.zeros((nDep, 16))
for i in range(4):
x[:,i] = y[:,i]
x[:,i+4] = y1[:,i] * d1
x[:,i+8] = y2[:,i] * d2
x[:,i+12] = y12[:,i]*d1*d2
rc = np.zeros((nDep, 4, 4))
for i in range(4):
for j in range(4):
l = i*4 + j
for m in range(16):
rc[:,i,j] += wt[l,m] * x[:, m]
t = (theta - self.xv[i1]) / dx1
u = (pelg - self.yv[i2]) / dx2
for l in range(3, -1, -1):
v = t*v + ((rc[:,l,3]*u + rc[:,l,2])*u + rc[:,l,1])*u + rc[:,l,0]
dvdt = dvdt*u + (3.0 * rc[:,3,l]*t + 2.0*rc[:,2,l])*t + rc[:,1,l]
dvdne = t*dvdne + (3.0*rc[:,l,3]*u + 2.0*rc[:,l,2])*u + rc[:,l,1]
v = 10**v
dvdne = dvdne/dx2 * v
dvdt = -dvdt/dx1*v/alge*theta+dvdne
return {'v': v, 'dvdt': dvdt, 'dvdne': dvdne, 'wavel': self.wavel}
class XtNyCalc:
def __init__(self, cdf, t, iel, kr, dnyd, withBackgroundOpacity=True, opctabPath=None):
self.cdf = cdf
self.t = t
self.iel = iel
self.kr = kr
self.dnyd = dnyd
self.withBackgroundOpacity = withBackgroundOpacity
self.i = cdf.irad[kr] - 1
self.j = cdf.jrad[kr] - 1
self.vel = cdf.vz1[t, :] * 1e-5 / cdf.qnorm
self.nDep = cdf.tg1.shape[1]
xlamb = cdf.alamb[kr]
xlamb5 = 5000.0
temp = cdf.tg1[t]
ne = cdf.ne1[t]
nh = cdf.n1[t,:,:,0]
toth = cdf.n1[t,:,:,0].sum(axis=1)
opcFile = OpcFile(path=opctabPath)
opc = opcFile.roptab(temp, ne, 4)
wavel = opc['wavel']
xnorm5 = opc['v'] * toth
xconth, xconth_lte = hydrogen_absorption(xlamb5, 0, temp, ne, nh)
xnorm5 += xconth
w2 = 1.0 / (5000.0*5000.0)
w4 = w2**2
scatrh = w4 * (5.799e-13+w2*(1.422e-6+w2*2.784))*nh[:,0]
scatne = 6.655e-25 * ne
xnorm5 = xnorm5 + scatrh + scatne
xnormtCurrent = np.copy(xnorm5)
iw = np.argwhere(np.abs(xlamb - wavel) < 0.5)
if len(iw) == 0:
raise ValueError('xlamb not found in opctab, wavelength='+repr(cdf.alamb[kr]))
opcm = opcFile.roptab(temp, ne, iw[0].item()+4)
xcontm = opcm['v'] * toth
xconth, xconth_lte = hydrogen_absorption(xlamb, 0, temp, ne, nh)
w2 = 1.0 / (xlamb*xlamb)
w4 = w2**2
scatrh = w4 * (5.799e-13+w2*(1.422e-6+w2*2.784))*nh[:,0]
scatne = 6.655e-25 * ne
chi = xcontm + xconth +scatrh + scatne
xcont = chi / xnorm5
self.xcont = xcont
self.xnormtCurrent = xnormtCurrent
def xt_nycalc(self, ny):
cdf = self.cdf
t = self.t
kr = self.kr
i = self.i
j = self.j
iel = self.iel
vel = self.vel
dnyd = self.dnyd
if not cdf.cont[kr]:
# The 1.0 was xmu -- holdover from mhd
v = (cdf.q[ny, kr] - 1.0 * vel) / dnyd
h = voigt_H(cdf.adamp[t, :, kr], v)
# h = radyn_voigt(cdf.adamp[t, :, kr], v)
phi = h / (dnyd * np.sqrt(np.pi))
gijk = cdf.g[i, iel] / cdf.g[j, iel]
hn3c2 = cdf.a[kr] / cdf.bji[kr]
z = cdf.n1[t, :, i, iel] - gijk * cdf.n1[t, :, j, iel]
alpha = cdf.bij[kr] * phi * cdf.hny4p
xlamb = cdf.alamb[kr]
else:
raise NotImplementedError('Not Implemented for continua')
x = z * alpha / self.xnormtCurrent
if self.withBackgroundOpacity:
x += self.xcont
tauq = np.zeros(self.nDep)
tauq[0] = x[0] * 0.0 # tau[0] -- which should always be 0
for k in range(1, self.nDep):
tauq[k] = tauq[k-1] + 0.5 * (x[k] + x[k-1]) * (cdf.tau[t,k] - cdf.tau[t,k-1])
return x, tauq, self.xnormtCurrent
def xt_calc(cdf, t, iel, kr, withBackgroundOpacity=True, opctabPath=None):
nDep = cdf.tg1.shape[1]
kB = 1.380662e-16
nq0 = cdf.nq[kr]
x_ny = np.zeros((nDep, nq0))
tauq_ny = np.zeros((nDep, nq0))
dnyd = np.sqrt(2 * kB * cdf.tg1[t, :] / cdf.awgt[iel]) * 1.0e-5 / cdf.qnorm
dnyd = np.sqrt(dnyd**2 + (cdf.vturb * 1e-5 / cdf.qnorm)**2)
xt = XtNyCalc(cdf, t, iel, kr, dnyd, withBackgroundOpacity=withBackgroundOpacity, opctabPath=opctabPath)
for ny in range(nq0):
x, tauq, xnorm = xt.xt_nycalc(ny)
x_ny[:,ny] = x * xnorm
tauq_ny[:,ny] = tauq
return x_ny, tauq_ny
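
# Usage sketch (the CDF path is hypothetical; the bundled opctab.dat is used by default):
# x_ny and tauq_ny both have shape (ndep, nq[kr]) and hold the monochromatic opacity and
# optical depth for transition kr of element iel at timestep t; contrib_fn in ContribFn.py
# is the main consumer of these arrays.
if __name__ == '__main__':
    from radynpy.cdf import LazyRadynData
    cdf = LazyRadynData('radyn_out.cdf')
    x_ny, tauq_ny = xt_calc(cdf, t=0, iel=0, kr=0)
    print(x_ny.shape, tauq_ny.shape)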
| 10,596 | 35.167235 | 121 |
py
|
radynpy
|
radynpy-master/radynpy/utils/Utils.py
|
import numpy as np
from scipy import special
import warnings
def voigt_H(a, v):
'''
Voigt H function -- same as is used in Lightweaver.
'''
z = (v + 1j * a)
return special.wofz(z).real
def gaunt_bf_h(n, qf):
# ;+
# ; gaunt(nn,qf)
# ;
# ; computes the bound-free gaunt factors for
# ; hydrogen n=quantum number of level considered
    # ; x=reciprocal wavelength in inverse microns
# ; qf = frequency
# ;
# ;-
# TODO(cmo): Replace with more general one underneath
warnings.warn('This function is to be replaced with the more general gaunt_bf', DeprecationWarning)
x = qf/2.99793e14
if n == 1:
gaunt = 1.2302628 + x*( -2.9094219e-3+x*(7.3993579e-6-8.7356966e-9*x) )+ (12.803223/x-5.5759888)/x
elif n == 2:
gaunt = 1.1595421+x*(-2.0735860e-3+2.7033384e-6*x)+ ( -1.2709045+(-2.0244141/x+2.1325684)/x )/x
elif n == 3:
gaunt = 1.1450949+x*(-1.9366592e-3+2.3572356e-6*x)+ ( -0.55936432+(-0.23387146/x+0.52471924)/x )/x
elif n == 4:
gaunt = 1.1306695+ x*( -1.3482273e-3+x*(-4.6949424e-6+2.3548636e-8*x) )+ ( -0.31190730+(0.19683564-5.4418565e-2/x)/x )/x
elif n == 5:
gaunt = 1.1190904+ x*( -1.0401085e-3+x*(-6.9943488e-6+2.8496742e-8*x) )+ ( -0.16051018+(5.5545091e-2-8.9182854e-3/x)/x )/x
elif n == 6:
gaunt = 1.1168376+ x*( -8.9466573e-4+x*(-8.8393133e-6+3.4696768e-8*x) )+ ( -0.13075417+(4.1921183e-2-5.5303574e-3/x)/x )/x
elif n == 7:
gaunt = 1.1128632+ x*( -7.4833260e-4+x*(-1.0244504e-5+3.8595771e-8*x) )+ ( -9.5441161e-2+(2.3350812e-2-2.2752881e-3/x)/x )/x
elif n == 8:
gaunt = 1.1093137+ x*( -6.2619148e-4+x*(-1.1342068e-5+4.1477731e-8*x) )+ ( -7.1010560e-2+(1.3298411e-2-9.7200274e-4/x)/x )/x
elif n == 9:
gaunt = 1.1078717+ x*( -5.4837392e-4+x*(-1.2157943e-5+4.3796716e-8*x) )+ ( -5.6046560e-2+(8.5139736e-3-4.9576163e-4/x)/x )/x
elif n == 10:
gaunt = 1.1052734+x*( -4.4341570e-4+x*(-1.3235905e-5+4.7003140e-8*x) )+( -4.7326370e-2+(6.1516856e-3-2.9467046e-4/x)/x )/x
else:
gaunt = 1.0
return gaunt
def gaunt_bf(wvls, n = 1, Z = 1):
'''
Calculates the bound-free Gaunt fn for a given wavelength and quantum
number.
M. J. Seaton (1960), Rep. Prog. Phys. 23, 313
This is the same calculation that is used in RH.
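    e.g. to evaluate the factor across part of the hydrogen Lyman continuum (the
    wavelengths below are purely illustrative):
    ```
    wvls = np.arange(500.0, 912.0, 50.0)
    gbf = gaunt_bf(wvls, n=1, Z=1)
    ```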
Parameters
----------
wvls : `array_like` (1D)
The wavelengths (Angstrom).
n : `int`, optional
Principal quantum number (default = 1).
Z : `int`, optional
Charge (default = 1).
Returns
-------
gbf : `array`
The bound-free Gaunt factors.
Graham Kerr, Feb 17th 2020
'''
wvls = np.asarray(wvls)
x = (6.626e-27 * 3e18) / wvls / (2.1799e-11 * Z**2)
x3 = x**(1.0/3.0)
nsqx = 1.0 / (n**2 * x)
gbf = 1.0 + 0.1728*x3 * (1.0 - 2.0*nsqx) - 0.0496*(x3)**2 * (1.0 - (1.0 - nsqx)*(2.0/3.0)*nsqx)
return gbf
def hydrogen_bf_profile(wl, z, i):
# ;+
# ; prfhbf(wl,z,i)
# ;
# ; absorption crossection profile for hydrogenic ion
# ;
# ;
# ; wl - wavelength
# ; z - nuclear charge
# ; i - level from which absorption takes place, FORTRAN numbering
# ;
# ;-
# ;
prfhbf = 0.0
wl0 = 911.7535278/(z*z)*i*i
if wl > wl0:
return 0.0
frq = 2.9979e18/wl
g = gaunt_bf_h(i,frq)
pr0 = 1.04476e-14*z*z*z*z
a5 = float(i)**5
wm = wl*1.0e-4
wm3 = wm*wm*wm
prfhbf = pr0*wm3*g/a5
return prfhbf
def hydrogen_absorption(xlamb, icont, temp, ne, nh): #xconth, xconth_lte):
# ;+
# ; abshyd,xlamb,icont,temp,nne,nh,xconth,xconth_lte
# ;
# ; gives total true absorption coefficient for all
# ; hydrogen-combinations in cm**2 per cm**3
# ; icont .ge. 0 will exclude hydrogen b-f continuum from level
# ; icont
# ; based on abshyd.f
# ;
# ;-
cc=2.99792e+10
bk=1.38066e-16
ee=1.60219e-12
hh=6.62618e-27
xnu=cc*1.0e8/xlamb
ex=np.exp(-1.438786e8/temp/xlamb) # stimulated emission correction
ndep = len(temp)
# xconth = np.zeros(ndep)
totnhi = nh[:,:5].sum(axis=1) # neutral hydrogen
# ;
# ; free-free contribution from hydrogen minus ion
# ; expressions from gray p. 149-150. note misprint of first
# ; term of f0 in gray
# ;
if xlamb > 3038.0 and xlamb < 91130.0:
x1=np.log10(xlamb)
x2=x1*x1
x3=x2*x1
f0=-31.63602+0.48735*x1+0.296585*x2-0.0193562*x3
f1=15.3126-9.33651*x1+2.000242*x2-0.1422568*x3
f2=-2.6117+3.22259*x1-1.082785*x2+0.1072635*x3
thlg=np.log10(5040./temp)
thlg2=thlg*thlg
abhmff=ne*bk*temp*10.0**(f0+f1*thlg+f2*thlg2)*totnhi
else:
abhmff=np.zeros(ndep)
# ;
# ; bound-free contribution from hydrogen minus ion
# ; expressions from gray p. 147-149
# ;
if xlamb > 1500.0 and xlamb < 20000.0:
if xlamb < 5250.0:
f0=-16.20450
f1=0.17280e-3
f2=0.39422e-7
f3=0.51345e-11
elif xlamb < 11250.0:
f0=-16.40383
f1=0.61356e-6
f2=-0.11095e-7
f3=0.44965e-11
elif xlamb < 20000.0:
f0=-15.95015
f1=-0.36067e-3
f2=0.86108e-7
f3=-0.90741e-11
crhmbf=f0+f1*(xlamb-8500.)+f2*(xlamb-8500.)**2+f3*(xlamb-8500.)**3
crhmbf=10.0**crhmbf
abhmbf=1.03526e-16*ne*crhmbf/temp**1.5* np.exp(0.754*ee/bk/temp)*totnhi*(1.0-ex)
else:
abhmbf=np.zeros(ndep)
# ;
# ; free-free contribution from hydrogen
# ; expressions from gray p. 146-147
# ;
gff=1.0+3.3512*xlamb**(-0.3333333)*(6.95e-9*xlamb*temp+0.5)
abhff=3.69e8/np.sqrt(temp)*ne/xnu/xnu/xnu*nh[:,5]*(1.0-ex)*gff
# ;
# ; bound-free contribution from hydrogen bound-free, high levels
# ; opacity from levels above nk(iel)-1 up to level 8 are included
# ; assuming lte populations relative to the continuum
# ;
zz=1.0
abhbf=np.zeros(ndep)
for i in range(6,9): #; FORTRAN level numbering
crhbf=hydrogen_bf_profile(xlamb,zz,i)
if crhbf > 0.0:
hnukt=157896./(i*i*temp)
const=4.1416e-16*ne*nh[:,5]/(temp*np.sqrt(temp))
pop=i*i*const*np.exp(hnukt)
abhbf=abhbf+crhbf*pop*(1.0-ex)
xconth_lte = abhmbf+abhmff+abhff+abhbf
# ;
# ; bound-free contribution from hydrogen, explicit levels
# ;
zz=1.0
for i in range(1,6):
if i != icont:
crhbf=hydrogen_bf_profile(xlamb,zz,i)
if crhbf > 0.0:
hnukt=157896./(i*i*temp)
const=4.1416e-16*ne*nh[:,5]/(temp*np.sqrt(temp))
pop=i*i*const*np.exp(hnukt)
abhbf=abhbf+crhbf*(nh[:,i-1]-pop*ex)
xconth_lte=xconth_lte+crhbf*pop*(1.0-ex)
xconth=abhmbf+abhmff+abhbf+abhff
return xconth, xconth_lte
def planck_fn(wvls, tg):
'''
Calculates the Planck fn in units of [erg / s / cm^2 / sr / AA] given
wavelength in angstrom and temperature in Kelvin.
e.g. to produce the Planck Fn at 5800K from 2000 to 10000 A every 1
angstrom:
```
wvls = np.arange(2000,10001,1,dtype=float)
bb = planck_fn(wvls, tg=5800.00)
```
Parameters
----------
wvls : `array_like` (1D)
The wavelengths (Angstrom).
tg : `array_like` (1D)
Gas temperature (K).
Returns
-------
bb : `array` (2D)
Black-body intensity as a squeezed 2D array of shape [len(wvls), len(tg)].
Graham Kerr, Feb 18th 2020
'''
# Convert to np array in case it is in input
# as a regular list
wvls = np.atleast_1d(np.asarray(wvls))
tg = np.atleast_1d(np.array(tg))
# Convert to wavelength in cm
w = wvls / 1.0e8
# Constants appropriate to cgs units.
c1 = 3.7417749e-05 # =2*pi*h*c**2
c2 = 1.4387687e0 # =h*c/k
bbflux = c1 / (w[:, None]**5 * (np.exp(c2 / tg[None, :] / w[:, None]) - 1.0))
bbflux *= 1e-8 / np.pi
return bbflux.squeeze()
| 8,398 | 30.456929 | 133 |
py
|
radynpy
|
radynpy-master/radynpy/utils/__init__.py
|
from .Utils import *
| 20 | 20 | 20 |
py
|
GigaSpeech
|
GigaSpeech-main/toolkits/athena/prepare_data.py
|
#!/usr/bin/python
# coding=utf-8
# Copyright (C) 2021 ATHENA AUTHORS; Shuaijiang Zhao; Xiaoning Lei
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# reference https://github.com/SpeechColab/GigaSpeech/tree/main/utils
import os
import re
import sys
import json
from absl import logging
SUBSETS = ["XL", "DEV", "TEST"]
garbage_utterance_tags = "<SIL>|<MUSIC>|<NOISE>|<OTHER>"
punctuation_tags = "<COMMA>|<EXCLAMATIONPOINT>|<PERIOD>|<QUESTIONMARK>"
def extract_json(json_file='', output_dir=''):
input_dir = os.path.dirname(json_file)
try:
with open(json_file, 'r') as JSONFILE:
json_data = json.load(JSONFILE)
except:
sys.exit(f'Failed to load input json file: {json_file}')
else:
if json_data['audios'] is not None:
with open(f'{output_dir}/utt2subsets', 'w') as utt2subsets, \
open(f'{output_dir}/text', 'w') as utt2text, \
open(f'{output_dir}/segments', 'w') as segments, \
open(f'{output_dir}/opus.scp', 'w') as wavscp:
for long_audio in json_data['audios']:
try:
long_audio_path = os.path.realpath(
os.path.join(input_dir, long_audio['path']))
aid = long_audio['aid']
segments_lists = long_audio['segments']
assert (os.path.exists(long_audio_path))
assert ('opus' == long_audio['format'])
assert (16000 == long_audio['sample_rate'])
except AssertionError:
print(f'Warning: {aid} something is wrong, maybe AssertionError, skipped')
continue
except:
print(f'Warning: {aid} something is wrong, maybe the error path: '
f'{long_audio_path}, skipped')
continue
else:
wavscp.write(f'{aid}\t{long_audio_path}\n')
for segment_file in segments_lists:
try:
sid = segment_file['sid']
start_time = segment_file['begin_time']
end_time = segment_file['end_time']
text = segment_file['text_tn']
segment_subsets = segment_file["subsets"]
except:
print(f'Warning: {segment_file} something is wrong, skipped')
continue
else:
utt2text.write(f'{sid}\t{text}\n')
segments.write(f'{sid}\t{aid}\t{start_time}\t{end_time}\n')
segment_sub_names = " ".join(segment_subsets)
utt2subsets.write(f'{sid}\t{segment_sub_names}\n')
def convert_opus2wav(opus_scp='', wav_scp='', rm_opus=False):
with open(opus_scp, 'r') as oscp, open(wav_scp, 'w') as wscp:
for line in oscp:
line = line.strip()
            utt, opus_path = re.split(r'\s+', line)
wav_path = opus_path.replace('.opus', '.wav')
cmd = f'ffmpeg -y -i {opus_path} -ac 1 -ar 16000 {wav_path}'
try:
os.system(cmd)
wscp.write(f'{utt}\t{wav_path}\n')
except:
sys.exit(f'Failed to run the cmd: {cmd}')
if rm_opus is True:
os.remove(opus_path)
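
# Usage sketch (paths are illustrative): opus.scp and wav.scp are tab-separated
# "<audio-id>\t<path>" files, so a typical call is
#   convert_opus2wav('exp/gigaspeech/opus.scp', 'exp/gigaspeech/wav.scp', rm_opus=False)
# which shells out to ffmpeg once per recording.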
def prepare_data(data_dir='', subset='XL'):
subset_file = os.path.join(data_dir, 'utt2subsets')
text_file = os.path.join(data_dir, 'text')
segment_file = os.path.join(data_dir, 'segments')
wav_scp = os.path.join(data_dir, 'wav.scp')
out_f = os.path.join(data_dir, subset + '.csv')
subset_dict = {}
with open(subset_file) as SUBSET:
subset_lines = SUBSET.readlines()
for line in subset_lines:
line_list = line.strip().split()
utt_key = line_list[0]
subset_dict[utt_key] = line_list[1:]
with open(text_file) as TEXT:
text_lines = TEXT.readlines()
time_d = {}
with open(segment_file) as SEGMENT:
seg_lines = SEGMENT.readlines()
for i in seg_lines:
item = i.strip().split('\t')
utt_key = item[0]
start_time = item[2]
end_time = item[3]
time_d[utt_key] = str(int((float(end_time) - float(start_time)) * 1000))
text_d = {}
for i in text_lines:
utt_key = i.split('\t')[0]
speaker, k1 = utt_key.split('_')
if speaker not in text_d:
text_d[speaker] = []
transcriptions = i.split(utt_key)[1].strip()
if utt_key in time_d:
if re.search(garbage_utterance_tags, transcriptions):
continue
if '{' + subset + '}' not in subset_dict[utt_key]:
continue
# remove the punctuation tags
transcriptions = re.sub(punctuation_tags, "", transcriptions)
            # collapse repeated spaces (left after tag removal) into a single space
            transcriptions = re.sub(" +", " ", transcriptions)
text_d[speaker].append(utt_key + '\t' + time_d[utt_key] +
'\t' + transcriptions + '\t' + speaker)
with open(wav_scp) as f:
lines = f.readlines()
utt_key_wav = {}
for i in lines:
utt_key = i.split('\t')[0]
if utt_key not in utt_key_wav:
utt_key_wav[utt_key] = 0
with open(out_f, 'w') as f:
f.write('wav_filename\twav_len\ttranscript\tspeaker\n')
for speaker in text_d:
if speaker in utt_key_wav:
for utt_sample in text_d[speaker]:
f.write(utt_sample + '\n')
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
if len(sys.argv) < 3:
print('Usage: python {} dataset_dir output_dir\n'
' dataset_dir : directory contains GigaSpeech dataset\n'
' output_dir : GigaSpeech data working directory'.format(sys.argv[0]))
exit(1)
DATASET_DIR = sys.argv[1]
OUTPUT_DIR = sys.argv[2]
json_file = os.path.join(DATASET_DIR, "GigaSpeech.json")
extract_json(json_file=json_file, output_dir=OUTPUT_DIR)
    print('Converting opus to wav, please be patient')
opus_scp = os.path.join(OUTPUT_DIR, 'opus.scp')
wav_scp = os.path.join(OUTPUT_DIR, 'wav.scp')
convert_opus2wav(opus_scp, wav_scp, False)
for subset in SUBSETS:
prepare_data(data_dir=OUTPUT_DIR, subset=subset)
| 7,248 | 40.1875 | 98 |
py
|
GigaSpeech
|
GigaSpeech-main/toolkits/athena/extract_meta.py
|
#!/usr/bin/python
# coding=utf-8
# Copyright 2021 Xiaomi Corporation (Author: Yongqing Wang)
# Athena Authors (Shuaijiang Zhao)
import sys
import os
import argparse
import json
def get_args():
parser = argparse.ArgumentParser(description="""
This script is used to process raw json dataset of GigaSpeech,
    where the long wav is split into segments and
data of Athena format is generated.
""")
    parser.add_argument('--pipe-format', action='store_true', default=False,
                        help="""If true, wav.scp is generated with pipeline format""")
parser.add_argument('input_json', help="""Input json file of Gigaspeech""")
parser.add_argument('output_dir', help="""Output dir for prepared data""")
args = parser.parse_args()
return args
def meta_analysis(input_json, output_dir, pipe):
input_dir = os.path.dirname(input_json)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
try:
with open(input_json, 'r') as injson:
json_data = json.load(injson)
except:
sys.exit(f'Failed to load input json file: {input_json}')
else:
if json_data['audios'] is not None:
with open(f'{output_dir}/utt2spk', 'w') as utt2spk, \
open(f'{output_dir}/utt2dur', 'w') as utt2dur, \
open(f'{output_dir}/utt2subsets', 'w') as utt2subsets, \
open(f'{output_dir}/text', 'w') as utt2text, \
open(f'{output_dir}/segments', 'w') as segments, \
open(f'{output_dir}/wav.scp', 'w') as wavscp, \
open(f'{output_dir}/reco2dur', 'w') as reco2dur:
for long_audio in json_data['audios']:
try:
long_audio_path = os.path.realpath(os.path.join(input_dir, long_audio['path']))
aid = long_audio['aid']
segments_lists = long_audio['segments']
duration = long_audio['duration']
assert(os.path.exists(long_audio_path))
assert('opus' == long_audio['format'])
assert(16000 == long_audio['sample_rate'])
except AssertionError:
print(f'Warning: {aid} something is wrong, maybe AssertionError, skipped')
continue
except:
print(f'Warning: {aid} something is wrong, maybe the error path: {long_audio_path}, skipped')
continue
else:
if pipe is True:
wavscp.write(f'{aid}\tffmpeg -i {long_audio_path} -ar 16000 -f wav pipe:1 |\n')
else:
wavscp.write(f'{aid}\t{long_audio_path}\n')
reco2dur.write(f'{aid}\t{duration}\n')
for segment_file in segments_lists:
try:
sid = segment_file['sid']
start_time = segment_file['begin_time']
end_time = segment_file['end_time']
dur = end_time - start_time
text = segment_file['text_tn']
segment_subsets = segment_file["subsets"]
except:
print(f'Warning: {segment_file} something is wrong, skipped')
continue
else:
utt2spk.write(f'{sid}\t{sid}\n')
utt2dur.write(f'{sid}\t{dur}\n')
utt2text.write(f'{sid}\t{text}\n')
segments.write(f'{sid}\t{aid}\t{start_time}\t{end_time}\n')
segment_sub_names = " " .join(segment_subsets)
utt2subsets.write(f'{sid}\t{segment_sub_names}\n')
def main():
args = get_args()
meta_analysis(args.input_json, args.output_dir, args.pipe_format)
if __name__ == '__main__':
main()
| 3,579 | 36.291667 | 105 |
py
|
GigaSpeech
|
GigaSpeech-main/toolkits/kaldi/extract_meta.py
|
# Copyright 2021 Xiaomi Corporation (Author: Yongqing Wang)
import sys
import os
import argparse
import json
def get_args():
parser = argparse.ArgumentParser(description="""
This script is used to process raw json dataset of GigaSpeech,
    where the long wav is split into segments and
data of kaldi format is generated.
""")
    parser.add_argument('--pipe-format', action='store_true', default=False,
                        help="""If true, wav.scp is generated with pipeline format""")
parser.add_argument('input_json', help="""Input json file of Gigaspeech""")
parser.add_argument('output_dir', help="""Output dir for prepared data""")
args = parser.parse_args()
return args
def meta_analysis(input_json, output_dir, pipe):
input_dir = os.path.dirname(input_json)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
try:
with open(input_json, 'r') as injson:
json_data = json.load(injson)
except:
sys.exit(f'Failed to load input json file: {input_json}')
else:
if json_data['audios'] is not None:
with open(f'{output_dir}/utt2spk', 'w') as utt2spk, \
open(f'{output_dir}/utt2dur', 'w') as utt2dur, \
open(f'{output_dir}/utt2subsets', 'w') as utt2subsets, \
open(f'{output_dir}/text', 'w') as utt2text, \
open(f'{output_dir}/segments', 'w') as segments, \
open(f'{output_dir}/wav.scp', 'w') as wavscp, \
open(f'{output_dir}/reco2dur', 'w') as reco2dur:
for long_audio in json_data['audios']:
try:
long_audio_path = os.path.realpath(os.path.join(input_dir, long_audio['path']))
aid = long_audio['aid']
segments_lists = long_audio['segments']
duration = long_audio['duration']
assert(os.path.exists(long_audio_path))
assert('opus' == long_audio['format'])
assert(16000 == long_audio['sample_rate'])
except AssertionError:
print(f'Warning: {aid} something is wrong, maybe AssertionError, skipped')
continue
except:
print(f'Warning: {aid} something is wrong, maybe the error path: {long_audio_path}, skipped')
continue
else:
if pipe is True:
wavscp.write(f'{aid}\tffmpeg -i {long_audio_path} -ar 16000 -f wav pipe:1 |\n')
else:
wavscp.write(f'{aid}\t{long_audio_path}\n')
reco2dur.write(f'{aid}\t{duration}\n')
for segment_file in segments_lists:
try:
sid = segment_file['sid']
start_time = segment_file['begin_time']
end_time = segment_file['end_time']
dur = end_time - start_time
text = segment_file['text_tn']
segment_subsets = segment_file["subsets"]
except:
print(f'Warning: {segment_file} something is wrong, skipped')
continue
else:
utt2spk.write(f'{sid}\t{sid}\n')
utt2dur.write(f'{sid}\t{dur}\n')
utt2text.write(f'{sid}\t{text}\n')
segments.write(f'{sid}\t{aid}\t{start_time}\t{end_time}\n')
segment_sub_names = " " .join(segment_subsets)
utt2subsets.write(f'{sid}\t{segment_sub_names}\n')
def main():
args = get_args()
meta_analysis(args.input_json, args.output_dir, args.pipe_format)
if __name__ == '__main__':
main()
| 3,494 | 36.580645 | 105 |
py
|
GigaSpeech
|
GigaSpeech-main/toolkits/wenet/extract_meta.py
|
# Copyright 2021 Xiaomi Corporation (Author: Yongqing Wang)
# Mobvoi Corporation (Author: Di Wu)
import sys
import os
import argparse
import json
def get_args():
parser = argparse.ArgumentParser(description="""
This script is used to process raw json dataset of GigaSpeech,
    where the long wav is split into segments and
data of wenet format is generated.
""")
parser.add_argument('input_json', help="""Input json file of Gigaspeech""")
parser.add_argument('output_dir', help="""Output dir for prepared data""")
args = parser.parse_args()
return args
def meta_analysis(input_json, output_dir):
input_dir = os.path.dirname(input_json)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
try:
with open(input_json, 'r') as injson:
json_data = json.load(injson)
except:
sys.exit(f'Failed to load input json file: {input_json}')
else:
if json_data['audios'] is not None:
with open(f'{output_dir}/text', 'w') as utt2text, \
open(f'{output_dir}/segments', 'w') as segments, \
open(f'{output_dir}/utt2dur', 'w') as utt2dur, \
open(f'{output_dir}/wav.scp', 'w') as wavscp, \
open(f'{output_dir}/utt2subsets', 'w') as utt2subsets, \
open(f'{output_dir}/reco2dur', 'w') as reco2dur:
for long_audio in json_data['audios']:
try:
long_audio_path = os.path.realpath(os.path.join(input_dir, long_audio['path']))
aid = long_audio['aid']
segments_lists = long_audio['segments']
duration = long_audio['duration']
assert(os.path.exists(long_audio_path))
assert('opus' == long_audio['format'])
assert(16000 == long_audio['sample_rate'])
except AssertionError:
print(f'Warning: {aid} something is wrong, maybe AssertionError, skipped')
continue
except:
print(f'Warning: {aid} something is wrong, maybe the error path: {long_audio_path}, skipped')
continue
else:
wavscp.write(f'{aid}\t{long_audio_path}\n')
reco2dur.write(f'{aid}\t{duration}\n')
for segment_file in segments_lists:
try:
sid = segment_file['sid']
start_time = segment_file['begin_time']
end_time = segment_file['end_time']
dur = end_time - start_time
text = segment_file['text_tn']
segment_subsets = segment_file["subsets"]
except:
print(f'Warning: {segment_file} something is wrong, skipped')
continue
else:
utt2text.write(f'{sid}\t{text}\n')
segments.write(f'{sid}\t{aid}\t{start_time}\t{end_time}\n')
utt2dur.write(f'{sid}\t{dur}\n')
segment_sub_names = " " .join(segment_subsets)
utt2subsets.write(f'{sid}\t{segment_sub_names}\n')
def main():
args = get_args()
meta_analysis(args.input_json, args.output_dir)
if __name__ == '__main__':
main()
| 3,125 | 35.348837 | 105 |
py
|
GigaSpeech
|
GigaSpeech-main/utils/extract_subset_segments.py
|
#!/usr/bin/env python3
# coding=utf8
# Copyright 2022 Jiayu DU
'''
This tool is used to extract supervised segments from GigaSpeech,
segments are saved in .wav format, supervisions are saved in a simple .tsv file:
--- example tsv begin ---
ID AUDIO BEGIN DURATION TEXT
POD1000000004_S0000017 audio/POD1000000004_S0000017.wav 0 3.163 YOU KNOW TO PUT THIS STUFF TOGETHER
...
...
--- example tsv end ---
It can be, but should not be, used to extract large subsets such as L or XL (because it would be extremely slow).
'''
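# A hypothetical invocation (illustration only, the paths are made up):
#   python3 utils/extract_subset_segments.py --subset XS /data/GigaSpeech /data/gigaspeech_XS
# Extracted segments land under <dst_dir>/audio/<aid>/<sid>.wav and the
# supervisions under <dst_dir>/metadata.tsv.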
import os, sys
import argparse
import csv
from speechcolab.datasets.gigaspeech import GigaSpeech
import torchaudio
gigaspeech_punctuations = ['<COMMA>', '<PERIOD>', '<QUESTIONMARK>', '<EXCLAMATIONPOINT>']
gigaspeech_garbage_utterance_tags = ['<SIL>', '<NOISE>', '<MUSIC>', '<OTHER>']
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Save the audio segments into wav, and meta into tsv.')
parser.add_argument('--subset', choices = ['XS', 'S', 'M', 'L', 'XL', 'DEV', 'TEST'], default='XS', help='The subset name')
parser.add_argument('gigaspeech_dataset_dir', help='The GigaSpeech corpus directory')
    parser.add_argument('dst_dir', help='Output subset directory')
args = parser.parse_args()
os.makedirs(args.dst_dir, exist_ok = True)
gigaspeech = GigaSpeech(args.gigaspeech_dataset_dir)
subset = '{' + args.subset + '}'
with open(os.path.join(args.dst_dir, 'metadata.tsv'), 'w+', encoding='utf8') as fo:
csv_header_fields = ['ID', 'AUDIO', 'DURATION', 'TEXT']
csv_writer = csv.DictWriter(fo, delimiter='\t', fieldnames=csv_header_fields, lineterminator='\n')
csv_writer.writeheader()
for audio in gigaspeech.audios(subset):
aid = audio['aid']
audio_path = os.path.join(args.gigaspeech_dataset_dir, audio["path"])
audio_info = torchaudio.info(audio_path)
opus_sample_rate = audio_info.sample_rate
assert opus_sample_rate == 48000
nc = audio_info.num_channels
assert nc == 1
sample_rate = 16000
long_waveform, _ = torchaudio.load(audio_path)
long_waveform = torchaudio.transforms.Resample(opus_sample_rate, sample_rate)(long_waveform)
for segment in audio['segments']:
sid = segment['sid']
if subset not in segment['subsets']:
continue
text = segment['text_tn']
for punctuation in gigaspeech_punctuations:
text = text.replace(punctuation, '').strip()
text = ' '.join(text.split())
if text in gigaspeech_garbage_utterance_tags:
continue
begin = segment['begin_time']
duration = segment['end_time'] - segment['begin_time']
frame_offset = int(begin * sample_rate)
num_frames = int(duration * sample_rate)
waveform = long_waveform[0][frame_offset : frame_offset + num_frames] # mono
segment_path = os.path.join('audio', aid, f'{sid}.wav')
os.makedirs(os.path.join(args.dst_dir, os.path.dirname(segment_path)), exist_ok = True)
torchaudio.save(
os.path.join(args.dst_dir, segment_path),
waveform.unsqueeze(0),
sample_rate = sample_rate,
format = 'wav',
encoding = 'PCM_S',
bits_per_sample = 16,
)
utt = {'ID': segment['sid'], 'AUDIO': segment_path, 'DURATION': f'{duration:.4f}', 'TEXT': text }
csv_writer.writerow(utt)
| 3,742 | 39.247312 | 127 |
py
|
GigaSpeech
|
GigaSpeech-main/utils/opus_to_wav.py
|
# Copyright 2021 Xiaomi (Author:Yongqing Wang)
import os
import sys
import argparse
import re
def get_args():
parser = argparse.ArgumentParser(description="""
This script is used to convert opus file into wav file.""")
    parser.add_argument('--remove-opus', action='store_true', default=False,
                        help="""If true, remove opus files""")
parser.add_argument('opus_scp', help="""Input opus scp file""")
args = parser.parse_args()
return args
def convert_opus2wav(opus_scp, rm_opus):
with open(opus_scp, 'r') as oscp:
for line in oscp:
line = line.strip()
            utt, opus_path = re.split(r'\s+', line)
wav_path = opus_path.replace('.opus', '.wav')
cmd = f'ffmpeg -y -i {opus_path} -ac 1 -ar 16000 {wav_path}'
            ret = os.system(cmd)
            if ret != 0:
                sys.exit(f'Failed to run the cmd: {cmd}')
if rm_opus is True:
os.remove(opus_path)
def main():
args = get_args()
convert_opus2wav(args.opus_scp, args.remove_opus)
if __name__ == '__main__':
main()
| 1,021 | 23.926829 | 76 |
py
|
GigaSpeech
|
GigaSpeech-main/utils/gigaspeech_scoring.py
|
#!/usr/bin/env python3
import os
import argparse
conversational_filler = ['UH', 'UHH', 'UM', 'EH', 'MM', 'HM', 'AH', 'HUH', 'HA', 'ER', 'OOF', 'HEE' , 'ACH', 'EEE', 'EW']
unk_tags = ['<UNK>', '<unk>']
gigaspeech_punctuations = ['<COMMA>', '<PERIOD>', '<QUESTIONMARK>', '<EXCLAMATIONPOINT>']
gigaspeech_garbage_utterance_tags = ['<SIL>', '<NOISE>', '<MUSIC>', '<OTHER>']
non_scoring_words = conversational_filler + unk_tags + gigaspeech_punctuations + gigaspeech_garbage_utterance_tags
def asr_text_post_processing(text):
# 1. convert to uppercase
text = text.upper()
# 2. remove hyphen
# "E-COMMERCE" -> "E COMMERCE", "STATE-OF-THE-ART" -> "STATE OF THE ART"
text = text.replace('-', ' ')
# 3. remove non-scoring words from evaluation
remaining_words = []
for word in text.split():
if word in non_scoring_words:
continue
remaining_words.append(word)
return ' '.join(remaining_words)
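# A quick worked example of the post-processing above (illustrative only, not
# part of the original script):
#   asr_text_post_processing('um e-commerce is state-of-the-art <COMMA>')
#   -> 'E COMMERCE IS STATE OF THE ART'
# i.e. the text is upper-cased, hyphens become spaces, and conversational
# fillers, <UNK> tags, punctuation tags and garbage-utterance tags are dropped.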
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This script evaluates GigaSpeech ASR result via SCTK's tool sclite")
parser.add_argument('ref', type=str, help="sclite's standard transcription(trn) reference file")
parser.add_argument('hyp', type=str, help="sclite's standard transcription(trn) hypothesis file")
parser.add_argument('work_dir', type=str, help='working dir')
args = parser.parse_args()
if not os.path.isdir(args.work_dir):
os.mkdir(args.work_dir)
REF = os.path.join(args.work_dir, 'REF')
HYP = os.path.join(args.work_dir, 'HYP')
RESULT = os.path.join(args.work_dir, 'RESULT')
for io in [(args.ref, REF), (args.hyp, HYP)]:
with open(io[0], 'r', encoding='utf8') as fi, open(io[1], 'w+', encoding='utf8') as fo:
for line in fi:
line = line.strip()
if line:
cols = line.split()
text = asr_text_post_processing(' '.join(cols[0:-1]))
uttid_field = cols[-1]
print(F'{text} {uttid_field}', file=fo)
    os.system(F'sclite -r {REF} trn -h {HYP} trn -i swb | tee {RESULT}')  # GigaSpeech's uttid conforms to swb
| 2,186 | 39.5 | 121 |
py
|
MIED
|
MIED-main/setup.py
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="mied",
version="0.0.1",
author="Lingxiao Li",
description="Mollified interactive energy descent for constrained/unconstrained sampling",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.8',
)
| 626 | 27.5 | 94 |
py
|
MIED
|
MIED-main/mied/validators/metrics.py
|
'''
Metrics to evaluate samples:
1. With ground truth samples, we can compute
a. Wasserstein distance between mu and mu^*
b. KL(mu^* || mu)
2. With access to ground truth density (which is always the case), we can compute
a. KSD, directly applicable
b. KL(mu || mu^*)
'''
import torch
import numpy as np
import ot
from scipy import stats
from mied.solvers.ksdd import compute_ksd
from mied.utils.kernels import GaussianKernel
from mied.utils.batch_jacobian import compute_jacobian
def estimate_log_p(X, P):
'''
:param X: (B, D), samples used to build KDE
:param P: (B, D), samples to evaluate at
:return: density, (B,)
'''
kernel = stats.gaussian_kde(X.detach().cpu().numpy().T)
P_log_p = kernel.logpdf(P.detach().cpu().numpy().T)
return torch.from_numpy(P_log_p).to(P.device)
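# Note (added for clarity): estimate_log_p fits a Gaussian KDE to X and returns
# the KDE *log*-density evaluated at P. compute_metric below uses it so that,
# e.g., 'KL_ts' approximates KL(mu || mu^*) as mean_i[log q_hat(x_i) - log p*(x_i)],
# where q_hat is the KDE built from the source samples and p* the target density.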
def filter_samples(samples, filter_range):
if filter_range > 0:
mask = torch.logical_and(samples < filter_range,
samples > -filter_range).all(-1)
return samples[mask]
return samples
def batch_expected_diff_norm(X, Y, batch_size=1000):
'''
Compute E[||X-Y||] for energy distance.
:param X: (N, D)
:param Y: (M, D)
'''
total_size = Y.shape[0]
cur = 0
total = 0
while cur < total_size:
cur_size = min(total_size - cur, batch_size)
tmp = X.unsqueeze(1) - Y[cur:cur+cur_size].unsqueeze(0)
tmp = tmp.square().sum(-1).sqrt().sum()
total += tmp.item() / X.shape[0]
cur += cur_size
return total / total_size
def compute_metric(source_samples, target_problem, *,
metric,
refresh,
gt_samples,
gt_multiplier,
ot_lib='pol',
ksd_sigma=1.0,
filter_range=-1,
strip_last_n=-1):
'''
:param source_samples: (B, D), can be on any device
:param target_problem: an instance of ProblemBase
:return: a scalar of the computed metric
'''
source_samples = filter_samples(source_samples, filter_range)
if strip_last_n > 0:
source_samples = source_samples[:, :-strip_last_n]
if metric in ['sinkhorn', 'KL_st', 'chi2_st', 'energy_dist']:
if gt_samples is None:
target_samples = target_problem.sample_gt(
gt_multiplier * source_samples.shape[0],
refresh=refresh
)
target_samples = filter_samples(target_samples, filter_range)
assert(target_samples is not None)
else:
target_samples = gt_samples
if strip_last_n > 0:
target_samples = target_samples[:, :-strip_last_n]
if metric == 'sinkhorn':
if ot_lib == 'pol':
import ot
source_weights = (np.ones(source_samples.shape[0]) /
source_samples.shape[0])
target_weights = (np.ones(target_samples.shape[0]) /
target_samples.shape[0])
M = ot.dist(source_samples.cpu().detach().numpy(),
target_samples.cpu().detach().numpy())
W = ot.emd2(source_weights, target_weights, M)
return W
else:
assert(ot_lib == 'geomloss')
import geomloss
loss = geomloss.SamplesLoss('sinkhorn', blur=0.0)
return loss(source_samples, target_samples)
if metric == 'energy_dist':
# SS = (source_samples.unsqueeze(1) -
# source_samples.unsqueeze(0)).square().sum(-1).sqrt().mean()
# ST = (source_samples.unsqueeze(1) -
# target_samples.unsqueeze(0)).square().sum(-1).sqrt().mean()
# TT = (target_samples.unsqueeze(1) -
# target_samples.unsqueeze(0)).square().sum(-1).sqrt().mean()
# return (2 * ST - SS - TT).item()
SS = batch_expected_diff_norm(source_samples, source_samples)
ST = batch_expected_diff_norm(source_samples, target_samples)
TT = batch_expected_diff_norm(target_samples, target_samples)
return (2 * ST - SS - TT)
if metric in ['KL_ts', 'chi2_ts']:
source_log_p = estimate_log_p(source_samples,
source_samples)
target_log_p = target_problem.eval_log_p(source_samples)
if metric == 'KL_ts':
return (source_log_p - target_log_p).mean().item()
else:
return ((target_log_p - source_log_p).exp() - 1).square().mean().item()
if metric in ['KL_st', 'chi2_st']:
target_log_p = target_problem.eval_log_p(target_samples)
source_log_p = estimate_log_p(source_samples,
target_samples)
if metric == 'KL_st':
return (target_log_p - source_log_p).mean().item()
else:
return ((source_log_p - target_log_p).exp() - 1).square().mean().item()
    if metric == 'KL_sym':
        return (compute_metric(source_samples, target_problem,
                               metric='KL_st', refresh=refresh,
                               gt_samples=gt_samples,
                               gt_multiplier=gt_multiplier) +
                compute_metric(source_samples, target_problem,
                               metric='KL_ts', refresh=refresh,
                               gt_samples=gt_samples,
                               gt_multiplier=gt_multiplier))
    if metric == 'chi2_sym':
        return (compute_metric(source_samples, target_problem,
                               metric='chi2_st', refresh=refresh,
                               gt_samples=gt_samples,
                               gt_multiplier=gt_multiplier) +
                compute_metric(source_samples, target_problem,
                               metric='chi2_ts', refresh=refresh,
                               gt_samples=gt_samples,
                               gt_multiplier=gt_multiplier))
if metric == 'ksd':
kernel = GaussianKernel(sigma=ksd_sigma)
X = source_samples.detach().clone()
X.requires_grad_(True)
log_p = target_problem.eval_log_p(X)
grad_log_p = compute_jacobian(log_p.unsqueeze(-1),
X,
create_graph=False,
retain_graph=False)
grad_log_p = grad_log_p.squeeze(-2)
return compute_ksd(X, grad_log_p, kernel).item()
| 5,968 | 36.074534 | 83 |
py
|
MIED
|
MIED-main/mied/validators/particle.py
|
import torch
import numpy as np
import math
from mied.utils.h5_helpers import save_dict_h5
from mied.utils.batch_eval import batch_eval_index
from mied.validators.metrics import compute_metric
class ParticleValidator:
def __init__(self, *,
problem):
self.problem = problem
self.device = problem.device
def generate_density_grid(self, *,
density_bbox,
density_grid_len=500):
assert(density_bbox.shape[0] == 2)
x_linspace = torch.linspace(
density_bbox[0, 0],
density_bbox[0, 1],
density_grid_len, device=self.device)
y_linspace = torch.linspace(
density_bbox[1, 0],
density_bbox[1, 1],
density_grid_len, device=self.device)
grid_x, grid_y = torch.meshgrid(x_linspace, y_linspace, indexing='ij') # (L, L) x 2
grid = torch.stack([grid_x, grid_y], -1) # (L, L, 2)
grid_flat = grid.reshape(-1, 2) # (L*L, 2)
density = batch_eval_index(
lambda inds: self.problem.eval_log_p(
grid_flat[inds, :]),
grid_flat.shape[0],
no_tqdm=True,
batch_size=10000
)
density = torch.cat(density, 0) # (L*L)
density = density.reshape(grid.shape[:2]) # (L, L)
return {
'density_bbox': density_bbox.detach().cpu(),
'grid_x': grid_x.detach().cpu(),
'grid_y': grid_y.detach().cpu(),
'grid_density': density.detach().cpu()
}
def run(self, *,
samples,
updates=None,
save_path=None,
include_density=False,
metrics=[],
num_trial=1,
gt_samples=None,
gt_multiplier=10,
filter_range=-1,
strip_last_n=-1,
include_gt=False,
**kwargs):
result_dict = {}
result_dict.update({
'samples': samples.detach().cpu(),
})
if updates is not None:
result_dict.update({
'updates': updates.detach().cpu()
})
if include_gt:
assert(gt_samples is None)
target_samples = self.problem.sample_gt(
gt_multiplier * samples.shape[0],
refresh=False
)
result_dict['target_samples'] = target_samples.detach().cpu()
result_dict.update(
self.problem.custom_eval(samples)
)
if include_density:
result_dict.update(self.generate_density_grid(
density_bbox=kwargs['density_bbox']
))
for metric in metrics:
result_list = []
for trial in range(num_trial):
tmp = compute_metric(samples,
self.problem,
metric=metric,
gt_samples=gt_samples,
refresh=(num_trial > 1),
gt_multiplier=gt_multiplier,
filter_range=filter_range,
strip_last_n=strip_last_n)
result_list.append(tmp)
result_list = np.array(result_list)
# if metric in ['KL_st', 'KL_ts', 'chi2_st', 'chi2_ts']:
# tmp = math.log(abs(tmp))
result_dict[metric] = np.mean(result_list)
if num_trial > 1:
result_dict[metric + '_std'] = np.std(result_list)
if save_path is not None:
save_dict_h5(result_dict, save_path, create_dir=True)
return result_dict
| 3,751 | 32.20354 | 91 |
py
|
MIED
|
MIED-main/mied/solvers/ksdd.py
|
import torch
import numpy as np
from mied.utils.batch_jacobian import compute_jacobian
from mied.utils.kernels import GaussianKernel
from mied.solvers.particle_base import ParticleBasedSolver
def compute_ksd(X, grad_log_p, kernel):
'''
:param X: (B, D)
:param grad_log_p: (B, D)
:param kernel: an instance of KernelBase, assumed to be symmetric
:return: a scalar, the kernel stein discrepancy
'''
B, D = X.shape
X_ex1 = X.unsqueeze(1).expand(-1, B, -1).reshape(-1, D) # (BB, D)
X_ex2 = X.unsqueeze(0).expand(B, -1, -1).reshape(-1, D) # (BB, D)
score_ex1 = grad_log_p.unsqueeze(1).expand(-1, B, -1).reshape(-1, D)
score_ex2 = grad_log_p.unsqueeze(0).expand(B, -1, -1).reshape(-1, D)
k = kernel.eval(X_ex1, X_ex2) # (BB,)
grad_1_k = kernel.grad_1(X_ex1, X_ex2) # (BB, D)
grad_2_k = kernel.grad_1(X_ex2, X_ex1) # (BB, D)
div_2_grad_1_k = kernel.div_2_grad_1(X_ex1, X_ex2) # (BB,)
tmp = (score_ex1 * score_ex2).sum(-1) * k # (BB,)
tmp = tmp + (score_ex1 * grad_2_k).sum(-1)
tmp = tmp + (score_ex2 * grad_1_k).sum(-1)
tmp = tmp + div_2_grad_1_k
return tmp.mean()
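# Reference (added note): the computation above is the V-statistic estimate of
# the (squared) kernel Stein discrepancy,
#   KSD ~= (1/B^2) * sum_{i,j} u_p(x_i, x_j),  with
#   u_p(x, y) = s(x)^T k(x, y) s(y) + s(x)^T grad_y k(x, y)
#               + s(y)^T grad_x k(x, y) + div_x div_y k(x, y),
# where s = grad log p; each pairwise term is assembled from the expanded
# (B*B, D) tensors and averaged by tmp.mean().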
class KSDD(ParticleBasedSolver):
def __init__(self,
sigma=1.0,
**kwargs):
super().__init__(**kwargs)
self.kernel = GaussianKernel(sigma=sigma)
def compute_update(self, i, X):
'''
:return: (B, D)
'''
log_p = self.problem.eval_log_p(X) # (B,)
# Note: KSDD requires second-order derivatives of log_p.
grad_log_p = compute_jacobian(log_p.unsqueeze(-1), X,
create_graph=True, retain_graph=True)
grad_log_p = grad_log_p.squeeze(-2) # (B, D)
F = compute_ksd(X, grad_log_p, self.kernel)
self.last_F = F.item()
grad_F = torch.autograd.grad(F, X)[0] # (B, D)
return -grad_F
def custom_post_step(self, i):
return {
'KSD': self.last_F
}
def get_progress_msg(self):
return 'KSD: {:6f}, G_vio: {:6f}'.format(
self.last_F, self.projector.get_violation())
| 2,133 | 30.382353 | 75 |
py
|
MIED
|
MIED-main/mied/solvers/lmc.py
|
import torch
import numpy as np
from mied.solvers.particle_base import ParticleBasedSolver
from mied.utils.batch_jacobian import compute_jacobian
from mied.solvers.mirror_maps import BoxMap, BoxEntropicMap
class LMC(ParticleBasedSolver):
def __init__(self,
lmc_lr,
mirror_map,
**kwargs):
super().__init__(direct_update=True,
**kwargs)
assert(self.optimizer_conf['cls'] == 'SGD' and
self.optimizer_conf['lr'] == 1.0)
self.lr = lmc_lr
if mirror_map == 'box':
self.mirror_map = BoxMap()
elif mirror_map == 'box_entropic':
self.mirror_map = BoxEntropicMap()
else:
raise Exception(f'Unknown mirror map: {mirror_map}')
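    # Added note: compute_update below performs one (mirror) Langevin Monte
    # Carlo step in the dual space. With phi the mirror map and Z = nabla_phi(X),
    #   Z_new = Z + lr * grad log p(X) + sqrt(2 * lr) * (nabla^2 phi(X))^{1/2} xi,
    #   X_new = nabla_phi_star(Z_new),   xi ~ N(0, I).
    # Since direct_update=True, the returned X_new replaces the particles directly.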
def compute_update(self, i, X):
B, D = X.shape
log_p = self.problem.eval_log_p(X) # (B,)
grad_log_p = compute_jacobian(log_p.unsqueeze(-1), X,
create_graph=False, retain_graph=False)
grad_log_p = grad_log_p.squeeze(-2) # (B, D)
Z = self.mirror_map.nabla_phi(X) # (B, D)
xi = torch.randn([B, D], device=X.device) # (B, D)
drift = np.sqrt(2 * self.lr) * self.mirror_map.nabla2_phi_sqrt_mul(X, xi) # (B, D)
Z_new = Z + self.lr * grad_log_p + drift
return self.mirror_map.nabla_phi_star(Z_new)
| 1,405 | 32.47619 | 90 |
py
|
MIED
|
MIED-main/mied/solvers/no_op_projector.py
|
import torch
from mied.solvers.projector_base import ProjectorBase
'''
A no-op projector for unconstrained problems: the update direction is passed
through unchanged and the reported constraint violation is always zero.
'''
class NoOpProjector(ProjectorBase):
def __init__(self):
pass
def step(self, X, update, problem):
return update
def get_violation(self):
return 0.0
| 343 | 17.105263 | 61 |
py
|
MIED
|
MIED-main/mied/solvers/particle_base.py
|
from abc import ABC, abstractmethod
import torch
import numpy as np
from pathlib import Path
from tqdm import trange
from mied.utils.batch_hessian import compute_hessian
class ParticleBasedSolver(ABC):
def __init__(self, *,
problem,
projector,
num_particle,
precondition,
optimizer_conf,
direct_update=False,
val_freq,
ckpt_path,
logger_fn):
'''
Abstract base class for particle-based solvers that differ only in
the updates.
The default parameters are set in ExperimentCoordinator class.
:param problem: the constrained sampling problem
:param projector: handler of the constraints
:param num_particle: number of particles
'''
self.problem = problem
self.projector = projector
self.precondition = precondition
self.direct_update = direct_update
self.val_freq = val_freq
self.ckpt_path = ckpt_path
self.logger_fn = logger_fn
self.particles = self.problem.sample_prior(num_particle)
self.particles.requires_grad_(True)
self.optimizer_conf = optimizer_conf
self.create_optimizer()
self.init_global_step = 0
def create_optimizer(self):
conf = self.optimizer_conf
if conf['cls'] == 'Adam':
self.optimizer = torch.optim.Adam(
[self.particles], lr=conf['lr'],
betas=(conf['beta1'], conf['beta2']),
)
elif conf['cls'] == 'LBFGS':
self.optimizer = torch.optim.LBFGS(
[self.particles], lr=conf['lr'])
elif conf['cls'] == 'SGD':
self.optimizer = torch.optim.SGD(
[self.particles], lr=conf['lr'],
momentum=conf['beta1']
)
elif self.optimizer_conf['cls'] == 'RMSprop':
self.optimizer = torch.optim.RMSprop(
[self.particles], lr=conf['lr'],
alpha=conf['beta1']
)
else:
raise Exception(f'Unknown optimizer class {self.optimizer_conf["cls"]}')
def load_ckpt(self):
'''
:return: the current global step.
'''
p = Path(self.ckpt_path)
if not p.exists():
print('No checkpoint file found. Use default initialization.')
self.init_global_step = 0
return
ckpt = torch.load(self.ckpt_path)
global_step = ckpt['global_step']
self.particles = ckpt['particles']
self.particles.to(self.problem.device)
self.particles.requires_grad_(True)
self.create_optimizer()
self.optimizer.load_state_dict(ckpt['optimizer_state_dict'])
print('Loading solver from {} at step {}...'.format(p, global_step))
np.random.set_state(ckpt['np_rng_state'])
torch.set_rng_state(ckpt['torch_rng_state'])
self.init_global_step = global_step
def save_ckpt(self, global_step):
print('Saving solver at global step {}...'.format(global_step))
p = Path(self.ckpt_path)
p.parent.mkdir(parents=True, exist_ok=True)
all_dict = {
'optimizer_state_dict': self.optimizer.state_dict(),
'particles': self.particles,
'global_step': global_step,
'np_rng_state': np.random.get_state(),
'torch_rng_state': torch.get_rng_state()
}
torch.save(all_dict, self.ckpt_path)
@abstractmethod
def compute_update(self, i, X):
'''
:param i: current step
:param X: particles (after reparameterization)
:return: (B, in_dim),
* if direct_update = False, then update directions so that
x_new = x_old + eta * x_update,
where eta is modulated by the optimizer and the projector.
* if direct_update = True, then x_new = x_update
'''
pass
def step(self, i):
if self.direct_update:
# Skip using optimizer (e.g. in LMC).
self.particles = self.compute_update(i, self.particles).detach()
self.particles.requires_grad_(True)
return
self.optimizer.zero_grad()
X = self.problem.reparametrize(self.particles)
# Compute update w.r.t. X.
update = self.compute_update(i, X).detach() # (B, D)
# Optional preconditioning.
if self.precondition:
log_p_fn = lambda X: self.problem.eval_log_p(X)
hess = compute_hessian(log_p_fn, X) # (B, D, D)
update = torch.linalg.lstsq(-hess, update.unsqueeze(-1)).solution # (B, D)
update = update.squeeze(-1)
# The projector may modify update.
update = self.projector.step(X,
update=update,
problem=self.problem)
# Manual chain rule.
if self.particles.grad is not None:
self.particles.grad.zero_()
X.backward(gradient=update, inputs=self.particles)
update = self.particles.grad.detach()
self.particles.grad = -update.detach()
self.optimizer.step()
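    # Added note: X.backward(gradient=update, ...) above pulls the update
    # direction back through the reparameterization (a vector-Jacobian product),
    # and the sign flip on particles.grad makes optimizer.step(), which descends
    # on the stored gradient, move the particles along the pulled-back +update.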
def get_samples(self):
'''
Obtain the resulting samples.
'''
return self.problem.reparametrize(self.particles)
def run(self, *,
num_itr,
ckpt_save_freq=-1,
post_step_fn=None):
if ckpt_save_freq == -1:
ckpt_save_freq = num_itr
loop_range = trange(self.init_global_step, num_itr)
for i in loop_range:
self.step(i)
if post_step_fn is not None:
post_step_fn(i)
self.post_step(i)
loop_range.set_description(self.get_progress_msg())
global_step = i + 1
if self.ckpt_path and ckpt_save_freq:
if global_step % ckpt_save_freq == 0:
self.save_ckpt(global_step)
    def post_step(self, i):
        '''
        Stuff to do after each step, e.g., logging.
        '''
        if self.logger_fn is not None:
if (i + 1) % self.val_freq == 0:
log_dict = {
'step': i + 1,
'violation': self.projector.get_violation(),
}
log_dict.update(self.problem.custom_post_step(self.particles))
log_dict.update(self.custom_post_step(i))
self.logger_fn(log_dict)
def compute_variance(self):
samples = self.get_samples() # (B, D)
mean = samples.mean(0) # (D,)
dist = (samples - mean).square().sum(-1) # (B,)
return dist.mean()
def compute_min_separation(self):
samples = self.get_samples() # (B, D)
dist = (samples.unsqueeze(1) - samples.unsqueeze(0)).square().sum(-1) # (B, B)
val, _ = torch.topk(dist, 2, largest=False, dim=-1) # (B, 2)
return (val[:, 1].min() + 1e-8).sqrt()
def get_progress_msg(self):
return 'G_vio: {:6f}'.format(self.projector.get_violation())
def custom_post_step(self, i):
return {}
| 7,284 | 31.092511 | 86 |
py
|
MIED
|
MIED-main/mied/solvers/dynamic_barrier.py
|
import torch
from mied.solvers.projector_base import ProjectorBase
from mied.utils.batch_jacobian import compute_jacobian
from mied.utils.proj_polyhedra import proj_polyhedra
'''
Handle multiple constraints by projecting the update direction onto the
dynamic-barrier polyhedron using Dykstra's algorithm.
'''
class DynamicBarrier(ProjectorBase):
def __init__(self, *,
alpha_db=1.0,
merge_eq=True,
max_proj_itr=20):
self.alpha = alpha_db
self.merge_eq = merge_eq
self.max_proj_itr = max_proj_itr
self.violation = 0.0
def step(self, X, update, problem):
G = problem.eval_ineq(X) # (B, N_ineq)
H = problem.eval_eq(X) # (B, N_eq)
B = X.shape[0]
if G is None and H is None:
v = update
else:
if G is None:
G = torch.zeros([B, 0]).to(X)
if H is None:
H = torch.zeros([B, 0]).to(X)
if self.merge_eq:
G = torch.cat([G, H.square().sum(-1, keepdim=True)], -1)
else:
G = torch.cat([G, H.square()], -1)
self.violation = G.relu().sum()
grad_G = compute_jacobian(G, X, create_graph=True,
retain_graph=True) # (B, N_ineq, D)
barrier = -self.alpha * G # (B, N_ineq)
# Constraints are grad_G^T v <= barrier
v = proj_polyhedra(update, grad_G, barrier,
max_num_itr=self.max_proj_itr) # (B, D)
return v
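    # Added note: for each particle x, the returned v is the Euclidean
    # projection of `update` onto the polyhedron
    #   { v : grad g_i(x)^T v <= -alpha * g_i(x) for every constraint i },
    # where equality constraints h are folded in as g = ||h||^2 (or h^2 per
    # component when merge_eq is False). With no constraints, v = update.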
def get_violation(self):
return self.violation
| 1,597 | 29.150943 | 73 |
py
|
MIED
|
MIED-main/mied/solvers/mirror_maps.py
|
from abc import ABC, abstractmethod
import torch
def safe_log(x):
# return torch.log(torch.maximum(1e-32, x))
# return torch.log(torch.maximum(1e-8, x))
return torch.log(x + 1e-32)
class MirrorMapBase(ABC):
@abstractmethod
def phi(self, theta):
pass
@abstractmethod
def nabla_phi(self, theta):
pass
@abstractmethod
def nabla_phi_star(self, eta):
pass
@abstractmethod
def nabla2_phi_sqrt_mul(self, theta, rhs):
pass
class BoxMap(MirrorMapBase):
def phi(self, theta):
return (-safe_log(1-theta) - safe_log(1+theta)).sum(-1)
def nabla_phi(self, theta):
return 1 / (1 - theta) - 1 / (1 + theta)
def nabla_phi_star(self, eta):
return ((1 + eta.square()).sqrt() - 1) / eta
def nabla2_phi_sqrt_mul(self, theta, rhs):
diag = 1 / (1-theta).square() + 1 / (1+theta).square()
diag = diag.sqrt()
return diag * rhs
class BoxEntropicMap(MirrorMapBase):
def phi(self, theta):
return (1 + theta) * safe_log(1 + theta) + (1 - theta) * safe_log(1 - theta)
def nabla_phi(self, theta):
return safe_log(1 + theta) - safe_log(1 - theta)
def nabla_phi_star(self, eta):
return 1 - 2 / (eta.exp() + 1)
def nabla2_phi_sqrt_mul(self, theta, rhs):
diag = 1 / (1-theta) + 1 / (1+theta)
diag = diag.sqrt()
return diag * rhs
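# Minimal sanity check (an added sketch, not part of the original module):
# nabla_phi_star should invert nabla_phi on the open box, for BoxMap away
# from 0 and for BoxEntropicMap everywhere in (-1, 1).
if __name__ == '__main__':
    theta = torch.tensor([-0.9, -0.5, -0.1, 0.2, 0.6, 0.9])
    for mirror_map in [BoxMap(), BoxEntropicMap()]:
        recovered = mirror_map.nabla_phi_star(mirror_map.nabla_phi(theta))
        print(type(mirror_map).__name__,
              bool(torch.allclose(recovered, theta, atol=1e-4)))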
| 1,416 | 22.616667 | 84 |
py
|
MIED
|
MIED-main/mied/solvers/svgd.py
|
import torch
import numpy as np
from mied.solvers.particle_base import ParticleBasedSolver
from mied.utils.batch_jacobian import compute_jacobian
def svgd_update(P, grad_log_p, kernel='gaussian', kernel_h=-1,
riesz_s=-1, riesz_eps=1e-4):
'''
SVGD update with Gaussian kernel.
:param P: (B, D)
:return: update direction, (B, D)
'''
assert(not P.isnan().any())
assert(not grad_log_p.isnan().any())
n = P.shape[0]
P_diff = P.unsqueeze(1) - P.unsqueeze(0) # (B, B, D)
dist_sqr = P_diff.square().sum(-1) # (B, B)
if kernel == 'gaussian':
if kernel_h < 0:
mean_dist_sqr = dist_sqr.reshape(-1).median()
h = mean_dist_sqr / (np.log(n) + 1e-6)
else:
h = kernel_h
K = torch.exp(- dist_sqr / (h + 1e-8)) # (B, B)
grad_K = 2 * K.unsqueeze(-1) * P_diff / (h + 1e-8) # (B, B, D)
else:
assert(kernel == 'riesz')
if riesz_s < 0:
riesz_s = P.shape[-1] + 1.0
K = torch.pow(dist_sqr + riesz_eps, -riesz_s / 2)
grad_K = (riesz_s/2) * torch.pow(
dist_sqr + riesz_eps, -riesz_s / 2 - 1).unsqueeze(-1) * 2 * P_diff
'''
phi(x_i) = 1/n * \sum_j k(x_i, x_j) grad_log_p(x_j) + grad_K_x_j(x_i, x_j)
'''
Phi = K.unsqueeze(-1) * grad_log_p.unsqueeze(0) + grad_K # (B, B, D)
Phi = Phi.mean(1) # (B, D)
assert(not Phi.isnan().any())
return Phi
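# Added note: when kernel_h < 0, the Gaussian bandwidth follows the usual
# median heuristic, h = median(||x_i - x_j||^2) / log(n) (the variable named
# mean_dist_sqr above actually holds that median). The 'riesz' option instead
# uses k(x, y) = (||x - y||^2 + riesz_eps)^(-s/2) with s defaulting to d + 1.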
class SVGD(ParticleBasedSolver):
def __init__(self,
kernel_h=-1,
kernel='gaussian',
**kwargs):
super().__init__(**kwargs)
self.kernel = kernel
self.kernel_h = kernel_h
def compute_update(self, i, X):
log_p = self.problem.eval_log_p(X) # (B,)
grad_log_p = compute_jacobian(log_p.unsqueeze(-1), X,
create_graph=False, retain_graph=False)
grad_log_p = grad_log_p.squeeze(-2)
update = svgd_update(X, grad_log_p, kernel=self.kernel,
kernel_h=self.kernel_h)
self.last_update_norm = update.square().sum().item()
P_diff = X.unsqueeze(1) - X.unsqueeze(0) # (B, B, D)
dist_sqr = P_diff.square().sum(-1) # (B, B)
self.last_med_dist = dist_sqr.reshape(-1).median().sqrt().item()
return update
def custom_post_step(self, i):
return {
'Update norm': self.last_update_norm,
'Median': self.last_med_dist,
}
def get_progress_msg(self):
return 'Norm: {:6f}, G_vio: {:6f}'.format(
self.last_update_norm, self.projector.get_violation())
| 2,656 | 29.193182 | 78 |
py
|
MIED
|
MIED-main/mied/solvers/projector_base.py
|
from abc import ABC, abstractmethod
import torch
class ProjectorBase(ABC):
def __init__(self):
pass
    @abstractmethod
    def step(self, X, update, problem):
        '''
        Project the update directions `update` for the particles X onto the
        constraints given by the problem and return the projected update.
        '''
        pass
@abstractmethod
def get_violation(self):
pass
| 422 | 19.142857 | 77 |
py
|
MIED
|
MIED-main/mied/solvers/mied.py
|
import torch
import numpy as np
import math
from mied.solvers.particle_base import ParticleBasedSolver
def log_exp_diff(a, b):
'''
Compute log|e^a - e^b| * sign(a-b)
:param a, b: torch scalars
'''
if a > b:
return a + torch.log(1 - torch.exp(b - a))
else:
return -(b + torch.log(1 - torch.exp(a - b)))
class MIED(ParticleBasedSolver):
def __init__(self, *,
kernel,
eps,
riesz_s,
alpha_mied,
include_diag,
diag_mul=1.3,
**kwargs):
'''
Descent on energy
E(\mu) = \iint \phi_\eps(x-y) (p(x)p(y))^{-\alpha} \dd\mu(x)\dd\mu(y)
:param kernel: ['riesz', 'gaussian', 'laplace']
:param eps: epsilon in the kernel
:param include_diag: ['ignore', 'include', 'diag_only', 'nnd']
'''
super().__init__(**kwargs)
self.device = self.problem.device
if kernel in ['gaussian', 'laplace']:
assert(eps >= 1e-6)
embed_dim = self.problem.get_embed_dim()
self.embed_dim = embed_dim
self.alpha = alpha_mied
if kernel == 'riesz' and riesz_s < 0:
assert(self.alpha >= 0.5) # requirement for hypersingular riesz energy
riesz_s = 2 * self.alpha * embed_dim + 1e-4
self.kernel = kernel
self.eps = eps
self.riesz_s = riesz_s
self.include_diag = include_diag
self.diag_mul = diag_mul
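    # Added note: compute_energy below returns the *log* of the discrete
    # mollified interaction energy
    #   E_N = sum_{i != j} phi_eps(x_i - x_j) * (p(x_i) * p(x_j))^(-alpha)
    # (the diagonal i == j is included or rescaled depending on include_diag),
    # evaluated stably as a logsumexp over the pairwise terms
    #   log phi_eps(x_i - x_j) - alpha * (log p(x_i) + log p(x_j)).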
def compute_energy(self, X):
'''
:param X: (B, D)
:return: a scalar, the weighted riesz energy
'''
log_p = self.problem.eval_log_p(X) # (B,)
B = X.shape[0]
diff = X.unsqueeze(1) - X.unsqueeze(0) # (B, B, D)
diff_norm_sqr = diff.square().sum(-1) # (B, B)
if self.include_diag == 'nnd_scale':
vals, _ = torch.topk(diff_norm_sqr, 2, dim=-1, largest=False)
vals = vals.detach()[:, 1]
# Use \phi(h_i / (1.3d)^{1/d}) for the diagonal term.
vals = vals / math.pow(self.diag_mul * self.embed_dim, 2.0 / self.embed_dim)
diff_norm_sqr = diff_norm_sqr + torch.diag(vals)
if self.kernel == 'gaussian':
# \phi(x-y) = \exp(-||x-y||^2/(2 * eps))
tmp = -diff_norm_sqr / (2 * self.eps)
elif self.kernel == 'laplace':
tmp = -(diff_norm_sqr + 1e-10).sqrt() / self.eps
else:
assert(self.kernel == 'riesz')
log_dist_sqr = (diff_norm_sqr + self.eps).log() # (B, B)
tmp = log_dist_sqr * -self.riesz_s / 2
tmp2 = (log_p.unsqueeze(1) + log_p.unsqueeze(0)) # (B, B)
tmp2 = tmp2 * -self.alpha # (B, B)
tmp = tmp + tmp2
        # Keep the off-diagonal pairwise terms; add the diagonal back in unless
        # include_diag == 'ignore'.
        mask = torch.logical_not(
            torch.eye(B, device=X.device, dtype=torch.bool))  # (B, B)
        if self.include_diag != 'ignore':
            mask = torch.logical_or(mask,
                                    torch.eye(B, device=X.device,
                                              dtype=torch.bool))
mask = mask.reshape(-1)
tmp = tmp.reshape(-1)
tmp = torch.masked_select(tmp, mask)
energy = torch.logsumexp(tmp, 0) # scalar
# if self.include_diag in ['include', 'nnd']:
# energy = energy + -2 * math.log(B)
# else:
# energy = energy + -2 * math.log(B - 1)
return energy
def step(self, i):
if self.optimizer_conf['cls'] == 'LBFGS':
def closure():
self.optimizer.zero_grad()
X = self.problem.reparametrize(self.particles)
# Subclass must have a compute_energy function.
F = self.compute_energy(X)
self.last_F = F.item()
F.backward()
return F
self.optimizer.step(closure)
else:
super().step(i)
def compute_update(self, i, X):
'''
:return: (B, D)
'''
F = self.compute_energy(X) # scalar
self.last_F = F.item()
grad_F = torch.autograd.grad(F, X)[0] # (B, D)
return -grad_F
def custom_post_step(self, i):
return {
'Riesz energy': self.last_F
}
def get_progress_msg(self):
return 'E: {:6f}, G_vio: {:6f}'.format(self.last_F,
self.projector.get_violation())
| 4,561 | 29.824324 | 88 |
py
|
MIED
|
MIED-main/mied/solvers/ipd.py
|
import torch
import numpy as np
from mied.solvers.particle_base import ParticleBasedSolver
from mied.utils.batch_jacobian import compute_jacobian
'''
Independent particle descent, a dumb baseline.
'''
class IPD(ParticleBasedSolver):
def __init__(self,
**kwargs):
super().__init__(**kwargs)
def compute_update(self, i, X):
log_p = self.problem.eval_log_p(X) # (B,)
self.last_log_p = log_p.mean()
grad_log_p = compute_jacobian(log_p.unsqueeze(-1), X,
create_graph=False, retain_graph=False)
update = grad_log_p.squeeze(1)
return update
def get_progress_msg(self):
return 'log_p: {:6f}, G_vio: {:6f}'.format(
self.last_log_p, self.projector.get_violation())
| 798 | 25.633333 | 77 |
py
|
MIED
|
MIED-main/mied/problems/problem_base.py
|
from abc import ABC, abstractmethod
import torch
from mied.utils.batch_jacobian import compute_jacobian
class ProblemBase(ABC):
def __init__(self, *,
device,
in_dim):
'''
A problem describes the sampling problem with unnormalized density
p(x) and constraints g(x) <= 0, h(x) = 0.
:param device: used to generate ambient samples
:param in_dim: ambient dimension
'''
self.device = device
self.in_dim = in_dim
@abstractmethod
def sample_prior(self, batch_size):
'''
Sample prior particles (before applying reparameterization).
'''
pass
@abstractmethod
def get_embed_dim(self):
pass
@abstractmethod
def eval_log_p(self, P):
'''
Evaluate the log density.
- For sampling, can ignore the constant.
- For evaluation purpose however the constant should be included
if possible.
:param P: positions, tensor of size (batch_size, in_dim)
:return: (batch_size,)
'''
pass
def sample_gt(self, batch_size, refresh):
'''
Implement this in cases where the problem can be sampled (for evaluation).
'''
return None
def reparametrize(self, Z):
'''
:param Z: (B, Z)
:return: (B, D)
'''
return Z
def eval_eq(self, P):
'''
Evaluate the constraints h(x).
:param P: positions, tensor of size (batch_size, in_dim)
:return: (batch_size, num_eq)
'''
return None
def eval_ineq(self, P):
'''
Evaluate the constraints g(x).
:param P: positions, tensor of size (batch_size, in_dim)
:return: (batch_size, num_ineq)
'''
return None
def custom_eval(self, samples):
return {}
def custom_post_step(self, samples):
return {}
| 1,956 | 20.988764 | 82 |
py
|
MIED
|
MIED-main/mied/problems/logistics.py
|
import torch
import torch.distributions
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
import numpy as np
from tqdm import tqdm
import scipy.io
from mied.problems.problem_base import ProblemBase
class BayesianLogistics(ProblemBase):
def __init__(self, *,
device,
data_path,
data_name='banana',
exp_lambda=0.01,
split_seed=42,
batch_size=50):
self.exp_lambda = exp_lambda
self.exp_dist = torch.distributions.Exponential(exp_lambda)
data = scipy.io.loadmat(data_path)
if data_name == 'covtype':
X = torch.from_numpy(data['covtype'][:, 1:])
Y = torch.from_numpy(data['covtype'][:, 0])
Y[Y == 2] = 0
self.use_batch = True
else:
X = torch.from_numpy(data[data_name]['x'][0][0]) # NxM
Y = torch.from_numpy(data[data_name]['t'][0][0]) # Nx1
Y = Y.squeeze(-1) # N
Y[Y == -1] = 0
self.use_batch = False
dataset = TensorDataset(X, Y)
N, self.M = X.shape
N_train = int(N * 0.8)
N_test = N - N_train
self.train_dset, self.test_dset = torch.utils.data.random_split(
dataset, [N_train, N_test],
generator=torch.Generator().manual_seed(split_seed))
# Always use batch for test.
self.test_dl = DataLoader(self.test_dset,
batch_size=batch_size,
shuffle=False)
if self.use_batch:
self.train_dl = DataLoader(self.train_dset,
batch_size=batch_size,
shuffle=True)
self.train_dl_itr = iter(self.train_dl)
else:
# Otherwise put everything onto device.
self.train_X, self.train_Y = self.train_dset[:]
self.train_X = self.train_X.to(device)
self.train_Y = self.train_Y.to(device)
self.dim = self.M + 1
super().__init__(device=device,
in_dim=self.dim)
def mcmc(self, num_warmup, num_sample, *,
log_file=None):
'''
Generate posterior samples using MCMC to serve as ground truth.
'''
import jax
import jax.numpy as jnp
import numpyro
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS
def model(data, labels):
alpha = numpyro.sample('alpha', dist.Exponential(self.exp_lambda))
W = numpyro.sample('W', dist.Normal(jnp.zeros(self.M), 1.0 / alpha))
logits = jnp.sum(W * data, axis=-1)
return numpyro.sample('obs', dist.Bernoulli(logits=logits), obs=labels)
data, labels = self.train_dset[:]
data = data.numpy()
labels = labels.numpy()
mcmc = MCMC(NUTS(model=model), num_warmup=num_warmup,
num_samples=num_sample)
mcmc.run(jax.random.PRNGKey(0), data, labels)
from contextlib import ExitStack, redirect_stdout
samples = mcmc.get_samples()
W = torch.from_numpy(np.array(samples['W']))
alpha = torch.from_numpy(np.array(samples['alpha'])).log()
P = torch.cat([W, alpha.unsqueeze(-1)], -1)
test_acc = self.eval_test_accurarcy(P.to(self.device))
with ExitStack() as stack:
if log_file is not None:
f = stack.enter_context(open(log_file, 'w'))
stack.enter_context(redirect_stdout(f))
mcmc.print_summary()
print('MCMC test accuracy: {}'.format(test_acc))
return P
def sample_prior(self, batch_size):
alpha = self.exp_dist.sample([batch_size, 1]).to(self.device)
W = torch.randn([batch_size, self.M],
device=self.device) / alpha.sqrt()
return torch.cat([W, alpha.log()], -1)
def get_embed_dim(self):
return self.dim
def eval_log_p(self, P):
if self.use_batch:
            try:
                X, Y = next(self.train_dl_itr)
            except StopIteration:
                self.train_dl_itr = iter(self.train_dl)
                X, Y = next(self.train_dl_itr)
X = X.to(self.device)
Y = Y.to(self.device)
else:
X, Y = (self.train_X, self.train_Y)
W = P[:, :-1] # BxM
alpha = P[:, -1].exp() # B
log_p = -(W.square().sum(-1) * alpha / 2)
log_p = log_p - self.exp_lambda * alpha
out_logit = (W.unsqueeze(1) * X.unsqueeze(0)).sum(-1) # BxN
log_p_data = -F.binary_cross_entropy_with_logits(
out_logit,
Y.unsqueeze(0).expand(W.shape[0], -1).float(),
reduction='none')
log_p_data = log_p_data.sum(-1) # B
log_p = log_p + log_p_data
return log_p
def eval_test_accurarcy(self, P):
W = P[:, :-1] # BxM
alpha = P[:, -1].exp() # B
total_correct = 0
total_test = 0
for test_batch in self.test_dl:
X, Y = test_batch
X = X.to(self.device) # NxM
Y = Y.to(self.device) # N
pred = (W.unsqueeze(1) * X.unsqueeze(0)).sum(-1) # BxN
pred = pred.sigmoid() # BxN
# We average pred from all particles before computing accuracy.
total_correct += (Y == (pred.mean(0) > 0.5)).sum()
total_test += X.shape[0]
return total_correct / total_test
def custom_post_step(self, P):
return {
'train_log_p': self.eval_log_p(P).mean(),
'test_acc': self.eval_test_accurarcy(P)
}
| 5,753 | 32.649123 | 83 |
py
|
MIED
|
MIED-main/mied/problems/analytical_problems.py
|
import torch
import numpy as np
import math
from abc import ABC, abstractmethod
'''
bbox corresponds to the dimension of the variable (=dim) which is
not the same as the intrinsic dimension.
'''
def sample_simplex(dim, batch_size, device):
samples = torch.from_numpy(np.random.dirichlet(
torch.ones([dim]) * 5, size=batch_size)).float().to(device)
return samples
'''
Below are reparameterization options.
reparam_fn always come together with eq_fn and ineq_fn,
and prior_sample_fn.
'''
def id_reparam(dim):
return {
'eq_fn': None,
'ineq_fn': None,
'reparam_fn': None,
'prior_sample_fn': (lambda B, device:
torch.randn([B, dim], device=device)),
}
def box_id_reparam(dim):
return {
'eq_fn': None,
'ineq_fn': lambda X: torch.cat([-1 - X, X - 1], -1),
'reparam_fn': None,
'prior_sample_fn': (lambda B, device:
torch.rand([B, dim], device=device) - 0.5),
}
def box_tanh_reparam(dim):
return {
'eq_fn': None,
'ineq_fn': None,
'reparam_fn': lambda Z: torch.tanh(Z),
'prior_sample_fn': (lambda B, device:
torch.atanh(torch.rand([B, dim], device=device) - 0.5)),
}
def box_mirror_reparam(dim, entropic=False):
if entropic:
def nabla_psi(X):
return torch.log(1 + X) - torch.log(1 - X)
def nabla_psi_star(Z):
return 1 - 2 / (Z.exp() + 1)
else:
def nabla_psi(X):
return (1 / (1 - X)) - (1 / (1 + X))
def nabla_psi_star(Z):
return ((1 + Z**2).sqrt() - 1) / Z
return {
'eq_fn': None,
'ineq_fn': None,
'reparam_fn': lambda Z: nabla_psi_star(Z),
'prior_sample_fn': (lambda B, device:
nabla_psi(torch.rand([B, dim], device=device) - 0.5)),
}
def sphere_reparam(dim):
return {
'eq_fn': lambda X: X.square().sum(-1, keepdim=True) - 1,
'ineq_fn': None,
'reparam_fn': None,
'prior_sample_fn': (lambda B, device:
torch.randn([B, dim], device=device)),
}
def heart_id_reparam():
return {
'eq_fn': None,
'ineq_fn': lambda X: ((X[:,0]**2+X[:,1]**2-1)**3-(X[:,0]**2)*(X[:,1]**3)).unsqueeze(-1),
'reparam_fn': None,
'prior_sample_fn': (lambda B, device:
torch.rand([B, 2], device=device)),
}
def period_id_reparam():
return {
'eq_fn': None,
'ineq_fn': lambda X: torch.cat([
((torch.cos(3 * np.pi * X[:, 0]) + torch.cos(3 * np.pi * X[:, 1])).square() - 0.3).unsqueeze(-1),
-1-X,
X-1], -1),
'reparam_fn': None,
'prior_sample_fn': (lambda B, device:
0.5+0.5*torch.rand([B, 2], device=device)),
}
def simplex_id_reparam(dim):
return {
'eq_fn': lambda X: (X.sum(-1, keepdim=True) - 1),
'ineq_fn': lambda X: -X,
'reparam_fn': None,
'prior_sample_fn': lambda B, device: sample_simplex(
dim, B, device=device),
}
def simplex_pos_sum_one_reparam(dim):
return {
'eq_fn': lambda X: (X.sum(-1, keepdim=True) - 1),
'ineq_fn': None,
'reparam_fn': lambda Z: Z.square(),
'prior_sample_fn': (
lambda b, device: sample_simplex(
dim, b, device=device).sqrt()),
}
def simplex_pos_sum_one_ineq_reparam(dim):
return {
'eq_fn': None,
'ineq_fn': lambda X: torch.stack([X.sum(-1) - 1, 1 - X.sum(-1)], -1),
'reparam_fn': lambda Z: Z.square(),
'prior_sample_fn': (
lambda B, device: sample_simplex(
dim, B, device=device).sqrt()),
}
def simplex_softmax_reparam(dim):
return {
'eq_fn': None,
'ineq_fn': None,
'reparam_fn': lambda Z: torch.nn.functional.softmax(Z, dim=-1),
'prior_sample_fn': (
lambda B, device: sample_simplex(
dim, B, device=device).log()),
}
# def cube_constraint(dim, bound=[0, 1]):
# return {
# 'ineq_fn': lambda X: torch.cat([bound[0] - X, X - bound[1]], -1),
# 'bbox': torch.tensor([bound] * dim),
# 'embed_dim': dim,
# # 'reparam_dict': {
# # 'reparam_fn': lambda Z: torch.sigmoid(Z) * (bound[1] - bound[0]) + bound[0],
# # 'prior_sample_fn': lambda B, device: torch.randn(
# # [B, dim], device=device),
# # }
# }
# def sphere_constraint(dim):
# return {
# 'eq_fn': lambda X: X.square().sum(-1, keepdim=True) - 1,
# 'bbox': torch.tensor([[-1, 1]] * dim),
# 'embed_dim': dim - 1,
# }
# def free_constraint(dim, r=3):
# return {
# 'bbox': torch.tensor([[-r, r]] * dim),
# 'embed_dim': dim,
# # 'prior_sample_fn': prior_sample_fn,
# }
# def ellipse_constraint(dim):
# assert(dim == 2)
# return {
# 'eq_fn': lambda X: (X[..., 0].square() / 9 +
# X[..., 1].square() / 1).unsqueeze(-1) - 1,
# 'bbox': torch.tensor([[-4, 4], [-2, 2]]),
# 'embed_dim': dim - 1,
# }
class DistributionBase(ABC):
def __init__(self, *, dim, embed_dim, bbox, device):
'''
:param dim: ambient dimension
'''
self.dim = dim
self.embed_dim = embed_dim
self.bbox = bbox
self.device = device
self.gt_samples = None
@abstractmethod
def log_p(self, X):
'''
:param X: (B, D)
:return: (B,)
'''
pass
def get_reject_ineq_fn(self):
'''
:return: a function (B, D) -> (B, K)
'''
return None
def sample_gt(self, B, refresh):
if not refresh:
if self.gt_samples is not None and self.gt_samples.shape[0] == B:
return self.gt_samples
# We sample multiple times and reject samples that don't satisfy
# inequality constraints.
ineq_fn = self.get_reject_ineq_fn()
remain = B
sample_list = []
while remain > 0:
samples = self.sample_gt_impl(2 * remain) # (2B, D)
if samples is None:
return None
if ineq_fn is not None:
satisfy = (ineq_fn(samples) <= 0).all(-1) # (2B)
else:
satisfy = torch.ones([samples.shape[0]],
device=samples.device, dtype=torch.bool)
samples = samples[satisfy, :]
count = min(remain, samples.shape[0])
remain -= count
sample_list.append(samples[:count])
self.gt_samples = torch.cat(sample_list, 0) # (B, D)
assert(self.gt_samples.shape[0] == B)
assert(self.gt_samples is not None)
return self.gt_samples
def sample_gt_impl(self, B):
'''
Can be overriden if it is possible to sample from the ground truth.
:return: (B, D)
'''
return None
class Dirichlet(DistributionBase):
def __init__(self, dim, *, device):
super().__init__(dim=dim, embed_dim=dim - 1,
bbox=torch.tensor([[0, 1]] * dim),
device=device)
alpha = np.ones([dim],
dtype=np.float64) * 0.1
if alpha.shape[0] >= 3:
alpha[:3] += np.array([90., 5., 5.])
alpha = torch.from_numpy(alpha).float().to(device)
self.alpha = alpha
def log_p(self, X):
assert(self.alpha.shape[0] == X.shape[-1])
return ((self.alpha - 1) / 2 * (X.square() + 1e-6).log()).sum(-1)
# return ((self.alpha - 1) * (X + 1e-40).log()).sum(-1)
# return ((self.alpha - 1) * (X).log()).sum(-1)
def sample_gt_impl(self, B):
# rng = np.random.RandomState(123)
return torch.from_numpy(np.random.dirichlet(
self.alpha.cpu().detach(), size=B)).float().to(self.device)
class QuadraticFullDim(DistributionBase):
def __init__(self, dim, *,
device,
ineq_fn=None,
seed=123):
'''
:param temp: smaller temp leads to smaller variance
'''
super().__init__(dim=dim, embed_dim=dim,
bbox=torch.tensor([[-2,2]] * dim, device=device),
device=device)
self.ineq_fn = ineq_fn
if seed is None:
# Standard Gaussian
A = np.eye(dim)
else:
rng = np.random.RandomState(seed)
A_sqrt = rng.uniform(-1.0, 1.0, size=(dim, dim))
A = A_sqrt @ A_sqrt.T
A = np.linalg.inv(A)
A /= np.linalg.det(A) # to have unit determinant
self.A = torch.from_numpy(A).float().to(device)
from torch.distributions.multivariate_normal import MultivariateNormal
self.dist = MultivariateNormal(loc=torch.zeros([dim], device=device),
covariance_matrix=self.A)
def log_p(self, X):
assert(self.A.shape[0] == X.shape[-1])
return self.dist.log_prob(X)
def sample_gt_impl(self, B):
return self.dist.sample([B])
def get_reject_ineq_fn(self):
return self.ineq_fn
class StudentTFullDim(DistributionBase):
def __init__(self, dim, *,
device,
ineq_fn=None,
df=2.0,
seed=50):
'''
:param temp: smaller temp leads to smaller variance
'''
super().__init__(dim=dim, embed_dim=dim,
bbox=torch.tensor([[-5, 5]] * dim, device=device),
device=device)
self.ineq_fn = ineq_fn
if seed is None:
# Standard student T
A = np.eye(dim)
else:
rng = np.random.RandomState(seed)
A_sqrt = rng.uniform(-1.0, 1.0, size=(dim, dim))
A = A_sqrt @ A_sqrt.T
A = np.linalg.inv(A)
A /= np.linalg.det(A) # to have unit determinant
self.A = torch.from_numpy(A).float().to(device)
self.A_inv = torch.from_numpy(np.linalg.inv(A)).float().to(device)
from torch.distributions.studentT import StudentT
self.dist = StudentT(df=df)
def log_p(self, X):
# X: (B, D), A: (D, D)
assert(self.A.shape[0] == X.shape[-1])
B = X.shape[0]
Z = torch.bmm(self.A_inv.unsqueeze(0).expand(B, -1, -1), X.unsqueeze(-1)).squeeze(-1) # (B, D)
return self.dist.log_prob(Z).sum(-1) # (B,)
def sample_gt_impl(self, B):
Z = self.dist.sample([B, self.dim]).to(self.device)
X = torch.bmm(self.A.unsqueeze(0).expand(B, -1, -1), Z.unsqueeze(-1)).squeeze(-1) # (B, D)
return X
def get_reject_ineq_fn(self):
return self.ineq_fn
class UniformBox(DistributionBase):
def __init__(self, dim, *, device):
super().__init__(dim=dim, embed_dim=dim,
bbox=torch.tensor([[-1.3, 1.3]] * dim, device=device),
device=device)
def log_p(self, X):
# HACK: this will force X to be in the computation graph.
return (X-X).sum(-1)
def sample_gt_impl(self, B):
return 2 * torch.rand([B, self.dim], device=self.device) - 1
class UniformHeart(DistributionBase):
def __init__(self, *, device):
super().__init__(dim=2, embed_dim=2,
bbox=torch.tensor([[-1.3, 1.3]] * 2, device=device),
device=device)
def log_p(self, X):
# HACK: this will force X to be in the computation graph.
return (X-X).sum(-1)
def sample_gt_impl(self, B):
return 3 * (torch.rand([B, 2], device=self.device) - 0.5)
def get_reject_ineq_fn(self):
return heart_id_reparam()['ineq_fn']
class UniformSimplex3D(DistributionBase):
def __init__(self, dim, *, device):
assert(dim==3)
super().__init__(dim=dim, embed_dim=dim-1,
bbox=torch.tensor([[-0.3, 1.3]] * dim, device=device),
device=device)
def log_p(self, X):
# HACK: this will force X to be in the computation graph.
return (X-X).sum(-1)
def sample_gt_impl(self, batch_size):
A = np.array([1, 0, 0], dtype=np.float64)
B = np.array([0, 1, 0], dtype=np.float64)
C = np.array([0, 0, 1], dtype=np.float64)
r1 = np.expand_dims(np.random.random([batch_size]), -1)
r2 = np.expand_dims(np.random.random([batch_size]), -1)
P = ((1-np.sqrt(r1)) * A + (np.sqrt(r1)*(1-r2)) * B + (r2 * np.sqrt(r1)) * C)
return torch.from_numpy(P).to(self.device).float()
class GaussianMixtureBox(DistributionBase):
def __init__(self, *, dim,
ineq_fn=None,
centers, variances, weights, bbox,
device):
'''
:param centers: (M, D)
:param variances: (M,)
:param weights: (M,)
'''
super().__init__(dim=dim, embed_dim=dim, bbox=bbox, device=device)
assert(dim == centers.shape[1])
self.centers = centers.to(device)
self.variances = variances.to(device)
self.weights = weights.to(device)
self.weights /= self.weights.sum()
self.ineq_fn = ineq_fn
def log_p(self, X):
'''
:param X: (B, D)
'''
tmp = X.unsqueeze(1) - self.centers.unsqueeze(0) # (B, M, D)
tmp = -tmp.square().sum(-1) / (2 * self.variances.unsqueeze(0)) # (B, M)
coef = torch.pow(2 * np.pi * self.variances, self.dim / 2) # (M,)
tmp = tmp.exp() / coef # (B, M)
log_p = (tmp * self.weights).sum(-1).log()
return log_p
def sample_gt_impl(self, B):
tmp = torch.randn([B, self.dim], device=self.device).unsqueeze(1) # (B, 1, D)
tmp = tmp * self.variances.unsqueeze(-1).sqrt() # (B, M, D)
tmp = tmp + self.centers # (B, M, D)
inds = torch.multinomial(self.weights, B,
replacement=True).to(self.device) # (B,)
M = self.centers.shape[0]
D = self.dim
flatten_idx = ((torch.arange(B, device=self.device) * M * D +
inds * D).unsqueeze(-1) +
torch.arange(D, device=self.device)) # (B, D)
# Want: out[i, j] = tmp[i, inds[i], j]
out = tmp.reshape(-1)[flatten_idx.reshape(-1)].reshape(B, D) # (B, D)
return out
def get_reject_ineq_fn(self):
return box_id_reparam(2)['ineq_fn']
def create_problem(device, prob, reparam_name):
from mied.problems.analytical import Analytical
import re
m = re.search('([0-9]+)d$', prob)
dim_group = m.group(0)
dim = int(dim_group[:-1])
prob_name = prob[:-len(dim_group)-1]
if prob_name == 'dirichlet':
dist = Dirichlet(dim, device=device)
elif prob_name == 'quadratic_uc':
dist = QuadraticFullDim(dim, device=device, ineq_fn=None)
elif prob_name == 'quadratic_2_uc':
dist = QuadraticFullDim(dim, device=device, ineq_fn=None, seed=40)
elif prob_name == 'std_gaussian_uc':
dist = QuadraticFullDim(dim, device=device, ineq_fn=None, seed=None)
elif prob_name == 'student_uc':
dist = StudentTFullDim(dim, device=device, ineq_fn=None)
elif prob_name == 'uniform_box':
dist = UniformBox(dim, device=device)
elif prob_name == 'uniform_heart':
dist = UniformHeart(device=device)
elif prob_name == 'uniform_simplex':
dist = UniformSimplex3D(dim, device=device)
elif prob_name == 'mog_box':
assert(dim == 2)
dist = GaussianMixtureBox(dim=2,
bbox=torch.tensor([[-1.3, 1.3]]*2, device=device),
centers=torch.tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]],
dtype=torch.float32),
variances=torch.tensor([0.3, 0.3, 0.3, 0.3]),
weights=torch.tensor([0.25, 0.25, 0.25, 0.25]),
device=device)
elif prob_name == 'vmf':
dist = QuadraticFullDim(dim, device=device, ineq_fn=None)
else:
raise Exception(f'Unknown problem name: {prob_name}')
if reparam_name == 'id':
reparam = id_reparam(dim)
elif reparam_name == 'box_id':
reparam = box_id_reparam(dim)
elif reparam_name == 'box_tanh':
reparam = box_tanh_reparam(dim)
elif reparam_name == 'box_mirror':
reparam = box_mirror_reparam(dim)
elif reparam_name == 'box_mirror_entropic':
reparam = box_mirror_reparam(dim, True)
elif reparam_name == 'sphere':
reparam = sphere_reparam(dim)
elif reparam_name == 'heart_id':
reparam = heart_id_reparam()
elif reparam_name == 'period_id':
reparam = period_id_reparam()
elif reparam_name == 'simplex_pos_sum_one':
reparam = simplex_pos_sum_one_reparam(dim)
elif reparam_name == 'simplex_pos_sum_one_ineq':
reparam = simplex_pos_sum_one_ineq_reparam(dim)
elif reparam_name == 'simplex_id':
reparam = simplex_id_reparam(dim)
elif reparam_name == 'simplex_softmax':
reparam = simplex_softmax_reparam(dim)
else:
raise Exception(f'Unknown reparametrization name: {reparam_name}')
return Analytical(device=device,
bbox=dist.bbox,
in_dim=dist.dim,
embed_dim=dist.embed_dim,
log_p_fn=lambda X: dist.log_p(X),
gt_sample_fn=lambda B, refresh: dist.sample_gt(B, refresh),
**reparam)
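# Illustrative usage sketch of create_problem (added for clarity; the problem and
# reparametrization names are the defaults used by tests/analytical/run.py):
#
#     problem = create_problem(torch.device('cpu'), 'uniform_box_2d', 'box_tanh')
#     X_gt = problem.sample_gt(100, refresh=True)   # ground-truth samples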
| 17,849 | 31.046679 | 109 |
py
|
MIED
|
MIED-main/mied/problems/analytical.py
|
import torch
from mied.problems.problem_base import ProblemBase
class Analytical(ProblemBase):
def __init__(self, *,
bbox,
embed_dim,
log_p_fn,
prior_sample_fn,
eq_fn=None,
ineq_fn=None,
reparam_fn=None,
gt_sample_fn=None,
**kwargs):
'''
A base class for simple analytical functions.
:param bbox: a (D, 2) tensor, used only for evaluation.
'''
super().__init__(**kwargs)
self.bbox = bbox
self.embed_dim = embed_dim
self.log_p_fn = log_p_fn
self.eq_fn = eq_fn
self.ineq_fn = ineq_fn
self.prior_sample_fn = prior_sample_fn
self.gt_sample_fn = gt_sample_fn
self.reparam_fn = reparam_fn
def get_embed_dim(self):
return self.embed_dim
def eval_log_p(self, P):
return self.log_p_fn(P)
def sample_prior(self, batch_size):
return self.prior_sample_fn(batch_size, self.device)
def reparametrize(self, Z):
if self.reparam_fn is None:
return super().reparametrize(Z)
return self.reparam_fn(Z)
def sample_gt(self, batch_size, refresh):
if self.gt_sample_fn is not None:
return self.gt_sample_fn(batch_size, refresh)
return None
def eval_eq(self, P):
if self.eq_fn is None:
return None
return self.eq_fn(P)
def eval_ineq(self, P):
if self.ineq_fn is None:
return None
return self.ineq_fn(P)
| 1,621 | 23.208955 | 63 |
py
|
MIED
|
MIED-main/mied/problems/fairness_bnn.py
|
import torch
from torch.distributions import Normal
import torch.nn.functional as F
import numpy as np
import random
from mied.problems.problem_base import ProblemBase
from mied.utils.adult_loader import load_data
# Using the same setup as https://proceedings.neurips.cc/paper/2021/hash/c61aed648da48aa3893fb3eaadd88a7f-Abstract.html
class BayesianNN:
def __init__(self, idx, X_train, y_train, batch_size, hidden_dim, thres):
self.idx = idx
self.X_train = X_train
self.y_train = y_train
self.batch_size = batch_size
self.n_features = X_train.shape[1] - 1
self.hidden_dim = hidden_dim
self.thres = thres
def forward(self, inputs, theta):
assert(theta.shape[1] == (self.n_features + 2) * self.hidden_dim + 1)
# Unpack theta
w1 = theta[:, 0:self.n_features * self.hidden_dim].reshape(-1, self.n_features, self.hidden_dim)
b1 = theta[:, self.n_features * self.hidden_dim:(self.n_features + 1) * self.hidden_dim].unsqueeze(1)
w2 = theta[:, (self.n_features + 1) * self.hidden_dim:(self.n_features + 2) * self.hidden_dim].unsqueeze(2)
b2 = theta[:, -1].reshape(-1, 1, 1)
inputs = inputs.unsqueeze(0).repeat(w1.shape[0], 1, 1)
inter = (torch.bmm(inputs, w1) + b1).relu()
out_logit = torch.bmm(inter, w2) + b2
out = out_logit.squeeze()
return out
def get_log_prob_and_constraint(self, theta):
model_w = theta[:, :]
w_prior = Normal(0., 1.)
random_idx = random.sample([i for i in range(self.X_train.shape[0])], self.batch_size)
X_batch = self.X_train[random_idx]
y_batch = self.y_train[random_idx]
out_logit = self.forward(X_batch[:, self.idx], theta) # [num_particle, batch_size]
y_batch_repeat = y_batch.unsqueeze(0).repeat(out_logit.shape[0], 1)
log_p_data = F.binary_cross_entropy_with_logits(out_logit, y_batch_repeat, reduction='none')
log_p_data = (-1.)*log_p_data.sum(dim=1)
log_p0 = w_prior.log_prob(model_w.t()).sum(dim=0)
log_p = log_p0 + log_p_data * (self.X_train.shape[0] / self.batch_size) # (8) in paper
### NOTE: compute fairness loss
mean_sense = X_batch[:, 45].mean()
weight_sense = X_batch[:, 45] - mean_sense # [batch_size]
#weight_sense = weight_sense.view(1, -1).repeat(self.num_particles, 1)
# Modify here as well.
out = out_logit.sigmoid()
out = out - out.mean(dim=1, keepdim=True) # [num_particle, batch_size]
# constrain = ((weight_sense.unsqueeze(0) * out_logit).mean(-1))**2 - self.thres
constrain = ((weight_sense.unsqueeze(0) * out).mean(-1))**2 - self.thres
return log_p, constrain
class FairnessBNN(ProblemBase):
def __init__(self, data_dir,
thres,
ineq_scale,
device=torch.device('cpu')):
self.ineq_scale = ineq_scale
idx = [i for i in range(87)]
del idx[45]
X_train, y_train, X_test, y_test, start_index, cat_length = load_data(
data_dir, get_categorical_info=True)
X_train = X_train[:20000]
y_train = y_train[:20000]
n = X_train.shape[0]
n = int(0.99 * n)
# Note: X_val is not used.
X_train = X_train[:n, :]
y_train = y_train[:n]
X_train = np.delete(X_train, 46, axis=1)
X_test = np.delete(X_test, 46, axis=1)
X_train = torch.tensor(X_train).float().to(device)
X_test = torch.tensor(X_test).float().to(device)
y_train = torch.tensor(y_train).float().to(device)
y_test = torch.tensor(y_test).float().to(device)
X_train_mean, X_train_std = torch.mean(X_train[:, idx], dim=0), torch.std(X_train[:, idx], dim=0)
        X_train[:, idx] = (X_train[:, idx] - X_train_mean) / X_train_std
X_test[:, idx] = (X_test[:, idx] - X_train_mean) / X_train_std
batch_size, hidden_dim = 19800, 50
in_dim = (X_train.shape[1] - 1 + 2) * hidden_dim + 1
super().__init__(device=device,
in_dim=in_dim)
self.bnn = BayesianNN(idx,
X_train, y_train, batch_size, hidden_dim, thres)
self.X_train, self.y_train = X_train, y_train
self.X_test, self.y_test = X_test, y_test
self.idx = idx
def sample_prior(self, batch_size):
return 0.1 * torch.randn([batch_size, self.in_dim], device=self.device)
def eval_log_p(self, theta):
log_p, constraint = self.bnn.get_log_prob_and_constraint(theta)
return log_p
def eval_ineq(self, theta):
log_p, constraint = self.bnn.get_log_prob_and_constraint(theta)
return self.ineq_scale * constraint.unsqueeze(-1)
def get_embed_dim(self):
return self.in_dim # full dimension
def custom_eval(self, theta):
X_test = self.X_test
y_test = self.y_test
with torch.no_grad():
prob = self.bnn.forward(X_test[:, self.idx], theta)
            y_pred = torch.sigmoid(prob).mean(dim=0) # Average among outputs from different network parameters (particles)
y_pred = y_pred.cpu().numpy()
sum_positive = np.zeros(2).astype(float)
count_group = np.zeros(2).astype(float)
for j in range(2):
A = y_pred[X_test.cpu().numpy()[:,45]==j]
count_group[j] = A.shape[0]
sum_positive[j] = np.sum(A >= 0.5)
ratio = sum_positive/count_group
CV = np.max(ratio) - np.min(ratio)
y_pred[y_pred>= 0.5] = 1
y_pred[y_pred<0.5] = 0
acc_bnn = np.sum(y_pred==y_test.cpu().numpy())/float(y_test.shape[0])
cv_bnn = CV
print('acc: ', np.sum(y_pred==y_test.cpu().numpy())/float(y_test.shape[0]), 'fairness:', CV)
acc_cllt = []
cv_cllt = []
for i in range(prob.shape[0]):
y_pred = torch.sigmoid(prob[i, :])
y_pred = y_pred.cpu().numpy()
sum_positive = np.zeros(2).astype(float)
count_group = np.zeros(2).astype(float)
for j in range(2):
A = y_pred[X_test.cpu().numpy()[:,45]==j]
count_group[j] = A.shape[0]
sum_positive[j] = np.sum(A >= 0.5)
ratio = sum_positive/count_group
CV = np.max(ratio) - np.min(ratio)
y_pred[y_pred>= 0.5] = 1
y_pred[y_pred<0.5] = 0
acc_cllt.append(np.sum(y_pred==y_test.cpu().numpy())/float(y_test.shape[0]))
cv_cllt.append(CV)
# print('mean CV {}, best CV {}, worst CV {}'.format(
# np.mean(np.array(cv_cllt)),
# np.min(np.array(cv_cllt)),
# np.max(np.array(cv_cllt))))
return {
'acc_all': np.stack(acc_cllt, 0),
'cv_all': np.stack(cv_cllt, 0),
'acc_bnn': acc_bnn,
'cv_bnn': cv_bnn,
}
def custom_post_step(self, theta):
eval_dict = self.custom_eval(theta)
del eval_dict['acc_all']
del eval_dict['cv_all']
log_p, constraint = self.bnn.get_log_prob_and_constraint(theta)
constraint = constraint + self.bnn.thres
# Average across all particles.
eval_dict['log_p'] = log_p.sum(-1).mean()
eval_dict['constraint_mean'] = constraint.mean()
eval_dict['constraint_max'] = constraint.max()
return eval_dict
| 7,599 | 38.175258 | 122 |
py
|
MIED
|
MIED-main/mied/utils/kernels.py
|
from abc import ABC, abstractmethod
import torch
from mied.utils.batch_jacobian import compute_jacobian
class KernelBase(ABC):
@abstractmethod
def eval(self, X, Y):
'''
:param X: (B, D)
:param Y: (B, D)
:return: (B,)
'''
pass
@abstractmethod
def grad_1(self, X, Y):
'''
:param X: (B, D)
:param Y: (B, D)
:return: (B, D)
'''
pass
@abstractmethod
def div_2_grad_1(self, X, Y):
'''
:param X: (B, D)
:param Y: (B, D)
:return: (B,)
'''
pass
class GaussianKernel(KernelBase):
def __init__(self, sigma):
'''
k(x, y) = exp(-||x-y||^2/(2 sigma))
:param sigma:
'''
self.sigma = sigma
def eval(self, X, Y):
return torch.exp(-(X - Y).square().sum(-1) / (self.sigma * 2))
def grad_1(self, X, Y):
return -(X - Y) / self.sigma * self.eval(X, Y).unsqueeze(-1)
def div_2_grad_1(self, X, Y):
D = X.shape[-1]
return self.eval(X, Y) * (-(X - Y).square().sum(-1) / (self.sigma ** 2)
+ D / self.sigma)
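# Sanity-check sketch (added for clarity, not original code). For
# k(x, y) = exp(-||x - y||^2 / (2 sigma)) the closed forms implemented above are
#   grad_x k(x, y)       = -(x - y) / sigma * k(x, y)
#   div_y grad_x k(x, y) = k(x, y) * (D / sigma - ||x - y||^2 / sigma^2)
# e.g.
#     k = GaussianKernel(sigma=1.0)
#     X, Y = torch.randn(4, 2), torch.randn(4, 2)
#     k.eval(X, Y), k.grad_1(X, Y), k.div_2_grad_1(X, Y)   # (4,), (4, 2), (4,)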
| 1,184 | 19.431034 | 79 |
py
|
MIED
|
MIED-main/mied/utils/batch_jacobian.py
|
import torch
def compute_jacobian(outputs, inputs,
create_graph=True, retain_graph=True):
'''
Compute Jacobian matrices in batch.
:param outputs: (..., D1)
:param inputs: (..., D2)
:returns: (..., D1, D2), computed Jacobian
'''
J = torch.cat([
torch.autograd.grad(
outputs=outputs[..., d], inputs=inputs,
create_graph=create_graph, retain_graph=retain_graph,
grad_outputs=torch.ones(inputs.size()[:-1], device=inputs.device)
)[0].unsqueeze(-2)
for d in range(outputs.shape[-1])
], -2) # (..., D1, D2)
return J
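# Illustrative usage sketch (added for clarity): batched Jacobian of a simple
# per-sample map R^3 -> R^2.
#
#     x = torch.randn(5, 3, requires_grad=True)
#     y = torch.stack([x.pow(2).sum(-1), x.sum(-1)], dim=-1)   # (5, 2)
#     J = compute_jacobian(y, x)                               # (5, 2, 3)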
| 631 | 27.727273 | 77 |
py
|
MIED
|
MIED-main/mied/utils/batch_hessian.py
|
import torch
from mied.utils.batch_jacobian import compute_jacobian
def compute_hessian(func, inputs):
'''
    Compute Hessian matrices in batch.
:param func: (B, D) -> (B,)
:param inputs: (B, D)
:returns: (B, D, D)
'''
outputs = func(inputs) # (B,)
grad = compute_jacobian(outputs.unsqueeze(-1), inputs).squeeze(-2) # (B, D)
result = compute_jacobian(grad, inputs)
return result
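# Illustrative usage sketch (added for clarity): for f(x) = ||x||^2 the exact
# Hessian is 2 * I for every sample.
#
#     x = torch.randn(4, 3, requires_grad=True)
#     H = compute_hessian(lambda z: z.pow(2).sum(-1), x)   # (4, 3, 3), ~2 * I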
| 420 | 21.157895 | 79 |
py
|
MIED
|
MIED-main/mied/utils/random.py
|
import torch
import numpy as np
import random
def seed_all(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
| 280 | 20.615385 | 42 |
py
|
MIED
|
MIED-main/mied/utils/proj_polyhedra.py
|
import torch
def proj_halfspace(p, c, y):
'''
Project p to halfspace defined by {x: c^T x <= y}.
:param p: (B, D)
:param c: (B, D)
:param y: (B,)
:return: (B, D), projected points
'''
norm = torch.norm(c, dim=-1) + 1e-8 # (B,)
c = c / norm.unsqueeze(-1) # (B, D)
y = y / norm # (B,)
dot = (p * c).sum(-1) # (B,)
return p - (dot - y).relu().unsqueeze(-1) * c
def calc_suboptimality(X, C, Y):
'''
    Calculate the suboptimality for projecting X onto the polyhedron
    defined by C and Y.
:param X: (B, D)
:param C: (B, K, D)
:param Y: (B, K)
:return: scalar, representing average suboptimality
'''
return (torch.matmul(C, X.unsqueeze(-1)).squeeze(-1) - Y).relu().mean()
def proj_polyhedra(X, C, Y,
parallel=False,
max_num_itr=50, logging=False, early_stop_eps=1e-6):
'''
Project each X to the intersection of {C_i^T x <= Y_i, for all i < K}.
:param X: (B, D)
:param C: (B, K, D)
:param Y: (B, K)
:return: (B, D), projected points
'''
if logging:
hist_loss = [calc_suboptimality(X, C, Y)]
if C.shape[1] == 1:
# Single constraint.
sol = proj_halfspace(X, C[:, 0, :], Y[:, 0])
else:
with torch.no_grad():
K = C.shape[1]
D = C.shape[2]
if parallel:
u_prev_stack = X.unsqueeze(1).expand(-1, K, -1) # (B, K, D)
z_prev_stack = torch.zeros_like(u_prev_stack) # (B, K, D)
else:
u_prev_list = []
z_prev_list = []
for _ in range(K + 1):
u_prev_list.append(X.clone().detach())
z_prev_list.append(torch.zeros_like(X))
for _ in range(max_num_itr):
if parallel:
u0 = u_prev_stack.mean(1) # (B, D)
tmp = u0.unsqueeze(1) + z_prev_stack # (B, K, D)
u_next_stack = proj_halfspace(tmp.reshape(-1, D),
C.reshape(-1, D),
Y.reshape(-1)).reshape(-1, K, D) # (B, K, D)
z_next_stack = tmp - u_next_stack
u_prev_stack = u_next_stack
z_prev_stack = z_next_stack
else:
u_next_list = []
u_next_list.append(u_prev_list[K])
z_next_list = [None]
for i in range(K):
tmp = u_next_list[i] + z_prev_list[i + 1]
u_next_list.append(proj_halfspace(tmp,
C[:, i, :], Y[:, i]))
z_next_list.append(tmp - u_next_list[-1])
u_prev_list = u_next_list
z_prev_list = z_next_list
sol = u_prev_stack.mean(1) if parallel else u_prev_list[-1]
if logging:
subopt = calc_suboptimality(
sol,
C, Y)
hist_loss.append(subopt)
if subopt < early_stop_eps:
break
if logging:
return sol, hist_loss
return sol
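# Illustrative usage sketch (added for clarity): project points onto the box
# [-1, 1]^2 written as four halfspaces C x <= Y.
#
#     B = 8
#     X = 3 * torch.randn(B, 2)
#     C = torch.tensor([[1., 0.], [-1., 0.], [0., 1.], [0., -1.]]).expand(B, -1, -1)
#     Y = torch.ones(B, 4)
#     X_proj = proj_polyhedra(X, C, Y)         # approximately inside the box
#     gap = calc_suboptimality(X_proj, C, Y)   # residual constraint violation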
| 3,305 | 32.393939 | 94 |
py
|
MIED
|
MIED-main/mied/utils/path.py
|
from pathlib import Path
import numpy as np
import re
import h5py
def grab_step_files(parent_dir, regex='step-([0-9]+)\.(h5|npy)'):
parent_dir = Path(parent_dir)
step_files = []
for f in parent_dir.iterdir():
m = re.search(regex, str(f))
if m is not None:
step_files.append((int(m.group(1)), f))
step_files.sort(key=lambda pr: pr[0])
return step_files
def load_samples(file):
p = Path(file)
if p.suffix == '.npy':
return np.load(p)
else:
assert(p.suffix == '.h5')
h5_handle = h5py.File(file, 'r')
samples = h5_handle['samples'][:]
h5_handle.close()
return samples
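# Illustrative usage sketch (added for clarity; 'exps/my_run' is a hypothetical
# experiment directory containing step-00100.h5, step-00200.h5, ...):
#
#     for step, path in grab_step_files('exps/my_run'):
#         samples = load_samples(path)   # numpy array of particles at this step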
| 676 | 23.178571 | 65 |
py
|
MIED
|
MIED-main/mied/utils/h5_helpers.py
|
import torch
import numpy as np
import h5py
from pathlib import Path
def save_dict_h5(save_dict, h5_path, create_dir=False):
def recurse(remain_dict, parent_handle):
for k, v in remain_dict.items():
if isinstance(v, dict):
child_handle = parent_handle.create_group(k)
recurse(v, child_handle)
else:
if torch.is_tensor(v):
arr = v.cpu().detach().numpy()
elif isinstance(v, np.ndarray):
arr = v
else:
# Save as attributes.
parent_handle.attrs[k] = v
continue
parent_handle.create_dataset(k, data=arr)
if create_dir:
Path(h5_path).parent.mkdir(parents=True, exist_ok=True)
    root_handle = h5py.File(h5_path, 'w')
    recurse(save_dict, root_handle)
    root_handle.close()
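# Illustrative usage sketch (added for clarity; 'out/result.h5' is a hypothetical
# path): nested dicts become HDF5 groups, tensors/arrays become datasets, and
# scalars are stored as attributes.
#
#     save_dict_h5({'samples': torch.randn(10, 2), 'meta': {'seed': 42}},
#                  'out/result.h5', create_dir=True)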
| 893 | 33.384615 | 63 |
py
|
MIED
|
MIED-main/mied/utils/ec.py
|
import torch
import argparse
import yaml
import copy
from pathlib import Path
import shutil
from datetime import datetime
from uuid import uuid4
from collections import namedtuple
import wandb
from mied.utils.shortname import \
convert_method_cls_to_str, convert_method_str_to_cls, \
convert_projector_cls_to_str, convert_projector_str_to_cls
class Config:
def __init__(self, param_dict):
self.param_dict = copy.copy(param_dict)
def has_same_params(self, other):
return self.param_dict == other.param_dict
def __getitem__(self, k):
return self.param_dict[k]
def get(self, k, default_v):
return self.param_dict.get(k, default_v)
def __repr__(self):
return str(self.param_dict)
@staticmethod
def from_yaml(yaml_path):
return Config(yaml.safe_load(open(yaml_path, 'r')))
def save_yaml(self, yaml_path):
open(yaml_path, 'w').write(yaml.dump(self.param_dict))
class ConfigBlueprint:
def __init__(self, default_param_dict):
'''
        :param default_param_dict: a dict, where the values have to be one of
            [str, int, float, bool]
'''
self.default_param_dict = default_param_dict
def prepare_parser(self, parser):
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
for k, v in self.default_param_dict.items():
if type(v) == bool:
parser.add_argument('--{}'.format(k), type=str2bool, default=v)
else:
parser.add_argument('--{}'.format(k), type=type(v), default=v)
ECParseResult = namedtuple('ECParseResult',
['tmp_args', 'config', 'exp_dir'])
class ExperimentCoordinator:
def __init__(self, root_dir):
'''
We assume the following hierarchy of directories:
root_dir/exps/exp_name/...
- conf.yml: the configuration corresponding to an instance of
Config class
Then arbitrary files and subfolders can be placed here, e.g.,
- result.h5: result in hdf5 format
- log/: tensorboard log
- ckpt.tar: checkpoint
When possible, we assume each conf.yml corresponds to a
unique exp_name.
:param root_dir: root directory of the experiments
'''
self.root_path = Path(root_dir)
# Temporary blueprints are non-persistent.
self.temporary_blueprints = [ConfigBlueprint({
'device': 'cuda',
'val_freq': 100,
})]
self.common_blueprints = [ConfigBlueprint({
'project': 'uncategorized',
'wandb': True,
'seed': 42,
'optimizer': 'Adam',
'lr': 1e-2,
'beta1': 0.9,
'beta2': 0.999,
# Every method is particle-based.
'num_particle': 50,
'precondition': False,
})]
self.method_blueprint_dict = {}
self.projector_blueprint_dict = {}
def add_temporary_arguments(self, param_dict):
self.temporary_blueprints.append(ConfigBlueprint(param_dict))
def add_common_arguments(self, param_dict):
self.common_blueprints.append(ConfigBlueprint(param_dict))
def add_method_arguments(self, method_cls, param_dict):
self.method_blueprint_dict[method_cls] = ConfigBlueprint(param_dict)
def add_projector_arguments(self, projector_cls, param_dict):
self.projector_blueprint_dict[projector_cls] = ConfigBlueprint(param_dict)
def parse_args(self):
tmp_parser = argparse.ArgumentParser()
'''
* --resume: continue an experiment (the corresponding folder
must have a conf.yml file)
* --exp_name: name of the experiment which is the same as the
folder name containing this experiment's related files. If not
provided, a random unique name will be generated (which can later
be changed).
'''
tmp_parser.add_argument('--resume', type=str)
tmp_parser.add_argument('--override', action='store_true', default=False)
tmp_parser.add_argument('--restart', action='store_true', default=False)
tmp_parser.add_argument('--exp_name', type=str)
for b in self.temporary_blueprints:
b.prepare_parser(tmp_parser)
tmp_args, _ = tmp_parser.parse_known_args()
if tmp_args.resume:
assert(tmp_args.exp_name is None)
exp_dir = self.get_exps_path() / Path(tmp_args.resume)
config = Config.from_yaml(exp_dir / 'conf.yml')
print('Resuming experiment {}...'.format(exp_dir))
else:
common_parser = argparse.ArgumentParser()
common_parser.add_argument('--method', type=str, default='RED')
common_parser.add_argument('--projector', type=str, default='DB')
for b in self.common_blueprints:
b.prepare_parser(common_parser)
common_args, _ = common_parser.parse_known_args()
method_cls = convert_method_str_to_cls(common_args.method)
projector_cls = convert_projector_str_to_cls(common_args.projector)
if method_cls not in self.method_blueprint_dict:
raise Exception('Cannot find blueprint for '
f'method {method_cls}!')
if projector_cls not in self.projector_blueprint_dict:
raise Exception('Cannot find blueprint for '
                                f'projector {projector_cls}!')
method_parser = argparse.ArgumentParser()
self.method_blueprint_dict[method_cls].prepare_parser(
method_parser
)
method_args, _ = method_parser.parse_known_args()
projector_parser = argparse.ArgumentParser()
self.projector_blueprint_dict[projector_cls].prepare_parser(
projector_parser
)
projector_args, _ = projector_parser.parse_known_args()
config_dict = vars(common_args)
config_dict['method_config'] = vars(method_args)
config_dict['projector_config'] = vars(projector_args)
config_dict['wandb_id'] = wandb.util.generate_id()
config = Config(config_dict)
exp_dir = self.make_persistent(config, tmp_args.exp_name,
override=tmp_args.override)
self.parse_result = ECParseResult(
tmp_args=tmp_args,
config=config,
exp_dir=exp_dir
)
return self.parse_result
def create_solver(self, problem):
config = self.parse_result.config
exp_dir = self.parse_result.exp_dir
tmp_args = self.parse_result.tmp_args
wandb.init(
project=config['project'],
mode='online' if config['wandb'] else 'offline',
config={
'exp_dir': exp_dir,
**config.param_dict
},
name=('' if tmp_args.exp_name is None else f'{tmp_args.exp_name}'),
id=config['wandb_id'],
resume='allow'
)
projector_cls = convert_projector_str_to_cls(config['projector'])
projector = projector_cls(**config['projector_config'])
method_cls = convert_method_str_to_cls(config['method'])
solver = method_cls(problem=problem,
projector=projector,
num_particle=config['num_particle'],
precondition=config['precondition'],
val_freq=self.parse_result.tmp_args.val_freq,
ckpt_path=exp_dir / 'ckpt.tar',
logger_fn=lambda d: wandb.log(d),
optimizer_conf={
'cls': config['optimizer'],
'lr': config['lr'],
'beta1': config['beta1'],
'beta2': config['beta2']
},
**config['method_config'])
if not self.parse_result.tmp_args.restart:
solver.load_ckpt()
return solver
def get_exps_path(self):
path = self.root_path / 'exps/'
path.mkdir(exist_ok=True)
return path
def make_persistent(self, config, exp_name, override):
exist = False
# Check if params match any existing conf.yml.
for p in self.get_exps_path().iterdir():
if p.is_dir():
another_exp_name = p.stem
config_path = p / 'conf.yml'
if not config_path.exists():
continue
if exp_name == another_exp_name:
another_config = Config.from_yaml(config_path)
print(f'Found existing experiment {exp_name}!')
diff = False
for k, v in config.param_dict.items():
if k not in another_config.param_dict:
print(f'Existing config missing {k}!')
diff = True
elif another_config[k] != v:
print(f'Existing config has {k}={another_config[k]}'
f' whereas new config has {k}={v}!')
diff = True
for k in another_config.param_dict:
if k not in config.param_dict:
print(f'New config missing {k}!')
diff = True
if not override:
                        override = input("Override? [Y/N] ").strip().upper() == 'Y'
                    if override:
shutil.rmtree(p)
exist = False
break
if diff:
raise Exception('Found config with same name'
' but different parameters! Abort.')
print('Resuming experiment {} with '.format(p) +
'identical config...')
exist = True
config = another_config
exp_dir = p
if not exist:
# Save config
if exp_name is None:
exp_name = config['wandb_id']
exp_dir = self.get_exps_path() / exp_name
exp_dir.mkdir(parents=True, exist_ok=True)
config.save_yaml(exp_dir / 'conf.yml')
print('Saved a new config to {}.'.format(exp_dir))
return exp_dir
| 11,026 | 35.392739 | 82 |
py
|
MIED
|
MIED-main/mied/utils/adult_loader.py
|
# This file is kindly provided by Xingchao Liu ([email protected]).
import pandas as pd
import numpy as np
from pathlib import Path
def generate_one_hot_mat(mat):
upper_bound = np.max(mat)
mat_one_hot = np.zeros((mat.shape[0], int(upper_bound+1)))
for j in range(mat.shape[0]):
mat_one_hot[j, int(mat[j])] = 1.
return mat_one_hot
def generate_normalize_numerical_mat(mat):
mat = (mat - np.min(mat))/(np.max(mat) - np.min(mat))
#mat = 2 * (mat - 0.5)
return mat
def normalize_data_ours(adult_train, adult_test):
    ### In this function we normalize all features to [0, 1] and move the
    ### monotone numerical features (education-num, capital-gain, hours-per-week)
    ### to the first three columns.
n_train = adult_train.shape[0]
n_test = adult_test.shape[0]
adult_feature = np.concatenate((adult_train, adult_test), axis=0)
adult_feature_normalized = np.zeros((n_train+n_test, 1))
class_list = [1, 4, 5, 6, 7, 8, 12]
mono_list = [3, 9, 11]
# class_list = [1, 3, 5, 6, 7, 8, 9, 13]
# mono_list = [4, 10, 12]
### store the class variables
start_index = []
cat_length = []
### Normalize Mono Features
for i in range(adult_feature.shape[1]):
if i in mono_list:
if i == mono_list[0]:
mat = adult_feature[:, i]
mat = mat[:, np.newaxis]
adult_feature_normalized = generate_normalize_numerical_mat(mat)
else:
mat = adult_feature[:, i]
mat = generate_normalize_numerical_mat(mat)
mat = mat[:, np.newaxis]
#print(adult_feature_normalized.shape, mat.shape)
adult_feature_normalized = np.concatenate((adult_feature_normalized, mat), axis=1)
else:
continue
### Normalize non-mono features and turn class labels to one-hot vectors
for i in range(adult_feature.shape[1]):
if i in mono_list:
continue
elif i in class_list:
continue
else:
mat = adult_feature[:, i]
mat = generate_normalize_numerical_mat(mat)
mat = mat[:, np.newaxis]
adult_feature_normalized = np.concatenate((adult_feature_normalized, mat), axis=1)
for i in range(adult_feature.shape[1]):
if i in mono_list:
continue
elif i in class_list:
mat = adult_feature[:, i]
mat = generate_one_hot_mat(mat)
start_index.append(adult_feature_normalized.shape[1])
cat_length.append(mat.shape[1])
adult_feature_normalized = np.concatenate((adult_feature_normalized, mat), axis=1)
else:
continue
adult_train = adult_feature_normalized[:n_train, :]
adult_test = adult_feature_normalized[n_train:, :]
assert adult_test.shape[0] == n_test
assert adult_train.shape[0] == n_train
return adult_train, adult_test, start_index, cat_length
def load_data(data_dir, get_categorical_info=False):
data_dir = Path(data_dir)
# Add column names to data set
columns = ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status', 'occupation',
'relationship', 'race','sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income']
# Read in train data
adult_train = pd.read_csv(data_dir / 'adult.data', header=None, names=columns, skipinitialspace=True)
adult_test = pd.read_csv(data_dir / 'adult.test', header=None, skiprows=1, names=columns, skipinitialspace=True)
adult_train = adult_train.applymap(lambda x: x.strip() if type(x) is str else x)
for col in adult_train:
if adult_train[col].dtype == 'object':
adult_train = adult_train[adult_train[col] != '?']
adult_test = adult_test.applymap(lambda x: x.strip() if type(x) is str else x)
for col in adult_test:
if adult_test[col].dtype == 'object':
adult_test = adult_test[adult_test[col] != '?']
replace_train = [
['Private', 'Self-emp-not-inc', 'Self-emp-inc', 'Federal-gov', 'Local-gov', 'State-gov', 'Without-pay',
'Never-worked'],
['Bachelors', 'Some-college', '11th', 'HS-grad', 'Prof-school', 'Assoc-acdm', 'Assoc-voc', '9th', '7th-8th',
'12th', 'Masters', '1st-4th', '10th', 'Doctorate', '5th-6th', 'Preschool'],
['Married-civ-spouse', 'Divorced', 'Never-married', 'Separated', 'Widowed', 'Married-spouse-absent',
'Married-AF-spouse'],
['Tech-support', 'Craft-repair', 'Other-service', 'Sales', 'Exec-managerial', 'Prof-specialty',
'Handlers-cleaners', 'Machine-op-inspct', 'Adm-clerical', 'Farming-fishing', 'Transport-moving',
'Priv-house-serv', 'Protective-serv', 'Armed-Forces'],
['Wife', 'Own-child', 'Husband', 'Not-in-family', 'Other-relative', 'Unmarried'],
['White', 'Asian-Pac-Islander', 'Amer-Indian-Eskimo', 'Other', 'Black'],
['Female', 'Male'],
['United-States', 'Cambodia', 'England', 'Puerto-Rico', 'Canada', 'Germany', 'Outlying-US(Guam-USVI-etc)',
'India', 'Japan', 'Greece', 'South', 'China', 'Cuba', 'Iran', 'Honduras', 'Philippines', 'Italy', 'Poland',
'Jamaica', 'Vietnam', 'Mexico', 'Portugal', 'Ireland', 'France', 'Dominican-Republic', 'Laos', 'Ecuador',
'Taiwan', 'Haiti', 'Columbia', 'Hungary', 'Guatemala', 'Nicaragua', 'Scotland', 'Thailand', 'Yugoslavia',
'El-Salvador', 'Trinadad&Tobago', 'Peru', 'Hong', 'Holand-Netherlands'],
['<=50K', '>50K']
]
for row in replace_train:
adult_train = adult_train.replace(row, range(len(row)))
#print(adult_train)
replace_test = [
['Private', 'Self-emp-not-inc', 'Self-emp-inc', 'Federal-gov', 'Local-gov', 'State-gov', 'Without-pay',
'Never-worked'],
['Bachelors', 'Some-college', '11th', 'HS-grad', 'Prof-school', 'Assoc-acdm', 'Assoc-voc', '9th', '7th-8th',
'12th', 'Masters', '1st-4th', '10th', 'Doctorate', '5th-6th', 'Preschool'],
['Married-civ-spouse', 'Divorced', 'Never-married', 'Separated', 'Widowed', 'Married-spouse-absent',
'Married-AF-spouse'],
['Tech-support', 'Craft-repair', 'Other-service', 'Sales', 'Exec-managerial', 'Prof-specialty',
'Handlers-cleaners', 'Machine-op-inspct', 'Adm-clerical', 'Farming-fishing', 'Transport-moving',
'Priv-house-serv', 'Protective-serv', 'Armed-Forces'],
['Wife', 'Own-child', 'Husband', 'Not-in-family', 'Other-relative', 'Unmarried'],
['White', 'Asian-Pac-Islander', 'Amer-Indian-Eskimo', 'Other', 'Black'],
['Female', 'Male'],
['United-States', 'Cambodia', 'England', 'Puerto-Rico', 'Canada', 'Germany', 'Outlying-US(Guam-USVI-etc)',
'India', 'Japan', 'Greece', 'South', 'China', 'Cuba', 'Iran', 'Honduras', 'Philippines', 'Italy', 'Poland',
'Jamaica', 'Vietnam', 'Mexico', 'Portugal', 'Ireland', 'France', 'Dominican-Republic', 'Laos', 'Ecuador',
'Taiwan', 'Haiti', 'Columbia', 'Hungary', 'Guatemala', 'Nicaragua', 'Scotland', 'Thailand', 'Yugoslavia',
'El-Salvador', 'Trinadad&Tobago', 'Peru', 'Hong', 'Holand-Netherlands'],
['<=50K.', '>50K.']
]
for row in replace_test:
adult_test = adult_test.replace(row, range(len(row)))
adult_train = adult_train.drop('education', axis=1)
adult_test = adult_test.drop('education', axis=1)
#print(adult_train, adult_test)
adult_train = adult_train.values
np.random.seed(seed=78712)
np.random.shuffle(adult_train)
X_train = adult_train[:, :13].astype(np.float64)
y_train = adult_train[:, 13].astype(np.uint8)
adult_test = adult_test.values
X_test = adult_test[:, :13].astype(np.float64)
y_test = adult_test[:, 13].astype(np.uint8)
X_train, X_test, start_index, cat_length = normalize_data_ours(X_train, X_test)
if get_categorical_info:
return X_train, y_train, X_test, y_test, start_index, cat_length
else:
return X_train, y_train, X_test, y_test
| 8,040 | 46.023392 | 159 |
py
|
MIED
|
MIED-main/mied/utils/shortname.py
|
from mied.solvers.mied import MIED
from mied.solvers.svgd import SVGD
from mied.solvers.ksdd import KSDD
from mied.solvers.ipd import IPD
from mied.solvers.lmc import LMC
from mied.solvers.dynamic_barrier import DynamicBarrier
from mied.solvers.no_op_projector import NoOpProjector
g_methods = [
(MIED, 'MIED'),
(SVGD, 'SVGD'),
(KSDD, 'KSDD'),
(IPD, 'IPD'),
(LMC, 'LMC'),
]
g_projectors = [
(DynamicBarrier, 'DB'),
(NoOpProjector, 'NOOP'),
]
def convert_method_cls_to_str(method_cls):
for pr in g_methods:
if pr[0] == method_cls:
return pr[1]
    raise Exception(f'Unregistered method class {method_cls}!')
def convert_method_str_to_cls(method_str):
for pr in g_methods:
if pr[1] == method_str:
return pr[0]
    raise Exception(f'Unregistered method str {method_str}!')
def convert_projector_cls_to_str(projector_cls):
for pr in g_projectors:
if pr[0] == projector_cls:
return pr[1]
    raise Exception(f'Unregistered projector class {projector_cls}!')
def convert_projector_str_to_cls(projector_str):
for pr in g_projectors:
if pr[1] == projector_str:
return pr[0]
    raise Exception(f'Unregistered projector str {projector_str}!')
| 1,258 | 25.787234 | 67 |
py
|
MIED
|
MIED-main/mied/utils/batch_eval.py
|
import torch
from tqdm import tqdm
def batch_eval_index(f, total_count, batch_size=1024,
result_device=torch.device('cpu'),
detach=True,
no_tqdm=False):
'''
Batch evaluate f.
:param f: function to be evalutated. It should take in (B,) of indices.
:param total_count: total number of indices
:param batch_size: batch size in each invocation of f
:return: a list of results. You might want to call torch.cat afterwards.
'''
result = []
current_count = 0
with tqdm(total=total_count, disable=no_tqdm) as pbar:
while current_count < total_count:
count = min(batch_size, total_count - current_count)
inds = slice(current_count, current_count + count)
cur_result = f(inds)
if detach:
cur_result = cur_result.detach()
result.append(cur_result.to(result_device))
current_count += count
pbar.update(count)
pbar.close()
return result
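# Illustrative usage sketch (added for clarity): evaluate an expensive per-index
# function in chunks and concatenate the pieces afterwards.
#
#     data = torch.randn(10000, 3)
#     chunks = batch_eval_index(lambda inds: data[inds].norm(dim=-1),
#                               total_count=data.shape[0], batch_size=1024)
#     norms = torch.cat(chunks, dim=0)   # (10000,)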
| 1,050 | 32.903226 | 76 |
py
|
MIED
|
MIED-main/tests/analytical/run.py
|
import torch
import argparse
from pathlib import Path
import math
import wandb
import matplotlib.pyplot as plt
from mied.validators.particle import ParticleValidator
from mied.utils.random import seed_all
from mied.utils.ec import ExperimentCoordinator
from mied.problems.analytical_problems import create_problem
from mied.solvers.mied import MIED
from mied.solvers.svgd import SVGD
from mied.solvers.ksdd import KSDD
from mied.solvers.ipd import IPD
from mied.solvers.lmc import LMC
from mied.solvers.dynamic_barrier import DynamicBarrier
from mied.solvers.no_op_projector import NoOpProjector
if __name__ == '__main__':
root_dir = Path(__file__).resolve().parent
ec = ExperimentCoordinator(root_dir)
ec.add_temporary_arguments({
'num_itr': 2000,
'traj_freq': 10,
'plot_update': False,
'num_trial': 10,
'gt_multiplier': 10,
})
ec.add_common_arguments({
'prob': 'uniform_box_2d',
'reparam': 'box_tanh',
'filter_range': -1,
})
ec.add_method_arguments(MIED, {
'kernel': 'riesz',
'eps': 1e-8,
'riesz_s': -1.0,
'alpha_mied': 0.5,
'include_diag': 'nnd_scale',
'diag_mul': 1.3,
})
ec.add_method_arguments(KSDD, {
'sigma': 1.0,
})
ec.add_method_arguments(IPD, {
})
ec.add_method_arguments(SVGD, {
'kernel_h': -1.0,
})
ec.add_method_arguments(LMC, {
'lmc_lr': 1e-3,
'mirror_map': 'box_entropic'
})
ec.add_projector_arguments(DynamicBarrier, {
'alpha_db': 1.0,
'merge_eq': True,
'max_proj_itr': 20
})
ec.add_projector_arguments(NoOpProjector, {
})
ec_result = ec.parse_args()
tmp_args = ec_result.tmp_args
config = ec_result.config
seed_all(config['seed'])
problem = create_problem(ec_result.tmp_args.device,
config['prob'],
config['reparam'])
solver = ec.create_solver(problem)
validator = ParticleValidator(problem=problem)
def post_step_fn(i):
if tmp_args.traj_freq <= 0:
return
if (i + 1) % (tmp_args.val_freq * tmp_args.traj_freq) == 0:
metrics = ['sinkhorn', 'energy_dist']
result = validator.run(samples=solver.get_samples(),
updates=solver.compute_update(
i, solver.get_samples()),
include_density=False,
metrics=metrics,
num_trial=tmp_args.num_trial,
gt_multipler=tmp_args.gt_multiplier,
filter_range=config['filter_range'],
save_path=(ec_result.exp_dir /
'step-{:05}.h5'.format(i + 1)))
samples = result['samples']
bbox = problem.bbox.cpu().detach()
fig, ax = plt.subplots()
ax.scatter(samples[:, 0], samples[:, 1], s=5, alpha=0.6)
if tmp_args.plot_update:
updates = result['updates']
ax.quiver(samples[:, 0], samples[:, 1],
updates[:, 0], updates[:, 1],
angles='xy', scale_units='xy', scale=1)
ax.set_xlim(bbox[0, :])
ax.set_ylim(bbox[1, :])
ax.set_aspect('equal')
log_dict = {
'metrics': {m: result[m] for m in metrics},
'samples': wandb.Image(fig),
}
if tmp_args.num_trial > 1:
log_dict['metrics_std'] = {m: result[m + '_std']
for m in metrics}
plt.close(fig)
            gt_samples = problem.sample_gt(
                samples.shape[0], refresh=True)
            if gt_samples is not None:
                gt_samples = gt_samples.cpu().detach()
                fig, ax = plt.subplots()
ax.scatter(gt_samples[:, 0], gt_samples[:, 1], s=5)
ax.set_xlim(bbox[0, :])
ax.set_ylim(bbox[1, :])
ax.set_aspect('equal')
log_dict['gt_samples'] = wandb.Image(fig)
plt.close(fig)
wandb.log(log_dict, commit=False)
solver.run(num_itr=tmp_args.num_itr,
post_step_fn=post_step_fn)
validator.run(samples=solver.get_samples(),
include_gt=True,
include_density=problem.in_dim == 2,
density_bbox=problem.bbox,
save_path=ec_result.exp_dir / 'result.h5')
| 4,651 | 33.977444 | 77 |
py
|
MIED
|
MIED-main/tests/fairness_bnn/run.py
|
import torch
import argparse
from pathlib import Path
import math
import h5py
import wandb
from mied.validators.particle import ParticleValidator
from mied.utils.random import seed_all
from mied.utils.ec import ExperimentCoordinator
from mied.utils.h5_helpers import save_dict_h5
from mied.problems.fairness_bnn import FairnessBNN
from mied.solvers.mied import MIED
from mied.solvers.svgd import SVGD
from mied.solvers.ksdd import KSDD
from mied.solvers.ipd import IPD
from mied.solvers.dynamic_barrier import DynamicBarrier
if __name__ == '__main__':
root_dir = Path(__file__).resolve().parent
ec = ExperimentCoordinator(root_dir)
ec.add_temporary_arguments({
'num_itr': 1000,
'traj_freq': 10,
})
ec.add_common_arguments({
'thres': 0.01,
'ineq_scale': 1.0,
})
ec.add_method_arguments(MIED, {
'kernel': 'riesz',
'eps': 1e-8,
'riesz_s': -1.0,
'alpha_mied': 0.5,
'include_diag': 'nnd_scale',
'diag_mul': 1.3,
})
ec.add_method_arguments(KSDD, {
'sigma': 1.0,
})
ec.add_method_arguments(SVGD, {
})
ec.add_method_arguments(IPD, {
})
ec.add_projector_arguments(DynamicBarrier, {
'alpha_db': 1.0,
'merge_eq': True,
'max_proj_itr': 20
})
ec_result = ec.parse_args()
tmp_args, config = ec_result.tmp_args, ec_result.config
seed_all(config['seed'])
problem = FairnessBNN(device=tmp_args.device,
data_dir='data/',
thres=config['thres'],
ineq_scale=config['ineq_scale'])
validator = ParticleValidator(problem=problem)
def post_step_fn(i):
pass
solver = ec.create_solver(problem)
solver.run(num_itr=tmp_args.num_itr,
post_step_fn=post_step_fn)
print('Validating ...')
validator.run(samples=solver.get_samples(),
include_density=False,
save_path=ec_result.exp_dir / 'result.h5')
| 2,025 | 27.138889 | 60 |
py
|
MIED
|
MIED-main/tests/logistics/run.py
|
import torch
import argparse
from pathlib import Path
import math
import h5py
import wandb
from mied.validators.particle import ParticleValidator
from mied.utils.random import seed_all
from mied.utils.ec import ExperimentCoordinator
from mied.utils.h5_helpers import save_dict_h5
from mied.problems.logistics import BayesianLogistics
from mied.solvers.mied import MIED
from mied.solvers.svgd import SVGD
from mied.solvers.ksdd import KSDD
from mied.solvers.ipd import IPD
from mied.solvers.dynamic_barrier import DynamicBarrier
g_data_names = ['banana', 'breast_cancer', 'diabetis', 'flare_solar',
'german', 'heart', 'image', 'ringnorm', 'splice',
'thyroid', 'titanic', 'twonorm', 'waveform', 'covtype']
if __name__ == '__main__':
root_dir = Path(__file__).resolve().parent
ec = ExperimentCoordinator(root_dir)
ec.add_temporary_arguments({
'num_itr': 1000,
'traj_freq': 10,
'mcmc_only': False,
})
ec.add_common_arguments({
'data_name': 'banana',
})
ec.add_method_arguments(MIED, {
'kernel': 'riesz',
'eps': 1e-8,
'riesz_s': -1.0,
'alpha_mied': 0.5,
'include_diag': 'nnd_scale',
'diag_mul': 1.3,
})
ec.add_method_arguments(KSDD, {
'sigma': 1.0,
})
ec.add_method_arguments(SVGD, {
'kernel_h': -1.0,
})
ec.add_method_arguments(IPD, {
})
ec.add_projector_arguments(DynamicBarrier, {
'alpha_db': 1.0,
'merge_eq': True,
'max_proj_itr': 20
})
ec_result = ec.parse_args()
tmp_args, config = ec_result.tmp_args, ec_result.config
seed_all(config['seed'])
data_name = config['data_name']
if data_name in g_data_names:
if data_name != 'covtype':
data_path = 'data/benchmarks.mat'
else:
data_path = 'data/covertype.mat'
else:
raise Exception(f'Unknown dataset name: {data_name}!')
problem = BayesianLogistics(
device=tmp_args.device,
data_path=data_path,
data_name=data_name)
# Generate ground truth using mcmc.
(root_dir / 'mcmc').mkdir(parents=True, exist_ok=True)
mcmc_file = root_dir / 'mcmc' / '{}.h5'.format(data_name)
mcmc_log_file = root_dir / 'mcmc' / '{}.log'.format(data_name)
if data_name != 'covtype':
# MCMC for covtype is just too slow.
if not mcmc_file.exists():
samples = problem.mcmc(num_warmup=10000,
num_sample=10000,
log_file=mcmc_log_file)
save_dict_h5({'samples': samples},
mcmc_file, create_dir=True)
h5_handle = h5py.File(mcmc_file, 'r')
mcmc_samples = torch.from_numpy(h5_handle['samples'][:]).to(problem.device)
h5_handle.close()
if not tmp_args.mcmc_only:
validator = ParticleValidator(problem=problem)
def post_step_fn(i):
if tmp_args.traj_freq <= 0:
return
if (i + 1) % (tmp_args.val_freq * tmp_args.traj_freq) == 0:
metrics = ['sinkhorn', 'energy_dist']
samples = solver.get_samples()
result = validator.run(samples=samples,
metrics=metrics,
gt_samples=mcmc_samples,
strip_last_n=1,
save_path=(ec_result.exp_dir /
'step-{:05}.h5'.format(i + 1)))
log_dict = {
'metrics': {m: result[m] for m in metrics},
}
wandb.log(log_dict, commit=False)
solver = ec.create_solver(problem)
solver.run(num_itr=tmp_args.num_itr,
post_step_fn=post_step_fn if data_name != 'covtype' else None)
print('Validating ...')
validator.run(samples=solver.get_samples(),
include_density=False,
save_path=ec_result.exp_dir / 'result.h5')
| 4,112 | 32.991736 | 83 |
py
|
blend
|
blend-master/tools/libsvm-3.22/tools/easy.py
|
#!/usr/bin/env python
import sys
import os
from subprocess import *
if len(sys.argv) <= 1:
print('Usage: {0} training_file [testing_file]'.format(sys.argv[0]))
raise SystemExit
# svm, grid, and gnuplot executable files
is_win32 = (sys.platform == 'win32')
if not is_win32:
svmscale_exe = "../svm-scale"
svmtrain_exe = "../svm-train"
svmpredict_exe = "../svm-predict"
grid_py = "./grid.py"
gnuplot_exe = "/usr/bin/gnuplot"
else:
# example for windows
svmscale_exe = r"..\windows\svm-scale.exe"
svmtrain_exe = r"..\windows\svm-train.exe"
svmpredict_exe = r"..\windows\svm-predict.exe"
gnuplot_exe = r"c:\tmp\gnuplot\binary\pgnuplot.exe"
grid_py = r".\grid.py"
assert os.path.exists(svmscale_exe),"svm-scale executable not found"
assert os.path.exists(svmtrain_exe),"svm-train executable not found"
assert os.path.exists(svmpredict_exe),"svm-predict executable not found"
assert os.path.exists(gnuplot_exe),"gnuplot executable not found"
assert os.path.exists(grid_py),"grid.py not found"
train_pathname = sys.argv[1]
assert os.path.exists(train_pathname),"training file not found"
file_name = os.path.split(train_pathname)[1]
scaled_file = file_name + ".scale"
model_file = file_name + ".model"
range_file = file_name + ".range"
if len(sys.argv) > 2:
test_pathname = sys.argv[2]
file_name = os.path.split(test_pathname)[1]
assert os.path.exists(test_pathname),"testing file not found"
scaled_test_file = file_name + ".scale"
predict_test_file = file_name + ".predict"
cmd = '{0} -s "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, train_pathname, scaled_file)
print('Scaling training data...')
Popen(cmd, shell = True, stdout = PIPE).communicate()
cmd = '{0} -svmtrain "{1}" -gnuplot "{2}" "{3}"'.format(grid_py, svmtrain_exe, gnuplot_exe, scaled_file)
print('Cross validation...')
f = Popen(cmd, shell = True, stdout = PIPE).stdout
line = ''
while True:
last_line = line
line = f.readline()
if not line: break
c,g,rate = map(float,last_line.split())
print('Best c={0}, g={1} CV rate={2}'.format(c,g,rate))
cmd = '{0} -c {1} -g {2} "{3}" "{4}"'.format(svmtrain_exe,c,g,scaled_file,model_file)
print('Training...')
Popen(cmd, shell = True, stdout = PIPE).communicate()
print('Output model: {0}'.format(model_file))
if len(sys.argv) > 2:
cmd = '{0} -r "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, test_pathname, scaled_test_file)
print('Scaling testing data...')
Popen(cmd, shell = True, stdout = PIPE).communicate()
cmd = '{0} "{1}" "{2}" "{3}"'.format(svmpredict_exe, scaled_test_file, model_file, predict_test_file)
print('Testing...')
Popen(cmd, shell = True).communicate()
print('Output prediction: {0}'.format(predict_test_file))
| 2,699 | 32.75 | 104 |
py
|
blend
|
blend-master/tools/libsvm-3.22/tools/checkdata.py
|
#!/usr/bin/env python
#
# A format checker for LIBSVM
#
#
# Copyright (c) 2007, Rong-En Fan
#
# All rights reserved.
#
# This program is distributed under the same license of the LIBSVM package.
#
from sys import argv, exit
import os.path
def err(line_no, msg):
print("line {0}: {1}".format(line_no, msg))
# works like float() but does not accept nan and inf
def my_float(x):
if x.lower().find("nan") != -1 or x.lower().find("inf") != -1:
raise ValueError
return float(x)
def main():
if len(argv) != 2:
print("Usage: {0} dataset".format(argv[0]))
exit(1)
dataset = argv[1]
if not os.path.exists(dataset):
print("dataset {0} not found".format(dataset))
exit(1)
line_no = 1
error_line_count = 0
for line in open(dataset, 'r'):
line_error = False
# each line must end with a newline character
if line[-1] != '\n':
err(line_no, "missing a newline character in the end")
line_error = True
nodes = line.split()
# check label
try:
label = nodes.pop(0)
if label.find(',') != -1:
# multi-label format
try:
for l in label.split(','):
l = my_float(l)
except:
err(line_no, "label {0} is not a valid multi-label form".format(label))
line_error = True
else:
try:
label = my_float(label)
except:
err(line_no, "label {0} is not a number".format(label))
line_error = True
except:
err(line_no, "missing label, perhaps an empty line?")
line_error = True
# check features
prev_index = -1
for i in range(len(nodes)):
try:
(index, value) = nodes[i].split(':')
index = int(index)
value = my_float(value)
# precomputed kernel's index starts from 0 and LIBSVM
# checks it. Hence, don't treat index 0 as an error.
if index < 0:
err(line_no, "feature index must be positive; wrong feature {0}".format(nodes[i]))
line_error = True
elif index <= prev_index:
err(line_no, "feature indices must be in an ascending order, previous/current features {0} {1}".format(nodes[i-1], nodes[i]))
line_error = True
prev_index = index
except:
err(line_no, "feature '{0}' not an <index>:<value> pair, <index> integer, <value> real number ".format(nodes[i]))
line_error = True
line_no += 1
if line_error:
error_line_count += 1
if error_line_count > 0:
print("Found {0} lines with error.".format(error_line_count))
return 1
else:
print("No error.")
return 0
if __name__ == "__main__":
exit(main())
| 2,479 | 21.752294 | 130 |
py
|
blend
|
blend-master/tools/libsvm-3.22/tools/grid.py
|
#!/usr/bin/env python
__all__ = ['find_parameters']
import os, sys, traceback, getpass, time, re
from threading import Thread
from subprocess import *
if sys.version_info[0] < 3:
from Queue import Queue
else:
from queue import Queue
telnet_workers = []
ssh_workers = []
nr_local_worker = 1
class GridOption:
def __init__(self, dataset_pathname, options):
dirname = os.path.dirname(__file__)
if sys.platform != 'win32':
self.svmtrain_pathname = os.path.join(dirname, '../svm-train')
self.gnuplot_pathname = '/usr/bin/gnuplot'
else:
# example for windows
self.svmtrain_pathname = os.path.join(dirname, r'..\windows\svm-train.exe')
# svmtrain_pathname = r'c:\Program Files\libsvm\windows\svm-train.exe'
self.gnuplot_pathname = r'c:\tmp\gnuplot\binary\pgnuplot.exe'
self.fold = 5
self.c_begin, self.c_end, self.c_step = -5, 15, 2
self.g_begin, self.g_end, self.g_step = 3, -15, -2
self.grid_with_c, self.grid_with_g = True, True
self.dataset_pathname = dataset_pathname
self.dataset_title = os.path.split(dataset_pathname)[1]
self.out_pathname = '{0}.out'.format(self.dataset_title)
self.png_pathname = '{0}.png'.format(self.dataset_title)
self.pass_through_string = ' '
self.resume_pathname = None
self.parse_options(options)
def parse_options(self, options):
if type(options) == str:
options = options.split()
i = 0
pass_through_options = []
while i < len(options):
if options[i] == '-log2c':
i = i + 1
if options[i] == 'null':
self.grid_with_c = False
else:
self.c_begin, self.c_end, self.c_step = map(float,options[i].split(','))
elif options[i] == '-log2g':
i = i + 1
if options[i] == 'null':
self.grid_with_g = False
else:
self.g_begin, self.g_end, self.g_step = map(float,options[i].split(','))
elif options[i] == '-v':
i = i + 1
self.fold = options[i]
elif options[i] in ('-c','-g'):
raise ValueError('Use -log2c and -log2g.')
elif options[i] == '-svmtrain':
i = i + 1
self.svmtrain_pathname = options[i]
elif options[i] == '-gnuplot':
i = i + 1
if options[i] == 'null':
self.gnuplot_pathname = None
else:
self.gnuplot_pathname = options[i]
elif options[i] == '-out':
i = i + 1
if options[i] == 'null':
self.out_pathname = None
else:
self.out_pathname = options[i]
elif options[i] == '-png':
i = i + 1
self.png_pathname = options[i]
elif options[i] == '-resume':
if i == (len(options)-1) or options[i+1].startswith('-'):
self.resume_pathname = self.dataset_title + '.out'
else:
i = i + 1
self.resume_pathname = options[i]
else:
pass_through_options.append(options[i])
i = i + 1
self.pass_through_string = ' '.join(pass_through_options)
if not os.path.exists(self.svmtrain_pathname):
raise IOError('svm-train executable not found')
if not os.path.exists(self.dataset_pathname):
raise IOError('dataset not found')
if self.resume_pathname and not os.path.exists(self.resume_pathname):
raise IOError('file for resumption not found')
if not self.grid_with_c and not self.grid_with_g:
raise ValueError('-log2c and -log2g should not be null simultaneously')
if self.gnuplot_pathname and not os.path.exists(self.gnuplot_pathname):
sys.stderr.write('gnuplot executable not found\n')
self.gnuplot_pathname = None
def redraw(db,best_param,gnuplot,options,tofile=False):
if len(db) == 0: return
begin_level = round(max(x[2] for x in db)) - 3
step_size = 0.5
best_log2c,best_log2g,best_rate = best_param
# if newly obtained c, g, or cv values are the same,
# then stop redrawing the contour.
if all(x[0] == db[0][0] for x in db): return
if all(x[1] == db[0][1] for x in db): return
if all(x[2] == db[0][2] for x in db): return
if tofile:
gnuplot.write(b"set term png transparent small linewidth 2 medium enhanced\n")
gnuplot.write("set output \"{0}\"\n".format(options.png_pathname.replace('\\','\\\\')).encode())
#gnuplot.write(b"set term postscript color solid\n")
#gnuplot.write("set output \"{0}.ps\"\n".format(options.dataset_title).encode().encode())
elif sys.platform == 'win32':
gnuplot.write(b"set term windows\n")
else:
gnuplot.write( b"set term x11\n")
gnuplot.write(b"set xlabel \"log2(C)\"\n")
gnuplot.write(b"set ylabel \"log2(gamma)\"\n")
gnuplot.write("set xrange [{0}:{1}]\n".format(options.c_begin,options.c_end).encode())
gnuplot.write("set yrange [{0}:{1}]\n".format(options.g_begin,options.g_end).encode())
gnuplot.write(b"set contour\n")
gnuplot.write("set cntrparam levels incremental {0},{1},100\n".format(begin_level,step_size).encode())
gnuplot.write(b"unset surface\n")
gnuplot.write(b"unset ztics\n")
gnuplot.write(b"set view 0,0\n")
gnuplot.write("set title \"{0}\"\n".format(options.dataset_title).encode())
gnuplot.write(b"unset label\n")
gnuplot.write("set label \"Best log2(C) = {0} log2(gamma) = {1} accuracy = {2}%\" \
at screen 0.5,0.85 center\n". \
format(best_log2c, best_log2g, best_rate).encode())
gnuplot.write("set label \"C = {0} gamma = {1}\""
" at screen 0.5,0.8 center\n".format(2**best_log2c, 2**best_log2g).encode())
gnuplot.write(b"set key at screen 0.9,0.9\n")
gnuplot.write(b"splot \"-\" with lines\n")
db.sort(key = lambda x:(x[0], -x[1]))
prevc = db[0][0]
for line in db:
if prevc != line[0]:
gnuplot.write(b"\n")
prevc = line[0]
gnuplot.write("{0[0]} {0[1]} {0[2]}\n".format(line).encode())
gnuplot.write(b"e\n")
gnuplot.write(b"\n") # force gnuplot back to prompt when term set failure
gnuplot.flush()
def calculate_jobs(options):
def range_f(begin,end,step):
# like range, but works on non-integer too
seq = []
while True:
if step > 0 and begin > end: break
if step < 0 and begin < end: break
seq.append(begin)
begin = begin + step
return seq
def permute_sequence(seq):
n = len(seq)
if n <= 1: return seq
mid = int(n/2)
left = permute_sequence(seq[:mid])
right = permute_sequence(seq[mid+1:])
ret = [seq[mid]]
while left or right:
if left: ret.append(left.pop(0))
if right: ret.append(right.pop(0))
return ret
c_seq = permute_sequence(range_f(options.c_begin,options.c_end,options.c_step))
g_seq = permute_sequence(range_f(options.g_begin,options.g_end,options.g_step))
if not options.grid_with_c:
c_seq = [None]
if not options.grid_with_g:
g_seq = [None]
nr_c = float(len(c_seq))
nr_g = float(len(g_seq))
i, j = 0, 0
jobs = []
while i < nr_c or j < nr_g:
if i/nr_c < j/nr_g:
# increase C resolution
line = []
for k in range(0,j):
line.append((c_seq[i],g_seq[k]))
i = i + 1
jobs.append(line)
else:
# increase g resolution
line = []
for k in range(0,i):
line.append((c_seq[k],g_seq[j]))
j = j + 1
jobs.append(line)
resumed_jobs = {}
if options.resume_pathname is None:
return jobs, resumed_jobs
for line in open(options.resume_pathname, 'r'):
line = line.strip()
rst = re.findall(r'rate=([0-9.]+)',line)
if not rst:
continue
rate = float(rst[0])
c, g = None, None
rst = re.findall(r'log2c=([0-9.-]+)',line)
if rst:
c = float(rst[0])
rst = re.findall(r'log2g=([0-9.-]+)',line)
if rst:
g = float(rst[0])
resumed_jobs[(c,g)] = rate
return jobs, resumed_jobs
class WorkerStopToken: # used to notify the worker to stop or if a worker is dead
pass
class Worker(Thread):
def __init__(self,name,job_queue,result_queue,options):
Thread.__init__(self)
self.name = name
self.job_queue = job_queue
self.result_queue = result_queue
self.options = options
def run(self):
while True:
(cexp,gexp) = self.job_queue.get()
if cexp is WorkerStopToken:
self.job_queue.put((cexp,gexp))
# print('worker {0} stop.'.format(self.name))
break
try:
c, g = None, None
if cexp != None:
c = 2.0**cexp
if gexp != None:
g = 2.0**gexp
rate = self.run_one(c,g)
if rate is None: raise RuntimeError('get no rate')
except:
# we failed, let others do that and we just quit
traceback.print_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
self.job_queue.put((cexp,gexp))
sys.stderr.write('worker {0} quit.\n'.format(self.name))
break
else:
self.result_queue.put((self.name,cexp,gexp,rate))
def get_cmd(self,c,g):
options=self.options
cmdline = '"' + options.svmtrain_pathname + '"'
if options.grid_with_c:
cmdline += ' -c {0} '.format(c)
if options.grid_with_g:
cmdline += ' -g {0} '.format(g)
cmdline += ' -v {0} {1} {2} '.format\
(options.fold,options.pass_through_string,options.dataset_pathname)
return cmdline
class LocalWorker(Worker):
def run_one(self,c,g):
cmdline = self.get_cmd(c,g)
result = Popen(cmdline,shell=True,stdout=PIPE,stderr=PIPE,stdin=PIPE).stdout
for line in result.readlines():
if str(line).find('Cross') != -1:
return float(line.split()[-1][0:-1])
class SSHWorker(Worker):
def __init__(self,name,job_queue,result_queue,host,options):
Worker.__init__(self,name,job_queue,result_queue,options)
self.host = host
self.cwd = os.getcwd()
def run_one(self,c,g):
cmdline = 'ssh -x -t -t {0} "cd {1}; {2}"'.format\
(self.host,self.cwd,self.get_cmd(c,g))
result = Popen(cmdline,shell=True,stdout=PIPE,stderr=PIPE,stdin=PIPE).stdout
for line in result.readlines():
if str(line).find('Cross') != -1:
return float(line.split()[-1][0:-1])
class TelnetWorker(Worker):
def __init__(self,name,job_queue,result_queue,host,username,password,options):
Worker.__init__(self,name,job_queue,result_queue,options)
self.host = host
self.username = username
self.password = password
def run(self):
import telnetlib
self.tn = tn = telnetlib.Telnet(self.host)
tn.read_until('login: ')
tn.write(self.username + '\n')
tn.read_until('Password: ')
tn.write(self.password + '\n')
# XXX: how to know whether login is successful?
tn.read_until(self.username)
#
print('login ok', self.host)
tn.write('cd '+os.getcwd()+'\n')
Worker.run(self)
tn.write('exit\n')
def run_one(self,c,g):
cmdline = self.get_cmd(c,g)
result = self.tn.write(cmdline+'\n')
(idx,matchm,output) = self.tn.expect(['Cross.*\n'])
for line in output.split('\n'):
if str(line).find('Cross') != -1:
return float(line.split()[-1][0:-1])
def find_parameters(dataset_pathname, options=''):
def update_param(c,g,rate,best_c,best_g,best_rate,worker,resumed):
if (rate > best_rate) or (rate==best_rate and g==best_g and c<best_c):
best_rate,best_c,best_g = rate,c,g
stdout_str = '[{0}] {1} {2} (best '.format\
(worker,' '.join(str(x) for x in [c,g] if x is not None),rate)
output_str = ''
if c != None:
stdout_str += 'c={0}, '.format(2.0**best_c)
output_str += 'log2c={0} '.format(c)
if g != None:
stdout_str += 'g={0}, '.format(2.0**best_g)
output_str += 'log2g={0} '.format(g)
stdout_str += 'rate={0})'.format(best_rate)
print(stdout_str)
if options.out_pathname and not resumed:
output_str += 'rate={0}\n'.format(rate)
result_file.write(output_str)
result_file.flush()
return best_c,best_g,best_rate
options = GridOption(dataset_pathname, options)
if options.gnuplot_pathname:
gnuplot = Popen(options.gnuplot_pathname,stdin = PIPE,stdout=PIPE,stderr=PIPE).stdin
else:
gnuplot = None
# put jobs in queue
jobs,resumed_jobs = calculate_jobs(options)
job_queue = Queue(0)
result_queue = Queue(0)
for (c,g) in resumed_jobs:
result_queue.put(('resumed',c,g,resumed_jobs[(c,g)]))
for line in jobs:
for (c,g) in line:
if (c,g) not in resumed_jobs:
job_queue.put((c,g))
# hack the queue to become a stack --
# this is important when some thread
# fails and re-puts a job. If we still
# use FIFO, the job will be put
# at the end of the queue, and the graph
# will only be updated at the end
job_queue._put = job_queue.queue.appendleft
# fire telnet workers
if telnet_workers:
nr_telnet_worker = len(telnet_workers)
username = getpass.getuser()
password = getpass.getpass()
for host in telnet_workers:
worker = TelnetWorker(host,job_queue,result_queue,
host,username,password,options)
worker.start()
# fire ssh workers
if ssh_workers:
for host in ssh_workers:
worker = SSHWorker(host,job_queue,result_queue,host,options)
worker.start()
# fire local workers
for i in range(nr_local_worker):
worker = LocalWorker('local',job_queue,result_queue,options)
worker.start()
# gather results
done_jobs = {}
if options.out_pathname:
if options.resume_pathname:
result_file = open(options.out_pathname, 'a')
else:
result_file = open(options.out_pathname, 'w')
db = []
best_rate = -1
best_c,best_g = None,None
for (c,g) in resumed_jobs:
rate = resumed_jobs[(c,g)]
best_c,best_g,best_rate = update_param(c,g,rate,best_c,best_g,best_rate,'resumed',True)
for line in jobs:
for (c,g) in line:
while (c,g) not in done_jobs:
(worker,c1,g1,rate1) = result_queue.get()
done_jobs[(c1,g1)] = rate1
if (c1,g1) not in resumed_jobs:
best_c,best_g,best_rate = update_param(c1,g1,rate1,best_c,best_g,best_rate,worker,False)
db.append((c,g,done_jobs[(c,g)]))
if gnuplot and options.grid_with_c and options.grid_with_g:
redraw(db,[best_c, best_g, best_rate],gnuplot,options)
redraw(db,[best_c, best_g, best_rate],gnuplot,options,True)
if options.out_pathname:
result_file.close()
job_queue.put((WorkerStopToken,None))
best_param, best_cg = {}, []
if best_c != None:
best_param['c'] = 2.0**best_c
best_cg += [2.0**best_c]
if best_g != None:
best_param['g'] = 2.0**best_g
best_cg += [2.0**best_g]
print('{0} {1}'.format(' '.join(map(str,best_cg)), best_rate))
return best_rate, best_param
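# Illustrative usage sketch (added, not part of the original script): the search
# can also be driven programmatically. 'heart_scale' below is only an example
# LIBSVM-format file; svm-train must be reachable through the GridOption defaults.
# rate, param = find_parameters('heart_scale', '-log2c -1,1,1 -log2g -1,1,1')
# print(param.get('c'), param.get('g'), rate)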
if __name__ == '__main__':
def exit_with_help():
print("""\
Usage: grid.py [grid_options] [svm_options] dataset
grid_options :
-log2c {begin,end,step | "null"} : set the range of c (default -5,15,2)
begin,end,step -- c_range = 2^{begin,...,begin+k*step,...,end}
"null" -- do not grid with c
-log2g {begin,end,step | "null"} : set the range of g (default 3,-15,-2)
begin,end,step -- g_range = 2^{begin,...,begin+k*step,...,end}
"null" -- do not grid with g
-v n : n-fold cross validation (default 5)
-svmtrain pathname : set svm executable path and name
-gnuplot {pathname | "null"} :
pathname -- set gnuplot executable path and name
"null" -- do not plot
-out {pathname | "null"} : (default dataset.out)
pathname -- set output file path and name
"null" -- do not output file
-png pathname : set graphic output file path and name (default dataset.png)
-resume [pathname] : resume the grid task using an existing output file (default pathname is dataset.out)
This is experimental. Try this option only if some parameters have been checked for the SAME data.
svm_options : additional options for svm-train""")
sys.exit(1)
if len(sys.argv) < 2:
exit_with_help()
dataset_pathname = sys.argv[-1]
options = sys.argv[1:-1]
try:
find_parameters(dataset_pathname, options)
except (IOError,ValueError) as e:
sys.stderr.write(str(e) + '\n')
sys.stderr.write('Try "grid.py" for more information.\n')
sys.exit(1)
| 15,316 | 29.572854 | 105 |
py
|
blend
|
blend-master/tools/libsvm-3.22/tools/subset.py
|
#!/usr/bin/env python
import os, sys, math, random
from collections import defaultdict
if sys.version_info[0] >= 3:
xrange = range
def exit_with_help(argv):
print("""\
Usage: {0} [options] dataset subset_size [output1] [output2]
This script randomly selects a subset of the dataset.
options:
-s method : method of selection (default 0)
0 -- stratified selection (classification only)
1 -- random selection
output1 : the subset (optional)
output2 : rest of the data (optional)
If output1 is omitted, the subset will be printed on the screen.""".format(argv[0]))
exit(1)
def process_options(argv):
argc = len(argv)
if argc < 3:
exit_with_help(argv)
# default method is stratified selection
method = 0
subset_file = sys.stdout
rest_file = None
i = 1
while i < argc:
if argv[i][0] != "-":
break
if argv[i] == "-s":
i = i + 1
method = int(argv[i])
if method not in [0,1]:
print("Unknown selection method {0}".format(method))
exit_with_help(argv)
i = i + 1
dataset = argv[i]
subset_size = int(argv[i+1])
if i+2 < argc:
subset_file = open(argv[i+2],'w')
if i+3 < argc:
rest_file = open(argv[i+3],'w')
return dataset, subset_size, method, subset_file, rest_file
def random_selection(dataset, subset_size):
l = sum(1 for line in open(dataset,'r'))
return sorted(random.sample(xrange(l), subset_size))
def stratified_selection(dataset, subset_size):
labels = [line.split(None,1)[0] for line in open(dataset)]
label_linenums = defaultdict(list)
for i, label in enumerate(labels):
label_linenums[label] += [i]
l = len(labels)
remaining = subset_size
ret = []
# classes with fewer data are sampled first; otherwise
# some rare classes may not be selected
for label in sorted(label_linenums, key=lambda x: len(label_linenums[x])):
linenums = label_linenums[label]
label_size = len(linenums)
# at least one instance per class
s = int(min(remaining, max(1, math.ceil(label_size*(float(subset_size)/l)))))
if s == 0:
sys.stderr.write('''\
Error: failed to have at least one instance per class
1. You may have regression data.
2. Your classification data is unbalanced or too small.
Please use -s 1.
''')
sys.exit(-1)
remaining -= s
ret += [linenums[i] for i in random.sample(xrange(label_size), s)]
return sorted(ret)
def main(argv=sys.argv):
dataset, subset_size, method, subset_file, rest_file = process_options(argv)
#uncomment the following line to fix the random seed
#random.seed(0)
selected_lines = []
if method == 0:
selected_lines = stratified_selection(dataset, subset_size)
elif method == 1:
selected_lines = random_selection(dataset, subset_size)
#select instances based on selected_lines
dataset = open(dataset,'r')
prev_selected_linenum = -1
for i in xrange(len(selected_lines)):
for cnt in xrange(selected_lines[i]-prev_selected_linenum-1):
line = dataset.readline()
if rest_file:
rest_file.write(line)
subset_file.write(dataset.readline())
prev_selected_linenum = selected_lines[i]
subset_file.close()
if rest_file:
for line in dataset:
rest_file.write(line)
rest_file.close()
dataset.close()
if __name__ == '__main__':
main(sys.argv)
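# Illustrative command line (added; 'heart_scale' is just an example LIBSVM file):
# draw a stratified 100-line subset into subset.out and the remainder into rest.out:
# python subset.py -s 0 heart_scale 100 subset.out rest.out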
| 3,202 | 25.471074 | 84 |
py
|
blend
|
blend-master/tools/libsvm-3.22/python/svm.py
|
#!/usr/bin/env python
from ctypes import *
from ctypes.util import find_library
from os import path
import sys
if sys.version_info[0] >= 3:
xrange = range
__all__ = ['libsvm', 'svm_problem', 'svm_parameter',
'toPyModel', 'gen_svm_nodearray', 'print_null', 'svm_node', 'C_SVC',
'EPSILON_SVR', 'LINEAR', 'NU_SVC', 'NU_SVR', 'ONE_CLASS',
'POLY', 'PRECOMPUTED', 'PRINT_STRING_FUN', 'RBF',
'SIGMOID', 'c_double', 'svm_model']
try:
dirname = path.dirname(path.abspath(__file__))
if sys.platform == 'win32':
libsvm = CDLL(path.join(dirname, r'..\windows\libsvm.dll'))
else:
libsvm = CDLL(path.join(dirname, '../libsvm.so.2'))
except:
# For unix the prefix 'lib' is not considered.
if find_library('svm'):
libsvm = CDLL(find_library('svm'))
elif find_library('libsvm'):
libsvm = CDLL(find_library('libsvm'))
else:
raise Exception('LIBSVM library not found.')
C_SVC = 0
NU_SVC = 1
ONE_CLASS = 2
EPSILON_SVR = 3
NU_SVR = 4
LINEAR = 0
POLY = 1
RBF = 2
SIGMOID = 3
PRECOMPUTED = 4
PRINT_STRING_FUN = CFUNCTYPE(None, c_char_p)
def print_null(s):
return
def genFields(names, types):
return list(zip(names, types))
def fillprototype(f, restype, argtypes):
f.restype = restype
f.argtypes = argtypes
class svm_node(Structure):
_names = ["index", "value"]
_types = [c_int, c_double]
_fields_ = genFields(_names, _types)
def __str__(self):
return '%d:%g' % (self.index, self.value)
def gen_svm_nodearray(xi, feature_max=None, isKernel=None):
if isinstance(xi, dict):
index_range = xi.keys()
elif isinstance(xi, (list, tuple)):
if not isKernel:
xi = [0] + xi # idx should start from 1
index_range = range(len(xi))
else:
raise TypeError('xi should be a dictionary, list or tuple')
if feature_max:
assert(isinstance(feature_max, int))
index_range = filter(lambda j: j <= feature_max, index_range)
if not isKernel:
index_range = filter(lambda j:xi[j] != 0, index_range)
index_range = sorted(index_range)
ret = (svm_node * (len(index_range)+1))()
ret[-1].index = -1
for idx, j in enumerate(index_range):
ret[idx].index = j
ret[idx].value = xi[j]
max_idx = 0
if index_range:
max_idx = index_range[-1]
return ret, max_idx
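# Illustrative example (added): converting a sparse feature mapping into the
# ctypes array expected by libsvm; indices are 1-based and the array is closed
# by a sentinel node with index -1.
# nodes, max_idx = gen_svm_nodearray({1: 0.5, 3: 1.2}) # max_idx == 3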
class svm_problem(Structure):
_names = ["l", "y", "x"]
_types = [c_int, POINTER(c_double), POINTER(POINTER(svm_node))]
_fields_ = genFields(_names, _types)
def __init__(self, y, x, isKernel=None):
if len(y) != len(x):
raise ValueError("len(y) != len(x)")
self.l = l = len(y)
max_idx = 0
x_space = self.x_space = []
for i, xi in enumerate(x):
tmp_xi, tmp_idx = gen_svm_nodearray(xi,isKernel=isKernel)
x_space += [tmp_xi]
max_idx = max(max_idx, tmp_idx)
self.n = max_idx
self.y = (c_double * l)()
for i, yi in enumerate(y): self.y[i] = yi
self.x = (POINTER(svm_node) * l)()
for i, xi in enumerate(self.x_space): self.x[i] = xi
class svm_parameter(Structure):
_names = ["svm_type", "kernel_type", "degree", "gamma", "coef0",
"cache_size", "eps", "C", "nr_weight", "weight_label", "weight",
"nu", "p", "shrinking", "probability"]
_types = [c_int, c_int, c_int, c_double, c_double,
c_double, c_double, c_double, c_int, POINTER(c_int), POINTER(c_double),
c_double, c_double, c_int, c_int]
_fields_ = genFields(_names, _types)
def __init__(self, options = None):
if options == None:
options = ''
self.parse_options(options)
def __str__(self):
s = ''
attrs = svm_parameter._names + list(self.__dict__.keys())
values = map(lambda attr: getattr(self, attr), attrs)
for attr, val in zip(attrs, values):
s += (' %s: %s\n' % (attr, val))
s = s.strip()
return s
def set_to_default_values(self):
self.svm_type = C_SVC
self.kernel_type = RBF
self.degree = 3
self.gamma = 0
self.coef0 = 0
self.nu = 0.5
self.cache_size = 100
self.C = 1
self.eps = 0.001
self.p = 0.1
self.shrinking = 1
self.probability = 0
self.nr_weight = 0
self.weight_label = None
self.weight = None
self.cross_validation = False
self.nr_fold = 0
self.print_func = cast(None, PRINT_STRING_FUN)
def parse_options(self, options):
if isinstance(options, list):
argv = options
elif isinstance(options, str):
argv = options.split()
else:
raise TypeError("arg 1 should be a list or a str.")
self.set_to_default_values()
self.print_func = cast(None, PRINT_STRING_FUN)
weight_label = []
weight = []
i = 0
while i < len(argv):
if argv[i] == "-s":
i = i + 1
self.svm_type = int(argv[i])
elif argv[i] == "-t":
i = i + 1
self.kernel_type = int(argv[i])
elif argv[i] == "-d":
i = i + 1
self.degree = int(argv[i])
elif argv[i] == "-g":
i = i + 1
self.gamma = float(argv[i])
elif argv[i] == "-r":
i = i + 1
self.coef0 = float(argv[i])
elif argv[i] == "-n":
i = i + 1
self.nu = float(argv[i])
elif argv[i] == "-m":
i = i + 1
self.cache_size = float(argv[i])
elif argv[i] == "-c":
i = i + 1
self.C = float(argv[i])
elif argv[i] == "-e":
i = i + 1
self.eps = float(argv[i])
elif argv[i] == "-p":
i = i + 1
self.p = float(argv[i])
elif argv[i] == "-h":
i = i + 1
self.shrinking = int(argv[i])
elif argv[i] == "-b":
i = i + 1
self.probability = int(argv[i])
elif argv[i] == "-q":
self.print_func = PRINT_STRING_FUN(print_null)
elif argv[i] == "-v":
i = i + 1
self.cross_validation = 1
self.nr_fold = int(argv[i])
if self.nr_fold < 2:
raise ValueError("n-fold cross validation: n must >= 2")
elif argv[i].startswith("-w"):
i = i + 1
self.nr_weight += 1
weight_label += [int(argv[i-1][2:])]
weight += [float(argv[i])]
else:
raise ValueError("Wrong options")
i += 1
libsvm.svm_set_print_string_function(self.print_func)
self.weight_label = (c_int*self.nr_weight)()
self.weight = (c_double*self.nr_weight)()
for i in range(self.nr_weight):
self.weight[i] = weight[i]
self.weight_label[i] = weight_label[i]
class svm_model(Structure):
_names = ['param', 'nr_class', 'l', 'SV', 'sv_coef', 'rho',
'probA', 'probB', 'sv_indices', 'label', 'nSV', 'free_sv']
_types = [svm_parameter, c_int, c_int, POINTER(POINTER(svm_node)),
POINTER(POINTER(c_double)), POINTER(c_double),
POINTER(c_double), POINTER(c_double), POINTER(c_int),
POINTER(c_int), POINTER(c_int), c_int]
_fields_ = genFields(_names, _types)
def __init__(self):
self.__createfrom__ = 'python'
def __del__(self):
# free memory created by C to avoid memory leak
if hasattr(self, '__createfrom__') and self.__createfrom__ == 'C':
libsvm.svm_free_and_destroy_model(pointer(self))
def get_svm_type(self):
return libsvm.svm_get_svm_type(self)
def get_nr_class(self):
return libsvm.svm_get_nr_class(self)
def get_svr_probability(self):
return libsvm.svm_get_svr_probability(self)
def get_labels(self):
nr_class = self.get_nr_class()
labels = (c_int * nr_class)()
libsvm.svm_get_labels(self, labels)
return labels[:nr_class]
def get_sv_indices(self):
total_sv = self.get_nr_sv()
sv_indices = (c_int * total_sv)()
libsvm.svm_get_sv_indices(self, sv_indices)
return sv_indices[:total_sv]
def get_nr_sv(self):
return libsvm.svm_get_nr_sv(self)
def is_probability_model(self):
return (libsvm.svm_check_probability_model(self) == 1)
def get_sv_coef(self):
return [tuple(self.sv_coef[j][i] for j in xrange(self.nr_class - 1))
for i in xrange(self.l)]
def get_SV(self):
result = []
for sparse_sv in self.SV[:self.l]:
row = dict()
i = 0
while True:
row[sparse_sv[i].index] = sparse_sv[i].value
if sparse_sv[i].index == -1:
break
i += 1
result.append(row)
return result
def toPyModel(model_ptr):
"""
toPyModel(model_ptr) -> svm_model
Convert a ctypes POINTER(svm_model) to a Python svm_model
"""
if bool(model_ptr) == False:
raise ValueError("Null pointer")
m = model_ptr.contents
m.__createfrom__ = 'C'
return m
fillprototype(libsvm.svm_train, POINTER(svm_model), [POINTER(svm_problem), POINTER(svm_parameter)])
fillprototype(libsvm.svm_cross_validation, None, [POINTER(svm_problem), POINTER(svm_parameter), c_int, POINTER(c_double)])
fillprototype(libsvm.svm_save_model, c_int, [c_char_p, POINTER(svm_model)])
fillprototype(libsvm.svm_load_model, POINTER(svm_model), [c_char_p])
fillprototype(libsvm.svm_get_svm_type, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_get_nr_class, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_get_labels, None, [POINTER(svm_model), POINTER(c_int)])
fillprototype(libsvm.svm_get_sv_indices, None, [POINTER(svm_model), POINTER(c_int)])
fillprototype(libsvm.svm_get_nr_sv, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_get_svr_probability, c_double, [POINTER(svm_model)])
fillprototype(libsvm.svm_predict_values, c_double, [POINTER(svm_model), POINTER(svm_node), POINTER(c_double)])
fillprototype(libsvm.svm_predict, c_double, [POINTER(svm_model), POINTER(svm_node)])
fillprototype(libsvm.svm_predict_probability, c_double, [POINTER(svm_model), POINTER(svm_node), POINTER(c_double)])
fillprototype(libsvm.svm_free_model_content, None, [POINTER(svm_model)])
fillprototype(libsvm.svm_free_and_destroy_model, None, [POINTER(POINTER(svm_model))])
fillprototype(libsvm.svm_destroy_param, None, [POINTER(svm_parameter)])
fillprototype(libsvm.svm_check_parameter, c_char_p, [POINTER(svm_problem), POINTER(svm_parameter)])
fillprototype(libsvm.svm_check_probability_model, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_set_print_string_function, None, [PRINT_STRING_FUN])
| 9,603 | 28.015106 | 122 |
py
|
blend
|
blend-master/tools/libsvm-3.22/python/svmutil.py
|
#!/usr/bin/env python
import os
import sys
from svm import *
from svm import __all__ as svm_all
__all__ = ['evaluations', 'svm_load_model', 'svm_predict', 'svm_read_problem',
'svm_save_model', 'svm_train'] + svm_all
sys.path = [os.path.dirname(os.path.abspath(__file__))] + sys.path
def svm_read_problem(data_file_name):
"""
svm_read_problem(data_file_name) -> [y, x]
Read LIBSVM-format data from data_file_name and return labels y
and data instances x.
"""
prob_y = []
prob_x = []
for line in open(data_file_name):
line = line.split(None, 1)
# In case an instance with all zero features
if len(line) == 1: line += ['']
label, features = line
xi = {}
for e in features.split():
ind, val = e.split(":")
xi[int(ind)] = float(val)
prob_y += [float(label)]
prob_x += [xi]
return (prob_y, prob_x)
def svm_load_model(model_file_name):
"""
svm_load_model(model_file_name) -> model
Load a LIBSVM model from model_file_name and return.
"""
model = libsvm.svm_load_model(model_file_name.encode())
if not model:
print("can't open model file %s" % model_file_name)
return None
model = toPyModel(model)
return model
def svm_save_model(model_file_name, model):
"""
svm_save_model(model_file_name, model) -> None
Save a LIBSVM model to the file model_file_name.
"""
libsvm.svm_save_model(model_file_name.encode(), model)
def evaluations(ty, pv):
"""
evaluations(ty, pv) -> (ACC, MSE, SCC)
Calculate accuracy, mean squared error and squared correlation coefficient
using the true values (ty) and predicted values (pv).
"""
if len(ty) != len(pv):
raise ValueError("len(ty) must equal to len(pv)")
total_correct = total_error = 0
sumv = sumy = sumvv = sumyy = sumvy = 0
for v, y in zip(pv, ty):
if y == v:
total_correct += 1
total_error += (v-y)*(v-y)
sumv += v
sumy += y
sumvv += v*v
sumyy += y*y
sumvy += v*y
l = len(ty)
ACC = 100.0*total_correct/l
MSE = total_error/l
try:
SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy))
except:
SCC = float('nan')
return (ACC, MSE, SCC)
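# Illustrative example (added): with true labels ty = [1, -1, 1] and predictions
# pv = [1, 1, 1], evaluations(ty, pv) gives ACC of about 66.67, MSE of about 1.33,
# and SCC of nan (the predictions have zero variance).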
def svm_train(arg1, arg2=None, arg3=None):
"""
svm_train(y, x [, options]) -> model | ACC | MSE
svm_train(prob [, options]) -> model | ACC | MSE
svm_train(prob, param) -> model | ACC | MSE
Train an SVM model from data (y, x) or an svm_problem prob using
'options' or an svm_parameter param.
If '-v' is specified in 'options' (i.e., cross validation)
either accuracy (ACC) or mean-squared error (MSE) is returned.
options:
-s svm_type : set type of SVM (default 0)
0 -- C-SVC (multi-class classification)
1 -- nu-SVC (multi-class classification)
2 -- one-class SVM
3 -- epsilon-SVR (regression)
4 -- nu-SVR (regression)
-t kernel_type : set type of kernel function (default 2)
0 -- linear: u'*v
1 -- polynomial: (gamma*u'*v + coef0)^degree
2 -- radial basis function: exp(-gamma*|u-v|^2)
3 -- sigmoid: tanh(gamma*u'*v + coef0)
4 -- precomputed kernel (kernel values in training_set_file)
-d degree : set degree in kernel function (default 3)
-g gamma : set gamma in kernel function (default 1/num_features)
-r coef0 : set coef0 in kernel function (default 0)
-c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1)
-n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5)
-p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)
-m cachesize : set cache memory size in MB (default 100)
-e epsilon : set tolerance of termination criterion (default 0.001)
-h shrinking : whether to use the shrinking heuristics, 0 or 1 (default 1)
-b probability_estimates : whether to train a SVC or SVR model for probability estimates, 0 or 1 (default 0)
-wi weight : set the parameter C of class i to weight*C, for C-SVC (default 1)
-v n: n-fold cross validation mode
-q : quiet mode (no outputs)
"""
prob, param = None, None
if isinstance(arg1, (list, tuple)):
assert isinstance(arg2, (list, tuple))
y, x, options = arg1, arg2, arg3
param = svm_parameter(options)
prob = svm_problem(y, x, isKernel=(param.kernel_type == PRECOMPUTED))
elif isinstance(arg1, svm_problem):
prob = arg1
if isinstance(arg2, svm_parameter):
param = arg2
else:
param = svm_parameter(arg2)
if prob == None or param == None:
raise TypeError("Wrong types for the arguments")
if param.kernel_type == PRECOMPUTED:
for xi in prob.x_space:
idx, val = xi[0].index, xi[0].value
if xi[0].index != 0:
raise ValueError('Wrong input format: first column must be 0:sample_serial_number')
if val <= 0 or val > prob.n:
raise ValueError('Wrong input format: sample_serial_number out of range')
if param.gamma == 0 and prob.n > 0:
param.gamma = 1.0 / prob.n
libsvm.svm_set_print_string_function(param.print_func)
err_msg = libsvm.svm_check_parameter(prob, param)
if err_msg:
raise ValueError('Error: %s' % err_msg)
if param.cross_validation:
l, nr_fold = prob.l, param.nr_fold
target = (c_double * l)()
libsvm.svm_cross_validation(prob, param, nr_fold, target)
ACC, MSE, SCC = evaluations(prob.y[:l], target[:l])
if param.svm_type in [EPSILON_SVR, NU_SVR]:
print("Cross Validation Mean squared error = %g" % MSE)
print("Cross Validation Squared correlation coefficient = %g" % SCC)
return MSE
else:
print("Cross Validation Accuracy = %g%%" % ACC)
return ACC
else:
m = libsvm.svm_train(prob, param)
m = toPyModel(m)
# If prob is destroyed, data including SVs pointed by m can remain.
m.x_space = prob.x_space
return m
def svm_predict(y, x, m, options=""):
"""
svm_predict(y, x, m [, options]) -> (p_labels, p_acc, p_vals)
Predict data (y, x) with the SVM model m.
options:
-b probability_estimates: whether to predict probability estimates,
0 or 1 (default 0); for one-class SVM only 0 is supported.
-q : quiet mode (no outputs).
The return tuple contains
p_labels: a list of predicted labels
p_acc: a tuple including accuracy (for classification), mean-squared
error, and squared correlation coefficient (for regression).
p_vals: a list of decision values or probability estimates (if '-b 1'
is specified). If k is the number of classes, for decision values,
each element includes results of predicting k(k-1)/2 binary-class
SVMs. For probabilities, each element contains k values indicating
the probability that the testing instance is in each class.
Note that the order of classes here is the same as 'model.label'
field in the model structure.
"""
def info(s):
print(s)
predict_probability = 0
argv = options.split()
i = 0
while i < len(argv):
if argv[i] == '-b':
i += 1
predict_probability = int(argv[i])
elif argv[i] == '-q':
info = print_null
else:
raise ValueError("Wrong options")
i+=1
svm_type = m.get_svm_type()
is_prob_model = m.is_probability_model()
nr_class = m.get_nr_class()
pred_labels = []
pred_values = []
if predict_probability:
if not is_prob_model:
raise ValueError("Model does not support probabiliy estimates")
if svm_type in [NU_SVR, EPSILON_SVR]:
info("Prob. model for test data: target value = predicted value + z,\n"
"z: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma=%g" % m.get_svr_probability());
nr_class = 0
prob_estimates = (c_double * nr_class)()
for xi in x:
xi, idx = gen_svm_nodearray(xi, isKernel=(m.param.kernel_type == PRECOMPUTED))
label = libsvm.svm_predict_probability(m, xi, prob_estimates)
values = prob_estimates[:nr_class]
pred_labels += [label]
pred_values += [values]
else:
if is_prob_model:
info("Model supports probability estimates, but disabled in predicton.")
# one-class and regression models produce a single decision value per instance
if svm_type in (ONE_CLASS, EPSILON_SVR, NU_SVR):
nr_classifier = 1
else:
nr_classifier = nr_class*(nr_class-1)//2
dec_values = (c_double * nr_classifier)()
for xi in x:
xi, idx = gen_svm_nodearray(xi, isKernel=(m.param.kernel_type == PRECOMPUTED))
label = libsvm.svm_predict_values(m, xi, dec_values)
if(nr_class == 1):
values = [1]
else:
values = dec_values[:nr_classifier]
pred_labels += [label]
pred_values += [values]
ACC, MSE, SCC = evaluations(y, pred_labels)
l = len(y)
if svm_type in [EPSILON_SVR, NU_SVR]:
info("Mean squared error = %g (regression)" % MSE)
info("Squared correlation coefficient = %g (regression)" % SCC)
else:
info("Accuracy = %g%% (%d/%d) (classification)" % (ACC, int(l*ACC/100), l))
return pred_labels, (ACC, MSE, SCC), pred_values
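# Minimal usage sketch (added; 'heart_scale' is just an example LIBSVM-format
# file assumed to sit in the working directory):
# y, x = svm_read_problem('heart_scale')
# m = svm_train(y[:200], x[:200], '-c 4')
# p_label, p_acc, p_val = svm_predict(y[200:], x[200:], m)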
| 8,695 | 32.064639 | 113 |
py
|
blend
|
blend-master/tools/beer_2.0/lib/platt.py
|
#!/usr/bin/env python
from sys import argv
#from svm import *
from math import log, exp
from random import randrange
#--[Basic Function]---------------------------------------------------------------------
#input decision_values, real_labels{1,-1}, #positive_instances, #negative_instances
#output [A,B] that minimize sigmoid likelihood
#refer to Platt's Probabilistic Outputs for Support Vector Machines
def SigmoidTrain(deci, label, prior1=None, prior0=None):
#Count prior0 and prior1 if needed
if prior1==None or prior0==None:
prior1, prior0 = 0, 0
for i in range(len(label)):
if label[i] > 0:
prior1+=1
else:
prior0+=1
#Parameter Setting
maxiter=100 #Maximum number of iterations
minstep=1e-10 #Minimum step taken in line search
sigma=1e-12 #For numerically strict PD of Hessian
eps=1e-5
#Construct Target Support
hiTarget=(prior1+1.0)/(prior1+2.0)
loTarget=1/(prior0+2.0)
length=prior1+prior0
t=[]
for i in range(length):
if label[i] > 0:
t.append(hiTarget)
else:
t.append(loTarget)
#Initial Point and Initial Fun Value
A,B=0.0, log((prior0+1.0)/(prior1+1.0))
fval = 0.0
for i in range(length):
fApB = deci[i]*A+B
if fApB >= 0:
fval += t[i]*fApB + log(1+exp(-fApB))
else:
fval += (t[i] - 1)*fApB +log(1+exp(fApB))
for it in range(maxiter):
#Update Gradient and Hessian (use H' = H + sigma I)
h11=h22=sigma #Numerically ensures strict PD
h21=g1=g2=0.0
for i in range(length):
fApB = deci[i]*A+B
if (fApB >= 0):
p=exp(-fApB)/(1.0+exp(-fApB))
q=1.0/(1.0+exp(-fApB))
else:
p=1.0/(1.0+exp(fApB))
q=exp(fApB)/(1.0+exp(fApB))
d2=p*q
h11+=deci[i]*deci[i]*d2
h22+=d2
h21+=deci[i]*d2
d1=t[i]-p
g1+=deci[i]*d1
g2+=d1
#Stopping Criteria
if abs(g1)<eps and abs(g2)<eps:
break
#Finding Newton direction: -inv(H') * g
det=h11*h22-h21*h21
dA=-(h22*g1 - h21 * g2) / det
dB=-(-h21*g1+ h11 * g2) / det
gd=g1*dA+g2*dB
#Line Search
stepsize = 1
while stepsize >= minstep:
newA = A + stepsize * dA
newB = B + stepsize * dB
#New function value
newf = 0.0
for i in range(length):
fApB = deci[i]*newA+newB
if fApB >= 0:
newf += t[i]*fApB + log(1+exp(-fApB))
else:
newf += (t[i] - 1)*fApB +log(1+exp(fApB))
#Check sufficient decrease
if newf < fval + 0.0001 * stepsize * gd:
A, B, fval = newA, newB, newf
break
else:
stepsize = stepsize / 2.0
if stepsize < minstep:
print "line search fails",A,B,g1,g2,dA,dB,gd
return [A,B]
if it>=maxiter-1:
print "reaching maximal iterations",g1,g2
return [A,B]
#reads decision_value and Platt parameter [A,B]
#outputs predicted probability
def SigmoidPredict(deci, AB):
A, B = AB
fApB = deci * A + B
if (fApB >= 0):
return exp(-fApB)/(1.0+exp(-fApB))
else:
return 1.0/(1+exp(fApB))
deci = []
label = []
with open(argv[1]) as f:
for line in f:
fields = line.split()
deci.append(float(fields[0]))
label.append(int(fields[1]))
[A,B] = SigmoidTrain(deci, label)
print(A, B)
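# Illustrative sketch (added): the two functions above can also be imported and
# called directly; the decision values and labels below are made-up toy data.
# AB = SigmoidTrain([2.0, 1.0, -1.5, -2.0], [1, 1, -1, -1])
# print(SigmoidPredict(0.5, AB)) # probability that a decision value of 0.5 is positive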
| 3,067 | 22.242424 | 88 |
py
|
blend
|
blend-master/tools/characTER-master/CharacTER.py
|
#! /usr/bin/env python2
# -*- coding:utf-8 -*-
"""
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import division, print_function
import re
import codecs
import sys
import itertools
import math
# Character error rate calculator, both hyp and ref are word lists
def cer(hyp, ref):
hyp_words, ref_words = list(hyp), list(ref)
ed_calc = CachedEditDistance(ref_words)
hyp_backup = hyp_words
"""
Shifting phrases of the hypothesis sentence until the edit distance from
the reference sentence is minimized
"""
while True:
(diff, hyp_words) = shifter(hyp_words, ref_words, ed_calc)
if diff <= 0:
break
shift_cost = _shift_cost(hyp_words, hyp_backup)
shifted_chars = list(" ".join(hyp_words))
ref_chars = list(" ".join(ref_words))
edit_cost = edit_distance(shifted_chars, ref_chars) + shift_cost
return edit_cost / len(shifted_chars)
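# Illustrative example (added): cer can be called directly on tokenized sentences,
# e.g. cer("the cat sat on the mat".split(), "the cat sat on a mat".split());
# lower scores indicate a closer match to the reference.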
"""
Some phrases in hypothesis sentences will be shifted, in order to minimize
edit distances from reference sentences. The function takes the hypothesis and
reference word lists together with the cached edit distance calculator, and
returns the difference in edit distance before and after shifting along with
the shifted version of the hypothesis sentence.
"""
def shifter(hyp_words, ref_words, ed_calc):
pre_score = ed_calc(hyp_words)
scores = []
# Changing the phrase order of the hypothesis sentence
for hyp_start, ref_start, length in couple_discoverer(hyp_words, ref_words):
shifted_words = hyp_words[:hyp_start] + hyp_words[hyp_start+length:]
shifted_words[ref_start:ref_start] = hyp_words[hyp_start:hyp_start+length]
scores.append((pre_score - ed_calc(shifted_words), shifted_words))
# The case that the phrase order has not to be changed
if not scores:
return (0, hyp_words)
scores.sort()
return scores[-1]
"""
This function finds identical phrases in sentence_1 and sentence_2, and
yields the corresponding start positions in both sentences as well as the
maximal phrase length. Both sentences are represented as word lists.
"""
def couple_discoverer(sentence_1, sentence_2):
# Applying the cartesian product to traversing both sentences
for start_1, start_2 in \
itertools.product(range(len(sentence_1)), range(len(sentence_2))):
# No need to shift if the positions are the same
if start_1 == start_2:
continue
# If identical words are found in different positions of two sentences
if sentence_1[start_1] == sentence_2[start_2]:
length = 1
# Go further to next positions of sentence_1 to learn longer phrase
for step in range(1, len(sentence_1) - start_1):
end_1, end_2 = start_1 + step, start_2 + step
# If the new detected phrase is also contained in sentence_2
if end_2 < len(sentence_2) and sentence_1[end_1] == sentence_2[end_2]:
length += 1
else:
break
yield (start_1, start_2, length)
# Identical to Levenshtein distance
def edit_distance(sentence_1, sentence_2):
# Keep sentence_2 as the shorter sentence
if len(sentence_1) < len(sentence_2):
return edit_distance(sentence_2, sentence_1)
"""
If one sentence does not contain any words, the edit distance should be the
length of the other sentence
"""
if len(sentence_2) == 0:
return len(sentence_1)
previous_row = range(len(sentence_2) + 1)
# Go through the first sentence
for i, character_1 in enumerate(sentence_1):
current_row = [i+1]
# Go through the second sentence and check the Levenshtein distance
for j, character_2 in enumerate(sentence_2):
insertions = previous_row[j + 1] + 1
deletions = current_row[j] + 1
substitutions = previous_row[j] + (character_1 != character_2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
"""
Shift cost: the average word length of the shifted phrase
shifted_words: list of words in the shifted hypothesis sequence
original_words: list of words in the original hypothesis sequence
"""
def _shift_cost(shifted_words, original_words):
shift_cost = 0
original_start = 0
# Go through all words in the shifted hypothesis sequence
while original_start < len(shifted_words):
avg_shifted_charaters = 0
original_index = original_start
# Go through words with larger index in original hypothesis sequence
for shift_start in range(original_start+1, len(shifted_words)):
# Check whether there is word matching
if original_words[original_start] == shifted_words[shift_start]:
length = 1
"""
Go on checking the following word pairs to find the longest
matched phrase pairs
"""
for pos in range(1, len(original_words) - original_index):
original_end, shift_end = \
original_index + pos, shift_start + pos
# Check the next word pair
if shift_end < len(shifted_words) and \
original_words[original_end] == \
shifted_words[shift_end]:
length += 1
# Skip the already matched word pairs in the next loop
if original_start+1 < len(original_words):
original_start += 1
else:
break
shifted_charaters = 0
# Sum over the lengths of the shifted words
for index in range(length):
shifted_charaters += \
len(original_words[original_index+index])
avg_shifted_charaters = float(shifted_charaters) / length
break
shift_cost += avg_shifted_charaters
original_start += 1
return shift_cost
"""
Function to calculate the number of edits (the same as TER):
1. Dynamic programming for calculating edit distance
2. Greedy search to find the shift which most reduces minimum edit distance
Python code copyright (c) 2011 Hiroyuki Tanaka
"""
class CachedEditDistance(object):
def __init__(self, rwords):
self.rwds = rwords
self._cache = {}
self.list_for_copy = [0 for _ in range(len(self.rwds) + 1)]
def __call__(self, iwords):
start_position, cached_score = self._find_cache(iwords)
score, newly_created_matrix = \
self._edit_distance(iwords, start_position, cached_score)
self._add_cache(iwords, newly_created_matrix)
return score
def _edit_distance(self, iwords, spos, cache):
if cache is None:
cache = [tuple(range(len(self.rwds) + 1))]
else:
cache = [cache]
l = cache + [list(self.list_for_copy)
for _ in range(len(iwords) - spos)]
assert len(l) - 1 == len(iwords) - spos
for i, j in itertools.product(range(1, len(iwords) - spos + 1),
range(len(self.rwds) + 1)):
if j == 0:
l[i][j] = l[i - 1][j] + 1
else:
l[i][j] = min(l[i - 1][j] + 1,
l[i][j - 1] + 1,
l[i - 1][j - 1] + (0 if iwords[spos + i - 1] ==
self.rwds[j - 1] else 1))
return l[-1][-1], l[1:]
def _add_cache(self, iwords, mat):
node = self._cache
skipnum = len(iwords) - len(mat)
for i in range(skipnum):
node = node[iwords[i]][0]
assert len(iwords[skipnum:]) == len(mat)
for word, row in itertools.izip(iwords[skipnum:], mat):
if word not in node:
node[word] = [{}, None]
value = node[word]
if value[1] is None:
value[1] = tuple(row)
node = value[0]
def _find_cache(self, iwords):
node = self._cache
start_position, row = 0, None
for idx, word in enumerate(iwords):
if word in node:
start_position = idx + 1
node, row = node[word]
else:
break
return start_position, row
# Parsing arguments
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description='CharacTER: Character Level Translation Edit Rate',
epilog="Please apply 'PYTHONIOENCODING' in environment variables, "
"if UnicodeEncodeError occurs."
)
parser.add_argument('-r', '--ref', help='Reference file', required=True)
parser.add_argument('-o', '--hyp', help='Hypothesis file', required=True)
parser.add_argument('-v', '--verbose', help='Print score of each sentence',
action='store_true', default=False)
return parser.parse_args()
def main():
args = parse_args()
hyp_lines = [x for x in codecs.open(args.hyp, 'r', 'utf-8').readlines()]
ref_lines = [x for x in codecs.open(args.ref, 'r', 'utf-8').readlines()]
"""
Check whether the hypothesis and reference files have the same number of
sentences
"""
if len(hyp_lines) != len(ref_lines):
print("Error! {0} lines in the hypothesis file, but {1} lines in the"
" reference file.".format(len(hyp_lines), len(ref_lines)))
sys.exit(1)
scores = []
# Split the hypothesis and reference sentences into word lists
for index, (hyp, ref) in \
enumerate(itertools.izip(hyp_lines, ref_lines), start=1):
ref, hyp = ref.split(), hyp.split()
score = cer(hyp, ref)
scores.append(score)
# Print out scores of every sentence
if args.verbose:
print("{0:.4f}".format(score))
#print("CharacTER of sentence {0} is {1:.4f}".format(index, score))
average = sum(scores) / len(scores)
variance = sum((s - average) ** 2 for s in scores) / len(scores)
standard_deviation = math.sqrt(variance)
#print(average)
if __name__ == '__main__':
main()
| 11,034 | 32.038922 | 86 |
py
|
blend
|
blend-master/tools/dpmf/tools/stanford-postagger-2013-04-04/addStartTag.py
|
#!/usr/local/bin/python2.7
import sys
f = open(sys.argv[1]);
line = f.readline()
while line:
print '@START',
print line,
line = f.readline()
f.close()
| 165 | 14.090909 | 27 |
py
|
blend
|
blend-master/tools/dpmf/tools/stanford-postagger-2013-04-04/rmPOS.py
|
#!/usr/bin/python
import sys
import string
info = '''
'''
def filter(input, output):
fin = open(input, 'rU')
fout = open(output, 'w')
count = 0
for line in fin:
newline = ''
list = line.split(' ')
for e in range(0,len(list)):
p = list[e].rfind('/')
word = list[e][0:p]
pos = list[e][p+1:len(list[e])]
newline = newline+word +' '
newline = newline.strip()
fout.write(newline+'\n')
count += 1
if count % 10000 == 0:
print count
if __name__ == '__main__':
print info + '\n'
if len(sys.argv) < 3:
print './to_lower.py input_file output_file'
sys.exit(-1)
filter(sys.argv[1], sys.argv[2])
| 754 | 18.358974 | 52 |
py
|
blend
|
blend-master/tools/dpmf/tools/stanford-postagger-2013-04-04/reback.py
|
#!/usr/local/bin/python2.7
import sys
f = open(sys.argv[1]);
line = f.readline()
lineAll = ''
while line:
lineAll = lineAll + line.strip('\n') +" "
line = f.readline()
f.close()
strlist = lineAll.split('@START')
for v in strlist:
if(len(v)>1):
v = v.strip()
pos = v.find(' ',0)
newline = v[pos+1:len(v)]
wordlist = newline.split(' ')
for word in wordlist:
wpos = word.rfind('_')
print word[0:wpos]+'/'+word[wpos+1:len(word)],
print
| 517 | 23.666667 | 58 |
py
|
blend
|
blend-master/tools/dpmf/scripts/meteorForm-recall.py
|
#!/usr/local/bin/python2.7
import sys
f = open(sys.argv[1]);
#print sys.argv[1]
line = f.readline()
while line:
if(line.find("Segment ") > -1):
strlist = line.split('\t')
pre = strlist[2]
print pre
if(line.find("Recall:")>-1):
pre = line[24:len(line)]
#print "sys level meteor ",pre,
line = f.readline()
f.close()
| 367 | 20.647059 | 39 |
py
|
blend
|
blend-master/tools/dpmf/scripts/addStartTag.py
|
#!/usr/local/bin/python2.7
import sys
f = open(sys.argv[1]);
line = f.readline()
while line:
print '@START',
print line,
line = f.readline()
f.close()
| 165 | 14.090909 | 27 |
py
|
blend
|
blend-master/tools/dpmf/scripts/2CorreForm.py
|
#!/usr/local/bin/python2.7
import sys
import math
fin = open(sys.argv[1]);
fseg = open(sys.argv[2],'w');
fsys = open(sys.argv[3],'w');
line = fin.readline()
while line:
line=line.strip()
p1=line.find('.')
p2=line.rfind('.')
year=line[0:p1]
sysname=line[p1+1:p2]
lan=line[p2+1:len(line)]
fintmp = open(line);
segCnt=0
sysScore=0
linetmp = fintmp.readline()
while linetmp:
segCnt = segCnt+1
s1=linetmp.find('>')
seg = linetmp[6:s1-1]
score=(10-float(linetmp[s1+1:len(linetmp)-5]))
sysScore = sysScore+float(score)
strs="comb\t" +lan+"\t"+year+"\t"+sysname+"\t"+seg+"\t"+str(score)
fseg.write(strs+"\n")
linetmp = fintmp.readline()
fsys.write("comb\t"+lan+"\t"+year+"\t"+sysname+"\t"+str(sysScore/segCnt)+"\n")
print segCnt
line = fin.readline()
fin.close()
| 877 | 24.823529 | 82 |
py
|
blend
|
blend-master/tools/dpmf/scripts/rmPOS.py
|
#!/usr/bin/python
import sys
import string
info = '''
'''
def filter(input, output):
fin = open(input, 'rU')
fout = open(output, 'w')
count = 0
for line in fin:
newline = ''
list = line.split(' ')
for e in range(0,len(list)):
p = list[e].rfind('/')
word = list[e][0:p]
pos = list[e][p+1:len(list[e])]
newline = newline+word +' '
newline = newline.strip()
fout.write(newline+'\n')
count += 1
if count % 10000 == 0:
print count
if __name__ == '__main__':
print info + '\n'
if len(sys.argv) < 3:
print './to_lower.py input_file output_file'
sys.exit(-1)
filter(sys.argv[1], sys.argv[2])
| 754 | 18.358974 | 52 |
py
|
blend
|
blend-master/tools/dpmf/scripts/stfdeptree2form.py
|
#!/usr/local/bin/python2.7
import sys
f = open(sys.argv[1]);
line = f.readline()
while line:
if(len(line)>1):
p=line.find('(')
rela=line[0:p]
newline=line[p+1:len(line)-2]
strlist = newline.split(', ')
p=strlist[1].rfind('-')
w1=strlist[1][0:p]
posi1=strlist[1][p+1:len(strlist[1])]
p=strlist[0].rfind('-')
w2=strlist[0][0:p]
posi2=strlist[0][p+1:len(strlist[0])]
print w1+'\t'+'NN'+'\t'+ posi2 + '\t' + rela
else:
print line,
line = f.readline()
f.close()
| 566 | 23.652174 | 52 |
py
|
blend
|
blend-master/tools/dpmf/scripts/probPreciRecall.py
|
#!/usr/local/bin/python2.7
import sys
dpm_f = sys.argv[1]
preci_f = sys.argv[2]
recall_f = sys.argv[3]
arg = float(sys.argv[4])
score = 0
dpmL, preciL, recallL = [], [], []
for idx, line in enumerate(open(dpm_f, 'rU')):
dpmL.append(float(line.strip()))
for idx, line in enumerate(open(preci_f, 'rU')):
preciL.append(float(line.strip()))
for idx, line in enumerate(open(recall_f, 'rU')):
recallL.append(float(line.strip()))
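# Note (added for clarity): the loop below combines the inputs as
# score = dpm / (arg/recall + (1 - arg)/precision),
# i.e. each dpm value is scaled by the arg-weighted harmonic mean of precision
# and recall, and negative results are clipped to 0.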
for i in range(len(dpmL)):
v1, v2, v3 = dpmL[i], preciL[i], recallL[i]
if (v2 == 0 or v3 == 0):
score = 0
else:
score = v1 * 1.0 / (arg / v3 + (1.0 - arg) / v2)
if score < 0:
score = 0
print str(score)
| 689 | 21.258065 | 56 |
py
|
blend
|
blend-master/tools/dpmf/scripts/meteorForm-precision.py
|
#!/usr/local/bin/python2.7
import sys
f = open(sys.argv[1]);
#print sys.argv[1]
line = f.readline()
while line:
if(line.find("Segment ") > -1):
strlist = line.split('\t')
pre = strlist[1]
print pre
if(line.find("Precision:")>-1):
pre = line[24:len(line)]
#print "sys level meteor ",pre,
line = f.readline()
f.close()
| 370 | 20.823529 | 39 |
py
|
blend
|
blend-master/tools/dpmf/scripts/syscore-prob.py
|
#!/usr/local/bin/python2.7
import sys
f = open(sys.argv[1]);
line = f.readline()
line = f.readline()
score=0
cnt=1
while line:
score = score + float(line)
line = f.readline()
cnt = cnt+1
print score,
f.close()
fa = open(sys.argv[1],'a');
score = score/float(3000)
fa.write("sys level t2sts "+str(score))
| 317 | 17.705882 | 39 |
py
|
blend
|
blend-master/tools/dpmf/scripts/probPreciRecall-sys.py
|
#!/usr/local/bin/python2.7
import sys
fin1 = open(sys.argv[1]);
fin2 = open(sys.argv[2]);
fin3 = open(sys.argv[3]);
arg = float(sys.argv[4])
line1 = fin1.readline()
line2 = fin2.readline()
line3 = fin3.readline()
dict1={}
while line1:
strlistd = line1.split('\t')
newstr = strlistd[1] + '\t' + strlistd[2] + '\t' + strlistd[3]
dict1[newstr] = float(strlistd[4])
line1 = fin1.readline()
dict2={}
while line2:
strlistm = line2.split('\t')
newstr = strlistm[1] + '\t' + strlistm[2] + '\t' + strlistm[3]
dict2[newstr] = float(strlistm[4])
line2 = fin2.readline()
dict3={}
while line3:
strlistm = line3.split('\t')
newstr = strlistm[1] + '\t' + strlistm[2] + '\t' + strlistm[3]
dict3[newstr] = float(strlistm[4])
line3 = fin3.readline()
for key,value in dict1.iteritems():
val1 = value
if(key in dict2):
val2 = dict2[key]
if(key in dict3):
val3 = dict3[key]
if(val2==0 or val3==0):
score = 0
else:
score = val1 * 1.0/(arg/val3 + (1.0-arg)/val2 )
if (score < 0):
score = 0
newstr = 'pdf'+'\t'+ key + '\t'+ str(score)
print newstr
| 1,230 | 23.137255 | 68 |
py
|
blend
|
blend-master/tools/dpmf/scripts/reback.py
|
#!/usr/local/bin/python2.7
import sys
f = open(sys.argv[1]);
line = f.readline()
lineAll = ''
while line:
lineAll = lineAll + line.strip('\n') +" "
line = f.readline()
f.close()
strlist = lineAll.split('@START')
for v in strlist:
if(len(v)>1):
v = v.strip()
pos = v.find(' ',0)
newline = v[pos+1:len(v)]
wordlist = newline.split(' ')
for word in wordlist:
wpos = word.rfind('_')
print word[0:wpos]+'/'+word[wpos+1:len(word)],
print
| 517 | 23.666667 | 58 |
py
|
blend
|
blend-master/tools/meteor-1.4/mt-diff/mt-diff.py
|
#!/usr/bin/env python
import math, os, re, shutil, subprocess, sys, tempfile
# MT-Diff: measure changes in segment-level quality between two systems
# according to BLEU and Meteor
bleu_script = os.path.abspath(os.path.join(os.path.dirname(__file__), \
'files', 'mteval-v13m.pl'))
meteor_jar = os.path.abspath(os.path.join(os.path.dirname( \
os.path.dirname(__file__)), 'meteor-1.3.jar'))
langs = 'en cz de es fr ar other'
labels = [(-1.0 + 0.1 * i, -0.9 + 0.1 * i) for i in range(20)]
labels.insert(10, (0, 0))
def main(argv):
# Meteor jar check
if not os.path.exists(meteor_jar):
print 'Please edit the meteor_jar line of {0} to reflect the location of meteor-*.jar'.format(__file__)
sys.exit(1)
# Usage
if len(argv[1:]) < 4:
print 'usage: {0} <lang> <sys1.hyp> <sys2.hyp> <ref1> [ref2 ...]'. \
format(argv[0])
print 'langs: {0}'.format(langs)
sys.exit(1)
# Language
lang = argv[1]
if lang not in langs.split():
print 'langs: {0}'.format(langs)
sys.exit(1)
# Files
hyp1_file = argv[2]
hyp2_file = argv[3]
ref_files = argv[4:]
# Work directory
work_dir = tempfile.mkdtemp(prefix='mt-diff-')
# SGML Files
hyp1_sgm = os.path.join(work_dir, 'hyp1')
hyp2_sgm = os.path.join(work_dir, 'hyp2')
src_sgm = os.path.join(work_dir, 'src')
ref_sgm = os.path.join(work_dir, 'ref')
# Hyp1
write_sgm(hyp1_file, hyp1_sgm, \
'<tstset trglang="any" setid="any" srclang="any">', '</tstset>')
# Hyp2
write_sgm(hyp2_file, hyp2_sgm, \
'<tstset trglang="any" setid="any" srclang="any">', '</tstset>')
# Src (ref1)
ref_len = write_sgm(ref_files[0], src_sgm, \
'<srcset trglang="any" setid="any" srclang="any">', '</srcset>')
# Ref (all refs)
write_ref_sgm(ref_files, ref_sgm, \
'<refset trglang="any" setid="any" srclang="any">', '</refset>')
# BLEU
print 'BLEU scoring hyp1...'
bleu1, bs1 = bleu(hyp1_sgm, ref_sgm, src_sgm, work_dir)
print 'BLEU scoring hyp2...'
bleu2, bs2 = bleu(hyp2_sgm, ref_sgm, src_sgm, work_dir)
bleu_diff = diff_scr(bleu1, bleu2)
bleu_dd = diff_dist(bleu_diff)
# Meteor
print 'Meteor scoring hyp1...'
meteor1, ms1 = meteor(hyp1_sgm, ref_sgm, lang, work_dir)
print 'Meteor scoring hyp2...'
meteor2, ms2 = meteor(hyp2_sgm, ref_sgm, lang, work_dir)
meteor_diff = diff_scr(meteor1, meteor2)
meteor_dd = diff_dist(meteor_diff)
# Header
print ''
print '+---------------------------------+'
print '| Segment Level Difference |'
print '+-------------+--------+----------+'
print '| Change | BLEU | Meteor |'
print '+-------------+--------+----------+'
# Scores
for (l, b, m) in zip(labels, bleu_dd, meteor_dd):
if l == (0, 0):
print '| 0.0 | {2:6} | {3:6} |'.format(l[0], l[1], b, m)
else:
print '| {0:4} - {1:4} | {2:6} | {3:6} |'.format(l[0], l[1], b, m)
# Footer
print '+-------------+--------+----------+'
print '| System2 + | {0:6} | {1:6} |'. \
format(sum(bleu_dd[11:]), sum(meteor_dd[11:]))
print '| System2 - | {0:6} | {1:6} |'. \
format(sum(bleu_dd[0:10]), sum(meteor_dd[0:10]))
print '+-------------+--------+----------+'
print '| # Segments | {0:6} |'.format(ref_len)
print '+-------------+-------------------+'
print '| System Level Score |'
print '+-------------+-------------------+'
print '| System1 | {0:0.4f} | {1:0.4f} |'.format(bs1, ms1)
print '| System2 | {0:0.4f} | {1:0.4f} |'.format(bs2, ms2)
print '+-------------+--------+----------+'
# Cleanup
shutil.rmtree(work_dir)
def bleu(hyp, ref, src, work_dir=os.curdir):
# Run BLEU
bleu_cmd = ['perl', bleu_script, '-t', hyp, '-r', ref, '-s', src, '-b', \
'--metricsMATR']
subprocess.Popen(bleu_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, \
cwd=work_dir).wait()
# Get scores from file
seg = {}
scr = open(os.path.join(work_dir, 'BLEU-seg.scr'))
for line in scr:
part = line.strip().split()
seg['{0}:{1}'.format(part[2], part[3])] = float(part[4])
scr.close()
scr = open(os.path.join(work_dir, 'BLEU-sys.scr'))
sys_s = float(scr.readline().split()[-1])
scr.close()
return (seg, sys_s)
def meteor(hyp, ref, lang='en', work_dir=os.curdir):
# Run Meteor
meteor_cmd = ['java', '-Xmx2G', '-jar', meteor_jar, hyp, ref, '-sgml', \
'-l', lang, '-norm']
subprocess.Popen(meteor_cmd, stdout=subprocess.PIPE, \
stderr=subprocess.PIPE, cwd=work_dir).wait()
# Get scores from file
seg = {}
scr = open(os.path.join(work_dir, 'meteor-seg.scr'))
for line in scr:
part = line.strip().split()
seg['{0}:{1}'.format(part[2], part[3])] = float(part[4])
scr.close()
scr = open(os.path.join(work_dir, 'meteor-sys.scr'))
sys_s = float(scr.readline().split()[-1])
scr.close()
return (seg, sys_s)
def diff_scr(scr1, scr2):
diff = []
for key in scr1.keys():
diff.append(scr2[key] - scr1[key])
return diff
def diff_dist(diff):
step = 0.1
dist = [0] * 20
zero = 0
for d in diff:
if d == 0:
zero +=1
else:
dist[min(19, int(10 + d * 10))] += 1
dist.insert(10, zero)
return dist
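# Note (added for clarity): diff_dist buckets per-segment score differences into
# 20 bins of width 0.1 covering [-1.0, 1.0), then inserts a separate count of
# exactly-zero differences at position 10, matching the 21 labels printed in main.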
def write_sgm(in_file, out_sgm, header, footer):
file_in = open(in_file)
file_out = open(out_sgm, 'w')
print >> file_out, header
print >> file_out, '<doc sysid="any" docid="any">'
i = 0
for line in file_in:
i += 1
print >> file_out, '<seg id="{0}"> {1} </seg>'.format(i, line.strip())
print >> file_out, '</doc>'
print >> file_out, footer
file_in.close()
file_out.close()
return i
def write_ref_sgm(in_files, out_sgm, header, footer):
file_out = open(out_sgm, 'w')
print >> file_out, header
sys_id = 0
for in_file in in_files:
sys_id += 1
file_in = open(in_file)
print >> file_out, '<doc sysid="{0}" docid="any">'.format(sys_id)
i = 0
for line in file_in:
i += 1
print >> file_out, '<seg id="{0}"> {1} </seg>'. \
format(i, line.strip())
print >> file_out, '</doc>'
file_in.close()
print >> file_out, footer
file_out.close()
if __name__ == '__main__' : main(sys.argv)
| 6,503 | 31.19802 | 111 |
py
|
blend
|
blend-master/tools/meteor-1.4/scripts/build_wordnet_files.py
|
#!/usr/bin/env python
import os, sys
# Set for WordNet3
excFiles = ["adj.exc", "adv.exc", "noun.exc", "verb.exc"]
senseFile = "index.sense"
nounFile = "data.noun"
verbFile = "data.verb"
adjFile = "data.adj"
nounRelations = ["@", "@i", "~", "~i"] # Hypernym (instance), Hyponym (instance)
verbRelations = ["@", "~", "*"] # Hypernym, Hyponym, Entailment
adjRelations = ["\\"] # Pertainym
def main(argv):
if len(argv) < 3:
print "Build synonym files from WordNet"
print "usage:", argv[0], "<wordnetDictDir>", "<outDir>", "[language]"
print "example:", os.path.basename(argv[0]), \
"/usr/local/WordNet-3.0/dict", "synonyms"
sys.exit(1)
wnDir = argv[1]
outDir = argv[2]
lang = "english"
if len(argv) > 3 : lang = argv[3]
# Create exceptions file
exc = {} # exc[word] = formList
for excFile in excFiles:
inExc = open(os.path.join(wnDir, excFile), "r")
while True:
line = inExc.readline()
if not line : break
words = line.split()
form = words[0]
for i in range(1, len(words)):
word = words[i]
if word not in exc.keys():
exc[word] = []
exc[word].append(form)
inExc.close()
outExc = open(os.path.join(outDir, lang + ".exceptions"), "w")
for word in sorted(exc.keys()):
outExc.write(word + "\n")
formLine = ""
for form in exc[word]:
formLine += form + " "
outExc.write(formLine.strip() + "\n")
outExc.close()
# Create Synsets file
# For reasonable runtime, this assumes that different senses of the same
# word are on sequential lines. If this is not the case, change the synonym
# file to point to a sorted version (any consistent sorting method).
inSyn = open(os.path.join(wnDir, senseFile), "r")
outSyn = open(os.path.join(outDir, lang + ".synsets"), "w")
curWord = ""
synSets = ""
while True:
line = inSyn.readline()
if not line : break
terms = line.split()
word = terms[0].split("%")[0]
synSet = terms[1]
if word != curWord:
if curWord != "":
outSyn.write(curWord + "\n")
outSyn.write(synSets.strip() + "\n")
curWord = word
synSets = ""
synSets += synSet + " "
outSyn.write(curWord + "\n")
outSyn.write(synSets.strip() + "\n")
inSyn.close()
outSyn.close()
# Create Relations (Hypernymy, Hyponymy, Entailment) file
outRel = open(os.path.join(outDir, lang + ".relations"), "w")
scanData(os.path.join(wnDir, nounFile), nounRelations, outRel)
scanData(os.path.join(wnDir, verbFile), verbRelations, outRel)
scanData(os.path.join(wnDir, adjFile), adjRelations, outRel)
outRel.close()
# Scan a data file and write extras to output stream
def scanData(fileName, pointerList, outStream):
inData = open(fileName, "r")
while True:
line = inData.readline()
if not line : break
if line.startswith(" "):
continue
terms = line.split()
synSet = terms[0]
extraLine = ""
i = 7
while i < len(terms):
if terms[i] == "|":
break
if terms[i] in pointerList:
extraLine += terms[i + 1] + " "
i += 3
i += 1
if (extraLine != ""):
outStream.write(synSet + "\n")
outStream.write(extraLine.strip() + "\n")
inData.close()
if __name__ == "__main__" : main(sys.argv)
| 3,141 | 24.136 | 80 |
py
|
blend
|
blend-master/tools/meteor-1.4/scripts/combine_segcor_trainset.py
|
#!/usr/bin/env python
import os, shutil, sys
def main(argv):
if len(argv) < 3:
print "Create a single Meteor training set from HTER test sets"
print "usage:", argv[0], "<outDir> <hterDir1> [hterDir2] ..."
sys.exit(1)
outDir = argv[1]
hterDirs = argv[2:]
if os.path.exists(outDir):
print "File", outDir, "exists, aborting to avoid overwriting files"
sys.exit(1)
os.mkdir(outDir)
for hterDir in hterDirs:
base = os.path.basename(hterDir)
shutil.copy(os.path.join(hterDir, "tst.sgm"), os.path.join(outDir, \
base + ".tst"))
shutil.copy(os.path.join(hterDir, "ref.sgm"), os.path.join(outDir, \
base + ".ref"))
shutil.copy(os.path.join(hterDir, "ter.seg"), os.path.join(outDir, \
base + ".ter"))
if __name__ == "__main__" : main(sys.argv)
| 780 | 24.193548 | 70 |
py
|
blend
|
blend-master/tools/meteor-1.4/scripts/sgmlize.py
|
#!/usr/bin/env python
# Convert to and from SGML easily. There exist many SGML/XML standards
# for MT evaulation. This script produces files in a format compatible
# with meteor-*.jar, mteval-v*.pl, and tercom.*.jar
import codecs, re, sys
sys.stdin = codecs.getreader('utf-8')(sys.stdin)
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
def main(argv):
if len(argv[1:]) < 1:
print 'SGMLize'
print 'Do you want a [s]rc, [t]est, [r]eference, or [p]laintext?'
print 'usage: {0} (s|t|r|p) < <textfile>'.format(argv[0])
print 'ex: {0} t < sys-output.txt > sys-output.sgm'.format(argv[0])
sys.exit(1)
t_type = argv[1]
if t_type not in ['s', 't', 'r', 'p']:
print 'usage: {0} (s|t|r|p) < <textfile>'.format(argv[0])
sys.exit(1)
if t_type == 'p':
while True:
line = sys.stdin.readline()
if not line:
break
r = re.search(u'<seg[^>]+>\s*(.*\S)\s*<.seg>', line, re.I)
if r:
print unescape(r.group(1))
return
tag = 'srcset' if t_type == 's' else 'tstset' if t_type == 't' else 'refset'
seg = 0
print u'<{0} trglang="any" setid="any" srclang="any">'.format(tag)
print u'<doc docid="any" sysid="sys">'
while True:
line = sys.stdin.readline()
if not line:
break
seg += 1
print u'<seg id="{0}"> {1} </seg>'.format(seg, escape(line.strip()))
print u'</doc>'
print u'</{0}>'.format(tag)
def escape(s):
return s.replace('&', '&amp;').replace('"', '&quot;').replace('\'', '&apos;'). \
replace('<', '&lt;').replace('>', '&gt;')
def unescape(s):
return s.replace('&quot;', '"').replace('&apos;', '\'').replace('&lt;', '<'). \
replace('&gt;', '>').replace('&amp;', '&')
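# Illustrative example (added): escape('a < b & "c"') returns
# 'a &lt; b &amp; &quot;c&quot;' and unescape inverts it; '&' is escaped first
# and unescaped last so entities are not double-processed.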
if __name__ == '__main__': main(sys.argv)
| 1,881 | 30.366667 | 84 |
py
|
blend
|
blend-master/tools/meteor-1.4/scripts/unigram_stats.py
|
#!/usr/bin/env python
import os, subprocess, sys
# Output line:
# P R f1 frag
# Unigrams, surface forms only
# Params set so frag = (chunks/matches)
def main(argv):
if len(argv) < 2:
print 'usage: {0} hyps refs > f1.out'.format(argv[0])
sys.exit(1)
hyp = argv[1]
ref = argv[2]
rc = wc(ref) / wc(hyp)
cmd = ['java', '-Xmx2G', '-jar',
os.path.dirname(__file__) + '/../meteor-1.2.jar', hyp, ref, '-r', str(rc),
'-normalize', '-m', 'exact', '-p', '0.5 1 1 0.5']
p = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
while True:
line = p.stdout.readline()
if line.startswith('Precision'):
P = float(line.split()[-1]) * 100
if line.startswith('Recall'):
R = float(line.split()[-1]) * 100
if line.startswith('f1'):
f1 = float(line.split()[-1]) * 100
if line.startswith('Fragmentation'):
frag = float(line.split()[-1]) * 100
if not line:
break
print '{0:5.2f} {1:5.2f} {2:5.2f} {3:5.2f}'.format(P, R, f1, frag)
def wc(f):
i = 0
f_in = open(f, 'r')
while True:
line = f_in.readline()
if not line:
break
i += 1
f_in.close()
return i
if __name__ == '__main__' : main(sys.argv)
| 1,351 | 23.581818 | 80 |
py
|
blend
|
blend-master/tools/meteor-1.4/scripts/armageddon.py
|
#!/usr/bin/env python
# Tune Meteor parameters quickly using Sun Grid Engine
# Script submits 21 job array, 1 cpu / 3gb each
import os, subprocess, sys
SGE_SCRIPT = '''
### Job name: m.xx
#$ -N "m.{0}"
### Log files go in the log dir
#$ -e {2}/m.err
#$ -o {2}/m.out
### Just one slot
#$ -pe smp 1
### 2g for Java heap, 1g extra for JVM
#$ -l h_vmem=3g
### 24h should be enough for very large data sets (WMT09/10 English combined).
#$ -l h_rt=24:00:00
### Job array: 21 tasks
#$ -t 1-21
### Call the script for this job
echo "start: `date`"
bash {1}/script.$SGE_TASK_ID.sh
echo "finish: `date`"
'''
SORT_SCRIPT ='''
#!/usr/bin/env bash
cat {0}/output.* | sort -g > {0}/output.sort
'''
def main(argv):
if len(argv[1:]) < 7:
print 'usage: {0} <meteor.jar> <lang> <n_mods> <paraphrase.gz> <task> <data_dir> <work_dir> [b_min b_max d_min d_max] [other args like -ch]'.format(argv[0])
sys.exit(1)
# Args
meteor_jar = os.path.abspath(argv[1])
lang = argv[2]
n_mods = int(argv[3])
paraphrase_gz = os.path.abspath(argv[4])
task = argv[5]
data_dir = os.path.abspath(argv[6])
work_dir = os.path.abspath(argv[7])
log_dir = os.path.join(work_dir, 'log')
script_dir = os.path.join(work_dir, 'script')
sb_dir = os.path.join(work_dir, 'sandbox')
b_min = argv[8] if len(argv[1:]) > 7 else '0'
b_max = argv[9] if len(argv[1:]) > 8 else '2.0'
d_min = argv[10] if len(argv[1:]) > 9 else '0.4'
d_max = argv[11] if len(argv[1:]) > 10 else '0.9'
other_args = argv[12:]
# Working dir
if os.path.exists(work_dir):
print 'Work dir {0} exists, exiting'.format(work_dir)
sys.exit(1)
os.mkdir(work_dir)
os.mkdir(log_dir)
os.mkdir(script_dir)
os.mkdir(sb_dir)
# Weight ranges for jobs based on mod count
w_start_list = [1, 0, 0, 0]
w_end_list = [1, 0, 0, 0]
for i in range(n_mods):
w_end_list[i] = 1
w_start = ''
w_end = ''
for i in range(4):
w_start += str(w_start_list[i]) + ' '
w_end += str(w_end_list[i]) + ' '
w_start = w_start.strip()
w_end = w_end.strip()
# Step is always the same
step = '0.05 0.10 0.05 0.05 1.0 0.2 0.2 0.2'
# Write out Trainer job scripts
for i in range(21):
script_file = os.path.join(script_dir, 'script.{0}.sh'.format(i + 1))
sb_sub_dir = os.path.join(sb_dir, '{0}'.format(i + 1))
os.mkdir(sb_sub_dir)
out_file = os.path.join(work_dir, 'output.{0}'.format(i + 1))
a = 0.05 * i
# If optimal parameters include b=2.0 or d={0.4,0.9}, pass b/d args
# to script to explore additional area in those directions (one
# direction per run)
start = '{0} {1} 0 {2} {3}'.format(a, b_min, d_min, w_start)
end = '{0} {1} 1 {2} {3}'.format(a, b_max, d_max, w_end)
trainer_cmd = 'java -XX:+UseCompressedOops -Xmx2G -cp {0} Trainer {1} {2} -l {3} -a {4} -i \'{5}\' -f \'{6}\' -s \'{7}\' {args} > {8}'.format(meteor_jar, task, data_dir, lang, paraphrase_gz, start, end, step, out_file, args=' '.join(other_args))
o = open(script_file, 'w')
print >> o, '#!/usr/bin/env bash'
print >> o, 'if [[ -e {0} ]] ; then exit ; fi'.format(out_file)
print >> o, 'cd {0}'.format(sb_sub_dir)
print >> o, trainer_cmd
o.close()
# Sort script
sort_script_file = os.path.join(work_dir, 'sort_output.sh')
o = open(sort_script_file, 'w')
print >> o, SORT_SCRIPT.format(work_dir).strip()
o.close()
os.chmod(sort_script_file, 0755)
# SGE Script
sge_script_file = os.path.join(script_dir, 'sge-script.sh')
o = open(sge_script_file, 'w')
print >> o, SGE_SCRIPT.format(lang, script_dir, log_dir)
o.close()
subprocess.call(['qsub', sge_script_file])
# Report
print ''
print 'Trainer jobs submitted, output written to:'
print work_dir
print ''
print 'After all jobs finish, sort results:'
print sort_script_file
print ''
if __name__ == '__main__' : main(sys.argv)
| 4,071 | 30.083969 | 253 |
py
|
blend
|
blend-master/tools/meteor-1.4/scripts/delete_stray_matches.py
|
#!/usr/bin/env python
import re, sys
DEFAULT_DIST = 0.0
DEFAULT_LEN = 0
def main(argv):
# Directions
if len(argv[1:]) < 1:
sys.stderr.write('Using defaults - for help, use {0} -h\n'.format(argv[0]))
min_dist = DEFAULT_DIST
min_len = DEFAULT_LEN
words = []
# help or min distance
if len(argv[1:]) > 0:
if argv[1] in '--help':
print 'Delete single matches to improve monotonicity of alignments'
print ''
print 'usage:', argv[0], 'min_rel_dist', 'min_seg_len', \
'word_list', '<', 'matcher.out', '>', 'matcher.out.mon'
print ''
print 'min_rel_dist - minimum relative distance for deletion' + \
'(default = X)'
print 'min_seg_len - minimum segment length (reference) to' + \
'consider (default = X)'
print 'word_list - file of words, one per line, to consider' + \
'for deletion (default = all words)'
sys.exit()
else:
min_dist = float(argv[1])
# min length
if len(argv[1:]) > 1:
min_len = int(argv[2])
# word list
if len(argv[1:]) > 2:
words_in = open(argv[3])
for line in words_in:
words.append(line.strip().split()[0])
words_in.close()
# Read alignments
while True:
# Next line should be 'Alignment...'
line = sys.stdin.readline()
# End of file
if not line:
break
if not line.startswith('Alignment'):
print 'Error: file does not start with Alignment line'
print 'Please use exact output of Matcher'
sys.exit(1)
print line,
sen1 = sys.stdin.readline()
words1 = sen1.split()
print sen1,
sen2 = sys.stdin.readline()
words2 = sen2.split()
print sen2,
print sys.stdin.readline(),
# Read matches
match_words2 = []
match_words1 = []
match_start2 = []
match_start1 = []
match_len2 = []
match_len1 = []
mods = []
scores = []
while True:
line = sys.stdin.readline()
if not line.strip():
break
m2, m1, mod, score = line.split()
m2_s, m2_l = map(int, m2.split(':'))
match_start2.append(m2_s)
match_len2.append(m2_l)
match_words2.append(words2[m2_s : m2_s + m2_l])
m1_s, m1_l = map(int, m1.split(':'))
match_start1.append(m1_s)
match_len1.append(m1_l)
match_words1.append(words1[m1_s : m1_s + m1_l])
mods.append(mod)
scores.append(score)
# For sentences minimum length or above that have more than one match
if len(words2) >= min_len and len(mods) > 1:
# Look for stray matches
for i in range(len(mods)):
# Phrase matches safe
if match_len1[i] > 1 or match_len2[i] > 1:
continue
# Words not on list safe
if words:
if words2[match_start2[i]] not in words \
and words1[match_start1[i]] not in words:
continue
# Distance from monotonicity with previous match
if i == 0:
dist_prev = 0
else:
dist_prev = abs((match_start1[i] - match_start1[i - 1]) \
- (match_start2[i] - match_start2[i - 1]))
# Distance from monotonicity with next match
if i == len(mods) - 1:
dist_next = 0
else:
dist_next = abs((match_start1[i + 1] - match_start1[i]) \
- (match_start2[i + 1] - match_start2[i]))
# Anchored matches safe
if i != 0 and dist_next == 0:
continue
if i != len(mods) - 1 and dist_prev == 0:
continue
# Total jump distance
dist = min(dist_prev, dist_next)
# Delete if exceeds threshold
if float(dist) / len(words2) >= min_dist:
mods[i] = -1 # dist / len(words2)
# Write new match lines
for i in range(len(mods)):
print '{0}:{1}\t\t\t{2}:{3}\t\t\t{4}\t\t{5}'.format( \
match_start2[i], match_len2[i], match_start1[i], match_len1[i], \
mods[i], scores[i])
print ''
if __name__ == '__main__' : main(sys.argv)
| 4,642 | 33.909774 | 83 |
py
|
blend
|
blend-master/tools/meteor-1.4/scripts/agg.py
|
#!/usr/bin/env python
# Aggregate: sum input lines by column. Useful for aggregating
# MeteorStats lines as a MERT implementation would.
from sys import argv, exit, stdin
parse = int
if len(argv) > 1:
if argv[1].startswith('-h'):
print 'usage: agg [-f] FILE'
exit()
if argv[1] == '-f':
parse = float
else:
stdin = open(argv[1], 'r')
if len(argv) > 2:
stdin = open(argv[2], 'r')
agg = None
while True:
line = stdin.readline()
if not line:
break
f = line.split()
if agg == None:
agg = [0] * len(f)
if len(f) != len(agg):
print 'error: number of columns not constant'
exit(1)
for i in range(len(agg)):
agg[i] += parse(f[i])
if agg:
print ' '.join([str(x) for x in agg])
stdin.close()
| 814 | 19.375 | 63 |
py
|
blend
|
blend-master/tools/meteor-1.4/scripts/rm_que.py
|
#!/usr/bin/env python
import gzip, sys
if len(sys.argv[1:]) < 2:
print 'usage', sys.argv[0], 'original.gz', 'clean.gz'
sys.exit(1)
f_in = gzip.open(sys.argv[1])
f_out = gzip.open(sys.argv[2], 'wb')
while True:
prob = f_in.readline()
if not prob:
break
p1 = f_in.readline()
p2 = f_in.readline()
if 'que' in (p1.strip(), p2.strip()):
print 'bad:'
print p1,
print p2,
continue
f_out.write(prob)
f_out.write(p1)
f_out.write(p2)
f_in.close()
f_out.close()
| 536 | 17.517241 | 57 |
py
|
blend
|
blend-master/tools/meteor-1.4/scripts/freq.py
|
#!/usr/bin/env python
# Simple word relative frequency counter. Used to create
# function word lists.
from sys import stdin, argv
freq = {}
total = 0
if argv[1:]:
stdin = open(argv[1], 'r')
while True:
line = stdin.readline()
if not line:
break
f = line.split()
for w in f:
freq[w] = 1 if w not in freq else freq[w] + 1
total += 1
for w in sorted(freq, cmp=lambda x,y: freq[y] - freq[x]):
print w, float(freq[w]) / total
| 476 | 18.08 | 57 |
py
|
blend
|
blend-master/tools/meteor-1.4/scripts/rm_word_accumulation.py
|
#!/usr/bin/env python
import gzip, sys
if len(sys.argv[1:]) < 2:
print 'Remove paraphrases with word accumulation:'
print 'Paraphrases where p1 is a substring of p2 or vice versa will be removed'
print ''
print 'usage', sys.argv[0], 'original.gz', 'clean.gz'
sys.exit(1)
f_in = gzip.open(sys.argv[1])
f_out = gzip.open(sys.argv[2], 'wb')
while True:
prob = f_in.readline()
if not prob:
break
p1 = f_in.readline()
p2 = f_in.readline()
p1s = ' ' + p1.strip() + ' '
p2s = ' ' + p2.strip() + ' '
if p1s in p2s or p2s in p1s:
continue
f_out.write(prob)
f_out.write(p1)
f_out.write(p2)
f_in.close()
f_out.close()
| 688 | 21.225806 | 83 |
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.