python_code | repo_name | file_path |
---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class TC_Whitelist:
whitelist = ['h884', 's884', 'h1688', 's1688', 'hmma', 'i8816', '16816',
'dgrad_1x1_stride_2x2', 'first_layer_wgrad_kernel', 'conv1x1',
'conv2d_c1_k1', 'direct_group', 'xmma_implicit_gemm',
'xmma_sparse_conv', 'xmma_warp_specialized_implicit_gemm',
'xmma_gemm', 'xmma_sparse_gemm', 'c1688']
def __contains__(self, item):
for pattern in self.whitelist:
if pattern in item:
return True
return False
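# A minimal usage sketch (the kernel names below are hypothetical): membership
# is substring matching, so a kernel counts as a Tensor Core kernel if its name
# contains any whitelisted pattern.
if __name__ == '__main__':
    tc = TC_Whitelist()
    print("volta_fp16_h884gemm_64x64_nt" in tc)  # True: contains "h884"
    print("elementwise_kernel" in tc)            # False: no pattern matches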
| PyProf-master | pyprof/prof/tc.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from .utility import Utility
from .base import OperatorLayerBase
class Foo(OperatorLayerBase):
"""
An object of Foo is instantiated when we detect an unsupported operator.
"""
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
shapes = []
types = []
for arg in args:
if arg['type'] == "tensor":
shapes.append(arg['shape'])
types.append(arg['dtype'])
self.shape = shapes
self.type = types
def params(self):
p = OrderedDict([('T', self.shape), ('type', self.type)])
return p
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
def flops(self):
return 0
def bytes(self):
return 0
class Copy(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
assert (mod == "Tensor")
assert (op == "copy_")
assert (len(args) == 2)
dst, src = args
assert (src['type'] == dst['type'])
assert (src['shape'] == dst['shape'])
self.shape = src['shape']
self.stype = src['dtype']
self.dtype = dst['dtype']
def params(self):
#The data type might be different
p = OrderedDict([('T', self.shape), ('stype', self.stype), ('dtype', self.dtype)])
return p
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
def flops(self):
return 0
def elems(self):
return Utility.numElems(self.shape)
def bytes(self):
return self.elems() * (Utility.typeToBytes(self.stype) + Utility.typeToBytes(self.dtype))
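# Illustrative example (hypothetical shapes): copying a [1024,1024] fp16 source
# into an fp32 destination is modeled as one read of the source plus one write
# of the destination, i.e. 1024*1024*(2 + 4) = 6291456 bytes and zero flops.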
class Clone(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
assert (mod == "Tensor")
assert (op == "clone")
assert (len(args) == 1)
t = args[0]
self.shape = t['shape']
self.type = t['dtype']
def params(self):
p = OrderedDict([('T', self.shape), ('type', self.type)])
return p
def flops(self):
return 0
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
def elems(self):
return Utility.numElems(self.shape)
def bytes(self):
return 2 * self.elems() * Utility.typeToBytes(self.type)
class Contiguous(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
assert (mod == "Tensor")
assert (op == "contiguous")
assert (len(args) == 1)
t = args[0]
self.shape = t['shape']
self.type = t['dtype']
def params(self):
p = OrderedDict([('T', self.shape), ('type', self.type)])
return p
def flops(self):
return 0
def bytes(self):
return 2 * Utility.numElems(self.shape) * Utility.typeToBytes(self.type)
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
class Any(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
assert (mod == "Tensor")
assert (op == "any")
assert (len(args) in [1,2])
t = args[0]
# The input can be a tensor or scalar
assert (t['type'] in ["tensor", "bool"])
if t['type'] == "tensor":
self.shape = t['shape']
self.type = t['dtype']
else:
self.shape = (1,)
self.type = t['type']
self.sub = d.sub
return
def params(self):
p = OrderedDict([('T', self.shape), ('type', self.type)])
return p
def op(self):
return self.op_
def mod(self):
return self.mod_
def tc(self):
return "-"
def flops(self):
return 0
def bytes(self):
return Utility.numElems(self.shape) * Utility.typeToBytes(self.type)
| PyProf-master | pyprof/prof/misc.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script reads the output (Python dictionary) created by parse.py.
For every kernel (line) in the input it determines
module / class name e.g. torch.nn.functional
operator name e.g. linear
kernel parameters e.g. GEMM M, N, K, datatype
bytes
flops
tensor core usage
direction (fprop, bprop)
and other things. Please see the tool usage.
"""
from .usage import parseArgs
from .output import Output
from .utility import Utility
from .pointwise import Pointwise
from .convert import Convert
from .blas import *
from .embedding import Embedding
from .reduction import *
from .dropout import Dropout
from .softmax import *
#from pooling import * # work in progress
from .linear import Linear
from .optim import Adam
from .misc import *
from .conv import Conv
from .activation import Activation
from .index_slice_join_mutate import Cat, Reshape, MaskedScatter, Gather, Nonzero, IndexSelect, MaskedSelect
from .recurrentCell import RNNCell
from .normalization import BatchNorm
from .randomSample import RandPerm
from .loss import MSELoss
from .data import Data
from .memory import OneZero, Fill, Full
def findFpropKernel(seq):
#Find the last fprop kernel with the same seqId
#First look at seqId and then at altSeqId
for idx in reversed(range(len(kernels))):
k = kernels[idx]
if (seq in k['seqId']) and (k['dir'] == "fprop"):
return idx
for idx in reversed(range(len(kernels))):
k = kernels[idx]
if (seq in k['altSeqId']) and (k['dir'] == "fprop"):
return idx
return -1
#print("Error: seqId {} not found.".format(seq), file=sys.stderr)
#assert False
def foo(mod, op, d):
if (op[0] == "linear"):
xx = Linear(d)
# rnncell, lstmcell, grucell
elif (mod[0] in ["LSTMCell", "GRUCell"]) and (op[0] == "forward"):
xx = RNNCell(d)
elif op[0] in [
"conv1d",
"conv2d",
]:
xx = Conv(d)
elif (op[0] in Pointwise.ops):
xx = Pointwise(d)
elif (op[0] in Convert.ops):
xx = Convert(d)
elif op[0] in ["__matmul__", "matmul"]:
xx = Matmul(d)
elif op[0] == "embedding":
xx = Embedding(d)
#reduction
elif op[0] == "sum":
xx = Sum(d)
elif op[0] == "mean":
xx = Mean(d)
elif op[0] == "norm":
xx = Norm(d)
elif op[0] == "dropout":
xx = Dropout(d)
#Index, Slice, Join, Mutate
elif (op[0] == "cat"):
xx = Cat(d)
elif (op[0] == "reshape"):
xx = Reshape(d)
elif (op[0] == "masked_scatter_"):
xx = MaskedScatter(d)
elif (op[0] == "gather"):
xx = Gather(d)
elif (op[0] == "nonzero"):
xx = Nonzero(d)
elif (op[0] == "index_select"):
xx = IndexSelect(d)
elif (op[0] == "masked_select"):
xx = MaskedSelect(d)
#blas
elif op[0] in ["addmm", "addmm_"]:
xx = Addmm(d)
elif op[0] == "mm":
xx = Mm(d)
elif op[0] == "bmm":
xx = Bmm(d)
#softmax
elif op[0] == "softmax":
xx = Softmax(d)
elif op[0] == "log_softmax":
xx = LogSoftmax(d)
#loss
elif op[0] == "mse_loss":
xx = MSELoss(d)
#optimizers
elif op[0] == "adam":
xx = Adam(d)
#normalization
elif op[0] == "batch_norm":
xx = BatchNorm(d)
#random
elif op[0] == "randperm":
xx = RandPerm(d)
#memory
elif op[0] in OneZero.ops:
xx = OneZero(d)
elif op[0] == "fill_":
xx = Fill(d)
elif op[0] == "full":
xx = Full(d)
#misc
elif op[0] == "copy_":
xx = Copy(d)
elif op[0] == "clone":
xx = Clone(d)
elif op[0] == "contiguous":
xx = Contiguous(d)
elif op[0] == "any":
xx = Any(d)
elif (op[0] in Activation.ops):
xx = Activation(d)
elif op[0] == "to":
xx = Convert(d)
else:
xx = Foo(d)
return xx
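# Illustrative dispatch sketch (hypothetical marker contents): a record whose
# op is ["mm"] is routed to blas.Mm, ["relu"] to Activation, and any operator
# without a dedicated class falls through to Foo, which reports zero bytes and
# flops.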
def main():
#Read cmd line arguments
cmdArgs = parseArgs()
output = Output(cmdArgs)
output.header()
idx = -1
#Read in all the kernel info
for line in cmdArgs.file:
idx += 1
kernel = eval(line)
assert (kernel)
kernels.append(kernel)
k = kernel
d = Data(k)
mod = k['mod']
op = k['op']
flops = 0
params = {"na": "na"}
tc = "na"
bytes = 0
if (d.dir == "bprop"):
d.seqMarker = k['seqMarker']
seq = k['seqId']
if len(seq) > 1:
pass
seq = k['seqId'][:1]
assert (len(seq) == 1), seq
#assert (seq[0] != 0)
assert (len(d.seqMarker) > 0)
#If there is no useful marker associated, use the
#sequence number to find the kernel from fprop
if len(d.argMarker) == 0:
index = findFpropKernel(seq[0])
if index >= 0:
d.argMarker = kernels[index]['marker']
d.modMarker = kernels[index]['reprMarkers']
mod = kernels[index]['mod']
op = kernels[index]['op']
d.layer = kernels[index]['layer']
d.trace = kernels[index]['trace']
# Check if marker has our annotations
if len(d.argMarker) and Utility.hasNVTX(d.argMarker[0]):
xx = foo(mod, op, d)
bytes = xx.bytes()
flops = xx.flops()
op = xx.op()
params = xx.params()
tc = xx.tc()
if type(op) is list:
if len(op):
op = op[0]
else:
op = ""
if type(mod) is list:
if len(mod):
mod = mod[0]
else:
mod = ""
d.index = idx + 1
# The following 8 come from operator class functions.
d.setParams(params)
d.tc = tc
d.flops = flops
d.bytes = bytes
d.mod = mod
d.op = op
output.data(d)
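#Module-level list of all kernels read so far; shared with findFpropKernel()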
kernels = []
if __name__ == '__main__':
main()
| PyProf-master | pyprof/prof/prof.py |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Aditya Agrawal.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import OperatorLayerBase
from .tensor import Tensor
def readMarker(d):
marker = eval(d.argMarker[0])
return marker['mod'], marker['op'], marker['args']
class OneZero(OperatorLayerBase):
"""
Support for torch.ones, torch.zeros etc.
Fill a tensor with ones or zeros.
"""
ops = ["ones", "ones_like", "zero_", "zeros", "zeros_like"]
def __init__(self, d):
mod, op, args = readMarker(d)
assert mod in ["torch", "Tensor"]
assert op in OneZero.ops
self.mod_ = mod
self.op_ = op
# For ones_like, zero_, zeros_like, the input is a tensor.
if op in ["ones_like", "zero_", "zeros_like"]:
assert(len(args) == 1)
arg = args[0]
self.input = Tensor(arg['shape'], arg['dtype'])
# For ones and zeros, the input can be a list, tuple, sequence of integers.
# E.g. torch.ones((3,5,6)) or torch.ones([3,5,6]) or torch.ones(3,5,6)
else:
assert op in ["ones", "zeros"]
# TODO: Assume the output dtype is float
if args[0]['type'] in ['list', 'tuple']:
assert(len(args) == 1)
self.input = Tensor(args[0]['value'], "float")
elif args[0]['type'] == "int":
# Get all unnamed arguments of type int
args = list(filter(
lambda x: x['name'] == "" and x['type'] == "int",
args))
shape = [x['value'] for x in args]
self.input = Tensor(shape, "float")
else:
assert False
def params(self):
return str(self.input)
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
def bytes(self):
return self.input.bytes
def flops(self):
return 0
class Fill(OperatorLayerBase):
"""
Support for Tensor.fill_.
Fill a tensor with a specific value.
"""
def __init__(self, d):
mod, op, args = readMarker(d)
assert mod == "Tensor"
assert op == "fill_"
self.mod_ = mod
self.op_ = op
assert(len(args) == 2)
arg = args[0]
self.input = Tensor(arg['shape'], arg['dtype'])
def params(self):
return str(self.input)
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
def bytes(self):
return self.input.bytes
def flops(self):
return 0
class Full(OperatorLayerBase):
"""
Support for torch.full.
Create a tensor of the specified size, filled with a specified value.
"""
def __init__(self, d):
mod, op, args = readMarker(d)
assert mod == "torch"
assert op == "full"
self.mod_ = mod
self.op_ = op
assert(len(args) == 2)
arg1, arg2 = args
assert arg1['type'] in ['list', 'tuple']
# TODO: Add more types for arg2
assert arg2['type'] in ['float', 'int']
self.output = Tensor(arg1['value'], arg2['type'])
def params(self):
return str(self.output)
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
def bytes(self):
return self.output.bytes
def flops(self):
return 0
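# Illustrative example (hypothetical call, assuming the usual marker format):
# torch.full((1024,), 3.14) yields a 1024-element output; with the float dtype
# inferred from the fill value that is 1024*4 = 4096 bytes written and 0 flops.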
| PyProf-master | pyprof/prof/memory.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from .utility import Utility
from .base import OperatorLayerBase
from .tensor import Tensor
class Mean(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.mod_ = mod
self.op_ = op
assert (mod in ["torch", "Tensor"])
assert (op == "mean")
#Filter out named parameters
args = list(filter(lambda x: x['name'] == '', args))
assert (len(args) <= 2)
i = args[0]
# The input can be a scalar or a tensor
if 'shape' in i: # tensor
self.input = Tensor(i['shape'], i['dtype'])
else: # scalar
assert ('value' in i)
self.input = Tensor([], i['type'])
self.dir = d.dir
self.sub = d.sub
def params(self):
return str(self.input)
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
def bytes(self):
if self.sub == 0:
return self.input.bytes + self.input.itemsize
else:
return 0
def flops(self):
if self.sub == 0:
return self.input.size + 1
else:
return 0
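# Illustrative example (hypothetical shape): mean over a [1024,1024] fp32 tensor
# (sub == 0) reads 4 MiB plus writes one 4-byte scalar, and is counted as
# 1024*1024 + 1 flops (sum every element, then one divide).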
class Sum(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
assert (mod in ["torch", "Tensor"])
assert (op == "sum")
assert (len(args) >= 1)
#Get input
if (args[0]['name'] == ""):
i = args[0]
else:
i = list(filter(lambda x: x['name'] == "input", args))[0]
self.shape = i['shape']
self.type = i['dtype']
self.sub = d.sub
def params(self):
p = OrderedDict([('T', self.shape), ('type', self.type)])
return p
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
def elems(self):
return Utility.numElems(self.shape)
def flops(self):
# Note: This is incorrect, need to calculate actual flops (say via nvprof)
return self.elems()
def bytes(self):
b = self.elems() * Utility.typeToBytes(self.type)
if self.sub == 0:
return b
else:
return 0
class Norm(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
assert (mod in ["torch", "Tensor"])
assert (op == "norm")
#assert (len(args) == 1)
i = args[0]
self.shape = i['shape']
self.type = i['dtype']
self.sub = d.sub
def params(self):
p = OrderedDict([('T', self.shape), ('type', self.type)])
return p
def elems(self):
return Utility.numElems(self.shape)
def bytes(self):
b = self.elems() * Utility.typeToBytes(self.type)
if self.sub == 0:
return b
else:
return 0
def flops(self):
# square and add plus sqrt
f = 2 * self.elems() + 1
if self.sub == 0:
return f
else:
return 0
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
| PyProf-master | pyprof/prof/reduction.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import OperatorLayerBase
from .tensor import Tensor
class Embedding(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.mod_ = mod
self.op_ = op
assert (mod == "torch.nn.functional")
assert (op == "embedding")
input = args[0]
embedding = args[1]
self.input = Tensor(input['shape'], input['dtype'])
self.embedding = Tensor(embedding['shape'], embedding['dtype'])
assert (len(self.embedding.shape) == 2)
self.dir = d.dir
self.sub = d.sub
return
def params(self):
return str(self.input) + ";" + str(self.embedding)
def op(self):
return self.op_
def mod(self):
return self.mod_
def tc(self):
return "-"
def bytes(self):
b = 0
if self.dir == "fprop":
# read indices
b += self.input.bytes
# read and write the embedding values
b += 2 * self.input.size * self.embedding.shape[1] * self.embedding.itemsize
else:
# 3 times the size of the incoming gradient
b = 3 * self.input.size * self.embedding.shape[1] * self.embedding.itemsize
if self.sub > 0:
b = 0
return b
def flops(self):
# Note: not implemented yet
return 0
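# Illustrative example (hypothetical sizes, sub == 0): looking up 512 int64
# indices in a [30000,1024] fp16 table in fprop reads 512*8 bytes of indices
# and moves 2*512*1024*2 bytes of embedding values (read + write).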
| PyProf-master | pyprof/prof/embedding.py |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Aditya Agrawal.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Dtype(object):
_types = {
"uint8" : (1, "uint8"),
"int8" : (1, "int8"),
"byte" : (1, "byte"),
"char" : (1, "char"),
"bool" : (1, "bool"),
"float16" : (2, "fp16"),
"half" : (2, "fp16"),
"int16" : (2, "int16"),
"short" : (2, "int16"),
"float32" : (4, "fp32"),
"float" : (4, "fp32"),
"int32" : (4, "int32"),
"int" : (4, "int32"),
"int64" : (8, "int64"),
"long" : (8, "int64"),
"float64" : (8, "fp64"),
"double" : (8, "fp64"),
}
@staticmethod
def types():
t = Dtype._types.keys()
return list(t)
def __init__(self, dtype):
assert dtype in Dtype.types()
size, name = Dtype._types[dtype]
self._itemsize = size
self._name = name
def __str__(self):
return self._name
@property
def itemsize(self):
return self._itemsize
def main():
print(Dtype.types())
for i in Dtype.types():
dt = Dtype(i)
print(i, dt, dt.itemsize)
if __name__ == '__main__':
main()
| PyProf-master | pyprof/prof/dtype.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from .tc import TC_Whitelist
from .utility import Utility
from .base import OperatorLayerBase
class Linear(OperatorLayerBase):
'''
Notes:
If the bias occurs before the GEMM, then it's 1 write (bias expansion).
If the bias occurs after, then it's 1 read and 1 write.
The bias in bprop is a reduction and hence is 1 read.
'''
gemmKernels = [
"gemm", "gemv", "dot_kernel", "splitKreduce_kernel",
"reduce_1Block_kernel", "cutlass"
]
biasKernels = [
"kernelReduceContigDim", "kernelReduceNoncontigDim_shared",
"elementwise_kernel", "reduce_kernel", "kernelPointwiseApply2",
"2d_grouped_direct_kernel"
]
def setXWBMNK(self, args):
x = None
w = None
b = None
if (len(args) == 2):
x, w = args
elif (len(args) == 3):
x, w, b = args
assert (x['type'] == w['type'] == "tensor")
if (b['type'] == "tensor"):
assert (len(b['shape']) == 1)
elif (b['type'] == "NoneType"):
assert b['value'] is None
b = None
else:
assert False
else:
assert False
assert (len(w['shape']) == 2)
k1 = x['shape'][-1]
n, k2 = w['shape']
assert (k1 == k2)
if b is not None:
assert (b['shape'][0] == n)
t1 = x['dtype']
t2 = w['dtype']
assert (t1 == t2)
# X, W, B
self.x = x['shape']
self.w = w['shape']
self.b = b['shape'] if b is not None else None
self.type = t1
# M, N, K
#n = Utility.numElems(x[0:-1])
n = self.x[0:-1]
k = self.x[-1]
m, k1 = self.w
assert (k == k1)
self.m = m
self.n = n
self.k = k
def tc(self):
if self.op() == "linear":
if self.name in TC_Whitelist():
return 1
return 0
else:
return "-"
def __init__(self, d):
self.name = d.name
self.dir = d.dir
self.sub = d.sub
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
assert (mod == "torch.nn.functional")
assert (op == "linear")
self.setXWBMNK(args)
if any(x in d.name for x in Linear.gemmKernels):
self.op_ = "linear"
else:
assert any(x in d.name for x in Linear.biasKernels), f"Kernel name: {d.name}"
self.op_ = "bias"
'''
elif (("kernelPointwiseApply2" in d.name) or ("kernelReduceContigDim" in d.name) or ("kernelReduceNoncontigDim_shared" in d.name)):
#bias expansion was before the gemm
self.op_ = "bias"
elif ("elementwise_kernel" in d.name):
#Bias addition happens later with a broadcast tensor
self.op_ = "bias"
assert (len(d.argMarker) == 2)
marker = eval(d.argMarker[1])
mod = marker['mod']
op = marker['op']
args = marker['args']
assert (mod == "Tensor")
assert (op == "__iadd__")
assert (len(args) == 2)
mn = args[0]['shape']
b = args[1]['shape']
assert (len(b) == 1)
assert (mn == (self.n + (self.m,)))
assert (b == self.b)
else:
assert False
'''
def params(self):
#p = OrderedDict([('X', self.x), ('W', self.w), ('B', self.b), ('type', self.type)])
m, n, k, x, w, t = self.m, self.n, self.k, self.x, self.w, self.type
if len(n) == 1:
n = n[0]
if self.op_ == "linear":
if self.dir == "fprop":
p = OrderedDict([('M', m), ('N', n), ('K', k), ('type', t)])
elif self.dir == "bprop":
if self.sub == 0: #dgrad (most likely)
p = OrderedDict([('M', k), ('N', n), ('K', m), ('type', t)])
elif self.sub == 1: #wgrad (most likely)
p = OrderedDict([('M', k), ('N', m), ('K', n), ('type', t)])
else:
#This happens when there are additional kernels for reduction
p = OrderedDict([('X', x), ('W', w), ('type', t)])
else:
assert False
elif self.op_ == "bias":
p = OrderedDict([('M', m), ('N', n), ('type', t)])
else:
assert False
return p
def op(self):
return self.op_
def bytesFlops(self):
m = self.m
n = Utility.numElems(self.n)
k = self.k
if self.op_ == "linear":
if self.dir == "fprop":
f = m * n * k * 2
b = (m * n + m * k + n * k) * Utility.typeToBytes(self.type)
elif self.dir == "bprop":
if self.sub == 0: #dgrad (most likely)
f = m * n * k * 2
b = (m * n + m * k + n * k) * Utility.typeToBytes(self.type)
elif self.sub == 1: #wgrad (most likely)
f = m * n * k * 2
b = (m * n + m * k + n * k) * Utility.typeToBytes(self.type)
else:
#This happens when there are additional kernels for reduction
f = 0
b = 0
else:
assert False
elif self.op_ == "bias":
f = m * n
b = 2 * m * n * Utility.typeToBytes(self.type)
else:
assert False
return b, f
# TODO: Fix bytes and flops with CUTLASS kernels.
def bytes(self):
b, f = self.bytesFlops()
return b
def flops(self):
b, f = self.bytesFlops()
return f
def mod(self):
return self.mod_
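# Illustrative mapping sketch (hypothetical sizes): for x: [32,1024] fp16 and
# weight W: [4096,1024] (torch.nn.functional.linear convention), setXWBMNK
# yields M = 4096 (out features), N = 32 (batch), K = 1024 (in features), so
# the fprop GEMM is counted as 2*M*N*K flops.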
| PyProf-master | pyprof/prof/linear.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import OperatorLayerBase
from .tensor import Tensor
class Convert(OperatorLayerBase):
"""
Class to handle convert operations.
"""
ops = ["byte", "char", "double", "float", "half", "int", "long", "short", "to"]
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.mod_ = mod
self.op_ = op
assert (mod == "Tensor")
assert (op in Convert.ops)
assert (len(args) == 1)
t = args[0]
if t['type'] == "tensor":
self.input = Tensor(t['shape'], t['dtype'])
else: # scalar
self.input = Tensor([], t['type'])
if op == "to":
# the output dtype is unknown
self.output = self.input
else:
self.output = Tensor(self.input.shape, op)
def params(self):
return str(self.input)
def op(self):
return self.op_
def mod(self):
return self.mod_
def tc(self):
return "-"
def flops(self):
return 0
def bytes(self):
return self.input.bytes + self.output.bytes
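# Illustrative example (hypothetical shape): Tensor.half() on a [32,64] fp32
# tensor reads 32*64*4 bytes and writes 32*64*2 bytes, so bytes() reports
# 12288; flops() is 0 since no arithmetic is modeled.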
| PyProf-master | pyprof/prof/convert.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
class Utility(object):
@staticmethod
def numElems(shape):
assert (type(shape) == tuple)
return reduce(lambda x, y: x * y, shape, 1)
@staticmethod
def typeToBytes(t):
if (t in ["uint8", "int8", "byte", "char", "bool"]):
return 1
elif (t in ["float16", "half", "int16", "short"]):
return 2
elif (t in ["float32", "float", "int32", "int"]):
return 4
elif (t in ["int64", "long", "float64", "double"]):
return 8
assert False
@staticmethod
def typeToString(t):
if (t in ["uint8", "byte", "char"]):
return "uint8"
elif (t in [
"int8",
]):
return "int8"
elif (t in [
"int16",
"short",
]):
return "int16"
elif (t in ["float16", "half"]):
return "fp16"
elif (t in ["float32", "float"]):
return "fp32"
elif (t in [
"int32",
"int",
]):
return "int32"
elif (t in ["int64", "long"]):
return "int64"
elif (t in [
"float64",
"double",
]):
return "fp64"
elif (t in [
"bool",
]):
return "bool"
assert False
@staticmethod
def hasNVTX(marker):
if type(marker) is str:
try:
marker = eval(marker)
except:
return False
if type(marker) is dict:
keys = marker.keys()
return ("mod" in keys) and ("op" in keys) and ("args" in keys)
else:
return False
@staticmethod
def isscalar(t):
return (t in ["float", "int"])
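# A minimal sanity-check sketch of the helpers above (values are illustrative).
if __name__ == '__main__':
    print(Utility.numElems((3, 4, 5)))     # 60
    print(Utility.typeToBytes("float16"))  # 2
    print(Utility.typeToString("half"))    # fp16
    print(Utility.isscalar("int"))         # True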
| PyProf-master | pyprof/prof/utility.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from .utility import Utility
# Work in progress.
#poolFuncs = ["max_pool2d_with_indices_forward", "max_pool2d_with_indices"]
class MaxPool2d(object):
def parse(marker):
def convert2Tuple(arg):
assert (arg['type'] in ["int", "tuple"])
if arg['type'] == "int":
return (arg['value'], arg['value'])
else:
return arg['value']
mod = marker['mod']
op = marker['op']
args = marker['args']
assert (mod == "torch.nn.functional")
assert (op == "max_pool2d")
assert (len(args) >= 2)
#input
assert (args[0]['name'] == "")
inp = args[0]
assert (inp['type'] == "tensor")
i = inp['shape']
t = inp['dtype']
assert (len(i) == 4) #nchw tensor
#kernel
if (args[1]['name'] == ""):
k = args[1]
else:
k = list(filter(lambda x: x['name'] == "kernel_size", args))[0]
k = convert2Tuple(k)
#stride
s = k #default value
if ((len(args) >= 3) and args[2]['name'] == ""):
s = args[2]
s = convert2Tuple(s)
elif any(x['name'] == "stride" for x in args):
s = list(filter(lambda x: x['name'] == "stride", args))[0]
s = convert2Tuple(s)
#padding
p = (0, 0)
if ((len(args) >= 4) and args[3]['name'] == ""):
p = args[3]
p = convert2Tuple(p)
elif any(x['name'] == "padding" for x in args):
p = list(filter(lambda x: x['name'] == "padding", args))[0]
p = convert2Tuple(p)
params = OrderedDict([('T', i), ('K', k), ('s', s), ('p', p), ('type', t)])
return params
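# Illustrative sketch (hypothetical marker): max_pool2d on an NCHW [32,64,56,56]
# fp16 input with kernel_size=3 and stride=2 passed as keywords parses to
# OrderedDict(T=[32,64,56,56], K=(3,3), s=(2,2), p=(0,0), type='float16').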
| PyProf-master | pyprof/prof/pooling.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| PyProf-master | pyprof/prof/__init__.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from .utility import Utility
from .base import OperatorLayerBase
class RandPerm(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
assert (mod == "torch")
assert (op == "randperm")
assert (len(args) == 1)
n = args[0]
assert n['type'] == "int"
self.n = n['value']
def params(self):
p = OrderedDict([('N', self.n)])
return p
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
def bytes(self):
return self.n * Utility.typeToBytes("int64")
def flops(self):
# Depends on RNG but this is probably a reasonable assumption.
return self.n * 3
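# Illustrative example (hypothetical call): torch.randperm(1000) is modeled as
# writing 1000 int64 values (8000 bytes) and roughly 3*1000 flops for the
# generation and shuffle.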
| PyProf-master | pyprof/prof/randomSample.py |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Aditya Agrawal.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
import numpy as np
from .dtype import Dtype
class Tensor(object):
def __init__(self, shape, dtype):
assert type(shape) in [tuple, list]
assert dtype in Dtype.types()
self._shape = list(shape)
self._dtype = dtype
def __str__(self):
t = Dtype(self.dtype)
return str(self.shape).replace(" ", "") + str(t)
@property
def ndim(self):
# can be 0 for scalars
return len(self._shape)
@property
def shape(self):
# can be () for scalars
return self._shape
@property
def size(self):
# number of elements
return reduce(lambda x, y: x * y, self.shape, 1)
@property
def dtype(self):
return self._dtype
@property
def itemsize(self):
return Dtype(self.dtype).itemsize
@property
def bytes(self):
return self.size * self.itemsize
@staticmethod
def broadcast(tensors):
r'''
The input is a list of Tensors.
The output is a Tensor.
'''
assert len(tensors) > 1
shape = tensors[0].shape
# TODO: Assume the output dtype is the same as the first arg
dt = tensors[0].dtype
# Check if shapes are different
if any(t.shape != shape for t in tensors):
x = [np.empty(t.shape, t.dtype) for t in tensors]
try:
out = np.broadcast(*x)
except:
assert False # not broadcastable
return Tensor(out.shape, dt)
else:
return Tensor(shape, dt)
def main():
for shape in [(), (1,), (3,7), (3,7,11)]:
for dt in Dtype.types():
t = Tensor(shape, dt)
print(t.ndim, str(t.shape).replace(" ", ""), \
t.size, t.dtype, t.itemsize, t.bytes, t)
# Broadcast test
a = Tensor([1,3], "int")
b = Tensor([3,1], "float")
c = Tensor([1,3], "float64")
d = np.ones([], "float64")
out = Tensor.broadcast([a,b,c,d])
print(out.shape)
if __name__ == '__main__':
main()
| PyProf-master | pyprof/prof/tensor.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from .utility import Utility
from .base import OperatorLayerBase
#TODO: Add support for additional loss functions.
class MSELoss(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
assert (mod == "torch.nn.functional")
assert (op == "mse_loss")
assert (len(args) == 3)
#Get input, target and reduction
if (args[0]['name'] == ""):
x = args[0]
else:
x = list(filter(lambda x: x['name'] == "input", args))[0]
if (args[1]['name'] == ""):
y = args[1]
else:
y = list(filter(lambda x: x['name'] == "target", args))[0]
if (args[2]['name'] == ""):
r = args[2]
else:
r = list(filter(lambda x: x['name'] == "reduction", args))[0]
assert (x['type'] == y['type'] == "tensor")
assert (x['shape'] == y['shape'])
assert (x['dtype'] == y['dtype'])
assert (r['type'] == "str")
assert (r['value'] in ["none", "mean", "sum"])
self.shape = x['shape']
self.type = x['dtype']
self.red = r['value']
self.dir = d.dir
def params(self):
p = OrderedDict([('T', self.shape), ('type', self.type), ('red', self.red)])
return p
def elems(self):
red = self.red
e = Utility.numElems(self.shape)
if self.dir == "fprop":
if red == "none":
e *= 3
else:
e *= 2
else:
if red == "none":
e *= 4
else:
e *= 3
return e
def bytes(self):
return self.elems() * Utility.typeToBytes(self.type)
def flops(self):
return self.elems() * 2 + 1
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
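# Illustrative example (hypothetical shapes): mse_loss on [8,1024] fp32 input
# and target with reduction="mean" in fprop touches 2*8*1024 elements (read
# input and target), i.e. 65536 bytes, and 2*16384 + 1 flops under this model.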
| PyProf-master | pyprof/prof/loss.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import OperatorLayerBase
from .tensor import Tensor
class Activation(OperatorLayerBase):
"""
This class handles the various activation functions.
"""
ops = [
"celu", "elu", "elu_", "hardshrink", "hardtanh", "hardtanh_", "leaky_relu", "leaky_relu_", "logsigmoid",
"prelu", "relu", "relu_", "relu6", "rrelu", "rrelu_", "selu", "sigmoid", "softplus", "softshrink", "softsign",
"tanh", "tanhshrink", "threshold", "threshold_"
]
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.mod_ = mod
self.op_ = op
assert (mod in ["torch.nn.functional", "torch", "Tensor"])
#Filter out named parameters
args = list(filter(lambda x: x['name'] == '', args))
assert (len(args) >= 1)
arg = args[0]
assert (arg['type'] == "tensor")
self.input = Tensor(arg['shape'], arg['dtype'])
self.dir = d.dir
def params(self):
return str(self.input)
def flops(self):
# TODO: revise based on op
return self.input.size
def bytes(self):
# TODO: revise based on op
direction = self.dir
b = self.input.bytes
# fprop is 1 read, 1 write
# bprop is 2 reads, 1 write
b *= 2 if direction == "fprop" else 3
return b
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
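# Illustrative example (hypothetical shape): relu on a [128,4096] fp16 tensor in
# fprop is modeled as 1 read + 1 write (2*128*4096*2 bytes = 2 MiB) and
# 128*4096 flops.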
| PyProf-master | pyprof/prof/activation.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from .tc import TC_Whitelist
from .utility import Utility
from .base import OperatorLayerBase
import numpy as np
class Addmm(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
assert (mod in [
"torch",
"Tensor",
])
assert (op in [
"addmm",
"addmm_",
])
#Get alpha and beta
alpha = 1
beta = 1
if any(x['name'] == 'alpha' for x in args):
alpha = list(filter(lambda x: x['name'] == "alpha", args))[0]
alpha = alpha['value']
if any(x['name'] == 'beta' for x in args):
beta = list(filter(lambda x: x['name'] == "beta", args))[0]
beta = beta['value']
self.alpha = alpha
self.beta = beta
#Filter out named parameters
args = list(filter(lambda x: x['name'] == '', args))
assert (len(args) == 3)
C, A, B = args
m, k1 = A['shape']
k2, n = B['shape']
assert (k1 == k2)
t1 = A['dtype']
t2 = B['dtype']
t3 = C['dtype']
assert (t1 == t2 == t3)
self.A = A
self.B = B
self.C = C
self.m = m
self.n = n
self.k = k1
self.type = t1
self.name = d.name
return
def tc(self):
if self.name in TC_Whitelist():
return 1
return 0
def bytes(self):
m, n, k = self.m, self.n, self.k
return Utility.typeToBytes(self.type) * (m * n + m * k + n * k)
def flops(self):
return self.m * self.n * self.k * 2
def op(self):
return self.op_
def mod(self):
return self.mod_
def params(self):
p = OrderedDict([('M', self.n), ('N', self.m), ('K', self.k), ('type', self.type)])
return p
class Bmm(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
assert (mod == "torch") and (op == "bmm")
#Filter out named params (kwargs)
args = list(filter(lambda x: x['name'] == "", args))
assert (len(args) == 2)
A, B = args
b1, m, k1 = A['shape']
b2, k2, n = B['shape']
assert (b1 == b2)
assert (k1 == k2)
t1 = A['dtype']
t2 = B['dtype']
assert (t1 == t2)
self.A = A
self.B = B
self.b = b1
self.m = m
self.n = n
self.k = k1
self.type = t1
self.name = d.name
def tc(self):
if self.name in TC_Whitelist():
return 1
return 0
def params(self):
#p = OrderedDict([('A', A['shape']), ('B', B['shape']), ('type', t1)])
p = OrderedDict([('B', self.b), ('M', self.n), ('N', self.m), ('K', self.k), ('type', self.type)])
return p
def flops(self):
return self.b * self.m * self.n * self.k * 2
def bytes(self):
b, m, n, k = self.b, self.m, self.n, self.k
return Utility.typeToBytes(self.type) * b * (m * n + m * k + n * k)
def op(self):
return self.op_
def mod(self):
return self.mod_
class Matmul(OperatorLayerBase):
NON_GEMM = ["kernelPointwiseApply2", "reduce_1Block_kernel", "elementwise_kernel"]
NON_TC = NON_GEMM + ["dot_kernel"]
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
self.name = d.name
self.sub = d.sub
assert ((mod == "torch") and (op == "matmul")) or ((mod == "Tensor") and (op == "__matmul__"))
assert (len(args) == 2)
assert any([x in d.name for x in Matmul.NON_TC + ["gemm", "gemv"]])
A, B = args
t1 = A['dtype']
t2 = B['dtype']
assert (t1 == t2)
A = A['shape']
B = B['shape']
self.A = A
self.B = B
self.type = t1
# batch, MNK
if (len(A) == 1) and (len(B) == 1):
#dot product
assert (A[0] == B[0])
self.b = (1, )
self.m = 1
self.n = 1
self.k = A[0]
elif (len(A) == 2) and (len(B) == 2):
#gemm
m, k1 = A
k2, n = B
assert (k1 == k2)
self.b = (1, )
self.m = m
self.n = n
self.k = k1
elif (len(A) == 1) and (len(B) == 2):
#vector matrix
k1 = A[0]
k2, n = B
assert (k1 == k2)
self.b = (1, )
self.m = 1
self.n = n
self.k = k1
elif (len(A) == 2) and (len(B) == 1):
#gemv
m, k1 = A
k2 = B[0]
assert (k1 == k2)
self.b = (1, )
self.m = m
self.n = 1
self.k = k1
elif (len(A) == 1) and (len(B) > 2):
assert (A[0] == B[-2])
self.b = B[0:-2]
self.m = 1
self.n = B[-1]
self.k = B[-2]
elif (len(B) == 1) and (len(A) > 2):
assert (B[0] == A[-1])
self.b = A[0:-2]
self.m = A[-2]
self.n = 1
self.k = A[-1]
else:
assert (len(A) >= 2)
assert (len(B) >= 2)
assert (A[-1] == B[-2])
self.m = A[-2]
self.n = B[-1]
self.k = A[-1]
aa = np.empty(A[0:-2])
bb = np.empty(B[0:-2])
self.b = np.broadcast(aa, bb).shape
def params(self):
return OrderedDict([('A', self.A), ('B', self.B), ('type', self.type)])
def tc(self):
if self.name in Matmul.NON_TC:
return "-"
else:
if self.name in TC_Whitelist():
return 1
return 0
def bytes(self):
# TODO: check bytes for non-GEMM cases
if self.name in Matmul.NON_GEMM:
return 2 * Utility.typeToBytes(self.type) * Utility.numElems(self.A) #could be B as well
else:
m, n, k = self.m, self.n, self.k
return Utility.typeToBytes(self.type) * (m * n + m * k + n * k)
def flops(self):
# TODO: calculate actual FLOPs. At least we're not saying it's GEMM FLOPs for now.
if self.name in Matmul.NON_GEMM:
return 0
else:
return Utility.numElems(self.b) * self.m * self.n * self.k * 2
def op(self):
return self.op_
def mod(self):
return self.mod_
class Mm(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
assert (mod == "torch") and (op == "mm")
assert (len(args) == 2)
A, B = args
m, k1 = A['shape']
k2, n = B['shape']
assert (k1 == k2)
t1 = A['dtype']
t2 = B['dtype']
assert (t1 == t2)
self.A = A
self.B = B
self.m = m
self.n = n
self.k = k1
self.type = t1
self.name = d.name
return
def params(self):
p = OrderedDict([('M', self.n), ('N', self.m), ('K', self.k), ('type', self.type)])
return p
def tc(self):
if self.name in TC_Whitelist():
return 1
return 0
def bytes(self):
m, n, k = self.m, self.n, self.k
return Utility.typeToBytes(self.type) * (m * n + m * k + n * k)
def flops(self):
return self.m * self.n * self.k * 2
def op(self):
return self.op_
def mod(self):
return self.mod_
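# Illustrative example (hypothetical sizes): torch.mm on A: [1024,512] and
# B: [512,2048], both fp16, is counted as 2*1024*2048*512 flops and
# (1024*2048 + 1024*512 + 512*2048) * 2 bytes under the model above.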
| PyProf-master | pyprof/prof/blas.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from .utility import Utility
import numpy as np
from .base import OperatorLayerBase
from .tensor import Tensor
from functools import reduce
import operator
class Cat(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.mod_ = mod
self.op_ = op
assert (mod == "torch")
assert (op == "cat")
assert (len(args) >= 2)
dtype = args[0]['dtype']
tensors = []
# Get all tensor arguments
args = filter(lambda x: x['type'] == "tensor", args)
for arg in args:
assert (arg['dtype'] == dtype)
t = Tensor(arg['shape'], dtype)
tensors.append(t)
self.input = tensors
self.sub = d.sub
def params(self):
return ";".join([str(t) for t in self.input])
def flops(self):
return 0
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
def bytes(self):
# 1 read, 1 write
b = 2 * reduce(operator.add, [t.bytes for t in self.input])
return b if (self.sub == 0) else 0
class Reshape(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
assert (mod == "Tensor")
assert (op == "reshape")
#Temporarily commenting three lines
#assert (len(args) == 2)
#t,s = args
#assert s['type'] == "tuple"
t = args[0]
assert t['type'] == "tensor"
self.type = t['dtype']
self.shape = t['shape']
def params(self):
p = OrderedDict([('T', self.shape), ('type', self.type)])
return p
def flops(self):
return 0
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
def bytes(self):
return 0
class Gather(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
assert (mod == "Tensor") or (mod == "torch")
assert (op == "gather")
#Filter out the "out" parameter
args = list(filter(lambda x: x['name'] != 'out', args))
assert (len(args) == 3)
#Get input
if (args[0]['name'] == ""):
arg = args[0]
else:
arg = list(filter(lambda x: x['name'] == "input", args))[0]
assert (arg['type'] == "tensor")
self.shape = arg['shape']
self.type = arg['dtype']
def params(self):
p = OrderedDict([('T', self.shape), ('type', self.type)])
return p
def flops(self):
return 0
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
def bytes(self):
return 2 * Utility.numElems(self.shape) * Utility.typeToBytes(self.type)
class MaskedScatter(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
assert (mod == "Tensor")
assert (op == "masked_scatter_")
assert (len(args) == 3)
dst, mask, src = args
assert (dst['type'] == mask['type'] == src['type'] == "tensor")
assert (mask['dtype'] == "uint8")
assert (dst['dtype'] == src['dtype'])
assert (dst['shape'] == mask['shape'])
self.shape = dst['shape']
self.type = dst['dtype']
self.seqId = d.seqId
self.sub = d.sub
def params(self):
p = OrderedDict([('T', self.shape), ('type', self.type)])
return p
def flops(self):
return 0
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
def bytes(self):
elems = Utility.numElems(self.shape)
#src and dst
b = 2 * elems * Utility.typeToBytes(self.type)
#mask (uint8)
b += elems
if (self.sub > 0):
b = 0
return b
class Nonzero(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
assert (mod in ["torch", "Tensor"])
assert (op == "nonzero")
assert (len(args) == 1)
arg = args[0]
self.shape = arg['shape']
self.type = arg['dtype']
self.seqId = d.seqId
self.sub = d.sub
def params(self):
p = OrderedDict([('T', self.shape), ('type', self.type)])
return p
def flops(self):
return 0
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
def bytes(self):
elems = Utility.numElems(self.shape)
dim = len(self.shape)
#input tensor
b = elems * Utility.typeToBytes(self.type)
#in the worst case, the output is a (elems x dim) tensor of type "long"
b += elems * dim * Utility.typeToBytes("int64")
if self.sub > 0:
return 0
else:
return b
class IndexSelect(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
assert (mod == "Tensor") or (mod == "torch")
assert (op == "index_select")
#Filter out the "out" parameter
args = list(filter(lambda x: x['name'] != 'out', args))
assert (len(args) == 3)
#Get input, dim and index
if (args[0]['name'] == ""):
t = args[0]
else:
t = list(filter(lambda x: x['name'] == "input", args))[0]
if (args[1]['name'] == ""):
d = args[1]
else:
d = list(filter(lambda x: x['name'] == "dim", args))[0]
if (args[2]['name'] == ""):
i = args[2]
else:
i = list(filter(lambda x: x['name'] == "index", args))[0]
assert (t['type'] == i['type'] == "tensor")
assert (d['type'] == "int")
assert (i['dtype'] == "int64")
assert (len(i['shape']) == 1)
shape = t['shape']
dim = d['value']
indices = i['shape'][0]
assert (dim < len(shape))
self.shape = shape
self.dim = dim
self.indices = indices
self.type = t['dtype']
def params(self):
p = OrderedDict([('T', self.shape), ('D', self.dim), ('I', self.indices), ('type', self.type)])
return p
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
def flops(self):
return 0
def bytes(self):
#determine the shape of the output tensor
shape = list(self.shape)
shape[self.dim] = self.indices
shape = tuple(shape)
b = 0
#bytes to read the input and write the output
elems = Utility.numElems(shape)
b += 2 * elems * Utility.typeToBytes(self.type)
#bytes to read the indices
b += self.indices * Utility.typeToBytes("int64")
return b
class MaskedSelect(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
self.sub = d.sub
assert (mod == "Tensor") or (mod == "torch")
assert (op == "masked_select")
#Filter out the "out" parameter
args = list(filter(lambda x: x['name'] != 'out', args))
assert (len(args) == 2)
#Get input and mask
if (args[0]['name'] == ""):
t = args[0]
else:
t = list(filter(lambda x: x['name'] == "input", args))[0]
if (args[1]['name'] == ""):
m = args[1]
else:
m = list(filter(lambda x: x['name'] == "mask", args))[0]
assert (m['dtype'] == "uint8")
tensor = t['shape']
mask = m['shape']
#check for broadcast condition
if (tensor != mask):
array1 = np.empty(list(tensor))
array2 = np.empty(list(mask))
try:
out = np.broadcast(array1, array2).shape
except:
assert False
self.tshape = tensor
self.mshape = mask
self.type = t['dtype']
def params(self):
p = OrderedDict([('T', self.tshape), ('M', self.mshape), ('type', self.type)])
return p
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
def bytes(self):
tensor = self.tshape
mask = self.mshape
t = self.type
#in the worst case, #output elements = #input elements
b = 2 * Utility.numElems(tensor) * Utility.typeToBytes(t)
#mask tensor (assuming uint8)
b += Utility.numElems(mask)
return b
def flops(self):
return 0
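# Illustrative example (hypothetical shapes): masked_select on a [512,512] fp32
# tensor with a same-shaped uint8 mask is modeled pessimistically as
# 2*512*512*4 bytes for input and output plus 512*512 bytes for the mask.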
| PyProf-master | pyprof/prof/index_slice_join_mutate.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .base import OperatorLayerBase
from .tensor import Tensor
from functools import reduce
import operator
class Pointwise(OperatorLayerBase):
# TODO: Add more operators.
# TODO: Determining the output dtype is tricky.
# TODO: Refine calculations based on direction.
# TODO: Refine calculations for non-arithmetic ops.
# Unary
unary = ["abs", "abs_", "neg", "neg_", "reciprocal", "reciprocal_"]
unary += ["__abs__", "__neg__"]
# Unary bitwise
unary += ["__invert__"]
# Exponential and log (unary)
exp_log = ["exp", "exp_", "expm1", "expm1_", "log", "log_",
"log10", "log10_", "log1p", "log1p_", "log2", "log2_"]
# Sqrt (unary)
sqrt = ["rsqrt", "rsqrt_", "sqrt", "sqrt_"]
# Representation (unary)
representation = ["ceil", "ceil_", "clamp", "clamp_", "floor", "floor_",
"frac", "frac_", "round", "round_", "sign", "sign_",
"trunc", "trunc_"]
# Trigonometric and transcendental (unary)
trig_trans = ["acos", "acos_", "asin", "asin_", "atan", "atan_",
"atan2", "atan2_", "cos", "cos_", "cosh", "cosh_",
"sin", "sin_", "sinh", "sinh_", "tan", "tan_",
"sigmoid", "sigmoid_", "tanh", "tanh_"]
# Error (unary)
error = ["erf", "erf_", "erfc", "erfc_", "erfinv", "erfinv_"]
# Binary
binary = ["add", "add_", "div", "div_", "mul", "mul_",
"remainder", "remainder_", "sub", "sub_"]
binary += ["__add__", "__sub__", "__mul__", "__floordiv__",
"__truediv__", "__mod__"]
binary += ["__radd__", "__rsub__", "__rmul__", "__rdiv__",
"__rtruediv__", "__rfloordiv__"]
binary += ["fmod", "fmod_"]
# Binary inplace
ibinary = ["__iadd__", "__isub__", "__imul__", "__itruediv__"]
# Power (binary)
power = ["pow", "pow_", "__pow__", "__rpow__"]
# Comparison (binary)
comp = ["lt", "lt_", "gt", "gt_", "ge", "ge_", "le", "le_",
"eq", "eq_", "ne", "ne_"]
comp += ["__lt__", "__gt__", "__ge__", "__le__", "__eq__", "__ne__"]
# Logical (binary)
logical = ["__and__", "__or__", "__xor__", "__lshift__", "__rshift__"]
# Logical inplace (binary)
ilogical = ["__iand__", "__ior__", "__ixor__", "__ilshift__", "__irshift__"]
# Ternary
ternary = ["addcdiv", "addcdiv_", "addcmul", "addcmul_"]
# Misc
misc = ["digamma", "lerp", "lerp_", "mvlgamma"]
ops = unary + binary + ibinary + comp + logical + ilogical + \
ternary + exp_log + power + sqrt + representation + trig_trans + \
error + misc
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
self.dir = d.dir
assert (d.dir in ["fprop", "bprop"])
assert (op in Pointwise.ops)
# Filter out all named parameters (kwargs).
# This might require revisiting in future.
args = list(filter(lambda x: x['name'] == "", args))
# Filter out non tensors
#args = list(filter(lambda x: x['type'] == "tensor", args))
assert (len(args) <= 4)
self.input = []
for arg in args:
t = arg['type']
if (t == "tensor"):
tensor = Tensor(arg['shape'], arg['dtype'])
elif t in ['float', 'int']:
tensor = Tensor([], t)
else:
assert False
self.input.append(tensor)
def params(self):
return ";".join([str(t) for t in self.input])
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
def bytes_flops(self):
b = f = 0
# Unary
if self.op() in Pointwise.unary + Pointwise.representation:
assert (len(self.input) == 1)
b = 2 * self.input[0].bytes
f = self.input[0].size
elif self.op() in Pointwise.exp_log + Pointwise.trig_trans + \
Pointwise.sqrt + Pointwise.error:
assert (len(self.input) == 1)
b = 2 * self.input[0].bytes
f = self.input[0].size * 20 # estimate
# Binary
elif self.op() in Pointwise.comp + \
Pointwise.binary + Pointwise.ibinary + \
Pointwise.logical + Pointwise.ilogical:
assert (len(self.input) == 2)
out = Tensor.broadcast(self.input)
if self.dir == "fprop":
b = reduce(operator.add, [t.bytes for t in self.input])
# The output of comparison is bool
if self.op() in Pointwise.comp:
out = Tensor(out.shape, "bool")
b += out.bytes
f = out.size
else:
if (self.op() in ["add", "__add__", "sub", "__sub__", "__isub__"]):
b = 2 * out.bytes
f = 0
elif (self.op() in ["__mul__", "__imul__", "__rmul__", "div", "__truediv__"]):
b = 3 * out.bytes
f = out.size
else:
e = f'{self.op()} bprop not supported yet. Please file a bug.'
assert False, e
elif self.op() in Pointwise.power:
assert (len(self.input) == 2)
out = Tensor.broadcast(self.input)
b = reduce(operator.add, [t.bytes for t in self.input])
b += out.bytes
f = out.size * 20 # estimate
# Ternary
elif self.op() in Pointwise.ternary:
# Remove scalars
tensors = list(filter(lambda x: x.shape != [], self.input))
assert len(tensors) == 3
out = Tensor.broadcast(tensors)
b = reduce(operator.add, [t.bytes for t in tensors])
b += out.bytes
f = 3 * out.size
else:
e = f'{self.op()} not supported yet. Please file a bug.'
assert False, e
return b, f
def bytes(self):
b, f = self.bytes_flops()
return b
def flops(self):
b, f = self.bytes_flops()
return f
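#Worked example (illustrative, not part of PyProf): an fprop elementwise add of
#two [1024, 1024] float32 tensors broadcasts to a [1024, 1024] output, so
#bytes = 4,194,304 + 4,194,304 (reads) + 4,194,304 (write) = 12,582,912 and
#flops = 1,048,576 (one add per output element).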
| PyProf-master | pyprof/prof/pointwise.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import OperatorLayerBase
from .tensor import Tensor
class Dropout(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
assert (mod == "torch.nn.functional")
assert (op == "dropout")
self.inp = Tensor(args[0]['shape'], args[0]['dtype'])
self.dir = d.dir
return
def params(self):
return str(self.inp)
def op(self):
return self.op_
def mod(self):
return self.mod_
def tc(self):
return "-"
def bytes(self):
#Ignoring the cost of writing and reading the mask
return self.inp.bytes * 2
def flops(self):
# Note: This is approximate and depends on the RNG
return 5 * self.inp.size
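#Worked example (illustrative): for a [32, 1024] float16 input, bytes() returns
#2 * 32 * 1024 * 2 = 131,072 (mask traffic is ignored) and flops() returns
#5 * 32 * 1024 = 163,840.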
| PyProf-master | pyprof/prof/dropout.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from .tc import TC_Whitelist
from .utility import Utility
from .base import OperatorLayerBase
class Conv(OperatorLayerBase):
"""
# N = batch size
# C,H,W = input channels, height, width
# K,P,Q = output channels, height, width
# R,S = filter height, width
# g = groups
"""
#todo: refine winograd and FFT
convAuxList = [
"nchwToNhwc",
"nhwcToNchw",
"OffsetsKernel",
]
winoAuxList = ["generateWinogradTilesKernel", "winogradWgradData", "winogradWgradOutput", "winogradWgradDelta"]
fftAuxList = ["compute_gemm_pointers", "flip_filter", "fft2d_r2c_", "fft2d_c2r_", "fft1d_r2c", "fft1d_c2r"]
miscAuxList = [
"scaleTensor_kernel",
]
convList = [
"_s884cudnn_", "_s1688cudnn_", "_scudnn_", "2d_grouped_direct_kernel", "cudnn::detail::implicit_convolve_sgemm",
"cudnn::detail::dgrad2d_alg1_1", "cudnn::detail::wgrad_alg0_engine", "cudnn::detail::dgrad_engine",
"dgrad_1x1_stride_2x2", "spatialDepthwiseConvolutionUpdateOutput"
]
winoList = ["winograd3x3Kernel", "_sgemm_"]
fftList = [
"fermiPlusCgemmLDS128_batched",
"_gcgemm_",
]
miscList = []
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
self.dir = d.dir
self.name = d.name
self.sub = d.sub
assert (mod == "torch.nn.functional")
assert (op in ["conv1d", "conv2d"])
length = len(args)
assert (length >= 2) and (length <= 7)
i, w = args[0], args[1]
assert (i['type'] == "tensor")
assert (w['type'] == "tensor")
#ignore bias
if (length >= 4) and (args[3]['name'] == ""):
s = args[3]
elif any(x['name'] == 'stride' for x in args):
s = list(filter(lambda x: x['name'] == 'stride', args))[0]
else:
s = {'name': 'stride', 'type': 'int', 'value': 1}
if (length >= 5) and (args[4]['name'] == ""):
p = args[4]
elif any(x['name'] == 'padding' for x in args):
p = list(filter(lambda x: x['name'] == 'padding', args))[0]
else:
p = {'name': 'padding', 'type': 'int', 'value': 0}
if (length >= 6) and (args[5]['name'] == ""):
d = args[5]
elif any(x['name'] == 'dilation' for x in args):
d = list(filter(lambda x: x['name'] == 'dilation', args))[0]
else:
d = {'name': 'dilation', 'type': 'int', 'value': 1}
if (length == 7) and (args[6]['name'] == ""):
g = args[6]
elif any(x['name'] == 'groups' for x in args):
g = list(filter(lambda x: x['name'] == 'groups', args))[0]
else:
g = {'name': 'groups', 'type': 'int', 'value': 1}
if op == "conv1d":
assert (len(i['shape']) == 3)
assert (len(w['shape']) == 3)
assert (i['dtype'] == w['dtype'])
N, C1, W = i['shape']
K, C2, S = w['shape']
assert (C1 == C2)
p = p['value'] if Utility.isscalar(p['type']) else p['value'][0]
s = s['value'] if Utility.isscalar(s['type']) else s['value'][0]
d = d['value'] if Utility.isscalar(d['type']) else d['value'][0]
g = g['value']
assert (g == 1)
H = 1
R = 1
P = 1 + (H - (((R - 1)) + 1))
Q = 1 + (W + 2 * p - (((S - 1) * d) + 1)) / s
P = int(P)
Q = int(Q)
if (H == 1):
assert (P == 1)
if (W == 1):
assert (Q == 1)
self.N = N
self.C = C1
self.H = H
self.W = W
self.K = K
self.P = P
self.Q = Q
self.R = R
self.S = S
self.ph = 0
self.pw = p
self.U = 1
self.V = s
self.dh = 1
self.dw = d
self.g = g
self.type = i['dtype']
elif op == "conv2d":
assert (len(i['shape']) == 4)
assert (len(w['shape']) == 4)
assert (i['dtype'] == w['dtype'])
N, C1, H, W = i['shape']
K, C2, R, S = w['shape']
if Utility.isscalar(p['type']):
ph = pw = p['value']
else:
assert (p['type'] == "tuple")
ph, pw = p['value']
if Utility.isscalar(s['type']):
sh = sw = s['value']
else:
assert (s['type'] == "tuple")
sh, sw = s['value']
if Utility.isscalar(d['type']):
dh = dw = d['value']
else:
assert (d['type'] == "tuple")
dh, dw = d['value']
g = g['value']
assert (g >= 1)
assert (C1 == C2 * g)
P = 1 + (H + 2 * ph - (((R - 1) * dh) + 1)) / sh
Q = 1 + (W + 2 * pw - (((S - 1) * dw) + 1)) / sw
P = int(P)
Q = int(Q)
if (H == 1):
assert (P == 1)
if (W == 1):
assert (Q == 1)
self.N = N
self.C = C1
self.H = H
self.W = W
self.K = K
self.P = P
self.Q = Q
self.R = R
self.S = S
self.ph = ph
self.pw = pw
self.U = sh
self.V = sw
self.dh = dh
self.dw = dw
self.g = g
self.type = i['dtype']
else:
assert False
def params(self):
p = OrderedDict(
[
('N', self.N), ('C', self.C), ('H', self.H), ('W', self.W), ('K', self.K), ('P', self.P), ('Q', self.Q),
('R', self.R), ('S', self.S), ('ph', self.ph), ('pw', self.pw), ('U', self.U), ('V', self.V),
('dh', self.dh), ('dw', self.dw), ('g', self.g), ('type', self.type)
]
)
return p
def conv_bytes_flops(self, N, C, H, W, K, P, Q, R, S, g, t):
f = 2 * N * K * P * Q * C * R * S / g #for fprop
elems = N * C * H * W + K * C * R * S / g + N * K * P * Q
b = elems * Utility.typeToBytes(t)
return b, f
def bytes_flops(self):
N, C, H, W, K, P, Q, R, S, ph, pw, U, V, dh, dw, g, t = self.params().values()
if any(x in self.name for x in Conv.convAuxList + Conv.winoAuxList + Conv.fftAuxList + Conv.miscAuxList):
bytes, flops = [0, 0]
elif any(x in self.name for x in Conv.convList + Conv.winoList + Conv.fftList + Conv.miscList):
if g == 1:
bytes, flops = self.conv_bytes_flops(N, C, H, W, K, P, Q, R, S, g, t)
else:
if "2d_grouped_direct_kernel" in self.name: #only 1 kernel is called
bytes, flops = self.conv_bytes_flops(N, C, H, W, K, P, Q, R, S, g, t)
elif "spatialDepthwiseConvolutionUpdateOutput" in self.name: #one kernel for separable conv
bytes, flops = self.conv_bytes_flops(N, C, H, W, K, P, Q, R, S, g, t)
else: #a kernel per group is called
bytes, flops = self.conv_bytes_flops(N, C / g, H, W, K / g, P, Q, R, S, 1, t)
elif ("calc_bias_diff" in self.name): #bias gradient
elems = N * K * P * Q
flops = elems
bytes = 2 * elems * Utility.typeToBytes(t)
#params = OrderedDict([('N',N), ('K',K), ('P',P), ('Q',Q), ('type', t)])
else:
bytes, flops = [0, 0]
return bytes, flops
def bytes(self):
b, _ = self.bytes_flops()
return b
def flops(self):
_, f = self.bytes_flops()
return f
def tc(self):
if self.name in TC_Whitelist():
return 1
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
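#Worked example (illustrative sizes, not tied to a specific network): a 3x3
#fprop convolution with N=32, C=K=64, H=W=P=Q=56, R=S=3, g=1 in float16 gives
#flops = 2*32*64*56*56*64*3*3 = 7,398,752,256 and
#bytes = (32*64*56*56 + 64*64*3*3 + 32*64*56*56) * 2 = 25,763,840.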
| PyProf-master | pyprof/prof/conv.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from .tc import TC_Whitelist
from .utility import Utility
from .base import OperatorLayerBase
def hasTileSize(name):
if ("sgemm" in name) or ("884gemm" in name) or ("hgemm" in name):
return True
else:
return False
def ctaTile(name):
name = name.split("_")
name = list(filter(lambda x: "x" in x, name))
name = list(filter(lambda x: "slice" not in x, name))
assert (len(name) == 1)
name = name[0].split("x")
assert (len(name) == 2)
name = list(map(int, name))
return name[0], name[1]
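#For example (the kernel name is illustrative), ctaTile("volta_sgemm_128x64_nn")
#splits on "_", keeps the token containing "x" ("128x64") and returns (128, 64),
#the CTA tile dimensions encoded in the kernel name.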
class RNNCell(OperatorLayerBase):
"""
This class supports RNNCell, LSTMCell and GRUCell.
"""
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
self.name = d.name
self.dir = d.dir
self.sub = d.sub
self.grid = d.grid
assert (op == "forward")
assert (mod in ["LSTMCell", "GRUCell", "RNNCell"])
assert (len(args) in [2, 3])
x, h = args[0], args[1]
b1, ii = x['shape']
b2, hh = h['shape']
assert b1 == b2
assert x['dtype'] == h['dtype']
t = x['dtype']
self.cell = mod
self.inp = ii
self.hid = hh
self.b = b1
self.type = t
self.multiple = 1
if self.cell == "LSTMCell":
self.multiple = 4
elif self.cell == "GRUCell":
self.multiple = 3
self.gemm = None
self.m = None
self.n = None
self.k = None
self.elems = 0
self.bar()
def params(self):
if self.gemm is None:
p = OrderedDict([('cell', self.cell), ('X', self.inp), ('H', self.hid), ('B', self.b), ('type', self.type)])
else:
assert self.m is not None
assert self.n is not None
assert self.k is not None
p = OrderedDict([('gemm', self.gemm), ('M', self.m), ('N', self.n), ('K', self.k), ('type', self.type)])
return p
def tc(self):
if "gemm" in self.name:
if self.name in TC_Whitelist():
return 1
return 0
else:
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
def bytes(self):
if self.gemm is not None:
m, n, k, t = self.m, self.n, self.k, self.type
b = (m * k + k * n + m * n) * Utility.typeToBytes(t)
elif self.elems != 0:
b = self.elems * Utility.typeToBytes(self.type)
else:
b = 0
return b
def flops(self):
if self.gemm is not None:
m, n, k = self.m, self.n, self.k
f = 2 * m * n * k
elif self.elems != 0:
f = 0 #TODO
else:
f = 0
return f
def bar(self):
cell = self.cell
X = self.inp
H = self.hid
B = self.b
t = self.type
subseqId = self.sub
direc = self.dir
name = self.name
grid = self.grid
multiple = self.multiple
if direc == "fprop":
subseqId = subseqId % 3
if subseqId == 0: #layer gemm
self.gemm = "layer"
self.m = multiple * H
self.n = B
self.k = X
elif subseqId == 1: #recurrent gemm
self.gemm = "recur"
self.m = multiple * H
self.n = B
self.k = H
else:
layerGemmElems = multiple * H * B
recurGemmElems = multiple * H * B
cElems = H * B
hElems = H * B
totElems = layerGemmElems + recurGemmElems + 2 * cElems + hElems
self.elems = totElems
else:
if ("gemm" in name) and hasTileSize(name): #gemm
#Get cta tile size
tileX, tileY = ctaTile(name)
#Get grid dimensions
grid = grid.split(",")
gridX, gridY, gridZ = map(lambda x: int(x), grid)
gemmM = tileX * gridX
gemmN = tileY * gridY
if name[-3:] == "_nn": # dgrad
if (gemmM == H): # recurrent dgrad
#Ideally gemmN = B, but we have a limited set of tile sizes.
gemmN = B
gemmK = multiple * H
self.gemm = "recur"
self.m = gemmM
self.n = gemmN
self.k = gemmK
elif (gemmM == X): # layer dgrad
#assert(gemmN % B == 0)
gemmK = multiple * H
self.gemm = "layer"
self.m = gemmM
self.n = gemmN
self.k = gemmK
else:
pass
elif name[-3:] == "_nt": #wgrad
if (gemmM == H): #recurrent wgrad
assert (gemmN == multiple * H)
gemmK = B
self.gemm = "recur"
self.m = gemmM
self.n = gemmN
self.k = gemmK
elif (gemmM == X): #layer wgrad
assert (gemmN == multiple * H)
gemmK = B
self.gemm = "layer"
self.m = gemmM
self.n = gemmN
self.k = gemmK
else:
pass
else:
pass
else:
pass
return
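#Worked example (illustrative sizes): an LSTMCell fprop with X=1024, H=1024,
#B=64 and float16 data. The layer GEMM (subSeqId % 3 == 0) has M=4*H=4096,
#N=B=64, K=X=1024, so flops = 2*4096*64*1024 = 536,870,912 and
#bytes = (4096*1024 + 1024*64 + 4096*64) * 2 = 9,043,968.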
| PyProf-master | pyprof/prof/recurrentCell.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import OperatorLayerBase
from .tensor import Tensor
class BatchNorm(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.mod_ = mod
self.op_ = op
assert (op == "batch_norm")
assert (len(args) >= 1)
i = args[0]
assert (i['type'] == "tensor")
self.input = Tensor(i['shape'], i['dtype'])
self.dir = d.dir
self.sub = d.sub
def params(self):
return str(self.input)
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
def flops(self):
        # The exact count depends on the mean/variance algorithm, but this is a reasonable estimate.
return self.input.size * 8
def bytes(self):
b = self.input.bytes
# fprop is 2 reads, 2 writes
# bprop is 4 reads, 1 write
if self.dir == "fprop":
b *= 4
else:
b *= 5
if self.sub > 0:
return 0
else:
return b
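#Worked example (illustrative): a [32, 64, 56, 56] float16 input has
#6,422,528 elements (12,845,056 bytes), so flops() = 8 * 6,422,528 = 51,380,224
#and fprop bytes() = 4 * 12,845,056 = 51,380,224 (bprop uses a factor of 5).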
| PyProf-master | pyprof/prof/normalization.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from .utility import Utility
from .base import OperatorLayerBase
#TODO: Add support for other optimizers.
class Adam(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
self.sub = d.sub
assert (op == "adam")
assert (len(args) == 12) or (len(args) == 14)
w, hw, m, v, g = args[0:5]
assert (w['shape'] == m['shape'] == v['shape'] == g['shape'])
assert (hw['shape'] == w['shape']) or (hw['shape'] == (0, )) #hw could be null
assert (w['type'] == m['type'] == v['type'] == g['type'] == hw['type'] == "tensor")
assert (w['dtype'] == m['dtype'] == v['dtype'] == "float32")
self.w = w
self.g = g
def params(self):
p = OrderedDict([('T', self.w['shape']), ('wtype', self.w['dtype']), ('gtype', self.g['dtype'])])
return p
def flops(self):
return 0
def bytes(self):
wshape = self.w['shape']
wtype = self.w['dtype']
gtype = self.g['dtype']
b = 0
elems = Utility.numElems(wshape)
        #Bytes to stream read/write w, m, v (one read and one write each)
        b += 6 * elems * Utility.typeToBytes(wtype)
        #Bytes to read "g"
        b += elems * Utility.typeToBytes(gtype)
        if wtype != gtype: #mixed precision
            #Bytes to write the half precision weight copy "hw"
            b += elems * Utility.typeToBytes(gtype)
return b if (self.sub == 0) else 0
def tc(self):
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_
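#Worked example (illustrative): for a [1024, 1024] float32 master weight with a
#float16 gradient (mixed precision), bytes() = 6*1048576*4 (w, m, v read+write)
#+ 1048576*2 (read g) + 1048576*2 (write hw) = 29,360,128.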
| PyProf-master | pyprof/prof/optim.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
def parseArgs():
"""
Print usage and parse arguments.
"""
def check_cols(value):
valid = [
"idx", "seq", "altseq", "tid", "layer", "trace", "dir", "sub", "mod", "op", "kernel", "params", "sil", "tc",
"device", "stream", "grid", "block", "flops", "bytes"
]
cols = value.split(",")
for col in cols:
if col not in valid:
raise argparse.ArgumentTypeError(
"{} is not a valid column name. Valid column names are {}.".format(col, ",".join(valid))
)
return cols
def openFile(f):
try:
d = open(f, "r")
return d
except IOError:
print("Error opening file {}. Exiting.".format(f), file=sys.stderr)
sys.exit(1)
parser = argparse.ArgumentParser(
prog=sys.argv[0], description="PyTorch Profiler", formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument("file", nargs='?', type=str, default=None, help="Output of parse.py (Python dictionary).")
parser.add_argument(
"-c", type=check_cols, default="idx,dir,sub,mod,op,kernel,params,sil",
        help='''Comma separated names of columns to print.
idx: Index
seq: PyTorch Sequence Id
altseq: PyTorch Alternate Sequence Id
tid: Thread Id
layer: User annotated NVTX string (can be nested)
trace: Function Call Trace
dir: Direction
sub: Sub Sequence Id
mod: Module
op: Operation
kernel: Kernel Name
params: Parameters
sil: Silicon Time (in ns)
tc: Tensor Core Usage
device: GPU Device Id
stream: Stream Id
grid: Grid Dimensions
block: Block Dimensions
flops: Floating point ops (FMA = 2 FLOPs)
bytes: Number of bytes in and out of DRAM
e.g. -c idx,kernel,sil'''
)
group = parser.add_mutually_exclusive_group()
group.add_argument("--csv", action="store_true", default=False, help="Print a CSV output.")
group.add_argument("-w", type=int, default=0, help="Width of columnated output.")
args = parser.parse_args()
if args.file is None:
args.file = sys.stdin
else:
args.file = openFile(args.file)
return args
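#Example invocations (the file name "net.dict" is only a placeholder for the
#dictionary produced by parse.py):
#  python -m pyprof.prof --csv -c idx,dir,op,kernel,params,sil,flops,bytes net.dict
#  python -m pyprof.prof -w 150 net.dict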
| PyProf-master | pyprof/prof/usage.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import OperatorLayerBase
from .tensor import Tensor
class Softmax(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.mod_ = mod
self.op_ = op
assert (mod == "torch.nn.functional")
assert (op == "softmax")
#Filter out named parameters
args = list(filter(lambda x: x['name'] == '', args))
assert (len(args) <= 2)
arg = args[0]
self.input = Tensor(arg['shape'], arg['dtype'])
self.dir = d.dir
return
def op(self):
return self.op_
def mod(self):
return self.mod_
def tc(self):
return "-"
def params(self):
return str(self.input)
def flops(self):
# An approximation
# http://ai.stanford.edu/~paskin/slam/javadoc/javaslam/util/Flops.html#exp()
# TODO: consider direction
e = self.input.size
f = e * 20 # denominator, exp all elements and reduce
f += e * 20 # numerator, exp all elements and divide
return f
def bytes(self):
# TODO: verify
b = self.input.bytes
# fprop is 2 reads, 1 write
# bprop is 4 reads, 1 write
b *= 3 if self.dir == "fprop" else 5
return b
class LogSoftmax(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.mod_ = mod
self.op_ = op
assert (mod in ["torch", "Tensor", "torch.nn.functional"])
assert (op == "log_softmax")
#Filter out named parameters
args = list(filter(lambda x: x['name'] == '', args))
assert (len(args) <= 2)
#Get input
if (args[0]['name'] == ""):
i = args[0]
else:
i = list(filter(lambda x: x['name'] == "input", args))[0]
self.input = Tensor(i['shape'], i['dtype'])
self.dir = d.dir
return
def op(self):
return self.op_
def mod(self):
return self.mod_
def tc(self):
return "-"
def params(self):
return str(self.input)
def flops(self):
# An approximation
# http://ai.stanford.edu/~paskin/slam/javadoc/javaslam/util/Flops.html#exp()
# TODO: consider direction
e = self.input.size
f = e * 20 # denominator, exp all elements and reduce
f += e # numerator, just a subtraction
return f
def bytes(self):
# TODO: verify
b = self.input.bytes
# fprop is 2 reads, 1 write
# bprop is 4 reads, 1 write
b *= 3 if self.dir == "fprop" else 5
return b
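#Worked example (illustrative): a [32, 1000] float32 softmax input has 32,000
#elements, so Softmax.flops() estimates 20*32000 + 20*32000 = 1,280,000 and
#fprop bytes() = 3 * 32000 * 4 = 384,000.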
| PyProf-master | pyprof/prof/softmax.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .prof import main
if __name__ == '__main__':
main()
| PyProf-master | pyprof/prof/__main__.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
class OperatorLayerBase(ABC):
"""
Base class for all layers and operators.
Every derived class should have the following functions.
"""
@abstractmethod
def tc(self):
"""
Tensor core usage by the kernel.
Return "1" (yes), "0" (no, but possible), "-" (not applicable)
"""
pass
@abstractmethod
def params(self):
"""
Kernel parameters to be printed.
"""
pass
@abstractmethod
def flops(self):
"""
Note that 1 FMA = 2 flops.
"""
pass
@abstractmethod
def bytes(self):
pass
@abstractmethod
def mod(self):
"""
Name of the module/class e.g. torch.nn.functional.
"""
pass
@abstractmethod
def op(self):
"""
Name of the operator e.g. sigmoid.
"""
pass
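#A minimal sketch (not part of PyProf) of a concrete class satisfying this
#interface. The name, shape and byte counts below are illustrative only;
#real derived classes pull their values out of the parsed marker dictionary.
class _ExampleCopy(OperatorLayerBase):
    def __init__(self, shape=(1024, 1024), itemsize=4):
        self.shape = list(shape)
        self.itemsize = itemsize
    def tc(self):
        return "-"  #tensor cores not applicable
    def params(self):
        return {"T": self.shape, "itemsize": self.itemsize}
    def flops(self):
        return 0  #a copy performs no math
    def bytes(self):
        elems = 1
        for s in self.shape:
            elems *= s
        return 2 * elems * self.itemsize  #one read and one write per element
    def mod(self):
        return "Tensor"
    def op(self):
        return "copy_"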
| PyProf-master | pyprof/prof/base.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .utility import Utility
class Data(object):
"""
Class to store all the data for every kernel e.g. name, bytes, flops, device, stream etc.
"""
def __init__(self, kernel):
#Available from NVprof
self.tid = kernel['tid']
self.device = kernel['device']
self.stream = kernel['stream']
self.grid = str(kernel['grid']).replace(" ", "").replace("(", "").replace(")", "")
self.block = str(kernel['block']).replace(" ", "").replace("(", "").replace(")", "")
self.name = kernel['kShortName'].replace(" ", "_")
self.lName = kernel['kLongName']
self.sil = kernel['kDuration'] #units ns
self.index = None
#Markers
self.argMarker = kernel['marker']
self.modMarker = kernel['reprMarkers']
self.seqMarker = kernel['seqMarker']
self.layer = kernel['layer']
self.trace = kernel['trace']
self.seqId = kernel['seqId']
self.altSeqId = kernel['altSeqId']
self.dir = kernel['dir']
self.sub = kernel['subSeqId']
self.mod = "na"
self.op = "na"
self.params = {"na": "na"}
self.tc = "na"
self.flops = 0
self.bytes = 0
def setParams(self, params):
# TODO: Remove the else block after refactoring.
if type(params) == str:
self.params = params
else:
#Remove space from params
qaz = ""
for key, value in params.items():
if "type" not in key:
qaz += "{}={},".format(key, value)
else:
if type(value) is str:
qaz += "{},".format(Utility.typeToString(value))
else:
qaz += "{}".format(value)
self.params = qaz.replace(" ", "")
| PyProf-master | pyprof/prof/data.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno, os, sys
class Output():
"""
    This class handles printing of a columnated output and a CSV.
"""
# The table below is organized as
    # user_option: [output_header, attribute_in_Data_class, type, min_width_in_columnated_output]
table = {
"idx": ["Idx", "index", int, 7],
"seq": ["SeqId", "seqId", str, 7],
"altseq": ["AltSeqId", "altSeqId", str, 7],
"tid": ["TId", "tid", int, 12],
"layer": ["Layer", "layer", str, 10],
"trace": ["Trace", "trace", str, 25],
"dir": ["Direction", "dir", str, 5],
"sub": ["Sub", "sub", int, 3],
"mod": ["Module", "mod", str, 15],
"op": ["Op", "op", str, 15],
"kernel": ["Kernel", "name", str, 0],
"params": ["Params", "params", str, 0],
"sil": ["Sil(ns)", "sil", int, 10],
"tc": ["TC", "tc", str, 2],
"device": ["Device", "device", int, 3],
"stream": ["Stream", "stream", int, 3],
"grid": ["Grid", "grid", str, 12],
"block": ["Block", "block", str, 12],
"flops": ["FLOPs", "flops", int, 12],
"bytes": ["Bytes", "bytes", int, 12]
}
def __init__(self, args):
self.cols = args.c
self.csv = args.csv
self.col = True if (args.w > 0) else False
self.width = args.w
w = 0
for col in self.cols:
assert col in Output.table.keys()
w += Output.table[col][3]
if ((self.col) and (w > self.width)):
print("Minimum width required to print {} = {}. Exiting.".format(",".join(self.cols), w))
sys.exit(1)
remainder = self.width - w
if ("kernel" in self.cols) and ("params" in self.cols):
Output.table["kernel"][3] = int(remainder / 2)
Output.table["params"][3] = int(remainder / 2)
elif ("kernel" in self.cols):
Output.table["kernel"][3] = remainder
elif ("params" in self.cols):
Output.table["params"][3] = remainder
#header format
cadena = ""
for col in self.cols:
_, _, t, w = Output.table[col]
cadena += "%-{}.{}s ".format(w, w)
self.hFormat = cadena
#data format
cadena = ""
for col in self.cols:
_, _, t, w = Output.table[col]
if (t == str):
cadena += "%-{}.{}s ".format(w, w)
elif (t == int):
cadena += "%{}d ".format(w)
self.dFormat = cadena
def foo(self, cadena, pformat):
if self.csv:
cadena = ",".join(map(lambda x: '"' + str(x) + '"', cadena))
elif self.col:
cadena = pformat % cadena
else:
cadena = " ".join(map(str, cadena))
try:
print(cadena)
except IOError as e:
#gracefully handle pipes
if e.errno == errno.EPIPE:
# Python flushes standard streams on exit; redirect remaining output
# to devnull to avoid another BrokenPipeError at shutdown
devnull = os.open(os.devnull, os.O_WRONLY)
os.dup2(devnull, sys.stdout.fileno())
sys.exit(0)
else:
sys.exit(-1)
def header(self):
cadena = ()
for col in self.cols:
h = Output.table[col][0]
cadena = cadena + (h, )
self.foo(cadena, self.hFormat)
def data(self, a):
if a.dir == "":
direc = "na"
else:
direc = a.dir
if a.op == "":
op = "na"
else:
op = a.op
if a.mod == "":
mod = "na"
else:
mod = a.mod
cadena = ()
for col in self.cols:
attr = Output.table[col][1]
val = getattr(a, attr)
if col == "layer":
assert (type(val) == list)
val = ":".join(val)
val = "-" if val == "" else val
if col == "trace":
assert (type(val) == list)
if self.col and len(val):
val = val[-1]
val = val.split("/")[-1]
else:
val = ",".join(val)
val = "-" if val == "" else val
if col in ["seq", "altseq"]:
assert (type(val) == list)
val = ",".join(map(str, val))
val = "-" if val == "" else val
cadena = cadena + (val, )
self.foo(cadena, self.dFormat)
| PyProf-master | pyprof/prof/output.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.cuda.profiler as profiler
import torch.optim as optim
import pyprof
pyprof.init()
class LeNet5(nn.Module):
def __init__(self):
super(LeNet5, self).__init__()
# 1 input image channel, 6 output channels, 5x5 square convolution
# kernel
self.conv1 = nn.Conv2d(1, 6, 5)
self.conv2 = nn.Conv2d(6, 16, 5)
# an affine operation: y = Wx + b
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
# Max pooling over a (2, 2) window
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
# If the size is a square you can only specify a single number
x = F.max_pool2d(F.relu(self.conv2(x)), 2)
x = x.view(-1, self.num_flat_features(x))
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
with torch.autograd.profiler.emit_nvtx():
net = LeNet5().cuda()
input = torch.randn(1, 1, 32, 32).cuda()
out = net(input)
target = torch.randn(10) # a dummy target, for example
target = target.view(1, -1).cuda() # make it the same shape as output
criterion = nn.MSELoss()
# create your optimizer
optimizer = optim.SGD(net.parameters(), lr=0.01)
# in your training loop:
optimizer.zero_grad() # zero the gradient buffers
profiler.start()
output = net(input)
loss = criterion(output, target)
loss.backward()
optimizer.step() # Does the update
profiler.stop()
| PyProf-master | pyprof/examples/lenet.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file checks all Python operators.
"""
import sys
import torch
import torch.cuda.profiler as profiler
import operator
import inspect
#Import and initialize pyprof
import pyprof
pyprof.init()
X = 1024
Y = 1024
fa = torch.rand(X, Y).cuda()
fb = torch.rand(X, Y).cuda()
fc = torch.rand(X, Y).cuda()
ia = torch.randint(0, 100, (X, Y)).cuda()
ib = torch.randint(0, 100, (X, Y)).cuda()
sa = torch.ones(1, 1).cuda()
sb = torch.ones(1, 1).cuda()
ba = fa.byte()
unaryOps = [
"abs",
"__abs__",
"neg",
"__neg__",
]
invertOps = [
"inv",
"invert",
"__inv__",
"__invert__",
] #implemented only for byte tensors
#pos, __pos__ is not implemented for tensors
binaryOps = []
binaryOps += ["lt", "__lt__", "le", "__le__", "eq", "__eq__", "ne", "__ne__", "ge", "__ge__", "gt", "__gt__"]
binaryOps += [
"add", "__add__", "sub", "__sub__", "mul", "__mul__", "floordiv", "__floordiv__", "truediv", "__truediv__", "pow",
"__pow__", "mod", "__mod__"
]
binaryOps += ["and_", "__and__", "or_", "__or__", "xor", "__xor__", "lshift", "__lshift__", "rshift", "__rshift__"]
inplaceOps = []
inplaceOps += [
"iadd",
"__iadd__",
"isub",
"__isub__",
"imul",
"__imul__",
"ifloordiv",
"__ifloordiv__",
"itruediv",
"__itruediv__",
"imod",
"__imod__",
]
#ipow, __ipow__ is not implemented in pytorch
inplaceOps += [
"iand",
"__iand__",
"ior",
"__ior__",
"ixor",
"__ixor__",
"ilshift",
"__ilshift__",
"irshift",
"__irshift__",
]
matmulOps = ["matmul", "__matmul__"]
inplacematmulOps = ["imatmul", "__imatmul__"]
reverseIntBinaryOps = [
"__radd__",
"__rsub__",
"__rmul__",
"__rfloordiv__",
"__rpow__",
]
reverseFloatBinaryOps = [
"__radd__",
"__rsub__",
"__rmul__",
"__rdiv__",
"__rtruediv__",
"__rfloordiv__",
"__rpow__",
]
'''
TODO
.concat(a, b)
.__concat__(a, b)
.contains(a, b)
.__contains__(a, b)
.countOf(a, b)
.delitem(a, b)
.__delitem__(a, b)
.getitem(a, b)
.__getitem__(a, b)
.indexOf(a, b)
.setitem(a, b, c)
.__setitem__(a, b, c)
.length_hint(obj, default=0)
.iconcat(a, b)
.__iconcat__(a, b)
.index(a)
.__index__(a)
'''
#Context manager
with torch.autograd.profiler.emit_nvtx():
#Start profiler
profiler.start()
for op in unaryOps:
assert hasattr(operator, op)
f = getattr(operator, op)
assert inspect.isbuiltin(f)
c = f(ia)
for op in invertOps:
assert hasattr(operator, op)
f = getattr(operator, op)
assert inspect.isbuiltin(f)
c = f(ba)
for op in binaryOps:
assert hasattr(operator, op)
f = getattr(operator, op)
assert inspect.isbuiltin(f)
c = f(ia, ib)
c = f(ia, 2)
for op in inplaceOps:
assert hasattr(operator, op)
f = getattr(operator, op)
assert inspect.isbuiltin(f)
ia = f(ia, ib)
ia = f(ia, 2)
for op in matmulOps:
assert hasattr(operator, op)
f = getattr(operator, op)
assert inspect.isbuiltin(f)
c = f(fa, fb)
for op in inplacematmulOps:
assert hasattr(operator, op)
f = getattr(operator, op)
assert inspect.isbuiltin(f)
fa = f(fa, fb)
for op in reverseIntBinaryOps:
assert hasattr(torch.Tensor, op)
f = getattr(torch.Tensor, op)
ia = f(ia, ib)
for op in reverseFloatBinaryOps:
assert hasattr(torch.Tensor, op)
f = getattr(torch.Tensor, op)
fa = f(fa, fb)
'''
#c = fa[3]
#c = fa[3][3]
#c = torch.min(fa, 3)
c = torch.sum(fa)
c = torch.max(fa)
c = -fa
#fc[2][2] = fa[2][2]
c = a_scalar and b_scalar
c = a_scalar or b_scalar
c = not a_scalar
c = a is b
c = a is not b
'''
#Stop profiler
profiler.stop()
| PyProf-master | pyprof/examples/operators.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This simple file provides an example of how to
- import the pyprof library and initialize it
- use the emit_nvtx context manager
- start and stop the profiler
Only kernels within profiler.start and profiler.stop calls are profiled.
To profile
$ nvprof -f -o simple.sql --profile-from-start off ./simple.py
"""
import sys
import torch
import torch.cuda.profiler as profiler
#Import and initialize pyprof
import pyprof
pyprof.init()
a = torch.randn(5, 5).cuda()
b = torch.randn(5, 5).cuda()
#Context manager
with torch.autograd.profiler.emit_nvtx():
#Start profiler
profiler.start()
c = a + b
c = torch.mul(a, b)
c = torch.matmul(a, b)
c = torch.argmax(a, dim=1)
c = torch.nn.functional.pad(a, (1, 1))
#Stop profiler
profiler.stop()
| PyProf-master | pyprof/examples/simple.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.cuda.profiler as profiler
import pyprof
pyprof.init()
class Foo(torch.nn.Module):
def __init__(self, size):
super(Foo, self).__init__()
self.n = torch.nn.Parameter(torch.ones(size))
self.m = torch.nn.Parameter(torch.ones(size))
def forward(self, input):
return self.n * input + self.m
# Hook the forward function to pyprof
pyprof.nvtx.wrap(Foo, 'forward')
foo = Foo(4)
foo.cuda()
x = torch.ones(4).cuda()
with torch.autograd.profiler.emit_nvtx():
profiler.start()
z = foo(x)
profiler.stop()
| PyProf-master | pyprof/examples/custom_func_module/custom_module.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.cuda.profiler as profiler
import pyprof
#Initialize pyprof
pyprof.init()
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, in1, in2):
out = in1 + in2 #This could be a custom C/C++ function.
return out
@staticmethod
def backward(ctx, grad):
in1_grad = grad #This could be a custom C/C++ function.
in2_grad = grad #This could be a custom C/C++ function.
return in1_grad, in2_grad
#Hook the forward and backward functions to pyprof
pyprof.nvtx.wrap(Foo, 'forward')
pyprof.nvtx.wrap(Foo, 'backward')
foo = Foo.apply
x = torch.ones(4, 4).cuda()
y = torch.ones(4, 4).cuda()
with torch.autograd.profiler.emit_nvtx():
profiler.start()
z = foo(x, y)
profiler.stop()
| PyProf-master | pyprof/examples/custom_func_module/custom_function.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.cuda.profiler as profiler
import pyprof
def foo(x, y):
return torch.sigmoid(x) + y
x = torch.zeros(4, 4).cuda()
y = torch.ones(4, 4).cuda()
#JIT the function using tracing
#This returns an object of type ScriptModule with a forward method.
traced_foo = torch.jit.trace(foo, (x, y))
#Initialize pyprof after the JIT step
pyprof.init()
#Assign a name to the object "traced_foo"
traced_foo.__dict__['__name__'] = "foo"
#Hook up the forward function to pyprof
pyprof.nvtx.wrap(traced_foo, 'forward')
with torch.autograd.profiler.emit_nvtx():
profiler.start()
z = traced_foo(x, y)
profiler.stop()
print(z)
| PyProf-master | pyprof/examples/jit/jit_trace_function.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.cuda.profiler as profiler
import pyprof
class Foo(torch.nn.Module):
def __init__(self, size):
super(Foo, self).__init__()
self.n = torch.nn.Parameter(torch.ones(size))
self.m = torch.nn.Parameter(torch.ones(size))
def forward(self, input):
return self.n * input + self.m
foo = Foo(4)
foo.cuda()
x = torch.ones(4).cuda()
#JIT the class using tracing
traced_foo = torch.jit.trace(foo, x)
#Initialize pyprof after the JIT step
pyprof.init()
#Assign a name to the object "traced_foo"
traced_foo.__dict__['__name__'] = "foo"
#Hook up the forward function to pyprof
pyprof.nvtx.wrap(traced_foo, 'forward')
with torch.autograd.profiler.emit_nvtx():
profiler.start()
z = traced_foo(x)
profiler.stop()
print(z)
| PyProf-master | pyprof/examples/jit/jit_trace_method.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.cuda.profiler as profiler
import pyprof
#The following creates an object "foo" of type ScriptModule
#The new object has a function called "forward"
@torch.jit.script
def foo(x, y):
return torch.sigmoid(x) + y
#Initialize pyprof after the JIT step
pyprof.init()
#Assign a name to the object "foo"
foo.__name__ = "foo"
#Hook up the forward function to pyprof
pyprof.nvtx.wrap(foo, 'forward')
x = torch.zeros(4, 4).cuda()
y = torch.ones(4, 4).cuda()
with torch.autograd.profiler.emit_nvtx():
profiler.start()
z = foo(x, y)
profiler.stop()
print(z)
| PyProf-master | pyprof/examples/jit/jit_script_function.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.cuda.profiler as profiler
import pyprof
class Foo(torch.jit.ScriptModule):
def __init__(self, size):
super(Foo, self).__init__()
self.n = torch.nn.Parameter(torch.ones(size))
self.m = torch.nn.Parameter(torch.ones(size))
@torch.jit.script_method
def forward(self, input):
return self.n * input + self.m
#Initialize pyprof after the JIT step
pyprof.init()
#Hook up the forward function to pyprof
pyprof.nvtx.wrap(Foo, 'forward')
foo = Foo(4)
foo.cuda()
x = torch.ones(4).cuda()
with torch.autograd.profiler.emit_nvtx():
profiler.start()
z = foo(x)
profiler.stop()
print(z)
| PyProf-master | pyprof/examples/jit/jit_script_method.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example to run pyprof with imagenet models.
"""
import sys
import torch
import torch.nn as nn
import torchvision.models as models
import torch.cuda.profiler as profiler
import argparse
import pyprof
from apex.optimizers import FusedAdam
def parseArgs():
parser = argparse.ArgumentParser(prog=sys.argv[0], description="Run popular imagenet models.")
parser.add_argument(
"-m", type=str, default="resnet50", choices=[
"alexnet", "densenet121", "densenet161", "densenet169", "densenet201", "googlenet", "mnasnet0_5",
"mnasnet0_75", "mnasnet1_0", "mnasnet1_3", "mobilenet_v2", "resnet18", "resnet34", "resnet50", "resnet101",
"resnet152", "resnext50_32x4d", "resnext101_32x8d", "wide_resnet50_2", "wide_resnet101_2",
"shufflenet_v2_x0_5", "shufflenet_v2_x1_0", "shufflenet_v2_x1_5", "shufflenet_v2_x2_0", "squeezenet1_0",
"squeezenet1_1", "vgg11", "vgg11_bn", "vgg13", "vgg13_bn", "vgg16", "vgg16_bn", "vgg19", "vgg19_bn",
"inception_v3"
], help="Model."
)
parser.add_argument("-b", type=int, default=32, help="Batch size.")
parser.add_argument("-o", type=str, default="adam", choices=["adam", "sgd"], help="Optimizer.")
args = parser.parse_args()
return args
d = {
"alexnet": {
'H': 224,
'W': 224,
'opts': {}
},
"densenet121": {
'H': 224,
'W': 224,
'opts': {}
},
"densenet161": {
'H': 224,
'W': 224,
'opts': {}
},
"densenet169": {
'H': 224,
'W': 224,
'opts': {}
},
"densenet201": {
'H': 224,
'W': 224,
'opts': {}
},
"googlenet": {
'H': 224,
'W': 224,
'opts': {
'aux_logits': False
}
},
"mnasnet0_5": {
'H': 224,
'W': 224,
'opts': {}
},
"mnasnet0_75": {
'H': 224,
'W': 224,
'opts': {}
},
"mnasnet1_0": {
'H': 224,
'W': 224,
'opts': {}
},
"mnasnet1_3": {
'H': 224,
'W': 224,
'opts': {}
},
"mobilenet_v2": {
'H': 224,
'W': 224,
'opts': {}
},
"resnet18": {
'H': 224,
'W': 224,
'opts': {}
},
"resnet34": {
'H': 224,
'W': 224,
'opts': {}
},
"resnet50": {
'H': 224,
'W': 224,
'opts': {}
},
"resnet101": {
'H': 224,
'W': 224,
'opts': {}
},
"resnet152": {
'H': 224,
'W': 224,
'opts': {}
},
"resnext50_32x4d": {
'H': 224,
'W': 224,
'opts': {}
},
"resnext101_32x8d": {
'H': 224,
'W': 224,
'opts': {}
},
"wide_resnet50_2": {
'H': 224,
'W': 224,
'opts': {}
},
"wide_resnet101_2": {
'H': 224,
'W': 224,
'opts': {}
},
"shufflenet_v2_x0_5": {
'H': 224,
'W': 224,
'opts': {}
},
"shufflenet_v2_x1_0": {
'H': 224,
'W': 224,
'opts': {}
},
"shufflenet_v2_x1_5": {
'H': 224,
'W': 224,
'opts': {}
},
"shufflenet_v2_x2_0": {
'H': 224,
'W': 224,
'opts': {}
},
"squeezenet1_0": {
'H': 224,
'W': 224,
'opts': {}
},
"squeezenet1_1": {
'H': 224,
'W': 224,
'opts': {}
},
"vgg11": {
'H': 224,
'W': 224,
'opts': {}
},
"vgg11_bn": {
'H': 224,
'W': 224,
'opts': {}
},
"vgg13": {
'H': 224,
'W': 224,
'opts': {}
},
"vgg13_bn": {
'H': 224,
'W': 224,
'opts': {}
},
"vgg16": {
'H': 224,
'W': 224,
'opts': {}
},
"vgg16_bn": {
'H': 224,
'W': 224,
'opts': {}
},
"vgg19": {
'H': 224,
'W': 224,
'opts': {}
},
"vgg19_bn": {
'H': 224,
'W': 224,
'opts': {}
},
"inception_v3": {
'H': 299,
'W': 299,
'opts': {
'aux_logits': False
}
},
}
def main():
args = parseArgs()
pyprof.init()
N = args.b
C = 3
H = d[args.m]['H']
W = d[args.m]['W']
opts = d[args.m]['opts']
classes = 1000
net = getattr(models, args.m)
net = net(**opts).cuda().half()
net.train()
x = torch.rand(N, C, H, W).cuda().half()
target = torch.empty(N, dtype=torch.long).random_(classes).cuda()
criterion = nn.CrossEntropyLoss().cuda()
if (args.o == "sgd"):
optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
elif (args.o == "adam"):
optimizer = FusedAdam(net.parameters())
else:
assert False
#Warm up without profiler
for i in range(2):
output = net(x)
loss = criterion(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
with torch.autograd.profiler.emit_nvtx():
profiler.start()
output = net(x)
loss = criterion(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
profiler.stop()
if __name__ == "__main__":
main()
| PyProf-master | pyprof/examples/imagenet/imagenet.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An example showing use of nested NVTX markers.
"""
import torch
import torch.nn as nn
import torch.cuda.profiler as profiler
import torch.cuda.nvtx as nvtx
import pyprof
pyprof.init()
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False,
dilation=dilation
)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class Bottleneck(nn.Module):
expansion = 4
count = 1
def __init__(
self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None
):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.id = Bottleneck.count
Bottleneck.count += 1
def forward(self, x):
identity = x
nvtx.range_push("layer:Bottleneck_{}".format(self.id))
nvtx.range_push("layer:Conv1")
out = self.conv1(x)
nvtx.range_pop()
nvtx.range_push("layer:BN1")
out = self.bn1(out)
nvtx.range_pop()
nvtx.range_push("layer:ReLU")
out = self.relu(out)
nvtx.range_pop()
nvtx.range_push("layer:Conv2")
out = self.conv2(out)
nvtx.range_pop()
nvtx.range_push("layer:BN2")
out = self.bn2(out)
nvtx.range_pop()
nvtx.range_push("layer:ReLU")
out = self.relu(out)
nvtx.range_pop()
nvtx.range_push("layer:Conv3")
out = self.conv3(out)
nvtx.range_pop()
nvtx.range_push("layer:BN3")
out = self.bn3(out)
nvtx.range_pop()
if self.downsample is not None:
nvtx.range_push("layer:Downsample")
identity = self.downsample(x)
nvtx.range_pop()
nvtx.range_push("layer:Residual")
out += identity
nvtx.range_pop()
nvtx.range_push("layer:ReLU")
out = self.relu(out)
nvtx.range_pop()
nvtx.range_pop()
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, groups=1, width_per_group=64, norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer
)
)
return nn.Sequential(*layers)
def forward(self, x):
nvtx.range_push("layer:conv1_x")
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
nvtx.range_pop()
nvtx.range_push("layer:conv2_x")
x = self.layer1(x)
nvtx.range_pop()
nvtx.range_push("layer:conv3_x")
x = self.layer2(x)
nvtx.range_pop()
nvtx.range_push("layer:conv4_x")
x = self.layer3(x)
nvtx.range_pop()
nvtx.range_push("layer:conv5_x")
x = self.layer4(x)
nvtx.range_pop()
x = self.avgpool(x)
x = torch.flatten(x, 1)
nvtx.range_push("layer:FC")
x = self.fc(x)
nvtx.range_pop()
return x
def resnet50():
return ResNet(Bottleneck, [3, 4, 6, 3])
#Create model
net = resnet50().cuda().half()
net.train()
#Create optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
#Create synthetic input and label
x = torch.rand(32, 3, 224, 224).cuda().half()
target = torch.empty(32, dtype=torch.long).random_(1000).cuda()
with torch.autograd.profiler.emit_nvtx():
profiler.start()
output = net(x)
loss = criterion(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
profiler.stop()
| PyProf-master | pyprof/examples/user_annotation/resnet.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Config(object):
    """Singleton that holds pyprof's NVTX instrumentation flags (set once via kwargs)."""
    _instance = None
# Overloading the __new__ method enables singleton behavior
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super(Config, cls).__new__(cls)
cls.func_stack_enabled = kwargs.get("enable_function_stack",
False) or kwargs.get("capture_input_ops", False)
cls.capture_input_ops = kwargs.get("capture_input_ops", False)
cls.delay_graph_capture = kwargs.get("delay_graph_capture", False)
cls.debug_graph = kwargs.get("debug_graph", False)
return cls._instance
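# A minimal, self-contained usage sketch (flag values here are hypothetical).
# It only runs when this file is executed directly, so importing the module is
# unaffected. It demonstrates the singleton behavior relied on by dlprof.py and
# nvmarker.py: the first construction fixes the flags, later constructions
# return the same instance.
if __name__ == "__main__":
    cfg = Config(enable_function_stack=True, capture_input_ops=False)
    assert cfg is Config()  # later calls return the exact same object
    print(cfg.func_stack_enabled, cfg.capture_input_ops,
          cfg.delay_graph_capture, cfg.debug_graph)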
| PyProf-master | pyprof/nvtx/config.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import inspect as ins
from .config import Config
def dprint(*args):
"""
dprint()
Printf debugging messages controlled by the debug_flag.
Disabled by default.
Call debug(True) from instrumentation code to enable
debug messages generated by dprint.
"""
config = Config()
if config.debug_graph:
fn = ins.currentframe().f_back.f_code.co_name
depth = len(ins.stack())-2
print(" " * depth, f"[{fn}] {args}")
return
class DLProf(object):
_instance = None
# Overloading the __new__ method enables singleton behavior
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super(DLProf, cls).__new__(cls)
cls.call_id = 0 # input op tracking identifier
            cls.op_to_out_tensor_map = {} # map from tensor ptr to call_id
cls.call_id_to_op_map = {} # map from call_id to op name
cls.patch_list = [] # used to track nested callids
# Nested dicts of this run's frame names to help uniquify them
# func_map[(partial_func_stack,frame_name)][filename+lineno] = frame_name_to_use
#
cls.func_map = {}
return cls._instance
# Return True if the name in the hierarchy should be skipped
@classmethod
def should_skip_frame_name(cls, name, prev_name):
        # wrapper_func and always_benchmark_wrapper:
        # These are wrapper functions defined in this file. If there are nested
        # monkey-patched functions we don't want them to show up.
# name==prev_name:
# Remove back-to-back duplicates of the same function name.
# This is common during recursion
#
for prefix in ["wrapper_func", "always_benchmark_wrapper"]:
if name.startswith(prefix):
return True
if name == prev_name:
return True
return False
# Given a function stack, clean it up to remove unwanted fields as
# well as removing any back-to-back duplicates
@classmethod
def cleanup_func_stack(cls, func_stack, op_name):
ret = ""
prev_fn_name = ""
suffix = ""
x = func_stack.split("/")
for fn_name in x:
# This is used to detect when the same torch op was called
# multiple times from the same parent function. Capture the
# count as a 'suffix' and put it on the end of the op name
#
# For example, if we end up with these:
# a/b/c/wrapper_func
# a/b/c/wrapper_func(2)
# Both would end up as a/b/c after the wrapper function is ignored
# However, we want to keep the information that the resulting torch op
# called by wrapper_func was called 2 different times from the same function 'c'
#
# This code changes "wrapper_func(2)" to "(2)" so that it doesn't get filtered
# out by should_skip_frame_name()
#
if fn_name.startswith("wrapper_func("):
suffix = fn_name.replace("wrapper_func", "")
if fn_name.startswith("always_benchmark_wrapper("):
suffix = fn_name.replace("always_benchmark_wrapper", "")
if not DLProf.should_skip_frame_name(fn_name, prev_fn_name):
ret += "/" + fn_name
prev_fn_name = fn_name
ret += "/" + op_name + suffix
return ret
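    # A worked example with hypothetical frame names:
    #   cleanup_func_stack("/train/Model::forward/wrapper_func(2)", "conv2d")
    # drops the wrapper frame, keeps its "(2)" repeat suffix, and returns
    #   "/train/Model::forward/conv2d(2)"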
@classmethod
def build_function_stack(cls, index, func_stack, frame_name, prev_fn, op_name, stack, ins_frame):
# Build funcStack
fn_name = frame_name
# Capture class name
#
# Iterate through the stack frames (like a linked list) until we get
# to the detailed frame we want. This is much faster and less
# expensive than extracting the entire frame stack every time
#
# ins stack is backwards from traceback, so depth is inverse
# of current traceback depth
#
depth = len(stack) - index
for _ in range(1, depth):
ins_frame = ins_frame.f_back
# Grab the class name if it exists
#
if 'self' in ins_frame.f_locals:
fn_name = ins_frame.f_locals['self'].__class__.__name__ + "::" + fn_name
key = (func_stack, frame_name, "")
if (fn_name in ["wrapper_func", "always_benchmark_wrapper"]):
key = (func_stack, frame_name, op_name)
if key not in cls.func_map.keys():
cls.func_map[key] = {}
# If we have been to this stack depth with all the same
# information, use the stored name
#
if prev_fn in cls.func_map[key].keys():
fn_name = cls.func_map[key][prev_fn]
else:
            # If we have been to this stack depth and have called
# this function at least once but didn't hit in the dict
# above, then this is a repeat call. Postpend a count
# to the fn_name to uniquify it
#
if len(cls.func_map[key]) > 0:
fn_name = fn_name + "(" + str(1 + len(cls.func_map[key])) + ")"
# Store this new unique stack information with the
# determined fn_name
#
cls.func_map[key][prev_fn] = fn_name
return fn_name
@classmethod
def capture_inputs(cls, call_id, input_callid_list, *args):
input_tensors = []
for arg in args:
if isinstance(arg, torch.Tensor):
input_tensors.append({
'ptr': arg.data_ptr(),
})
elif isinstance(arg, list) or isinstance(arg, tuple):
for item in arg:
if isinstance(item, torch.Tensor):
input_tensors.append({
'ptr': item.data_ptr(),
})
if isinstance(item, list) or isinstance(item, tuple):
for item2 in item:
if isinstance(item2, torch.Tensor):
input_tensors.append({
'ptr': item2.data_ptr(),
})
for input_id, _ in enumerate(input_tensors):
input_ptr = input_tensors[input_id]['ptr']
if input_ptr in cls.op_to_out_tensor_map:
input_callid_info = cls.op_to_out_tensor_map[input_ptr]
if input_callid_info not in input_callid_list:
input_callid_list.append(input_callid_info)
dprint(f"Callid {call_id} Input tensor ptr {input_ptr} fetching saved call_id"\
f" {input_callid_info} port 0")
else:
## otherwise, push '-1'. this allows the input param shapes to align with the
## input_callids when building the graph
input_callid_list.append(-1)
@classmethod
def capture_outputs(cls, call_id, result):
output_tensors = []
if isinstance(result, torch.Tensor):
output_tensors.append({
'ptr': result.data_ptr(),
})
elif isinstance(result, list) or isinstance(result, tuple):
for item in result:
if isinstance(item, torch.Tensor):
output_tensors.append({
'ptr': item.data_ptr(),
})
for out_port, _ in enumerate(output_tensors):
output_ptr = output_tensors[out_port]['ptr']
cls.op_to_out_tensor_map[output_ptr] = f"{call_id}"
dprint(f"call_id {call_id} output tensors {output_tensors}")
| PyProf-master | pyprof/nvtx/dlprof.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .nvmarker import init
from .nvmarker import add_wrapper as wrap
from .nvmarker import start_graph
from .nvmarker import stop_graph
| PyProf-master | pyprof/nvtx/__init__.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file intercepts (monkey patches) the following functions and adds NVTX markers.
torch.*
torch.Tensor.*
torch.nn.functional.*
torch.nn.*.forward
The NVTX markers (one or more) contain the following information
call trace (a list of file_name:line_number)
extra_repr() from torch.nn modules
module/class name
function name
inputs (args and kwargs)
scalar: name, type and value
tensor: name, shape and datatype
numpy: name, shape and datatype
list/tuple: a sequence of scalars or tensors or numpy arrays
"""
import torch
import torch.cuda.nvtx as nvtx
import numpy
import inspect as ins
import traceback
import math
import json
import importlib
from .config import Config
from .dlprof import DLProf
from .dlprof import dprint
# Singleton object tracking dlprof specific information
dlprof = DLProf()
# flag to control wrapping ops in nvtx markers
wrappers_enabled = True
def start_graph():
"""
start_graph()
This function is exported in __init__.py so the instrumentation code
can control which iteration to capture the network graph.
Use this in conjunction with config option --delay_graph_capture
"""
global wrappers_enabled
wrappers_enabled = True
dprint(f"Starting graph tracker wrappers enabled {wrappers_enabled}")
return
def stop_graph():
"""
stop_graph()
This function is exported in __init__.py so the instrumentation code can
stop the graph capture at the end of a specific iteration.
Use this in conjunction with config option --delay_graph_capture
"""
global wrappers_enabled
wrappers_enabled = False
dprint(f"Stopping graph tracker wrappers enabled {wrappers_enabled}")
return
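# A usage sketch for delayed capture (assumes pyprof.init(delay_graph_capture=True)
# was called; `loader`, `train_step` and the iteration number are placeholders):
#
#   from pyprof.nvtx import start_graph, stop_graph
#   for it, batch in enumerate(loader):
#       if it == 50:
#           start_graph()   # re-enable the NVTX wrappers for this iteration
#       train_step(batch)
#       if it == 50:
#           stop_graph()    # disable the wrappers again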
def isfunc(mod, f):
assert hasattr(mod, f)
attr = getattr(mod, f)
#Ignore functions like _add
if (len(f) >= 2):
if f[0] == "_" and f[1] != "_":
return False
#Ignore functions from this list
ignore = [
'__all__', '__array__', '__array_priority__', '__array_wrap__', '__bool__', '__builtins__', '__cached__',
'__class__', '__deepcopy__', '__delattr__', '__delitem__', '__dict__', '__dir__', '__doc__', '__file__',
'__format__', '__getattribute__', '__getitem__', '__hash__', '__index__', '__init__', '__init_subclass__',
'__iter__', '__len__', '__loader__', '__module__', '__name__', '__new__', '__nonzero__', '__package__',
'__path__', '__reduce__', '__reduce_ex__', '__repr__', '__reversed__', '__setattr__', '__setitem__',
'__setstate__', '__sizeof__', '__spec__', '__str__', '__subclasshook__', '__version__', '__weakref__'
]
#Add functions to this list if they cause recursion
ignore += ['size', 'tolist', 'dim', 'is_storage', 'item', 'data_ptr']
# Add functions to this list if they are called often, are generally extremely
    # short, and don't lead to GPU usage
#
ignore += [
'autocast_decrement_nesting', 'autocast_increment_nesting', 'contiguous', 'detach', 'empty', 'from_numpy',
'has_torch_function', 'is_autocast_enabled', 'is_available', 'is_complex', 'is_floating_point',
'is_grad_enabled', 'is_initialized', 'is_tensor', 'numel', 'requires_grad_', 'set_autocast_enabled', 'to',
'type'
]
if f in ignore:
return False
return ins.ismethod(attr) or ins.isfunction(attr) or ins.ismethoddescriptor(attr) or ins.isbuiltin(attr)
# Returns a dict string with a tracemarker and function stack in it
#
def traceMarker(op_name):
config = Config()
# Return a trace marker string and func_stack string
#
def get_trace_info(op_name):
cadena = []
stack = traceback.extract_stack()
func_stack = ""
# Previous frame name and line. This is the file and line
# that CALLED the frame we are in
#
prev_fn = ""
# Starting at index of 2 to ignore this function and its parent (traceMarker).
# Intentionally leaving in wrapper_func and other functions in this file as they
# may be needed to uniquify the node name
#
for idx in range(len(stack) - 2):
frame = stack[idx]
# Build traceMarker
#
# Don't include any functions from this file (nvmarker.py)
# Also skip repeated back to back cases of the same file/line (recursive calls)
#
fnl = "{}:{}".format(frame.filename, frame.lineno)
if (not frame.filename.endswith("nvmarker.py") and fnl != prev_fn):
cadena.append(fnl)
# Early exit if we aren't doing any funcStack code
#
if not config.func_stack_enabled:
continue
else:
ins_frame = ins.currentframe()
fn_name = dlprof.build_function_stack(idx, func_stack, frame.name, prev_fn, op_name, stack, ins_frame)
del ins_frame
prev_fn = fnl
# Append this frame's info into the function stack
#
func_stack = func_stack + "/" + fn_name
if config.func_stack_enabled:
func_stack = dlprof.cleanup_func_stack(func_stack, op_name)
return cadena, func_stack
d = {}
tm, fs = get_trace_info(op_name)
d['traceMarker'] = tm
if config.func_stack_enabled:
d['funcStack'] = fs
return str(d)
def modMarker(mod, fn_name, args):
"""
Returns the stringified extra_repr() of a module.
"""
assert (len(args) > 0)
d = {}
d['mod'] = mod.__name__
d['strRepr'] = args[0].extra_repr()
return str(d)
def add_wrapper(mod, fn_name):
config = Config()
# Get a pointer to the original function
func = getattr(mod, fn_name)
# Check if the mod has a string representation
# and is not a Script or Traced module (used by JIT)
# yapf: disable
s = hasattr(mod, "extra_repr") and (type(mod) is not torch.jit.ScriptModule
) and (type(mod) is not torch.jit.TopLevelTracedModule)
# yapf: enable
def wrapper_func(*args, **kwargs):
global wrappers_enabled
traceMarker_str = ""
input_callid_list = []
if wrappers_enabled:
if config.capture_input_ops:
## Stack for callids to work with nested monkey patch function calls
dlprof.patch_list.append(dlprof.call_id)
dlprof.capture_inputs(dlprof.call_id, input_callid_list, *args)
# Push trace marker
traceMarker_str = traceMarker(fn_name)
nvtx.range_push(traceMarker_str)
# Push module marker
if s:
m = modMarker(mod, fn_name, args)
nvtx.range_push(m)
# Create and push argument marker
#
# Disable wrappers while getting the argMarker in case it
# ends up executing another wrapped function
wrappers_enabled = False
if config.capture_input_ops:
saved_call_id = dlprof.call_id
# Keeps call_id correct when there are nested
# monkey patch functions
if dlprof.call_id != dlprof.patch_list[0]:
saved_call_id = dlprof.patch_list[0]
cadena = argMarker(mod, fn_name, args, kwargs, saved_call_id, input_callid_list)
else:
cadena = argMarker(mod, fn_name, args, kwargs)
nvtx.range_push(cadena)
wrappers_enabled = True
# Call the original function
result = func(*args, **kwargs)
if wrappers_enabled:
            # Pop argument marker
nvtx.range_pop()
# Pop module marker
if s:
nvtx.range_pop()
# Pop trace marker
nvtx.range_pop()
if config.capture_input_ops:
# Keeps call_id correct when there are nested
# monkey patch functions
saved_call_id = dlprof.call_id
if dlprof.call_id != dlprof.patch_list[0]:
saved_call_id = dlprof.patch_list[0]
dlprof.capture_outputs(saved_call_id, result)
# Store the callid -> op_name mapping
if traceMarker_str != "":
traceMarker_str = traceMarker_str.replace("\'", "\"")
traceMarker_dict = json.loads(traceMarker_str)
dlprof.call_id_to_op_map[saved_call_id] = traceMarker_dict['funcStack']
starting_call_id = dlprof.patch_list[0]
last_call_id = dlprof.patch_list.pop()
dlprof.call_id = dlprof.call_id + 1
return result
setattr(mod, fn_name, wrapper_func)
def argMarker(mod, op, args, kwargs, idx=-1, inputid_list=[]):
#For this function args is a tuple and kwargs is a dict
config = Config()
def tensor(arg, name=""):
if config.capture_input_ops:
cid = dlprof.op_to_out_tensor_map.get(arg.data_ptr(), -1)
name = dlprof.call_id_to_op_map.get(int(cid), "")
a = {}
a['name'] = name
a['type'] = "tensor"
a['shape'] = tuple(arg.size())
a['dtype'] = str(arg.dtype).split(".")[-1]
cadena['args'].append(a)
def ndarray(arg, name=""):
a = {}
a['name'] = name
a['type'] = "ndarray"
a['shape'] = arg.shape
a['dtype'] = str(arg.dtype).split(".")[-1]
cadena['args'].append(a)
def seq(arg, name=""):
assert issequence(arg)
a = {}
a['name'] = name
if isinstance(arg, list):
a['type'] = "list"
a['value'] = arg
else:
a['type'] = "tuple"
# The arg could be torch.Size, which is a subclass of tuple
# Therefore, explicitly convert to tuple
a['value'] = tuple(arg)
cadena['args'].append(a)
def scalar(arg, name=""):
a = {}
a['name'] = name
a['type'] = type(arg).__name__
#handle the case when the argument is +/- inf or nan
if arg == float('inf'):
a['value'] = "inf"
elif arg == float('-inf'):
a['value'] = "-inf"
elif isinstance(arg, float) and math.isnan(arg):
a['value'] = "nan"
else:
a['value'] = arg
cadena['args'].append(a)
def isscalar(arg):
return (type(arg) is int) or (type(arg) is float) or (type(arg) is bool) or (arg is None) or (type(arg) is str)
def issequence(arg):
return isinstance(arg, list) or isinstance(arg, tuple)
def foo(args, name):
#args should be an iterable sequence e.g. list or tuple
for arg in args:
if isinstance(arg, torch.Tensor):
if arg.dim() == 0:
scalar(arg.item(), name)
else:
tensor(arg, name)
elif isinstance(arg, numpy.ndarray):
ndarray(arg, name)
elif (isscalar(arg)):
scalar(arg, name)
elif issequence(arg):
if (len(arg) == 0) or isscalar(arg[0]): #An empty sequence or a sequence of scalars
seq(arg, name)
else: # A sequence of tensors or numpy arrays
foo(arg, name)
'''
else:
print("The following arg is none of Tensor, numpy array, scalar but a %s" % (str(type(arg))))
print("Mod: %s" % str(mod.__name__))
print("Op: %s" % str(op))
print(dir(arg))
'''
cadena = {}
cadena['mod'] = mod.__name__
cadena['op'] = op
if config.capture_input_ops:
cadena['callid'] = idx
cadena['input_callids'] = inputid_list
cadena['args'] = []
foo(args, "")
for k, v in kwargs.items():
foo((v, ), k)
return str(cadena)
def patchClass(cls):
for f in dir(cls):
if isfunc(cls, f):
add_wrapper(cls, f)
def patch_torch_classes():
"""Monkey-patch all classes in torch"""
for cls in [torch, torch.Tensor, torch.nn.functional, torch.distributed]:
patchClass(cls)
def patch_torch_nn_forward_functions():
"""Monkey-patch all forward functions in torch.nn libraries"""
for cls in [torch.nn.RNN, torch.nn.RNNCell, torch.nn.LSTM, torch.nn.LSTMCell, torch.nn.GRU, torch.nn.GRUCell]:
if isfunc(cls, 'forward'):
add_wrapper(cls, 'forward')
def patch_dataloader():
"""Monkey-patch the dataloader in torch.utils.data"""
mod = torch.utils.data.dataloader
old_iter = mod.DataLoader.__iter__
def new_iter(self, *args, **kwargs):
# Push trace marker
nvtx.range_push(traceMarker("Dataloader"))
# First pass is for creating the dataloader + returning the first data
cadena = argMarker(mod, "DataLoader", args, kwargs)
nvtx.range_push(cadena)
for x in old_iter(self, *args, **kwargs):
# Pop tracemarker
nvtx.range_pop()
# Dataloader stop, Model start
nvtx.range_pop()
yield x
# Push trace marker
nvtx.range_push(traceMarker("DataLoader"))
# Model stop, dataloader start
cadena = argMarker(mod, "DataLoader", args, kwargs)
nvtx.range_push(cadena)
# Pop the last iteration before returning
nvtx.range_pop()
nvtx.range_pop()
mod.DataLoader.__iter__ = new_iter
def patch_apex():
"""Monkey-patch functions in APEX"""
patch_apex_c()
patch_apex_pyt()
def patch_apex_c():
"""Monkey-patch C-implemented functions in APEX"""
if importlib.util.find_spec("amp_C") is not None:
import amp_C
patchClass(amp_C)
if importlib.util.find_spec("fused_adam_cuda") is not None:
import fused_adam_cuda
patchClass(fused_adam_cuda)
if importlib.util.find_spec("fused_lamb_cuda") is not None:
import fused_lamb_cuda
patchClass(fused_lamb_cuda)
if importlib.util.find_spec("fused_layer_norm_cuda") is not None:
import fused_layer_norm_cuda
patchClass(fused_layer_norm_cuda)
if importlib.util.find_spec("distributed_lamb_cuda") is not None:
import distributed_lamb_cuda
patchClass(distributed_lamb_cuda)
if importlib.util.find_spec("xentropy_cuda") is not None:
import xentropy_cuda
patchClass(xentropy_cuda)
if importlib.util.find_spec("mlp_cuda") is not None:
import mlp_cuda
patchClass(mlp_cuda)
def patch_apex_pyt():
"""Monkey-patch pytorch-implemented functions in APEX"""
if importlib.util.find_spec("apex") is not None:
patch_apex_module("apex.amp")
patch_apex_module("apex.contrib.groupbn")
patch_apex_module("apex.contrib.multihead_attn")
patch_apex_module("apex.contrib.optimizers")
patch_apex_module("apex.contrib.sparsity")
patch_apex_module("apex.contrib.xentropy")
patch_apex_module("apex.fp16_utils")
patch_apex_module("apex.mlp")
patch_apex_module("apex.multi_tensor_apply")
patch_apex_module("apex.optimizers")
patch_apex_module("apex.parallel")
#patch_apex_module("apex.RNN") # Confirmed to be dead code. Do not patch
def is_same_module_or_submodule(orig, incoming):
"""
Returns true if the incoming module is the same module as the original,
or is a submodule of the original module
"""
if incoming is None:
return False
if orig == incoming:
return True
if incoming.__name__.startswith(orig.__name__):
return True
return False
def patch_apex_module(modstr):
"""
Patch all forward/backward/step functions in classes in the given apex module.
"""
if importlib.util.find_spec(modstr) is not None:
mod = importlib.import_module(modstr)
for n, v in ins.getmembers(mod):
# This makes sure we don't patch random other modules that are imported by the target module
#
if is_same_module_or_submodule(mod, ins.getmodule(v)):
if (ins.isclass(v)):
patch_apex_class(v)
def patch_apex_class(cls):
"""
Patch all forward/backward/step functions in the given apex class
"""
for f in cls.__dict__:
if (ins.isfunction(cls.__dict__[f])):
if f in ["forward", "backward", "step"]:
add_wrapper(cls, f)
def push_nvtx_model_config(config):
"""
Helper function to dump the passed in dict config as an nvtx
marker with "model_config" key
"""
nvtx_msg = json.dumps({"model_config": config})
nvtx.range_push(nvtx_msg)
def patch_dataloader_init():
"""
Capture dataloader config (num_workers and pin_memory) and
emit a model_config nvtx range with the information
"""
mod = torch.utils.data.dataloader
old_init = mod.DataLoader.__init__
def new_init(self, *args, **kwargs):
num_workers = kwargs.get("num_workers", 0)
pin_memory = kwargs.get("pin_memory", False)
push_nvtx_model_config({"num_workers": num_workers, "pin_memory": pin_memory})
old_init(self, *args, **kwargs)
nvtx.range_pop()
mod.DataLoader.__init__ = new_init
# Flag to indicate that cudnn_benchmark_disabled has already been reported
#
cudnn_benchmark_disabled_reported = False
def patch_with_always_benchmark(mod, fn_name):
"""
Patch the given mod/function so that if it is ever executed and
torch.backends.cudnn.benchmark is not true, it will emit an nvtx
range to report that fact
"""
assert isfunc(mod, fn_name)
old_fn = getattr(mod, fn_name)
def always_benchmark_wrapper(*args, **kwargs):
global cudnn_benchmark_disabled_reported
add_nvtx = not torch.backends.cudnn.benchmark and not cudnn_benchmark_disabled_reported
if add_nvtx:
cudnn_benchmark_disabled_reported = True
push_nvtx_model_config({"cudnn_benchmark_disabled": True})
result = old_fn(*args, **kwargs)
if add_nvtx:
nvtx.range_pop()
return result
setattr(mod, fn_name, always_benchmark_wrapper)
def patch_never_call(mod, fn_name, key):
"""
Patch the given mod/function. If the function is executed, emit
an nvtx_range with data indicating that 'key' was true
"""
old_fn = getattr(mod, fn_name)
def wrapper_func(*args, **kwargs):
push_nvtx_model_config({key: True})
result = old_fn(*args, **kwargs)
nvtx.range_pop()
return result
setattr(mod, fn_name, wrapper_func)
def patch_never_call_with_args(mod, fn_name, key, bad_args):
"""
Patch the given mod/function. If the function is executed
and any of the bad args have any of the listed bad values,
emit an nvtx_range with data indicating that 'key' was true
"""
old_fn = getattr(mod, fn_name)
def wrapper_func(*args, **kwargs):
signature = ins.signature(old_fn)
bound = signature.bind(*args, **kwargs)
bound.apply_defaults()
problem = False
for k, v in bound.arguments.items():
if k in bad_args:
if v in bad_args[k]:
problem = True
if problem:
push_nvtx_model_config({key: True})
result = old_fn(*args, **kwargs)
if problem:
nvtx.range_pop()
return result
setattr(mod, fn_name, wrapper_func)
def patch_model_configs():
"""
Patch functions that help gather high-level configuration options for the model.
All resulting nvtx ranges will have 'model_config' as the primary key
"""
patch_dataloader_init()
patch_with_always_benchmark(torch.nn.functional, "conv1d")
patch_with_always_benchmark(torch.nn.functional, "conv2d")
patch_with_always_benchmark(torch.nn.functional, "conv3d")
patch_with_always_benchmark(torch.nn.functional, "conv_transpose1d")
patch_with_always_benchmark(torch.nn.functional, "conv_transpose2d")
patch_with_always_benchmark(torch.nn.functional, "conv_transpose3d")
patch_never_call(torch.autograd.detect_anomaly, "__init__", "detect_anomaly")
patch_never_call(torch.autograd, "gradcheck", "gradcheck")
patch_never_call(torch.autograd, "gradgradcheck", "gradgradcheck")
patch_never_call(torch.autograd.profiler.record_function, "__init__", "record_function")
# Patch both AMP libraries
#
import importlib
if importlib.util.find_spec("apex") is not None and importlib.util.find_spec("apex.amp") is not None:
import apex.amp
patch_never_call_with_args(apex.amp, "initialize", "amp_enabled", {"enabled": {True}})
patch_never_call_with_args(torch.cuda.amp, "autocast", "amp_enabled", {"enabled": {True}})
patch_never_call_with_args(torch.autograd.profiler.profile, "__init__", "profile", {"enabled": {True}})
patch_never_call_with_args(torch.autograd.set_detect_anomaly, "__init__", "detect_anomaly", {"mode": {True}})
patch_never_call_with_args(torch.autograd.profiler.emit_nvtx, "__init__", "emit_nvtx", {"enabled": {True}})
def init(*args, **kwargs):
"""
Initialize pyprof and monkey-patch Torch functions
Kwargs:
enable_function_stack (bool): When true, function stack information
will be added to NVTX markers
capture_input_ops (bool): When true, input tensor names will be added
to NVTX markers and enable_function_stack is set to True.
"""
global wrappers_enabled
config = Config(*args, **kwargs)
if config.delay_graph_capture:
## Disable wrappers_enabled at init when user wants to control
## which iteration to begin graph capture
wrappers_enabled = False
print("Initializing NVTX monkey patches")
patch_apex()
patch_dataloader()
patch_torch_classes()
patch_torch_nn_forward_functions()
patch_model_configs()
print("Done with NVTX monkey patching")
| PyProf-master | pyprof/nvtx/nvmarker.py |
"""
A script to convert the csv file output into two kinds of graphs:
1. Graphs of each hyperparameter plotted against the benchmark (e.g. valid perplexity)
2. Color graphs that show the relationship between any two hyperparameters and the benchmark
To run the script, use:
python3 visualize.py --file [the name of the results csv file]
--n [the number of samples to visualize]
--subplots [the number of subplots to show in a plot]
--max [the max value of benchmark you care about]
# Copyright (c) 2018 NVIDIA Corporation
"""
import argparse
import os
import math
import random
import re
from matplotlib import pyplot as plt
import numpy as np
def get_names(params):
names = []
for part in params:
names.append(part[2:part.find('=')])
return names
def is_number(s):
if is_float(s):
return True, float(s)
if is_int(s):
return True, int(s)
return False, s
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
def is_float(s):
  return re.match(r"^\d+?\.\d+?$", s) is not None
def colorplot(results, xlabel, ylabel, values, benchmark, graph_folder):
fig, ax = plt.subplots()
scat = ax.scatter(results[xlabel], results[ylabel], c=values)
fig.colorbar(scat, label=benchmark)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.savefig(os.path.join(graph_folder, xlabel + '_' + ylabel + '.png'))
plt.close()
def main():
parser = argparse.ArgumentParser(description='visualizing Milano results')
parser.add_argument('--file', type=str, default='results.csv',
help='location of the result file')
parser.add_argument('--n', type=int, default=-1,
help='number of results to visualize. -1 for all')
parser.add_argument('--subplots', type=int, default=6,
help='the number of subplots in one plot')
parser.add_argument('--max', type=int, default=-1,
help='the max value of the benchmark you care about. -1 means you care about all values')
args = parser.parse_args()
result_lines = open(args.file, 'r').readlines()
benchmark = result_lines[0].split(',')[1]
if len(result_lines) <= 1:
raise ValueError('No experiments recorded')
lines = [line.split(',') for line in result_lines[1:]]
params = lines[0][2].split()
param_names = get_names(params)
raw_benchmarks = [float(line[1]) for line in lines]
if not args.max == -1:
raw_benchmarks = [b if b < args.max else float('inf') for b in raw_benchmarks]
max_ = max([v for v in raw_benchmarks if v != float('inf')])
benchmarks = [v if v != float('inf') else max_ * 1.2 + random.uniform(-max_ * 0.05, max_ * 0.05) for v in raw_benchmarks]
results = {name: [] for name in param_names}
for line in lines:
for part in line[2].split():
idx = part.find('=')
results[part[2:idx]].append(is_number(part[idx + 1:])[1])
samples = args.n if (args.n != -1 and args.n <= len(lines)) else len(lines)
graph_folder = 'graphs_{}'.format(samples)
os.makedirs(graph_folder, exist_ok=True)
n_params = len(param_names)
n_rows = math.ceil(args.subplots / 2)
n_cols = 2
n_plots = math.ceil(n_params / args.subplots)
for j in range(n_plots):
for i, name in enumerate(param_names[j * args.subplots : (j + 1) * args.subplots]):
plt.subplot(n_rows, n_cols, i + 1)
plt.plot(results[name][:samples], benchmarks[:samples], 'bo')
plt.title(name)
plt.ylabel(benchmark)
plt.savefig(os.path.join(graph_folder, 'single_params_{}.png'.format(j)))
plt.show()
plt.close()
for i, xlabel in enumerate(param_names):
for j, ylabel in enumerate(param_names):
if i >= j:
continue
colorplot(results, xlabel, ylabel, benchmarks, benchmark, graph_folder)
if __name__ == '__main__':
main() | Milano-master | visualize.py |
# Copyright (c) 2018 NVIDIA Corporation
import argparse
import runpy
from milano.exec_utils import ExecutionManager
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Tuning parameters')
parser.add_argument("--config", required=True,
help="Path to the configuration file.")
parser.add_argument("--output_file", default="results.csv",
help="Path to the output file containing results.")
parser.add_argument("--verbose", type=int, default=1,
help="How much output to print. Setting to 0 mutes "
"the script, 3 is the highest level.")
args = parser.parse_args()
config = runpy.run_path(args.config)
backend_manager = config['backend'](
script_to_run=config['script_to_run'],
**config['backend_params'],
)
search_algorithm = config['search_algorithm'](
params_to_tune=config['params_to_tune'],
params_to_try_first=config.get('params_to_try_first', None),
objective=config['objective'],
**config['search_algorithm_params'],
)
exec_mng = ExecutionManager(
backend_manager=backend_manager,
search_algorithm=search_algorithm,
res_pattern=config['result_pattern'],
objective=config['objective'],
constraints=config.get('constraints', []),
output_file=args.output_file,
verbose=args.verbose,
)
exec_mng.start_tuning()
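# A skeleton of the config file that --config points to (all values below are
# hypothetical). The file is plain Python executed with runpy.run_path, so it
# only needs to define the names read above:
#
#   script_to_run = "benchmark.sh"            # wrapper that runs one trial
#   result_pattern = "Validation accuracy:"   # parsed from each job's log
#   objective = "maximize"                    # or "minimize"
#   backend = SomeBackend                     # e.g. a SLURM or AWS backend class
#   backend_params = {...}
#   search_algorithm = SomeSearchAlgorithm
#   search_algorithm_params = {...}
#   params_to_tune = {...}                    # the search space
#   params_to_try_first = None                # optional
#   constraints = []                          # optional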
| Milano-master | tune.py |
# Copyright (c) 2018 NVIDIA Corporation
import asyncio
import numpy as np
import pandas as pd
import re
import traceback
from typing import Iterable, Mapping, Any, Tuple, Optional
from .backends.base import Backend, JobStatus, RetrievingJobLogsError, \
IsWorkerAvailableError, GettingJobStatusError, \
KillingJobError, LaunchingJobError
from .search_algorithms.base import SearchAlgorithm
class ExecutionManager:
def __init__(self,
backend_manager: Backend,
search_algorithm: SearchAlgorithm,
res_pattern: str,
objective: str,
constraints: Iterable[Mapping[str, Any]],
output_file: str = None,
verbose=0,
sleep_time=5,
wait_for_logs_time=10,
max_retries=5) -> None:
self._res_pattern = res_pattern
self._search_algorithm = search_algorithm
self._backend_manager = backend_manager
self._num_workers = self._backend_manager.num_workers
self._constraints = constraints
self._output_file = output_file
self._verbose = verbose
self._sleep_time = sleep_time
self._wait_for_logs_time = wait_for_logs_time
self._max_retries = max_retries
if objective.lower() not in ["minimize", "maximize"]:
raise ValueError(
'Objective has to be "minimize" or "maximize", '
'but "{}" was provided'.format(objective)
)
self._objective = objective.lower()
if self._objective == "minimize":
self._failure_score = np.inf
else:
self._failure_score = -np.inf
self.final_results = None
self._cnt = 0
def _parse_result(self, log: str) -> Optional[float]:
"""This method takes a log string and parses it to produce resulting float.
More precisely it looks for the last occurrence of ``self._res_pattern``
    in the log file and takes the float that is supposed to be present right after
the sought pattern. If no pattern was found, None is returned.
"""
res_pos = log.rfind(self._res_pattern)
# -1 means the res_pattern is not found, returning None
if res_pos == -1:
return None
res_pos += len(self._res_pattern)
return float(log[res_pos:].split(maxsplit=1)[0])
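  # Example: with res_pattern "Validation loss:" and a log whose last match is
  # "... Validation loss: 0.123 ...", this returns 0.123; if the pattern never
  # appears in the log, None is returned.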
def _check_constraints(self, log: str) -> bool:
"""This method returns True if all constraints are satisfied
    and False otherwise. We default to False in case of an exception.
"""
def formatter(value_str: str) -> float:
return float(value_str)
pattern = None
value = None
try:
for constraint_dict in self._constraints:
pattern = constraint_dict["pattern"]
for i, pos in enumerate(re.finditer(pattern, log)):
if i < constraint_dict.get("skip_first", 0):
continue
val_pos = pos.end()
value_string = log[val_pos:].split(maxsplit=1)[0]
cur_formatter = constraint_dict.get("formatter", formatter)
value = cur_formatter(value_string)
rng = constraint_dict["range"]
if value < rng[0] or value > rng[1]:
if self._verbose > 2:
print('Constraint "{}" not satisfied with value = {}'.format(
pattern, value,
))
return False
return True
except:
      print('Constraint checking with pattern "{}" and with value = {} threw an exception. Setting not satisfied.'.format(
pattern, value,
))
return False
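  # Example constraint dict (pattern and range are hypothetical): skip the
  # first reported value and require every later one to stay in [0.0, 5.0]:
  #   {"pattern": "train loss:", "range": [0.0, 5.0], "skip_first": 1}
  # An optional "formatter" callable can convert the matched string to a float.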
async def get_available_worker(self) -> int:
"""This method returns the first available worker.
The backend is queried for the next worker every sleep_time second.
"""
while True:
available_id = -1
for worker_id in range(self._num_workers):
try:
worker_available = self._backend_manager.is_worker_available(worker_id)
except IsWorkerAvailableError as e:
worker_available = False
if self._verbose > 1:
print("IsWorkerAvailableError raised for worker {}: {}".format(
worker_id, e.message,
))
if worker_available:
available_id = worker_id
break
if available_id == -1:
await asyncio.sleep(self._sleep_time)
else:
return available_id
async def _handle_running_job(self,
job_info: object,
job_params: str,
worker_id: int) -> Tuple[Optional[str],
Optional[float]]:
"""Helper function that handles running jobs."""
result, job_status = None, None
try:
log = self._backend_manager.get_logs_for_job(job_info)
except RetrievingJobLogsError:
log = None
if log is not None:
if not self._check_constraints(log):
for i in range(self._max_retries):
try:
self._backend_manager.kill_job(job_info)
break
except KillingJobError as e:
if i == self._max_retries - 1:
if self._verbose > 1:
print('Could not kill job "{}" on worker {}: {}'.format(
job_params, worker_id, e.message,
))
# continuing execution, since worker can't become available
return None, None
result = self._failure_score
job_status = "Some constraints are not satisfied"
if self._verbose > 1:
print(
'Killed job "{}" on worker {}: constraints are not satisfied'
.format(job_params, worker_id)
)
return job_status, result
async def _handle_succeeded_job(self,
job_info: object,
job_params: str,
worker_id: int) -> Tuple[str, float]:
"""Helper function that handles succeeded jobs."""
    # trying max_retries times and then treating the job as failed if the logs can't be retrieved
log = None
for i in range(self._max_retries):
      # waiting here to give the backend time to finalize the results
await asyncio.sleep(self._wait_for_logs_time)
try:
log = self._backend_manager.get_logs_for_job(job_info)
break
except RetrievingJobLogsError as e:
if i == self._max_retries - 1:
if self._verbose > 1:
print('Could not access logs for job "{}" on worker {}: {}'.format(
job_params, worker_id, e.message,
))
return "Job failed: could not access logs", self._failure_score
# log was successfully retrieved, trying to parse results
result = self._parse_result(log)
if result is None:
if self._verbose > 1:
print('"{}" was not found in log for job {} on worker {}'.format(
self._res_pattern, job_params, worker_id,
))
return (
"Job failed: {} was not found in job's log".format(self._res_pattern),
self._failure_score
)
# got valid result, checking constraints
if not self._check_constraints(log):
if self._verbose > 1:
print("Constraints not satisfied on job {}".format(job_params))
return "Some constraints are not satisfied", self._failure_score
# everything is correct, returning result
if self._verbose > 1:
print("Got {} {} for job \"{}\" on worker {}".format(
self._res_pattern, result, job_params, worker_id,
))
return "Job succeeded", result
async def _handle_failed_job(self,
job_info: object,
job_params: str,
worker_id: int) -> Tuple[str, float]:
"""Helper function that handles failed jobs."""
if self._verbose > 1:
print('Job "{}" failed on worker {}'.format(job_params, worker_id))
return "Job failed", self._failure_score
async def _start_job_and_push_results(self,
job_params: str,
worker_id: int,
results_queue: asyncio.Queue) -> None:
"""This method is responsible for starting job and pushing result to queue.
It will launch the job with ``job_params`` on worker ``worker_id`` and
then wait until job status becomes "succeeded" or "failed", periodically
checking that job's log satisfies constraints. The backend
is queried for the job status every sleep_time seconds. As soon as the backend
reports success or failure the ``(job_params, result, job_status)`` tuple
is pushed into the ``results_queue``. The result is obtained by getting the
job log from the backend (trying a few times if something goes wrong) and
searching for the ``self._res_pattern``. In case of
failure or when ``self._res_pattern`` was not found in job log, result is
equal to ``np.inf`` (or ``-np.inf``, depending on the objective).
"""
    # making the function exception-safe, since exceptions raised here are not
    # going to be handled elsewhere and must not stop the main program flow
try:
for i in range(self._max_retries):
try:
job_info = self._backend_manager.launch_job(worker_id, job_params)
break
except LaunchingJobError as e:
if i == self._max_retries - 1:
if self._verbose > 1:
print("Backend can't start job {} on worker {}: {}".format(
job_params, worker_id, e.message,
))
elif self._verbose == 1:
self._cnt += 1
print("Processed {} jobs".format(self._cnt), end="\r")
await results_queue.put((self._failure_score, job_params,
"Job failed: can't launch job on backend"))
return
if self._verbose > 1:
print("Started job \"{}\" on worker {}".format(job_params, worker_id))
while True:
for i in range(self._max_retries):
try:
status = self._backend_manager.get_job_status(job_info)
break
except GettingJobStatusError as e:
if i == self._max_retries - 1:
# setting status to JobStatus.RUNNING, since it's unclear
# what state the job is in currently
status = JobStatus.RUNNING
if self._verbose > 1:
print("Can't get status for job {} on worker {}: {}".format(
job_params, worker_id, e.message,
))
if status == JobStatus.RUNNING or status == JobStatus.PENDING:
job_status, result = await self._handle_running_job(
job_info, job_params, worker_id,
)
if result is None:
# everything is ok, can continue running this job
await asyncio.sleep(self._sleep_time)
continue
elif status == JobStatus.SUCCEEDED:
job_status, result = await self._handle_succeeded_job(
job_info, job_params, worker_id,
)
elif status == JobStatus.FAILED or status == JobStatus.KILLED or status == JobStatus.NOTFOUND:
job_status, result = await self._handle_failed_job(
job_info, job_params, worker_id,
)
else:
raise RuntimeError("Got unknown status from job: {}".format(status))
if self._verbose == 1:
self._cnt += 1
print("Processed {} jobs".format(self._cnt), end="\r")
await results_queue.put((result, job_params, job_status))
return
except Exception as e:
if self._verbose > 1:
print("Job {} on worker {} failed with unhandled exception:".format(
job_params, worker_id,
))
print(traceback.format_exc())
elif self._verbose == 1:
self._cnt += 1
print("Processed {} jobs".format(self._cnt), end="\r")
await results_queue.put((self._failure_score, job_params,
"Job failed: unhandled exception"))
async def _process_jobs(self,
jobs_queue: asyncio.Queue,
results_queue: asyncio.Queue) -> None:
"""Main routine for processing jobs.
This method will query the ``jobs_queue`` for the jobs parameters and
launch jobs as soon as new parameters are pushed in the queue. The jobs
are launched with :meth:`_start_job_and_push_results` calls which are
wrapped with ``asyncio.ensure_future`` so that they don't block code
execution. In order to ensure that all jobs are finished, the futures
objects are stored in ``jobs_dispatched`` list and the method waits for
all of them before finishing. The main loop will query for the new job
parameters every sleep_time seconds and will stop as soon as it gets None.
"""
jobs_dispatched = []
while True:
job_params = await jobs_queue.get()
if job_params is None:
break
# converting dictionary into cmd arguments
job_params = " ".join(
["{}={}".format(name, val) for name, val in job_params.items()]
)
worker_id = await self.get_available_worker()
jobs_dispatched.append(asyncio.ensure_future(
self._start_job_and_push_results(job_params, worker_id, results_queue)
))
# waiting for job to start and make worker busy
await asyncio.sleep(self._sleep_time)
for job_dispatched in jobs_dispatched:
await asyncio.wait_for(job_dispatched, timeout=None)
await results_queue.put(None)
async def _generate_jobs(self,
jobs_queue: asyncio.Queue,
results_queue: asyncio.Queue) -> None:
"""This method is used to generate all search jobs.
It uses ``self._search_algorithm`` to get the first set of jobs by calling
``self._search_algorithm.gen_initial_jobs()`` and pushes all of them to the
``jobs_queue``. It then enters the loop until it gets None from the
``results_queue``. On each iteration of the loop it will wait for the new
result to appear in the results_queue and ask the
``self._search_algorithm`` to generate new jobs based on the last result
retrieved using ``self._search_algorithm.gen_new_jobs``. It will then push
all new jobs into the ``jobs_queue`` and save the current ``results.csv``.
"""
init_jobs = self._search_algorithm.gen_initial_params()
for job_params in init_jobs:
await jobs_queue.put(job_params)
results = []
cnt = 0
while True:
result_tuple = await results_queue.get()
if result_tuple is None:
break
cnt += 1
results.append(result_tuple + (cnt,))
new_jobs = self._search_algorithm.gen_new_params(
result=result_tuple[0],
params=dict([arg_val.split('=') for arg_val in result_tuple[1].split()]),
evaluation_succeeded=(not result_tuple[2].startswith("Job failed"))
)
for job_params in new_jobs:
await jobs_queue.put(job_params)
if self._objective == "minimize":
sorted_results = sorted(results)
else:
sorted_results = sorted(results, reverse=True)
if self._output_file:
pd.DataFrame(
data=sorted_results,
columns=[self._res_pattern, "params", "status", "job_id"],
).to_csv(self._output_file)
if self._verbose > 1:
print(
"\nTop-10 parameters:\n {}".format(
"\n ".join(["{} {} for job \"{}\"".format(
self._res_pattern, value, cmd,
) for value, cmd, status, job_id in sorted_results[:10]])
)
)
self.final_results = pd.DataFrame(
data=sorted_results,
columns=[self._res_pattern, "params", "status", "job_id"],
)
def start_tuning(self) -> None:
"""This is the main function that should be called to start tuning."""
self._cnt = 0
loop = asyncio.get_event_loop()
jobs_queue = asyncio.Queue(loop=loop)
results_queue = asyncio.Queue(loop=loop)
generate_jobs_coroutine = self._generate_jobs(
jobs_queue=jobs_queue, results_queue=results_queue,
)
process_jobs_coroutine = self._process_jobs(
jobs_queue=jobs_queue, results_queue=results_queue,
)
loop.run_until_complete(asyncio.gather(generate_jobs_coroutine,
process_jobs_coroutine))
loop.close()
| Milano-master | milano/exec_utils.py |
# Copyright (c) 2018 NVIDIA Corporation | Milano-master | milano/__init__.py |
# Copyright (c) 2018 NVIDIA Corporation
import subprocess
import json
import os
import shutil
import copy
import time
import re
from .utils import SSHClient
from .base import Backend, JobStatus, RetrievingJobLogsError, \
GettingJobStatusError, KillingJobError, \
IsWorkerAvailableError, LaunchingJobError
class SLURMBackend(Backend):
"""Class for working with SLURM backend."""
def __init__(self, script_to_run: str, workers_config: dict) -> None:
super().__init__(script_to_run, workers_config)
self._workers_config = workers_config
self._num_workers = self._workers_config['num_workers']
self._partition = self._workers_config['partition']
self._key_path = workers_config['key_path']
self._entrypoint = workers_config['entrypoint']
self._username = workers_config['username']
with open(script_to_run) as fin:
self._script_code = fin.read()
self._ssh_client = SSHClient(private_key_path=self._key_path)
try:
self._ssh_client.connect(address=self._entrypoint,
username=self._username)
self._ssh_client.exec_command_blocking("cat {} > milano_script.sh")
self._workers_job = [-1] * self._num_workers
except:
raise Exception("Couldn't connect to the backend. Check your credentials")
def get_job_status(self, job_info: object) -> JobStatus:
job_id = int(job_info)
try:
ec, stdout, stderr = self._ssh_client.exec_command_blocking(
"scontrol show job {}".format(job_id))
except:
raise GettingJobStatusError(stderr)
match = re.search('JobState=(\S*)', stdout, re.IGNORECASE)
if match is not None:
result_string = match.group(1)
else:
return JobStatus.NOTFOUND
if result_string == "COMPLETED":
return JobStatus.SUCCEEDED
elif result_string == "FAILED" or result_string == "NODE_FAIL" or \
result_string == "REVOKED" or result_string == "TIMEOUT":
return JobStatus.FAILED
elif result_string == "PENDING":
return JobStatus.PENDING
elif result_string == "RUNNING" or result_string == "CONFIGURING" or \
result_string == "COMPLETING":
return JobStatus.RUNNING
elif result_string == "CANCELLED" or result_string == "STOPPED" or \
result_string == "SUSPENDED":
return JobStatus.KILLED
else:
print("~~~~~~~~~~~~~~~~~Got the following status: {}".format(result_string))
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print(stdout)
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
return JobStatus.UNKNOWN
def get_logs_for_job(self, job_info: object) -> str:
try:
job_id = int(job_info)
ec, stdout, stderr = self._ssh_client.exec_command_blocking(
"scontrol show job {}".format(job_id))
match = re.search('StdOut=(\S*)', stdout, re.IGNORECASE)
path = match.group(1)
ec, stdout, stderr = self._ssh_client.exec_command_blocking(
"cat {}".format(path))
return stdout
except:
raise RetrievingJobLogsError(stderr)
def kill_job(self, job_info: object) -> None:
try:
job_id = job_info
ec, stdout, stderr = self._ssh_client.exec_command_blocking(
"scancel {}".format(job_id))
except:
raise KillingJobError(stderr)
def is_worker_available(self, worker_id: int) -> bool:
if self._workers_job[worker_id] == -1:
return True
else:
try:
status = self.get_job_status(self._workers_job[worker_id])
except GettingJobStatusError as e:
raise IsWorkerAvailableError(e.message)
if status == JobStatus.RUNNING or status == JobStatus.PENDING \
or status == JobStatus.KILLED:
return False
else:
self._workers_job[worker_id] = -1
return True
def launch_job(self, worker_id: int, params: str) -> object:
# first, copy script to entrypoint
try:
script_code = self._script_code.replace('"$@"', params)
script_name = "script-{}-{}.sh".format(time.time(), worker_id)
# move script to entrypoint
ec, stdout, stderr = self._ssh_client.exec_command_blocking('echo "{}" > {}'.format(script_code,
script_name))
# execute script on entrypoint
ec, stdout, stderr = self._ssh_client\
.exec_command_blocking('sbatch -p {} {}'
.format(self._partition, script_name))
match = re.search('Submitted batch job (\S*)', stdout, re.IGNORECASE)
job_id = int(match.group(1))
self._workers_job[worker_id] = job_id
return job_id
except:
      raise LaunchingJobError(stderr)
@property
def num_workers(self) -> int:
return self._num_workers | Milano-master | milano/backends/slurm.py |
# Copyright (c) 2018 NVIDIA Corporation
import boto3
import botocore
import time
import threading
import json
import hashlib
from milano.backends.utils import SSHClient
class EC2Instance:
def __init__(self, resource, username, private_key_path):
self._resource = resource
self._private_key_path = private_key_path
self._username = username
self._ssh_client = None
self._terminated = False
def is_running(self):
return self.state() == 'running'
def is_terminated(self):
s = self.state()
return s != 'pending' and s != 'running'
def state(self):
self._reload()
s = self._resource.state['Name']
if s == 'terminated':
self._terminated = True
return s
def public_ip(self):
self._reload()
return self._resource.public_ip_address
def instance_id(self):
return self._resource.instance_id
def _reload(self):
if not self._terminated:
self._resource.reload()
def __try_connect(self):
if self._resource.state['Name'] != 'running':
raise Exception("instance not running")
if self._ssh_client is None:
client = SSHClient(self._private_key_path)
client.connect(self.public_ip(), self._username)
self._ssh_client = client
def exec_command(self, command):
self.__try_connect()
return self._ssh_client.exec_command(command)
def exec_command_blocking(self, command, retries=3):
for i in range(retries):
try:
self.__try_connect()
return self._ssh_client.exec_command_blocking(command)
except Exception as e:
if i < retries - 1:
try:
if self._ssh_client is not None:
self._ssh_client.close()
except:
pass
self._ssh_client = None
else:
raise e
def keep_alive(self):
# As long as this file remains less than 5 minutes old, the instance
# won't terminate.
try:
self.exec_command_blocking("touch /home/ubuntu/.milano_keep_alive")
except:
pass
def is_driver_working(self):
try:
ec, _, _ = self.exec_command_blocking("nvidia-smi")
return ec == 0
except:
return False
def datasets_present(self, datasets):
try:
for i in range(len(datasets)):
ec, _, _ = self.exec_command_blocking("ls /home/ubuntu/data/" + str(i))
if ec != 0:
return False
except:
return False
return True
def terminate(self):
return self._resource.terminate()
def startup_script(datasets):
dataset_mounts = "\n"
for i in range(len(datasets)):
if datasets[i]['type'] == 's3':
dataset_mounts += "aws s3 sync {src} {dst}\n".format(
src="s3://{bucket}/{prefix}".format(
bucket=datasets[i]['bucket'],
prefix=datasets[i].get('prefix', "")),
dst="/home/ubuntu/data/" + str(i),
)
else:
raise Exception("unrecognized dataset source type '{}'".format(
datasets[i]['type']))
# TODO All of the software installation should be baked into an AMI instead,
# this is pretty slow.
return """#!/bin/bash
touch /home/ubuntu/.milano_keep_alive
chmod 777 /home/ubuntu/.milano_keep_alive
eval "while true; do find /home/ubuntu/.milano_keep_alive -mmin +5 -exec shutdown -h now {} + && sleep 10; done" &>/dev/null &disown;
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
curl -s -L https://nvidia.github.io/nvidia-docker/ubuntu16.04/amd64/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
groupadd docker
usermod -aG docker ubuntu
apt-get update
apt-get install -y awscli
""" + dataset_mounts + """
apt-get install -y docker-ce
apt-get install -y nvidia-docker2
apt-get install -y nvidia-384
modprobe nvidia
systemctl restart docker
"""
class EC2InstanceManager:
def __init__(self, count, key_name, private_key_path, region_name,
spot_instances, datasets, iam_role, user_params):
self._desired_count = count
self._key_name = key_name
self._private_key_path = private_key_path
self._region_name = region_name
self._spot_instances = spot_instances
self._datasets = datasets
self._iam_role = iam_role
self._user_params = user_params
self._instances = {}
self._active_instance_ids = []
self._thread = None
self._lock = threading.Lock()
self._stop_event = threading.Event()
self._thread = threading.Thread(target=self._management_thread_main)
self._thread.start()
def _ami_for_region(self):
# ubuntu 16.04 HVM SSD
ami = {
"us-east-1": "ami-5c150e23",
"us-west-1": "ami-4d6a852e",
"ap-northeast-1": "ami-e5b3ca08",
"sa-east-1": "ami-01316f8dfe32c01e2",
"ap-southeast-1": "ami-01fde464a811ead8a",
"ca-central-1": "ami-4975f82d",
"ap-south-1": "ami-0dcc9657fd6ff85bc",
"eu-central-1": "ami-9fbfb174",
"eu-west-1": "ami-0a8458313ef39d6f6",
"cn-north-1": "ami-0510c868",
"cn-northwest-1": "ami-f96c7b9b",
"us-gov-west-1": "ami-3a4dd15b",
"ap-northeast-2": "ami-09960a24a97b8087b",
"ap-southeast-2": "ami-fc26869e",
"us-west-2": "ami-529fb82a",
"us-east-2": "ami-0eb3ba416aed8a6a4",
"eu-west-2": "ami-52d12435",
"ap-northeast-3": "ami-0d5d86281edca346f",
"eu-west-3": "ami-0a06fa501d424d43f"
}
return ami.get(self._region_name, "")
def _launch(self, launch_count):
s = boto3.Session(region_name=self._region_name)
iam_client = s.client('iam')
iam = s.resource("iam")
ec2 = s.resource("ec2")
# unique role per dataset config
if self._iam_role is None:
self._iam_role, _ = get_or_create_role(
"milano-" + sha1short(json.dumps(self._datasets)),
self._datasets, iam, iam_client)
profile_name, _ = get_or_create_instance_profile(
self._iam_role + "-ip", self._iam_role, iam)
sg_id = get_or_create_ssh_security_group("milano-worker-ssh", ec2)
create_params = {
'InstanceType': "p3.2xlarge",
'ImageId': self._ami_for_region(),
'KeyName': self._key_name,
'MinCount': launch_count,
'MaxCount': launch_count,
'SecurityGroupIds': [sg_id],
'BlockDeviceMappings': [{
"DeviceName": "/dev/xvda",
"Ebs": {
"DeleteOnTermination": True,
# TODO expose this as a top level config option?
"VolumeSize": 64
}
}],
'TagSpecifications': [{
'ResourceType': 'instance',
'Tags': [{
'Key': 'Name',
'Value': 'milano-worker',
}]
}],
"IamInstanceProfile": {
"Name": profile_name,
},
# If ~/.milano_keep_alive isn't touched every 5 minutes, the instance
# will auto terminate.
'InstanceInitiatedShutdownBehavior': "terminate",
'UserData': startup_script(self._datasets),
}
if self._spot_instances:
create_params['InstanceMarketOptions'] = {
'MarketType': 'spot',
'SpotOptions': {
'SpotInstanceType': 'one-time',
'InstanceInterruptionBehavior': 'terminate'
}
}
create_params.update(self._user_params)
instance_resources = ec2.create_instances(**create_params)
with self._lock:
for instance_resource in instance_resources:
self._instances[instance_resource.instance_id] = EC2Instance(
instance_resource, "ubuntu", self._private_key_path)
def active_instance_ids(self):
with self._lock:
return self._active_instance_ids.copy()
def get_instance(self, instance_id):
with self._lock:
return self._instances[instance_id]
def terminate(self):
self._stop_event.set()
self._thread.join()
for _, instance in self._instances.items():
instance.terminate()
def _management_thread_main(self):
while not self._stop_event.is_set():
next_active_ids = []
alive_count = 0
for instance_id, instance in self._instances.items():
if not instance.is_terminated():
alive_count += 1
if instance.is_running():
instance.keep_alive()
if instance.is_driver_working() and instance.datasets_present(
self._datasets):
next_active_ids.append(instance_id)
if alive_count < self._desired_count:
needed_count = self._desired_count - alive_count
print("launching {count} EC2 instances and mounting datasets. this may take a few minutes...".
format(count=needed_count))
try:
self._launch(needed_count)
except Exception as e:
print(e)
pass
with self._lock:
self._active_instance_ids = next_active_ids
time.sleep(10)
def get_or_create_ssh_security_group(name, ec2):
try:
groups = ec2.security_groups.filter(GroupNames=[name])
for group in groups:
return group.group_id
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'InvalidGroup.NotFound':
raise e
# No existing security group, create one.
sg = ec2.create_security_group(Description=name, GroupName=name)
sg.authorize_ingress(
IpProtocol='tcp', CidrIp='0.0.0.0/0', FromPort=22, ToPort=22)
return sg.group_id
def get_or_create_role(name, datasets, iam, client):
try:
role = iam.Role(name)
return role.role_name, role.role_id
except Exception as e:
pass
role = iam.create_role(RoleName=name, AssumeRolePolicyDocument=json.dumps({
"Statement": [{
"Effect": "Allow",
"Principal": {
"Service": ["ec2.amazonaws.com"]
},
"Action": ["sts:AssumeRole"]
}]
}))
for i in range(len(datasets)):
bucket = datasets[i]['bucket']
prefix = datasets[i].get('prefix', "")
resp = client.put_role_policy(
RoleName=name,
PolicyName=name + "-policy-" + str(i),
PolicyDocument=json.dumps({
"Statement":[
{
"Action": ["s3:ListBucket"],
"Effect": "Allow",
"Resource": ["arn:aws:s3:::{}".format(bucket)],
"Condition":{"StringLike":{"s3:prefix":["{}/*".format(prefix)]}}
},
{
"Effect": "Allow",
"Action": ["s3:Get*"],
"Resource": ["arn:aws:s3:::{}/{}*".format(bucket, prefix)]
}
]
}
)
)
return role.role_name, role.role_id
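# Example of how a dataset entry maps to the inline policy created above
# (bucket/prefix values are made up for illustration):
#
#   datasets = [{"type": "s3", "bucket": "my-bucket",
#                "prefix": "imagenet", "mount": "/data"}]
#
# yields, for i == 0, a policy "<role-name>-policy-0" that allows
#   s3:ListBucket on arn:aws:s3:::my-bucket   (restricted to "imagenet/*")
#   s3:Get*       on arn:aws:s3:::my-bucket/imagenet*
# which is exactly what the `aws s3 sync` call in startup_script() needs.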
def get_or_create_instance_profile(name, role, iam):
try:
instance_profile = iam.InstanceProfile(name)
return name, instance_profile.instance_profile_id
except Exception as e:
pass
instance_profile = iam.create_instance_profile(InstanceProfileName=name)
instance_profile.add_role(RoleName=role)
# create_instances will fail if we try to use this instance profile too soon.
time.sleep(10)
return name, instance_profile.instance_profile_id
def sha1short(str):
return hashlib.sha1(str.encode()).hexdigest()[:6] | Milano-master | milano/backends/aws_utils.py |
# Copyright (c) 2018 NVIDIA Corporation
from .base import Backend, JobStatus, RetrievingJobLogsError, KillingJobError, \
IsWorkerAvailableError, GettingJobStatusError, \
LaunchingJobError
from .azkaban_utils import AzkabanManager, commands_to_job, \
strings_to_zipped_file, AzkabanConnectionError
from typing import Iterable
class AzkabanBackend(Backend):
def __init__(self, script_to_run: str, workers_config: Iterable,
url="http://127.0.0.1", port="8081",
username="azkaban", password="azkaban") -> None:
"""Constructor of Azkaban backend class.
In this method, a project named "Milano" will be created with the number of
flows equal to the number of workers. Each flow will have just one job named
"worker-<worker_id>.job" which will set environment variables as described
in workers_config and launch script_to_run, appending the current parameters
via the ${params} argument.
Note that it is the user's responsibility to ensure that script_to_run will
correctly execute with specified parameters in the environment where
Azkaban is launched.
"""
super().__init__(script_to_run, workers_config)
self._azkaban_manager = AzkabanManager()
self._azkaban_manager.connect(url, port, username, password)
self._project_name = "Milano"
# TODO: delete project if exists?
self._azkaban_manager.create_project(
self._project_name, "Milano tuning for {}".format(script_to_run)
)
workers_envs = []
for worker_dict in workers_config:
workers_envs.extend(
[worker_dict["env_vars"]] * worker_dict["num_workers"]
)
with open(script_to_run, "r") as fin:
script_code = fin.read()
strings_dict = {script_to_run: script_code}
for worker_id, worker_envs in enumerate(workers_envs):
job_name = 'worker-{}.job'.format(worker_id)
strings_dict[job_name] = commands_to_job(
[
"chmod +x {}".format(script_to_run),
"./{} ".format(script_to_run) + "${params}",
],
job_name=job_name,
envs=worker_envs,
)
self._azkaban_manager.upload_zip(
self._project_name,
strings_to_zipped_file(strings_dict),
)
self._num_workers = len(workers_envs)
def get_job_status(self, job_info: dict) -> JobStatus:
try:
status = self._azkaban_manager.get_run_status(job_info)
except AzkabanConnectionError as e:
raise GettingJobStatusError(e.message)
# TODO: check other statuses?
if status == "SUCCEEDED":
return JobStatus.SUCCEEDED
elif status == "FAILED":
return JobStatus.FAILED
else:
return JobStatus.RUNNING
def kill_job(self, job_info: dict) -> None:
try:
self._azkaban_manager.kill_flow_execution(job_info)
except AzkabanConnectionError as e:
raise KillingJobError(e.message)
def get_logs_for_job(self, job_info: dict) -> str:
try:
return self._azkaban_manager.get_logs_for_job(job_info)
except AzkabanConnectionError as e:
raise RetrievingJobLogsError(e.message)
def is_worker_available(self, worker_id: int) -> bool:
try:
flow_running = self._azkaban_manager.is_flow_running(
self._project_name, "worker-{}".format(worker_id),
)
except AzkabanConnectionError as e:
raise IsWorkerAvailableError(e.message)
return not flow_running
def launch_job(self, worker_id: int, job_params: str) -> dict:
try:
return self._azkaban_manager.run_flow(
project_name=self._project_name,
flow_id="worker-{}".format(worker_id),
properties=[("params", job_params)],
)
except AzkabanConnectionError as e:
raise LaunchingJobError(e.message)
@property
def num_workers(self):
return self._num_workers
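# A hypothetical workers_config accepted by the constructor above (values
# are illustrative only):
#
#   workers_config = [
#       {"num_workers": 2, "env_vars": ["CUDA_VISIBLE_DEVICES=0"]},
#       {"num_workers": 1, "env_vars": ["CUDA_VISIBLE_DEVICES=1",
#                                       "OMP_NUM_THREADS=4"]},
#   ]
#
# This expands to three Azkaban flows (worker-0 .. worker-2), each with a
# single job that exports its env_vars and runs ./<script_to_run> ${params}.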
| Milano-master | milano/backends/azkaban.py |
# Copyright (c) 2018 NVIDIA Corporation
from .azkaban import AzkabanBackend
from .aws import AWSBackend
from .slurm import SLURMBackend
| Milano-master | milano/backends/__init__.py |
# Copyright (c) 2018 NVIDIA Corporation
import requests
import io
import zipfile
from typing import Iterable, Tuple, Sequence
class AzkabanConnectionError(Exception):
"""An exception that is going to be raised if something fails
with Azkaban access.
"""
def __init__(self, message):
self.message = message
class AzkabanManager:
def __init__(self) -> None:
self.session_id = None
self.url_port = None
def connect(self, url="http://127.0.0.1", port="8081",
username="azkaban", password="azkaban") -> None:
data = {
"action": "login",
"username": username,
"password": password,
}
url_port = '{}:{}'.format(url, port)
response = requests.post(url_port, data=data).json()
if "error" in response:
raise AzkabanConnectionError(response['error'])
self.session_id = response['session.id']
self.url_port = url_port
def _check_connection(self) -> None:
if self.session_id is None:
raise AzkabanConnectionError(
"AzkabanManager is not connected to server. "
"Make sure you ran self.connect()."
)
def create_project(self, name: str, description: str) -> None:
self._check_connection()
data = {
"action": "create",
"session.id": self.session_id,
"name": name,
"description": description,
}
response = requests.post(self.url_port + '/manager', data=data).json()
# TODO: figure out how to handle these warnings that the project exists, since
# we usually don't worry about them, but they will interfere with
# other important logs that are being printed
# if "message" in response:
# print("WARNING: {}".format(response['message']))
def upload_zip(self, project_name: str, zipped_file: io.BytesIO) -> None:
self._check_connection()
data = {
"ajax": "upload",
"session.id": self.session_id,
"project": project_name,
}
files = {"file": ("jobs.zip", zipped_file, "application/zip", {})}
response = requests.post(self.url_port + '/manager',
files=files, data=data).json()
if "error" in response:
raise AzkabanConnectionError(response['error'])
def get_project_flows(self, project_name: str) -> list:
self._check_connection()
data = {
"ajax": "fetchprojectflows",
"session.id": self.session_id,
"project": project_name,
}
response = requests.get(self.url_port + '/manager', params=data).json()
return [flow["flowId"] for flow in response["flows"]]
def run_flow(self, project_name: str, flow_id: str,
properties: Iterable[Tuple[str, str]] = None) -> dict:
self._check_connection()
data = {
"ajax": "executeFlow",
"session.id": self.session_id,
"project": project_name,
"flow": flow_id,
}
for name, value in properties:
data["flowOverride[{}]".format(name)] = value
job_info = requests.get(self.url_port + '/executor', params=data).json()
if "error" in job_info:
raise AzkabanConnectionError(
"Got error for flow {} with properties \"{}\": {}".format(
flow_id, properties, job_info['error'],
)
)
return job_info
def get_logs_for_job(self, job_info: dict) -> str:
# TODO: for now this assumes that there is 1 job in the flow
self._check_connection()
data = {
"ajax": "fetchExecJobLogs",
"session.id": self.session_id,
"execid": job_info["execid"],
"jobId": job_info["flow"],
"offset": 0,
"length": 10000000,
}
response = requests.get(self.url_port + '/executor', params=data)
if response.status_code != 200:
raise AzkabanConnectionError(
'Job "flow={}, exeid={}" returned with status {} and error "{}"'.format(
job_info["flow"], job_info["execid"],
response.status_code, response.reason,
)
)
response = response.json()
if "error" in response:
raise AzkabanConnectionError(response['error'])
return response["data"]
def get_run_status(self, run_info: dict) -> str:
self._check_connection()
data = {
"ajax": "fetchexecflow",
"session.id": self.session_id,
"execid": run_info['execid'],
}
response = requests.get(self.url_port + '/executor', params=data).json()
if "error" in response:
raise AzkabanConnectionError(response['error'])
return response["status"]
def kill_flow_execution(self, run_info: dict) -> None:
self._check_connection()
data = {
"ajax": "cancelFlow",
"session.id": self.session_id,
"execid": run_info['execid'],
}
response = requests.get(self.url_port + '/executor', params=data).json()
if "error" in response:
raise AzkabanConnectionError(response['error'])
def get_flow_executions(self, project_name: str, flow_id: str) -> list:
self._check_connection()
data = {
"ajax": "getRunning",
"session.id": self.session_id,
"project": project_name,
"flow": flow_id,
}
response = requests.get(self.url_port + '/executor', params=data).json()
if "error" in response:
raise AzkabanConnectionError(response['error'])
if "execIds" in response:
return response["execIds"]
else:
return []
def is_flow_running(self, project_name: str, flow_id: str) -> bool:
exec_ids = self.get_flow_executions(project_name, flow_id)
return len(exec_ids) > 0
def strings_to_zipped_file(strings_dict: dict) -> io.BytesIO:
"""name: content"""
zipped_file = io.BytesIO()
with zipfile.ZipFile(zipped_file, 'w') as f:
for name, content in strings_dict.items():
f.writestr(name, content)
zipped_file.seek(0)
return zipped_file
def commands_to_job(commands: Sequence[str], job_name="job.job",
envs=()) -> str:
job = "# {}\n".format(job_name)
for env in envs:
job += "env.{}\n".format(env)
job += "type=command\ncommand={}\n".format(commands[0])
if len(commands) > 1:
for idx, cmd in enumerate(commands[1:]):
job += "command.{}={}".format(idx + 1, cmd)
return job
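if __name__ == "__main__":
  # Minimal sketch of how the two helpers above are combined when building an
  # Azkaban project archive; the job/script names are made up for
  # illustration (the real backend does this in AzkabanBackend.__init__).
  job_text = commands_to_job(
      ["chmod +x run.sh", "./run.sh ${params}"],
      job_name="worker-0.job",
      envs=["CUDA_VISIBLE_DEVICES=0"],
  )
  archive = strings_to_zipped_file({"worker-0.job": job_text})
  print(job_text)
  print("zip size: {} bytes".format(len(archive.getvalue())))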
| Milano-master | milano/backends/azkaban_utils.py |
# Copyright (c) 2018 NVIDIA Corporation
from .aws_utils import EC2InstanceManager
from .base import Backend, JobStatus, RetrievingJobLogsError, \
GettingJobStatusError, KillingJobError, \
IsWorkerAvailableError, LaunchingJobError
class AWSJob:
def __init__(self, worker, container_id):
self._container_id = container_id
self._worker = worker
self._archived = False
self._archived_logs = ""
self._archived_exit_code = ""
def is_running(self):
if self._archived:
return False
try:
return self._exec("sudo docker inspect -f {{.State.Running}} " +
self._container_id).strip() == 'true'
except Exception as e:
# If something went wrong, assume it's not running anymore.
# Should probably retry here.
return False
def exit_code(self):
if self._archived:
return self._archived_exit_code
try:
return int(
self._exec("sudo docker inspect -f {{.State.ExitCode}} " +
self._container_id).strip())
except Exception as e:
# If something went wrong, assume it's not running anymore.
# Should probably retry here.
return -1
def logs(self):
if self._archived:
return self._archived_logs
return self._exec("sudo docker logs " + self._container_id)
def kill(self):
# It would be nice if we could just rely on docker to keep these, but we
# have to prune old containers as we launch new ones, or the instance's EBS
# volume fills up really quickly.
try:
self._archived_exit_code = self.exit_code()
self._archived_logs = self.logs()
except:
self._archived_exit_code = -1
self._archived = True
try:
self._exec("sudo docker kill " + self._container_id)
except Exception as e:
# It's fine if we fail to find the container to kill.
pass
self._worker = None
def _exec(self, command):
exit_code, stdout, stderr = self._worker.exec_command_blocking(command)
if exit_code != 0:
raise Exception(
"remote command failed with exit code {code}: {log}".format(
code=exit_code, log=stderr))
return stdout
class AWSBackend(Backend):
def __init__(self, script_to_run: str, config: dict) -> None:
super().__init__(script_to_run, config)
self._config = config
with open(script_to_run) as fin:
self._script_code = fin.read()
self._datasets = self._config.get('datasets', [])
self._instance_manager = EC2InstanceManager(
count=self.num_workers,
region_name=self._config.get('region_name', "us-west-2"),
key_name=self._config['key_name'],
private_key_path=self._config['private_key_path'],
spot_instances=self._config.get('spot_instances', False),
datasets=self._datasets,
iam_role=self._config.get('iam_role', None),
user_params=self._config.get('instance_params', {}),
)
self._instance_workers = {}
self._worker_instances = [None] * self.num_workers
self._worker_jobs = [-1] * self.num_workers
self._job_workers = {}
self._jobs = {}
self._job_index = 0
def _get_job(self, job_info: int) -> AWSJob:
job = self._jobs.get(job_info)
if job is None:
raise Exception("no active job for id {}".format(job_info))
return job
def get_job_status(self, job_info: int) -> JobStatus:
try:
job = self._get_job(job_info)
if job.is_running():
return JobStatus.RUNNING
elif job.exit_code() == 0:
return JobStatus.SUCCEEDED
else:
return JobStatus.FAILED
except Exception as e:
raise GettingJobStatusError(
"failed to retrieve job status: {}".format(e))
def get_logs_for_job(self, job_info: int) -> str:
try:
job = self._get_job(job_info)
return job.logs()
except Exception as e:
print("error retrieving logs", e)
raise RetrievingJobLogsError("failed to retrieve logs: {}".format(e))
def kill_job(self, job_info: int) -> None:
job = self._get_job(job_info)
job.kill()
self._worker_jobs[self._job_workers[job_info]] = -1
del self._job_workers[job_info]
def _update_worker_instances(self):
# Since the backend API relies on these fixed worker ids, we need a
# dynamic mapping from those to the actual instances, which can be swapped
# in and out.
active_ids = self._instance_manager.active_instance_ids()
active_id_set = set(active_ids)
for i in range(self.num_workers):
if self._worker_instances[i] is not None:
if self._worker_instances[i] not in active_ids and self._worker_jobs[i] == -1:
# This worker is assigned to an inactive instance, and it has no job
# currently running. Free this slot.
del self._instance_workers[self._worker_instances[i]]
self._worker_instances[i] = None
for active_id in active_ids:
if active_id not in self._instance_workers:
# Try to assign a worker slot for this instance, since it's unassigned.
for i in range(self.num_workers):
if self._worker_instances[i] is None:
self._worker_instances[i] = active_id
self._instance_workers[active_id] = i
break
return active_id_set
def is_worker_available(self, worker_id: int) -> bool:
active_instances = self._update_worker_instances()
if self._worker_instances[worker_id] is None:
# This worker slot isn't assigned to an instance, unavailable.
return False
job_id = self._worker_jobs[worker_id]
if job_id == -1:
return self._worker_instances[worker_id] in active_instances
else:
try:
status = self.get_job_status(job_id)
except GettingJobStatusError as e:
raise IsWorkerAvailableError(e.message)
if status == JobStatus.RUNNING:
return False
else:
self.kill_job(job_id)
return self._worker_instances[worker_id] in active_instances
def _worker_exec(self, worker_id, command):
instance = self._instance_manager.get_instance(
self._worker_instances[worker_id])
exit_code, stdout, stderr = instance.exec_command_blocking(command)
if exit_code != 0:
raise Exception(
"remote command failed with exit code {code}: {log}".format(
code=exit_code, log=stderr))
return stdout
def launch_job(self, worker_id: int, params: str) -> int:
if not self.is_worker_available(worker_id):
raise LaunchingJobError("worker busy")
# The command to run inside the docker container.
command = "echo $'{script}' > start_exp.sh && chmod +x start_exp.sh && ./start_exp.sh {params}".format(
script=self._script_code.replace("'", "\\'").replace("\n", "\\n"),
params=params)
# Bind datasets.
volumes = ""
for i in range(len(self._datasets)):
volumes += "-v {src}:{dst} ".format(
src="/home/ubuntu/data/" + str(i),
dst=self._datasets[i]['mount'],
)
# The command for running the docker container.
docker_command = "sudo nvidia-docker run -d {volumes} {docker_image} /bin/bash -c $'{command}'".format(
volumes=volumes,
docker_image=self._config['docker_image_name'],
command=command.replace("'", "\\'"))
try:
# Currently we allow only one job per worker, so it's safe to kill any
# stragglers and purge.
self._worker_exec(worker_id,
"sudo docker kill $(sudo docker ps -q) || true")
self._worker_exec(worker_id, "sudo docker container prune -f")
job_id = self._job_index
self._job_index = self._job_index + 1
print("launching job", job_id, "on worker", worker_id)
# docker run -d returns the container id via stdout
container_id = self._worker_exec(worker_id, docker_command).strip()
instance = self._instance_manager.get_instance(
self._worker_instances[worker_id])
self._jobs[job_id] = AWSJob(instance, container_id)
except Exception as e:
raise LaunchingJobError("failed to launch job on worker {}: {}".format(
worker_id, e))
self._worker_jobs[worker_id] = job_id
self._job_workers[job_id] = worker_id
return job_id
@property
def num_workers(self) -> int:
return self._config['num_workers']
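# Example config dict consumed by this backend (all values are placeholders):
#
#   config = {
#       "num_workers": 2,
#       "region_name": "us-west-2",
#       "key_name": "my-ec2-keypair",
#       "private_key_path": "/home/me/.ssh/my-ec2-keypair.pem",
#       "docker_image_name": "tensorflow/tensorflow:latest-gpu",
#       "spot_instances": True,
#       "datasets": [{"type": "s3", "bucket": "my-bucket",
#                     "prefix": "data", "mount": "/data"}],
#   }
#
# Keys not listed here ("iam_role", "instance_params") fall back to the
# defaults shown in __init__ above.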
| Milano-master | milano/backends/aws.py |
# Copyright (c) 2018 NVIDIA Corporation
import paramiko
import time
class RemoteCommand:
"""Represents a command run via ssh"""
def __init__(self, channel):
self._channel = channel
self._stdout_buffer = ""
self._stderr_buffer = ""
self._exit_code = None
def is_running(self):
return self._exit_code is None and not self._channel.exit_status_ready()
def exit_code(self):
if self.is_running():
return None
if self._exit_code is None:
self._exit_code = self._channel.recv_exit_status()
return self._exit_code
def poll(self):
while self._channel.recv_ready():
chunk = self._channel.recv(1024 * 64)
self._stdout_buffer = self._stdout_buffer + chunk.decode("utf-8")
while self._channel.recv_stderr_ready():
chunk = self._channel.recv_stderr(1024 * 64)
self._stderr_buffer = self._stderr_buffer + chunk.decode("utf-8")
def stdout(self):
self.poll()
return self._stdout_buffer
def stderr(self):
self.poll()
return self._stderr_buffer
def close(self):
self._channel.close()
class SSHClient:
"""SSH client to run commands on the backend."""
def __init__(self, private_key_path):
self.private_key_path = private_key_path
self.client = paramiko.SSHClient()
def connect(self, address, username):
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.client.connect(
address, username=username, key_filename=self.private_key_path)
def exec_command(self, command):
channel = self.client.get_transport().open_session()
channel.exec_command(command)
return RemoteCommand(channel)
def exec_command_blocking(self, command):
rc = self.exec_command(command)
while rc.is_running():
rc.poll()
time.sleep(0.1)
ec = rc.exit_code()
stdout = rc.stdout()
stderr = rc.stderr()
rc.close()
return ec, stdout, stderr
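# Usage sketch (host, user and key path below are placeholders):
#
#   client = SSHClient("/path/to/key.pem")
#   client.connect("203.0.113.10", "ubuntu")
#   exit_code, out, err = client.exec_command_blocking("nvidia-smi -L")
#   client.close()
#
# exec_command() returns a RemoteCommand immediately, while the *_blocking
# variant polls it every 0.1 s until the remote exit status is available.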
def close(self):
self.client.close() | Milano-master | milano/backends/utils.py |
# Copyright (c) 2018 NVIDIA Corporation
"""
This module contains base Backend class and a number of exceptions that
backends can raise.
When defining new backends you must inherit from Backend class. Since it is
important for the backend to be failure-safe, we define a number of "safe"
exceptions here, all inheriting from BackendError, which should be raised by
the corresponding Backend functions when a "safe" error occurs. By that we mean
errors that should not stop execution of ExecutionManager, but rather be
treated as normal failures that require retrying the command that raised
the exception. All other exceptions will not be handled and will generally
break the normal flow of ExecutionManager.
"""
import abc
import six
from enum import Enum
class BackendError(Exception):
"""Base class for exceptions in this module."""
pass
class RetrievingJobLogsError(BackendError):
"""Exception raised for errors occurring while retrieving job logs."""
def __init__(self, message):
self.message = message
class GettingJobStatusError(BackendError):
"""Exception raised for errors occurring while getting job status."""
def __init__(self, message):
self.message = message
class KillingJobError(BackendError):
"""Exception raised for errors occurring while trying to kill job."""
def __init__(self, message):
self.message = message
class LaunchingJobError(BackendError):
"""Exception raised for errors occurring while trying to launch a job."""
def __init__(self, message):
self.message = message
class IsWorkerAvailableError(BackendError):
"""Exception raised for errors occurring while trying to check
if worker is available.
"""
def __init__(self, message):
self.message = message
class JobStatus(Enum):
RUNNING = 0
SUCCEEDED = 1
FAILED = 2
PENDING = 3
KILLED = 4
NOTFOUND = 5
UNKNOWN = 6
@six.add_metaclass(abc.ABCMeta)
class Backend:
def __init__(self, script_to_run: str, workers_config: object) -> None:
self._script_to_run = script_to_run
self._workers_config = workers_config
@abc.abstractmethod
def get_job_status(self, job_info: object) -> JobStatus:
"""This method should take the ``job_info`` as returned from
``self.launch_job`` and return correct JobStatus for that job.
"""
pass
@abc.abstractmethod
def get_logs_for_job(self, job_info: object) -> str:
"""This method should take the ``job_info`` as returned from
``self.launch_job`` and return job logs or raise ``RetrievingLogsError``
exception if something goes wrong. If exception is raised, ExecutionManager
will retry getting logs a few times and indicate that the job failed if
still unsuccessful.
"""
pass
@abc.abstractmethod
def kill_job(self, job_info: object) -> None:
"""This method should kill the job, identified with ``job_info``.
``job_info`` is returned from ``self._launch_job``.
"""
pass
@abc.abstractmethod
def is_worker_available(self, worker_id: int) -> bool:
"""This method takes worker id and should return whether there are any jobs
running on that worker. ``self.launch_job`` will only be executed on a
worker that returned True from this method.
"""
pass
@abc.abstractmethod
def launch_job(self, worker_id: int, params: str) -> object:
"""This method should start a new job on a worker <worker_id> with
parameters specified with ``params`` string. This method does not need to
check if the worker is available since this method is only called after
getting True from ``self.is_worker_available`` function.
"""
pass
@property
@abc.abstractmethod
def num_workers(self) -> int:
"""Total number of workers available."""
pass
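# A new backend therefore only has to map these five methods (plus
# num_workers) onto its scheduler. Illustrative skeleton of a subclass:
#
#   class MyBackend(Backend):
#     def get_job_status(self, job_info): ...
#     def get_logs_for_job(self, job_info): ...
#     def kill_job(self, job_info): ...
#     def is_worker_available(self, worker_id): ...
#     def launch_job(self, worker_id, params): ...
#     @property
#     def num_workers(self): ...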
| Milano-master | milano/backends/base.py |
# Copyright (c) 2017 NVIDIA Corporation
from .random_search import RandomSearch
from .gp.gp_search import GPSearch
| Milano-master | milano/search_algorithms/__init__.py |
# Copyright (c) 2017 NVIDIA Corporation
import numpy as np
from typing import Iterable, Mapping, Optional
from .base import SearchAlgorithm
class RandomSearch(SearchAlgorithm):
def _sample_params(self) -> Mapping:
"""Generates new parameters by random sampling."""
sampled_params = {}
for pm_name, pm_dict in self._params_to_tune.items():
if pm_dict["type"] == "range":
sampled_params[pm_name] = np.random.uniform(
pm_dict["min"], pm_dict["max"],
)
if pm_dict["type"] == "log_range":
sampled_params[pm_name] = np.exp(np.random.uniform(
np.log(pm_dict["min"]), np.log(pm_dict["max"]),
))
if pm_dict["type"] == "values":
sampled_params[pm_name] = np.random.choice(pm_dict["values"])
return sampled_params
def gen_initial_params(self) -> Iterable[Mapping]:
"""Generate all parameters here as all evaluations are independent
from each other.
"""
init_params = super().gen_initial_params()
params = []
for _ in range(self._num_evals):
params.append(self._sample_params())
if init_params is not None:
return init_params + params
else:
return params
def gen_new_params(self,
result: float,
params: Mapping,
evaluation_succeeded: bool) -> Iterable[Optional[Mapping]]:
"""Returning None to signal stop."""
return [None]
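if __name__ == "__main__":
  # Minimal smoke test of the sampler. The search space below is made up for
  # illustration and is not part of the library.
  search = RandomSearch(
      params_to_tune={
          "lr": {"type": "log_range", "min": 1e-5, "max": 1e-1},
          "batch_size": {"type": "values", "values": [32, 64, 128]},
      },
      params_to_try_first=None,
      objective="minimize",
      num_evals=5,
  )
  for sampled in search.gen_initial_params():
    print(sampled)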
| Milano-master | milano/search_algorithms/random_search.py |
# Copyright (c) 2017 NVIDIA Corporation
import abc
import six
import numpy as np
from typing import Iterable, Mapping, Optional
@six.add_metaclass(abc.ABCMeta)
class SearchAlgorithm:
"""All search algorithms in MLQuest must inherit from this."""
def __init__(self,
params_to_tune: Mapping,
params_to_try_first: Mapping,
objective: str,
num_evals: int,
random_seed: int = None) -> None:
"""Base SearchAlgorithm constructor.
Args:
params_to_tune (dict): dictionary with parameters that need to be tuned.
Example::
{
"x1": {"type": "range", "min": 0.0, "max": 100.0},
"x2": {"type": "log_range", "min": 1e-7, "max": 1.0},
"color": {"type": "values", "values": ["red", "green", "blue"]},
}
Supported types are:
* "range": this parameter should be sampled uniformly from
min to max values.
* "log_range": this parameter should be sampled "logarithmically"
from min to max values. This means that a uniform value will be
sampled between [log(min), log(max)] and then it will be
exponentiated.
* "values": this parameter can be one of the supplied values.
params_to_try_first (dict): dictionary with configurations to try first
objective (string): "minimize" or "maximize", case insensitive.
num_evals (int): maximum number of evaluations that the algorithm can do.
random_seed (int): random seed to use.
"""
for pm_name, pm_dict in params_to_tune.items():
if "type" not in pm_dict:
raise ValueError('"type" has to be specified for each '
'parameter, not found for "{}"'.format(pm_name))
if pm_dict["type"] not in ["range", "log_range", "values"]:
raise ValueError(
'Unsupported type: "{}" for "{}". '.format(pm_dict["type"], pm_name) +
'type has to be "range", "log_range" or "values"'
)
if pm_dict["type"] == "range" or pm_dict["type"] == "log_range":
if "min" not in pm_dict:
raise ValueError(
'"min" value has to be specified for parameter {}'.format(pm_name)
)
if "max" not in pm_dict:
raise ValueError(
'"max" value has to be specified for parameter {}'.format(pm_name)
)
if pm_dict["type"] == "log_range":
if pm_dict["min"] <= 0:
raise ValueError('"min" value has to be positive '
'when type is "log_range"')
if pm_dict["max"] <= 0:
raise ValueError('"ma" value has to be positive '
'when type is "log_range"')
if pm_dict["type"] == "values":
if "values" not in pm_dict:
raise ValueError(
'"values" has to be specified for parameter {}'.format(pm_name)
)
if len(pm_dict["values"]) == 0:
raise ValueError("No values specified for {}".format(pm_name))
self._params_to_tune = params_to_tune
self._params_to_try_first = params_to_try_first
if self._params_to_try_first is not None:
# TODO check for format correctness
self._pre_configs_counter = len(next(iter(self._params_to_try_first.values())))
else:
self._pre_configs_counter = -1
self._num_evals = num_evals
if objective.lower() not in ["minimize", "maximize"]:
raise ValueError(
'Objective has to be "minimize" or "maximize", '
'but "{}" was provided'.format(objective)
)
self._objective = objective.lower()
self._random_seed = random_seed
np.random.seed(self._random_seed)
#@abc.abstractmethod
def gen_initial_params(self) -> Iterable[Mapping]:
"""This method should return all initial parameters to start the tuning.
Returns:
list of dicts: [{param_name: param_value, ...}, ...]
"""
if self._params_to_try_first is not None:
user_pre_specified_experiments = []
count = len(
next(iter(self._params_to_try_first.values())))
for ind in range(0, count):
one_experiment = {}
for key, value in self._params_to_try_first.items():
one_experiment[key] = value[ind]
user_pre_specified_experiments.append(one_experiment)
return user_pre_specified_experiments
else:
return None
@abc.abstractmethod
def gen_new_params(self,
result: float,
params: Mapping,
evaluation_succeeded: bool) -> Iterable[Optional[Mapping]]:
"""This method should return new parameters to evaluate
based on the last retrieved result.
To indicate that the search is over (which usually happens when
`self._num_evals` values have been tried), the method should return
`None` instead of dictionary with function parameters.
Args:
result (float): the value of the function being optimized.
params (dict): parameters, describing the point at which function was
evaluated. This is the same dictionary as was returned from
`self.gen_initial_params` or `self.gen_new_params`.
evaluation_succeeded (bool): whether the evaluation was successful.
In big experiments some jobs evaluating the function might fail for
various reasons. In this case it is up to the algorithm to decide
if the failed point needs to be re-evaluated or if there are more
promising points to focus on.
Returns:
list of dicts: [{param_name: param_value, ...}, ...]
"""
pass
| Milano-master | milano/search_algorithms/base.py |
Milano-master | milano/search_algorithms/gp/__init__.py |
|
# Copyright (c) 2017 NVIDIA Corporation
# This is a wrapper around the spearmint library. Please note that it imports code licensed under GPL v3.
import numpy as np
import collections
from typing import Iterable, Mapping, Optional
from milano.search_algorithms.base import SearchAlgorithm
from milano.search_algorithms.gp.spearmint.gpei_chooser import GPEIChooser
from milano.search_algorithms.gp.spearmint.utils import GridMap
def hash_dict(dct):
return " ".join("{}={}".format(key, val) for key, val in sorted(dct.items()))
class GPSearch(SearchAlgorithm):
CANDIDATE_STATUS = 0
PENDING_STATUS = 1
COMPLETE_STATUS = 2
def __init__(self,
params_to_tune: Mapping,
params_to_try_first: Mapping,
objective: str,
num_evals: int,
random_seed: int = None,
chooser=None,
chooser_params: Mapping = None,
num_init_jobs=1,
num_jobs_to_launch_each_time=1,
grid_size=1000,
smooth_inf_to=1e7) -> None:
super().__init__(params_to_tune, params_to_try_first,
objective, num_evals, random_seed)
self._num_init_jobs = num_init_jobs
self._num_jobs_to_launch_each_time = num_jobs_to_launch_each_time
self._fixed_params = {}
self._smooth_inf_to = smooth_inf_to
params = []
self._pm_names = []
for pm_name, pm_dict in self._params_to_tune.items():
if pm_dict["type"] == "range":
self._pm_names.append(pm_name)
params.append(collections.OrderedDict([
('name', pm_name),
('type', 'float'),
('min', pm_dict["min"]),
('max', pm_dict["max"]),
('size', 1)
]))
elif pm_dict["type"] == "log_range":
self._pm_names.append(pm_name)
params.append(collections.OrderedDict([
('name', pm_name),
('type', 'log_float'),
('min', pm_dict["min"]),
('max', pm_dict["max"]),
('size', 1)
]))
elif pm_dict["type"] == "values":
if len(pm_dict["values"]) > 1:
self._pm_names.append(pm_name)
params.append(collections.OrderedDict([
('name', pm_name),
('type', 'enum'),
('options', pm_dict["values"]),
('size', 1)
]))
else:
self._fixed_params[pm_name] = pm_dict["values"][0]
if chooser is None:
self._chooser = GPEIChooser(noiseless=True)
else:
if chooser_params is None:
chooser_params = {}
self._chooser = chooser(**chooser_params)
self._gmap = GridMap(params, grid_size)
# the seed has to be explicitly set to a number for the Sobol sequence
if random_seed is None:
random_seed = np.random.randint(100000)
self._grid = self._gmap.hypercube_grid(grid_size, random_seed)
self._values = np.zeros(grid_size) + np.inf
self._durations = np.zeros(grid_size) + np.inf
self._status = np.zeros(grid_size) + GPSearch.CANDIDATE_STATUS
self._params_to_id = {}
self._evals_count = 0
def _add_to_grid(self, candidate):
# Checks to prevent numerical over/underflow from corrupting the grid
candidate[candidate > 1.0] = 1.0
candidate[candidate < 0.0] = 0.0
# Set up the grid
self._grid = np.vstack((self._grid, candidate))
self._status = np.append(
self._status,
np.zeros(1, dtype=int) + GPSearch.CANDIDATE_STATUS,
)
self._values = np.append(self._values, np.zeros(1) + np.inf)
self._durations = np.append(self._durations, np.zeros(1) + np.inf)
return self._grid.shape[0] - 1
def _get_new_point(self) -> Mapping:
job_id = self._chooser.next(
self._grid, self._values, self._durations,
np.nonzero(self._status == GPSearch.CANDIDATE_STATUS)[0],
np.nonzero(self._status == GPSearch.PENDING_STATUS)[0],
np.nonzero(self._status == GPSearch.COMPLETE_STATUS)[0],
)
# spearmint can return tuple when it decides to add new points to the grid
if isinstance(job_id, tuple):
(job_id, candidate) = job_id
job_id = self._add_to_grid(candidate)
candidate = self._grid[job_id]
self._status[job_id] = GPSearch.PENDING_STATUS
cur_params = dict(zip(self._pm_names, self._gmap.unit_to_list(candidate)))
cur_params.update(self._fixed_params)
# if we ever generate the same parameters again, we want to remember all of
# them and then take an arbitrary grid id, since they will all point to the
# same point in our search space
pm_hash = hash_dict(cur_params)
if pm_hash not in self._params_to_id:
self._params_to_id[pm_hash] = []
self._params_to_id[pm_hash].append(job_id)
self._evals_count += 1
return cur_params
def gen_initial_params(self) -> Iterable[Mapping]:
init_params = super().gen_initial_params()
params = []
for _ in range(min(self._num_evals, self._num_init_jobs)):
params.append(self._get_new_point())
if init_params is not None:
return init_params + params
else:
return params
def gen_new_params(self,
result: float,
params: Mapping,
evaluation_succeeded: bool) -> Iterable[Optional[Mapping]]:
if self._evals_count == self._num_evals:
return [None]
idx = self._params_to_id[hash_dict(params)].pop()
if evaluation_succeeded:
self._status[idx] = GPSearch.COMPLETE_STATUS
if self._objective == "maximize":
result = -result
# smoothing out infinities that can arise from constraints failure
if np.isinf(result):
result = self._smooth_inf_to
self._values[idx] = result
else:
# if not succeeded, marking point as a potential candidate again
self._status[idx] = GPSearch.CANDIDATE_STATUS
params = []
for _ in range(self._num_jobs_to_launch_each_time):
params.append(self._get_new_point())
return params
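# Bookkeeping summary for the class above (descriptive comment only):
#   self._grid   -- candidate points in the unit hypercube (Sobol sequence)
#   self._status -- CANDIDATE / PENDING / COMPLETE flag per grid point
#   self._values -- observed objective values (np.inf until COMPLETE)
# gen_new_params() marks the finished point COMPLETE (or back to CANDIDATE on
# failure) and then asks the chooser for the next candidate(s) with the
# highest expected improvement.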
| Milano-master | milano/search_algorithms/gp/gp_search.py |
# This code was modified to be compatible with NVAML project
import math
from numpy import *
def i4_bit_hi1 ( n ):
#*****************************************************************************80
#
## I4_BIT_HI1 returns the position of the high 1 bit base 2 in an integer.
#
# Example:
#
# N Binary BIT
# ---- -------- ----
# 0 0 0
# 1 1 1
# 2 10 2
# 3 11 2
# 4 100 3
# 5 101 3
# 6 110 3
# 7 111 3
# 8 1000 4
# 9 1001 4
# 10 1010 4
# 11 1011 4
# 12 1100 4
# 13 1101 4
# 14 1110 4
# 15 1111 4
# 16 10000 5
# 17 10001 5
# 1023 1111111111 10
# 1024 10000000000 11
# 1025 10000000001 11
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 22 February 2011
#
# Author:
#
# Original MATLAB version by John Burkardt.
# PYTHON version by Corrado Chisari
#
# Parameters:
#
# Input, integer N, the integer to be measured.
# N should be nonnegative. If N is nonpositive, the value will always be 0.
#
# Output, integer BIT, the number of bits base 2.
#
i = math.floor ( n )
bit = 0
while ( 1 ):
if ( i <= 0 ):
break
bit += 1
i = math.floor ( i / 2. )
return bit
def i4_bit_lo0 ( n ):
#*****************************************************************************80
#
## I4_BIT_LO0 returns the position of the low 0 bit base 2 in an integer.
#
# Example:
#
# N Binary BIT
# ---- -------- ----
# 0 0 1
# 1 1 2
# 2 10 1
# 3 11 3
# 4 100 1
# 5 101 2
# 6 110 1
# 7 111 4
# 8 1000 1
# 9 1001 2
# 10 1010 1
# 11 1011 3
# 12 1100 1
# 13 1101 2
# 14 1110 1
# 15 1111 5
# 16 10000 1
# 17 10001 2
# 1023 1111111111 1
# 1024 10000000000 1
# 1025 10000000001 1
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 22 February 2011
#
# Author:
#
# Original MATLAB version by John Burkardt.
# PYTHON version by Corrado Chisari
#
# Parameters:
#
# Input, integer N, the integer to be measured.
# N should be nonnegative.
#
# Output, integer BIT, the position of the low 1 bit.
#
bit = 0
i = math.floor ( n )
while ( 1 ):
bit = bit + 1
i2 = math.floor ( i / 2. )
if ( i == 2 * i2 ):
break
i = i2
return bit
def i4_sobol_generate ( m, n, skip ):
#*****************************************************************************80
#
## I4_SOBOL_GENERATE generates a Sobol dataset.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 22 February 2011
#
# Author:
#
# Original MATLAB version by John Burkardt.
# PYTHON version by Corrado Chisari
#
# Parameters:
#
# Input, integer M, the spatial dimension.
#
# Input, integer N, the number of points to generate.
#
# Input, integer SKIP, the number of initial points to skip.
#
# Output, real R(M,N), the points.
#
r=zeros((m,n))
for j in range (1, n+1):
seed = skip + j - 2
[ r[0:m,j-1], seed ] = i4_sobol ( m, seed )
return r
def i4_sobol ( dim_num, seed ):
#*****************************************************************************80
#
## I4_SOBOL generates a new quasirandom Sobol vector with each call.
#
# Discussion:
#
# The routine adapts the ideas of Antonov and Saleev.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 22 February 2011
#
# Author:
#
# Original FORTRAN77 version by Bennett Fox.
# MATLAB version by John Burkardt.
# PYTHON version by Corrado Chisari
#
# Reference:
#
# Antonov, Saleev,
# USSR Computational Mathematics and Mathematical Physics,
# Volume 19, 1980, pages 252 - 256.
#
# Paul Bratley, Bennett Fox,
# Algorithm 659:
# Implementing Sobol's Quasirandom Sequence Generator,
# ACM Transactions on Mathematical Software,
# Volume 14, Number 1, pages 88-100, 1988.
#
# Bennett Fox,
# Algorithm 647:
# Implementation and Relative Efficiency of Quasirandom
# Sequence Generators,
# ACM Transactions on Mathematical Software,
# Volume 12, Number 4, pages 362-376, 1986.
#
# Ilya Sobol,
# USSR Computational Mathematics and Mathematical Physics,
# Volume 16, pages 236-242, 1977.
#
# Ilya Sobol, Levitan,
# The Production of Points Uniformly Distributed in a Multidimensional
# Cube (in Russian),
# Preprint IPM Akad. Nauk SSSR,
# Number 40, Moscow 1976.
#
# Parameters:
#
# Input, integer DIM_NUM, the number of spatial dimensions.
# DIM_NUM must satisfy 1 <= DIM_NUM <= 40.
#
# Input/output, integer SEED, the "seed" for the sequence.
# This is essentially the index in the sequence of the quasirandom
# value to be generated. On output, SEED has been set to the
# appropriate next value, usually simply SEED+1.
# If SEED is less than 0 on input, it is treated as though it were 0.
# An input value of 0 requests the first (0-th) element of the sequence.
#
# Output, real QUASI(DIM_NUM), the next quasirandom vector.
#
global atmost
global dim_max
global dim_num_save
global initialized
global lastq
global log_max
global maxcol
global poly
global recipd
global seed_save
global v
if ( not 'initialized' in globals().keys() ):
initialized = 0
dim_num_save = -1
if ( not initialized or dim_num != dim_num_save ):
initialized = 1
dim_max = 40
dim_num_save = -1
log_max = 30
seed_save = -1
#
# Initialize (part of) V.
#
v = zeros((dim_max,log_max))
v[0:40,0] = transpose([ \
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ])
v[2:40,1] = transpose([ \
1, 3, 1, 3, 1, 3, 3, 1, \
3, 1, 3, 1, 3, 1, 1, 3, 1, 3, \
1, 3, 1, 3, 3, 1, 3, 1, 3, 1, \
3, 1, 1, 3, 1, 3, 1, 3, 1, 3 ])
v[3:40,2] = transpose([ \
7, 5, 1, 3, 3, 7, 5, \
5, 7, 7, 1, 3, 3, 7, 5, 1, 1, \
5, 3, 3, 1, 7, 5, 1, 3, 3, 7, \
5, 1, 1, 5, 7, 7, 5, 1, 3, 3 ])
v[5:40,3] = transpose([ \
1, 7, 9,13,11, \
1, 3, 7, 9, 5,13,13,11, 3,15, \
5, 3,15, 7, 9,13, 9, 1,11, 7, \
5,15, 1,15,11, 5, 3, 1, 7, 9 ])
v[7:40,4] = transpose([ \
9, 3,27, \
15,29,21,23,19,11,25, 7,13,17, \
1,25,29, 3,31,11, 5,23,27,19, \
21, 5, 1,17,13, 7,15, 9,31, 9 ])
v[13:40,5] = transpose([ \
37,33, 7, 5,11,39,63, \
27,17,15,23,29, 3,21,13,31,25, \
9,49,33,19,29,11,19,27,15,25 ])
v[19:40,6] = transpose([ \
13, \
33,115, 41, 79, 17, 29,119, 75, 73,105, \
7, 59, 65, 21, 3,113, 61, 89, 45,107 ])
v[37:40,7] = transpose([ \
7, 23, 39 ])
#
# Set POLY.
#
poly= [ \
1, 3, 7, 11, 13, 19, 25, 37, 59, 47, \
61, 55, 41, 67, 97, 91, 109, 103, 115, 131, \
193, 137, 145, 143, 241, 157, 185, 167, 229, 171, \
213, 191, 253, 203, 211, 239, 247, 285, 369, 299 ]
atmost = 2**log_max - 1
#
# Find the number of bits in ATMOST.
#
maxcol = i4_bit_hi1 ( atmost )
#
# Initialize row 1 of V.
#
v[0,0:maxcol] = 1
#
# Things to do only if the dimension changed.
#
if ( dim_num != dim_num_save ):
#
# Check parameters.
#
if ( dim_num < 1 or dim_max < dim_num ):
print('I4_SOBOL - Fatal error!')
print(' The spatial dimension DIM_NUM should satisfy:' )
print(' 1 <= DIM_NUM <= %d'%dim_max)
print(' But this input value is DIM_NUM = %d'%dim_num)
return
dim_num_save = dim_num
#
# Initialize the remaining rows of V.
#
for i in range(2 , dim_num+1):
#
# The bits of the integer POLY(I) gives the form of polynomial I.
#
# Find the degree of polynomial I from binary encoding.
#
j = poly[i-1]
m = 0
while ( 1 ):
j = math.floor ( j / 2. )
if ( j <= 0 ):
break
m = m + 1
#
# Expand this bit pattern to separate components of the logical array INCLUD.
#
j = poly[i-1]
includ=zeros(m)
for k in range(m, 0, -1):
j2 = math.floor ( j / 2. )
includ[k-1] = (j != 2 * j2 )
j = j2
#
# Calculate the remaining elements of row I as explained
# in Bratley and Fox, section 2.
#
for j in range( m+1, maxcol+1 ):
newv = v[i-1,j-m-1]
l = 1
for k in range(1, m+1):
l = 2 * l
if ( includ[k-1] ):
newv = bitwise_xor ( int(newv), int(l * v[i-1,j-k-1]) )
v[i-1,j-1] = newv
#
# Multiply columns of V by appropriate power of 2.
#
l = 1
for j in range( maxcol-1, 0, -1):
l = 2 * l
v[0:dim_num,j-1] = v[0:dim_num,j-1] * l
#
# RECIPD is 1/(common denominator of the elements in V).
#
recipd = 1.0 / ( 2 * l )
lastq=zeros(dim_num)
seed = int(math.floor ( seed ))
if ( seed < 0 ):
seed = 0
if ( seed == 0 ):
l = 1
lastq=zeros(dim_num)
elif ( seed == seed_save + 1 ):
#
# Find the position of the right-hand zero in SEED.
#
l = i4_bit_lo0 ( seed )
elif ( seed <= seed_save ):
seed_save = 0
l = 1
lastq=zeros(dim_num)
for seed_temp in range( int(seed_save), int(seed)):
l = i4_bit_lo0 ( seed_temp )
for i in range(1 , dim_num+1):
lastq[i-1] = bitwise_xor ( int(lastq[i-1]), int(v[i-1,l-1]) )
l = i4_bit_lo0 ( seed )
elif ( seed_save + 1 < seed ):
for seed_temp in range( int(seed_save + 1), int(seed) ):
l = i4_bit_lo0 ( seed_temp )
for i in range(1, dim_num+1):
lastq[i-1] = bitwise_xor ( int(lastq[i-1]), int(v[i-1,l-1]) )
l = i4_bit_lo0 ( seed )
#
# Check that the user is not calling too many times!
#
if ( maxcol < l ):
print('I4_SOBOL - Fatal error!')
print(' Too many calls!')
print(' MAXCOL = %d\n'%maxcol)
print(' L = %d\n'%l)
return
#
# Calculate the new components of QUASI.
#
quasi=zeros(dim_num)
for i in range( 1, dim_num+1):
quasi[i-1] = lastq[i-1] * recipd
lastq[i-1] = bitwise_xor ( int(lastq[i-1]), int(v[i-1,l-1]) )
seed_save = seed
seed = seed + 1
return [ quasi, seed ]
def i4_uniform ( a, b, seed ):
#*****************************************************************************80
#
## I4_UNIFORM returns a scaled pseudorandom I4.
#
# Discussion:
#
# The pseudorandom number will be scaled to be uniformly distributed
# between A and B.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 22 February 2011
#
# Author:
#
# Original MATLAB version by John Burkardt.
# PYTHON version by Corrado Chisari
#
# Reference:
#
# Paul Bratley, Bennett Fox, Linus Schrage,
# A Guide to Simulation,
# Springer Verlag, pages 201-202, 1983.
#
# Pierre L'Ecuyer,
# Random Number Generation,
# in Handbook of Simulation,
# edited by Jerry Banks,
# Wiley Interscience, page 95, 1998.
#
# Bennett Fox,
# Algorithm 647:
# Implementation and Relative Efficiency of Quasirandom
# Sequence Generators,
# ACM Transactions on Mathematical Software,
# Volume 12, Number 4, pages 362-376, 1986.
#
# Peter Lewis, Allen Goodman, James Miller
# A Pseudo-Random Number Generator for the System/360,
# IBM Systems Journal,
# Volume 8, pages 136-143, 1969.
#
# Parameters:
#
# Input, integer A, B, the minimum and maximum acceptable values.
#
# Input, integer SEED, a seed for the random number generator.
#
# Output, integer C, the randomly chosen integer.
#
# Output, integer SEED, the updated seed.
#
if ( seed == 0 ):
print('I4_UNIFORM - Fatal error!')
print(' Input SEED = 0!')
seed = math.floor ( seed )
a = round ( a )
b = round ( b )
seed = mod ( seed, 2147483647 )
if ( seed < 0 ) :
seed = seed + 2147483647
k = math.floor ( seed / 127773 )
seed = 16807 * ( seed - k * 127773 ) - k * 2836
if ( seed < 0 ):
seed = seed + 2147483647
r = seed * 4.656612875E-10
#
# Scale R to lie between A-0.5 and B+0.5.
#
r = ( 1.0 - r ) * ( min ( a, b ) - 0.5 ) + r * ( max ( a, b ) + 0.5 )
#
# Use rounding to convert R to an integer between A and B.
#
value = round ( r )
value = max ( value, min ( a, b ) )
value = min ( value, max ( a, b ) )
c = value
return [ int(c), int(seed) ]
def prime_ge ( n ):
#*****************************************************************************80
#
## PRIME_GE returns the smallest prime greater than or equal to N.
#
#
# Example:
#
# N PRIME_GE
#
# -10 2
# 1 2
# 2 2
# 3 3
# 4 5
# 5 5
# 6 7
# 7 7
# 8 11
# 9 11
# 10 11
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 22 February 2011
#
# Author:
#
# Original MATLAB version by John Burkardt.
# PYTHON version by Corrado Chisari
#
# Parameters:
#
# Input, integer N, the number to be bounded.
#
# Output, integer P, the smallest prime number that is greater
# than or equal to N.
#
p = max ( math.ceil ( n ), 2 )
while ( not isprime ( p ) ):
p = p + 1
return p
def isprime(n):
#*****************************************************************************80
#
## IS_PRIME returns True if N is a prime number, False otherwise
#
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 22 February 2011
#
# Author:
#
# Corrado Chisari
#
# Parameters:
#
# Input, integer N, the number to be checked.
#
# Output, boolean value, True or False
#
if n!=int(n) or n<1:
return False
p=2
while p<n:
if n%p==0:
return False
p+=1
return True
| Milano-master | milano/search_algorithms/gp/spearmint/sobol_lib.py |
##
# Copyright (C) 2012 Jasper Snoek, Hugo Larochelle and Ryan P. Adams
#
# This code is written for research and educational purposes only to
# supplement the paper entitled
# "Practical Bayesian Optimization of Machine Learning Algorithms"
# by Snoek, Larochelle and Adams
# Advances in Neural Information Processing Systems, 2012
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This code was modified to be compatible with NVAML project
"""
Chooser module for the Gaussian process expected improvement
acquisition function. Candidates are sampled densely in the unit
hypercube and then the highest EI point is selected. Slice sampling
is used to sample Gaussian process hyperparameters for the GP.
"""
import numpy as np
import numpy.random as npr
import scipy.linalg as spla
import scipy.stats as sps
from . import gp
from .utils import slice_sample
class GPEIChooser:
def __init__(self, covar="Matern52", mcmc_iters=10,
pending_samples=100, noiseless=False):
self.cov_func = getattr(gp, covar)
self.mcmc_iters = int(mcmc_iters)
self.pending_samples = pending_samples
self.D = -1
self.hyper_iters = 1
self.noiseless = bool(int(noiseless))
self.noise_scale = 0.1 # horseshoe prior
self.amp2_scale = 1 # zero-mean log normal prior
self.max_ls = 2 # top-hat prior on length scales
def _real_init(self, dims, values):
# Input dimensionality.
self.D = dims
# Initial length scales.
self.ls = np.ones(self.D)
# Initial amplitude.
self.amp2 = np.std(values) + 1e-4
# Initial observation noise.
self.noise = 1e-3
# Initial mean.
self.mean = np.mean(values)
def cov(self, x1, x2=None):
if x2 is None:
return self.amp2 * (self.cov_func(self.ls, x1, None)
+ 1e-6 * np.eye(x1.shape[0]))
else:
return self.amp2 * self.cov_func(self.ls, x1, x2)
def next(self, grid, values, durations, candidates, pending, complete):
# Don't bother using fancy GP stuff at first.
if complete.shape[0] < 2:
return int(candidates[0])
# Perform the real initialization.
if self.D == -1:
self._real_init(grid.shape[1], values[complete])
# Grab out the relevant sets.
comp = grid[complete, :]
cand = grid[candidates, :]
pend = grid[pending, :]
vals = values[complete]
if self.mcmc_iters > 0:
# Sample from hyperparameters.
overall_ei = np.zeros((cand.shape[0], self.mcmc_iters))
for mcmc_iter in range(self.mcmc_iters):
self.sample_hypers(comp, vals)
overall_ei[:, mcmc_iter] = self.compute_ei(comp, pend, cand, vals)
best_cand = np.argmax(np.mean(overall_ei, axis=1))
return int(candidates[best_cand])
else:
# Optimize hyperparameters
try:
self.optimize_hypers(comp, vals)
except:
# Initial length scales.
self.ls = np.ones(self.D)
# Initial amplitude.
self.amp2 = np.std(vals)
# Initial observation noise.
self.noise = 1e-3
ei = self.compute_ei(comp, pend, cand, vals)
best_cand = np.argmax(ei)
return int(candidates[best_cand])
def compute_ei(self, comp, pend, cand, vals):
if pend.shape[0] == 0:
# If there are no pending, don't do anything fancy.
# Current best.
best = np.min(vals)
# The primary covariances for prediction.
comp_cov = self.cov(comp)
cand_cross = self.cov(comp, cand)
# Compute the required Cholesky.
obsv_cov = comp_cov + self.noise * np.eye(comp.shape[0])
obsv_chol = spla.cholesky(obsv_cov, lower=True)
# Solve the linear systems.
alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
beta = spla.solve_triangular(obsv_chol, cand_cross, lower=True)
# Predict the marginal means and variances at candidates.
func_m = np.dot(cand_cross.T, alpha) + self.mean
func_v = self.amp2 * (1 + 1e-6) - np.sum(beta ** 2, axis=0)
# Expected improvement
func_s = np.sqrt(func_v)
u = (best - func_m) / func_s
ncdf = sps.norm.cdf(u)
npdf = sps.norm.pdf(u)
ei = func_s * (u * ncdf + npdf)
return ei
else:
# If there are pending experiments, fantasize their outcomes.
# Create a composite vector of complete and pending.
comp_pend = np.concatenate((comp, pend))
# Compute the covariance and Cholesky decomposition.
comp_pend_cov = self.cov(comp_pend) + self.noise * np.eye(
comp_pend.shape[0])
comp_pend_chol = spla.cholesky(comp_pend_cov, lower=True)
# Compute submatrices.
pend_cross = self.cov(comp, pend)
pend_kappa = self.cov(pend)
# Use the sub-Cholesky.
obsv_chol = comp_pend_chol[:comp.shape[0], :comp.shape[0]]
# Solve the linear systems.
alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
beta = spla.cho_solve((obsv_chol, True), pend_cross)
# Finding predictive means and variances.
pend_m = np.dot(pend_cross.T, alpha) + self.mean
pend_K = pend_kappa - np.dot(pend_cross.T, beta)
# Take the Cholesky of the predictive covariance.
pend_chol = spla.cholesky(pend_K, lower=True)
# Make predictions.
pend_fant = (
np.dot(pend_chol, npr.randn(pend.shape[0], self.pending_samples))
+ pend_m[:, None])
# Include the fantasies.
fant_vals = np.concatenate((np.tile(vals[:, np.newaxis],
(1, self.pending_samples)),
pend_fant))
# Compute bests over the fantasies.
bests = np.min(fant_vals, axis=0)
# Now generalize from these fantasies.
cand_cross = self.cov(comp_pend, cand)
# Solve the linear systems.
alpha = spla.cho_solve((comp_pend_chol, True), fant_vals - self.mean)
beta = spla.solve_triangular(comp_pend_chol, cand_cross, lower=True)
# Predict the marginal means and variances at candidates.
func_m = np.dot(cand_cross.T, alpha) + self.mean
func_v = self.amp2 * (1 + 1e-6) - np.sum(beta ** 2, axis=0)
# Expected improvement
func_s = np.sqrt(func_v[:, np.newaxis])
u = (bests[np.newaxis, :] - func_m) / func_s
ncdf = sps.norm.cdf(u)
npdf = sps.norm.pdf(u)
ei = func_s * (u * ncdf + npdf)
return np.mean(ei, axis=1)
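# Both branches above implement the closed-form expected improvement
#   u  = (best - mu) / sigma
#   EI = sigma * (u * Phi(u) + phi(u))
# with Phi/phi the standard normal CDF/PDF; when jobs are pending, EI is
# averaged over outcomes fantasized from the GP posterior.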
def sample_hypers(self, comp, vals):
if self.noiseless:
self.noise = 1e-3
self._sample_noiseless(comp, vals)
else:
self._sample_noisy(comp, vals)
self._sample_ls(comp, vals)
def _sample_ls(self, comp, vals):
def logprob(ls):
if np.any(ls < 0) or np.any(ls > self.max_ls):
return -np.inf
cov = self.amp2 * (self.cov_func(ls, comp, None) + 1e-6 * np.eye(
comp.shape[0])) + self.noise * np.eye(comp.shape[0])
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), vals - self.mean)
lp = -np.sum(np.log(np.diag(chol))) - 0.5 * np.dot(vals - self.mean,
solve)
return lp
self.ls = slice_sample(self.ls, logprob, compwise=True)
def _sample_noisy(self, comp, vals):
def logprob(hypers):
mean = hypers[0]
amp2 = hypers[1]
noise = hypers[2]
# This is pretty hacky, but keeps things sane.
if mean > np.max(vals) or mean < np.min(vals):
return -np.inf
if amp2 < 0 or noise < 0:
return -np.inf
cov = amp2 * (self.cov_func(self.ls, comp, None) +
1e-6 * np.eye(comp.shape[0])) + noise * np.eye(
comp.shape[0])
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), vals - mean)
lp = -np.sum(np.log(np.diag(chol))) - 0.5 * np.dot(vals - mean, solve)
# Roll in noise horseshoe prior.
lp += np.log(np.log(1 + (self.noise_scale / noise) ** 2))
# Roll in amplitude lognormal prior
lp -= 0.5 * (np.log(amp2) / self.amp2_scale) ** 2
return lp
hypers = slice_sample(np.array([self.mean, self.amp2, self.noise]),
logprob, compwise=False)
self.mean = hypers[0]
self.amp2 = hypers[1]
self.noise = hypers[2]
def _sample_noiseless(self, comp, vals):
def logprob(hypers):
mean = hypers[0]
amp2 = hypers[1]
noise = 1e-3
if amp2 < 0:
return -np.inf
cov = amp2 * (self.cov_func(self.ls, comp, None) +
1e-6 * np.eye(comp.shape[0])) + noise * np.eye(
comp.shape[0])
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), vals - mean)
lp = -np.sum(np.log(np.diag(chol))) - 0.5 * np.dot(vals - mean, solve)
# Roll in amplitude lognormal prior
lp -= 0.5 * (np.log(amp2) / self.amp2_scale) ** 2
return lp
hypers = slice_sample(np.array([self.mean, self.amp2, self.noise]), logprob,
compwise=False)
self.mean = hypers[0]
self.amp2 = hypers[1]
self.noise = 1e-3
def optimize_hypers(self, comp, vals):
mygp = gp.GP(self.cov_func.__name__)
mygp.real_init(comp.shape[1], vals)
mygp.optimize_hypers(comp, vals)
self.mean = mygp.mean
self.ls = mygp.ls
self.amp2 = mygp.amp2
self.noise = mygp.noise | Milano-master | milano/search_algorithms/gp/spearmint/gpei_chooser.py |
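# NOTE (illustrative aside, not part of the original Milano sources):
# compute_ei() above uses the closed-form expected improvement for a
# Gaussian predictive distribution: with u = (best - mu) / sigma,
#   EI = sigma * (u * Phi(u) + phi(u)),
# where Phi/phi are the standard normal CDF/PDF. A minimal standalone
# sketch; the numbers in the commented call are toy values chosen only
# for illustration.
import numpy as np
import scipy.stats as sps

def expected_improvement(best, func_m, func_v):
    """Closed-form EI (for minimization) from predictive means/variances."""
    func_s = np.sqrt(func_v)              # predictive standard deviation
    u = (best - func_m) / func_s          # standardized improvement
    return func_s * (u * sps.norm.cdf(u) + sps.norm.pdf(u))

# Example call with toy values:
# expected_improvement(0.2, np.array([0.1, 0.3]), np.array([0.01, 0.04]))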
##
# Copyright (C) 2012 Jasper Snoek, Hugo Larochelle and Ryan P. Adams
#
# This code is written for research and educational purposes only to
# supplement the paper entitled
# "Practical Bayesian Optimization of Machine Learning Algorithms"
# by Snoek, Larochelle and Adams
# Advances in Neural Information Processing Systems, 2012
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This code was modified to be compatible with NVAML project
"""
gp.py contains utility functions related to computation in Gaussian processes.
"""
import numpy as np
import scipy.linalg as spla
import scipy.optimize as spo
SQRT_3 = np.sqrt(3.0)
SQRT_5 = np.sqrt(5.0)
def dist2(ls, x1, x2=None):
# Assumes NxD and MxD matrices.
# Compute the squared distance matrix, given length scales.
if x2 is None:
# Find distance with self for x1.
# Rescale.
xx1 = x1 / ls
xx2 = xx1
else:
# Rescale.
xx1 = x1 / ls
xx2 = x2 / ls
r2 = np.maximum(-(np.dot(xx1, 2*xx2.T)
- np.sum(xx1*xx1, axis=1)[:,np.newaxis]
- np.sum(xx2*xx2, axis=1)[:,np.newaxis].T), 0.0)
return r2
def grad_dist2(ls, x1, x2=None):
if x2 is None:
x2 = x1
# Rescale.
x1 = x1 / ls
x2 = x2 / ls
return (x1[:, np.newaxis, :] - x2[np.newaxis, :, :]) * 2.0 / ls
def SE(ls, x1, x2=None, grad=False):
ls = np.ones(ls.shape)
cov = np.exp(-0.5 * dist2(ls, x1, x2))
if grad:
return (cov, grad_ARDSE(ls, x1, x2))
else:
return cov
def ARDSE(ls, x1, x2=None, grad=False):
cov = np.exp(-0.5 * dist2(ls, x1, x2))
if grad:
return (cov, grad_ARDSE(ls, x1, x2))
else:
return cov
def grad_ARDSE(ls, x1, x2=None):
r2 = dist2(ls, x1, x2)
r = np.sqrt(r2)
return -0.5*np.exp(-0.5*r2)[:,:,np.newaxis] * grad_dist2(ls, x1, x2)
def Matern32(ls, x1, x2=None, grad=False):
r = np.sqrt(dist2(ls, x1, x2))
cov = (1 + SQRT_3*r) * np.exp(-SQRT_3*r)
if grad:
return (cov, grad_Matern32(ls, x1, x2))
else:
return cov
def grad_Matern32(ls, x1, x2=None):
r = np.sqrt(dist2(ls, x1, x2))
grad_r2 = -1.5*np.exp(-SQRT_3*r)
return grad_r2[:,:,np.newaxis] * grad_dist2(ls, x1, x2)
def Matern52(ls, x1, x2=None, grad=False):
r2 = np.abs(dist2(ls, x1, x2))
r = np.sqrt(r2)
cov = (1.0 + SQRT_5*r + (5.0/3.0)*r2) * np.exp(-SQRT_5*r)
if grad:
return (cov, grad_Matern52(ls, x1, x2))
else:
return cov
def grad_Matern52(ls, x1, x2=None):
r = np.sqrt(dist2(ls, x1, x2))
grad_r2 = -(5.0/6.0)*np.exp(-SQRT_5*r)*(1 + SQRT_5*r)
return grad_r2[:,:,np.newaxis] * grad_dist2(ls, x1, x2)
class GP:
def __init__(self, covar="Matern52", mcmc_iters=10, noiseless=False):
self.cov_func = globals()[covar]
self.mcmc_iters = int(mcmc_iters)
self.D = -1
self.hyper_iters = 1
self.noiseless = bool(int(noiseless))
self.hyper_samples = []
self.noise_scale = 0.1 # horseshoe prior
self.amp2_scale = 1 # zero-mean log normal prior
self.max_ls = 2 # top-hat prior on length scales
def real_init(self, dims, values):
# Input dimensionality.
self.D = dims
# Initial length scales.
self.ls = np.ones(self.D)
# Initial amplitude.
self.amp2 = np.std(values)
# Initial observation noise.
self.noise = 1e-3
# Initial mean.
self.mean = np.mean(values)
def cov(self, x1, x2=None):
if x2 is None:
return self.amp2 * (self.cov_func(self.ls, x1, None)
+ 1e-6*np.eye(x1.shape[0]))
else:
return self.amp2 * self.cov_func(self.ls, x1, x2)
def logprob(self, comp, vals):
mean = self.mean
amp2 = self.amp2
noise = self.noise
cov = amp2 * (self.cov_func(self.ls, comp, None) + 1e-6*np.eye(comp.shape[0])) + noise*np.eye(comp.shape[0])
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), vals - mean)
lp = -np.sum(np.log(np.diag(chol)))-0.5*np.dot(vals-mean, solve)
return lp
def optimize_hypers(self, comp, vals):
self.mean = np.mean(vals)
diffs = vals - self.mean
state = { }
def jitter_chol(covmat):
passed = False
jitter = 1e-8
val = 0
while not passed:
if (jitter > 100000):
val = spla.cholesky(np.eye(covmat.shape[0]))
break
try:
val = spla.cholesky(covmat +
jitter*np.eye(covmat.shape[0]), lower=True)
passed = True
except ValueError:
jitter = jitter*1.1
print("Covariance matrix not PSD, adding jitter:", jitter)
passed = False
return val
def memoize(amp2, noise, ls):
if ( 'corr' not in state
or state['amp2'] != amp2
or state['noise'] != noise
or np.any(state['ls'] != ls)):
# Get the correlation matrix
(corr, grad_corr) = self.cov_func(ls, comp, None, grad=True)
# Scale and add noise & jitter.
covmat = (amp2 * (corr + 1e-6*np.eye(comp.shape[0]))
+ noise * np.eye(comp.shape[0]))
# Memoize
state['corr'] = corr
state['grad_corr'] = grad_corr
state['chol'] = jitter_chol(covmat)
state['amp2'] = amp2
state['noise'] = noise
state['ls'] = ls
return (state['chol'], state['corr'], state['grad_corr'])
def nlogprob(hypers):
amp2 = np.exp(hypers[0])
noise = np.exp(hypers[1])
ls = np.exp(hypers[2:])
chol = memoize(amp2, noise, ls)[0]
solve = spla.cho_solve((chol, True), diffs)
lp = -np.sum(np.log(np.diag(chol)))-0.5*np.dot(diffs, solve)
return -lp
def grad_nlogprob(hypers):
amp2 = np.exp(hypers[0])
noise = np.exp(hypers[1])
ls = np.exp(hypers[2:])
chol, corr, grad_corr = memoize(amp2, noise, ls)
solve = spla.cho_solve((chol, True), diffs)
inv_cov = spla.cho_solve((chol, True), np.eye(chol.shape[0]))
jacobian = np.outer(solve, solve) - inv_cov
grad = np.zeros(self.D + 2)
# Log amplitude gradient.
grad[0] = 0.5 * np.trace(np.dot( jacobian, corr + 1e-6*np.eye(chol.shape[0]))) * amp2
# Log noise gradient.
grad[1] = 0.5 * np.trace(np.dot( jacobian, np.eye(chol.shape[0]))) * noise
# Log length scale gradients.
for dd in range(self.D):
grad[dd+2] = 1 * np.trace(np.dot( jacobian, -amp2*grad_corr[:,:,dd]*comp[:,dd][:,np.newaxis]/(np.exp(ls[dd]))))*np.exp(ls[dd])
# Roll in the prior variance.
#grad -= 2*hypers/self.hyper_prior
return -grad
# Initial length scales.
self.ls = np.ones(self.D)
# Initial amplitude.
self.amp2 = np.std(vals)
# Initial observation noise.
self.noise = 1e-3
hypers = np.zeros(self.ls.shape[0]+2)
hypers[0] = np.log(self.amp2)
hypers[1] = np.log(self.noise)
hypers[2:] = np.log(self.ls)
# Use a bounded bfgs just to prevent the length-scales and noise from
# getting into regions that are numerically unstable
b = [(-10,10),(-10,10)]
for i in range(comp.shape[1]):
b.append((-10,5))
hypers = spo.fmin_l_bfgs_b(nlogprob, hypers, grad_nlogprob, args=(), bounds=b, disp=0)
#hypers = spo.fmin_bfgs(nlogprob, hypers, grad_nlogprob, maxiter=100)
hypers = hypers[0]
#hypers = spo.fmin_bfgs(nlogprob, hypers, grad_nlogprob, maxiter=100)
self.amp2 = np.exp(hypers[0])
self.noise = np.exp(hypers[1])
self.ls = np.exp(hypers[2:])
def main():
try:
import matplotlib.pyplot as plt
except:
pass
# Let's start with some random values
x = np.linspace(0,1,10)[:,np.newaxis]*10#np.random.rand(100)[:,np.newaxis]
y = np.random.randn(10)
mygp = GP(covar='ARDSE')
mygp.real_init(x.shape[1], y)
# Sample some functions given these hyperparameters and plot them
for i in range(0,5):
x = np.linspace(0,1,100)[:,np.newaxis]*10
K = mygp.cov(x)
y = np.random.randn(100)
fsamp = mygp.mean + np.dot(spla.cholesky(K).transpose(), y)
try:
plt.plot(x, fsamp)
except:
pass
print('Loglikelihood before optimizing: ', mygp.logprob(x,y))
mygp.optimize_hypers(x,y)
print('Loglikelihood after optimizing: ', mygp.logprob(x,y))
try:
plt.show()
except:
print('Install matplotlib to get figures')
if __name__ == '__main__':
main()
| Milano-master | milano/search_algorithms/gp/spearmint/gp.py |
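# NOTE (illustrative aside, not part of the original Milano sources):
# GP.logprob() above evaluates the GP log marginal likelihood through a
# Cholesky factor L of the covariance K (so that K = L L^T):
#   log p(y) = -sum(log diag(L)) - 0.5 * (y - m)^T K^{-1} (y - m) + const,
# where the additive constant -(n/2) * log(2 * pi) is dropped because it
# does not affect hyperparameter sampling or optimization. A minimal
# sketch, assuming a generic PSD matrix K, observations y and mean m:
import numpy as np
import scipy.linalg as spla

def gp_log_marginal(K, y, m):
    chol = spla.cholesky(K, lower=True)           # K = L L^T
    solve = spla.cho_solve((chol, True), y - m)   # K^{-1} (y - m)
    return -np.sum(np.log(np.diag(chol))) - 0.5 * np.dot(y - m, solve)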
##
# Copyright (C) 2012 Jasper Snoek, Hugo Larochelle and Ryan P. Adams
#
# This code is written for research and educational purposes only to
# supplement the paper entitled
# "Practical Bayesian Optimization of Machine Learning Algorithms"
# by Snoek, Larochelle and Adams
# Advances in Neural Information Processing Systems, 2012
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This code was modified to be compatible with NVAML project
"""
Chooser module for the Gaussian process expected improvement (EI)
acquisition function where points are sampled densely in the unit
hypercube and then a subset of the points is optimized to maximize EI
over hyperparameter samples. Slice sampling is used to sample
Gaussian process hyperparameters.
"""
import numpy as np
import numpy.random as npr
import scipy.linalg as spla
import scipy.stats as sps
import scipy.optimize as spo
import multiprocessing
import time
import copy
from . import gp
from .utils import slice_sample
def optimize_pt(c, b, comp, pend, vals, model):
ret = spo.fmin_l_bfgs_b(model.grad_optimize_ei_over_hypers,
c.flatten(), args=(comp, pend, vals),
bounds=b, disp=0)
return ret[0]
class GPEIOptChooser:
def __init__(self, covar="Matern52", mcmc_iters=10,
pending_samples=100, noiseless=False, burnin=100,
grid_subset=20, use_multiprocessing=True):
self.cov_func = getattr(gp, covar)
self.mcmc_iters = int(mcmc_iters)
self.burnin = int(burnin)
self.needs_burnin = True
self.pending_samples = int(pending_samples)
self.D = -1
self.hyper_iters = 1
# Number of points to optimize EI over
self.grid_subset = int(grid_subset)
self.noiseless = bool(int(noiseless))
self.hyper_samples = []
self.noise_scale = 0.1 # horseshoe prior
self.amp2_scale = 1 # zero-mean log normal prior
self.max_ls = 2 # top-hat prior on length scales
# If multiprocessing fails or deadlocks, set this to False
self.use_multiprocessing = bool(int(use_multiprocessing))
def _real_init(self, dims, values):
self.randomstate = npr.get_state()
# Input dimensionality.
self.D = dims
# Initial length scales.
self.ls = np.ones(self.D)
# Initial amplitude.
self.amp2 = np.std(values)+1e-4
# Initial observation noise.
self.noise = 1e-3
# Initial mean.
self.mean = np.mean(values)
# Save hyperparameter samples
self.hyper_samples.append((self.mean, self.noise, self.amp2,
self.ls))
def cov(self, x1, x2=None):
if x2 is None:
return self.amp2 * (self.cov_func(self.ls, x1, None)
+ 1e-6*np.eye(x1.shape[0]))
else:
return self.amp2 * self.cov_func(self.ls, x1, x2)
# Given a set of completed 'experiments' in the unit hypercube with
# corresponding objective 'values', pick the next experiment to run
# according to the acquisition function.
def next(self, grid, values, durations,
candidates, pending, complete):
# Don't bother using fancy GP stuff at first.
if complete.shape[0] < 2:
return int(candidates[0])
# Perform the real initialization.
if self.D == -1:
self._real_init(grid.shape[1], values[complete])
# Grab out the relevant sets.
comp = grid[complete,:]
cand = grid[candidates,:]
pend = grid[pending,:]
vals = values[complete]
numcand = cand.shape[0]
# Spray a set of candidates around the min so far
best_comp = np.argmin(vals)
cand2 = np.vstack((np.random.randn(10,comp.shape[1])*0.001 +
comp[best_comp,:], cand))
if self.mcmc_iters > 0:
# Possibly burn in.
if self.needs_burnin:
for mcmc_iter in range(self.burnin):
self.sample_hypers(comp, vals)
self.needs_burnin = False
# Sample from hyperparameters.
# Adjust the candidates to hit ei peaks
self.hyper_samples = []
for mcmc_iter in range(self.mcmc_iters):
self.sample_hypers(comp, vals)
b = []# optimization bounds
for i in range(0, cand.shape[1]):
b.append((0, 1))
overall_ei = self.ei_over_hypers(comp,pend,cand2,vals)
inds = np.argsort(np.mean(overall_ei,axis=1))[-self.grid_subset:]
cand2 = cand2[inds,:]
# Optimize each point in parallel
if self.use_multiprocessing:
pool = multiprocessing.Pool(self.grid_subset)
results = [pool.apply_async(optimize_pt,args=(
c,b,comp,pend,vals,copy.copy(self))) for c in cand2]
for res in results:
cand = np.vstack((cand, res.get(1e8)))
pool.close()
else:
# Serial fallback: optimize each point one at a time (no multiprocessing).
for i in range(0, cand2.shape[0]):
ret = spo.fmin_l_bfgs_b(self.grad_optimize_ei_over_hypers,
cand2[i,:].flatten(), args=(comp,pend,vals),
bounds=b, disp=0)
cand2[i,:] = ret[0]
cand = np.vstack((cand, cand2))
overall_ei = self.ei_over_hypers(comp,pend,cand,vals)
best_cand = np.argmax(np.mean(overall_ei, axis=1))
if (best_cand >= numcand):
return (int(numcand), cand[best_cand,:])
return int(candidates[best_cand])
else:
# Optimize hyperparameters
self.optimize_hypers(comp, vals)
# Optimize over EI
b = []# optimization bounds
for i in range(0, cand.shape[1]):
b.append((0, 1))
for i in range(0, cand2.shape[0]):
ret = spo.fmin_l_bfgs_b(self.grad_optimize_ei,
cand2[i,:].flatten(), args=(comp,vals,True),
bounds=b, disp=0)
cand2[i,:] = ret[0]
cand = np.vstack((cand, cand2))
ei = self.compute_ei(comp, pend, cand, vals)
best_cand = np.argmax(ei)
if (best_cand >= numcand):
return (int(numcand), cand[best_cand,:])
return int(candidates[best_cand])
# Compute EI over hyperparameter samples
def ei_over_hypers(self,comp,pend,cand,vals):
overall_ei = np.zeros((cand.shape[0], self.mcmc_iters))
for mcmc_iter in range(self.mcmc_iters):
hyper = self.hyper_samples[mcmc_iter]
self.mean = hyper[0]
self.noise = hyper[1]
self.amp2 = hyper[2]
self.ls = hyper[3]
overall_ei[:,mcmc_iter] = self.compute_ei(comp, pend, cand,
vals)
return overall_ei
def check_grad_ei(self, cand, comp, pend, vals):
(ei,dx1) = self.grad_optimize_ei_over_hypers(cand, comp, pend, vals)
dx2 = dx1*0
idx = np.zeros(cand.shape[0])
for i in range(0, cand.shape[0]):
idx[i] = 1e-6
(ei1,tmp) = self.grad_optimize_ei_over_hypers(cand + idx, comp, pend, vals)
(ei2,tmp) = self.grad_optimize_ei_over_hypers(cand - idx, comp, pend, vals)
dx2[i] = (ei1 - ei2)/(2*1e-6)
idx[i] = 0
print('computed grads', dx1)
print('finite diffs', dx2)
print(dx1/dx2)
print(np.sum((dx1 - dx2)**2))
time.sleep(2)
# Adjust points by optimizing EI over a set of hyperparameter samples
def grad_optimize_ei_over_hypers(self, cand, comp, pend, vals, compute_grad=True):
summed_ei = 0
summed_grad_ei = np.zeros(cand.shape).flatten()
ls = self.ls.copy()
amp2 = self.amp2
mean = self.mean
noise = self.noise
for hyper in self.hyper_samples:
self.mean = hyper[0]
self.noise = hyper[1]
self.amp2 = hyper[2]
self.ls = hyper[3]
if compute_grad:
(ei,g_ei) = self.grad_optimize_ei(cand,comp,pend,vals,compute_grad)
summed_grad_ei = summed_grad_ei + g_ei
else:
ei = self.grad_optimize_ei(cand,comp,pend,vals,compute_grad)
summed_ei += ei
self.mean = mean
self.amp2 = amp2
self.noise = noise
self.ls = ls.copy()
if compute_grad:
return (summed_ei, summed_grad_ei)
else:
return summed_ei
# Adjust points based on optimizing their ei
def grad_optimize_ei(self, cand, comp, pend, vals, compute_grad=True):
if pend.shape[0] == 0:
best = np.min(vals)
cand = np.reshape(cand, (-1, comp.shape[1]))
# The primary covariances for prediction.
comp_cov = self.cov(comp)
cand_cross = self.cov(comp, cand)
# Compute the required Cholesky.
obsv_cov = comp_cov + self.noise*np.eye(comp.shape[0])
obsv_chol = spla.cholesky(obsv_cov, lower=True)
cov_grad_func = getattr(gp, 'grad_' + self.cov_func.__name__)
cand_cross_grad = cov_grad_func(self.ls, comp, cand)
# Predictive things.
# Solve the linear systems.
alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
beta = spla.solve_triangular(obsv_chol, cand_cross, lower=True)
# Predict the marginal means and variances at candidates.
func_m = np.dot(cand_cross.T, alpha) + self.mean
func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)
# Expected improvement
func_s = np.sqrt(func_v)
u = (best - func_m) / func_s
ncdf = sps.norm.cdf(u)
npdf = sps.norm.pdf(u)
ei = func_s*( u*ncdf + npdf)
if not compute_grad:
return ei
# Gradients of ei w.r.t. mean and variance
g_ei_m = -ncdf
g_ei_s2 = 0.5*npdf / func_s
# Apply covariance function
grad_cross = np.squeeze(cand_cross_grad)
grad_xp_m = np.dot(alpha.transpose(),grad_cross)
grad_xp_v = np.dot(-2*spla.cho_solve(
(obsv_chol, True),cand_cross).transpose(), grad_cross)
grad_xp = 0.5*self.amp2*(grad_xp_m*g_ei_m + grad_xp_v*g_ei_s2)
ei = -np.sum(ei)
return ei, grad_xp.flatten()
else:
# If there are pending experiments, fantasize their outcomes.
cand = np.reshape(cand, (-1, comp.shape[1]))
# Create a composite vector of complete and pending.
comp_pend = np.concatenate((comp, pend))
# Compute the covariance and Cholesky decomposition.
comp_pend_cov = (self.cov(comp_pend) +
self.noise*np.eye(comp_pend.shape[0]))
comp_pend_chol = spla.cholesky(comp_pend_cov, lower=True)
# Compute submatrices.
pend_cross = self.cov(comp, pend)
pend_kappa = self.cov(pend)
# Use the sub-Cholesky.
obsv_chol = comp_pend_chol[:comp.shape[0],:comp.shape[0]]
# Solve the linear systems.
alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
beta = spla.cho_solve((obsv_chol, True), pend_cross)
# Finding predictive means and variances.
pend_m = np.dot(pend_cross.T, alpha) + self.mean
pend_K = pend_kappa - np.dot(pend_cross.T, beta)
# Take the Cholesky of the predictive covariance.
pend_chol = spla.cholesky(pend_K, lower=True)
# Make predictions.
npr.set_state(self.randomstate)
pend_fant = np.dot(pend_chol, npr.randn(pend.shape[0],self.pending_samples)) + pend_m[:,None]
# Include the fantasies.
fant_vals = np.concatenate(
(np.tile(vals[:,np.newaxis],
(1,self.pending_samples)), pend_fant))
# Compute bests over the fantasies.
bests = np.min(fant_vals, axis=0)
# Now generalize from these fantasies.
cand_cross = self.cov(comp_pend, cand)
cov_grad_func = getattr(gp, 'grad_' + self.cov_func.__name__)
cand_cross_grad = cov_grad_func(self.ls, comp_pend, cand)
# Solve the linear systems.
alpha = spla.cho_solve((comp_pend_chol, True),
fant_vals - self.mean)
beta = spla.solve_triangular(comp_pend_chol, cand_cross,
lower=True)
# Predict the marginal means and variances at candidates.
func_m = np.dot(cand_cross.T, alpha) + self.mean
func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)
# Expected improvement
func_s = np.sqrt(func_v[:,np.newaxis])
u = (bests[np.newaxis,:] - func_m) / func_s
ncdf = sps.norm.cdf(u)
npdf = sps.norm.pdf(u)
ei = func_s*( u*ncdf + npdf)
# Gradients of ei w.r.t. mean and variance
g_ei_m = -ncdf
g_ei_s2 = 0.5*npdf / func_s
# Apply covariance function
# Squeeze can break the 1D case, so be careful
if pend.shape[1] == 1:
grad_cross = np.squeeze(cand_cross_grad, axis=(2,))
else:
grad_cross = np.squeeze(cand_cross_grad)
grad_xp_m = np.dot(alpha.transpose(),grad_cross)
grad_xp_v = np.dot(-2*spla.cho_solve(
(comp_pend_chol, True),cand_cross).transpose(), grad_cross)
grad_xp = 0.5*self.amp2*(grad_xp_m*np.tile(g_ei_m,(comp.shape[1],1)).T + (grad_xp_v.T*g_ei_s2).T)
ei = -np.mean(ei, axis=1)
grad_xp = np.mean(grad_xp,axis=0)
return ei, grad_xp.flatten()
def compute_ei(self, comp, pend, cand, vals):
if pend.shape[0] == 0:
# If there are no pending, don't do anything fancy.
# Current best.
best = np.min(vals)
# The primary covariances for prediction.
comp_cov = self.cov(comp)
cand_cross = self.cov(comp, cand)
# Compute the required Cholesky.
obsv_cov = comp_cov + self.noise*np.eye(comp.shape[0])
obsv_chol = spla.cholesky( obsv_cov, lower=True )
# Solve the linear systems.
alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
beta = spla.solve_triangular(obsv_chol, cand_cross, lower=True)
# Predict the marginal means and variances at candidates.
func_m = np.dot(cand_cross.T, alpha) + self.mean
func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)
# Expected improvement
func_s = np.sqrt(func_v)
u = (best - func_m) / func_s
ncdf = sps.norm.cdf(u)
npdf = sps.norm.pdf(u)
ei = func_s*( u*ncdf + npdf)
return ei
else:
# If there are pending experiments, fantasize their outcomes.
# Create a composite vector of complete and pending.
comp_pend = np.concatenate((comp, pend))
# Compute the covariance and Cholesky decomposition.
comp_pend_cov = (self.cov(comp_pend) +
self.noise*np.eye(comp_pend.shape[0]))
comp_pend_chol = spla.cholesky(comp_pend_cov, lower=True)
# Compute submatrices.
pend_cross = self.cov(comp, pend)
pend_kappa = self.cov(pend)
# Use the sub-Cholesky.
obsv_chol = comp_pend_chol[:comp.shape[0],:comp.shape[0]]
# Solve the linear systems.
alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
beta = spla.cho_solve((obsv_chol, True), pend_cross)
# Finding predictive means and variances.
pend_m = np.dot(pend_cross.T, alpha) + self.mean
pend_K = pend_kappa - np.dot(pend_cross.T, beta)
# Take the Cholesky of the predictive covariance.
pend_chol = spla.cholesky(pend_K, lower=True)
# Make predictions.
npr.set_state(self.randomstate)
pend_fant = np.dot(pend_chol, npr.randn(pend.shape[0],self.pending_samples)) + pend_m[:,None]
# Include the fantasies.
fant_vals = np.concatenate(
(np.tile(vals[:,np.newaxis],
(1,self.pending_samples)), pend_fant))
# Compute bests over the fantasies.
bests = np.min(fant_vals, axis=0)
# Now generalize from these fantasies.
cand_cross = self.cov(comp_pend, cand)
# Solve the linear systems.
alpha = spla.cho_solve((comp_pend_chol, True),
fant_vals - self.mean)
beta = spla.solve_triangular(comp_pend_chol, cand_cross,
lower=True)
# Predict the marginal means and variances at candidates.
func_m = np.dot(cand_cross.T, alpha) + self.mean
func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)
# Expected improvement
func_s = np.sqrt(func_v[:,np.newaxis])
u = (bests[np.newaxis,:] - func_m) / func_s
ncdf = sps.norm.cdf(u)
npdf = sps.norm.pdf(u)
ei = func_s*( u*ncdf + npdf)
return np.mean(ei, axis=1)
def sample_hypers(self, comp, vals):
if self.noiseless:
self.noise = 1e-3
self._sample_noiseless(comp, vals)
else:
self._sample_noisy(comp, vals)
self._sample_ls(comp, vals)
self.hyper_samples.append((self.mean, self.noise, self.amp2, self.ls))
def _sample_ls(self, comp, vals):
def logprob(ls):
if np.any(ls < 0) or np.any(ls > self.max_ls):
return -np.inf
cov = (self.amp2 * (self.cov_func(ls, comp, None) +
1e-6*np.eye(comp.shape[0])) + self.noise*np.eye(comp.shape[0]))
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), vals - self.mean)
lp = (-np.sum(np.log(np.diag(chol))) -
0.5*np.dot(vals-self.mean, solve))
return lp
self.ls = slice_sample(self.ls, logprob, compwise=True)
def _sample_noisy(self, comp, vals):
def logprob(hypers):
mean = hypers[0]
amp2 = hypers[1]
noise = hypers[2]
# This is pretty hacky, but keeps things sane.
if mean > np.max(vals) or mean < np.min(vals):
return -np.inf
if amp2 < 0 or noise < 0:
return -np.inf
cov = (amp2 * (self.cov_func(self.ls, comp, None) +
1e-6*np.eye(comp.shape[0])) + noise*np.eye(comp.shape[0]))
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), vals - mean)
lp = -np.sum(np.log(np.diag(chol)))-0.5*np.dot(vals-mean, solve)
# Roll in noise horseshoe prior.
lp += np.log(np.log(1 + (self.noise_scale/noise)**2))
# Roll in amplitude lognormal prior
lp -= 0.5*(np.log(np.sqrt(amp2))/self.amp2_scale)**2
return lp
hypers = slice_sample(np.array(
[self.mean, self.amp2, self.noise]), logprob, compwise=False)
self.mean = hypers[0]
self.amp2 = hypers[1]
self.noise = hypers[2]
def _sample_noiseless(self, comp, vals):
def logprob(hypers):
mean = hypers[0]
amp2 = hypers[1]
noise = 1e-3
# This is pretty hacky, but keeps things sane.
if mean > np.max(vals) or mean < np.min(vals):
return -np.inf
if amp2 < 0:
return -np.inf
cov = (amp2 * (self.cov_func(self.ls, comp, None) +
1e-6*np.eye(comp.shape[0])) + noise*np.eye(comp.shape[0]))
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), vals - mean)
lp = -np.sum(np.log(np.diag(chol)))-0.5*np.dot(vals-mean, solve)
# Roll in amplitude lognormal prior
lp -= 0.5*(np.log(np.sqrt(amp2))/self.amp2_scale)**2
return lp
hypers = slice_sample(np.array(
[self.mean, self.amp2, self.noise]), logprob, compwise=False)
self.mean = hypers[0]
self.amp2 = hypers[1]
self.noise = 1e-3
def optimize_hypers(self, comp, vals):
mygp = gp.GP(self.cov_func.__name__)
mygp.real_init(comp.shape[1], vals)
mygp.optimize_hypers(comp,vals)
self.mean = mygp.mean
self.ls = mygp.ls
self.amp2 = mygp.amp2
self.noise = mygp.noise
# Save hyperparameter samples
self.hyper_samples.append((self.mean, self.noise, self.amp2, self.ls))
return | Milano-master | milano/search_algorithms/gp/spearmint/gpeiopt_chooser.py |
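# NOTE (illustrative aside, not part of the original Milano sources):
# GPEIOptChooser above handles pending (still running) experiments by
# "fantasizing" their outcomes: it draws joint samples from the GP
# predictive N(pend_m, pend_K) at the pending inputs, appends them to the
# observed values, and later averages EI over the per-fantasy best values.
# A minimal sketch of just the sampling step; pend_m, pend_K and vals are
# assumed to be already computed (as in compute_ei above):
import numpy as np
import numpy.random as npr
import scipy.linalg as spla

def fantasize_bests(pend_m, pend_K, vals, n_samples=100):
    pend_chol = spla.cholesky(pend_K, lower=True)
    # n_samples joint fantasies of the pending outcomes.
    pend_fant = (np.dot(pend_chol, npr.randn(pend_m.shape[0], n_samples))
                 + pend_m[:, None])
    fant_vals = np.concatenate(
        (np.tile(vals[:, np.newaxis], (1, n_samples)), pend_fant))
    return np.min(fant_vals, axis=0)  # one "current best" per fantasy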
Milano-master | milano/search_algorithms/gp/spearmint/__init__.py |
|
# ##
# # Copyright (C) 2012 Jasper Snoek, Hugo Larochelle and Ryan P. Adams
# #
# # This code is written for research and educational purposes only to
# # supplement the paper entitled "Practical Bayesian Optimization of
# # Machine Learning Algorithms" by Snoek, Larochelle and Adams Advances
# # in Neural Information Processing Systems, 2012
# #
# # This program is free software: you can redistribute it and/or modify
# # it under the terms of the GNU General Public License as published by
# # the Free Software Foundation, either version 3 of the License, or
# # (at your option) any later version.
# #
# # This program is distributed in the hope that it will be useful, but
# # WITHOUT ANY WARRANTY; without even the implied warranty of
# # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# # General Public License for more details.
# #
# # You should have received a copy of the GNU General Public License
# # along with this program. If not, see
# # <http://www.gnu.org/licenses/>.
# This code was modified to be compatible with NVAML project
import numpy as np
import numpy.random as npr
from .sobol_lib import i4_sobol_generate
def slice_sample(init_x, logprob, sigma=1.0, step_out=True, max_steps_out=1000,
compwise=False, verbose=False):
def direction_slice(direction, init_x):
def dir_logprob(z):
return logprob(direction * z + init_x)
upper = sigma * npr.rand()
lower = upper - sigma
llh_s = np.log(npr.rand()) + dir_logprob(0.0)
l_steps_out = 0
u_steps_out = 0
if step_out:
while dir_logprob(lower) > llh_s and l_steps_out < max_steps_out:
l_steps_out += 1
lower -= sigma
while dir_logprob(upper) > llh_s and u_steps_out < max_steps_out:
u_steps_out += 1
upper += sigma
steps_in = 0
while True:
steps_in += 1
new_z = (upper - lower) * npr.rand() + lower
new_llh = dir_logprob(new_z)
if np.isnan(new_llh):
print(new_z, direction * new_z + init_x, new_llh, llh_s, init_x,
logprob(init_x))
raise Exception("Slice sampler got a NaN")
if new_llh > llh_s:
break
elif new_z < 0:
lower = new_z
elif new_z > 0:
upper = new_z
else:
raise Exception("Slice sampler shrank to zero!")
if verbose:
print("Steps Out:", l_steps_out, u_steps_out, " Steps In:", steps_in)
return new_z * direction + init_x
if not init_x.shape:
init_x = np.array([init_x])
dims = init_x.shape[0]
if compwise:
ordering = np.arange(dims)
npr.shuffle(ordering)
cur_x = init_x.copy()
for d in ordering:
direction = np.zeros((dims))
direction[d] = 1.0
cur_x = direction_slice(direction, cur_x)
return cur_x
else:
direction = npr.randn(dims)
direction = direction / np.sqrt(np.sum(direction ** 2))
return direction_slice(direction, init_x)
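# NOTE (illustrative aside, not part of the original Milano sources):
# slice_sample() returns a single new sample given the current point and a
# log-probability function. A minimal sketch that draws a short chain from
# a standard normal log-density; the target density here is an assumption
# chosen purely to show the calling convention:
def _demo_slice_sample(n_samples=5):
    def log_std_normal(z):
        return -0.5 * float(np.sum(z ** 2))
    x = np.array([0.0])
    chain = []
    for _ in range(n_samples):
        x = slice_sample(x, log_std_normal, compwise=True)
        chain.append(x.copy())
    return chain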
class Parameter:
def __init__(self):
self.type = []
self.name = []
self.type = []
self.min = []
self.max = []
self.options = []
self.int_val = []
self.dbl_val = []
self.str_val = []
class GridMap:
def __init__(self, variables, grid_size):
self.variables = variables
self.cardinality = 0
# Count the total number of dimensions and roll into new format.
for variable in variables:
self.cardinality += variable['size']
# Get a list of candidate experiments generated from a sobol sequence
def hypercube_grid(self, size, seed):
# Generate from a sobol sequence
sobol_grid = np.transpose(i4_sobol_generate(self.cardinality, size, seed))
return sobol_grid
# Convert a variable to the unit hypercube
# Takes a single variable encoded as a list, assuming the ordering is
# the same as specified in the configuration file
def to_unit(self, v):
unit = np.zeros(self.cardinality)
index = 0
for variable in self.variables:
# param.name = variable['name']
if variable['type'] == 'int':
for dd in range(variable['size']):
unit[index] = self._index_unmap(float(v.pop(0)) - variable['min'], (
variable['max'] - variable['min']) + 1)
index += 1
elif variable['type'] == 'float':
for dd in range(variable['size']):
unit[index] = (float(v.pop(0)) - variable['min']) / (
variable['max'] - variable['min'])
index += 1
elif variable['type'] == 'enum':
for dd in range(variable['size']):
unit[index] = variable['options'].index(v.pop(0))
index += 1
# TODO: add log_float if this function is going to be used
else:
raise Exception("Unknown parameter type.")
if len(v) > 0:
raise Exception("Too many variables passed to parser")
return unit
def unit_to_list(self, u):
params = self.get_params(u)
paramlist = []
for p in params:
if p.type == 'int':
for v in p.int_val:
paramlist.append(v)
if p.type == 'float':
for v in p.dbl_val:
paramlist.append(v)
if p.type == 'enum':
for v in p.str_val:
paramlist.append(v)
return paramlist
def get_params(self, u):
if u.shape[0] != self.cardinality:
raise Exception("Hypercube dimensionality is incorrect.")
params = []
index = 0
for variable in self.variables:
param = Parameter()
param.name = variable['name']
if variable['type'] == 'int':
param.type = 'int'
for dd in range(variable['size']):
param.int_val.append(
variable['min'] + self._index_map(u[index], variable['max'] -
variable['min'] + 1)
)
index += 1
elif variable['type'] == 'float':
param.type = 'float'
for dd in range(variable['size']):
val = variable['min'] + u[index] * (variable['max'] - variable['min'])
val = variable['min'] if val < variable['min'] else val
val = variable['max'] if val > variable['max'] else val
param.dbl_val.append(val)
index += 1
elif variable['type'] == 'log_float':
param.type = 'float'
for dd in range(variable['size']):
val = np.log(variable['min']) + u[index] * (np.log(variable['max']) - np.log(variable['min']))
val = np.log(variable['min']) if val < np.log(variable['min']) else val
val = np.log(variable['max']) if val > np.log(variable['max']) else val
param.dbl_val.append(np.exp(val))
index += 1
elif variable['type'] == 'enum':
param.type = 'enum'
for dd in range(variable['size']):
ii = self._index_map(u[index], len(variable['options']))
index += 1
param.str_val.append(variable['options'][ii])
else:
raise Exception("Unknown parameter type.")
params.append(param)
return params
def card(self):
return self.cardinality
def _index_map(self, u, items):
return int(np.floor((1 - np.finfo(float).eps) * u * float(items)))
def _index_unmap(self, u, items):
return float(float(u) / float(items)) | Milano-master | milano/search_algorithms/gp/spearmint/utils.py |
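# NOTE (illustrative aside, not part of the original Milano sources):
# GridMap above maps points in the unit hypercube back to parameter values.
# A worked example of the 'log_float' branch of get_params(): for a single
# variable {'name': 'lr', 'type': 'log_float', 'min': 1e-4, 'max': 1e-1,
# 'size': 1}, the unit value u = 0.5 is mapped to
#   exp(log(1e-4) + 0.5 * (log(1e-1) - log(1e-4))) = sqrt(1e-4 * 1e-1) ≈ 3.16e-3,
# i.e. the geometric midpoint of the range, which is why log_float is the
# natural choice for scale-type hyperparameters such as learning rates.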
##
# Copyright (C) 2012 Jasper Snoek, Hugo Larochelle and Ryan P. Adams
#
# This code is written for research and educational purposes only to
# supplement the paper entitled
# "Practical Bayesian Optimization of Machine Learning Algorithms"
# by Snoek, Larochelle and Adams
# Advances in Neural Information Processing Systems, 2012
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This code was modified to be compatible with NVAML project
"""
Chooser module for Constrained Gaussian process expected improvement.
Candidates are sampled densely in the unit hypercube and then a subset
of the most promising points is optimized to maximize constrained EI
over hyperparameter samples. Slice sampling is used to sample
Gaussian process hyperparameters for two GPs, one over the objective
function and the other a probit likelihood classification GP that estimates the
probability that a point is outside of the constraint space.
"""
import numpy as np
import math
import numpy.random as npr
import scipy.linalg as spla
import scipy.stats as sps
import scipy.optimize as spo
import time
import multiprocessing
import copy
from . import gp
from .utils import slice_sample
# Wrapper function to pass to parallel ei optimization calls
def optimize_pt(c, b, comp, pend, vals, labels, model):
ret = spo.fmin_l_bfgs_b(model.grad_optimize_ei_over_hypers,
c.flatten(), args=(comp, pend, vals, labels),
bounds=b, disp=0)
return ret[0]
class GPConstrainedEIChooser:
def __init__(self, covar="Matern52", mcmc_iters=20,
pending_samples=100, noiseless=False, burnin=100,
grid_subset=20, constraint_violating_value=np.inf,
verbosity=0):
self.cov_func = getattr(gp, covar)
self.mcmc_iters = int(mcmc_iters)
self.burnin = int(burnin)
self.needs_burnin = True
self.pending_samples = pending_samples
self.D = -1
self.hyper_iters = 1
# Number of points to optimize EI over
self.grid_subset = int(grid_subset)
self.noiseless = bool(int(noiseless))
self.hyper_samples = []
self.constraint_hyper_samples = []
self.ff = None
self.ff_samples = []
self.verbosity = int(verbosity)
self.noise_scale = 0.1 # horseshoe prior
self.amp2_scale = 1 # zero-mean log normal prior
self.max_ls = 2 # top-hat prior on length scales
self.constraint_noise_scale = 0.1 # horseshoe prior
self.constraint_amp2_scale = 1 # zero-mean log normal prior
self.constraint_gain = 1 # gain applied inside the probit squashing of the constraint GP
self.constraint_max_ls = 2 # top-hat prior on length scales
self.bad_value = float(constraint_violating_value)
def _real_init(self, dims, values, durations):
self.randomstate = npr.get_state()
# Identify constraint violations
# Note that we'll treat NaNs and Infs as these values as well
# as an optional user defined value
goodvals = np.nonzero(np.logical_and(values != self.bad_value,
np.isfinite(values)))[0]
# Input dimensionality.
self.D = dims
# Initial length scales.
self.ls = np.ones(self.D)
self.constraint_ls = np.ones(self.D)
# Initial amplitude.
self.amp2 = np.std(values[goodvals])+1e-4
self.constraint_amp2 = 1.0
# Initial observation noise.
self.noise = 1e-3
self.constraint_noise = 1e-3
self.constraint_gain = 1
# Initial mean.
self.mean = np.mean(values[goodvals])
self.constraint_mean = 0.5
def cov(self, amp2, ls, x1, x2=None):
if x2 is None:
return amp2 * (self.cov_func(ls, x1, None)
+ 1e-6*np.eye(x1.shape[0]))
else:
return amp2 * self.cov_func(ls, x1, x2)
# Given a set of completed 'experiments' in the unit hypercube with
# corresponding objective 'values', pick the next experiment to run
# according to the acquisition function.
def next(self, grid, values, durations,
candidates, pending, complete):
# Don't bother using fancy GP stuff at first.
if complete.shape[0] < 2:
return int(candidates[0])
# Grab out the relevant sets.
comp = grid[complete,:]
cand = grid[candidates,:]
pend = grid[pending,:]
vals = values[complete]
# Identify constraint violations
# Note that we'll treat NaNs and Infs as these values as well
# as an optional user defined value
idx = np.logical_and(vals != self.bad_value,
np.isfinite(vals))
goodvals = np.nonzero(idx)[0]
badvals = np.nonzero(np.logical_not(idx))[0]
if goodvals.shape[0] < 2:
return int(candidates[0])
labels = np.zeros(vals.shape[0])
labels[goodvals] = 1
if np.sum(labels) < 2:
return int(candidates[0])
# Perform the real initialization.
if self.D == -1:
self._real_init(grid.shape[1], values[complete],
durations[complete])
# Spray a set of candidates around the min so far
numcand = cand.shape[0]
best_comp = np.argmin(vals)
cand2 = np.vstack((np.random.randn(10,comp.shape[1])*0.001 +
comp[best_comp,:], cand))
if self.mcmc_iters > 0:
# Possibly burn in.
if self.needs_burnin:
for mcmc_iter in range(self.burnin):
self.sample_constraint_hypers(comp, labels)
self.sample_hypers(comp[goodvals,:], vals[goodvals])
self.needs_burnin = False
# Sample from hyperparameters.
# Adjust the candidates to hit constrained EI peaks
self.hyper_samples = []
for mcmc_iter in range(self.mcmc_iters):
self.sample_constraint_hypers(comp, labels)
self.sample_hypers(comp[goodvals,:], vals[goodvals])
comp_preds = np.zeros(labels.shape[0]).flatten()
preds = self.pred_constraint_voilation(cand, comp, labels).flatten()
for ii in range(self.mcmc_iters):
constraint_hyper = self.constraint_hyper_samples[ii]
self.ff = self.ff_samples[ii]
self.constraint_mean = constraint_hyper[0]
self.constraint_gain = constraint_hyper[1]
self.constraint_amp2 = constraint_hyper[2]
self.constraint_ls = constraint_hyper[3]
comp_preds += self.pred_constraint_voilation(comp, comp,
labels).flatten()
comp_preds = comp_preds / float(self.mcmc_iters)
# Pick the top candidates to optimize over
overall_ei = self.ei_over_hypers(comp,pend,cand2,vals,labels)
inds = np.argsort(np.mean(overall_ei, axis=1))[-self.grid_subset:]
cand2 = cand2[inds,:]
# Adjust the candidates to hit ei peaks
b = []# optimization bounds
for i in range(0, cand.shape[1]):
b.append((0, 1))
# Optimize each point in parallel
pool = multiprocessing.Pool(self.grid_subset)
results = [pool.apply_async(optimize_pt,args=(
c,b,comp,pend,vals,labels, copy.copy(self))) for c in cand2]
for res in results:
cand = np.vstack((cand, res.get(1024)))
pool.close()
cand = np.vstack((cand, cand2))
overall_ei = self.ei_over_hypers(comp,pend,cand,vals,labels)
best_cand = np.argmax(np.mean(overall_ei, axis=1))
if (best_cand >= numcand):
return (int(numcand), cand[best_cand,:])
return int(candidates[best_cand])
else:
print ('This Chooser module permits only slice sampling with > 0 '
'samples.')
raise Exception('mcmc_iters <= 0')
# Predict constraint violations: returns the probability that each
# candidate point is feasible (labels are 1 for non-violating points)
def pred_constraint_voilation(self, cand, comp, vals):
# The primary covariances for prediction.
comp_cov = self.cov(self.constraint_amp2, self.constraint_ls, comp)
cand_cross = self.cov(self.constraint_amp2, self.constraint_ls, comp,
cand)
# Compute the required Cholesky.
obsv_cov = comp_cov + self.constraint_noise*np.eye(comp.shape[0])
obsv_chol = spla.cholesky(obsv_cov, lower=True)
cov_grad_func = getattr(gp, 'grad_' + self.cov_func.__name__)
cand_cross_grad = cov_grad_func(self.constraint_ls, comp, cand)
# Predictive things.
# Solve the linear systems.
alpha = spla.cho_solve((obsv_chol, True), self.ff)
beta = spla.solve_triangular(obsv_chol, cand_cross, lower=True)
# Predict the marginal means and variances at candidates.
func_m = np.dot(cand_cross.T, alpha)# + self.constraint_mean
func_m = sps.norm.cdf(func_m*self.constraint_gain)
return func_m
# Compute EI over hyperparameter samples
def ei_over_hypers(self,comp,pend,cand,vals,labels):
overall_ei = np.zeros((cand.shape[0], self.mcmc_iters))
for mcmc_iter in range(self.mcmc_iters):
hyper = self.hyper_samples[mcmc_iter]
constraint_hyper = self.constraint_hyper_samples[mcmc_iter]
self.mean = hyper[0]
self.noise = hyper[1]
self.amp2 = hyper[2]
self.ls = hyper[3]
self.constraint_mean = constraint_hyper[0]
self.constraint_gain = constraint_hyper[1]
self.constraint_amp2 = constraint_hyper[2]
self.constraint_ls = constraint_hyper[3]
overall_ei[:,mcmc_iter] = self.compute_constrained_ei(comp, pend,
cand, vals,
labels)
return overall_ei
# Adjust points by optimizing EI over a set of hyperparameter samples
def grad_optimize_ei_over_hypers(self, cand, comp, pend, vals, labels,
compute_grad=True):
summed_ei = 0
summed_grad_ei = np.zeros(cand.shape).flatten()
for mcmc_iter in range(self.mcmc_iters):
hyper = self.hyper_samples[mcmc_iter]
constraint_hyper = self.constraint_hyper_samples[mcmc_iter]
self.mean = hyper[0]
self.noise = hyper[1]
self.amp2 = hyper[2]
self.ls = hyper[3]
self.constraint_mean = constraint_hyper[0]
self.constraint_gain = constraint_hyper[1]
self.constraint_amp2 = constraint_hyper[2]
self.constraint_ls = constraint_hyper[3]
if compute_grad:
(ei,g_ei) = self.grad_optimize_ei(cand, comp, pend, vals, labels,
compute_grad)
summed_grad_ei = summed_grad_ei + g_ei
else:
ei = self.grad_optimize_ei(cand, comp, pend, vals,
labels, compute_grad)
summed_ei += ei
if compute_grad:
return (summed_ei, summed_grad_ei)
else:
return summed_ei
def check_grad_ei(self, cand, comp, pend, vals, labels):
(ei,dx1) = self.grad_optimize_ei_over_hypers(cand, comp, pend, vals, labels)
dx2 = dx1*0
idx = np.zeros(cand.shape[0])
for i in range(0, cand.shape[0]):
idx[i] = 1e-6
(ei1,tmp) = self.grad_optimize_ei_over_hypers(
cand + idx, comp, pend, vals, labels)
(ei2,tmp) = self.grad_optimize_ei_over_hypers(
cand - idx, comp, pend, vals, labels)
dx2[i] = (ei1 - ei2)/(2*1e-6)
idx[i] = 0
print('computed grads', dx1)
print('finite diffs', dx2)
print(dx1/dx2)
print(np.sum((dx1 - dx2)**2))
time.sleep(2)
def grad_optimize_ei(self, cand, comp, pend, vals, labels, compute_grad=True):
if pend.shape[0] == 0:
return self.grad_optimize_ei_nopend(cand, comp, vals, labels,
compute_grad=True)
else:
return self.grad_optimize_ei_pend(cand, comp, pend, vals, labels,
compute_grad=True)
def grad_optimize_ei_pend(self, cand, comp, pend, vals, labels, compute_grad=True):
# Here we have to compute the gradients for constrained ei
# This means differentiating through the two kernels, the one for predicting
# constraint violations and the one predicting ei
# First pull out violating points
compfull = comp.copy()
comp = comp[labels > 0, :]
vals = vals[labels > 0]
# Use standard EI if there aren't enough observations of either
# positive or negative constraint violations
use_vanilla_ei = (np.all(labels > 0) or np.all(labels <= 0))
best = np.min(vals)
cand = np.reshape(cand, (-1, comp.shape[1]))
func_constraint_m = 1
if (not use_vanilla_ei):
# First make predictions with the constraint GP
# Compute covariances
comp_constraint_cov = self.cov(self.constraint_amp2,
self.constraint_ls,
compfull)
cand_constraint_cross = self.cov(self.constraint_amp2,
self.constraint_ls,
compfull,cand)
# Cholesky decompositions
obsv_constraint_cov = (comp_constraint_cov +
self.constraint_noise*np.eye(compfull.shape[0]))
obsv_constraint_chol = spla.cholesky(obsv_constraint_cov,lower=True)
# Linear systems
t_alpha = spla.cho_solve((obsv_constraint_chol, True), self.ff)
# Predict the marginal constraint-GP means (and, if needed, variances)
ff = np.dot(cand_constraint_cross.T, t_alpha)
# Squash through Gaussian cdf
func_constraint_m = sps.norm.cdf(self.constraint_gain*ff)
# Apply covariance function
cov_grad_func = getattr(gp, 'grad_' + self.cov_func.__name__)
cand_cross_grad = cov_grad_func(self.constraint_ls, compfull, cand)
grad_cross_t = np.squeeze(cand_cross_grad)
# Now compute the gradients w.r.t. ei
# The primary covariances for prediction.
comp_cov = self.cov(self.amp2, self.ls, comp)
cand_cross = self.cov(self.amp2, self.ls, comp, cand)
comp_cov_full = self.cov(self.amp2, self.ls, compfull)
cand_cross_full = self.cov(self.amp2, self.ls, compfull, cand)
# Create a composite vector of complete and pending.
comp_pend = np.concatenate((comp, pend))
# Compute the covariance and Cholesky decomposition.
comp_pend_cov = (self.cov(self.amp2, self.ls, comp_pend) +
self.noise*np.eye(comp_pend.shape[0]))
comp_pend_chol = spla.cholesky(comp_pend_cov, lower=True)
# Compute submatrices.
pend_cross = self.cov(self.amp2, self.ls, comp, pend)
pend_kappa = self.cov(self.amp2,self.ls, pend)
# Use the sub-Cholesky.
obsv_chol = comp_pend_chol[:comp.shape[0],:comp.shape[0]]
# Compute the required Cholesky.
#obsv_cov = comp_cov + self.noise*np.eye(comp.shape[0])
#obsv_chol = spla.cholesky(obsv_cov, lower=True)
obsv_cov_full = comp_cov_full + self.noise*np.eye(compfull.shape[0])
obsv_chol_full = spla.cholesky( obsv_cov_full, lower=True)
# Predictive things.
# Solve the linear systems.
alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
beta = spla.cho_solve((obsv_chol, True), pend_cross)
# Finding predictive means and variances.
pend_m = np.dot(pend_cross.T, alpha) + self.mean
pend_K = pend_kappa - np.dot(pend_cross.T, beta)
# Take the Cholesky of the predictive covariance.
pend_chol = spla.cholesky(pend_K, lower=True)
# Make predictions.
npr.set_state(self.randomstate)
pend_fant = np.dot(pend_chol, npr.randn(pend.shape[0],self.pending_samples)) + pend_m[:,None]
# Include the fantasies.
fant_vals = np.concatenate(
(np.tile(vals[:,np.newaxis],
(1,self.pending_samples)), pend_fant))
# Compute bests over the fantasies.
bests = np.min(fant_vals, axis=0)
# Now generalize from these fantasies.
cand_cross = self.cov(self.amp2, self.ls, comp_pend, cand)
cov_grad_func = getattr(gp, 'grad_' + self.cov_func.__name__)
cand_cross_grad = cov_grad_func(self.ls, comp_pend, cand)
# Solve the linear systems.
alpha = spla.cho_solve((comp_pend_chol, True),
fant_vals - self.mean)
beta = spla.solve_triangular(comp_pend_chol, cand_cross,
lower=True)
# Predict the marginal means and variances at candidates.
func_m = np.dot(cand_cross.T, alpha) + self.mean
func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)
# Expected improvement
func_s = np.sqrt(func_v)
u = (best - func_m) / func_s
ncdf = sps.norm.cdf(u)
npdf = sps.norm.pdf(u)
ei = func_s*(u*ncdf + npdf)
constrained_ei = -np.sum(ei*func_constraint_m)
if not compute_grad:
return constrained_ei
# Gradients of ei w.r.t. mean and variance
g_ei_m = -ncdf
g_ei_s2 = 0.5*npdf / func_s
# Apply covariance function
grad_cross = np.squeeze(cand_cross_grad)
grad_xp_m = np.dot(alpha.transpose(),grad_cross)
grad_xp_v = np.dot(-2*spla.cho_solve(
(comp_pend_chol, True),cand_cross).transpose(), grad_cross)
grad_xp = 0.5*self.amp2*(grad_xp_m*np.tile(g_ei_m,(comp.shape[1],1)).T + (grad_xp_v.T*g_ei_s2).T)
grad_xp = np.sum(grad_xp,axis=0)
if use_vanilla_ei:
return -np.sum(ei), grad_xp.flatten()
grad_constraint_xp_m = np.dot(t_alpha.transpose(),grad_cross_t)
grad_constraint_xp_m = (0.5*self.constraint_amp2*
self.constraint_gain*
grad_constraint_xp_m*
sps.norm.pdf(self.constraint_gain*ff))
grad_xp = (func_constraint_m*grad_xp + np.sum(ei)*grad_constraint_xp_m)
return constrained_ei, grad_xp.flatten()
def grad_optimize_ei_nopend(self, cand, comp, vals, labels, compute_grad=True):
# Here we have to compute the gradients for constrained ei
# This means differentiating through the two kernels, the one for predicting
# constraint violations and the one predicting ei
# First pull out violating points
compfull = comp.copy()
comp = comp[labels > 0, :]
vals = vals[labels > 0]
# Use standard EI if there aren't enough observations of either
# positive or negative constraint violations
use_vanilla_ei = (np.all(labels > 0) or np.all(labels <= 0))
best = np.min(vals)
cand = np.reshape(cand, (-1, comp.shape[1]))
func_constraint_m = 1
if (not use_vanilla_ei):
# First make predictions with the constraint GP
# Compute covariances
comp_constraint_cov = self.cov(self.constraint_amp2,
self.constraint_ls,
compfull)
cand_constraint_cross = self.cov(self.constraint_amp2,
self.constraint_ls,
compfull,cand)
# Cholesky decompositions
obsv_constraint_cov = (comp_constraint_cov +
self.constraint_noise*np.eye(compfull.shape[0]))
obsv_constraint_chol = spla.cholesky(obsv_constraint_cov,lower=True)
# Linear systems
t_alpha = spla.cho_solve((obsv_constraint_chol, True), self.ff)
# Predict the marginal constraint-GP means (and, if needed, variances)
ff = np.dot(cand_constraint_cross.T, t_alpha)
# Squash through Gaussian cdf
func_constraint_m = sps.norm.cdf(self.constraint_gain*ff)
# Now compute the gradients w.r.t. ei
# The primary covariances for prediction.
comp_cov = self.cov(self.amp2, self.ls, comp)
cand_cross = self.cov(self.amp2, self.ls, comp, cand)
comp_cov_full = self.cov(self.amp2, self.ls, compfull)
cand_cross_full = self.cov(self.amp2, self.ls, compfull, cand)
# Compute the required Cholesky.
obsv_cov = comp_cov + self.noise*np.eye(comp.shape[0])
obsv_chol = spla.cholesky(obsv_cov, lower=True)
obsv_cov_full = comp_cov_full + self.noise*np.eye(compfull.shape[0])
obsv_chol_full = spla.cholesky( obsv_cov_full, lower=True)
# Predictive things.
# Solve the linear systems.
alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
beta = spla.solve_triangular(obsv_chol_full, cand_cross_full,
lower=True)
# Predict the marginal means and variances at candidates.
func_m = np.dot(cand_cross.T, alpha) + self.mean
func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)
# Expected improvement
func_s = np.sqrt(func_v)
u = (best - func_m) / func_s
ncdf = sps.norm.cdf(u)
npdf = sps.norm.pdf(u)
ei = func_s*(u*ncdf + npdf)
constrained_ei = -np.sum(ei*func_constraint_m)
if not compute_grad:
return constrained_ei
# Gradients of ei w.r.t. mean and variance
g_ei_m = -ncdf
g_ei_s2 = 0.5*npdf / func_s
# Apply covariance function
cov_grad_func = getattr(gp, 'grad_' + self.cov_func.__name__)
cand_cross_grad = cov_grad_func(self.ls, comp, cand)
grad_cross = np.squeeze(cand_cross_grad)
cand_cross_grad_full = cov_grad_func(self.ls, compfull, cand)
grad_cross_full = np.squeeze(cand_cross_grad_full)
grad_xp_m = np.dot(alpha.transpose(),grad_cross)
grad_xp_v = np.dot(-2*spla.cho_solve((obsv_chol_full, True),
cand_cross_full).transpose(),
grad_cross_full)
grad_xp = 0.5*self.amp2*(grad_xp_m*g_ei_m + grad_xp_v*g_ei_s2)
if use_vanilla_ei:
return -np.sum(ei), grad_xp.flatten()
# Apply constraint classifier
cand_cross_grad = cov_grad_func(self.constraint_ls, compfull, cand)
grad_cross_t = np.squeeze(cand_cross_grad)
grad_constraint_xp_m = np.dot(t_alpha.transpose(),grad_cross_t)
grad_constraint_xp_m = (0.5*self.constraint_amp2*
self.constraint_gain*
grad_constraint_xp_m*
sps.norm.pdf(self.constraint_gain*ff))
grad_xp = (func_constraint_m*grad_xp + ei*grad_constraint_xp_m)
return constrained_ei, grad_xp.flatten()
def compute_constrained_ei(self, comp, pend, cand, vals, labels):
# First make predictions with the constraint GP, as those
# don't depend on pending experiments
# First pull out violating points
compfull = comp.copy()
comp = comp[labels > 0, :]
vals = vals[labels > 0]
# Use standard EI if there aren't enough observations of either
# positive or negative constraint violations
if (np.all(labels > 0) or np.all(labels <= 0)):
func_constraint_m = 1
else:
# Compute covariances
comp_constraint_cov = self.cov(self.constraint_amp2,
self.constraint_ls,
compfull)
cand_constraint_cross = self.cov(self.constraint_amp2,
self.constraint_ls,
compfull,cand)
# Cholesky decompositions
obsv_constraint_cov = (comp_constraint_cov +
self.constraint_noise*np.eye(compfull.shape[0]))
obsv_constraint_chol = spla.cholesky(
obsv_constraint_cov, lower=True)
# Linear systems
t_alpha = spla.cho_solve((obsv_constraint_chol, True), self.ff)
t_beta = spla.solve_triangular(obsv_constraint_chol,
cand_constraint_cross, lower=True)
# Predict the marginal constraint-GP means (and, if needed, variances)
func_constraint_m = (np.dot(cand_constraint_cross.T, t_alpha))
# Squash through a probit
func_constraint_m = sps.norm.cdf(self.constraint_gain*func_constraint_m)
if pend.shape[0] == 0:
# If there are no pending, don't do anything fancy.
# Current best.
best = np.min(vals)
# The primary covariances for prediction.
comp_cov = self.cov(self.amp2, self.ls, comp)
comp_cov_full = self.cov(self.amp2, self.ls, compfull)
cand_cross = self.cov(self.amp2, self.ls, comp, cand)
cand_cross_full = self.cov(self.amp2, self.ls, compfull, cand)
# Compute the required Cholesky.
obsv_cov = comp_cov + self.noise*np.eye(comp.shape[0])
obsv_cov_full = (comp_cov_full +
self.noise*np.eye(compfull.shape[0]))
obsv_chol = spla.cholesky( obsv_cov, lower=True)
obsv_chol_full = spla.cholesky( obsv_cov_full, lower=True)
# Solve the linear systems.
alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
beta = spla.solve_triangular(obsv_chol, cand_cross, lower=True)
#beta = spla.solve_triangular(obsv_chol_full, cand_cross_full,
# lower=True)
# Predict the marginal means and variances at candidates.
func_m = np.dot(cand_cross.T, alpha) + self.mean
func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)
# Expected improvement
func_s = np.sqrt(func_v)
u = (best - func_m) / func_s
ncdf = sps.norm.cdf(u)
npdf = sps.norm.pdf(u)
ei = func_s*( u*ncdf + npdf)
constrained_ei = ei*func_constraint_m
return constrained_ei
else:
# If there are pending experiments, fantasize their outcomes.
# Create a composite vector of complete and pending.
comp_pend = np.concatenate((comp, pend))
# Compute the covariance and Cholesky decomposition.
comp_pend_cov = (self.cov(self.amp2, self.ls, comp_pend) +
self.noise*np.eye(comp_pend.shape[0]))
comp_pend_chol = spla.cholesky(comp_pend_cov, lower=True)
# Compute submatrices.
pend_cross = self.cov(self.amp2, self.ls, comp, pend)
pend_kappa = self.cov(self.amp2, self.ls, pend)
# Use the sub-Cholesky.
obsv_chol = comp_pend_chol[:comp.shape[0],:comp.shape[0]]
# Solve the linear systems.
alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
beta = spla.cho_solve((obsv_chol, True), pend_cross)
# Finding predictive means and variances.
pend_m = np.dot(pend_cross.T, alpha) + self.mean
pend_K = pend_kappa - np.dot(pend_cross.T, beta)
# Take the Cholesky of the predictive covariance.
pend_chol = spla.cholesky(pend_K, lower=True)
# Make predictions.
pend_fant = np.dot(pend_chol, npr.randn(pend.shape[0],
self.pending_samples)) + pend_m[:,None]
# Include the fantasies.
fant_vals = np.concatenate((np.tile(vals[:,np.newaxis],
(1,self.pending_samples)), pend_fant))
# Compute bests over the fantasies.
bests = np.min(fant_vals, axis=0)
# Now generalize from these fantasies.
cand_cross = self.cov(self.amp2, self.ls, comp_pend, cand)
# Solve the linear systems.
alpha = spla.cho_solve((comp_pend_chol, True),
fant_vals - self.mean)
beta = spla.solve_triangular(comp_pend_chol, cand_cross,
lower=True)
# Predict the marginal means and variances at candidates.
func_m = np.dot(cand_cross.T, alpha) + self.mean
func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)
# Expected improvement
func_s = np.sqrt(func_v[:,np.newaxis])
u = (bests[np.newaxis,:] - func_m) / func_s
ncdf = sps.norm.cdf(u)
npdf = sps.norm.pdf(u)
ei = func_s*( u*ncdf + npdf)
return np.mean(ei, axis=1)*func_constraint_m
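# (Note: compute_constrained_ei returns the standard EI weighted by the
# probit-GP estimate of the probability that each candidate satisfies the
# constraint, so candidates predicted to be infeasible are down-weighted
# rather than discarded outright.)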
def compute_ei(self, comp, pend, cand, vals, labels):
# First make predictions with the constraint GP, as those
# don't depend on pending experiments
# First pull out violating points
compfull = comp.copy()
comp = comp[labels > 0, :]
vals = vals[labels > 0]
# Compute covariances
comp_constraint_cov = self.cov(self.constraint_amp2,
self.constraint_ls,
compfull)
cand_constraint_cross = self.cov(self.constraint_amp2,
self.constraint_ls,
compfull,cand)
# Cholesky decompositions
obsv_constraint_cov = (comp_constraint_cov +
self.constraint_noise*np.eye(
compfull.shape[0]))
obsv_constraint_chol = spla.cholesky( obsv_constraint_cov, lower=True )
# Linear systems
t_alpha = spla.cho_solve((obsv_constraint_chol, True), self.ff)
# Predict marginal mean times and (possibly) variances
func_constraint_m = (np.dot(cand_constraint_cross.T, t_alpha))
# Squash through a sigmoid to get prob of not violating a constraint
func_constraint_m = 1./(1+np.exp(-self.constraint_gain*
func_constraint_m))
if pend.shape[0] == 0:
# If there are no pending, don't do anything fancy.
# Current best.
best = np.min(vals)
# The primary covariances for prediction.
comp_cov = self.cov(self.amp2, self.ls, comp)
comp_cov_full = self.cov(self.amp2, self.ls, compfull)
cand_cross = self.cov(self.amp2, self.ls, comp, cand)
cand_cross_full = self.cov(self.amp2, self.ls, compfull, cand)
# Compute the required Cholesky.
obsv_cov = comp_cov + self.noise*np.eye(comp.shape[0])
obsv_cov_full = (comp_cov_full +
self.noise*np.eye(compfull.shape[0]))
obsv_chol = spla.cholesky( obsv_cov, lower=True )
obsv_chol_full = spla.cholesky( obsv_cov_full, lower=True )
# Solve the linear systems.
alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
beta = spla.solve_triangular(obsv_chol, cand_cross, lower=True)
#beta = spla.solve_triangular(obsv_chol_full, cand_cross_full,
#lower=True)
# Predict the marginal means and variances at candidates.
func_m = np.dot(cand_cross.T, alpha) + self.mean
func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)
# Expected improvement
func_s = np.sqrt(func_v)
u = (best - func_m) / func_s
ncdf = sps.norm.cdf(u)
npdf = sps.norm.pdf(u)
ei = func_s*( u*ncdf + npdf)
return ei
else:
return 0
def sample_constraint_hypers(self, comp, labels):
# The latent GP projection
if (self.ff is None or self.ff.shape[0] < comp.shape[0]):
self.ff_samples = []
comp_cov = self.cov(self.constraint_amp2, self.constraint_ls, comp)
obsv_cov = comp_cov + 1e-6*np.eye(comp.shape[0])
obsv_chol = spla.cholesky(obsv_cov, lower=True)
self.ff = np.dot(obsv_chol,npr.randn(obsv_chol.shape[0]))
self._sample_constraint_noisy(comp, labels)
self._sample_constraint_ls(comp, labels)
self.constraint_hyper_samples.append((self.constraint_mean,
self.constraint_gain,
self.constraint_amp2,
self.constraint_ls))
self.ff_samples.append(self.ff)
def sample_hypers(self, comp, vals):
if self.noiseless:
self.noise = 1e-3
self._sample_noiseless(comp, vals)
else:
self._sample_noisy(comp, vals)
self._sample_ls(comp, vals)
self.hyper_samples.append((self.mean, self.noise, self.amp2, self.ls))
def _sample_ls(self, comp, vals):
def logprob(ls):
if np.any(ls < 0) or np.any(ls > self.max_ls):
return -np.inf
cov = (self.amp2 * (self.cov_func(ls, comp, None) +
1e-6*np.eye(comp.shape[0])) +
self.noise*np.eye(comp.shape[0]))
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), vals - self.mean)
lp = (-np.sum(np.log(np.diag(chol))) -
0.5*np.dot(vals-self.mean, solve))
return lp
self.ls = slice_sample(self.ls, logprob, compwise=True)
def _sample_constraint_ls(self, comp, vals):
def lpProbit(ff, gain=self.constraint_gain):
probs = sps.norm.cdf(ff*gain)
probs[probs <= 0] = 1e-12
probs[probs >= 1] = 1-1e-12
llh = np.sum(vals*np.log(probs) +
(1-vals)*np.log(1-probs))
return llh
def lpSigmoid(ff, gain=self.constraint_gain):
probs = 1./(1. + np.exp(-gain*ff));
probs[probs <= 0] = 1e-12
probs[probs >= 1] = 1-1e-12
llh = np.sum(vals*np.log(probs) + (1-vals)*np.log(1-probs));
return llh
def updateGain(gain):
if gain < 0.01 or gain > 10:
return -np.inf
cov = (self.constraint_amp2 * (self.cov_func(
self.constraint_ls, comp, None) +
1e-6*np.eye(comp.shape[0])) +
self.constraint_noise*np.eye(comp.shape[0]))
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), vals)
lp = lpProbit(self.ff, gain)
return lp
def logprob(ls):
if np.any(ls < 0) or np.any(ls > self.constraint_max_ls):
return -np.inf
cov = self.constraint_amp2 * (self.cov_func(ls, comp, None) + 1e-6*np.eye(comp.shape[0])) + self.constraint_noise*np.eye(comp.shape[0])
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), self.ff)
lp = lpProbit(self.ff)
return lp
hypers = slice_sample(self.constraint_ls, logprob, compwise=True)
self.constraint_ls = hypers
cov = self.constraint_amp2 * (self.cov_func(self.constraint_ls, comp, None) + 1e-6*np.eye(comp.shape[0])) + self.constraint_noise*np.eye(comp.shape[0])
chol = spla.cholesky(cov, lower=False)
ff = self.ff
for jj in range(20):
(ff, lpell) = self.elliptical_slice(ff, chol, lpProbit)
self.ff = ff
# Update gain
hypers = slice_sample(np.array([self.constraint_gain]),
updateGain, compwise=True)
self.constraint_gain = hypers[0]
def _sample_noisy(self, comp, vals):
def logprob(hypers):
mean = hypers[0]
amp2 = hypers[1]
noise = hypers[2]
# This is pretty hacky, but keeps things sane.
if mean > np.max(vals) or mean < np.min(vals):
return -np.inf
if amp2 < 0 or noise < 0:
return -np.inf
cov = amp2 * ((self.cov_func(self.ls, comp, None) +
1e-6*np.eye(comp.shape[0])) +
noise*np.eye(comp.shape[0]))
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), vals - mean)
lp = -np.sum(np.log(np.diag(chol)))-0.5*np.dot(vals-mean, solve)
# Roll in noise horseshoe prior.
lp += np.log(np.log(1 + (self.noise_scale/noise)**2))
# Roll in amplitude lognormal prior
lp -= 0.5*(np.log(amp2)/self.amp2_scale)**2
return lp
hypers = slice_sample(np.array(
[self.mean, self.amp2, self.noise]),
logprob, compwise=False)
self.mean = hypers[0]
self.amp2 = hypers[1]
self.noise = hypers[2]
def _sample_constraint_noisy(self, comp, vals):
def lpProbit(ff, gain=self.constraint_gain):
probs = sps.norm.cdf(ff*gain)
probs[probs <= 0] = 1e-12
probs[probs >= 1] = 1-1e-12
llh = np.sum(vals*np.log(probs) +
(1-vals)*np.log(1-probs))
if np.any(np.isnan(probs)):
print(probs)
return llh
def lpSigmoid(ff,gain=self.constraint_gain):
probs = 1./(1. + np.exp(-gain*ff));
probs[probs <= 0] = 1e-12
probs[probs >= 1] = 1-1e-12
llh = np.sum(vals*np.log(probs) + (1-vals)*np.log(1-probs));
return llh
def logprob(hypers):
amp2 = hypers[0]
ff = hypers[1:]
if amp2 < 0:
return -np.inf
noise = self.constraint_noise
cov = amp2 * (self.cov_func(self.constraint_ls, comp, None) + 1e-6*np.eye(comp.shape[0])) + noise*np.eye(comp.shape[0])
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), ff)
lp = -np.sum(np.log(np.diag(chol)))-0.5*np.dot(ff, solve)
# Roll in amplitude lognormal prior
lp -= 0.5*(np.log(amp2)/self.constraint_amp2_scale)**2
lp += lpProbit(ff,self.constraint_gain)
return lp
hypers = slice_sample(np.hstack((np.array([self.constraint_amp2]),
self.ff)), logprob, compwise=False)
self.constraint_amp2 = hypers[0]
self.ff = hypers[1:]
cov = self.constraint_amp2 * ((
self.cov_func(self.constraint_ls, comp, None) +
1e-6*np.eye(comp.shape[0])) +
self.constraint_noise*np.eye(comp.shape[0]))
chol = spla.cholesky(cov, lower=False)
ff = self.ff
for jj in range(50):
(ff, lpell) = self.elliptical_slice(ff, chol, lpProbit)
self.ff = ff
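# --- Editor's note: hedged, self-contained sketch of elliptical slice sampling ---
# (Murray, Adams & MacKay, 2010), the routine invoked as self.elliptical_slice in
# the loop above and defined further down in this class. This toy version is not
# part of the original source; it samples from a posterior with prior N(0, Sigma)
# and a Gaussian likelihood around an observation, so draws should concentrate
# near that observation while respecting the prior correlation. All names here
# are illustrative only.
def _elliptical_slice_demo(num_samples=2000, seed=0):
    import math
    import numpy as np
    rng = np.random.RandomState(seed)
    Sigma = np.array([[1.0, 0.6], [0.6, 1.0]])
    chol_lower = np.linalg.cholesky(Sigma)      # lower-triangular factor of the prior
    obs = np.array([1.0, -0.5])
    def log_like(x):
        return -0.5 * np.sum((x - obs) ** 2) / 0.1   # N(obs, 0.1*I), up to a constant
    x = np.zeros(2)
    samples = []
    for _ in range(num_samples):
        nu = chol_lower.dot(rng.randn(2))        # auxiliary draw from the prior
        hh = np.log(rng.rand()) + log_like(x)    # log slice height
        phi = rng.rand() * 2 * math.pi
        phi_min, phi_max = phi - 2 * math.pi, phi
        while True:                              # shrink the angle bracket until accepted
            x_prop = x * np.cos(phi) + nu * np.sin(phi)
            if log_like(x_prop) > hh:
                break
            if phi > 0:
                phi_max = phi
            else:
                phi_min = phi
            phi = rng.rand() * (phi_max - phi_min) + phi_min
        x = x_prop
        samples.append(x)
    return np.mean(samples, axis=0)              # roughly the posterior mean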
def _sample_noiseless(self, comp, vals):
def logprob(hypers):
mean = hypers[0]
amp2 = hypers[1]
noise = 1e-3
# This is pretty hacky, but keeps things sane.
if mean > np.max(vals) or mean < np.min(vals):
return -np.inf
if amp2 < 0:
return -np.inf
cov = amp2 * ((self.cov_func(self.ls, comp, None) +
1e-6*np.eye(comp.shape[0])) +
noise*np.eye(comp.shape[0]))
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), vals - mean)
lp = -np.sum(np.log(np.diag(chol)))-0.5*np.dot(vals-mean, solve)
# Roll in amplitude lognormal prior
lp -= 0.5*(np.log(amp2)/self.amp2_scale)**2
return lp
hypers = slice_sample(np.array(
[self.mean, self.amp2, self.noise]), logprob, compwise=False)
self.mean = hypers[0]
self.amp2 = hypers[1]
self.noise = 1e-3
def elliptical_slice(self, xx, chol_Sigma, log_like_fn, cur_log_like=None,
angle_range=0):
D = xx.shape[0]
if cur_log_like is None:
cur_log_like = log_like_fn(xx)
nu = np.dot(chol_Sigma.T,np.random.randn(D, 1)).flatten()
hh = np.log(np.random.rand()) + cur_log_like
# Set up a bracket of angles and pick a first proposal.
# "phi = (theta'-theta)" is a change in angle.
if angle_range <= 0:
# Bracket whole ellipse with both edges at first proposed point
phi = np.random.rand()*2*math.pi
phi_min = phi - 2*math.pi
phi_max = phi
else:
# Randomly center bracket on current point
phi_min = -angle_range*np.random.rand()
phi_max = phi_min + angle_range
phi = np.random.rand()*(phi_max - phi_min) + phi_min
# Slice sampling loop
while True:
# Compute xx for proposed angle difference
# and check if it's on the slice
xx_prop = xx*np.cos(phi) + nu*np.sin(phi)
cur_log_like = log_like_fn(xx_prop)
if cur_log_like > hh:
# New point is on slice, ** EXIT LOOP **
break;
# Shrink slice to rejected point
if phi > 0:
phi_max = phi
elif phi < 0:
phi_min = phi
else:
raise Exception('BUG DETECTED: Shrunk to current position '
'and still not acceptable.')
# Propose new angle difference
phi = np.random.rand()*(phi_max - phi_min) + phi_min
xx = xx_prop
return (xx, cur_log_like) | Milano-master | milano/search_algorithms/gp/spearmint/gpei_constrained_chooser.py |
# Copyright (c) 2018 NVIDIA Corporation
from milano.backends import AzkabanBackend
from milano.search_algorithms import RandomSearch
# specify path to the script that is going to be tuned
# path has to be absolute or relative to tune.py script
script_to_run = "examples/os2s/cifar10/start_azk.sh"
params_to_try_first = {
"--lr_policy_params/learning_rate": [0.01, 0.02, 0.03],
"--lr_policy_params/power": [2.0, 2.0, 1.5],
"--batch_size_per_gpu": [64, 64, 64],
"--regularizer_params/scale": [0.0001, 0.0002, 0.0002],
"--optimizer_params/momentum": [0.71, 0.72, 0.73],
}
# specify the tunable parameters as cmd arguments and their possible ranges
params_to_tune = {
"--lr_policy_params/learning_rate": {
"type": "log_range", "min": 0.00001, "max": 0.1
},
"--lr_policy_params/power": {
"type": "values", "values": [0.5, 1.0, 2.0]
},
"--batch_size_per_gpu": {
"type": "values", "values": [32, 64, 128, 256]
},
"--regularizer_params/scale": {
"type": "log_range", "min": 0.00001, "max": 0.001
},
"--optimizer_params/momentum": {
"type": "range", "min": 0.45, "max": 0.99
},
}
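# --- Editor's note: hedged illustration, not part of this example config. ---
# "log_range" presumably tells the search algorithm to draw the value
# log-uniformly between "min" and "max", so every order of magnitude in the
# interval is equally likely. A sketch of such a draw, for intuition only --
# the actual sampling lives inside Milano's search algorithms:
def _log_range_draw_example(lo=0.00001, hi=0.1):
    import numpy as np
    return float(np.exp(np.random.uniform(np.log(lo), np.log(hi))))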
# specify result pattern used to parse logs
result_pattern = "Validation top-1:"
# maximize or minimize
objective = "maximize"
# specify backend information and workers configuration
backend = AzkabanBackend
backend_params = {
"url": "http://192.168.42.149", # URL of your Azkaban UI
"port": "8081", # Azkaban port. You should see Azkaban UI at url:port
"username": "azkaban",
"password": "azkaban",
# If you are using Azkaban solo server on a single machine, set this to the number of GPUs you have
"workers_config": [
{"num_workers": 1, "env_vars": ["CUDA_VISIBLE_DEVICES=0"]},
{"num_workers": 1, "env_vars": ["CUDA_VISIBLE_DEVICES=1"]},
],
}
# specify search algorithm to use
search_algorithm = RandomSearch
search_algorithm_params = {
"num_evals": 1024,
}
| Milano-master | examples/os2s/cifar10/cifar10_azkaban.py |
# Copyright (c) 2018 NVIDIA Corporation
from milano.backends import SLURMBackend
from milano.search_algorithms import RandomSearch
# specify path to the script that is going to be tuned
# path has to be absolute or relative to tune.py script
script_to_run = "examples/os2s/cifar10/start_slurm.sh"
# specify the tunable parameters as cmd arguments and their possible ranges
params_to_tune = {
"--lr_policy_params/learning_rate": {
"type": "log_range", "min": 0.00001, "max": 0.1
},
"--lr_policy_params/power": {
"type": "values", "values": [0.5, 1.0, 2.0]
},
"--batch_size_per_gpu": {
"type": "values", "values": [32, 64, 128, 256, 512, 1024]
},
"--regularizer_params/scale": {
"type": "log_range", "min": 0.00001, "max": 0.001
},
"--optimizer_params/momentum": {
"type": "range", "min": 0.5, "max": 0.99
},
}
# specify result pattern used to parse logs
result_pattern = "Validation top-1:"
# maximize or minimize
objective = "maximize"
# BACKEND parameters. We will use SLURMBackend to run on DB Cluster
backend = SLURMBackend
backend_params = {
"workers_config": {
"num_workers": 2, # NUMBER OF SLURM *NODES* to run at a time.
"partition": "batch", # PARTITION name
"username": "okuchaiev", # CHANGE THIS to your username
"key_path": "/home/okuchaiev/.ssh/id_rsa", # CHANGE THIS to your id_rsa path for pasword-less ssh to the cluster
"entrypoint": "dbcluster", # CHANGE THIS to your cluster entrypoint name
},
}
search_algorithm = RandomSearch
search_algorithm_params = {
"num_evals": 10, # TOTAL EXPERIMENTS TO RUN. You can set it arbitrary high
}
| Milano-master | examples/os2s/cifar10/cifar10_slurm.py |
# Copyright (c) 2018 NVIDIA Corporation
from milano.backends import AWSBackend
from milano.search_algorithms import RandomSearch
# specify path to the script that is going to be tuned
# path has to be absolute or relative to tune.py script
script_to_run = "examples/os2s/cifar10/start_aws.sh"
# specify the tunable parameters as cmd arguments and their possible ranges
params_to_tune = {
"--lr_policy_params/learning_rate": {
"type": "log_range", "min": 0.00001, "max": 0.1
},
"--lr_policy_params/power": {
"type": "values", "values": [0.5, 1.0, 2.0]
},
"--batch_size_per_gpu": {
"type": "values", "values": [32, 64, 128, 256, 512, 1024]
},
"--regularizer_params/scale": {
"type": "log_range", "min": 0.00001, "max": 0.001
},
"--optimizer_params/momentum": {
"type": "range", "min": 0.5, "max": 0.99
},
}
# specify result pattern used to parse logs
result_pattern = "Validation top-1:"
# maximize or minimize
objective = "maximize"
# specify backend information and workers configuration
backend = AWSBackend
backend_params = {
# TODO maybe automate the creation of a keypair if one isn't supplied
"config": {
"num_workers": 1,
"spot_instances": False,
"key_name": "milano-test",
"private_key_path": "/home/okuchaiev/.aws/milano-test.pem", # FILL THIS IN WITH YOUR .pem FILE
"region_name": "us-west-2",
"docker_image_name": "tensorflow/tensorflow:1.9.0-gpu-py3",
# "iam_role": "..." # if omitted, a role with read access to the dataset bucket/prefixes is created.
"datasets": [
{
"type": "s3",
"bucket": "milano-test-data",
"prefix": "cifar-10",
"mount": "/data",
},
],
"instance_params": {
"InstanceType": "p3.2xlarge",
}
}
}
# specify search algorithm to use
search_algorithm = RandomSearch
search_algorithm_params = {
"num_evals": 3,
}
| Milano-master | examples/os2s/cifar10/cifar10_aws.py |
# Copyright (c) 2018 NVIDIA Corporation
from milano.backends import SLURMBackend
from milano.search_algorithms import RandomSearch
# specify path to the script that is going to be tuned
# path has to be absolute or relative to tune.py script
# this should be on a driver machine
script_to_run = "examples/pytorch/wlm/start_wlm_slurm.sh"
# You can have this section to user-prespecify which configurations to explore first
params_to_try_first = {
"--model": ["LSTM", "GRU"],
"--emsize": [1504, 1504],
"--nlayers": [2, 2],
"--lr": [20, 25],
"--bptt": [35, 35],
"--clip": [0.25, 0.35],
"--dropout": [0.2, 0.2],
}
# specify the tunable parameters as cmd arguments and their possible ranges
params_to_tune = {
"--model": {
"type": "values", "values": ["LSTM", "GRU"]
},
"--emsize": {
"type": "values", "values": [256, 512, 650, 1024, 1504, 2048]
},
"--nlayers": {
"type": "values", "values": [1, 2, 3]
},
"--lr": {
"type": "values", "values": [10, 20, 5, 30, 25, 40],
},
"--nhid": {
"type": "values", "values": [256, 512, 650, 1024, 1504, 2048]
},
"--bptt": {
"type": "values", "values": [15, 20, 30, 35, 45]
},
"--clip": {
"type": "range", "min": 0.1, "max": 2.0
},
"--dropout": {
"type": "range", "min": 0.0, "max": 0.9
},
}
constraints = [
{"pattern": 'valid ppl ',
"range": [0, 310.0],
"skip_first": 1,
"formatter": lambda x: float(x[:-1])},
]
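# --- Editor's note: hedged illustration, not part of this example config. ---
# The constraint above appears to discard runs whose validation perplexity
# falls outside [0, 310] (skipping the first match). The "formatter" drops the
# final character of the matched token (presumably a trailing separator) before
# converting it to float; with a hypothetical token this looks like:
# float("305.17|"[:-1]) == 305.17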
# specify result pattern used to parse logs
result_pattern = "valid ppl"
# maximize or minimize
objective = "minimize"
# BACKEND parameters. We will use SLURMBackend to run on DB Cluster
backend = SLURMBackend
backend_params = {
"workers_config": {
"num_workers": 2, # NUMBER OF SLURM *NODES* to run at a time.
"partition": "batch", # PARTITION
"username": "okuchaiev", # CHANGE THIS
"key_path": "/home/okuchaiev/.ssh/id_rsa", # CHANGE THIS
"entrypoint": "prom.nvidia.com", # CHANGE THIS
},
}
search_algorithm = RandomSearch
search_algorithm_params = {
"num_evals": 3,
}
| Milano-master | examples/pytorch/wlm/wlm_slurm.py |
# Copyright (c) 2018 NVIDIA Corporation
from milano.backends import AzkabanBackend
from milano.search_algorithms import RandomSearch
# specify path to the script that is going to be tuned
# path has to be absolute or relative to tune.py script
# this should be on a driver machine
script_to_run = "examples/pytorch/wlm/start_wlm_azkaban.sh"
# specify the tunable parameters as cmd arguments and their possible ranges
params_to_tune = {
"--model": {
"type": "values", "values": ["RNN_TANH", "RNN_RELU", "LSTM", "GRU"]
},
"--emsize": {
"type": "values", "values": [256, 512, 650, 1024, 1504, 2048]
},
"--nlayers": {
"type": "values", "values": [1, 2, 3]
},
"--lr": {
"type": "values", "values": [10, 20, 5, 30, 25, 40],
},
"--nhid": {
"type": "values", "values": [256, 512, 650, 1024, 1504, 2048]
},
"--bptt": {
"type": "values", "values": [15, 20, 30, 35, 45]
},
"--clip": {
"type": "range", "min": 0.1, "max": 2.0
},
"--dropout": {
"type": "range", "min": 0.0, "max": 0.9
},
}
constraints = [
{"pattern": 'valid ppl ',
"range": [0, 310.0],
"skip_first": 4,
"formatter": lambda x: float(x[:-1])},
]
# specify result pattern used to parse logs
result_pattern = "valid ppl"
# maximize or minimize
objective = "minimize"
# specify backend information and workers configuration
backend = AzkabanBackend
backend_params = {
"url": "http://127.0.0.1", # URL of your Azkaban UI
"port": "8081", # Azkaban port. You should see Azkaban UI at url:port
"username": "azkaban",
"password": "azkaban",
# If you are using Azkaban solo server on a single machine, set this to the number of GPUs you have
# PRO TIP: If your workload isn't too heavy, you can allocate more than one worker per GPU as
# is done below:
"workers_config": [
{"num_workers": 1, "env_vars": ["CUDA_VISIBLE_DEVICES=0"]},
{"num_workers": 1, "env_vars": ["CUDA_VISIBLE_DEVICES=1"]},
{"num_workers": 1, "env_vars": ["CUDA_VISIBLE_DEVICES=0"]},
{"num_workers": 1, "env_vars": ["CUDA_VISIBLE_DEVICES=1"]},
],
}
# specify search algorithm to use
search_algorithm = RandomSearch
search_algorithm_params = {
"num_evals": 8,
}
| Milano-master | examples/pytorch/wlm/wlm_azkaban.py |
# Copyright (c) 2018 NVIDIA Corporation
from milano.backends import AWSBackend
from milano.search_algorithms import RandomSearch
# specify path to the script that is going to be tuned
# path has to be absolute or relative to tune.py script
script_to_run = "examples/pytorch/wlm/start_wlm_aws.sh"
# These configurations will be tried first
params_to_try_first = {
"--model": ["LSTM", "GRU"],
"--emsize": [1504, 1504],
"--nlayers": [2, 2],
"--lr": [20, 25],
"--bptt": [35, 35],
"--clip": [0.25, 0.35],
"--dropout": [0.2, 0.2],
}
# specify the tunable parameters as cmd arguments and their possible ranges
params_to_tune = {
"--emsize": {
"type": "values", "values": [256, 512, 650, 1024, 1504, 2048]
},
"--nlayers": {
"type": "values", "values": [1, 2, 3]
},
"--lr": {
"type": "values", "values": [10, 20, 5, 30, 25, 40],
},
"--nhid": {
"type": "values", "values": [256, 512, 650, 1024, 1504, 2048]
},
"--bptt": {
"type": "values", "values": [15, 20, 30, 35, 45]
},
"--clip": {
"type": "range", "min": 0.1, "max": 2.0
},
"--dropout": {
"type": "range", "min": 0.0, "max": 0.9
},
}
# specify result pattern used to parse logs
result_pattern = "valid ppl"
# maximize or minimize
objective = "minimize"
# specify backend information and workers configuration
backend = AWSBackend
backend_params = {
# TODO maybe automate the creation of a keypair if one isn't supplied
"config": {
"num_workers": 1,
"spot_instances": False,
"key_name": "milano-test",
"private_key_path": "/home/okuchaiev/.aws/milano-test.pem", # FILL THIS IN WITH YOUR .pem FILE
"region_name": "us-west-2",
"docker_image_name": "pytorch/pytorch:0.4_cuda9_cudnn7",
# "iam_role": "..." # if omitted, a role with read access to the dataset bucket/prefixes is created.
"datasets": [
{
"type": "s3",
"bucket": "milano-test-data",
"prefix": "cifar-10",
"mount": "/workdir",
},
],
"instance_params": {
"InstanceType": "p3.2xlarge",
}
}
}
# specify search algorithm to use
search_algorithm = RandomSearch
search_algorithm_params = {
"num_evals": 3,
}
| Milano-master | examples/pytorch/wlm/wlm_aws.py |
# Copyright (c) 2018 NVIDIA Corporation
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import argparse
def build_images(results_dir, img_format="png"):
num_steps = 100
results_dict = {}
for bench_dir in os.listdir(os.path.join(results_dir, 'results_csvs')):
bench = bench_dir[6:]
results_dict[bench] = {}
cur_dir = os.path.join(results_dir, 'results_csvs', bench_dir)
for dim_dir in os.listdir(cur_dir):
dim = int(dim_dir[4:])
results_dict[bench][dim] = {}
cur_dir = os.path.join(results_dir, 'results_csvs', bench_dir, dim_dir)
for algo_run_csv in os.listdir(cur_dir):
algo, run = algo_run_csv.split('__')
run = int(run[:-4])
results_df = pd.read_csv(os.path.join(cur_dir, algo_run_csv),
index_col=0)
results_df = results_df.sort_values(by="job_id")["Result:"]
# assuming all runs in same algo/bench/dim
# tuple have same number of steps, i.e. results_df.shape[0] is the same
num_evals = results_df.shape[0]
if algo == '2x_random_search':
num_evals /= 2
plot_steps = np.linspace(10, num_evals, num_steps, dtype=np.int)
values = np.empty(plot_steps.shape)
for i, plot_step in enumerate(plot_steps):
if algo == "2x_random_search":
values[i] = results_df[:plot_step * 2].min()
else:
values[i] = results_df[:plot_step].min()
if algo not in results_dict[bench][dim]:
results_dict[bench][dim][algo] = [plot_steps, values[:, np.newaxis]]
else:
if not np.allclose(plot_steps, results_dict[bench][dim][algo][0]):
raise RuntimeError(
"Check that all runs for bench={}, dim={}, algo={} ".format(
bench, dim, algo
) +
"have the same number of samples"
)
results_dict[bench][dim][algo][1] = np.hstack((
results_dict[bench][dim][algo][1],
values[:, np.newaxis],
))
img_dir = os.path.join(results_dir, 'results_images')
os.makedirs(img_dir, exist_ok=True)
joint_plot_steps = None
steps_matched = True
for bench, dims in results_dict.items():
cur_dir = os.path.join(img_dir, 'bench-{}'.format(bench))
os.makedirs(cur_dir, exist_ok=True)
for dim, algos in dims.items():
cur_dir = os.path.join(
img_dir, 'bench-{}'.format(bench), 'dim-{}'.format(dim)
)
os.makedirs(cur_dir, exist_ok=True)
for aggr_mode in ["first", "second"]:
plt.figure()
for algo, steps_results in algos.items():
plot_steps, results = steps_results
rs_results = results_dict[bench][dim]["random_search"][1]
if algo == "random_search":
assert np.allclose(results, rs_results)
if joint_plot_steps is None:
joint_plot_steps = plot_steps
else:
if not np.allclose(joint_plot_steps, plot_steps):
steps_matched = False
if aggr_mode == "first":
means = np.mean(results, axis=1) / np.mean(rs_results, axis=1)
else:
means = np.mean(results / rs_results, axis=1)
stds = np.std(results / rs_results, axis=1)
if aggr_mode != "first":
plt.errorbar(plot_steps, means, yerr=stds, errorevery=10,
label=algo, alpha=0.8, capsize=3)
else:
plt.plot(plot_steps, means, label=algo)
plt.legend()
plt.title("bench={}, dim={}".format(bench, dim))
plt.xlabel("Number of evaluations")
plt.ylabel("Improvement over random search")
im_name = 'bench-{}__dim-{}__aggr_{}.{}'.format(
bench, dim, aggr_mode, img_format,
)
full_path = os.path.join(cur_dir, im_name)
plt.savefig(full_path, bbox_inches="tight")
plt.close()
if not steps_matched:
print("Different benchmarks/dims have different number of steps, "
"can't draw joint plots.")
return
means_b = {}
nums_b = {}
means_d = {}
nums_d = {}
means_all = {}
nums_all = {}
for bench, dims in results_dict.items():
for dim, algos in dims.items():
for algo, steps_results in algos.items():
res = steps_results[1]
rs_res = results_dict[bench][dim]["random_search"][1]
if algo not in means_all:
means_all[algo] = np.zeros(joint_plot_steps.shape[0])
nums_all[algo] = 0
if bench not in means_b:
means_b[bench] = {}
nums_b[bench] = {}
if algo not in means_b[bench]:
means_b[bench][algo] = np.zeros(joint_plot_steps.shape[0])
nums_b[bench][algo] = 0
if dim not in means_d:
means_d[dim] = {}
nums_d[dim] = {}
if algo not in means_d[dim]:
means_d[dim][algo] = np.zeros(joint_plot_steps.shape[0])
nums_d[dim][algo] = 0
means_b[bench][algo] += np.mean(res, axis=1) / np.mean(rs_res, axis=1)
means_d[dim][algo] += np.mean(res, axis=1) / np.mean(rs_res, axis=1)
means_all[algo] += np.mean(res, axis=1) / np.mean(rs_res, axis=1)
nums_b[bench][algo] += 1
nums_d[dim][algo] += 1
nums_all[algo] += 1
# drawing plots aggregated across dims
for bench, algo_means in means_b.items():
for algo, means in algo_means.items():
means_cur = means / nums_b[bench][algo]
plt.plot(joint_plot_steps, means_cur, label=algo)
plt.legend()
plt.title("bench={}, dim=all".format(bench))
plt.xlabel("Number of evaluations")
plt.ylabel("Improvement over random search")
im_name = 'bench-{}__dim-all.{}'.format(bench, img_format)
full_path = os.path.join(img_dir, im_name)
plt.savefig(full_path, bbox_inches="tight")
plt.close()
# drawing plots aggregated across benchmarks
for dim, algo_means in means_d.items():
for algo, means in algo_means.items():
means_cur = means / nums_d[dim][algo]
plt.plot(joint_plot_steps, means_cur, label=algo)
plt.legend()
plt.title("bench=all, dim={}".format(dim))
plt.xlabel("Number of evaluations")
plt.ylabel("Improvement over random search")
im_name = 'bench-all__dim-{}.{}'.format(dim, img_format)
full_path = os.path.join(img_dir, im_name)
plt.savefig(full_path, bbox_inches="tight")
plt.close()
# drawing one plot aggregated across everything
for algo, means in means_all.items():
means_cur = means / nums_all[algo]
plt.plot(joint_plot_steps, means_cur, label=algo)
plt.legend()
plt.title("bench=all, dim=all".format(dim))
plt.xlabel("Number of evaluations")
plt.ylabel("Improvement over random search")
im_name = 'bench-all__dim-all.{}'.format(img_format)
full_path = os.path.join(img_dir, im_name)
plt.savefig(full_path, bbox_inches="tight")
plt.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--results_dir', required=True,
help='Directory with .csv files with results')
parser.add_argument('--img_format', default="png",
help='Format to generate images in. '
'E.g. png, jpg, pdf, etc.')
args = parser.parse_args()
build_images(args.results_dir, args.img_format)
| Milano-master | benchmarking/build_images.py |
# Copyright (c) 2018 NVIDIA Corporation
import argparse
import runpy
import os
import sys
import pandas as pd
sys.path.insert(0, "../")
from milano.exec_utils import ExecutionManager
from milano.backends import AzkabanBackend
def gen_bench_funcs(bench_type):
funcs_dict = {
"bbob": (prepare_bbob, finalize_bbob),
"cifar10": (prepare_cifar10, finalize_cifar10),
}
return funcs_dict[bench_type]
def prepare_bbob(args, config):
script_to_run = "bbob_func_eval.py"
params_to_tune = {
"func_name": {"type": "values", "values": [args.bench_name]}
}
for i in range(args.bench_dim):
params_to_tune["x{}".format(i)] = {"min": -5, "max": 5, "type": "range"}
if 'backend' not in config:
backend_manager = AzkabanBackend(
script_to_run=script_to_run,
workers_config=[{"num_workers": args.num_workers, "env_vars": []}],
)
else:
backend_manager = config['backend'](
script_to_run=script_to_run,
**config['backend_params'],
)
exp_params = {
'script_to_run': script_to_run,
'params_to_tune': params_to_tune,
'result_pattern': "Result:",
'objective': "minimize",
'backend_manager': backend_manager,
'sleep_time': 0.1,
'wait_for_logs_time': 0.5,
}
return exp_params
def finalize_bbob(out_file):
return
def prepare_cifar10(args, config):
script_to_run = "cifar10_eval.sh"
params_to_tune = {
"--lr_policy_params/learning_rate": {
"type": "log_range", "min": 0.00001, "max": 0.1
},
"--lr_policy_params/power": {
"type": "values", "values": [0.5, 1.0, 2.0]
},
"--batch_size_per_gpu": {
"type": "values", "values": [32, 64, 128, 256, 512, 1024]
},
"--regularizer_params/scale": {
"type": "log_range", "min": 0.00001, "max": 0.001
},
"--optimizer_params/momentum": {
"type": "range", "min": 0.5, "max": 0.99
},
}
if 'backend' not in config:
backend_manager = AzkabanBackend(
script_to_run=script_to_run,
workers_config=[{"num_workers": args.num_workers, "env_vars": []}],
)
else:
backend_manager = config['backend'](
script_to_run=script_to_run,
**config['backend_params'],
)
exp_params = {
'script_to_run': script_to_run,
'params_to_tune': params_to_tune,
'result_pattern': "Validation top-1:",
'objective': "maximize",
'backend_manager': backend_manager,
'sleep_time': 5,
'wait_for_logs_time': 10,
}
return exp_params
def finalize_cifar10(out_file):
# making the resulting csv in the required format for image generation
df = pd.read_csv(out_file, index_col=0)
df['Validation top-1:'] = 1.0 - df['Validation top-1:']
df = df.rename(index=str, columns={"Validation top-1:": "Result:"})
df.to_csv(out_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Benchmarking parameters')
parser.add_argument("--bench_name", required=True,
help="Benchmark name, e.g. sphere, rastrigin, etc.")
parser.add_argument("--config", required=True, help="Config to use.")
parser.add_argument("--bench_dim", type=int, default=2,
help="Benchmarking dimensionality")
parser.add_argument("--num_evals", type=int, default=100,
help="Maximum number of evaluations "
"allowed for algorithm.")
parser.add_argument("--num_workers", type=int, default=10,
help="Number of workers to use.")
parser.add_argument("--verbose", type=int, default=1,
help="How much output to print. Setting to 0 mutes "
"the script, 3 is the highest level.")
parser.add_argument("--output_file", required=False,
help="Output file to save the results to.")
args = parser.parse_args()
config = runpy.run_path(args.config)
if args.bench_name != "cifar10":
prepare_func, finalize_func = gen_bench_funcs("bbob")
else:
prepare_func, finalize_func = gen_bench_funcs("cifar10")
exp_params = prepare_func(args, config)
# a hack to make 2x_random_search work
algo_name = os.path.basename(args.config)[:-3] # stripping off .py
if algo_name == '2x_random_search':
args.num_evals *= 2
search_algorithm = config['search_algorithm'](
params_to_tune=exp_params['params_to_tune'],
objective=exp_params['objective'],
num_evals=args.num_evals,
**config['search_algorithm_params'],
)
if args.output_file is not None:
out_file = args.output_file
else:
out_file = "{}__{}.csv".format(args.bench_name, algo_name)
exec_mng = ExecutionManager(
backend_manager=exp_params['backend_manager'],
search_algorithm=search_algorithm,
res_pattern=exp_params['result_pattern'],
objective=exp_params['objective'],
constraints=[],
output_file=out_file,
verbose=args.verbose,
sleep_time=exp_params['sleep_time'],
wait_for_logs_time=exp_params['wait_for_logs_time'],
)
exec_mng.start_tuning()
print("\nScore: {:.6f}".format(exec_mng.final_results.iloc[0, 0]))
finalize_func(out_file)
| Milano-master | benchmarking/benchmark_algo.py |
# Copyright (c) 2018 NVIDIA Corporation
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import inspect
import sys
class BenchmarkGenerator:
def __init__(self, random_seed=None, dim=1):
if random_seed is not None:
np.random.seed(random_seed)
self._dim = dim
def _gen_xopt_fopt(self):
x_opt = np.random.uniform(-5.0, 5.0, size=self._dim)
# TODO: should this be Cauchy as in the BBOB paper?
f_opt = np.random.uniform(-1000, 1000)
return x_opt, f_opt
def _tosz(self, x):
x_hat = np.zeros_like(x)
x_hat[x != 0] = np.log(np.abs(x[x != 0]))
c1 = np.ones_like(x) * 5.5
c1[x > 0] = 10
c2 = np.ones_like(x) * 3.1
c2[x > 0] = 7.9
return np.sign(x) * \
np.exp(x_hat + 0.049 * (np.sin(c1 * x_hat) + np.sin(c2 * x_hat)))
def _tasy(self, x, beta):
if self._dim == 1:
raise ValueError("Tasy can't be applied with D=1, use dim > 1")
assert len(x.shape) == 1
assert x.shape[0] == self._dim
x_tr = x.copy()
x_tr_gr0 = x_tr[x_tr > 0]
tmp_pr = (np.arange(self._dim) / (self._dim - 1.0))[x_tr > 0]
x_tr[x_tr > 0] = x_tr_gr0 ** (1.0 + beta * tmp_pr * np.sqrt(x_tr_gr0))
return x_tr
def _lambda(self, alpha):
if self._dim == 1:
raise ValueError("Can't build lambda with dim < 2")
return np.diag(alpha ** (0.5 * np.arange(self._dim) / (self._dim - 1.0)))
def get_function_by_name(self, name):
for m_name, m_func in inspect.getmembers(self, predicate=inspect.ismethod):
if m_name == "get_{}".format(name):
return m_func()
raise ValueError('Function "{}" is not supported'.format(name))
def get_sphere(self):
x_opt, f_opt = self._gen_xopt_fopt()
def func(x):
z = x - x_opt
return np.sum(z ** 2) + f_opt
return func, x_opt, f_opt
def get_elipsoidal(self):
if self._dim == 1:
raise ValueError("Can't build 1D elipsoid, dim should be > 1")
x_opt, f_opt = self._gen_xopt_fopt()
def func(x):
z = self._tosz(x - x_opt)
return np.sum(1e6 ** (np.arange(self._dim) / (self._dim - 1.0)) *
z ** 2) + f_opt
return func, x_opt, f_opt
def get_rastrigin(self):
x_opt, f_opt = self._gen_xopt_fopt()
def func(x):
z = self._lambda(10.0).dot(self._tasy(self._tosz(x - x_opt), 0.2))
return 10.0 * (self._dim - np.sum(np.cos(2.0 * np.pi * z))) + \
np.sum(z ** 2) + f_opt
return func, x_opt, f_opt
def get_rosenbrock(self):
x_opt, f_opt = self._gen_xopt_fopt()
def func(x):
z = np.maximum(1.0, np.sqrt(1.0 * self._dim) / 8.0) * (x - x_opt) + 1.0
return np.sum(100.0 * (z[:-1] ** 2 - z[1:]) ** 2 + (z[:-1] - 1.0) ** 2) \
+ f_opt
return func, x_opt, f_opt
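# --- Editor's note: hedged usage sketch for the generators above, not part of the
# original benchmarking script. At x_opt the shifted Rosenbrock reduces to its
# offset, because z becomes a vector of ones and every squared term vanishes:
def _rosenbrock_sanity_check():
    bg = BenchmarkGenerator(random_seed=0, dim=4)
    func, x_opt, f_opt = bg.get_rosenbrock()
    assert abs(func(x_opt) - f_opt) < 1e-9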
def visualize_function(self, func, show_3d=True, show_3d_inv=True,
show_contour=True, num_levels=15, rng_x=(-5, 5),
rng_y=(-5, 5)):
import matplotlib.pyplot as plt
if self._dim == 1:
xs = np.linspace(rng_x[0], rng_x[1], 100)
plt.plot(xs, np.apply_along_axis(func, 0, xs[np.newaxis, :]))
elif self._dim == 2:
freq = 50
x = np.linspace(rng_x[0], rng_x[1], freq)
y = np.linspace(rng_y[0], rng_y[1], freq)
Xs, Ys = np.meshgrid(x, y)
xs = np.reshape(Xs, -1)
ys = np.reshape(Ys, -1)
zs = np.apply_along_axis(func, 0, np.vstack((xs, ys)))
if show_3d:
fig = plt.figure(figsize=(10, 10))
ax = fig.gca(projection='3d')
ax.plot_trisurf(xs, ys, zs, linewidth=0.2, antialiased=True)
plt.show()
if show_3d_inv:
fig = plt.figure(figsize=(10, 10))
ax = fig.gca(projection='3d')
ax.invert_zaxis()
ax.plot_trisurf(xs, ys, zs, linewidth=0.2, antialiased=True)
plt.show()
if show_contour:
fig = plt.figure(figsize=(10, 10))
cs = plt.contour(Xs, Ys, zs.reshape(freq, freq), num_levels)
plt.clabel(cs, inline=1, fontsize=10)
plt.show()
else:
raise ValueError("Only dim=1 or dim=2 are supported")
if __name__ == '__main__':
if len(sys.argv) < 3:
print("Not enough arguments provided. Should be func_name= x1= x2= ...")
sys.exit(1)
func_name = None
x = np.empty(len(sys.argv) - 2)
for arg in sys.argv[1:]:
name, value = arg.split('=')
if name == 'func_name':
func_name = value
else:
pos = int(name[1:])
x[pos] = value
if func_name is None:
raise ValueError("func_name is not defined")
benchmarks = BenchmarkGenerator(random_seed=0, dim=x.shape[0])
func, x_opt, f_opt = benchmarks.get_function_by_name(func_name)
value = func(x)
print("Result: {}".format(np.abs(f_opt - value)))
| Milano-master | benchmarking/bbob_func_eval.py |
# Copyright (c) 2018 NVIDIA Corporation
import os
import argparse
import subprocess
import sys
from build_images import build_images
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--configs_dir', default="benchmarking_configs",
help='Directory with all benchmarking configs '
'that should be evaluated.')
parser.add_argument('--output_dir', default='benchmarking_results',
help="Directory to store results in.")
parser.add_argument('--reuse_results', dest='reuse_results',
action='store_true',
help="Whether to reuse existing results")
parser.add_argument('--num_evals', default=100, type=int,
help="Maximum number of evaluations allowed "
"for each algorithm")
parser.add_argument('--num_algo_runs', default=5, type=int,
help="Number of times each algorithm is "
"going to be applied to a single benchmark. "
"Results will be later averaged.")
parser.add_argument('--benchmarks', nargs='+', required=False,
help='Benchmarks to evaluate algorithms on. By default '
'all available benchmarks will be used')
parser.add_argument('--dims', nargs='+', required=False, type=int,
help='Dimensions of benchmarks (each benchmark is '
'going to be tested with all dims listed here)')
parser.add_argument('--python_bin', default="python",
help="python3 executable, e.g. python or python3")
args = parser.parse_args()
configs = [os.path.join(args.configs_dir, cfg)
for cfg in os.listdir(args.configs_dir)]
if "random_search" not in [os.path.basename(cfg)[:-3] for cfg in configs]:
raise ValueError("random_search.py config has to be present "
"for full benchmarking")
if args.benchmarks is None:
benchmarks = ['sphere', 'elipsoidal', 'rastrigin', 'rosenbrock', 'cifar10']
else:
benchmarks = args.benchmarks
num_algo_runs = args.num_algo_runs
num_evals = args.num_evals
if args.dims is None:
dims = [4, 8, 16, 32, 64]
else:
dims = args.dims
base_dir = args.output_dir
if not args.reuse_results:
if os.path.exists(base_dir):
print("Directory {} already exists, did you want ".format(base_dir) +
"to specify --reuse_results flag?")
sys.exit(1)
os.makedirs(os.path.join(base_dir, 'results_csvs'), exist_ok=True)
for config in configs:
for bench in benchmarks:
for i in range(num_algo_runs):
cur_dims = dims if bench != "cifar10" else [5]
for dim in cur_dims:
algo_name = os.path.basename(config)[:-3]
dir_name = os.path.join(
base_dir, 'results_csvs',
'bench-{}'.format(bench), 'dim-{}'.format(dim),
)
os.makedirs(dir_name, exist_ok=True)
out_name = os.path.join(
dir_name,
"{}__{}.csv".format(algo_name, i)
)
if args.reuse_results and os.path.isfile(out_name):
continue
run_cmd = "{} benchmark_algo.py --bench_name={} ".format(
args.python_bin, bench
)
run_cmd += "--config={} --num_evals={} ".format(config, num_evals)
run_cmd += "--bench_dim={} --output_file={}".format(dim, out_name)
print('Testing "{}" on "{}" with dim={}, run #{}'.format(
algo_name, bench, dim, i
))
subprocess.run(run_cmd, shell=True, check=True)
build_images(args.output_dir)
| Milano-master | benchmarking/run_benchmarks.py |
# Copyright (c) 2018 NVIDIA Corporation
from milano.search_algorithms import RandomSearch
# Note that this config the same as random_search.py
# This is intentional and additional steps are automatically
# added by full_benchmark.py
search_algorithm = RandomSearch
search_algorithm_params = {}
| Milano-master | benchmarking/benchmarking_configs/2x_random_search.py |
# Copyright (c) 2018 NVIDIA Corporation
from milano.search_algorithms import RandomSearch
# For benchmarks only search algorithm need to be specified.
# Optionally you can also specify a custom backend.
# If not specified, AzkabanBackend with default parameters and
# 10 identical workers will be used.
search_algorithm = RandomSearch
# note that you don't need to provide "num_evals" parameter,
# as it will be overwritten by benchmarking script
search_algorithm_params = {}
| Milano-master | benchmarking/benchmarking_configs/random_search.py |
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import subprocess
import os
import os.path
import sys
import re
from optparse import OptionParser
VERBOSE = False
JS_FILES = [
"../tdl/base.js",
"../tdl/buffers.js",
"../tdl/clock.js",
"../tdl/fast.js",
"../tdl/fps.js",
"../tdl/framebuffers.js",
"../tdl/fullscreen.js",
"../tdl/io.js",
"../tdl/loader.js",
"../tdl/log.js",
"../tdl/math.js",
"../tdl/misc.js",
"../tdl/models.js",
"../tdl/particles.js",
"../tdl/primitives.js",
"../tdl/programs.js",
"../tdl/quaternions.js",
"../tdl/screenshot.js",
"../tdl/shader.js",
"../tdl/string.js",
"../tdl/sync.js",
"../tdl/textures.js",
"../tdl/webgl.js",
]
def Execute(cmd, args, file=None):
global VERBOSE
if VERBOSE:
print "%s %s" % (cmd, " ".join(args))
return subprocess.Popen([cmd] + args, stdin=file, stdout=subprocess.PIPE).communicate()[0]
def main(argv):
"""This is the main function."""
global VERBOSE
parser = OptionParser()
parser.add_option(
"-v", "--verbose", action="store_true",
help="prints more output.")
(options, args) = parser.parse_args(args=argv)
VERBOSE = options.verbose
out_file = "tdl-min.js"
files = ["--js=%s" % (name) for name in JS_FILES]
#print files
Execute("java",
["-jar", "compiler.jar",
("--js_output_file=%s" % (out_file, )),
] + files)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| tdl-master | build/build.py |
#!/usr/bin/python3
# SPDX-License-Identifier: BSD-2-Clause-Patent
import os
import sys
import json
import shutil
import pprint
import argparse
import subprocess
def openssl_configure(openssldir, target, ec = True):
""" Run openssl Configure script. """
cmdline = [
'perl',
'Configure',
'--config=../UefiAsm.conf',
'--api=1.1.1',
'--with-rand-seed=none',
target,
'no-afalgeng',
'no-aria',
'no-async',
'no-autoerrinit',
'no-autoload-config',
'no-bf',
'no-blake2',
'no-camellia',
'no-capieng',
'no-cast',
'no-chacha',
'no-cmac',
'no-cmp',
'no-cms',
'no-ct',
'no-deprecated',
'no-des',
'no-dgram',
'no-dsa',
'no-dso',
'no-dtls',
'no-dtls1-method',
'no-dtls1_2-method',
'no-dynamic-engine',
'no-ec2m',
'no-engine',
'no-err',
'no-filenames',
'no-gost',
'no-hw',
'no-idea',
'no-ktls',
'no-makedepend',
'no-module',
'no-md4',
'no-mdc2',
'no-multiblock',
'no-nextprotoneg',
'no-pic',
'no-psk',
'no-ocb',
'no-ocsp',
'no-padlockeng',
'no-poly1305',
'no-posix-io',
'no-rc2',
'no-rc4',
'no-rc5',
'no-rfc3779',
'no-rmd160',
'no-scrypt',
'no-seed',
'no-shared',
'no-siphash',
'no-siv',
'no-sm2',
'no-sm4',
'no-sock',
'no-srp',
'no-srtp',
'no-ssl',
'no-ssl3-method',
'no-ssl-trace',
'no-static-engine',
'no-stdio',
'no-threads',
'no-tls1_3',
'no-ts',
'no-ui-console',
'no-whirlpool',
'disable-legacy',
]
if not ec:
cmdline += [ 'no-ec', ]
print('')
print(f'# -*- configure openssl for {target} (ec={ec}) -*-')
rc = subprocess.run(cmdline, cwd = openssldir,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
if rc.returncode:
print(rc.stdout)
print(rc.stderr)
sys.exit(rc.returncode)
def openssl_run_make(openssldir, target):
"""
Run make utility to generate files or cleanup.
Target can be either a string or a list of strings.
"""
cmdline = [ 'make', '--silent' ]
if isinstance(target, list):
cmdline += target
else:
cmdline += [ target, ]
rc = subprocess.run(cmdline, cwd = openssldir)
rc.check_returncode()
def get_configdata(openssldir):
"""
Slurp openssl config data as JSON,
using a little perl helper script.
"""
cmdline = [
'perl',
'perl2json.pl',
openssldir,
]
rc = subprocess.run(cmdline, stdout = subprocess.PIPE)
rc.check_returncode()
return json.loads(rc.stdout)
def is_asm(filename):
""" Check whenevr the passed file is an assembler file """
if filename.endswith('.s') or filename.endswith('.S'):
return True
return False
def copy_generated_file(src, dst):
src_file = []
with open(src, 'r') as fsrc:
src_file = fsrc.readlines()
with open(dst, 'w') as fdst:
for lines in range(len(src_file)):
s = src_file[lines]
s = s.rstrip() + "\r\n"
fdst.write(s.expandtabs())
def generate_files(openssldir, opensslgendir, asm, filelist):
"""
Generate files, using make, and copy over the results to the
directory tree for generated openssl files. Creates
subdirectories as needed.
"""
openssl_run_make(openssldir, filelist)
for filename in filelist:
src = os.path.join(openssldir, filename)
if is_asm(filename):
""" rename MSFT asm files to .nasm """
if 'IA32-MSFT' in asm:
filename = filename.replace('.S', '.nasm')
elif 'X64-MSFT' in asm:
filename = filename.replace('.s', '.nasm')
dst = os.path.join(opensslgendir, asm, filename)
else:
dst = os.path.join(opensslgendir, filename)
os.makedirs(os.path.dirname(dst), exist_ok = True)
copy_generated_file(src, dst)
def generate_include_files(openssldir, opensslgendir, asm, cfg):
""" Generate openssl include files """
print('# generate include files')
filelist = cfg['unified_info']['generate'].keys()
filelist = list(filter(lambda f: 'include' in f, filelist))
generate_files(openssldir, opensslgendir, asm, filelist)
def generate_library_files(openssldir, opensslgendir, asm, cfg, obj):
"""
Generate openssl source files for a given library. Handles
mostly assembler files, but a few C sources are generated too.
"""
filelist = get_source_list(cfg, obj, True)
if filelist:
print(f'# generate source files for {obj}')
generate_files(openssldir, opensslgendir, asm, filelist)
def generate_all_files(openssldir, opensslgendir, asm, cfg):
""" Generate all files needed. """
generate_include_files(openssldir, opensslgendir, asm, cfg)
generate_library_files(openssldir, opensslgendir, asm, cfg, 'libcrypto')
generate_library_files(openssldir, opensslgendir, asm, cfg, 'providers/libcommon.a')
generate_library_files(openssldir, opensslgendir, asm, cfg, 'libssl')
def get_source_list(cfg, obj, gen):
"""
Gets the list of source files needed to create a specific object.
* If 'gen' is True the function returns the list of generated
files.
* If 'gen' is False the function returns the list of files not
generated (which are used from the submodule directly).
Note: Will call itself recursively to resolve nested dependencies.
"""
sources = cfg['unified_info']['sources']
generate = cfg['unified_info']['generate']
srclist = []
if sources.get(obj):
for item in sources.get(obj):
srclist += get_source_list(cfg, item, gen)
else:
is_generated = generate.get(obj) is not None
if is_generated == gen:
srclist += [ obj, ]
return srclist
def asm_filter_fn(filename):
"""
Filter asm source and define lists. Drops files we don't want to include.
"""
exclude = [
'/bn/',
'OPENSSL_BN_ASM',
'OPENSSL_IA32_SSE2',
'/ec/',
'ECP_NISTZ256_ASM',
'X25519_ASM',
]
for item in exclude:
if item in filename:
return False
return True
def get_sources(cfg, obj, asm):
"""
Get the list of all source files. Will fetch both generated
and not generated file lists and update the paths accordingly, so
the openssl submodule or the sub-tree for generated files is
referenced as needed.
"""
srclist = get_source_list(cfg, obj, False)
genlist = get_source_list(cfg, obj, True)
srclist = list(map(lambda x: f'$(OPENSSL_PATH)/{x}', srclist))
c_list = list(map(lambda x: f'$(OPENSSL_GEN_PATH)/{x}',
filter(lambda x: not is_asm(x), genlist)))
asm_list = list(map(lambda x: f'$(OPENSSL_GEN_PATH)/{asm}/{x}',
filter(is_asm, genlist)))
asm_list = list(filter(asm_filter_fn, asm_list))
return srclist + c_list + asm_list
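# --- Editor's note: hedged illustration of the mapping above, not in the original
# script. A non-generated source such as 'crypto/foo.c' (hypothetical name) ends
# up as '$(OPENSSL_PATH)/crypto/foo.c', while a generated assembler file
# 'crypto/bar.s' built for the 'X64-GCC' profile ends up as
# '$(OPENSSL_GEN_PATH)/X64-GCC/crypto/bar.s', subject to the asm_filter_fn
# exclusions defined above.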
def sources_filter_fn(filename):
"""
Filter source lists. Drops files we don't want to include or
need to replace with our own uefi-specific version.
"""
exclude = [
'randfile.c',
'/store/',
'/storemgmt/',
'/encode_decode/encode',
'/pkcs12/',
'statem_srvr.c',
'extensions_srvr.c',
'defltprov.c',
'baseprov.c',
'provider_predefined.c',
'ecp_nistz256.c',
'x86_64-gcc.c',
]
for item in exclude:
if item in filename:
return False
return True
def libcrypto_sources(cfg, asm = None):
""" Get source file list for libcrypto """
files = get_sources(cfg, 'libcrypto', asm)
files += get_sources(cfg, 'providers/libcommon.a', asm)
files = list(filter(sources_filter_fn, files))
return files
def libssl_sources(cfg, asm = None):
""" Get source file list for libssl """
files = get_sources(cfg, 'libssl', asm)
files = list(filter(sources_filter_fn, files))
return files
def update_inf(filename, sources, arch = None, defines = []):
"""
Update inf file, replace source file list and build flags.
"""
head = ''
tail = ''
state = 0
if arch:
section = f'Sources.{arch}'
flags = f'OPENSSL_FLAGS_{arch}'
else:
section = None
flags = f'OPENSSL_FLAGS_NOASM'
state = 1
# read and parse file
with open(filename, 'r') as f:
while True:
line = f.readline()
if line == '':
break
if state in [0, 1]:
if flags in line:
(keep, replace) = line.split('=')
args = map(lambda x: f'-D{x}', defines)
head += keep + '= ' + ' '.join(args)
head = head.rstrip() + '\r\n'
else:
head += line.rstrip() + '\r\n'
if state == 0 and section in line:
state = 1
if state == 1 and 'Autogenerated files list starts here' in line:
state = 2
if state == 2 and 'Autogenerated files list ends here' in line:
state = 3
if state == 3:
tail += line.rstrip() + '\r\n'
# write updated file
with open(filename, 'w') as f:
f.write(head)
for src in sources:
f.write(f' {src}\r\n')
f.write(tail)
def update_MSFT_asm_format(asm, filelist):
""" rename MSFT asm files to .nasm """
if 'IA32-MSFT' in asm:
for file_index in range(len(filelist)):
filelist[file_index] = filelist[file_index].replace('.S', '.nasm')
elif 'X64-MSFT' in asm:
for file_index in range(len(filelist)):
filelist[file_index] = filelist[file_index].replace('.s', '.nasm')
def main():
# prepare
os.chdir(os.path.dirname(os.path.abspath(__file__)))
openssldir = os.path.join(os.getcwd(), 'openssl')
opensslgendir = os.path.join(os.getcwd(), 'OpensslGen')
# asm accel configs (see UefiAsm.conf)
for ec in [True, False]:
if ec:
inf = 'OpensslLibFullAccel.inf'
hdr = 'configuration-ec.h'
else:
inf = 'OpensslLibAccel.inf'
hdr = 'configuration-noec.h'
sources = {}
defines = {}
for asm in [ 'UEFI-IA32-MSFT', 'UEFI-IA32-GCC',
'UEFI-X64-MSFT', 'UEFI-X64-GCC']:
(uefi, arch, cc) = asm.split('-')
archcc = f'{arch}-{cc}'
openssl_configure(openssldir, asm, ec = ec);
cfg = get_configdata(openssldir)
generate_all_files(openssldir, opensslgendir, archcc, cfg)
shutil.move(os.path.join(opensslgendir, 'include', 'openssl', 'configuration.h'),
os.path.join(opensslgendir, 'include', 'openssl', hdr))
openssl_run_make(openssldir, 'distclean')
srclist = libcrypto_sources(cfg, archcc) + libssl_sources(cfg, archcc)
sources[archcc] = list(map(lambda x: f'{x} | {cc}', filter(is_asm, srclist)))
update_MSFT_asm_format(archcc, sources[archcc])
sources[arch] = list(filter(lambda x: not is_asm(x), srclist))
defines[arch] = cfg['unified_info']['defines']['libcrypto']
defines[arch] = list(filter(asm_filter_fn, defines[arch]))
ia32accel = sources['IA32'] + sources['IA32-MSFT'] + sources['IA32-GCC']
x64accel = sources['X64'] + sources['X64-MSFT'] + sources['X64-GCC']
update_inf(inf, ia32accel, 'IA32', defines['IA32'])
update_inf(inf, x64accel, 'X64', defines['X64'])
# noaccel - ec enabled
openssl_configure(openssldir, 'UEFI', ec = True);
cfg = get_configdata(openssldir)
generate_all_files(openssldir, opensslgendir, None, cfg)
openssl_run_make(openssldir, 'distclean')
defines = []
if 'libcrypto' in cfg['unified_info']['defines']:
defines = cfg['unified_info']['defines']['libcrypto']
update_inf('OpensslLibFull.inf',
libcrypto_sources(cfg) + libssl_sources(cfg),
None, defines)
# noaccel - ec disabled
openssl_configure(openssldir, 'UEFI', ec = False);
cfg = get_configdata(openssldir)
generate_all_files(openssldir, opensslgendir, None, cfg)
openssl_run_make(openssldir, 'distclean')
update_inf('OpensslLibCrypto.inf',
libcrypto_sources(cfg),
None, defines)
update_inf('OpensslLib.inf',
libcrypto_sources(cfg) + libssl_sources(cfg),
None, defines)
# wrap header file
confighdr = os.path.join(opensslgendir, 'include', 'openssl', 'configuration.h')
with open(confighdr, 'w') as f:
f.write('#ifdef EDK2_OPENSSL_NOEC\r\n'
'# include "configuration-noec.h"\r\n'
'#else\r\n'
'# include "configuration-ec.h"\r\n'
'#endif\r\n')
if __name__ == '__main__':
sys.exit(main())
| edk2-master | CryptoPkg/Library/OpensslLib/configure.py |
# @file Edk2ToolsBuild.py
# Invocable class that builds the basetool c files.
#
# Supports VS2017, VS2019, and GCC5
##
# Copyright (c) Microsoft Corporation
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys
import logging
import argparse
import multiprocessing
from edk2toolext import edk2_logging
from edk2toolext.environment import self_describing_environment
from edk2toolext.base_abstract_invocable import BaseAbstractInvocable
from edk2toollib.utility_functions import RunCmd
from edk2toollib.windows.locate_tools import QueryVcVariables
class Edk2ToolsBuild(BaseAbstractInvocable):
def ParseCommandLineOptions(self):
''' parse arguments '''
ParserObj = argparse.ArgumentParser()
ParserObj.add_argument("-t", "--tool_chain_tag", dest="tct", default="VS2017",
help="Set the toolchain used to compile the build tools")
args = ParserObj.parse_args()
self.tool_chain_tag = args.tct
def GetWorkspaceRoot(self):
''' Return the workspace root for initializing the SDE '''
# this is the BaseTools dir...not the traditional EDK2 workspace root
return os.path.dirname(os.path.abspath(__file__))
def GetActiveScopes(self):
''' return tuple containing scopes that should be active for this process '''
# for now don't use scopes
return ('global',)
def GetLoggingLevel(self, loggerType):
''' Get the logging level for a given type (return Logging.Level)
base == lowest logging level supported
con == Screen logging
txt == plain text file logging
md == markdown file logging
'''
if(loggerType == "con"):
return logging.ERROR
else:
return logging.DEBUG
def GetLoggingFolderRelativeToRoot(self):
''' Return a path to folder for log files '''
return "BaseToolsBuild"
def GetVerifyCheckRequired(self):
''' Will call self_describing_environment.VerifyEnvironment if this returns True '''
return True
def GetLoggingFileName(self, loggerType):
''' Get the logging file name for the type.
Return None if the logger shouldn't be created
base == lowest logging level supported
con == Screen logging
txt == plain text file logging
md == markdown file logging
'''
return "BASETOOLS_BUILD"
def WritePathEnvFile(self, OutputDir):
''' Write a PyTool path env file for future PyTool based edk2 builds'''
content = '''##
# Set shell variable EDK_TOOLS_BIN to this folder
#
# Autogenerated by Edk2ToolsBuild.py
#
# Copyright (c), Microsoft Corporation
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
{
"id": "You-Built-BaseTools",
"scope": "edk2-build",
"flags": ["set_shell_var", "set_path"],
"var_name": "EDK_TOOLS_BIN"
}
'''
with open(os.path.join(OutputDir, "basetoolsbin_path_env.yaml"), "w") as f:
f.write(content)
def Go(self):
logging.info("Running Python version: " + str(sys.version_info))
(build_env, shell_env) = self_describing_environment.BootstrapEnvironment(
self.GetWorkspaceRoot(), self.GetActiveScopes())
# # Bind our current execution environment into the shell vars.
ph = os.path.dirname(sys.executable)
if " " in ph:
ph = '"' + ph + '"'
shell_env.set_shell_var("PYTHON_HOME", ph)
# PYTHON_COMMAND is required to be set for using edk2 python builds.
pc = sys.executable
if " " in pc:
pc = '"' + pc + '"'
shell_env.set_shell_var("PYTHON_COMMAND", pc)
if self.tool_chain_tag.lower().startswith("vs"):
# # Update environment with required VC vars.
interesting_keys = ["ExtensionSdkDir", "INCLUDE", "LIB"]
interesting_keys.extend(
["LIBPATH", "Path", "UniversalCRTSdkDir", "UCRTVersion", "WindowsLibPath", "WindowsSdkBinPath"])
interesting_keys.extend(
["WindowsSdkDir", "WindowsSdkVerBinPath", "WindowsSDKVersion", "VCToolsInstallDir"])
vc_vars = QueryVcVariables(
interesting_keys, 'x86', vs_version=self.tool_chain_tag.lower())
for key in vc_vars.keys():
logging.debug(f"Var - {key} = {vc_vars[key]}")
if key.lower() == 'path':
shell_env.set_path(vc_vars[key])
else:
shell_env.set_shell_var(key, vc_vars[key])
self.OutputDir = os.path.join(
shell_env.get_shell_var("EDK_TOOLS_PATH"), "Bin", "Win32")
# compiled tools need to be added to path because antlr is referenced
shell_env.insert_path(self.OutputDir)
# Actually build the tools.
output_stream = edk2_logging.create_output_stream()
ret = RunCmd('nmake.exe', None,
workingdir=shell_env.get_shell_var("EDK_TOOLS_PATH"))
edk2_logging.remove_output_stream(output_stream)
problems = edk2_logging.scan_compiler_output(output_stream)
for level, problem in problems:
logging.log(level, problem)
if ret != 0:
raise Exception("Failed to build.")
self.WritePathEnvFile(self.OutputDir)
return ret
elif self.tool_chain_tag.lower().startswith("gcc"):
cpu_count = self.GetCpuThreads()
output_stream = edk2_logging.create_output_stream()
ret = RunCmd("make", f"-C . -j {cpu_count}", workingdir=shell_env.get_shell_var("EDK_TOOLS_PATH"))
edk2_logging.remove_output_stream(output_stream)
problems = edk2_logging.scan_compiler_output(output_stream)
for level, problem in problems:
logging.log(level, problem)
if ret != 0:
raise Exception("Failed to build.")
self.OutputDir = os.path.join(
shell_env.get_shell_var("EDK_TOOLS_PATH"), "Source", "C", "bin")
self.WritePathEnvFile(self.OutputDir)
return ret
logging.critical("Tool Chain not supported")
return -1
def GetCpuThreads(self) -> int:
''' Function to return number of cpus. If error return 1'''
cpus = 1
try:
cpus = multiprocessing.cpu_count()
except:
# cpu_count() is not implemented on some platforms;
# fall back to a single-process build in that case
pass
return cpus
def main():
Edk2ToolsBuild().Invoke()
if __name__ == "__main__":
main()
| edk2-master | BaseTools/Edk2ToolsBuild.py |
## @file
#
#
# Copyright (c) 2009 - 2014, Apple Inc. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
import sys
import locale
if sys.platform == "darwin" and sys.version_info[0] < 3:
DefaultLocal = locale.getdefaultlocale()[1]
if DefaultLocal is None:
DefaultLocal = 'UTF8'
sys.setdefaultencoding(DefaultLocal)
| edk2-master | BaseTools/Source/Python/sitecustomize.py |
## @file
# generate capsule
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from .GenFdsGlobalVariable import GenFdsGlobalVariable, FindExtendTool
from CommonDataClass.FdfClass import CapsuleClassObject
import Common.LongFilePathOs as os
from io import BytesIO
from Common.Misc import SaveFileOnChange, PackGUID
import uuid
from struct import pack
from Common import EdkLogger
from Common.BuildToolError import GENFDS_ERROR
from Common.DataType import TAB_LINE_BREAK
WIN_CERT_REVISION = 0x0200
WIN_CERT_TYPE_EFI_GUID = 0x0EF1
EFI_CERT_TYPE_PKCS7_GUID = uuid.UUID('{4aafd29d-68df-49ee-8aa9-347d375665a7}')
EFI_CERT_TYPE_RSA2048_SHA256_GUID = uuid.UUID('{a7717414-c616-4977-9420-844712a735bf}')
## Create an INF file that describes what goes into the capsule and call GenFv to generate the capsule
#
#
class Capsule (CapsuleClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
CapsuleClassObject.__init__(self)
# For GenFv
self.BlockSize = None
# For GenFv
self.BlockNum = None
self.CapsuleName = None
## Generate FMP capsule
#
# @retval string Generated Capsule file path
#
def GenFmpCapsule(self):
#
# Generate capsule header
# typedef struct {
# EFI_GUID CapsuleGuid;
# UINT32 HeaderSize;
# UINT32 Flags;
# UINT32 CapsuleImageSize;
# } EFI_CAPSULE_HEADER;
#
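        # Illustrative note (not part of the original source): the fixed part
        # of EFI_CAPSULE_HEADER is 16 (CapsuleGuid) + 4 + 4 + 4 = 28 bytes,
        # which is why the code further below pads with HdrSize - 28 zero bytes
        # when a larger HeaderSize is requested.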
Header = BytesIO()
#
# Use FMP capsule GUID: 6DCBD5ED-E82D-4C44-BDA1-7194199AD92A
#
Header.write(PackGUID('6DCBD5ED-E82D-4C44-BDA1-7194199AD92A'.split('-')))
HdrSize = 0
if 'CAPSULE_HEADER_SIZE' in self.TokensDict:
Header.write(pack('=I', int(self.TokensDict['CAPSULE_HEADER_SIZE'], 16)))
HdrSize = int(self.TokensDict['CAPSULE_HEADER_SIZE'], 16)
else:
Header.write(pack('=I', 0x20))
HdrSize = 0x20
Flags = 0
if 'CAPSULE_FLAGS' in self.TokensDict:
for flag in self.TokensDict['CAPSULE_FLAGS'].split(','):
flag = flag.strip()
if flag == 'PopulateSystemTable':
Flags |= 0x00010000 | 0x00020000
elif flag == 'PersistAcrossReset':
Flags |= 0x00010000
elif flag == 'InitiateReset':
Flags |= 0x00040000
Header.write(pack('=I', Flags))
#
# typedef struct {
# UINT32 Version;
# UINT16 EmbeddedDriverCount;
# UINT16 PayloadItemCount;
# // UINT64 ItemOffsetList[];
# } EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER;
#
FwMgrHdr = BytesIO()
if 'CAPSULE_HEADER_INIT_VERSION' in self.TokensDict:
FwMgrHdr.write(pack('=I', int(self.TokensDict['CAPSULE_HEADER_INIT_VERSION'], 16)))
else:
FwMgrHdr.write(pack('=I', 0x00000001))
FwMgrHdr.write(pack('=HH', len(self.CapsuleDataList), len(self.FmpPayloadList)))
FwMgrHdrSize = 4+2+2+8*(len(self.CapsuleDataList)+len(self.FmpPayloadList))
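        # Illustrative note (not part of the original source): with, for
        # example, two embedded drivers and one FMP payload the header above
        # occupies 4 + 2 + 2 + 8 * (2 + 1) = 32 bytes, i.e. the three fixed
        # fields plus one UINT64 ItemOffsetList entry per item.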
#
# typedef struct _WIN_CERTIFICATE {
# UINT32 dwLength;
# UINT16 wRevision;
# UINT16 wCertificateType;
# //UINT8 bCertificate[ANYSIZE_ARRAY];
# } WIN_CERTIFICATE;
#
# typedef struct _WIN_CERTIFICATE_UEFI_GUID {
# WIN_CERTIFICATE Hdr;
# EFI_GUID CertType;
# //UINT8 CertData[ANYSIZE_ARRAY];
# } WIN_CERTIFICATE_UEFI_GUID;
#
# typedef struct {
# UINT64 MonotonicCount;
# WIN_CERTIFICATE_UEFI_GUID AuthInfo;
# } EFI_FIRMWARE_IMAGE_AUTHENTICATION;
#
# typedef struct _EFI_CERT_BLOCK_RSA_2048_SHA256 {
# EFI_GUID HashType;
# UINT8 PublicKey[256];
# UINT8 Signature[256];
# } EFI_CERT_BLOCK_RSA_2048_SHA256;
#
PreSize = FwMgrHdrSize
Content = BytesIO()
for driver in self.CapsuleDataList:
FileName = driver.GenCapsuleSubItem()
FwMgrHdr.write(pack('=Q', PreSize))
PreSize += os.path.getsize(FileName)
File = open(FileName, 'rb')
Content.write(File.read())
File.close()
for fmp in self.FmpPayloadList:
if fmp.Existed:
FwMgrHdr.write(pack('=Q', PreSize))
PreSize += len(fmp.Buffer)
Content.write(fmp.Buffer)
continue
if fmp.ImageFile:
for Obj in fmp.ImageFile:
fmp.ImageFile = Obj.GenCapsuleSubItem()
if fmp.VendorCodeFile:
for Obj in fmp.VendorCodeFile:
fmp.VendorCodeFile = Obj.GenCapsuleSubItem()
if fmp.Certificate_Guid:
ExternalTool, ExternalOption = FindExtendTool([], GenFdsGlobalVariable.ArchList, fmp.Certificate_Guid)
CmdOption = ''
CapInputFile = fmp.ImageFile
if not os.path.isabs(fmp.ImageFile):
CapInputFile = os.path.join(GenFdsGlobalVariable.WorkSpaceDir, fmp.ImageFile)
CapOutputTmp = os.path.join(GenFdsGlobalVariable.FvDir, self.UiCapsuleName) + '.tmp'
if ExternalTool is None:
EdkLogger.error("GenFds", GENFDS_ERROR, "No tool found with GUID %s" % fmp.Certificate_Guid)
else:
CmdOption += ExternalTool
if ExternalOption:
CmdOption = CmdOption + ' ' + ExternalOption
CmdOption += ' -e ' + ' --monotonic-count ' + str(fmp.MonotonicCount) + ' -o ' + CapOutputTmp + ' ' + CapInputFile
CmdList = CmdOption.split()
GenFdsGlobalVariable.CallExternalTool(CmdList, "Failed to generate FMP auth capsule")
if uuid.UUID(fmp.Certificate_Guid) == EFI_CERT_TYPE_PKCS7_GUID:
dwLength = 4 + 2 + 2 + 16 + os.path.getsize(CapOutputTmp) - os.path.getsize(CapInputFile)
else:
dwLength = 4 + 2 + 2 + 16 + 16 + 256 + 256
fmp.ImageFile = CapOutputTmp
AuthData = [fmp.MonotonicCount, dwLength, WIN_CERT_REVISION, WIN_CERT_TYPE_EFI_GUID, fmp.Certificate_Guid]
fmp.Buffer = fmp.GenCapsuleSubItem(AuthData)
else:
fmp.Buffer = fmp.GenCapsuleSubItem()
FwMgrHdr.write(pack('=Q', PreSize))
PreSize += len(fmp.Buffer)
Content.write(fmp.Buffer)
BodySize = len(FwMgrHdr.getvalue()) + len(Content.getvalue())
Header.write(pack('=I', HdrSize + BodySize))
#
# The real capsule header structure is 28 bytes
#
Header.write(b'\x00'*(HdrSize-28))
Header.write(FwMgrHdr.getvalue())
Header.write(Content.getvalue())
#
# Generate FMP capsule file
#
CapOutputFile = os.path.join(GenFdsGlobalVariable.FvDir, self.UiCapsuleName) + '.Cap'
SaveFileOnChange(CapOutputFile, Header.getvalue(), True)
return CapOutputFile
## Generate capsule
#
# @param self The object pointer
# @retval string Generated Capsule file path
#
def GenCapsule(self):
if self.UiCapsuleName.upper() + 'cap' in GenFdsGlobalVariable.ImageBinDict:
return GenFdsGlobalVariable.ImageBinDict[self.UiCapsuleName.upper() + 'cap']
GenFdsGlobalVariable.InfLogger( "\nGenerate %s Capsule" %self.UiCapsuleName)
if ('CAPSULE_GUID' in self.TokensDict and
uuid.UUID(self.TokensDict['CAPSULE_GUID']) == uuid.UUID('6DCBD5ED-E82D-4C44-BDA1-7194199AD92A')):
return self.GenFmpCapsule()
CapInfFile = self.GenCapInf()
CapInfFile.append("[files]" + TAB_LINE_BREAK)
CapFileList = []
for CapsuleDataObj in self.CapsuleDataList:
CapsuleDataObj.CapsuleName = self.CapsuleName
FileName = CapsuleDataObj.GenCapsuleSubItem()
CapsuleDataObj.CapsuleName = None
CapFileList.append(FileName)
CapInfFile.append("EFI_FILE_NAME = " + \
FileName + \
TAB_LINE_BREAK)
SaveFileOnChange(self.CapInfFileName, ''.join(CapInfFile), False)
#
# Call GenFv tool to generate capsule
#
CapOutputFile = os.path.join(GenFdsGlobalVariable.FvDir, self.UiCapsuleName)
CapOutputFile = CapOutputFile + '.Cap'
GenFdsGlobalVariable.GenerateFirmwareVolume(
CapOutputFile,
[self.CapInfFileName],
Capsule=True,
FfsList=CapFileList
)
GenFdsGlobalVariable.VerboseLogger( "\nGenerate %s Capsule Successfully" %self.UiCapsuleName)
GenFdsGlobalVariable.SharpCounter = 0
GenFdsGlobalVariable.ImageBinDict[self.UiCapsuleName.upper() + 'cap'] = CapOutputFile
return CapOutputFile
## Generate inf file for capsule
#
# @param self The object pointer
# @retval file inf file object
#
def GenCapInf(self):
self.CapInfFileName = os.path.join(GenFdsGlobalVariable.FvDir,
self.UiCapsuleName + "_Cap" + '.inf')
CapInfFile = []
CapInfFile.append("[options]" + TAB_LINE_BREAK)
for Item in self.TokensDict:
CapInfFile.append("EFI_" + \
Item + \
' = ' + \
self.TokensDict[Item] + \
TAB_LINE_BREAK)
return CapInfFile
| edk2-master | BaseTools/Source/Python/GenFds/Capsule.py |
## @file
# process FFS generation
#
# Copyright (c) 2007-2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from Common.DataType import *
# mapping between FILE type in FDF and file type for GenFfs
FdfFvFileTypeToFileType = {
SUP_MODULE_SEC : 'EFI_FV_FILETYPE_SECURITY_CORE',
SUP_MODULE_PEI_CORE : 'EFI_FV_FILETYPE_PEI_CORE',
SUP_MODULE_PEIM : 'EFI_FV_FILETYPE_PEIM',
SUP_MODULE_DXE_CORE : 'EFI_FV_FILETYPE_DXE_CORE',
'FREEFORM' : 'EFI_FV_FILETYPE_FREEFORM',
'DRIVER' : 'EFI_FV_FILETYPE_DRIVER',
'APPLICATION' : 'EFI_FV_FILETYPE_APPLICATION',
'FV_IMAGE' : 'EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE',
'RAW' : 'EFI_FV_FILETYPE_RAW',
'PEI_DXE_COMBO' : 'EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER',
'SMM' : 'EFI_FV_FILETYPE_SMM',
SUP_MODULE_SMM_CORE : 'EFI_FV_FILETYPE_SMM_CORE',
SUP_MODULE_MM_STANDALONE : 'EFI_FV_FILETYPE_MM_STANDALONE',
SUP_MODULE_MM_CORE_STANDALONE : 'EFI_FV_FILETYPE_MM_CORE_STANDALONE'
}
# mapping between section type in FDF and file suffix
SectionSuffix = {
BINARY_FILE_TYPE_PE32 : '.pe32',
BINARY_FILE_TYPE_PIC : '.pic',
BINARY_FILE_TYPE_TE : '.te',
BINARY_FILE_TYPE_DXE_DEPEX : '.dpx',
'VERSION' : '.ver',
BINARY_FILE_TYPE_UI : '.ui',
'COMPAT16' : '.com16',
'RAW' : '.raw',
'FREEFORM_SUBTYPE_GUID': '.guid',
'SUBTYPE_GUID' : '.guid',
'FV_IMAGE' : 'fv.sec',
'COMPRESS' : '.com',
'GUIDED' : '.guided',
BINARY_FILE_TYPE_PEI_DEPEX : '.dpx',
BINARY_FILE_TYPE_SMM_DEPEX : '.dpx'
}
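# --- Illustrative sketch, not part of the original EDK II source ---
# A hypothetical helper showing how the two lookup tables above are typically
# used together: translate an FDF FILE type into the GenFfs file type and a
# section type into its output file suffix; the RAW / '.sec' fallbacks here
# are illustrative defaults, not EDK II behavior.
def _ExampleLookup(FdfFileType, SectionType):
    FfsFileType = FdfFvFileTypeToFileType.get(FdfFileType, 'EFI_FV_FILETYPE_RAW')
    Suffix = SectionSuffix.get(SectionType, '.sec')
    return FfsFileType, Suffix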
| edk2-master | BaseTools/Source/Python/GenFds/Ffs.py |
## @file
# process depex section generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from . import Section
from .GenFdsGlobalVariable import GenFdsGlobalVariable
import Common.LongFilePathOs as os
from CommonDataClass.FdfClass import DepexSectionClassObject
from AutoGen.GenDepex import DependencyExpression
from Common import EdkLogger
from Common.BuildToolError import *
from Common.Misc import PathClass
from Common.DataType import *
## generate data section
#
#
class DepexSection (DepexSectionClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
DepexSectionClassObject.__init__(self)
def __FindGuidValue(self, CName):
for Arch in GenFdsGlobalVariable.ArchList:
PkgList = GenFdsGlobalVariable.WorkSpace.GetPackageList(GenFdsGlobalVariable.ActivePlatform,
Arch,
GenFdsGlobalVariable.TargetName,
GenFdsGlobalVariable.ToolChainTag)
for Inf in GenFdsGlobalVariable.FdfParser.Profile.InfList:
ModuleData = GenFdsGlobalVariable.WorkSpace.BuildObject[
PathClass(Inf, GenFdsGlobalVariable.WorkSpaceDir),
Arch,
GenFdsGlobalVariable.TargetName,
GenFdsGlobalVariable.ToolChainTag
]
for Pkg in ModuleData.Packages:
if Pkg not in PkgList:
PkgList.append(Pkg)
for PkgDb in PkgList:
if CName in PkgDb.Ppis:
return PkgDb.Ppis[CName]
if CName in PkgDb.Protocols:
return PkgDb.Protocols[CName]
if CName in PkgDb.Guids:
return PkgDb.Guids[CName]
return None
## GenSection() method
#
    # Generate depex section
#
# @param self The object pointer
# @param OutputPath Where to place output file
# @param ModuleName Which module this section belongs to
# @param SecNum Index of section
# @param KeyStringList Filter for inputs of section generation
    # @param FfsFile FfsInfStatement object that contains this section data
# @param Dict dictionary contains macro and its value
# @retval tuple (Generated file name list, section alignment)
#
def GenSection(self, OutputPath, ModuleName, SecNum, keyStringList, FfsFile = None, Dict = None, IsMakefile = False):
if self.ExpressionProcessed == False:
self.Expression = self.Expression.replace("\n", " ").replace("\r", " ")
ExpList = self.Expression.split()
for Exp in ExpList:
if Exp.upper() not in ('AND', 'OR', 'NOT', 'TRUE', 'FALSE', 'SOR', 'BEFORE', 'AFTER', 'END'):
GuidStr = self.__FindGuidValue(Exp)
if GuidStr is None:
EdkLogger.error("GenFds", RESOURCE_NOT_AVAILABLE,
"Depex GUID %s could not be found in build DB! (ModuleName: %s)" % (Exp, ModuleName))
self.Expression = self.Expression.replace(Exp, GuidStr)
self.Expression = self.Expression.strip()
self.ExpressionProcessed = True
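            # Illustrative note (not part of the original source): a depex such
            # as "gHypotheticalProtocolGuid AND gHypotheticalPpiGuid" has each
            # C name replaced above with the GUID value registered for it in a
            # package's Ppis/Protocols/Guids database, so DependencyExpression
            # below only ever sees GUID strings plus the keywords AND/OR/NOT etc.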
if self.DepexType == 'PEI_DEPEX_EXP':
ModuleType = SUP_MODULE_PEIM
SecType = BINARY_FILE_TYPE_PEI_DEPEX
elif self.DepexType == 'DXE_DEPEX_EXP':
ModuleType = SUP_MODULE_DXE_DRIVER
SecType = BINARY_FILE_TYPE_DXE_DEPEX
elif self.DepexType == 'SMM_DEPEX_EXP':
ModuleType = SUP_MODULE_DXE_SMM_DRIVER
SecType = BINARY_FILE_TYPE_SMM_DEPEX
else:
EdkLogger.error("GenFds", FORMAT_INVALID,
"Depex type %s is not valid for module %s" % (self.DepexType, ModuleName))
InputFile = os.path.join (OutputPath, ModuleName + SUP_MODULE_SEC + SecNum + '.depex')
InputFile = os.path.normpath(InputFile)
Depex = DependencyExpression(self.Expression, ModuleType)
Depex.Generate(InputFile)
OutputFile = os.path.join (OutputPath, ModuleName + SUP_MODULE_SEC + SecNum + '.dpx')
OutputFile = os.path.normpath(OutputFile)
GenFdsGlobalVariable.GenerateSection(OutputFile, [InputFile], Section.Section.SectionType.get (SecType), IsMakefile=IsMakefile)
return [OutputFile], self.Alignment
| edk2-master | BaseTools/Source/Python/GenFds/DepexSection.py |
## @file
# generate capsule
#
# Copyright (c) 2007-2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from io import BytesIO
from struct import pack
import os
from Common.Misc import SaveFileOnChange
import uuid
## base class for capsule data
#
#
class CapsuleData:
## The constructor
#
# @param self The object pointer
def __init__(self):
pass
## generate capsule data
#
# @param self The object pointer
def GenCapsuleSubItem(self):
pass
## FFS class for capsule data
#
#
class CapsuleFfs (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FvName = None
## generate FFS capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
FfsFile = self.Ffs.GenFfs()
return FfsFile
## FV class for capsule data
#
#
class CapsuleFv (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FvName = None
self.CapsuleName = None
## generate FV capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
if self.FvName.find('.fv') == -1:
if self.FvName.upper() in GenFdsGlobalVariable.FdfParser.Profile.FvDict:
FvObj = GenFdsGlobalVariable.FdfParser.Profile.FvDict[self.FvName.upper()]
FdBuffer = BytesIO()
FvObj.CapsuleName = self.CapsuleName
FvFile = FvObj.AddToBuffer(FdBuffer)
FvObj.CapsuleName = None
FdBuffer.close()
return FvFile
else:
FvFile = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FvName)
return FvFile
## FD class for capsule data
#
#
class CapsuleFd (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FdName = None
self.CapsuleName = None
## generate FD capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
if self.FdName.find('.fd') == -1:
if self.FdName.upper() in GenFdsGlobalVariable.FdfParser.Profile.FdDict:
FdObj = GenFdsGlobalVariable.FdfParser.Profile.FdDict[self.FdName.upper()]
FdFile = FdObj.GenFd()
return FdFile
else:
FdFile = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FdName)
return FdFile
## AnyFile class for capsule data
#
#
class CapsuleAnyFile (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FileName = None
## generate AnyFile capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
return self.FileName
## Afile class for capsule data
#
#
class CapsuleAfile (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FileName = None
## generate Afile capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
return self.FileName
class CapsulePayload(CapsuleData):
'''Generate payload file, the header is defined below:
#pragma pack(1)
typedef struct {
UINT32 Version;
EFI_GUID UpdateImageTypeId;
UINT8 UpdateImageIndex;
UINT8 reserved_bytes[3];
UINT32 UpdateImageSize;
UINT32 UpdateVendorCodeSize;
UINT64 UpdateHardwareInstance; //Introduced in v2
} EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER;
'''
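    # Illustrative note (not part of the original EDK II source): for Version 2
    # the fixed header described above is 4 + 16 + 1 + 3 + 4 + 4 + 8 = 40 bytes,
    # which matches the '=ILHHBBBBBBBBBBBBIIQ' pack format used in
    # GenCapsuleSubItem below (the GUID is packed field by field as L, H, H and
    # eight B values).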
def __init__(self):
self.UiName = None
self.Version = None
self.ImageTypeId = None
self.ImageIndex = None
self.HardwareInstance = None
self.ImageFile = []
self.VendorCodeFile = []
self.Certificate_Guid = None
self.MonotonicCount = None
self.Existed = False
self.Buffer = None
def GenCapsuleSubItem(self, AuthData=[]):
if not self.Version:
self.Version = '0x00000002'
if not self.ImageIndex:
self.ImageIndex = '0x1'
if not self.HardwareInstance:
self.HardwareInstance = '0x0'
ImageFileSize = os.path.getsize(self.ImageFile)
if AuthData:
            # ImageFileSize needs to include the full authentication info size,
            # from the first byte of MonotonicCount to the last byte of the certificate.
            # The fixed 32 bytes cover MonotonicCount (8), dwLength (4), wRevision (2), wCertificateType (2) and CertType (16).
ImageFileSize += 32
VendorFileSize = 0
if self.VendorCodeFile:
VendorFileSize = os.path.getsize(self.VendorCodeFile)
#
# Fill structure
#
Guid = self.ImageTypeId.split('-')
Buffer = pack('=ILHHBBBBBBBBBBBBIIQ',
int(self.Version, 16),
int(Guid[0], 16),
int(Guid[1], 16),
int(Guid[2], 16),
int(Guid[3][-4:-2], 16),
int(Guid[3][-2:], 16),
int(Guid[4][-12:-10], 16),
int(Guid[4][-10:-8], 16),
int(Guid[4][-8:-6], 16),
int(Guid[4][-6:-4], 16),
int(Guid[4][-4:-2], 16),
int(Guid[4][-2:], 16),
int(self.ImageIndex, 16),
0,
0,
0,
ImageFileSize,
VendorFileSize,
int(self.HardwareInstance, 16)
)
if AuthData:
Buffer += pack('QIHH', AuthData[0], AuthData[1], AuthData[2], AuthData[3])
Buffer += uuid.UUID(AuthData[4]).bytes_le
#
# Append file content to the structure
#
ImageFile = open(self.ImageFile, 'rb')
Buffer += ImageFile.read()
ImageFile.close()
if self.VendorCodeFile:
VendorFile = open(self.VendorCodeFile, 'rb')
Buffer += VendorFile.read()
VendorFile.close()
self.Existed = True
return Buffer
| edk2-master | BaseTools/Source/Python/GenFds/CapsuleData.py |
## @file
# process compress section generation
#
# Copyright (c) 2007 - 2017, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from .Ffs import SectionSuffix
from . import Section
import subprocess
import Common.LongFilePathOs as os
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from CommonDataClass.FdfClass import CompressSectionClassObject
from Common.DataType import *
## generate compress section
#
#
class CompressSection (CompressSectionClassObject) :
## compress types: PI standard and non PI standard
CompTypeDict = {
'PI_STD' : 'PI_STD',
'PI_NONE' : 'PI_NONE'
}
## The constructor
#
# @param self The object pointer
#
def __init__(self):
CompressSectionClassObject.__init__(self)
## GenSection() method
#
# Generate compressed section
#
# @param self The object pointer
# @param OutputPath Where to place output file
# @param ModuleName Which module this section belongs to
# @param SecNum Index of section
# @param KeyStringList Filter for inputs of section generation
# @param FfsInf FfsInfStatement object that contains this section data
# @param Dict dictionary contains macro and its value
# @retval tuple (Generated file name, section alignment)
#
def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf = None, Dict = None, IsMakefile = False):
if FfsInf is not None:
self.CompType = FfsInf.__ExtendMacro__(self.CompType)
self.Alignment = FfsInf.__ExtendMacro__(self.Alignment)
SectFiles = tuple()
SectAlign = []
Index = 0
MaxAlign = None
if Dict is None:
Dict = {}
for Sect in self.SectionList:
Index = Index + 1
SecIndex = '%s.%d' %(SecNum, Index)
ReturnSectList, AlignValue = Sect.GenSection(OutputPath, ModuleName, SecIndex, KeyStringList, FfsInf, Dict, IsMakefile=IsMakefile)
if AlignValue is not None:
if MaxAlign is None:
MaxAlign = AlignValue
if GenFdsGlobalVariable.GetAlignment (AlignValue) > GenFdsGlobalVariable.GetAlignment (MaxAlign):
MaxAlign = AlignValue
if ReturnSectList != []:
if AlignValue is None:
AlignValue = "1"
for FileData in ReturnSectList:
SectFiles += (FileData,)
SectAlign.append(AlignValue)
OutputFile = OutputPath + \
os.sep + \
ModuleName + \
SUP_MODULE_SEC + \
SecNum + \
SectionSuffix['COMPRESS']
OutputFile = os.path.normpath(OutputFile)
DummyFile = OutputFile + '.dummy'
GenFdsGlobalVariable.GenerateSection(DummyFile, SectFiles, InputAlign=SectAlign, IsMakefile=IsMakefile)
GenFdsGlobalVariable.GenerateSection(OutputFile, [DummyFile], Section.Section.SectionType['COMPRESS'],
CompressionType=self.CompTypeDict[self.CompType], IsMakefile=IsMakefile)
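        # Illustrative note (not part of the original source): the child
        # sections collected above are first concatenated into DummyFile with
        # their individual alignments, and that aggregate is then wrapped in a
        # single compression section whose type (PI_STD or PI_NONE) comes from
        # the FDF COMPRESS statement.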
OutputFileList = []
OutputFileList.append(OutputFile)
return OutputFileList, self.Alignment
| edk2-master | BaseTools/Source/Python/GenFds/CompressSection.py |
## @file
# process FD Region generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from struct import *
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from io import BytesIO
import string
import Common.LongFilePathOs as os
from stat import *
from Common import EdkLogger
from Common.BuildToolError import *
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
from Common.DataType import BINARY_FILE_TYPE_FV
## generate Region
#
#
class Region(object):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
self.Offset = None # The begin position of the Region
self.Size = None # The Size of the Region
self.PcdOffset = None
self.PcdSize = None
self.SetVarDict = {}
self.RegionType = None
self.RegionDataList = []
## PadBuffer()
#
# Add padding bytes to the Buffer
#
# @param Buffer The buffer the generated region data will be put
# in
# @param ErasePolarity Flash erase polarity
# @param Size Number of padding bytes requested
#
def PadBuffer(self, Buffer, ErasePolarity, Size):
if Size > 0:
if (ErasePolarity == '1') :
PadByte = pack('B', 0xFF)
else:
PadByte = pack('B', 0)
for i in range(0, Size):
Buffer.write(PadByte)
## AddToBuffer()
#
# Add region data to the Buffer
#
# @param self The object pointer
    # @param Buffer The buffer the generated region data will be put in
    # @param BaseAddress Base address of the region
    # @param BlockSizeList List of (BlockSize, BlockNum, pcd) tuples describing the FD blocks
    # @param ErasePolarity Flash erase polarity
    # @param ImageBinDict Dictionary of FV/Capsule images that have already been generated
    # @param MacroDict Macro name/value pairs
    # @param Flag When True, only process FV regions for makefile generation and do not write data into Buffer
    # @retval None
    #
def AddToBuffer(self, Buffer, BaseAddress, BlockSizeList, ErasePolarity, ImageBinDict, MacroDict=None, Flag=False):
Size = self.Size
if MacroDict is None:
MacroDict = {}
if not Flag:
GenFdsGlobalVariable.InfLogger('\nGenerate Region at Offset 0x%X' % self.Offset)
GenFdsGlobalVariable.InfLogger(" Region Size = 0x%X" % Size)
GenFdsGlobalVariable.SharpCounter = 0
if Flag and (self.RegionType != BINARY_FILE_TYPE_FV):
return
if self.RegionType == BINARY_FILE_TYPE_FV:
#
# Get Fv from FvDict
#
self.FvAddress = int(BaseAddress, 16) + self.Offset
FvBaseAddress = '0x%X' % self.FvAddress
FvOffset = 0
for RegionData in self.RegionDataList:
FileName = None
if RegionData.endswith(".fv"):
RegionData = GenFdsGlobalVariable.MacroExtend(RegionData, MacroDict)
if not Flag:
GenFdsGlobalVariable.InfLogger(' Region FV File Name = .fv : %s' % RegionData)
if RegionData[1] != ':' :
RegionData = mws.join (GenFdsGlobalVariable.WorkSpaceDir, RegionData)
if not os.path.exists(RegionData):
EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=RegionData)
FileName = RegionData
elif RegionData.upper() + 'fv' in ImageBinDict:
if not Flag:
GenFdsGlobalVariable.InfLogger(' Region Name = FV')
FileName = ImageBinDict[RegionData.upper() + 'fv']
else:
#
# Generate FvImage.
#
FvObj = None
if RegionData.upper() in GenFdsGlobalVariable.FdfParser.Profile.FvDict:
FvObj = GenFdsGlobalVariable.FdfParser.Profile.FvDict[RegionData.upper()]
if FvObj is not None :
if not Flag:
GenFdsGlobalVariable.InfLogger(' Region Name = FV')
#
# Call GenFv tool
#
self.BlockInfoOfRegion(BlockSizeList, FvObj)
self.FvAddress = self.FvAddress + FvOffset
FvAlignValue = GenFdsGlobalVariable.GetAlignment(FvObj.FvAlignment)
if self.FvAddress % FvAlignValue != 0:
EdkLogger.error("GenFds", GENFDS_ERROR,
"FV (%s) is NOT %s Aligned!" % (FvObj.UiFvName, FvObj.FvAlignment))
FvBuffer = BytesIO()
FvBaseAddress = '0x%X' % self.FvAddress
BlockSize = None
BlockNum = None
FvObj.AddToBuffer(FvBuffer, FvBaseAddress, BlockSize, BlockNum, ErasePolarity, Flag=Flag)
if Flag:
continue
FvBufferLen = len(FvBuffer.getvalue())
if FvBufferLen > Size:
FvBuffer.close()
EdkLogger.error("GenFds", GENFDS_ERROR,
"Size of FV (%s) is larger than Region Size 0x%X specified." % (RegionData, Size))
#
# Put the generated image into FD buffer.
#
Buffer.write(FvBuffer.getvalue())
FvBuffer.close()
FvOffset = FvOffset + FvBufferLen
Size = Size - FvBufferLen
continue
else:
EdkLogger.error("GenFds", GENFDS_ERROR, "FV (%s) is NOT described in FDF file!" % (RegionData))
#
# Add the exist Fv image into FD buffer
#
if not Flag:
if FileName is not None:
FileLength = os.stat(FileName)[ST_SIZE]
if FileLength > Size:
EdkLogger.error("GenFds", GENFDS_ERROR,
"Size of FV File (%s) is larger than Region Size 0x%X specified." \
% (RegionData, Size))
BinFile = open(FileName, 'rb')
Buffer.write(BinFile.read())
BinFile.close()
Size = Size - FileLength
#
# Pad the left buffer
#
if not Flag:
self.PadBuffer(Buffer, ErasePolarity, Size)
if self.RegionType == 'CAPSULE':
#
# Get Capsule from Capsule Dict
#
for RegionData in self.RegionDataList:
if RegionData.endswith(".cap"):
RegionData = GenFdsGlobalVariable.MacroExtend(RegionData, MacroDict)
GenFdsGlobalVariable.InfLogger(' Region CAPSULE Image Name = .cap : %s' % RegionData)
if RegionData[1] != ':' :
RegionData = mws.join (GenFdsGlobalVariable.WorkSpaceDir, RegionData)
if not os.path.exists(RegionData):
EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=RegionData)
FileName = RegionData
elif RegionData.upper() + 'cap' in ImageBinDict:
GenFdsGlobalVariable.InfLogger(' Region Name = CAPSULE')
FileName = ImageBinDict[RegionData.upper() + 'cap']
else:
#
# Generate Capsule image and Put it into FD buffer
#
CapsuleObj = None
if RegionData.upper() in GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict:
CapsuleObj = GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict[RegionData.upper()]
if CapsuleObj is not None :
CapsuleObj.CapsuleName = RegionData.upper()
GenFdsGlobalVariable.InfLogger(' Region Name = CAPSULE')
#
# Call GenFv tool to generate Capsule Image
#
FileName = CapsuleObj.GenCapsule()
CapsuleObj.CapsuleName = None
else:
EdkLogger.error("GenFds", GENFDS_ERROR, "Capsule (%s) is NOT described in FDF file!" % (RegionData))
#
# Add the capsule image into FD buffer
#
FileLength = os.stat(FileName)[ST_SIZE]
if FileLength > Size:
EdkLogger.error("GenFds", GENFDS_ERROR,
"Size 0x%X of Capsule File (%s) is larger than Region Size 0x%X specified." \
% (FileLength, RegionData, Size))
BinFile = open(FileName, 'rb')
Buffer.write(BinFile.read())
BinFile.close()
Size = Size - FileLength
#
# Pad the left buffer
#
self.PadBuffer(Buffer, ErasePolarity, Size)
if self.RegionType in ('FILE', 'INF'):
for RegionData in self.RegionDataList:
if self.RegionType == 'INF':
RegionData.__InfParse__(None)
if len(RegionData.BinFileList) != 1:
EdkLogger.error('GenFds', GENFDS_ERROR, 'INF in FD region can only contain one binary: %s' % RegionData)
File = RegionData.BinFileList[0]
RegionData = RegionData.PatchEfiFile(File.Path, File.Type)
else:
RegionData = GenFdsGlobalVariable.MacroExtend(RegionData, MacroDict)
if RegionData[1] != ':' :
RegionData = mws.join (GenFdsGlobalVariable.WorkSpaceDir, RegionData)
if not os.path.exists(RegionData):
EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=RegionData)
#
# Add the file image into FD buffer
#
FileLength = os.stat(RegionData)[ST_SIZE]
if FileLength > Size:
EdkLogger.error("GenFds", GENFDS_ERROR,
"Size of File (%s) is larger than Region Size 0x%X specified." \
% (RegionData, Size))
GenFdsGlobalVariable.InfLogger(' Region File Name = %s' % RegionData)
BinFile = open(RegionData, 'rb')
Buffer.write(BinFile.read())
BinFile.close()
Size = Size - FileLength
#
# Pad the left buffer
#
self.PadBuffer(Buffer, ErasePolarity, Size)
if self.RegionType == 'DATA' :
GenFdsGlobalVariable.InfLogger(' Region Name = DATA')
DataSize = 0
for RegionData in self.RegionDataList:
Data = RegionData.split(',')
DataSize = DataSize + len(Data)
if DataSize > Size:
EdkLogger.error("GenFds", GENFDS_ERROR, "Size of DATA is larger than Region Size ")
else:
for item in Data :
Buffer.write(pack('B', int(item, 16)))
Size = Size - DataSize
#
# Pad the left buffer
#
self.PadBuffer(Buffer, ErasePolarity, Size)
if self.RegionType is None:
GenFdsGlobalVariable.InfLogger(' Region Name = None')
self.PadBuffer(Buffer, ErasePolarity, Size)
    ## BlockInfoOfRegion()
#
# @param BlockSizeList List of block information
# @param FvObj The object for FV
#
def BlockInfoOfRegion(self, BlockSizeList, FvObj):
Start = 0
End = 0
RemindingSize = self.Size
ExpectedList = []
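        # Illustrative note (not part of the original source): for a region at
        # Offset 0x10000 with Size 0x30000 in an FD described by a single
        # (BlockSize=0x1000, BlockNum=0x100) entry, the loop below yields
        # ExpectedList = [(0x1000, 0x30)], i.e. 0x30 blocks of 0x1000 bytes
        # covering the region.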
for (BlockSize, BlockNum, pcd) in BlockSizeList:
End = Start + BlockSize * BlockNum
# region not started yet
if self.Offset >= End:
Start = End
continue
# region located in current blocks
else:
# region ended within current blocks
if self.Offset + self.Size <= End:
ExpectedList.append((BlockSize, (RemindingSize + BlockSize - 1) // BlockSize))
break
# region not ended yet
else:
# region not started in middle of current blocks
if self.Offset <= Start:
UsedBlockNum = BlockNum
# region started in middle of current blocks
else:
UsedBlockNum = (End - self.Offset) // BlockSize
Start = End
ExpectedList.append((BlockSize, UsedBlockNum))
RemindingSize -= BlockSize * UsedBlockNum
if FvObj.BlockSizeList == []:
FvObj.BlockSizeList = ExpectedList
else:
# first check whether FvObj.BlockSizeList items have only "BlockSize" or "NumBlocks",
# if so, use ExpectedList
for Item in FvObj.BlockSizeList:
if Item[0] is None or Item[1] is None:
FvObj.BlockSizeList = ExpectedList
break
# make sure region size is no smaller than the summed block size in FV
Sum = 0
for Item in FvObj.BlockSizeList:
Sum += Item[0] * Item[1]
if self.Size < Sum:
EdkLogger.error("GenFds", GENFDS_ERROR, "Total Size of FV %s 0x%x is larger than Region Size 0x%x "
% (FvObj.UiFvName, Sum, self.Size))
# check whether the BlockStatements in FV section is appropriate
ExpectedListData = ''
for Item in ExpectedList:
ExpectedListData += "BlockSize = 0x%x\n\tNumBlocks = 0x%x\n\t" % Item
Index = 0
for Item in FvObj.BlockSizeList:
if Item[0] != ExpectedList[Index][0]:
EdkLogger.error("GenFds", GENFDS_ERROR, "BlockStatements of FV %s are not align with FD's, suggested FV BlockStatement"
% FvObj.UiFvName, ExtraData=ExpectedListData)
elif Item[1] != ExpectedList[Index][1]:
if (Item[1] < ExpectedList[Index][1]) and (Index == len(FvObj.BlockSizeList) - 1):
                    break
else:
EdkLogger.error("GenFds", GENFDS_ERROR, "BlockStatements of FV %s are not align with FD's, suggested FV BlockStatement"
% FvObj.UiFvName, ExtraData=ExpectedListData)
else:
Index += 1
| edk2-master | BaseTools/Source/Python/GenFds/Region.py |
## @file
# process FV image section generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from . import Section
from io import BytesIO
from .Ffs import SectionSuffix
import subprocess
from .GenFdsGlobalVariable import GenFdsGlobalVariable
import Common.LongFilePathOs as os
from CommonDataClass.FdfClass import FvImageSectionClassObject
from Common.MultipleWorkspace import MultipleWorkspace as mws
from Common import EdkLogger
from Common.BuildToolError import *
from Common.DataType import *
## generate FV image section
#
#
class FvImageSection(FvImageSectionClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
FvImageSectionClassObject.__init__(self)
## GenSection() method
#
# Generate FV image section
#
# @param self The object pointer
# @param OutputPath Where to place output file
# @param ModuleName Which module this section belongs to
# @param SecNum Index of section
# @param KeyStringList Filter for inputs of section generation
# @param FfsInf FfsInfStatement object that contains this section data
# @param Dict dictionary contains macro and its value
# @retval tuple (Generated file name, section alignment)
#
def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf = None, Dict = None, IsMakefile = False):
OutputFileList = []
if Dict is None:
Dict = {}
if self.FvFileType is not None:
FileList, IsSect = Section.Section.GetFileList(FfsInf, self.FvFileType, self.FvFileExtension)
if IsSect :
return FileList, self.Alignment
Num = SecNum
MaxFvAlignment = 0
for FvFileName in FileList:
FvAlignmentValue = 0
if os.path.isfile(FvFileName):
FvFileObj = open (FvFileName, 'rb')
FvFileObj.seek(0)
                    # The PI FV header is 0x48 bytes
                    FvHeaderBuffer = FvFileObj.read(0x48)
                    # The FV alignment exponent is bits 16..20 of the Attributes field, i.e. the low 5 bits of header byte 0x2E.
if isinstance(FvHeaderBuffer[0x2E], str):
FvAlignmentValue = 1 << (ord(FvHeaderBuffer[0x2E]) & 0x1F)
else:
FvAlignmentValue = 1 << (FvHeaderBuffer[0x2E] & 0x1F)
FvFileObj.close()
if FvAlignmentValue > MaxFvAlignment:
MaxFvAlignment = FvAlignmentValue
OutputFile = os.path.join(OutputPath, ModuleName + SUP_MODULE_SEC + Num + SectionSuffix.get("FV_IMAGE"))
GenFdsGlobalVariable.GenerateSection(OutputFile, [FvFileName], 'EFI_SECTION_FIRMWARE_VOLUME_IMAGE', IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
# MaxFvAlignment is larger than or equal to 1K
if MaxFvAlignment >= 0x400:
if MaxFvAlignment >= 0x100000:
#The max alignment supported by FFS is 16M.
if MaxFvAlignment >= 0x1000000:
self.Alignment = "16M"
else:
self.Alignment = str(MaxFvAlignment // 0x100000) + "M"
else:
self.Alignment = str (MaxFvAlignment // 0x400) + "K"
else:
# MaxFvAlignment is less than 1K
self.Alignment = str (MaxFvAlignment)
return OutputFileList, self.Alignment
#
# Generate Fv
#
if self.FvName is not None:
Buffer = BytesIO()
Fv = GenFdsGlobalVariable.FdfParser.Profile.FvDict.get(self.FvName)
if Fv is not None:
self.Fv = Fv
if not self.FvAddr and self.Fv.BaseAddress:
self.FvAddr = self.Fv.BaseAddress
FvFileName = Fv.AddToBuffer(Buffer, self.FvAddr, MacroDict = Dict, Flag=IsMakefile)
if Fv.FvAlignment is not None:
if self.Alignment is None:
self.Alignment = Fv.FvAlignment
else:
if GenFdsGlobalVariable.GetAlignment (Fv.FvAlignment) > GenFdsGlobalVariable.GetAlignment (self.Alignment):
self.Alignment = Fv.FvAlignment
else:
if self.FvFileName is not None:
FvFileName = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FvFileName)
if os.path.isfile(FvFileName):
FvFileObj = open (FvFileName, 'rb')
FvFileObj.seek(0)
                    # The PI FV header is 0x48 bytes
                    FvHeaderBuffer = FvFileObj.read(0x48)
                    # The FV alignment exponent is bits 16..20 of the Attributes field, i.e. the low 5 bits of header byte 0x2E.
if isinstance(FvHeaderBuffer[0x2E], str):
FvAlignmentValue = 1 << (ord(FvHeaderBuffer[0x2E]) & 0x1F)
else:
FvAlignmentValue = 1 << (FvHeaderBuffer[0x2E] & 0x1F)
# FvAlignmentValue is larger than or equal to 1K
if FvAlignmentValue >= 0x400:
if FvAlignmentValue >= 0x100000:
#The max alignment supported by FFS is 16M.
if FvAlignmentValue >= 0x1000000:
self.Alignment = "16M"
else:
self.Alignment = str(FvAlignmentValue // 0x100000) + "M"
else:
self.Alignment = str (FvAlignmentValue // 0x400) + "K"
else:
# FvAlignmentValue is less than 1K
self.Alignment = str (FvAlignmentValue)
FvFileObj.close()
else:
if len (mws.getPkgPath()) == 0:
EdkLogger.error("GenFds", FILE_NOT_FOUND, "%s is not found in WORKSPACE: %s" % self.FvFileName, GenFdsGlobalVariable.WorkSpaceDir)
else:
EdkLogger.error("GenFds", FILE_NOT_FOUND, "%s is not found in packages path:\n\t%s" % (self.FvFileName, '\n\t'.join(mws.getPkgPath())))
else:
EdkLogger.error("GenFds", GENFDS_ERROR, "FvImageSection Failed! %s NOT found in FDF" % self.FvName)
#
# Prepare the parameter of GenSection
#
OutputFile = os.path.join(OutputPath, ModuleName + SUP_MODULE_SEC + SecNum + SectionSuffix.get("FV_IMAGE"))
GenFdsGlobalVariable.GenerateSection(OutputFile, [FvFileName], 'EFI_SECTION_FIRMWARE_VOLUME_IMAGE', IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
return OutputFileList, self.Alignment
| edk2-master | BaseTools/Source/Python/GenFds/FvImageSection.py |
## @file
# process OptionROM generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
import Common.LongFilePathOs as os
import subprocess
from . import OptRomInfStatement
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from CommonDataClass.FdfClass import OptionRomClassObject
from Common.Misc import SaveFileOnChange
from Common import EdkLogger
from Common.BuildToolError import *
##
#
#
class OPTIONROM (OptionRomClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self, Name = ""):
OptionRomClassObject.__init__(self)
self.DriverName = Name
## AddToBuffer()
#
# Generate Option ROM
#
# @param self The object pointer
# @param Buffer The buffer generated OptROM data will be put
# @retval string Generated OptROM file path
#
def AddToBuffer (self, Buffer, Flag=False) :
if not Flag:
GenFdsGlobalVariable.InfLogger( "\nGenerating %s Option ROM ..." %self.DriverName)
EfiFileList = []
BinFileList = []
# Process Modules in FfsList
for FfsFile in self.FfsList :
if isinstance(FfsFile, OptRomInfStatement.OptRomInfStatement):
FilePathNameList = FfsFile.GenFfs(IsMakefile=Flag)
if len(FilePathNameList) == 0:
EdkLogger.error("GenFds", GENFDS_ERROR, "Module %s not produce .efi files, so NO file could be put into option ROM." % (FfsFile.InfFileName))
if FfsFile.OverrideAttribs is None:
EfiFileList.extend(FilePathNameList)
else:
FileName = os.path.basename(FilePathNameList[0])
TmpOutputDir = os.path.join(GenFdsGlobalVariable.FvDir, self.DriverName, FfsFile.CurrentArch)
if not os.path.exists(TmpOutputDir) :
os.makedirs(TmpOutputDir)
TmpOutputFile = os.path.join(TmpOutputDir, FileName+'.tmp')
GenFdsGlobalVariable.GenerateOptionRom(TmpOutputFile,
FilePathNameList,
[],
FfsFile.OverrideAttribs.NeedCompress,
FfsFile.OverrideAttribs.PciClassCode,
FfsFile.OverrideAttribs.PciRevision,
FfsFile.OverrideAttribs.PciDeviceId,
FfsFile.OverrideAttribs.PciVendorId,
IsMakefile = Flag)
BinFileList.append(TmpOutputFile)
else:
FilePathName = FfsFile.GenFfs(IsMakefile=Flag)
if FfsFile.OverrideAttribs is not None:
FileName = os.path.basename(FilePathName)
TmpOutputDir = os.path.join(GenFdsGlobalVariable.FvDir, self.DriverName, FfsFile.CurrentArch)
if not os.path.exists(TmpOutputDir) :
os.makedirs(TmpOutputDir)
TmpOutputFile = os.path.join(TmpOutputDir, FileName+'.tmp')
GenFdsGlobalVariable.GenerateOptionRom(TmpOutputFile,
[FilePathName],
[],
FfsFile.OverrideAttribs.NeedCompress,
FfsFile.OverrideAttribs.PciClassCode,
FfsFile.OverrideAttribs.PciRevision,
FfsFile.OverrideAttribs.PciDeviceId,
FfsFile.OverrideAttribs.PciVendorId,
IsMakefile=Flag)
BinFileList.append(TmpOutputFile)
else:
if FfsFile.FileType == 'EFI':
EfiFileList.append(FilePathName)
else:
BinFileList.append(FilePathName)
#
# Call EfiRom tool
#
OutputFile = os.path.join(GenFdsGlobalVariable.FvDir, self.DriverName)
OutputFile = OutputFile + '.rom'
GenFdsGlobalVariable.GenerateOptionRom(
OutputFile,
EfiFileList,
BinFileList,
IsMakefile=Flag)
if not Flag:
GenFdsGlobalVariable.InfLogger( "\nGenerate %s Option ROM Successfully" %self.DriverName)
GenFdsGlobalVariable.SharpCounter = 0
return OutputFile
class OverrideAttribs:
## The constructor
#
# @param self The object pointer
#
def __init__(self):
self.PciVendorId = None
self.PciClassCode = None
self.PciDeviceId = None
self.PciRevision = None
self.NeedCompress = None
| edk2-master | BaseTools/Source/Python/GenFds/OptionRom.py |
## @file
# parse FDF file
#
# Copyright (c) 2007 - 2021, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2015, Hewlett Packard Enterprise Development, L.P.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import print_function
from __future__ import absolute_import
from re import compile, DOTALL
from string import hexdigits
from uuid import UUID
from Common.BuildToolError import *
from Common import EdkLogger
from Common.Misc import PathClass, tdict, ProcessDuplicatedInf, GuidStructureStringToGuidString
from Common.StringUtils import NormPath, ReplaceMacro
from Common import GlobalData
from Common.Expression import *
from Common.DataType import *
from Common.MultipleWorkspace import MultipleWorkspace as mws
import Common.LongFilePathOs as os
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.RangeExpression import RangeExpression
from collections import OrderedDict
from .Fd import FD
from .Region import Region
from .Fv import FV
from .AprioriSection import AprioriSection
from .FfsInfStatement import FfsInfStatement
from .FfsFileStatement import FileStatement
from .VerSection import VerSection
from .UiSection import UiSection
from .FvImageSection import FvImageSection
from .DataSection import DataSection
from .DepexSection import DepexSection
from .CompressSection import CompressSection
from .GuidSection import GuidSection
from .SubTypeGuidSection import SubTypeGuidSection
from .Capsule import EFI_CERT_TYPE_PKCS7_GUID, EFI_CERT_TYPE_RSA2048_SHA256_GUID, Capsule
from .CapsuleData import CapsuleFfs, CapsulePayload, CapsuleFv, CapsuleFd, CapsuleAnyFile, CapsuleAfile
from .RuleComplexFile import RuleComplexFile
from .RuleSimpleFile import RuleSimpleFile
from .EfiSection import EfiSection
from .OptionRom import OPTIONROM
from .OptRomInfStatement import OptRomInfStatement, OverrideAttribs
from .OptRomFileStatement import OptRomFileStatement
from .GenFdsGlobalVariable import GenFdsGlobalVariable
T_CHAR_CR = '\r'
T_CHAR_TAB = '\t'
T_CHAR_DOUBLE_QUOTE = '\"'
T_CHAR_SINGLE_QUOTE = '\''
T_CHAR_BRACE_R = '}'
SEPARATORS = {TAB_EQUAL_SPLIT, TAB_VALUE_SPLIT, TAB_COMMA_SPLIT, '{', T_CHAR_BRACE_R}
ALIGNMENTS = {"Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K", "64K", "128K",
"256K", "512K", "1M", "2M", "4M", "8M", "16M"}
ALIGNMENT_NOAUTO = ALIGNMENTS - {"Auto"}
CR_LB_SET = {T_CHAR_CR, TAB_LINE_BREAK}
RegionSizePattern = compile(r"\s*(?P<base>(?:0x|0X)?[a-fA-F0-9]+)\s*\|\s*(?P<size>(?:0x|0X)?[a-fA-F0-9]+)\s*")
RegionSizeGuidPattern = compile(r"\s*(?P<base>\w+\.\w+[\.\w\[\]]*)\s*\|\s*(?P<size>\w+\.\w+[\.\w\[\]]*)\s*")
RegionOffsetPcdPattern = compile(r"\s*(?P<base>\w+\.\w+[\.\w\[\]]*)\s*$")
ShortcutPcdPattern = compile(r"\s*\w+\s*=\s*(?P<value>(?:0x|0X)?[a-fA-F0-9]+)\s*\|\s*(?P<name>\w+\.\w+)\s*")
BaseAddrValuePattern = compile('^0[xX][0-9a-fA-F]+')
FileExtensionPattern = compile(r'([a-zA-Z][a-zA-Z0-9]*)')
TokenFindPattern = compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\)|\*)')
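# Illustrative note (not part of the original source): RegionSizePattern above
# matches numeric FD region statements such as "0x500000|0x48000" (base
# '0x500000', size '0x48000'), while RegionSizeGuidPattern matches the PCD
# form, e.g. a hypothetical "gTokenSpaceGuid.PcdRegionBase|gTokenSpaceGuid.PcdRegionSize".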
AllIncludeFileList = []
# Get the closest parent
def GetParentAtLine (Line):
for Profile in AllIncludeFileList:
if Profile.IsLineInFile(Line):
return Profile
return None
# Check include loop
def IsValidInclude (File, Line):
for Profile in AllIncludeFileList:
if Profile.IsLineInFile(Line) and Profile.FileName == File:
return False
return True
def GetRealFileLine (File, Line):
InsertedLines = 0
for Profile in AllIncludeFileList:
if Profile.IsLineInFile(Line):
return Profile.GetLineInFile(Line)
elif Line >= Profile.InsertStartLineNumber and Profile.Level == 1:
InsertedLines += Profile.GetTotalLines()
return (File, Line - InsertedLines)
## The exception class that used to report error messages when parsing FDF
#
# Currently the "ToolName" is set to be "FdfParser".
#
class Warning (Exception):
## The constructor
#
# @param self The object pointer
# @param Str The message to record
# @param File The FDF name
# @param Line The Line number that error occurs
#
def __init__(self, Str, File = None, Line = None):
FileLineTuple = GetRealFileLine(File, Line)
self.FileName = FileLineTuple[0]
self.LineNumber = FileLineTuple[1]
self.OriginalLineNumber = Line
self.Message = Str
self.ToolName = 'FdfParser'
def __str__(self):
return self.Message
# helper functions to facilitate consistency in warnings
# each function is for a different common warning
@staticmethod
def Expected(Str, File, Line):
return Warning("expected {}".format(Str), File, Line)
@staticmethod
def ExpectedEquals(File, Line):
return Warning.Expected("'='", File, Line)
@staticmethod
def ExpectedCurlyOpen(File, Line):
return Warning.Expected("'{'", File, Line)
@staticmethod
def ExpectedCurlyClose(File, Line):
return Warning.Expected("'}'", File, Line)
@staticmethod
def ExpectedBracketClose(File, Line):
return Warning.Expected("']'", File, Line)
## The Include file content class that used to record file data when parsing include file
#
# May raise Exception when opening file.
#
class IncludeFileProfile:
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName):
self.FileName = FileName
self.FileLinesList = []
try:
with open(FileName, "r") as fsock:
self.FileLinesList = fsock.readlines()
for index, line in enumerate(self.FileLinesList):
if not line.endswith(TAB_LINE_BREAK):
self.FileLinesList[index] += TAB_LINE_BREAK
except:
EdkLogger.error("FdfParser", FILE_OPEN_FAILURE, ExtraData=FileName)
self.InsertStartLineNumber = None
self.InsertAdjust = 0
self.IncludeFileList = []
self.Level = 1 # first level include file
def GetTotalLines(self):
TotalLines = self.InsertAdjust + len(self.FileLinesList)
for Profile in self.IncludeFileList:
TotalLines += Profile.GetTotalLines()
return TotalLines
def IsLineInFile(self, Line):
if Line >= self.InsertStartLineNumber and Line < self.InsertStartLineNumber + self.GetTotalLines():
return True
return False
def GetLineInFile(self, Line):
if not self.IsLineInFile (Line):
return (self.FileName, -1)
InsertedLines = self.InsertStartLineNumber
for Profile in self.IncludeFileList:
if Profile.IsLineInFile(Line):
return Profile.GetLineInFile(Line)
elif Line >= Profile.InsertStartLineNumber:
InsertedLines += Profile.GetTotalLines()
return (self.FileName, Line - InsertedLines + 1)
## The FDF content class that used to record file data when parsing FDF
#
# May raise Exception when opening file.
#
class FileProfile:
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName):
self.FileLinesList = []
try:
with open(FileName, "r") as fsock:
self.FileLinesList = fsock.readlines()
except:
EdkLogger.error("FdfParser", FILE_OPEN_FAILURE, ExtraData=FileName)
self.FileName = FileName
self.PcdDict = OrderedDict()
self.PcdLocalDict = OrderedDict()
self.InfList = []
self.InfDict = {'ArchTBD':[]}
# ECC will use this Dict and List information
self.PcdFileLineDict = {}
self.InfFileLineList = []
self.FdDict = {}
self.FdNameNotSet = False
self.FvDict = {}
self.CapsuleDict = {}
self.RuleDict = {}
self.OptRomDict = {}
self.FmpPayloadDict = {}
## The syntax parser for FDF
#
# PreprocessFile method should be called prior to ParseFile
# CycleReferenceCheck method can detect cycles in FDF contents
#
# GetNext*** procedures get the next token first and then make a judgement on it.
# Get*** procedures make a judgement on the current token only.
#
class FdfParser:
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName):
self.Profile = FileProfile(FileName)
self.FileName = FileName
self.CurrentLineNumber = 1
self.CurrentOffsetWithinLine = 0
self.CurrentFdName = None
self.CurrentFvName = None
self._Token = ""
self._SkippedChars = ""
GlobalData.gFdfParser = self
        # Used to record the current section info
self._CurSection = []
# Key: [section name, UI name, arch]
# Value: {MACRO_NAME: MACRO_VALUE}
self._MacroDict = tdict(True, 3)
self._PcdDict = OrderedDict()
self._WipeOffArea = []
if GenFdsGlobalVariable.WorkSpaceDir == '':
GenFdsGlobalVariable.WorkSpaceDir = os.getenv("WORKSPACE")
## _SkipWhiteSpace() method
#
# Skip white spaces from current char.
#
# @param self The object pointer
#
def _SkipWhiteSpace(self):
while not self._EndOfFile():
if self._CurrentChar() in {TAB_PRINTCHAR_NUL, T_CHAR_CR, TAB_LINE_BREAK, TAB_SPACE_SPLIT, T_CHAR_TAB}:
self._SkippedChars += str(self._CurrentChar())
self._GetOneChar()
else:
return
return
## _EndOfFile() method
#
# Judge current buffer pos is at file end
#
# @param self The object pointer
# @retval True Current File buffer position is at file end
# @retval False Current File buffer position is NOT at file end
#
def _EndOfFile(self):
NumberOfLines = len(self.Profile.FileLinesList)
SizeOfLastLine = len(self.Profile.FileLinesList[-1])
if self.CurrentLineNumber == NumberOfLines and self.CurrentOffsetWithinLine >= SizeOfLastLine - 1:
return True
if self.CurrentLineNumber > NumberOfLines:
return True
return False
## _EndOfLine() method
#
# Judge current buffer pos is at line end
#
# @param self The object pointer
# @retval True Current File buffer position is at line end
# @retval False Current File buffer position is NOT at line end
#
def _EndOfLine(self):
if self.CurrentLineNumber > len(self.Profile.FileLinesList):
return True
SizeOfCurrentLine = len(self.Profile.FileLinesList[self.CurrentLineNumber - 1])
if self.CurrentOffsetWithinLine >= SizeOfCurrentLine:
return True
return False
## Rewind() method
#
# Reset file data buffer to the initial state
#
# @param self The object pointer
# @param DestLine Optional new destination line number.
# @param DestOffset Optional new destination offset.
#
def Rewind(self, DestLine = 1, DestOffset = 0):
self.CurrentLineNumber = DestLine
self.CurrentOffsetWithinLine = DestOffset
## _UndoOneChar() method
#
# Go back one char in the file buffer
#
# @param self The object pointer
# @retval True Successfully go back one char
# @retval False Not able to go back one char as file beginning reached
#
def _UndoOneChar(self):
if self.CurrentLineNumber == 1 and self.CurrentOffsetWithinLine == 0:
return False
elif self.CurrentOffsetWithinLine == 0:
self.CurrentLineNumber -= 1
self.CurrentOffsetWithinLine = len(self._CurrentLine()) - 1
else:
self.CurrentOffsetWithinLine -= 1
return True
## _GetOneChar() method
#
# Move forward one char in the file buffer
#
# @param self The object pointer
#
def _GetOneChar(self):
if self.CurrentOffsetWithinLine == len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - 1:
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
else:
self.CurrentOffsetWithinLine += 1
## _CurrentChar() method
#
# Get the char pointed to by the file buffer pointer
#
# @param self The object pointer
# @retval Char Current char
#
def _CurrentChar(self):
return self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine]
## _NextChar() method
#
# Get the one char pass the char pointed to by the file buffer pointer
#
# @param self The object pointer
# @retval Char Next char
#
def _NextChar(self):
if self.CurrentOffsetWithinLine == len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - 1:
return self.Profile.FileLinesList[self.CurrentLineNumber][0]
return self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine + 1]
## _SetCurrentCharValue() method
#
# Modify the value of current char
#
# @param self The object pointer
# @param Value The new value of current char
#
def _SetCurrentCharValue(self, Value):
self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine] = Value
## _CurrentLine() method
#
# Get the list that contains current line contents
#
# @param self The object pointer
# @retval List current line contents
#
def _CurrentLine(self):
return self.Profile.FileLinesList[self.CurrentLineNumber - 1]
def _StringToList(self):
self.Profile.FileLinesList = [list(s) for s in self.Profile.FileLinesList]
if not self.Profile.FileLinesList:
EdkLogger.error('FdfParser', FILE_READ_FAILURE, 'The file is empty!', File=self.FileName)
self.Profile.FileLinesList[-1].append(' ')
def _ReplaceFragment(self, StartPos, EndPos, Value = ' '):
if StartPos[0] == EndPos[0]:
Offset = StartPos[1]
while Offset <= EndPos[1]:
self.Profile.FileLinesList[StartPos[0]][Offset] = Value
Offset += 1
return
Offset = StartPos[1]
while self.Profile.FileLinesList[StartPos[0]][Offset] not in CR_LB_SET:
self.Profile.FileLinesList[StartPos[0]][Offset] = Value
Offset += 1
Line = StartPos[0]
while Line < EndPos[0]:
Offset = 0
while self.Profile.FileLinesList[Line][Offset] not in CR_LB_SET:
self.Profile.FileLinesList[Line][Offset] = Value
Offset += 1
Line += 1
Offset = 0
while Offset <= EndPos[1]:
self.Profile.FileLinesList[EndPos[0]][Offset] = Value
Offset += 1
def _SetMacroValue(self, Macro, Value):
if not self._CurSection:
return
MacroDict = {}
if not self._MacroDict[self._CurSection[0], self._CurSection[1], self._CurSection[2]]:
self._MacroDict[self._CurSection[0], self._CurSection[1], self._CurSection[2]] = MacroDict
else:
MacroDict = self._MacroDict[self._CurSection[0], self._CurSection[1], self._CurSection[2]]
MacroDict[Macro] = Value
def _GetMacroValue(self, Macro):
# Highest priority
if Macro in GlobalData.gCommandLineDefines:
return GlobalData.gCommandLineDefines[Macro]
if Macro in GlobalData.gGlobalDefines:
return GlobalData.gGlobalDefines[Macro]
if self._CurSection:
MacroDict = self._MacroDict[
self._CurSection[0],
self._CurSection[1],
self._CurSection[2]
]
if MacroDict and Macro in MacroDict:
return MacroDict[Macro]
# Lowest priority
if Macro in GlobalData.gPlatformDefines:
return GlobalData.gPlatformDefines[Macro]
return None
def _SectionHeaderParser(self, Section):
# [Defines]
# [FD.UiName]: use dummy instead if UI name is optional
# [FV.UiName]
# [Capsule.UiName]
# [Rule]: don't take rule section into account, macro is not allowed in this section
# [OptionRom.DriverName]
self._CurSection = []
Section = Section.strip()[1:-1].upper().replace(' ', '').strip(TAB_SPLIT)
ItemList = Section.split(TAB_SPLIT)
Item = ItemList[0]
if Item == '' or Item == 'RULE':
return
if Item == TAB_COMMON_DEFINES.upper():
self._CurSection = [TAB_COMMON, TAB_COMMON, TAB_COMMON]
elif len(ItemList) > 1:
self._CurSection = [ItemList[0], ItemList[1], TAB_COMMON]
elif len(ItemList) > 0:
self._CurSection = [ItemList[0], 'DUMMY', TAB_COMMON]
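        # Illustrative note (not part of the original source): a header such as
        # "[FV.FvMain]" yields _CurSection == ['FV', 'FVMAIN', 'COMMON'],
        # "[Defines]" yields ['COMMON', 'COMMON', 'COMMON'], and a
        # "[Rule. ...]" header leaves _CurSection empty because macros are not
        # allowed in Rule sections.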
## PreprocessFile() method
#
# Preprocess file contents, replace comments with spaces.
# In the end, rewind the file buffer pointer to the beginning
# BUGBUG: No !include statement processing contained in this procedure
# !include statement should be expanded at the same FileLinesList[CurrentLineNumber - 1]
#
# @param self The object pointer
#
def PreprocessFile(self):
self.Rewind()
InComment = False
DoubleSlashComment = False
HashComment = False
# HashComment in quoted string " " is ignored.
InString = False
while not self._EndOfFile():
if self._CurrentChar() == T_CHAR_DOUBLE_QUOTE and not InComment:
InString = not InString
            # a new line ends // and '#' comments
if self._CurrentChar() == TAB_LINE_BREAK:
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
if InComment and DoubleSlashComment:
InComment = False
DoubleSlashComment = False
if InComment and HashComment:
InComment = False
HashComment = False
# check for */ comment end
elif InComment and not DoubleSlashComment and not HashComment and self._CurrentChar() == TAB_STAR and self._NextChar() == TAB_BACK_SLASH:
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
InComment = False
# set comments to spaces
elif InComment:
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
# check for // comment
elif self._CurrentChar() == TAB_BACK_SLASH and self._NextChar() == TAB_BACK_SLASH and not self._EndOfLine():
InComment = True
DoubleSlashComment = True
# check for '#' comment
elif self._CurrentChar() == TAB_COMMENT_SPLIT and not self._EndOfLine() and not InString:
InComment = True
HashComment = True
# check for /* comment start
elif self._CurrentChar() == TAB_BACK_SLASH and self._NextChar() == TAB_STAR:
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
InComment = True
else:
self._GetOneChar()
# restore from ListOfList to ListOfString
self.Profile.FileLinesList = ["".join(list) for list in self.Profile.FileLinesList]
self.Rewind()
## PreprocessIncludeFile() method
#
# Preprocess file contents, replace !include statements with file contents.
# In the end, rewind the file buffer pointer to the beginning
#
# @param self The object pointer
#
def PreprocessIncludeFile(self):
# nested include support
Processed = False
MacroDict = {}
while self._GetNextToken():
if self._Token == TAB_DEFINE:
if not self._GetNextToken():
raise Warning.Expected("Macro name", self.FileName, self.CurrentLineNumber)
Macro = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
Value = self._GetExpression()
MacroDict[Macro] = Value
elif self._Token == TAB_INCLUDE:
Processed = True
IncludeLine = self.CurrentLineNumber
IncludeOffset = self.CurrentOffsetWithinLine - len(TAB_INCLUDE)
if not self._GetNextToken():
raise Warning.Expected("include file name", self.FileName, self.CurrentLineNumber)
IncFileName = self._Token
PreIndex = 0
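                # Expand $(MACRO) references in the include file name; an expansion that itself
                # contains '$(' is rescanned, and an undefined macro raises a Warning.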
StartPos = IncFileName.find('$(', PreIndex)
EndPos = IncFileName.find(')', StartPos+2)
while StartPos != -1 and EndPos != -1:
Macro = IncFileName[StartPos+2: EndPos]
MacroVal = self._GetMacroValue(Macro)
if not MacroVal:
if Macro in MacroDict:
MacroVal = MacroDict[Macro]
if MacroVal is not None:
IncFileName = IncFileName.replace('$(' + Macro + ')', MacroVal, 1)
if MacroVal.find('$(') != -1:
PreIndex = StartPos
else:
PreIndex = StartPos + len(MacroVal)
else:
raise Warning("The Macro %s is not defined" %Macro, self.FileName, self.CurrentLineNumber)
StartPos = IncFileName.find('$(', PreIndex)
EndPos = IncFileName.find(')', StartPos+2)
IncludedFile = NormPath(IncFileName)
#
# First search the include file under the same directory as FDF file
#
IncludedFile1 = PathClass(IncludedFile, os.path.dirname(self.FileName))
ErrorCode = IncludedFile1.Validate()[0]
if ErrorCode != 0:
#
# Then search the include file under the same directory as DSC file
#
PlatformDir = ''
if GenFdsGlobalVariable.ActivePlatform:
PlatformDir = GenFdsGlobalVariable.ActivePlatform.Dir
elif GlobalData.gActivePlatform:
PlatformDir = GlobalData.gActivePlatform.MetaFile.Dir
IncludedFile1 = PathClass(IncludedFile, PlatformDir)
ErrorCode = IncludedFile1.Validate()[0]
if ErrorCode != 0:
#
# Also search file under the WORKSPACE directory
#
IncludedFile1 = PathClass(IncludedFile, GlobalData.gWorkspace)
ErrorCode = IncludedFile1.Validate()[0]
if ErrorCode != 0:
raise Warning("The include file does not exist under below directories: \n%s\n%s\n%s\n"%(os.path.dirname(self.FileName), PlatformDir, GlobalData.gWorkspace),
self.FileName, self.CurrentLineNumber)
if not IsValidInclude (IncludedFile1.Path, self.CurrentLineNumber):
raise Warning("The include file {0} is causing a include loop.\n".format (IncludedFile1.Path), self.FileName, self.CurrentLineNumber)
IncFileProfile = IncludeFileProfile(IncludedFile1.Path)
CurrentLine = self.CurrentLineNumber
CurrentOffset = self.CurrentOffsetWithinLine
# list index of the insertion, note that line number is 'CurrentLine + 1'
InsertAtLine = CurrentLine
ParentProfile = GetParentAtLine (CurrentLine)
if ParentProfile is not None:
ParentProfile.IncludeFileList.insert(0, IncFileProfile)
IncFileProfile.Level = ParentProfile.Level + 1
IncFileProfile.InsertStartLineNumber = InsertAtLine + 1
# deal with remaining portions after "!include filename", if exists.
if self._GetNextToken():
if self.CurrentLineNumber == CurrentLine:
RemainingLine = self._CurrentLine()[CurrentOffset:]
self.Profile.FileLinesList.insert(self.CurrentLineNumber, RemainingLine)
IncFileProfile.InsertAdjust += 1
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
for Line in IncFileProfile.FileLinesList:
self.Profile.FileLinesList.insert(InsertAtLine, Line)
self.CurrentLineNumber += 1
InsertAtLine += 1
                    # inserted at the front (most recently processed include first) so error lines can be mapped back to their include file
AllIncludeFileList.insert(0, IncFileProfile)
# comment out the processed include file statement
TempList = list(self.Profile.FileLinesList[IncludeLine - 1])
TempList.insert(IncludeOffset, TAB_COMMENT_SPLIT)
self.Profile.FileLinesList[IncludeLine - 1] = ''.join(TempList)
if Processed: # Nested and back-to-back support
self.Rewind(DestLine = IncFileProfile.InsertStartLineNumber - 1)
Processed = False
# Preprocess done.
self.Rewind()
@staticmethod
def _GetIfListCurrentItemStat(IfList):
if len(IfList) == 0:
return True
for Item in IfList:
if Item[1] == False:
return False
return True
## PreprocessConditionalStatement() method
#
# Preprocess conditional statement.
# In the end, rewind the file buffer pointer to the beginning
#
# @param self The object pointer
#
def PreprocessConditionalStatement(self):
# IfList is a stack of if branches with elements of list [Pos, CondSatisfied, BranchDetermined]
IfList = []
RegionLayoutLine = 0
ReplacedLine = -1
while self._GetNextToken():
# Determine section name and the location dependent macro
if self._GetIfListCurrentItemStat(IfList):
if self._Token.startswith(TAB_SECTION_START):
Header = self._Token
if not self._Token.endswith(TAB_SECTION_END):
self._SkipToToken(TAB_SECTION_END)
Header += self._SkippedChars
if Header.find('$(') != -1:
raise Warning("macro cannot be used in section header", self.FileName, self.CurrentLineNumber)
self._SectionHeaderParser(Header)
continue
# Replace macros except in RULE section or out of section
elif self._CurSection and ReplacedLine != self.CurrentLineNumber:
ReplacedLine = self.CurrentLineNumber
self._UndoToken()
CurLine = self.Profile.FileLinesList[ReplacedLine - 1]
PreIndex = 0
StartPos = CurLine.find('$(', PreIndex)
EndPos = CurLine.find(')', StartPos+2)
while StartPos != -1 and EndPos != -1 and self._Token not in {TAB_IF_DEF, TAB_IF_N_DEF, TAB_IF, TAB_ELSE_IF}:
MacroName = CurLine[StartPos+2: EndPos]
MacroValue = self._GetMacroValue(MacroName)
if MacroValue is not None:
CurLine = CurLine.replace('$(' + MacroName + ')', MacroValue, 1)
if MacroValue.find('$(') != -1:
PreIndex = StartPos
else:
PreIndex = StartPos + len(MacroValue)
else:
PreIndex = EndPos + 1
StartPos = CurLine.find('$(', PreIndex)
EndPos = CurLine.find(')', StartPos+2)
self.Profile.FileLinesList[ReplacedLine - 1] = CurLine
continue
if self._Token == TAB_DEFINE:
if self._GetIfListCurrentItemStat(IfList):
if not self._CurSection:
raise Warning("macro cannot be defined in Rule section or out of section", self.FileName, self.CurrentLineNumber)
DefineLine = self.CurrentLineNumber - 1
DefineOffset = self.CurrentOffsetWithinLine - len(TAB_DEFINE)
if not self._GetNextToken():
raise Warning.Expected("Macro name", self.FileName, self.CurrentLineNumber)
Macro = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
Value = self._GetExpression()
self._SetMacroValue(Macro, Value)
self._WipeOffArea.append(((DefineLine, DefineOffset), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self._Token == 'SET':
if not self._GetIfListCurrentItemStat(IfList):
continue
SetLine = self.CurrentLineNumber - 1
SetOffset = self.CurrentOffsetWithinLine - len('SET')
PcdPair = self._GetNextPcdSettings()
PcdName = "%s.%s" % (PcdPair[1], PcdPair[0])
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
Value = self._GetExpression()
Value = self._EvaluateConditional(Value, self.CurrentLineNumber, 'eval', True)
self._PcdDict[PcdName] = Value
self.Profile.PcdDict[PcdPair] = Value
self.SetPcdLocalation(PcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
self._WipeOffArea.append(((SetLine, SetOffset), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self._Token in {TAB_IF_DEF, TAB_IF_N_DEF, TAB_IF}:
IfStartPos = (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len(self._Token))
IfList.append([IfStartPos, None, None])
CondLabel = self._Token
Expression = self._GetExpression()
if CondLabel == TAB_IF:
ConditionSatisfied = self._EvaluateConditional(Expression, IfList[-1][0][0] + 1, 'eval')
else:
ConditionSatisfied = self._EvaluateConditional(Expression, IfList[-1][0][0] + 1, 'in')
if CondLabel == TAB_IF_N_DEF:
ConditionSatisfied = not ConditionSatisfied
BranchDetermined = ConditionSatisfied
IfList[-1] = [IfList[-1][0], ConditionSatisfied, BranchDetermined]
if ConditionSatisfied:
self._WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self._Token in {TAB_ELSE_IF, TAB_ELSE}:
ElseStartPos = (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len(self._Token))
if len(IfList) <= 0:
raise Warning("Missing !if statement", self.FileName, self.CurrentLineNumber)
if IfList[-1][1]:
IfList[-1] = [ElseStartPos, False, True]
self._WipeOffArea.append((ElseStartPos, (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
else:
self._WipeOffArea.append((IfList[-1][0], ElseStartPos))
IfList[-1] = [ElseStartPos, True, IfList[-1][2]]
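                # For !elseif the expression is re-evaluated (a bare !else keeps the value set above);
                # a branch is only honored if no earlier branch of the chain was taken (BranchDetermined),
                # and an honored branch has its directive text wiped off.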
if self._Token == TAB_ELSE_IF:
Expression = self._GetExpression()
ConditionSatisfied = self._EvaluateConditional(Expression, IfList[-1][0][0] + 1, 'eval')
IfList[-1] = [IfList[-1][0], ConditionSatisfied, IfList[-1][2]]
if IfList[-1][1]:
if IfList[-1][2]:
IfList[-1][1] = False
else:
IfList[-1][2] = True
self._WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self._Token == '!endif':
if len(IfList) <= 0:
raise Warning("Missing !if statement", self.FileName, self.CurrentLineNumber)
if IfList[-1][1]:
self._WipeOffArea.append(((self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len('!endif')), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
else:
self._WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
IfList.pop()
elif not IfList: # Don't use PCDs inside conditional directive
if self.CurrentLineNumber <= RegionLayoutLine:
# Don't try the same line twice
continue
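                # Outside of any conditional block, harvest "SET <Pcd> = <Value>" shortcuts and
                # "Offset|Size" region lines so their PCDs can be used by later !if expressions.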
SetPcd = ShortcutPcdPattern.match(self.Profile.FileLinesList[self.CurrentLineNumber - 1])
if SetPcd:
self._PcdDict[SetPcd.group('name')] = SetPcd.group('value')
RegionLayoutLine = self.CurrentLineNumber
continue
RegionSize = RegionSizePattern.match(self.Profile.FileLinesList[self.CurrentLineNumber - 1])
if not RegionSize:
RegionLayoutLine = self.CurrentLineNumber
continue
RegionSizeGuid = RegionSizeGuidPattern.match(self.Profile.FileLinesList[self.CurrentLineNumber])
if not RegionSizeGuid:
RegionLayoutLine = self.CurrentLineNumber + 1
continue
self._PcdDict[RegionSizeGuid.group('base')] = RegionSize.group('base')
self._PcdDict[RegionSizeGuid.group('size')] = RegionSize.group('size')
RegionLayoutLine = self.CurrentLineNumber + 1
if IfList:
raise Warning("Missing !endif", self.FileName, self.CurrentLineNumber)
self.Rewind()
def _CollectMacroPcd(self):
MacroDict = {}
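        # Merge PCD values and macro definitions into one lookup for expression evaluation;
        # later update() calls override earlier ones, so command-line scope wins over platform scope.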
# PCD macro
MacroDict.update(GlobalData.gPlatformPcds)
MacroDict.update(self._PcdDict)
# Lowest priority
MacroDict.update(GlobalData.gPlatformDefines)
if self._CurSection:
# Defines macro
ScopeMacro = self._MacroDict[TAB_COMMON, TAB_COMMON, TAB_COMMON]
if ScopeMacro:
MacroDict.update(ScopeMacro)
# Section macro
ScopeMacro = self._MacroDict[
self._CurSection[0],
self._CurSection[1],
self._CurSection[2]
]
if ScopeMacro:
MacroDict.update(ScopeMacro)
MacroDict.update(GlobalData.gGlobalDefines)
MacroDict.update(GlobalData.gCommandLineDefines)
for Item in GlobalData.BuildOptionPcd:
if isinstance(Item, tuple):
continue
PcdName, TmpValue = Item.split(TAB_EQUAL_SPLIT)
TmpValue = BuildOptionValue(TmpValue, {})
MacroDict[PcdName.strip()] = TmpValue
# Highest priority
return MacroDict
def _EvaluateConditional(self, Expression, Line, Op = None, Value = None):
MacroPcdDict = self._CollectMacroPcd()
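        # Op == 'eval' evaluates the expression with ValueExpression; any other Op treats the
        # expression as a macro name and only tests whether it is defined (used for !ifdef/!ifndef).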
if Op == 'eval':
try:
if Value:
return ValueExpression(Expression, MacroPcdDict)(True)
else:
return ValueExpression(Expression, MacroPcdDict)()
except WrnExpression as Excpt:
#
# Catch expression evaluation warning here. We need to report
# the precise number of line and return the evaluation result
#
EdkLogger.warn('Parser', "Suspicious expression: %s" % str(Excpt),
File=self.FileName, ExtraData=self._CurrentLine(),
Line=Line)
return Excpt.result
except Exception as Excpt:
if hasattr(Excpt, 'Pcd'):
if Excpt.Pcd in GlobalData.gPlatformOtherPcds:
Info = GlobalData.gPlatformOtherPcds[Excpt.Pcd]
raise Warning("Cannot use this PCD (%s) in an expression as"
" it must be defined in a [PcdsFixedAtBuild] or [PcdsFeatureFlag] section"
" of the DSC file (%s), and it is currently defined in this section:"
" %s, line #: %d." % (Excpt.Pcd, GlobalData.gPlatformOtherPcds['DSCFILE'], Info[0], Info[1]),
self.FileName, Line)
else:
raise Warning("PCD (%s) is not defined in DSC file (%s)" % (Excpt.Pcd, GlobalData.gPlatformOtherPcds['DSCFILE']),
self.FileName, Line)
else:
raise Warning(str(Excpt), self.FileName, Line)
else:
if Expression.startswith('$(') and Expression[-1] == ')':
Expression = Expression[2:-1]
return Expression in MacroPcdDict
## _IsToken() method
#
    #   Check whether the given string starts at the current char position (same line only)
    #   If found, the string value is put into self._Token
#
# @param self The object pointer
# @param String The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find string, file buffer pointer moved forward
# @retval False Not able to find string, file buffer pointer not changed
#
def _IsToken(self, String, IgnoreCase = False):
self._SkipWhiteSpace()
# Only consider the same line, no multi-line token allowed
StartPos = self.CurrentOffsetWithinLine
index = -1
if IgnoreCase:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].upper().find(String.upper())
else:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].find(String)
if index == 0:
self.CurrentOffsetWithinLine += len(String)
self._Token = self._CurrentLine()[StartPos: self.CurrentOffsetWithinLine]
return True
return False
## _IsKeyword() method
#
    #   Check whether the given keyword starts at the current char position, whole word only
    #   If found, the string value is put into self._Token
#
# @param self The object pointer
# @param Keyword The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find string, file buffer pointer moved forward
# @retval False Not able to find string, file buffer pointer not changed
#
def _IsKeyword(self, KeyWord, IgnoreCase = False):
self._SkipWhiteSpace()
# Only consider the same line, no multi-line token allowed
StartPos = self.CurrentOffsetWithinLine
index = -1
if IgnoreCase:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].upper().find(KeyWord.upper())
else:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].find(KeyWord)
if index == 0:
followingChar = self._CurrentLine()[self.CurrentOffsetWithinLine + len(KeyWord)]
if not str(followingChar).isspace() and followingChar not in SEPARATORS:
return False
self.CurrentOffsetWithinLine += len(KeyWord)
self._Token = self._CurrentLine()[StartPos: self.CurrentOffsetWithinLine]
return True
return False
def _GetExpression(self):
Line = self.Profile.FileLinesList[self.CurrentLineNumber - 1]
Index = len(Line) - 1
while Line[Index] in CR_LB_SET:
Index -= 1
ExpressionString = self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:Index+1]
self.CurrentOffsetWithinLine += len(ExpressionString)
ExpressionString = ExpressionString.strip()
return ExpressionString
## _GetNextWord() method
#
# Get next C name from file lines
# If found, the string value is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a C name string, file buffer pointer moved forward
# @retval False Not able to find a C name string, file buffer pointer not changed
#
def _GetNextWord(self):
self._SkipWhiteSpace()
if self._EndOfFile():
return False
TempChar = self._CurrentChar()
StartPos = self.CurrentOffsetWithinLine
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') or TempChar == '_':
self._GetOneChar()
while not self._EndOfLine():
TempChar = self._CurrentChar()
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') \
or (TempChar >= '0' and TempChar <= '9') or TempChar == '_' or TempChar == '-':
self._GetOneChar()
else:
break
self._Token = self._CurrentLine()[StartPos: self.CurrentOffsetWithinLine]
return True
return False
def _GetNextPcdWord(self):
self._SkipWhiteSpace()
if self._EndOfFile():
return False
TempChar = self._CurrentChar()
StartPos = self.CurrentOffsetWithinLine
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') or TempChar == '_' or TempChar == TAB_SECTION_START or TempChar == TAB_SECTION_END:
self._GetOneChar()
while not self._EndOfLine():
TempChar = self._CurrentChar()
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') \
or (TempChar >= '0' and TempChar <= '9') or TempChar == '_' or TempChar == '-' or TempChar == TAB_SECTION_START or TempChar == TAB_SECTION_END:
self._GetOneChar()
else:
break
self._Token = self._CurrentLine()[StartPos: self.CurrentOffsetWithinLine]
return True
return False
## _GetNextToken() method
#
# Get next token unit before a separator
# If found, the string value is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a token unit, file buffer pointer moved forward
# @retval False Not able to find a token unit, file buffer pointer not changed
#
def _GetNextToken(self):
# Skip leading spaces, if exist.
self._SkipWhiteSpace()
if self._EndOfFile():
return False
# Record the token start position, the position of the first non-space char.
StartPos = self.CurrentOffsetWithinLine
StartLine = self.CurrentLineNumber
while StartLine == self.CurrentLineNumber:
TempChar = self._CurrentChar()
# Try to find the end char that is not a space and not in separator tuple.
# That is, when we got a space or any char in the tuple, we got the end of token.
if not str(TempChar).isspace() and TempChar not in SEPARATORS:
self._GetOneChar()
# if we happen to meet a separator as the first char, we must proceed to get it.
# That is, we get a token that is a separator char. normally it is the boundary of other tokens.
elif StartPos == self.CurrentOffsetWithinLine and TempChar in SEPARATORS:
self._GetOneChar()
break
else:
break
# else:
# return False
EndPos = self.CurrentOffsetWithinLine
if self.CurrentLineNumber != StartLine:
EndPos = len(self.Profile.FileLinesList[StartLine-1])
self._Token = self.Profile.FileLinesList[StartLine-1][StartPos: EndPos]
if self._Token.lower() in {TAB_IF, TAB_END_IF, TAB_ELSE_IF, TAB_ELSE, TAB_IF_DEF, TAB_IF_N_DEF, TAB_ERROR, TAB_INCLUDE}:
self._Token = self._Token.lower()
if StartPos != self.CurrentOffsetWithinLine:
return True
else:
return False
## _GetNextGuid() method
#
    #   Get the next token and check whether it is a registry format GUID
    #   If found, the GUID string is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a registry format GUID, file buffer pointer moved forward
# @retval False Not able to find a registry format GUID, file buffer pointer not changed
#
def _GetNextGuid(self):
if not self._GetNextToken():
return False
if GlobalData.gGuidPattern.match(self._Token) is not None:
return True
elif self._Token in GlobalData.gGuidDict:
return True
else:
self._UndoToken()
return False
@staticmethod
def _Verify(Name, Value, Scope):
# value verification only applies to numeric values.
if Scope not in TAB_PCD_NUMERIC_TYPES:
return
ValueNumber = 0
try:
ValueNumber = int(Value, 0)
except:
EdkLogger.error("FdfParser", FORMAT_INVALID, "The value is not valid dec or hex number for %s." % Name)
if ValueNumber < 0:
EdkLogger.error("FdfParser", FORMAT_INVALID, "The value can't be set to negative value for %s." % Name)
if ValueNumber > MAX_VAL_TYPE[Scope]:
EdkLogger.error("FdfParser", FORMAT_INVALID, "Too large value for %s." % Name)
return True
## _UndoToken() method
#
# Go back one token unit in file buffer
#
# @param self The object pointer
#
def _UndoToken(self):
self._UndoOneChar()
while self._CurrentChar().isspace():
if not self._UndoOneChar():
self._GetOneChar()
return
StartPos = self.CurrentOffsetWithinLine
CurrentLine = self.CurrentLineNumber
while CurrentLine == self.CurrentLineNumber:
TempChar = self._CurrentChar()
            # Step backward over chars that are neither spaces nor separators;
            # reaching one of those means we have just passed the start of the token.
if not str(TempChar).isspace() and not TempChar in SEPARATORS:
if not self._UndoOneChar():
return
            # If the char we backed onto is itself a separator and we have not moved yet,
            # the undone token was that single separator char, so stop here.
elif StartPos == self.CurrentOffsetWithinLine and TempChar in SEPARATORS:
return
else:
break
self._GetOneChar()
## _GetNextHexNumber() method
#
# Get next HEX data before a separator
# If found, the HEX data is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a HEX data, file buffer pointer moved forward
# @retval False Not able to find a HEX data, file buffer pointer not changed
#
def _GetNextHexNumber(self):
if not self._GetNextToken():
return False
if GlobalData.gHexPatternAll.match(self._Token):
return True
else:
self._UndoToken()
return False
## _GetNextDecimalNumber() method
#
# Get next decimal data before a separator
# If found, the decimal data is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a decimal data, file buffer pointer moved forward
# @retval False Not able to find a decimal data, file buffer pointer not changed
#
def _GetNextDecimalNumber(self):
if not self._GetNextToken():
return False
if self._Token.isdigit():
return True
else:
self._UndoToken()
return False
def _GetNextPcdSettings(self):
if not self._GetNextWord():
raise Warning.Expected("<PcdTokenSpaceCName>", self.FileName, self.CurrentLineNumber)
pcdTokenSpaceCName = self._Token
if not self._IsToken(TAB_SPLIT):
raise Warning.Expected(".", self.FileName, self.CurrentLineNumber)
if not self._GetNextWord():
raise Warning.Expected("<PcdCName>", self.FileName, self.CurrentLineNumber)
pcdCName = self._Token
Fields = []
while self._IsToken(TAB_SPLIT):
if not self._GetNextPcdWord():
raise Warning.Expected("Pcd Fields", self.FileName, self.CurrentLineNumber)
Fields.append(self._Token)
return (pcdCName, pcdTokenSpaceCName,TAB_SPLIT.join(Fields))
## _GetStringData() method
#
# Get string contents quoted in ""
    #   If found, the string data is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a string data, file buffer pointer moved forward
# @retval False Not able to find a string data, file buffer pointer not changed
#
def _GetStringData(self):
QuoteToUse = None
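        # A token starting with " or L" (or ' / L') marks a quoted string: rewind, then re-scan
        # up to the closing quote, which must appear on the same line.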
if self._Token.startswith(T_CHAR_DOUBLE_QUOTE) or self._Token.startswith("L\""):
QuoteToUse = T_CHAR_DOUBLE_QUOTE
elif self._Token.startswith(T_CHAR_SINGLE_QUOTE) or self._Token.startswith("L\'"):
QuoteToUse = T_CHAR_SINGLE_QUOTE
else:
return False
self._UndoToken()
self._SkipToToken(QuoteToUse)
currentLineNumber = self.CurrentLineNumber
if not self._SkipToToken(QuoteToUse):
raise Warning(QuoteToUse, self.FileName, self.CurrentLineNumber)
if currentLineNumber != self.CurrentLineNumber:
raise Warning(QuoteToUse, self.FileName, self.CurrentLineNumber)
self._Token = self._SkippedChars.rstrip(QuoteToUse)
return True
## _SkipToToken() method
#
# Search forward in file buffer for the string
# The skipped chars are put into self._SkippedChars
#
# @param self The object pointer
# @param String The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find the string, file buffer pointer moved forward
# @retval False Not able to find the string, file buffer pointer not changed
#
def _SkipToToken(self, String, IgnoreCase = False):
StartPos = self.GetFileBufferPos()
self._SkippedChars = ""
while not self._EndOfFile():
index = -1
if IgnoreCase:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].upper().find(String.upper())
else:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].find(String)
if index == 0:
self.CurrentOffsetWithinLine += len(String)
self._SkippedChars += String
return True
self._SkippedChars += str(self._CurrentChar())
self._GetOneChar()
self.SetFileBufferPos(StartPos)
self._SkippedChars = ""
return False
## GetFileBufferPos() method
#
# Return the tuple of current line and offset within the line
#
# @param self The object pointer
# @retval Tuple Line number and offset pair
#
def GetFileBufferPos(self):
return (self.CurrentLineNumber, self.CurrentOffsetWithinLine)
## SetFileBufferPos() method
#
# Restore the file buffer position
#
# @param self The object pointer
# @param Pos The new file buffer position
#
def SetFileBufferPos(self, Pos):
(self.CurrentLineNumber, self.CurrentOffsetWithinLine) = Pos
## Preprocess() method
#
# Preprocess comment, conditional directive, include directive, replace macro.
# Exception will be raised if syntax error found
#
# @param self The object pointer
#
def Preprocess(self):
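        # Three passes over the buffer: blank out comments and expand !include files, blank out
        # comments of the merged text and evaluate conditional directives, then wipe off the
        # fragments recorded in _WipeOffArea, re-join the lines and validate the [Defines] section.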
self._StringToList()
self.PreprocessFile()
self.PreprocessIncludeFile()
self._StringToList()
self.PreprocessFile()
self.PreprocessConditionalStatement()
self._StringToList()
for Pos in self._WipeOffArea:
self._ReplaceFragment(Pos[0], Pos[1])
self.Profile.FileLinesList = ["".join(list) for list in self.Profile.FileLinesList]
while self._GetDefines():
pass
## ParseFile() method
#
# Parse the file profile buffer to extract fd, fv ... information
# Exception will be raised if syntax error found
#
# @param self The object pointer
#
def ParseFile(self):
try:
self.Preprocess()
self._GetError()
#
# Keep processing sections of the FDF until no new sections or a syntax error is found
#
while self._GetFd() or self._GetFv() or self._GetFmp() or self._GetCapsule() or self._GetRule() or self._GetOptionRom():
pass
except Warning as X:
self._UndoToken()
#'\n\tGot Token: \"%s\" from File %s\n' % (self._Token, FileLineTuple[0]) + \
# At this point, the closest parent would be the included file itself
Profile = GetParentAtLine(X.OriginalLineNumber)
if Profile is not None:
X.Message += ' near line %d, column %d: %s' \
% (X.LineNumber, 0, Profile.FileLinesList[X.LineNumber-1])
else:
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
X.Message += ' near line %d, column %d: %s' \
% (FileLineTuple[1], self.CurrentOffsetWithinLine + 1, self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:].rstrip(TAB_LINE_BREAK).rstrip(T_CHAR_CR))
raise
## SectionParser() method
#
# Parse the file section info
# Exception will be raised if syntax error found
#
# @param self The object pointer
# @param section The section string
def SectionParser(self, section):
S = section.upper()
if not S.startswith("[DEFINES") and not S.startswith("[FD.") and not S.startswith("[FV.") and not S.startswith("[CAPSULE.") \
and not S.startswith("[RULE.") and not S.startswith("[OPTIONROM.") and not S.startswith('[FMPPAYLOAD.'):
raise Warning("Unknown section or section appear sequence error (The correct sequence should be [DEFINES], [FD.], [FV.], [Capsule.], [Rule.], [OptionRom.], [FMPPAYLOAD.])", self.FileName, self.CurrentLineNumber)
## _GetDefines() method
#
    #   Parse the [Defines] section header and its macro assignments
#
# @param self The object pointer
# @retval True Successfully find a Defines
# @retval False Not able to find a Defines
#
def _GetDefines(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[DEFINES"):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[DEFINES", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
#print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
# % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning.Expected("[DEFINES", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
while self._GetNextWord():
# handle the SET statement
if self._Token == 'SET':
self._UndoToken()
self._GetSetStatement(None)
continue
Macro = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken() or self._Token.startswith(TAB_SECTION_START):
raise Warning.Expected("MACRO value", self.FileName, self.CurrentLineNumber)
Value = self._Token
return False
##_GetError() method
def _GetError(self):
        # Save the current position so it can be restored after scanning for !error statements
CurrentLine = self.CurrentLineNumber
CurrentOffset = self.CurrentOffsetWithinLine
while self._GetNextToken():
if self._Token == TAB_ERROR:
EdkLogger.error('FdfParser', ERROR_STATEMENT, self._CurrentLine().replace(TAB_ERROR, '', 1), File=self.FileName, Line=self.CurrentLineNumber)
self.CurrentLineNumber = CurrentLine
self.CurrentOffsetWithinLine = CurrentOffset
## _GetFd() method
#
# Get FD section contents and store its data into FD dictionary of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a FD
# @retval False Not able to find a FD
#
def _GetFd(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[FD."):
if not S.startswith("[FV.") and not S.startswith('[FMPPAYLOAD.') and not S.startswith("[CAPSULE.") \
and not S.startswith("[RULE.") and not S.startswith("[OPTIONROM."):
raise Warning("Unknown section", self.FileName, self.CurrentLineNumber)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[FD.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
#print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
# % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning.Expected("[FD.]", self.FileName, self.CurrentLineNumber)
FdName = self._GetUiName()
if FdName == "":
if len (self.Profile.FdDict) == 0:
FdName = GenFdsGlobalVariable.PlatformName
if FdName == "" and GlobalData.gActivePlatform:
FdName = GlobalData.gActivePlatform.PlatformName
self.Profile.FdNameNotSet = True
else:
raise Warning.Expected("FdName in [FD.] section", self.FileName, self.CurrentLineNumber)
self.CurrentFdName = FdName.upper()
if self.CurrentFdName in self.Profile.FdDict:
raise Warning("Unexpected the same FD name", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
FdObj = FD()
FdObj.FdUiName = self.CurrentFdName
self.Profile.FdDict[self.CurrentFdName] = FdObj
if len (self.Profile.FdDict) > 1 and self.Profile.FdNameNotSet:
raise Warning.Expected("all FDs have their name", self.FileName, self.CurrentLineNumber)
Status = self._GetCreateFile(FdObj)
if not Status:
raise Warning("FD name error", self.FileName, self.CurrentLineNumber)
while self._GetTokenStatements(FdObj):
pass
for Attr in ("BaseAddress", "Size", "ErasePolarity"):
if getattr(FdObj, Attr) is None:
self._GetNextToken()
raise Warning("Keyword %s missing" % Attr, self.FileName, self.CurrentLineNumber)
if not FdObj.BlockSizeList:
FdObj.BlockSizeList.append((1, FdObj.Size, None))
self._GetDefineStatements(FdObj)
self._GetSetStatements(FdObj)
if not self._GetRegionLayout(FdObj):
raise Warning.Expected("region layout", self.FileName, self.CurrentLineNumber)
while self._GetRegionLayout(FdObj):
pass
return True
## _GetUiName() method
#
# Return the UI name of a section
#
# @param self The object pointer
# @retval FdName UI name
#
def _GetUiName(self):
Name = ""
if self._GetNextWord():
Name = self._Token
return Name
## _GetCreateFile() method
#
# Return the output file name of object
#
# @param self The object pointer
# @param Obj object whose data will be stored in file
# @retval FdName UI name
#
def _GetCreateFile(self, Obj):
if self._IsKeyword("CREATE_FILE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("file name", self.FileName, self.CurrentLineNumber)
FileName = self._Token
Obj.CreateFileName = FileName
return True
def SetPcdLocalation(self,pcdpair):
self.Profile.PcdLocalDict[pcdpair] = (self.Profile.FileName,self.CurrentLineNumber)
## _GetTokenStatements() method
#
# Get token statements
#
# @param self The object pointer
# @param Obj for whom token statement is got
#
def _GetTokenStatements(self, Obj):
if self._IsKeyword("BaseAddress"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex base address", self.FileName, self.CurrentLineNumber)
Obj.BaseAddress = self._Token
if self._IsToken(TAB_VALUE_SPLIT):
pcdPair = self._GetNextPcdSettings()
Obj.BaseAddressPcd = pcdPair
self.Profile.PcdDict[pcdPair] = Obj.BaseAddress
self.SetPcdLocalation(pcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[pcdPair] = FileLineTuple
return True
if self._IsKeyword("Size"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex size", self.FileName, self.CurrentLineNumber)
Size = self._Token
if self._IsToken(TAB_VALUE_SPLIT):
pcdPair = self._GetNextPcdSettings()
Obj.SizePcd = pcdPair
self.Profile.PcdDict[pcdPair] = Size
self.SetPcdLocalation(pcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[pcdPair] = FileLineTuple
Obj.Size = int(Size, 0)
return True
if self._IsKeyword("ErasePolarity"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Erase Polarity", self.FileName, self.CurrentLineNumber)
if not self._Token in {"1", "0"}:
raise Warning.Expected("1 or 0 Erase Polarity", self.FileName, self.CurrentLineNumber)
Obj.ErasePolarity = self._Token
return True
return self._GetBlockStatements(Obj)
## _GetAddressStatements() method
#
# Get address statements
#
# @param self The object pointer
# @param Obj for whom address statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetAddressStatements(self, Obj):
if self._IsKeyword("BsBaseAddress"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextDecimalNumber() and not self._GetNextHexNumber():
raise Warning.Expected("address", self.FileName, self.CurrentLineNumber)
BsAddress = int(self._Token, 0)
Obj.BsBaseAddress = BsAddress
if self._IsKeyword("RtBaseAddress"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextDecimalNumber() and not self._GetNextHexNumber():
raise Warning.Expected("address", self.FileName, self.CurrentLineNumber)
RtAddress = int(self._Token, 0)
Obj.RtBaseAddress = RtAddress
## _GetBlockStatements() method
#
# Get block statements
#
# @param self The object pointer
# @param Obj for whom block statement is got
#
def _GetBlockStatements(self, Obj):
IsBlock = False
while self._GetBlockStatement(Obj):
IsBlock = True
Item = Obj.BlockSizeList[-1]
if Item[0] is None or Item[1] is None:
raise Warning.Expected("block statement", self.FileName, self.CurrentLineNumber)
return IsBlock
## _GetBlockStatement() method
#
# Get block statement
#
# @param self The object pointer
# @param Obj for whom block statement is got
# @retval True Successfully find
# @retval False Not able to find
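    #   Example (illustrative; the PCD name is made up):
    #       BlockSize = 0x10000|gMyTokenSpaceGuid.PcdFlashBlockSize
    #       NumBlocks = 0x20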
#
def _GetBlockStatement(self, Obj):
if not self._IsKeyword("BlockSize"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber() and not self._GetNextDecimalNumber():
raise Warning.Expected("Hex or Integer block size", self.FileName, self.CurrentLineNumber)
BlockSize = self._Token
BlockSizePcd = None
if self._IsToken(TAB_VALUE_SPLIT):
PcdPair = self._GetNextPcdSettings()
BlockSizePcd = PcdPair
self.Profile.PcdDict[PcdPair] = BlockSize
self.SetPcdLocalation(PcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
BlockSize = int(BlockSize, 0)
BlockNumber = None
if self._IsKeyword("NumBlocks"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextDecimalNumber() and not self._GetNextHexNumber():
raise Warning.Expected("block numbers", self.FileName, self.CurrentLineNumber)
BlockNumber = int(self._Token, 0)
Obj.BlockSizeList.append((BlockSize, BlockNumber, BlockSizePcd))
return True
## _GetDefineStatements() method
#
# Get define statements
#
# @param self The object pointer
# @param Obj for whom define statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetDefineStatements(self, Obj):
while self._GetDefineStatement(Obj):
pass
## _GetDefineStatement() method
#
# Get define statement
#
# @param self The object pointer
# @param Obj for whom define statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetDefineStatement(self, Obj):
if self._IsKeyword(TAB_DEFINE):
self._GetNextToken()
Macro = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("value", self.FileName, self.CurrentLineNumber)
Value = self._Token
Macro = '$(' + Macro + ')'
Obj.DefineVarDict[Macro] = Value
return True
return False
## _GetSetStatements() method
#
# Get set statements
#
# @param self The object pointer
# @param Obj for whom set statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetSetStatements(self, Obj):
while self._GetSetStatement(Obj):
pass
## _GetSetStatement() method
#
# Get set statement
#
# @param self The object pointer
# @param Obj for whom set statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetSetStatement(self, Obj):
if self._IsKeyword("SET"):
PcdPair = self._GetNextPcdSettings()
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
Value = self._GetExpression()
Value = self._EvaluateConditional(Value, self.CurrentLineNumber, 'eval', True)
if Obj:
Obj.SetVarDict[PcdPair] = Value
self.Profile.PcdDict[PcdPair] = Value
self.SetPcdLocalation(PcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
return True
return False
## _CalcRegionExpr(self)
#
# Calculate expression for offset or size of a region
#
# @return: None if invalid expression
# Calculated number if successfully
#
def _CalcRegionExpr(self):
StartPos = self.GetFileBufferPos()
Expr = ''
PairCount = 0
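        # Collect characters until an unparenthesized '|' or end of line; '('/')' pairs are tracked
        # so a '|' inside parentheses does not end the expression, which is then evaluated.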
while not self._EndOfFile():
CurCh = self._CurrentChar()
if CurCh == '(':
PairCount += 1
elif CurCh == ')':
PairCount -= 1
if CurCh in '|\r\n' and PairCount == 0:
break
Expr += CurCh
self._GetOneChar()
try:
return int(
ValueExpression(Expr,
self._CollectMacroPcd()
)(True), 0)
except Exception:
self.SetFileBufferPos(StartPos)
return None
## _GetRegionLayout() method
#
# Get region layout for FD
#
# @param self The object pointer
# @param theFd for whom region is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetRegionLayout(self, theFd):
Offset = self._CalcRegionExpr()
if Offset is None:
return False
RegionObj = Region()
RegionObj.Offset = Offset
theFd.RegionList.append(RegionObj)
if not self._IsToken(TAB_VALUE_SPLIT):
raise Warning.Expected("'|'", self.FileName, self.CurrentLineNumber)
Size = self._CalcRegionExpr()
if Size is None:
raise Warning.Expected("Region Size", self.FileName, self.CurrentLineNumber)
RegionObj.Size = Size
if not self._GetNextWord():
return True
if not self._Token in {"SET", BINARY_FILE_TYPE_FV, "FILE", "DATA", "CAPSULE", "INF"}:
#
# If next token is a word which is not a valid FV type, it might be part of [PcdOffset[|PcdSize]]
# Or it might be next region's offset described by an expression which starts with a PCD.
# PcdOffset[|PcdSize] or OffsetPcdExpression|Size
#
self._UndoToken()
IsRegionPcd = (RegionSizeGuidPattern.match(self._CurrentLine()[self.CurrentOffsetWithinLine:]) or
RegionOffsetPcdPattern.match(self._CurrentLine()[self.CurrentOffsetWithinLine:]))
if IsRegionPcd:
RegionObj.PcdOffset = self._GetNextPcdSettings()
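                # The offset PCD records the absolute flash address (FD BaseAddress + region offset);
                # the optional size PCD records the region size.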
self.Profile.PcdDict[RegionObj.PcdOffset] = "0x%08X" % (RegionObj.Offset + int(theFd.BaseAddress, 0))
self.SetPcdLocalation(RegionObj.PcdOffset)
self._PcdDict['%s.%s' % (RegionObj.PcdOffset[1], RegionObj.PcdOffset[0])] = "0x%x" % RegionObj.Offset
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[RegionObj.PcdOffset] = FileLineTuple
if self._IsToken(TAB_VALUE_SPLIT):
RegionObj.PcdSize = self._GetNextPcdSettings()
self.Profile.PcdDict[RegionObj.PcdSize] = "0x%08X" % RegionObj.Size
self.SetPcdLocalation(RegionObj.PcdSize)
self._PcdDict['%s.%s' % (RegionObj.PcdSize[1], RegionObj.PcdSize[0])] = "0x%x" % RegionObj.Size
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[RegionObj.PcdSize] = FileLineTuple
if not self._GetNextWord():
return True
if self._Token == "SET":
self._UndoToken()
self._GetSetStatements(RegionObj)
if not self._GetNextWord():
return True
elif self._Token == BINARY_FILE_TYPE_FV:
self._UndoToken()
self._GetRegionFvType(RegionObj)
elif self._Token == "CAPSULE":
self._UndoToken()
self._GetRegionCapType(RegionObj)
elif self._Token == "FILE":
self._UndoToken()
self._GetRegionFileType(RegionObj)
elif self._Token == "INF":
self._UndoToken()
RegionObj.RegionType = "INF"
while self._IsKeyword("INF"):
self._UndoToken()
ffsInf = self._ParseInfStatement()
if not ffsInf:
break
RegionObj.RegionDataList.append(ffsInf)
elif self._Token == "DATA":
self._UndoToken()
self._GetRegionDataType(RegionObj)
else:
self._UndoToken()
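                # Not a recognized region data type: assume the word starts the next region's
                # offset expression and retry parsing it as a new region layout.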
if self._GetRegionLayout(theFd):
return True
raise Warning("A valid region type was not found. "
"Valid types are [SET, FV, CAPSULE, FILE, DATA, INF]. This error occurred",
self.FileName, self.CurrentLineNumber)
return True
## _GetRegionFvType() method
#
# Get region fv data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def _GetRegionFvType(self, RegionObj):
if not self._IsKeyword(BINARY_FILE_TYPE_FV):
raise Warning.Expected("'FV'", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = BINARY_FILE_TYPE_FV
RegionObj.RegionDataList.append((self._Token).upper())
while self._IsKeyword(BINARY_FILE_TYPE_FV):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append((self._Token).upper())
## _GetRegionCapType() method
#
# Get region capsule data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def _GetRegionCapType(self, RegionObj):
if not self._IsKeyword("CAPSULE"):
raise Warning.Expected("'CAPSULE'", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("CAPSULE name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = "CAPSULE"
RegionObj.RegionDataList.append(self._Token)
while self._IsKeyword("CAPSULE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("CAPSULE name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append(self._Token)
## _GetRegionFileType() method
#
# Get region file data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def _GetRegionFileType(self, RegionObj):
if not self._IsKeyword("FILE"):
raise Warning.Expected("'FILE'", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("File name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = "FILE"
RegionObj.RegionDataList.append(self._Token)
while self._IsKeyword("FILE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FILE name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append(self._Token)
## _GetRegionDataType() method
#
# Get region array data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def _GetRegionDataType(self, RegionObj):
if not self._IsKeyword("DATA"):
raise Warning.Expected("Region Data type", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex byte", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 18:
raise Warning("Hex string can't be converted to a valid UINT64 value", self.FileName, self.CurrentLineNumber)
# convert hex string value to byte hex string array
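        # e.g. 0x12345678 becomes "0x78,0x56,0x34,0x12" (bytes emitted in little-endian order)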
AllString = self._Token
AllStrLen = len (AllString)
DataString = ""
while AllStrLen > 4:
DataString = DataString + "0x" + AllString[AllStrLen - 2: AllStrLen] + TAB_COMMA_SPLIT
AllStrLen = AllStrLen - 2
DataString = DataString + AllString[:AllStrLen] + TAB_COMMA_SPLIT
# byte value array
if len (self._Token) <= 4:
while self._IsToken(TAB_COMMA_SPLIT):
if not self._GetNextHexNumber():
raise Warning("Invalid Hex number", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
DataString += self._Token
DataString += TAB_COMMA_SPLIT
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
DataString = DataString.rstrip(TAB_COMMA_SPLIT)
RegionObj.RegionType = "DATA"
RegionObj.RegionDataList.append(DataString)
while self._IsKeyword("DATA"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex byte", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 18:
raise Warning("Hex string can't be converted to a valid UINT64 value", self.FileName, self.CurrentLineNumber)
# convert hex string value to byte hex string array
AllString = self._Token
AllStrLen = len (AllString)
DataString = ""
while AllStrLen > 4:
DataString = DataString + "0x" + AllString[AllStrLen - 2: AllStrLen] + TAB_COMMA_SPLIT
AllStrLen = AllStrLen - 2
DataString = DataString + AllString[:AllStrLen] + TAB_COMMA_SPLIT
# byte value array
if len (self._Token) <= 4:
while self._IsToken(TAB_COMMA_SPLIT):
if not self._GetNextHexNumber():
raise Warning("Invalid Hex number", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
DataString += self._Token
DataString += TAB_COMMA_SPLIT
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
DataString = DataString.rstrip(TAB_COMMA_SPLIT)
RegionObj.RegionDataList.append(DataString)
## _GetFv() method
#
# Get FV section contents and store its data into FV dictionary of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a FV
# @retval False Not able to find a FV
#
def _GetFv(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[FV."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[FV.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
#print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
# % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning("Unknown Keyword '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
FvName = self._GetUiName()
self.CurrentFvName = FvName.upper()
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
FvObj = FV(Name=self.CurrentFvName)
self.Profile.FvDict[self.CurrentFvName] = FvObj
Status = self._GetCreateFile(FvObj)
if not Status:
raise Warning("FV name error", self.FileName, self.CurrentLineNumber)
self._GetDefineStatements(FvObj)
self._GetAddressStatements(FvObj)
while True:
self._GetSetStatements(FvObj)
if not (self._GetBlockStatement(FvObj) or self._GetFvBaseAddress(FvObj) or
self._GetFvForceRebase(FvObj) or self._GetFvAlignment(FvObj) or
self._GetFvAttributes(FvObj) or self._GetFvNameGuid(FvObj) or
self._GetFvExtEntryStatement(FvObj) or self._GetFvNameString(FvObj)):
break
if FvObj.FvNameString == 'TRUE' and not FvObj.FvNameGuid:
raise Warning("FvNameString found but FvNameGuid was not found", self.FileName, self.CurrentLineNumber)
self._GetAprioriSection(FvObj)
self._GetAprioriSection(FvObj)
while True:
isInf = self._GetInfStatement(FvObj)
isFile = self._GetFileStatement(FvObj)
if not isInf and not isFile:
break
return True
## _GetFvAlignment() method
#
# Get alignment for FV
#
# @param self The object pointer
# @param Obj for whom alignment is got
    #   @retval True        Successfully find an alignment statement
    #   @retval False       Not able to find an alignment statement
#
def _GetFvAlignment(self, Obj):
if not self._IsKeyword("FvAlignment"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("alignment value", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in {"1", "2", "4", "8", "16", "32", "64", "128", "256", "512", \
"1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", \
"1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", \
"1G", "2G"}:
raise Warning("Unknown alignment value '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
Obj.FvAlignment = self._Token
return True
## _GetFvBaseAddress() method
#
# Get BaseAddress for FV
#
# @param self The object pointer
# @param Obj for whom FvBaseAddress is got
# @retval True Successfully find a FvBaseAddress statement
# @retval False Not able to find a FvBaseAddress statement
#
def _GetFvBaseAddress(self, Obj):
if not self._IsKeyword("FvBaseAddress"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV base address value", self.FileName, self.CurrentLineNumber)
if not BaseAddrValuePattern.match(self._Token.upper()):
raise Warning("Unknown FV base address value '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
Obj.FvBaseAddress = self._Token
return True
## _GetFvForceRebase() method
#
# Get FvForceRebase for FV
#
# @param self The object pointer
# @param Obj for whom FvForceRebase is got
# @retval True Successfully find a FvForceRebase statement
# @retval False Not able to find a FvForceRebase statement
#
def _GetFvForceRebase(self, Obj):
if not self._IsKeyword("FvForceRebase"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FvForceRebase value", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in {"TRUE", "FALSE", "0", "0X0", "0X00", "1", "0X1", "0X01"}:
raise Warning("Unknown FvForceRebase value '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
if self._Token.upper() in {"TRUE", "1", "0X1", "0X01"}:
Obj.FvForceRebase = True
elif self._Token.upper() in {"FALSE", "0", "0X0", "0X00"}:
Obj.FvForceRebase = False
else:
Obj.FvForceRebase = None
return True
## _GetFvAttributes() method
#
# Get attributes for FV
#
# @param self The object pointer
# @param Obj for whom attribute is got
    #   @retval True/False  Whether any FV attribute statement was parsed
#
def _GetFvAttributes(self, FvObj):
IsWordToken = False
while self._GetNextWord():
IsWordToken = True
name = self._Token
if name not in {"ERASE_POLARITY", "MEMORY_MAPPED", \
"STICKY_WRITE", "LOCK_CAP", "LOCK_STATUS", "WRITE_ENABLED_CAP", \
"WRITE_DISABLED_CAP", "WRITE_STATUS", "READ_ENABLED_CAP", \
"READ_DISABLED_CAP", "READ_STATUS", "READ_LOCK_CAP", \
"READ_LOCK_STATUS", "WRITE_LOCK_CAP", "WRITE_LOCK_STATUS", \
"WRITE_POLICY_RELIABLE", "WEAK_ALIGNMENT", "FvUsedSizeEnable"}:
self._UndoToken()
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken() or self._Token.upper() not in {"TRUE", "FALSE", "1", "0"}:
raise Warning.Expected("TRUE/FALSE (1/0)", self.FileName, self.CurrentLineNumber)
FvObj.FvAttributeDict[name] = self._Token
return IsWordToken
## _GetFvNameGuid() method
#
# Get FV GUID for FV
#
# @param self The object pointer
# @param Obj for whom GUID is got
    #   @retval True/False  Whether an FvNameGuid statement was found
#
def _GetFvNameGuid(self, FvObj):
if not self._IsKeyword("FvNameGuid"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextGuid():
raise Warning.Expected("GUID value", self.FileName, self.CurrentLineNumber)
if self._Token in GlobalData.gGuidDict:
self._Token = GuidStructureStringToGuidString(GlobalData.gGuidDict[self._Token]).upper()
FvObj.FvNameGuid = self._Token
return True
def _GetFvNameString(self, FvObj):
if not self._IsKeyword("FvNameString"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken() or self._Token.upper() not in {'TRUE', 'FALSE'}:
raise Warning.Expected("TRUE or FALSE for FvNameString", self.FileName, self.CurrentLineNumber)
FvObj.FvNameString = self._Token
return True
def _GetFvExtEntryStatement(self, FvObj):
if not (self._IsKeyword("FV_EXT_ENTRY") or self._IsKeyword("FV_EXT_ENTRY_TYPE")):
return False
if not self._IsKeyword ("TYPE"):
raise Warning.Expected("'TYPE'", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber() and not self._GetNextDecimalNumber():
raise Warning.Expected("Hex FV extension entry type value At Line ", self.FileName, self.CurrentLineNumber)
FvObj.FvExtEntryTypeValue.append(self._Token)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._IsKeyword("FILE") and not self._IsKeyword("DATA"):
raise Warning.Expected("'FILE' or 'DATA'", self.FileName, self.CurrentLineNumber)
FvObj.FvExtEntryType.append(self._Token)
if self._Token == 'DATA':
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex byte", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
DataString = self._Token
DataString += TAB_COMMA_SPLIT
while self._IsToken(TAB_COMMA_SPLIT):
if not self._GetNextHexNumber():
raise Warning("Invalid Hex number", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
DataString += self._Token
DataString += TAB_COMMA_SPLIT
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
DataString = DataString.rstrip(TAB_COMMA_SPLIT)
FvObj.FvExtEntryData.append(DataString)
if self._Token == 'FILE':
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV Extension Entry file path At Line ", self.FileName, self.CurrentLineNumber)
FvObj.FvExtEntryData.append(self._Token)
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
return True
## _GetAprioriSection() method
#
# Get apriori section for FV
#
# @param self The object pointer
# @param FvObj for whom apriori is got
# @retval True Successfully find apriori statement
# @retval False Not able to find apriori statement
#
def _GetAprioriSection(self, FvObj):
if not self._IsKeyword("APRIORI"):
return False
if not self._IsKeyword("PEI") and not self._IsKeyword("DXE"):
raise Warning.Expected("Apriori file type", self.FileName, self.CurrentLineNumber)
AprType = self._Token
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
AprSectionObj = AprioriSection()
AprSectionObj.AprioriType = AprType
self._GetDefineStatements(AprSectionObj)
while True:
IsInf = self._GetInfStatement(AprSectionObj)
IsFile = self._GetFileStatement(AprSectionObj)
if not IsInf and not IsFile:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
FvObj.AprioriSectionList.append(AprSectionObj)
return True
def _ParseInfStatement(self):
if not self._IsKeyword("INF"):
return None
ffsInf = FfsInfStatement()
self._GetInfOptions(ffsInf)
if not self._GetNextToken():
raise Warning.Expected("INF file path", self.FileName, self.CurrentLineNumber)
ffsInf.InfFileName = self._Token
if not ffsInf.InfFileName.endswith('.inf'):
raise Warning.Expected(".inf file path", self.FileName, self.CurrentLineNumber)
ffsInf.CurrentLineNum = self.CurrentLineNumber
ffsInf.CurrentLineContent = self._CurrentLine()
# Replace $(SPACE) with a real space character
ffsInf.InfFileName = ffsInf.InfFileName.replace('$(SPACE)', ' ')
if ffsInf.InfFileName.replace(TAB_WORKSPACE, '').find('$') == -1:
#do case sensitive check for file path
ErrorCode, ErrorInfo = PathClass(NormPath(ffsInf.InfFileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
NewFileName = ffsInf.InfFileName
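# When FILE_GUID is overridden, ProcessDuplicatedInf is expected to produce a renamed copy of the INF carrying the new GUID.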
if ffsInf.OverrideGuid:
NewFileName = ProcessDuplicatedInf(PathClass(ffsInf.InfFileName,GenFdsGlobalVariable.WorkSpaceDir), ffsInf.OverrideGuid, GenFdsGlobalVariable.WorkSpaceDir).Path
if NewFileName not in self.Profile.InfList:
self.Profile.InfList.append(NewFileName)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.InfFileLineList.append(FileLineTuple)
if ffsInf.UseArch:
if ffsInf.UseArch not in self.Profile.InfDict:
self.Profile.InfDict[ffsInf.UseArch] = [ffsInf.InfFileName]
else:
self.Profile.InfDict[ffsInf.UseArch].append(ffsInf.InfFileName)
else:
self.Profile.InfDict['ArchTBD'].append(ffsInf.InfFileName)
if self._IsToken(TAB_VALUE_SPLIT):
if self._IsKeyword('RELOCS_STRIPPED'):
ffsInf.KeepReloc = False
elif self._IsKeyword('RELOCS_RETAINED'):
ffsInf.KeepReloc = True
else:
raise Warning("Unknown reloc strip flag '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
return ffsInf
## _GetInfStatement() method
#
# Get INF statements
#
# @param self The object pointer
# @param Obj for whom inf statement is got
# @retval True Successfully find inf statement
# @retval False Not able to find inf statement
#
def _GetInfStatement(self, Obj, ForCapsule=False):
ffsInf = self._ParseInfStatement()
if not ffsInf:
return False
if ForCapsule:
myCapsuleFfs = CapsuleFfs()
myCapsuleFfs.Ffs = ffsInf
Obj.CapsuleDataList.append(myCapsuleFfs)
else:
Obj.FfsList.append(ffsInf)
return True
## _GetInfOptions() method
#
# Get options for INF
#
# @param self The object pointer
# @param FfsInfObj for whom option is got
#
def _GetInfOptions(self, FfsInfObj):
if self._IsKeyword("FILE_GUID"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextGuid():
raise Warning.Expected("GUID value", self.FileName, self.CurrentLineNumber)
if self._Token in GlobalData.gGuidDict:
self._Token = GuidStructureStringToGuidString(GlobalData.gGuidDict[self._Token]).upper()
FfsInfObj.OverrideGuid = self._Token
if self._IsKeyword("RuleOverride"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Rule name", self.FileName, self.CurrentLineNumber)
FfsInfObj.Rule = self._Token
if self._IsKeyword("VERSION"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Version", self.FileName, self.CurrentLineNumber)
if self._GetStringData():
FfsInfObj.Version = self._Token
if self._IsKeyword(BINARY_FILE_TYPE_UI):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("UI name", self.FileName, self.CurrentLineNumber)
if self._GetStringData():
FfsInfObj.Ui = self._Token
if self._IsKeyword("USE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("ARCH name", self.FileName, self.CurrentLineNumber)
FfsInfObj.UseArch = self._Token
if self._GetNextToken():
p = compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\))')
if p.match(self._Token) and p.match(self._Token).span()[1] == len(self._Token):
FfsInfObj.KeyStringList.append(self._Token)
if not self._IsToken(TAB_COMMA_SPLIT):
return
else:
self._UndoToken()
return
while self._GetNextToken():
if not p.match(self._Token):
raise Warning.Expected("KeyString \"Target_Tag_Arch\"", self.FileName, self.CurrentLineNumber)
FfsInfObj.KeyStringList.append(self._Token)
if not self._IsToken(TAB_COMMA_SPLIT):
break
## _GetFileStatement() method
#
# Get FILE statements
#
# @param self The object pointer
# @param Obj for whom FILE statement is got
# @retval True Successfully find FILE statement
# @retval False Not able to find FILE statement
#
def _GetFileStatement(self, Obj, ForCapsule = False):
if not self._IsKeyword("FILE"):
return False
if not self._GetNextWord():
raise Warning.Expected("FFS type", self.FileName, self.CurrentLineNumber)
if ForCapsule and self._Token == 'DATA':
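# 'FILE DATA' inside a capsule is handled by _ParseRawFileStatement; back out both the FFS type and the FILE keyword.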
self._UndoToken()
self._UndoToken()
return False
FfsFileObj = FileStatement()
FfsFileObj.FvFileType = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextGuid():
if not self._GetNextWord():
raise Warning.Expected("File GUID", self.FileName, self.CurrentLineNumber)
if self._Token == 'PCD':
if not self._IsToken("("):
raise Warning.Expected("'('", self.FileName, self.CurrentLineNumber)
PcdPair = self._GetNextPcdSettings()
if not self._IsToken(")"):
raise Warning.Expected("')'", self.FileName, self.CurrentLineNumber)
self._Token = 'PCD('+PcdPair[1]+TAB_SPLIT+PcdPair[0]+')'
if self._Token in GlobalData.gGuidDict:
self._Token = GuidStructureStringToGuidString(GlobalData.gGuidDict[self._Token]).upper()
FfsFileObj.NameGuid = self._Token
self._GetFilePart(FfsFileObj)
if ForCapsule:
capsuleFfs = CapsuleFfs()
capsuleFfs.Ffs = FfsFileObj
Obj.CapsuleDataList.append(capsuleFfs)
else:
Obj.FfsList.append(FfsFileObj)
return True
## _FileCouldHaveRelocFlag() method
#
# Check whether reloc strip flag can be set for a file type.
#
# @param FileType The file type to check with
# @retval True This type could have relocation strip flag
# @retval False No way to have it
#
@staticmethod
def _FileCouldHaveRelocFlag (FileType):
if FileType in {SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, SUP_MODULE_MM_CORE_STANDALONE, 'PEI_DXE_COMBO'}:
return True
else:
return False
## _SectionCouldHaveRelocFlag() method
#
# Check whether reloc strip flag can be set for a section type.
#
# @param SectionType The section type to check with
# @retval True This type could have relocation strip flag
# @retval False No way to have it
#
@staticmethod
def _SectionCouldHaveRelocFlag (SectionType):
if SectionType in {BINARY_FILE_TYPE_TE, BINARY_FILE_TYPE_PE32}:
return True
else:
return False
## _GetFilePart() method
#
# Get components for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom component is got
#
def _GetFilePart(self, FfsFileObj):
self._GetFileOpts(FfsFileObj)
if not self._IsToken("{"):
if self._IsKeyword('RELOCS_STRIPPED') or self._IsKeyword('RELOCS_RETAINED'):
if self._FileCouldHaveRelocFlag(FfsFileObj.FvFileType):
if self._Token == 'RELOCS_STRIPPED':
FfsFileObj.KeepReloc = False
else:
FfsFileObj.KeepReloc = True
else:
raise Warning("File type %s could not have reloc strip flag%d" % (FfsFileObj.FvFileType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("File name or section data", self.FileName, self.CurrentLineNumber)
if self._Token == BINARY_FILE_TYPE_FV:
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name", self.FileName, self.CurrentLineNumber)
FfsFileObj.FvName = self._Token
elif self._Token == "FD":
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FD name", self.FileName, self.CurrentLineNumber)
FfsFileObj.FdName = self._Token
elif self._Token in {TAB_DEFINE, "APRIORI", "SECTION"}:
self._UndoToken()
self._GetSectionData(FfsFileObj)
elif hasattr(FfsFileObj, 'FvFileType') and FfsFileObj.FvFileType == 'RAW':
self._UndoToken()
self._GetRAWData(FfsFileObj)
else:
FfsFileObj.CurrentLineNum = self.CurrentLineNumber
FfsFileObj.CurrentLineContent = self._CurrentLine()
FfsFileObj.FileName = self._Token.replace('$(SPACE)', ' ')
self._VerifyFile(FfsFileObj.FileName)
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
## _GetRAWData() method
#
# Get RAW data for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom section is got
#
def _GetRAWData(self, FfsFileObj):
FfsFileObj.FileName = []
FfsFileObj.SubAlignment = []
while True:
AlignValue = None
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
# For FFS, Auto is the default option, equivalent to ""
if self._Token != "Auto":
AlignValue = self._Token
if not self._GetNextToken():
raise Warning.Expected("Filename value", self.FileName, self.CurrentLineNumber)
FileName = self._Token.replace('$(SPACE)', ' ')
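# A closing brace here means no file name was supplied for this entry.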
if FileName == T_CHAR_BRACE_R:
self._UndoToken()
raise Warning.Expected("Filename value", self.FileName, self.CurrentLineNumber)
self._VerifyFile(FileName)
File = PathClass(NormPath(FileName), GenFdsGlobalVariable.WorkSpaceDir)
FfsFileObj.FileName.append(File.Path)
FfsFileObj.SubAlignment.append(AlignValue)
if self._IsToken(T_CHAR_BRACE_R):
self._UndoToken()
break
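# Keep backward-compatible scalar fields when only a single RAW file is listed.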
if len(FfsFileObj.SubAlignment) == 1:
FfsFileObj.SubAlignment = FfsFileObj.SubAlignment[0]
if len(FfsFileObj.FileName) == 1:
FfsFileObj.FileName = FfsFileObj.FileName[0]
## _GetFileOpts() method
#
# Get options for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom options is got
#
def _GetFileOpts(self, FfsFileObj):
if self._GetNextToken():
if TokenFindPattern.match(self._Token):
FfsFileObj.KeyStringList.append(self._Token)
if self._IsToken(TAB_COMMA_SPLIT):
while self._GetNextToken():
if not TokenFindPattern.match(self._Token):
raise Warning.Expected("KeyString \"Target_Tag_Arch\"", self.FileName, self.CurrentLineNumber)
FfsFileObj.KeyStringList.append(self._Token)
if not self._IsToken(TAB_COMMA_SPLIT):
break
else:
self._UndoToken()
if self._IsKeyword("FIXED", True):
FfsFileObj.Fixed = True
if self._IsKeyword("CHECKSUM", True):
FfsFileObj.CheckSum = True
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
# For FFS, Auto is the default option, equivalent to ""
if self._Token != "Auto":
FfsFileObj.Alignment = self._Token
## _GetAlignment() method
#
# Return the alignment value
#
# @param self The object pointer
# @retval True Successfully find alignment
# @retval False Not able to find alignment
#
def _GetAlignment(self):
if self._IsKeyword("Align", True):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("alignment value", self.FileName, self.CurrentLineNumber)
return True
return False
## _GetSectionData() method
#
# Get section data for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom section is got
#
def _GetSectionData(self, FfsFileObj):
self._GetDefineStatements(FfsFileObj)
while True:
IsLeafSection = self._GetLeafSection(FfsFileObj)
IsEncapSection = self._GetEncapsulationSec(FfsFileObj)
if not IsLeafSection and not IsEncapSection:
break
## _GetLeafSection() method
#
# Get leaf section for Obj
#
# @param self The object pointer
# @param Obj for whom leaf section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def _GetLeafSection(self, Obj):
OldPos = self.GetFileBufferPos()
if not self._IsKeyword("SECTION"):
if len(Obj.SectionList) == 0:
raise Warning.Expected("SECTION", self.FileName, self.CurrentLineNumber)
else:
return False
AlignValue = None
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
AlignValue = self._Token
BuildNum = None
if self._IsKeyword("BUILD_NUM"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Build number value", self.FileName, self.CurrentLineNumber)
BuildNum = self._Token
if self._IsKeyword("VERSION"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("version", self.FileName, self.CurrentLineNumber)
VerSectionObj = VerSection()
VerSectionObj.Alignment = AlignValue
VerSectionObj.BuildNum = BuildNum
if self._GetStringData():
VerSectionObj.StringData = self._Token
else:
VerSectionObj.FileName = self._Token
Obj.SectionList.append(VerSectionObj)
elif self._IsKeyword(BINARY_FILE_TYPE_UI):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("UI", self.FileName, self.CurrentLineNumber)
UiSectionObj = UiSection()
UiSectionObj.Alignment = AlignValue
if self._GetStringData():
UiSectionObj.StringData = self._Token
else:
UiSectionObj.FileName = self._Token
Obj.SectionList.append(UiSectionObj)
elif self._IsKeyword("FV_IMAGE"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name or FV file path", self.FileName, self.CurrentLineNumber)
FvName = self._Token
FvObj = None
if self._IsToken("{"):
FvObj = FV()
FvObj.UiFvName = FvName.upper()
self._GetDefineStatements(FvObj)
self._GetBlockStatement(FvObj)
self._GetSetStatements(FvObj)
self._GetFvAlignment(FvObj)
self._GetFvAttributes(FvObj)
while True:
IsInf = self._GetInfStatement(FvObj)
IsFile = self._GetFileStatement(FvObj)
if not IsInf and not IsFile:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
FvImageSectionObj = FvImageSection()
FvImageSectionObj.Alignment = AlignValue
if FvObj is not None:
FvImageSectionObj.Fv = FvObj
FvImageSectionObj.FvName = None
else:
FvImageSectionObj.FvName = FvName.upper()
FvImageSectionObj.FvFileName = FvName
Obj.SectionList.append(FvImageSectionObj)
elif self._IsKeyword("PEI_DEPEX_EXP") or self._IsKeyword("DXE_DEPEX_EXP") or self._IsKeyword("SMM_DEPEX_EXP"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
DepexSectionObj = DepexSection()
DepexSectionObj.Alignment = AlignValue
DepexSectionObj.DepexType = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._SkipToToken(T_CHAR_BRACE_R):
raise Warning.Expected("Depex expression ending '}'", self.FileName, self.CurrentLineNumber)
DepexSectionObj.Expression = self._SkippedChars.rstrip(T_CHAR_BRACE_R)
Obj.SectionList.append(DepexSectionObj)
elif self._IsKeyword("SUBTYPE_GUID"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
SubTypeGuidValue = None
if not self._GetNextGuid():
raise Warning.Expected("GUID", self.FileName, self.CurrentLineNumber)
else:
SubTypeGuidValue = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("section file path", self.FileName, self.CurrentLineNumber)
FileName = self._Token
SubTypeGuidSectionObj = SubTypeGuidSection()
SubTypeGuidSectionObj.Alignment = AlignValue
SubTypeGuidSectionObj.SubTypeGuid = SubTypeGuidValue
SubTypeGuidSectionObj.SectFileName = FileName
Obj.SectionList.append(SubTypeGuidSectionObj)
else:
if not self._GetNextWord():
raise Warning.Expected("section type", self.FileName, self.CurrentLineNumber)
# Encapsulation section appear, UndoToken and return
if self._Token == "COMPRESS" or self._Token == "GUIDED":
self.SetFileBufferPos(OldPos)
return False
if self._Token not in {"COMPAT16", BINARY_FILE_TYPE_PE32, BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE, "FV_IMAGE", "RAW", BINARY_FILE_TYPE_DXE_DEPEX,\
BINARY_FILE_TYPE_UI, "VERSION", BINARY_FILE_TYPE_PEI_DEPEX, "SUBTYPE_GUID", BINARY_FILE_TYPE_SMM_DEPEX}:
raise Warning("Unknown section type '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
if AlignValue == 'Auto' and self._Token != BINARY_FILE_TYPE_PE32 and self._Token != BINARY_FILE_TYPE_TE:
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
# DataSection
DataSectionObj = DataSection()
DataSectionObj.Alignment = AlignValue
DataSectionObj.SecType = self._Token
if self._IsKeyword('RELOCS_STRIPPED') or self._IsKeyword('RELOCS_RETAINED'):
if self._FileCouldHaveRelocFlag(Obj.FvFileType) and self._SectionCouldHaveRelocFlag(DataSectionObj.SecType):
if self._Token == 'RELOCS_STRIPPED':
DataSectionObj.KeepReloc = False
else:
DataSectionObj.KeepReloc = True
else:
raise Warning("File type %s, section type %s, could not have reloc strip flag%d" % (Obj.FvFileType, DataSectionObj.SecType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if self._IsToken(TAB_EQUAL_SPLIT):
if not self._GetNextToken():
raise Warning.Expected("section file path", self.FileName, self.CurrentLineNumber)
DataSectionObj.SectFileName = self._Token
self._VerifyFile(DataSectionObj.SectFileName)
else:
if not self._GetCglSection(DataSectionObj):
return False
Obj.SectionList.append(DataSectionObj)
return True
## _VerifyFile
#
# Check if file exists or not:
# If the current phase is GenFds, the file must exist;
# if the current phase is AutoGen and the file is not under $(OUTPUT_DIRECTORY), the file must exist.
# @param FileName: File path to be verified.
#
def _VerifyFile(self, FileName):
if FileName.replace(TAB_WORKSPACE, '').find('$') != -1:
return
if not GlobalData.gAutoGenPhase or self._GetMacroValue(TAB_DSC_DEFINES_OUTPUT_DIRECTORY) not in FileName:
ErrorCode, ErrorInfo = PathClass(NormPath(FileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
## _GetCglSection() method
#
# Get compressed or GUIDed section for Obj
#
# @param self The object pointer
# @param Obj for whom leaf section is got
# @param AlignValue alignment value for complex section
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def _GetCglSection(self, Obj, AlignValue = None):
if self._IsKeyword("COMPRESS"):
type = "PI_STD"
if self._IsKeyword("PI_STD") or self._IsKeyword("PI_NONE"):
type = self._Token
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
CompressSectionObj = CompressSection()
CompressSectionObj.Alignment = AlignValue
CompressSectionObj.CompType = type
# Recursive sections...
while True:
IsLeafSection = self._GetLeafSection(CompressSectionObj)
IsEncapSection = self._GetEncapsulationSec(CompressSectionObj)
if not IsLeafSection and not IsEncapSection:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(CompressSectionObj)
return True
elif self._IsKeyword("GUIDED"):
GuidValue = None
if self._GetNextGuid():
if self._Token in GlobalData.gGuidDict:
self._Token = GuidStructureStringToGuidString(GlobalData.gGuidDict[self._Token]).upper()
GuidValue = self._Token
AttribDict = self._GetGuidAttrib()
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
GuidSectionObj = GuidSection()
GuidSectionObj.Alignment = AlignValue
GuidSectionObj.NameGuid = GuidValue
GuidSectionObj.SectionType = "GUIDED"
GuidSectionObj.ProcessRequired = AttribDict["PROCESSING_REQUIRED"]
GuidSectionObj.AuthStatusValid = AttribDict["AUTH_STATUS_VALID"]
GuidSectionObj.ExtraHeaderSize = AttribDict["EXTRA_HEADER_SIZE"]
# Recursive sections...
while True:
IsLeafSection = self._GetLeafSection(GuidSectionObj)
IsEncapSection = self._GetEncapsulationSec(GuidSectionObj)
if not IsLeafSection and not IsEncapSection:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(GuidSectionObj)
return True
return False
## _GetGuidAttrib() method
#
# Get attributes for GUID section
#
# @param self The object pointer
# @retval AttribDict Dictionary of key-value pair of section attributes
#
def _GetGuidAttrib(self):
AttribDict = {}
AttribDict["PROCESSING_REQUIRED"] = "NONE"
AttribDict["AUTH_STATUS_VALID"] = "NONE"
AttribDict["EXTRA_HEADER_SIZE"] = -1
while self._IsKeyword("PROCESSING_REQUIRED") or self._IsKeyword("AUTH_STATUS_VALID") \
or self._IsKeyword("EXTRA_HEADER_SIZE"):
AttribKey = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("TRUE(1)/FALSE(0)/Number", self.FileName, self.CurrentLineNumber)
elif AttribKey == "EXTRA_HEADER_SIZE":
Base = 10
if self._Token[0:2].upper() == "0X":
Base = 16
try:
AttribDict[AttribKey] = int(self._Token, Base)
continue
except ValueError:
raise Warning.Expected("Number", self.FileName, self.CurrentLineNumber)
elif self._Token.upper() not in {"TRUE", "FALSE", "1", "0"}:
raise Warning.Expected("TRUE/FALSE (1/0)", self.FileName, self.CurrentLineNumber)
AttribDict[AttribKey] = self._Token
return AttribDict
## _GetEncapsulationSec() method
#
# Get encapsulation section for FILE
#
# @param self The object pointer
# @param FfsFile for whom section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def _GetEncapsulationSec(self, FfsFileObj):
OldPos = self.GetFileBufferPos()
if not self._IsKeyword("SECTION"):
if len(FfsFileObj.SectionList) == 0:
raise Warning.Expected("SECTION", self.FileName, self.CurrentLineNumber)
else:
return False
AlignValue = None
if self._GetAlignment():
if self._Token not in ALIGNMENT_NOAUTO:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
AlignValue = self._Token
if not self._GetCglSection(FfsFileObj, AlignValue):
self.SetFileBufferPos(OldPos)
return False
else:
return True
def _GetFmp(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[FMPPAYLOAD."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
self._SkipToToken("[FMPPAYLOAD.", True)
FmpUiName = self._GetUiName().upper()
if FmpUiName in self.Profile.FmpPayloadDict:
raise Warning("Duplicated FMP UI name found: %s" % FmpUiName, self.FileName, self.CurrentLineNumber)
FmpData = CapsulePayload()
FmpData.UiName = FmpUiName
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning("The FMP payload section is empty!", self.FileName, self.CurrentLineNumber)
FmpKeyList = ['IMAGE_HEADER_INIT_VERSION', 'IMAGE_TYPE_ID', 'IMAGE_INDEX', 'HARDWARE_INSTANCE', 'CERTIFICATE_GUID', 'MONOTONIC_COUNT']
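# Each key may appear at most once; keys are removed from FmpKeyList as they are consumed.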
while self._Token in FmpKeyList:
Name = self._Token
FmpKeyList.remove(Name)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if Name == 'IMAGE_TYPE_ID':
if not self._GetNextGuid():
raise Warning.Expected("GUID value for IMAGE_TYPE_ID.", self.FileName, self.CurrentLineNumber)
FmpData.ImageTypeId = self._Token
elif Name == 'CERTIFICATE_GUID':
if not self._GetNextGuid():
raise Warning.Expected("GUID value for CERTIFICATE_GUID.", self.FileName, self.CurrentLineNumber)
FmpData.Certificate_Guid = self._Token
if UUID(FmpData.Certificate_Guid) != EFI_CERT_TYPE_RSA2048_SHA256_GUID and UUID(FmpData.Certificate_Guid) != EFI_CERT_TYPE_PKCS7_GUID:
raise Warning("Only support EFI_CERT_TYPE_RSA2048_SHA256_GUID or EFI_CERT_TYPE_PKCS7_GUID for CERTIFICATE_GUID.", self.FileName, self.CurrentLineNumber)
else:
if not self._GetNextToken():
raise Warning.Expected("value of %s" % Name, self.FileName, self.CurrentLineNumber)
Value = self._Token
if Name == 'IMAGE_HEADER_INIT_VERSION':
if FdfParser._Verify(Name, Value, 'UINT8'):
FmpData.Version = Value
elif Name == 'IMAGE_INDEX':
if FdfParser._Verify(Name, Value, 'UINT8'):
FmpData.ImageIndex = Value
elif Name == 'HARDWARE_INSTANCE':
if FdfParser._Verify(Name, Value, 'UINT8'):
FmpData.HardwareInstance = Value
elif Name == 'MONOTONIC_COUNT':
if FdfParser._Verify(Name, Value, 'UINT64'):
FmpData.MonotonicCount = Value
if FmpData.MonotonicCount.upper().startswith('0X'):
FmpData.MonotonicCount = int(FmpData.MonotonicCount, 16)
else:
FmpData.MonotonicCount = int(FmpData.MonotonicCount)
if not self._GetNextToken():
break
else:
self._UndoToken()
if (FmpData.MonotonicCount and not FmpData.Certificate_Guid) or (not FmpData.MonotonicCount and FmpData.Certificate_Guid):
EdkLogger.error("FdfParser", FORMAT_INVALID, "CERTIFICATE_GUID and MONOTONIC_COUNT must be work as a pair.")
# Only the IMAGE_TYPE_ID is required item
if FmpKeyList and 'IMAGE_TYPE_ID' in FmpKeyList:
raise Warning("'IMAGE_TYPE_ID' in FMP payload section.", self.FileName, self.CurrentLineNumber)
# get the Image file and Vendor code file
self._GetFMPCapsuleData(FmpData)
if not FmpData.ImageFile:
raise Warning("Missing image file in FMP payload section.", self.FileName, self.CurrentLineNumber)
# check whether more than one Vendor code file
if len(FmpData.VendorCodeFile) > 1:
raise Warning("Vendor code file max of 1 per FMP payload section.", self.FileName, self.CurrentLineNumber)
self.Profile.FmpPayloadDict[FmpUiName] = FmpData
return True
## _GetCapsule() method
#
# Get capsule section contents and store its data into capsule list of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a capsule
# @retval False Not able to find a capsule
#
def _GetCapsule(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[CAPSULE."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[CAPSULE.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
#print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
# % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning.Expected("[Capsule.]", self.FileName, self.CurrentLineNumber)
CapsuleObj = Capsule()
CapsuleName = self._GetUiName()
if not CapsuleName:
raise Warning.Expected("capsule name", self.FileName, self.CurrentLineNumber)
CapsuleObj.UiCapsuleName = CapsuleName.upper()
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
if self._IsKeyword("CREATE_FILE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("file name", self.FileName, self.CurrentLineNumber)
CapsuleObj.CreateFile = self._Token
self._GetCapsuleStatements(CapsuleObj)
self.Profile.CapsuleDict[CapsuleObj.UiCapsuleName] = CapsuleObj
return True
## _GetCapsuleStatements() method
#
# Get statements for capsule
#
# @param self The object pointer
# @param Obj for whom statements are got
#
def _GetCapsuleStatements(self, Obj):
self._GetCapsuleTokens(Obj)
self._GetDefineStatements(Obj)
self._GetSetStatements(Obj)
self._GetCapsuleData(Obj)
## _GetCapsuleTokens() method
#
# Get token statements for capsule
#
# @param self The object pointer
# @param Obj for whom token statements are got
#
def _GetCapsuleTokens(self, Obj):
if not self._GetNextToken():
return False
while self._Token in {"CAPSULE_GUID", "CAPSULE_HEADER_SIZE", "CAPSULE_FLAGS", "OEM_CAPSULE_FLAGS", "CAPSULE_HEADER_INIT_VERSION"}:
Name = self._Token.strip()
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("value", self.FileName, self.CurrentLineNumber)
if Name == 'CAPSULE_FLAGS':
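# CAPSULE_FLAGS accepts a comma-separated combination of the flag names below.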
if not self._Token in {"PersistAcrossReset", "PopulateSystemTable", "InitiateReset"}:
raise Warning.Expected("PersistAcrossReset, PopulateSystemTable, or InitiateReset", self.FileName, self.CurrentLineNumber)
Value = self._Token.strip()
while self._IsToken(TAB_COMMA_SPLIT):
Value += TAB_COMMA_SPLIT
if not self._GetNextToken():
raise Warning.Expected("value", self.FileName, self.CurrentLineNumber)
if not self._Token in {"PersistAcrossReset", "PopulateSystemTable", "InitiateReset"}:
raise Warning.Expected("PersistAcrossReset, PopulateSystemTable, or InitiateReset", self.FileName, self.CurrentLineNumber)
Value += self._Token.strip()
elif Name == 'OEM_CAPSULE_FLAGS':
Value = self._Token.strip()
if not Value.upper().startswith('0X'):
raise Warning.Expected("hex value starting with 0x", self.FileName, self.CurrentLineNumber)
try:
Value = int(Value, 0)
except ValueError:
raise Warning.Expected("hex string failed to convert to value", self.FileName, self.CurrentLineNumber)
if not 0x0000 <= Value <= 0xFFFF:
raise Warning.Expected("hex value between 0x0000 and 0xFFFF", self.FileName, self.CurrentLineNumber)
Value = self._Token.strip()
else:
Value = self._Token.strip()
Obj.TokensDict[Name] = Value
if not self._GetNextToken():
return False
self._UndoToken()
## _GetCapsuleData() method
#
# Get capsule data for capsule
#
# @param self The object pointer
# @param Obj for whom capsule data are got
#
def _GetCapsuleData(self, Obj):
while True:
IsInf = self._GetInfStatement(Obj, True)
IsFile = self._GetFileStatement(Obj, True)
IsFv = self._GetFvStatement(Obj)
IsFd = self._GetFdStatement(Obj)
IsAnyFile = self._GetAnyFileStatement(Obj)
IsAfile = self._GetAfileStatement(Obj)
IsFmp = self._GetFmpStatement(Obj)
if not (IsInf or IsFile or IsFv or IsFd or IsAnyFile or IsAfile or IsFmp):
break
## _GetFMPCapsuleData() method
#
# Get capsule data for FMP capsule
#
# @param self The object pointer
# @param Obj for whom capsule data are got
#
def _GetFMPCapsuleData(self, Obj):
while True:
IsFv = self._GetFvStatement(Obj, True)
IsFd = self._GetFdStatement(Obj, True)
IsAnyFile = self._GetAnyFileStatement(Obj, True)
if not (IsFv or IsFd or IsAnyFile):
break
## _GetFvStatement() method
#
# Get FV for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom FV is got
# @retval True Successfully find a FV statement
# @retval False Not able to find a FV statement
#
def _GetFvStatement(self, CapsuleObj, FMPCapsule = False):
if not self._IsKeyword(BINARY_FILE_TYPE_FV):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in self.Profile.FvDict:
raise Warning("FV name does not exist", self.FileName, self.CurrentLineNumber)
myCapsuleFv = CapsuleFv()
myCapsuleFv.FvName = self._Token
if FMPCapsule:
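# For an FMP capsule the first FV listed becomes the image payload; any further FVs are treated as vendor code files.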
if not CapsuleObj.ImageFile:
CapsuleObj.ImageFile.append(myCapsuleFv)
else:
CapsuleObj.VendorCodeFile.append(myCapsuleFv)
else:
CapsuleObj.CapsuleDataList.append(myCapsuleFv)
return True
## _GetFdStatement() method
#
# Get FD for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom FD is got
# @retval True Successfully find a FD statement
# @retval False Not able to find a FD statement
#
def _GetFdStatement(self, CapsuleObj, FMPCapsule = False):
if not self._IsKeyword("FD"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FD name", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in self.Profile.FdDict:
raise Warning("FD name does not exist", self.FileName, self.CurrentLineNumber)
myCapsuleFd = CapsuleFd()
myCapsuleFd.FdName = self._Token
if FMPCapsule:
if not CapsuleObj.ImageFile:
CapsuleObj.ImageFile.append(myCapsuleFd)
else:
CapsuleObj.VendorCodeFile.append(myCapsuleFd)
else:
CapsuleObj.CapsuleDataList.append(myCapsuleFd)
return True
def _GetFmpStatement(self, CapsuleObj):
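# Accept either the single keyword FMP_PAYLOAD or the two-token form 'FMP PAYLOAD'.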
if not self._IsKeyword("FMP_PAYLOAD"):
if not self._IsKeyword("FMP"):
return False
if not self._IsKeyword("PAYLOAD"):
self._UndoToken()
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("payload name after FMP_PAYLOAD =", self.FileName, self.CurrentLineNumber)
Payload = self._Token.upper()
if Payload not in self.Profile.FmpPayloadDict:
raise Warning("This FMP Payload does not exist: %s" % self._Token, self.FileName, self.CurrentLineNumber)
CapsuleObj.FmpPayloadList.append(self.Profile.FmpPayloadDict[Payload])
return True
def _ParseRawFileStatement(self):
if not self._IsKeyword("FILE"):
return None
if not self._IsKeyword("DATA"):
self._UndoToken()
return None
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("File name", self.FileName, self.CurrentLineNumber)
AnyFileName = self._Token
self._VerifyFile(AnyFileName)
if not os.path.isabs(AnyFileName):
AnyFileName = mws.join(GenFdsGlobalVariable.WorkSpaceDir, AnyFileName)
return AnyFileName
## _GetAnyFileStatement() method
#
# Get AnyFile for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom AnyFile is got
# @retval True Successfully find a Anyfile statement
# @retval False Not able to find a AnyFile statement
#
def _GetAnyFileStatement(self, CapsuleObj, FMPCapsule = False):
AnyFileName = self._ParseRawFileStatement()
if not AnyFileName:
return False
myCapsuleAnyFile = CapsuleAnyFile()
myCapsuleAnyFile.FileName = AnyFileName
if FMPCapsule:
if not CapsuleObj.ImageFile:
CapsuleObj.ImageFile.append(myCapsuleAnyFile)
else:
CapsuleObj.VendorCodeFile.append(myCapsuleAnyFile)
else:
CapsuleObj.CapsuleDataList.append(myCapsuleAnyFile)
return True
## _GetAfileStatement() method
#
# Get Afile for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom Afile is got
# @retval True Successfully find a Afile statement
# @retval False Not able to find a Afile statement
#
def _GetAfileStatement(self, CapsuleObj):
if not self._IsKeyword("APPEND"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Afile name", self.FileName, self.CurrentLineNumber)
AfileName = self._Token
AfileBaseName = os.path.basename(AfileName)
if os.path.splitext(AfileBaseName)[1] not in {".bin", ".BIN", ".Bin", ".dat", ".DAT", ".Dat", ".data", ".DATA", ".Data"}:
raise Warning('invalid binary file type, should be one of "bin", "BIN", "Bin", "dat", "DAT", "Dat", "data", "DATA", "Data"', self.FileName, self.CurrentLineNumber)
if not os.path.isabs(AfileName):
AfileName = GenFdsGlobalVariable.ReplaceWorkspaceMacro(AfileName)
self._VerifyFile(AfileName)
elif not os.path.exists(AfileName):
raise Warning('%s does not exist' % AfileName, self.FileName, self.CurrentLineNumber)
myCapsuleAfile = CapsuleAfile()
myCapsuleAfile.FileName = AfileName
CapsuleObj.CapsuleDataList.append(myCapsuleAfile)
return True
## _GetRule() method
#
# Get Rule section contents and store its data into rule list of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a Rule
# @retval False Not able to find a Rule
#
def _GetRule(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[RULE."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[Rule.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
#print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
# % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning.Expected("[Rule.]", self.FileName, self.CurrentLineNumber)
if not self._SkipToToken(TAB_SPLIT):
raise Warning.Expected("'.'", self.FileName, self.CurrentLineNumber)
Arch = self._SkippedChars.rstrip(TAB_SPLIT)
ModuleType = self._GetModuleType()
TemplateName = ""
if self._IsToken(TAB_SPLIT):
if not self._GetNextWord():
raise Warning.Expected("template name", self.FileName, self.CurrentLineNumber)
TemplateName = self._Token
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
RuleObj = self._GetRuleFileStatements()
RuleObj.Arch = Arch.upper()
RuleObj.ModuleType = ModuleType
RuleObj.TemplateName = TemplateName
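# Rules are keyed as RULE.<ARCH>.<MODULETYPE>[.<TEMPLATENAME>] for later lookup during FFS generation.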
if TemplateName == '':
self.Profile.RuleDict['RULE' + \
TAB_SPLIT + \
Arch.upper() + \
TAB_SPLIT + \
ModuleType.upper() ] = RuleObj
else:
self.Profile.RuleDict['RULE' + \
TAB_SPLIT + \
Arch.upper() + \
TAB_SPLIT + \
ModuleType.upper() + \
TAB_SPLIT + \
TemplateName.upper() ] = RuleObj
return True
## _GetModuleType() method
#
# Return the module type
#
# @param self The object pointer
# @retval string module type
#
def _GetModuleType(self):
if not self._GetNextWord():
raise Warning.Expected("Module type", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in {
SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM,
SUP_MODULE_DXE_CORE, SUP_MODULE_DXE_DRIVER,
SUP_MODULE_DXE_SAL_DRIVER, SUP_MODULE_DXE_SMM_DRIVER,
SUP_MODULE_DXE_RUNTIME_DRIVER, SUP_MODULE_UEFI_DRIVER,
SUP_MODULE_UEFI_APPLICATION, SUP_MODULE_USER_DEFINED, SUP_MODULE_HOST_APPLICATION,
TAB_DEFAULT, SUP_MODULE_BASE,
EDK_COMPONENT_TYPE_SECURITY_CORE,
EDK_COMPONENT_TYPE_COMBINED_PEIM_DRIVER,
EDK_COMPONENT_TYPE_PIC_PEIM,
EDK_COMPONENT_TYPE_RELOCATABLE_PEIM, "PE32_PEIM",
EDK_COMPONENT_TYPE_BS_DRIVER, EDK_COMPONENT_TYPE_RT_DRIVER,
EDK_COMPONENT_TYPE_SAL_RT_DRIVER,
EDK_COMPONENT_TYPE_APPLICATION, "ACPITABLE",
SUP_MODULE_SMM_CORE, SUP_MODULE_MM_STANDALONE,
SUP_MODULE_MM_CORE_STANDALONE}:
raise Warning("Unknown Module type '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
return self._Token
## _GetFileExtension() method
#
# Return the file extension
#
# @param self The object pointer
# @retval string file name extension
#
def _GetFileExtension(self):
if not self._IsToken(TAB_SPLIT):
raise Warning.Expected("'.'", self.FileName, self.CurrentLineNumber)
Ext = ""
if self._GetNextToken():
if FileExtensionPattern.match(self._Token):
Ext = self._Token
return TAB_SPLIT + Ext
else:
raise Warning("Unknown file extension '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
else:
raise Warning.Expected("file extension", self.FileName, self.CurrentLineNumber)
## _GetRuleFileStatement() method
#
# Get rule contents
#
# @param self The object pointer
# @retval Rule Rule object
#
def _GetRuleFileStatements(self):
if not self._IsKeyword("FILE"):
raise Warning.Expected("FILE", self.FileName, self.CurrentLineNumber)
if not self._GetNextWord():
raise Warning.Expected("FFS type", self.FileName, self.CurrentLineNumber)
Type = self._Token.strip().upper()
if Type not in {"RAW", "FREEFORM", SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM,
"PEI_DXE_COMBO", "DRIVER", SUP_MODULE_DXE_CORE, EDK_COMPONENT_TYPE_APPLICATION,
"FV_IMAGE", "SMM", SUP_MODULE_SMM_CORE, SUP_MODULE_MM_STANDALONE,
SUP_MODULE_MM_CORE_STANDALONE}:
raise Warning("Unknown FV type '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
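# The file name GUID may be given as $(NAMED_GUID), a literal GUID, or a PCD(...) reference.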
if not self._IsKeyword("$(NAMED_GUID)"):
if not self._GetNextWord():
NamedGuid = self._CurrentLine()[self.CurrentOffsetWithinLine:].split()[0].strip()
if GlobalData.gGuidPatternEnd.match(NamedGuid):
self.CurrentOffsetWithinLine += len(NamedGuid)
self._Token = NamedGuid
else:
raise Warning.Expected("$(NAMED_GUID)", self.FileName, self.CurrentLineNumber)
if self._Token == 'PCD':
if not self._IsToken("("):
raise Warning.Expected("'('", self.FileName, self.CurrentLineNumber)
PcdPair = self._GetNextPcdSettings()
if not self._IsToken(")"):
raise Warning.Expected("')'", self.FileName, self.CurrentLineNumber)
self._Token = 'PCD('+PcdPair[1]+TAB_SPLIT+PcdPair[0]+')'
NameGuid = self._Token
KeepReloc = None
if self._IsKeyword('RELOCS_STRIPPED') or self._IsKeyword('RELOCS_RETAINED'):
if self._FileCouldHaveRelocFlag(Type):
if self._Token == 'RELOCS_STRIPPED':
KeepReloc = False
else:
KeepReloc = True
else:
raise Warning("File type %s could not have reloc strip flag%d" % (Type, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
KeyStringList = []
if self._GetNextToken():
if TokenFindPattern.match(self._Token):
KeyStringList.append(self._Token)
if self._IsToken(TAB_COMMA_SPLIT):
while self._GetNextToken():
if not TokenFindPattern.match(self._Token):
raise Warning.Expected("KeyString \"Target_Tag_Arch\"", self.FileName, self.CurrentLineNumber)
KeyStringList.append(self._Token)
if not self._IsToken(TAB_COMMA_SPLIT):
break
else:
self._UndoToken()
Fixed = False
if self._IsKeyword("Fixed", True):
Fixed = True
CheckSum = False
if self._IsKeyword("CheckSum", True):
CheckSum = True
AlignValue = ""
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
# For FFS, Auto is the default option, equivalent to ""
if self._Token != "Auto":
AlignValue = self._Token
if self._IsToken("{"):
# Complex file rule expected
NewRule = RuleComplexFile()
NewRule.FvFileType = Type
NewRule.NameGuid = NameGuid
NewRule.Alignment = AlignValue
NewRule.CheckSum = CheckSum
NewRule.Fixed = Fixed
NewRule.KeyStringList = KeyStringList
if KeepReloc is not None:
NewRule.KeepReloc = KeepReloc
while True:
IsEncapsulate = self._GetRuleEncapsulationSection(NewRule)
IsLeaf = self._GetEfiSection(NewRule)
if not IsEncapsulate and not IsLeaf:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
return NewRule
else:
# Simple file rule expected
if not self._GetNextWord():
raise Warning.Expected("leaf section type", self.FileName, self.CurrentLineNumber)
SectionName = self._Token
if SectionName not in {
"COMPAT16", BINARY_FILE_TYPE_PE32,
BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE, "FV_IMAGE",
"RAW",BINARY_FILE_TYPE_DXE_DEPEX, BINARY_FILE_TYPE_UI,
BINARY_FILE_TYPE_PEI_DEPEX, "VERSION", "SUBTYPE_GUID",
BINARY_FILE_TYPE_SMM_DEPEX}:
raise Warning("Unknown leaf section name '%s'" % SectionName, self.FileName, self.CurrentLineNumber)
if self._IsKeyword("Fixed", True):
Fixed = True
if self._IsKeyword("CheckSum", True):
CheckSum = True
SectAlignment = ""
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
if self._Token == 'Auto' and SectionName != BINARY_FILE_TYPE_PE32 and SectionName != BINARY_FILE_TYPE_TE:
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
SectAlignment = self._Token
Ext = None
if self._IsToken(TAB_VALUE_SPLIT):
Ext = self._GetFileExtension()
elif not self._GetNextToken():
raise Warning.Expected("File name", self.FileName, self.CurrentLineNumber)
NewRule = RuleSimpleFile()
NewRule.SectionType = SectionName
NewRule.FvFileType = Type
NewRule.NameGuid = NameGuid
NewRule.Alignment = AlignValue
NewRule.SectAlignment = SectAlignment
NewRule.CheckSum = CheckSum
NewRule.Fixed = Fixed
NewRule.KeyStringList = KeyStringList
if KeepReloc is not None:
NewRule.KeepReloc = KeepReloc
NewRule.FileExtension = Ext
NewRule.FileName = self._Token
return NewRule
## _GetEfiSection() method
#
# Get section list for Rule
#
# @param self The object pointer
# @param Obj for whom section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def _GetEfiSection(self, Obj):
OldPos = self.GetFileBufferPos()
EfiSectionObj = EfiSection()
if not self._GetNextWord():
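# Special case: a RAW-type rule may open directly with '{' followed by a file extension or file name.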
CurrentLine = self._CurrentLine()[self.CurrentOffsetWithinLine:].split()[0].strip()
if self._Token == '{' and Obj.FvFileType == "RAW" and TAB_SPLIT in CurrentLine:
if self._IsToken(TAB_VALUE_SPLIT):
EfiSectionObj.FileExtension = self._GetFileExtension()
elif self._GetNextToken():
EfiSectionObj.FileName = self._Token
EfiSectionObj.SectionType = BINARY_FILE_TYPE_RAW
Obj.SectionList.append(EfiSectionObj)
return True
else:
return False
SectionName = self._Token
if SectionName not in {
"COMPAT16", BINARY_FILE_TYPE_PE32,
BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE, "FV_IMAGE",
"RAW",BINARY_FILE_TYPE_DXE_DEPEX, BINARY_FILE_TYPE_UI,
BINARY_FILE_TYPE_PEI_DEPEX, "VERSION", "SUBTYPE_GUID",
BINARY_FILE_TYPE_SMM_DEPEX, BINARY_FILE_TYPE_GUID}:
self._UndoToken()
return False
if SectionName == "FV_IMAGE":
FvImageSectionObj = FvImageSection()
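# An optional repeated FV_IMAGE keyword is consumed here; it carries no additional data.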
if self._IsKeyword("FV_IMAGE"):
pass
if self._IsToken("{"):
FvObj = FV()
self._GetDefineStatements(FvObj)
self._GetBlockStatement(FvObj)
self._GetSetStatements(FvObj)
self._GetFvAlignment(FvObj)
self._GetFvAttributes(FvObj)
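# Invoked twice, presumably so that both a PEI and a DXE apriori section can appear.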
self._GetAprioriSection(FvObj)
self._GetAprioriSection(FvObj)
while True:
IsInf = self._GetInfStatement(FvObj)
IsFile = self._GetFileStatement(FvObj)
if not IsInf and not IsFile:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
FvImageSectionObj.Fv = FvObj
FvImageSectionObj.FvName = None
else:
if not self._IsKeyword(BINARY_FILE_TYPE_FV):
raise Warning.Expected("'FV'", self.FileName, self.CurrentLineNumber)
FvImageSectionObj.FvFileType = self._Token
if self._GetAlignment():
if self._Token not in ALIGNMENT_NOAUTO:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
FvImageSectionObj.Alignment = self._Token
if self._IsToken(TAB_VALUE_SPLIT):
FvImageSectionObj.FvFileExtension = self._GetFileExtension()
elif self._GetNextToken():
if self._Token not in {
T_CHAR_BRACE_R, "COMPAT16", BINARY_FILE_TYPE_PE32,
BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE,
"FV_IMAGE", "RAW", BINARY_FILE_TYPE_DXE_DEPEX,
BINARY_FILE_TYPE_UI, "VERSION",
BINARY_FILE_TYPE_PEI_DEPEX, BINARY_FILE_TYPE_GUID,
BINARY_FILE_TYPE_SMM_DEPEX}:
FvImageSectionObj.FvFileName = self._Token
else:
self._UndoToken()
else:
raise Warning.Expected("FV file name", self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(FvImageSectionObj)
return True
EfiSectionObj.SectionType = SectionName
if not self._GetNextToken():
raise Warning.Expected("file type", self.FileName, self.CurrentLineNumber)
if self._Token == "STRING":
if not self._RuleSectionCouldHaveString(EfiSectionObj.SectionType):
raise Warning("%s section could NOT have string data%d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Quoted String", self.FileName, self.CurrentLineNumber)
if self._GetStringData():
EfiSectionObj.StringData = self._Token
if self._IsKeyword("BUILD_NUM"):
if not self._RuleSectionCouldHaveBuildNum(EfiSectionObj.SectionType):
raise Warning("%s section could NOT have BUILD_NUM%d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Build number", self.FileName, self.CurrentLineNumber)
EfiSectionObj.BuildNum = self._Token
else:
EfiSectionObj.FileType = self._Token
self._CheckRuleSectionFileType(EfiSectionObj.SectionType, EfiSectionObj.FileType)
if self._IsKeyword("Optional"):
if not self._RuleSectionCouldBeOptional(EfiSectionObj.SectionType):
raise Warning("%s section could NOT be optional%d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
EfiSectionObj.Optional = True
if self._IsKeyword("BUILD_NUM"):
if not self._RuleSectionCouldHaveBuildNum(EfiSectionObj.SectionType):
raise Warning("%s section could NOT have BUILD_NUM%d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Build number", self.FileName, self.CurrentLineNumber)
EfiSectionObj.BuildNum = self._Token
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
if self._Token == 'Auto' and SectionName != BINARY_FILE_TYPE_PE32 and SectionName != BINARY_FILE_TYPE_TE:
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
EfiSectionObj.Alignment = self._Token
if self._IsKeyword('RELOCS_STRIPPED') or self._IsKeyword('RELOCS_RETAINED'):
if self._SectionCouldHaveRelocFlag(EfiSectionObj.SectionType):
if self._Token == 'RELOCS_STRIPPED':
EfiSectionObj.KeepReloc = False
else:
EfiSectionObj.KeepReloc = True
if Obj.KeepReloc is not None and Obj.KeepReloc != EfiSectionObj.KeepReloc:
raise Warning("Section type %s has reloc strip flag conflict with Rule" % EfiSectionObj.SectionType, self.FileName, self.CurrentLineNumber)
else:
raise Warning("Section type %s could not have reloc strip flag" % EfiSectionObj.SectionType, self.FileName, self.CurrentLineNumber)
if self._IsToken(TAB_VALUE_SPLIT):
EfiSectionObj.FileExtension = self._GetFileExtension()
elif self._GetNextToken():
if self._Token not in {
T_CHAR_BRACE_R, "COMPAT16", BINARY_FILE_TYPE_PE32,
BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE,
"FV_IMAGE", "RAW", BINARY_FILE_TYPE_DXE_DEPEX,
BINARY_FILE_TYPE_UI, "VERSION",
BINARY_FILE_TYPE_PEI_DEPEX, BINARY_FILE_TYPE_GUID,
BINARY_FILE_TYPE_SMM_DEPEX}:
if self._Token.startswith('PCD'):
self._UndoToken()
self._GetNextWord()
if self._Token == 'PCD':
if not self._IsToken("("):
raise Warning.Expected("'('", self.FileName, self.CurrentLineNumber)
PcdPair = self._GetNextPcdSettings()
if not self._IsToken(")"):
raise Warning.Expected("')'", self.FileName, self.CurrentLineNumber)
self._Token = 'PCD('+PcdPair[1]+TAB_SPLIT+PcdPair[0]+')'
EfiSectionObj.FileName = self._Token
else:
self._UndoToken()
else:
raise Warning.Expected("section file name", self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(EfiSectionObj)
return True
## _RuleSectionCouldBeOptional() method
#
# Get whether a section could be optional
#
# @param SectionType The section type to check
# @retval True section could be optional
# @retval False section never optional
#
@staticmethod
def _RuleSectionCouldBeOptional(SectionType):
if SectionType in {BINARY_FILE_TYPE_DXE_DEPEX, BINARY_FILE_TYPE_UI, "VERSION", BINARY_FILE_TYPE_PEI_DEPEX, "RAW", BINARY_FILE_TYPE_SMM_DEPEX}:
return True
else:
return False
## _RuleSectionCouldHaveBuildNum() method
#
# Get whether a section could have build number information
#
# @param SectionType The section type to check
# @retval True section could have build number information
# @retval False section never have build number information
#
@staticmethod
def _RuleSectionCouldHaveBuildNum(SectionType):
if SectionType == "VERSION":
return True
else:
return False
## _RuleSectionCouldHaveString() method
#
# Get whether a section could have string
#
# @param SectionType The section type to check
# @retval True section could have string
# @retval False section never have string
#
@staticmethod
def _RuleSectionCouldHaveString(SectionType):
if SectionType in {BINARY_FILE_TYPE_UI, "VERSION"}:
return True
else:
return False
## _CheckRuleSectionFileType() method
#
# Get whether a section matches a file type
#
# @param self The object pointer
# @param SectionType The section type to check
# @param FileType The file type to check
#
def _CheckRuleSectionFileType(self, SectionType, FileType):
WarningString = "Incorrect section file type '%s'"
if SectionType == "COMPAT16":
if FileType not in {"COMPAT16", "SEC_COMPAT16"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_PE32:
if FileType not in {BINARY_FILE_TYPE_PE32, "SEC_PE32"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_PIC:
if FileType not in {BINARY_FILE_TYPE_PIC, "SEC_PIC"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_TE:
if FileType not in {BINARY_FILE_TYPE_TE, "SEC_TE"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == "RAW":
if FileType not in {BINARY_FILE_TYPE_BIN, "SEC_BIN", "RAW", "ASL", "ACPI"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_DXE_DEPEX or SectionType == BINARY_FILE_TYPE_SMM_DEPEX:
if FileType not in {BINARY_FILE_TYPE_DXE_DEPEX, "SEC_DXE_DEPEX", BINARY_FILE_TYPE_SMM_DEPEX}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_UI:
if FileType not in {BINARY_FILE_TYPE_UI, "SEC_UI"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == "VERSION":
if FileType not in {"VERSION", "SEC_VERSION"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_PEI_DEPEX:
if FileType not in {BINARY_FILE_TYPE_PEI_DEPEX, "SEC_PEI_DEPEX"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_GUID:
if FileType not in {BINARY_FILE_TYPE_PE32, "SEC_GUID"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
## _GetRuleEncapsulationSection() method
#
# Get encapsulation section for Rule
#
# @param self The object pointer
    #   @param  theRule     the Rule object to which the parsed section is added
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def _GetRuleEncapsulationSection(self, theRule):
if self._IsKeyword("COMPRESS"):
Type = "PI_STD"
if self._IsKeyword("PI_STD") or self._IsKeyword("PI_NONE"):
Type = self._Token
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
CompressSectionObj = CompressSection()
CompressSectionObj.CompType = Type
# Recursive sections...
while True:
IsEncapsulate = self._GetRuleEncapsulationSection(CompressSectionObj)
IsLeaf = self._GetEfiSection(CompressSectionObj)
if not IsEncapsulate and not IsLeaf:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
theRule.SectionList.append(CompressSectionObj)
return True
elif self._IsKeyword("GUIDED"):
GuidValue = None
if self._GetNextGuid():
if self._Token in GlobalData.gGuidDict:
self._Token = GuidStructureStringToGuidString(GlobalData.gGuidDict[self._Token]).upper()
GuidValue = self._Token
if self._IsKeyword("$(NAMED_GUID)"):
GuidValue = self._Token
AttribDict = self._GetGuidAttrib()
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
GuidSectionObj = GuidSection()
GuidSectionObj.NameGuid = GuidValue
GuidSectionObj.SectionType = "GUIDED"
GuidSectionObj.ProcessRequired = AttribDict["PROCESSING_REQUIRED"]
GuidSectionObj.AuthStatusValid = AttribDict["AUTH_STATUS_VALID"]
GuidSectionObj.ExtraHeaderSize = AttribDict["EXTRA_HEADER_SIZE"]
# Efi sections...
while True:
IsEncapsulate = self._GetRuleEncapsulationSection(GuidSectionObj)
IsLeaf = self._GetEfiSection(GuidSectionObj)
if not IsEncapsulate and not IsLeaf:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
theRule.SectionList.append(GuidSectionObj)
return True
return False
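
    # A hedged sketch of the Rule encapsulation syntax that the method above parses.
    # The GUID, paths and attribute values below are placeholders, not taken from a
    # real platform description:
    #
    #   [Rule.Common.DXE_DRIVER]
    #     FILE DRIVER = $(NAMED_GUID) {
    #       COMPRESS PI_STD {
    #         GUIDED A31280AD-481E-41B6-95E8-127F4C984779 PROCESSING_REQUIRED = TRUE {
    #           PE32  PE32  $(INF_OUTPUT)/$(MODULE_NAME).efi
    #           UI    STRING="$(MODULE_NAME)" Optional
    #         }
    #       }
    #     }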
## _GetOptionRom() method
#
# Get OptionROM section contents and store its data into OptionROM list of self.Profile
#
# @param self The object pointer
    #   @retval True        Successfully find an OptionROM
    #   @retval False       Not able to find an OptionROM
#
def _GetOptionRom(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[OPTIONROM."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[OptionRom.", True):
raise Warning("Unknown Keyword '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
OptRomName = self._GetUiName()
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
OptRomObj = OPTIONROM(OptRomName)
self.Profile.OptRomDict[OptRomName] = OptRomObj
while True:
isInf = self._GetOptRomInfStatement(OptRomObj)
isFile = self._GetOptRomFileStatement(OptRomObj)
if not isInf and not isFile:
break
return True
## _GetOptRomInfStatement() method
#
# Get INF statements
#
# @param self The object pointer
    #   @param  Obj         the OptionROM object to which the INF statement is added
# @retval True Successfully find inf statement
# @retval False Not able to find inf statement
#
def _GetOptRomInfStatement(self, Obj):
if not self._IsKeyword("INF"):
return False
ffsInf = OptRomInfStatement()
self._GetInfOptions(ffsInf)
if not self._GetNextToken():
raise Warning.Expected("INF file path", self.FileName, self.CurrentLineNumber)
ffsInf.InfFileName = self._Token
if ffsInf.InfFileName.replace(TAB_WORKSPACE, '').find('$') == -1:
#check for file path
ErrorCode, ErrorInfo = PathClass(NormPath(ffsInf.InfFileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
NewFileName = ffsInf.InfFileName
if ffsInf.OverrideGuid:
NewFileName = ProcessDuplicatedInf(PathClass(ffsInf.InfFileName,GenFdsGlobalVariable.WorkSpaceDir), ffsInf.OverrideGuid, GenFdsGlobalVariable.WorkSpaceDir).Path
if not NewFileName in self.Profile.InfList:
self.Profile.InfList.append(NewFileName)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.InfFileLineList.append(FileLineTuple)
if ffsInf.UseArch:
if ffsInf.UseArch not in self.Profile.InfDict:
self.Profile.InfDict[ffsInf.UseArch] = [ffsInf.InfFileName]
else:
self.Profile.InfDict[ffsInf.UseArch].append(ffsInf.InfFileName)
else:
self.Profile.InfDict['ArchTBD'].append(ffsInf.InfFileName)
self._GetOptRomOverrides (ffsInf)
Obj.FfsList.append(ffsInf)
return True
## _GetOptRomOverrides() method
#
# Get overrides for OptROM INF & FILE
#
# @param self The object pointer
    #   @param  Obj         the INF or FILE statement object that receives the overrides
#
def _GetOptRomOverrides(self, Obj):
if self._IsToken('{'):
Overrides = OverrideAttribs()
while True:
if self._IsKeyword("PCI_VENDOR_ID"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex vendor id", self.FileName, self.CurrentLineNumber)
Overrides.PciVendorId = self._Token
continue
if self._IsKeyword("PCI_CLASS_CODE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex class code", self.FileName, self.CurrentLineNumber)
Overrides.PciClassCode = self._Token
continue
if self._IsKeyword("PCI_DEVICE_ID"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
# Get a list of PCI IDs
Overrides.PciDeviceId = ""
while (self._GetNextHexNumber()):
Overrides.PciDeviceId = "{} {}".format(Overrides.PciDeviceId, self._Token)
if not Overrides.PciDeviceId:
raise Warning.Expected("one or more Hex device ids", self.FileName, self.CurrentLineNumber)
continue
if self._IsKeyword("PCI_REVISION"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex revision", self.FileName, self.CurrentLineNumber)
Overrides.PciRevision = self._Token
continue
if self._IsKeyword("PCI_COMPRESS"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("TRUE/FALSE for compress", self.FileName, self.CurrentLineNumber)
Overrides.NeedCompress = self._Token.upper() == 'TRUE'
continue
if self._IsToken(T_CHAR_BRACE_R):
break
else:
EdkLogger.error("FdfParser", FORMAT_INVALID, File=self.FileName, Line=self.CurrentLineNumber)
Obj.OverrideAttribs = Overrides
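
    # A hedged sketch of the override block that the method above parses, as it would
    # appear after an INF or FILE statement inside an [OptionRom.*] section (all
    # values are placeholders):
    #
    #   INF SomePkg/OptionRomPkg/ExampleRom/ExampleRom.inf {
    #     PCI_VENDOR_ID  = 0x8086
    #     PCI_DEVICE_ID  = 0x1234 0x5678
    #     PCI_CLASS_CODE = 0x030000
    #     PCI_REVISION   = 0x01
    #     PCI_COMPRESS   = TRUE
    #   }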
## _GetOptRomFileStatement() method
#
# Get FILE statements
#
# @param self The object pointer
    #   @param  Obj         the OptionROM object to which the FILE statement is added
# @retval True Successfully find FILE statement
# @retval False Not able to find FILE statement
#
def _GetOptRomFileStatement(self, Obj):
if not self._IsKeyword("FILE"):
return False
FfsFileObj = OptRomFileStatement()
if not self._IsKeyword("EFI") and not self._IsKeyword(BINARY_FILE_TYPE_BIN):
raise Warning.Expected("Binary type (EFI/BIN)", self.FileName, self.CurrentLineNumber)
FfsFileObj.FileType = self._Token
if not self._GetNextToken():
raise Warning.Expected("File path", self.FileName, self.CurrentLineNumber)
FfsFileObj.FileName = self._Token
if FfsFileObj.FileName.replace(TAB_WORKSPACE, '').find('$') == -1:
#check for file path
ErrorCode, ErrorInfo = PathClass(NormPath(FfsFileObj.FileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
if FfsFileObj.FileType == 'EFI':
self._GetOptRomOverrides(FfsFileObj)
Obj.FfsList.append(FfsFileObj)
return True
## _GetCapInFd() method
#
# Get Cap list contained in FD
#
# @param self The object pointer
# @param FdName FD name
# @retval CapList List of Capsule in FD
#
def _GetCapInFd (self, FdName):
CapList = []
if FdName.upper() in self.Profile.FdDict:
FdObj = self.Profile.FdDict[FdName.upper()]
for elementRegion in FdObj.RegionList:
if elementRegion.RegionType == 'CAPSULE':
for elementRegionData in elementRegion.RegionDataList:
if elementRegionData.endswith(".cap"):
continue
if elementRegionData is not None and elementRegionData.upper() not in CapList:
CapList.append(elementRegionData.upper())
return CapList
## _GetReferencedFdCapTuple() method
#
# Get FV and FD list referenced by a capsule image
#
# @param self The object pointer
# @param CapObj Capsule section to be searched
# @param RefFdList referenced FD by section
# @param RefFvList referenced FV by section
#
def _GetReferencedFdCapTuple(self, CapObj, RefFdList = [], RefFvList = []):
for CapsuleDataObj in CapObj.CapsuleDataList:
if hasattr(CapsuleDataObj, 'FvName') and CapsuleDataObj.FvName is not None and CapsuleDataObj.FvName.upper() not in RefFvList:
RefFvList.append (CapsuleDataObj.FvName.upper())
elif hasattr(CapsuleDataObj, 'FdName') and CapsuleDataObj.FdName is not None and CapsuleDataObj.FdName.upper() not in RefFdList:
RefFdList.append (CapsuleDataObj.FdName.upper())
elif CapsuleDataObj.Ffs is not None:
if isinstance(CapsuleDataObj.Ffs, FileStatement):
if CapsuleDataObj.Ffs.FvName is not None and CapsuleDataObj.Ffs.FvName.upper() not in RefFvList:
RefFvList.append(CapsuleDataObj.Ffs.FvName.upper())
elif CapsuleDataObj.Ffs.FdName is not None and CapsuleDataObj.Ffs.FdName.upper() not in RefFdList:
RefFdList.append(CapsuleDataObj.Ffs.FdName.upper())
else:
self._GetReferencedFdFvTupleFromSection(CapsuleDataObj.Ffs, RefFdList, RefFvList)
## _GetFvInFd() method
#
# Get FV list contained in FD
#
# @param self The object pointer
# @param FdName FD name
# @retval FvList list of FV in FD
#
def _GetFvInFd (self, FdName):
FvList = []
if FdName.upper() in self.Profile.FdDict:
FdObj = self.Profile.FdDict[FdName.upper()]
for elementRegion in FdObj.RegionList:
if elementRegion.RegionType == BINARY_FILE_TYPE_FV:
for elementRegionData in elementRegion.RegionDataList:
if elementRegionData.endswith(".fv"):
continue
if elementRegionData is not None and elementRegionData.upper() not in FvList:
FvList.append(elementRegionData.upper())
return FvList
## _GetReferencedFdFvTuple() method
#
# Get FD and FV list referenced by a FFS file
#
# @param self The object pointer
    #   @param  FvObj       FV object whose FFS files are searched
# @param RefFdList referenced FD by section
# @param RefFvList referenced FV by section
#
def _GetReferencedFdFvTuple(self, FvObj, RefFdList = [], RefFvList = []):
for FfsObj in FvObj.FfsList:
if isinstance(FfsObj, FileStatement):
if FfsObj.FvName is not None and FfsObj.FvName.upper() not in RefFvList:
RefFvList.append(FfsObj.FvName.upper())
elif FfsObj.FdName is not None and FfsObj.FdName.upper() not in RefFdList:
RefFdList.append(FfsObj.FdName.upper())
else:
self._GetReferencedFdFvTupleFromSection(FfsObj, RefFdList, RefFvList)
## _GetReferencedFdFvTupleFromSection() method
#
# Get FD and FV list referenced by a FFS section
#
# @param self The object pointer
# @param FfsFile contains sections to be searched
# @param FdList referenced FD by section
# @param FvList referenced FV by section
#
def _GetReferencedFdFvTupleFromSection(self, FfsFile, FdList = [], FvList = []):
SectionStack = list(FfsFile.SectionList)
while SectionStack != []:
SectionObj = SectionStack.pop()
if isinstance(SectionObj, FvImageSection):
if SectionObj.FvName is not None and SectionObj.FvName.upper() not in FvList:
FvList.append(SectionObj.FvName.upper())
if SectionObj.Fv is not None and SectionObj.Fv.UiFvName is not None and SectionObj.Fv.UiFvName.upper() not in FvList:
FvList.append(SectionObj.Fv.UiFvName.upper())
self._GetReferencedFdFvTuple(SectionObj.Fv, FdList, FvList)
if isinstance(SectionObj, CompressSection) or isinstance(SectionObj, GuidSection):
SectionStack.extend(SectionObj.SectionList)
## CycleReferenceCheck() method
#
# Check whether cycle reference exists in FDF
#
# @param self The object pointer
# @retval True cycle reference exists
    #   @retval False       No cycle reference exists
#
def CycleReferenceCheck(self):
#
# Check the cycle between FV and FD image
#
MaxLength = len (self.Profile.FvDict)
for FvName in self.Profile.FvDict:
LogStr = "\nCycle Reference Checking for FV: %s\n" % FvName
            RefFvStack = {FvName}
FdAnalyzedList = set()
Index = 0
while RefFvStack and Index < MaxLength:
Index = Index + 1
FvNameFromStack = RefFvStack.pop()
if FvNameFromStack.upper() in self.Profile.FvDict:
FvObj = self.Profile.FvDict[FvNameFromStack.upper()]
else:
continue
RefFdList = []
RefFvList = []
self._GetReferencedFdFvTuple(FvObj, RefFdList, RefFvList)
for RefFdName in RefFdList:
if RefFdName in FdAnalyzedList:
continue
LogStr += "FV %s contains FD %s\n" % (FvNameFromStack, RefFdName)
FvInFdList = self._GetFvInFd(RefFdName)
if FvInFdList != []:
for FvNameInFd in FvInFdList:
LogStr += "FD %s contains FV %s\n" % (RefFdName, FvNameInFd)
if FvNameInFd not in RefFvStack:
RefFvStack.add(FvNameInFd)
if FvName in RefFvStack or FvNameFromStack in RefFvStack:
EdkLogger.info(LogStr)
return True
FdAnalyzedList.add(RefFdName)
for RefFvName in RefFvList:
LogStr += "FV %s contains FV %s\n" % (FvNameFromStack, RefFvName)
if RefFvName not in RefFvStack:
RefFvStack.add(RefFvName)
if FvName in RefFvStack or FvNameFromStack in RefFvStack:
EdkLogger.info(LogStr)
return True
#
# Check the cycle between Capsule and FD image
#
MaxLength = len (self.Profile.CapsuleDict)
for CapName in self.Profile.CapsuleDict:
#
# Capsule image to be checked.
#
LogStr = "\n\n\nCycle Reference Checking for Capsule: %s\n" % CapName
RefCapStack = {CapName}
FdAnalyzedList = set()
FvAnalyzedList = set()
Index = 0
while RefCapStack and Index < MaxLength:
Index = Index + 1
CapNameFromStack = RefCapStack.pop()
if CapNameFromStack.upper() in self.Profile.CapsuleDict:
CapObj = self.Profile.CapsuleDict[CapNameFromStack.upper()]
else:
continue
RefFvList = []
RefFdList = []
self._GetReferencedFdCapTuple(CapObj, RefFdList, RefFvList)
FvListLength = 0
FdListLength = 0
while FvListLength < len (RefFvList) or FdListLength < len (RefFdList):
for RefFdName in RefFdList:
if RefFdName in FdAnalyzedList:
continue
LogStr += "Capsule %s contains FD %s\n" % (CapNameFromStack, RefFdName)
for CapNameInFd in self._GetCapInFd(RefFdName):
LogStr += "FD %s contains Capsule %s\n" % (RefFdName, CapNameInFd)
if CapNameInFd not in RefCapStack:
                                RefCapStack.add(CapNameInFd)
if CapName in RefCapStack or CapNameFromStack in RefCapStack:
EdkLogger.info(LogStr)
return True
for FvNameInFd in self._GetFvInFd(RefFdName):
LogStr += "FD %s contains FV %s\n" % (RefFdName, FvNameInFd)
if FvNameInFd not in RefFvList:
RefFvList.append(FvNameInFd)
FdAnalyzedList.add(RefFdName)
#
# the number of the parsed FV and FD image
#
FvListLength = len (RefFvList)
FdListLength = len (RefFdList)
for RefFvName in RefFvList:
if RefFvName in FvAnalyzedList:
continue
LogStr += "Capsule %s contains FV %s\n" % (CapNameFromStack, RefFvName)
if RefFvName.upper() in self.Profile.FvDict:
FvObj = self.Profile.FvDict[RefFvName.upper()]
else:
continue
self._GetReferencedFdFvTuple(FvObj, RefFdList, RefFvList)
FvAnalyzedList.add(RefFvName)
return False
def GetAllIncludedFile (self):
global AllIncludeFileList
return AllIncludeFileList
if __name__ == "__main__":
import sys
try:
test_file = sys.argv[1]
except IndexError as v:
print("Usage: %s filename" % sys.argv[0])
sys.exit(1)
parser = FdfParser(test_file)
try:
parser.ParseFile()
parser.CycleReferenceCheck()
except Warning as X:
print(str(X))
else:
print("Success!")
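
# A minimal programmatic-use sketch mirroring the __main__ block above. It assumes a
# valid FDF path and that the GenFds workspace globals the parser relies on are set
# up; the function name and arguments are illustrative only.
def _example_collect_fv_names(fdf_file_path, fd_name):
    example_parser = FdfParser(fdf_file_path)
    example_parser.ParseFile()
    # Profile.FdDict/FvDict are populated by ParseFile(); _GetFvInFd() walks the
    # regions of the named FD and returns the upper-cased FV names it references.
    return example_parser._GetFvInFd(fd_name)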
| edk2-master | BaseTools/Source/Python/GenFds/FdfParser.py |
## @file
# Python 'GenFds' package initialization file.
#
# This file is required to make the Python interpreter treat the directory
# as a package.
#
# Copyright (c) 2007 - 2010, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
| edk2-master | BaseTools/Source/Python/GenFds/__init__.py |
## @file
# Complex Rule object for generating FFS
#
# Copyright (c) 2007, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from . import Rule
from CommonDataClass.FdfClass import RuleComplexFileClassObject
## complex rule
#
#
class RuleComplexFile(RuleComplexFileClassObject) :
## The constructor
#
# @param self The object pointer
#
def __init__(self):
RuleComplexFileClassObject.__init__(self)
| edk2-master | BaseTools/Source/Python/GenFds/RuleComplexFile.py |
## @file
# process GUIDed section generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2018, Hewlett Packard Enterprise Development, L.P.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from . import Section
import subprocess
from .Ffs import SectionSuffix
import Common.LongFilePathOs as os
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from .GenFdsGlobalVariable import FindExtendTool
from CommonDataClass.FdfClass import GuidSectionClassObject
import sys
from Common import EdkLogger
from Common.BuildToolError import *
from .FvImageSection import FvImageSection
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.DataType import *
## generate GUIDed section
#
#
class GuidSection(GuidSectionClassObject) :
## The constructor
#
# @param self The object pointer
#
def __init__(self):
GuidSectionClassObject.__init__(self)
## GenSection() method
#
# Generate GUIDed section
#
# @param self The object pointer
# @param OutputPath Where to place output file
# @param ModuleName Which module this section belongs to
# @param SecNum Index of section
# @param KeyStringList Filter for inputs of section generation
# @param FfsInf FfsInfStatement object that contains this section data
# @param Dict dictionary contains macro and its value
# @retval tuple (Generated file name, section alignment)
#
def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf=None, Dict=None, IsMakefile=False):
#
# Generate all section
#
self.KeyStringList = KeyStringList
self.CurrentArchList = GenFdsGlobalVariable.ArchList
if FfsInf is not None:
self.Alignment = FfsInf.__ExtendMacro__(self.Alignment)
self.NameGuid = FfsInf.__ExtendMacro__(self.NameGuid)
self.SectionType = FfsInf.__ExtendMacro__(self.SectionType)
self.CurrentArchList = [FfsInf.CurrentArch]
SectFile = tuple()
SectAlign = []
Index = 0
MaxAlign = None
if Dict is None:
Dict = {}
if self.FvAddr != []:
FvAddrIsSet = True
else:
FvAddrIsSet = False
if self.ProcessRequired in ("TRUE", "1"):
if self.FvAddr != []:
                #FvAddr is not used when the image is processed.
self.FvAddr = []
if self.FvParentAddr is not None:
                #FvParentAddr is not used when the image is processed.
self.FvParentAddr = None
for Sect in self.SectionList:
Index = Index + 1
SecIndex = '%s.%d' % (SecNum, Index)
# set base address for inside FvImage
if isinstance(Sect, FvImageSection):
if self.FvAddr != []:
Sect.FvAddr = self.FvAddr.pop(0)
self.IncludeFvSection = True
elif isinstance(Sect, GuidSection):
Sect.FvAddr = self.FvAddr
Sect.FvParentAddr = self.FvParentAddr
ReturnSectList, align = Sect.GenSection(OutputPath, ModuleName, SecIndex, KeyStringList, FfsInf, Dict, IsMakefile=IsMakefile)
if isinstance(Sect, GuidSection):
if Sect.IncludeFvSection:
self.IncludeFvSection = Sect.IncludeFvSection
if align is not None:
if MaxAlign is None:
MaxAlign = align
if GenFdsGlobalVariable.GetAlignment (align) > GenFdsGlobalVariable.GetAlignment (MaxAlign):
MaxAlign = align
if ReturnSectList != []:
if align is None:
align = "1"
for file in ReturnSectList:
SectFile += (file,)
SectAlign.append(align)
if MaxAlign is not None:
if self.Alignment is None:
self.Alignment = MaxAlign
else:
if GenFdsGlobalVariable.GetAlignment (MaxAlign) > GenFdsGlobalVariable.GetAlignment (self.Alignment):
self.Alignment = MaxAlign
OutputFile = OutputPath + \
os.sep + \
ModuleName + \
SUP_MODULE_SEC + \
SecNum + \
SectionSuffix['GUIDED']
OutputFile = os.path.normpath(OutputFile)
ExternalTool = None
ExternalOption = None
if self.NameGuid is not None:
ExternalTool, ExternalOption = FindExtendTool(self.KeyStringList, self.CurrentArchList, self.NameGuid)
#
        # If no GUID is specified, fall back to the default
        # GenCrc32 section
#
if self.NameGuid is None :
GenFdsGlobalVariable.VerboseLogger("Use GenSection function Generate CRC32 Section")
GenFdsGlobalVariable.GenerateSection(OutputFile, SectFile, Section.Section.SectionType[self.SectionType], InputAlign=SectAlign, IsMakefile=IsMakefile)
OutputFileList = []
OutputFileList.append(OutputFile)
return OutputFileList, self.Alignment
        #or the GUID has no registered external tool
elif ExternalTool is None:
EdkLogger.error("GenFds", GENFDS_ERROR, "No tool found with GUID %s" % self.NameGuid)
else:
DummyFile = OutputFile + ".dummy"
#
# Call GenSection with DUMMY section type.
#
GenFdsGlobalVariable.GenerateSection(DummyFile, SectFile, InputAlign=SectAlign, IsMakefile=IsMakefile)
#
# Use external tool process the Output
#
TempFile = OutputPath + \
os.sep + \
ModuleName + \
SUP_MODULE_SEC + \
SecNum + \
'.tmp'
TempFile = os.path.normpath(TempFile)
#
# Remove temp file if its time stamp is older than dummy file
# Just in case the external tool fails at this time but succeeded before
# Error should be reported if the external tool does not generate a new output based on new input
#
if os.path.exists(TempFile) and os.path.exists(DummyFile) and os.path.getmtime(TempFile) < os.path.getmtime(DummyFile):
os.remove(TempFile)
FirstCall = False
CmdOption = '-e'
if ExternalOption is not None:
CmdOption = CmdOption + ' ' + ExternalOption
if not GenFdsGlobalVariable.EnableGenfdsMultiThread:
if self.ProcessRequired not in ("TRUE", "1") and self.IncludeFvSection and not FvAddrIsSet and self.FvParentAddr is not None:
#FirstCall is only set for the encapsulated flash FV image without process required attribute.
FirstCall = True
#
# Call external tool
#
ReturnValue = [1]
if FirstCall:
#first try to call the guided tool with -z option and CmdOption for the no process required guided tool.
GenFdsGlobalVariable.GuidTool(TempFile, [DummyFile], ExternalTool, '-z' + ' ' + CmdOption, ReturnValue)
#
                # When no first call was made, or the first call failed, ReturnValue[0] is not 0.
# Call the guided tool with CmdOption
#
if ReturnValue[0] != 0:
FirstCall = False
ReturnValue[0] = 0
GenFdsGlobalVariable.GuidTool(TempFile, [DummyFile], ExternalTool, CmdOption)
#
                # Some external tools do not follow the convention of returning nonzero on failure,
                # so the output file has to be checked explicitly.
#
if not os.path.exists(TempFile) :
EdkLogger.error("GenFds", COMMAND_FAILURE, 'Fail to call %s, no output file was generated' % ExternalTool)
FileHandleIn = open(DummyFile, 'rb')
FileHandleIn.seek(0, 2)
InputFileSize = FileHandleIn.tell()
FileHandleOut = open(TempFile, 'rb')
FileHandleOut.seek(0, 2)
TempFileSize = FileHandleOut.tell()
Attribute = []
HeaderLength = None
if self.ExtraHeaderSize != -1:
HeaderLength = str(self.ExtraHeaderSize)
if self.ProcessRequired == "NONE" and HeaderLength is None:
if TempFileSize > InputFileSize:
FileHandleIn.seek(0)
BufferIn = FileHandleIn.read()
FileHandleOut.seek(0)
BufferOut = FileHandleOut.read()
if BufferIn == BufferOut[TempFileSize - InputFileSize:]:
HeaderLength = str(TempFileSize - InputFileSize)
                    # If no header length could be determined, mark the GUIDed section as PROCESSING_REQUIRED
if HeaderLength is None:
Attribute.append('PROCESSING_REQUIRED')
FileHandleIn.close()
FileHandleOut.close()
if FirstCall and 'PROCESSING_REQUIRED' in Attribute:
                    # The data produced by the -z option on the first call is the PROCESSING_REQUIRED form; call the guided tool again with the real option.
GenFdsGlobalVariable.GuidTool(TempFile, [DummyFile], ExternalTool, CmdOption)
#
# Call Gensection Add Section Header
#
if self.ProcessRequired in ("TRUE", "1"):
if 'PROCESSING_REQUIRED' not in Attribute:
Attribute.append('PROCESSING_REQUIRED')
if self.AuthStatusValid in ("TRUE", "1"):
Attribute.append('AUTH_STATUS_VALID')
GenFdsGlobalVariable.GenerateSection(OutputFile, [TempFile], Section.Section.SectionType['GUIDED'],
Guid=self.NameGuid, GuidAttr=Attribute, GuidHdrLen=HeaderLength)
else:
                #also pass the input (dummy) file so GenSec can determine PROCESSING_REQUIRED
GenFdsGlobalVariable.GuidTool(TempFile, [DummyFile], ExternalTool, CmdOption, IsMakefile=IsMakefile)
Attribute = []
HeaderLength = None
if self.ExtraHeaderSize != -1:
HeaderLength = str(self.ExtraHeaderSize)
if self.AuthStatusValid in ("TRUE", "1"):
Attribute.append('AUTH_STATUS_VALID')
if self.ProcessRequired == "NONE" and HeaderLength is None:
GenFdsGlobalVariable.GenerateSection(OutputFile, [TempFile], Section.Section.SectionType['GUIDED'],
Guid=self.NameGuid, GuidAttr=Attribute,
GuidHdrLen=HeaderLength, DummyFile=DummyFile, IsMakefile=IsMakefile)
else:
if self.ProcessRequired in ("TRUE", "1"):
if 'PROCESSING_REQUIRED' not in Attribute:
Attribute.append('PROCESSING_REQUIRED')
GenFdsGlobalVariable.GenerateSection(OutputFile, [TempFile], Section.Section.SectionType['GUIDED'],
Guid=self.NameGuid, GuidAttr=Attribute,
GuidHdrLen=HeaderLength, IsMakefile=IsMakefile)
OutputFileList = []
OutputFileList.append(OutputFile)
if 'PROCESSING_REQUIRED' in Attribute:
                # reset the guided section alignment to None for process-required guided data
self.Alignment = None
self.IncludeFvSection = False
self.ProcessRequired = "TRUE"
if IsMakefile and self.Alignment is not None and self.Alignment.strip() == '0':
self.Alignment = '1'
return OutputFileList, self.Alignment
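
# A minimal sketch, assuming GenFdsGlobalVariable has been initialized by the GenFds
# driver and that child_sections holds leaf Section objects parsed from the FDF:
# leaving NameGuid unset selects the default CRC32 path in GenSection() above.
def _example_crc32_wrap(output_dir, child_sections):
    guided = GuidSection()
    guided.SectionType = 'GUIDED'
    guided.SectionList = child_sections
    # Returns the generated section file list and the resulting alignment.
    return guided.GenSection(output_dir, 'ExampleModule', '1', [])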
| edk2-master | BaseTools/Source/Python/GenFds/GuidSection.py |
## @file
# process FFS generation from FILE statement
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from io import BytesIO
from struct import pack
from CommonDataClass.FdfClass import FileStatementClassObject
from Common import EdkLogger
from Common.BuildToolError import GENFDS_ERROR
from Common.Misc import GuidStructureByteArrayToGuidString, SaveFileOnChange
import Common.LongFilePathOs as os
from .GuidSection import GuidSection
from .FvImageSection import FvImageSection
from .Ffs import FdfFvFileTypeToFileType
from .GenFdsGlobalVariable import GenFdsGlobalVariable
import shutil
## generate FFS from FILE
#
#
class FileStatement (FileStatementClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
FileStatementClassObject.__init__(self)
self.CurrentLineNum = None
self.CurrentLineContent = None
self.FileName = None
self.InfFileName = None
self.SubAlignment = None
## GenFfs() method
#
# Generate FFS
#
# @param self The object pointer
# @param Dict dictionary contains macro and value pair
# @param FvChildAddr Array of the inside FvImage base address
# @param FvParentAddr Parent Fv base address
# @retval string Generated FFS file name
#
def GenFfs(self, Dict = None, FvChildAddr=[], FvParentAddr=None, IsMakefile=False, FvName=None):
if self.NameGuid and self.NameGuid.startswith('PCD('):
PcdValue = GenFdsGlobalVariable.GetPcdValue(self.NameGuid)
if len(PcdValue) == 0:
EdkLogger.error("GenFds", GENFDS_ERROR, '%s NOT defined.' \
% (self.NameGuid))
if PcdValue.startswith('{'):
PcdValue = GuidStructureByteArrayToGuidString(PcdValue)
RegistryGuidStr = PcdValue
if len(RegistryGuidStr) == 0:
EdkLogger.error("GenFds", GENFDS_ERROR, 'GUID value for %s in wrong format.' \
% (self.NameGuid))
self.NameGuid = RegistryGuidStr
Str = self.NameGuid
if FvName:
Str += FvName
OutputDir = os.path.join(GenFdsGlobalVariable.FfsDir, Str)
if os.path.exists(OutputDir):
shutil.rmtree(OutputDir)
if not os.path.exists(OutputDir):
os.makedirs(OutputDir)
if Dict is None:
Dict = {}
Dict.update(self.DefineVarDict)
SectionAlignments = None
if self.FvName:
Buffer = BytesIO()
if self.FvName.upper() not in GenFdsGlobalVariable.FdfParser.Profile.FvDict:
EdkLogger.error("GenFds", GENFDS_ERROR, "FV (%s) is NOT described in FDF file!" % (self.FvName))
Fv = GenFdsGlobalVariable.FdfParser.Profile.FvDict.get(self.FvName.upper())
FileName = Fv.AddToBuffer(Buffer)
SectionFiles = [FileName]
elif self.FdName:
if self.FdName.upper() not in GenFdsGlobalVariable.FdfParser.Profile.FdDict:
EdkLogger.error("GenFds", GENFDS_ERROR, "FD (%s) is NOT described in FDF file!" % (self.FdName))
Fd = GenFdsGlobalVariable.FdfParser.Profile.FdDict.get(self.FdName.upper())
FileName = Fd.GenFd()
SectionFiles = [FileName]
elif self.FileName:
if hasattr(self, 'FvFileType') and self.FvFileType == 'RAW':
if isinstance(self.FileName, list) and isinstance(self.SubAlignment, list) and len(self.FileName) == len(self.SubAlignment):
FileContent = BytesIO()
MaxAlignIndex = 0
MaxAlignValue = 1
for Index, File in enumerate(self.FileName):
try:
f = open(File, 'rb')
except:
GenFdsGlobalVariable.ErrorLogger("Error opening RAW file %s." % (File))
Content = f.read()
f.close()
AlignValue = 1
if self.SubAlignment[Index]:
AlignValue = GenFdsGlobalVariable.GetAlignment(self.SubAlignment[Index])
if AlignValue > MaxAlignValue:
MaxAlignIndex = Index
MaxAlignValue = AlignValue
FileContent.write(Content)
if len(FileContent.getvalue()) % AlignValue != 0:
Size = AlignValue - len(FileContent.getvalue()) % AlignValue
for i in range(0, Size):
FileContent.write(pack('B', 0xFF))
if FileContent.getvalue() != b'':
OutputRAWFile = os.path.join(GenFdsGlobalVariable.FfsDir, self.NameGuid, self.NameGuid + '.raw')
SaveFileOnChange(OutputRAWFile, FileContent.getvalue(), True)
self.FileName = OutputRAWFile
self.SubAlignment = self.SubAlignment[MaxAlignIndex]
if self.Alignment and self.SubAlignment:
if GenFdsGlobalVariable.GetAlignment (self.Alignment) < GenFdsGlobalVariable.GetAlignment (self.SubAlignment):
self.Alignment = self.SubAlignment
elif self.SubAlignment:
self.Alignment = self.SubAlignment
self.FileName = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FileName)
            #Replace $(SPACE) with a real space character
self.FileName = self.FileName.replace('$(SPACE)', ' ')
SectionFiles = [GenFdsGlobalVariable.MacroExtend(self.FileName, Dict)]
else:
SectionFiles = []
Index = 0
SectionAlignments = []
for section in self.SectionList:
Index = Index + 1
SecIndex = '%d' %Index
# process the inside FvImage from FvSection or GuidSection
if FvChildAddr != []:
if isinstance(section, FvImageSection):
section.FvAddr = FvChildAddr.pop(0)
elif isinstance(section, GuidSection):
section.FvAddr = FvChildAddr
if FvParentAddr and isinstance(section, GuidSection):
section.FvParentAddr = FvParentAddr
if self.KeepReloc == False:
section.KeepReloc = False
sectList, align = section.GenSection(OutputDir, self.NameGuid, SecIndex, self.KeyStringList, None, Dict)
if sectList != []:
for sect in sectList:
SectionFiles.append(sect)
SectionAlignments.append(align)
#
# Prepare the parameter
#
FfsFileOutput = os.path.join(OutputDir, self.NameGuid + '.ffs')
GenFdsGlobalVariable.GenerateFfs(FfsFileOutput, SectionFiles,
FdfFvFileTypeToFileType.get(self.FvFileType),
self.NameGuid,
Fixed=self.Fixed,
CheckSum=self.CheckSum,
Align=self.Alignment,
SectionAlign=SectionAlignments
)
return FfsFileOutput
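
# A minimal sketch, assuming the GenFds globals (FfsDir, the parsed FDF profile) are
# already initialized: generating an FFS file for a FILE statement that wraps one
# prebuilt raw binary. The GUID and path are placeholders.
def _example_raw_file_ffs():
    statement = FileStatement()
    statement.NameGuid = '12345678-1234-1234-1234-123456789ABC'
    statement.FvFileType = 'RAW'
    statement.FileName = 'Build/Example/Payload.bin'
    # GenFfs() resolves workspace macros in FileName and wraps it via GenerateFfs().
    return statement.GenFfs()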
| edk2-master | BaseTools/Source/Python/GenFds/FfsFileStatement.py |
## @file
# process UI section generation
#
# Copyright (c) 2007 - 2017, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from . import Section
from .Ffs import SectionSuffix
import subprocess
import Common.LongFilePathOs as os
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from CommonDataClass.FdfClass import UiSectionClassObject
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.DataType import *
## generate UI section
#
#
class UiSection (UiSectionClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
UiSectionClassObject.__init__(self)
## GenSection() method
#
# Generate UI section
#
# @param self The object pointer
# @param OutputPath Where to place output file
# @param ModuleName Which module this section belongs to
# @param SecNum Index of section
# @param KeyStringList Filter for inputs of section generation
# @param FfsInf FfsInfStatement object that contains this section data
# @param Dict dictionary contains macro and its value
# @retval tuple (Generated file name, section alignment)
#
def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf=None, Dict=None, IsMakefile = False):
#
# Prepare the parameter of GenSection
#
if FfsInf is not None:
self.Alignment = FfsInf.__ExtendMacro__(self.Alignment)
self.StringData = FfsInf.__ExtendMacro__(self.StringData)
self.FileName = FfsInf.__ExtendMacro__(self.FileName)
OutputFile = os.path.join(OutputPath, ModuleName + SUP_MODULE_SEC + SecNum + SectionSuffix.get(BINARY_FILE_TYPE_UI))
if self.StringData is not None :
NameString = self.StringData
elif self.FileName is not None:
if Dict is None:
Dict = {}
FileNameStr = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FileName)
FileNameStr = GenFdsGlobalVariable.MacroExtend(FileNameStr, Dict)
FileObj = open(FileNameStr, 'r')
NameString = FileObj.read()
FileObj.close()
else:
NameString = ''
GenFdsGlobalVariable.GenerateSection(OutputFile, None, 'EFI_SECTION_USER_INTERFACE', Ui=NameString, IsMakefile=IsMakefile)
OutputFileList = []
OutputFileList.append(OutputFile)
return OutputFileList, self.Alignment
| edk2-master | BaseTools/Source/Python/GenFds/UiSection.py |
## @file
# section base class
#
# Copyright (c) 2007-2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from CommonDataClass.FdfClass import SectionClassObject
from .GenFdsGlobalVariable import GenFdsGlobalVariable
import Common.LongFilePathOs as os, glob
from Common import EdkLogger
from Common.BuildToolError import *
from Common.DataType import *
## section base class
#
#
class Section (SectionClassObject):
SectionType = {
'RAW' : 'EFI_SECTION_RAW',
'FREEFORM' : 'EFI_SECTION_FREEFORM_SUBTYPE_GUID',
BINARY_FILE_TYPE_PE32 : 'EFI_SECTION_PE32',
BINARY_FILE_TYPE_PIC : 'EFI_SECTION_PIC',
BINARY_FILE_TYPE_TE : 'EFI_SECTION_TE',
'FV_IMAGE' : 'EFI_SECTION_FIRMWARE_VOLUME_IMAGE',
'COMPAT16' : 'EFI_SECTION_COMPATIBILITY16',
BINARY_FILE_TYPE_DXE_DEPEX : 'EFI_SECTION_DXE_DEPEX',
BINARY_FILE_TYPE_PEI_DEPEX : 'EFI_SECTION_PEI_DEPEX',
'GUIDED' : 'EFI_SECTION_GUID_DEFINED',
'COMPRESS' : 'EFI_SECTION_COMPRESSION',
BINARY_FILE_TYPE_UI : 'EFI_SECTION_USER_INTERFACE',
BINARY_FILE_TYPE_SMM_DEPEX : 'EFI_SECTION_SMM_DEPEX'
}
BinFileType = {
BINARY_FILE_TYPE_GUID : '.guid',
'ACPI' : '.acpi',
'ASL' : '.asl' ,
BINARY_FILE_TYPE_UEFI_APP : '.app',
BINARY_FILE_TYPE_LIB : '.lib',
BINARY_FILE_TYPE_PE32 : '.pe32',
BINARY_FILE_TYPE_PIC : '.pic',
BINARY_FILE_TYPE_PEI_DEPEX : '.depex',
'SEC_PEI_DEPEX' : '.depex',
BINARY_FILE_TYPE_TE : '.te',
BINARY_FILE_TYPE_UNI_VER : '.ver',
BINARY_FILE_TYPE_VER : '.ver',
BINARY_FILE_TYPE_UNI_UI : '.ui',
BINARY_FILE_TYPE_UI : '.ui',
BINARY_FILE_TYPE_BIN : '.bin',
'RAW' : '.raw',
'COMPAT16' : '.comp16',
BINARY_FILE_TYPE_FV : '.fv'
}
SectFileType = {
'SEC_GUID' : '.sec' ,
'SEC_PE32' : '.sec' ,
'SEC_PIC' : '.sec',
'SEC_TE' : '.sec',
'SEC_VER' : '.sec',
'SEC_UI' : '.sec',
'SEC_COMPAT16' : '.sec',
'SEC_BIN' : '.sec'
}
ToolGuid = {
'0xa31280ad-0x481e-0x41b6-0x95e8-0x127f-0x4c984779' : 'TianoCompress',
'0xee4e5898-0x3914-0x4259-0x9d6e-0xdc7b-0xd79403cf' : 'LzmaCompress'
}
## The constructor
#
# @param self The object pointer
#
def __init__(self):
SectionClassObject.__init__(self)
## GenSection() method
#
# virtual function
#
# @param self The object pointer
# @param OutputPath Where to place output file
# @param ModuleName Which module this section belongs to
# @param SecNum Index of section
# @param KeyStringList Filter for inputs of section generation
# @param FfsInf FfsInfStatement object that contains this section data
# @param Dict dictionary contains macro and its value
#
def GenSection(self, OutputPath, GuidName, SecNum, keyStringList, FfsInf = None, Dict = None):
pass
## GetFileList() method
#
    # Get the list of files matching the given file type or file extension
#
# @param self The object pointer
# @param FfsInf FfsInfStatement object that contains file list
# @param FileType File type to get
# @param FileExtension File extension to get
# @param Dict dictionary contains macro and its value
# @retval tuple (File list, boolean)
#
def GetFileList(FfsInf, FileType, FileExtension, Dict = None, IsMakefile=False, SectionType=None):
IsSect = FileType in Section.SectFileType
if FileExtension is not None:
Suffix = FileExtension
elif IsSect :
Suffix = Section.SectionType.get(FileType)
else:
Suffix = Section.BinFileType.get(FileType)
if FfsInf is None:
EdkLogger.error("GenFds", GENFDS_ERROR, 'Inf File does not exist!')
FileList = []
if FileType is not None:
for File in FfsInf.BinFileList:
if File.Arch == TAB_ARCH_COMMON or FfsInf.CurrentArch == File.Arch:
if File.Type == FileType or (int(FfsInf.PiSpecVersion, 16) >= 0x0001000A \
                                                 and FileType == 'DXE_DEPEX' and File.Type == BINARY_FILE_TYPE_SMM_DEPEX) \
or (FileType == BINARY_FILE_TYPE_TE and File.Type == BINARY_FILE_TYPE_PE32):
if TAB_STAR in FfsInf.TargetOverrideList or File.Target == TAB_STAR or File.Target in FfsInf.TargetOverrideList or FfsInf.TargetOverrideList == []:
FileList.append(FfsInf.PatchEfiFile(File.Path, File.Type))
else:
GenFdsGlobalVariable.InfLogger ("\nBuild Target \'%s\' of File %s is not in the Scope of %s specified by INF %s in FDF" %(File.Target, File.File, FfsInf.TargetOverrideList, FfsInf.InfFileName))
else:
GenFdsGlobalVariable.VerboseLogger ("\nFile Type \'%s\' of File %s in %s is not same with file type \'%s\' from Rule in FDF" %(File.Type, File.File, FfsInf.InfFileName, FileType))
else:
GenFdsGlobalVariable.InfLogger ("\nCurrent ARCH \'%s\' of File %s is not in the Support Arch Scope of %s specified by INF %s in FDF" %(FfsInf.CurrentArch, File.File, File.Arch, FfsInf.InfFileName))
elif FileType is None and SectionType == BINARY_FILE_TYPE_RAW:
for File in FfsInf.BinFileList:
if File.Ext == Suffix:
FileList.append(File.Path)
if (not IsMakefile and Suffix is not None and os.path.exists(FfsInf.EfiOutputPath)) or (IsMakefile and Suffix is not None):
if not FileList:
SuffixMap = FfsInf.GetFinalTargetSuffixMap()
if Suffix in SuffixMap:
FileList.extend(SuffixMap[Suffix])
        # Sort the file list alphabetically so files of the same section type are processed in a deterministic order
if len (FileList) > 1:
FileList.sort()
return FileList, IsSect
GetFileList = staticmethod(GetFileList)
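
# A minimal sketch of how the class-level tables above are typically consulted:
# mapping an FDF leaf-section keyword to the EFI section type string passed to
# GenSec. The fallback value is illustrative only.
def _example_efi_section_name(fdf_keyword):
    return Section.SectionType.get(fdf_keyword, 'EFI_SECTION_RAW')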
| edk2-master | BaseTools/Source/Python/GenFds/Section.py |