# === prospector-master/prospect/utils/prospect_args.py ===
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""prospect_args.py - methods to get a default argument parser for prospector.
"""
import argparse
__all__ = ["get_parser", "show_default_args"]
def show_default_args():
parser = get_parser()
parser.print_help()
def get_parser(fitters=["optimize", "emcee", "dynesty"]):
"""Get a default prospector argument parser
"""
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# --- Basic ---
parser.add_argument("--verbose", type=int, default=1,
help="Whether to print lots of stuff")
parser.add_argument("--debug", dest="debug", action="store_true",
help=("If set, halt execution just before optimization and sampling, "
"but after the obs, model, and sps objects have been built."))
parser.set_defaults(debug=False)
parser.add_argument("--outfile", type=str, default="prospector_test_run",
help="Root name (including path) of the output file(s).")
parser.add_argument("--output_pickle", action="store_true",
help="If set, output pickles in addition to HDF5.")
# --- SPS parameters ---
parser.add_argument("--zcontinuous", type=int, default=1,
help=("The type of metallicity parameterization to use. "
"See python-FSPS documentation for details."))
if "optimize" in fitters:
parser = add_optimize_args(parser)
if "emcee" in fitters:
parser = add_emcee_args(parser)
if "dynesty" in fitters:
parser = add_dynesty_args(parser)
return parser
def add_optimize_args(parser):
# --- Optimization ---
parser.add_argument("--optimize", action="store_true",
help="If set, do an optimization before sampling.")
parser.add_argument("--min_method", type=str, default="lm",
help=("The scipy.optimize method to use for minimization."
"One of 'lm' (Levenberg-Marquardt) or 'powell' (powell line-search.)"))
parser.add_argument("--min_opts", type=dict, default={},
help="Minimization parameters. See scipy.optimize.")
parser.add_argument("--nmin", type=int, default=1,
help=("Number of draws from the prior from which to start minimization."
"nmin > 1 can be useful to avoid local minima"))
return parser
def add_emcee_args(parser):
# --- emcee fitting ----
parser.add_argument("--emcee", action="store_true",
help="If set, do ensemble MCMC sampling with emcee.")
parser.add_argument("--nwalkers", type=int, default=64,
help="Number of `emcee` walkers.")
parser.add_argument("--niter", type=int, default=512,
help="Number of iterations in the `emcee` production run")
parser.add_argument("--nburn", type=int, nargs="*", default=[16, 32, 64],
help=("Specify the rounds of burn-in for `emcee` by giving the "
"number of iterations in each round as a list. "
"After each round the walkers are reinitialized based "
"on the locations of the best half of the walkers."))
parser.add_argument("--save_interval", dest="interval", type=float, default=0.2,
help=("Number between 0 and 1 giving the fraction of the `emcee` "
"production run at which to write the current chains to "
"disk. Useful in case an expensive `emcee` run dies."))
parser.add_argument("--restart_from", type=str, default="",
help=("If given, the name of a file that contains a previous "
"`emcee` run from which to try and restart emcee sampling."
"In this case niter gives the number of additional iterations"
" to run for; all other options are ignored "
"(they are taken from the previous run.)"))
parser.add_argument("--ensemble_dispersion", dest="initial_disp", type=float, default=0.1,
help=("Initial dispersion in parameter value for the `emcee` walkers."
" This can be overriden for individual parameters by adding an 'init_disp' "
"key to the parameter specification dictionary for that parameter."))
return parser
def add_dynesty_args(parser):
# --- dynesty parameters ---
parser.add_argument("--dynesty", action="store_true",
help="If set, do nested sampling with dynesty.")
parser.add_argument("--nested_bound", type=str, default="multi",
help=("Method for bounding the prior volume when drawing new points. "
"One of single | multi | balls | cubes"))
parser.add_argument("--nested_method", type=str, default="slice",
help=("Method for drawing new points during sampling. "
"One of unif | rwalk | slice"))
parser.add_argument("--nlive_init", dest="nested_nlive_init", type=int, default=100,
help="Number of live points for the intial nested sampling run.")
parser.add_argument("--nlive_batch", dest="nested_nlive_batch", type=int, default=100,
help="Number of live points for the dynamic nested sampling batches")
parser.add_argument("--nested_dlogz_init", type=float, default=0.05,
help=("Stop the initial run when the remaining evidence is estimated "
"to be less than this."))
parser.add_argument("--nested_maxcall", type=int, default=int(5e7),
help="Maximum number of likelihood calls during nested sampling.")
parser.add_argument("--nested_maxbatch", type=int, default=10,
help="Maximum number of dynamic batches.")
parser.add_argument("--nested_bootstrap", type=int, default=0,
help=("Number of bootstrap resamplings to use when estimating "
"ellipsoid expansion factor."))
parser.add_argument("--nested_posterior_thresh", type=float, default=0.05,
help=("Stop when the fractional scatter in the K-L divergence of the "
"posterior estimates reaches this value"))
return parser
def add_data_args(parser):
# --- data manipulation
# logify_spectrum
# normalize_spectrum
return parser
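
# --- Usage sketch (editorial illustration, not part of the original file) ---
# A minimal example of driving get_parser() from a script; the --zred
# argument and the flag values below are hypothetical.
if __name__ == "__main__":
    parser = get_parser(fitters=["optimize", "emcee"])
    parser.add_argument("--zred", type=float, default=0.0,
                        help="Example of a script-specific argument.")
    args = parser.parse_args(["--emcee", "--nwalkers", "128"])
    run_params = vars(args)  # plain dict: option name -> value
    print(run_params["nwalkers"])  # -> 128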

# === prospector-master/prospect/utils/__init__.py (empty file) ===

# === prospector-master/prospect/utils/smoothing.py ===
# Spectral smoothing functionality
# To do:
# 3) add extra zero-padding for FFT algorithms so they don't go funky at the
# edges?
import numpy as np
from numpy.fft import fft, ifft, fftfreq, rfftfreq
__all__ = ["smoothspec", "smooth_wave", "smooth_vel", "smooth_lsf",
"smooth_wave_fft", "smooth_vel_fft", "smooth_fft", "smooth_lsf_fft",
"mask_wave", "resample_wave"]
ckms = 2.998e5
sigma_to_fwhm = 2.355
def smoothspec(wave, spec, resolution=None, outwave=None,
smoothtype="vel", fftsmooth=True,
min_wave_smooth=0, max_wave_smooth=np.inf, **kwargs):
"""
:param wave:
The wavelength vector of the input spectrum, ndarray. Assumed
angstroms.
:param spec:
The flux vector of the input spectrum, ndarray
:param resolution:
The smoothing parameter. Units depend on ``smoothtype``.
:param outwave:
The output wavelength vector. If ``None`` then the input wavelength
vector will be assumed, though if ``min_wave_smooth`` or
``max_wave_smooth`` are also specified, then the output spectrum may
have different length than ``spec`` or ``wave``, or the convolution may
be strange outside of ``min_wave_smooth`` and ``max_wave_smooth``.
Basically, always set ``outwave`` to be safe.
:param smoothtype: (optional default: "vel")
The type of smoothing to do. One of:
* "vel" - velocity smoothing, ``resolution`` units are in km/s
(dispersion not FWHM)
* "R" - resolution smoothing, ``resolution`` is in units of \lambda/
\sigma(\lambda) (where \sigma(\lambda) is dispersion, not FWHM)
* "lambda" - wavelength smoothing. ``resolution`` is in units of \AA
* "lsf" - line-spread function. Use an aribitrary line spread
function, which can be given as a vector the same length as ``wave``
that gives the dispersion (in AA) at each wavelength. Alternatively,
if ``resolution`` is ``None`` then a line-spread function must be
present as an additional ``lsf`` keyword. In this case all
additional keywords as well as the ``wave`` vector will be passed to
this ``lsf`` function.
:param fftsmooth: (optional, default: True)
Switch to use FFTs to do the smoothing, usually resulting in massive
speedups of all algorithms.
:param min_wave_smooth: (optional default: 0)
The minimum wavelength of the input vector to consider when smoothing
the spectrum. If ``None`` then it is determined from the output
wavelength vector and padded by some multiple of the desired
resolution.
:param max_wave_smooth: (optional default: Inf)
The maximum wavelength of the input vector to consider when smoothing
the spectrum. If None then it is determined from the output wavelength
vector and padded by some multiple of the desired resolution.
:param inres: (optional)
If given, this parameter specifies the resolution of the input. This
resolution is subtracted in quadrature from the target output
resolution before the kernel is formed.
In certain cases this can be used to properly switch from resolution
that is constant in velocity to one that is constant in wavelength,
taking into account the wavelength dependence of the input resolution
when defined in terms of lambda. This is possible iff:
* ``fftsmooth`` is False
* ``smoothtype`` is ``"lambda"``
* The optional ``in_vel`` parameter is supplied and True.
The units of ``inres`` should be the same as the units of
``resolution``, except in the case of switching from velocity to
wavelength resolution, in which case the units of ``inres`` should be
in units of lambda/sigma_lambda.
:param in_vel: (optional)
If supplied and True, the ``inres`` parameter is assumed to be in units
of lambda/sigma_lambda. This parameter is ignored **unless** the
``smoothtype`` is ``"lambda"`` and ``fftsmooth`` is False.
:returns flux:
The smoothed spectrum on the `outwave` grid, ndarray.
"""
if smoothtype == 'vel':
linear = False
units = 'km/s'
sigma = resolution
fwhm = sigma * sigma_to_fwhm
Rsigma = ckms / sigma
R = ckms / fwhm
width = Rsigma
assert np.size(sigma) == 1, "`resolution` must be scalar for `smoothtype`='vel'"
elif smoothtype == 'R':
linear = False
units = 'km/s'
Rsigma = resolution
sigma = ckms / Rsigma
fwhm = sigma * sigma_to_fwhm
R = ckms / fwhm
width = Rsigma
assert np.size(sigma) == 1, "`resolution` must be scalar for `smoothtype`='R'"
# convert inres from Rsigma to sigma (km/s)
try:
kwargs['inres'] = ckms / kwargs['inres']
except(KeyError):
pass
elif smoothtype == 'lambda':
linear = True
units = 'AA'
sigma = resolution
fwhm = sigma * sigma_to_fwhm
Rsigma = None
R = None
width = sigma
assert np.size(sigma) == 1, "`resolution` must be scalar for `smoothtype`='lambda'"
elif smoothtype == 'lsf':
linear = True
width = 100
sigma = resolution
else:
raise ValueError("smoothtype {} is not valid".format(smoothtype))
# Mask the input spectrum depending on outwave or the wave_smooth kwargs
mask = mask_wave(wave, width=width, outwave=outwave, linear=linear,
wlo=min_wave_smooth, whi=max_wave_smooth, **kwargs)
w = wave[mask]
s = spec[mask]
if outwave is None:
outwave = wave
# Choose the smoothing method
if smoothtype == 'lsf':
if fftsmooth:
smooth_method = smooth_lsf_fft
if sigma is not None:
# mask the resolution vector
sigma = resolution[mask]
else:
smooth_method = smooth_lsf
if sigma is not None:
# convert to resolution on the output wavelength grid
sigma = np.interp(outwave, wave, resolution)
elif linear:
if fftsmooth:
smooth_method = smooth_wave_fft
else:
smooth_method = smooth_wave
else:
if fftsmooth:
smooth_method = smooth_vel_fft
else:
smooth_method = smooth_vel
# Actually do the smoothing and return
return smooth_method(w, s, outwave, sigma, **kwargs)
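
# --- Usage sketch (editorial illustration, not part of the original file) ---
# A minimal, self-contained call to smoothspec on a synthetic spectrum,
# broadening to a velocity dispersion of 200 km/s (sigma, not FWHM).
def _demo_smoothspec():
    wave = np.linspace(3800., 7000., 4000)      # angstroms
    flux = 1.0 + 0.1 * np.sin(wave / 50.)       # toy spectrum
    outwave = np.linspace(4000., 6800., 2000)
    return smoothspec(wave, flux, resolution=200., outwave=outwave,
                      smoothtype="vel", fftsmooth=True)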
def smooth_vel(wave, spec, outwave, sigma, nsigma=10, inres=0, **extras):
"""Smooth a spectrum in velocity space. This is insanely slow, but general
and correct.
:param wave:
Wavelength vector of the input spectrum.
:param spec:
Flux vector of the input spectrum.
:param outwave:
Desired output wavelength vector.
:param sigma:
Desired velocity resolution (km/s), *not* FWHM.
:param nsigma:
Number of sigma away from the output wavelength to consider in the
integral. If less than zero, all wavelengths are used. Setting this
        to some positive number decreases the scaling constant in the O(N_out *
N_in) algorithm used here.
:param inres:
The velocity resolution of the input spectrum (km/s), *not* FWHM.
"""
sigma_eff_sq = sigma**2 - inres**2
    if np.any(sigma_eff_sq < 0.0):
        raise ValueError("Desired velocity resolution smaller than the value "
                         "possible for this input spectrum ({} km/s).".format(inres))
# sigma_eff is in units of sigma_lambda / lambda
sigma_eff = np.sqrt(sigma_eff_sq) / ckms
lnwave = np.log(wave)
flux = np.zeros(len(outwave))
for i, w in enumerate(outwave):
x = (np.log(w) - lnwave) / sigma_eff
if nsigma > 0:
good = np.abs(x) < nsigma
x = x[good]
_spec = spec[good]
else:
_spec = spec
f = np.exp(-0.5 * x**2)
flux[i] = np.trapz(f * _spec, x) / np.trapz(f, x)
return flux
def smooth_vel_fft(wavelength, spectrum, outwave, sigma_out, inres=0.0,
**extras):
"""Smooth a spectrum in velocity space, using FFTs. This is fast, but makes
some assumptions about the form of the input spectrum and can have some
issues at the ends of the spectrum depending on how it is padded.
:param wavelength:
Wavelength vector of the input spectrum. An assertion error will result
if this is not a regular grid in wavelength.
:param spectrum:
Flux vector of the input spectrum.
:param outwave:
Desired output wavelength vector.
:param sigma_out:
Desired velocity resolution (km/s), *not* FWHM. Scalar or length 1 array.
:param inres:
The velocity resolution of the input spectrum (km/s), dispersion *not*
FWHM.
"""
# The kernel width for the convolution.
sigma = np.sqrt(sigma_out**2 - inres**2)
if sigma <= 0:
return np.interp(outwave, wavelength, spectrum)
# make length of spectrum a power of 2 by resampling
wave, spec = resample_wave(wavelength, spectrum)
# get grid resolution (*not* the resolution of the input spectrum) and make
# sure it's nearly constant. It should be, by design (see resample_wave)
invRgrid = np.diff(np.log(wave))
assert invRgrid.max() / invRgrid.min() < 1.05
dv = ckms * np.median(invRgrid)
# Do the convolution
spec_conv = smooth_fft(dv, spec, sigma)
# interpolate onto output grid
if outwave is not None:
spec_conv = np.interp(outwave, wave, spec_conv)
return spec_conv
def smooth_wave(wave, spec, outwave, sigma, nsigma=10, inres=0, in_vel=False,
**extras):
"""Smooth a spectrum in wavelength space. This is insanely slow, but
general and correct (except for the treatment of the input resolution if it
is velocity)
:param wave:
Wavelength vector of the input spectrum.
:param spec:
Flux vector of the input spectrum.
:param outwave:
Desired output wavelength vector.
:param sigma:
Desired resolution (*not* FWHM) in wavelength units. This can be a
vector of same length as ``wave``, in which case a wavelength dependent
broadening is calculated
:param nsigma: (optional, default=10)
Number of sigma away from the output wavelength to consider in the
integral. If less than zero, all wavelengths are used. Setting this
        to some positive number decreases the scaling constant in the O(N_out *
N_in) algorithm used here.
:param inres: (optional, default: 0.0)
Resolution of the input, in either wavelength units or
lambda/dlambda (c/v). Ignored if <= 0.
:param in_vel: (optional, default: False)
If True, the input spectrum has been smoothed in velocity
space, and ``inres`` is assumed to be in lambda/dlambda.
:returns flux:
The output smoothed flux vector, same length as ``outwave``.
"""
# sigma_eff is in angstroms
if inres <= 0:
sigma_eff_sq = sigma**2
elif in_vel:
# Make an approximate correction for the intrinsic wavelength
# dependent dispersion. This sort of maybe works.
sigma_eff_sq = sigma**2 - (wave / inres)**2
else:
sigma_eff_sq = sigma**2 - inres**2
if np.any(sigma_eff_sq < 0):
raise ValueError("Desired wavelength sigma is lower than the value "
"possible for this input spectrum.")
sigma_eff = np.sqrt(sigma_eff_sq)
flux = np.zeros(len(outwave))
for i, w in enumerate(outwave):
x = (wave - w) / sigma_eff
if nsigma > 0:
good = np.abs(x) < nsigma
x = x[good]
_spec = spec[good]
else:
_spec = spec
f = np.exp(-0.5 * x**2)
flux[i] = np.trapz(f * _spec, x) / np.trapz(f, x)
return flux
def smooth_wave_fft(wavelength, spectrum, outwave, sigma_out=1.0,
inres=0.0, **extras):
"""Smooth a spectrum in wavelength space, using FFTs. This is fast, but
makes some assumptions about the input spectrum, and can have some
issues at the ends of the spectrum depending on how it is padded.
:param wavelength:
Wavelength vector of the input spectrum.
:param spectrum:
Flux vector of the input spectrum.
:param outwave:
Desired output wavelength vector.
    :param sigma_out:
Desired resolution (*not* FWHM) in wavelength units.
:param inres:
Resolution of the input, in wavelength units (dispersion not FWHM).
:returns flux:
The output smoothed flux vector, same length as ``outwave``.
"""
# restrict wavelength range (for speed)
# should also make nearest power of 2
wave, spec = resample_wave(wavelength, spectrum, linear=True)
# The kernel width for the convolution.
sigma = np.sqrt(sigma_out**2 - inres**2)
    if sigma <= 0:
        return np.interp(outwave, wavelength, spectrum)
# get grid resolution (*not* the resolution of the input spectrum) and make
# sure it's nearly constant. Should be by design (see resample_wave)
Rgrid = np.diff(wave)
assert Rgrid.max() / Rgrid.min() < 1.05
dw = np.median(Rgrid)
# Do the convolution
spec_conv = smooth_fft(dw, spec, sigma)
# interpolate onto output grid
if outwave is not None:
spec_conv = np.interp(outwave, wave, spec_conv)
return spec_conv
def smooth_lsf(wave, spec, outwave, sigma=None, lsf=None, return_kernel=False,
**kwargs):
"""Broaden a spectrum using a wavelength dependent line spread function.
This function is only approximate because it doesn't actually do the
integration over pixels, so for sparsely sampled points you'll have
problems. This function needs to be checked and possibly rewritten.
:param wave:
Input wavelengths. ndarray of shape (nin,)
:param spec:
Input spectrum. ndarray of same shape as ``wave``.
:param outwave:
Output wavelengths, ndarray of shape (nout,)
:param sigma: (optional, default: None)
The dispersion (not FWHM) as a function of wavelength that you want to
apply to the input spectrum. ``None`` or ndarray of same length as
``outwave``. If ``None`` then the wavelength dependent dispersion will be
        calculated from the function supplied with the ``lsf`` keyword.
:param lsf:
A function that returns the gaussian dispersion at each wavelength.
This is assumed to be in sigma, not FWHM.
:param kwargs:
Passed to the function supplied in the ``lsf`` keyword.
:param return_kernel: (optional, default: False)
If True, return the kernel used to broaden the spectrum as ndarray of
shape (nout, nin).
:returns newspec:
The broadened spectrum, same length as ``outwave``.
"""
if (lsf is None) and (sigma is None):
return np.interp(outwave, wave, spec)
dw = np.gradient(wave)
if sigma is None:
sigma = lsf(outwave, **kwargs)
kernel = outwave[:, None] - wave[None, :]
kernel = (1 / (sigma * np.sqrt(np.pi * 2))[:, None] *
np.exp(-kernel**2 / (2 * sigma[:, None]**2)) *
dw[None, :])
# should this be axis=0 or axis=1?
kernel = kernel / kernel.sum(axis=1)[:, None]
newspec = np.dot(kernel, spec)
# kernel /= np.trapz(kernel, wave, axis=1)[:, None]
# newspec = np.trapz(kernel * spec[None, :], wave, axis=1)
if return_kernel:
return newspec, kernel
return newspec
def smooth_lsf_fft(wave, spec, outwave, sigma=None, lsf=None, pix_per_sigma=2,
eps=0.25, preserve_all_input_frequencies=False, **kwargs):
"""Smooth a spectrum by a wavelength dependent line-spread function, using
FFTs.
:param wave:
Wavelength vector of the input spectrum.
    :param spec:
Flux vector of the input spectrum.
:param outwave:
Desired output wavelength vector.
:param sigma: (optional)
Dispersion (in same units as ``wave``) as a function `wave`. ndarray
of same length as ``wave``. If not given, sigma will be computed from
the function provided by the ``lsf`` keyword.
:param lsf: (optional)
Function used to calculate the dispersion as a function of wavelength.
Must be able to take as an argument the ``wave`` vector and any extra
keyword arguments and return the dispersion (in the same units as the
input wavelength vector) at every value of ``wave``. If not provided
then ``sigma`` must be specified.
:param pix_per_sigma: (optional, default: 2)
Number of pixels per sigma of the smoothed spectrum to use in
intermediate interpolation and FFT steps. Increasing this number will
increase the accuracy of the output (to a point), and the run-time, by
preserving all high-frequency information in the input spectrum.
:param preserve_all_input_frequencies: (default: False)
This is a switch to use a very dense sampling of the input spectrum
that preserves all input frequencies. It can significantly increase
the call time for often modest gains...
:param eps: (optional)
Deprecated.
:param **kwargs:
All additional keywords are passed to the function supplied to the
``lsf`` keyword, if present.
:returns flux:
The input spectrum smoothed by the wavelength dependent line-spread
function. Same length as ``outwave``.
"""
# This is sigma vs lambda
if sigma is None:
sigma = lsf(wave, **kwargs)
# Now we need the CDF of 1/sigma, which provides the relationship between x and lambda
# does dw go in numerator or denominator?
# I think numerator but should be tested
dw = np.gradient(wave)
cdf = np.cumsum(dw / sigma)
cdf /= cdf.max()
# Now we create an evenly sampled grid in the x coordinate on the interval [0,1]
# and convert that to lambda using the cdf.
# This should result in some power of two x points, for FFT efficiency
# Furthermore, the number of points should be high enough that the
# resolution is critically sampled. And we want to know what the
# resolution is in this new coordinate.
# There are two possible ways to do this
# 1) Choose a point ~halfway in the spectrum
    # half = len(wave) // 2
    # Now get the x coordinates of a point eps*sigma redder and bluer
    # wave_eps = eps * np.array([-1, 1]) * sigma[half]
    # x_h_eps = np.interp(wave[half] + wave_eps, wave, cdf)
    # Take the differences to get dx and dsigma and ratio to get x per sigma
    # x_per_sigma = np.diff(x_h_eps) / (2.0 * eps)  # x_h_epsilon - x_h
# 2) Get for all points (slower?):
sigma_per_pixel = (dw / sigma)
x_per_pixel = np.gradient(cdf)
x_per_sigma = np.nanmedian(x_per_pixel / sigma_per_pixel)
N = pix_per_sigma / x_per_sigma
# Alternatively, just use the smallest dx of the input, divided by two for safety
# Assumes the input spectrum is critically sampled.
# And does not actually give x_per_sigma, so that has to be determined anyway
if preserve_all_input_frequencies:
# preserve more information in the input spectrum, even when way higher
# frequency than the resolution of the output. Leads to slightly more
# accurate output, but with a substantial time hit
N = max(N, 1.0 / np.nanmin(x_per_pixel))
# Now find the smallest power of two that divides the interval (0, 1) into
# segments that are smaller than dx
nx = int(2**np.ceil(np.log2(N)))
# now evenly sample in the x coordinate
x = np.linspace(0, 1, nx)
dx = 1.0 / nx
# And now we get the spectrum at the lambda coordinates of the even grid in x
lam = np.interp(x, cdf, wave)
newspec = np.interp(lam, wave, spec)
# And now we convolve.
# If we did not know sigma in terms of x we could estimate it here
# from the resulting sigma(lamda(x)) / dlambda(x):
# dlam = np.gradient(lam)
# sigma_x = np.median(lsf(lam, **kwargs) / dlam)
# But the following just uses the fact that we know x_per_sigma (duh).
spec_conv = smooth_fft(dx, newspec, x_per_sigma)
# and interpolate back to the output wavelength grid.
return np.interp(outwave, lam, spec_conv)
def smooth_fft(dx, spec, sigma):
"""Basic math for FFT convolution with a gaussian kernel.
:param dx:
The wavelength or velocity spacing, same units as sigma
:param sigma:
The width of the gaussian kernel, same units as dx
:param spec:
The spectrum flux vector
"""
# The Fourier coordinate
ss = rfftfreq(len(spec), d=dx)
# Make the fourier space taper; just the analytical fft of a gaussian
taper = np.exp(-2 * (np.pi ** 2) * (sigma ** 2) * (ss ** 2))
    ss[0] = 0.01  # hack; has no effect here, since the taper is already computed
# Fourier transform the spectrum
spec_ff = np.fft.rfft(spec)
# Multiply in fourier space
ff_tapered = spec_ff * taper
# Fourier transform back
spec_conv = np.fft.irfft(ff_tapered)
return spec_conv
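
# --- Numerical check (editorial illustration, not part of the original file) ---
# The taper above is the analytic Fourier transform of a unit-area gaussian,
# exp(-2 pi^2 sigma^2 s^2), so smoothing a unit-area spike with smooth_fft
# should reproduce a normalized gaussian of width sigma on the same grid.
def _check_smooth_fft(dx=0.5, sigma=3.0, n=1024):
    x = np.arange(n) * dx
    spike = np.zeros(n)
    spike[n // 2] = 1.0 / dx                  # discrete unit-area delta
    out = smooth_fft(dx, spike, sigma)
    expected = (np.exp(-0.5 * ((x - x[n // 2]) / sigma)**2)
                / (sigma * np.sqrt(2 * np.pi)))
    return np.allclose(out, expected, atol=1e-6)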
def mask_wave(wavelength, width=1, wlo=0, whi=np.inf, outwave=None,
nsigma_pad=20.0, linear=False, **extras):
"""Restrict wavelength range (for speed) but include some padding based on
the desired resolution.
"""
# Base wavelength limits
if outwave is not None:
wlim = np.array([outwave.min(), outwave.max()])
else:
wlim = np.squeeze(np.array([wlo, whi]))
# Pad by nsigma * sigma_wave
if linear:
wlim += nsigma_pad * width * np.array([-1, 1])
else:
wlim *= (1 + nsigma_pad / width * np.array([-1, 1]))
mask = (wavelength > wlim[0]) & (wavelength < wlim[1])
return mask
def resample_wave(wavelength, spectrum, linear=False):
"""Resample spectrum, so that the number of elements is the next highest
power of two. This uses np.interp. Note that if the input wavelength grid
    did not critically sample the spectrum then there is no guarantee the
output wavelength grid will.
"""
wmin, wmax = wavelength.min(), wavelength.max()
nw = len(wavelength)
nnew = int(2.0**(np.ceil(np.log2(nw))))
if linear:
Rgrid = np.diff(wavelength) # in same units as ``wavelength``
w = np.linspace(wmin, wmax, nnew)
else:
Rgrid = np.diff(np.log(wavelength)) # actually 1/R
lnlam = np.linspace(np.log(wmin), np.log(wmax), nnew)
w = np.exp(lnlam)
# Make sure the resolution really is nearly constant
#assert Rgrid.max() / Rgrid.min() < 1.05
s = np.interp(w, wavelength, spectrum)
return w, s
def subtract_input_resolution(res_in, res_target, smoothtype_in, smoothtype_target, wave=None):
"""Subtract the input resolution (in quadrature) from a target output
resolution to get the width of the kernel that will convolve the input to
the output. Assumes all convolutions are with gaussians.
"""
if smoothtype_in == "R":
width_in = 1.0 / res_in
else:
width_in = res_in
if smoothtype_target == "R":
width_target = 1.0 / res_target
else:
width_target = res_target
if smoothtype_in == smoothtype_target:
dwidth_sq = width_target**2 - width_in**2
elif (smoothtype_in == "vel") & (smoothype_target == "lambda"):
dwidth_sq = width_target**2 - (wave * width_in / ckms)**2
elif (smoothtype_in == "R") & (smoothype_target == "lambda"):
dwidth_sq = width_target**2 - (wave * width_in)**2
elif (smoothtype_in == "lambda") & (smoothtype_target == "vel"):
dwidth_sq = width_target**2 - (ckms * width_in / wave)**2
elif (smoothtype_in == "lambda") & (smoothtype_target == "R"):
dwidth_sq = width_target**2 - (width_in / wave)**2
elif (smoothtype_in == "R") & (smoothtype_target == "vel"):
print("srsly?")
return None
elif (smoothtype_in == "vel") & (smoothtype_target == "R"):
print("srsly?")
return None
if np.any(dwidth_sq <= 0):
print("Warning: Desired resolution is better than input resolution")
dwidth_sq = np.clip(dwidth_sq, 0, np.inf)
if smoothtype_target == "R":
return 1.0 / np.sqrt(dwidth_sq)
else:
return np.sqrt(dwidth_sq)
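
# --- Usage sketch (editorial illustration, not part of the original file) ---
# Example: the input spectrum has a resolution quoted as R = lambda/sigma = 5000
# and we want a constant sigma = 2 AA output; the returned array is the
# wavelength-dependent kernel sigma (in AA) to convolve with.
def _demo_subtract_resolution():
    wave = np.linspace(4000., 7000., 100)
    return subtract_input_resolution(5000., 2.0, "R", "lambda", wave=wave)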

# === prospector-master/prospect/likelihood/likelihood.py ===
import time, sys, os
import numpy as np
from scipy.linalg import LinAlgError
__all__ = ["lnlike_spec", "lnlike_phot", "chi_spec", "chi_phot", "write_log"]
def lnlike_spec(spec_mu, obs=None, spec_noise=None, f_outlier_spec=0.0, **vectors):
"""Calculate the likelihood of the spectroscopic data given the
spectroscopic model. Allows for the use of a gaussian process
covariance matrix for multiplicative residuals.
:param spec_mu:
The mean model spectrum, in linear or logarithmic units, including
e.g. calibration and sky emission.
:param obs: (optional)
A dictionary of the observational data, including the keys
*``spectrum`` a numpy array of the observed spectrum, in linear or
logarithmic units (same as ``spec_mu``).
*``unc`` the uncertainty of same length as ``spectrum``
*``mask`` optional boolean array of same length as ``spectrum``
*``wavelength`` if using a GP, the metric that is used in the
kernel generation, of same length as ``spectrum`` and typically
giving the wavelength array.
:param spec_noise: (optional)
A NoiseModel object with the methods `compute` and `lnlikelihood`.
If ``spec_noise`` is supplied, the `wavelength` entry in the obs
dictionary must exist.
:param f_outlier_spec: (optional)
The fraction of spectral pixels which are considered outliers
by the mixture model
:param vectors: (optional)
        A dictionary of vectors of same length as ``wavelength`` giving
        possible weighting functions for the kernels
    :returns lnlikelihood:
The natural logarithm of the likelihood of the data given the mean
model spectrum.
"""
if obs['spectrum'] is None:
return 0.0
mask = obs.get('mask', slice(None))
vectors['mask'] = mask
vectors['wavelength'] = obs['wavelength']
delta = (obs['spectrum'] - spec_mu)[mask]
var = (obs['unc'][mask])**2
if spec_noise is not None:
try:
spec_noise.compute(**vectors)
if (f_outlier_spec == 0.0):
return spec_noise.lnlikelihood(delta)
# disallow (correlated noise model + mixture model)
# and redefine errors
assert spec_noise.Sigma.ndim == 1
var = spec_noise.Sigma
except(LinAlgError):
return np.nan_to_num(-np.inf)
lnp = -0.5*( (delta**2/var) + np.log(2*np.pi*var) )
if (f_outlier_spec == 0.0):
return lnp.sum()
else:
var_bad = var * (vectors["nsigma_outlier_spec"]**2)
lnp_bad = -0.5*( (delta**2/var_bad) + np.log(2*np.pi*var_bad) )
lnp_tot = np.logaddexp(lnp + np.log(1-f_outlier_spec), lnp_bad + np.log(f_outlier_spec))
return lnp_tot.sum()
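
# --- Numerical check (editorial illustration, not part of the original file) ---
# The outlier mixture above evaluates, per pixel,
#   ln[(1 - f) N(delta; 0, var) + f N(delta; 0, nsigma^2 * var)]
# via logaddexp; a tiny self-contained check of that identity:
def _check_outlier_mixture(delta=0.5, var=0.04, f=0.05, nsigma=50.0):
    lnp_good = -0.5 * (delta**2 / var + np.log(2 * np.pi * var))
    var_bad = var * nsigma**2
    lnp_bad = -0.5 * (delta**2 / var_bad + np.log(2 * np.pi * var_bad))
    lnp_mix = np.logaddexp(lnp_good + np.log(1 - f), lnp_bad + np.log(f))
    direct = np.log((1 - f) * np.exp(lnp_good) + f * np.exp(lnp_bad))
    return np.isclose(lnp_mix, direct)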
def lnlike_phot(phot_mu, obs=None, phot_noise=None, f_outlier_phot=0.0, **vectors):
"""Calculate the likelihood of the photometric data given the spectroscopic
model. Allows for the use of a gaussian process covariance matrix.
:param phot_mu:
The mean model sed, in linear flux units (i.e. maggies).
:param obs: (optional)
A dictionary of the observational data, including the keys
*``maggies`` a numpy array of the observed SED, in linear flux
units
*``maggies_unc`` the uncertainty of same length as ``maggies``
*``phot_mask`` optional boolean array of same length as
``maggies``
*``filters`` optional list of sedpy.observate.Filter objects,
necessary if using fixed filter groups with different gp
amplitudes for each group.
If not supplied then the obs dictionary given at initialization will
be used.
:param phot_noise: (optional)
A ``prospect.likelihood.NoiseModel`` object with the methods
``compute()`` and ``lnlikelihood()``. If not supplied a simple chi^2
likelihood will be evaluated.
:param f_outlier_phot: (optional)
The fraction of photometric bands which are considered outliers
by the mixture model
:param vectors:
A dictionary of possibly relevant vectors of same length as maggies
that will be passed to the NoiseModel object for constructing weighted
covariance matrices.
    :returns lnlikelihood:
The natural logarithm of the likelihood of the data given the mean
model spectrum.
"""
if obs['maggies'] is None:
return 0.0
mask = obs.get('phot_mask', slice(None))
delta = (obs['maggies'] - phot_mu)[mask]
var = (obs['maggies_unc'][mask])**2
if phot_noise is not None:
filternames = [f.name for f in obs['filters']]
vectors['mask'] = mask
vectors['filternames'] = np.array(filternames)
try:
phot_noise.compute(**vectors)
if (f_outlier_phot == 0.0):
return phot_noise.lnlikelihood(delta)
# disallow (correlated noise model + mixture model)
# and redefine errors
assert phot_noise.Sigma.ndim == 1
var = phot_noise.Sigma
except(LinAlgError):
return np.nan_to_num(-np.inf)
# simple noise model
lnp = -0.5*( (delta**2/var) + np.log(2*np.pi*var) )
if (f_outlier_phot == 0.0):
return lnp.sum()
else:
var_bad = var * (vectors["nsigma_outlier_phot"]**2)
lnp_bad = -0.5*( (delta**2/var_bad) + np.log(2*np.pi*var_bad) )
lnp_tot = np.logaddexp(lnp + np.log(1-f_outlier_phot), lnp_bad + np.log(f_outlier_phot))
return lnp_tot.sum()
def chi_phot(phot_mu, obs, **extras):
"""Return a vector of chi values, for use in non-linear least-squares
algorithms.
:param phot_mu:
Model photometry, same units as the photometry in `obs`.
:param obs:
An observational data dictionary, with the keys ``"maggies"`` and
``"maggies_unc"``. If ``"maggies"`` is None then an empty array is
returned.
:returns chi:
An array of noise weighted residuals, same length as the number of
        unmasked photometric points.
"""
if obs['maggies'] is None:
return np.array([])
mask = obs.get('phot_mask', slice(None))
delta = (obs['maggies'] - phot_mu)[mask]
unc = obs['maggies_unc'][mask]
chi = delta / unc
return chi
def chi_spec(spec_mu, obs, **extras):
"""Return a vector of chi values, for use in non-linear least-squares
algorithms.
:param spec_mu:
        Model spectrum, same units as the spectrum in `obs`.
:param obs:
An observational data dictionary, with the keys ``"spectrum"`` and
``"unc"``. If ``"spectrum"`` is None then an empty array is returned.
        Optionally a ``"mask"`` boolean vector may be supplied that will be used
to index the residual vector.
:returns chi:
An array of noise weighted residuals, same length as the number of
unmasked spectroscopic points.
"""
if obs['spectrum'] is None:
return np.array([])
mask = obs.get('mask', slice(None))
delta = (obs['spectrum'] - spec_mu)[mask]
unc = obs['unc'][mask]
chi = delta / unc
return chi
def write_log(theta, lnp_prior, lnp_spec, lnp_phot, d1, d2):
"""Write all sorts of documentary info for debugging.
"""
print(theta)
    print('model calc = {0}s, lnlike calc = {1}s'.format(d1, d2))
fstring = 'lnp = {0}, lnp_spec = {1}, lnp_phot = {2}'
values = [lnp_spec + lnp_phot + lnp_prior, lnp_spec, lnp_phot]
print(fstring.format(*values))
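
# --- Usage sketch (editorial illustration, not part of the original file) ---
# Calling lnlike_phot directly on synthetic photometry; with phot_noise=None
# the simple chi^2 branch is used.
def _demo_lnlike_phot():
    obs = {"maggies": np.array([1.0e-8, 2.0e-8, 1.5e-8]),
           "maggies_unc": np.array([1.0e-9, 2.0e-9, 1.5e-9]),
           "phot_mask": np.array([True, True, False])}
    phot_mu = np.array([1.1e-8, 1.9e-8, 9.9e-9])
    return lnlike_phot(phot_mu, obs=obs)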

# === prospector-master/prospect/likelihood/kernels.py ===
import numpy as np
__all__ = ["Kernel", "Uncorrelated", "ExpSquared", "Matern", "PhotoCal"]
class Kernel(object):
def __init__(self, parnames=[], name=''):
"""
:param parnames:
A list of names of the kernel params, used to alias the intrinsic
parameter names. This way different instances of the same kernel
can have different parameter names.
"""
if len(parnames) == 0:
parnames = self.kernel_params
assert len(parnames) == len(self.kernel_params)
self.param_alias = dict(zip(self.kernel_params, parnames))
self.params = {}
self.name = name
def __repr__(self):
return '{}({})'.format(self.__class__, self.param_alias.items())
def update(self, **kwargs):
"""Take a dictionary of parameters, pick out the properly named
parameters according to the alias, and put them in the param state
dictionary.
"""
for k in self.kernel_params:
self.params[k] = kwargs[self.param_alias[k]]
def __call__(self, metric, weights=None, ndim=2, **extras):
"""Return a covariance matrix, given a metric. Optionally, multiply
the output kernel by a weight function to induce non-stationarity.
"""
k = self.construct_kernel(metric)
if ndim != k.ndim:
# Either promote to 2 dimensions or demote to 1.
# The latter should never happen...
k = np.diag(k)
if weights is None:
return k
elif ndim == 2:
Sigma = weights[None, :] * k * weights[:, None]
else:
Sigma = k * weights**2
return Sigma
class Uncorrelated(Kernel):
# Simple uncorrelated noise model
ndim = 1
kernel_params = ['amplitude']
def construct_kernel(self, metric):
s = metric.shape[0]
jitter = self.params['amplitude']**2 * np.ones(s)
if metric.ndim == 2:
return np.diag(jitter)
elif metric.ndim == 1:
return jitter
else:
raise(NotImplementedError)
class ExpSquared(Kernel):
ndim = 2
npars = 2
kernel_params = ['amplitude', 'length']
def construct_kernel(self, metric):
"""Construct an exponential squared covariance matrix.
"""
a, l = self.params['amplitude'], self.params['length']
Sigma = a**2 * np.exp(-(metric[:, None] - metric[None, :])**2 / (2 * l**2))
return Sigma
class Matern(Kernel):
ndim = 2
npars = 2
kernel_params = ['amplitude', 'length']
def construct_kernel(self, metric):
"""Construct a Matern kernel covariance matrix, for \nu=3/2.
"""
a, l = self.params['amplitude'], self.params['length']
Sigma = np.sqrt(3) * np.abs(metric[:, None] - metric[None, :]) / l
Sigma = a**2 * (1 + Sigma) * np.exp(-Sigma)
return Sigma
class PhotoCal(Kernel):
ndim = 2
npars = 2
kernel_params = ['amplitude', 'filter_names']
def construct_kernel(self, metric):
""" This adds correlated noise in specified bands of photometry
"""
        k = np.array([f in self.params["filter_names"] for f in metric])
        K = k[:, None] * k[None, :]  # outer product: nonzero only where both bands are in filter_names
return K * self.params["amplitude"]**2
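
# --- Usage sketch (editorial illustration, not part of the original file) ---
# Combining two kernels on a wavelength metric; the parameter aliases
# ("spec_jitter", "gp_amp", "gp_scale") are hypothetical names.
def _demo_kernels():
    metric = np.linspace(5000., 5100., 5)            # e.g. wavelengths in AA
    jitter = Uncorrelated(parnames=["spec_jitter"])
    jitter.update(spec_jitter=0.1)
    expsq = ExpSquared(parnames=["gp_amp", "gp_scale"])
    expsq.update(gp_amp=0.05, gp_scale=20.)
    # ndim=2 promotes the diagonal jitter term so the two can be summed
    return jitter(metric, ndim=2) + expsq(metric, ndim=2)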

# === prospector-master/prospect/likelihood/__init__.py ===
from .likelihood import *
from .noise_model import *
__all__ = ["lnlike_spec", "lnlike_phot", "NoiseModel"]

# === prospector-master/prospect/likelihood/noise_model.py ===
import numpy as np
from scipy.linalg import cho_factor, cho_solve
__all__ = ["NoiseModel"]
class NoiseModel(object):
def __init__(self, metric_name='', mask_name='mask', kernels=[],
weight_by=[]):
assert len(kernels) == len(weight_by)
self.kernels = kernels
self.weight_names = weight_by
self.metric_name = metric_name
self.mask_name = mask_name
def update(self, **params):
[k.update(**params) for k in self.kernels]
def construct_covariance(self, **vectors):
"""Construct a covariance matrix from a metric, a list of kernel
objects, and a list of weight vectors (of same length as the metric)
"""
metric = vectors[self.metric_name]
        mask = vectors.get(self.mask_name, slice(None))  # honor the configured mask name
# 1 = uncorrelated errors, 2 = covariance matrix, >2 undefined
ndmax = np.array([k.ndim for k in self.kernels]).max()
Sigma = np.zeros(ndmax * [metric[mask].shape[0]])
weight_vectors = self.get_weights(**vectors)
for i, (kernel, wght) in enumerate(zip(self.kernels, weight_vectors)):
Sigma += kernel(metric[mask], weights=wght, ndim=ndmax)
return Sigma
def get_weights(self, **vectors):
"""From a dictionary of vectors that give weights, pull the vectors
that correspond to each kernel, as stored in the `weight_names`
attribute. A None vector will result in None weights
"""
mask = vectors.get(self.mask_name, slice(None))
wghts = []
for w in self.weight_names:
if vectors[w] is None:
wghts += [None]
else:
wghts.append(vectors[w][mask])
return wghts
def compute(self, check_finite=False, **vectors):
"""Build and cache the covariance matrix, and if it is 2-d factorize it
and cache that. Also cache ``log_det``.
"""
self.Sigma = self.construct_covariance(**vectors)
if self.Sigma.ndim > 1:
self.factorized_Sigma = cho_factor(self.Sigma, overwrite_a=True,
check_finite=check_finite)
self.log_det = 2 * np.sum(np.log(np.diag(self.factorized_Sigma[0])))
assert np.isfinite(self.log_det)
else:
self.log_det = np.sum(np.log(self.Sigma))
def lnlikelihood(self, residual, check_finite=False, **extras):
"""Compute the ln of the likelihood, using the current factorized
covariance matrix.
:param residual: ndarray, shape (nwave,)
Vector of residuals (y_data - mean_model).
"""
n = len(residual)
assert n == self.Sigma.shape[0]
if self.Sigma.ndim > 1:
first_term = np.dot(residual, cho_solve(self.factorized_Sigma,
residual, check_finite=check_finite))
else:
first_term = np.dot(residual**2, 1.0/self.Sigma)
lnlike = -0.5 * (first_term + self.log_det + n * np.log(2.*np.pi))
return lnlike
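
# --- Usage sketch (editorial illustration, not part of the original file) ---
# Wiring a kernel into a NoiseModel; weighting the jitter by the reported
# uncertainties is an illustrative choice, and "spec_jitter" is a
# hypothetical parameter alias.
def _demo_noise_model():
    from prospect.likelihood.kernels import Uncorrelated
    kernel = Uncorrelated(parnames=["spec_jitter"])
    noise = NoiseModel(metric_name="wavelength", mask_name="mask",
                       kernels=[kernel], weight_by=["unc"])
    noise.update(spec_jitter=2.0)
    wave = np.linspace(5000., 5100., 50)
    unc = np.full(50, 0.01)
    noise.compute(wavelength=wave, unc=unc, mask=np.ones(50, dtype=bool))
    return noise.lnlikelihood(np.random.normal(0., 0.02, 50))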

# === prospector-master/scripts/prospector_parse.py ===
import sys
from copy import deepcopy
import numpy as np
from prospect.utils import prospect_args
from prospect.fitting import fit_model
from prospect.io import write_results as writer
def build_model(zred=0.0, add_neb=True, **extras):
"""Instantiate and return a ProspectorParams model subclass (SedModel). In
this example the model Is a simple parameteric SFH (delay-tau) with a Kriek
& Conroy attenuation model (fixed slope) and a fixed dust emission
parameters. Nebular emission is optionally added.
:param zred: (optional, default: 0.1)
The redshift of the model
:param add_neb: (optional, default: False)
If True, turn on nebular emission and add relevant parameters to the
model.
:returns mod:
A SedModel instance
"""
# --- Get a basic delay-tau SFH parameter set. ---
from prospect.models import SedModel
from prospect.models.templates import TemplateLibrary
model_params = TemplateLibrary["parametric_sfh"]
# --- Augment the basic model ----
model_params.update(TemplateLibrary["burst_sfh"])
model_params.update(TemplateLibrary["dust_emission"])
if add_neb:
model_params.update(TemplateLibrary["nebular"])
# Switch to Kriek and Conroy 2013 for dust
model_params["dust_type"] = {'N': 1, 'isfree': False,
'init': 4, 'prior': None}
model_params["dust_index"] = {'N': 1, 'isfree': False,
'init': 0.0, 'prior': None}
# --- Set dispersions for emcee ---
model_params["mass"]["init_disp"] = 1e8
model_params["mass"]["disp_floor"] = 1e7
# --- Set initial values ---
model_params["zred"]["init"] = zred
return SedModel(model_params)
def build_sps(zcontinuous=1, **extras):
"""Instantiate and return the Stellar Population Synthesis object. In this
case, with the parameteric SFH model, we want to use the CSPSpecBasis.
:param zcontinuous: (default: 1)
python-fsps parameter controlling how metallicity interpolation of the
        SSPs is achieved. A value of `1` is recommended.
:returns sps:
An *sps* object.
"""
from prospect.sources import CSPSpecBasis
sps = CSPSpecBasis(zcontinuous=zcontinuous,
compute_vega_mags=False)
return sps
def build_obs(filterlist=["sdss_r0"], snr=10,
add_noise=True, seed=0, **run_params):
"""Build a mock observation
"""
from sedpy import observate
filters = observate.load_filters(filterlist)
mock = {"wavelength": None, "spectrum": None, "filters": filters}
# Build the mock model
sp = build_sps(**run_params)
mod = build_model(**run_params)
spec, phot, x = mod.mean_model(mod.theta, mock, sps=sp)
# Add to dict with uncertainties
pnoise_sigma = phot / snr
mock['maggies'] = phot.copy()
mock['maggies_unc'] = pnoise_sigma
# And add noise
if add_noise:
if int(seed) > 0:
np.random.seed(int(seed))
pnoise = np.random.normal(0, 1, len(phot)) * pnoise_sigma
mock['maggies'] += pnoise
# Ancillary info
mock['true_spectrum'] = spec.copy()
mock['true_maggies'] = phot.copy()
mock['mock_params'] = deepcopy(mod.params)
mock['mock_snr_phot'] = snr
mock['phot_wave'] = np.array([f.wave_effective for f in mock['filters']])
obs = mock
return obs
def build_noise(**run_params):
    """Build and return NoiseModel objects for the spectroscopy and
    photometry (none are used in this example).
    """
    return None, None
def build_all(**kwargs):
return (build_obs(**kwargs), build_model(**kwargs),
build_sps(**kwargs), build_noise(**kwargs))
def setup_h5(emcee=False, outfile=None, model=None, obs=None, **extras):
"""If fitting with emcee, open an hdf5 file and write model, data, and meta
parameters to the file. Emcee can then write incrementally to the open
    file. If not fitting with emcee, just get a timestamped filename.
:param emcee: (optional, default: False)
Boolean switch indicating whether emcee sampling is to be performed.
:returns hfile:
If `emcee` is True, this is an open :py:class:`h5py.File` handle.
Otherwise, it is the timestamped default hdf5 filename
"""
import os, time
# Try to set up an HDF5 file and write basic info to it
outroot = "{0}_{1}".format(outfile, int(time.time()))
odir = os.path.dirname(os.path.abspath(outroot))
if (not os.path.exists(odir)):
        # `halt` is not defined in this script; exit via sys (imported above)
        sys.exit('Target output directory {} does not exist, please make it.'.format(odir))
hfilename = '{}_mcmc.h5'.format(outroot)
if not emcee:
return hfilename
else:
import h5py
hfile = h5py.File(hfilename, "a")
print("Writing to file {}".format(hfilename))
writer.write_h5_header(hfile, run_params, model)
writer.write_obs_to_h5(hfile, obs)
return hfile
if __name__ == '__main__':
# - Parser with default arguments -
parser = prospect_args.get_parser()
# - Add custom arguments -
parser.add_argument('--zred', type=float, default=0.1,
help="redshift for the model")
parser.add_argument('--add_neb', action="store_true",
help="If set, add nebular emission in the model")
parser.add_argument('--add_noise', action="store_true",
help="If set, noise up the mock")
parser.add_argument('--snr', type=float, default=20,
help="S/N ratio for the mock photometry")
args = parser.parse_args()
run_params = vars(args)
obs, model, sps, noise = build_all(**run_params)
run_params["param_file"] = __file__
hfile = setup_h5(model=model, obs=obs, **run_params)
output = fit_model(obs, model, sps, noise, **run_params)
writer.write_hdf5(hfile, run_params, model, obs,
output["sampling"][0], output["optimization"][0],
tsample=output["sampling"][1],
toptimize=output["optimization"][1])
try:
hfile.close()
except(AttributeError):
pass
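
# --- Usage note (editorial illustration, not part of the original file) ---
# An example invocation; every flag below is defined either by
# prospect_args.get_parser() or by the arguments added above:
#   python prospector_parse.py --emcee --nwalkers 64 --niter 512 \
#          --add_neb --add_noise --snr 20 --zred 0.1 --outfile mock_run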

# === prospector-master/scripts/prospector_restart.py ===
#!/usr/local/bin/python
import time, sys, os
import numpy as np
np.seterr(invalid='ignore')  # errstate is a context manager; seterr applies globally
from prospect.models import model_setup
from prospect.io import write_results
from prospect.io import read_results as pr
from prospect import fitting
from prospect.likelihood import lnlike_spec, lnlike_phot, write_log, chi_spec, chi_phot
# --------------
# Read command line arguments
# --------------
sargv = sys.argv
argdict = {'restart_from': '', 'niter': 1024}
clargs = model_setup.parse_args(sargv, argdict=argdict)
# ----------
# Result object and Globals
# ----------
result, global_obs, global_model = pr.results_from(clargs["restart_from"])
is_emcee = (len(result["chain"].shape) == 3) & (result["chain"].shape[0] > 1)
assert is_emcee, "Result file does not have a chain of the proper shape."
# SPS Model instance (with libraries check)
sps = pr.get_sps(result)
run_params = result["run_params"]
run_params.update(clargs)
# Noise model (this should be doable via read_results)
from prospect.models.model_setup import import_module_from_string
param_file = (result['run_params'].get('param_file', ''),
result.get("paramfile_text", ''))
path, filename = os.path.split(param_file[0])
modname = filename.replace('.py', '')
user_module = import_module_from_string(param_file[1], modname)
spec_noise, phot_noise = user_module.load_gp(**run_params)
# -----------------
# LnP function as global
# ------------------
def lnprobfn(theta, model=None, obs=None, residuals=False,
verbose=run_params['verbose']):
"""Given a parameter vector and optionally a dictionary of observational
ata and a model object, return the ln of the posterior. This requires that
an sps object (and if using spectra and gaussian processes, a GP object) be
instantiated.
:param theta:
Input parameter vector, ndarray of shape (ndim,)
:param model:
bsfh.sedmodel model object, with attributes including ``params``, a
dictionary of model parameters. It must also have ``prior_product()``,
and ``mean_model()`` methods defined.
:param obs:
A dictionary of observational data. The keys should be
*``wavelength``
*``spectrum``
*``unc``
*``maggies``
*``maggies_unc``
*``filters``
* and optional spectroscopic ``mask`` and ``phot_mask``.
:returns lnp:
Ln posterior probability.
"""
if model is None:
model = global_model
if obs is None:
obs = global_obs
# Calculate prior probability and exit if not within prior
lnp_prior = model.prior_product(theta)
if not np.isfinite(lnp_prior):
return -np.infty
# Generate mean model
t1 = time.time()
try:
spec, phot, x = model.mean_model(theta, obs, sps=sps)
except(ValueError):
return -np.infty
d1 = time.time() - t1
# Return chi vectors for least-squares optimization
if residuals:
chispec = chi_spec(spec, obs)
chiphot = chi_phot(phot, obs)
return np.concatenate([chispec, chiphot])
# Noise modeling
if spec_noise is not None:
spec_noise.update(**model.params)
if phot_noise is not None:
phot_noise.update(**model.params)
vectors = {'spec': spec, 'unc': obs['unc'],
'sed': model._spec, 'cal': model._speccal,
'phot': phot, 'maggies_unc': obs['maggies_unc']}
# Calculate likelihoods
t2 = time.time()
lnp_spec = lnlike_spec(spec, obs=obs, spec_noise=spec_noise, **vectors)
lnp_phot = lnlike_phot(phot, obs=obs, phot_noise=phot_noise, **vectors)
d2 = time.time() - t2
if verbose:
write_log(theta, lnp_prior, lnp_spec, lnp_phot, d1, d2)
return lnp_prior + lnp_phot + lnp_spec
# -----------------
# MPI pool. This must be done *after* lnprob and
# chi2 are defined since slaves will only see up to
# sys.exit()
# ------------------
try:
from emcee.utils import MPIPool
pool = MPIPool(debug=False, loadbalance=True)
if not pool.is_master():
# Wait for instructions from the master process.
pool.wait()
sys.exit(0)
except(ImportError, ValueError):
pool = None
print('Not using MPI')
def halt(message):
"""Exit, closing pool safely.
"""
print(message)
try:
pool.close()
except:
pass
sys.exit(0)
# --------------
# Master branch
# --------------
if __name__ == "__main__":
# --------------
# Setup
# --------------
rp = run_params
rp['sys.argv'] = sys.argv
try:
rp['sps_libraries'] = sps.ssp.libraries
except(AttributeError):
rp['sps_libraries'] = None
# Use the globals
model = global_model
obsdat = global_obs
postkwargs = {}
# make zeros into tiny numbers
initial_theta = model.rectify_theta(model.initial_theta)
if rp.get('debug', False):
halt('stopping for debug')
# Try to set up an HDF5 file and write basic info to it
outroot = "{}_restart_{}".format(rp['outfile'], int(time.time()))
odir = os.path.dirname(os.path.abspath(outroot))
if (not os.path.exists(odir)):
halt('Target output directory {} does not exist, please make it.'.format(odir))
try:
import h5py
hfilename = outroot + '_mcmc.h5'
hfile = h5py.File(hfilename, "a")
print("Writing to file {}".format(hfilename))
write_results.write_h5_header(hfile, run_params, model)
write_results.write_obs_to_h5(hfile, obsdat)
except(ImportError):
hfile = None
# -----------------------------------------
# Initial guesses from end of last chain
# -----------------------------------------
initial_positions = result["chain"][:, -1, :]
guesses = None
initial_center = initial_positions.mean(axis=0)
# ---------------------
# Sampling
# -----------------------
if rp['verbose']:
print('emcee sampling...')
tstart = time.time()
out = fitting.restart_emcee_sampler(lnprobfn, initial_positions,
postkwargs=postkwargs,
pool=pool, hdf5=hfile, **rp)
esampler = out
edur = time.time() - tstart
if rp['verbose']:
print('done emcee in {0}s'.format(edur))
# -------------------------
# Output HDF5 (and pickles if asked for)
# -------------------------
print("Writing to {}".format(outroot))
if rp.get("output_pickles", False):
write_results.write_pickles(rp, model, obsdat, esampler, guesses,
outroot=outroot, toptimize=0, tsample=edur,
sampling_initial_center=initial_center)
if hfile is None:
hfile = hfilename
write_results.write_hdf5(hfile, rp, model, obsdat, esampler, guesses,
toptimize=0, tsample=edur,
sampling_initial_center=initial_center)
try:
hfile.close()
except:
pass
halt('Finished')

# === prospector-master/scripts/prospector_dynesty.py ===
import time, sys, os
import numpy as np
np.seterr(invalid='ignore')  # errstate is a context manager; seterr applies globally
from prospect.models import model_setup
from prospect.io import write_results
from prospect import fitting
from prospect.likelihood import lnlike_spec, lnlike_phot, write_log
from dynesty.dynamicsampler import stopping_function, weight_function, _kld_error
from dynesty.utils import *
# --------------
# Read command line arguments
# --------------
sargv = sys.argv
argdict = {'param_file': ''}
clargs = model_setup.parse_args(sargv, argdict=argdict)
run_params = model_setup.get_run_params(argv=sargv, **clargs)
# --------------
# Globals
# --------------
# GP instances as global
spec_noise, phot_noise = model_setup.load_gp(**run_params)
# Model as global
global_model = model_setup.load_model(**run_params)
# Obs as global
global_obs = model_setup.load_obs(**run_params)
# SPS Model instance as global
sps = model_setup.load_sps(**run_params)
# -----------------
# LnP function as global
# ------------------
def lnprobfn(theta, model=None, obs=None, verbose=run_params['verbose']):
"""Given a parameter vector and optionally a dictionary of observational
ata and a model object, return the ln of the posterior. This requires that
an sps object (and if using spectra and gaussian processes, a GP object) be
instantiated.
:param theta:
Input parameter vector, ndarray of shape (ndim,)
:param model:
bsfh.sedmodel model object, with attributes including ``params``, a
dictionary of model parameters. It must also have ``prior_product()``,
and ``mean_model()`` methods defined.
:param obs:
A dictionary of observational data. The keys should be
*``wavelength``
*``spectrum``
*``unc``
*``maggies``
*``maggies_unc``
*``filters``
* and optional spectroscopic ``mask`` and ``phot_mask``.
:returns lnp:
Ln posterior probability.
"""
if model is None:
model = global_model
if obs is None:
obs = global_obs
lnp_prior = model.prior_product(theta, nested=True)
if np.isfinite(lnp_prior):
# Generate mean model
try:
mu, phot, x = model.mean_model(theta, obs, sps=sps)
except(ValueError):
return -np.infty
# Noise modeling
if spec_noise is not None:
spec_noise.update(**model.params)
if phot_noise is not None:
phot_noise.update(**model.params)
vectors = {'spec': mu, 'unc': obs['unc'],
'sed': model._spec, 'cal': model._speccal,
'phot': phot, 'maggies_unc': obs['maggies_unc']}
# Calculate likelihoods
lnp_spec = lnlike_spec(mu, obs=obs, spec_noise=spec_noise, **vectors)
lnp_phot = lnlike_phot(phot, obs=obs, phot_noise=phot_noise, **vectors)
return lnp_phot + lnp_spec + lnp_prior
else:
return -np.infty
def prior_transform(u, model=None):
if model is None:
model = global_model
return model.prior_transform(u)
pool = None
nprocs = 1
def halt(message):
"""Exit, closing pool safely.
"""
print(message)
try:
pool.close()
except:
pass
sys.exit(0)
if __name__ == "__main__":
# --------------
# Setup
# --------------
rp = run_params
rp['sys.argv'] = sys.argv
try:
rp['sps_libraries'] = sps.ssp.libraries
except(AttributeError):
rp['sps_libraries'] = None
# Use the globals
model = global_model
obs = global_obs
if rp.get('debug', False):
halt('stopping for debug')
# Try to set up an HDF5 file and write basic info to it
outroot = "{0}_{1}".format(rp['outfile'], int(time.time()))
odir = os.path.dirname(os.path.abspath(outroot))
if (not os.path.exists(odir)):
badout = 'Target output directory {} does not exist, please make it.'.format(odir)
halt(badout)
# -------
# Sample
# -------
if rp['verbose']:
print('dynesty sampling...')
tstart = time.time() # time it
dynestyout = fitting.run_dynesty_sampler(lnprobfn, prior_transform, model.ndim,
pool=pool, queue_size=nprocs,
stop_function=stopping_function,
wt_function=weight_function,
**rp)
ndur = time.time() - tstart
print('done dynesty in {0}s'.format(ndur))
# -------------------------
# Output HDF5 (and pickles if asked for)
# -------------------------
if rp.get("output_pickles", False):
# Write the dynesty result object as a pickle
import pickle
        with open(outroot + '_dns.pkl', 'wb') as f:  # pickle requires binary mode
pickle.dump(dynestyout, f)
# Write the model as a pickle
partext = write_results.paramfile_string(**rp)
write_results.write_model_pickle(outroot + '_model', model, powell=None,
paramfile_text=partext)
# Write HDF5
hfile = outroot + '_mcmc.h5'
write_results.write_hdf5(hfile, rp, model, obs, dynestyout,
None, tsample=ndur)

# === prospector-master/scripts/prospector.py ===
#!/usr/local/bin/python
import time, sys, os
import numpy as np
np.seterr(invalid='ignore')  # errstate is a context manager; seterr applies globally
from prospect.models import model_setup
from prospect.io import write_results
from prospect import fitting
from prospect.likelihood import lnlike_spec, lnlike_phot, write_log, chi_spec, chi_phot
# --------------
# Read command line arguments
# --------------
sargv = sys.argv
argdict = {'param_file': ''}
clargs = model_setup.parse_args(sargv, argdict=argdict)
run_params = model_setup.get_run_params(argv=sargv, **clargs)
# --------------
# Globals
# --------------
# GP instances as global
spec_noise, phot_noise = model_setup.load_gp(**run_params)
# Model as global
global_model = model_setup.load_model(**run_params)
# Obs as global
global_obs = model_setup.load_obs(**run_params)
# SPS Model instance as global
sps = model_setup.load_sps(**run_params)
# -----------------
# LnP function as global
# ------------------
def lnprobfn(theta, model=None, obs=None, noise=None, sps=sps,
residuals=False, verbose=run_params['verbose']):
"""Given a parameter vector and optionally a dictionary of observational
ata and a model object, return the ln of the posterior. This requires that
an sps object (and if using spectra and gaussian processes, a GP object) be
instantiated.
:param theta:
Input parameter vector, ndarray of shape (ndim,)
:param model:
bsfh.sedmodel model object, with attributes including ``params``, a
dictionary of model parameters. It must also have ``prior_product()``,
and ``mean_model()`` methods defined.
:param obs:
A dictionary of observational data. The keys should be
*``wavelength``
*``spectrum``
*``unc``
*``maggies``
*``maggies_unc``
*``filters``
* and optional spectroscopic ``mask`` and ``phot_mask``.
:returns lnp:
Ln posterior probability.
"""
if model is None:
model = global_model
if obs is None:
obs = global_obs
# Calculate prior probability and exit if not within prior
lnp_prior = model.prior_product(theta)
if not np.isfinite(lnp_prior):
        return -np.inf
# Generate mean model
t1 = time.time()
try:
spec, phot, x = model.mean_model(theta, obs, sps=sps)
except(ValueError):
        return -np.inf
d1 = time.time() - t1
# Return chi vectors for least-squares optimization
if residuals:
chispec = chi_spec(spec, obs)
chiphot = chi_phot(phot, obs)
return np.concatenate([chispec, chiphot])
# Noise modeling
if spec_noise is not None:
spec_noise.update(**model.params)
if phot_noise is not None:
phot_noise.update(**model.params)
vectors = {'spec': spec, 'unc': obs['unc'],
'sed': model._spec, 'cal': model._speccal,
'phot': phot, 'maggies_unc': obs['maggies_unc']}
# Calculate likelihoods
t2 = time.time()
lnp_spec = lnlike_spec(spec, obs=obs, spec_noise=spec_noise, **vectors)
lnp_phot = lnlike_phot(phot, obs=obs, phot_noise=phot_noise, **vectors)
d2 = time.time() - t2
if verbose:
write_log(theta, lnp_prior, lnp_spec, lnp_phot, d1, d2)
return lnp_prior + lnp_phot + lnp_spec
def chisqfn(theta, model, obs):
"""Negative of lnprobfn for minimization, and also handles passing in
keyword arguments which can only be postional arguments when using scipy
minimize.
"""
return -lnprobfn(theta, model=model, obs=obs)
def chivecfn(theta):
"""Return the residuals instead of a posterior probability or negative
chisq, for use with least-squares optimization methods
"""
return lnprobfn(theta, residuals=True)
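# A minimal usage sketch (illustrative only; not called anywhere below):
# evaluate the posterior at the model's initial parameter vector, using the
# global model and obs objects defined above.
def _example_lnprob_call():
    theta = global_model.initial_theta.copy()
    return lnprobfn(theta, model=global_model, obs=global_obs)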
# -----------------
# MPI pool.  This must be done *after* lnprob and
# chi2 are defined, since the worker processes only
# execute code up to the sys.exit() in the pool.wait() block below
# ------------------
try:
from emcee.utils import MPIPool
pool = MPIPool(debug=False, loadbalance=True)
if not pool.is_master():
# Wait for instructions from the master process.
pool.wait()
sys.exit(0)
except(ImportError, ValueError):
pool = None
print('Not using MPI')
def halt(message):
"""Exit, closing pool safely.
"""
print(message)
try:
pool.close()
    except AttributeError:
        # pool is None when MPI is not being used
        pass
sys.exit(0)
# --------------
# Master branch
# --------------
if __name__ == "__main__":
# --------------
# Setup
# --------------
rp = run_params
rp['sys.argv'] = sys.argv
try:
rp['sps_libraries'] = sps.ssp.libraries
except(AttributeError):
rp['sps_libraries'] = None
# Use the globals
model = global_model
obsdat = global_obs
chi2args = [None, None]
postkwargs = {}
# make zeros into tiny numbers
initial_theta = model.rectify_theta(model.initial_theta)
if rp.get('debug', False):
halt('stopping for debug')
# Try to set up an HDF5 file and write basic info to it
outroot = "{0}_{1}".format(rp['outfile'], int(time.time()))
odir = os.path.dirname(os.path.abspath(outroot))
if (not os.path.exists(odir)):
halt('Target output directory {} does not exist, please make it.'.format(odir))
try:
import h5py
hfilename = outroot + '_mcmc.h5'
hfile = h5py.File(hfilename, "a")
print("Writing to file {}".format(hfilename))
write_results.write_h5_header(hfile, run_params, model)
write_results.write_obs_to_h5(hfile, obsdat)
except(ImportError):
hfile = None
# -----------------------------------------
# Initial guesses using minimization
# -----------------------------------------
if rp['verbose']:
print('Starting minimization...')
if not np.isfinite(model.prior_product(model.initial_theta.copy())):
halt("Halting: initial parameter position has zero prior probability.")
nmin = rp.get('nmin', 1)
if pool is not None:
nmin = max([nmin, pool.size])
if bool(rp.get('do_powell', False)):
from prospect.fitting.fitting import run_minimize
powell_opt = {'ftol': rp['ftol'], 'xtol': 1e-6, 'maxfev': rp['maxfev']}
guesses, pdur, best = run_minimize(obsdat, model, sps, noise=None, lnprobfn=lnprobfn,
min_method='powell', min_opts={"options": powell_opt},
nmin=nmin, pool=pool)
initial_center = fitting.reinitialize(guesses[best].x, model,
edge_trunc=rp.get('edge_trunc', 0.1))
initial_prob = -guesses[best]['fun']
if rp['verbose']:
print('done Powell in {0}s'.format(pdur))
print('best Powell guess:{0}'.format(initial_center))
elif bool(rp.get('do_levenberg', False)):
from prospect.fitting.fitting import run_minimize
lm_opt = {"xtol": 5e-16, "ftol": 5e-16}
guesses, pdur, best = run_minimize(obsdat, model, sps, noise=None, lnprobfn=lnprobfn,
min_method='lm', min_opts=lm_opt,
nmin=nmin, pool=pool)
initial_center = fitting.reinitialize(guesses[best].x, model,
edge_trunc=rp.get('edge_trunc', 0.1))
initial_prob = None
if rp['verbose']:
print('done L-M in {0}s'.format(pdur))
print('best L-M guess:{0}'.format(initial_center))
else:
if rp['verbose']:
print('No minimization requested.')
guesses = None
pdur = 0.0
initial_center = initial_theta.copy()
initial_prob = None
# ---------------------
# Sampling
# -----------------------
if rp['verbose']:
print('emcee sampling...')
tstart = time.time()
out = fitting.run_emcee_sampler(lnprobfn, initial_center, model,
postkwargs=postkwargs, prob0=initial_prob,
pool=pool, hdf5=hfile, **rp)
esampler, burn_p0, burn_prob0 = out
edur = time.time() - tstart
if rp['verbose']:
print('done emcee in {0}s'.format(edur))
# -------------------------
# Output HDF5 (and pickles if asked for)
# -------------------------
print("Writing to {}".format(outroot))
if rp.get("output_pickles", False):
write_results.write_pickles(rp, model, obsdat, esampler, guesses,
outroot=outroot, toptimize=pdur, tsample=edur,
sampling_initial_center=initial_center,
post_burnin_center=burn_p0,
post_burnin_prob=burn_prob0)
if hfile is None:
hfile = hfilename
write_results.write_hdf5(hfile, rp, model, obsdat, esampler, guesses,
toptimize=pdur, tsample=edur,
sampling_initial_center=initial_center,
post_burnin_center=burn_p0,
post_burnin_prob=burn_prob0)
try:
hfile.close()
    except AttributeError:
        # hfile may still be the filename string rather than an open file
        pass
halt('Finished')
| 9,248 | 32.632727 | 97 |
py
|
prospector
|
prospector-master/demo/demo_mock_params.py
|
import time, sys
from copy import deepcopy
import numpy as np
from sedpy.observate import load_filters
from prospect import prospect_args
from prospect.fitting import fit_model
from prospect.io import write_results as writer
# Here we are going to put together some filter names
# All these filters are available in sedpy. If you want to use other filters,
# add their transmission profiles to sedpy/sedpy/data/filters/ with appropriate
# names (and format). See sedpy documentation
galex = ['galex_FUV', 'galex_NUV']
sdss = ['sdss_{0}0'.format(b) for b in 'ugriz']
twomass = ['twomass_{}'.format(b) for b in ['J', 'H', 'Ks']]
spitzer = ['spitzer_irac_ch'+n for n in '1234']
# --------------
# RUN_PARAMS
# When running as a script with argparsing, these are ignored. Kept here for backwards compatibility.
# --------------
run_params = {'verbose': True,
'debug': False,
'outfile': 'output/demo_mock',
'output_pickles': False,
# Optimization parameters
'do_powell': False,
'ftol': 0.5e-5, 'maxfev': 5000,
'do_levenberg': True,
'nmin': 10,
# emcee Fitter parameters
'nwalkers': 64,
'nburn': [32, 32, 64],
'niter': 256,
'interval': 0.25,
'initial_disp': 0.1,
# dynesty Fitter parameters
'nested_bound': 'multi', # bounding method
'nested_sample': 'unif', # sampling method
'nested_nlive_init': 100,
'nested_nlive_batch': 100,
'nested_bootstrap': 0,
'nested_dlogz_init': 0.05,
'nested_weight_kwargs': {"pfrac": 1.0},
'nested_stop_kwargs': {"post_thresh": 0.05},
# Mock data parameters
'snr': 20.0,
'add_noise': False,
'filterset': galex + sdss + twomass,
# Input mock model parameters
'mass': 1e10,
'logzsol': -0.5,
'tage': 12.,
'tau': 3.,
'dust2': 0.3,
'zred': 0.1,
'add_neb': False,
# SPS parameters
'zcontinuous': 1,
}
# --------------
# Model Definition
# --------------
def build_model(zred=0.0, add_neb=True, **extras):
"""Instantiate and return a ProspectorParams model subclass.
:param zred: (optional, default: 0.0)
The redshift of the model
    :param add_neb: (optional, default: True)
If True, turn on nebular emission and add relevant parameters to the
model.
"""
from prospect.models.templates import TemplateLibrary
from prospect.models import priors, sedmodel
# --- Get a basic delay-tau SFH parameter set. ---
# This has 5 free parameters:
# "mass", "logzsol", "dust2", "tage", "tau"
# And two fixed parameters
# "zred"=0.1, "sfh"=4
# See the python-FSPS documentation for details about most of these
    # parameters. Also, look at `TemplateLibrary.describe("parametric_sfh")` to
# view the parameters, their initial values, and the priors in detail
model_params = TemplateLibrary["parametric_sfh"]
# --- Adjust the basic model ----
    # Add burst parameters (fixed to zero by default)
model_params.update(TemplateLibrary["burst_sfh"])
# Add dust emission parameters (fixed)
model_params.update(TemplateLibrary["dust_emission"])
# Add nebular emission parameters and turn nebular emission on
if add_neb:
model_params.update(TemplateLibrary["nebular"])
# --- Set dispersions for emcee ---
model_params["mass"]["init_disp"] = 1e8
model_params["mass"]["disp_floor"] = 1e7
# --- Complexify dust attenuation ---
# Switch to Kriek and Conroy 2013
model_params["dust_type"] = {'N': 1, 'isfree': False,
'init': 4, 'prior': None}
# Slope of the attenuation curve, expressed as the index of the power-law
# that modifies the base Kriek & Conroy/Calzetti shape.
    # I.e., a value of zero is basically Calzetti with a 2175AA bump
model_params["dust_index"] = {'N': 1, 'isfree': False,
'init': 0.0, 'prior': None}
# --- Set initial values ---
model_params["zred"]["init"] = zred
return sedmodel.SedModel(model_params)
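# Usage sketch (illustrative): build the model and inspect its free
# parameters. `theta_labels()` and `initial_theta` are assumed here to be the
# usual ProspectorParams attributes.
#
#   model = build_model(zred=0.1, add_neb=True)
#   print(model.theta_labels())  # names of the free parameters
#   print(model.initial_theta)   # their initial values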
# ------------------
# Observational Data
# ------------------
def build_obs(snr=10.0, filterset=["sdss_g0", "sdss_r0"],
add_noise=True, **kwargs):
"""Make a mock dataset. Feel free to add more complicated kwargs, and put
other things in the run_params dictionary to control how the mock is
generated.
    :param snr:
        The S/N of the mock photometry. This can also be a vector of the
        same length as the number of filters.
    :param filterset:
        A list of `sedpy` filter names. Mock photometry will be generated
        for these filters.
    :param add_noise: (optional, boolean, default: True)
        If True, add a realization of the noise to the mock photometry
"""
from prospect.utils.obsutils import fix_obs
# We'll put the mock data in this dictionary, just as we would for real
# data. But we need to know which bands (and wavelengths if doing
# spectroscopy) in which to generate mock data.
mock = {}
mock['wavelength'] = None # No spectrum
mock['spectrum'] = None # No spectrum
mock['filters'] = load_filters(filterset)
# We need the models to make a mock
sps = build_sps(**kwargs)
mod = build_model(**kwargs)
# Now we get the mock params from the kwargs dict
params = {}
for p in mod.params.keys():
if p in kwargs:
params[p] = np.atleast_1d(kwargs[p])
# And build the mock
mod.params.update(params)
spec, phot, _ = mod.mean_model(mod.theta, mock, sps=sps)
# Now store some ancillary, helpful info;
# this information is not required to run a fit.
mock['true_spectrum'] = spec.copy()
mock['true_maggies'] = phot.copy()
mock['mock_params'] = deepcopy(mod.params)
mock['mock_snr'] = snr
mock["phot_wave"] = np.array([f.wave_effective for f in mock["filters"]])
# And store the photometry, adding noise if desired
pnoise_sigma = phot / snr
if add_noise:
pnoise = np.random.normal(0, 1, len(phot)) * pnoise_sigma
mock['maggies'] = phot + pnoise
else:
mock['maggies'] = phot.copy()
mock['maggies_unc'] = pnoise_sigma
mock['phot_mask'] = np.ones(len(phot), dtype=bool)
# This ensures all required keys are present
mock = fix_obs(mock)
return mock
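# Sanity-check sketch (illustrative): with add_noise=False the implied S/N
# of the mock photometry should equal the input `snr` exactly, since
# maggies_unc = phot / snr above.
#
#   mock = build_obs(snr=20., add_noise=False, mass=1e10, tage=12., tau=3.)
#   assert np.allclose(mock['maggies'] / mock['maggies_unc'], 20.)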
# --------------
# SPS Object
# --------------
def build_sps(zcontinuous=1, **extras):
"""Instantiate and return the Stellar Population Synthesis object.
:param zcontinuous: (default: 1)
python-fsps parameter controlling how metallicity interpolation of the
        SSPs is achieved. A value of `1` is recommended.
        * 0: use discrete indices (controlled by parameter "zmet")
        * 1: linearly interpolate in log Z/Z_sun to the target metallicity
(the parameter "logzsol".)
* 2: convolve with a metallicity distribution function at each age.
The MDF is controlled by the parameter "pmetals"
"""
from prospect.sources import CSPSpecBasis
sps = CSPSpecBasis(zcontinuous=zcontinuous,
compute_vega_mags=False)
return sps
# -----------------
# Noise Model
# ------------------
def build_noise(**extras):
return None, None
# -----------
# Everything
# ------------
def build_all(**kwargs):
return (build_obs(**kwargs), build_model(**kwargs),
build_sps(**kwargs), build_noise(**kwargs))
if __name__ == '__main__':
# - Parser with default arguments -
parser = prospect_args.get_parser()
# - Add custom arguments -
parser.add_argument('--zred', type=float, default=0.1,
help="Redshift for the model (and mock).")
parser.add_argument('--add_neb', action="store_true",
help="If set, add nebular emission in the model (and mock).")
parser.add_argument('--add_noise', action="store_true",
help="If set, noise up the mock.")
parser.add_argument('--snr', type=float, default=20,
help="S/N ratio for the mock photometry.")
parser.add_argument('--filterset', type=str, nargs="*",
default=galex + sdss + twomass,
help="Names of filters through which to produce photometry.")
parser.add_argument('--tage', type=float, default=12.,
help="Age of the mock, Gyr.")
parser.add_argument('--tau', type=float, default=3.,
help="SFH timescale parameter of the mock, Gyr.")
parser.add_argument('--dust2', type=float, default=0.3,
help="Dust attenuation V band optical depth")
parser.add_argument('--logzsol', type=float, default=-0.5,
help="Metallicity of the mock; log(Z/Z_sun)")
parser.add_argument('--mass', type=float, default=1e10,
help="Stellar mass of the mock; solar masses formed")
args = parser.parse_args()
run_params = vars(args)
obs, model, sps, noise = build_all(**run_params)
run_params["sps_libraries"] = sps.ssp.libraries
run_params["param_file"] = __file__
print(model)
if args.debug:
sys.exit()
#hfile = setup_h5(model=model, obs=obs, **run_params)
hfile = "{0}_{1}_mcmc.h5".format(args.outfile, int(time.time()))
output = fit_model(obs, model, sps, noise, **run_params)
writer.write_hdf5(hfile, run_params, model, obs,
output["sampling"][0], output["optimization"][0],
tsample=output["sampling"][1],
toptimize=output["optimization"][1],
sps=sps)
try:
hfile.close()
except(AttributeError):
pass
| 10,099 | 35.071429 | 102 |
py
|
prospector
|
prospector-master/demo/demo_params.py
|
import time, sys
import numpy as np
from sedpy.observate import load_filters
from prospect import prospect_args
from prospect.fitting import fit_model
from prospect.io import write_results as writer
# --------------
# RUN_PARAMS
# When running as a script with argparsing, these are ignored. Kept here for backwards compatibility.
# --------------
run_params = {'verbose': True,
'debug': False,
'outfile': 'demo_galphot',
'output_pickles': False,
# Optimization parameters
'do_powell': False,
'ftol': 0.5e-5, 'maxfev': 5000,
'do_levenberg': True,
'nmin': 10,
# emcee fitting parameters
'nwalkers': 128,
'nburn': [16, 32, 64],
'niter': 512,
'interval': 0.25,
'initial_disp': 0.1,
# dynesty Fitter parameters
'nested_bound': 'multi', # bounding method
'nested_sample': 'unif', # sampling method
'nested_nlive_init': 100,
'nested_nlive_batch': 100,
'nested_bootstrap': 0,
'nested_dlogz_init': 0.05,
'nested_weight_kwargs': {"pfrac": 1.0},
'nested_stop_kwargs': {"post_thresh": 0.1},
# Obs data parameters
'objid': 0,
'phottable': 'demo_photometry.dat',
'luminosity_distance': 1e-5, # in Mpc
# Model parameters
'add_neb': False,
'add_duste': False,
# SPS parameters
'zcontinuous': 1,
}
# --------------
# Model Definition
# --------------
def build_model(object_redshift=0.0, fixed_metallicity=None, add_duste=False,
add_neb=False, luminosity_distance=0.0, **extras):
"""Construct a model. This method defines a number of parameter
specification dictionaries and uses them to initialize a
`models.sedmodel.SedModel` object.
:param object_redshift:
        If given, fix the model redshift to this value.
    :param add_duste: (optional, default: False)
        Switch to add (fixed) parameters relevant for dust emission.
    :param add_neb: (optional, default: False)
        Switch to add (fixed) parameters relevant for nebular emission, and
        turn nebular emission on.
    :param luminosity_distance: (optional)
        If present, add a `"lumdist"` parameter to the model, and set its
        value (in Mpc) to this. This allows one to decouple redshift from
        distance, and fit, e.g., absolute magnitudes (by setting
        luminosity_distance to 1e-5 (10pc)).
"""
from prospect.models.templates import TemplateLibrary
from prospect.models import priors, sedmodel
# --- Get a basic delay-tau SFH parameter set. ---
# This has 5 free parameters:
# "mass", "logzsol", "dust2", "tage", "tau"
# And two fixed parameters
# "zred"=0.1, "sfh"=4
# See the python-FSPS documentation for details about most of these
# parameters. Also, look at `TemplateLibrary.describe("parametric_sfh")` to
# view the parameters, their initial values, and the priors in detail.
model_params = TemplateLibrary["parametric_sfh"]
# Add lumdist parameter. If this is not added then the distance is
# controlled by the "zred" parameter and a WMAP9 cosmology.
if luminosity_distance > 0:
model_params["lumdist"] = {"N": 1, "isfree": False,
"init": luminosity_distance, "units":"Mpc"}
# Adjust model initial values (only important for optimization or emcee)
model_params["dust2"]["init"] = 0.1
model_params["logzsol"]["init"] = -0.3
model_params["tage"]["init"] = 13.
model_params["mass"]["init"] = 1e8
# If we are going to be using emcee, it is useful to provide an
# initial scale for the cloud of walkers (the default is 0.1)
# For dynesty these can be skipped
model_params["mass"]["init_disp"] = 1e7
model_params["tau"]["init_disp"] = 3.0
model_params["tage"]["init_disp"] = 5.0
model_params["tage"]["disp_floor"] = 2.0
model_params["dust2"]["disp_floor"] = 0.1
# adjust priors
model_params["dust2"]["prior"] = priors.TopHat(mini=0.0, maxi=2.0)
model_params["tau"]["prior"] = priors.LogUniform(mini=1e-1, maxi=10)
model_params["mass"]["prior"] = priors.LogUniform(mini=1e6, maxi=1e10)
# Change the model parameter specifications based on some keyword arguments
if fixed_metallicity is not None:
# make it a fixed parameter
model_params["logzsol"]["isfree"] = False
#And use value supplied by fixed_metallicity keyword
model_params["logzsol"]['init'] = fixed_metallicity
if object_redshift != 0.0:
# make sure zred is fixed
model_params["zred"]['isfree'] = False
# And set the value to the object_redshift keyword
model_params["zred"]['init'] = object_redshift
if add_duste:
# Add dust emission (with fixed dust SED parameters)
model_params.update(TemplateLibrary["dust_emission"])
if add_neb:
# Add nebular emission (with fixed parameters)
model_params.update(TemplateLibrary["nebular"])
# Now instantiate the model using this new dictionary of parameter specifications
model = sedmodel.SedModel(model_params)
return model
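# Usage sketch (illustrative): check that the model's current parameter
# vector lies within the priors before fitting; `prior_product` and `theta`
# are assumed here to be the standard ProspectorParams API.
#
#   model = build_model(object_redshift=0.05, add_duste=True)
#   assert np.isfinite(model.prior_product(model.theta))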
# --------------
# Observational Data
# --------------
# Here we are going to put together some filter names
galex = ['galex_FUV', 'galex_NUV']
spitzer = ['spitzer_irac_ch'+n for n in '1234']
bessell = ['bessell_'+n for n in 'UBVRI']
sdss = ['sdss_{0}0'.format(b) for b in 'ugriz']
# The first filter set is Johnson/Cousins, the second is SDSS. We will use a
# flag in the photometry table to tell us which set to use for each object
# (some were not in the SDSS footprint, and therefore have Johnson/Cousins
# photometry)
#
# All these filters are available in sedpy. If you want to use other filters,
# add their transmission profiles to sedpy/sedpy/data/filters/ with appropriate
# names (and format)
filtersets = (galex + bessell + spitzer,
galex + sdss + spitzer)
def build_obs(objid=0, phottable='demo_photometry.dat',
luminosity_distance=None, **kwargs):
"""Load photometry from an ascii file. Assumes the following columns:
`objid`, `filterset`, [`mag0`,....,`magN`] where N >= 11. The User should
modify this function (including adding keyword arguments) to read in their
particular data format and put it in the required dictionary.
:param objid:
        The object id for the row of the photometry file to use. Integer.
Requires that there be an `objid` column in the ascii file.
:param phottable:
Name (and path) of the ascii file containing the photometry.
:param luminosity_distance: (optional)
The Johnson 2013 data are given as AB absolute magnitudes. They can be
turned into apparent magnitudes by supplying a luminosity distance.
:returns obs:
Dictionary of observational data.
"""
    # Write your code here to read data. Can use FITS, h5py, astropy.table,
# sqlite, whatever.
# e.g.:
# import astropy.io.fits as pyfits
# catalog = pyfits.getdata(phottable)
from prospect.utils.obsutils import fix_obs
# Here we will read in an ascii catalog of magnitudes as a numpy structured
# array
with open(phottable, 'r') as f:
# drop the comment hash
header = f.readline().split()[1:]
catalog = np.genfromtxt(phottable, comments='#',
                            dtype=np.dtype([(n, float) for n in header]))
# Find the right row
ind = catalog['objid'] == float(objid)
# Here we are dynamically choosing which filters to use based on the object
# and a flag in the catalog. Feel free to make this logic more (or less)
# complicated.
filternames = filtersets[int(catalog[ind]['filterset'])]
# And here we loop over the magnitude columns
mags = [catalog[ind]['mag{}'.format(i)] for i in range(len(filternames))]
mags = np.array(mags)
# And since these are absolute mags, we can shift to any distance.
if luminosity_distance is not None:
dm = 25 + 5 * np.log10(luminosity_distance)
mags += dm
# Build output dictionary.
obs = {}
# This is a list of sedpy filter objects. See the
# sedpy.observate.load_filters command for more details on its syntax.
obs['filters'] = load_filters(filternames)
# This is a list of maggies, converted from mags. It should have the same
# order as `filters` above.
obs['maggies'] = np.squeeze(10**(-mags/2.5))
# HACK. You should use real flux uncertainties
obs['maggies_unc'] = obs['maggies'] * 0.07
# Here we mask out any NaNs or infs
obs['phot_mask'] = np.isfinite(np.squeeze(mags))
# We have no spectrum.
obs['wavelength'] = None
obs['spectrum'] = None
# Add unessential bonus info. This will be stored in output
#obs['dmod'] = catalog[ind]['dmod']
obs['objid'] = objid
# This ensures all required keys are present and adds some extra useful info
obs = fix_obs(obs)
return obs
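# Conversion sketch (illustrative): maggies are linear flux units defined by
# m_AB = -2.5 * log10(maggies), so the (distance-shifted) catalog magnitudes
# can be recovered from the obs dictionary with:
#
#   mags_back = -2.5 * np.log10(obs['maggies'])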
# --------------
# SPS Object
# --------------
def build_sps(zcontinuous=1, compute_vega_mags=False, **extras):
from prospect.sources import CSPSpecBasis
sps = CSPSpecBasis(zcontinuous=zcontinuous,
compute_vega_mags=compute_vega_mags)
return sps
# -----------------
# Noise Model
# ------------------
def build_noise(**extras):
return None, None
# -----------
# Everything
# ------------
def build_all(**kwargs):
return (build_obs(**kwargs), build_model(**kwargs),
build_sps(**kwargs), build_noise(**kwargs))
if __name__ == '__main__':
# - Parser with default arguments -
parser = prospect_args.get_parser()
# - Add custom arguments -
parser.add_argument('--object_redshift', type=float, default=0.0,
help=("Redshift for the model"))
parser.add_argument('--add_neb', action="store_true",
help="If set, add nebular emission in the model (and mock).")
parser.add_argument('--add_duste', action="store_true",
help="If set, add dust emission to the model.")
parser.add_argument('--luminosity_distance', type=float, default=1e-5,
help=("Luminosity distance in Mpc. Defaults to 10pc "
"(for case of absolute mags)"))
parser.add_argument('--phottable', type=str, default="demo_photometry.dat",
help="Names of table from which to get photometry.")
parser.add_argument('--objid', type=int, default=0,
help="zero-index row number in the table to fit.")
args = parser.parse_args()
run_params = vars(args)
obs, model, sps, noise = build_all(**run_params)
run_params["sps_libraries"] = sps.ssp.libraries
run_params["param_file"] = __file__
print(model)
if args.debug:
sys.exit()
#hfile = setup_h5(model=model, obs=obs, **run_params)
hfile = "{0}_{1}_mcmc.h5".format(args.outfile, int(time.time()))
output = fit_model(obs, model, sps, noise, **run_params)
writer.write_hdf5(hfile, run_params, model, obs,
output["sampling"][0], output["optimization"][0],
tsample=output["sampling"][1],
toptimize=output["optimization"][1],
sps=sps)
try:
hfile.close()
except(AttributeError):
pass
| 11,700 | 36.503205 | 102 |
py
|
prospector
|
prospector-master/demo/timing.py
|
from copy import deepcopy
import timeit, time, sys
import numpy as np
import fsps
from prospect.sources import FastStepBasis, CSPBasis
sps = fsps.StellarPopulation(zcontinuous=1)
libs = [l.upper() for l in sps.libraries]
def get_model(sps, **kwargs):
try:
# For SSPBasis derived objects
sps.update(**kwargs)
except(AttributeError):
# For StellarPopulation and CSPBasis objects
        for k, v in kwargs.items():
try:
sps.params[k] = v
            except KeyError:
                # ignore parameters the sps object does not recognize
                pass
out = sps.get_spectrum(tage=sps.params['tage'])
return out
def call_duration(sps, ntry, **params):
# build cached SSPs without getting charged for the time.
junk = [get_model(sps, logzsol=[z], **params) for z in np.linspace(-1, 0, 12)]
#print('done_setup')
ts = time.time()
for i in range(ntry):
_ = get_model(sps, logzsol=[np.random.uniform(-0.8, -0.2)], **params)
dur = time.time() - ts
return dur / ntry
def make_agebins(nbin=5, minage=7.0, **extras):
tuniv = 13.7e9
allages = np.linspace(minage, np.log10(tuniv), nbin)
allages = np.insert(allages, 0, 0.)
agebins = np.array([allages[:-1], allages[1:]]).T
return agebins
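# Illustrative example of the output: with nbin=3 and minage=7.0 the edges
# are [0, 7.0, ~8.57, ~10.14] (log years, with a floor at log t = 0), so
# make_agebins(3) returns approximately
#   [[0.0, 7.0], [7.0, 8.57], [8.57, 10.14]]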
if __name__ == "__main__":
step_params = {'agebins':[[]],
'mass': [],
'tage': np.array([13.7]),
'pmetals': np.array([-99])
}
csp_params = {'tage': np.array([10.0]),
'sfh': np.array([4.0]),
'mass': np.array([1.0]),
'pmetals': np.array([-99])
}
w = ['WITHOUT', 'WITH']
ntry = 100
zlist = [1, 2]
nlist = [False, True]
print("Using {} isochrones and {} spectra.\nAsking for single ages.".format(*libs))
# FSPS
string = "StellarPopulation takes {:7.5f}s per call {} nebular emission and zcontinuous={}."
params = deepcopy(csp_params)
for zcont in zlist:
print("\n")
for neb in nlist:
sps = fsps.StellarPopulation(zcontinuous=zcont)
dur = call_duration(sps, ntry, add_neb_emission=[neb], **params)
print(string.format(dur, w[int(neb)], zcont))
# CSP
string = "CSPBasis takes {:7.5f}s per call {} nebular emission and zcontinuous={}."
params = deepcopy(csp_params)
for zcont in zlist:
print("\n")
for neb in nlist:
sps = CSPBasis(zcontinuous=zcont)
dur = call_duration(sps, ntry, add_neb_emission=[neb], **params)
print(string.format(dur, w[int(neb)], zcont))
# Step SFH
nbin = 10
params = deepcopy(step_params)
params['agebins'] = make_agebins(nbin)
params['mass'] = np.ones(nbin) * 1.0
string = "FastStepBasis ({} bins) takes {:7.5f}s per call {} nebular emission and zcontinuous={}."
for zcont in zlist:
print("\n")
for neb in nlist:
sps = FastStepBasis(zcontinuous=zcont)
dur = call_duration(sps, ntry, add_neb_emission=[neb], **params)
#print(sps.params, sps.ssp.params['add_neb_emission'])
print(string.format(nbin, dur, w[int(neb)], zcont))
# sys.exit()
# Now time calls for random Z (which always causes dirtiness=1)
#setup = "from __main__ import test; import numpy as np"
#call = "out=get_model(sps, logzsol=[np.random.uniform(-1, 0)], **params)"
#dur = timeit.timeit(call, setup=setup, number=100)
| 3,522 | 29.634783 | 102 |
py
|
prospector
|
prospector-master/demo/mpi_hello_world.py
|
# This is a short script to test the MPI implementation with the pattern used
# by Prospector. However, try/except blocks are minimized to enable more
# useful error messages, and the code is simple enough to *only* test the MPI
# implementation.
# Invoke with:
# mpirun -np 4 python mpi_hello_world.py
import numpy as np
import sys
from mpi4py import MPI
def speak(i):
print("I am core {} with task {}".format(pool.rank, i))
return i, pool.rank
from emcee.utils import MPIPool
pool = MPIPool(debug=False, loadbalance=True)
if not pool.is_master():
# Wait for instructions from the master process.
pool.wait()
sys.exit(0)
if __name__ == "__main__":
result = pool.map(speak, np.arange(10))
print(result)
pool.close()
| 760 | 24.366667 | 77 |
py
|
prospector
|
prospector-master/misc/fdot.py
|
# script to calculate the fractional change in SSP flux as a function
# of time.
import matplotlib.pyplot as pl
import numpy as np
import fsps
sps = fsps.StellarPopulation(zcontinuous=0)
# compile all metallicities
for i, z in enumerate(sps.zlegend):
w, s = sps.get_spectrum(zmet=i+1)
spec, mass, lbol = sps.all_ssp_spec(peraa=True)
wmin, wmax, amin, amax, zmet = 1.5e3, 2e4, 0.01, 10, 4
ages = 10**(sps.ssp_ages-9)
waves = sps.wavelengths
gwave = (waves < wmax) & (waves > wmin)
gage = (ages < amax) & (ages > amin)
fdot = np.diff(spec, axis=1)
fbar = (spec[:,:-1,:] + spec[:,1:,:])/2.0
pl.imshow(np.squeeze((fdot/fbar)[np.ix_(gwave, gage, [zmet])]),
interpolation='nearest', aspect='auto')
| 717 | 22.16129 | 69 |
py
|
prospector
|
prospector-master/misc/timings_pyfsps.py
|
#compare a lookup table of spectra at ages and metallicities to
#calls to fsps.sps.get_spectrum() for different metallicities
import time, os, subprocess, re, sys
import numpy as np
#import matplotlib.pyplot as pl
import fsps
from prospect import sources as sps_basis
from prospect.models import sedmodel
def run_command(cmd):
"""
Open a child process, and return its exit status and stdout.
"""
child = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out = [s for s in child.stdout]
w = child.wait()
return os.WEXITSTATUS(w), out
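# Usage sketch (illustrative):
#   stat, out = run_command("echo hello")
#   # stat is 0 on success; out is the list of stdout lines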
# Check to make sure that the required environment variable is present.
try:
ev = os.environ["SPS_HOME"]
except KeyError:
raise ImportError("You need to have the SPS_HOME environment variable")
# Check the SVN revision number.
cmd = ["svnversion", ev]
stat, out = run_command(" ".join(cmd))
fsps_vers = int(re.match("^([0-9])+", out[0].decode()).group(0))
sps = fsps.StellarPopulation(zcontinuous=True)
print('FSPS version = {}'.format(fsps_vers))
print('Zs={0}, N_lambda={1}'.format(sps.zlegend, len(sps.wavelengths)))
print('single age')
def spec_from_fsps(z, t, s):
t0 = time.time()
sps.params['logzsol'] = z
sps.params['sigma_smooth'] = s
sps.params['tage'] = t
wave, spec = sps.get_spectrum(peraa=True, tage = sps.params['tage'])
#print(spec.shape)
return time.time()-t0
def mags_from_fsps(z, t, s):
t0 = time.time()
sps.params['zred']=t
sps.params['logzsol'] = z
sps.params['sigma_smooth'] = s
sps.params['tage'] = t
mags = sps.get_mags(tage = sps.params['tage'], redshift=0.0)
#print(spec.shape)
return time.time()-t0
def spec_from_ztinterp(z, t, s):
t0 = time.time()
sps.params['logzsol'] = z
sps.params['sigma_smooth'] = s
sps.params['tage'] = t
sps.params['imf3'] = s
spec, m, l = sps.ztinterp(sps.params['logzsol'], sps.params['tage'], peraa=True)
#print(spec.shape)
return time.time()-t0
if sys.argv[1] == 'mags':
from_fsps = mags_from_fsps
print('timing get_mags')
print('nbands = {}'.format(len(sps.get_mags(tage=1.0))))
elif sys.argv[1] == 'spec':
from_fsps = spec_from_fsps
print('timing get_spectrum')
elif sys.argv[1] == 'ztinterp':
from_fsps = spec_from_ztinterp
print('timing get_spectrum')
elif sys.argv[1] == 'sedpy':
from sedpy import observate
nbands = len(sps.get_mags(tage=1.0))
fnames = nbands * ['sdss_r0']
filters = observate.load_filters(fnames)
def mags_from_sedpy(z, t, s):
t0 = time.time()
sps.params['logzsol'] = z
sps.params['sigma_smooth'] = s
sps.params['tage'] = t
wave, spec = sps.get_spectrum(peraa=True,
tage = sps.params['tage'])
mags = observate.getSED(wave, spec, filters)
return time.time()-t0
from_fsps = mags_from_sedpy
sps.params['add_neb_emission'] = False
sps.params['smooth_velocity'] = True
sps.params['sfh'] = 0
ntry = 30
zz = np.random.uniform(-1,0,ntry)
tt = np.random.uniform(0.1,4,ntry)
ss = np.random.uniform(1,2.5,ntry)
#make sure all z's already compiled
_ =[from_fsps(z, 1.0, 0.0) for z in [-1, -0.8, -0.6, -0.4, -0.2, 0.0]]
all_dur = []
print('no neb emission:')
dur_many = np.zeros(ntry)
for i in range(ntry):
dur_many[i] = from_fsps(zz[i], tt[i], ss[i])
print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))
all_dur += [dur_many]
print('no neb emission, no smooth:')
dur_many = np.zeros(ntry)
for i in range(ntry):
dur_many[i] = from_fsps(zz[i], tt[i], 0.0)
print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))
all_dur += [dur_many]
sps.params['add_neb_emission'] = True
print('neb emission:')
dur_many = np.zeros(ntry)
for i in range(ntry):
dur_many[i] = from_fsps(zz[i], tt[i], ss[i])
print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))
all_dur += [dur_many]
print('neb emission, no smooth:')
dur_many = np.zeros(ntry)
for i in range(ntry):
dur_many[i] = from_fsps(zz[i], tt[i], 0.0)
print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))
all_dur += [dur_many]
| 4,274 | 29.319149 | 85 |
py
|
prospector
|
prospector-master/misc/ztest.py
|
import numpy as np
import matplotlib.pyplot as pl
import fsps
ztype = [False, 0, 1, 2]
zdlabel= ['ind', 'cont', 'MDF']
sps = [fsps.StellarPopulation(zcontinuous=z) for z in ztype]
tage = 1.0
p = 3.0
zmet = 3
sfig, sax = pl.subplots()
rfig, rax = pl.subplots()
sax.set_xlim(1e3, 2e4)
rax.set_xlim(1e3, 2e4)
rax.set_xscale('log')
sax.set_xscale('log')
spec = []
for zt, sp in zip(ztype, sps):
sp.params['zmet'] = zmet
sp.params['pmetals'] = p
sp.params['logzsol'] = np.log10(sp.zlegend[sp.params['zmet']]/0.019)
w, s = sp.get_spectrum(tage = tage, peraa=True)
spec.append(s)
lbl = '{}_logz{}_p{}'.format(zdlabel[zt], sp.params['logzsol'], sp.params['pmetals'])
sax.plot(w, s, label=lbl)
rax.plot(w, s/spec[0], label=lbl)
rax.legend()
sax.legend()
rfig.show()
sfig.show()
| 809 | 22.823529 | 89 |
py
|
prospector
|
prospector-master/misc/timing_smoothspec.py
|
import numpy as np
import matplotlib.pyplot as pl
import fsps, time, sys
from sedpy.observate import vac2air, lsf_broaden
from scipy import sparse
def lsf_broaden_sparse(wave, spec, lsf=None, outwave=None,
return_kernel=False, fwhm=False, dyr = 1e-4, **kwargs):
"""Broaden a spectrum using a wavelength dependent line spread
function. This function is only approximate because it doesn't
actually do the integration over pixels, so for sparsely sampled
points you'll have problems.
:param wave:
input wavelengths
:param lsf:
A function that returns the gaussian dispersion at each
        wavelength. This is assumed to be in sigma unless ``fwhm`` is
``True``
:param outwave:
Optional output wavelengths
:param kwargs:
Passed to lsf()
:returns newspec:
The broadened spectrum
"""
if outwave is None:
outwave = wave
if lsf is None:
return np.interp(outwave, wave, spec)
dw = np.gradient(wave)
sigma = lsf(wave, **kwargs)
if fwhm:
sigma = sigma/2.35
kernel = outwave[:,None] - wave[None,:]
kernel = (1/(sigma * np.sqrt(np.pi * 2))[None, :] *
np.exp(-kernel**2/(2*sigma[None,:]**2)) *
dw[None,:])
kernel[kernel < kernel.max()*dyr] = 0
skernel = sparse.csr_matrix(kernel)
skernel = skernel/skernel.sum(axis=1)
newspec = skernel.dot(spec)
if return_kernel:
return newspec, kernel
return newspec
def lsf(wave, sigma_smooth=0, **extras):
return np.zeros(len(wave)) + sigma_smooth
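# An alternative, wavelength-dependent LSF (a sketch, unused below):
# dispersion growing linearly with wavelength, i.e. a constant resolving
# power R = lambda / sigma.
def lsf_linear(wave, R=3000., **extras):
    return wave / R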
def smoothspec(inwave, spec, lsf, dosparse=False,
min_wave_smooth=1e3, max_wave_smooth=1e4,
**kwargs):
smask = (inwave > min_wave_smooth) & (inwave < max_wave_smooth)
ospec = spec.copy()
if dosparse:
ospec[smask] = lsf_broaden_sparse(inwave[smask],
spec[smask], lsf, **kwargs)
else:
ospec[smask] = lsf_broaden(inwave[smask], spec[smask], lsf, **kwargs)
return ospec
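# Usage sketch (illustrative): smooth with a constant 2 AA dispersion over
# the optical window, using the constant-sigma `lsf` defined above.
#
#   smoothed = smoothspec(wave, spec, lsf, sigma_smooth=2.0,
#                         min_wave_smooth=3.5e3, max_wave_smooth=9e3)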
if __name__ == "__main__":
sps = fsps.StellarPopulation()
sps.params['smooth_velocity'] = False
wave, spec = sps.get_spectrum(tage=1.0, peraa=True)
ns = 10
sigma = np.random.uniform(1, 3, size=ns)
fsps_dur, bsfh_dur, bsfh_sparse_dur = [], [], []
for s in sigma:
t = time.time()
ospec_fsps = sps.smoothspec(wave, spec,s)
fsps_dur.append(time.time()-t)
t = time.time()
ospec_bsfh = smoothspec(wave, spec, lsf, sigma_smooth=s)
bsfh_dur.append(time.time()-t)
t = time.time()
ospec_bsfh_sparse = smoothspec(wave, spec, lsf, sigma_smooth=s, dosparse=True)
bsfh_sparse_dur.append(time.time()-t)
bsfh_dur = np.array(bsfh_dur)
bsfh_sparse_dur = np.array(bsfh_sparse_dur)
fsps_dur = np.array(fsps_dur)
pl.figure()
pl.plot(wave, spec)
pl.plot(wave, ospec_bsfh)
pl.plot(wave, ospec_fsps)
pl.plot(wave, ospec_bsfh_sparse)
pl.xlim(1e3,1e4)
| 3,077 | 30.731959 | 86 |
py
|
prospector
|
prospector-master/misc/test_compsp.py
|
# Test the analytic full SFR integrals implemented in SSPBasis against the
# COMPSP implementations of same.
import sys, os, time
import numpy as np
import matplotlib.pyplot as pl
import fsps
from prospect.sources import CompositeSFH
sfhtype = {1:'tau', 4: 'delaytau', 5: 'simha'}
compute_vega_mags = False
zcontinuous = 1
sps = fsps.StellarPopulation(compute_vega_mags=compute_vega_mags,
zcontinuous=zcontinuous)
mysps = CompositeSFH(sfh_type='tau', interp_type='logarithmic', flux_interp='linear',
compute_vega_mags=compute_vega_mags, zcontinuous=zcontinuous)
mysps.configure()
sspages = np.insert(mysps.logage, 0, 0)
mysps.mint_log=-3
wlo = 1e3
whi = 1.2e4
def main():
tage = 1.4
# vary sf_trunc
sf_trunc = tage * np.linspace(0.90, 1.02, 9)
figlist = test_taumodel_sft(values=sf_trunc, tau=1.0,
sf_slope=10.0, tage=tage, sfh=4)
figlist[1].axes[0].set_xlim(5, 9)
figlist[0].savefig('figures/test_sft_spec.pdf')
figlist[1].savefig('figures/test_sft_wght.pdf')
# vary sf_trunc, simha SFH
sf_trunc = tage * np.linspace(0.90, 1.02, 9)
figlist = test_taumodel_sft(values=sf_trunc, tau=4.2,
sf_slope=10.0, tage=tage, sfh=5)
figlist[1].axes[0].set_xlim(5, 10.5)
figlist[0].savefig('figures/test_sft_simha_spec.pdf')
figlist[1].savefig('figures/test_sft_simha_wght.pdf')
#sys.exit()
# vary sf_slope
figlist = test_taumodel_sfslope()
figlist[1].axes[0].set_xlim(5, 10.5)
figlist[0].savefig('figures/test_sfslope_spec.pdf')
figlist[1].savefig('figures/test_sfslope_wght.pdf')
# vary tage
figlist = test_taumodel_tage(sfh=1)
figlist[0].savefig('figures/test_tage_spec.pdf')
figlist[1].savefig('figures/test_tage_wght.pdf')
# vary tau
figlist = test_taumodel_tau(sfh=1)
figlist[0].savefig('figures/test_tau_spec.pdf')
figlist[1].savefig('figures/test_tau_wght.pdf')
#pl.show()
def test_mint_convergence():
"""Test convergence of most recent bin.
"""
sfh_params = {'tage': 1e9, 'tau':14e9}
# set up an array of minimum values, and get the weights for each minimum value
mint = np.linspace(-4, 2, 100)
w = np.zeros([len(mint), len(mysps.logage)+1])
for i, m in enumerate(mint):
mysps.update(**sfh_params)
mysps.mint_log = m
mysps.configure()
w[i,:] = mysps.all_ssp_weights
# Plot summed weight for zeroth and 1st SSP
pl.figure()
zero = (w[:,0] + w[:,1])
pl.plot(mint, zero, '-o')
pl.yscale('log')
pl.show()
# Plot fractional weight error (relative to smallest tmin) as a function of tmin
pl.figure()
    # mint is already log10(t), so plot it directly
    pl.plot(mint, 1 - w[:,0]/w[0,0], '-o')
    pl.plot(mint, 1 - w[:,1]/w[0,1], '-o')
pl.show()
def test_taumodel_dust(values=np.linspace(0.0, 4.0, 9),
tage=3.0, tau=1.0, sfh=1):
pname = r'$\tau_V$'
sps.params['sfh'] = sfh
mysps.sfh_type = sfhtype[sfh]
mysps.configure()
sfig, saxes = pl.subplots(2, 1, figsize=(11, 8.5))
rax, dax = saxes
#wfig, wax = pl.subplots()
for dust2 in values:
sps.params['tau'] = tau
sps.params['tage'] = tage
sps.params['dust2'] = dust2
sfh_params = {'tage': tage*1e9, 'tau': tau*1e9, 'dust2': dust2}
w, spec = sps.get_spectrum(tage=tage, peraa=True)
mw, myspec, mstar = mysps.get_galaxy_spectrum(**sfh_params)
rax.plot(mw, myspec / spec, label=r'{}={:4.2f}'.format(pname, dust2))
dax.plot(mw, spec - myspec, label=r'{}={:4.2f}'.format(pname, dust2))
#wax.plot(sspages, mysps.all_ssp_weights, '-o', label=r'{}={:4.2f}'.format(pname, dust2))
rax.set_xlim(1e3, 1e7)
rax.set_ylabel('pro / FSPS')
dax.set_xlim(1e3, 1e7)
dax.set_ylabel('FSPS - pro')
[ax.set_xscale('log') for ax in [rax, dax]]
[ax.text(0.1, 0.8, r'$\tau_{{SF}}={}, tage={}$'.format(tau, tage), transform=ax.transAxes)
for ax in [rax, dax]]
[ax.legend(loc=0, prop={'size': 10}) for ax in [rax, dax]]
#wax.set_yscale('log')
#wax.set_xlabel('log t$_{lookback}$')
#wax.set_ylabel('weight')
[ax.set_title('SFH={} ({} model)'.format(sfh, sfhtype[sfh]))
for ax in [rax]]
return [sfig]
def test_taumodel_tau(values=10**np.linspace(-1, 1, 9),
tage=10.0, sfh=1):
"""Test (delayed-) tau models
"""
pname = r'$\tau$'
sps.params['sfh'] = sfh
mysps.sfh_type = sfhtype[sfh]
mysps.configure()
sfig, saxes = pl.subplots(2, 1, figsize=(11, 8.5))
rax, dax = saxes
wfig, wax = pl.subplots()
for tau in values:
sps.params['tau'] = tau
sps.params['tage'] = tage
sfh_params = {'tage': tage*1e9, 'tau': tau*1e9}
w, spec = sps.get_spectrum(tage=tage, peraa=True)
mw, myspec, mstar = mysps.get_galaxy_spectrum(**sfh_params)
rax.plot(mw, myspec / spec, label=r'{}={:4.2f}'.format(pname, tau))
dax.plot(mw, spec - myspec, label=r'{}={:4.2f}'.format(pname, tau))
wax.plot(sspages, mysps.all_ssp_weights, '-o', label=r'{}={:4.2f}'.format(pname, tau))
rax.set_xlim(wlo, whi)
rax.set_ylabel('pro / FSPS')
dax.set_xlim(wlo, whi)
dax.set_ylabel('FSPS - pro')
[ax.legend(loc=0, prop={'size': 10}) for ax in [rax, dax, wax]]
wax.set_yscale('log')
wax.set_xlabel('log t$_{lookback}$')
wax.set_ylabel('weight')
[ax.set_title('SFH={} ({} model)'.format(sfh, sfhtype[sfh]))
for ax in [rax, wax]]
return [sfig, wfig]
def test_taumodel_tage(values=10**np.linspace(np.log10(0.11), 1, 9),
tau=1.0, sf_slope=0.0, sfh=1):
"""Test (delayed-) tau models
"""
pname = 'tage'
sps.params['sfh'] = sfh
mysps.sfh_type = sfhtype[sfh]
mysps.configure()
sfig, saxes = pl.subplots(2, 1, figsize=(11, 8.5))
rax, dax = saxes
wfig, wax = pl.subplots()
for tage in values:
sps.params['tau'] = tau
sps.params['tage'] = tage
#sps.params['sf_slope'] = sf_slope
sfh_params = {'tage': tage*1e9, 'tau': tau*1e9}
w, spec = sps.get_spectrum(tage=tage, peraa=True)
mw, myspec, mstar = mysps.get_galaxy_spectrum(**sfh_params)
rax.plot(mw, myspec / spec, label=r'{}={:4.2f}'.format(pname, tage))
dax.plot(mw, spec - myspec, label=r'{}={:4.2f}'.format(pname, tage))
wax.plot(sspages, mysps.all_ssp_weights, '-o', label=r'{}={:4.2f}'.format(pname, tage))
rax.set_xlim(wlo, whi)
rax.set_ylabel('pro / FSPS')
dax.set_xlim(wlo, whi)
dax.set_ylabel('FSPS - pro')
[ax.legend(loc=0, prop={'size': 10}) for ax in [rax, dax, wax]]
wax.set_yscale('log')
wax.set_xlabel('log t$_{lookback}$')
wax.set_ylabel('weight')
[ax.set_title('SFH={} ({} model)'.format(sfh, sfhtype[sfh]))
for ax in [rax, wax]]
return [sfig, wfig]
def test_taumodel_sft(values=11 - 10**np.linspace(np.log10(0.11), 1, 9),
tau=1.0, sf_slope=0, tage=11.0, sfh=1):
"""Test (delayed-) tau models with a truncation
"""
pname = 'sf_trunc'
sps.params['sfh'] = sfh
mysps.sfh_type = sfhtype[sfh]
mysps.configure()
sfig, saxes = pl.subplots(2, 1, figsize=(11, 8.5))
rax, dax = saxes
wfig, wax = pl.subplots()
for sf_trunc in values:
sps.params['tau'] = tau
sps.params['tage'] = tage
sps.params['sf_trunc'] = sf_trunc
sps.params['sf_slope'] = sf_slope
sfh_params = {'tage': tage*1e9, 'tau': tau*1e9, 'sf_trunc': sf_trunc*1e9,
'sf_slope': sf_slope / 1e9}
w, spec = sps.get_spectrum(tage=tage, peraa=True)
mw, myspec, mstar = mysps.get_galaxy_spectrum(**sfh_params)
rax.plot(mw, myspec / spec, label=r'{}={:4.2f}'.format(pname, sf_trunc))
dax.plot(mw, spec - myspec, label=r'{}={:4.2f}'.format(pname, sf_trunc))
wax.plot(sspages, mysps.all_ssp_weights, '-o', label=r'{}={:4.2f}'.format(pname, sf_trunc))
#print(mysps.all_ssp_weights.sum())
if tage > sf_trunc:
wax.axvline(np.log10((tage - sf_trunc) * 1e9), linestyle=':', color='k')
rax.set_xlim(wlo, whi)
rax.set_ylabel('pro / FSPS')
dax.set_xlim(wlo, whi)
dax.set_ylabel('FSPS - pro')
    [ax.text(0.1, 0.8, '$\\tau_{{SF}}={}, tage={}$\n sf_slope={:3.1f}'.format(tau, tage, sf_slope), transform=ax.transAxes)
for ax in [rax, dax, wax]]
[ax.legend(loc=0, prop={'size': 10}) for ax in [rax, dax, wax]]
wax.set_yscale('log')
wax.set_xlabel('log t$_{lookback}$')
wax.set_ylabel('weight')
logttrunc = np.log10((tage - values) * 1e9)
wax.set_xlim(logttrunc.min() - 0.5, logttrunc.max() + 0.5)
[ax.set_title('SFH={} ({} model)'.format(sfh, sfhtype[sfh]))
for ax in [rax, wax]]
return [sfig, wfig]
def test_taumodel_sfslope(values=np.linspace(-10, 10, 9),
tau=10.0, sf_trunc=10.0, tage=11.0, sfh=5):
"""Test (delayed-) tau models
"""
pname = 'sf_slope'
sps.params['sfh'] = sfh
mysps.sfh_type = sfhtype[sfh]
mysps.configure()
sfig, saxes = pl.subplots(2, 1, figsize=(11, 8.5))
rax, dax = saxes
wfig, wax = pl.subplots()
for sf_slope in values:
sps.params['tau'] = tau
sps.params['tage'] = tage
sps.params['sf_trunc'] = sf_trunc
sps.params['sf_slope'] = sf_slope
sfh_params = {'tage': tage*1e9, 'tau': tau*1e9, 'sf_trunc': sf_trunc*1e9,
'sf_slope': sf_slope / 1e9}
w, spec = sps.get_spectrum(tage=tage, peraa=True)
mw, myspec, mstar = mysps.get_galaxy_spectrum(**sfh_params)
rax.plot(mw, myspec / spec, label=r'{}={:4.2f}'.format(pname, sf_slope))
dax.plot(mw, spec - myspec, label=r'{}={:4.2f}'.format(pname, sf_slope))
wax.plot(sspages, mysps.all_ssp_weights, '-o', label=r'{}={:4.2f}'.format(pname, sf_slope))
wax.axvline(np.log10((tage - sf_trunc) * 1e9), linestyle=':', color='k')
#print(mysps.all_ssp_weights.sum())
rax.set_xlim(wlo, whi)
rax.set_ylabel('pro / FSPS')
dax.set_xlim(wlo, whi)
dax.set_ylabel('FSPS - pro')
[ax.legend(loc=0, prop={'size': 10}) for ax in [rax, dax, wax]]
    [ax.text(0.1, 0.8, '$\\tau_{{SF}}={}, tage={}$\n sf_trunc={}'.format(tau, tage, sf_trunc), transform=ax.transAxes)
for ax in [rax, dax, wax]]
wax.set_yscale('log')
wax.set_xlabel('log t$_{lookback}$')
wax.set_ylabel('weight')
logttrunc = np.log10((tage - values) * 1e9)
wax.set_xlim(logttrunc.min() - 0.5, logttrunc.max() + 0.5)
[ax.set_title('SFH={} ({} model)'.format(sfh, sfhtype[sfh]))
for ax in [rax, wax]]
return [sfig, wfig]
if __name__ == "__main__":
main()
| 10,770 | 37.605735 | 121 |
py
|
prospector
|
prospector-master/misc/test_stepsfh.py
|
import matplotlib.pyplot as pl
import numpy as np
from bsfh.sources import ssp_basis
sps = ssp_basis.StepSFHBasis(interp_type='logarithmic')
nbin = 7
ages = np.linspace(7, 10, nbin+1)
params = {'agebins': np.array([ages[:-1], ages[1:]]).T,
'mass': 10**ages[1:] - 10**ages[:-1] }
sps.update(**params)
w = sps.ssp_weights
pl.plot(sps.logage, w)
agebins = sps.params['agebins']
masses = sps.params['mass']
#w = np.zeros(len(self.logage))
for (t1, t2), mass in zip(agebins, masses):
print(t1, t2, mass)
pl.axvline(t1, linestyle=':')
pl.plot(sps.logage, sps.bin_weights(t1, t2)[1:])
spec, phot, x = sps.get_spectrum(**params)
| 651 | 22.285714 | 55 |
py
|
prospector
|
prospector-master/misc/diagnostics.py
|
#Take the results from MCMC fitting of clusters
# and make diagnostic plots, or derive predictions for
# observables, etc..
import numpy as np
import matplotlib.pyplot as pl
import triangle
import pickle
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def diagnostic_plots(sample_file, sps, model_file=None,
powell_file=None, inmod=None,
showpars=None,
nspec=5, thin=10, start=0, outname=None):
"""
Plots a number of diagnostics. These include:
spectrum -
the observed spectrum, the spectra produced from a given number of samples of the
posterior parameter space, the spectrum produced from marginalized means of each
parameter, the spectrum at the initial position from Powell minimization, and the
applied calibration model.
spectrum_blue -
same as above but for the blue region of the spectrum
sed -
as for spectrum, but f_nu at the effective wavelength of the
filters is shown instead.
stars -
just the stellar dust model for samples of the posterior.
spectrum_residuals -
plots of spectrum residuals for a given number of samples of the posterior
sed_residuals -
broadband photometry residuals, in units of f_nu
x_vs_step -
the evolution of the walkers in each parameter as a function of iteration
lnp_vs_step -
the evolution of the walkers in likelihood
triangle -
a corner plot of parameter covariances
"""
#read results and set up model
if outname is None:
outname = sample_file#''.join(sample_file.split('.')[:-1])
sample_results, pr, model = read_pickles(sample_file, model_file=model_file,
powell_file=powell_file, inmod=inmod)
    for k, v in model.params.items():
try:
sps.params[k] = v
except KeyError:
pass
## Plot spectra and SEDs
##
#rindex = model_obs(sample_results, sps, photflag=0, outname=outname, nsample=nspec,
# wlo=3400, whi =10e3, start=start)
#_ = model_obs(sample_results, sps, photflag=0, outname=outname, rindex=rindex,
# wlo=3600, whi=4450, extraname='_blue', start=start)
#_ = model_obs(sample_results, sps, photflag=1, outname=outname, nsample=15,
# wlo=2500, whi=8.5e3, start=start)
#stellar_pop(sample_results, sps, outname=outname, nsample=nspec,
# wlo=3500, whi=9.5e3, start=start,
# alpha = 0.5, color = 'green')
## Plot spectral and SED residuals
##
#residuals(sample_results, sps, photflag=0, outname=outname, nsample=nspec,
# linewidth=0.5, alpha=0.3, color='blue', marker=None, start=start, rindex=rindex)
#residuals(sample_results, sps, photflag=1, outname = outname, nsample = 15,
# linewidth=0.5, alpha=0.3, color='blue', marker='o', start=start, rindex=rindex)
## Plot parameters versus step
##
param_evol(sample_results, outname=outname, showpars=showpars)
## Plot lnprob vs step (with a zoom-in)
##
pl.figure()
pl.clf()
nwalk = sample_results['lnprobability'].shape[0]
for j in range(nwalk):
pl.plot(sample_results['lnprobability'][j,:])
pl.ylabel('lnP')
pl.xlabel('step #')
pl.savefig('{0}.lnP_vs_step.png'.format(outname))
pl.close()
#yl = sample_results['lnprobability'].max() + np.array([-3.0 * sample_results['lnprobability'][:,-1].std(), 10])
#pl.ylim(yl[0], yl[1])
#pl.savefig('{0}.lnP_vs_step_zoom.png'.format(outname))
#pl.close()
## Triangle plot
##
subtriangle(sample_results, outname=outname,
showpars=showpars,
start=start, thin=thin)
return outname, sample_results, model, pr
def model_comp(theta, model, sps, photflag=0, inlog=True):
"""
Generate and return various components of the total model for a
given set of parameters
"""
obs, _, _ = obsdict(model.obs, photflag=photflag)
mask = obs['mask']
mu = model.mean_model(theta, sps=sps)[photflag][mask]
spec = obs['spectrum'][mask]
wave = obs['wavelength'][mask]
if photflag == 0:
cal = model.calibration()[mask]
try:
#model.gp.sigma = obs['unc'][mask]/mu
s = model.params['gp_jitter']
a = model.params['gp_amplitude']
l = model.params['gp_length']
model.gp.factor(s, a, l, check_finite = False, force=True)
if inlog:
mu = np.log(mu)
delta = model.gp.predict(spec - mu - cal)
else:
delta = model.gp.predict(spec - mu*cal)
        except Exception:
delta = 0
else:
mask = np.ones(len(obs['wavelength']), dtype= bool)
cal = np.ones(len(obs['wavelength']))
delta = np.zeros(len(obs['wavelength']))
return mu, cal, delta, mask, wave
def model_obs(sample_results, sps, photflag=0, outname=None,
start=0, rindex =None, nsample=10,
wlo=3500, whi=9e3, extraname=''):
"""
Plot the observed spectrum and overlay samples of the model
posterior, including different components of that model.
"""
title = ['Spectrum', 'SED (photometry)']
start = np.min([start, sample_results['chain'].shape[1]])
flatchain = sample_results['chain'][:,start:,:]
flatchain = flatchain.reshape(flatchain.shape[0] * flatchain.shape[1],
flatchain.shape[2])
# draw samples
if rindex is None:
rindex = np.random.uniform(0, flatchain.shape[0], nsample).astype( int )
else:
nsample = len(rindex)
# set up the observation dictionary for spectrum or SED
obs, outn, marker = obsdict(sample_results, photflag)
# set up plot window and plot data
pl.figure()
pl.axhline( 0, linestyle = ':', color ='black')
pl.plot(obs['wavelength'], obs['spectrum'],
marker=marker, linewidth=0.5,
color='blue', label='observed')
# plot the minimization result
theta = sample_results['initial_center']
ypred, res, cal, mask, spop = model_components(theta, sample_results, obs, sps, photflag=photflag)
pl.plot(obs['wavelength'][mask], ypred + res,
marker=marker, alpha=0.5, linewidth=0.3,
color='cyan', label='minimization result')
# loop over drawn samples and plot the model components
label = ['full model', 'calib.', 'GP']
for i in range(nsample):
theta = flatchain[rindex[i],:]
ypred, res, cal, mask, spop = model_components(theta, sample_results, obs, sps, photflag=photflag)
pl.plot(obs['wavelength'][mask], np.zeros(mask.sum()) + res,
linewidth=0.5, alpha=0.5, color='red', label=label[2])
pl.plot(obs['wavelength'], cal * sample_results['model'].params.get('linescale', 1.0),
linewidth=0.5, color='magenta', label=label[1])
pl.plot(obs['wavelength'][mask], ypred + res,
marker=marker, alpha=0.5 , color='green', label=label[0])
label = 3 * [None]
pl.legend(loc=0, fontsize='small')
pl.xlim(wlo, whi)
pl.xlabel(r'$\AA$')
pl.ylabel('Rate')
pl.title(title[photflag])
if outname is not None:
pl.savefig('{0}.{1}{2}.png'.format(outname, outn, extraname), dpi=300)
pl.close()
return rindex
def stellar_pop(sample_results, sps, outname=None, normalize_by=None,
start=0, rindex=None, nsample=10,
wlo=3500, whi=9e3, extraname='', **kwargs):
"""
Plot samples of the posterior for just the stellar population and
dust model.
"""
start = np.min([start, sample_results['chain'].shape[1]])
flatchain = sample_results['chain'][:,start:,:]
flatchain = flatchain.reshape(flatchain.shape[0] * flatchain.shape[1],
flatchain.shape[2])
# draw samples
if rindex is None:
rindex = np.random.uniform(0, flatchain.shape[0], nsample).astype( int )
# set up the observation dictionary for spectrum or SED
obs, outn, marker = obsdict(sample_results, 0)
# set up plot window
pl.figure()
pl.axhline( 0, linestyle=':', color='black')
# loop over drawn samples and plot the model components
label = ['Stars & Dust']
xl = ''
for i in range(nsample):
theta = flatchain[rindex[i],:]
ypred, res, cal, mask, spop = model_components(theta, sample_results, obs, sps, photflag=0)
if normalize_by is not None:
spop /= spop[normalize_by]
xl = '/C'
pl.plot(obs['wavelength'], spop,
label = label[0], **kwargs)
label = 3 * [None]
pl.legend(loc = 0, fontsize = 'small')
pl.xlim(wlo, whi)
pl.xlabel(r'$\AA$')
pl.ylabel(r'L$_\lambda {0}$ (L$_\odot/\AA$)'.format(xl))
if outname is not None:
pl.savefig('{0}.{1}{2}.png'.format(outname, 'stars', extraname), dpi=300)
pl.close()
def residuals(sample_results, sps, photflag=0, outname=None,
nsample=5, rindex=None, start=0,
wlo=3600, whi=7500, **kwargs):
"""
Plot residuals of the observations from samples of the model
posterior. This is done in terms of relative, uncertainty
normalized, and absolute residuals. Extra keywords are passed to
plot().
"""
start = np.min([start, sample_results['chain'].shape[1]])
flatchain = sample_results['chain'][:,start:,:]
flatchain = flatchain.reshape(flatchain.shape[0] * flatchain.shape[1],
flatchain.shape[2])
# draw samples
if rindex is None:
rindex = np.random.uniform(0, flatchain.shape[0], nsample).astype( int )
nsample = len(rindex)
# set up the observation dictionary for spectrum or SED
obs, outn, marker = obsdict(sample_results, photflag)
# set up plot window
fig, axes = pl.subplots(3,1)
# draw guidelines
[a.axhline( int(i==0), linestyle=':', color='black') for i,a in enumerate(axes)]
axes[0].set_ylabel('obs/model')
axes[0].set_ylim(0.5,1.5)
axes[0].set_xticklabels([])
axes[1].set_ylabel(r'(obs-model)/$\sigma$')
axes[1].set_ylim(-10,10)
axes[1].set_xticklabels([])
axes[2].set_ylabel(r'(obs-model)')
axes[2].set_xlabel(r'$\AA$')
# loop over the drawn samples
for i in range(nsample):
theta = flatchain[rindex[i],:]
ypred, res, cal, mask, spop = model_components(theta, sample_results, obs, sps, photflag=photflag)
wave, ospec, mod = obs['wavelength'][mask], obs['spectrum'][mask], (ypred + res)
axes[0].plot(wave, ospec / mod, **kwargs)
axes[1].plot(wave, (ospec - mod) / obs['unc'][mask], **kwargs)
axes[2].plot(wave, (ospec - mod), **kwargs)
if photflag == 0:
[a.set_xlim(wlo,whi) for a in axes]
fig.subplots_adjust(hspace =0)
if outname is not None:
fig.savefig('{0}.{1}_residuals.png'.format(outname, outn), dpi=300)
pl.close()
def obsdict(inobs, photflag):
"""
Return a dictionary of observational data, generated depending on
whether you're matching photometry or spectroscopy.
"""
obs = inobs.copy()
if photflag == 0:
outn = 'spectrum'
marker = None
elif photflag == 1:
outn = 'sed'
marker = 'o'
obs['wavelength'] = np.array([f.wave_effective for f in obs['filters']])
        obs['spectrum'] = 10**(-0.4 * obs['mags'])
obs['unc'] = obs['mags_unc'] * obs['spectrum']
obs['mask'] = obs['mags_unc'] > 0
return obs, outn, marker
def param_evol(sample_results, outname=None, showpars=None, start=0):
"""
Plot the evolution of each parameter value with iteration #, for
each chain.
"""
chain = sample_results['chain'][:,start:,:]
nwalk = chain.shape[0]
parnames = np.array(theta_labels(sample_results['model'].theta_desc))
#restrict to desired parameters
if showpars is not None:
ind_show = np.array([p in showpars for p in parnames], dtype= bool)
parnames = parnames[ind_show]
chain = chain[:,:,ind_show]
#set up plot windows
ndim = len(parnames)
nx = int(np.floor(np.sqrt(ndim)))
ny = int(np.ceil(ndim*1.0/nx))
sz = np.array([nx,ny])
factor = 3.0 # size of one side of one panel
lbdim = 0.2 * factor # size of left/bottom margin
trdim = 0.2 * factor # size of top/right margin
whspace = 0.05*factor # w/hspace size
plotdim = factor * sz + factor *(sz-1)* whspace
dim = lbdim + plotdim + trdim
fig, axes = pl.subplots(nx, ny, figsize = (dim[1], dim[0]))
lb = lbdim / dim
tr = (lbdim + plotdim) / dim
fig.subplots_adjust(left=lb[1], bottom=lb[0], right=tr[1], top=tr[0],
wspace=whspace, hspace=whspace)
#sequentially plot the chains in each parameter
for i in range(ndim):
ax = axes.flatten()[i]
for j in range(nwalk):
ax.plot(chain[j,:,i])
ax.set_title(parnames[i])
if outname is not None:
fig.savefig('{0}.x_vs_step.png'.format(outname))
pl.close()
def theta_labels(desc):
"""
Using the theta_desc parameter dictionary, return a list of the model
    parameter names, in the same order as the sampling chain array.
    """
label, index = [], []
for p in desc.keys():
nt = desc[p]['N']
name = p
        if p == 'amplitudes':
            name = 'A'
        if nt == 1:
            label.append(name)
            index.append(desc[p]['i0'])
        else:
            for i in range(nt):
label.append(name+'{0}'.format(i+1))
index.append(desc[p]['i0']+i)
return [l for (i,l) in sorted(zip(index,label))]
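# A hedged illustration (this theta_desc layout is assumed, not taken from a
# real run): a scalar 'mass' plus a 3-component 'amplitudes' block would be
# labeled in sampling-chain order.
#
#   desc = {'mass': {'N': 1, 'i0': 0},
#           'amplitudes': {'N': 3, 'i0': 1}}
#   theta_labels(desc)  # -> ['mass', 'A1', 'A2', 'A3']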
def sample_photometry(sample_results, sps, filterlist,
start=0, wthin=16, tthin=10):
chain, model = sample_results['chain'], sample_results['model']
    for k, v in model.sps_fixed_params.items():
        sps.params[k] = v
    model.filters = filterlist
    nwalkers, nt, ndim = chain.shape
    wit = range(0, nwalkers, wthin)  # walkers to use
    tit = range(start, nt, tthin)  # time steps to use
    phot = np.zeros((len(wit), len(tit), len(filterlist)))  # build storage
    for wi, i in enumerate(wit):
        for ti, j in enumerate(tit):
            s, p, m = model.model(chain[i, j, :], sps=sps)
            phot[wi, ti, :] = p
            #mass[wi, ti] = m
return phot, wit, tit
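# Hedged usage sketch (the filter names and a previously loaded `res` results
# dictionary are assumptions for illustration):
#
#   from sedpy import observate
#   filters = observate.load_filters(['sdss_g0', 'sdss_r0'])
#   phot, wit, tit = sample_photometry(res, sps, filters, start=100)
#   # phot.shape == (len(wit), len(tit), 2)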
## All this because scipy changed
# the name of one class, which shouldn't even be a class.
renametable = {
'Result': 'OptimizeResult',
}
def mapname(name):
if name in renametable:
return renametable[name]
return name
def mapped_load_global(self):
module = mapname(self.readline()[:-1])
name = mapname(self.readline()[:-1])
klass = self.find_class(module, name)
self.append(klass)
def load(file):
unpickler = pickle.Unpickler(file)
unpickler.dispatch[pickle.GLOBAL] = mapped_load_global
return unpickler.load()
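# Hedged usage sketch (the filename is hypothetical): the dispatch-table
# override above only works with the pure-Python Unpickler, so on Python 3
# you would need pickle._Unpickler rather than pickle.Unpickler.
#
#   with open('old_prospector_results.pkl', 'rb') as f:
#       results = load(f)  # pickled 'Result' is remapped to 'OptimizeResult'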
| 15,450 | 35.875895 | 116 |
py
|
prospector
|
prospector-master/misc/test_sft.py
|
import sys, os, time
import numpy as np
import matplotlib.pyplot as pl
import fsps
from prospect.sources import CompositeSFH
from sedpy import observate
sfhtype = {1:'tau', 4: 'delaytau', 5: 'simha'}
# build FSPS and Prospector sps objects
zcontinuous = 1
sps = fsps.StellarPopulation(zcontinuous=zcontinuous)
tres = np.round(len(sps.ssp_ages) / 94.)
mysps = CompositeSFH(sfh_type='tau', interp_type='logarithmic', mint_log=5.45,
flux_interp='linear', zcontinuous=zcontinuous)
mysps.configure()
# Save the Prospector SSP time axis
sspages = np.insert(mysps.logage, 0, mysps.mint_log)
# Set up some parameters that cause trouble in FSPS
pname = 'tage' # the parameter that will vary
badsimha = {'logtau': [1.34, 0.62], 'delt_trunc': [0.91, 0.98],
'sf_tanslope': [1.24, -1.54], }
i, sfh = 1, 5
ages = np.linspace(1.4, 1.7, 20)
tau = 10**badsimha['logtau'][i]
delt_trunc = badsimha['delt_trunc'][i]
sf_slope = np.tan(badsimha['sf_tanslope'][i])
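# Note: FSPS parameterizes the post-truncation slope of the Simha SFH through
# an angle, sf_tanslope, so the linear slope handed to FSPS is its tangent,
# and the truncation time is the fixed fraction delt_trunc of tage (set
# inside the loop below).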
# filters to project onto
filters = observate.load_filters(['galex_FUV', 'sdss_r0'])
# set up output for spectra
spec = np.zeros([len(ages), len(sps.wavelengths)])
myspec = np.zeros([len(ages), len(sps.wavelengths)])
# Set the SFH type for both sps objects
sps.params['sfh'] = sfh
mysps.sfh_type = sfhtype[sfh]
mysps.configure()
# Instantiate figures and axes
sfig, saxes = pl.subplots(2, 1, figsize=(11, 8.5))
rax, dax = saxes
wfig, wax = pl.subplots()
# Loop over the varying parameter
for i, tage in enumerate(ages):
    # Set FSPS parameters, then get and store the spectrum
sf_trunc = tage * delt_trunc
sps.params['tau'] = tau
sps.params['tage'] = tage
sps.params['sf_slope'] = sf_slope
sps.params['sf_trunc'] = sf_trunc
w, s = sps.get_spectrum(tage=tage, peraa=True)
spec[i, :] = s
    # Set up Prospector parameters (with unit conversions), get the spectrum, and store it.
sfh_params = {'tage': tage*1e9, 'tau': tau*1e9,
'sf_slope': -sf_slope / 1e9, 'sf_trunc': sf_trunc*1e9}
mw, mys, mstar = mysps.get_galaxy_spectrum(**sfh_params)
myspec[i, :] = mys
# Do some plotting for each age
wax.plot(sspages, mysps.all_ssp_weights, '-o', label=r'{}={:4.2f}'.format(pname, tage))
rax.plot(mw, mys / s, label=r'{}={:4.2f}'.format(pname, tage))
dax.plot(mw, s - mys, label=r'{}={:4.2f}'.format(pname, tage))
# Get synthetic photometry for both sps objects
mags = observate.getSED(sps.wavelengths, spec, filterlist=filters)
mymags = observate.getSED(sps.wavelengths, myspec, filterlist=filters)
# Plot mags vs age
iband = 0
fig, ax = pl.subplots()
ax.plot(ages, mags[:, iband], '-o', label='FSPS, tres={}'.format(int(tres)))
ax.plot(ages, mymags[:, iband], '-o', label='Pro')
ax.set_xlabel('tage (Gyr)')
ax.set_ylabel(r'$M_{{AB}}$ ({})'.format(filters[iband].name))
ax.text(0.1, 0.85, r'$\tau_{{SF}}={:4.2f}, \, \Delta t_{{trunc}}={:4.2f}$'.format(tau, delt_trunc),
transform=ax.transAxes)
ax.legend(loc=0)
fig.savefig('figures/sft_compare.pdf')
# Prettify axes
rax.set_xlim(1e3, 2e4)
rax.set_ylabel('pro / FSPS')
dax.set_xlim(1e3, 2e4)
dax.set_ylabel('FSPS - pro')
[ax.legend(loc=0, prop={'size': 10}) for ax in [rax, dax, wax]]
wax.set_yscale('log')
wax.set_xlabel('log t$_{lookback}$')
wax.set_ylabel('weight')
[ax.set_title('SFH={} ({} model)'.format(sfh, sfhtype[sfh]))
for ax in [rax, wax]]
rax.set_ylim(0, 10)
pl.show()
| 3,383 | 32.84 | 99 |
py
|
prospector
|
prospector-master/doc/conf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# prospector documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 8 16:26:26 2018.
#
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
extensions = [
'sphinx.ext.autodoc',
    # 'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
# 'sphinx.ext.doctest',
# 'sphinx.ext.viewcode',
# 'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'prospector'
copyright = '2014-2020, Benjamin Johnson and Contributors'
author = 'Benjamin Johnson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = '0.4'
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
autodoc_mock_imports = ["sedpy", "h5py"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_options = {"logo_only": True}
# html_theme_options = {}
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
# html_title = 'prospector v0.4'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
html_static_path = ["_static"]
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/logo_name.png"
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.png"
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
html_last_updated_fmt = ''
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'prospectordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
#latex_documents = [
# (master_doc, 'prospector.tex', 'prospector Documentation',
# 'Benjamin Johnson', 'manual'),]
| 5,534 | 30.99422 | 80 |
py
|
Efficient-FedRec
|
Efficient-FedRec-main/src/main.py
|
import argparse
from pathlib import Path
from tqdm import tqdm
import random
import wandb
import numpy as np
import os
import pickle
import torch
from torch.utils.data import DataLoader
import torch.optim as optim
from agg import Aggregator
from model import Model, TextEncoder, UserEncoder
from data import TrainDataset, NewsDataset, UserDataset
from metrics import evaluation_split
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--wandb_entity", type=str)
parser.add_argument(
"--mode", type=str, default="train", choices=["train", "test", "predict"]
)
parser.add_argument(
"--data_path",
type=str,
default=os.getenv("AMLT_DATA_DIR", "../data"),
help="path to downloaded raw adressa dataset",
)
parser.add_argument(
"--out_path",
type=str,
default=os.getenv("AMLT_OUTPUT_DIR", "../output"),
help="path to downloaded raw adressa dataset",
)
parser.add_argument(
"--data",
type=str,
default="mind",
choices=["mind", "adressa"],
help="decide which dataset for preprocess",
)
parser.add_argument("--bert_type", type=str, default="bert-base-uncased")
parser.add_argument(
"--trainable_layers", type=int, nargs="+", default=[6, 7, 8, 9, 10, 11]
)
parser.add_argument("--user_lr", type=float, default=0.00005)
parser.add_argument("--news_lr", type=float, default=0.00005)
parser.add_argument("--user_num", type=int, default=50)
parser.add_argument("--max_his_len", type=float, default=50)
parser.add_argument(
"--npratio",
type=int,
default=20,
help="randomly sample neg_num negative impression for every positive behavior",
)
parser.add_argument("--max_train_steps", type=int, default=2000)
parser.add_argument("--validation_steps", type=int, default=100)
parser.add_argument("--name", type=str, default="efficient-fedrec")
args = parser.parse_args()
return args
def process_news_grad(candidate_info, his_info):
news_grad = {}
candidate_news, candidate_vecs, candidate_grad = candidate_info
his, his_vecs, his_grad = his_info
    candidate_news, candidate_grad = (
        candidate_news.reshape(-1,),
        candidate_grad.reshape(-1, 400),
    )
his, his_grad = his.reshape(-1,), his_grad.reshape(-1, 400)
for nid, grad in zip(his, his_grad):
if nid in news_grad:
news_grad[nid] += grad
else:
news_grad[nid] = grad
    for nid, grad in zip(candidate_news, candidate_grad):
if nid in news_grad:
news_grad[nid] += grad
else:
news_grad[nid] = grad
return news_grad
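# Hedged illustration of the accumulation above (shapes assumed): for a batch
# of B samples with K candidates each, candidate_grad arrives as (B, K, 400);
# flattening to (-1, 400) lets gradients for a news id that appears in several
# impressions or histories be summed into a single news_grad entry.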
def process_user_grad(model_param, sample_num, user_sample):
user_grad = {}
for name, param in model_param:
user_grad[name] = param.grad * (sample_num / user_sample)
return user_grad
def collect_users_nids(train_sam, users, user_indices, nid2index):
user_nids = [0]
user_sample = 0
for user in users:
sids = user_indices[user]
user_sample += len(sids)
for idx in sids:
_, pos, neg, his, _ = train_sam[idx]
user_nids.extend([nid2index[i] for i in list(set([pos] + neg + his))])
return list(set(user_nids)), user_sample
def train_on_step(
agg, model, args, user_indices, user_num, train_sam, nid2index, news_index, device
):
# sample users
    users = random.sample(list(user_indices.keys()), user_num)
nids, user_sample = collect_users_nids(train_sam, users, user_indices, nid2index)
agg.gen_news_vecs(nids)
train_ds = TrainDataset(
args, train_sam, users, user_indices, nid2index, agg, news_index
)
train_dl = DataLoader(train_ds, batch_size=16384, shuffle=True, num_workers=0)
model.train()
loss = 0
for cnt, batch_sample in enumerate(train_dl):
model.user_encoder.load_state_dict(agg.user_encoder.state_dict())
optimizer = optim.SGD(model.parameters(), lr=args.user_lr)
candidate_news, candidate_news_vecs, his, his_vecs, label = batch_sample
candidate_news_vecs = candidate_news_vecs.to(device)
his_vecs = his_vecs.to(device)
sample_num = his_vecs.shape[0]
label = label.to(device)
# compute gradients for user model and news representations
candidate_news_vecs.requires_grad = True
his_vecs.requires_grad = True
bz_loss, y_hat = model(candidate_news_vecs, his_vecs, label)
loss += bz_loss.detach().cpu().numpy()
optimizer.zero_grad()
bz_loss.backward()
        candidate_grad = candidate_news_vecs.grad.detach().cpu() * (
            sample_num / user_sample
        )
candidate_vecs = candidate_news_vecs.detach().cpu().numpy()
candidate_news = candidate_news.numpy()
his_grad = his_vecs.grad.detach().cpu() * (sample_num / user_sample)
his_vecs = his_vecs.detach().cpu().numpy()
his = his.numpy()
news_grad = process_news_grad(
            [candidate_news, candidate_vecs, candidate_grad], [his, his_vecs, his_grad]
)
user_grad = process_user_grad(
model.user_encoder.named_parameters(), sample_num, user_sample
)
agg.collect(news_grad, user_grad)
loss = loss / (cnt + 1)
agg.update()
return loss
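# One call to train_on_step is one federated round: the server-side Aggregator
# encodes only the news ids the sampled users actually need, clients
# back-propagate through the small user model plus the cached news vectors,
# and only those gradients, scaled by each client's share of the samples, are
# collected for a single server-side update. The heavy BERT news encoder
# therefore never runs on clients, which is the efficiency idea the repo name
# refers to.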
def validate(args, agg, valid_sam, nid2index, news_index, device):
agg.gen_news_vecs(list(range(len(news_index))))
agg.user_encoder.eval()
user_dataset = UserDataset(args, valid_sam, agg.news_vecs, nid2index)
user_vecs = []
user_dl = DataLoader(user_dataset, batch_size=4096, shuffle=False, num_workers=0)
with torch.no_grad():
for his in tqdm(user_dl):
his = his.to(device)
user_vec = agg.user_encoder(his).detach().cpu().numpy()
user_vecs.append(user_vec)
user_vecs = np.concatenate(user_vecs)
val_scores = evaluation_split(agg.news_vecs, user_vecs, valid_sam, nid2index)
val_auc, val_mrr, val_ndcg, val_ndcg10 = [
np.mean(i) for i in list(zip(*val_scores))
]
return val_auc, val_mrr, val_ndcg, val_ndcg10
def test(args, data_path, out_model_path, out_path, device):
with open(data_path / "test_sam_uid.pkl", "rb") as f:
test_sam = pickle.load(f)
with open(data_path / "bert_test_nid2index.pkl", "rb") as f:
test_nid2index = pickle.load(f)
test_news_index = np.load(data_path / "bert_test_news_index.npy", allow_pickle=True)
text_encoder = TextEncoder(bert_type=args.bert_type).to(device)
user_encoder = UserEncoder().to(device)
ckpt = torch.load(out_model_path / f"{args.name}-{args.data}.pkl")
text_encoder.load_state_dict(ckpt["text_encoder"])
user_encoder.load_state_dict(ckpt["user_encoder"])
test_news_dataset = NewsDataset(test_news_index)
news_dl = DataLoader(
test_news_dataset, batch_size=512, shuffle=False, num_workers=0
)
news_vecs = []
text_encoder.eval()
for news in tqdm(news_dl):
news = news.to(device)
news_vec = text_encoder(news).detach().cpu().numpy()
news_vecs.append(news_vec)
news_vecs = np.concatenate(news_vecs)
user_dataset = UserDataset(args, test_sam, news_vecs, test_nid2index)
user_vecs = []
user_dl = DataLoader(user_dataset, batch_size=4096, shuffle=False, num_workers=0)
user_encoder.eval()
for his in tqdm(user_dl):
his = his.to(device)
user_vec = user_encoder(his).detach().cpu().numpy()
user_vecs.append(user_vec)
user_vecs = np.concatenate(user_vecs)
test_scores = evaluation_split(news_vecs, user_vecs, test_sam, test_nid2index)
test_auc, test_mrr, test_ndcg, test_ndcg10 = [
np.mean(i) for i in list(zip(*test_scores))
]
with open(out_path / f"log.txt", "a") as f:
f.write(
f"test auc: {test_auc:.4f}, mrr: {test_mrr:.4f}, ndcg5: {test_ndcg:.4f}, ndcg10: {test_ndcg10:.4f}\n"
)
def predict(args, data_path, out_model_path, out_path, device):
with open(data_path / "test_sam_uid.pkl", "rb") as f:
test_sam = pickle.load(f)
with open(data_path / "bert_test_nid2index.pkl", "rb") as f:
test_nid2index = pickle.load(f)
test_news_index = np.load(data_path / "bert_test_news_index.npy", allow_pickle=True)
text_encoder = TextEncoder(bert_type=args.bert_type).to(device)
user_encoder = UserEncoder().to(device)
ckpt = torch.load(out_model_path / f"{args.name}-{args.data}.pkl")
text_encoder.load_state_dict(ckpt["text_encoder"])
user_encoder.load_state_dict(ckpt["user_encoder"])
test_news_dataset = NewsDataset(test_news_index)
news_dl = DataLoader(
test_news_dataset, batch_size=512, shuffle=False, num_workers=0
)
news_vecs = []
text_encoder.eval()
for news in tqdm(news_dl):
news = news.to(device)
news_vec = text_encoder(news).detach().cpu().numpy()
news_vecs.append(news_vec)
news_vecs = np.concatenate(news_vecs)
user_dataset = UserDataset(args, test_sam, news_vecs, test_nid2index)
user_vecs = []
user_dl = DataLoader(user_dataset, batch_size=4096, shuffle=False, num_workers=0)
user_encoder.eval()
for his in tqdm(user_dl):
his = his.to(device)
user_vec = user_encoder(his).detach().cpu().numpy()
user_vecs.append(user_vec)
user_vecs = np.concatenate(user_vecs)
pred_lines = []
for i in tqdm(range(len(test_sam))):
impr_id, poss, negs, _, _ = test_sam[i]
user_vec = user_vecs[i]
news_ids = [test_nid2index[i] for i in poss + negs]
news_vec = news_vecs[news_ids]
y_score = np.multiply(news_vec, user_vec)
y_score = np.sum(y_score, axis=1)
pred_rank = (np.argsort(np.argsort(y_score)[::-1]) + 1).tolist()
pred_rank = '[' + ','.join([str(i) for i in pred_rank]) + ']'
pred_lines.append((int(impr_id), ' '.join([impr_id, pred_rank])+ '\n'))
pred_lines.sort(key=lambda x: x[0])
pred_lines = [x[1] for x in pred_lines]
with open(out_path / 'prediction.txt', 'w') as f:
f.writelines(pred_lines)
if __name__ == "__main__":
args = parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device("cuda:0")
torch.cuda.set_device(device)
if args.mode == "train":
wandb.init(
project=f"{args.name}-{args.data}", config=args, entity=args.wandb_entity
)
data_path = Path(args.data_path) / args.data
out_path = Path(args.out_path) / f"{args.name}-{args.data}"
out_model_path = out_path / "model"
out_model_path.mkdir(exist_ok=True, parents=True)
# load preprocessed data
with open(data_path / "bert_nid2index.pkl", "rb") as f:
nid2index = pickle.load(f)
news_index = np.load(data_path / "bert_news_index.npy", allow_pickle=True)
with open(data_path / "train_sam_uid.pkl", "rb") as f:
train_sam = pickle.load(f)
with open(data_path / "valid_sam_uid.pkl", "rb") as f:
valid_sam = pickle.load(f)
with open(data_path / "user_indices.pkl", "rb") as f:
user_indices = pickle.load(f)
news_dataset = NewsDataset(news_index)
agg = Aggregator(args, news_dataset, news_index, device)
model = Model().to(device)
best_auc = 0
for step in range(args.max_train_steps):
loss = train_on_step(
agg,
model,
args,
user_indices,
args.user_num,
train_sam,
nid2index,
news_index,
device,
)
wandb.log({"train loss": loss}, step=step + 1)
if (step + 1) % args.validation_steps == 0:
val_auc, val_mrr, val_ndcg, val_ndcg10 = validate(
args, agg, valid_sam, nid2index, news_index, device
)
wandb.log(
{
"valid auc": val_auc,
"valid mrr": val_mrr,
"valid ndcg@5": val_ndcg,
"valid ndcg@10": val_ndcg10,
},
step=step + 1,
)
with open(out_path / f"log.txt", "a") as f:
f.write(
f"[{step}] round auc: {val_auc:.4f}, mrr: {val_mrr:.4f}, ndcg5: {val_ndcg:.4f}, ndcg10: {val_ndcg10:.4f}\n"
)
if val_auc > best_auc:
best_auc = val_auc
wandb.run.summary["best_auc"] = best_auc
torch.save(
{
"text_encoder": agg.text_encoder.state_dict(),
"user_encoder": agg.user_encoder.state_dict(),
},
out_model_path / f"{args.name}-{args.data}.pkl",
)
with open(out_path / f"log.txt", "a") as f:
f.write(f"[{step}] round save model\n")
elif args.mode == "test":
data_path = Path(args.data_path) / args.data
out_path = Path(args.out_path) / f"{args.name}-{args.data}"
out_model_path = out_path / "model"
test(args, data_path, out_model_path, out_path, device)
elif args.mode == "predict":
data_path = Path(args.data_path) / args.data
out_path = Path(args.out_path) / f"{args.name}-{args.data}"
out_model_path = out_path / "model"
predict(args, data_path, out_model_path, out_path, device)
| 13,768 | 33.946701 | 131 |
py
|
Efficient-FedRec
|
Efficient-FedRec-main/src/agg.py
|
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from model import TextEncoder, UserEncoder
import torch.optim as optim
from data import NewsPartDataset
class NewsUpdatorDataset(Dataset):
def __init__(self, news_index, news_ids, news_grads):
self.news_index = news_index
self.news_grads = news_grads
self.news_ids = news_ids
def __len__(self):
return len(self.news_ids)
def __getitem__(self, idx):
nid = self.news_ids[idx]
return self.news_index[nid], self.news_grads[idx]
class Aggregator:
def __init__(self, args, news_dataset, news_index, device):
self.device = device
self.text_encoder = TextEncoder(bert_type=args.bert_type).to(device)
self.user_encoder = UserEncoder().to(device)
self.news_optimizer = optim.Adam(self.text_encoder.parameters(), lr=args.news_lr)
self.user_optimizer = optim.Adam(self.user_encoder.parameters(), lr=args.user_lr)
for param in self.text_encoder.bert.parameters():
param.requires_grad = False
for index, layer in enumerate(self.text_encoder.bert.encoder.layer):
if index in args.trainable_layers:
for param in layer.parameters():
param.requires_grad = True
if -1 in args.trainable_layers:
for param in self.text_encoder.bert.embeddings.parameters():
param.requires_grad = True
self.news_dataset = news_dataset
self.news_index = news_index
self.time = 0
self.cnt = 0
self._init_grad_param()
def _init_grad_param(self):
self.news_grads = {}
self.user_optimizer.zero_grad()
self.news_optimizer.zero_grad()
def gen_news_vecs(self, nids):
self.text_encoder.eval()
news_ds = NewsPartDataset(self.news_index, nids)
news_dl = DataLoader(news_ds, batch_size=2048, shuffle=False, num_workers=0)
news_vecs = np.zeros((len(self.news_index), 400), dtype='float32')
with torch.no_grad():
for nids, news in news_dl:
news = news.to(self.device)
news_vec = self.text_encoder(news).detach().cpu().numpy()
news_vecs[nids.numpy()] = news_vec
if np.isnan(news_vecs).any():
raise ValueError("news_vecs contains nan")
self.news_vecs = news_vecs
return news_vecs
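    # Hedged usage sketch: refresh vectors only for the ids a round touches.
    #   nids, user_sample = collect_users_nids(...)   # assumed helper call
    #   agg.gen_news_vecs(nids)        # fills the matching rows of news_vecs
    #   vecs = agg.get_news_vecs(nids) # cheap cache lookup afterwards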
def get_news_vecs(self, idx):
return self.news_vecs[idx]
def update(self):
self.update_user_grad()
self.update_news_grad()
self._init_grad_param()
self.cnt += 1
    def average_update_time(self):
        # NOTE: self.time is never incremented anywhere in this class, so this
        # currently returns 0; the timing hook appears to have been dropped.
        return self.time / self.cnt
def update_news_grad(self):
self.text_encoder.train()
self.news_optimizer.zero_grad()
news_ids, news_grads = [], []
for nid in self.news_grads:
news_ids.append(nid)
news_grads.append(self.news_grads[nid])
news_up_ds = NewsUpdatorDataset(self.news_index, news_ids, news_grads)
news_up_dl = DataLoader(news_up_ds, batch_size=128, shuffle=False, num_workers=0)
for news_index, news_grad in news_up_dl:
news_index = news_index.to(self.device)
news_grad = news_grad.to(self.device)
news_vecs = self.text_encoder(news_index)
news_vecs.backward(news_grad)
self.news_optimizer.step()
self.news_optimizer.zero_grad()
def update_user_grad(self):
self.user_optimizer.step()
self.user_optimizer.zero_grad()
def check_news_vec_same(self, nids, news_vecs):
assert (self.get_news_vecs(nids) == news_vecs).all(), "News vecs are not the same"
def collect(self, news_grad, user_grad):
# update user model params
for name, param in self.user_encoder.named_parameters():
if param.grad is None:
param.grad = user_grad[name]
else:
param.grad += user_grad[name]
# update news model params
for nid in news_grad:
if nid in self.news_grads:
self.news_grads[nid] += news_grad[nid]
else:
self.news_grads[nid] = news_grad[nid]
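    # collect() only accumulates: nothing is applied until update() runs one
    # optimizer step for the user encoder and one pass of update_news_grad()
    # for the touched news ids, so several clients' gradients share one step.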
| 4,402 | 34.224 | 90 |
py
|
Efficient-FedRec
|
Efficient-FedRec-main/src/model.py
|
import torch
from torch import nn
import torch.nn.functional as F
from transformers import BertModel
import numpy as np
class ScaledDotProductAttention(nn.Module):
def __init__(self, d_k):
super(ScaledDotProductAttention, self).__init__()
self.d_k = d_k
def forward(self, Q, K, V, attn_mask=None):
scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(self.d_k)
scores = torch.exp(scores)
if attn_mask is not None:
scores = scores * attn_mask
attn = scores / (torch.sum(scores, dim=-1, keepdim=True) + 1e-8)
context = torch.matmul(attn, V)
return context, attn
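# Note: the exp-then-normalize in ScaledDotProductAttention is a softmax
# written to support a multiplicative 0/1 attention mask (masked positions
# contribute exp(score) * 0), at the cost of skipping the usual
# max-subtraction numerical stabilization.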
class MultiHeadAttention(nn.Module):
def __init__(self, d_model, n_heads, d_k, d_v):
super(MultiHeadAttention, self).__init__()
self.d_model = d_model # 300
self.n_heads = n_heads # 20
self.d_k = d_k # 20
self.d_v = d_v # 20
self.W_Q = nn.Linear(d_model, d_k * n_heads) # 300, 400
self.W_K = nn.Linear(d_model, d_k * n_heads) # 300, 400
self.W_V = nn.Linear(d_model, d_v * n_heads) # 300, 400
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight, gain=1)
def forward(self, Q, K, V, attn_mask=None):
batch_size, seq_len, _ = Q.size()
q_s = self.W_Q(Q).view(batch_size, -1, self.n_heads, self.d_k).transpose(1,2)
k_s = self.W_K(K).view(batch_size, -1, self.n_heads, self.d_k).transpose(1,2)
v_s = self.W_V(V).view(batch_size, -1, self.n_heads, self.d_v).transpose(1,2)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(1).expand(batch_size, seq_len, seq_len)
attn_mask = attn_mask.unsqueeze(1).repeat(1, self.n_heads, 1, 1)
context, attn = ScaledDotProductAttention(self.d_k)(q_s, k_s, v_s, attn_mask)
context = context.transpose(1, 2).contiguous().view(batch_size, -1, self.n_heads * self.d_v)
return context
class AdditiveAttention(nn.Module):
def __init__(self, d_h, hidden_size=200):
super(AdditiveAttention, self).__init__()
self.att_fc1 = nn.Linear(d_h, hidden_size)
self.att_fc2 = nn.Linear(hidden_size, 1)
def forward(self, x, attn_mask=None):
bz = x.shape[0]
e = self.att_fc1(x)
e = nn.Tanh()(e)
alpha = self.att_fc2(e)
alpha = torch.exp(alpha)
if attn_mask is not None:
alpha = alpha * attn_mask.unsqueeze(2)
alpha = alpha / (torch.sum(alpha, dim=1, keepdim=True) + 1e-8)
x = torch.bmm(x.permute(0, 2, 1), alpha)
x = torch.reshape(x, (bz, -1)) # (bz, 400)
return x
class TextEncoder(nn.Module):
def __init__(self,
bert_type="bert-base-uncased",
word_embedding_dim=400,
dropout_rate=0.2,
enable_gpu=True):
super(TextEncoder, self).__init__()
self.dropout_rate = 0.2
self.bert = BertModel.from_pretrained(bert_type,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0)
self.additive_attention = AdditiveAttention(self.bert.config.hidden_size,
self.bert.config.hidden_size//2)
self.fc = nn.Linear(self.bert.config.hidden_size, word_embedding_dim)
def forward(self, text):
# text batch, 2, word
tokens = text[:,0,:]
atts = text[:,1,:]
text_vector = self.bert(tokens, attention_mask=atts)[0]
text_vector = self.additive_attention(text_vector)
text_vector = self.fc(text_vector)
return text_vector
class UserEncoder(nn.Module):
def __init__(self,
news_embedding_dim=400,
num_attention_heads=20,
query_vector_dim=200
):
super(UserEncoder, self).__init__()
self.dropout_rate = 0.2
self.multihead_attention = MultiHeadAttention(news_embedding_dim,
num_attention_heads, 20, 20)
self.additive_attention = AdditiveAttention(news_embedding_dim,
query_vector_dim)
def forward(self, clicked_news_vecs):
clicked_news_vecs = F.dropout(clicked_news_vecs, p=self.dropout_rate, training=self.training)
multi_clicked_vectors = self.multihead_attention(
clicked_news_vecs, clicked_news_vecs, clicked_news_vecs
)
pos_user_vector = self.additive_attention(multi_clicked_vectors)
user_vector = pos_user_vector
return user_vector
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.user_encoder = UserEncoder()
self.criterion = nn.CrossEntropyLoss()
def forward(self, candidate_vecs, clicked_news_vecs, targets, compute_loss=True):
user_vector = self.user_encoder(clicked_news_vecs)
score = torch.bmm(candidate_vecs, user_vector.unsqueeze(-1)).squeeze(dim=-1)
if compute_loss:
loss = self.criterion(score, targets)
return loss, score
else:
return score
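# Minimal shape-level smoke test (random tensors; an illustration, not part
# of the training pipeline):
#
#   model = Model()
#   cand = torch.randn(8, 21, 400)   # 1 positive + npratio=20 negatives
#   his = torch.randn(8, 50, 400)    # 50 clicked-news vectors per user
#   labels = torch.zeros(8, dtype=torch.long)  # positive sits at index 0
#   loss, scores = model(cand, his, labels)    # scores: (8, 21)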
| 5,484 | 37.356643 | 101 |
py
|
Efficient-FedRec
|
Efficient-FedRec-main/src/data.py
|
import random
import numpy as np
from torch.utils.data import Dataset, DataLoader
def newsample(nnn, ratio):
if ratio > len(nnn):
return nnn + ["<unk>"] * (ratio - len(nnn))
else:
return random.sample(nnn, ratio)
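# Padding with "<unk>" works because preprocessing reserves nid2index["<unk>"]
# = 0, whose row in the news index is an all-zero (fully padded) title.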
class TrainDataset(Dataset):
def __init__(self, args, samples, users, user_indices, nid2index, agg, news_index):
self.news_index = news_index
self.nid2index = nid2index
self.agg = agg
self.samples = []
self.args = args
for user in users:
self.samples.extend([samples[i] for i in user_indices[user]])
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
# pos, neg, his, neg_his
_, pos, neg, his, _ = self.samples[idx]
neg = newsample(neg, self.args.npratio)
candidate_news = np.array([self.nid2index[n] for n in [pos] + neg])
candidate_news_vecs = self.agg.get_news_vecs(candidate_news)
his = np.array([self.nid2index[n] for n in his] + [0] * (self.args.max_his_len - len(his)))
his_vecs = self.agg.get_news_vecs(his)
label = np.array(0)
return candidate_news, candidate_news_vecs, his, his_vecs, label
class NewsDataset(Dataset):
def __init__(self, news_index):
self.news_index = news_index
def __len__(self):
return len(self.news_index)
def __getitem__(self, idx):
return self.news_index[idx]
class NewsPartDataset(Dataset):
def __init__(self, news_index, nids):
self.news_index = news_index
self.nids = nids
def __len__(self):
return len(self.nids)
def __getitem__(self, idx):
nid = self.nids[idx]
return nid, self.news_index[nid]
class UserDataset(Dataset):
def __init__(self,
args,
samples,
news_vecs,
nid2index):
self.samples = samples
self.args = args
self.news_vecs = news_vecs
self.nid2index = nid2index
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
_, poss, negs, his, _ = self.samples[idx]
his = [self.nid2index[n] for n in his] + [0] * (self.args.max_his_len - len(his))
his = self.news_vecs[his]
return his
class NewsUpdatorDataset(Dataset):
def __init__(self, news_index, news_ids, news_grads):
self.news_index = news_index
self.news_grads = news_grads
self.news_ids = news_ids
def __len__(self):
return len(self.news_ids)
def __getitem__(self, idx):
nid = self.news_ids[idx]
return self.news_index[nid], self.news_grads[idx]
| 2,760 | 28.063158 | 99 |
py
|
Efficient-FedRec
|
Efficient-FedRec-main/src/metrics.py
|
import numpy as np
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
def dcg_score(y_true, y_score, k=10):
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order[:k])
gains = 2 ** y_true - 1
discounts = np.log2(np.arange(len(y_true)) + 2)
return np.sum(gains / discounts)
def ndcg_score(y_true, y_score, k=10):
best = dcg_score(y_true, y_true, k)
actual = dcg_score(y_true, y_score, k)
return actual / best
def mrr_score(y_true, y_score):
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order)
rr_score = y_true / (np.arange(len(y_true)) + 1)
return np.sum(rr_score) / np.sum(y_true)
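# Worked example (illustrative numbers): with y_true = [1, 0] and
# y_score = [0.1, 0.9] the positive item is ranked second, so
# mrr_score(y_true, y_score) == 0.5 and
# ndcg_score(y_true, y_score) == 1 / log2(3) ~= 0.631.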
def compute_amn(y_true, y_score):
auc = roc_auc_score(y_true,y_score)
mrr = mrr_score(y_true,y_score)
ndcg5 = ndcg_score(y_true,y_score,5)
ndcg10 = ndcg_score(y_true,y_score,10)
return auc, mrr, ndcg5, ndcg10
def evaluation_split(news_vecs, user_vecs, samples, nid2index):
all_rslt = []
for i in tqdm(range(len(samples))):
_, poss, negs, _, _ = samples[i]
user_vec = user_vecs[i]
y_true = [1] * len(poss) + [0] * len(negs)
news_ids = [nid2index[i] for i in poss + negs]
news_vec = news_vecs[news_ids]
y_score = np.multiply(news_vec, user_vec)
y_score = np.sum(y_score, axis=1)
        try:
            all_rslt.append(compute_amn(y_true, y_score))
        except Exception as e:
            # roc_auc_score raises when an impression contains only one class
            print(e)
return np.array(all_rslt)
| 1,488 | 30.680851 | 63 |
py
|
Efficient-FedRec
|
Efficient-FedRec-main/preprocess/adressa_raw.py
|
# This script is used to construct training, validation and test dataset of adressa.
# We follow existing works[1][2] to split the dataset.
# [1]
# [2]
import json
import pickle
import argparse
import numpy as np
from tqdm import tqdm
from pathlib import Path
from collections import defaultdict
from sklearn.model_selection import train_test_split
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--adressa_path",
type=str,
default="../raw/adressa/raw/one_week",
help="path to downloaded raw adressa dataset",
)
parser.add_argument(
"--out_path",
type=str,
default="../raw/adressa/",
help="path to save processed dataset, default in ../raw/adressa",
)
parser.add_argument(
"--neg_num",
type=int,
default=20,
help="randomly sample neg_num negative impression for every positive behavior",
)
args = parser.parse_args()
return args
def process_news(adressa_path):
news_title = {}
for file in adressa_path.iterdir():
with open(file, "r") as f:
for l in tqdm(f):
event_dict = json.loads(l.strip("\n"))
if "id" in event_dict and "title" in event_dict:
if event_dict["id"] not in news_title:
news_title[event_dict["id"]] = event_dict["title"]
else:
assert news_title[event_dict["id"]] == event_dict["title"]
nid2index = {k: v for k, v in zip(news_title.keys(), range(1, len(news_title) + 1))}
return news_title, nid2index
def write_news_files(news_title, nid2index, out_path):
# Output with MIND format
news_lines = []
for nid in tqdm(news_title):
nindex = nid2index[nid]
title = news_title[nid]
news_line = "\t".join([str(nindex), "", "", title, "", "", "", ""]) + "\n"
news_lines.append(news_line)
for stage in ["train", "valid", "test"]:
file_path = out_path / stage
file_path.mkdir(exist_ok=True, parents=True)
with open(out_path / stage / "news.tsv", "w", encoding="utf-8") as f:
f.writelines(news_lines)
class UserInfo:
def __init__(self, train_day=6, test_day=7):
self.click_news = []
self.click_time = []
self.click_days = []
self.train_news = []
self.train_time = []
self.train_days = []
self.test_news = []
self.test_time = []
self.test_days = []
self.train_day = train_day
self.test_day = test_day
def update(self, nindex, time, day):
if day == self.train_day:
self.train_news.append(nindex)
self.train_time.append(time)
self.train_days.append(day)
elif day == self.test_day:
self.test_news.append(nindex)
self.test_time.append(time)
self.test_days.append(day)
else:
self.click_news.append(nindex)
self.click_time.append(time)
self.click_days.append(day)
def sort_click(self):
self.click_news = np.array(self.click_news, dtype="int32")
self.click_time = np.array(self.click_time, dtype="int32")
self.click_days = np.array(self.click_days, dtype="int32")
self.train_news = np.array(self.train_news, dtype="int32")
self.train_time = np.array(self.train_time, dtype="int32")
self.train_days = np.array(self.train_days, dtype="int32")
self.test_news = np.array(self.test_news, dtype="int32")
self.test_time = np.array(self.test_time, dtype="int32")
self.test_days = np.array(self.test_days, dtype="int32")
order = np.argsort(self.train_time)
self.train_time = self.train_time[order]
self.train_days = self.train_days[order]
self.train_news = self.train_news[order]
order = np.argsort(self.test_time)
self.test_time = self.test_time[order]
self.test_days = self.test_days[order]
self.test_news = self.test_news[order]
order = np.argsort(self.click_time)
self.click_time = self.click_time[order]
self.click_days = self.click_days[order]
self.click_news = self.click_news[order]
def process_users(adressa_path):
uid2index = {}
user_info = defaultdict(UserInfo)
for file in adressa_path.iterdir():
with open(file, "r") as f:
for l in tqdm(f):
event_dict = json.loads(l.strip("\n"))
if "id" in event_dict and "title" in event_dict:
nindex = nid2index[event_dict["id"]]
uid = event_dict["userId"]
if uid not in uid2index:
uid2index[uid] = len(uid2index)
uindex = uid2index[uid]
click_time = int(event_dict["time"])
                    # assumes the one_week files are named so that the last
                    # character of the filename is the day number (1-7)
                    day = int(file.name[-1])
user_info[uindex].update(nindex, click_time, day)
return uid2index, user_info
def construct_behaviors(uindex, click_news, train_news, test_news, neg_num):
p = np.ones(len(news_title) + 1, dtype="float32")
p[click_news] = 0
p[train_news] = 0
p[test_news] = 0
p[0] = 0
p /= p.sum()
train_his_news = [str(i) for i in click_news.tolist()]
train_his_line = " ".join(train_his_news)
for nindex in train_news:
neg_cand = np.random.choice(
len(news_title) + 1, size=neg_num, replace=False, p=p
).tolist()
cand_news = " ".join(
[f"{str(nindex)}-1"] + [f"{str(nindex)}-0" for nindex in neg_cand]
)
train_behavior_line = f"null\t{uindex}\tnull\t{train_his_line}\t{cand_news}\n"
train_lines.append(train_behavior_line)
test_his_news = [str(i) for i in click_news.tolist() + train_news.tolist()]
test_his_line = " ".join(test_his_news)
for nindex in test_news:
neg_cand = np.random.choice(
len(news_title) + 1, size=neg_num, replace=False, p=p
).tolist()
cand_news = " ".join(
[f"{str(nindex)}-1"] + [f"{str(nindex)}-0" for nindex in neg_cand]
)
test_behavior_line = f"null\t{uindex}\tnull\t{test_his_line}\t{cand_news}\n"
test_lines.append(test_behavior_line)
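# The probability vector p above implements "sample negatives uniformly from
# all news the user never interacted with": clicked/train/test news and the
# padding id 0 get probability 0, and everything else is uniform once p is
# renormalized.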
if __name__ == "__main__":
args = parse_args()
adressa_path = Path(args.adressa_path)
out_path = Path(args.out_path)
news_title, nid2index = process_news(adressa_path)
write_news_files(news_title, nid2index, out_path)
uid2index, user_info = process_users(adressa_path)
for uid in tqdm(user_info):
user_info[uid].sort_click()
train_lines = []
test_lines = []
for uindex in tqdm(user_info):
uinfo = user_info[uindex]
click_news = uinfo.click_news
train_news = uinfo.train_news
test_news = uinfo.test_news
construct_behaviors(uindex, click_news, train_news, test_news, args.neg_num)
test_split_lines, valid_split_lines = train_test_split(
test_lines, test_size=0.2, random_state=2021
)
with open(out_path / "train" / "behaviors.tsv", "w", encoding="utf-8") as f:
f.writelines(train_lines)
with open(out_path / "valid" / "behaviors.tsv", "w", encoding="utf-8") as f:
f.writelines(valid_split_lines)
with open(out_path / "test" / "behaviors.tsv", "w", encoding="utf-8") as f:
f.writelines(test_split_lines)
| 7,496 | 32.172566 | 88 |
py
|
Efficient-FedRec
|
Efficient-FedRec-main/preprocess/user_process.py
|
from pathlib import Path
from tqdm import tqdm
from collections import defaultdict
import os
import pickle
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--raw_path",
type=str,
default="../raw/",
help="path to raw mind dataset or parsed ",
)
parser.add_argument(
"--out_path",
type=str,
default="../data/",
help="path to save processed dataset, default in ../raw/mind/preprocess",
)
parser.add_argument(
"--data",
type=str,
default="mind",
choices=["mind", "adressa"],
help="decide which dataset for preprocess"
)
parser.add_argument(
"--npratio",
type=int,
default=4
)
parser.add_argument(
"--max_his_len", type=int, default=50
)
parser.add_argument("--min_word_cnt", type=int, default=3)
parser.add_argument("--max_title_len", type=int, default=30)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
raw_path = Path(args.raw_path) / args.data
out_path = Path(args.out_path) / args.data
user_imprs = defaultdict(list)
# read user impressions
for l in tqdm(open(raw_path / "train" / "behaviors.tsv", "r")):
imp_id, uid, t, his, imprs = l.strip("\n").split("\t")
his = his.split()
imprs = [i.split("-") for i in imprs.split(" ")]
neg_imp = [i[0] for i in imprs if i[1] == "0"]
pos_imp = [i[0] for i in imprs if i[1] == "1"]
user_imprs[uid].append([imp_id, his, pos_imp, neg_imp, 0, uid])
for l in tqdm(open(raw_path / "valid" / "behaviors.tsv", "r")):
imp_id, uid, t, his, imprs = l.strip("\n").split("\t")
his = his.split()
imprs = [i.split("-") for i in imprs.split(" ")]
neg_imp = [i[0] for i in imprs if i[1] == "0"]
pos_imp = [i[0] for i in imprs if i[1] == "1"]
user_imprs[uid].append([imp_id, his, pos_imp, neg_imp, 1, uid])
if os.path.exists(raw_path / "test"):
if args.data == "adressa":
for l in tqdm(open(raw_path / "test" / "behaviors.tsv", "r")):
imp_id, uid, t, his, imprs = l.strip("\n").split("\t")
his = his.split()
imprs = [i.split("-") for i in imprs.split(" ")]
neg_imp = [i[0] for i in imprs if i[1] == "0"]
pos_imp = [i[0] for i in imprs if i[1] == "1"]
user_imprs[uid].append([imp_id, his, pos_imp, neg_imp, 2, uid])
else:
            # The MIND test set does not contain labels; it must be evaluated on codalab
for l in tqdm(open(raw_path / "test" / "behaviors.tsv", "r")):
imp_id, uid, t, his, imprs = l.strip("\n").split("\t")
his = his.split()
imprs = imprs.split(" ")
user_imprs[uid].append([imp_id, his, imprs, [], 2, uid])
train_samples = []
valid_samples = []
test_samples = []
user_indices = defaultdict(list)
index = 0
for uid in tqdm(user_imprs):
for impr in user_imprs[uid]:
imp_id, his, poss, negs, is_valid, uid = impr
his = his[-args.max_his_len:]
if is_valid == 0:
for pos in poss:
train_samples.append([imp_id, pos, negs, his, uid])
user_indices[uid].append(index)
index += 1
elif is_valid == 1:
valid_samples.append([imp_id, poss, negs, his, uid])
else:
test_samples.append([imp_id, poss, negs, his, uid])
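    # At this point every behavior line has become either one training sample
    # per positive click (pos, negs, history) or a whole-impression eval
    # sample, and user_indices maps each uid to the rows of train_samples it
    # owns, which is the per-user grouping the federated trainer samples from.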
print(len(train_samples), len(valid_samples), len(test_samples))
with open(out_path / "train_sam_uid.pkl", "wb") as f:
pickle.dump(train_samples, f)
with open(out_path / "valid_sam_uid.pkl", "wb") as f:
pickle.dump(valid_samples, f)
with open(out_path / "test_sam_uid.pkl", "wb") as f:
pickle.dump(test_samples, f)
with open(out_path / "user_indices.pkl", "wb") as f:
pickle.dump(user_indices, f)
train_user_samples = 0
for uid in tqdm(user_indices):
train_user_samples += len(user_indices[uid])
print(train_user_samples / len(user_indices))
| 4,275 | 32.669291 | 81 |
py
|
Efficient-FedRec
|
Efficient-FedRec-main/preprocess/news_process.py
|
from transformers import BertTokenizer
from pathlib import Path
from tqdm import tqdm
import numpy as np
import os
import pickle
import argparse
# config
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--raw_path",
type=str,
default="../raw/",
help="path to raw mind dataset or parsed ",
)
parser.add_argument(
"--out_path",
type=str,
default="../data/",
help="path to save processed dataset, default in ../raw/mind/preprocess",
)
parser.add_argument(
"--data",
type=str,
default="mind",
choices=["mind", "adressa"],
help="decide which dataset for preprocess"
)
parser.add_argument(
"--npratio",
type=int,
default=4
)
parser.add_argument(
"--max_his_len", type=int, default=50
)
parser.add_argument("--min_word_cnt", type=int, default=3)
parser.add_argument("--max_title_len", type=int, default=30)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
raw_path = Path(args.raw_path) / args.data
out_path = Path(args.out_path) / args.data
if not raw_path.is_dir():
raise ValueError(f"{raw_path.name} does not exist.")
out_path.mkdir(exist_ok=True, parents=True)
if args.data == "mind":
model_type = "bert-base-uncased"
else:
model_type = "NbAiLab/nb-bert-base"
tokenizer = BertTokenizer.from_pretrained(model_type)
# news preprocess
nid2index = {"<unk>": 0}
news_index = [[[0] * args.max_title_len, [0] * args.max_title_len]]
for l in tqdm(open(raw_path / "train" / "news.tsv", "r", encoding='utf-8')):
nid, vert, subvert, title, abst, url, ten, aen = l.strip("\n").split("\t")
if nid in nid2index:
continue
tokens = tokenizer(
title,
max_length=args.max_title_len,
truncation=True,
padding="max_length",
return_attention_mask=True,
)
nid2index[nid] = len(nid2index)
news_index.append([tokens.input_ids, tokens.attention_mask])
for l in tqdm(open(raw_path / "valid" / "news.tsv", "r", encoding='utf-8')):
nid, vert, subvert, title, abst, url, ten, aen = l.strip("\n").split("\t")
if nid in nid2index:
continue
tokens = tokenizer(
title,
max_length=args.max_title_len,
truncation=True,
padding="max_length",
return_attention_mask=True,
)
nid2index[nid] = len(nid2index)
news_index.append([tokens.input_ids, tokens.attention_mask])
with open(out_path / "bert_nid2index.pkl", "wb") as f:
pickle.dump(nid2index, f)
news_index = np.array(news_index)
np.save(out_path / "bert_news_index", news_index)
if os.path.exists(raw_path / "test"):
nid2index = {"<unk>": 0}
news_index = [[[0] * args.max_title_len, [0] * args.max_title_len]]
for l in tqdm(open(raw_path / "test" / "news.tsv", "r", encoding='utf-8')):
nid, vert, subvert, title, abst, url, ten, aen = l.strip("\n").split("\t")
if nid in nid2index:
continue
tokens = tokenizer(
title,
max_length=args.max_title_len,
truncation=True,
padding="max_length",
return_attention_mask=True,
)
nid2index[nid] = len(nid2index)
news_index.append([tokens.input_ids, tokens.attention_mask])
with open(out_path / "bert_test_nid2index.pkl", "wb") as f:
pickle.dump(nid2index, f)
news_index = np.array(news_index)
np.save(out_path / "bert_test_news_index", news_index)
| 3,844 | 29.515873 | 86 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/googletest-setuptestsuite-test.py
|
#!/usr/bin/env python
#
# Copyright 2019, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that SetUpTestSuite and TearDownTestSuite errors are noticed."""
from googletest.test import gtest_test_utils
COMMAND = gtest_test_utils.GetTestExecutablePath(
'googletest-setuptestsuite-test_')
class GTestSetUpTestSuiteTest(gtest_test_utils.TestCase):
def testSetupErrorAndTearDownError(self):
p = gtest_test_utils.Subprocess(COMMAND)
self.assertNotEqual(p.exit_code, 0, msg=p.output)
self.assertIn(
'[ FAILED ] SetupFailTest: SetUpTestSuite or TearDownTestSuite\n'
'[ FAILED ] TearDownFailTest: SetUpTestSuite or TearDownTestSuite\n'
'\n'
' 2 FAILED TEST SUITES\n',
p.output)
if __name__ == '__main__':
gtest_test_utils.Main()
| 2,265 | 40.2 | 78 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/googletest-failfast-unittest.py
|
#!/usr/bin/env python
#
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test fail_fast.
A user can specify if a Google Test program should continue test execution
after a test failure via the GTEST_FAIL_FAST environment variable or the
--gtest_fail_fast flag. The default value of the flag can also be changed
by Bazel fail fast environment variable TESTBRIDGE_TEST_RUNNER_FAIL_FAST.
This script tests such functionality by invoking googletest-failfast-unittest_
(a program written with Google Test) with different environments and command
line flags.
"""
import os
from googletest.test import gtest_test_utils
# Constants.
# Bazel testbridge environment variable for fail fast
BAZEL_FAIL_FAST_ENV_VAR = 'TESTBRIDGE_TEST_RUNNER_FAIL_FAST'
# The environment variable for specifying fail fast.
FAIL_FAST_ENV_VAR = 'GTEST_FAIL_FAST'
# The command line flag for specifying fail fast.
FAIL_FAST_FLAG = 'gtest_fail_fast'
# The command line flag to run disabled tests.
RUN_DISABLED_FLAG = 'gtest_also_run_disabled_tests'
# The command line flag for specifying a filter.
FILTER_FLAG = 'gtest_filter'
# Command to run the googletest-failfast-unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath(
'googletest-failfast-unittest_')
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
[COMMAND, LIST_TESTS_FLAG]).output
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def RunAndReturnOutput(test_suite=None, fail_fast=None, run_disabled=False):
"""Runs the test program and returns its output."""
args = []
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
'.GTestFailFastUnitTest.xml')
args += ['--gtest_output=xml:' + xml_path]
if fail_fast is not None:
if isinstance(fail_fast, str):
args += ['--%s=%s' % (FAIL_FAST_FLAG, fail_fast)]
elif fail_fast:
args += ['--%s' % FAIL_FAST_FLAG]
else:
args += ['--no%s' % FAIL_FAST_FLAG]
if test_suite:
args += ['--%s=%s.*' % (FILTER_FLAG, test_suite)]
if run_disabled:
args += ['--%s' % RUN_DISABLED_FLAG]
txt_out = gtest_test_utils.Subprocess([COMMAND] + args, env=environ).output
with open(xml_path) as xml_file:
return txt_out, xml_file.read()
# The unit test.
class GTestFailFastUnitTest(gtest_test_utils.TestCase):
"""Tests the env variable or the command line flag for fail_fast."""
def testDefaultBehavior(self):
"""Tests the behavior of not specifying the fail_fast."""
txt, _ = RunAndReturnOutput()
self.assertIn('22 FAILED TEST', txt)
def testGoogletestFlag(self):
txt, _ = RunAndReturnOutput(test_suite='HasSimpleTest', fail_fast=True)
self.assertIn('1 FAILED TEST', txt)
self.assertIn('[ SKIPPED ] 3 tests', txt)
txt, _ = RunAndReturnOutput(test_suite='HasSimpleTest', fail_fast=False)
self.assertIn('4 FAILED TEST', txt)
self.assertNotIn('[ SKIPPED ]', txt)
def testGoogletestEnvVar(self):
"""Tests the behavior of specifying fail_fast via Googletest env var."""
try:
SetEnvVar(FAIL_FAST_ENV_VAR, '1')
txt, _ = RunAndReturnOutput('HasSimpleTest')
self.assertIn('1 FAILED TEST', txt)
self.assertIn('[ SKIPPED ] 3 tests', txt)
SetEnvVar(FAIL_FAST_ENV_VAR, '0')
txt, _ = RunAndReturnOutput('HasSimpleTest')
self.assertIn('4 FAILED TEST', txt)
self.assertNotIn('[ SKIPPED ]', txt)
finally:
SetEnvVar(FAIL_FAST_ENV_VAR, None)
def testBazelEnvVar(self):
"""Tests the behavior of specifying fail_fast via Bazel testbridge."""
try:
SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, '1')
txt, _ = RunAndReturnOutput('HasSimpleTest')
self.assertIn('1 FAILED TEST', txt)
self.assertIn('[ SKIPPED ] 3 tests', txt)
SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, '0')
txt, _ = RunAndReturnOutput('HasSimpleTest')
self.assertIn('4 FAILED TEST', txt)
self.assertNotIn('[ SKIPPED ]', txt)
finally:
SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, None)
def testFlagOverridesEnvVar(self):
"""Tests precedence of flag over env var."""
try:
SetEnvVar(FAIL_FAST_ENV_VAR, '0')
txt, _ = RunAndReturnOutput('HasSimpleTest', True)
self.assertIn('1 FAILED TEST', txt)
self.assertIn('[ SKIPPED ] 3 tests', txt)
finally:
SetEnvVar(FAIL_FAST_ENV_VAR, None)
def testGoogletestEnvVarOverridesBazelEnvVar(self):
"""Tests that the Googletest native env var over Bazel testbridge."""
try:
SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, '0')
SetEnvVar(FAIL_FAST_ENV_VAR, '1')
txt, _ = RunAndReturnOutput('HasSimpleTest')
self.assertIn('1 FAILED TEST', txt)
self.assertIn('[ SKIPPED ] 3 tests', txt)
finally:
SetEnvVar(FAIL_FAST_ENV_VAR, None)
SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, None)
def testEventListener(self):
txt, _ = RunAndReturnOutput(test_suite='HasSkipTest', fail_fast=True)
self.assertIn('1 FAILED TEST', txt)
self.assertIn('[ SKIPPED ] 3 tests', txt)
for expected_count, callback in [(1, 'OnTestSuiteStart'),
(5, 'OnTestStart'),
(5, 'OnTestEnd'),
(5, 'OnTestPartResult'),
(1, 'OnTestSuiteEnd')]:
self.assertEqual(
expected_count, txt.count(callback),
'Expected %d calls to callback %s match count on output: %s ' %
(expected_count, callback, txt))
txt, _ = RunAndReturnOutput(test_suite='HasSkipTest', fail_fast=False)
self.assertIn('3 FAILED TEST', txt)
self.assertIn('[ SKIPPED ] 1 test', txt)
for expected_count, callback in [(1, 'OnTestSuiteStart'),
(5, 'OnTestStart'),
(5, 'OnTestEnd'),
(5, 'OnTestPartResult'),
(1, 'OnTestSuiteEnd')]:
self.assertEqual(
expected_count, txt.count(callback),
'Expected %d calls to callback %s match count on output: %s ' %
(expected_count, callback, txt))
def assertXmlResultCount(self, result, count, xml):
self.assertEqual(
count, xml.count('result="%s"' % result),
'Expected \'result="%s"\' match count of %s: %s ' %
(result, count, xml))
def assertXmlStatusCount(self, status, count, xml):
self.assertEqual(
count, xml.count('status="%s"' % status),
'Expected \'status="%s"\' match count of %s: %s ' %
(status, count, xml))
def assertFailFastXmlAndTxtOutput(self,
fail_fast,
test_suite,
passed_count,
failure_count,
skipped_count,
suppressed_count,
run_disabled=False):
"""Assert XML and text output of a test execution."""
txt, xml = RunAndReturnOutput(test_suite, fail_fast, run_disabled)
if failure_count > 0:
self.assertIn('%s FAILED TEST' % failure_count, txt)
if suppressed_count > 0:
self.assertIn('%s DISABLED TEST' % suppressed_count, txt)
if skipped_count > 0:
self.assertIn('[ SKIPPED ] %s tests' % skipped_count, txt)
self.assertXmlStatusCount('run',
passed_count + failure_count + skipped_count, xml)
self.assertXmlStatusCount('notrun', suppressed_count, xml)
self.assertXmlResultCount('completed', passed_count + failure_count, xml)
self.assertXmlResultCount('skipped', skipped_count, xml)
self.assertXmlResultCount('suppressed', suppressed_count, xml)
def assertFailFastBehavior(self,
test_suite,
passed_count,
failure_count,
skipped_count,
suppressed_count,
run_disabled=False):
"""Assert --fail_fast via flag."""
for fail_fast in ('true', '1', 't', True):
self.assertFailFastXmlAndTxtOutput(fail_fast, test_suite, passed_count,
failure_count, skipped_count,
suppressed_count, run_disabled)
def assertNotFailFastBehavior(self,
test_suite,
passed_count,
failure_count,
skipped_count,
suppressed_count,
run_disabled=False):
"""Assert --nofail_fast via flag."""
for fail_fast in ('false', '0', 'f', False):
self.assertFailFastXmlAndTxtOutput(fail_fast, test_suite, passed_count,
failure_count, skipped_count,
suppressed_count, run_disabled)
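  # Note: the string aliases above ('true'/'1'/'t' and 'false'/'0'/'f')
  # exercise googletest's boolean flag parsing; RunAndReturnOutput, defined
  # earlier in this file, presumably formats them into a
  # --gtest_fail_fast=<value> command line argument.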
def testFlag_HasFixtureTest(self):
"""Tests the behavior of fail_fast and TEST_F."""
self.assertFailFastBehavior(
test_suite='HasFixtureTest',
passed_count=1,
failure_count=1,
skipped_count=3,
suppressed_count=0)
self.assertNotFailFastBehavior(
test_suite='HasFixtureTest',
passed_count=1,
failure_count=4,
skipped_count=0,
suppressed_count=0)
def testFlag_HasSimpleTest(self):
"""Tests the behavior of fail_fast and TEST."""
self.assertFailFastBehavior(
test_suite='HasSimpleTest',
passed_count=1,
failure_count=1,
skipped_count=3,
suppressed_count=0)
self.assertNotFailFastBehavior(
test_suite='HasSimpleTest',
passed_count=1,
failure_count=4,
skipped_count=0,
suppressed_count=0)
def testFlag_HasParametersTest(self):
"""Tests the behavior of fail_fast and TEST_P."""
self.assertFailFastBehavior(
test_suite='HasParametersSuite/HasParametersTest',
passed_count=0,
failure_count=1,
skipped_count=3,
suppressed_count=0)
self.assertNotFailFastBehavior(
test_suite='HasParametersSuite/HasParametersTest',
passed_count=0,
failure_count=4,
skipped_count=0,
suppressed_count=0)
def testFlag_HasDisabledTest(self):
"""Tests the behavior of fail_fast and Disabled test cases."""
self.assertFailFastBehavior(
test_suite='HasDisabledTest',
passed_count=1,
failure_count=1,
skipped_count=2,
suppressed_count=1,
run_disabled=False)
self.assertNotFailFastBehavior(
test_suite='HasDisabledTest',
passed_count=1,
failure_count=3,
skipped_count=0,
suppressed_count=1,
run_disabled=False)
def testFlag_HasDisabledRunDisabledTest(self):
"""Tests the behavior of fail_fast and Disabled test cases enabled."""
self.assertFailFastBehavior(
test_suite='HasDisabledTest',
passed_count=1,
failure_count=1,
skipped_count=3,
suppressed_count=0,
run_disabled=True)
self.assertNotFailFastBehavior(
test_suite='HasDisabledTest',
passed_count=1,
failure_count=4,
skipped_count=0,
suppressed_count=0,
run_disabled=True)
def testFlag_HasDisabledSuiteTest(self):
"""Tests the behavior of fail_fast and Disabled test suites."""
self.assertFailFastBehavior(
test_suite='DISABLED_HasDisabledSuite',
passed_count=0,
failure_count=0,
skipped_count=0,
suppressed_count=5,
run_disabled=False)
self.assertNotFailFastBehavior(
test_suite='DISABLED_HasDisabledSuite',
passed_count=0,
failure_count=0,
skipped_count=0,
suppressed_count=5,
run_disabled=False)
def testFlag_HasDisabledSuiteRunDisabledTest(self):
"""Tests the behavior of fail_fast and Disabled test suites enabled."""
self.assertFailFastBehavior(
test_suite='DISABLED_HasDisabledSuite',
passed_count=1,
failure_count=1,
skipped_count=3,
suppressed_count=0,
run_disabled=True)
self.assertNotFailFastBehavior(
test_suite='DISABLED_HasDisabledSuite',
passed_count=1,
failure_count=4,
skipped_count=0,
suppressed_count=0,
run_disabled=True)
if SUPPORTS_DEATH_TESTS:
def testFlag_HasDeathTest(self):
"""Tests the behavior of fail_fast and death tests."""
self.assertFailFastBehavior(
test_suite='HasDeathTest',
passed_count=1,
failure_count=1,
skipped_count=3,
suppressed_count=0)
self.assertNotFailFastBehavior(
test_suite='HasDeathTest',
passed_count=1,
failure_count=4,
skipped_count=0,
suppressed_count=0)
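# Illustrative sketch (hypothetical, not part of the original file): the
# RunAndReturnOutput helper used throughout this test is defined earlier in
# the file. A minimal version might look like the following, assuming a
# COMMAND path to the fail_fast test binary:
#
#   def RunAndReturnOutput(test_suite=None, fail_fast=None, run_disabled=False):
#     args = []
#     if test_suite:
#       args.append('--gtest_filter=%s.*' % test_suite)
#     if fail_fast is not None:
#       args.append('--gtest_fail_fast=%s' % fail_fast)
#     if run_disabled:
#       args.append('--gtest_also_run_disabled_tests')
#     txt = gtest_test_utils.Subprocess([COMMAND] + args).output
#     return txt, ''  # the real helper also collects and returns XML output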
if __name__ == '__main__':
gtest_test_utils.Main()
| 14,915 | 35.291971 | 80 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/gtest_xml_outfiles_test.py
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
import os
from xml.dom import minidom, Node
from googletest.test import gtest_test_utils
from googletest.test import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" skipped="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="TestSomeProperties" status="run" result="completed" time="*" timestamp="*" classname="PropertyOne">
<properties>
<property name="SetUpProp" value="1"/>
<property name="TestSomeProperty" value="1"/>
<property name="TearDownProp" value="1"/>
</properties>
</testcase>
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" skipped="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="TestSomeProperties" status="run" result="completed" time="*" timestamp="*" classname="PropertyTwo">
<properties>
<property name="SetUpProp" value="2"/>
<property name="TestSomeProperty" value="2"/>
<property name="TearDownProp" value="2"/>
</properties>
</testcase>
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
"""Unit test for Google Test's XML output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for xml output.
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, "")
self.DeleteFilesAndDir()
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)
def testOutfile2(self):
self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)
def _TestOutFile(self, test_name, expected_xml):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
output_file_name1 = test_name + ".xml"
output_file1 = os.path.join(self.output_dir_, output_file_name1)
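    # Libtool-wrapped test executables are renamed with an 'lt-' prefix, so
    # the XML output file may be named after the wrapper instead.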
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
expected = minidom.parseString(expected_xml)
if os.path.isfile(output_file1):
actual = minidom.parse(output_file1)
else:
actual = minidom.parse(output_file2)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main()
| 5,415 | 38.823529 | 119 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/googletest-color-test.py
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
import os
from googletest.test import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-color-test_')
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def UsesColor(term, color_env_var, color_flag):
"""Runs googletest-color-test_ and returns its exit code."""
SetEnvVar('TERM', term)
SetEnvVar(COLOR_ENV_VAR, color_env_var)
if color_flag is None:
args = []
else:
args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
p = gtest_test_utils.Subprocess([COMMAND] + args)
return not p.exited or p.exit_code
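# Illustration (hypothetical, not part of the original file): the test
# binary presumably exits with a non-zero code exactly when it decides to
# use colors, so UsesColor() is truthy in cases such as:
#   UsesColor('xterm', None, None)   # color-capable TERM, no overrides
#   UsesColor('dumb', None, 'yes')   # the flag forces color on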
class GTestColorTest(gtest_test_utils.TestCase):
def testNoEnvVarNoFlag(self):
"""Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', None, None))
self.assert_(not UsesColor('emacs', None, None))
self.assert_(not UsesColor('xterm-mono', None, None))
self.assert_(not UsesColor('unknown', None, None))
self.assert_(not UsesColor(None, None, None))
self.assert_(UsesColor('linux', None, None))
self.assert_(UsesColor('cygwin', None, None))
self.assert_(UsesColor('xterm', None, None))
self.assert_(UsesColor('xterm-color', None, None))
self.assert_(UsesColor('xterm-256color', None, None))
def testFlagOnly(self):
"""Tests the case when there's --gtest_color but not GTEST_COLOR."""
self.assert_(not UsesColor('dumb', None, 'no'))
self.assert_(not UsesColor('xterm-color', None, 'no'))
if not IS_WINDOWS:
self.assert_(not UsesColor('emacs', None, 'auto'))
self.assert_(UsesColor('xterm', None, 'auto'))
self.assert_(UsesColor('dumb', None, 'yes'))
self.assert_(UsesColor('xterm', None, 'yes'))
def testEnvVarOnly(self):
"""Tests the case when there's GTEST_COLOR but not --gtest_color."""
self.assert_(not UsesColor('dumb', 'no', None))
self.assert_(not UsesColor('xterm-color', 'no', None))
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', 'auto', None))
self.assert_(UsesColor('xterm-color', 'auto', None))
self.assert_(UsesColor('dumb', 'yes', None))
self.assert_(UsesColor('xterm-color', 'yes', None))
def testEnvVarAndFlag(self):
"""Tests the case when there are both GTEST_COLOR and --gtest_color."""
self.assert_(not UsesColor('xterm-color', 'no', 'no'))
self.assert_(UsesColor('dumb', 'no', 'yes'))
self.assert_(UsesColor('xterm-color', 'no', 'auto'))
def testAliasesOfYesAndNo(self):
"""Tests using aliases in specifying --gtest_color."""
self.assert_(UsesColor('dumb', None, 'true'))
self.assert_(UsesColor('dumb', None, 'YES'))
self.assert_(UsesColor('dumb', None, 'T'))
self.assert_(UsesColor('dumb', None, '1'))
self.assert_(not UsesColor('xterm', None, 'f'))
self.assert_(not UsesColor('xterm', None, 'false'))
self.assert_(not UsesColor('xterm', None, '0'))
self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
gtest_test_utils.Main()
| 4,896 | 37.257813 | 76 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/googletest-shuffle-test.py
|
#!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
import os
from googletest.test import gtest_test_utils
# Command to run the googletest-shuffle-test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-shuffle-test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
TEST_FILTER = 'A*.A:A*.B:C*'
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
return '--gtest_filter=%s' % (test_filter,)
def RepeatFlag(n):
return '--gtest_repeat=%s' % (n,)
def ShuffleFlag():
return '--gtest_shuffle'
def RandomSeedFlag(n):
return '--gtest_random_seed=%s' % (n,)
def RunAndReturnOutput(extra_env, args):
"""Runs the test program and returns its output."""
environ_copy = os.environ.copy()
environ_copy.update(extra_env)
return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
"""Runs the test program and returns a list of test lists.
Args:
extra_env: a map from environment variables to their values
args: command line flags to pass to googletest-shuffle-test_
Returns:
A list where the i-th element is the list of tests run in the i-th
test iteration.
"""
test_iterations = []
for line in RunAndReturnOutput(extra_env, args).split('\n'):
if line.startswith('----'):
tests = []
test_iterations.append(tests)
elif line.strip():
tests.append(line.strip()) # 'TestCaseName.TestName'
return test_iterations
def GetTestCases(tests):
"""Returns a list of test cases in the given full test names.
Args:
tests: a list of full test names
Returns:
    A list of the test case names in 'tests', in their original order, with
    duplicates removed (first occurrence kept).
"""
test_cases = []
for test in tests:
test_case = test.split('.')[0]
if not test_case in test_cases:
test_cases.append(test_case)
return test_cases
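# Example (not part of the original file):
#   GetTestCases(['A.one', 'A.two', 'B.one', 'A.three'])
# returns ['A', 'B']: each test case name is kept once, at the position of
# its first occurrence.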
def CalculateTestLists():
"""Calculates the list of tests run under different flags."""
if not ALL_TESTS:
ALL_TESTS.extend(
GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
if not ACTIVE_TESTS:
ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
if not FILTERED_TESTS:
FILTERED_TESTS.extend(
GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
if not SHARDED_TESTS:
SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[])[0])
if not SHUFFLED_ALL_TESTS:
SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
{}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_ACTIVE_TESTS:
SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_FILTERED_TESTS:
SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
if not SHUFFLED_SHARDED_TESTS:
SHUFFLED_SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(1)])[0])
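# Note: the lists above serve as module-level caches, so the relatively
# expensive subprocess invocations in CalculateTestLists() run at most once
# even though setUp() calls it before every test method.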
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
"""Tests test shuffling."""
def setUp(self):
CalculateTestLists()
def testShufflePreservesNumberOfTests(self):
self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
def testShuffleChangesTestOrder(self):
self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
SHUFFLED_FILTERED_TESTS)
self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
SHUFFLED_SHARDED_TESTS)
def testShuffleChangesTestCaseOrder(self):
self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
GetTestCases(SHUFFLED_ALL_TESTS))
self.assert_(
GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
GetTestCases(SHUFFLED_ACTIVE_TESTS))
self.assert_(
GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
GetTestCases(SHUFFLED_FILTERED_TESTS))
self.assert_(
GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
GetTestCases(SHUFFLED_SHARDED_TESTS))
def testShuffleDoesNotRepeatTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
'%s appears more than once' % (test,))
def testShuffleDoesNotCreateNewTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
def testShuffleIncludesAllTests(self):
for test in ALL_TESTS:
self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
for test in ACTIVE_TESTS:
self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
for test in FILTERED_TESTS:
self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
for test in SHARDED_TESTS:
self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
def testShuffleLeavesDeathTestsAtFront(self):
non_death_test_found = False
for test in SHUFFLED_ACTIVE_TESTS:
if 'DeathTest.' in test:
self.assert_(not non_death_test_found,
'%s appears after a non-death test' % (test,))
else:
non_death_test_found = True
def _VerifyTestCasesDoNotInterleave(self, tests):
test_cases = []
for test in tests:
[test_case, _] = test.split('.')
      if not test_cases or test_cases[-1] != test_case:
test_cases.append(test_case)
self.assertEqual(1, test_cases.count(test_case),
'Test case %s is not grouped together in %s' %
(test_case, tests))
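  # Illustration (not part of the original file): for
  #   ['A.x', 'A.y', 'B.x', 'A.z']
  # the run list built above becomes ['A', 'B', 'A'], so the second 'A' run
  # trips the assertion -- test case A is interleaved with B.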
def testShuffleDoesNotInterleaveTestCases(self):
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
def testShuffleRestoresOrderAfterEachIteration(self):
# Get the test lists in all 3 iterations, using random seed 1, 2,
# and 3 respectively. Google Test picks a different seed in each
# iteration, and this test depends on the current implementation
# picking successive numbers. This dependency is not ideal, but
# makes the test much easier to write.
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
# Make sure running the tests with random seed 1 gets the same
# order as in iteration 1 above.
[tests_with_seed1] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])
self.assertEqual(tests_in_iteration1, tests_with_seed1)
# Make sure running the tests with random seed 2 gets the same
# order as in iteration 2 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 2.
[tests_with_seed2] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(2)])
self.assertEqual(tests_in_iteration2, tests_with_seed2)
# Make sure running the tests with random seed 3 gets the same
# order as in iteration 3 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 3.
[tests_with_seed3] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(3)])
self.assertEqual(tests_in_iteration3, tests_with_seed3)
def testShuffleGeneratesNewOrderInEachIteration(self):
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
self.assert_(tests_in_iteration1 != tests_in_iteration2,
tests_in_iteration1)
self.assert_(tests_in_iteration1 != tests_in_iteration3,
tests_in_iteration1)
self.assert_(tests_in_iteration2 != tests_in_iteration3,
tests_in_iteration2)
def testShuffleShardedTestsPreservesPartition(self):
# If we run M tests on N shards, the same M tests should be run in
# total, regardless of the random seeds used by the shards.
[tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '0'},
[ShuffleFlag(), RandomSeedFlag(1)])
[tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(20)])
[tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '2'},
[ShuffleFlag(), RandomSeedFlag(25)])
sorted_sharded_tests = tests1 + tests2 + tests3
sorted_sharded_tests.sort()
sorted_active_tests = []
sorted_active_tests.extend(ACTIVE_TESTS)
sorted_active_tests.sort()
self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
gtest_test_utils.Main()
| 12,539 | 37.703704 | 79 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/gtest_skip_environment_check_output_test.py
|
#!/usr/bin/env python
#
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's gtest skip in environment setup behavior.
This script invokes gtest_skip_in_environment_setup_test_ and verifies its
output.
"""
from googletest.test import gtest_test_utils
# Path to the gtest_skip_in_environment_setup_test binary
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_skip_in_environment_setup_test')
OUTPUT = gtest_test_utils.Subprocess([EXE_PATH]).output
# Test.
class SkipEntireEnvironmentTest(gtest_test_utils.TestCase):
def testSkipEntireEnvironmentTest(self):
self.assertIn('Skipping the entire environment', OUTPUT)
self.assertNotIn('FAILED', OUTPUT)
if __name__ == '__main__':
gtest_test_utils.Main()
| 2,230 | 39.563636 | 74 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/gtest_xml_test_utils.py
|
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for gtest_xml_output"""
import re
from xml.dom import minidom, Node
from googletest.test import gtest_test_utils
GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml'
class GTestXMLTestCase(gtest_test_utils.TestCase):
"""
Base class for tests of Google Test's XML output functionality.
"""
def AssertEquivalentNodes(self, expected_node, actual_node):
"""
Asserts that actual_node (a DOM node object) is equivalent to
expected_node (another DOM node object), in that either both of
them are CDATA nodes and have the same value, or both are DOM
elements and actual_node meets all of the following conditions:
* It has the same tag name as expected_node.
* It has the same set of attributes as expected_node, each with
the same value as the corresponding attribute of expected_node.
Exceptions are any attribute named "time", which needs only be
convertible to a floating-point number and any attribute named
"type_param" which only has to be non-empty.
* It has an equivalent set of child nodes (including elements and
CDATA sections) as expected_node. Note that we ignore the
order of the children as they are not guaranteed to be in any
particular order.
"""
if expected_node.nodeType == Node.CDATA_SECTION_NODE:
self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
self.assertEquals(expected_node.nodeValue, actual_node.nodeValue)
return
self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType)
self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType)
self.assertEquals(expected_node.tagName, actual_node.tagName)
expected_attributes = expected_node.attributes
actual_attributes = actual_node.attributes
self.assertEquals(
expected_attributes.length, actual_attributes.length,
'attribute numbers differ in element %s:\nExpected: %r\nActual: %r' % (
actual_node.tagName, expected_attributes.keys(),
actual_attributes.keys()))
for i in range(expected_attributes.length):
expected_attr = expected_attributes.item(i)
actual_attr = actual_attributes.get(expected_attr.name)
self.assert_(
actual_attr is not None,
'expected attribute %s not found in element %s' %
(expected_attr.name, actual_node.tagName))
self.assertEquals(
expected_attr.value, actual_attr.value,
' values of attribute %s in element %s differ: %s vs %s' %
(expected_attr.name, actual_node.tagName,
expected_attr.value, actual_attr.value))
expected_children = self._GetChildren(expected_node)
actual_children = self._GetChildren(actual_node)
self.assertEquals(
len(expected_children), len(actual_children),
'number of child elements differ in element ' + actual_node.tagName)
for child_id, child in expected_children.items():
self.assert_(child_id in actual_children,
'<%s> is not in <%s> (in element %s)' %
(child_id, actual_children, actual_node.tagName))
self.AssertEquivalentNodes(child, actual_children[child_id])
identifying_attribute = {
'testsuites': 'name',
'testsuite': 'name',
'testcase': 'name',
'failure': 'message',
'skipped': 'message',
'property': 'name',
}
def _GetChildren(self, element):
"""
Fetches all of the child nodes of element, a DOM Element object.
Returns them as the values of a dictionary keyed by the IDs of the
children. For <testsuites>, <testsuite>, <testcase>, and <property>
elements, the ID is the value of their "name" attribute; for <failure>
    and <skipped> elements, it is the value of the "message" attribute; for
    <properties> elements, it is the value of their parent's "name"
    attribute plus the literal string "-properties"; CDATA sections and
    non-whitespace text nodes are concatenated into a single CDATA section
    with ID "detail". An exception is raised if an element not listed above
    is encountered, if two child elements with the same identifying
    attributes are encountered, or if any other type of node is encountered.
"""
children = {}
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
if child.tagName == 'properties':
self.assert_(child.parentNode is not None,
'Encountered <properties> element without a parent')
child_id = child.parentNode.getAttribute('name') + '-properties'
else:
self.assert_(child.tagName in self.identifying_attribute,
'Encountered unknown element <%s>' % child.tagName)
child_id = child.getAttribute(
self.identifying_attribute[child.tagName])
self.assert_(child_id not in children)
children[child_id] = child
elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
if 'detail' not in children:
if (child.nodeType == Node.CDATA_SECTION_NODE or
not child.nodeValue.isspace()):
children['detail'] = child.ownerDocument.createCDATASection(
child.nodeValue)
else:
children['detail'].nodeValue += child.nodeValue
else:
self.fail('Encountered unexpected node type %d' % child.nodeType)
return children
def NormalizeXml(self, element):
"""
Normalizes Google Test's XML output to eliminate references to transient
information that may change from run to run.
* The "time" attribute of <testsuites>, <testsuite> and <testcase>
elements is replaced with a single asterisk, if it contains
only digit characters.
* The "timestamp" attribute of <testsuites> elements is replaced with a
single asterisk, if it contains a valid ISO8601 datetime value.
* The "type_param" attribute of <testcase> elements is replaced with a
      single asterisk (if it is non-empty) as it is the type name returned
by the compiler and is platform dependent.
* The line info reported in the first line of the "message"
attribute and CDATA section of <failure> elements is replaced with the
file's basename and a single asterisk for the line number.
* The directory names in file paths are removed.
* The stack traces are removed.
"""
if element.tagName in ('testsuites', 'testsuite', 'testcase'):
timestamp = element.getAttributeNode('timestamp')
timestamp.value = re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d\d\d$',
'*', timestamp.value)
if element.tagName in ('testsuites', 'testsuite', 'testcase'):
time = element.getAttributeNode('time')
time.value = re.sub(r'^\d+(\.\d+)?$', '*', time.value)
type_param = element.getAttributeNode('type_param')
if type_param and type_param.value:
type_param.value = '*'
elif element.tagName == 'failure' or element.tagName == 'skipped':
source_line_pat = r'^.*[/\\](.*:)\d+\n'
# Replaces the source line information with a normalized form.
message = element.getAttributeNode('message')
message.value = re.sub(source_line_pat, '\\1*\n', message.value)
for child in element.childNodes:
if child.nodeType == Node.CDATA_SECTION_NODE:
# Replaces the source line information with a normalized form.
cdata = re.sub(source_line_pat, '\\1*\n', child.nodeValue)
# Removes the actual stack trace.
child.nodeValue = re.sub(r'Stack trace:\n(.|\n)*',
'Stack trace:\n*', cdata)
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.NormalizeXml(child)
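# Example (not part of the original file): after NormalizeXml, an element
# such as
#   <testsuite name="T" tests="1" time="0.035" timestamp="2023-01-01T00:00:00.000">
# compares as
#   <testsuite name="T" tests="1" time="*" timestamp="*">
# which is the form the EXPECTED_XML templates in the calling tests rely on.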
| 9,327 | 46.111111 | 79 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/googletest-global-environment-unittest.py
|
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's global test environment behavior.
A user can specify a global test environment via
testing::AddGlobalTestEnvironment. Failures in the global environment should
result in all unit tests being skipped.
This script tests such functionality by invoking
googletest-global-environment-unittest_ (a program written with Google Test).
"""
import re
from googletest.test import gtest_test_utils
def RunAndReturnOutput(args=None):
"""Runs the test program and returns its output."""
return gtest_test_utils.Subprocess([
gtest_test_utils.GetTestExecutablePath(
'googletest-global-environment-unittest_')
] + (args or [])).output
class GTestGlobalEnvironmentUnitTest(gtest_test_utils.TestCase):
"""Tests global test environment failures."""
def testEnvironmentSetUpFails(self):
"""Tests the behavior of not specifying the fail_fast."""
# Run the test.
txt = RunAndReturnOutput()
# We should see the text of the global environment setup error.
self.assertIn('Canned environment setup error', txt)
# Our test should have been skipped due to the error, and not treated as a
# pass.
self.assertIn('[ SKIPPED ] 1 test', txt)
self.assertIn('[ PASSED ] 0 tests', txt)
# The test case shouldn't have been run.
self.assertNotIn('Unexpected call', txt)
def testEnvironmentSetUpAndTornDownForEachRepeat(self):
"""Tests the behavior of test environments and gtest_repeat."""
# When --gtest_recreate_environments_when_repeating is true, the global test
# environment should be set up and torn down for each iteration.
txt = RunAndReturnOutput([
'--gtest_repeat=2',
'--gtest_recreate_environments_when_repeating=true',
])
expected_pattern = ('(.|\n)*'
r'Repeating all tests \(iteration 1\)'
'(.|\n)*'
'Global test environment set-up.'
'(.|\n)*'
'SomeTest.DoesFoo'
'(.|\n)*'
'Global test environment tear-down'
'(.|\n)*'
r'Repeating all tests \(iteration 2\)'
'(.|\n)*'
'Global test environment set-up.'
'(.|\n)*'
'SomeTest.DoesFoo'
'(.|\n)*'
'Global test environment tear-down'
'(.|\n)*')
self.assertRegex(txt, expected_pattern)
def testEnvironmentSetUpAndTornDownOnce(self):
"""Tests environment and --gtest_recreate_environments_when_repeating."""
# By default the environment should only be set up and torn down once, at
# the start and end of the test respectively.
txt = RunAndReturnOutput([
'--gtest_repeat=2',
])
expected_pattern = ('(.|\n)*'
r'Repeating all tests \(iteration 1\)'
'(.|\n)*'
'Global test environment set-up.'
'(.|\n)*'
'SomeTest.DoesFoo'
'(.|\n)*'
r'Repeating all tests \(iteration 2\)'
'(.|\n)*'
'SomeTest.DoesFoo'
'(.|\n)*'
'Global test environment tear-down'
'(.|\n)*')
self.assertRegex(txt, expected_pattern)
self.assertEqual(len(re.findall('Global test environment set-up', txt)), 1)
self.assertEqual(
len(re.findall('Global test environment tear-down', txt)), 1)
if __name__ == '__main__':
gtest_test_utils.Main()
| 5,243 | 39.030534 | 80 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/googletest-catch-exceptions-test.py
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes googletest-catch-exceptions-test_ and
googletest-catch-exceptions-ex-test_ (programs written with
Google Test) and verifies their output.
"""
from googletest.test import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the googletest-catch-exceptions-ex-test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'googletest-catch-exceptions-ex-test_')
# Path to the googletest-catch-exceptions-test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'googletest-catch-exceptions-no-ex-test_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
TEST_LIST = gtest_test_utils.Subprocess(
[EXE_PATH, LIST_TESTS_FLAG], env=environ).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
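# Note: the SEH exception tests are only compiled in on Windows, so their
# presence in the --gtest_list_tests output doubles as the SEH support check.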
if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
[EX_EXE_PATH], env=environ).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
# pylint:disable-msg=C6302
class CatchSehExceptionsTest(gtest_test_utils.TestCase):
"""Tests exception-catching behavior."""
def TestSehExceptions(self, test_output):
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s constructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s destructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUpTestSuite()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDownTestSuite()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUp()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDown()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in the test body'
in test_output)
def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
self.TestSehExceptions(EX_BINARY_OUTPUT)
def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
"""Tests C++ exception-catching behavior.
Tests in this test case verify that:
* C++ exceptions are caught and logged as C++ (not SEH) exceptions
    * Exceptions thrown affect the remainder of the test workflow in the
expected manner.
"""
def testCatchesCxxExceptionsInFixtureConstructor(self):
self.assertTrue(
'C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s constructor' in EX_BINARY_OUTPUT,
EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInConstructorTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
EX_BINARY_OUTPUT):
def testCatchesCxxExceptionsInFixtureDestructor(self):
self.assertTrue(
'C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s destructor' in EX_BINARY_OUTPUT,
EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInDestructorTest::TearDownTestSuite() '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUpTestCase(self):
self.assertTrue(
'C++ exception with description "Standard C++ exception"'
' thrown in SetUpTestSuite()' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInConstructorTest::TearDownTestSuite() '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertFalse(
'CxxExceptionInSetUpTestSuiteTest constructor '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertFalse(
'CxxExceptionInSetUpTestSuiteTest destructor '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertFalse(
'CxxExceptionInSetUpTestSuiteTest::SetUp() '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertFalse(
'CxxExceptionInSetUpTestSuiteTest::TearDown() '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertFalse(
'CxxExceptionInSetUpTestSuiteTest test body '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTearDownTestCase(self):
self.assertTrue(
'C++ exception with description "Standard C++ exception"'
' thrown in TearDownTestSuite()' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUp(self):
self.assertTrue(
'C++ exception with description "Standard C++ exception"'
' thrown in SetUp()' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInSetUpTest::TearDownTestSuite() '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInSetUpTest destructor '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInSetUpTest::TearDown() '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInSetUpTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
def testCatchesCxxExceptionsInTearDown(self):
self.assertTrue(
'C++ exception with description "Standard C++ exception"'
' thrown in TearDown()' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInTearDownTest::TearDownTestSuite() '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInTearDownTest destructor '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTestBody(self):
self.assertTrue(
'C++ exception with description "Standard C++ exception"'
' thrown in the test body' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInTestBodyTest::TearDownTestSuite() '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInTestBodyTest destructor '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInTestBodyTest::TearDown() '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
def testCatchesNonStdCxxExceptions(self):
self.assertTrue(
'Unknown C++ exception thrown in the test body' in EX_BINARY_OUTPUT,
EX_BINARY_OUTPUT)
def testUnhandledCxxExceptionsAbortTheProgram(self):
# Filters out SEH exception tests on Windows. Unhandled SEH exceptions
# cause tests to show pop-up windows there.
    FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
# By default, Google Test doesn't catch the exceptions.
uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
[EX_EXE_PATH,
NO_CATCH_EXCEPTIONS_FLAG,
         FILTER_OUT_SEH_TESTS_FLAG],
env=environ).output
self.assert_('Unhandled C++ exception terminating the program'
in uncaught_exceptions_ex_binary_output)
self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
gtest_test_utils.Main()
| 10,023 | 41.295359 | 79 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/gtest_skip_check_output_test.py
|
#!/usr/bin/env python
#
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's gtest skip in environment setup behavior.
This script invokes gtest_skip_in_environment_setup_test_ and verifies its
output.
"""
import re
from googletest.test import gtest_test_utils
# Path to the gtest_skip_test binary
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_skip_test')
OUTPUT = gtest_test_utils.Subprocess([EXE_PATH]).output
# Test.
class SkipCheckOutputTest(gtest_test_utils.TestCase):
  def testSkipOutput(self):
self.assertIn('Skipped\nskipping single test\n', OUTPUT)
skip_fixture = 'Skipped\nskipping all tests for this fixture\n'
self.assertIsNotNone(
re.search(skip_fixture + '.*' + skip_fixture, OUTPUT, flags=re.DOTALL),
repr(OUTPUT))
self.assertNotIn('FAILED', OUTPUT)
if __name__ == '__main__':
gtest_test_utils.Main()
| 2,411 | 39.2 | 79 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/gtest_test_utils.py
|
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing and Mocking Framework."""
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
import os
import subprocess
import sys
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
IS_OS2 = os.name == 'os2'
import atexit
import shutil
import tempfile
import unittest as _test_module
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets/unsets an environment variable to a given value."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
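# Example (not part of the original file): invoking a test script as
#   my_test.py --build_dir=/tmp/out
# strips the flag from sys.argv and makes GetFlag('build_dir') return
# '/tmp/out', overriding any BUILD_DIR environment variable.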
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN or IS_OS2) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary "%s". Please make sure to provide\n'
'a path to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.' % path)
print(message, file=sys.stderr)
sys.exit(1)
return path
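# Example (hedged sketch): locating a built test binary by name; the name
# 'sample_unittest_' is hypothetical. On Windows, Cygwin and OS/2 the '.exe'
# suffix is appended automatically, and the program aborts if no such file
# exists.
def _ExampleGetTestExecutablePath():
  return GetTestExecutablePath('sample_unittest_')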
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
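# Example (illustrative sketch, not part of the original module): decoding
# the raw result of os.system() into the value the child passed to exit().
# The shell command below is hypothetical.
def _ExampleGetExitStatus():
  raw = os.system('exit 3')
  return GetExitStatus(raw)  # 3 if the command ran; -1 if killed by a signal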
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
    The constructed object represents the outcome of the executed process
    and has the following attributes:
terminated_by_signal True if and only if the child process has been
terminated by a signal.
exited True if and only if the child process exited
normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
    # communicate() returns a tuple (stdout_data, stderr_data) of strings,
    # not file objects; element 0 carries the child's output.
self.output = p.communicate()[0]
self._return_code = p.returncode
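    # A return code with the high bit set means the child was terminated
    # abnormally: Windows reports NTSTATUS codes such as 0xC0000005 directly,
    # and on POSIX Popen reports signals as negative codes, which also have
    # that bit set in two's complement.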
if bool(self._return_code & 0x80000000):
self.terminated_by_signal = True
self.exited = False
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
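# Example (hedged usage sketch, not part of the original module): running a
# binary and inspecting the outcome. The binary name 'sample_unittest_' is
# hypothetical.
def _ExampleSubprocessUsage():
  p = Subprocess([GetTestExecutablePath('sample_unittest_'),
                  '--gtest_list_tests'])
  if p.terminated_by_signal:
    return None
  # p.output holds the combined stdout/stderr text, since capture_stderr
  # defaults to True.
  return (p.exit_code, p.output)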
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
| 8,407 | 31.84375 | 79 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/googletest-json-output-unittest.py
|
#!/usr/bin/env python
# Copyright 2018, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_json_output module."""
import datetime
import errno
import json
import os
import re
import sys
from googletest.test import gtest_json_test_utils
from googletest.test import gtest_test_utils
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = '--gtest_output'
GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.json'
GTEST_PROGRAM_NAME = 'gtest_xml_output_unittest_'
# The flag indicating stacktraces are not supported
NO_STACKTRACE_SUPPORT_FLAG = '--no_stacktrace_support'
SUPPORTS_STACK_TRACES = NO_STACKTRACE_SUPPORT_FLAG not in sys.argv
if SUPPORTS_STACK_TRACES:
STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
STACK_TRACE_TEMPLATE = ''
EXPECTED_NON_EMPTY = {
u'tests':
26,
u'failures':
5,
u'disabled':
2,
u'errors':
0,
u'timestamp':
u'*',
u'time':
u'*',
u'ad_hoc_property':
u'42',
u'name':
u'AllTests',
u'testsuites': [{
u'name':
u'SuccessfulTest',
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name': u'Succeeds',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'SuccessfulTest'
}]
}, {
u'name':
u'FailedTest',
u'tests':
1,
u'failures':
1,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name':
u'Fails',
u'status':
u'RUN',
u'result':
u'COMPLETED',
u'time':
u'*',
u'timestamp':
u'*',
u'classname':
u'FailedTest',
u'failures': [{
u'failure': u'gtest_xml_output_unittest_.cc:*\n'
u'Expected equality of these values:\n'
u' 1\n 2' + STACK_TRACE_TEMPLATE,
u'type': u''
}]
}]
}, {
u'name':
u'DisabledTest',
u'tests':
1,
u'failures':
0,
u'disabled':
1,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name': u'DISABLED_test_not_run',
u'status': u'NOTRUN',
u'result': u'SUPPRESSED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'DisabledTest'
}]
}, {
u'name':
u'SkippedTest',
u'tests':
3,
u'failures':
1,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name': u'Skipped',
u'status': u'RUN',
u'result': u'SKIPPED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'SkippedTest'
}, {
u'name': u'SkippedWithMessage',
u'status': u'RUN',
u'result': u'SKIPPED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'SkippedTest'
}, {
u'name':
u'SkippedAfterFailure',
u'status':
u'RUN',
u'result':
u'COMPLETED',
u'time':
u'*',
u'timestamp':
u'*',
u'classname':
u'SkippedTest',
u'failures': [{
u'failure': u'gtest_xml_output_unittest_.cc:*\n'
u'Expected equality of these values:\n'
u' 1\n 2' + STACK_TRACE_TEMPLATE,
u'type': u''
}]
}]
}, {
u'name':
u'MixedResultTest',
u'tests':
3,
u'failures':
1,
u'disabled':
1,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name': u'Succeeds',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'MixedResultTest'
}, {
u'name':
u'Fails',
u'status':
u'RUN',
u'result':
u'COMPLETED',
u'time':
u'*',
u'timestamp':
u'*',
u'classname':
u'MixedResultTest',
u'failures': [{
u'failure': u'gtest_xml_output_unittest_.cc:*\n'
u'Expected equality of these values:\n'
u' 1\n 2' + STACK_TRACE_TEMPLATE,
u'type': u''
}, {
u'failure': u'gtest_xml_output_unittest_.cc:*\n'
u'Expected equality of these values:\n'
u' 2\n 3' + STACK_TRACE_TEMPLATE,
u'type': u''
}]
}, {
u'name': u'DISABLED_test',
u'status': u'NOTRUN',
u'result': u'SUPPRESSED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'MixedResultTest'
}]
}, {
u'name':
u'XmlQuotingTest',
u'tests':
1,
u'failures':
1,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name':
u'OutputsCData',
u'status':
u'RUN',
u'result':
u'COMPLETED',
u'time':
u'*',
u'timestamp':
u'*',
u'classname':
u'XmlQuotingTest',
u'failures': [{
u'failure': u'gtest_xml_output_unittest_.cc:*\n'
u'Failed\nXML output: <?xml encoding="utf-8">'
u'<top><![CDATA[cdata text]]></top>' +
STACK_TRACE_TEMPLATE,
u'type': u''
}]
}]
}, {
u'name':
u'InvalidCharactersTest',
u'tests':
1,
u'failures':
1,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name':
u'InvalidCharactersInMessage',
u'status':
u'RUN',
u'result':
u'COMPLETED',
u'time':
u'*',
u'timestamp':
u'*',
u'classname':
u'InvalidCharactersTest',
u'failures': [{
u'failure': u'gtest_xml_output_unittest_.cc:*\n'
u'Failed\nInvalid characters in brackets'
u' [\x01\x02]' + STACK_TRACE_TEMPLATE,
u'type': u''
}]
}]
}, {
u'name':
u'PropertyRecordingTest',
u'tests':
4,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'SetUpTestSuite':
u'yes',
u'TearDownTestSuite':
u'aye',
u'testsuite': [{
u'name': u'OneProperty',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'PropertyRecordingTest',
u'key_1': u'1'
}, {
u'name': u'IntValuedProperty',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'PropertyRecordingTest',
u'key_int': u'1'
}, {
u'name': u'ThreeProperties',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'PropertyRecordingTest',
u'key_1': u'1',
u'key_2': u'2',
u'key_3': u'3'
}, {
u'name': u'TwoValuesForOneKeyUsesLastValue',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'PropertyRecordingTest',
u'key_1': u'2'
}]
}, {
u'name':
u'NoFixtureTest',
u'tests':
3,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name': u'RecordProperty',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'NoFixtureTest',
u'key': u'1'
}, {
u'name': u'ExternalUtilityThatCallsRecordIntValuedProperty',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'NoFixtureTest',
u'key_for_utility_int': u'1'
}, {
u'name': u'ExternalUtilityThatCallsRecordStringValuedProperty',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'NoFixtureTest',
u'key_for_utility_string': u'1'
}]
}, {
u'name':
u'TypedTest/0',
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name': u'HasTypeParamAttribute',
u'type_param': u'int',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'TypedTest/0'
}]
}, {
u'name':
u'TypedTest/1',
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name': u'HasTypeParamAttribute',
u'type_param': u'long',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'TypedTest/1'
}]
}, {
u'name':
u'Single/TypeParameterizedTestSuite/0',
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name': u'HasTypeParamAttribute',
u'type_param': u'int',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'Single/TypeParameterizedTestSuite/0'
}]
}, {
u'name':
u'Single/TypeParameterizedTestSuite/1',
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name': u'HasTypeParamAttribute',
u'type_param': u'long',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'Single/TypeParameterizedTestSuite/1'
}]
}, {
u'name':
u'Single/ValueParamTest',
u'tests':
4,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name': u'HasValueParamAttribute/0',
u'value_param': u'33',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'Single/ValueParamTest'
}, {
u'name': u'HasValueParamAttribute/1',
u'value_param': u'42',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'Single/ValueParamTest'
}, {
u'name': u'AnotherTestThatHasValueParamAttribute/0',
u'value_param': u'33',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'Single/ValueParamTest'
}, {
u'name': u'AnotherTestThatHasValueParamAttribute/1',
u'value_param': u'42',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'Single/ValueParamTest'
}]
}]
}
EXPECTED_FILTERED = {
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'name':
u'AllTests',
u'ad_hoc_property':
u'42',
u'testsuites': [{
u'name':
u'SuccessfulTest',
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name': u'Succeeds',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'SuccessfulTest',
}]
}],
}
EXPECTED_NO_TEST = {
u'tests':
0,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'name':
u'AllTests',
u'testsuites': [{
u'name':
u'NonTestSuiteFailure',
u'tests':
1,
u'failures':
1,
u'disabled':
0,
u'skipped':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name':
u'',
u'status':
u'RUN',
u'result':
u'COMPLETED',
u'time':
u'*',
u'timestamp':
u'*',
u'classname':
u'',
u'failures': [{
u'failure': u'gtest_no_test_unittest.cc:*\n'
u'Expected equality of these values:\n'
u' 1\n 2' + STACK_TRACE_TEMPLATE,
u'type': u'',
}]
}]
}],
}
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
[GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestJsonOutputUnitTest(gtest_test_utils.TestCase):
"""Unit test for Google Test's JSON output functionality.
"""
# This test currently breaks on platforms that do not support typed and
# type-parameterized tests, so we don't run it under them.
if SUPPORTS_TYPED_TESTS:
def testNonEmptyJsonOutput(self):
"""Verifies JSON output for a Google Test binary with non-empty output.
      Runs a test program that generates a non-empty JSON output, and
      verifies that the JSON output is as expected.
"""
self._TestJsonOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY, 1)
def testNoTestJsonOutput(self):
"""Verifies JSON output for a Google Test binary without actual tests.
    Runs a test program that generates a JSON output for a binary with no
    tests, and verifies that the JSON output is as expected.
"""
self._TestJsonOutput('gtest_no_test_unittest', EXPECTED_NO_TEST, 0)
def testTimestampValue(self):
"""Checks whether the timestamp attribute in the JSON output is valid.
Runs a test program that generates an empty JSON output, and checks if
the timestamp attribute in the testsuites tag is valid.
"""
actual = self._GetJsonOutput('gtest_no_test_unittest', [], 0)
date_time_str = actual['timestamp']
    # Parse the expected datetime manually so we control the accepted
    # format exactly.
    match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
    self.assertTrue(
        match,
        'JSON datetime string %s has incorrect format' % date_time_str)
date_time_from_json = datetime.datetime(
year=int(match.group(1)), month=int(match.group(2)),
day=int(match.group(3)), hour=int(match.group(4)),
minute=int(match.group(5)), second=int(match.group(6)))
time_delta = abs(datetime.datetime.now() - date_time_from_json)
# timestamp value should be near the current local time
self.assertTrue(time_delta < datetime.timedelta(seconds=600),
'time_delta is %s' % time_delta)
def testDefaultOutputFile(self):
"""Verifies the default output file name.
    Confirms that Google Test produces a JSON output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
'gtest_no_test_unittest')
try:
os.remove(output_file)
    except OSError as e:
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, '%s=json' % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedJsonOutput(self):
"""Verifies that no JSON output is generated.
Tests that no JSON file is generated if the default JSON listener is
shut down before RUN_ALL_TESTS is invoked.
"""
json_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + 'out.json')
if os.path.isfile(json_path):
os.remove(json_path)
command = [GTEST_PROGRAM_PATH,
'%s=json:%s' % (GTEST_OUTPUT_FLAG, json_path),
'--shut_down_xml']
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
      # This Subprocess implementation records only the fact of signal
      # termination, not the signal number.
      self.fail('%s was killed by a signal' % GTEST_PROGRAM_NAME)
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(json_path))
def testFilteredTestJsonOutput(self):
"""Verifies JSON output when a filter is applied.
Runs a test program that executes only some tests and verifies that
non-selected tests do not show up in the JSON output.
"""
self._TestJsonOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED, 0,
extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])
def _GetJsonOutput(self, gtest_prog_name, extra_args, expected_exit_code):
"""Returns the JSON output generated by running the program gtest_prog_name.
Furthermore, the program's exit code must be expected_exit_code.
Args:
gtest_prog_name: Google Test binary name.
extra_args: extra arguments to binary invocation.
expected_exit_code: program's exit code.
"""
json_path = os.path.join(gtest_test_utils.GetTempDir(),
gtest_prog_name + 'out.json')
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
command = (
[gtest_prog_path, '%s=json:%s' % (GTEST_OUTPUT_FLAG, json_path)] +
extra_args
)
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
      self.fail('%s was killed by a signal' % gtest_prog_name)
else:
self.assert_(p.exited)
self.assertEquals(expected_exit_code, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, expected_exit_code))
with open(json_path) as f:
actual = json.load(f)
return actual
def _TestJsonOutput(self, gtest_prog_name, expected,
expected_exit_code, extra_args=None):
"""Checks the JSON output generated by the Google Test binary.
    Asserts that the JSON document generated by running the program
    gtest_prog_name matches expected, an object representing the expected
    JSON document. Furthermore, the program's exit code must be
    expected_exit_code.
Args:
gtest_prog_name: Google Test binary name.
expected: expected output.
expected_exit_code: program's exit code.
extra_args: extra arguments to binary invocation.
"""
actual = self._GetJsonOutput(gtest_prog_name, extra_args or [],
expected_exit_code)
self.assertEqual(expected, gtest_json_test_utils.normalize(actual))
if __name__ == '__main__':
if NO_STACKTRACE_SUPPORT_FLAG in sys.argv:
# unittest.main() can't handle unknown flags
sys.argv.remove(NO_STACKTRACE_SUPPORT_FLAG)
os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
gtest_test_utils.Main()
| 24,483 | 27.838634 | 80 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/gtest_testbridge_test.py
|
#!/usr/bin/env python
#
# Copyright 2018 Google LLC. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test uses filter provided via testbridge."""
import os
from googletest.test import gtest_test_utils
binary_name = 'gtest_testbridge_test_'
COMMAND = gtest_test_utils.GetTestExecutablePath(binary_name)
TESTBRIDGE_NAME = 'TESTBRIDGE_TEST_ONLY'
def Assert(condition):
if not condition:
raise AssertionError
class GTestTestFilterTest(gtest_test_utils.TestCase):
def testTestExecutionIsFiltered(self):
"""Tests that the test filter is picked up from the testbridge env var."""
subprocess_env = os.environ.copy()
subprocess_env[TESTBRIDGE_NAME] = '*.TestThatSucceeds'
p = gtest_test_utils.Subprocess(COMMAND, env=subprocess_env)
self.assertEquals(0, p.exit_code)
Assert('filter = *.TestThatSucceeds' in p.output)
Assert('[ OK ] TestFilterTest.TestThatSucceeds' in p.output)
Assert('[ PASSED ] 1 test.' in p.output)
if __name__ == '__main__':
gtest_test_utils.Main()
| 2,496 | 38.015625 | 78 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/gtest_json_test_utils.py
|
# Copyright 2018, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for gtest_json_output."""
import re
def normalize(obj):
"""Normalize output object.
Args:
obj: Google Test's JSON output object to normalize.
Returns:
Normalized output without any references to transient information that may
change from run to run.
"""
def _normalize(key, value):
if key == 'time':
return re.sub(r'^\d+(\.\d+)?s$', '*', value)
elif key == 'timestamp':
return re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\dZ$', '*', value)
elif key == 'failure':
value = re.sub(r'^.*[/\\](.*:)\d+\n', '\\1*\n', value)
return re.sub(r'Stack trace:\n(.|\n)*', 'Stack trace:\n*', value)
else:
return normalize(value)
if isinstance(obj, dict):
return {k: _normalize(k, v) for k, v in obj.items()}
if isinstance(obj, list):
return [normalize(x) for x in obj]
else:
return obj
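# Example (illustrative sketch, not part of the original module): transient
# fields are collapsed to '*' so a comparison against golden data can ignore
# them.
def _example_normalize():
  raw = {'name': 'AllTests', 'time': '0.03s',
         'timestamp': '2018-01-01T00:00:00Z'}
  # Returns {'name': 'AllTests', 'time': '*', 'timestamp': '*'}.
  return normalize(raw)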
| 2,411 | 38.540984 | 79 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/googletest-uninitialized-test.py
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
from googletest.test import gtest_test_utils
COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-uninitialized-test_')
def Assert(condition):
if not condition:
raise AssertionError
def AssertEq(expected, actual):
if expected != actual:
print('Expected: %s' % (expected,))
print(' Actual: %s' % (actual,))
raise AssertionError
def TestExitCodeAndOutput(command):
"""Runs the given command and verifies its exit code and output."""
  # An uninitialized binary may still exit with code 0, but it must then
  # print the 'IMPORTANT NOTICE' warning about calling InitGoogleTest().
  p = gtest_test_utils.Subprocess(command)
  if p.exited and p.exit_code == 0:
    Assert('IMPORTANT NOTICE' in p.output)
Assert('InitGoogleTest' in p.output)
class GTestUninitializedTest(gtest_test_utils.TestCase):
def testExitCodeAndOutput(self):
TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
gtest_test_utils.Main()
| 2,495 | 35.705882 | 82 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/googletest-filter-unittest.py
|
#!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
googletest-filter-unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
import os
import re
try:
from sets import Set as set # For Python 2.3 compatibility
except ImportError:
pass
import sys
from googletest.test import gtest_test_utils
# Constants.
# Checks if this platform can pass empty environment variables to child
# processes. We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ. We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' or 'False'.
CAN_PASS_EMPTY_ENV = False
if sys.executable:
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print(\'EMPTY_VAR\' in os.environ)'])
CAN_PASS_EMPTY_ENV = eval(child.output)
# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
CAN_UNSET_ENV = False
if sys.executable:
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print(\'UNSET_VAR\' not in os.environ)'
])
CAN_UNSET_ENV = eval(child.output)
# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'
# The command line flag for including disabled tests.
ALSO_RUN_DISABLED_TESTS_FLAG = 'gtest_also_run_disabled_tests'
# Command to run the googletest-filter-unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-filter-unittest_')
# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')
# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
[COMMAND, LIST_TESTS_FLAG]).output
# Full names of all tests in googletest-filter-unittests_.
PARAM_TESTS = [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestX/1',
'SeqQ/ParamTest.TestY/0',
'SeqQ/ParamTest.TestY/1',
]
DISABLED_TESTS = [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
]
if SUPPORTS_DEATH_TESTS:
DEATH_TESTS = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
]
else:
DEATH_TESTS = []
# All the non-disabled tests.
ACTIVE_TESTS = [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS
param_tests_present = None
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def RunAndReturnOutput(args=None):
"""Runs the test program and returns its output."""
return gtest_test_utils.Subprocess([COMMAND] + (args or []),
env=environ).output
def RunAndExtractTestList(args=None):
  """Runs the test program and returns (list of tests run, exit code)."""
p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
tests_run = []
test_case = ''
test = ''
for line in p.output.split('\n'):
match = TEST_CASE_REGEX.match(line)
if match is not None:
test_case = match.group(1)
else:
match = TEST_REGEX.match(line)
if match is not None:
test = match.group(1)
tests_run.append(test_case + '.' + test)
return (tests_run, p.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
"""Runs the given function and arguments in a modified environment."""
try:
original_env = environ.copy()
environ.update(extra_env)
return function(*args, **kwargs)
finally:
environ.clear()
environ.update(original_env)
def RunWithSharding(total_shards, shard_index, command):
"""Runs a test program shard and returns exit code and a list of tests run."""
extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
TOTAL_SHARDS_ENV_VAR: str(total_shards)}
return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command)
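# Example (hedged sketch): sharding is driven purely by the two environment
# variables above, so running shard 1 of 3 with default arguments amounts to
# the call below; it returns the tests that shard ran plus the exit code.
def _ExampleRunShard():
  return RunWithSharding(total_shards=3, shard_index=1, command=None)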
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
"""Tests the env variable or the command line flag to filter tests."""
# Utilities.
def AssertSetEqual(self, lhs, rhs):
"""Asserts that two sets are equal."""
for elem in lhs:
self.assert_(elem in rhs, '%s in %s' % (elem, rhs))
for elem in rhs:
self.assert_(elem in lhs, '%s in %s' % (elem, lhs))
def AssertPartitionIsValid(self, set_var, list_of_sets):
"""Asserts that list_of_sets is a valid partition of set_var."""
full_partition = []
for slice_var in list_of_sets:
full_partition.extend(slice_var)
self.assertEqual(len(set_var), len(full_partition))
self.assertEqual(set(set_var), set(full_partition))
def AdjustForParameterizedTests(self, tests_to_run):
"""Adjust tests_to_run in case value parameterized tests are disabled."""
global param_tests_present
if not param_tests_present:
return list(set(tests_to_run) - set(PARAM_TESTS))
else:
return tests_to_run
def RunAndVerify(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for a given filter."""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# First, tests using the environment variable.
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
tests_run = RunAndExtractTestList()[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, tests_to_run)
# pylint: enable-msg=C6403
# Next, tests using the command line flag.
if gtest_filter is None:
args = []
else:
args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
args=None, check_exit_0=False):
"""Checks that binary runs correct tests for the given filter and shard.
Runs all shards of googletest-filter-unittest_ with the given filter, and
verifies that the right set of tests were run. The union of tests run
on each shard should be identical to tests_to_run, without duplicates.
    If check_exit_0 is true, also verifies that every shard exits with code 0.
Args:
gtest_filter: A filter to apply to the tests.
total_shards: A total number of shards to split test run into.
tests_to_run: A set of tests expected to run.
      args: Arguments to pass to the test binary.
check_exit_0: When set to a true value, make sure that all shards
return 0.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
partition = []
for i in range(0, total_shards):
(tests_run, exit_code) = RunWithSharding(total_shards, i, args)
if check_exit_0:
self.assertEqual(0, exit_code)
partition.append(tests_run)
self.AssertPartitionIsValid(tests_to_run, partition)
SetEnvVar(FILTER_ENV_VAR, None)
# pylint: enable-msg=C6403
def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for the given filter.
Runs googletest-filter-unittest_ with the given filter, and enables
disabled tests. Verifies that the right set of tests were run.
Args:
gtest_filter: A filter to apply to the tests.
tests_to_run: A set of tests expected to run.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Construct the command line.
args = ['--%s' % ALSO_RUN_DISABLED_TESTS_FLAG]
if gtest_filter is not None:
args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def setUp(self):
"""Sets up test case.
Determines whether value-parameterized tests are enabled in the binary and
sets the flags accordingly.
"""
global param_tests_present
if param_tests_present is None:
param_tests_present = PARAM_TEST_REGEX.search(
RunAndReturnOutput()) is not None
def testDefaultBehavior(self):
"""Tests the behavior of not specifying the filter."""
self.RunAndVerify(None, ACTIVE_TESTS)
def testDefaultBehaviorWithShards(self):
"""Tests the behavior without the filter, with sharding enabled."""
self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)
def testEmptyFilter(self):
"""Tests an empty filter."""
self.RunAndVerify('', [])
self.RunAndVerifyWithSharding('', 1, [])
self.RunAndVerifyWithSharding('', 2, [])
def testBadFilter(self):
"""Tests a filter that matches nothing."""
self.RunAndVerify('BadFilter', [])
self.RunAndVerifyAllowingDisabled('BadFilter', [])
def testFullName(self):
"""Tests filtering by full name."""
self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])
def testUniversalFilters(self):
"""Tests filters that match everything."""
self.RunAndVerify('*', ACTIVE_TESTS)
self.RunAndVerify('*.*', ACTIVE_TESTS)
self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)
def testFilterByTestCase(self):
"""Tests filtering by test case name."""
self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])
BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
self.RunAndVerify('BazTest.*', BAZ_TESTS)
self.RunAndVerifyAllowingDisabled('BazTest.*',
BAZ_TESTS + ['BazTest.DISABLED_TestC'])
def testFilterByTest(self):
"""Tests filtering by test name."""
self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])
def testFilterDisabledTests(self):
"""Select only the disabled tests to run."""
self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
['DISABLED_FoobarTest.Test1'])
self.RunAndVerify('*DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
self.RunAndVerify('*.DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.DISABLED_Test2',
])
self.RunAndVerify('DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('DISABLED_*', [
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
])
def testWildcardInTestCaseName(self):
"""Tests using wildcard in the test case name."""
self.RunAndVerify('*a*.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)
def testWildcardInTestName(self):
"""Tests using wildcard in the test name."""
self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testFilterWithoutDot(self):
"""Tests a filter that has no '.' in it."""
self.RunAndVerify('*z*', [
'FooTest.Xyz',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
])
def testTwoPatterns(self):
"""Tests filters that consist of two patterns."""
self.RunAndVerify('Foo*.*:*A*', [
'FooTest.Abc',
'FooTest.Xyz',
'BazTest.TestA',
])
# An empty pattern + a non-empty one
self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testThreePatterns(self):
"""Tests filters that consist of three patterns."""
self.RunAndVerify('*oo*:*A*:*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
'BazTest.TestA',
])
# The 2nd pattern is empty.
self.RunAndVerify('*oo*::*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
])
# The last 2 patterns are empty.
self.RunAndVerify('*oo*::', [
'FooTest.Abc',
'FooTest.Xyz',
])
def testNegativeFilters(self):
self.RunAndVerify('*-BazTest.TestOne', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('BarTest.*-BarTest.TestOne', [
'BarTest.TestTwo',
'BarTest.TestThree',
])
# Tests without leading '*'.
self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
# Value parameterized tests.
self.RunAndVerify('*/*', PARAM_TESTS)
# Value parameterized tests filtering by the sequence name.
self.RunAndVerify('SeqP/*', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
])
# Value parameterized tests filtering by the test name.
self.RunAndVerify('*/0', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestY/0',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestY/0',
])
def testFlagOverridesEnvVar(self):
"""Tests that the filter flag overrides the filtering env. variable."""
SetEnvVar(FILTER_ENV_VAR, 'Foo*')
args = ['--%s=%s' % (FILTER_FLAG, '*One')]
tests_run = RunAndExtractTestList(args)[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])
def testShardStatusFileIsCreated(self):
"""Tests that the shard file is created if specified in the environment."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
finally:
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
def testShardStatusFileIsCreatedWithListTests(self):
"""Tests that the shard file is created with the "list_tests" flag."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file2')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
output = InvokeWithModifiedEnv(extra_env,
RunAndReturnOutput,
[LIST_TESTS_FLAG])
finally:
# This assertion ensures that Google Test enumerated the tests as
# opposed to running them.
self.assert_('[==========]' not in output,
'Unexpected output during test enumeration.\n'
'Please ensure that LIST_TESTS_FLAG is assigned the\n'
'correct flag value for listing Google Test tests.')
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
if SUPPORTS_DEATH_TESTS:
def testShardingWorksWithDeathTests(self):
"""Tests integration with death tests and sharding."""
gtest_filter = 'HasDeathTest.*:SeqP/*'
expected_tests = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
]
for flag in ['--gtest_death_test_style=threadsafe',
'--gtest_death_test_style=fast']:
self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
check_exit_0=True, args=[flag])
self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
check_exit_0=True, args=[flag])
if __name__ == '__main__':
gtest_test_utils.Main()
| 21,466 | 32.542188 | 80 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/gtest_help_test.py
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing and Mocking Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
import os
import re
from googletest.test import gtest_test_utils
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_GNUHURD = os.name == 'posix' and os.uname()[0] == 'GNU'
IS_GNUKFREEBSD = os.name == 'posix' and os.uname()[0] == 'GNU/kFreeBSD'
IS_OPENBSD = os.name == 'posix' and os.uname()[0] == 'OpenBSD'
IS_WINDOWS = os.name == 'nt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
re.sub('^--', '/', LIST_TESTS_FLAG),
re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
[PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
FLAG_PREFIX + r'list_tests.*' +
FLAG_PREFIX + r'filter=.*' +
FLAG_PREFIX + r'also_run_disabled_tests.*' +
FLAG_PREFIX + r'repeat=.*' +
FLAG_PREFIX + r'shuffle.*' +
FLAG_PREFIX + r'random_seed=.*' +
FLAG_PREFIX + r'color=.*' +
FLAG_PREFIX + r'brief.*' +
FLAG_PREFIX + r'print_time.*' +
FLAG_PREFIX + r'output=.*' +
FLAG_PREFIX + r'break_on_failure.*' +
FLAG_PREFIX + r'throw_on_failure.*' +
FLAG_PREFIX + r'catch_exceptions=0.*',
re.DOTALL)
def RunWithFlag(flag):
"""Runs gtest_help_test_ with the given flag.
Returns:
the exit code and the text output as a tuple.
Args:
flag: the command-line flag to pass to gtest_help_test_, or None.
"""
if flag is None:
command = [PROGRAM_PATH]
else:
command = [PROGRAM_PATH, flag]
child = gtest_test_utils.Subprocess(command)
return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
"""Tests the --help flag and its equivalent forms."""
def TestHelpFlag(self, flag):
"""Verifies correct behavior when help flag is specified.
    The right message must be printed and the tests must be
    skipped when the given flag is specified.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assertEquals(0, exit_code)
self.assert_(HELP_REGEX.search(output), output)
if IS_LINUX or IS_GNUHURD or IS_GNUKFREEBSD or IS_OPENBSD:
self.assert_(STREAM_RESULT_TO_FLAG in output, output)
else:
self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
else:
self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)
def TestNonHelpFlag(self, flag):
"""Verifies correct behavior when no help flag is specified.
Verifies that when no help flag is specified, the tests are run
and the help message is not printed.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assert_(exit_code != 0)
self.assert_(not HELP_REGEX.search(output), output)
def testPrintsHelpWithFullFlag(self):
self.TestHelpFlag('--help')
def testPrintsHelpWithShortFlag(self):
self.TestHelpFlag('-h')
def testPrintsHelpWithQuestionFlag(self):
self.TestHelpFlag('-?')
def testPrintsHelpWithWindowsStyleQuestionFlag(self):
self.TestHelpFlag('/?')
def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
self.TestHelpFlag(UNKNOWN_FLAG)
def testPrintsHelpWithIncorrectFlagStyle(self):
for incorrect_flag in INCORRECT_FLAG_VARIANTS:
self.TestHelpFlag(incorrect_flag)
def testRunsTestsWithoutHelpFlag(self):
"""Verifies that when no help flag is specified, the tests are run
and the help message is not printed."""
self.TestNonHelpFlag(None)
def testRunsTestsWithGtestInternalFlag(self):
"""Verifies that the tests are run and no help message is printed when
a flag starting with Google Test prefix and 'internal_' is supplied."""
self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
gtest_test_utils.Main()
| 6,114 | 33.942857 | 75 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/googletest-output-test.py
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
r"""Tests the text output of Google C++ Testing and Mocking Framework.
To update the golden file:
googletest_output_test.py --build_dir=BUILD/DIR --gengolden
where BUILD/DIR contains the built googletest-output-test_ file.
googletest_output_test.py --gengolden
googletest_output_test.py
"""
import difflib
import os
import re
import sys
from googletest.test import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
# The flag indicating stacktraces are not supported
NO_STACKTRACE_SUPPORT_FLAG = '--no_stacktrace_support'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
GOLDEN_NAME = 'googletest-output-test-golden-lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('googletest-output-test_')
# At least one command we exercise must not have the
# 'internal_skip_environment_and_ad_hoc_tests' argument.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
'--gtest_print_time',
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
{}, [PROGRAM_PATH,
'--gtest_also_run_disabled_tests',
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
{'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
[PROGRAM_PATH,
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveLocations(test_output):
"""Removes all file location info from a Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE_NAME:#: '.
"""
return re.sub(r'.*[/\\]((googletest-output-test_|gtest).cc)(\:\d+|\(\d+\))\: ',
r'\1:#: ', test_output)
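# Example (illustrative sketch, not part of the original module): what
# RemoveLocations() does to a single line of output. The directory prefix
# below is hypothetical.
def _ExampleRemoveLocations():
  line = '/home/user/src/googletest-output-test_.cc:123: Failure\n'
  # Returns 'googletest-output-test_.cc:#: Failure\n'.
  return RemoveLocations(line)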
def RemoveStackTraceDetails(output):
"""Removes all stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n',
'Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
"""Removes all traces of stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)
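# Editor's note: an illustrative sanity check (fabricated trace, not part of
# the original test). The non-greedy (.|\n)*? stops at the first blank line,
# so exactly one trace is consumed per match.
assert (RemoveStackTraceDetails('Stack trace:\n  #0 frame\n\nOK\n')
        == 'Stack trace: (omitted)\n\nOK\n')
assert RemoveStackTraces('Stack trace:\n  #0 frame\n\nOK\n') == 'OK\n'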
def RemoveTime(output):
"""Removes all time information from a Google Test program's output."""
return re.sub(r'\(\d+ ms', '(? ms', output)
def RemoveTypeInfoDetails(test_output):
"""Removes compiler-specific type info from Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with type information normalized to canonical form.
"""
# some compilers output the name of type 'unsigned int' as 'unsigned'
return re.sub(r'unsigned int', 'unsigned', test_output)
def NormalizeToCurrentPlatform(test_output):
"""Normalizes platform specific output details for easier comparison."""
if IS_WINDOWS:
# Removes the color information that is not present on Windows.
test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
# Changes failure message headers into the Windows format.
test_output = re.sub(r': Failure\n', r': error: ', test_output)
# Changes file(line_number) to file:line_number.
test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
return test_output
def RemoveTestCounts(output):
"""Removes test counts from a Google Test program's output."""
output = re.sub(r'\d+ tests?, listed below',
'? tests, listed below', output)
output = re.sub(r'\d+ FAILED TESTS',
'? FAILED TESTS', output)
output = re.sub(r'\d+ tests? from \d+ test cases?',
'? tests from ? test cases', output)
output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
r'? tests from \1', output)
return re.sub(r'\d+ tests?\.', '? tests.', output)
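# Editor's note: an illustrative sanity check (made-up summary line, not part
# of the original test); digit counts collapse to '?' so goldens stay stable
# when the number of tests changes.
assert (RemoveTestCounts('[  FAILED  ] 3 tests, listed below:\n')
        == '[  FAILED  ] ? tests, listed below:\n')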
def RemoveMatchingTests(test_output, pattern):
"""Removes output of specified tests from a Google Test program's output.
This function strips not only the beginning and the end of a test but also
all output in between.
Args:
test_output: A string containing the test output.
pattern: A regex string that matches names of test cases or
tests to remove.
Returns:
Contents of test_output with tests whose names match pattern removed.
"""
  test_output = re.sub(
      r'.*\[ RUN      \] .*%s(.|\n)*?\[(  FAILED  |       OK )\] .*%s.*\n' % (
          pattern, pattern),
      '',
      test_output)
return re.sub(r'.*%s.*\n' % pattern, '', test_output)
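# Editor's note: a sketch (fabricated gtest output) of what
# RemoveMatchingTests(output, 'DeathTest') strips:
#
#   [ RUN      ] FooDeathTest.Bar         \
#   some interleaved output                > removed as one block
#   [       OK ] FooDeathTest.Bar (1 ms)  /
#   [ RUN      ] PassingTest.Baz          -> kept
#
# The first re.sub removes the whole RUN..OK/FAILED block, interior output
# included; the final re.sub drops any stray single line naming the pattern.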
def NormalizeOutput(output):
"""Normalizes output (the output of googletest-output-test_.exe)."""
output = ToUnixLineEnding(output)
output = RemoveLocations(output)
output = RemoveStackTraceDetails(output)
output = RemoveTime(output)
return output
def GetShellCommandOutput(env_cmd):
"""Runs a command in a sub-process, and returns its output in a string.
Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
         environment variables to set, and element 1 is a list containing
         the command and its flags.
Returns:
A string with the command's combined standard and diagnostic output.
"""
  # Spawns the command in a sub-process with os.environ augmented by the
  # extra variables from env_cmd[0], and captures its combined output.
environ = os.environ.copy()
environ.update(env_cmd[0])
p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
return p.output
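# Editor's note: an illustrative env_cmd tuple (constructed but not executed
# here; the real ones are the COMMAND_* constants defined above). Element 0
# is merged into a copy of os.environ; element 1 is the argv list handed to
# gtest_test_utils.Subprocess.
_EXAMPLE_ENV_CMD = ({'GTEST_COLOR': 'yes'}, [PROGRAM_PATH, '--gtest_list_tests'])
del _EXAMPLE_ENV_CMD  # illustration only; keep the module namespace clean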
def GetCommandOutput(env_cmd):
  """Runs a command and returns its output with all file location
  info stripped off.
  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
         environment variables to set, and element 1 is a list containing
         the command and its flags.
  Returns:
    The command's normalized output (see NormalizeOutput).
  """
# Disables exception pop-ups on Windows.
environ, cmdline = env_cmd
environ = dict(environ) # Ensures we are modifying a copy.
environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
"""Returns concatenated output from several representative commands."""
return (GetCommandOutput(COMMAND_WITH_COLOR) +
GetCommandOutput(COMMAND_WITH_TIME) +
GetCommandOutput(COMMAND_WITH_DISABLED) +
GetCommandOutput(COMMAND_WITH_SHARDING))
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = NO_STACKTRACE_SUPPORT_FLAG not in sys.argv
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
SUPPORTS_TYPED_TESTS and
SUPPORTS_THREADS and
SUPPORTS_STACK_TRACES)
class GTestOutputTest(gtest_test_utils.TestCase):
def RemoveUnsupportedTests(self, test_output):
if not SUPPORTS_DEATH_TESTS:
test_output = RemoveMatchingTests(test_output, 'DeathTest')
if not SUPPORTS_TYPED_TESTS:
test_output = RemoveMatchingTests(test_output, 'TypedTest')
test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
if not SUPPORTS_THREADS:
test_output = RemoveMatchingTests(test_output,
'ExpectFailureWithThreadsTest')
test_output = RemoveMatchingTests(test_output,
'ScopedFakeTestPartResultReporterTest')
test_output = RemoveMatchingTests(test_output,
'WorksConcurrently')
if not SUPPORTS_STACK_TRACES:
test_output = RemoveStackTraces(test_output)
return test_output
def testOutput(self):
output = GetOutputOfAllCommands()
    # A mis-configured source control system can cause \r to appear in EOL
    # sequences when we read the golden file, irrespective of the operating
    # system used. Therefore we need to strip those \r's from newlines
    # unconditionally.
    with open(GOLDEN_PATH, 'rb') as golden_file:
      golden = ToUnixLineEnding(golden_file.read().decode())
# We want the test to pass regardless of certain features being
# supported or not.
# We still have to remove type name specifics in all cases.
normalized_actual = RemoveTypeInfoDetails(output)
normalized_golden = RemoveTypeInfoDetails(golden)
if CAN_GENERATE_GOLDEN_FILE:
self.assertEqual(normalized_golden, normalized_actual,
'\n'.join(difflib.unified_diff(
normalized_golden.split('\n'),
normalized_actual.split('\n'),
'golden', 'actual')))
else:
normalized_actual = NormalizeToCurrentPlatform(
RemoveTestCounts(normalized_actual))
normalized_golden = NormalizeToCurrentPlatform(
RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
# This code is very handy when debugging golden file differences:
      if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
        with open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_googletest-output-test_normalized_actual.txt'), 'wb') as f:
          f.write(normalized_actual.encode())
        with open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_googletest-output-test_normalized_golden.txt'), 'wb') as f:
          f.write(normalized_golden.encode())
self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
if NO_STACKTRACE_SUPPORT_FLAG in sys.argv:
# unittest.main() can't handle unknown flags
sys.argv.remove(NO_STACKTRACE_SUPPORT_FLAG)
if GENGOLDEN_FLAG in sys.argv:
if CAN_GENERATE_GOLDEN_FILE:
      output = GetOutputOfAllCommands()
      with open(GOLDEN_PATH, 'wb') as golden_file:
        golden_file.write(output.encode())
else:
      message = (
          """Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests,
typed tests, stack traces, and multiple threads).
Please build this test and generate the golden file using Blaze on Linux.""")
sys.stderr.write(message)
sys.exit(1)
else:
gtest_test_utils.Main()
| 12,648 | 35.45245 | 81 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/googletest-json-outfiles-test.py
|
#!/usr/bin/env python
# Copyright 2018, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_json_output module."""
import json
import os
from googletest.test import gtest_json_test_utils
from googletest.test import gtest_test_utils
GTEST_OUTPUT_SUBDIR = 'json_outfiles'
GTEST_OUTPUT_1_TEST = 'gtest_xml_outfile1_test_'
GTEST_OUTPUT_2_TEST = 'gtest_xml_outfile2_test_'
EXPECTED_1 = {
    u'tests': 1,
    u'failures': 0,
    u'disabled': 0,
    u'errors': 0,
    u'time': u'*',
    u'timestamp': u'*',
    u'name': u'AllTests',
    u'testsuites': [{
        u'name': u'PropertyOne',
        u'tests': 1,
        u'failures': 0,
        u'disabled': 0,
        u'errors': 0,
        u'time': u'*',
        u'timestamp': u'*',
        u'testsuite': [{
            u'name': u'TestSomeProperties',
            u'status': u'RUN',
            u'result': u'COMPLETED',
            u'time': u'*',
            u'timestamp': u'*',
            u'classname': u'PropertyOne',
            u'SetUpProp': u'1',
            u'TestSomeProperty': u'1',
            u'TearDownProp': u'1',
        }],
    }],
}
EXPECTED_2 = {
    u'tests': 1,
    u'failures': 0,
    u'disabled': 0,
    u'errors': 0,
    u'time': u'*',
    u'timestamp': u'*',
    u'name': u'AllTests',
    u'testsuites': [{
        u'name': u'PropertyTwo',
        u'tests': 1,
        u'failures': 0,
        u'disabled': 0,
        u'errors': 0,
        u'time': u'*',
        u'timestamp': u'*',
        u'testsuite': [{
            u'name': u'TestSomeProperties',
            u'status': u'RUN',
            u'result': u'COMPLETED',
            u'timestamp': u'*',
            u'time': u'*',
            u'classname': u'PropertyTwo',
            u'SetUpProp': u'2',
            u'TestSomeProperty': u'2',
            u'TearDownProp': u'2',
        }],
    }],
}
class GTestJsonOutFilesTest(gtest_test_utils.TestCase):
"""Unit test for Google Test's JSON output functionality."""
def setUp(self):
    # We want the trailing '/' that the last "" provides in os.path.join, to
    # tell Google Test to create an output directory instead of a single
    # file for JSON output.
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, '')
self.DeleteFilesAndDir()
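  # Editor's note (illustration, not part of the original test): the trailing
  # '' in the join above is what produces the trailing separator, e.g. on
  # POSIX os.path.join('/tmp', 'json_outfiles', '') == '/tmp/json_outfiles/'.
  # That trailing '/' is what makes --gtest_output treat the value as an
  # output directory rather than as a single file.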
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + '.json'))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + '.json'))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_1)
def testOutfile2(self):
self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_2)
def _TestOutFile(self, test_name, expected):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, '--gtest_output=json:%s' % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
    self.assertTrue(p.exited)
    self.assertEqual(0, p.exit_code)
output_file_name1 = test_name + '.json'
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
    self.assertTrue(os.path.isfile(output_file1) or os.path.isfile(output_file2),
                    output_file1)
if os.path.isfile(output_file1):
with open(output_file1) as f:
actual = json.load(f)
else:
with open(output_file2) as f:
actual = json.load(f)
self.assertEqual(expected, gtest_json_test_utils.normalize(actual))
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '0'
gtest_test_utils.Main()
| 5,705 | 28.71875 | 80 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/gtest_list_output_unittest.py
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. If output is requested, via --gtest_output=xml
or --gtest_output=json, the tests are listed, with extra information in the
output file.
This script tests such functionality by invoking gtest_list_output_unittest_
(a program written with Google Test) with the relevant command line flags.
"""
import os
import re
from googletest.test import gtest_test_utils
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = '--gtest_output'
EXPECTED_XML = """<\?xml version="1.0" encoding="UTF-8"\?>
<testsuites tests="16" name="AllTests">
<testsuite name="FooTest" tests="2">
<testcase name="Test1" file=".*gtest_list_output_unittest_.cc" line="43" />
<testcase name="Test2" file=".*gtest_list_output_unittest_.cc" line="45" />
</testsuite>
<testsuite name="FooTestFixture" tests="2">
<testcase name="Test3" file=".*gtest_list_output_unittest_.cc" line="48" />
<testcase name="Test4" file=".*gtest_list_output_unittest_.cc" line="49" />
</testsuite>
<testsuite name="TypedTest/0" tests="2">
<testcase name="Test7" type_param="int" file=".*gtest_list_output_unittest_.cc" line="60" />
<testcase name="Test8" type_param="int" file=".*gtest_list_output_unittest_.cc" line="61" />
</testsuite>
<testsuite name="TypedTest/1" tests="2">
<testcase name="Test7" type_param="bool" file=".*gtest_list_output_unittest_.cc" line="60" />
<testcase name="Test8" type_param="bool" file=".*gtest_list_output_unittest_.cc" line="61" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestSuite/0" tests="2">
<testcase name="Test9" type_param="int" file=".*gtest_list_output_unittest_.cc" line="66" />
<testcase name="Test10" type_param="int" file=".*gtest_list_output_unittest_.cc" line="67" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestSuite/1" tests="2">
<testcase name="Test9" type_param="bool" file=".*gtest_list_output_unittest_.cc" line="66" />
<testcase name="Test10" type_param="bool" file=".*gtest_list_output_unittest_.cc" line="67" />
</testsuite>
<testsuite name="ValueParam/ValueParamTest" tests="4">
<testcase name="Test5/0" value_param="33" file=".*gtest_list_output_unittest_.cc" line="52" />
<testcase name="Test5/1" value_param="42" file=".*gtest_list_output_unittest_.cc" line="52" />
<testcase name="Test6/0" value_param="33" file=".*gtest_list_output_unittest_.cc" line="53" />
<testcase name="Test6/1" value_param="42" file=".*gtest_list_output_unittest_.cc" line="53" />
</testsuite>
</testsuites>
"""
EXPECTED_JSON = """{
"tests": 16,
"name": "AllTests",
"testsuites": \[
{
"name": "FooTest",
"tests": 2,
"testsuite": \[
{
"name": "Test1",
"file": ".*gtest_list_output_unittest_.cc",
"line": 43
},
{
"name": "Test2",
"file": ".*gtest_list_output_unittest_.cc",
"line": 45
}
\]
},
{
"name": "FooTestFixture",
"tests": 2,
"testsuite": \[
{
"name": "Test3",
"file": ".*gtest_list_output_unittest_.cc",
"line": 48
},
{
"name": "Test4",
"file": ".*gtest_list_output_unittest_.cc",
"line": 49
}
\]
},
{
"name": "TypedTest\\\\/0",
"tests": 2,
"testsuite": \[
{
"name": "Test7",
"type_param": "int",
"file": ".*gtest_list_output_unittest_.cc",
"line": 60
},
{
"name": "Test8",
"type_param": "int",
"file": ".*gtest_list_output_unittest_.cc",
"line": 61
}
\]
},
{
"name": "TypedTest\\\\/1",
"tests": 2,
"testsuite": \[
{
"name": "Test7",
"type_param": "bool",
"file": ".*gtest_list_output_unittest_.cc",
"line": 60
},
{
"name": "Test8",
"type_param": "bool",
"file": ".*gtest_list_output_unittest_.cc",
"line": 61
}
\]
},
{
"name": "Single\\\\/TypeParameterizedTestSuite\\\\/0",
"tests": 2,
"testsuite": \[
{
"name": "Test9",
"type_param": "int",
"file": ".*gtest_list_output_unittest_.cc",
"line": 66
},
{
"name": "Test10",
"type_param": "int",
"file": ".*gtest_list_output_unittest_.cc",
"line": 67
}
\]
},
{
"name": "Single\\\\/TypeParameterizedTestSuite\\\\/1",
"tests": 2,
"testsuite": \[
{
"name": "Test9",
"type_param": "bool",
"file": ".*gtest_list_output_unittest_.cc",
"line": 66
},
{
"name": "Test10",
"type_param": "bool",
"file": ".*gtest_list_output_unittest_.cc",
"line": 67
}
\]
},
{
"name": "ValueParam\\\\/ValueParamTest",
"tests": 4,
"testsuite": \[
{
"name": "Test5\\\\/0",
"value_param": "33",
"file": ".*gtest_list_output_unittest_.cc",
"line": 52
},
{
"name": "Test5\\\\/1",
"value_param": "42",
"file": ".*gtest_list_output_unittest_.cc",
"line": 52
},
{
"name": "Test6\\\\/0",
"value_param": "33",
"file": ".*gtest_list_output_unittest_.cc",
"line": 53
},
{
"name": "Test6\\\\/1",
"value_param": "42",
"file": ".*gtest_list_output_unittest_.cc",
"line": 53
}
\]
}
\]
}
"""
class GTestListTestsOutputUnitTest(gtest_test_utils.TestCase):
"""Unit test for Google Test's list tests with output to file functionality.
"""
def testXml(self):
"""Verifies XML output for listing tests in a Google Test binary.
Runs a test program that generates an empty XML output, and
tests that the XML output is expected.
"""
self._TestOutput('xml', EXPECTED_XML)
  def testJSON(self):
    """Verifies JSON output for listing tests in a Google Test binary.
    Runs the test program with JSON listing enabled, and verifies that the
    JSON output matches the expected output.
    """
self._TestOutput('json', EXPECTED_JSON)
def _GetOutput(self, out_format):
file_path = os.path.join(gtest_test_utils.GetTempDir(),
'test_out.' + out_format)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
'gtest_list_output_unittest_')
command = ([
gtest_prog_path,
'%s=%s:%s' % (GTEST_OUTPUT_FLAG, out_format, file_path),
'--gtest_list_tests'
])
environ_copy = os.environ.copy()
p = gtest_test_utils.Subprocess(
command, env=environ_copy, working_dir=gtest_test_utils.GetTempDir())
self.assertTrue(p.exited)
self.assertEqual(0, p.exit_code)
self.assertTrue(os.path.isfile(file_path))
with open(file_path) as f:
result = f.read()
return result
  def _TestOutput(self, test_format, expected_output):
    actual = self._GetOutput(test_format)
    actual_lines = actual.splitlines()
    expected_lines = expected_output.splitlines()
    for line_count, actual_line in enumerate(actual_lines):
      expected_line = expected_lines[line_count]
      expected_line_re = re.compile(expected_line.strip())
      self.assertTrue(
          expected_line_re.match(actual_line.strip()),
          ('actual output of "%s",\n'
           'which does not match expected regex of "%s"\n'
           'on line %d' % (actual, expected_output, line_count)))
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
gtest_test_utils.Main()
| 9,502 | 32.111498 | 98 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/googletest-list-tests-unittest.py
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking googletest-list-tests-unittest_ (a program written with
Google Test) with different command line flags.
"""
import re
from googletest.test import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the googletest-list-tests-unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('googletest-list-tests-unittest_')
# The expected output when running googletest-list-tests-unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
Abc\.
Xyz
Def
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
TypedTest/1\. # TypeParam = int\s*\*( __ptr64)?
TestA
TestB
TypedTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
My/TypeParamTest/1\. # TypeParam = int\s*\*( __ptr64)?
TestA
TestB
My/TypeParamTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
MyInstantiation/ValueParamTest\.
TestA/0 # GetParam\(\) = one line
TestA/1 # GetParam\(\) = two\\nlines
TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
TestB/0 # GetParam\(\) = one line
TestB/1 # GetParam\(\) = two\\nlines
TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
""")
# The expected output when running googletest-list-tests-unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
""")
# Utilities.
def Run(args):
"""Runs googletest-list-tests-unittest_ and returns the list of tests printed."""
return gtest_test_utils.Subprocess([EXE_PATH] + args,
capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
"""Tests using the --gtest_list_tests flag to list all tests."""
def RunAndVerify(self, flag_value, expected_output_re, other_flag):
"""Runs googletest-list-tests-unittest_ and verifies that it prints
the correct tests.
Args:
flag_value: value of the --gtest_list_tests flag;
None if the flag should not be present.
expected_output_re: regular expression that matches the expected
output after running command;
other_flag: a different flag to be passed to command
along with gtest_list_tests;
None if the flag should not be present.
"""
if flag_value is None:
flag = ''
flag_expression = 'not set'
elif flag_value == '0':
flag = '--%s=0' % LIST_TESTS_FLAG
flag_expression = '0'
else:
flag = '--%s' % LIST_TESTS_FLAG
flag_expression = '1'
args = [flag]
if other_flag is not None:
args += [other_flag]
output = Run(args)
    if expected_output_re:
      self.assertTrue(
          expected_output_re.match(output),
          ('when %s is %s, the output of "%s" is "%s",\n'
           'which does not match regex "%s"' %
           (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output,
            expected_output_re.pattern)))
    else:
      self.assertFalse(
          EXPECTED_OUTPUT_NO_FILTER_RE.match(output),
          ('when %s is %s, the output of "%s" is "%s"' %
           (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output)))
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(flag_value=None,
expected_output_re=None,
other_flag=None)
def testFlag(self):
"""Tests using the --gtest_list_tests flag."""
self.RunAndVerify(flag_value='0',
expected_output_re=None,
other_flag=None)
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag=None)
def testOverrideNonFilterFlags(self):
"""Tests that --gtest_list_tests overrides the non-filter flags."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag='--gtest_break_on_failure')
def testWithFilterFlags(self):
"""Tests that --gtest_list_tests takes into account the
--gtest_filter flag."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main()
| 6,543 | 30.76699 | 84 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/googletest-param-test-invalid-name1-test.py
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
from googletest.test import gtest_test_utils
binary_name = 'googletest-param-test-invalid-name1-test_'
COMMAND = gtest_test_utils.GetTestExecutablePath(binary_name)
def Assert(condition):
if not condition:
raise AssertionError
def TestExitCodeAndOutput(command):
"""Runs the given command and verifies its exit code and output."""
err = ('Parameterized test name \'"InvalidWithQuotes"\' is invalid')
p = gtest_test_utils.Subprocess(command)
Assert(p.terminated_by_signal)
  # Verify that the expected error message appears in the output.
Assert(err in p.output)
class GTestParamTestInvalidName1Test(gtest_test_utils.TestCase):
def testExitCodeAndOutput(self):
TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
gtest_test_utils.Main()
| 2,400 | 36.515625 | 77 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/googletest-param-test-invalid-name2-test.py
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
from googletest.test import gtest_test_utils
binary_name = 'googletest-param-test-invalid-name2-test_'
COMMAND = gtest_test_utils.GetTestExecutablePath(binary_name)
def Assert(condition):
if not condition:
raise AssertionError
def TestExitCodeAndOutput(command):
"""Runs the given command and verifies its exit code and output."""
err = ('Duplicate parameterized test name \'a\'')
p = gtest_test_utils.Subprocess(command)
Assert(p.terminated_by_signal)
# Check for appropriate output
Assert(err in p.output)
class GTestParamTestInvalidName2Test(gtest_test_utils.TestCase):
def testExitCodeAndOutput(self):
TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
gtest_test_utils.Main()
| 2,355 | 36.396825 | 77 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/googletest-break-on-failure-unittest.py
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking googletest-break-on-failure-unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
import os
from googletest.test import gtest_test_utils
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the googletest-break-on-failure-unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'googletest-break-on-failure-unittest_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
"""Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
p = gtest_test_utils.Subprocess(command, env=environ)
if p.terminated_by_signal:
return 1
else:
return 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable or
the --gtest_break_on_failure flag to turn assertion failures into
segmentation faults.
"""
def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
"""Runs googletest-break-on-failure-unittest_ and verifies that it does
(or does not) have a seg-fault.
Args:
env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_break_on_failure flag;
None if the flag should not be present.
expect_seg_fault: 1 if the program is expected to generate a seg-fault;
0 otherwise.
"""
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
else:
flag = '--%s' % BREAK_ON_FAILURE_FLAG
command = [EXE_PATH]
if flag:
command.append(flag)
if expect_seg_fault:
should_or_not = 'should'
else:
should_or_not = 'should not'
has_seg_fault = Run(command)
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
(BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
should_or_not))
    self.assertEqual(expect_seg_fault, has_seg_fault, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None,
flag_value=None,
expect_seg_fault=0)
def testEnvVar(self):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value=None,
expect_seg_fault=1)
def testFlag(self):
"""Tests using the --gtest_break_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
def testFlagOverridesEnvVar(self):
"""Tests that the flag overrides the environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='0',
flag_value='1',
expect_seg_fault=1)
self.RunAndVerify(env_var_value='1',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
def testBreakOnFailureOverridesThrowOnFailure(self):
"""Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)
if IS_WINDOWS:
def testCatchExceptionsDoesNotInterfere(self):
"""Tests that gtest_catch_exceptions doesn't interfere."""
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
gtest_test_utils.Main()
| 7,322 | 34.038278 | 79 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/googletest-throw-on-failure-test.py
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes googletest-throw-on-failure-test_ (a program written with
Google Test) with different environments and command line flags.
"""
import os
from googletest.test import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the googletest-throw-on-failure-test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'googletest-throw-on-failure-test_')
# Utilities.
def SetEnvVar(env_var, value):
"""Sets an environment variable to a given value; unsets it when the
given value is None.
"""
env_var = env_var.upper()
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def Run(command):
"""Runs a command; returns True/False if its exit code is/isn't 0."""
print('Running "%s". . .' % ' '.join(command))
p = gtest_test_utils.Subprocess(command)
return p.exited and p.exit_code == 0
# The tests.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
"""Tests the throw-on-failure mode."""
def RunAndVerify(self, env_var_value, flag_value, should_fail):
"""Runs googletest-throw-on-failure-test_ and verifies that it does
(or does not) exit with a non-zero code.
    Args:
      env_var_value: value of the GTEST_THROW_ON_FAILURE environment
                     variable; None if the variable should be unset.
      flag_value: value of the --gtest_throw_on_failure flag;
                  None if the flag should not be present.
should_fail: True if and only if the program is expected to fail.
"""
SetEnvVar(THROW_ON_FAILURE, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % THROW_ON_FAILURE
else:
flag = '--%s' % THROW_ON_FAILURE
command = [EXE_PATH]
if flag:
command.append(flag)
if should_fail:
should_or_not = 'should'
else:
should_or_not = 'should not'
failed = not Run(command)
SetEnvVar(THROW_ON_FAILURE, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
'exit code.' %
(THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
should_or_not))
    self.assertEqual(should_fail, failed, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)
def testThrowOnFailureEnvVar(self):
"""Tests using the GTEST_THROW_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value=None,
should_fail=True)
def testThrowOnFailureFlag(self):
"""Tests using the --gtest_throw_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value=None,
flag_value='1',
should_fail=True)
def testThrowOnFailureFlagOverridesEnvVar(self):
"""Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='0',
flag_value='1',
should_fail=True)
self.RunAndVerify(env_var_value='1',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value='1',
should_fail=True)
if __name__ == '__main__':
gtest_test_utils.Main()
| 5,658 | 32.485207 | 79 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/googletest-env-var-test.py
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
import os
from googletest.test import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-env-var-test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
if expected != actual:
print('Expected: %s' % (expected,))
print(' Actual: %s' % (actual,))
raise AssertionError
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def GetFlag(flag):
"""Runs googletest-env-var-test_ and returns its output."""
args = [COMMAND]
if flag is not None:
args += [flag]
return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
"""Verifies that the given flag is affected by the corresponding env var."""
env_var = 'GTEST_' + flag.upper()
SetEnvVar(env_var, test_val)
AssertEq(test_val, GetFlag(flag))
SetEnvVar(env_var, None)
AssertEq(default_val, GetFlag(flag))
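# Editor's note (illustration, not part of the original test): a call such as
#
#   TestFlag('repeat', '999', '1')
#
# sets GTEST_REPEAT=999, expects the binary to report repeat=999, then unsets
# the variable and expects the built-in default of 1.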
class GTestEnvVarTest(gtest_test_utils.TestCase):
def testEnvVarAffectsFlag(self):
"""Tests that environment variable should affect the corresponding flag."""
TestFlag('break_on_failure', '1', '0')
TestFlag('color', 'yes', 'auto')
SetEnvVar('TESTBRIDGE_TEST_RUNNER_FAIL_FAST', None) # For 'fail_fast' test
TestFlag('fail_fast', '1', '0')
TestFlag('filter', 'FooTest.Bar', '*')
SetEnvVar('XML_OUTPUT_FILE', None) # For 'output' test
TestFlag('output', 'xml:tmp/foo.xml', '')
TestFlag('brief', '1', '0')
TestFlag('print_time', '0', '1')
TestFlag('repeat', '999', '1')
TestFlag('throw_on_failure', '1', '0')
TestFlag('death_test_style', 'threadsafe', 'fast')
TestFlag('catch_exceptions', '0', '1')
if IS_LINUX:
TestFlag('death_test_use_fork', '1', '0')
TestFlag('stack_trace_depth', '0', '100')
def testXmlOutputFile(self):
"""Tests that $XML_OUTPUT_FILE affects the output flag."""
SetEnvVar('GTEST_OUTPUT', None)
SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
AssertEq('xml:tmp/bar.xml', GetFlag('output'))
def testXmlOutputFileOverride(self):
"""Tests that $XML_OUTPUT_FILE is overridden by $GTEST_OUTPUT."""
SetEnvVar('GTEST_OUTPUT', 'xml:tmp/foo.xml')
SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
AssertEq('xml:tmp/foo.xml', GetFlag('output'))
if __name__ == '__main__':
gtest_test_utils.Main()
| 4,173 | 33.495868 | 79 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googletest/test/gtest_xml_output_unittest.py
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
import datetime
import errno
import os
import re
import sys
from xml.dom import minidom, Node
from googletest.test import gtest_test_utils
from googletest.test import gtest_xml_test_utils
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = '--gtest_output'
GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml'
GTEST_PROGRAM_NAME = 'gtest_xml_output_unittest_'
# The flag indicating stacktraces are not supported
NO_STACKTRACE_SUPPORT_FLAG = '--no_stacktrace_support'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
SUPPORTS_STACK_TRACES = NO_STACKTRACE_SUPPORT_FLAG not in sys.argv
if SUPPORTS_STACK_TRACES:
STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
STACK_TRACE_TEMPLATE = ''
# unittest.main() can't handle unknown flags
sys.argv.remove(NO_STACKTRACE_SUPPORT_FLAG)
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="26" failures="5" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*">
<testcase name="Succeeds" status="run" result="completed" time="*" timestamp="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" skipped="0" errors="0" time="*" timestamp="*">
<testcase name="Fails" status="run" result="completed" time="*" timestamp="*" classname="FailedTest">
<failure message="gtest_xml_output_unittest_.cc:*
Expected equality of these values:
 1
 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected equality of these values:
1
2%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" skipped="0" errors="0" time="*" timestamp="*">
<testcase name="Succeeds" status="run" result="completed" time="*" timestamp="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" result="completed" time="*" timestamp="*" classname="MixedResultTest">
<failure message="gtest_xml_output_unittest_.cc:*
Expected equality of these values:
 1
 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected equality of these values:
1
2%(stack)s]]></failure>
<failure message="gtest_xml_output_unittest_.cc:*
Expected equality of these values:
 2
 3" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected equality of these values:
2
3%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" result="suppressed" time="*" timestamp="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" skipped="0" errors="0" time="*" timestamp="*">
<testcase name="OutputsCData" status="run" result="completed" time="*" timestamp="*" classname="XmlQuotingTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" skipped="0" errors="0" time="*" timestamp="*">
<testcase name="InvalidCharactersInMessage" status="run" result="completed" time="*" timestamp="*" classname="InvalidCharactersTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" skipped="0" errors="0" time="*" timestamp="*">
<testcase name="DISABLED_test_not_run" status="notrun" result="suppressed" time="*" timestamp="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="SkippedTest" tests="3" failures="1" disabled="0" skipped="2" errors="0" time="*" timestamp="*">
<testcase name="Skipped" status="run" result="skipped" time="*" timestamp="*" classname="SkippedTest">
<skipped message="gtest_xml_output_unittest_.cc:*
"><![CDATA[gtest_xml_output_unittest_.cc:*
%(stack)s]]></skipped>
</testcase>
<testcase name="SkippedWithMessage" status="run" result="skipped" time="*" timestamp="*" classname="SkippedTest">
<skipped message="gtest_xml_output_unittest_.cc:*
It is good practice to tell why you skip a test."><![CDATA[gtest_xml_output_unittest_.cc:*
It is good practice to tell why you skip a test.%(stack)s]]></skipped>
</testcase>
<testcase name="SkippedAfterFailure" status="run" result="completed" time="*" timestamp="*" classname="SkippedTest">
<failure message="gtest_xml_output_unittest_.cc:*
Expected equality of these values:
 1
 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected equality of these values:
1
2%(stack)s]]></failure>
<skipped message="gtest_xml_output_unittest_.cc:*
It is good practice to tell why you skip a test."><![CDATA[gtest_xml_output_unittest_.cc:*
It is good practice to tell why you skip a test.%(stack)s]]></skipped>
</testcase>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*" SetUpTestSuite="yes" TearDownTestSuite="aye">
<testcase name="OneProperty" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_1" value="1"/>
</properties>
</testcase>
<testcase name="IntValuedProperty" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_int" value="1"/>
</properties>
</testcase>
<testcase name="ThreeProperties" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_1" value="1"/>
<property name="key_2" value="2"/>
<property name="key_3" value="3"/>
</properties>
</testcase>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_1" value="2"/>
</properties>
</testcase>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*">
<testcase name="RecordProperty" status="run" result="completed" time="*" timestamp="*" classname="NoFixtureTest">
<properties>
<property name="key" value="1"/>
</properties>
</testcase>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" result="completed" time="*" timestamp="*" classname="NoFixtureTest">
<properties>
<property name="key_for_utility_int" value="1"/>
</properties>
</testcase>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" result="completed" time="*" timestamp="*" classname="NoFixtureTest">
<properties>
<property name="key_for_utility_string" value="1"/>
</properties>
</testcase>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" timestamp="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" timestamp="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestSuite/0" tests="1" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" timestamp="*" classname="Single/TypeParameterizedTestSuite/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestSuite/1" tests="1" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" timestamp="*" classname="Single/TypeParameterizedTestSuite/1" />
</testsuite>
</testsuites>""" % {
'stack': STACK_TRACE_TEMPLATE
}
EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" skipped="0"
errors="0" time="*" timestamp="*">
<testcase name="Succeeds" status="run" result="completed" time="*" timestamp="*" classname="SuccessfulTest"/>
</testsuite>
</testsuites>"""
EXPECTED_SHARDED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="3" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*">
<testcase name="Succeeds" status="run" result="completed" time="*" timestamp="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="1" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*" SetUpTestSuite="yes" TearDownTestSuite="aye">
<testcase name="IntValuedProperty" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_int" value="1"/>
</properties>
</testcase>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="1" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
</testsuite>
</testsuites>"""
EXPECTED_NO_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests">
<testsuite name="NonTestSuiteFailure" tests="1" failures="1" disabled="0" skipped="0" errors="0" time="*" timestamp="*">
<testcase name="" status="run" result="completed" time="*" timestamp="*" classname="">
<failure message="gtest_no_test_unittest.cc:*
Expected equality of these values:
 1
 2" type=""><![CDATA[gtest_no_test_unittest.cc:*
Expected equality of these values:
1
2%(stack)s]]></failure>
</testcase>
</testsuite>
</testsuites>""" % {
'stack': STACK_TRACE_TEMPLATE
}
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
[GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
"""
Unit test for Google Test's XML output functionality.
"""
# This test currently breaks on platforms that do not support typed and
# type-parameterized tests, so we don't run it under them.
if SUPPORTS_TYPED_TESTS:
def testNonEmptyXmlOutput(self):
"""
Runs a test program that generates a non-empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)
def testNoTestXmlOutput(self):
"""Verifies XML output for a Google Test binary without actual tests.
Runs a test program that generates an XML output for a binary without tests,
and tests that the XML output is expected.
"""
self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_NO_TEST_XML, 0)
def testTimestampValue(self):
"""Checks whether the timestamp attribute in the XML output is valid.
Runs a test program that generates an empty XML output, and checks if
the timestamp attribute in the testsuites tag is valid.
"""
actual = self._GetXmlOutput('gtest_no_test_unittest', [], {}, 0)
date_time_str = actual.documentElement.getAttributeNode('timestamp').value
# datetime.strptime() is only available in Python 2.5+ so we have to
# parse the expected datetime manually.
match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
    self.assertTrue(
        match,
        'XML datetime string %s has incorrect format' % date_time_str)
date_time_from_xml = datetime.datetime(
year=int(match.group(1)), month=int(match.group(2)),
day=int(match.group(3)), hour=int(match.group(4)),
minute=int(match.group(5)), second=int(match.group(6)))
time_delta = abs(datetime.datetime.now() - date_time_from_xml)
# timestamp value should be near the current local time
self.assertTrue(time_delta < datetime.timedelta(seconds=600),
'time_delta is %s' % time_delta)
actual.unlink()
def testDefaultOutputFile(self):
"""
Confirms that Google Test produces an XML output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
'gtest_no_test_unittest')
try:
os.remove(output_file)
except OSError:
e = sys.exc_info()[1]
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedXmlOutput(self):
"""
Tests that no XML file is generated if the default XML listener is
shut down before RUN_ALL_TESTS is invoked.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + 'out.xml')
if os.path.isfile(xml_path):
os.remove(xml_path)
command = [GTEST_PROGRAM_PATH,
'%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path),
'--shut_down_xml']
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
# p.signal is available only if p.terminated_by_signal is True.
self.assertFalse(
p.terminated_by_signal,
'%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(xml_path))
def testFilteredTestXmlOutput(self):
"""Verifies XML output when a filter is applied.
Runs a test program that executes only some tests and verifies that
non-selected tests do not show up in the XML output.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0,
extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])
def testShardedTestXmlOutput(self):
"""Verifies XML output when run using multiple shards.
Runs a test program that executes only one shard and verifies that tests
from other shards do not show up in the XML output.
"""
self._TestXmlOutput(
GTEST_PROGRAM_NAME,
EXPECTED_SHARDED_TEST_XML,
0,
extra_env={SHARD_INDEX_ENV_VAR: '0',
TOTAL_SHARDS_ENV_VAR: '10'})
def _GetXmlOutput(self, gtest_prog_name, extra_args, extra_env,
expected_exit_code):
"""
Returns the xml output generated by running the program gtest_prog_name.
Furthermore, the program's exit code must be expected_exit_code.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
gtest_prog_name + 'out.xml')
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] +
extra_args)
environ_copy = os.environ.copy()
if extra_env:
environ_copy.update(extra_env)
p = gtest_test_utils.Subprocess(command, env=environ_copy)
if p.terminated_by_signal:
self.assert_(False,
'%s was killed by signal %d' % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(expected_exit_code, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, expected_exit_code))
actual = minidom.parse(xml_path)
return actual
def _TestXmlOutput(self, gtest_prog_name, expected_xml,
expected_exit_code, extra_args=None, extra_env=None):
"""
Asserts that the XML document generated by running the program
gtest_prog_name matches expected_xml, a string containing another
XML document. Furthermore, the program's exit code must be
expected_exit_code.
"""
actual = self._GetXmlOutput(gtest_prog_name, extra_args or [],
extra_env or {}, expected_exit_code)
expected = minidom.parseString(expected_xml)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
gtest_test_utils.Main()
| 20,639 | 48.615385 | 225 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googlemock/test/gmock_leak_test.py
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests that leaked mock objects can be caught be Google Mock."""
from googlemock.test import gmock_test_utils
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_leak_test_')
TEST_WITH_EXPECT_CALL = [PROGRAM_PATH, '--gtest_filter=*ExpectCall*']
TEST_WITH_ON_CALL = [PROGRAM_PATH, '--gtest_filter=*OnCall*']
TEST_MULTIPLE_LEAKS = [PROGRAM_PATH, '--gtest_filter=*MultipleLeaked*']
environ = gmock_test_utils.environ
SetEnvVar = gmock_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gmock_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
class GMockLeakTest(gmock_test_utils.TestCase):
def testCatchesLeakedMockByDefault(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL,
env=environ).exit_code)
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_ON_CALL,
env=environ).exit_code)
def testDoesNotCatchLeakedMockWhenDisabled(self):
self.assertEquals(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
['--gmock_catch_leaked_mocks=0'],
env=environ).exit_code)
self.assertEquals(
0,
gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
['--gmock_catch_leaked_mocks=0'],
env=environ).exit_code)
def testCatchesLeakedMockWhenEnabled(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
['--gmock_catch_leaked_mocks'],
env=environ).exit_code)
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
['--gmock_catch_leaked_mocks'],
env=environ).exit_code)
def testCatchesLeakedMockWhenEnabledWithExplictFlagValue(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
['--gmock_catch_leaked_mocks=1'],
env=environ).exit_code)
def testCatchesMultipleLeakedMocks(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_MULTIPLE_LEAKS +
['--gmock_catch_leaked_mocks'],
env=environ).exit_code)
if __name__ == '__main__':
gmock_test_utils.Main()
| 4,357 | 40.504762 | 73 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googlemock/test/gmock_test_utils.py
|
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Mocking Framework."""
import os
# pylint: disable=C6204
from googletest.test import gtest_test_utils
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return gtest_test_utils.GetSourceDir()
def GetTestExecutablePath(executable_name):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
Returns:
The absolute path of the test binary.
"""
return gtest_test_utils.GetTestExecutablePath(executable_name)
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
# Suppresses the "Invalid const name" lint complaint
# pylint: disable-msg=C6409
# Exposes utilities from gtest_test_utils.
Subprocess = gtest_test_utils.Subprocess
TestCase = gtest_test_utils.TestCase
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
PREMATURE_EXIT_FILE_ENV_VAR = gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR
# pylint: enable-msg=C6409
def Main():
"""Runs the unit test."""
gtest_test_utils.Main()
| 3,218 | 32.185567 | 79 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/tests/rocm_smi_test/gtest/googlemock/test/gmock_output_test.py
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
r"""Tests the text output of Google C++ Mocking Framework.
To update the golden file:
gmock_output_test.py --build_dir=BUILD/DIR --gengolden
where BUILD/DIR contains the built gmock_output_test_ file.
gmock_output_test.py --gengolden
gmock_output_test.py
"""
from io import open # pylint: disable=redefined-builtin, g-importing-member
import os
import re
import sys
from googlemock.test import gmock_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_output_test_')
COMMAND = [PROGRAM_PATH, '--gtest_stack_trace_depth=0', '--gtest_print_time=0']
GOLDEN_NAME = 'gmock_output_test_golden.txt'
GOLDEN_PATH = os.path.join(gmock_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveReportHeaderAndFooter(output):
"""Removes Google Test result report's header and footer from the output."""
output = re.sub(r'.*gtest_main.*\n', '', output)
output = re.sub(r'\[.*\d+ tests.*\n', '', output)
output = re.sub(r'\[.* test environment .*\n', '', output)
output = re.sub(r'\[=+\] \d+ tests .* ran.*', '', output)
output = re.sub(r'.* FAILED TESTS\n', '', output)
return output
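# Illustrative example (hypothetical input): a summary line such as
# '[==========] 12 tests from 3 test suites ran. (5 ms total)' is removed by
# the r'\[=+\] \d+ tests .* ran.*' substitution above.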
def RemoveLocations(output):
"""Removes all file location info from a Google Test program's output.
Args:
output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
    'DIRECTORY/FILE_NAME:LINE_NUMBER: ' or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE:#: '.
"""
return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\:', 'FILE:#:', output)
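# e.g. (illustrative): 'src/foo_test.cc:42: Failure' becomes 'FILE:#: Failure'.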
def NormalizeErrorMarker(output):
"""Normalizes the error marker, which is different on Windows vs on Linux."""
return re.sub(r' error: ', ' Failure\n', output)
def RemoveMemoryAddresses(output):
"""Removes memory addresses from the test output."""
return re.sub(r'@\w+', '@0x#', output)
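# e.g. (illustrative): 'Mock function @0x7fabc123' becomes 'Mock function @0x#'.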
def RemoveTestNamesOfLeakedMocks(output):
"""Removes the test names of leaked mock objects from the test output."""
return re.sub(r'\(used in test .+\) ', '', output)
def GetLeakyTests(output):
"""Returns a list of test names that leak mock objects."""
# findall() returns a list of all matches of the regex in output.
# For example, if '(used in test FooTest.Bar)' is in output, the
# list will contain 'FooTest.Bar'.
return re.findall(r'\(used in test (.+)\)', output)
def GetNormalizedOutputAndLeakyTests(output):
"""Normalizes the output of gmock_output_test_.
Args:
output: The test output.
Returns:
A tuple (the normalized test output, the list of test names that have
leaked mocks).
"""
output = ToUnixLineEnding(output)
output = RemoveReportHeaderAndFooter(output)
output = NormalizeErrorMarker(output)
output = RemoveLocations(output)
output = RemoveMemoryAddresses(output)
return (RemoveTestNamesOfLeakedMocks(output), GetLeakyTests(output))
def GetShellCommandOutput(cmd):
"""Runs a command in a sub-process, and returns its STDOUT in a string."""
return gmock_test_utils.Subprocess(cmd, capture_stderr=False).output
def GetNormalizedCommandOutputAndLeakyTests(cmd):
"""Runs a command and returns its normalized output and a list of leaky tests.
Args:
cmd: the shell command.
"""
# Disables exception pop-ups on Windows.
os.environ['GTEST_CATCH_EXCEPTIONS'] = '1'
return GetNormalizedOutputAndLeakyTests(GetShellCommandOutput(cmd))
class GMockOutputTest(gmock_test_utils.TestCase):
def testOutput(self):
(output, leaky_tests) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
golden_file = open(GOLDEN_PATH, 'rb')
golden = golden_file.read().decode('utf-8')
golden_file.close()
# The normalized output should match the golden file.
self.assertEquals(golden, output)
# The raw output should contain 2 leaked mock object errors for
# test GMockOutputTest.CatchesLeakedMocks.
self.assertEquals(['GMockOutputTest.CatchesLeakedMocks',
'GMockOutputTest.CatchesLeakedMocks'],
leaky_tests)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
(output, _) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
    golden_file = open(GOLDEN_PATH, 'wb')
    # The golden file is opened in binary mode, so encode the normalized text.
    golden_file.write(output.encode('utf-8'))
    golden_file.close()
# Suppress the error "googletest was imported but a call to its main()
# was never detected."
os._exit(0)
else:
gmock_test_utils.Main()
| 6,175 | 32.565217 | 80 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/python_smi_tools/rsmiBindings.py
|
#!/usr/bin/env python3
"""ROCm_SMI_LIB CLI Tool Python Bindings"""
# TODO: Get most (or all) of these from rocm_smi.h to avoid mismatches and redundancy
from __future__ import print_function
import ctypes.util
from ctypes import *
from enum import Enum
import os
# Use ROCm installation path if running from standard installation
# With File Reorg rsmiBindings.py will be installed in /opt/rocm/libexec/rocm_smi.
# relative path changed accordingly
path_librocm = os.path.dirname(os.path.realpath(__file__)) + '/../../lib/librocm_smi64.so'
if not os.path.isfile(path_librocm):
    print('Unable to find %s. Trying /opt/rocm*' % path_librocm)
for root, dirs, files in os.walk('/opt', followlinks=True):
if 'librocm_smi64.so' in files:
path_librocm = os.path.join(os.path.realpath(root), 'librocm_smi64.so')
if os.path.isfile(path_librocm):
print('Using lib from %s' % path_librocm)
else:
print('Unable to find librocm_smi64.so')
# ----------> TODO: Support static libs as well as SO
try:
cdll.LoadLibrary(path_librocm)
rocmsmi = CDLL(path_librocm)
except OSError:
print('Unable to load the rocm_smi library.\n'\
'Set LD_LIBRARY_PATH to the folder containing librocm_smi64.\n'\
'{0}Please refer to https://github.com/'\
'RadeonOpenCompute/rocm_smi_lib for the installation guide.{1}'\
.format('\33[33m', '\033[0m'))
exit()
# Device ID
dv_id = c_uint64()
# GPU ID
gpu_id = c_uint32(0)
# Policy enums
RSMI_MAX_NUM_FREQUENCIES = 32
RSMI_MAX_FAN_SPEED = 255
RSMI_NUM_VOLTAGE_CURVE_POINTS = 3
class rsmi_status_t(c_int):
RSMI_STATUS_SUCCESS = 0x0
RSMI_STATUS_INVALID_ARGS = 0x1
RSMI_STATUS_NOT_SUPPORTED = 0x2
RSMI_STATUS_FILE_ERROR = 0x3
RSMI_STATUS_PERMISSION = 0x4
RSMI_STATUS_OUT_OF_RESOURCES = 0x5
RSMI_STATUS_INTERNAL_EXCEPTION = 0x6
RSMI_STATUS_INPUT_OUT_OF_BOUNDS = 0x7
RSMI_STATUS_INIT_ERROR = 0x8
RSMI_INITIALIZATION_ERROR = RSMI_STATUS_INIT_ERROR
RSMI_STATUS_NOT_YET_IMPLEMENTED = 0x9
RSMI_STATUS_NOT_FOUND = 0xA
RSMI_STATUS_INSUFFICIENT_SIZE = 0xB
RSMI_STATUS_INTERRUPT = 0xC
RSMI_STATUS_UNEXPECTED_SIZE = 0xD
RSMI_STATUS_NO_DATA = 0xE
RSMI_STATUS_UNEXPECTED_DATA = 0xF
RSMI_STATUS_BUSY = 0x10
RSMI_STATUS_REFCOUNT_OVERFLOW = 0x11
RSMI_STATUS_SETTING_UNAVAILABLE = 0x12
RSMI_STATUS_AMDGPU_RESTART_ERR = 0x13
RSMI_STATUS_UNKNOWN_ERROR = 0xFFFFFFFF
# Dictionary of rsmi return codes and their verbose output
rsmi_status_verbose_err_out = {
rsmi_status_t.RSMI_STATUS_SUCCESS: 'Operation was successful',
rsmi_status_t.RSMI_STATUS_INVALID_ARGS: 'Invalid arguments provided',
rsmi_status_t.RSMI_STATUS_NOT_SUPPORTED: 'Not supported on the given system',
rsmi_status_t.RSMI_STATUS_FILE_ERROR: 'Problem accessing a file',
rsmi_status_t.RSMI_STATUS_PERMISSION: 'Permission denied',
rsmi_status_t.RSMI_STATUS_OUT_OF_RESOURCES: 'Unable to acquire memory or other resource',
rsmi_status_t.RSMI_STATUS_INTERNAL_EXCEPTION: 'An internal exception was caught',
rsmi_status_t.RSMI_STATUS_INPUT_OUT_OF_BOUNDS: 'Provided input is out of allowable or safe range',
    rsmi_status_t.RSMI_INITIALIZATION_ERROR: 'Error occurred during rsmi initialization',
rsmi_status_t.RSMI_STATUS_NOT_YET_IMPLEMENTED: 'Requested function is not implemented on this setup',
rsmi_status_t.RSMI_STATUS_NOT_FOUND: 'Item searched for but not found',
rsmi_status_t.RSMI_STATUS_INSUFFICIENT_SIZE: 'Insufficient resources available',
    rsmi_status_t.RSMI_STATUS_INTERRUPT: 'Interrupt occurred during execution',
rsmi_status_t.RSMI_STATUS_UNEXPECTED_SIZE: 'Unexpected amount of data read',
rsmi_status_t.RSMI_STATUS_NO_DATA: 'No data found for the given input',
rsmi_status_t.RSMI_STATUS_UNEXPECTED_DATA: 'Unexpected data received',
    rsmi_status_t.RSMI_STATUS_BUSY: 'Busy - required resources were unavailable, preventing the call from executing',
rsmi_status_t.RSMI_STATUS_REFCOUNT_OVERFLOW: 'Data overflow - data exceeded INT32_MAX',
rsmi_status_t.RSMI_STATUS_SETTING_UNAVAILABLE: 'Requested setting is unavailable for current device',
rsmi_status_t.RSMI_STATUS_AMDGPU_RESTART_ERR: 'Could not successfully restart the amdgpu driver',
    rsmi_status_t.RSMI_STATUS_UNKNOWN_ERROR: 'Unknown error occurred'
}
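# Illustrative sketch (not part of the bindings): mapping a raw integer return
# code from a librocm_smi64 call to its verbose description. The helper name
# below is hypothetical.
def rsmi_status_string(ret_code):
    """Return the human-readable text for an rsmi_status_t return code."""
    return rsmi_status_verbose_err_out.get(ret_code, 'Unrecognized return code')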
class rsmi_init_flags_t(c_int):
RSMI_INIT_FLAG_ALL_GPUS = 0x1
class rsmi_dev_perf_level_t(c_int):
RSMI_DEV_PERF_LEVEL_AUTO = 0
RSMI_DEV_PERF_LEVEL_FIRST = RSMI_DEV_PERF_LEVEL_AUTO
RSMI_DEV_PERF_LEVEL_LOW = 1
RSMI_DEV_PERF_LEVEL_HIGH = 2
RSMI_DEV_PERF_LEVEL_MANUAL = 3
RSMI_DEV_PERF_LEVEL_STABLE_STD = 4
RSMI_DEV_PERF_LEVEL_STABLE_PEAK = 5
RSMI_DEV_PERF_LEVEL_STABLE_MIN_MCLK = 6
RSMI_DEV_PERF_LEVEL_STABLE_MIN_SCLK = 7
RSMI_DEV_PERF_LEVEL_DETERMINISM = 8
RSMI_DEV_PERF_LEVEL_LAST = RSMI_DEV_PERF_LEVEL_DETERMINISM
RSMI_DEV_PERF_LEVEL_UNKNOWN = 0x100
notification_type_names = ['VM_FAULT', 'THERMAL_THROTTLE', 'GPU_RESET']
class rsmi_evt_notification_type_t(c_int):
RSMI_EVT_NOTIF_VMFAULT = 0
RSMI_EVT_NOTIF_FIRST = RSMI_EVT_NOTIF_VMFAULT
RSMI_EVT_NOTIF_THERMAL_THROTTLE = 1
RSMI_EVT_NOTIF_GPU_PRE_RESET = 2
RSMI_EVT_NOTIF_GPU_POST_RESET = 3
RSMI_EVT_NOTIF_LAST = RSMI_EVT_NOTIF_GPU_POST_RESET
class rsmi_voltage_metric_t(c_int):
RSMI_VOLT_CURRENT = 0
RSMI_VOLT_FIRST = RSMI_VOLT_CURRENT
RSMI_VOLT_MAX = 1
RSMI_VOLT_MIN_CRIT = 2
RSMI_VOLT_MIN = 3
RSMI_VOLT_MAX_CRIT = 4
RSMI_VOLT_AVERAGE = 5
RSMI_VOLT_LOWEST = 6
RSMI_VOLT_HIGHEST = 7
RSMI_VOLT_LAST = RSMI_VOLT_HIGHEST
RSMI_VOLT_UNKNOWN = 0x100
class rsmi_voltage_type_t(c_int):
RSMI_VOLT_TYPE_FIRST = 0
RSMI_VOLT_TYPE_VDDGFX = RSMI_VOLT_TYPE_FIRST
RSMI_VOLT_TYPE_LAST = RSMI_VOLT_TYPE_VDDGFX
RSMI_VOLT_TYPE_INVALID = 0xFFFFFFFF
# The perf_level_string is correlated to rsmi_dev_perf_level_t
def perf_level_string(i):
switcher = {
0: 'AUTO',
1: 'LOW',
2: 'HIGH',
3: 'MANUAL',
4: 'STABLE_STD',
5: 'STABLE_PEAK',
6: 'STABLE_MIN_MCLK',
7: 'STABLE_MIN_SCLK',
8: 'PERF_DETERMINISM',
}
return switcher.get(i, 'UNKNOWN')
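# e.g. perf_level_string(4) returns 'STABLE_STD'; unmapped values return 'UNKNOWN'.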
rsmi_dev_perf_level = rsmi_dev_perf_level_t
class rsmi_sw_component_t(c_int):
RSMI_SW_COMP_FIRST = 0x0
RSMI_SW_COMP_DRIVER = RSMI_SW_COMP_FIRST
RSMI_SW_COMP_LAST = RSMI_SW_COMP_DRIVER
rsmi_event_handle_t = POINTER(c_uint)
class rsmi_event_group_t(Enum):
RSMI_EVNT_GRP_XGMI = 0
RSMI_EVNT_GRP_XGMI_DATA_OUT = 10
RSMI_EVNT_GRP_INVALID = 0xFFFFFFFF
class rsmi_event_type_t(c_int):
RSMI_EVNT_FIRST = rsmi_event_group_t.RSMI_EVNT_GRP_XGMI
RSMI_EVNT_XGMI_FIRST = rsmi_event_group_t.RSMI_EVNT_GRP_XGMI
RSMI_EVNT_XGMI_0_NOP_TX = RSMI_EVNT_XGMI_FIRST
RSMI_EVNT_XGMI_0_REQUEST_TX = 1
RSMI_EVNT_XGMI_0_RESPONSE_TX = 2
RSMI_EVNT_XGMI_0_BEATS_TX = 3
RSMI_EVNT_XGMI_1_NOP_TX = 4
RSMI_EVNT_XGMI_1_REQUEST_TX = 5
RSMI_EVNT_XGMI_1_RESPONSE_TX = 6
RSMI_EVNT_XGMI_1_BEATS_TX = 7
RSMI_EVNT_XGMI_LAST = RSMI_EVNT_XGMI_1_BEATS_TX
RSMI_EVNT_XGMI_DATA_OUT_FIRST = rsmi_event_group_t.RSMI_EVNT_GRP_XGMI_DATA_OUT
RSMI_EVNT_XGMI_DATA_OUT_0 = RSMI_EVNT_XGMI_DATA_OUT_FIRST
RSMI_EVNT_XGMI_DATA_OUT_1 = 11
RSMI_EVNT_XGMI_DATA_OUT_2 = 12
RSMI_EVNT_XGMI_DATA_OUT_3 = 13
RSMI_EVNT_XGMI_DATA_OUT_4 = 14
RSMI_EVNT_XGMI_DATA_OUT_5 = 15
RSMI_EVNT_XGMI_DATA_OUT_LAST = RSMI_EVNT_XGMI_DATA_OUT_5
    RSMI_EVNT_LAST = RSMI_EVNT_XGMI_DATA_OUT_LAST
class rsmi_counter_command_t(c_int):
RSMI_CNTR_CMD_START = 0
RSMI_CNTR_CMD_STOP = 1
class rsmi_counter_value_t(Structure):
_fields_ = [('value', c_uint64),
('time_enabled', c_uint64),
('time_running', c_uint64)]
class rsmi_clk_type_t(c_int):
RSMI_CLK_TYPE_SYS = 0x0
RSMI_CLK_TYPE_FIRST = RSMI_CLK_TYPE_SYS
RSMI_CLK_TYPE_DF = 0x1
RSMI_CLK_TYPE_DCEF = 0x2
RSMI_CLK_TYPE_SOC = 0x3
RSMI_CLK_TYPE_MEM = 0x4
RSMI_CLK_TYPE_LAST = RSMI_CLK_TYPE_MEM
RSMI_CLK_INVALID = 0xFFFFFFFF
# Clock names here are correlated to the rsmi_clk_type_t values above
clk_type_names = ['sclk', 'sclk', 'fclk', 'dcefclk',\
'socclk', 'mclk', 'mclk', 'invalid']
rsmi_clk_type_dict = {'RSMI_CLK_TYPE_SYS': 0x0, 'RSMI_CLK_TYPE_FIRST': 0x0,\
'RSMI_CLK_TYPE_DF': 0x1, 'RSMI_CLK_TYPE_DCEF': 0x2,\
'RSMI_CLK_TYPE_SOC': 0x3, 'RSMI_CLK_TYPE_MEM': 0x4,\
'RSMI_CLK_TYPE_LAST': 0X4, 'RSMI_CLK_INVALID': 0xFFFFFFFF}
rsmi_clk_names_dict = {'sclk': 0x0, 'fclk': 0x1, 'dcefclk': 0x2,\
'socclk': 0x3, 'mclk': 0x4}
rsmi_clk_type = rsmi_clk_type_t
class rsmi_temperature_metric_t(c_int):
RSMI_TEMP_CURRENT = 0x0
RSMI_TEMP_FIRST = RSMI_TEMP_CURRENT
RSMI_TEMP_MAX = 0x1
RSMI_TEMP_MIN = 0x2
RSMI_TEMP_MAX_HYST = 0x3
RSMI_TEMP_MIN_HYST = 0x4
RSMI_TEMP_CRITICAL = 0x5
RSMI_TEMP_CRITICAL_HYST = 0x6
RSMI_TEMP_EMERGENCY = 0x7
RSMI_TEMP_EMERGENCY_HYST = 0x8
RSMI_TEMP_CRIT_MIN = 0x9
RSMI_TEMP_CRIT_MIN_HYST = 0xA
RSMI_TEMP_OFFSET = 0xB
RSMI_TEMP_LOWEST = 0xC
RSMI_TEMP_HIGHEST = 0xD
RSMI_TEMP_LAST = RSMI_TEMP_HIGHEST
rsmi_temperature_metric = rsmi_temperature_metric_t
class rsmi_temperature_type_t(c_int):
RSMI_TEMP_TYPE_FIRST = 0
RSMI_TEMP_TYPE_EDGE = RSMI_TEMP_TYPE_FIRST
RSMI_TEMP_TYPE_JUNCTION = 1
RSMI_TEMP_TYPE_MEMORY = 2
RSMI_TEMP_TYPE_HBM_0 = 3
RSMI_TEMP_TYPE_HBM_1 = 4
RSMI_TEMP_TYPE_HBM_2 = 5
RSMI_TEMP_TYPE_HBM_3 = 6
RSMI_TEMP_TYPE_LAST = RSMI_TEMP_TYPE_HBM_3
# temp_type_lst list correlates to rsmi_temperature_type_t
temp_type_lst = ['edge', 'junction', 'memory', 'HBM 0', 'HBM 1', 'HBM 2', 'HBM 3']
class rsmi_power_profile_preset_masks_t(c_uint64):
RSMI_PWR_PROF_PRST_CUSTOM_MASK = 0x1
RSMI_PWR_PROF_PRST_VIDEO_MASK = 0x2
RSMI_PWR_PROF_PRST_POWER_SAVING_MASK = 0x4
RSMI_PWR_PROF_PRST_COMPUTE_MASK = 0x8
RSMI_PWR_PROF_PRST_VR_MASK = 0x10
RSMI_PWR_PROF_PRST_3D_FULL_SCR_MASK = 0x20
RSMI_PWR_PROF_PRST_BOOTUP_DEFAULT = 0x40
RSMI_PWR_PROF_PRST_LAST = RSMI_PWR_PROF_PRST_BOOTUP_DEFAULT
RSMI_PWR_PROF_PRST_INVALID = 0xFFFFFFFFFFFFFFFF
rsmi_power_profile_preset_masks = rsmi_power_profile_preset_masks_t
class rsmi_gpu_block_t(c_int):
RSMI_GPU_BLOCK_INVALID = 0x0000000000000000
RSMI_GPU_BLOCK_FIRST = 0x0000000000000001
RSMI_GPU_BLOCK_UMC = RSMI_GPU_BLOCK_FIRST
RSMI_GPU_BLOCK_SDMA = 0x0000000000000002
RSMI_GPU_BLOCK_GFX = 0x0000000000000004
RSMI_GPU_BLOCK_MMHUB = 0x0000000000000008
RSMI_GPU_BLOCK_ATHUB = 0x0000000000000010
RSMI_GPU_BLOCK_PCIE_BIF = 0x0000000000000020
RSMI_GPU_BLOCK_HDP = 0x0000000000000040
RSMI_GPU_BLOCK_XGMI_WAFL = 0x0000000000000080
RSMI_GPU_BLOCK_DF = 0x0000000000000100
RSMI_GPU_BLOCK_SMN = 0x0000000000000200
RSMI_GPU_BLOCK_SEM = 0x0000000000000400
RSMI_GPU_BLOCK_MP0 = 0x0000000000000800
RSMI_GPU_BLOCK_MP1 = 0x0000000000001000
RSMI_GPU_BLOCK_FUSE = 0x0000000000002000
RSMI_GPU_BLOCK_LAST = RSMI_GPU_BLOCK_FUSE
RSMI_GPU_BLOCK_RESERVED = 0x8000000000000000
rsmi_gpu_block = rsmi_gpu_block_t
# The following dictionary correlates with rsmi_gpu_block_t enum
rsmi_gpu_block_d = {
'UMC' : 0x0000000000000001,
'SDMA' : 0x0000000000000002,
'GFX' : 0x0000000000000004,
'MMHUB': 0x0000000000000008,
'ATHUB': 0x0000000000000010,
'PCIE_BIF': 0x0000000000000020,
'HDP': 0x0000000000000040,
'XGMI_WAFL': 0x0000000000000080,
'DF': 0x0000000000000100,
'SMN': 0x0000000000000200,
'SEM': 0x0000000000000400,
'MP0': 0x0000000000000800,
'MP1': 0x0000000000001000,
'FUSE': 0x0000000000002000
}
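# e.g. rsmi_gpu_block_d['GFX'] == rsmi_gpu_block_t.RSMI_GPU_BLOCK_GFX (0x4), so
# CLI block names can be translated into the enum bit values above.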
class rsmi_ras_err_state_t(c_int):
RSMI_RAS_ERR_STATE_NONE = 0
RSMI_RAS_ERR_STATE_DISABLED = 1
RSMI_RAS_ERR_STATE_PARITY = 2
RSMI_RAS_ERR_STATE_SING_C = 3
RSMI_RAS_ERR_STATE_MULT_UC = 4
RSMI_RAS_ERR_STATE_POISON = 5
RSMI_RAS_ERR_STATE_ENABLED = 6
RSMI_RAS_ERR_STATE_LAST = RSMI_RAS_ERR_STATE_ENABLED
RSMI_RAS_ERR_STATE_INVALID = 0xFFFFFFFF
# Error type list correlates to rsmi_ras_err_state_t
rsmi_ras_err_stale_readable = ['no errors', 'ECC disabled',
'unknown type err', 'single correctable err',
'multiple uncorrectable err',
'page isolated, treat as uncorrectable err',
'ECC enabled', 'status invalid']
rsmi_ras_err_stale_machine = ['none', 'disabled', 'unknown error',
'sing', 'mult', 'position', 'enabled']
validRasTypes = ['ue', 'ce']
validRasActions = ['disable', 'enable', 'inject']
validRasBlocks = ['fuse', 'mp1', 'mp0', 'sem', 'smn', 'df', 'xgmi_wafl', 'hdp', 'pcie_bif',
'athub', 'mmhub', 'gfx', 'sdma', 'umc']
class rsmi_memory_type_t(c_int):
RSMI_MEM_TYPE_FIRST = 0
RSMI_MEM_TYPE_VRAM = RSMI_MEM_TYPE_FIRST
RSMI_MEM_TYPE_VIS_VRAM = 1
RSMI_MEM_TYPE_GTT = 2
RSMI_MEM_TYPE_LAST = RSMI_MEM_TYPE_GTT
# memory_type_l includes the names corresponding to rsmi_memory_type_t
# Usage example to get corresponding names:
# memory_type_l[rsmi_memory_type_t.RSMI_MEM_TYPE_VRAM] will return string 'vram'
memory_type_l = ['VRAM', 'VIS_VRAM', 'GTT']
class rsmi_freq_ind_t(c_int):
RSMI_FREQ_IND_MIN = 0
RSMI_FREQ_IND_MAX = 1
RSMI_FREQ_IND_INVALID = 0xFFFFFFFF
rsmi_freq_ind = rsmi_freq_ind_t
class rsmi_fw_block_t(c_int):
RSMI_FW_BLOCK_FIRST = 0
RSMI_FW_BLOCK_ASD = RSMI_FW_BLOCK_FIRST
RSMI_FW_BLOCK_CE = 1
RSMI_FW_BLOCK_DMCU = 2
RSMI_FW_BLOCK_MC = 3
RSMI_FW_BLOCK_ME = 4
RSMI_FW_BLOCK_MEC = 5
RSMI_FW_BLOCK_MEC2 = 6
RSMI_FW_BLOCK_PFP = 7
RSMI_FW_BLOCK_RLC = 8
RSMI_FW_BLOCK_RLC_SRLC = 9
RSMI_FW_BLOCK_RLC_SRLG = 10
RSMI_FW_BLOCK_RLC_SRLS = 11
RSMI_FW_BLOCK_SDMA = 12
RSMI_FW_BLOCK_SDMA2 = 13
RSMI_FW_BLOCK_SMC = 14
RSMI_FW_BLOCK_SOS = 15
RSMI_FW_BLOCK_TA_RAS = 16
RSMI_FW_BLOCK_TA_XGMI = 17
RSMI_FW_BLOCK_UVD = 18
RSMI_FW_BLOCK_VCE = 19
RSMI_FW_BLOCK_VCN = 20
RSMI_FW_BLOCK_LAST = RSMI_FW_BLOCK_VCN
# The following list correlates to rsmi_fw_block_t
fw_block_names_l = ['ASD', 'CE', 'DMCU', 'MC', 'ME', 'MEC', 'MEC2', 'PFP',\
'RLC', 'RLC SRLC', 'RLC SRLG', 'RLC SRLS', 'SDMA', 'SDMA2',\
'SMC', 'SOS', 'TA RAS', 'TA XGMI', 'UVD', 'VCE', 'VCN']
rsmi_bit_field_t = c_uint64()
rsmi_bit_field = rsmi_bit_field_t
class rsmi_utilization_counter_type(c_int):
RSMI_UTILIZATION_COUNTER_FIRST = 0
RSMI_COARSE_GRAIN_GFX_ACTIVITY = RSMI_UTILIZATION_COUNTER_FIRST
RSMI_COARSE_GRAIN_MEM_ACTIVITY = 1
RSMI_UTILIZATION_COUNTER_LAST = RSMI_COARSE_GRAIN_MEM_ACTIVITY
utilization_counter_name = ['GFX Activity', 'Memory Activity']
class rsmi_utilization_counter_t(Structure):
_fields_ = [('type', c_int),
('val', c_uint64)]
class rsmi_xgmi_status_t(c_int):
RSMI_XGMI_STATUS_NO_ERRORS = 0
RSMI_XGMI_STATUS_ERROR = 1
RSMI_XGMI_STATUS_MULTIPLE_ERRORS = 2
class rsmi_memory_page_status_t(c_int):
RSMI_MEM_PAGE_STATUS_RESERVED = 0
RSMI_MEM_PAGE_STATUS_PENDING = 1
RSMI_MEM_PAGE_STATUS_UNRESERVABLE = 2
memory_page_status_l = ['reserved', 'pending', 'unreservable']
class rsmi_retired_page_record_t(Structure):
_fields_ = [('page_address', c_uint64),
('page_size', c_uint64),
('status', c_int)]
RSMI_MAX_NUM_POWER_PROFILES = (sizeof(rsmi_bit_field_t) * 8)
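# sizeof(rsmi_bit_field_t) is 8 bytes, so RSMI_MAX_NUM_POWER_PROFILES evaluates to 64.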
class rsmi_power_profile_status_t(Structure):
_fields_ = [('available_profiles', c_uint32),
('current', c_uint64),
('num_profiles', c_uint32)]
rsmi_power_profile_status = rsmi_power_profile_status_t
class rsmi_frequencies_t(Structure):
_fields_ = [('num_supported', c_int32),
('current', c_uint32),
('frequency', c_uint64 * RSMI_MAX_NUM_FREQUENCIES)]
rsmi_frequencies = rsmi_frequencies_t
class rsmi_pcie_bandwidth_t(Structure):
_fields_ = [('transfer_rate', rsmi_frequencies_t),
('lanes', c_uint32 * RSMI_MAX_NUM_FREQUENCIES)]
rsmi_pcie_bandwidth = rsmi_pcie_bandwidth_t
class rsmi_version_t(Structure):
_fields_ = [('major', c_uint32),
('minor', c_uint32),
('patch', c_uint32),
('build', c_char_p)]
rsmi_version = rsmi_version_t
class rsmi_range_t(Structure):
_fields_ = [('lower_bound', c_uint64),
('upper_bound', c_uint64)]
rsmi_range = rsmi_range_t
class rsmi_od_vddc_point_t(Structure):
_fields_ = [('frequency', c_uint64),
('voltage', c_uint64)]
rsmi_od_vddc_point = rsmi_od_vddc_point_t
class rsmi_freq_volt_region_t(Structure):
_fields_ = [('freq_range', rsmi_range_t),
('volt_range', rsmi_range_t)]
rsmi_freq_volt_region = rsmi_freq_volt_region_t
class rsmi_od_volt_curve_t(Structure):
_fields_ = [('vc_points', rsmi_od_vddc_point_t *\
RSMI_NUM_VOLTAGE_CURVE_POINTS)]
rsmi_od_volt_curve = rsmi_od_volt_curve_t
class rsmi_od_volt_freq_data_t(Structure):
_fields_ = [('curr_sclk_range', rsmi_range_t),
('curr_mclk_range', rsmi_range_t),
('sclk_freq_limits', rsmi_range_t),
('mclk_freq_limits', rsmi_range_t),
('curve', rsmi_od_volt_curve_t),
('num_regions', c_uint32)]
rsmi_od_volt_freq_data = rsmi_od_volt_freq_data_t
class rsmi_error_count_t(Structure):
_fields_ = [('correctable_err', c_uint64),
('uncorrectable_err', c_uint64)]
class rsmi_evt_notification_data_t(Structure):
_fields_ = [('dv_ind', c_uint32),
('event', rsmi_evt_notification_type_t),
('message', c_char*64)]
class rsmi_process_info_t(Structure):
_fields_ = [('process_id', c_uint32),
('pasid', c_uint32),
('vram_usage', c_uint64),
('sdma_usage', c_uint64),
('cu_occupancy', c_uint32)]
class rsmi_func_id_iter_handle(Structure):
_fields_ = [('func_id_iter', POINTER(c_uint)),
('container_ptr', POINTER(c_uint)),
('id_type', c_uint32)]
rsmi_func_id_iter_handle_t = POINTER(rsmi_func_id_iter_handle)
RSMI_DEFAULT_VARIANT = 0xFFFFFFFFFFFFFFFF
class submodule_union(Union):
_fields_ = [('memory_type', c_int), # rsmi_memory_type_t,
('temp_metric', c_int), # rsmi_temperature_metric_t,
('evnt_type', c_int), # rsmi_event_type_t,
('evnt_group', c_int), # rsmi_event_group_t,
('clk_type', c_int), # rsmi_clk_type_t,
('fw_block', c_int), # rsmi_fw_block_t,
('gpu_block_type', c_int)] # rsmi_gpu_block_t
class rsmi_func_id_value_t(Union):
_fields_ = [('id', c_uint64),
('name', c_char_p),
('submodule', submodule_union)]
class rsmi_compute_partition_type_t(c_int):
RSMI_COMPUTE_PARTITION_INVALID = 0
RSMI_COMPUTE_PARTITION_CPX = 1
RSMI_COMPUTE_PARTITION_SPX = 2
RSMI_COMPUTE_PARTITION_DPX = 3
RSMI_COMPUTE_PARTITION_TPX = 4
RSMI_COMPUTE_PARTITION_QPX = 5
rsmi_compute_partition_type_dict = {
#'RSMI_COMPUTE_PARTITION_INVALID': 0,
'CPX': 1,
'SPX': 2,
'DPX': 3,
'TPX': 4,
'QPX': 5
}
rsmi_compute_partition_type = rsmi_compute_partition_type_t
# compute_partition_type_l includes string names for the rsmi_compute_partition_type_t
# Usage example to get corresponding names:
# compute_partition_type_l[rsmi_compute_partition_type_t.RSMI_COMPUTE_PARTITION_CPX]
# will return string 'CPX'
compute_partition_type_l = ['CPX', 'SPX', 'DPX', 'TPX', 'QPX']
class rsmi_nps_mode_type_t(c_int):
RSMI_MEMORY_PARTITION_UNKNOWN = 0
RSMI_MEMORY_PARTITION_NPS1 = 1
RSMI_MEMORY_PARTITION_NPS2 = 2
RSMI_MEMORY_PARTITION_NPS4 = 3
RSMI_MEMORY_PARTITION_NPS8 = 4
rsmi_nps_mode_type_dict = {
'NPS1': 1,
'NPS2': 2,
'NPS4': 3,
'NPS8': 4
}
rsmi_nps_mode_type = rsmi_nps_mode_type_t
# nps_mode_type_l includes string names for the rsmi_nps_mode_type_t
# Usage example to get corresponding names:
# nps_mode_type_l[rsmi_nps_mode_type_t.RSMI_MEMORY_PARTITION_NPS2]
# will return string 'NPS2'
nps_mode_type_l = ['NPS1', 'NPS2', 'NPS4', 'NPS8']
| 20,511 | 31 | 105 |
py
|
rocm_smi_lib
|
rocm_smi_lib-master/python_smi_tools/rocm_smi.py
|
#!/usr/bin/env python3
"""ROCm_SMI_LIB CLI Tool
This tool acts as a command line interface for manipulating
and monitoring the amdgpu kernel, and is intended to replace
and deprecate the existing rocm_smi.py CLI tool.
It uses Ctypes to call the rocm_smi_lib API.
Recommended: At least one AMD GPU with ROCm driver installed
Required: ROCm SMI library installed (librocm_smi64)
"""
from __future__ import print_function
import argparse
import json
import logging
import os
import sys
import subprocess
import _thread
import time
import multiprocessing
import trace
from io import StringIO
from time import ctime
from subprocess import check_output
from rsmiBindings import *
# rocmSmiLib_cli version. Increment this as needed.
# Major version - Increment when backwards-compatibility breaks
# Minor version - Increment when adding a new feature, set to 0 when major is incremented
# Patch version - Increment when adding a fix, set to 0 when minor is incremented
SMI_MAJ = 1
SMI_MIN = 4
SMI_PAT = 1
__version__ = '%s.%s.%s' % (SMI_MAJ, SMI_MIN, SMI_PAT)
# Set to 1 if an error occurs
RETCODE = 0
# If we want JSON format output instead
PRINT_JSON = False
JSON_DATA = {}
# Version of the JSON output used to save clocks
CLOCK_JSON_VERSION = 1
headerString = ' ROCm System Management Interface '
footerString = ' End of ROCm SMI Log '
# Output formatting
appWidth = 84
deviceList = []
# Enable or disable serialized format
OUTPUT_SERIALIZATION = False
# These are the valid clock types that can be returned/modified:
# TODO: "clk_type_names" from rsmiBindings.py should fetch valid clocks from
# the same location as rocm_smi_device.cc instead of hardcoding the values
validClockNames = clk_type_names[1:-2]
# The purpose of the [1:-2] here ^^^^ is to remove the duplicate elements at the
# beginning and end of the clk_type_names list (specifically sclk and mclk)
# Also the "invalid" clock in the list is removed since it isn't a valid clock type
validClockNames.append('pcie')
validClockNames.sort()
def driverInitialized():
""" Returns true if amdgpu is found in the list of initialized modules
"""
driverInitialized = ''
try:
driverInitialized = str(subprocess.check_output("cat /sys/module/amdgpu/initstate |grep live", shell=True))
except subprocess.CalledProcessError:
pass
if len(driverInitialized) > 0:
return True
return False
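# A shell-free equivalent (illustrative sketch; assumes the sysfs node exists
# and is readable):
#
#     with open('/sys/module/amdgpu/initstate') as initstate:
#         return 'live' in initstate.read()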
def formatJson(device, log):
""" Print out in JSON format
@param device: DRM device identifier
@param log: String to parse and output into JSON format
"""
global JSON_DATA
for line in log.splitlines():
# Drop any invalid or improperly-formatted data
if ':' not in line:
continue
logTuple = line.split(': ')
if str(device) != 'system':
JSON_DATA['card' + str(device)][logTuple[0]] = logTuple[1].strip()
else:
JSON_DATA['system'][logTuple[0]] = logTuple[1].strip()
def formatCsv(deviceList):
""" Print out the JSON_DATA in CSV format """
global JSON_DATA
jsondata = json.dumps(JSON_DATA)
outstr = jsondata
# Check if the first json data element is 'system' or 'device'
outputType = outstr[outstr.find('\"')+1:]
outputType = outputType[:outputType.find('\"')]
header = []
my_string = ''
if outputType != 'system':
header.append('device')
else:
header.append('system')
if outputType == 'system':
jsonobj = json.loads(jsondata)
keylist = header
for record in jsonobj:
my_string += str(record)
for key in keylist:
if key == 'system':
tempstr = str(jsonobj[record])
tempstr = tempstr[tempstr.find('\'')+1:]
tempstr = tempstr[:tempstr.find('\'')]
# Force output device type to 'system'
my_string += ',%s\nsystem,%s' % (tempstr, jsonobj[record][tempstr])
my_string += '\n'
# Force output device type to 'system'
if my_string.startswith('system'):
my_string = 'device' + my_string[6:]
return my_string
headerkeys = []
# Separate device-specific information from system-level information
for dev in deviceList:
if str(dev) != 'system':
headerkeys.extend(l for l in JSON_DATA['card' + str(dev)].keys() if l not in headerkeys)
else:
headerkeys.extend(l for l in JSON_DATA['system'].keys() if l not in headerkeys)
header.extend(headerkeys)
outStr = '%s\n' % ','.join(header)
if len(header) <= 1:
return ''
for dev in deviceList:
if str(dev) != 'system':
outStr += 'card%s,' % dev
else:
outStr += 'system,'
for val in headerkeys:
try:
if str(dev) != 'system':
# Remove commas like the ones in PCIe speed
outStr += '%s,' % JSON_DATA['card' + str(dev)][val].replace(',', '')
else:
outStr += '%s,' % JSON_DATA['system'][val].replace(',', '')
except KeyError as e:
# If the key doesn't exist (like dcefclock on Fiji, or unsupported functionality)
outStr += 'N/A,'
# Drop the trailing ',' and replace it with a \n
outStr = '%s\n' % outStr[0:-1]
return outStr
def formatMatrixToJSON(deviceList, matrix, metricName):
""" Format symmetric matrix of GPU permutations to become JSON print-ready.
@param deviceList: List of DRM devices (can be a single-item list)
@param metricName: Title of the item to print to the log
@param matrix: symmetric matrix full of values of every permutation of DRM devices.
example:
GPU0 GPU1
GPU0 0 40
GPU1 40 0
Where matrix content is: [[0, 40], [40, 0]]
"""
devices_ind = range(len(deviceList))
for row_indx in devices_ind:
# Start at row_indx +1 to avoid printing repeated values ( GPU1 x GPU2 is the same as GPU2 x GPU1 )
for col_ind in range(row_indx + 1, len(deviceList)):
try:
valueStr = matrix[deviceList[row_indx]][deviceList[col_ind]].value
except AttributeError:
valueStr = matrix[deviceList[row_indx]][deviceList[col_ind]]
printSysLog(metricName.format(deviceList[row_indx], deviceList[col_ind]), valueStr)
def getBus(device):
""" Return the bus identifier of a given device
@param device: DRM device identifier
"""
bdfid = c_uint64(0)
ret = rocmsmi.rsmi_dev_pci_id_get(device, byref(bdfid))
# BDFID = ((DOMAIN & 0xffffffff) << 32) | ((BUS & 0xff) << 8) |((DEVICE & 0x1f) <<3 ) | (FUNCTION & 0x7)
domain = (bdfid.value >> 32) & 0xffffffff
bus = (bdfid.value >> 8) & 0xff
    dev = (bdfid.value >> 3) & 0x1f  # separate name, so the DRM device id is not clobbered
    function = bdfid.value & 0x7
    pci_id = '{:04X}:{:02X}:{:02X}.{:0X}'.format(domain, bus, dev, function)
    if rsmi_ret_ok(ret, device, 'get_pci_id'):
        return pci_id
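# Worked example of the BDFID decoding above (values are made up):
# bdfid = 0x0000000000000308 -> domain 0000, bus 03, device 01, function 0,
# which formats as '0000:03:01.0'.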
def getFanSpeed(device):
""" Return a tuple with the fan speed (value,%) for a specified device,
or (None,None) if either current fan speed or max fan speed cannot be
obtained
@param device: DRM device identifier
"""
fanLevel = c_int64()
fanMax = c_int64()
sensor_ind = c_uint32(0)
fl = 0
fm = 0
ret = rocmsmi.rsmi_dev_fan_speed_get(device, sensor_ind, byref(fanLevel))
if rsmi_ret_ok(ret, device, 'get_fan_speed', True):
fl = fanLevel.value
ret = rocmsmi.rsmi_dev_fan_speed_max_get(device, sensor_ind, byref(fanMax))
if rsmi_ret_ok(ret, device, 'get_fan_max_speed', True):
fm = fanMax.value
if fl == 0 or fm == 0:
return (fl, 0) # to prevent division by zero crash
return (fl, round((float(fl) / float(fm)) * 100, 2))
def getGpuUse(device):
""" Return the current GPU usage as a percentage
@param device: DRM device identifier
"""
percent = c_uint32()
ret = rocmsmi.rsmi_dev_busy_percent_get(device, byref(percent))
if rsmi_ret_ok(ret, device, 'GPU Utilization '):
return percent.value
return -1
def getId(device):
""" Return the hexadecimal value of a device's ID
@param device: DRM device identifier
"""
dv_id = c_short()
ret = rocmsmi.rsmi_dev_id_get(device, byref(dv_id))
if rsmi_ret_ok(ret, device, 'get_device_id'):
return hex(dv_id.value)
def getMaxPower(device):
""" Return the maximum power cap of a given device
@param device: DRM device identifier
"""
power_cap = c_uint64()
ret = rocmsmi.rsmi_dev_power_cap_get(device, 0, byref(power_cap))
if rsmi_ret_ok(ret, device, 'get_power_cap'):
return power_cap.value / 1000000
return -1
def getMemInfo(device, memType, quiet=False):
""" Returns a tuple of (memory_used, memory_total) of
the requested memory type usage for the device specified
@param device: DRM device identifier
    @param memType: [vram|vis_vram|gtt] Memory type to return
    @param quiet: Turn on to silence error output
(you plan to handle manually). Default is off,
which exposes any issue accessing the different
memory types.
"""
memType = memType.upper()
if memType not in memory_type_l:
printErrLog(device, 'Invalid memory type %s' % (memType))
return (None, None)
memoryUse = c_uint64()
memoryTot = c_uint64()
memUsed = None
memTotal = None
ret = rocmsmi.rsmi_dev_memory_usage_get(device, memory_type_l.index(memType), byref(memoryUse))
if rsmi_ret_ok(ret, device, 'get_memory_usage_' + str(memType), quiet):
memUsed = memoryUse.value
ret = rocmsmi.rsmi_dev_memory_total_get(device, memory_type_l.index(memType), byref(memoryTot))
if rsmi_ret_ok(ret, device, 'get_memory_total_' + str(memType), quiet):
memTotal = memoryTot.value
return (memUsed, memTotal)
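# e.g. (illustrative): memUsed, memTotal = getMemInfo(0, 'vram') yields the
# VRAM usage and total size in bytes for device 0; any value that could not
# be read is returned as None.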
def getProcessName(pid):
""" Get the process name of a specific pid
@param pid: Process ID of a program to be parsed
"""
if int(pid) < 1:
logging.debug('PID must be greater than 0')
return 'UNKNOWN'
try:
pName = str(subprocess.check_output("ps -p %d -o comm=" % (int(pid)), shell=True))
except subprocess.CalledProcessError as e:
pName = 'UNKNOWN'
    if pName is None:
pName = 'UNKNOWN'
# Remove the substrings surrounding from process name (b' and \n')
if str(pName).startswith('b\''):
pName = pName[2:]
if str(pName).endswith('\\n\''):
pName = pName[:-3]
return pName
def getPerfLevel(device):
""" Return the current performance level of a given device
@param device: DRM device identifier
"""
perf = rsmi_dev_perf_level_t()
ret = rocmsmi.rsmi_dev_perf_level_get(device, byref(perf))
if rsmi_ret_ok(ret, device, 'get_perf_level'):
return perf_level_string(perf.value)
return -1
def getPid(name):
""" Get the process id of a specific application
@param name: Process name of a program to be parsed
"""
return check_output(['pidof', name])
def getPidList():
""" Return a list of KFD process IDs """
num_items = c_uint32()
ret = rocmsmi.rsmi_compute_process_info_get(None, byref(num_items))
if rsmi_ret_ok(ret, metric='get_compute_process_info'):
buff_sz = num_items.value + 10
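        # Pad the buffer, presumably to allow for processes that start between
        # the sizing call above and the second rsmi_compute_process_info_get().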
procs = (rsmi_process_info_t * buff_sz)()
procList = []
ret = rocmsmi.rsmi_compute_process_info_get(byref(procs), byref(num_items))
for i in range(num_items.value):
procList.append('%s' % (procs[i].process_id))
return procList
return
def getPower(device):
""" Return the current power level of a given device
@param device: DRM device identifier
"""
power = c_uint32()
ret = rocmsmi.rsmi_dev_power_ave_get(device, 0, byref(power))
if rsmi_ret_ok(ret, device, 'get_power_avg'):
return power.value / 1000000
return 'N/A'
def getRasEnablement(device, block):
""" Return RAS enablement state for a given device
@param device: DRM device identifier
@param block: RAS block identifier
"""
state = rsmi_ras_err_state_t()
ret = rocmsmi.rsmi_dev_ecc_status_get(device, rsmi_gpu_block_d[block], byref(state))
if rsmi_ret_ok(ret, device, 'get_ecc_status_' + str(block), True):
return rsmi_ras_err_stale_machine[state.value].upper()
return 'N/A'
def getTemp(device, sensor):
""" Display the current temperature from a given device's sensor
@param device: DRM device identifier
@param sensor: Temperature sensor identifier
"""
temp = c_int64(0)
metric = rsmi_temperature_metric_t.RSMI_TEMP_CURRENT
ret = rocmsmi.rsmi_dev_temp_metric_get(c_uint32(device), temp_type_lst.index(sensor), metric, byref(temp))
if rsmi_ret_ok(ret, device, 'get_temp_metric' + str(sensor), True):
return temp.value / 1000
return 'N/A'
def getVbiosVersion(device):
""" Returns the VBIOS version for a given device
@param device: DRM device identifier
"""
vbios = create_string_buffer(256)
ret = rocmsmi.rsmi_dev_vbios_version_get(device, vbios, 256)
if rsmi_ret_ok(ret, device):
return vbios.value.decode()
def getVersion(deviceList, component):
""" Return the software version for the specified component
@param deviceList: List of DRM devices (can be a single-item list)
@param component: Component (currently only driver)
"""
ver_str = create_string_buffer(256)
ret = rocmsmi.rsmi_version_str_get(component, ver_str, 256)
if rsmi_ret_ok(ret, None, 'get_version_str_' + str(component)):
return ver_str.value.decode()
return None
def getComputePartition(device):
""" Return the current compute partition of a given device
@param device: DRM device identifier
"""
currentComputePartition = create_string_buffer(256)
ret = rocmsmi.rsmi_dev_compute_partition_get(device, currentComputePartition, 256)
if rsmi_ret_ok(ret, device, 'get_compute_partition', silent=True) and currentComputePartition.value.decode():
return str(currentComputePartition.value.decode())
return "UNKNOWN"
def getMemoryPartition(device):
""" Return the current memory partition of a given device
@param device: DRM device identifier
"""
currentNPSMode = create_string_buffer(256)
ret = rocmsmi.rsmi_dev_nps_mode_get(device, currentNPSMode, 256)
if rsmi_ret_ok(ret, device, 'get_NPS_mode', silent=True) and currentNPSMode.value.decode():
return str(currentNPSMode.value.decode())
return "UNKNOWN"
def print2DArray(dataArray):
""" Print 2D Array with uniform spacing """
global PRINT_JSON
dataArrayLength = []
isPid = False
if str(dataArray[0][0]) == 'PID':
isPid = True
for position in range(len(dataArray[0])):
dataArrayLength.append(len(dataArray[0][position]))
for position in range(len(dataArray)):
for cell in range(len(dataArray[0])):
if len(dataArray[position][cell]) > dataArrayLength[cell]:
dataArrayLength[cell] = len(dataArray[position][cell])
for position in range(len(dataArray)):
printString = ''
for cell in range(len(dataArray[0])):
printString += str(dataArray[position][cell]).ljust(dataArrayLength[cell], ' ') + '\t'
if PRINT_JSON:
printString = ' '.join(printString.split()).lower()
firstElement = printString.split(' ', 1)[0]
printString = printString.split(' ', 1)[1]
printString = printString.replace(' ', ', ')
if (position > 0):
if isPid:
printSysLog('PID%s' % (firstElement), printString)
else:
printSysLog(firstElement, printString)
else:
printLog(None, printString, None)
def printEmptyLine():
""" Print out a single empty line """
global PRINT_JSON
if not PRINT_JSON:
print()
def printErrLog(device, err):
""" Print out an error to the SMI log
@param device: DRM device identifier
@param err: Error string to print
"""
global PRINT_JSON
devName = device
for line in err.split('\n'):
errstr = 'GPU[%s]\t: %s' % (devName, line)
if not PRINT_JSON:
logging.error(errstr)
else:
logging.debug(errstr)
def printInfoLog(device, metricName, value):
""" Print out an info line to the SMI log
@param device: DRM device identifier
@param metricName: Title of the item to print to the log
@param value: The item's value to print to the log
"""
global PRINT_JSON
if not PRINT_JSON:
if value is not None:
logstr = 'GPU[%s]\t: %s: %s' % (device, metricName, value)
else:
logstr = 'GPU[%s]\t: %s' % (device, metricName)
if device is None:
logstr = logstr[13:]
logging.info(logstr)
def printEventList(device, delay, eventList):
""" Print out notification events for a specified device
@param device: DRM device identifier
@param delay: Notification delay in ms
@param eventList: List of event type names (can be a single-item list)
"""
mask = 0
ret = rocmsmi.rsmi_event_notification_init(device)
if not rsmi_ret_ok(ret, device, 'event_notification_init'):
printErrLog(device, 'Unable to initialize event notifications.')
return
for eventType in eventList:
mask |= 2 ** notification_type_names.index(eventType.upper())
ret = rocmsmi.rsmi_event_notification_mask_set(device, mask)
if not rsmi_ret_ok(ret, device, 'set_event_notification_mask'):
printErrLog(device, 'Unable to set event notification mask.')
return
    while True:  # Exit condition from user keyboard input of 'q' or 'ctrl + c'
num_elements = c_uint32(1)
data = rsmi_evt_notification_data_t(1)
rocmsmi.rsmi_event_notification_get(delay, byref(num_elements), byref(data))
if len(data.message) > 0:
print2DArray([['\rGPU[%d]:\t' % (device), ctime().split()[3], notification_type_names[data.event.value - 1],
data.message.decode('utf8') + '\r']])
def printLog(device, metricName, value, extraSpace=False):
""" Print out to the SMI log
@param device: DRM device identifier
@param metricName: Title of the item to print to the log
@param value: The item's value to print to the log
"""
global PRINT_JSON
if PRINT_JSON:
if value is not None and device is not None:
formatJson(device, str(metricName) + ': ' + str(value))
elif device is not None:
formatJson(device, str(metricName))
return
if value is not None:
logstr = 'GPU[%s]\t\t: %s: %s' % (device, metricName, value)
else:
logstr = 'GPU[%s]\t\t: %s' % (device, metricName)
if device is None:
logstr = logstr.split(':')[1][1:]
# Force thread safe printing
lock = multiprocessing.Lock()
lock.acquire()
if extraSpace:
print('\n' + logstr + '\n', end='', flush=True)
else:
print(logstr + '\n', end='', flush=True)
lock.release()
def printListLog(metricName, valuesList):
""" Print out to the SMI log for the lists
@param metricName: Title of the item to print to the log
@param valuesList: The item's list of values to print to the log
"""
global PRINT_JSON
listStr = ''
line = metricName + ':\n'
if not valuesList:
line = 'None'
else:
for value in valuesList:
value = str(value) + ' '
if (len(line) + len(value)) < appWidth:
line += value
else:
listStr = listStr + line + '\n'
line = value
if not PRINT_JSON:
print(listStr + line)
def printLogSpacer(displayString=None, fill='='):
""" Prints [name of the option]/[name of the program] in the spacer to explain data below
If no parameters are given, a default fill of the '=' string is used in the spacer
@param displayString: name of item to be displayed inside of the log spacer
@param fill: padding string which surrounds the given display string
"""
global appWidth, PRINT_JSON
if not PRINT_JSON:
if displayString:
if len(displayString) % 2:
displayString += fill
logSpacer = fill * int((appWidth - (len(displayString))) / 2) + displayString + fill * int(
(appWidth - (len(displayString))) / 2)
else:
logSpacer = fill * appWidth
print(logSpacer)
def printSysLog(SysComponentName, value):
""" Print out to the SMI log for repeated features
@param SysComponentName: Title of the item to print to the log
@param value: The item's value to print to the log
"""
global PRINT_JSON, JSON_DATA
if PRINT_JSON:
if 'system' not in JSON_DATA:
JSON_DATA['system'] = {}
formatJson('system', str(SysComponentName) + ': ' + str(value))
return
logstr = '{}: {}'.format(SysComponentName, value)
logging.debug(logstr)
print(logstr)
def printTableLog(column_headers, data_matrix, device=None, tableName=None, anchor='>', v_delim=' '):
""" Print out to the SMI log for the lists
@param column_headers: Header names for each column
@param data_matrix: Matrix of values
@param device: DRM device identifier
@param tableName: Title of the table to print to the log
@param anchor: Alignment direction of the print output
@param v_delim: Boundary String delimiter for the print output
"""
    # Usage: the length of each column header determines that column's width.
    # If additional space is needed, pad the corresponding column name with spaces.
    # If the table should print tabulated, pad the first column name with leading zeroes.
    # Use anchor '<' to align columns to the left (the default '>' aligns right).
global OUTPUT_SERIALIZATION, PRINT_JSON
if OUTPUT_SERIALIZATION or PRINT_JSON:
return
if (device is not None) or tableName:
if device is not None:
print('\nGPU[%s]: ' % (device), end='\t')
if tableName:
print(tableName, end='')
printEmptyLine()
for header in column_headers:
print('{:>}'.format(header), end=v_delim)
printEmptyLine()
for row in data_matrix:
for index, cell in enumerate(row):
if cell is None:
cell = 'None'
print('{:{anc}{width}}'.format(cell, anc=anchor, width=len(column_headers[index])), end=v_delim)
printEmptyLine()
def printTableRow(space, displayString, v_delim=" "):
""" Print out a line of a matrix table
@param space: The item's spacing to print
@param displayString: The item's value to print
@param v_delim: Boundary String delimiter for the print output
"""
if space:
print(space % (displayString), end=v_delim)
else:
print(displayString, end=v_delim)
def checkIfSecondaryDie(device):
""" Checks if GCD(die) is the secondary die in a MCM.
Secondary dies lack power management features.
TODO: switch to more robust way to check for primary/secondary die, when implemented in Kernel and rocm_smi_lib.
@param device: The device to check
"""
power_cap = c_uint64()
# secondary die can currently be determined by checking if all power1_* (power cap) values are equal to zero.
ret = rocmsmi.rsmi_dev_power_cap_get(device, 0, byref(power_cap))
if not (rsmi_ret_ok(ret, None, 'get_power_cap', False) and power_cap.value == 0):
return False
ret = rocmsmi.rsmi_dev_power_cap_default_get(device, byref(power_cap))
if not (rsmi_ret_ok(ret, None, 'get_power_cap_default', False) and power_cap.value == 0):
return False
ret = rocmsmi.rsmi_dev_power_ave_get(device, 0, byref(power_cap))
if not (rsmi_ret_ok(ret, None, 'get_power_avg', False) and power_cap.value == 0):
return False
return True
def resetClocks(deviceList):
""" Reset clocks to default
Reset clocks to default values by setting performance level to auto, as well
as setting OverDrive back to 0
@param deviceList: List of DRM devices (can be a single-item list)
"""
printLogSpacer(' Reset Clocks ')
for device in deviceList:
ret = rocmsmi.rsmi_dev_overdrive_level_set(device, rsmi_dev_perf_level_t(0))
if rsmi_ret_ok(ret, device, 'set_overdrive_level'):
printLog(device, 'OverDrive set to 0', None)
else:
printLog(device, 'Unable to reset OverDrive', None)
ret = rocmsmi.rsmi_dev_perf_level_set(device, rsmi_dev_perf_level_t(0))
if rsmi_ret_ok(ret, device, 'set_perf_level'):
printLog(device, 'Successfully reset clocks', None)
else:
printLog(device, 'Unable to reset clocks', None)
ret = rocmsmi.rsmi_dev_perf_level_set(device, rsmi_dev_perf_level_t(0))
if rsmi_ret_ok(ret, device, 'set_perf_level'):
printLog(device, 'Performance level reset to auto', None)
else:
printLog(device, 'Unable to reset performance level to auto', None)
def resetFans(deviceList):
""" Reset fans to driver control for a list of devices.
@param deviceList: List of DRM devices (can be a single-item list)
"""
printLogSpacer(' Reset GPU Fan Speed ')
for device in deviceList:
sensor_ind = c_uint32(0)
ret = rocmsmi.rsmi_dev_fan_reset(device, sensor_ind)
if rsmi_ret_ok(ret, device, 'reset_fan'):
printLog(device, 'Successfully reset fan speed to driver control', None)
printLogSpacer()
def resetPowerOverDrive(deviceList, autoRespond):
""" Reset Power OverDrive to the default value
    @param deviceList: List of DRM devices (can be a single-item list)
    @param autoRespond: Response to automatically provide for all prompts
    """
setPowerOverDrive(deviceList, 0, autoRespond)
def resetProfile(deviceList):
""" Reset profile for a list of a devices.
@param deviceList: List of DRM devices (can be a single-item list)
"""
printLogSpacer(' Reset Profile ')
for device in deviceList:
ret = rocmsmi.rsmi_dev_power_profile_set(device, 0, profileString('BOOTUP DEFAULT'))
if rsmi_ret_ok(ret, device, 'set_power_profile'):
printLog(device, 'Successfully reset Power Profile', None)
else:
printErrLog(device, 'Unable to reset Power Profile')
ret = rocmsmi.rsmi_dev_perf_level_set(device, rsmi_dev_perf_level_t(0))
if rsmi_ret_ok(ret, device, 'set_perf_level'):
printLog(device, 'Successfully reset Performance Level', None)
else:
printErrLog(device, 'Unable to reset Performance Level')
printLogSpacer()
def resetXgmiErr(deviceList):
""" Reset the XGMI Error value
@param deviceList: Reset XGMI error count for these devices
"""
printLogSpacer('Reset XGMI Error Status ')
for device in deviceList:
ret = rocmsmi.rsmi_dev_xgmi_error_reset(device)
if rsmi_ret_ok(ret, device, 'reset xgmi'):
printLog(device, 'Successfully reset XGMI Error count', None)
else:
logging.error('GPU[%s]\t\t: Unable to reset XGMI error count', device)
printLogSpacer()
def resetPerfDeterminism(deviceList):
""" Reset Performance Determinism
@param deviceList: Disable Performance Determinism for these devices
"""
printLogSpacer('Disable Performance Determinism')
for device in deviceList:
ret = rocmsmi.rsmi_dev_perf_level_set(device, rsmi_dev_perf_level_t(0))
if rsmi_ret_ok(ret, device, 'disable performance determinism'):
printLog(device, 'Successfully disabled performance determinism', None)
else:
logging.error('GPU[%s]\t\t: Unable to disable performance determinism', device)
printLogSpacer()
def resetComputePartition(deviceList):
""" Reset Compute Partition to its boot state
@param deviceList: List of DRM devices (can be a single-item list)
"""
printLogSpacer(" Reset compute partition to its boot state ")
for device in deviceList:
originalPartition = getComputePartition(device)
ret = rocmsmi.rsmi_dev_compute_partition_reset(device)
if rsmi_ret_ok(ret, device, 'reset_compute_partition', silent=True):
resetBootState = getComputePartition(device)
printLog(device, "Successfully reset compute partition (" +
originalPartition + ") to boot state (" + resetBootState +
")", None)
elif ret == rsmi_status_t.RSMI_STATUS_PERMISSION:
printLog(device, 'Permission denied', None)
elif ret == rsmi_status_t.RSMI_STATUS_NOT_SUPPORTED:
printLog(device, 'Not supported on the given system', None)
else:
rsmi_ret_ok(ret, device, 'reset_compute_partition')
printErrLog(device, 'Failed to reset the compute partition to boot state')
printLogSpacer()
def resetNpsMode(deviceList):
""" Reset NPS mode to its boot state
@param deviceList: List of DRM devices (can be a single-item list)
"""
printLogSpacer(" Reset nps mode to its boot state ")
for device in deviceList:
originalPartition = getMemoryPartition(device)
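        # Run the progress bar in a separate process; the NPS reset below can block for several seconds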
t1 = multiprocessing.Process(target=showProgressbar,
args=("Resetting NPS mode",13,))
t1.start()
addExtraLine=True
start=time.time()
ret = rocmsmi.rsmi_dev_nps_mode_reset(device)
stop=time.time()
duration=stop-start
if t1.is_alive():
t1.terminate()
t1.join()
if duration < float(0.1): # For longer runs, add extra line before output
addExtraLine=False # This is to prevent overriding progress bar
if rsmi_ret_ok(ret, device, 'reset_NPS_mode', silent=True):
resetBootState = getMemoryPartition(device)
printLog(device, "Successfully reset nps mode (" +
originalPartition + ") to boot state (" +
resetBootState + ")", None, addExtraLine)
elif ret == rsmi_status_t.RSMI_STATUS_PERMISSION:
printLog(device, 'Permission denied', None, addExtraLine)
elif ret == rsmi_status_t.RSMI_STATUS_NOT_SUPPORTED:
printLog(device, 'Not supported on the given system', None, addExtraLine)
else:
rsmi_ret_ok(ret, device, 'reset_NPS_mode')
printErrLog(device, 'Failed to reset nps mode to boot state')
printLogSpacer()
def setClockRange(deviceList, clkType, minvalue, maxvalue, autoRespond):
""" Set the range for the specified clktype in the PowerPlay table for a list of devices.
Parameters:
deviceList -- List of DRM devices (can be a single-item list)
    clkType -- [sclk|mclk] Which clock type to apply the range to
minvalue -- Minimum value to apply to the clock range
maxvalue -- Maximum value to apply to the clock range
autoRespond -- Response to automatically provide for all prompts
"""
global RETCODE
if clkType not in {'sclk', 'mclk'}:
printLog(None, 'Invalid range identifier %s' % (clkType), None)
logging.error('Unsupported range type %s', clkType)
RETCODE = 1
return
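    # Both bounds must be integers; the bitwise AND is only used here to force int() on both values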
try:
int(minvalue) & int(maxvalue)
except ValueError:
        printErrLog(None, 'Unable to set %s range' % (clkType))
logging.error('%s or %s is not an integer', minvalue, maxvalue)
RETCODE = 1
return
confirmOutOfSpecWarning(autoRespond)
printLogSpacer(' Set Valid %s Range ' % (clkType))
for device in deviceList:
ret = rocmsmi.rsmi_dev_clk_range_set(device, int(minvalue), int(maxvalue), rsmi_clk_names_dict[clkType])
if rsmi_ret_ok(ret, device, silent=True):
printLog(device, 'Successfully set %s from %s(MHz) to %s(MHz)' % (clkType, minvalue, maxvalue), None)
else:
printErrLog(device, 'Unable to set %s from %s(MHz) to %s(MHz)' % (clkType, minvalue, maxvalue))
RETCODE = 1
if ret == rsmi_status_t.RSMI_STATUS_NOT_SUPPORTED:
printLog(device, 'Setting %s range is not supported for this device.' % (clkType), None)
def setVoltageCurve(deviceList, point, clk, volt, autoRespond):
""" Set voltage curve for a point in the PowerPlay table for a list of devices.
Parameters:
deviceList -- List of DRM devices (can be a single-item list)
point -- Point on the voltage curve to modify
clk -- Clock speed specified for this curve point
volt -- Voltage specified for this curve point
autoRespond -- Response to automatically provide for all prompts
"""
global RETCODE
value = '%s %s %s' % (point, clk, volt)
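    # int() every token below; a non-integer anywhere raises ValueError before we touch the device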
try:
        [int(item) for item in value.split()]
except ValueError:
printErrLog(None, 'Unable to set Voltage curve')
printErrLog(None, 'Non-integer characters are present in %s' %value)
RETCODE = 1
return
confirmOutOfSpecWarning(autoRespond)
for device in deviceList:
ret = rocmsmi.rsmi_dev_od_volt_info_set(device, int(point), int(clk), int(volt))
if rsmi_ret_ok(ret, device, 'set_voltage_curve'):
printLog(device, 'Successfully set voltage point %s to %s(MHz) %s(mV)' % (point, clk, volt), None)
else:
printErrLog(device, 'Unable to set voltage point %s to %s(MHz) %s(mV)' % (point, clk, volt))
RETCODE = 1
def setPowerPlayTableLevel(deviceList, clkType, point, clk, volt, autoRespond):
""" Set clock frequency and voltage for a level in the PowerPlay table for a list of devices.
Parameters:
deviceList -- List of DRM devices (can be a single-item list)
    clkType -- [sclk|mclk] Which clock type to apply the range to
point -- Point on the voltage curve to modify
clk -- Clock speed specified for this curve point
volt -- Voltage specified for this curve point
autoRespond -- Response to automatically provide for all prompts
"""
global RETCODE
value = '%s %s %s' % (point, clk, volt)
try:
        [int(item) for item in value.split()]
except ValueError:
printErrLog(None, 'Unable to set PowerPlay table level')
printErrLog(None, 'Non-integer characters are present in %s' %value)
RETCODE = 1
return
confirmOutOfSpecWarning(autoRespond)
for device in deviceList:
if clkType == 'sclk':
ret = rocmsmi.rsmi_dev_od_clk_info_set(device, rsmi_freq_ind_t(int(point)), int(clk),
rsmi_clk_names_dict[clkType])
if rsmi_ret_ok(ret, device, 'set_power_play_table_level_' + str(clkType)):
printLog(device, 'Successfully set voltage point %s to %s(MHz) %s(mV)' % (point, clk, volt), None)
else:
printErrLog(device, 'Unable to set voltage point %s to %s(MHz) %s(mV)' % (point, clk, volt))
RETCODE = 1
elif clkType == 'mclk':
ret = rocmsmi.rsmi_dev_od_clk_info_set(device, rsmi_freq_ind_t(int(point)), int(clk),
rsmi_clk_names_dict[clkType])
if rsmi_ret_ok(ret, device, 'set_power_play_table_level_' + str(clkType)):
printLog(device, 'Successfully set voltage point %s to %s(MHz) %s(mV)' % (point, clk, volt), None)
else:
printErrLog(device, 'Unable to set voltage point %s to %s(MHz) %s(mV)' % (point, clk, volt))
RETCODE = 1
else:
printErrLog(device, 'Unable to set %s range' % (clkType))
logging.error('Unsupported range type %s', clkType)
RETCODE = 1
def setClockOverDrive(deviceList, clktype, value, autoRespond):
""" Set clock speed to OverDrive for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
    @param clktype: [sclk|mclk] Clock type to set
@param value: [0-20] OverDrive percentage
@param autoRespond: Response to automatically provide for all prompts
"""
printLogSpacer(' Set Clock OverDrive (Range: 0% to 20%) ')
global RETCODE
try:
int(value)
except ValueError:
printLog(None, 'Unable to set OverDrive level', None)
        logging.error('%s is not an integer', value)
RETCODE = 1
return
confirmOutOfSpecWarning(autoRespond)
for device in deviceList:
if int(value) < 0:
printErrLog(device, 'Unable to set OverDrive')
logging.debug('Overdrive cannot be less than 0%')
RETCODE = 1
return
if int(value) > 20:
printLog(device, 'Setting OverDrive to 20%', None)
logging.debug('OverDrive cannot be set to a value greater than 20%')
value = '20'
if getPerfLevel(device) != 'MANUAL':
ret = rocmsmi.rsmi_dev_perf_level_set(device, rsmi_dev_perf_level_t(3))
if rsmi_ret_ok(ret, device, 'set_perf_level_manual_' + str(clktype)):
printLog(device, 'Performance level set to manual', None)
else:
printErrLog(device, 'Unable to set performance level to manual')
if clktype == 'mclk':
fsFile = os.path.join('/sys/class/drm', 'card%d' % (device), 'device', 'pp_mclk_od')
if not os.path.isfile(fsFile):
printLog(None, 'Unable to write to sysfs file (' + fsFile +
'), file does not exist', None)
logging.debug('%s does not exist', fsFile)
continue
try:
logging.debug('Writing value \'%s\' to file \'%s\'', value, fsFile)
with open(fsFile, 'w') as fs:
fs.write(value + '\n')
except (IOError, OSError):
printLog(None, 'Unable to write to sysfs file %s' %fsFile, None)
logging.warning('IO or OS error')
RETCODE = 1
continue
printLog(device, 'Successfully set %s OverDrive to %s%%' % (clktype, value), None)
elif clktype == 'sclk':
ret = rocmsmi.rsmi_dev_overdrive_level_set(device, rsmi_dev_perf_level_t(int(value)))
if rsmi_ret_ok(ret, device, 'set_overdrive_level_' + str(clktype)):
printLog(device, 'Successfully set %s OverDrive to %s%%' % (clktype, value), None)
else:
printLog(device, 'Unable to set %s OverDrive to %s%%' % (clktype, value), None)
else:
printErrLog(device, 'Unable to set OverDrive')
logging.error('Unsupported clock type %s', clktype)
RETCODE = 1
def setClocks(deviceList, clktype, clk):
""" Set clock frequency levels for a list of devices.
@param deviceList: List of DRM devices (can be a single-item list)
@param clktype: [validClockNames] Clock type to set
@param clk: Clock frequency level to set
"""
global RETCODE
if not clk:
printLog(None, 'Invalid clock frequency', None)
RETCODE = 1
return
if clktype not in validClockNames:
printErrLog(None, 'Unable to set clock level')
logging.error('Invalid clock type %s', clktype)
RETCODE = 1
return
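    # Join the requested levels into one string so a single int() call validates every entry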
check_value = ''.join(map(str, clk))
try:
int(check_value)
except ValueError:
printLog(None, 'Unable to set clock level', None)
        logging.error('Non-integer characters are present in value %s', check_value)
RETCODE = 1
return
# Generate a frequency bitmask from user input value
freq_bitmask = 0
for bit in clk:
if bit > 63:
printErrLog(None, 'Invalid clock frequency')
logging.error('Invalid frequency: %s', bit)
RETCODE = 1
return
freq_bitmask |= (1 << bit)
printLogSpacer(' Set %s Frequency ' % (str(clktype)))
for device in deviceList:
# Check if the performance level is manual, if not then set it to manual
if getPerfLevel(device).lower() != 'manual':
ret = rocmsmi.rsmi_dev_perf_level_set(device, rsmi_dev_perf_level_t(3))
if rsmi_ret_ok(ret, device, 'set_perf_level_manual'):
printLog(device, 'Performance level was set to manual', None)
else:
printErrLog(device, 'Unable to set performance level to manual')
RETCODE = 1
return
if clktype != 'pcie':
# Validate frequency bitmask
freq = rsmi_frequencies_t()
ret = rocmsmi.rsmi_dev_gpu_clk_freq_get(device, rsmi_clk_names_dict[clktype], byref(freq))
if rsmi_ret_ok(ret, device, 'get_gpu_clk_freq_' + str(clktype)) == False:
RETCODE = 1
return
# The freq_bitmask should be less than 2^(freqs.num_supported)
# For example, num_supported == 3, the max bitmask is 0111
if freq_bitmask >= (1 << freq.num_supported):
printErrLog(device, 'Invalid clock frequency %s' % hex(freq_bitmask))
RETCODE = 1
return
ret = rocmsmi.rsmi_dev_gpu_clk_freq_set(device, rsmi_clk_names_dict[clktype], freq_bitmask)
if rsmi_ret_ok(ret, device, 'set_gpu_clk_freq_' + str(clktype)):
printLog(device, 'Successfully set %s bitmask to' % (clktype), hex(freq_bitmask))
else:
printErrLog(device, 'Unable to set %s bitmask to: %s' % (clktype, hex(freq_bitmask)))
RETCODE = 1
else:
# Validate the bandwidth bitmask
bw = rsmi_pcie_bandwidth_t()
ret = rocmsmi.rsmi_dev_pci_bandwidth_get(device, byref(bw))
if rsmi_ret_ok(ret, device, 'get_PCIe_bandwidth') == False:
RETCODE = 1
return
# The freq_bitmask should be less than 2^(bw.transfer_rate.num_supported)
# For example, num_supported == 3, the max bitmask is 0111
if freq_bitmask >= (1 << bw.transfer_rate.num_supported):
printErrLog(device, 'Invalid PCIe frequency %s' % hex(freq_bitmask))
RETCODE = 1
return
ret = rocmsmi.rsmi_dev_pci_bandwidth_set(device, freq_bitmask)
if rsmi_ret_ok(ret, device, 'set_PCIe_bandwidth'):
printLog(device, 'Successfully set %s to level bitmask' % (clktype), hex(freq_bitmask))
else:
printErrLog(device, 'Unable to set %s bitmask to: %s' % (clktype, hex(freq_bitmask)))
RETCODE = 1
printLogSpacer()
def setPerfDeterminism(deviceList, clkvalue):
""" Set clock frequency level for a list of devices to enable performance
determinism.
@param deviceList: List of DRM devices (can be a single-item list)
    @param clkvalue: Clock frequency level to set
"""
global RETCODE
try:
int(clkvalue)
except ValueError:
        printErrLog(None, 'Unable to set Performance Determinism')
logging.error('%s is not an integer', clkvalue)
RETCODE = 1
return
for device in deviceList:
ret = rocmsmi.rsmi_perf_determinism_mode_set(device, int(clkvalue))
if rsmi_ret_ok(ret, device, 'set_perf_determinism'):
printLog(device, 'Successfully enabled performance determinism and set GFX clock frequency', str(clkvalue))
else:
printErrLog(device, 'Unable to set performance determinism and clock frequency to %s' % (str(clkvalue)))
RETCODE = 1
def resetGpu(device):
""" Perform a GPU reset on the specified device
    @param device: List containing the DRM device identifier to reset (must be a single-item list)
"""
printLogSpacer(' Reset GPU ')
global RETCODE
if len(device) > 1:
logging.error('GPU Reset can only be performed on one GPU per call')
RETCODE = 1
return
resetDev = int(device[0])
if not isAmdDevice(resetDev):
logging.error('GPU Reset can only be performed on an AMD GPU')
RETCODE = 1
return
ret = rocmsmi.rsmi_dev_gpu_reset(resetDev)
if rsmi_ret_ok(ret, resetDev, 'reset_gpu'):
printLog(resetDev, 'Successfully reset GPU %d' % (resetDev), None)
else:
printErrLog(resetDev, 'Unable to reset GPU %d' % (resetDev))
logging.debug('GPU reset failed with return value of %d' % ret)
printLogSpacer()
def isRasControlAvailable(device):
""" Check if RAS control is available for a specified device.
Parameters:
device -- DRM device identifier
"""
path = os.path.join('/sys/kernel/debug/dri', 'card%d' % device, 'device', 'ras_ctrl')
if not doesDeviceExist(device) or not path or not os.path.isfile(path):
        logging.warning('GPU[%s]\t: RAS control is not available', device)
return False
return True
def setRas(deviceList, rasAction, rasBlock, rasType):
""" Perform a RAS action on the devices
Parameters:
deviceList -- List of DRM devices (can be a single-item list)
rasAction -- [enable|disable|inject] RAS Action to perform
rasBlock -- [$validRasBlocks] RAS block
rasType -- [ce|ue] Error type to enable/disable
"""
global RETCODE
printLog(None, "This is experimental feature, use 'amdgpuras' tool for ras error manipulations for newer vbios")
if rasAction not in validRasActions:
printLog(None, 'Unable to perform RAS command %s on block %s for type %s' % (rasAction, rasBlock, rasType),
None)
logging.debug('Action %s is not a valid RAS command' % rasAction)
return
if rasBlock not in validRasBlocks:
printLog(None, 'Unable to perform RAS command %s on block %s for type %s' % (rasAction, rasBlock, rasType),
None)
        printLog(None, 'Block %s is not a valid RAS block' % rasBlock, None)
return
if rasType not in validRasTypes:
printLog(None, 'Unable to perform RAS command %s on block %s for type %s' % (rasAction, rasBlock, rasType),
None)
        printLog(None, 'Memory error type %s is not a valid RAS memory type' % rasType, None)
return
printLogSpacer()
# NOTE PSP FW doesn't support enabling disabled counters yet
for device in deviceList:
if isRasControlAvailable(device):
            rasFilePath = os.path.join('/sys/kernel/debug/dri', 'card%d' % device, 'device', 'ras_ctrl')
rasCmd = '%s %s %s' % (rasAction, rasBlock, rasType)
# writeToSysfs analog to old cli
if not os.path.isfile(rasFilePath):
printLog(None, 'Unable to write to sysfs file', None)
logging.debug('%s does not exist', rasFilePath)
return False
try:
logging.debug('Writing value \'%s\' to file \'%s\'', rasCmd, rasFilePath)
with open(rasFilePath, 'w') as fs:
                    fs.write(rasCmd + '\n')  # Certain sysfs files require \n at the end
except (IOError, OSError):
printLog(None, 'Unable to write to sysfs file %s' % rasFilePath, None)
logging.warning('IO or OS error')
RETCODE = 1
printLogSpacer()
return
def setFanSpeed(deviceList, fan):
""" Set fan speed for a list of devices.
@param deviceList: List of DRM devices (can be a single-item list)
    @param fan: [0-255] Fan speed level, or a percentage ending in '%'
"""
printLogSpacer(' Set GPU Fan Speed ')
for device in deviceList:
if str(fan):
fanLevel = c_int64()
sensor_ind = c_uint32(0)
last_char = str(fan)[-1]
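            # A trailing '%' means a percentage; scale it onto the 0-255 fan level range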
if last_char == '%':
fanLevel = int(str(fan)[:-1]) / 100 * 255
else:
fanLevel = int(str(fan))
ret = rocmsmi.rsmi_dev_fan_speed_set(device, 0, int(fanLevel))
if rsmi_ret_ok(ret, device, 'set_fan_speed'):
printLog(device, 'Successfully set fan speed to level %s' % (str(int(fanLevel))), None)
printLogSpacer()
def setPerformanceLevel(deviceList, level):
""" Set the Performance Level for a specified device.
@param deviceList: List of DRM devices (can be a single-item list)
@param level: Performance Level to set
"""
printLogSpacer(' Set Performance Level ')
validLevels = ['auto', 'low', 'high', 'manual']
for device in deviceList:
if level not in validLevels:
printErrLog(device, 'Unable to set Performance Level')
logging.error('Invalid Performance level: %s', level)
else:
ret = rocmsmi.rsmi_dev_perf_level_set(device, rsmi_dev_perf_level_t(validLevels.index(level)))
if rsmi_ret_ok(ret, device, 'set_perf_level'):
printLog(device, 'Performance level set to %s' % (str(level)), None)
printLogSpacer()
def setPowerOverDrive(deviceList, value, autoRespond):
""" Use Power OverDrive to change the the maximum power available power
available to the GPU in Watts. May be limited by the maximum power the
VBIOS is configured to allow this card to use in OverDrive mode.
@param deviceList: List of DRM devices (can be a single-item list)
@param value: New maximum power to assign to the target device, in Watts
@param autoRespond: Response to automatically provide for all prompts
"""
global RETCODE, PRINT_JSON
try:
int(value)
except ValueError:
printLog(None, 'Unable to set Power OverDrive', None)
logging.error('%s is not an integer', value)
RETCODE = 1
return
# Wattage input value converted to microWatt for ROCm SMI Lib
if int(value) == 0:
printLogSpacer(' Reset GPU Power OverDrive ')
else:
printLogSpacer(' Set GPU Power OverDrive ')
# Value in Watts - stored early this way to avoid strenuous value type conversions
strValue = value
specWarningConfirmed = False
for device in deviceList:
# Continue to next device in deviceList loop if the device is a secondary die
if checkIfSecondaryDie(device):
logging.debug("Unavailable for secondary die.")
continue
power_cap_min = c_uint64()
power_cap_max = c_uint64()
current_power_cap = c_uint64()
default_power_cap = c_uint64()
new_power_cap = c_uint64()
ret = rocmsmi.rsmi_dev_power_cap_get(device, 0, byref(current_power_cap))
if ret != 0:
logging.debug("Unable to retireive current power cap.")
ret = rocmsmi.rsmi_dev_power_cap_default_get(device, byref(default_power_cap))
# If rsmi_dev_power_cap_default_get fails, use manual workaround to fetch default power cap
if ret != 0:
logging.debug("Unable to retrieve default power cap; retrieving via reset.")
ret = rocmsmi.rsmi_dev_power_cap_set(device, 0, 0)
ret = rocmsmi.rsmi_dev_power_cap_get(device, 0, byref(default_power_cap))
if int(value) == 0:
new_power_cap = default_power_cap
else:
new_power_cap.value = int(value) * 1000000
ret = rocmsmi.rsmi_dev_power_cap_range_get(device, 0, byref(power_cap_max), byref(power_cap_min))
if rsmi_ret_ok(ret, device, 'get_power_cap_range') == False:
printErrLog(device, 'Unable to parse Power OverDrive range')
RETCODE = 1
continue
if int(strValue) > (power_cap_max.value / 1000000):
printErrLog(device, 'Unable to set Power OverDrive')
logging.error('GPU[%s]\t\t: Value cannot be greater than: %dW ', device, power_cap_max.value / 1000000)
RETCODE = 1
continue
if int(strValue) < (power_cap_min.value / 1000000):
printErrLog(device, 'Unable to set Power OverDrive')
logging.error('GPU[%s]\t\t: Value cannot be less than: %dW ', device, power_cap_min.value / 1000000)
RETCODE = 1
continue
if new_power_cap.value == current_power_cap.value:
printErrLog(device,'Max power was already at: {}W'.format(new_power_cap.value / 1000000))
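        # Compare against at least the default cap so the out-of-spec warning only triggers above stock limits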
if current_power_cap.value < default_power_cap.value:
current_power_cap.value = default_power_cap.value
if not specWarningConfirmed and new_power_cap.value > current_power_cap.value:
confirmOutOfSpecWarning(autoRespond)
specWarningConfirmed = True
ret = rocmsmi.rsmi_dev_power_cap_set(device, 0, new_power_cap)
if rsmi_ret_ok(ret, device, 'set_power_cap'):
if int(value) == 0:
power_cap = c_uint64()
ret = rocmsmi.rsmi_dev_power_cap_get(device, 0, byref(power_cap))
if rsmi_ret_ok(ret, device, 'get_power_cap'):
if not PRINT_JSON:
printLog(device,
'Successfully reset Power OverDrive to: %sW' % (int(power_cap.value / 1000000)), None)
else:
if not PRINT_JSON:
ret = rocmsmi.rsmi_dev_power_cap_get(device, 0, byref(current_power_cap))
if current_power_cap.value == new_power_cap.value:
printLog(device, 'Successfully set power to: %sW' % (strValue), None)
else:
                    printErrLog(device, 'Unable to set power to: %sW, current value is %sW' % \
(strValue, int(current_power_cap.value / 1000000)))
else:
if int(value) == 0:
printErrLog(device, 'Unable to reset Power OverDrive to default')
else:
printErrLog(device, 'Unable to set Power OverDrive to ' + strValue + 'W')
printLogSpacer()
def setProfile(deviceList, profile):
""" Set Power Profile, or set CUSTOM Power Profile values for a list of devices.
@param deviceList: List of DRM devices (can be a single-item list)
@param profile: Profile to set
"""
printLogSpacer(' Set Power Profile ')
status = rsmi_power_profile_status_t()
for device in deviceList:
# Get previous profile
ret = rocmsmi.rsmi_dev_power_profile_presets_get(device, 0, byref(status))
if rsmi_ret_ok(ret, device, 'get_power_profile'):
previousProfile = profileString(status.current)
# Get desired profile
desiredProfile = 'UNKNOWN'
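            # Numeric input 1-7 selects a single profile bit; profileString() maps that bit back to its name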
if str(profile).isnumeric() and int(profile) > 0 and int(profile) < 8:
desiredProfile = profileString(2 ** (int(profile) - 1))
elif str(profileString(str(profile).replace('_', ' ').upper())).isnumeric():
desiredProfile = str(profile).replace('_', ' ').upper()
else:
printErrLog(device, 'Unable to set profile to: %s (UNKNOWN profile)' % (str(profile)))
return
# Set profile to desired profile
if previousProfile == desiredProfile:
printLog(device, 'Profile was already set to', previousProfile)
return
else:
ret = rocmsmi.rsmi_dev_power_profile_set(device, 0, profileString(desiredProfile))
if rsmi_ret_ok(ret, device, 'set_power_profile'):
# Get current profile
ret = rocmsmi.rsmi_dev_power_profile_presets_get(device, 0, byref(status))
if rsmi_ret_ok(ret, device, 'get_power_profile_presets'):
currentProfile = profileString(status.current)
if currentProfile == desiredProfile:
printLog(device, 'Successfully set profile to', desiredProfile)
else:
printErrLog(device, 'Failed to set profile to: %s' % (desiredProfile))
printLogSpacer()
def setComputePartition(deviceList, computePartitionType):
""" Sets compute partitioning for a list of device
@param deviceList: List of DRM devices (can be a single-item list)
@param computePartition: Compute Partition type to set as
"""
printLogSpacer(' Set compute partition to %s ' % (str(computePartitionType).upper()))
for device in deviceList:
computePartitionType = computePartitionType.upper()
if computePartitionType not in compute_partition_type_l:
printErrLog(device, 'Invalid compute partition type %s'
'\nValid compute partition types are %s'
% ( computePartitionType.upper(),
(', '.join(map(str, compute_partition_type_l))) ))
return (None, None)
ret = rocmsmi.rsmi_dev_compute_partition_set(device,
rsmi_compute_partition_type_dict[computePartitionType])
if rsmi_ret_ok(ret, device, 'set_compute_partition', silent=True):
printLog(device,
'Successfully set compute partition to %s' % (computePartitionType),
None)
elif ret == rsmi_status_t.RSMI_STATUS_PERMISSION:
printLog(device, 'Permission denied', None)
elif ret == rsmi_status_t.RSMI_STATUS_SETTING_UNAVAILABLE:
printLog(device, 'Requested setting (%s) is unavailable for current device'
%computePartitionType, None)
elif ret == rsmi_status_t.RSMI_STATUS_NOT_SUPPORTED:
printLog(device, 'Not supported on the given system', None)
else:
rsmi_ret_ok(ret, device, 'set_compute_partition')
            printErrLog(device, 'Failed to set compute partition, even though device supports it.')
printLogSpacer()
def progressbar(it, prefix="", size=60, out=sys.stdout):
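    """ Yield items from an iterable while rendering a console progress bar
    @param it: Iterable that drives the bar (one tick per item consumed)
    @param prefix: Text printed before the bar
    @param size: Width of the bar in characters
    @param out: Stream the bar is written to
    """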
count = len(it)
def show(j):
x = int(size*j/count)
lock = multiprocessing.Lock()
lock.acquire()
print("{}[{}{}] {}/{} secs remain".format(prefix, u"█"*x, "."*(size-x), j, count),
end='\r', file=out, flush=True)
lock.release()
show(0)
for i, item in enumerate(it):
yield item
show(i+1)
lock = multiprocessing.Lock()
lock.acquire()
print("\n", flush=True, file=out)
lock.release()
def showProgressbar(title="", timeInSeconds=13):
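    """ Display a progress bar for roughly the given number of seconds
    Intended to run in a separate process while a long RSMI call blocks.
    @param title: Label printed before the bar
    @param timeInSeconds: Approximate duration of the bar in seconds
    """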
if title != "":
title += ": "
for i in progressbar(range(timeInSeconds), title, 40):
time.sleep(1)
def setNPSMode(deviceList, npsMode):
""" Sets nps mode (memory partition) for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
    @param npsMode: NPS mode to set
"""
printLogSpacer(' Set nps mode to %s ' % (str(npsMode).upper()))
for device in deviceList:
npsMode = npsMode.upper()
if npsMode not in nps_mode_type_l:
printErrLog(device, 'Invalid nps mode type %s'
'\nValid nps mode types are %s'
% ( npsMode.upper(),
(', '.join(map(str, nps_mode_type_l))) ))
return (None, None)
t1 = multiprocessing.Process(target=showProgressbar,
args=("Updating NPS mode",13,))
t1.start()
addExtraLine=True
start=time.time()
ret = rocmsmi.rsmi_dev_nps_mode_set(device,
rsmi_nps_mode_type_dict[npsMode])
stop=time.time()
duration=stop-start
if t1.is_alive():
t1.terminate()
t1.join()
if duration < float(0.1): # For longer runs, add extra line before output
addExtraLine=False # This is to prevent overriding progress bar
if rsmi_ret_ok(ret, device, 'set_NPS_mode', silent=True):
printLog(device,
'Successfully set nps mode to %s' % (npsMode),
None, addExtraLine)
elif ret == rsmi_status_t.RSMI_STATUS_PERMISSION:
printLog(device, 'Permission denied', None, addExtraLine)
elif ret == rsmi_status_t.RSMI_STATUS_NOT_SUPPORTED:
printLog(device, 'Not supported on the given system', None, addExtraLine)
else:
rsmi_ret_ok(ret, device, 'set_NPS_mode')
printErrLog(device, 'Failed to retrieve NPS mode, even though device supports it.')
printLogSpacer()
def showAllConcise(deviceList):
""" Display critical info for all devices in a concise format
@param deviceList: List of DRM devices (can be a single-item list)
"""
global PRINT_JSON
if PRINT_JSON:
print('ERROR: Cannot print JSON/CSV output for concise output')
sys.exit(1)
printLogSpacer(' Concise Info ')
header = ['GPU', 'Temp (DieEdge)', 'AvgPwr', 'SCLK', 'MCLK', 'Fan', 'Perf', 'PwrCap', 'VRAM%', 'GPU%']
head_widths = [len(head) + 2 for head in header]
values = {}
for device in deviceList:
temp = str(getTemp(device, 'edge'))
if temp != 'N/A':
temp += 'c'
avgPwr = str(getPower(device))
if avgPwr != '0.0' and avgPwr != 'N/A':
avgPwr += 'W'
else:
avgPwr = 'N/A'
concise = True
sclk = showCurrentClocks([device], 'sclk', concise)
mclk = showCurrentClocks([device], 'mclk', concise)
(fanLevel, fanSpeed) = getFanSpeed(device)
fan = str(fanSpeed) + '%'
if getPerfLevel(device) != -1:
perf = getPerfLevel(device)
else:
perf = 'Unsupported'
if getMaxPower(device) != -1:
pwrCap = str(getMaxPower(device)) + 'W'
else:
pwrCap = 'Unsupported'
if getGpuUse(device) != -1:
gpu_busy = str(getGpuUse(device)) + '%'
else:
gpu_busy = 'Unsupported'
vram_used, vram_total = getMemInfo(device, 'vram', True)
        mem_use_pct = 0
        if vram_used is None:
            mem_use_pct = 'Unsupported'
        elif vram_total is not None and float(vram_total) != 0:
            mem_use_pct = '% 3.0f%%' % (100 * (float(vram_used) / float(vram_total)))
values['card%s' % (str(device))] = [device, temp, avgPwr, sclk, mclk, fan, str(perf).lower(), pwrCap,
mem_use_pct, gpu_busy]
val_widths = {}
for device in deviceList:
val_widths[device] = [len(str(val)) + 2 for val in values['card%s' % (str(device))]]
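    # max_widths starts as a reference to head_widths and is widened in place to fit each column's largest value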
max_widths = head_widths
for device in deviceList:
for col in range(len(val_widths[device])):
max_widths[col] = max(max_widths[col], val_widths[device][col])
printLog(None, "".join(word.ljust(max_widths[col]) for col, word in zip(range(len(max_widths)), header)), None)
for device in deviceList:
printLog(None, "".join(str(word).ljust(max_widths[col]) for col, word in
zip(range(len(max_widths)), values['card%s' % (str(device))])), None)
printLogSpacer()
def showAllConciseHw(deviceList):
""" Display critical Hardware info for all devices in a concise format
@param deviceList: List of DRM devices (can be a single-item list)
"""
global PRINT_JSON
if PRINT_JSON:
print('ERROR: Cannot print JSON/CSV output for concise hardware output')
sys.exit(1)
printLogSpacer(' Concise Hardware Info ')
header = ['GPU', 'DID', 'GFX RAS', 'SDMA RAS', 'UMC RAS', 'VBIOS', 'BUS']
head_widths = [len(head) + 2 for head in header]
values = {}
for device in deviceList:
gpuid = getId(device)
if str(gpuid).startswith('0x'):
gpuid = str(gpuid)[2:]
gfxRas = getRasEnablement(device, 'GFX')
sdmaRas = getRasEnablement(device, 'SDMA')
umcRas = getRasEnablement(device, 'UMC')
vbios = getVbiosVersion(device)
bus = getBus(device)
values['card%s' % (str(device))] = [device, gpuid, gfxRas, sdmaRas, umcRas, vbios, bus]
val_widths = {}
for device in deviceList:
val_widths[device] = [len(str(val)) + 2 for val in values['card%s' % (str(device))]]
max_widths = head_widths
for device in deviceList:
for col in range(len(val_widths[device])):
max_widths[col] = max(max_widths[col], val_widths[device][col])
printLog(None, "".join(word.ljust(max_widths[col]) for col, word in zip(range(len(max_widths)), header)), None)
for device in deviceList:
printLog(None, "".join(str(word).ljust(max_widths[col]) for col, word in
zip(range(len(max_widths)), values['card%s' % (str(device))])), None)
printLogSpacer()
def showBus(deviceList):
""" Display PCI Bus info
@param deviceList: List of DRM devices (can be a single-item list)
"""
printLogSpacer(' PCI Bus ID ')
for device in deviceList:
printLog(device, 'PCI Bus', getBus(device))
printLogSpacer()
def showClocks(deviceList):
""" Display all available clocks for a list of devices
Current clocks marked with a '*' symbol
@param deviceList: List of DRM devices (can be a single-item list)
"""
freq = rsmi_frequencies_t()
bw = rsmi_pcie_bandwidth_t()
printLogSpacer(' Supported clock frequencies ')
for device in deviceList:
for clk_type in sorted(rsmi_clk_names_dict):
freq_list = []
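            # Probe support by passing a NULL output pointer; the code treats a return of 1 as "clock type supported"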
if rocmsmi.rsmi_dev_gpu_clk_freq_get(device, rsmi_clk_names_dict[clk_type], None) == 1:
ret = rocmsmi.rsmi_dev_gpu_clk_freq_get(device, rsmi_clk_names_dict[clk_type], byref(freq))
if rsmi_ret_ok(ret, device, 'get_clk_freq_' + clk_type, True):
printLog(device, 'Supported %s frequencies on GPU%s' % (clk_type, str(device)), None)
for x in range(freq.num_supported):
fr = '{:>.0f}Mhz'.format(freq.frequency[x] / 1000000)
if x == freq.current:
printLog(device, str(x), str(fr) + ' *')
else:
printLog(device, str(x), str(fr))
printLog(device, '', None)
else:
logging.debug('{} frequency is unsupported on device[{}]'.format(clk_type, device))
printLog(device, '', None)
if rocmsmi.rsmi_dev_pci_bandwidth_get(device, None) == 1:
ret = rocmsmi.rsmi_dev_pci_bandwidth_get(device, byref(bw))
if rsmi_ret_ok(ret, device, 'get_PCIe_bandwidth', True):
printLog(device, 'Supported %s frequencies on GPU%s' % ('PCIe', str(device)), None)
freq_list = []
for x in range(bw.transfer_rate.num_supported):
fr = '{:>.1f}GT/s x{}'.format(bw.transfer_rate.frequency[x] / 1000000000, bw.lanes[x])
if x == bw.transfer_rate.current:
printLog(device, str(x), str(fr) + ' *')
else:
printLog(device, str(x), str(fr))
printLog(device, '', None)
else:
logging.debug('PCIe frequency is unsupported on device [{}]'.format(device))
printLog(device, '', None)
printLogSpacer(None, '-') # divider between devices for better visibility
printLogSpacer()
def showCurrentClocks(deviceList, clk_defined=None, concise=False):
""" Display all clocks for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
    @param clk_defined: Clock type to display (all clock types if None)
    @param concise: If True, return the current clock string instead of printing it
    """
global PRINT_JSON
freq = rsmi_frequencies_t()
bw = rsmi_pcie_bandwidth_t()
currentString = ''
sortedClocksArray = []
if not concise:
printLogSpacer(' Current clock frequencies ')
for device in deviceList:
if clk_defined:
if rocmsmi.rsmi_dev_gpu_clk_freq_get(device, rsmi_clk_names_dict[clk_defined], None) == 1:
ret = rocmsmi.rsmi_dev_gpu_clk_freq_get(device, rsmi_clk_names_dict[clk_defined], byref(freq))
if rsmi_ret_ok(ret, device, 'get_gpu_clk_freq_' + str(clk_defined), silent=True):
levl = freq.current
if levl >= freq.num_supported:
printLog(device, '%s current clock frequency not found' % (clk_defined), None)
continue
fr = freq.frequency[levl] / 1000000
if concise: # in case function is used for concise output, no need to print.
return '{:.0f}Mhz'.format(fr)
printLog(device, '{} clock level'.format(clk_defined), '{} ({:.0f}Mhz)'.format(levl, fr))
else:
printErrLog(device, '%s clock is unsupported' % (clk_defined))
else: # if clk is not defined, will display all current clk
for clk_type in sorted(rsmi_clk_names_dict):
if rocmsmi.rsmi_dev_gpu_clk_freq_get(device, rsmi_clk_names_dict[clk_type], None) == 1:
ret = rocmsmi.rsmi_dev_gpu_clk_freq_get(device, rsmi_clk_names_dict[clk_type], byref(freq))
if rsmi_ret_ok(ret, device, 'get_clk_freq_' + str(clk_type), True):
levl = freq.current
if levl >= freq.num_supported:
printLog(device, '%s current clock frequency not found' % (clk_type), None)
continue
fr = freq.frequency[levl] / 1000000
if PRINT_JSON:
printLog(device, '%s clock speed:' % (clk_type), '(%sMhz)' % (str(fr)[:-2]))
printLog(device, '%s clock level:' % (clk_type), levl)
else:
printLog(device, '%s clock level: %s' % (clk_type, levl), '(%sMhz)' % (str(fr)[:-2]))
else:
logging.debug('{} clock is unsupported on device[{}]'.format(clk_type, device))
# pcie clocks
if rocmsmi.rsmi_dev_pci_bandwidth_get(device, None) == 1:
ret = rocmsmi.rsmi_dev_pci_bandwidth_get(device, byref(bw))
if rsmi_ret_ok(ret, device, 'get_PCIe_bandwidth', True):
current_f = bw.transfer_rate.current
if current_f >= bw.transfer_rate.num_supported:
                    printLog(device, 'PCIe current clock frequency not found', None)
continue
fr = '{:.1f}GT/s x{}'.format(bw.transfer_rate.frequency[current_f] / 1000000000,
bw.lanes[current_f])
printLog(device, 'pcie clock level', '{} ({})'.format(current_f, fr))
else:
logging.debug('PCIe clock is unsupported on device[{}]'.format(device))
printLogSpacer()
def showCurrentFans(deviceList):
""" Display the current fan speed for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
"""
global PRINT_JSON
printLogSpacer(' Current Fan Metric ')
rpmSpeed = c_int64()
sensor_ind = c_uint32(0)
for device in deviceList:
(fanLevel, fanSpeed) = getFanSpeed(device)
fanSpeed = round(fanSpeed)
if fanLevel == 0 or fanSpeed == 0:
printLog(device, 'Unable to detect fan speed for GPU %d' % (device), None)
logging.debug('Current fan speed is: %d\n' % (fanSpeed) + \
' Current fan level is: %d\n' % (fanLevel) + \
' (GPU might be cooled with a non-PWM fan)')
continue
if PRINT_JSON:
printLog(device, 'Fan speed (level)', str(fanLevel))
printLog(device, 'Fan speed (%)', str(fanSpeed))
else:
printLog(device, 'Fan Level', str(fanLevel) + ' (%s%%)' % (str(fanSpeed)))
ret = rocmsmi.rsmi_dev_fan_rpms_get(device, sensor_ind, byref(rpmSpeed))
if rsmi_ret_ok(ret, device, 'get_fan_rpms'):
printLog(device, 'Fan RPM', rpmSpeed.value)
printLogSpacer()
def showCurrentTemps(deviceList):
""" Display all available temperatures for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
"""
printLogSpacer(' Temperature ')
for device in deviceList:
for sensor in temp_type_lst:
temp = getTemp(device, sensor)
if temp != 'N/A':
printLog(device, 'Temperature (Sensor %s) (C)' % (sensor), temp)
else:
printInfoLog(device, 'Temperature (Sensor %s) (C)' % (sensor), temp)
printLogSpacer()
def showFwInfo(deviceList, fwType):
""" Show the requested FW information for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
@param fwType: [$validFwBlocks] FW block version to display (all if left empty)
"""
if not fwType or 'all' in fwType:
firmware_blocks = fw_block_names_l
else:
        # Iterate over a copy so removing entries doesn't skip items in the original list
        for name in list(fwType):  # cleaning list from wrong values
            if name.upper() not in fw_block_names_l:
                fwType.remove(name)
firmware_blocks = fwType
printLogSpacer(' Firmware Information ')
for device in deviceList:
fw_ver_list = []
fw_ver = c_uint64()
for fw_name in firmware_blocks:
fw_name = fw_name.upper()
ret = rocmsmi.rsmi_dev_firmware_version_get(device, fw_block_names_l.index(fw_name), byref(fw_ver))
if rsmi_ret_ok(ret, device, 'get_firmware_version_' + str(fw_name)):
# The VCN, VCE, UVD, SOS and ASD firmware's value needs to be in hexadecimal
if fw_name in ['VCN', 'VCE', 'UVD', 'SOS', 'ASD']:
printLog(device, '%s firmware version' % (fw_name),
'\t0x%s' % (str(hex(fw_ver.value))[2:].zfill(8)))
# The TA XGMI, TA RAS, and SMC firmware's hex value looks like 0x12345678
# However, they are parsed as: int(0x12).int(0x34).int(0x56).int(0x78)
# Which results in the following: 12.34.56.78
elif fw_name in ['TA XGMI', 'TA RAS', 'SMC']:
pos1 = str('%02d' % int((('0x%s' % (str(hex(fw_ver.value))[2:].zfill(8))[0:2])), 16))
pos2 = str('%02d' % int((('0x%s' % (str(hex(fw_ver.value))[2:].zfill(8))[2:4])), 16))
pos3 = str('%02d' % int((('0x%s' % (str(hex(fw_ver.value))[2:].zfill(8))[4:6])), 16))
pos4 = str('%02d' % int((('0x%s' % (str(hex(fw_ver.value))[2:].zfill(8))[6:8])), 16))
printLog(device, '%s firmware version' % (fw_name), '\t%s.%s.%s.%s' % (pos1, pos2, pos3, pos4))
# The ME, MC, and CE firmware names are only 2 characters, so they need an additional tab
elif fw_name in ['ME', 'MC', 'CE']:
printLog(device, '%s firmware version' % (fw_name), '\t\t%s' % (str(fw_ver.value)))
else:
printLog(device, '%s firmware version' % (fw_name), '\t%s' % (str(fw_ver.value)))
printLogSpacer()
def showGpusByPid(pidList):
""" Show GPUs used by a specific Process ID (pid)
Print out the GPU(s) used by a specific KFD process
If pidList is empty, print all used GPUs for all KFD processes
@param pidList: List of PIDs to check
"""
printLogSpacer(' GPUs Indexed by PID ')
# If pidList is empty then we were given 0 arguments, so they want all PIDs
# dv_indices = (c_uint32 * dv_limit)()
num_devices = c_uint32()
dv_indices = c_void_p()
if not pidList:
pidList = getPidList()
if not pidList:
printLog(None, 'No KFD PIDs currently running', None)
printLogSpacer()
return
for pid in pidList:
ret = rocmsmi.rsmi_compute_process_gpus_get(int(pid), None, byref(num_devices))
if rsmi_ret_ok(ret, metric=('PID ' + pid)):
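            # The first call returned only the device count; size the ctypes array for the second call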
dv_indices = (c_uint32 * num_devices.value)()
ret = rocmsmi.rsmi_compute_process_gpus_get(int(pid), dv_indices, byref(num_devices))
if rsmi_ret_ok(ret, metric='get_gpu_compute_process'):
metricName = 'PID %s is using %s DRM device(s)' % (pid, str(num_devices.value))
if (num_devices.value):
printListLog(metricName, list(dv_indices))
else:
printLog(None, metricName, None)
else:
        printLog(None, 'Unable to get list of KFD PIDs. A kernel update may be needed', None)
printLogSpacer()
def getCoarseGrainUtil(device, typeName=None):
""" Find Coarse Grain Utilization
    If typeName is not given, returns an array of all available sensors,
    where each sensor's type and value can be read like this:
for ut_counter in utilization_counters:
printLog(device, utilization_counter_name[ut_counter.type], ut_counter.val)
@param device: DRM device identifier
@param typeName: 'GFX Activity', 'Memory Activity'
"""
timestamp = c_uint64(0)
if typeName != None:
try:
i = utilization_counter_name.index(typeName)
length = 1
utilization_counters = (rsmi_utilization_counter_t * length)()
utilization_counters[0].type = c_int(i)
except ValueError:
printLog(None, "No such coarse grain counter type")
return -1
else:
length = rsmi_utilization_counter_type.RSMI_UTILIZATION_COUNTER_LAST + 1
utilization_counters = (rsmi_utilization_counter_t * length)()
# populate array with all existing types to query
for i in range(0, length):
utilization_counters[i].type = c_int(i)
ret = rocmsmi.rsmi_utilization_count_get(device, utilization_counters, length, byref(timestamp))
if rsmi_ret_ok(ret, device, 'get_utilization_count_'+ str(typeName), True):
return utilization_counters
return -1
def showGpuUse(deviceList):
""" Display GPU use for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
"""
printLogSpacer(' % time GPU is busy ')
for device in deviceList:
if getGpuUse(device) != -1:
printLog(device, 'GPU use (%)', getGpuUse(device))
else:
printLog(device, 'GPU use Unsupported', None)
util_counters = getCoarseGrainUtil(device, "GFX Activity")
if util_counters != -1:
for ut_counter in util_counters:
printLog(device, utilization_counter_name[ut_counter.type], ut_counter.val)
else:
printInfoLog(device, 'GFX Activity', 'N/A')
printLogSpacer()
def showEnergy(deviceList):
""" Display amount of energy consumed by device until now
    The default counter value is 10000b, indicating the energy status unit
    is a 15.3 micro-Joule increment.
@param deviceList: List of DRM devices (can be a single-item list)
"""
power = c_uint64()
timestamp = c_uint64()
counter_resolution = c_float()
printLogSpacer(" Consumed Energy ")
for device in deviceList:
ret = rocmsmi.rsmi_dev_energy_count_get(device, byref(power), byref(counter_resolution), byref(timestamp))
if rsmi_ret_ok(ret, device, "% Energy Counter"):
printLog(device, "Energy counter", power.value)
printLog(device, "Accumulated Energy (uJ)", round(power.value * counter_resolution.value, 2))
printLogSpacer()
def showId(deviceList):
""" Display the device ID for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
"""
printLogSpacer(' ID ')
for device in deviceList:
printLog(device, 'GPU ID', getId(device))
printLogSpacer()
def showMaxPower(deviceList):
""" Display the maximum Graphics Package Power that this GPU will attempt to consume
before it begins throttling performance
@param deviceList: List of DRM devices (can be a single-item list)
"""
printLogSpacer(' Power Cap ')
for device in deviceList:
if getMaxPower(device) != -1:
printLog(device, 'Max Graphics Package Power (W)', getMaxPower(device))
else:
printLog(device, 'Max Graphics Package Power Unsupported', None)
printLogSpacer()
def showMemInfo(deviceList, memType):
""" Display Memory information for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
@param memType: [$validMemTypes] Type of memory information to display
"""
# Python will pass in a list of values as a single-value list
# If we get 'all' as the string, just set the list to all supported types
# Otherwise, split the single-item list by space, then split each element
# up to process it below
if 'all' in memType:
returnTypes = memory_type_l
else:
returnTypes = memType
printLogSpacer(' Memory Usage (Bytes) ')
for device in deviceList:
for mem in returnTypes:
mem = mem.upper()
memInfo = getMemInfo(device, mem)
printLog(device, '%s Total Memory (B)' % (mem), memInfo[1])
printLog(device, '%s Total Used Memory (B)' % (mem), memInfo[0])
printLogSpacer()
def showMemUse(deviceList):
""" Display GPU memory usage for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
"""
memoryUse = c_uint64()
printLogSpacer(' Current Memory Use ')
for device in deviceList:
ret = rocmsmi.rsmi_dev_memory_busy_percent_get(device, byref(memoryUse))
if rsmi_ret_ok(ret, device, '% memory use'):
printLog(device, 'GPU memory use (%)', memoryUse.value)
util_counters = getCoarseGrainUtil(device, "Memory Activity")
if util_counters != -1:
for ut_counter in util_counters:
printLog(device, utilization_counter_name[ut_counter.type], ut_counter.val)
else:
printLog(device, 'Memory Activity', 'N/A')
printLogSpacer()
def showMemVendor(deviceList):
""" Display GPU memory vendor for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
"""
vendor = create_string_buffer(256)
printLogSpacer(' Memory Vendor ')
for device in deviceList:
ret = rocmsmi.rsmi_dev_vram_vendor_get(device, vendor, 256)
if rsmi_ret_ok(ret, device, 'get_vram_vendor') and vendor.value.decode():
printLog(device, 'GPU memory vendor', vendor.value.decode())
else:
logging.debug('GPU memory vendor missing or not supported')
printLogSpacer()
def showOverDrive(deviceList, odtype):
""" Display current OverDrive level for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
@param odtype: [sclk|mclk] OverDrive type
"""
    global RETCODE
    rsmi_od = c_uint32()
printLogSpacer(' OverDrive Level ')
for device in deviceList:
if odtype == 'sclk':
odStr = 'GPU'
ret = rocmsmi.rsmi_dev_overdrive_level_get(device, byref(rsmi_od))
od = rsmi_od.value
if not rsmi_ret_ok(ret, device, 'get_overdrive_level_' + str(odtype)):
continue
elif odtype == 'mclk':
odStr = 'GPU Memory'
ret = rocmsmi.rsmi_dev_mem_overdrive_level_get(device, byref(rsmi_od))
od = rsmi_od.value
if not rsmi_ret_ok(ret, device, 'get_mem_overdrive_level_' + str(odtype)):
continue
else:
printErrLog(device, 'Unable to retrieve OverDrive')
logging.error('Unsupported clock type %s', odtype)
            RETCODE = 1
            continue
printLog(device, odStr + ' OverDrive value (%)', od)
printLogSpacer()
def showPcieBw(deviceList):
""" Display estimated PCIe bandwidth usage for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
"""
sent = c_uint64()
received = c_uint64()
max_pkt_sz = c_uint64()
printLogSpacer(' Measured PCIe Bandwidth ')
for device in deviceList:
ret = rocmsmi.rsmi_dev_pci_throughput_get(device, byref(sent), byref(received), byref(max_pkt_sz))
if rsmi_ret_ok(ret, device, 'get_PCIe_bandwidth'):
# Use 1024.0 to ensure that the result is a float and not integer division
bw = ((received.value + sent.value) * max_pkt_sz.value) / 1024.0 / 1024.0
# Use the bwstr below to control precision on the string
bwstr = '%.3f' % bw
printLog(device, 'Estimated maximum PCIe bandwidth over the last second (MB/s)', bwstr)
else:
logging.debug('GPU PCIe bandwidth usage not supported')
printLogSpacer()
def showPcieReplayCount(deviceList):
""" Display number of PCIe replays for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
"""
counter = c_uint64()
printLogSpacer(' PCIe Replay Counter ')
for device in deviceList:
ret = rocmsmi.rsmi_dev_pci_replay_counter_get(device, byref(counter))
if rsmi_ret_ok(ret, device, 'PCIe Replay Count'):
printLog(device, 'PCIe Replay Count', counter.value)
printLogSpacer()
def showPerformanceLevel(deviceList):
""" Display current Performance Level for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
"""
printLogSpacer(' Show Performance Level ')
for device in deviceList:
if getPerfLevel(device) != -1:
printLog(device, 'Performance Level', str(getPerfLevel(device)).lower())
else:
printLog(device, 'Performance Level Unsupported', None)
printLogSpacer()
def showPids():
""" Show Information for PIDs created in a KFD (Compute) context """
printLogSpacer(' KFD Processes ')
dataArray = []
dataArray.append(['PID', 'PROCESS NAME', 'GPU(s)', 'VRAM USED', 'SDMA USED', 'CU OCCUPANCY'])
pidList = getPidList()
if not pidList:
printLog(None, 'No KFD PIDs currently running', None)
printLogSpacer()
return
dv_indices = c_void_p()
num_devices = c_uint32()
proc = rsmi_process_info_t()
for pid in pidList:
gpuNumber = 'UNKNOWN'
vramUsage = 'UNKNOWN'
sdmaUsage = 'UNKNOWN'
cuOccupancy = 'UNKNOWN'
ret = rocmsmi.rsmi_compute_process_gpus_get(int(pid), None, byref(num_devices))
if rsmi_ret_ok(ret, metric='get_gpu_compute_process'):
dv_indices = (c_uint32 * num_devices.value)()
ret = rocmsmi.rsmi_compute_process_gpus_get(int(pid), dv_indices, byref(num_devices))
if rsmi_ret_ok(ret, metric='get_gpu_compute_process'):
gpuNumber = str(num_devices.value)
else:
logging.debug('Unable to fetch GPU number by PID')
ret = rocmsmi.rsmi_compute_process_info_by_pid_get(int(pid), byref(proc))
if rsmi_ret_ok(ret, metric='get_compute_process_info_by_pid'):
vramUsage = proc.vram_usage
sdmaUsage = proc.sdma_usage
cuOccupancy = proc.cu_occupancy
else:
logging.debug('Unable to fetch process info by PID')
dataArray.append([pid, getProcessName(pid), str(gpuNumber), str(vramUsage), str(sdmaUsage), str(cuOccupancy)])
printLog(None, 'KFD process information:', None)
print2DArray(dataArray)
printLogSpacer()
def showPower(deviceList):
""" Display current Average Graphics Package Power Consumption for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
"""
    secondaryPresent = False
printLogSpacer(' Power Consumption ')
for device in deviceList:
        if checkIfSecondaryDie(device):
            printLog(device, 'Average Graphics Package Power (W)', "N/A (Secondary die)")
            secondaryPresent = True
        else:
            power = getPower(device)
            if str(power) != '0.0':
                printLog(device, 'Average Graphics Package Power (W)', power)
            else:
                printErrLog(device, 'Unable to get Average Graphics Package Power Consumption')
if secondaryPresent:
printLog(None, "\n\t\tPrimary die (usually one above or below the secondary) shows total (primary + secondary) socket power information", None)
printLogSpacer()
def showPowerPlayTable(deviceList):
""" Display current GPU Memory clock frequencies and voltages for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
"""
global PRINT_JSON
if PRINT_JSON:
return
printLogSpacer(' GPU Memory clock frequencies and voltages ')
odvf = rsmi_od_volt_freq_data_t()
for device in deviceList:
ret = rocmsmi.rsmi_dev_od_volt_info_get(device, byref(odvf))
if rsmi_ret_ok(ret, device, 'get_od_volt'):
# TODO: Make this more dynamic and less hard-coded if possible
printLog(device, 'OD_SCLK:', None)
printLog(device, '0: %sMhz' % (int(odvf.curr_sclk_range.lower_bound / 1000000)), None)
printLog(device, '1: %sMhz' % (int(odvf.curr_sclk_range.upper_bound / 1000000)), None)
printLog(device, 'OD_MCLK:', None)
printLog(device, '1: %sMhz' % (int(odvf.curr_mclk_range.upper_bound / 1000000)), None)
printLog(device, 'OD_VDDC_CURVE:', None)
for position in range(3):
printLog(device, '%d: %sMhz %smV' % (
position, int(list(odvf.curve.vc_points)[position].frequency / 1000000),
int(list(odvf.curve.vc_points)[position].voltage)), None)
printLog(device, 'OD_RANGE:', None)
printLog(device, 'SCLK: %sMhz %sMhz' % (
int(odvf.sclk_freq_limits.lower_bound / 1000000), int(odvf.sclk_freq_limits.upper_bound / 1000000)), None)
printLog(device, 'MCLK: %sMhz %sMhz' % (
int(odvf.mclk_freq_limits.lower_bound / 1000000), int(odvf.mclk_freq_limits.upper_bound / 1000000)), None)
for position in range(3):
printLog(device, 'VDDC_CURVE_SCLK[%d]: %sMhz' % (
position, int(list(odvf.curve.vc_points)[position].frequency / 1000000)), None)
printLog(device, 'VDDC_CURVE_VOLT[%d]: %smV' % (
position, int(list(odvf.curve.vc_points)[position].voltage)), None)
printLogSpacer()
def showProductName(deviceList):
""" Show the requested product name for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
"""
series = create_string_buffer(256)
model = create_string_buffer(256)
vendor = create_string_buffer(256)
vbios = create_string_buffer(256)
# sku = create_string_buffer(256)
printLogSpacer(' Product Info ')
for device in deviceList:
# Retrieve card vendor
ret = rocmsmi.rsmi_dev_vendor_name_get(device, vendor, 256)
# Only continue if GPU vendor is AMD
if rsmi_ret_ok(ret, device, 'get_vendor_name') and isAmdDevice(device):
try:
device_vendor = vendor.value.decode()
except UnicodeDecodeError:
printErrLog(device, "Unable to read device vendor")
device_vendor = "N/A"
# Retrieve the device series
ret = rocmsmi.rsmi_dev_name_get(device, series, 256)
if rsmi_ret_ok(ret, device, 'get_name'):
try:
device_series = series.value.decode()
printLog(device, 'Card series', '\t\t' + device_series)
except UnicodeDecodeError:
printErrLog(device, "Unable to read card series")
# Retrieve the device model
ret = rocmsmi.rsmi_dev_subsystem_name_get(device, model, 256)
if rsmi_ret_ok(ret, device, 'get_subsystem_name'):
try:
device_model = model.value.decode()
# padHexValue is used for applications that expect 4-digit card models
printLog(device, 'Card model', '\t\t' + padHexValue(device_model, 4))
except UnicodeDecodeError:
printErrLog(device, "Unable to read device model")
printLog(device, 'Card vendor', '\t\t' + device_vendor)
# TODO: Retrieve the SKU using 'rsmi_dev_sku_get' from the LIB
# ret = rocmsmi.rsmi_dev_sku_get(device, sku, 256)
# if rsmi_ret_ok(ret, device) and sku.value.decode():
# device_sku = sku.value.decode()
# Retrieve the device SKU as a substring from VBIOS
ret = rocmsmi.rsmi_dev_vbios_version_get(device, vbios, 256)
if rsmi_ret_ok(ret, device, 'get_vbios_version') and vbios.value.decode():
# Device SKU is just the characters in between the two '-' in vbios_version
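                # e.g. a VBIOS version like '113-D1631700-111' would yield SKU 'D1631700'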
if vbios.value.decode().count('-') == 2 and len(str(vbios.value.decode().split('-')[1])) > 1:
device_sku = vbios.value.decode().split('-')[1]
else:
device_sku = 'unknown'
printLog(device, 'Card SKU', '\t\t' + device_sku)
else:
printErrLog(device, "Unable to decode VBIOS value for device SKU")
else:
printLog(device, 'Incompatible device.\n' \
'GPU[%s]\t\t: Expected vendor name: Advanced Micro Devices, Inc. [AMD/ATI]\n' \
'GPU[%s]\t\t: Actual vendor name' % (device, device), vendor.value.decode())
printLogSpacer()
def showProfile(deviceList):
""" Display available Power Profiles for a list of devices.
@param deviceList: List of DRM devices (can be a single-item list)
"""
global PRINT_JSON
if PRINT_JSON:
return
printLogSpacer(' Show Power Profiles ')
status = rsmi_power_profile_status_t()
for device in deviceList:
ret = rocmsmi.rsmi_dev_power_profile_presets_get(device, 0, byref(status))
if rsmi_ret_ok(ret, device, 'get_power_profiles', silent=False):
binaryMaskString = str(format(status.available_profiles, '07b'))[::-1]
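            # Mask string is reversed so that index i corresponds to profile bit 2**i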
bitMaskPosition = 0
profileNumber = 0
while (bitMaskPosition < 7):
if binaryMaskString[bitMaskPosition] == '1':
profileNumber = profileNumber + 1
if 2 ** bitMaskPosition == status.current:
printLog(device, '%d. Available power profile (#%d of 7)' % \
(profileNumber, bitMaskPosition + 1), profileString(2 ** bitMaskPosition) + '*')
else:
printLog(device, '%d. Available power profile (#%d of 7)' % \
(profileNumber, bitMaskPosition + 1), profileString(2 ** bitMaskPosition))
bitMaskPosition = bitMaskPosition + 1
printLogSpacer()
def showRange(deviceList, rangeType):
""" Show the range for either the sclk or voltage for the specified devices
@param deviceList: List of DRM devices (can be a single-item list)
@param rangeType: [sclk|voltage] Type of range to return
"""
global RETCODE
if rangeType not in {'sclk', 'mclk', 'voltage'}:
printLog(None, 'Invalid range identifier %s' % (rangeType), None)
RETCODE = 1
return
printLogSpacer(' Show Valid %s Range ' % (rangeType))
odvf = rsmi_od_volt_freq_data_t()
for device in deviceList:
ret = rocmsmi.rsmi_dev_od_volt_info_get(device, byref(odvf))
if rsmi_ret_ok(ret, device, 'get_od_volt', silent=False):
if rangeType == 'sclk':
printLog(device, 'Valid sclk range: %sMhz - %sMhz' % (
int(odvf.curr_sclk_range.lower_bound / 1000000), int(odvf.curr_sclk_range.upper_bound / 1000000)), None)
if rangeType == 'mclk':
printLog(device, 'Valid mclk range: %sMhz - %sMhz' % (
int(odvf.curr_mclk_range.lower_bound / 1000000), int(odvf.curr_mclk_range.upper_bound / 1000000)), None)
if rangeType == 'voltage':
if odvf.num_regions == 0:
printErrLog(device, 'Voltage curve regions unsupported.')
continue
num_regions = c_uint32(odvf.num_regions)
regions = (rsmi_freq_volt_region_t * odvf.num_regions)()
ret = rocmsmi.rsmi_dev_od_volt_curve_regions_get(device, byref(num_regions), byref(regions))
if rsmi_ret_ok(ret, device, 'volt'):
for i in range(num_regions.value):
printLog(device,
'Region %d: Valid voltage range: %smV - %smV' % (i, regions[i].volt_range.lower_bound,
regions[i].volt_range.upper_bound),
None)
else:
printLog(device, 'Unable to display %s range' % (rangeType), None)
printLogSpacer()
def showRasInfo(deviceList, rasType):
""" Show the requested RAS information for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
@param rasType: [$validRasBlocks] RAS counter to display (all if left empty)
"""
state = rsmi_ras_err_state_t()
if not rasType or 'all' in rasType:
rasBlocks = rsmi_gpu_block_d.keys()
else:
        # Iterate over a copy so removing entries doesn't skip elements
        for name in list(rasType):
            if name.upper() not in rsmi_gpu_block_d:
                rasType.remove(name)
                printErrLog(None, '%s is not a RAS block' % (name))
rasBlocks = [block.upper() for block in rasType]
printLogSpacer(' RAS Info ')
for device in deviceList:
data = []
for block in rasBlocks:
row = []
ret = rocmsmi.rsmi_dev_ecc_status_get(device, rsmi_gpu_block_d[block], byref(state))
if rsmi_ret_ok(ret, device, 'get_ecc_status_' + str(block), True):
row.append(block)
row.append(rsmi_ras_err_stale_machine[state.value].upper())
                # Now add the error count, skipping blocks that are disabled, absent,
                # or in an unknown error state
                if rsmi_ras_err_stale_machine[state.value] not in ('disabled', 'none', 'unknown error'):
ec = rsmi_error_count_t()
ret = rocmsmi.rsmi_dev_ecc_count_get(device, rsmi_gpu_block_d[block], byref(ec))
if rsmi_ret_ok(ret, device, 'ecc err count', True):
row.append(ec.correctable_err)
row.append(ec.uncorrectable_err)
data.append(row)
printTableLog([' Block', ' Status ', 'Correctable Error', 'Uncorrectable Error'], data, device,
'RAS INFO')
# TODO: Use dynamic spacing for column widths
printLogSpacer(None, '_')
printLogSpacer()
def showRetiredPages(deviceList, retiredType='all'):
""" Show retired pages of a specified type for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
@param retiredType: Type of retired pages to show (default = all)
"""
printLogSpacer(' Pages Info ')
num_pages = c_uint32()
records = rsmi_retired_page_record_t()
for device in deviceList:
data = []
ret = rocmsmi.rsmi_dev_memory_reserved_pages_get(device, byref(num_pages), None)
if rsmi_ret_ok(ret, device, 'ras'):
records = (rsmi_retired_page_record_t * num_pages.value)()
        else:
            logging.debug('Unable to retrieve reserved page info')
            continue  # skip this device but keep processing the rest
ret = rocmsmi.rsmi_dev_memory_reserved_pages_get(device, byref(num_pages), byref(records))
for rec in records:
if (memory_page_status_l[rec.status] == retiredType or retiredType == 'all'):
data.append((hex(rec.page_address), hex(rec.page_size), memory_page_status_l[rec.status]))
if data:
printTableLog([' Page address', ' Page size', ' Status'], data, device,
retiredType.upper() + ' PAGES INFO')
printLogSpacer()
def showSerialNumber(deviceList):
""" Display the serial number for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
"""
printLogSpacer(' Serial Number ')
for device in deviceList:
sn = create_string_buffer(256)
ret = rocmsmi.rsmi_dev_serial_number_get(device, sn, 256)
try:
sn.value.decode()
except UnicodeDecodeError:
printErrLog(device, "FRU Serial Number contains non-alphanumeric characters. FRU is likely corrupted")
continue
if rsmi_ret_ok(ret, device, 'get_serial_number') and sn.value.decode():
printLog(device, 'Serial Number', sn.value.decode())
else:
printLog(device, 'Serial Number', 'N/A')
printLogSpacer()
def showUId(deviceList):
""" Display the unique device ID for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
"""
printLogSpacer(' Unique ID ')
for device in deviceList:
dv_uid = c_uint64()
ret = rocmsmi.rsmi_dev_unique_id_get(device, byref(dv_uid))
if rsmi_ret_ok(ret, device, 'get_unique_id', True) and str(hex(dv_uid.value)):
printLog(device, 'Unique ID', hex(dv_uid.value))
else:
printLog(device, 'Unique ID', 'N/A')
printLogSpacer()
def showVbiosVersion(deviceList):
""" Display the VBIOS version for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
"""
printLogSpacer(' VBIOS ')
for device in deviceList:
printLog(device, 'VBIOS version', getVbiosVersion(device))
printLogSpacer()
class _Getch:
"""
Get a single character from standard input
"""
def __init__(self):
import sys, tty
def __call__(self):
import sys, termios, tty
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def showEvents(deviceList, eventTypes):
""" Display a blocking list of events for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
@param eventTypes: List of event type names (can be a single-item list)
"""
printLogSpacer(' Show Events ')
printLog(None, 'press \'q\' or \'ctrl + c\' to quit', None)
eventTypeList = []
    for event in eventTypes:  # Drop unrecognized event types from the list
if event.replace(',', '').upper() in notification_type_names:
eventTypeList.append(event.replace(',', '').upper())
else:
printErrLog(None, 'Ignoring unrecognized event type %s' % (event.replace(',', '')))
if len(eventTypeList) == 0:
eventTypeList = notification_type_names
try:
print2DArray([['DEVICE\t', 'TIME\t', 'TYPE\t', 'DESCRIPTION']])
        # Create a separate thread for each GPU
for device in deviceList:
_thread.start_new_thread(printEventList, (device, 1000, eventTypeList))
time.sleep(0.25)
except Exception as e:
printErrLog(device, 'Unable to start new thread. %s' % (e))
return
while 1: # Exit condition from user keyboard input of 'q' or 'ctrl + c'
getch = _Getch()
user_input = getch()
# Catch user input for q or Ctrl + c
if user_input == 'q' or user_input == '\x03':
for device in deviceList:
ret = rocmsmi.rsmi_event_notification_stop(device)
if not rsmi_ret_ok(ret, device, 'stop_event_notification'):
printErrLog(device, 'Unable to end event notifications.')
print('\r')
break
def printTempGraph(deviceList, delay):
# deviceList must be in ascending order
deviceList.sort()
devices = 0
# Print an empty line for each device
for device in deviceList:
devices = devices + 1
for i in range(devices):
printEmptyLine()
originalTerminalWidth = os.get_terminal_size()[0]
while 1: # Exit condition from user keyboard input of 'q' or 'ctrl + c'
printString = ''
for device in deviceList:
temp = getTemp(device, 'edge')
percentage = temp
if percentage >= 100:
percentage = 100
if percentage < 0:
percentage = 0
# Get available space based on terminal width
terminalWidth = os.get_terminal_size()[0]
availableSpace = 0
if terminalWidth >= 20:
availableSpace = terminalWidth - 20
# Get color based on percentage, with a non-linear scaling
color = getGraphColor(3.16*(percentage**1.5)**(1/2))
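            # (percentage**1.5)**(1/2) == percentage**0.75, so 100% maps to
            # ~3.16*31.6 ≈ 100 and mid-range temps are pushed toward warmer colors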
# Get graph length based on percentage and available space
padding = (percentage / float(100)) * availableSpace
if padding > availableSpace:
padding = availableSpace
paddingSpace = color[-1]
for i in range(int(padding)):
paddingSpace += paddingSpace[-1]
remainder = 0
if availableSpace >= padding:
remainder = availableSpace + 1 - padding
remainderSpace = ' ' * int(remainder)
# TODO: Allow terminal size to be decreased
if terminalWidth < originalTerminalWidth:
print('Terminal size cannot be decreased.\n\r')
return
            # Left-justify to 5 chars (digits + '°C' + padding) so the bars stay aligned
tempString = (str(int(temp)) + '°C').ljust(5)
printString += '\033[2;30;47mGPU[%d] Temp %s|%s%s\x1b[0m%s\r\n' % (device, tempString, color, paddingSpace[1:], remainderSpace)
originalTerminalWidth = terminalWidth
time.sleep((delay / 1000))
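        # '\033[A' moves the cursor up one line; prepending one per device lets the
        # graph redraw over itself instead of scrolling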
if terminalWidth >= 20:
for i in range(devices):
printString = '\033[A' + printString
print(printString, end = '\r')
def getGraphColor(percentage):
# Text / Background color mixing (Tested on PuTTY)
colors = ['\033[2;35;45m','\033[2;34;45m','\033[2;35;44m','\033[2;34;44m',
'\033[2;36;44m','\033[2;34;46m','\033[2;36;46m','\033[2;32;46m',
'\033[2;36;42m','\033[2;32;42m','\033[2;33;42m','\033[2;32;43m',
'\033[2;33;43m','\033[2;31;43m','\033[2;33;41m','\033[2;31;41m']
characters = [' ', '░', '░', '▒', '▒', '░']
# Ensure percentage is in range and rounded
if percentage > 99:
percentage = 99
if percentage < 0:
percentage = 0
percentage = round(percentage, 0)
# There are a total of 16 distinct colors, with 2 special ascii characters per
# color, for a total of 16*2=32 distinct colors for a gradient.
# Therefore every 100/32=3.125 percent the color gradient will change
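    # Example: percentage=50 -> stepSize=3.125, characterIndex=4, colorIndex=8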
stepSize = (100/len(colors))/2
characterIndex = int((percentage % (len(characters) * stepSize)) / stepSize)
colorIndex = int(percentage / (stepSize * 2))
returnStr = colors[colorIndex] + characters[characterIndex]
return returnStr
def showTempGraph(deviceList):
printLogSpacer(' Temperature Graph ')
# Start a thread for constantly printing
try:
# Create a thread (call print function, devices, delay in ms)
_thread.start_new_thread(printTempGraph, (deviceList, 150))
except Exception as e:
printErrLog(device, 'Unable to start new thread. %s' % (e))
# Catch user input for program termination
while 1: # Exit condition from user keyboard input of 'q' or 'ctrl + c'
getch = _Getch()
user_input = getch()
# Catch user input for q or Ctrl + c
if user_input == 'q' or user_input == '\x03':
break
# Reset color to default before exit
print('\033[A\x1b[0m\r')
printLogSpacer()
def showVersion(deviceList, component):
""" Display the software version for the specified component
@param deviceList: List of DRM devices (can be a single-item list)
@param component: Component (currently only driver)
"""
printLogSpacer(' Version of System Component ')
printSysLog(component_str(component) + ' version', getVersion(deviceList, component))
printLogSpacer()
def showVoltage(deviceList):
""" Display the current voltage (in millivolts) for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
"""
printLogSpacer(' Current voltage ')
for device in deviceList:
vtype = rsmi_voltage_type_t(0)
met = rsmi_voltage_metric_t(0)
voltage = c_uint64()
ret = rocmsmi.rsmi_dev_volt_metric_get(device, vtype, met, byref(voltage))
if rsmi_ret_ok(ret, device, 'get_volt_metric') and str(voltage.value):
printLog(device, 'Voltage (mV)', str(voltage.value))
else:
logging.debug('GPU voltage not supported')
printLogSpacer()
def showVoltageCurve(deviceList):
""" Show the voltage curve points for the specified devices
@param deviceList: List of DRM devices (can be a single-item list)
"""
printLogSpacer(' Voltage Curve Points ')
odvf = rsmi_od_volt_freq_data_t()
for device in deviceList:
ret = rocmsmi.rsmi_dev_od_volt_info_get(device, byref(odvf))
if rsmi_ret_ok(ret, device, 'get_od_volt_info', silent=False):
for position in range(3):
printLog(device, 'Voltage point %d: %sMhz %smV' % (
position, int(list(odvf.curve.vc_points)[position].frequency / 1000000),
int(list(odvf.curve.vc_points)[position].voltage)), None)
printLogSpacer()
def showXgmiErr(deviceList):
""" Display the XGMI Error status
This reads the XGMI error file, and interprets the return value from the sysfs file
@param deviceList: Show XGMI error state for these devices
"""
printLogSpacer('XGMI Error status')
xe = rsmi_xgmi_status_t()
for device in deviceList:
ret = rocmsmi.rsmi_dev_xgmi_error_status(device, byref(xe))
if rsmi_ret_ok(ret, device, 'xgmi status'):
desc = ''
if xe.value is None:
continue
else:
err = int(xe.value)
if err == 0:
desc = 'No errors detected since last read'
elif err == 1:
desc = 'Single error detected since last read'
elif err == 2:
desc = 'Multiple errors detected since last read'
else:
printErrLog(device, 'Invalid return value from xgmi_error')
continue
if PRINT_JSON is True:
printLog(device, 'XGMI Error count', err)
else:
printLog(device, 'XGMI Error count', '%s (%s)' % (err, desc))
printLogSpacer()
def showAccessibleTopology(deviceList):
""" Display the HW Topology Information based on link accessibility
This reads the HW Topology file and displays the matrix for the nodes
@param deviceList: List of DRM devices (can be a single-item list)
"""
devices_ind = range(len(deviceList))
accessible = c_bool()
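    # NxN accessibility matrix indexed by device id (assumes ids are 0..N-1)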
gpu_links_type = [[0 for x in devices_ind] for y in devices_ind]
printLogSpacer(' Link accessibility between two GPUs ')
for srcdevice in deviceList:
for destdevice in deviceList:
ret = rocmsmi.rsmi_is_P2P_accessible(srcdevice, destdevice, byref(accessible))
if rsmi_ret_ok(ret, metric='is_P2P_accessible'):
gpu_links_type[srcdevice][destdevice] = accessible.value
else:
printErrLog(srcdevice, 'Cannot read link accessibility: Unsupported on this machine')
if PRINT_JSON:
formatMatrixToJSON(deviceList, gpu_links_type, "(Topology) Link accessibility between DRM devices {} and {}")
return
printTableRow(None, ' ')
for row in deviceList:
tmp = 'GPU%d' % row
printTableRow('%-12s', tmp)
printEmptyLine()
for gpu1 in deviceList:
tmp = 'GPU%d' % gpu1
printTableRow('%-6s', tmp)
for gpu2 in deviceList:
printTableRow('%-12s', gpu_links_type[gpu1][gpu2])
printEmptyLine()
def showWeightTopology(deviceList):
""" Display the HW Topology Information based on weights
This reads the HW Topology file and displays the matrix for the nodes
@param deviceList: List of DRM devices (can be a single-item list)
"""
global PRINT_JSON
devices_ind = range(len(deviceList))
gpu_links_weight = [[0 for x in devices_ind] for y in devices_ind]
printLogSpacer(' Weight between two GPUs ')
for srcdevice in deviceList:
for destdevice in deviceList:
if (srcdevice == destdevice):
gpu_links_weight[srcdevice][destdevice] = 0
continue
weight = c_uint64()
ret = rocmsmi.rsmi_topo_get_link_weight(srcdevice, destdevice, byref(weight))
if rsmi_ret_ok(ret, metric='get_link_weight_topology'):
gpu_links_weight[srcdevice][destdevice] = weight
else:
printErrLog(srcdevice, 'Cannot read Link Weight: Not supported on this machine')
gpu_links_weight[srcdevice][destdevice] = None
if PRINT_JSON:
formatMatrixToJSON(deviceList, gpu_links_weight, "(Topology) Weight between DRM devices {} and {}")
return
printTableRow(None, ' ')
for row in deviceList:
tmp = 'GPU%d' % row
printTableRow('%-12s', tmp)
printEmptyLine()
for gpu1 in deviceList:
tmp = 'GPU%d' % gpu1
printTableRow('%-6s', tmp)
for gpu2 in deviceList:
if (gpu1 == gpu2):
printTableRow('%-12s', '0')
            elif gpu_links_weight[gpu1][gpu2] is None:
printTableRow('%-12s', 'N/A')
else:
printTableRow('%-12s', gpu_links_weight[gpu1][gpu2].value)
printEmptyLine()
def showHopsTopology(deviceList):
""" Display the HW Topology Information based on number of hops
This reads the HW Topology file and displays the matrix for the nodes
@param deviceList: List of DRM devices (can be a single-item list)
"""
linktype = c_char_p()
devices_ind = range(len(deviceList))
gpu_links_hops = [[0 for x in devices_ind] for y in devices_ind]
printLogSpacer(' Hops between two GPUs ')
for srcdevice in deviceList:
for destdevice in deviceList:
if (srcdevice == destdevice):
gpu_links_hops[srcdevice][destdevice] = '0'
continue
hops = c_uint64()
ret = rocmsmi.rsmi_topo_get_link_type(srcdevice, destdevice, byref(hops), byref(linktype))
if rsmi_ret_ok(ret, metric='get_link_type_topology'):
gpu_links_hops[srcdevice][destdevice] = hops
else:
printErrLog(srcdevice, 'Cannot read Link Hops: Not supported on this machine')
gpu_links_hops[srcdevice][destdevice] = None
if PRINT_JSON:
formatMatrixToJSON(deviceList, gpu_links_hops, "(Topology) Hops between DRM devices {} and {}")
return
printTableRow(None, ' ')
for row in deviceList:
tmp = 'GPU%d' % row
printTableRow('%-12s', tmp)
printEmptyLine()
for gpu1 in deviceList:
tmp = 'GPU%d' % gpu1
printTableRow('%-6s', tmp)
for gpu2 in deviceList:
if (gpu1 == gpu2):
printTableRow('%-12s', '0')
            elif gpu_links_hops[gpu1][gpu2] is None:
printTableRow('%-12s', 'N/A')
else:
printTableRow('%-12s', gpu_links_hops[gpu1][gpu2].value)
printEmptyLine()
def showTypeTopology(deviceList):
""" Display the HW Topology Information based on link type
This reads the HW Topology file and displays the matrix for the nodes
@param deviceList: List of DRM devices (can be a single-item list)
"""
devices_ind = range(len(deviceList))
hops = c_uint64()
linktype = c_uint64()
gpu_links_type = [[0 for x in devices_ind] for y in devices_ind]
printLogSpacer(' Link Type between two GPUs ')
for srcdevice in deviceList:
for destdevice in deviceList:
if (srcdevice == destdevice):
gpu_links_type[srcdevice][destdevice] = '0'
continue
ret = rocmsmi.rsmi_topo_get_link_type(srcdevice, destdevice, byref(hops), byref(linktype))
if rsmi_ret_ok(ret, metric='get_link_topology_type'):
if (linktype.value == 1):
gpu_links_type[srcdevice][destdevice] = "PCIE"
elif (linktype.value == 2):
gpu_links_type[srcdevice][destdevice] = "XGMI"
else:
gpu_links_type[srcdevice][destdevice] = "XXXX"
else:
printErrLog(srcdevice, 'Cannot read Link Type: Not supported on this machine')
gpu_links_type[srcdevice][destdevice] = "XXXX"
if PRINT_JSON:
formatMatrixToJSON(deviceList, gpu_links_type, "(Topology) Link type between DRM devices {} and {}")
return
printTableRow(None, ' ')
for row in deviceList:
tmp = 'GPU%d' % row
printTableRow('%-12s', tmp)
printEmptyLine()
for gpu1 in deviceList:
tmp = 'GPU%d' % gpu1
printTableRow('%-6s', tmp)
for gpu2 in deviceList:
if (gpu1 == gpu2):
printTableRow('%-12s', '0')
else:
printTableRow('%-12s', gpu_links_type[gpu1][gpu2])
printEmptyLine()
def showNumaTopology(deviceList):
""" Display the HW Topology Information for numa nodes
This reads the HW Topology file and display the matrix for the nodes
@param deviceList: List of DRM devices (can be a single-item list)
"""
printLogSpacer(' Numa Nodes ')
numa_numbers = c_uint32()
for device in deviceList:
ret = rocmsmi.rsmi_topo_get_numa_node_number(device, byref(numa_numbers))
if rsmi_ret_ok(ret, device, 'get_numa_node_number'):
printLog(device, "(Topology) Numa Node", numa_numbers.value)
else:
printErrLog(device, "Cannot read Numa Node")
ret = rocmsmi.rsmi_topo_numa_affinity_get(device, byref(numa_numbers))
if rsmi_ret_ok(ret, metric='get_numa_affinity_topology'):
printLog(device, "(Topology) Numa Affinity", numa_numbers.value)
else:
printErrLog(device, 'Cannot read Numa Affinity')
def showHwTopology(deviceList):
""" Display the HW Topology Information based on weight/hops/type
This reads the HW Topology file and displays the matrix for the nodes
@param deviceList: List of DRM devices (can be a single-item list)
"""
showWeightTopology(deviceList)
printEmptyLine()
showHopsTopology(deviceList)
printEmptyLine()
showTypeTopology(deviceList)
printEmptyLine()
showNumaTopology(deviceList)
def showNodesBw(deviceList):
""" Display max and min bandwidth between nodes.
Currently supports XGMI only.
This reads the HW Topology file and displays the matrix for the nodes
@param deviceList: List of DRM devices (can be a single-item list)
"""
devices_ind = range(len(deviceList))
minBW = c_uint32()
maxBW = c_uint32()
hops = c_uint64()
linktype = c_uint64()
silent = False
nonXgmi = False
gpu_links_type = [[0 for x in devices_ind] for y in devices_ind]
printLogSpacer(' Bandwidth ')
    for srcdevice in deviceList:
        for destdevice in deviceList:
            if srcdevice != destdevice:
                # Reset per pair so one non-XGMI link doesn't silence later errors
                silent = False
                ret = rocmsmi.rsmi_minmax_bandwidth_get(srcdevice, destdevice, byref(minBW), byref(maxBW))
                # Verify that the link type is XGMI
                ret2 = rocmsmi.rsmi_topo_get_link_type(srcdevice, destdevice, byref(hops), byref(linktype))
                if rsmi_ret_ok(ret2, " {} to {}".format(srcdevice, destdevice), 'get_link_topology_type', True):
                    if linktype.value != 2:
                        nonXgmi = True
                        silent = True
                        gpu_links_type[srcdevice][destdevice] = "N/A"
                if rsmi_ret_ok(ret, " {} to {}".format(srcdevice, destdevice), 'get_minmax_bandwidth', silent):
gpu_links_type[srcdevice][destdevice] = "{}-{}".format(minBW.value, maxBW.value)
else:
gpu_links_type[srcdevice][destdevice] = "N/A"
    if PRINT_JSON:
        formatMatrixToJSON(deviceList, gpu_links_type, "(Topology) Min-max bandwidth between DRM devices {} and {}")
        return
printTableRow(None, ' ')
for row in deviceList:
tmp = 'GPU%d' % row
printTableRow('%-12s', tmp)
printEmptyLine()
for gpu1 in deviceList:
tmp = 'GPU%d' % gpu1
printTableRow('%-6s', tmp)
for gpu2 in deviceList:
printTableRow('%-12s', gpu_links_type[gpu1][gpu2])
printEmptyLine()
printLog(None,"Format: min-max; Units: mps", None)
printLog(None,'"0-0" min-max bandwidth indicates devices are not connected directly', None)
if nonXgmi:
printLog(None,"Non-xGMI links detected and is currently not supported", None)
def showComputePartition(deviceList):
""" Returns the current compute partitioning for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
"""
currentComputePartition = create_string_buffer(256)
printLogSpacer(' Current Compute Partition ')
for device in deviceList:
ret = rocmsmi.rsmi_dev_compute_partition_get(device, currentComputePartition, 256)
if rsmi_ret_ok(ret, device, 'get_compute_partition', silent=True) and currentComputePartition.value.decode():
printLog(device, 'Compute Partition', currentComputePartition.value.decode())
elif ret == rsmi_status_t.RSMI_STATUS_NOT_SUPPORTED:
printLog(device, 'Not supported on the given system', None)
else:
rsmi_ret_ok(ret, device, 'get_compute_partition')
printErrLog(device, 'Failed to retrieve compute partition, even though device supports it.')
printLogSpacer()
def showNPSMode(deviceList):
""" Returns the current NPS mode for a list of devices
@param deviceList: List of DRM devices (can be a single-item list)
"""
npsMode = create_string_buffer(256)
printLogSpacer(' Current NPS Mode ')
for device in deviceList:
ret = rocmsmi.rsmi_dev_nps_mode_get(device, npsMode, 256)
if rsmi_ret_ok(ret, device, 'get_NPS_mode',silent=True) and npsMode.value.decode():
printLog(device, 'NPS Mode', npsMode.value.decode())
elif ret == rsmi_status_t.RSMI_STATUS_NOT_SUPPORTED:
printLog(device, 'Not supported on the given system', None)
else:
rsmi_ret_ok(ret, device, 'get_NPS_mode')
printErrLog(device, 'Failed to retrieve NPS mode, even though device supports it.')
printLogSpacer()
def checkAmdGpus(deviceList):
""" Check if there are any AMD GPUs being queried,
return False if there are none
@param deviceList: List of DRM devices (can be a single-item list)
"""
for device in deviceList:
if isAmdDevice(device):
return True
return False
def component_str(component):
""" Returns the component String value
@param component: Component (currently only driver)
"""
switcher = {
0: 'Driver'
}
return switcher.get(component, 'UNKNOWN')
def confirmOutOfSpecWarning(autoRespond):
""" Print the warning for running outside of specification and prompt user to accept the terms.
@param autoRespond: Response to automatically provide for all prompts
"""
print('''
******WARNING******\n
Operating your AMD GPU outside of official AMD specifications or outside of
factory settings, including but not limited to the conducting of overclocking,
over-volting or under-volting (including use of this interface software,
even if such software has been directly or indirectly provided by AMD or otherwise
affiliated in any way with AMD), may cause damage to your AMD GPU, system components
and/or result in system failure, as well as cause other problems.
DAMAGES CAUSED BY USE OF YOUR AMD GPU OUTSIDE OF OFFICIAL AMD SPECIFICATIONS OR
OUTSIDE OF FACTORY SETTINGS ARE NOT COVERED UNDER ANY AMD PRODUCT WARRANTY AND
MAY NOT BE COVERED BY YOUR BOARD OR SYSTEM MANUFACTURER'S WARRANTY.
Please use this utility with caution.
''')
if not autoRespond:
user_input = input('Do you accept these terms? [y/N] ')
else:
user_input = autoRespond
if user_input in ['Yes', 'yes', 'y', 'Y', 'YES']:
return
else:
sys.exit('Confirmation not given. Exiting without setting value')
def doesDeviceExist(device):
""" Check whether the specified device exists
@param device: DRM device identifier
"""
availableDevices = listDevices()
filePath = '/sys/kernel/debug/dri/%d/' % (int(device))
if device in availableDevices or os.path.exists(filePath):
return True
return False
def initializeRsmi():
""" initializes rocmsmi if the amdgpu driver is initialized
"""
# Check if amdgpu is initialized before initializing rsmi
if driverInitialized() is True:
ret_init = rocmsmi.rsmi_init(0)
if ret_init != 0:
logging.error('ROCm SMI returned %s (the expected value is 0)', ret_init)
exit(ret_init)
else:
logging.error('Driver not initialized (amdgpu not found in modules)')
exit(0)
def isAmdDevice(device):
""" Return whether the specified device is an AMD device or not
@param device: DRM device identifier
"""
vendorID = c_uint16()
# Retrieve card vendor
ret = rocmsmi.rsmi_dev_vendor_id_get(device, byref(vendorID))
# Only continue if GPU vendor is AMD, which is 1002
if ret == rsmi_status_t.RSMI_STATUS_SUCCESS and str(hex(vendorID.value)) == '0x1002':
return True
return False
def listDevices():
""" Returns a list of GPU devices """
numberOfDevices = c_uint32(0)
ret = rocmsmi.rsmi_num_monitor_devices(byref(numberOfDevices))
if rsmi_ret_ok(ret, metric='get_num_monitor_devices'):
deviceList = list(range(numberOfDevices.value))
return deviceList
else:
exit(ret)
def load(savefilepath, autoRespond):
""" Load clock frequencies and fan speeds from a specified file.
@param savefilepath: Path to the save file
@param autoRespond: Response to automatically provide for all prompts
"""
printLogSpacer(' Load Settings ')
if not os.path.isfile(savefilepath):
printLog(None, 'No settings file found at %s' % (savefilepath), None)
printLogSpacer()
sys.exit()
with open(savefilepath, 'r') as savefile:
jsonData = json.loads(savefile.read())
for (device, values) in jsonData.items():
if values['vJson'] != CLOCK_JSON_VERSION:
printLog(None, 'Unable to load legacy clock file - file v%s != current v%s' %
(str(values['vJson']), str(CLOCK_JSON_VERSION)), None)
break
device = int(device[4:])
if values['fan']:
setFanSpeed([device], values['fan'])
if values['overdrivesclk']:
setClockOverDrive([device], 'sclk', values['overdrivesclk'], autoRespond)
if values['overdrivemclk']:
setClockOverDrive([device], 'mclk', values['overdrivemclk'], autoRespond)
for clk in validClockNames:
if clk in values['clocks']:
setClocks([device], clk, values['clocks'][clk])
if values['profile']:
setProfile([device], values['profile'])
# Set Perf level last, since setting OverDrive sets the Performance level
# to manual, and Profiles only work when the Performance level is auto
if values['perflevel'] != -1:
setPerformanceLevel([device], values['perflevel'])
printLog(device, 'Successfully loaded values from ' + savefilepath, None)
printLogSpacer()
def padHexValue(value, length):
""" Pad a hexadecimal value with a given length of zeros
@param value: A hexadecimal value to be padded with zeros
    @param length: Minimum number of hex digits; the value is zero-filled to this width
"""
# Ensure value entered meets the minimum length and is hexadecimal
if len(value) > 2 and length > 1 and value[:2].lower() == '0x' \
and all(c in '0123456789abcdefABCDEF' for c in value[2:]):
# Pad with zeros after '0x' prefix
return '0x' + value[2:].zfill(length)
return value
def profileString(profile):
dictionary = {1: 'CUSTOM', 2: 'VIDEO', 4: 'POWER SAVING', 8: 'COMPUTE', 16: 'VR', 32: '3D FULL SCREEN',
64: 'BOOTUP DEFAULT'}
# TODO: We should dynamically generate this to avoid hardcoding
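    # Example: profileString(2) returns 'VIDEO'; profileString('VIDEO') returns 2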
if str(profile).isnumeric() and int(profile) in dictionary.keys():
return dictionary.get(int(profile))
elif not str(profile).isnumeric() and str(profile) in dictionary.values():
return list(dictionary.keys())[list(dictionary.values()).index(str(profile))]
return 'UNKNOWN'
def relaunchAsSudo():
""" Relaunch the SMI as sudo
To use rocm_smi_lib functions that write to sysfs, the SMI requires root access
Use execvp to relaunch the script with sudo privileges
"""
if os.geteuid() != 0:
os.execvp('sudo', ['sudo'] + sys.argv)
#keeping below, if we want to run sudo with user's env variables
#os.execvp('sudo', ['sudo', '-E'] + sys.argv)
def rsmi_ret_ok(my_ret, device=None, metric=None, silent=False):
""" Returns true if RSMI call status is 0 (success)
If status is not 0, error logs are written to the debug log and false is returned
@param device: DRM device identifier
@param my_ret: Return of RSMI call (rocm_smi_lib API)
@param metric: Parameter of GPU currently being analyzed
    @param silent: Echo verbose error response.
           True silences err output, False does not silence err output (default).
"""
global RETCODE
global PRINT_JSON
if my_ret != rsmi_status_t.RSMI_STATUS_SUCCESS:
err_str = c_char_p()
rocmsmi.rsmi_status_string(my_ret, byref(err_str))
returnString = ''
if device is not None:
returnString += '%s GPU[%s]:' % (my_ret, device)
if metric is not None:
returnString += ' %s: ' % (metric)
returnString += '%s\t' % (err_str.value.decode())
if not PRINT_JSON:
logging.debug('%s', returnString)
if not silent:
if my_ret in rsmi_status_verbose_err_out:
printLog(device, metric + ", " + rsmi_status_verbose_err_out[my_ret], None)
RETCODE = my_ret
return False
return True
def save(deviceList, savefilepath):
""" Save clock frequencies and fan speeds for a list of devices to a specified file path.
@param deviceList: List of DRM devices (can be a single-item list)
@param savefilepath: Path to use to create the save file
"""
perfLevels = {}
clocks = {}
fanSpeeds = {}
overDriveGpu = {}
overDriveGpuMem = {}
profiles = {}
jsonData = {}
printLogSpacer(' Save Settings ')
if os.path.isfile(savefilepath):
printLog(None, '%s already exists. Settings not saved' % (savefilepath), None)
printLogSpacer()
sys.exit()
for device in deviceList:
if getPerfLevel(device) != -1:
perfLevels[device] = str(getPerfLevel(device)).lower()
else:
perfLevels[device] = 'Unsupported'
freq = rsmi_frequencies_t()
for clk_type in sorted(rsmi_clk_names_dict):
clocks[device] = clocks.get(device, {})
ret = rocmsmi.rsmi_dev_gpu_clk_freq_get(device, rsmi_clk_names_dict[clk_type], byref(freq))
if rsmi_ret_ok(ret, device, 'get_gpu_clk_freq_' + str(clk_type), True):
clocks[device][clk_type] = str(freq.current)
else:
clocks[device][clk_type] = '0'
fanSpeeds[device] = getFanSpeed(device)[0]
od = c_uint32()
ret = rocmsmi.rsmi_dev_overdrive_level_get(device, byref(od))
if rsmi_ret_ok(ret, device, 'get_overdrive_level'):
overDriveGpu[device] = str(od.value)
else:
overDriveGpu[device] = '0'
# GPU memory Overdrive is legacy
overDriveGpuMem[device] = '0'
status = rsmi_power_profile_status_t()
ret = rocmsmi.rsmi_dev_power_profile_presets_get(device, 0, byref(status))
if rsmi_ret_ok(ret, device, 'get_profile_presets'):
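            # status.current is a power-of-two bitmask: strip '0b', reverse the bits,
            # and the index of the first '1' (+1) gives the 1-based profile number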
profiles[device] = str(str(bin(status.current))[2:][::-1].index('1') + 1)
else:
profiles[device] = str('UNKNOWN')
jsonData['card%d' % (device)] = {'vJson': CLOCK_JSON_VERSION, 'clocks': clocks[device],
'fan': fanSpeeds[device], 'overdrivesclk': overDriveGpu[device],
'overdrivemclk': overDriveGpuMem[device], 'profile': profiles[device],
'perflevel': perfLevels[device]}
printLog(device, 'Current settings successfully saved to', savefilepath)
with open(savefilepath, 'w') as savefile:
json.dump(jsonData, savefile, ensure_ascii=True)
printLogSpacer()
# The code below is for when this script is run as an executable instead of when imported as a module
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='AMD ROCm System Management Interface | ROCM-SMI version: %s | Kernel version: %s' % (
__version__, getVersion(None, rsmi_sw_component_t.RSMI_SW_COMP_DRIVER)),
formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=90, width=120))
groupDev = parser.add_argument_group()
groupDisplayOpt = parser.add_argument_group('Display Options')
groupDisplayTop = parser.add_argument_group('Topology')
groupDisplayPages = parser.add_argument_group('Pages information')
groupDisplayHw = parser.add_argument_group('Hardware-related information')
groupDisplay = parser.add_argument_group('Software-related/controlled information')
groupAction = parser.add_argument_group('Set options')
groupActionReset = parser.add_argument_group('Reset options')
groupActionGpuReset = parser.add_mutually_exclusive_group()
groupFile = parser.add_mutually_exclusive_group()
groupResponse = parser.add_argument_group('Auto-response options')
groupActionOutput = parser.add_argument_group('Output options')
groupDev.add_argument('-d', '--device', help='Execute command on specified device', type=int, nargs='+')
groupDisplayOpt.add_argument('--alldevices', action='store_true') # ------------- function deprecated, no help menu
groupDisplayOpt.add_argument('--showhw', help='Show Hardware details', action='store_true')
groupDisplayOpt.add_argument('-a', '--showallinfo', help='Show Temperature, Fan and Clock values',
action='store_true')
groupDisplayTop.add_argument('-i', '--showid', help='Show GPU ID', action='store_true')
groupDisplayTop.add_argument('-v', '--showvbios', help='Show VBIOS version', action='store_true')
groupDisplayTop.add_argument('-e', '--showevents', help='Show event list', metavar='EVENT', type=str, nargs='*')
groupDisplayTop.add_argument('--showdriverversion', help='Show kernel driver version', action='store_true')
groupDisplayTop.add_argument('--showtempgraph', help='Show Temperature Graph', action='store_true')
groupDisplayTop.add_argument('--showfwinfo', help='Show FW information', metavar='BLOCK', type=str, nargs='*')
groupDisplayTop.add_argument('--showmclkrange', help='Show mclk range', action='store_true')
groupDisplayTop.add_argument('--showmemvendor', help='Show GPU memory vendor', action='store_true')
groupDisplayTop.add_argument('--showsclkrange', help='Show sclk range', action='store_true')
groupDisplayTop.add_argument('--showproductname', help='Show SKU/Vendor name', action='store_true')
groupDisplayTop.add_argument('--showserial', help='Show GPU\'s Serial Number', action='store_true')
groupDisplayTop.add_argument('--showuniqueid', help='Show GPU\'s Unique ID', action='store_true')
groupDisplayTop.add_argument('--showvoltagerange', help='Show voltage range', action='store_true')
groupDisplayTop.add_argument('--showbus', help='Show PCI bus number', action='store_true')
groupDisplayPages.add_argument('--showpagesinfo', help='Show retired, pending and unreservable pages',
action='store_true')
groupDisplayPages.add_argument('--showpendingpages', help='Show pending retired pages', action='store_true')
groupDisplayPages.add_argument('--showretiredpages', help='Show retired pages', action='store_true')
groupDisplayPages.add_argument('--showunreservablepages', help='Show unreservable pages', action='store_true')
groupDisplayHw.add_argument('-f', '--showfan', help='Show current fan speed', action='store_true')
groupDisplayHw.add_argument('-P', '--showpower', help='Show current Average Graphics Package Power Consumption',
action='store_true')
groupDisplayHw.add_argument('-t', '--showtemp', help='Show current temperature', action='store_true')
groupDisplayHw.add_argument('-u', '--showuse', help='Show current GPU use', action='store_true')
groupDisplayHw.add_argument('--showmemuse', help='Show current GPU memory used', action='store_true')
groupDisplayHw.add_argument('--showvoltage', help='Show current GPU voltage', action='store_true')
groupDisplay.add_argument('-b', '--showbw', help='Show estimated PCIe use', action='store_true')
groupDisplay.add_argument('-c', '--showclocks', help='Show current clock frequencies', action='store_true')
groupDisplay.add_argument('-g', '--showgpuclocks', help='Show current GPU clock frequencies', action='store_true')
groupDisplay.add_argument('-l', '--showprofile', help='Show Compute Profile attributes', action='store_true')
groupDisplay.add_argument('-M', '--showmaxpower', help='Show maximum graphics package power this GPU will consume',
action='store_true')
groupDisplay.add_argument('-m', '--showmemoverdrive', help='Show current GPU Memory Clock OverDrive level',
action='store_true')
groupDisplay.add_argument('-o', '--showoverdrive', help='Show current GPU Clock OverDrive level',
action='store_true')
groupDisplay.add_argument('-p', '--showperflevel', help='Show current DPM Performance Level', action='store_true')
groupDisplay.add_argument('-S', '--showclkvolt', help='Show supported GPU and Memory Clocks and Voltages',
action='store_true')
groupDisplay.add_argument('-s', '--showclkfrq', help='Show supported GPU and Memory Clock', action='store_true')
groupDisplay.add_argument('--showmeminfo', help='Show Memory usage information for given block(s) TYPE',
metavar='TYPE', type=str, nargs='+')
groupDisplay.add_argument('--showpids', help='Show current running KFD PIDs', action='store_true')
groupDisplay.add_argument('--showpidgpus', help='Show GPUs used by specified KFD PIDs (all if no arg given)',
nargs='*')
groupDisplay.add_argument('--showreplaycount', help='Show PCIe Replay Count', action='store_true')
groupDisplay.add_argument('--showrasinfo',
help='Show RAS enablement information and error counts for the specified block(s) (all if no arg given)',
nargs='*')
groupDisplay.add_argument('--showvc', help='Show voltage curve', action='store_true')
groupDisplay.add_argument('--showxgmierr', help='Show XGMI error information since last read', action='store_true')
groupDisplay.add_argument('--showtopo', help='Show hardware topology information', action='store_true')
groupDisplay.add_argument('--showtopoaccess', help='Shows the link accessibility between GPUs ', action='store_true')
groupDisplay.add_argument('--showtopoweight', help='Shows the relative weight between GPUs ', action='store_true')
groupDisplay.add_argument('--showtopohops', help='Shows the number of hops between GPUs ', action='store_true')
groupDisplay.add_argument('--showtopotype', help='Shows the link type between GPUs ', action='store_true')
groupDisplay.add_argument('--showtoponuma', help='Shows the numa nodes ', action='store_true')
groupDisplay.add_argument('--showenergycounter', help='Energy accumulator that stores amount of energy consumed',
action='store_true')
    groupDisplay.add_argument('--shownodesbw', help='Shows the min and max bandwidth between nodes', action='store_true')
groupDisplay.add_argument('--showcomputepartition', help='Shows current compute partitioning ', action='store_true')
groupDisplay.add_argument('--shownpsmode', help='Shows current NPS mode ', action='store_true')
groupActionReset.add_argument('-r', '--resetclocks', help='Reset clocks and OverDrive to default',
action='store_true')
groupActionReset.add_argument('--resetfans', help='Reset fans to automatic (driver) control', action='store_true')
groupActionReset.add_argument('--resetprofile', help='Reset Power Profile back to default', action='store_true')
groupActionReset.add_argument('--resetpoweroverdrive',
                                  help='Set the maximum GPU power back to the device default state',
action='store_true')
groupActionReset.add_argument('--resetxgmierr', help='Reset XGMI error count', action='store_true')
groupActionReset.add_argument('--resetperfdeterminism', help='Disable performance determinism', action='store_true')
groupActionReset.add_argument('--resetcomputepartition', help='Resets to boot compute partition state', action='store_true')
groupActionReset.add_argument('--resetnpsmode', help='Resets to boot NPS mode state', action='store_true')
groupAction.add_argument('--setclock',
help='Set Clock Frequency Level(s) for specified clock (requires manual Perf level)',
metavar=('TYPE','LEVEL'), nargs=2)
groupAction.add_argument('--setsclk', help='Set GPU Clock Frequency Level(s) (requires manual Perf level)',
type=int, metavar='LEVEL', nargs='+')
groupAction.add_argument('--setmclk', help='Set GPU Memory Clock Frequency Level(s) (requires manual Perf level)',
type=int, metavar='LEVEL', nargs='+')
groupAction.add_argument('--setpcie', help='Set PCIE Clock Frequency Level(s) (requires manual Perf level)',
type=int, metavar='LEVEL', nargs='+')
groupAction.add_argument('--setslevel',
help='Change GPU Clock frequency (MHz) and Voltage (mV) for a specific Level',
metavar=('SCLKLEVEL', 'SCLK', 'SVOLT'), nargs=3)
groupAction.add_argument('--setmlevel',
help='Change GPU Memory clock frequency (MHz) and Voltage for (mV) a specific Level',
metavar=('MCLKLEVEL', 'MCLK', 'MVOLT'), nargs=3)
groupAction.add_argument('--setvc', help='Change SCLK Voltage Curve (MHz mV) for a specific point',
metavar=('POINT', 'SCLK', 'SVOLT'), nargs=3)
groupAction.add_argument('--setsrange', help='Set min and max SCLK speed', metavar=('SCLKMIN', 'SCLKMAX'), nargs=2)
groupAction.add_argument('--setmrange', help='Set min and max MCLK speed', metavar=('MCLKMIN', 'MCLKMAX'), nargs=2)
groupAction.add_argument('--setfan', help='Set GPU Fan Speed (Level or %%)', metavar='LEVEL')
groupAction.add_argument('--setperflevel', help='Set Performance Level', metavar='LEVEL')
groupAction.add_argument('--setoverdrive', help='Set GPU OverDrive level (requires manual|high Perf level)',
metavar='%')
groupAction.add_argument('--setmemoverdrive',
help='Set GPU Memory Overclock OverDrive level (requires manual|high Perf level)',
metavar='%')
groupAction.add_argument('--setpoweroverdrive', help='Set the maximum GPU power using Power OverDrive in Watts',
metavar='WATTS')
groupAction.add_argument('--setprofile',
help='Specify Power Profile level (#) or a quoted string of CUSTOM Profile attributes "# '
'# # #..." (requires manual Perf level)')
groupAction.add_argument('--setperfdeterminism',
help='Set clock frequency limit to get minimal performance variation', type=int,
metavar='SCLK', nargs=1)
groupAction.add_argument('--setcomputepartition', help='Set compute partition',
choices=compute_partition_type_l + [x.lower() for x in compute_partition_type_l],
type=str, nargs=1)
groupAction.add_argument('--setnpsmode', help='Set nps mode',
choices=nps_mode_type_l + [x.lower() for x in nps_mode_type_l],
type=str, nargs=1)
groupAction.add_argument('--rasenable', help='Enable RAS for specified block and error type', type=str, nargs=2,
metavar=('BLOCK', 'ERRTYPE'))
groupAction.add_argument('--rasdisable', help='Disable RAS for specified block and error type', type=str, nargs=2,
metavar=('BLOCK', 'ERRTYPE'))
    groupAction.add_argument('--rasinject',
                             help='Inject RAS poison for specified block and error type (ONLY WORKS ON UNSECURE BOARDS)',
                             type=str, metavar=('BLOCK', 'ERRTYPE'), nargs=2)
groupActionGpuReset.add_argument('--gpureset', help='Reset specified GPU (One GPU must be specified)',
action='store_true')
groupFile.add_argument('--load', help='Load Clock, Fan, Performance and Profile settings from FILE', metavar='FILE')
groupFile.add_argument('--save', help='Save Clock, Fan, Performance and Profile settings to FILE', metavar='FILE')
groupResponse.add_argument('--autorespond',
help='Response to automatically provide for all prompts (NOT RECOMMENDED)',
metavar='RESPONSE')
groupActionOutput.add_argument('--loglevel',
help='How much output will be printed for what program is doing, one of debug/info/warning/error/critical',
metavar='LEVEL')
groupActionOutput.add_argument('--json', help='Print output in JSON format', action='store_true')
groupActionOutput.add_argument('--csv', help='Print output in CSV format', action='store_true')
args = parser.parse_args()
# Initialize the rocm SMI library
initializeRsmi()
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.WARNING)
if args.loglevel is not None:
numericLogLevel = getattr(logging, args.loglevel.upper(), logging.WARNING)
logging.getLogger().setLevel(numericLogLevel)
if args.setsclk or args.setmclk or args.setpcie or args.resetfans or args.setfan or args.setperflevel or args.load \
or args.resetclocks or args.setprofile or args.resetprofile or args.setoverdrive or args.setmemoverdrive \
or args.setpoweroverdrive or args.resetpoweroverdrive or args.rasenable or args.rasdisable or \
args.rasinject or args.gpureset or args.setperfdeterminism or args.setslevel or args.setmlevel or \
args.setvc or args.setsrange or args.setmrange or args.setclock or \
args.setcomputepartition or args.setnpsmode or args.resetcomputepartition or args.resetnpsmode:
relaunchAsSudo()
# If there is one or more device specified, use that for all commands, otherwise use a
# list of all available devices. Also use "is not None" as device 0 would
# have args.device=0, and "if 0" returns false.
if args.device is not None:
deviceList = []
for device in args.device:
if not doesDeviceExist(device):
logging.warning('No such device card%s', str(device))
sys.exit()
if (isAmdDevice(device) or args.alldevices) and device not in deviceList:
deviceList.append(device)
else:
deviceList = listDevices()
if deviceList is None:
printLog(None, 'ERROR: No DRM devices available. Exiting', None)
sys.exit(1)
# If we want JSON/CSV output, initialize the keys (devices)
if args.json or args.csv:
PRINT_JSON = True
for device in deviceList:
JSON_DATA['card' + str(device)] = {}
if not PRINT_JSON:
print('\n')
printLogSpacer(headerString)
if args.showallinfo:
args.list = True
args.showid = True
args.showvbios = True
args.showdriverversion = True
args.showfwinfo = 'all'
args.showmclkrange = True
args.showmemvendor = True
args.showsclkrange = True
args.showproductname = True
args.showserial = True
args.showuniqueid = True
args.showvoltagerange = True
args.showbus = True
args.showpagesinfo = True
args.showfan = True
args.showpower = True
args.showtemp = True
args.showuse = True
args.showenergycounter = True
args.showmemuse = True
args.showvoltage = True
args.showclocks = True
args.showmaxpower = True
args.showmemoverdrive = True
args.showoverdrive = True
args.showperflevel = True
args.showpids = True
args.showpidgpus = []
args.showreplaycount = True
args.showvc = True
args.showcomputepartition = True
args.shownpsmode = True
if not PRINT_JSON:
args.showprofile = True
args.showclkfrq = True
args.showclkvolt = True
# Don't do reset in combination with any other command
if args.gpureset:
if not args.device:
logging.error('No device specified. One device must be specified for GPU reset')
printLogSpacer()
sys.exit(1)
logging.debug('Only executing GPU reset, no other commands will be executed')
resetGpu(args.device)
sys.exit(RETCODE)
if not checkAmdGpus(deviceList):
logging.warning('No AMD GPUs specified')
if len(sys.argv) == 1 or \
len(sys.argv) == 2 and (args.alldevices or (args.json or args.csv)) or \
len(sys.argv) == 3 and (args.alldevices and (args.json or args.csv)):
showAllConcise(deviceList)
if args.showhw:
showAllConciseHw(deviceList)
if args.showdriverversion:
showVersion(deviceList, rsmi_sw_component_t.RSMI_SW_COMP_DRIVER)
if args.showtempgraph:
showTempGraph(deviceList)
if args.showid:
showId(deviceList)
if args.showuniqueid:
showUId(deviceList)
if args.showvbios:
showVbiosVersion(deviceList)
if args.showevents or str(args.showevents) == '[]':
showEvents(deviceList, args.showevents)
if args.resetclocks:
resetClocks(deviceList)
if args.showtemp:
showCurrentTemps(deviceList)
if args.showclocks:
showCurrentClocks(deviceList)
if args.showgpuclocks:
showCurrentClocks(deviceList, 'sclk')
if args.showfan:
showCurrentFans(deviceList)
if args.showperflevel:
showPerformanceLevel(deviceList)
if args.showoverdrive:
showOverDrive(deviceList, 'sclk')
if args.showmemoverdrive:
showOverDrive(deviceList, 'mclk')
if args.showmaxpower:
showMaxPower(deviceList)
if args.showprofile:
showProfile(deviceList)
if args.showpower:
showPower(deviceList)
if args.showclkfrq:
showClocks(deviceList)
if args.showuse:
showGpuUse(deviceList)
if args.showmemuse:
showMemUse(deviceList)
if args.showmemvendor:
showMemVendor(deviceList)
if args.showbw:
showPcieBw(deviceList)
if args.showreplaycount:
showPcieReplayCount(deviceList)
if args.showserial:
showSerialNumber(deviceList)
if args.showpids:
showPids()
if args.showpidgpus or str(args.showpidgpus) == '[]':
showGpusByPid(args.showpidgpus)
if args.showclkvolt:
showPowerPlayTable(deviceList)
if args.showvoltage:
showVoltage(deviceList)
if args.showbus:
showBus(deviceList)
if args.showmeminfo:
showMemInfo(deviceList, args.showmeminfo)
if args.showrasinfo or str(args.showrasinfo) == '[]':
showRasInfo(deviceList, args.showrasinfo)
    # The second condition checks whether --showfwinfo was passed with no arguments:
    # argparse stores an empty list in that case, so comparing against '[]' lets a
    # bare --showfwinfo print all firmware blocks.
if args.showfwinfo or str(args.showfwinfo) == '[]':
showFwInfo(deviceList, args.showfwinfo)
if args.showproductname:
showProductName(deviceList)
if args.showxgmierr:
showXgmiErr(deviceList)
if args.shownodesbw:
showNodesBw(deviceList)
if args.showtopo:
showHwTopology(deviceList)
if args.showtopoaccess:
showAccessibleTopology(deviceList)
if args.showtopoweight:
showWeightTopology(deviceList)
if args.showtopohops:
showHopsTopology(deviceList)
if args.showtopotype:
showTypeTopology(deviceList)
if args.showtoponuma:
showNumaTopology(deviceList)
if args.showpagesinfo:
showRetiredPages(deviceList)
if args.showretiredpages:
showRetiredPages(deviceList, 'reserved')
if args.showpendingpages:
showRetiredPages(deviceList, 'pending')
if args.showunreservablepages:
showRetiredPages(deviceList, 'unreservable')
if args.showsclkrange:
showRange(deviceList, 'sclk')
if args.showmclkrange:
showRange(deviceList, 'mclk')
if args.showvoltagerange:
showRange(deviceList, 'voltage')
if args.showvc:
showVoltageCurve(deviceList)
if args.showenergycounter:
showEnergy(deviceList)
if args.showcomputepartition:
showComputePartition(deviceList)
if args.shownpsmode:
showNPSMode(deviceList)
if args.setclock:
setClocks(deviceList, args.setclock[0], [int(args.setclock[1])])
if args.setsclk:
setClocks(deviceList, 'sclk', args.setsclk)
if args.setmclk:
setClocks(deviceList, 'mclk', args.setmclk)
if args.setpcie:
setClocks(deviceList, 'pcie', args.setpcie)
if args.setslevel:
setPowerPlayTableLevel(deviceList, 'sclk', args.setslevel[0], args.setslevel[1], args.setslevel[2],
args.autorespond)
if args.setmlevel:
setPowerPlayTableLevel(deviceList, 'mclk', args.setmlevel[0], args.setmlevel[1], args.setmlevel[2],
args.autorespond)
if args.resetfans:
resetFans(deviceList)
if args.setfan:
setFanSpeed(deviceList, args.setfan)
if args.setperflevel:
setPerformanceLevel(deviceList, args.setperflevel)
if args.setoverdrive:
setClockOverDrive(deviceList, 'sclk', args.setoverdrive, args.autorespond)
if args.setmemoverdrive:
setClockOverDrive(deviceList, 'mclk', args.setmemoverdrive, args.autorespond)
if args.setpoweroverdrive:
setPowerOverDrive(deviceList, args.setpoweroverdrive, args.autorespond)
if args.resetpoweroverdrive:
resetPowerOverDrive(deviceList, args.autorespond)
if args.setprofile:
setProfile(deviceList, args.setprofile)
if args.setvc:
setVoltageCurve(deviceList, args.setvc[0], args.setvc[1], args.setvc[2], args.autorespond)
if args.setsrange:
setClockRange(deviceList, 'sclk', args.setsrange[0], args.setsrange[1], args.autorespond)
if args.setmrange:
setClockRange(deviceList, 'mclk', args.setmrange[0], args.setmrange[1], args.autorespond)
if args.setperfdeterminism:
setPerfDeterminism(deviceList, args.setperfdeterminism[0])
if args.setcomputepartition:
setComputePartition(deviceList, args.setcomputepartition[0])
if args.setnpsmode:
setNPSMode(deviceList, args.setnpsmode[0])
if args.resetprofile:
resetProfile(deviceList)
if args.resetxgmierr:
resetXgmiErr(deviceList)
if args.resetperfdeterminism:
resetPerfDeterminism(deviceList)
if args.resetcomputepartition:
resetComputePartition(deviceList)
if args.resetnpsmode:
resetNpsMode(deviceList)
if args.rasenable:
setRas(deviceList, 'enable', args.rasenable[0], args.rasenable[1])
if args.rasdisable:
setRas(deviceList, 'disable', args.rasdisable[0], args.rasdisable[1])
if args.rasinject:
setRas(deviceList, 'inject', args.rasinject[0], args.rasinject[1])
if args.load:
load(args.load, args.autorespond)
if args.save:
save(deviceList, args.save)
if RETCODE and not PRINT_JSON:
logging.debug(' \t\t One or more commands failed.')
        # Reset RETCODE to 0 when loglevel is unset or left at the default 'warning' level
if args.loglevel is None or getattr(logging, args.loglevel.upper(), logging.WARNING) == logging.WARNING:
RETCODE = 0
if PRINT_JSON:
# Check that we have some actual data to print, instead of the
# empty list that we initialized above
for device in deviceList:
if not JSON_DATA['card' + str(device)]:
JSON_DATA.pop('card' + str(device))
if not JSON_DATA:
            logging.warning("No JSON data to report")
sys.exit(RETCODE)
if not args.csv:
print(json.dumps(JSON_DATA))
else:
devCsv = ''
sysCsv = ''
# JSON won't have any 'system' data without one of these flags
            if args.showdriverversion and not args.showallinfo:
                sysCsv = formatCsv(['system'])
                print(sysCsv)
            elif args.showallinfo:
sysCsv = formatCsv(['system'])
devCsv = formatCsv(deviceList)
print('%s\n%s' % (sysCsv, devCsv))
else:
devCsv = formatCsv(deviceList)
print(devCsv)
printLogSpacer(footerString)
rsmi_ret_ok(rocmsmi.rsmi_shut_down())
exit(RETCODE)
| 161,265 | 41.889894 | 163 |
py
|
darkriscv
|
darkriscv-master/sim/trace.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from vcdvcd import VCDVCD
import subprocess
import sys
import getopt
import argparse
class Error(Exception):
"""addr2line exception."""
    def __init__(self, message):
        Exception.__init__(self, message)
class addr2line:
def __init__(self, binary, addr2line):
self.process = subprocess.Popen(
[addr2line, "-e", binary],
stdin = subprocess.PIPE,
stdout = subprocess.PIPE)
def lookup(self, addr):
dbg_info = None
try:
self.process.stdin.write((addr + "\n").encode('utf-8'))
self.process.stdin.flush()
dbg_info = self.process.stdout.readline().decode("utf-8")
dbg_info = dbg_info.rstrip("\n")
except IOError:
raise Error(
"Communication error with addr2line.")
finally:
            ret = self.process.poll()
            if ret is not None:
                raise Error(
                    "addr2line terminated unexpectedly (%i)." % (ret))
return dbg_info
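# A sketch of the addr2line exchange wrapped by the class above, with
# hypothetical addresses and paths:
#   stdin:  "0x2f8\n"   ->  stdout: "/path/to/main.c:42\n"
#   stdin:  "0xffff\n"  ->  stdout: "??:0\n"  (unresolvable PC)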
class source_printer:
def __init__(self):
self.cache = {}
def try_print_source(self, line, count):
if (line, count) not in self.cache:
self.cache[(line, count)] = self.__lookup(line, count)
print(self.cache[(line, count)])
def __lookup(self, line, count):
source = ""
try:
file_name, line_number = filter(None, line.split(':'))
line_number = line_number.split(' ', 1)[0] # get rid of 'discriminator X' stuff
#print(f'file: {file_name}, line: {line_number}')
start_line_number = int(line_number)
if count > 1:
start_line_number = int(start_line_number - count/2)
                if start_line_number < 1:
                    start_line_number = 1
            end_line_number = start_line_number + count
            # print(f'file_name: {file_name} => {start_line_number}:{end_line_number}')
            with open(file_name) as src_file:
                # addr2line reports 1-based line numbers, so enumerate from 1
                for i, line_content in enumerate(src_file, start=1):
                    if start_line_number <= i < end_line_number:
                        source += f"{i}:{line_content}"
        except Exception:
            # nothing to do here; the error could be a missing source file
            # or addr2line failing to resolve the PC
            pass
finally:
return source if source else "No Source"
class lst_lookuper:
def __init__(self, filename):
# read the whole listing file in memory
with open(filename) as lst_file:
self.lst_array = lst_file.readlines()
def lst_lookup(self, pc):
pcstr = " " + str(pc)[2:] + ":\t"
asm = next((s for s in self.lst_array if pcstr in s), None)
if not asm:
asm = ""
else:
# .lst file format is:
            # <space[s]>address:<tab>binary<spaces><tab>assembly instruction
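            # e.g. (hypothetical line): '   104:\t00000513          \tli\ta0,0'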
asm = asm.split("\t", 2)[-1]
asm = asm.rstrip()
return asm
# Execution starts here
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("vcdfile", nargs='?', help = "VCD trace file", default="darksocv.vcd")
parser.add_argument("-of", "--objectfile", help="Object file to look up into", default="../src/darksocv.o")
parser.add_argument("-a2l", "--addr2line", help="addr2line executable to use", default="/opt/riscv32e/bin/riscv32-unknown-elf-addr2line")
parser.add_argument("-s", "--source", help="Print out source code line, if possible", action="store_true")
parser.add_argument("-sl", "--source_lines", help="Number of source code lines to print", default=1, type=int)
parser.add_argument("-a", "--assembly", help="Print out assembly instruction", action="store_true")
parser.add_argument("-lf", "--listing_file", help="listing file to read assembly from", default="../src/darksocv.lst")
args = parser.parse_args()
# Do the parsing.
vcd = VCDVCD(args.vcdfile)
# Get a signal by human readable name.
signal = vcd['darksimv.darksocv.core0.PC[31:0]']
tv = signal.tv
# A crude "PC"->"line" cache as addr2line calls can be expensive
cache = {}
a2l = addr2line(args.objectfile, args.addr2line)
printer = source_printer()
if args.assembly:
    lst = lst_lookuper(args.listing_file)
for x in tv:
time = x[0]
if 'x' in x[1]:
pc = x[1]
line = "undef"
else:
pc = hex(int(str(x[1]), 2)) # get a hex string out of pc
if pc in cache:
line = cache[pc]
else:
line = a2l.lookup(pc)
if args.assembly:
                line += " => " + lst.lst_lookup(pc)
cache[pc] = line
    print(f'{time:>12}:{pc:>10}:{line}')
if line != "undef" and args.source:
        printer.try_print_source(line, args.source_lines)
| 4,927 | 32.073826 | 137 |
py
|
dive
|
dive-main/.github/scripts/coverage.py
|
#!/usr/bin/env python3
import subprocess
import sys
import shlex
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
if len(sys.argv) < 3:
print("Usage: coverage.py [threshold] [go-coverage-report]")
sys.exit(1)
threshold = float(sys.argv[1])
report = sys.argv[2]
args = shlex.split(f"go tool cover -func {report}")
p = subprocess.run(args, capture_output=True, text=True)
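# `go tool cover -func` ends with a summary row such as
#   total:  (statements)  81.2%
# so the overall percentage is the last field of the last output line.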
percent_coverage = float(p.stdout.splitlines()[-1].split()[-1].replace("%", ""))
print(f"{bcolors.BOLD}Coverage: {percent_coverage}%{bcolors.ENDC}")
if percent_coverage < threshold:
print(f"{bcolors.BOLD}{bcolors.FAIL}Coverage below threshold of {threshold}%{bcolors.ENDC}")
sys.exit(1)
| 861 | 22.297297 | 96 |
py
|
Guava-disease-detection
|
Guava-disease-detection-main/test.py
| 0 | 0 | 0 |
py
|
|
Guava-disease-detection
|
Guava-disease-detection-main/configuration.py
|
import os
if __name__ == "__main__":
print("in progress")
| 60 | 11.2 | 24 |
py
|
Guava-disease-detection
|
Guava-disease-detection-main/predict.py
|
import os
if __name__ == "__main__":
print("in progress")
| 60 | 11.2 | 24 |
py
|
Guava-disease-detection
|
Guava-disease-detection-main/train.py
|
import os
if __name__ == "__main__":
print("in progress")
| 60 | 11.2 | 24 |
py
|
Guava-disease-detection
|
Guava-disease-detection-main/datasets/dataloader.py
|
import os
if __name__ == "__main__":
print("in progress")
| 60 | 11.2 | 24 |
py
|
Guava-disease-detection
|
Guava-disease-detection-main/datasets/utils.py
|
import os
if __name__ == "__main__":
print("in progress")
| 60 | 11.2 | 24 |
py
|
Guava-disease-detection
|
Guava-disease-detection-main/optimization/conversion/run_tflite_model.py
|
import numpy as np
import tensorflow as tf
tflite_model_path = 'model.tflite'
# Load the TFLite model and allocate tensors
interpreter = tf.lite.Interpreter(model_path=tflite_model_path)
interpreter.allocate_tensors()
# Get input and output tensors
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Test the model on random input data
input_shape = input_details[0]['shape']
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
# get_tensor() returns a copy of the tensor data
# use tensor() in order to get a pointer to the tensor
output_data = interpreter.get_tensor(output_details[0]['index'])
print(output_data)
| 778 | 32.869565 | 77 |
py
|
Guava-disease-detection
|
Guava-disease-detection-main/optimization/conversion/onnx_to_tf.py
|
from onnx_tf.backend import prepare
import onnx
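# Note: `prepare` comes from the onnx-tf package (onnx-tensorflow), which wraps
# an ONNX graph in a TensorFlow-backed representation for export.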
onnx_model_path = 'model.onnx'
tf_model_path = 'model_tf'
onnx_model = onnx.load(onnx_model_path)
tf_rep = prepare(onnx_model)
tf_rep.export_graph(tf_model_path)
| 211 | 22.555556 | 39 |
py
|
Guava-disease-detection
|
Guava-disease-detection-main/optimization/conversion/tf_to_tflite.py
|
import tensorflow as tf
saved_model_dir = 'model_tf'
tflite_model_path = 'model.tflite'
# Convert the model
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
# Save the model
with open(tflite_model_path, 'wb') as f:
f.write(tflite_model)
| 299 | 24 | 69 |
py
|
Guava-disease-detection
|
Guava-disease-detection-main/optimization/conversion/run_tf_model.py
|
import tensorflow as tf
tf_model_path = 'model_tf'
model = tf.saved_model.load(tf_model_path)
model.trainable = False
input_tensor = tf.random.uniform([1, 3, 640, 640])
out = model(**{'input': input_tensor})
print(out)
| 222 | 19.272727 | 50 |
py
|
Guava-disease-detection
|
Guava-disease-detection-main/optimization/conversion/torch_to_onnx.py
|
import torch
from torchvision.models import mobilenet_v2
img_size = (640, 640)
batch_size = 1
onnx_model_path = 'model.onnx'
model = mobilenet_v2()
model.eval()
sample_input = torch.rand((batch_size, 3, *img_size))
y = model(sample_input)
torch.onnx.export(
model,
sample_input,
onnx_model_path,
verbose=False,
input_names=['input'],
output_names=['output'],
opset_version=12
)
| 411 | 16.913043 | 53 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/tools/captain/gather_results.py
|
#!/usr/bin/env python3
import argparse
from collections import defaultdict
import csv
import errno
import json
import logging
from multiprocessing import Pool
import os
import shutil
import subprocess
import sys
from tempfile import mkdtemp
import pandas as pd
ddr = lambda: defaultdict(ddr)
def parse_args():
parser = argparse.ArgumentParser(description=(
"Collects data from the experiment workdir and outputs a summary as "
"a JSON file."
))
parser.add_argument("--workers",
default=4,
help="The number of concurrent processes to launch.")
parser.add_argument("workdir",
help="The path to the Captain tool output workdir.")
parser.add_argument("outfile",
default="-",
help="The file to which the output will be written, or - for stdout.")
parser.add_argument('-v', '--verbose', action='count', default=0,
help=("Controls the verbosity of messages. "
"-v prints info. -vv prints debug. Default: warnings and higher.")
)
return parser.parse_args()
def walklevel(some_dir, level=1):
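    # A bounded os.walk: yields (root, dirs, files, depth) and prunes the walk
    # once `level` directory levels below some_dir have been reached.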
some_dir = some_dir.rstrip(os.path.sep)
assert os.path.isdir(some_dir)
num_sep = some_dir.count(os.path.sep)
for root, dirs, files in os.walk(some_dir):
num_sep_this = root.count(os.path.sep)
yield root, dirs, files, (num_sep_this - num_sep)
if num_sep + level <= num_sep_this:
del dirs[:]
def path_split_last(path, n):
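    # Split off the last n path components, e.g. (hypothetical path):
    #   path_split_last("wd/ar/fuzzer/target/prog/0", 4)
    #     -> ("wd/ar", "fuzzer", "target", "prog", "0")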
sp = []
for _ in range(n):
path, tmp = os.path.split(path)
sp = [tmp] + sp
return (path, *sp)
def find_campaigns(workdir):
ar_dir = os.path.join(workdir, "ar")
for root, dirs, _, level in walklevel(ar_dir, 3):
if level == 3:
for run in dirs:
# `run` directories always have integer-only names
if not run.isdigit():
logging.warning((
"Detected invalid workdir hierarchy! Make sure to point "
"the script to the root of the original workdir."
))
path = os.path.join(root, run)
yield path
def ensure_dir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def clear_dir(path):
for filename in os.listdir(path):
file_path = os.path.join(path, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
logging.exception('Failed to delete %s. Reason: %s', file_path, e)
def extract_monitor_dumps(tarball, dest):
clear_dir(dest)
# get the path to the monitor dir inside the tarball
monitor = subprocess.check_output(f'tar -tf "{tarball}" | grep -Po ".*monitor" | uniq', shell=True)
monitor = monitor.decode().rstrip()
# strip all path components until and excluding the monitor dir
ccount = len(monitor.split("/")) - 1
os.system(f'tar -xf "{tarball}" --strip-components={ccount} -C "{dest}" {monitor}')
def generate_monitor_df(dumpdir, campaign):
def row_generator():
files = os.listdir(dumpdir)
if 'tmp' in files:
files.remove('tmp')
files.sort(key=int)
for timestamp in files:
fname = os.path.join(dumpdir, timestamp)
try:
with open(fname, newline='') as csvfile:
reader = csv.DictReader(csvfile)
row = next(reader)
row['TIME'] = timestamp
yield row
except StopIteration:
logging.debug((
"Truncated monitor file contains no rows!"
))
continue
# use a list in case pd.DataFrame() can pre-allocate ahead of time
rows = list(row_generator())
if len(rows) == 0:
workdir, _, fuzzer, target, program, run = path_split_last(campaign, 5)
name = f"{fuzzer}/{target}/{program}/{run}"
logfile = os.path.join(workdir, "log",
f"{name.replace('/', '_')}_container.log")
logging.warning(
"%s contains no monitor logs. Check the corresponding campaign "
"log file for more information: %s", name, logfile
)
df = pd.DataFrame(rows)
df.set_index('TIME', inplace=True)
df.fillna(0, inplace=True)
df = df.astype(int)
del rows
return df
def process_one_campaign(path):
logging.info("Processing %s", path)
_, fuzzer, target, program, run = path_split_last(path, 4)
tarball = os.path.join(path, "ball.tar")
istarball = False
if os.path.isfile(tarball):
istarball = True
dumpdir = mkdtemp(dir=tmpdir)
logging.debug("Campaign is tarballed. Extracting to %s", dumpdir)
extract_monitor_dumps(tarball, dumpdir)
else:
dumpdir = path
df = None
try:
df = generate_monitor_df(os.path.join(dumpdir, "monitor"), path)
except Exception as ex:
name = f"{fuzzer}/{target}/{program}/{run}"
logging.exception("Encountered exception when processing %s. Details: "
"%s", name, ex)
finally:
if istarball:
clear_dir(dumpdir)
os.rmdir(dumpdir)
return fuzzer, target, program, run, df
def collect_experiment_data(workdir, workers):
def init(*args):
global tmpdir
tmpdir, = tuple(args)
experiment = ddr()
tmpdir = os.path.join(workdir, "tmp")
ensure_dir(tmpdir)
with Pool(processes=workers, initializer=init, initargs=(tmpdir,)) as pool:
results = pool.starmap(process_one_campaign,
((path,) for path in find_campaigns(workdir))
)
for fuzzer, target, program, run, df in results:
if df is not None:
experiment[fuzzer][target][program][run] = df
else:
# TODO add an empty df so that the run is accounted for
name = f"{fuzzer}/{target}/{program}/{run}"
logging.warning("%s has been omitted!", name)
return experiment
def get_ttb_from_df(df):
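    # The monitor frame is assumed to hold one column pair per bug, "<bug>_R"
    # (reached) and "<bug>_T" (triggered); the first timestamp with a non-zero
    # count gives the time-to-reach / time-to-trigger.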
reached = {}
triggered = {}
bugs = set(x[:-2] for x in df.columns)
logging.debug("Bugs found: %s", bugs)
for bug in bugs:
R = df[df[f"{bug}_R"] > 0]
if not R.empty:
reached[bug] = int(R.index[0])
T = df[df[f"{bug}_T"] > 0]
if not T.empty:
triggered[bug] = int(T.index[0])
return reached, triggered
def default_to_regular(d):
if isinstance(d, defaultdict):
d = {k: default_to_regular(v) for k, v in d.items()}
return d
def get_experiment_summary(experiment):
summary = ddr()
for fuzzer, f_data in experiment.items():
for target, t_data in f_data.items():
for program, p_data in t_data.items():
for run, df in p_data.items():
reached, triggered = get_ttb_from_df(df)
summary[fuzzer][target][program][run] = {
"reached": reached,
"triggered": triggered
}
return default_to_regular(summary)
def configure_verbosity(level):
mapping = {
0: logging.WARNING,
1: logging.INFO,
2: logging.DEBUG
}
# will raise exception when level is invalid
numeric_level = mapping[level]
logging.basicConfig(level=numeric_level)
def main():
args = parse_args()
configure_verbosity(args.verbose)
    experiment = collect_experiment_data(args.workdir, args.workers)
summary = get_experiment_summary(experiment)
output = {
'results': summary,
# TODO add configuration options and other experiment parameters
}
data = json.dumps(output).encode()
if args.outfile == "-":
sys.stdout.buffer.write(data)
else:
with open(args.outfile, "w+") as f:
#f.write(data)
json.dump(output, f, indent=4)
if __name__ == '__main__':
main()
| 8,163 | 31.787149 | 103 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/tools/captain/gather_detected.py
|
#!/usr/bin/env python3
from json import dump, load
from os import walk
if __name__ == '__main__':
detected = {}
_, _, filenames = next(walk('./workdir/poc/'))
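    # PoC files are assumed to be named '<fuzzer>_..._<6-char bug id>.<3-char
    # extension>', so f[:-4][-6:] below extracts the bug id.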
for f in filenames:
try:
fuzzer = f[:f.index('_')]
except ValueError:
continue
bug = f[:-4][-6:]
        # keep only well-formed bug ids (an embedded '_' means the name did not parse)
        if '_' not in bug:
            if fuzzer not in detected:
                detected[fuzzer] = []
            if bug not in detected[fuzzer]:
                detected[fuzzer].append(bug)
results = {'reached': {}, 'triggered': {}, 'detected': detected}
with open('./benchd_results') as infile:
bench = load(infile)['results']
for fuzzer in bench:
for program in bench[fuzzer]:
for target in bench[fuzzer][program]:
for run in bench[fuzzer][program][target]:
for metric in bench[fuzzer][program][target][run]:
for bug in bench[fuzzer][program][target][run][metric]:
if fuzzer not in results[metric]:
results[metric][fuzzer] = []
if bug not in results[metric][fuzzer]:
results[metric][fuzzer].append(bug)
with open('./final_results', 'w+') as outfile:
dump(results, outfile, indent=4)
| 1,420 | 35.435897 | 83 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/target selection/targsel.py
|
#!/usr/bin/env python3
import os
import subprocess
import sys
TARGET = ''
PREFIX = ''
FUZZER_DIR = ''
COMMIT = ''
COMMITS = []
def run_cmd(command_array, **kwargs):
return subprocess.run(command_array, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, **kwargs)
def run_cmd_warn_msg(command, message, **kwargs):
if run_cmd(command, **kwargs).returncode != 0:
print(f'WARNING: {message}')
def run_cmd_error_msg(command, message, **kwargs):
code = run_cmd(command, **kwargs).returncode
if code != 0:
print(f'ERROR: {message}')
sys.exit(code)
def get_output(command_array, **kwargs):
return subprocess.run(command_array, capture_output=True, text=True, **kwargs).stdout.strip('\n"')
def get_sqlite3_commits():
global COMMITS
with open(f'{PREFIX}/commits', 'r') as file:
lines = file.readlines()
for line in lines:
COMMITS.append(line[0:8])
def compile_targets():
targets = []
if TARGET == 'poppler':
run_cmd_warn_msg([f'{PREFIX}/../../poppler_compile.sh'], "Could not compile fuzz targets!")
else:
run_cmd_warn_msg([f'{PREFIX}_compile.sh'], "Could not compile fuzz targets!")
if TARGET == 'lua':
run_cmd_error_msg(['git', '-C', PREFIX, 'checkout', '.'], "Could not do git checkout .!")
if TARGET == 'php':
for file in os.listdir(FUZZER_DIR):
if 'php-fuzz-' in file:
targets.append(file)
if TARGET == 'openssl':
for file in os.listdir(FUZZER_DIR):
if '.' not in file:
if '-test' not in file:
if 'corp' not in file:
targets.append(file)
if TARGET == 'sqlite3':
if 'fuzzcheck' in os.listdir(FUZZER_DIR):
targets.append('fuzzcheck')
if TARGET == 'poppler':
if 'pdftoppm' in os.listdir(FUZZER_DIR):
targets.append('pdftoppm')
if 'pdfimages' in os.listdir(FUZZER_DIR):
targets.append('pdfimages')
if TARGET == 'lua':
if 'lua' in os.listdir(FUZZER_DIR):
targets.append('lua')
if TARGET == 'libxml2':
if 'xmllint' in os.listdir(FUZZER_DIR):
targets.append('xmllint')
if TARGET == 'libsndfile':
if 'sndfile_fuzzer' in os.listdir(FUZZER_DIR):
targets.append('sndfile_fuzzer')
if TARGET == 'libpng':
if 'libpng_read_fuzzer' in os.listdir(FUZZER_DIR):
targets.append('libpng_read_fuzzer')
if TARGET == 'libtiff':
if 'tiffcp' in os.listdir(FUZZER_DIR):
targets.append('tiffcp')
if 'tiff_read_rgba_fuzzer' in os.listdir(FUZZER_DIR):
targets.append('tiff_read_rgba_fuzzer')
return targets
def hash_targets(targets):
hashes = {}
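    # sha256sum prints '<digest>  <filename>'; keep only the digest field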
for target in targets:
hashes[target] = get_output(['sha256sum', target], cwd=FUZZER_DIR).split()[0]
return hashes
def compare_hashes(new_hashes, old_hashes):
equal = 0
for target in new_hashes:
if target in old_hashes:
if new_hashes[target] == old_hashes[target]:
equal += 1
return equal
def checkout_parent(targets=()):
global COMMIT, COMMITS
if TARGET == 'sqlite3':
run_cmd(['rm', '-rf', 'sqlite.tar.gz', 'repo'], cwd=PREFIX)
if COMMIT == '':
COMMIT = COMMITS[1]
else:
COMMIT = COMMITS[COMMITS.index(COMMIT) + 1]
run_cmd_error_msg(['curl', f'https://www.sqlite.org/src/tarball/{COMMIT}/SQLite-{COMMIT}.tar.gz', '-o', 'sqlite.tar.gz'], "Could not curl parent commit!", cwd=PREFIX)
run_cmd(['mkdir', '-p', 'repo'], cwd=PREFIX)
        run_cmd_error_msg(['tar', '-C', 'repo', '--strip-components=1', '-xzf', 'sqlite.tar.gz'], "Could not extract parent commit tarball!", cwd=PREFIX)
else:
if TARGET not in ['poppler', 'lua']:
run_cmd_error_msg(['make', 'clean'], "Could not make clean!", cwd=PREFIX)
run_cmd_error_msg(['git', '-C', PREFIX, 'checkout', 'HEAD^1'], "Could not checkout parent commit!")
for target in targets:
run_cmd(['rm', target], cwd=FUZZER_DIR)
def get_commit_hash():
if TARGET == 'sqlite3':
return COMMIT
return get_output(['git', '-C', PREFIX, 'log', '-1', '--format="%H"'])
if __name__ == '__main__':
if len(sys.argv) != 2:
print(f'Usage: {sys.argv[0]} <library>')
sys.exit()
if sys.argv[1] not in ['php', 'openssl', 'sqlite3', 'poppler', 'lua', 'libxml2', 'libsndfile', 'libpng', 'libtiff']:
print('<library> has to be one of [php, openssl, sqlite3, poppler, lua, libxml2, libsndfile, libpng, libtiff]')
sys.exit()
TARGET = sys.argv[1]
PREFIX = f'/home/ubuntu/targsel/{TARGET}'
if TARGET == 'php':
FUZZER_DIR = f'{PREFIX}/sapi/fuzzer/'
if TARGET == 'openssl':
FUZZER_DIR = f'{PREFIX}/fuzz/'
if TARGET == 'sqlite3':
FUZZER_DIR = f'{PREFIX}/repo/'
COMMIT = ''
get_sqlite3_commits()
checkout_parent()
if TARGET == 'poppler':
FUZZER_DIR = PREFIX
PREFIX = f'{PREFIX}/{TARGET}'
if TARGET == 'libsndfile':
FUZZER_DIR = f'{PREFIX}/ossfuzz/'
if TARGET in ['lua', 'libxml2', 'libpng', 'libtiff']:
FUZZER_DIR = PREFIX
iteration = 0
equal_hashes = 0
total_hashes = 0
percentage = 0.0
fuzz_targets = []
hashes = {}
try:
while True:
fuzz_targets = compile_targets()
for target in fuzz_targets:
if target not in hashes:
hashes[target] = ''
            new_hashes = hash_targets(fuzz_targets)
equal_hashes += compare_hashes(new_hashes, hashes)
if iteration != 0:
total_hashes += len(fuzz_targets)
if total_hashes != 0:
percentage = equal_hashes / total_hashes * 100
commit = get_commit_hash()
print(f'iteration={iteration} \t commit={commit} \t equal={equal_hashes} \t total={total_hashes} \t percentage={percentage}')
checkout_parent(fuzz_targets)
hashes = new_hashes
iteration += 1
except KeyboardInterrupt:
        print('\nINFO: Program was interrupted by the user.')
| 6,250 | 33.535912 | 174 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/target selection/visualizations/identical_targets_pie.py
|
#!/usr/bin/python3
import plotly.graph_objects as go
from plotly.subplots import make_subplots
labels = ['Identical Targets', 'Different Targets']
libs = {'php': [32198, 50454-32198],
'openssl': [57081, 90177-57081],
'sqlite3': [0, 483-0],
'poppler': [1686, 3840-1686],
'lua': [462, 2285-462],
'libxml2': [0, 625-0],
'libpng': [472, 1159-472],
'libtiff': [853, 1602-853],
'libsndfile': [154, 241-154]}
specs = [[{'type':'domain'}, {'type':'domain'}, {'type':'domain'}],
[{'type':'domain'}, {'type':'domain'}, {'type':'domain'}],
[{'type':'domain'}, {'type':'domain'}, {'type':'domain'}]]
fig = make_subplots(3, 3, specs=specs, subplot_titles=list(libs.keys()))
colors = ['rgb(0,128,0)', 'rgb(255,228,181)']
fig.add_trace(go.Pie(labels=labels, marker_colors=colors, textfont=dict(size=20), values=libs['php'], name='php'), 1, 1)
fig.add_trace(go.Pie(labels=labels, marker_colors=colors, textfont=dict(size=20), values=libs['openssl'], name='openssl'), 1, 2)
fig.add_trace(go.Pie(labels=labels, marker_colors=colors, textfont=dict(size=20), values=libs['sqlite3'], name='sqlite3'), 1, 3)
fig.add_trace(go.Pie(labels=labels, marker_colors=colors, textfont=dict(size=20), values=libs['poppler'], name='poppler'), 2, 1)
fig.add_trace(go.Pie(labels=labels, marker_colors=colors, textfont=dict(size=20), values=libs['lua'], name='lua'), 2, 2)
fig.add_trace(go.Pie(labels=labels, marker_colors=colors, textfont=dict(size=20), values=libs['libxml2'], name='libxml2'), 2, 3)
fig.add_trace(go.Pie(labels=labels, marker_colors=colors, textfont=dict(size=20), values=libs['libpng'], name='libpng'), 3, 1)
fig.add_trace(go.Pie(labels=labels, marker_colors=colors, textfont=dict(size=20), values=libs['libtiff'], name='libtiff'), 3, 2)
fig.add_trace(go.Pie(labels=labels, marker_colors=colors, textfont=dict(size=20), values=libs['libsndfile'], name='libsndfile'), 3, 3)
fig.update_layout(
title='Identical Fuzz Targets per Library',
titlefont_size=36,
legend=dict(font_size=26)
)
fig.update_annotations(font_size=26)
fig.show()
| 2,194 | 50.046512 | 134 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/target selection/visualizations/targsel_bubbles.py
|
#!/usr/bin/python3
import math
import sys
import plotly.graph_objects as go
prefix = './targsel/'
logfiles = ['libpng-01.log', 'libpng-03.log', 'libsndfile-01.log', 'libtiff-02.log', 'libxml2-01.log', 'openssl-01.log', 'openssl-03.log', 'php-02.log', 'poppler-02.log',
'sqlite3-02.log', 'libpng-02.log', 'libpng-04.log', 'libtiff-01.log', 'libtiff-03.log', 'lua-01.log', 'openssl-02.log', 'php-01.log', 'poppler-01.log', 'sqlite3-01.log']
results = {'php': [], 'openssl': [], 'sqlite3': [], 'poppler': [], 'lua': [], 'libxml2': [], 'libsndfile': [], 'libpng': [], 'libtiff': []}
for logfile in logfiles:
lib = logfile.split('-')[0]
prev_eq = 0
prev_tot = 0
with open(f'{prefix}{logfile}', 'r') as logs:
for line in logs:
if 'iteration=' in line:
split = line.split()
equal = int(split[2].split('=')[1])
total = int(split[3].split('=')[1])
if total == 0 or total == prev_tot:
continue
results[lib].append((equal - prev_eq) * 100 / (total - prev_tot))
# print(f'{equal}\t{total}\t{equal - prev_eq}\t{total - prev_tot}\t{((equal - prev_eq) * 100 / (total - prev_tot))}%')
prev_eq = equal
prev_tot = total
fig = go.Figure()
for library in results:
xs = []
ys = []
zs = []
for i in set(results[library]):
xs.append(library)
ys.append(i)
zs.append(5 * math.log(results[library].count(i) * 10000 / len(results[library])))
fig.add_trace(go.Scatter(
x=xs,
y=ys,
mode='markers',
marker_size=zs)
)
fig.update_layout(xaxis_title='Library', yaxis_title='Percentage of identical fuzz targets (%)', yaxis_range=[0,100], showlegend=False)
fig.show()
| 1,813 | 38.434783 | 177 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/target selection/visualizations/changed_targets_per_library.py
|
#!/usr/bin/python3
import plotly.graph_objects as go
prefix = './targsel/'
logfiles = ['libpng-01.log', 'libpng-03.log', 'libsndfile-01.log', 'libtiff-02.log', 'libxml2-01.log', 'openssl-01.log', 'openssl-03.log', 'php-02.log', 'poppler-02.log',
'sqlite3-02.log', 'libpng-02.log', 'libpng-04.log', 'libtiff-01.log', 'libtiff-03.log', 'lua-01.log', 'openssl-02.log', 'php-01.log', 'poppler-01.log', 'sqlite3-01.log']
results = {'php': [], 'openssl': [], 'sqlite3': [], 'poppler': [], 'lua': [], 'libxml2': [], 'libsndfile': [], 'libpng': [], 'libtiff': []}
for logfile in logfiles:
lib = logfile.split('-')[0]
prev_eq = 0
prev_tot = 0
with open(f'{prefix}{logfile}', 'r') as logs:
for line in logs:
if 'iteration=' in line:
split = line.split()
equal = int(split[2].split('=')[1])
total = int(split[3].split('=')[1])
if total == 0 or total == prev_tot:
continue
results[lib].append((equal - prev_eq) * 100 / (total - prev_tot))
# print(f'{equal}\t{total}\t{equal - prev_eq}\t{total - prev_tot}\t{((equal - prev_eq) * 100 / (total - prev_tot))}%')
prev_eq = equal
prev_tot = total
fig = go.Figure()
for library in results:
fig.add_trace(go.Box(name=library, y=results[library], marker_color='#3D9970', boxpoints="all"))
fig.update_layout(title='Identical fuzz targets per commit per library', xaxis_title='Library', yaxis_title='Percentage of identical fuzz targets', showlegend=False)
fig.show()
| 1,597 | 48.9375 | 177 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/target selection/visualizations/commits_processed.py
|
#!/usr/bin/python3
import plotly.graph_objects as go
libs = ['php', 'openssl', 'sqlite3', 'poppler', 'lua', 'libxml2', 'libsndfile', 'libpng', 'libtiff']
equal = [7821, 7847, 483, 1919, 2285, 625, 1158, 801, 241]
total = [127000, 30000, 25000, 7000, 5000, 5000, 4000, 4000, 3000]
perc = [round((equal[i]/total[i]*100),2) for i in range(len(libs))]
inv = [(100.0-perc[i]) for i in range(len(libs))]
colors = ['rgb(0,128,0)', 'rgb(255,228,181)']
fig = go.Figure(data=[
go.Bar(name='Processed', x=libs, y=perc, marker_color=colors[0]),
go.Bar(name='Unprocessed', x=libs, y=inv, marker_color=colors[1])])
fig.update_layout(
title='Processed commits per library',
titlefont_size=36,
legend=dict(font_size=26),
xaxis=dict(
title='Library',
titlefont_size=26,
tickfont_size=20
),
yaxis=dict(
title='Commits processed from the repository (%)',
titlefont_size=26,
tickfont_size=20
)
)
fig.update_layout(barmode='stack')
fig.show()
| 1,010 | 29.636364 | 100 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/target selection/visualizations/targsel_violins.py
|
#!/usr/bin/python3
import plotly.graph_objects as go
prefix = './targsel/'
logfiles = ['libpng-01.log', 'libpng-03.log', 'libsndfile-01.log', 'libtiff-02.log', 'libxml2-01.log', 'openssl-01.log', 'openssl-03.log', 'php-02.log', 'poppler-02.log',
'sqlite3-02.log', 'libpng-02.log', 'libpng-04.log', 'libtiff-01.log', 'libtiff-03.log', 'lua-01.log', 'openssl-02.log', 'php-01.log', 'poppler-01.log', 'sqlite3-01.log']
results = {'php': [], 'openssl': [], 'sqlite3': [], 'poppler': [], 'lua': [], 'libxml2': [], 'libsndfile': [], 'libpng': [], 'libtiff': []}
for logfile in logfiles:
lib = logfile.split('-')[0]
prev_eq = 0
prev_tot = 0
with open(f'{prefix}{logfile}', 'r') as logs:
for line in logs:
if 'iteration=' in line:
split = line.split()
equal = int(split[2].split('=')[1])
total = int(split[3].split('=')[1])
if total == 0 or total == prev_tot:
continue
results[lib].append((equal - prev_eq) * 100 / (total - prev_tot))
# print(f'{equal}\t{total}\t{equal - prev_eq}\t{total - prev_tot}\t{((equal - prev_eq) * 100 / (total - prev_tot))}%')
prev_eq = equal
prev_tot = total
fig = go.Figure()
for library in results:
fig.add_trace(go.Violin(x=[library for i in range(len(results[library]))],
y=results[library],
name=library,
points="all",
box_visible=False,
meanline_visible=False))
fig.update_layout(xaxis_title='Library', yaxis_title='Proportion of identical fuzz targets', yaxis_range=[0,100], showlegend=False)
fig.show()
| 1,773 | 46.945946 | 177 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/target selection/visualizations/identical_targets.py
|
#!/usr/bin/python3
import plotly.graph_objects as go
libs = ['php', 'openssl', 'sqlite3', 'poppler', 'lua', 'libxml2', 'libsndfile', 'libpng', 'libtiff']
equal = [32198, 57081, 0, 1686, 462, 0, 472, 853, 154]
total = [50454, 90177, 483, 3840, 2285, 625, 1159, 1602, 241]
perc = [round((equal[i]/total[i]*100),2) for i in range(len(libs))]
inv = [(100.0-perc[i]) for i in range(len(libs))]
colors = ['rgb(0,128,0)', 'rgb(255,228,181)']
fig = go.Figure(data=[
go.Bar(name='Identical Targets', x=libs, y=perc, marker_color=colors[0]),
go.Bar(name='Different Targets', x=libs, y=inv, marker_color=colors[1])])
fig.update_layout(
title='Identical fuzz targets per library',
titlefont_size=36,
legend=dict(font_size=26),
xaxis=dict(
title='Library',
titlefont_size=26,
tickfont_size=20
),
yaxis=dict(
title='Identical fuzz targets (%)',
titlefont_size=26,
tickfont_size=20
)
)
fig.update_layout(barmode='stack')
fig.show()
| 1,005 | 29.484848 | 100 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/target selection/visualizations/commits_per_library.py
|
#!/usr/bin/python3
import plotly.graph_objects as go
libs = ['php', 'openssl', 'sqlite3', 'poppler', 'lua', 'libxml2', 'libsndfile', 'libpng', 'libtiff']
targets = [7821, 7847, 483, 1919, 2285, 625, 1158, 801, 241]
colors = ['rgb(0,128,0)', 'rgb(255,228,181)']
fig = go.Figure(data=[
go.Bar(name='Processed', x=libs, y=targets, text=targets, textfont=dict(size=20),
textposition='auto', marker_color=colors[0])])
fig.update_layout(
title='Commits processed per library',
titlefont_size=36,
legend=dict(font_size=26),
xaxis=dict(
title='Library',
titlefont_size=26,
tickfont_size=20
),
yaxis=dict(
title='Number of commits processed',
titlefont_size=26,
tickfont_size=20
)
)
fig.update_layout(barmode='stack')
fig.show()
| 816 | 26.233333 | 100 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/target selection/visualizations/targets_per_library.py
|
#!/usr/bin/python3
import plotly.graph_objects as go
libs = ['php', 'openssl', 'sqlite3', 'poppler', 'lua', 'libxml2', 'libsndfile', 'libpng', 'libtiff']
targets = [9, 12, 1, 2, 1, 1, 1, 2, 1]
colors = ['rgb(0,128,0)', 'rgb(255,228,181)']
fig = go.Figure(data=[
go.Bar(name='Processed', x=libs, y=targets, text=targets, textfont=dict(size=20),
textposition='auto', marker_color=colors[0])])
fig.update_layout(
title='Fuzz targets per library',
titlefont_size=36,
legend=dict(font_size=26),
xaxis=dict(
title='Library',
titlefont_size=26,
tickfont_size=20
),
yaxis=dict(
title='Number of fuzz targets',
titlefont_size=26,
tickfont_size=20
)
)
fig.update_layout(barmode='stack')
fig.show()
| 784 | 25.166667 | 100 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/archive/target-selection-compare.py
|
import json
RUNS = [[x for x in range(16, 94 + 1)],
[x for x in range(95, 181 + 1)],
[x for x in range(183, 269 + 1)],
[x for x in range(271, 356 + 1)],
[x for x in range(358, 444 + 1)],
[x for x in range(446, 532 + 1)],
[x for x in range(534, 620 + 1)],
[x for x in range(622, 710 + 1)],
[x for x in range(712, 798 + 1)],
[x for x in range(800, 888 + 1)],
[x for x in range(890, 980 + 1)],
[x for x in range(982, 1068 + 1)]]
def check_sha_overall():
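    # Each per-run 'sha' file is assumed to map {fuzzer: {target: sha256}};
    # successive runs are compared pairwise to count unchanged fuzz targets.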
cnt_total = 0
cnt_equal = 0
missing_targets = 0
prev_shas = None
    # RUNS is a list of run-index ranges, so iterate the nested lists
    for j in RUNS:
        for i in j:
            with open(f'/srv/results/real/{i:04d}/sha', 'r') as file:
                shas = json.load(file)
            if prev_shas is None:
                prev_shas = shas
                continue
            for fuzzer in shas:
                for target in shas[fuzzer]:
                    cnt_total += 1
                    try:
                        if shas[fuzzer][target] == prev_shas[fuzzer][target]:
                            cnt_equal += 1
                    except KeyError:
                        missing_targets += 1
            prev_shas = shas
return cnt_equal, cnt_total, missing_targets
def check_sha_per_fuzzer():
overall = {'libfuzzer': [], 'honggfuzz': [], 'aflplusplus': []}
cnt_total = {}
cnt_equal = {}
missing_targets = {}
prev_shas = None
for j in RUNS:
for i in j:
with open(f'/srv/results/real/{i:04d}/sha', 'r') as file:
shas = json.load(file)
if prev_shas is None:
prev_shas = shas
continue
for fuzzer in shas:
if fuzzer not in cnt_equal:
cnt_equal[fuzzer] = 0
if fuzzer not in cnt_total:
cnt_total[fuzzer] = 0
if fuzzer not in missing_targets:
missing_targets[fuzzer] = 0
for target in shas[fuzzer]:
cnt_total[fuzzer] += 1
try:
if shas[fuzzer][target] == prev_shas[fuzzer][target]:
cnt_equal[fuzzer] += 1
except KeyError:
missing_targets[fuzzer] += 1
prev_shas = shas
print(cnt_equal, cnt_total, missing_targets)
for fuzzer in overall:
overall[fuzzer].append(cnt_equal[fuzzer] / cnt_total[fuzzer] * 100)
cnt_total = {}
cnt_equal = {}
missing_targets = {}
print(overall)
def check_sha_per_target():
default = {'openssl-bignum': 0, 'openssl-asn1parse': 0, 'openssl-x509': 0, 'openssl-server': 0, 'openssl-client': 0}
overall = {'openssl-bignum': [], 'openssl-asn1parse': [], 'openssl-x509': [], 'openssl-server': [],
'openssl-client': []}
cnt_total = default.copy()
cnt_equal = default.copy()
missing_targets = default.copy()
prev_shas = None
for j in RUNS:
for i in j:
with open(f'/srv/results/real/{i:04d}/sha', 'r') as file:
shas = json.load(file)
if prev_shas is None:
prev_shas = shas
continue
for fuzzer in shas:
for target in shas[fuzzer]:
cnt_total[target] += 1
try:
if shas[fuzzer][target] == prev_shas[fuzzer][target]:
cnt_equal[target] += 1
except KeyError:
missing_targets[target] += 1
prev_shas = shas
print(cnt_equal, cnt_total, missing_targets, '\n')
for target in overall:
overall[target].append(cnt_equal[target] / cnt_total[target] * 100)
cnt_total = default.copy()
cnt_equal = default.copy()
missing_targets = default.copy()
print(overall)
if __name__ == '__main__':
check_sha_per_fuzzer()
check_sha_per_target()
equal, total, missing = check_sha_overall()
print(
f'Equal hashes: {equal}\tTotal hashes: {total}\tPercentage: {equal / total * 100}%\tMissing targets: {missing}')
| 4,153 | 34.810345 | 120 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/archive/target-selection-visualize.py
|
def bar_equal_hashes():
import plotly.graph_objects as go
runs = [x for x in range(1, 13)]
equal = [819, 924, 744, 891, 834, 594, 750, 842, 1083, 864, 846, 746]
total = [1170, 1290, 1275, 1275, 1275, 1260, 1245, 1245, 1275, 1245, 1230, 1275]
fig = go.Figure(data=[
go.Bar(name='Equal hashes', x=runs, y=equal),
go.Bar(name='Total hashes', x=runs, y=total)
])
fig.update_layout(barmode='group', xaxis_title='Index of successive run', yaxis_title='Number of hashes')
fig.add_hline(828)
fig.add_hline(1255)
fig.show()
def box_equal_hashes():
equal = [819, 924, 744, 891, 834, 594, 750, 842, 1083, 864, 846, 746]
total = [1170, 1290, 1275, 1275, 1275, 1260, 1245, 1245, 1275, 1245, 1230, 1275]
percent = []
for i in range(0, 12):
percent.append(equal[i] / total[i] * 100)
import plotly.express as px
fig = px.box(percent)
fig.update_layout(xaxis_title='OpenSSL', yaxis_title='Percentage of equal hashes')
fig.show()
def box_per_fuzzer():
percent = {'libFuzzer': [70.0, 71.95402298850576, 57.674418604651166, 70.23255813953489, 64.65116279069767,
46.588235294117645, 60.71428571428571, 66.42857142857143, 85.11627906976744,
69.76190476190476, 68.67469879518072, 58.6046511627907],
'Honggfuzz': [70.0, 71.95402298850576, 58.837209302325576, 69.06976744186046, 64.65116279069767,
46.588235294117645, 60.71428571428571, 66.42857142857143, 85.11627906976744,
69.76190476190476, 68.67469879518072, 58.6046511627907],
'AFL++': [70.0, 71.95402298850576, 58.837209302325576, 71.3953488372093, 64.65116279069767,
46.588235294117645, 60.71428571428571, 67.61904761904762, 85.11627906976744,
69.76190476190476, 68.67469879518072, 59.76744186046512]}
import plotly.express as px
fig = px.box(percent)
fig.update_layout(xaxis_title='Fuzzer', yaxis_title='Percentage of equal hashes')
fig.show()
def box_per_target():
percent = {'openssl-bignum': [73.07692307692307, 72.41379310344827, 58.91472868217055, 72.09302325581395,
67.44186046511628, 47.05882352941176, 63.095238095238095, 68.25396825396825,
86.04651162790698, 70.23809523809523, 71.08433734939759, 60.85271317829457],
'openssl-asn1parse': [73.07692307692307, 72.41379310344827, 58.91472868217055, 72.09302325581395,
65.11627906976744, 47.05882352941176, 63.095238095238095, 68.25396825396825,
86.04651162790698, 70.23809523809523, 71.08433734939759, 60.85271317829457],
'openssl-x509': [73.07692307692307, 72.41379310344827, 58.91472868217055, 72.09302325581395,
67.44186046511628, 47.05882352941176, 63.095238095238095, 68.25396825396825,
86.04651162790698, 70.23809523809523, 71.08433734939759, 60.85271317829457],
'openssl-server': [65.38461538461539, 71.26436781609196, 57.751937984496124, 67.44186046511628,
61.627906976744185, 45.88235294117647, 57.14285714285714, 64.68253968253968,
83.72093023255815, 69.04761904761905, 65.06024096385542, 56.201550387596896],
'openssl-client': [65.38461538461539, 71.26436781609196, 57.751937984496124, 67.44186046511628,
61.627906976744185, 45.88235294117647, 57.14285714285714, 64.68253968253968,
83.72093023255815, 69.04761904761905, 65.06024096385542, 56.201550387596896]}
import plotly.express as px
fig = px.box(percent)
fig.update_layout(xaxis_title='Fuzz Target', yaxis_title='Percentage of equal hashes')
fig.show()
if __name__ == '__main__':
bar_equal_hashes()
box_equal_hashes()
box_per_fuzzer()
box_per_target()
| 4,070 | 52.565789 | 113 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/archive/target-selection.py
|
import os
import common as c
REPO_LOCATION = f'../{c.TARGET}/'
CURRENT_COMMIT = None
def init_repo():
global CURRENT_COMMIT
c.run_cmd_disable_output(['git', '-C', REPO_LOCATION, 'reset', '--hard'])
c.run_cmd_disable_output(['git', '-C', REPO_LOCATION, 'clean', '-df'])
c.run_cmd_disable_output(['git', '-C', REPO_LOCATION, 'checkout', 'master'])
c.run_cmd_disable_output(['git', '-C', REPO_LOCATION, 'pull'])
c.run_cmd_disable_output(['git', '-C', REPO_LOCATION, 'checkout', '54c0480dac6c25f262d537048503a660aaa4b568'])
CURRENT_COMMIT = c.get_stdout(c.run_cmd_capture_output(['git', '-C', REPO_LOCATION, 'log', '-1', '--format="%H"']))
c.log_info(f'The current commit is {CURRENT_COMMIT}.')
def fuzz_current_commit():
c.log_info(f'Checking the SHA256 for the fuzz targets of commit {CURRENT_COMMIT}...')
new_result_index = int(max(os.listdir('/srv/results/real'))) + 1
new_result_index = f'{new_result_index:04d}'
c.run_cmd_enable_output(['mkdir', f'/srv/results/real/{new_result_index}'])
c.configure_settings(new_result_index, 'real', commit=CURRENT_COMMIT, timeout='10s')
c.run_cmd_enable_output(['rm', '-rf', './tools/captain/workdir', f'./targets/{c.TARGET}/repo'])
c.run_cmd_enable_output(['mkdir', f'./targets/{c.TARGET}/repo'])
c.run_cmd_enable_output(['cp', '-a', f'{REPO_LOCATION}.', f'./targets/{c.TARGET}/repo'])
c.run_cmd_enable_output(['cp', f'./targets/{c.TARGET}/src/abilist.txt', f'./targets/{c.TARGET}/repo'])
c.run_cmd_enable_output(['./run.sh'], cwd='./tools/captain/')
c.log_info('The fuzzing process has finished.')
c.log_info('Gathering results...')
c.save_sha(new_result_index, 'real')
c.log_info(f'The results of this fuzzing campaign were stored in /srv/results/real/{new_result_index}/.')
def checkout_prev_commit():
global CURRENT_COMMIT
c.run_cmd_disable_output(['git', '-C', REPO_LOCATION, 'checkout', 'HEAD^1'])
CURRENT_COMMIT = c.get_stdout(c.run_cmd_capture_output(['git', '-C', REPO_LOCATION, 'log', '-1', '--format="%H"']))
c.log_info(f'The current commit is {CURRENT_COMMIT}.')
if __name__ == '__main__':
try:
init_repo()
c.empty_seed_corpus()
fuzz_current_commit()
while True:
checkout_prev_commit()
fuzz_current_commit()
except KeyboardInterrupt:
        print('\nINFO: Program was interrupted by the user.')
| 2,426 | 43.944444 | 119 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/archive/commit-monitor.py
|
import os
import time
import common as c
POLL_TIME = 60 # every minute
REPO_LOCATION = f'../{c.TARGET}/'
CURRENT_COMMIT = None
def init_repo():
global CURRENT_COMMIT
c.run_cmd_disable_output(['git', '-C', REPO_LOCATION, 'reset', '--hard'])
c.run_cmd_disable_output(['git', '-C', REPO_LOCATION, 'clean', '-df'])
c.run_cmd_disable_output(['git', '-C', REPO_LOCATION, 'checkout', 'master'])
c.run_cmd_disable_output(['git', '-C', REPO_LOCATION, 'pull'])
def fuzz_current_commit(commit_sha):
new_result_index = int(max(os.listdir('/srv/results/real'))) + 1
new_result_index = f'{new_result_index:04d}'
c.run_cmd_enable_output(['mkdir', f'/srv/results/real/{new_result_index}'])
c.configure_settings(new_result_index, 'real', commit=commit_sha, timeout='10m')
c.run_cmd_enable_output(['rm', '-rf', './tools/captain/workdir', f'./targets/{c.TARGET}/repo'])
c.run_cmd_enable_output(['mkdir', f'./targets/{c.TARGET}/repo'])
c.run_cmd_enable_output(['cp', '-a', f'{REPO_LOCATION}.', f'./targets/{c.TARGET}/repo'])
c.run_cmd_enable_output(['cp', f'./targets/{c.TARGET}/src/abilist.txt', f'./targets/{c.TARGET}/repo'])
c.run_cmd_enable_output(['./run.sh'], cwd='./tools/captain/')
c.log_info('The fuzzing process has finished.')
c.log_info('Gathering results...')
c.save_coverage_statistics(new_result_index, 'real')
c.save_nr_crashes(new_result_index, 'real')
c.save_new_corpus()
c.log_info(f'The results of this fuzzing campaign were stored in /srv/results/real/{new_result_index}/.')
def check_for_new_commits():
global CURRENT_COMMIT
c.run_cmd_disable_output(['git', '-C', REPO_LOCATION, 'pull'])
most_recent_commit = c.get_stdout(
c.run_cmd_capture_output(['git', '-C', REPO_LOCATION, 'log', '-1', '--format="%H"']))
if most_recent_commit != CURRENT_COMMIT:
c.log_info(f'Starting the fuzzing process for new commit {most_recent_commit}!')
fuzz_current_commit(most_recent_commit)
CURRENT_COMMIT = most_recent_commit
return True
return False
if __name__ == '__main__':
try:
init_repo()
c.initialize_seed_corpus()
while True:
start = time.time()
new = check_for_new_commits()
stop = time.time()
elapsed = int(stop - start)
if new:
c.log_info(f'Fuzzing commit {CURRENT_COMMIT} took {elapsed}s.')
else:
c.log_info(f'No new commits found. Sleeping for {POLL_TIME - elapsed}s...')
time.sleep(POLL_TIME - elapsed)
except KeyboardInterrupt:
        print('\nINFO: Program was interrupted by the user.')
| 2,692 | 39.80303 | 109 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/fuzz duration/fuzz-duration.py
|
import json
import os
import sys
import time
import common as c
TARGET = ''
REPO_LOCATION = ''
SETUP_LOCATION = ''
PATCH_LOCATION = ''
EXPERIMENT_TYPE = 'artificial'
BUGS = []
BUGS_ACTIVE = []
DURATIONS = []
ITERATIONS = 0
BASE_COMMITS = {'libpng': 'a37d4836519517bdce6cb9d956092321eca3e73b',
'libsndfile': '86c9f9eb7022d186ad4d0689487e7d4f04ce2b29',
'libtiff': 'c145a6c14978f73bb484c955eb9f84203efcb12e', # additional fetch step!
'libxml2': 'ec6e3efb06d7b15cf5a2328fabd3845acea4c815',
'lua': 'dbdc74dc5502c2e05e1c1e2ac894943f418c8431',
'openssl': '3bd5319b5d0df9ecf05c8baba2c401ad8e3ba130', # additional fetch step!
'php': 'bc39abe8c3c492e29bc5d60ca58442040bbf063b', # additional fetch step!
'poppler': '1d23101ccebe14261c6afc024ea14f29d209e760', # additional fetch step!
'sqlite3': '0000000000000000000000000000000000000000' # no git!
}
def checkout_base():
c.log_info(f'Checking out the base commit {BASE_COMMITS[TARGET]}.')
c.run_cmd_disable_output(['git', '-C', REPO_LOCATION, 'reset', '--hard'])
c.run_cmd_disable_output(['git', '-C', REPO_LOCATION, 'clean', '-df'])
c.run_cmd_disable_output(['git', '-C', REPO_LOCATION, 'checkout', 'master'])
c.run_cmd_disable_output(['git', '-C', REPO_LOCATION, 'checkout', BASE_COMMITS[TARGET]])
def apply_setup_patches():
try:
for file in os.listdir(SETUP_LOCATION):
if file.endswith('.patch'):
if c.run_cmd_disable_output(['patch', '-p1', '-d', REPO_LOCATION, '-i',
os.path.join(SETUP_LOCATION, file)]).returncode == 0:
c.log_info(f'Setup patch {file} has been applied successfully.')
else:
c.log_error(f'Setup patch {file} could not be applied!')
sys.exit(1)
    except FileNotFoundError:
        c.log_error('There are no setup patches for this target.')
def find_and_apply_patches():
try:
for file in os.listdir(PATCH_LOCATION):
if file.endswith('.patch'):
BUGS.append(os.path.join(PATCH_LOCATION, file))
BUGS_ACTIVE.append(False)
except Exception as e:
c.log_error('The patches were not found!')
c.log_error(e)
sys.exit(1)
for idx in range(len(BUGS)):
introduce_or_fix_bug(idx)
def introduce_or_fix_bug(bug_index):
if BUGS_ACTIVE[bug_index]:
c.log_info(f'Including bugfix {BUGS[bug_index]}.')
code = c.run_cmd_disable_output(['patch', '-p1', '-R', '-d', REPO_LOCATION, '-i', BUGS[bug_index]]).returncode
if code == 0:
BUGS_ACTIVE[bug_index] = False
else:
c.log_error(f'Bug {BUGS[bug_index]} is active yet it could not be patched...')
sys.exit(code)
else:
c.log_info(f'Including bug {BUGS[bug_index]}.')
code = c.run_cmd_disable_output(['patch', '-p1', '-d', REPO_LOCATION, '-i', BUGS[bug_index]]).returncode
if code == 0:
BUGS_ACTIVE[bug_index] = True
else:
c.log_error(f'Bug {BUGS[bug_index]} is inactive yet it could not be included...')
sys.exit(code)
def fuzz_commit(timeout):
c.log_info('Starting the fuzzing process!')
new_result_index = int(max(os.listdir(f'../results/{TARGET}/{EXPERIMENT_TYPE}'))) + 1
new_result_index = f'{new_result_index:04d}'
c.run_cmd_enable_output(['mkdir', f'../results/{TARGET}/{EXPERIMENT_TYPE}/{new_result_index}'])
c.configure_settings(new_result_index, EXPERIMENT_TYPE, TARGET, timeout=timeout)
c.run_cmd_enable_output(['rm', '-rf', './tools/captain/workdir', f'./targets/{TARGET}/repo', f'./targets/{TARGET}/freetype2'])
c.run_cmd_enable_output(['rm', '-rf', './tools/captain/benchd_results', './tools/captain/final_results'])
c.run_cmd_enable_output(['mkdir', f'./targets/{TARGET}/repo'])
c.run_cmd_enable_output(['cp', '-a', f'{REPO_LOCATION}.', f'./targets/{TARGET}/repo'])
if TARGET == 'poppler':
c.run_cmd_enable_output(['cp', '-a', '../freetype2', f'./targets/{TARGET}/'])
if TARGET == 'openssl':
c.run_cmd_enable_output(['cp', f'./targets/openssl/src/abilist.txt', f'./targets/{TARGET}/repo'])
c.run_cmd_enable_output(['./run.sh'], cwd='./tools/captain/')
c.log_info('The fuzzing process has finished.')
c.log_info('Gathering results...')
c.run_cmd_disable_output(['python3', 'gather_results.py', 'workdir/', 'benchd_results'], cwd='./tools/captain/')
c.run_cmd_enable_output(['python3', 'gather_detected.py'], cwd='./tools/captain/')
c.run_cmd_enable_output(['cp', './tools/captain/benchd_results', './tools/captain/final_results',
f'../results/{TARGET}/{EXPERIMENT_TYPE}/{new_result_index}'])
save_bug_status(new_result_index)
c.save_coverage_statistics(new_result_index, EXPERIMENT_TYPE, TARGET)
c.save_nr_crashes(new_result_index, EXPERIMENT_TYPE, TARGET)
c.save_new_corpus(TARGET)
c.log_info(
f'The results of this fuzzing campaign were stored in ../results/{TARGET}/{EXPERIMENT_TYPE}/{new_result_index}/.')
def save_bug_status(result_index):
bug_status = {'active': [], 'inactive': []}
active = 0
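    # Patch files are assumed to end in '<6-char bug id>.patch'; since '.patch'
    # is itself 6 characters, the slice [-12:-6] recovers the id.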
for j in range(len(BUGS)):
if BUGS_ACTIVE[j]:
bug_status['active'].append(BUGS[j][-12:-6])
active += 1
else:
bug_status['inactive'].append(BUGS[j][-12:-6])
bug_status['nr_active_bugs'] = active
bug_status['nr_inactive_bugs'] = len(BUGS) - active
bug_status['nr_total_bugs'] = len(BUGS)
with open(f'../results/{TARGET}/{EXPERIMENT_TYPE}/{result_index}/bug_status', 'w') as f:
json.dump(bug_status, f, indent=4)
if __name__ == '__main__':
try:
if len(sys.argv) != 2:
print(f'Usage: $ python3 {sys.argv[0]} <target-library>')
sys.exit()
if sys.argv[1] not in BASE_COMMITS:
print(f'<target-library> has to be one of {list(BASE_COMMITS.keys())}')
sys.exit()
TARGET = sys.argv[1]
DURATIONS = ['5m', '10m', '15m', '20m', '30m', '1h', '2h', '4h', '8h']
ITERATIONS = 10
REPO_LOCATION = f'../{TARGET}/'
SETUP_LOCATION = f'../CometFuzz/targets/{TARGET}/patches/setup/'
PATCH_LOCATION = f'../CometFuzz/targets/{TARGET}/patches/bugs/'
# checkout_base() # disabled, otherwise it overrides manual checkout
if "COMETFUZZ_INJECT_BUGS" in os.environ:
apply_setup_patches()
find_and_apply_patches()
sys.exit()
for duration in DURATIONS:
c.log_info('Cleaning up disk space.')
c.run_cmd_disable_output(['docker', 'system', 'prune', '-af'])
c.log_info(f'Starting the run with a duration of {duration}.')
# c.empty_seed_corpus() # disabled, start with non-empty seed corpus
c.initialize_seed_corpus(TARGET)
for i in range(ITERATIONS):
c.log_info(f'Starting iteration {i + 1} of {ITERATIONS} for the duration of {duration}.')
start = time.time()
fuzz_commit(duration)
stop = time.time()
c.log_info(
f'Iteration {i + 1} of {ITERATIONS} for the duration of {duration} took {int(stop - start)}s.')
except KeyboardInterrupt:
        print('\nProgram was interrupted by the user.')
| 7,572 | 44.89697 | 130 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/fuzz duration/common.py
|
import datetime
import json
import os
import subprocess
import sys
DEFAULT_TIMEOUT = '10m'
DEFAULT_FUZZERS = '(aflplusplus honggfuzz libfuzzer)'
def log_info(entry):
print(f'[{datetime.datetime.now().strftime("%Y-%m-%d %H:%M")}] (INFO) {entry}')
def log_error(entry):
print(f'[{datetime.datetime.now().strftime("%Y-%m-%d %H:%M")}] (ERROR) {entry}')
def run_cmd_disable_output(command_array, **kwargs):
return subprocess.run(command_array, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, **kwargs)
def run_cmd_enable_output(command_array, **kwargs):
return subprocess.run(command_array, **kwargs)
def run_cmd_capture_output(command_array, **kwargs):
return subprocess.run(command_array, capture_output=True, text=True, **kwargs)
def get_stdout(result):
return result.stdout.strip('\n"')
def save_coverage_statistics(result_index, experiment_type, library):
logfiles = []
stats = {'libfuzzer': {}, 'honggfuzz': {}, 'aflplusplus': {}}
try:
for filename in os.listdir('./tools/captain/workdir/log/'):
if 'container.log' in filename:
logfiles.append(os.path.join('./tools/captain/workdir/log/', filename))
except Exception as e:
log_error(e)
for logfile in logfiles:
try:
with open(logfile, 'r') as log:
target = logfile.split('_')
subtarget = f'{target[1]}-{target[2]}'
if len(target) > 5:
for idx in range(len(target)-5):
subtarget = f'{subtarget}_{target[idx+3]}'
temp = []
stat = []
if target[0].endswith('libfuzzer'):
for line in log:
if 'Fuzz target sha256: ' in line:
lib_sha = line.split()[3]
if ' corpus size: ' in line:
temp.append(line.split(':')[1].strip())
if 'oom/timeout/crash:' in line:
stat.append(line)
start = stat[0].split()
stop = stat[-1].split()
stats['libfuzzer'][subtarget] = {}
stats['libfuzzer'][subtarget]['start'] = {'coverage': start[2], 'features': start[4],
'corpus': start[6], 'exec/s': start[8], 'time': start[12]}
stats['libfuzzer'][subtarget]['stop'] = {'coverage': stop[2], 'features': stop[4],
'corpus': stop[6], 'exec/s': stop[8], 'time': stop[12]}
stats['libfuzzer'][subtarget]['corpus'] = {'start': temp[0], 'min': temp[1], 'stop': temp[2]}
stats['libfuzzer'][subtarget]['sha'] = lib_sha
elif target[0].endswith('honggfuzz'):
for line in log:
if 'Fuzz target sha256: ' in line:
hongg_sha = line.split()[3]
if ' corpus size: ' in line:
temp.append(line.split(':')[1].strip())
if 'Summary iterations:' in line:
stat.append(line)
stop = stat[0].split()
stats['honggfuzz'][subtarget] = {'coverage_percent': stop[9].split(':')[1],
'guard_nb': stop[8].split(':')[1],
'new_units': stop[6].split(':')[1],
'exec/s': stop[3].split(':')[1], 'time': stop[2].split(':')[1],
'start_corp': temp[0], 'min_corp': temp[1], 'stop_corp': temp[2],
'sha': hongg_sha}
elif target[0].endswith('aflplusplus'):
for line in log:
if 'Fuzz target sha256: ' in line:
afl_sha = line.split()[3]
if ' corpus size: ' in line:
temp.append(line.split(':')[1].strip())
if 'A coverage of ' in line:
stat.append(line)
stop = stat[0].split()
stats['aflplusplus'][subtarget] = {'coverage_percent': stop[12].split('%')[0][1:],
'covered_edges': stop[4], 'total_edges': stop[10],
'inputs': stop[14],
'start_corp': temp[0], 'min_corp': temp[1], 'stop_corp': temp[2],
'sha': afl_sha}
except Exception as e:
log_error(e)
with open(f'../results/{library}/{experiment_type}/{result_index}/coverage_results', 'w') as f:
json.dump(stats, f, indent=4)
def save_sha(result_index, experiment_type):
logfiles = []
stats = {'libfuzzer': {}, 'honggfuzz': {}, 'aflplusplus': {}}
try:
for filename in os.listdir('./tools/captain/workdir/log/'):
if 'container.log' in filename:
logfiles.append(os.path.join('./tools/captain/workdir/log/', filename))
except Exception as e:
log_error(e)
for logfile in logfiles:
try:
with open(logfile, 'r') as log:
target = logfile.split('_')
subtarget = f'{target[1]}-{target[2]}'
if target[0].endswith('libfuzzer'):
for line in log:
if 'Fuzz target sha256: ' in line:
lib_sha = line.split()[3]
stats['libfuzzer'][subtarget] = lib_sha
elif target[0].endswith('honggfuzz'):
for line in log:
if 'Fuzz target sha256: ' in line:
hongg_sha = line.split()[3]
stats['honggfuzz'][subtarget] = hongg_sha
elif target[0].endswith('aflplusplus'):
for line in log:
if 'Fuzz target sha256: ' in line:
afl_sha = line.split()[3]
stats['aflplusplus'][subtarget] = afl_sha
except Exception as e:
log_error(e)
with open(f'../results/{experiment_type}/{result_index}/sha', 'w') as f:
json.dump(stats, f, indent=4)
def save_nr_crashes(result_index, experiment_type, library):
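    """
    Count crashing inputs per fuzzer and target in the captain workdir
    archives. The decrement for AFL++ and honggfuzz presumably discounts a
    non-crash file (such as AFL++'s README) in the findings directory.
    """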
crashes = {'libfuzzer': {}, 'honggfuzz': {}, 'aflplusplus': {}}
try:
for dirname in os.listdir(f'./tools/captain/workdir/ar/aflplusplus/{library}/'):
nr_crashes = len(
os.listdir(f'./tools/captain/workdir/ar/aflplusplus/{library}/{dirname}/0/findings/crashes/'))
if nr_crashes > 0:
nr_crashes -= 1
crashes['aflplusplus'][f'{library}-{dirname}'] = nr_crashes
except Exception as e:
log_error(e)
try:
for dirname in os.listdir(f'./tools/captain/workdir/ar/libfuzzer/{library}/'):
nr_crashes = len(os.listdir(f'./tools/captain/workdir/ar/libfuzzer/{library}/{dirname}/0/findings/'))
crashes['libfuzzer'][f'{library}-{dirname}'] = nr_crashes
except Exception as e:
log_error(e)
try:
for dirname in os.listdir(f'./tools/captain/workdir/ar/honggfuzz/{library}/'):
nr_crashes = len(os.listdir(f'./tools/captain/workdir/ar/honggfuzz/{library}/{dirname}/0/findings/'))
if nr_crashes > 0:
nr_crashes -= 1
crashes['honggfuzz'][f'{library}-{dirname}'] = nr_crashes
except Exception as e:
log_error(e)
with open(f'../results/{library}/{experiment_type}/{result_index}/nr_crashes', 'w') as f:
json.dump(crashes, f, indent=4)
def configure_settings(result_index, experiment_type, library, timeout=DEFAULT_TIMEOUT, fuzzers=DEFAULT_FUZZERS, commit=None):
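    """
    Rewrite TIMEOUT, FUZZERS and the per-fuzzer TARGETS lines in
    ./tools/captain/captainrc, merge in the target's configrc, optionally pin
    a COMMIT, and snapshot the effective settings as JSON in the results dir.
    """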
settings = {}
with open('./tools/captain/captainrc', 'r') as file:
data = file.readlines()
for idx in range(len(data)):
if '#' not in data[idx]:
if 'TIMEOUT=' in data[idx]:
data[idx] = f'TIMEOUT={timeout}\n'
if 'FUZZERS=' in data[idx]:
data[idx] = f'FUZZERS={fuzzers}\n'
if 'aflplusplus_TARGETS=' in data[idx]:
data[idx] = f'aflplusplus_TARGETS=({library})\n'
if 'honggfuzz_TARGETS=' in data[idx]:
data[idx] = f'honggfuzz_TARGETS=({library})\n'
if 'libfuzzer_TARGETS=' in data[idx]:
data[idx] = f'libfuzzer_TARGETS=({library})\n'
            if len(data[idx]) > 1 and '=' in data[idx]:
                setting = data[idx].split('=', 1)
                settings[setting[0]] = setting[1].rstrip('\n')
with open('./tools/captain/captainrc', 'w') as file:
file.writelines(data)
with open(f'./targets/{library}/configrc', 'r') as file:
data = file.readlines()
for idx in range(len(data)):
        if len(data[idx]) > 1 and '=' in data[idx]:
            setting = data[idx].split('=', 1)
            settings[setting[0]] = setting[1].rstrip('\n')
if library == 'php': # seed corpus for php is downloaded at runtime
programs = settings['PROGRAMS'][1:-1].split(' ')
for program in programs:
run_cmd_enable_output(['mkdir', '-p', f'./targets/{library}/corpus/{program}'])
if commit:
settings['COMMIT'] = commit
with open(f'../results/{library}/{experiment_type}/{result_index}/settings', 'w') as f:
json.dump(settings, f, indent=4)
def save_new_corpus(library):
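    """
    Replace each fuzz target's seed corpus with the corpora produced by the
    last run (libFuzzer corpus/, honggfuzz output/, AFL++ findings/queue/).
    """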
try:
for dirname in os.listdir(f'./targets/{library}/corpus/'):
run_cmd_enable_output(['rm', '-rf', f'./targets/{library}/corpus/{dirname}'])
run_cmd_enable_output(['mkdir', f'./targets/{library}/corpus/{dirname}'])
except Exception as e:
log_error(e)
try:
for dirname in os.listdir(f'./tools/captain/workdir/ar/libfuzzer/{library}/'):
run_cmd_enable_output(['cp', '-a', f'./tools/captain/workdir/ar/libfuzzer/{library}/{dirname}/0/corpus/.',
f'./targets/{library}/corpus/{dirname}/'])
except Exception as e:
log_error(e)
try:
for dirname in os.listdir(f'./tools/captain/workdir/ar/honggfuzz/{library}/'):
run_cmd_enable_output(['cp', '-a', f'./tools/captain/workdir/ar/honggfuzz/{library}/{dirname}/0/output/.',
f'./targets/{library}/corpus/{dirname}/'])
except Exception as e:
log_error(e)
try:
for dirname in os.listdir(f'./tools/captain/workdir/ar/aflplusplus/{library}/'):
run_cmd_enable_output(
['cp', '-a', f'./tools/captain/workdir/ar/aflplusplus/{library}/{dirname}/0/findings/queue/.',
f'./targets/{library}/corpus/{dirname}/'])
except Exception as e:
log_error(e)
def initialize_seed_corpus(library):
log_info('Initializing seed corpus...')
run_cmd_enable_output(['rm', '-rf', f'./targets/{library}/corpus'])
if library != 'php': # seed corpus for php is downloaded at runtime
if run_cmd_enable_output(['cp', '-r', f'../magma/targets/{library}/corpus', f'./targets/{library}/']).returncode != 0:
log_error('Seed corpus initialization failed!')
sys.exit(1)
def empty_seed_corpus(library):
log_info('Initializing empty seed corpus...')
run_cmd_enable_output(['rm', '-rf', f'./targets/{library}/corpus'])
run_cmd_enable_output(['mkdir', f'./targets/{library}/corpus'])
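    # NOTE: the fuzz-target subdirectories created below match the openssl
    # targets; despite the `library` parameter, this helper assumes that layout.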
run_cmd_enable_output(['mkdir', 'asn1', 'asn1parse', 'bignum', 'client', 'server', 'x509'],
cwd=f'./targets/{library}/corpus/')
run_cmd_enable_output(['cp', 'zero', 'corpus/asn1/0'], cwd=f'./targets/{library}')
run_cmd_enable_output(['cp', 'zero', 'corpus/asn1parse/0'], cwd=f'./targets/{library}')
run_cmd_enable_output(['cp', 'zero', 'corpus/bignum/0'], cwd=f'./targets/{library}')
run_cmd_enable_output(['cp', 'zero', 'corpus/client/0'], cwd=f'./targets/{library}')
run_cmd_enable_output(['cp', 'zero', 'corpus/server/0'], cwd=f'./targets/{library}')
run_cmd_enable_output(['cp', 'zero', 'corpus/x509/0'], cwd=f'./targets/{library}')
| 12,444 | 45.785714 | 126 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/fuzz duration/data/bug_types.py
|
#!/usr/bin/python3
import json
DEBUG = False
iters = 10
runs = ['5 minutes', '10 minutes', '15 minutes', '20 minutes', '30 minutes', '1 hours', '2 hours', '4 hours', '8 hours']
libs = {'php': 15, 'openssl': 17, 'sqlite3': 15, 'poppler': 59, 'lua': 14, 'libxml2': 43, 'libpng': 106, 'libtiff': 37, 'libsndfile': 50}
bugs = {'reached': {}, 'triggered': {}, 'detected': {}}
for metric in bugs:
if DEBUG:
print(f'\n{metric}:\n')
for i in range(len(runs)):
bugs[metric][runs[i]] = []
if DEBUG:
print(runs[i])
for lib in libs:
dir = f"./{lib}/"
            start = libs[lib] + iters * i
            stop = libs[lib] + iters * (i + 1)
if DEBUG:
print('->', dir, start, stop)
for j in range(start, stop):
with open(f'{dir}{j:04d}/final_results', 'r') as file:
data = json.load(file)
for fuzzer in data[metric]:
for bug in data[metric][fuzzer]:
bugs[metric][runs[i]].append(bug)
if DEBUG:
print('bugs[metric][runs[i]] length:', len(bugs[metric][runs[i]]))
print('set(bugs[metric][runs[i]]) length:', len(set(bugs[metric][runs[i]])))
print(set(bugs[metric][runs[i]]))
bugs[metric][runs[i]] = list(set(bugs[metric][runs[i]]))
for metric in bugs:
print('\n')
for i in range(len(runs)):
print()
data = bugs[metric][runs[i]]
data.sort()
print(metric, runs[i], data)
| 1,546 | 34.159091 | 137 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/fuzz duration/data/significance.py
|
#!/usr/bin/python3
import json
from scipy.stats import mannwhitneyu
from VD_A import VD_A
# User params
DEBUG = False
FUZZ_DUR_DATA = "./fuzz_dur_data"
MANN_WHITNEY = False
VARGHA_DELANEY_AND_MANN_WHITNEY = True
# Experiment values
metrics = ['reached', 'triggered', 'detected']
iters = 10
runs = ['5 minutes', '10 minutes', '15 minutes', '20 minutes', '30 minutes', '1 hours', '2 hours', '4 hours', '8 hours']
libs = {'php': 15, 'openssl': 17, 'sqlite3': 15, 'poppler': 59, 'lua': 14,
'libxml2': 43, 'libpng': 106, 'libtiff': 37, 'libsndfile': 50}
for metric in metrics:
print(f'\n{metric}:\n')
nrofbugs = {}
for i in range(len(runs)):
if DEBUG:
print(runs[i])
nrofbugs[runs[i]] = []
for lib in libs:
dir = f"{FUZZ_DUR_DATA}/{lib}/"
            start = libs[lib] + iters * i
            stop = libs[lib] + iters * (i + 1)
if DEBUG:
print('->', dir, start, stop)
for j in range(start, stop):
with open(f'{dir}{j:04d}/final_results', 'r') as file:
data = json.load(file)
bugs = []
for fuzzer in data[metric]:
for bug in data[metric][fuzzer]:
bugs.append(bug)
nrofbugs[runs[i]].append(len(set(bugs)))
if DEBUG:
print(nrofbugs, 'length:', len(nrofbugs[runs[i]]))
if VARGHA_DELANEY_AND_MANN_WHITNEY:
for i in range(len(runs)):
for j in range(len(runs)):
if j >= i:
continue
estimate, magnitude = VD_A(treatment=nrofbugs[runs[i]], control=nrofbugs[runs[j]])
res = mannwhitneyu(nrofbugs[runs[i]], nrofbugs[runs[j]], alternative="greater", method="auto")
print(" \multicolumn{1}{r|}{", end='')
if res.pvalue < 0.05:
print("\\textbf{", end='')
print(round(estimate, 2), end='')
print("}} &")
else:
print(round(estimate, 2), end='')
print("} &")
if DEBUG:
print("Effect size of", runs[i], "is", magnitude, "with respect to", runs[j])
print()
print()
if MANN_WHITNEY:
for run in range(len(runs) - 1):
res = mannwhitneyu(nrofbugs[runs[run+1]], nrofbugs[runs[run]], alternative="greater", method="auto")
print(runs[run+1], metric, 'more bugs than', runs[run], 'TRUE' if res.pvalue < 0.05 else 'FALSE', res, round(res.pvalue,3))
print()
res = mannwhitneyu(nrofbugs['5 minutes'], nrofbugs['10 minutes'], alternative="less", method="auto")
print('5 minutes', metric, 'less bugs than 10 minutes', 'TRUE' if res.pvalue < 0.05 else 'FALSE', res, round(res.pvalue,3))
for run in runs:
if run not in ['5 minutes', '10 minutes']:
res = mannwhitneyu(nrofbugs[run], nrofbugs['10 minutes'], alternative="greater", method="auto")
print(run, metric, 'more bugs than 10 minutes', 'TRUE' if res.pvalue < 0.05 else 'FALSE', res, round(res.pvalue,3))
for run in runs:
if 'minutes' not in run:
res = mannwhitneyu(nrofbugs[run], nrofbugs['30 minutes'], alternative="greater", method="auto")
print(run, metric, 'more bugs than 30 minutes', 'TRUE' if res.pvalue < 0.05 else 'FALSE', res, round(res.pvalue,3))
res = mannwhitneyu(nrofbugs['4 hours'], nrofbugs['2 hours'], alternative="greater", method="auto")
print('4 hours', metric, 'more bugs than 2 hours', 'TRUE' if res.pvalue < 0.05 else 'FALSE', res, round(res.pvalue,3))
res = mannwhitneyu(nrofbugs['8 hours'], nrofbugs['2 hours'], alternative="greater", method="auto")
print('8 hours', metric, 'more bugs than 2 hours', 'TRUE' if res.pvalue < 0.05 else 'FALSE', res, round(res.pvalue,3))
| 3,965 | 45.658824 | 135 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/fuzz duration/data/VD_A.py
|
from bisect import bisect_left
from typing import List
import scipy.stats as ss
def VD_A(treatment: List[float], control: List[float]):
"""
Computes Vargha and Delaney A index
A. Vargha and H. D. Delaney.
A critique and improvement of the CL common language
effect size statistics of McGraw and Wong.
Journal of Educational and Behavioral Statistics, 25(2):101-132, 2000
The formula to compute A has been transformed to minimize accuracy errors
See: http://mtorchiano.wordpress.com/2014/05/19/effect-size-of-r-precision/
:param treatment: a numeric list
:param control: another numeric list
    :return: the A estimate and the magnitude
"""
m = len(treatment)
n = len(control)
if m != n:
raise ValueError("Data d and f must have the same length")
r = ss.rankdata(treatment + control)
r1 = sum(r[0:m])
# Compute the measure
# A = (r1/m - (m+1)/2)/n # formula (14) in Vargha and Delaney, 2000
A = (2 * r1 - m * (m + 1)) / (
2 * n * m
) # equivalent formula to avoid accuracy errors
levels = [0.147, 0.33, 0.474] # effect sizes from Hess and Kromrey, 2004
magnitude = ["negligible", "small", "medium", "large"]
scaled_A = (A - 0.5) * 2
magnitude = magnitude[bisect_left(levels, abs(scaled_A))]
estimate = A
return estimate, magnitude
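# Interpretation sketch (illustrative numbers): A = 0.5 means no stochastic
# difference between the groups, while e.g. A = 0.75 means the treatment wins
# ~75% of pairwise comparisons, a 'large' effect under the thresholds above.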
if __name__ == "__main__":
# Examples
# negligible
F = [
0.8236111111111111,
0.7966666666666666,
0.923611111111111,
0.8197222222222222,
0.7108333333333333,
]
G = [
0.8052777777777779,
0.8172222222222221,
0.8322222222222223,
0.783611111111111,
0.8141666666666666,
]
print(VD_A(G, F))
# small
A = [
0.478515625,
0.4638671875,
0.4638671875,
0.4697265625,
0.4638671875,
0.474609375,
0.4814453125,
0.4814453125,
0.4697265625,
0.4814453125,
0.474609375,
0.4833984375,
0.484375,
0.44921875,
0.474609375,
0.484375,
0.4814453125,
0.4638671875,
0.484375,
0.478515625,
0.478515625,
0.45703125,
0.484375,
0.419921875,
0.4833984375,
0.478515625,
0.4697265625,
0.484375,
0.478515625,
0.4638671875,
]
B = [
0.4814453125,
0.478515625,
0.44921875,
0.4814453125,
0.4638671875,
0.478515625,
0.474609375,
0.4638671875,
0.474609375,
0.44921875,
0.474609375,
0.478515625,
0.478515625,
0.474609375,
0.4697265625,
0.474609375,
0.45703125,
0.4697265625,
0.478515625,
0.4697265625,
0.4697265625,
0.484375,
0.45703125,
0.474609375,
0.474609375,
0.4638671875,
0.45703125,
0.474609375,
0.4638671875,
0.4306640625,
]
print(VD_A(A, B))
# medium
C = [
0.9108333333333334,
0.8755555555555556,
0.900277777777778,
0.9274999999999999,
0.8777777777777779,
]
E = [
0.8663888888888888,
0.8802777777777777,
0.7816666666666667,
0.8377777777777776,
0.9305555555555556,
]
print(VD_A(C, E))
# Large
D = [
0.7202777777777778,
0.77,
0.8544444444444445,
0.7947222222222222,
0.7577777777777778,
]
print(VD_A(C, D))
| 3,710 | 21.089286 | 79 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/fuzz duration/visualizations/combine_plots.py
|
#!/usr/bin/python3
import plotly.graph_objects as go
import json
import statistics
iters = 10
runs = ['5 minutes', '10 minutes', '15 minutes', '20 minutes', '30 minutes', '1 hours', '2 hours', '4 hours', '8 hours']
libs = {'php': 15, 'openssl': 17, 'sqlite3': 15, 'poppler': 59, 'lua': 14,
'libxml2': 43, 'libpng': 106, 'libtiff': 37, 'libsndfile': 50}
fig_reached = go.Figure()
fig_triggered = go.Figure()
fig_detected = go.Figure()
for lib in libs:
dir = f"./results/{lib}/"
    start = [x for x in range(libs[lib], libs[lib] + (len(runs)-1) * iters + 1, iters)]
    stop = [x for x in range(libs[lib] + iters - 1, libs[lib] + (len(runs)-1) * iters + iters, iters)]
bugs = {'reached': [], 'triggered': [], 'detected': []}
means = {'reached': [], 'triggered': [], 'detected': []}
std_devs = {'reached': [], 'triggered': [], 'detected': []}
for i in range(len(start)):
for j in range(start[i], stop[i] + 1):
count = {'reached': [], 'triggered': [], 'detected': []}
with open(f'{dir}{j:04d}/final_results', 'r') as file:
data = json.load(file)
for metric in data:
for fuzzer in data[metric]:
count[metric] = [*count[metric], *data[metric][fuzzer]]
bugs[metric].append(len(set(count[metric])))
for metric in bugs:
means[metric].append(statistics.mean(bugs[metric]))
std_devs[metric].append(statistics.stdev(bugs[metric]))
bugs = {'reached': [], 'triggered': [], 'detected': []}
fig_reached.add_trace(go.Bar(x=runs, y=means['reached'], name=lib,
error_y=dict(type='data', array=std_devs['reached'])))
fig_triggered.add_trace(go.Bar(x=runs, y=means['triggered'], name=lib,
error_y=dict(type='data', array=std_devs['triggered'])))
fig_detected.add_trace(go.Bar(x=runs, y=means['detected'], name=lib,
error_y=dict(type='data', array=std_devs['detected'])))
# Layout for the reached figure
fig_reached.update_layout(
yaxis_range=[0,17],
xaxis=dict(
title='Fuzz duration',
titlefont_size=15,
tickfont_size=13
),
yaxis=dict(
title='Reached bugs',
titlefont_size=15,
tickfont_size=13,
        tickmode='linear',
        tick0=0,
        dtick=2
),
legend=dict(
font_size=15,
),
barmode='group'
)
# Layout for the triggered figure
fig_triggered.update_layout(
yaxis_range=[0,7],
xaxis=dict(
title='Fuzz duration',
titlefont_size=15,
tickfont_size=13
),
yaxis=dict(
title='Triggered bugs',
titlefont_size=15,
tickfont_size=13,
        tickmode='linear',
        tick0=0,
        dtick=1
),
legend=dict(
font_size=15,
),
barmode='group'
)
# Layout for the detected figure
fig_detected.update_layout(
yaxis_range=[0,4],
xaxis=dict(
title='Fuzz duration',
titlefont_size=15,
tickfont_size=13
),
yaxis=dict(
title='Detected bugs',
titlefont_size=15,
tickfont_size=13,
        tickmode='linear',
        tick0=0,
        dtick=1
),
legend=dict(
font_size=15,
),
barmode='group'
)
# Render the figures
fig_reached.show()
fig_triggered.show()
fig_detected.show()
| 3,457 | 28.555556 | 120 |
py
|
CICDFuzzBench
|
CICDFuzzBench-master/experiments/fuzz duration/visualizations/fuzz-duration-visualize.py
|
import json
import os
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
PREFIX = '/srv'
TARGET = ''
RESULT_DIR = ''
DURATIONS = []
RUNS = []
START = []
STOP = []
# /// Experiment runs of 6 durations x 5 iterations, with seed corpora. ///
# RESULT_DIR = '../results/libxml2/artificial/'
# DURATIONS = ['5m', '10m', '15m', '20m', '30m', '45m', '60m']
# RUNS = [x + 1 for x in range(len(DURATIONS))]
# START = [x for x in range(60, 91, 5)]
# STOP = [x for x in range(64, 96, 5)]
# /// Original experiment runs from the thesis. ///
# RUNS = [8, 9, 10, 1, 2, 3, 4, 7, 12, 13, 6, 5, 11]
# DURATIONS = ['1m (run 8)', '1m (run 9)', '5m (run 10)', '10m (run 1)', '10m (run 2)', '15m (run 3)', '20m (run 4)',
# '30m (run 7)', '1h (run 12)', '8h (run 13)', '12h (run 6)', '12h (run 5)', '48h (run 11)']
# START = [409, 423, 470, 74, 144, 226, 308, 369, 489, 502, 361, 350, 483]
# STOP = [413, 451, 477, 109, 224, 306, 346, 398, 500, 506, 367, 353, 484]
# /// Experiment runs of 8x5 iterations, without seed corpora. ///
# RUNS = [1, 2, 3, 4, 5, 6, 7, 8]
# DURATIONS = ['1m', '5m', '10m', '15m', '20m', '30m', '1h', '8h']
# START = [513, 518, 523, 528, 533, 538, 543, 548]
# STOP = [517, 522, 527, 532, 537, 542, 547, 552]
# /// Experiment runs of 8x5 iterations, with seed corpora. ///
# RUNS = [1, 2, 3, 4, 5, 6, 7, 8]
# DURATIONS = ['1m', '5m', '10m', '15m', '20m', '30m', '1h', '8h']
# START = [593, 598, 603, 608, 613, 618, 623, 628]
# STOP = [597, 602, 607, 612, 617, 622, 627, 632]
def coverage_results(start_dir, stop_dir):
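    """
    Aggregate the coverage_results JSON of runs start_dir..stop_dir (using the
    example run only as a template for the nesting) and plot each metric over
    time per fuzzer.
    """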
with open(f'{PREFIX}/results/example/coverage_results', 'r') as file:
data = json.load(file)
for fuzzer in data:
for target in data[fuzzer]:
for metric in data[fuzzer][target]:
if fuzzer == 'libfuzzer':
for submetric in data[fuzzer][target][metric]:
data[fuzzer][target][metric][submetric] = []
else:
data[fuzzer][target][metric] = []
for i in range(start_dir, stop_dir + 1):
with open(f'{RESULT_DIR}{i:04d}/coverage_results', 'r') as file:
new = json.load(file)
for fuzzer in data:
for target in data[fuzzer]:
for metric in data[fuzzer][target]:
if fuzzer == 'libfuzzer':
for submetric in data[fuzzer][target][metric]:
try:
data[fuzzer][target][metric][submetric].append(
new[fuzzer][target][metric][submetric])
                                except Exception:
data[fuzzer][target][metric][submetric].append(None)
else:
try:
data[fuzzer][target][metric].append(new[fuzzer][target][metric])
                            except Exception:
data[fuzzer][target][metric].append(None)
for fuzzer in data:
if fuzzer == 'libfuzzer':
for metric in data['libfuzzer']['openssl-client']['stop']:
res = {}
for target in data[fuzzer]:
res[target] = data[fuzzer][target]['stop'][metric]
df = pd.DataFrame.from_dict(res, dtype=float)
fig = px.line(df, title=f'{fuzzer} - {metric} over time')
fig.show()
else:
for metric in data[fuzzer]['openssl-client']:
res = {}
for target in data[fuzzer]:
res[target] = data[fuzzer][target][metric]
df = pd.DataFrame.from_dict(res, dtype=float)
fig = px.line(df, title=f'{fuzzer} - {metric} over time')
fig.show()
def bugs_found(start_dir, stop_dir):
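    """
    Plot reached/triggered/detected bug counts per fuzzer and overall across
    runs start_dir..stop_dir, plus the percentage of active bugs over time.
    """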
fuzzers = []
res = {'active_bugs': []}
overall_res = {'active': [], 'reached': [], 'triggered': [], 'detected': []}
with open(f'{PREFIX}/results/example/coverage_results', 'r') as file:
data = json.load(file)
for fuzzer in data:
fuzzers.append(fuzzer)
res[fuzzer] = {}
for i in range(start_dir, stop_dir + 1):
bug_codes = {'reached': [], 'triggered': [], 'detected': []}
with open(f'{RESULT_DIR}{i:04d}/bug_status', 'r') as file:
bug_status = json.load(file)
res['active_bugs'].append(bug_status['nr_active_bugs'])
overall_res['active'].append(bug_status['nr_active_bugs'])
with open(f'{RESULT_DIR}{i:04d}/final_results', 'r') as file:
results = json.load(file)
for status in results:
for fuzzer in fuzzers:
if status not in res[fuzzer]:
res[fuzzer][status] = []
if fuzzer in results[status]:
res[fuzzer][status].append(len(results[status][fuzzer]))
for bug in results[status][fuzzer]:
bug_codes[status].append(bug)
else:
res[fuzzer][status].append(0)
for status in bug_codes:
overall_res[status].append(len(set(bug_codes[status])))
for fuzzer in fuzzers:
plot_data = res[fuzzer]
plot_data['active'] = res['active_bugs']
df = pd.DataFrame.from_dict(plot_data, dtype=float)
fig = px.line(df, title=f'Bug status for {fuzzer} over time')
fig.show()
df = pd.DataFrame.from_dict(overall_res, dtype=float)
fig = px.line(df, title=f'Bug status for all fuzzers over time')
fig.show()
new_overall = {'reached': [], 'triggered': [], 'detected': []}
for i in range(start_dir, stop_dir + 1):
for status in new_overall:
new_overall[status].append(overall_res[status][i - start_dir] / overall_res['active'][i - start_dir] * 100)
df = pd.DataFrame.from_dict(new_overall, dtype=float)
fig = px.line(df, title=f'Percentage of bugs reached, triggered and detected for all fuzzers over time')
fig.update_layout(xaxis_title='Time (commit number)', yaxis_title='Percentage of bugs (%)',
legend_title='Bug status')
fig.show()
def nr_crashes(start_dir, stop_dir):
plot_data = {'overall': []}
for i in range(start_dir, stop_dir + 1):
target_crashes = {}
total_crashes = 0
with open(f'{RESULT_DIR}{i:04d}/nr_crashes', 'r') as file:
crashes = json.load(file)
for fuzzer in crashes:
for target in crashes[fuzzer]:
if target not in target_crashes:
target_crashes[target] = crashes[fuzzer][target]
else:
target_crashes[target] = target_crashes[target] + crashes[fuzzer][target]
for target in crashes[fuzzer]:
if target not in plot_data:
plot_data[target] = []
plot_data[target].append(target_crashes[target])
total_crashes += target_crashes[target]
plot_data['overall'].append(total_crashes)
df = pd.DataFrame.from_dict(plot_data, dtype=float)
fig = px.line(df, title=f'Number of crashes per fuzz target for all fuzzers over time')
fig.update_layout(xaxis_title='Time (commit number)', yaxis_title='Number of crashes',
legend_title='Fuzz target')
fig.show()
def box_fuzz_dur_crashes():
fig = go.Figure()
for i in range(len(START)):
crashes = []
for j in range(START[i], STOP[i] + 1):
with open(f'{RESULT_DIR}{j:04d}/nr_crashes', 'r') as file:
data = json.load(file)
count = 0
for fuzzer in data:
for target in data[fuzzer]:
count += data[fuzzer][target]
crashes.append(count)
fig.add_trace(go.Box(name=f'Run {RUNS[i]}', y=crashes, x0=DURATIONS[i], marker_color='#3D9970'))
fig.update_yaxes(type='log')
fig.update_layout(xaxis_title='Fuzz duration', yaxis_title='Number of crashes', showlegend=False)
fig.write_image(f'../images/{TARGET}/{TARGET}-crashes.png')
def box_fuzz_dur_bugs():
fig = go.Figure()
bugs = {'reached': [], 'triggered': [], 'detected': [], 'x': []}
for i in range(len(START)):
for j in range(START[i], STOP[i] + 1):
count = {'reached': [], 'triggered': [], 'detected': []}
with open(f'{RESULT_DIR}{j:04d}/final_results', 'r') as file:
data = json.load(file)
for metric in data:
for fuzzer in data[metric]:
count[metric] = [*count[metric], *data[metric][fuzzer]]
bugs[metric].append(len(set(count[metric])))
bugs['x'].append(DURATIONS[i])
fig.add_trace(go.Box(name='reached', y=bugs['reached'], x=bugs['x'], marker_color='#FF4136'))
fig.add_trace(go.Box(name='triggered', y=bugs['triggered'], x=bugs['x'], marker_color='#FF851B'))
fig.add_trace(go.Box(name='detected', y=bugs['detected'], x=bugs['x'], marker_color='#3D9970'))
fig.update_layout(xaxis_title='Fuzz duration', yaxis_title='Number of bugs', boxmode='group')
fig.update_yaxes(rangemode="tozero")
fig.write_image(f'../images/{TARGET}/{TARGET}-bugs.png')
def box_fuzz_dur_bug_time():
fig = go.Figure()
bugs = {'reached': [], 'triggered': [], 'xreached': [], 'xtriggered': []}
for i in range(len(START)):
for j in range(START[i], STOP[i] + 1):
with open(f'{RESULT_DIR}{j:04d}/benchd_results', 'r') as file:
data = json.load(file)
for fuzzer in data['results']:
for program in data['results'][fuzzer]:
for target in data['results'][fuzzer][program]:
for run in data['results'][fuzzer][program][target]:
for metric in data['results'][fuzzer][program][target][run]:
for bug in data['results'][fuzzer][program][target][run][metric]:
bugs[metric].append(data['results'][fuzzer][program][target][run][metric][bug])
bugs[f'x{metric}'].append(DURATIONS[i])
fig.add_trace(go.Box(name='reached', y=bugs['reached'], x=bugs['xreached'], marker_color='#FF4136'))
fig.add_trace(go.Box(name='triggered', y=bugs['triggered'], x=bugs['xtriggered'], marker_color='#FF851B'))
fig.update_yaxes(type='log')
fig.update_layout(xaxis_title='Fuzz duration', yaxis_title='Time to bug (seconds)', boxmode='group')
fig.write_image(f'../images/{TARGET}/{TARGET}-time_to_bug.png')
def box_fuzz_dur_coverage():
fig = go.Figure()
coverage = {'aflplusplus': [], 'honggfuzz': [], 'libfuzzer': [], 'xaflplusplus': [], 'xhonggfuzz': [],
'xlibfuzzer': []}
for i in range(len(START)):
for j in range(START[i], STOP[i] + 1):
with open(f'{RESULT_DIR}{j:04d}/coverage_results', 'r') as file:
data = json.load(file)
for fuzzer in data:
for target in data[fuzzer]:
try:
if fuzzer == 'libfuzzer':
total_edges = int(data['aflplusplus'][target]['total_edges'])
coverage[fuzzer].append(float(data[fuzzer][target]['stop']['coverage']) / total_edges * 100)
coverage['xlibfuzzer'].append(DURATIONS[i])
elif fuzzer == 'honggfuzz':
coverage[fuzzer].append(float(data[fuzzer][target]['coverage_percent']))
coverage['xhonggfuzz'].append(DURATIONS[i])
elif fuzzer == 'aflplusplus':
coverage[fuzzer].append(float(data[fuzzer][target]['coverage_percent']))
coverage['xaflplusplus'].append(DURATIONS[i])
                    except Exception:
print(f'[WARNING] {TARGET} {fuzzer} {target} coverage in result dir {j:04d} could not be retrieved...')
fig.add_trace(go.Box(name='AFL++', y=coverage['aflplusplus'], x=coverage['xaflplusplus'], marker_color='#FF4136'))
fig.add_trace(go.Box(name='Honggfuzz', y=coverage['honggfuzz'], x=coverage['xhonggfuzz'], marker_color='#FF851B'))
fig.add_trace(go.Box(name='libFuzzer', y=coverage['libfuzzer'], x=coverage['xlibfuzzer'], marker_color='#3D9970'))
fig.update_yaxes(type='log')
fig.update_layout(xaxis_title='Fuzz duration', yaxis_title='Coverage', boxmode='group')
fig.write_image(f'../images/{TARGET}/{TARGET}-coverage.png')
def box_fuzz_dur_coverage_targets():
fig = go.Figure()
coverage = {}
for i in range(len(START)):
for j in range(START[i], STOP[i] + 1):
with open(f'{RESULT_DIR}{j:04d}/coverage_results', 'r') as file:
data = json.load(file)
for fuzzer in data:
for target in data[fuzzer]:
if target not in coverage:
coverage[target] = []
coverage[f'x{target}'] = []
try:
if fuzzer == 'libfuzzer':
total_edges = int(data['aflplusplus'][target]['total_edges'])
coverage[target].append(float(data[fuzzer][target]['stop']['coverage']) / total_edges * 100)
coverage[f'x{target}'].append(DURATIONS[i])
else:
coverage[target].append(float(data[fuzzer][target]['coverage_percent']))
coverage[f'x{target}'].append(DURATIONS[i])
                    except Exception:
print(f'[WARNING] {TARGET} {fuzzer} {target} coverage in result dir {j:04d} could not be retrieved...')
for target in coverage:
if target[0] != 'x':
fig.add_trace(go.Box(name=target, y=coverage[target], x=coverage[f'x{target}']))
fig.update_layout(xaxis_title='Fuzz duration', yaxis_title='Coverage', boxmode='group')
fig.write_image(f'../images/{TARGET}/{TARGET}-coverage_per_target.png')
def box_fuzz_dur_cov_seeds():
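    """
    Box plots of final coverage divided by final corpus size, per fuzzer and
    fuzz duration (a rough coverage-per-seed efficiency measure).
    """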
fig = go.Figure()
coverage = {'aflplusplus': [], 'honggfuzz': [], 'libfuzzer': [], 'xaflplusplus': [], 'xhonggfuzz': [],
'xlibfuzzer': []}
for i in range(len(START)):
for j in range(START[i], STOP[i] + 1):
with open(f'{RESULT_DIR}{j:04d}/coverage_results', 'r') as file:
data = json.load(file)
for fuzzer in data:
for target in data[fuzzer]:
if fuzzer == 'libfuzzer':
coverage[fuzzer].append(float(data[fuzzer][target]['stop']['coverage']) / float(
data[fuzzer][target]['stop']['corpus']))
coverage['xlibfuzzer'].append(DURATIONS[i])
elif fuzzer == 'honggfuzz':
                        coverage[fuzzer].append(float(data[fuzzer][target]['coverage_percent']) /
                                                float(data[fuzzer][target]['stop_corp']))
coverage['xhonggfuzz'].append(DURATIONS[i])
elif fuzzer == 'aflplusplus':
                        coverage[fuzzer].append(float(data[fuzzer][target]['coverage_percent']) /
                                                float(data[fuzzer][target]['stop_corp']))
coverage['xaflplusplus'].append(DURATIONS[i])
fig.add_trace(go.Box(name='AFL++', y=coverage['aflplusplus'], x=coverage['xaflplusplus'], marker_color='#FF4136'))
fig.add_trace(go.Box(name='Honggfuzz', y=coverage['honggfuzz'], x=coverage['xhonggfuzz'], marker_color='#FF851B'))
fig.add_trace(go.Box(name='libFuzzer', y=coverage['libfuzzer'], x=coverage['xlibfuzzer'], marker_color='#3D9970'))
fig.update_yaxes(type='log')
fig.update_layout(xaxis_title='Fuzz duration', yaxis_title='Coverage', boxmode='group')
fig.show()
if __name__ == '__main__':
# box_fuzz_dur_cov_seeds()
# libraries = {'libpng': 3, 'libsndfile': 1, 'libtiff': 1, 'libxml2': 6, 'lua': 1, 'openssl': 1, 'php': 1, 'poppler': 3, 'sqlite3': 1}
libraries = {'libsndfile': 9}
for lib in libraries:
TARGET = lib
RESULT_DIR = f'../results/{TARGET}/artificial/'
DURATIONS = ['5m', '10m', '15m', '20m', '30m', '45m', '60m']
ITERATIONS = 5
RUNS = [x + 1 for x in range(len(DURATIONS))]
START = [x for x in range(libraries[lib], libraries[lib] + (len(DURATIONS)-1) * ITERATIONS + 1, ITERATIONS)]
STOP = [x for x in range(libraries[lib] + ITERATIONS - 1, libraries[lib] + (len(DURATIONS)-1) * ITERATIONS + ITERATIONS, ITERATIONS)]
if not os.path.exists('../images'):
os.mkdir('../images')
if not os.path.exists(f'../images/{TARGET}'):
os.mkdir(f'../images/{TARGET}')
box_fuzz_dur_coverage_targets()
box_fuzz_dur_coverage()
box_fuzz_dur_bug_time()
box_fuzz_dur_bugs()
box_fuzz_dur_crashes()
# start = 226
# stop = 306
# coverage_results(start, stop)
# bugs_found(start, stop)
# nr_crashes(start, stop)
| 17,338 | 47.568627 | 142 |
py
|
rgbmcmr
|
rgbmcmr-master/rgbmcmr.py
|
from __future__ import division, print_function
from collections import namedtuple
import numpy as np
from scipy.special import erf, erfc
import emceemr
MINF = -np.inf
class RGBModel(emceemr.Model):
"""
Note if biasfunc is used, the sense is mag_real = mag_measured + bias
"""
param_names = 'tipmag, alphargb, alphaother, fracother'.split(', ')
AutoFuncmags = namedtuple('AutoFuncmags', ['startat', 'endat', 'uncspacing'])
def __init__(self, magdata, magunc=None, priors=None,
uncfunc=None, biasfunc=None, complfunc=None,
funcmags=None):
self.magdata = np.array(magdata)
self.maxdata = np.max(magdata)
self.mindata = np.min(magdata)
self._magunc = magunc
if isinstance(funcmags, self.AutoFuncmags):
self._funcmags = self._auto_funcmags(uncfunc, *funcmags)
else:
self._funcmags = funcmags
self._uncfunc = uncfunc
self._biasfunc = biasfunc
self._complfunc = complfunc
self._validate_lnprob_func()
super(RGBModel, self).__init__(priors)
def _auto_funcmags(self, uncfunc, startat, endat, uncspacing):
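        """
        Build a magnitude grid from `startat` to `endat` whose local spacing
        is `uncspacing` times the photometric uncertainty at that magnitude.
        """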
fmags = [startat]
while fmags[-1] < endat:
dmag = abs(uncfunc(fmags[-1])*uncspacing)
if dmag==0:
raise ValueError('auto_funcmags got stuck at an unc of 0. Might'
' your uncfunc be non-positive somewhere?')
fmags.append(fmags[-1]+dmag)
if fmags[-1] > endat:
fmags[-1] = endat
return np.array(fmags)
@property
def sorted_magdata(self):
if self._sorted_magdata is None:
self._sorted_magdata = np.sort(self.magdata)
return self._sorted_magdata
@property
def magdata(self):
return self._magdata
@magdata.setter
def magdata(self, value):
self._sorted_magdata = None
self._magdata = value
def _validate_lnprob_func(self):
"""
Checks that the various ways of giving uncertainties or not make sense
"""
if self.magunc is None:
if self.uncfunc is not None:
self._lnprob_func = self._lnprob_uncfuncs
else:
self._lnprob_func = self._lnprob_no_unc
elif self.funcmags is not None:
raise ValueError('Cannot give both uncertainties and the various uncfuncs')
else:
self._lnprob_func = self._lnprob_w_unc
# need to do getstate/setstate b/c _lnprob_func can't be pickled as a method
def __getstate__(self):
state = self.__dict__.copy()
if state['_lnprob_func'] is not None:
            state['_lnprob_func'] = self._lnprob_func.__name__  # bound methods expose __name__ in both py2 and py3
return state
def __setstate__(self, state):
meth = getattr(self, state['_lnprob_func'])
state['_lnprob_func'] = meth
self.__dict__ = state
def lnprob(self, tipmag, alphargb, alphaother, fracother):
"""
This does *not* sum up the lnprobs - that goes in __call__. Instead it
gives the lnprob per data point
"""
return self._lnprob_func(self.magdata, tipmag, alphargb, alphaother, fracother)
def _lnprob_no_unc(self, magdata, tipmag, alphargb, alphaother, fracother):
dmags = magdata - tipmag
rgbmsk = dmags > 0
lnpall = np.zeros_like(dmags)
lnpall[rgbmsk] = alphargb * dmags[rgbmsk]
lnpall[~rgbmsk] = alphaother * dmags[~rgbmsk] + np.log(fracother)
eterm1 = 1 - np.exp(alphaother*(self.mindata - tipmag))
eterm2 = np.exp(alphargb*(self.maxdata - tipmag)) - 1
lnN = np.log(fracother * eterm1 / alphaother + eterm2 / alphargb)
return lnpall - lnN
def _lnprob_w_unc(self, magdata, tipmag, alphargb, alphaother, fracother):
dmag_upper = self.maxdata - tipmag
dmag_lower = self.mindata - tipmag
return np.log(self._exp_gauss_conv_normed(magdata - tipmag,
alphargb, alphaother,
fracother, self.magunc,
dmag_lower, dmag_upper))
def _lnprob_uncfuncs(self, magdata, tipmag, alphargb, alphaother, fracother, _normalizationint=False):
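        """
        Likelihood when uncertainty/bias/completeness are given as functions:
        numerically convolves the noise-free luminosity function with the
        Gaussian uncertainty kernel over the `funcmags` grid, then normalizes
        by a trapezoidal integral of the same quantity over the sorted data.
        """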
funcmags = self._funcmags.reshape(1, self._funcmags.size)
if self._uncfunc is None:
raise ValueError('Funcmags given but uncfunc is None')
elif callable(self._uncfunc):
uncs = self._uncfunc(funcmags)
else:
uncs = self._uncfunc
if self._biasfunc is None:
biasedmags = funcmags
elif callable(self._biasfunc):
biasedmags = funcmags - self._biasfunc(funcmags)
else:
biasedmags = funcmags - self._biasfunc.reshape(1, funcmags.size)
if self._complfunc is None:
compl = 1
elif callable(self._complfunc):
compl = self._complfunc(funcmags)
else:
compl = self._complfunc.reshape(1, funcmags.size)
magdata_reshaped = magdata.reshape(magdata.size, 1)
lf = self._lnprob_no_unc(biasedmags, tipmag, alphargb, alphaother, fracother)
uncterm = (2*np.pi)**-0.5 * np.exp(-0.5*((magdata_reshaped - biasedmags)/uncs)**2)/uncs
dataintegrand = compl*uncterm*np.exp(lf)
Idata = np.trapz(y=dataintegrand, x=funcmags, axis=-1)
if _normalizationint:
return Idata
else:
intN = self._lnprob_uncfuncs(self.sorted_magdata,tipmag,
alphargb, alphaother, fracother,
_normalizationint=True)
N = np.trapz(y=intN, x=self.sorted_magdata)
            self.normed = (intN, funcmags.ravel(), N)
return np.log(Idata) - np.log(N)
def plot_lnprob(self, tipmag, alphargb, alphaother, fracother, magrng=100, doplot=True, delog=False, **plotkwargs):
"""
Plots (optionally) and returns arrays suitable for plotting the pdf. If
`magrng` is a scalar, it gives the number of samples over the data
domain. If an array, it's used as the x axis.
"""
from copy import copy
from astropy.utils import isiterable
from matplotlib import pyplot as plt
fakemod = copy(self)
if isiterable(magrng):
fakemod.magdata = np.sort(magrng)
else:
fakemod.magdata = np.linspace(self.mindata, self.maxdata, magrng)
if fakemod.magunc is not None:
sorti = np.argsort(self.magdata)
fakemod.magunc = np.interp(fakemod.magdata, self.magdata[sorti], self.magunc[sorti])
lnpb = fakemod.lnprob(tipmag, alphargb, alphaother, fracother)
if delog:
lnpb = np.exp(lnpb - np.min(lnpb))
if doplot:
plt.plot(fakemod.magdata, lnpb, **plotkwargs)
return fakemod.magdata, lnpb
def plot_data_and_model(self, samplerorparams, perc=50, datakwargs={}, lfkwargs={}):
from astropy.utils import isiterable
from matplotlib import pyplot as plt
if isiterable(samplerorparams):
ps = samplerorparams
else:
sampler = samplerorparams
ps = np.percentile(sampler.flatchain, perc, axis=0)
self.plot_lnprob(*ps, **lfkwargs)
n, edges = np.histogram(self.magdata, bins=datakwargs.pop('bins', 100))
cens = (edges[1:]+edges[:-1])/2
N = np.trapz(x=cens, y=n)
plt.scatter(cens, np.log(n/N), **datakwargs)
plt.ylabel('log(lf/data)')
@staticmethod
def _exp_gauss_conv_normed(x, a, b, F, s, x_lower, x_upper):
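        """
        Analytic normalization of the exponential-Gaussian convolution over
        [x_lower, x_upper]; the commented quad call below is the numerical
        cross-check.
        """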
# from scipy.integrate import quad
# N = quad(exp_gauss_conv, x_lower, x_upper, args=(a, b, F, np.mean(s)))[0]
# return exp_gauss_conv(x, a, b, F, s)/N
norm_term_a = RGBModel._exp_gauss_conv_int(x_upper, a, s, g=1) - RGBModel._exp_gauss_conv_int(x_lower, a, s, g=1)
norm_term_b = RGBModel._exp_gauss_conv_int(x_upper, b, s, g=-1) - RGBModel._exp_gauss_conv_int(x_lower, b, s, g=-1)
return RGBModel._exp_gauss_conv(x, a, b, F, s)/(norm_term_a + F * norm_term_b)
@staticmethod
def _exp_gauss_conv(x, a, b, F, s):
"""
Convolution of broken power law w/ gaussian.
"""
A = np.exp(a*x+a**2*s**2/2.)
B = np.exp(b*x+b**2*s**2/2.)
ua = (x+a*s**2)*2**-0.5/s
ub = (x+b*s**2)*2**-0.5/s
return (A*(1+erf(ua))+F*B*erfc(ub))
@staticmethod
def _exp_gauss_conv_int(x, ab, s, g=1):
"""
Integral for a *single* term of exp_gauss_conv.
g should be 1/-1
"""
prefactor = np.exp(-ab**2*s**2 / 2.) / ab
term1 = np.exp(ab*(ab*s**2 + x))*(1 + g * erf((ab*s**2 + x)*2**-0.5/s))
term2 = np.exp(ab**2*s**2 / 2.)*g*erf(x * 2**-0.5 / s)
return prefactor*(term1 - term2)
#properties for the alternate uncertainty functions
@property
def funcmags(self):
return self._funcmags
@funcmags.setter
def funcmags(self, value):
oldval = self._funcmags
self._funcmags = value
try:
self._validate_lnprob_func()
except:
self._funcmags = oldval
raise
@property
def magunc(self):
return self._magunc
@magunc.setter
def magunc(self, value):
oldval = self._magunc
self._magunc = value
try:
self._validate_lnprob_func()
except:
self._magunc = oldval
raise
@property
def uncfunc(self):
return self._uncfunc
@uncfunc.setter
def uncfunc(self, value):
oldval = self._uncfunc
self._uncfunc = value
try:
self._validate_lnprob_func()
except:
self._uncfunc = oldval
raise
@property
def biasfunc(self):
return self._biasfunc
@biasfunc.setter
def biasfunc(self, value):
oldval = self._biasfunc
self._biasfunc = value
try:
self._validate_lnprob_func()
except:
self._biasfunc = oldval
raise
@property
def complfunc(self):
return self._complfunc
@complfunc.setter
def complfunc(self, value):
oldval = self._complfunc
self._complfunc = value
try:
self._validate_lnprob_func()
except:
self._complfunc = oldval
raise
class NormalColorModel(emceemr.Model):
param_names = 'colorcen, colorsig, askew'.split(', ')
has_blobs = True
def __init__(self, magdata, tipdistr, colordata, colorunc, nstarsbelow=100,
priors=None):
self.magdata = np.array(magdata)
self.colordata = np.array(colordata)
self.colorunc = None if colorunc is None else np.array(colorunc)
self.tipdistr = np.array(tipdistr)
self._len_tipdistr = self.tipdistr.size
self.nstarsbelow = nstarsbelow
super(NormalColorModel, self).__init__(priors)
def lnprob(self, colorcen, colorsig, askew):
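        """
        Skew-normal color likelihood for the `nstarsbelow` stars immediately
        fainter than a tip magnitude drawn at random from `tipdistr`; returns
        the per-star lnprob and the drawn tip magnitude as a blob.
        """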
tipmag = self.tipdistr[np.random.randint(self._len_tipdistr)]
sorti = np.argsort(self.magdata)
idxs = sorti[np.in1d(sorti, np.where(self.magdata > tipmag)[0])]
msk = idxs[:self.nstarsbelow]
assert len(self.magdata[msk]) == self.nstarsbelow
assert np.all(self.magdata[msk] > tipmag)
        if self.colorunc is None:
            sig = np.full(self.nstarsbelow, float(colorsig))
        else:
            sig = np.hypot(self.colorunc[msk], colorsig)
x = (self.colordata[msk]-colorcen)/sig
        lnpnorm = -0.5*x**2 - np.log(sig)  # Gaussian log-density up to an additive constant
lnpskew = np.log1p(erf(askew*x*2**-0.5))
return lnpnorm + lnpskew, tipmag
| 11,835 | 33.011494 | 123 |
py
|