print_affinity.c
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h> // gethostname
#ifdef _OPENMP
#include <omp.h>
#endif
#ifndef __APPLE__
#include <sched.h> // sched_getaffinity
extern void runnable (cpu_set_t *, int *, int *);
void print_affinity_ ()
{
char hnbuf[64];
int thread = 0;
int lo;
int hi;
cpu_set_t coremask;
gethostname (hnbuf, sizeof (hnbuf));
hnbuf[sizeof (hnbuf) - 1] = '\0'; // gethostname() need not null-terminate on truncation
#pragma omp parallel private (thread, coremask, lo, hi)
{
#ifdef _OPENMP
thread = omp_get_thread_num ();
#endif
// Passing zero means use the calling thread
sched_getaffinity (0, sizeof (coremask), &coremask);
runnable (&coremask, &lo, &hi);
#pragma omp critical
{
printf ("Thread %d on %s. (Runnable range: lo=%d hi=%d)\n",
thread, hnbuf, lo, hi);
fflush (stdout);
}
}
}
#else
void print_affinity_ ()
{
printf("print_affinity is not supported on Mac OS\n");
}
#endif
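/*
 * runnable() is only declared above; its definition lives in another
 * translation unit. For illustration only, a minimal sketch of such a
 * helper is given here in comment form (an assumption, not the actual
 * definition): it scans the affinity mask with CPU_ISSET() and reports
 * the lowest and highest runnable core.
 *
 *   void runnable (cpu_set_t *mask, int *lo, int *hi)
 *   {
 *       int i;
 *       *lo = -1;
 *       *hi = -1;
 *       for (i = 0; i < CPU_SETSIZE; i++) {
 *           if (CPU_ISSET (i, mask)) {
 *               if (*lo < 0) *lo = i;  // first runnable core
 *               *hi = i;               // highest runnable core seen so far
 *           }
 *       }
 *   }
 */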
cpl_test.c
/*
* This file is part of the ESO Common Pipeline Library
* Copyright (C) 2001-2017 European Southern Observatory
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
/*-----------------------------------------------------------------------------
Includes
-----------------------------------------------------------------------------*/
#include <complex.h>
#include "cpl_test.h"
#include "cpl_init.h"
#include "cpl_errorstate.h"
#include "cpl_memory.h"
#include "cpl_msg.h"
#include "cpl_tools.h"
#include "cpl_stats_impl.h"
#include "cpl_io_fits.h"
#include "cpl_image_io_impl.h"
#include "cpl_image_basic_impl.h"
#include <errno.h>
#include <assert.h>
#include <string.h>
#include <signal.h>
/* Needed for fabs() */
#include <math.h>
#undef CPL_HAVE_TIMES
#ifdef HAVE_SYS_TIMES_H
#include <sys/times.h>
#ifdef HAVE_TIME_H
#include <time.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#define CPL_HAVE_TIMES
#endif
#endif
#endif
#ifdef HAVE_SYS_STAT_H
#include <sys/types.h>
#include <sys/stat.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#endif
#undef CPL_HAVE_CLOCK_GETTIME
#undef CPL_HAVE_GETTIMEOFDAY
#ifdef HAVE_TIME_H
#ifdef HAVE_CLOCK_GETTIME
#include <time.h>
#define CPL_HAVE_CLOCK_GETTIME
#endif
#endif
#ifndef CPL_HAVE_CLOCK_GETTIME
#ifdef HAVE_SYS_TIME_H
#ifdef HAVE_GETTIMEOFDAY
#include <sys/time.h>
#define CPL_HAVE_GETTIMEOFDAY
#endif
#endif
#endif
#ifdef HAVE_SYS_TIME_H
/* Used for gettimeofday() */
#include <sys/time.h>
#endif
/* Needed for CFITSIO_VERSION */
#include <fitsio.h>
#if defined CPL_FFTW_INSTALLED || defined CPL_FFTWF_INSTALLED
/* Needed for fftw_version */
#include <fftw3.h>
#endif
#if defined CPL_WCS_INSTALLED && CPL_WCS_INSTALLED == 1
/* Used for WCSLIB_VERSION */
#include <wcslib.h>
#endif
#ifndef inline
#define inline /* inline */
#endif
/*----------------------------------------------------------------------------*/
/**
* @defgroup cpl_test Unit testing functions
*
* This module provides various functions for unit testing.
*
* @par Synopsis:
* @code
* #include "cpl_test.h"
* @endcode
*/
/*----------------------------------------------------------------------------*/
/**@{*/
/*-----------------------------------------------------------------------------
Private variables
-----------------------------------------------------------------------------*/
static cpl_errorstate cleanstate;
static cpl_size cpl_test_count = 0;
static cpl_size cpl_test_failures = 0;
static const char * cpl_test_report = NULL;
static double cpl_test_time_start;
static double cpl_test_time_one;
static cpl_flops cpl_test_flops_one = 0;
/* 0: Uninitialized, 1: Initialized, 2: Deinitialized */
static int cpl_test_state_ = 0;
/*-----------------------------------------------------------------------------
Private function prototypes
-----------------------------------------------------------------------------*/
static void cpl_test_reset(cpl_errorstate);
static void cpl_errorstate_dump_debug(unsigned, unsigned, unsigned);
static void cpl_errorstate_dump_info(unsigned, unsigned, unsigned);
static const char * cpl_test_get_description(void) CPL_ATTR_CONST;
static void cpl_test_one(int, double, cpl_flops, cpl_errorstate, cpl_boolean,
const char *, cpl_boolean, const char *,
const char *, unsigned) CPL_ATTR_NONNULL;
static void cpl_test_dump_status(void);
static char * cpl_test_fits_file(const char *, const char *) CPL_ATTR_NONNULL;
/*-----------------------------------------------------------------------------
Function codes
-----------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------*/
/**
@brief Get the process CPU time, when available (from times())
@return The process CPU time in seconds.
@note Will always return 0 if times() is unavailable
Example of usage:
@code
int my_benchmark (void)
{
double cputime, tstop;
const double tstart = cpl_test_get_cputime();
myfunc();
tstop = cpl_test_get_cputime();
cputime = tstop - tstart;
cpl_msg_info(cpl_func, "The call took %g seconds of CPU-time", cputime);
}
@endcode
*/
/*----------------------------------------------------------------------------*/
double cpl_test_get_cputime(void) {
#if defined HAVE_SYS_TIMES_H && defined HAVE_SYSCONF && defined _SC_CLK_TCK
struct tms buf;
(void)times(&buf);
return (double)buf.tms_utime / (double)sysconf(_SC_CLK_TCK);
#else
return 0.0;
#endif
}
/*----------------------------------------------------------------------------*/
/**
@brief Get the process wall-clock time, when available
@return The process wall-clock time in seconds.
@note Will always return 0 if clock_gettime() and gettimeofday()
are unavailable or failing
@see clock_gettime(), gettimeofday()
Example of usage:
@code
int my_benchmark (void)
{
double walltime, tstop;
const double tstart = cpl_test_get_walltime();
myfunc();
tstop = cpl_test_get_walltime();
walltime = tstop - tstart;
cpl_msg_info(cpl_func, "The call took %g seconds of wall-clock time",
walltime);
}
@endcode
*/
/*----------------------------------------------------------------------------*/
double cpl_test_get_walltime(void) {
#ifdef CPL_HAVE_CLOCK_GETTIME
struct timespec buf;
return clock_gettime(CLOCK_REALTIME, &buf) ? 0.0
: (double)buf.tv_sec + 1.0e-9* (double)buf.tv_nsec;
#elif defined CPL_HAVE_GETTIMEOFDAY
struct timeval buf;
return gettimeofday(&buf, 0) ? 0.0
: (double)buf.tv_sec + 1.0e-6 * (double)buf.tv_usec;
#else
return 0.0;
#endif
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Get the FLOPS count.
@return The FLOPS count
@note This function is intended to be used only by the CPL test macros.
*/
/*----------------------------------------------------------------------------*/
inline cpl_flops cpl_test_get_flops(void)
{
return cpl_tools_get_flops();
}
/*----------------------------------------------------------------------------*/
/**
@brief Get the number of CPL tests performed.
@return The test count
@see cpl_test_get_failed()
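Example of usage (an illustrative sketch; mytest() is a hypothetical
test function):
@code
void my_tester (void)
{
    const cpl_size pretested = cpl_test_get_tested();
    cpl_test(mytest());
    cpl_msg_info(cpl_func, "Performed %d more test(s)",
                 (int)(cpl_test_get_tested() - pretested));
}
@endcode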
*/
/*----------------------------------------------------------------------------*/
cpl_size cpl_test_get_tested(void)
{
return cpl_test_count;
}
/*----------------------------------------------------------------------------*/
/**
@brief Get the number of failed CPL tests.
@return The count of failed tests
@see cpl_test_get_tested()
Example of usage:
@code
void my_tester (void)
{
const cpl_size prefailed = cpl_test_get_failed();
cpl_test(mytest());
if (cpl_test_get_failed() > prefailed) {
cpl_msg_info(cpl_func, "The function mytest() failed!");
}
}
@endcode
*/
/*----------------------------------------------------------------------------*/
cpl_size cpl_test_get_failed(void)
{
return cpl_test_failures;
}
/*----------------------------------------------------------------------------*/
/**
@brief Get the amount of storage [bytes] for the CPL object
@param self The CPL object
@return The size in bytes
@note Passing NULL is allowed and will return zero
Example of usage:
@code
int my_benchmark (void)
{
const size_t storage = cpl_test_get_bytes_vector(mydata);
double walltime, tstop;
const double tstart = cpl_test_get_walltime();
myfunc(mydata);
tstop = cpl_test_get_walltime();
walltime = tstop - tstart;
if (walltime > 0.0) {
cpl_msg_info(cpl_func, "Processing rate: %g",
(double)storage/walltime);
}
}
@endcode
*/
/*----------------------------------------------------------------------------*/
size_t cpl_test_get_bytes_vector(const cpl_vector * self)
{
return self == NULL ? 0
: (size_t)cpl_vector_get_size(self) * sizeof(double);
}
/*----------------------------------------------------------------------------*/
/**
@brief Get the amount of storage [bytes] for the CPL object
@param self The CPL object
@return The size in bytes
@note Passing NULL is allowed and will return zero
@see cpl_test_get_bytes_vector
*/
/*----------------------------------------------------------------------------*/
size_t cpl_test_get_bytes_matrix(const cpl_matrix * self)
{
return self == NULL ? 0
: (size_t)cpl_matrix_get_nrow(self)
* (size_t)cpl_matrix_get_ncol(self) * sizeof(double);
}
/*----------------------------------------------------------------------------*/
/**
@brief Get the amount of storage [bytes] for the CPL object
@param self The CPL object
@return The size in bytes
@note Passing NULL is allowed and will return zero
@see cpl_test_get_bytes_vector
*/
/*----------------------------------------------------------------------------*/
size_t cpl_test_get_bytes_image(const cpl_image * self)
{
return self == NULL ? 0
: (size_t)cpl_image_get_size_x(self)
* (size_t)cpl_image_get_size_y(self)
* cpl_type_get_sizeof(cpl_image_get_type(self));
}
/*----------------------------------------------------------------------------*/
/**
@brief Get the amount of storage [bytes] for the CPL object
@param self The CPL object
@return The size in bytes
@note Passing NULL is allowed and will return zero
@see cpl_test_get_bytes_vector
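@note The total is computed from the size of the first image, i.e. the
calculation assumes that all images in the list have identical
dimensions and pixel type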
*/
/*----------------------------------------------------------------------------*/
size_t cpl_test_get_bytes_imagelist(const cpl_imagelist * self)
{
return self == NULL ? 0
: (size_t)cpl_imagelist_get_size(self)
* (size_t)cpl_test_get_bytes_image(cpl_imagelist_get_const(self, 0));
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Initialize CPL and unit-test environment
@param filename __FILE__ of unit-test source code for log-file
@param report The email address for the error message
@param default_level Default level for messaging system
@return void
@see cpl_test_init()
@note This function should only be called by cpl_test_init()
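Example of usage of the calling macro (a minimal unit-test skeleton,
assuming an autoconf-provided PACKAGE_BUGREPORT and a hypothetical
mytest()):
@code
int main(void)
{
    cpl_test_init(PACKAGE_BUGREPORT, CPL_MSG_WARNING);
    cpl_test(mytest());
    return cpl_test_end(0);
}
@endcode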
*/
/*----------------------------------------------------------------------------*/
void cpl_test_init_macro(const char * filename, const char * report,
cpl_msg_severity default_level)
{
/* ISO/IEC 9899:1999 (E) 7.5 3:
The value of errno is zero at program startup, but is never set to
zero by any library function. The value of errno may be set to
nonzero by a library function call whether or not there is an error,
provided the use of errno is not documented in the
description of the function in this International Standard.
*/
/* As such it is safe to read errno at this stage,
but a non-zero value has no significance. */
const int errnopre = errno;
int errnoini;
assert(report != NULL);
assert(filename != NULL);
#ifdef _OPENMP
#pragma omp master
#endif
{
cpl_test_report = report;
errno = 0;
/* make sure we are checking for memory leaks */
if (CPL_XMEMORY_MODE == 0) {
setenv("CPL_MEMORY_MODE", "1", 0);
}
cpl_init(CPL_INIT_DEFAULT);
cpl_test_time_start = cpl_test_get_walltime();
cpl_test_time_one = cpl_test_time_start;
errnoini = errno;
errno = 0;
/* Needed on alphaev56 */
if (signal(SIGFPE, SIG_IGN) == SIG_ERR) {
cpl_msg_warning(cpl_func, "Could not install new signal handler "
"(SIG_IGN) for SIGFPE");
}
cleanstate = cpl_errorstate_get();
cpl_msg_set_level(default_level);
cpl_msg_set_level_from_env();
if (filename != NULL) {
const char * dotpos = NULL;
char * logfile = NULL;
/* Create a new string, where the extension is replaced with .log */
/* First trim away any directory names before the filename. Check
for both POSIX and Windows directory separator characters. This
means no files can have these characters in their name, which is
a reasonable requirement for portable software anyway. */
const char * bslashpos = strrchr(filename, '\\');
const char * fslashpos = strrchr(filename, '/');
const char * slashpos = fslashpos > bslashpos ? fslashpos : bslashpos;
if (slashpos != NULL) filename = slashpos+1;
/* Check the special case of having filename = "..". In that case
set filename to use "." instead, so that we end up with having
".log" as the log file name. */
if (strcmp(filename, "..") == 0) filename = ".";
/* Append .log in case there is no extension. In case there is an
extension shorter than three characters, the new string will
also be (more than) long enough for the .log extension.
Take into account possible directory separator characters. */
dotpos = strrchr(filename, '.');
logfile = cpl_sprintf("%s.log", filename);
if (dotpos != NULL) {
/* Need to write new extension after the last '.' */
(void)strcpy(logfile + (1 + dotpos - filename), "log");
}
cpl_msg_set_log_name(logfile);
if (cpl_error_get_code()) {
/* The log-file name could not be set */
cpl_msg_warning(cpl_func, "Ignoring failed setting of "
"log-file:");
cpl_errorstate_dump(cleanstate, CPL_FALSE,
cpl_errorstate_dump_one_warning);
cpl_test_reset(cleanstate);
}
/* Drop .log */
logfile[strlen(logfile)-strlen(".log")] = '\0';
cpl_msg_set_domain(logfile);
cpl_free(logfile);
}
cpl_msg_set_log_level(CPL_MSG_DEBUG);
if (errnopre != 0) {
/* May be useful for debugging - but see the above errno comment */
/* See also DFS04285 */
cpl_msg_debug(cpl_func, "%s() was called with errno=%d: %s (Unless "
"you are debugging code prior to the cpl_init() call "
"you can ignore this message)",
cpl_func, errnopre, strerror(errnopre));
}
if (errnoini != 0) {
/* May be useful for debugging - but see the above errno comment */
cpl_msg_debug(cpl_func, "cpl_init() set errno=%d: %s (Unless "
"you are debugging cpl_init() you can ignore "
"this message)", errnoini, strerror(errnoini));
}
}
#ifdef _OPENMP
/* No thread can start testing before the master is ready */
#pragma omp barrier
#endif
if (cpl_error_get_code() != CPL_ERROR_NONE) {
cpl_errorstate_dump_one(1, 1, 1); /* Dump the most recent error */
assert(cpl_error_get_code() == CPL_ERROR_NONE);
abort();
}
cpl_msg_debug(cpl_func, "sizeof(cpl_size): %u", (unsigned)sizeof(cpl_size));
#ifdef OFF_T
cpl_msg_debug(cpl_func, "sizeof(OFF_T)=%u", (unsigned)sizeof(OFF_T));
#endif
cpl_msg_debug(cpl_func, "%s", cpl_get_description(CPL_DESCRIPTION_DEFAULT));
if (errno != 0) {
cpl_msg_warning(cpl_func, "%s() set errno=%d: %s", cpl_func, errno,
strerror(errno));
errno = 0;
}
cpl_test_state_ = 1;
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test a given boolean expression
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param expression The integer expression to evaluate
@param fail_on_zero Fail iff the expression is zero
@param expr_txt The integer expression to evaluate as a string
@param function cpl_func
@param file __FILE__
@param line __LINE__
@see cpl_test()
@note This function should only be called via cpl_test()
@note CPL_FALSE of the boolean is a failure, CPL_TRUE is not
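Example of usage of the calling macro (an illustrative sketch; myfunc()
is a hypothetical function returning zero on success):
@code
cpl_test(myfunc() == 0);
@endcode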
*/
/*----------------------------------------------------------------------------*/
void cpl_test_macro(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre, cpl_size expression,
cpl_boolean fail_on_zero, const char * expr_txt,
const char * function, const char * file, unsigned line)
{
char * message = cpl_sprintf(fail_on_zero ? "(%s) = %" CPL_SIZE_FORMAT :
"(%s) = %" CPL_SIZE_FORMAT " <=> 0",
expr_txt, expression);
const cpl_boolean bool = fail_on_zero
? (expression ? CPL_TRUE : CPL_FALSE)
: (expression ? CPL_FALSE : CPL_TRUE);
cpl_test_one(errnopre, twallpre, flopspre, statepre, bool, message,
CPL_FALSE, function, file, line);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if a pointer is NULL
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param pointer The pointer to check, side-effects are allowed
@param pointer_string The pointer as a string
@param function cpl_func
@param file __FILE__
@param line __LINE__
@see cpl_test_null
@note This function should only be called from the macro cpl_test_null()
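Example of usage of the calling macro (an illustrative sketch):
@code
cpl_test_null(strchr("FITS", 'Z')); // No 'Z', so strchr() returns NULL
@endcode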
*/
/*----------------------------------------------------------------------------*/
void cpl_test_null_macro(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre,
const void * pointer, const char * pointer_string,
const char * function, const char * file,
unsigned line)
{
char * message = cpl_sprintf("(%s) = %p == NULL", pointer_string,
pointer);
cpl_test_one(errnopre, twallpre, flopspre, statepre,
pointer == NULL ? CPL_TRUE : CPL_FALSE,
message, CPL_FALSE, function, file, line);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if a pointer is non-NULL
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param pointer The pointer to check, side-effects are allowed
@param pointer_string The pointer as a string
@param function cpl_func
@param file __FILE__
@param line __LINE__
@see cpl_test_nonnull
@note This function should only be called from the macro cpl_test_nonnull()
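Example of usage of the calling macro (an illustrative sketch):
@code
cpl_vector * myvector = cpl_vector_new(10);
cpl_test_nonnull(myvector);
cpl_vector_delete(myvector);
@endcode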
*/
/*----------------------------------------------------------------------------*/
void cpl_test_nonnull_macro(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre,
const void * pointer, const char * pointer_string,
const char * function, const char * file,
unsigned line)
{
char * message = cpl_sprintf("(%s) = %p != NULL", pointer_string,
pointer);
cpl_test_one(errnopre, twallpre, flopspre, statepre,
pointer != NULL ? CPL_TRUE : CPL_FALSE,
message, CPL_FALSE, function, file, line);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if two integer expressions are equal
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param first The first value in the comparison
@param first_string The first value as a string
@param second The second value in the comparison
@param second_string The second value as a string
@param function cpl_func
@param file __FILE__
@param line __LINE__
@see cpl_test_eq
@note This function should only be called from the macro cpl_test_eq()
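Example of usage of the calling macro (an illustrative sketch, assuming
myvector is a vector created with 10 elements):
@code
cpl_test_eq(cpl_vector_get_size(myvector), 10);
@endcode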
*/
/*----------------------------------------------------------------------------*/
void cpl_test_eq_macro(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre,
cpl_size first, const char * first_string,
cpl_size second, const char * second_string,
const char * function, const char * file, unsigned line)
{
char * message = cpl_sprintf("(%s) = %" CPL_SIZE_FORMAT "; (%s) = %"
CPL_SIZE_FORMAT, first_string, first,
second_string, second);
cpl_test_one(errnopre, twallpre, flopspre, statepre,
first == second ? CPL_TRUE : CPL_FALSE,
message, CPL_FALSE, function, file, line);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if two integer expressions are not equal
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param first The first value in the comparison
@param first_string The first value as a string
@param second The second value in the comparison
@param second_string The second value as a string
@param function cpl_func
@param file __FILE__
@param line __LINE__
@see cpl_test_noneq
@note This function should only be called from the macro cpl_test_noneq()
*/
/*----------------------------------------------------------------------------*/
void cpl_test_noneq_macro(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre,
cpl_size first, const char * first_string,
cpl_size second, const char * second_string,
const char * function, const char * file,
unsigned line)
{
char * message = cpl_sprintf("(%s) = %" CPL_SIZE_FORMAT "; (%s) = %"
CPL_SIZE_FORMAT,
first_string, first,
second_string, second);
cpl_test_one(errnopre, twallpre, flopspre, statepre,
first != second ? CPL_TRUE : CPL_FALSE,
message, CPL_FALSE, function, file, line);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if two pointer expressions are equal
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param first The first value in the comparison
@param first_string The first value as a string
@param second The second value in the comparison
@param second_string The second value as a string
@param function cpl_func
@param file __FILE__
@param line __LINE__
@see cpl_test_eq_ptr
@note This function should only be called from the macro cpl_test_eq_ptr()
*/
/*----------------------------------------------------------------------------*/
void cpl_test_eq_ptr_macro(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre,
const void * first, const char * first_string,
const void * second, const char * second_string,
const char * function, const char * file,
unsigned line)
{
char * message = cpl_sprintf("(%s) = %p; (%s) = %p",
first_string, first,
second_string, second);
cpl_test_one(errnopre, twallpre, flopspre, statepre,
first == second ? CPL_TRUE : CPL_FALSE,
message, CPL_FALSE, function, file, line);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if two pointer expressions are not equal
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param first The first value in the comparison
@param first_string The first value as a string
@param second The second value in the comparison
@param second_string The second value as a string
@param function cpl_func
@param file __FILE__
@param line __LINE__
@see cpl_test_noneq_ptr
@note This function should only be called from the macro cpl_test_noneq_ptr()
*/
/*----------------------------------------------------------------------------*/
void cpl_test_noneq_ptr_macro(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre,
const void * first, const char * first_string,
const void * second, const char * second_string,
const char * function, const char * file,
unsigned line)
{
char * message = cpl_sprintf("(%s) = %p; (%s) = %p",
first_string, first,
second_string, second);
cpl_test_one(errnopre, twallpre, flopspre, statepre,
first != second ? CPL_TRUE : CPL_FALSE,
message, CPL_FALSE, function, file, line);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if two strings are equal
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param first The first string or NULL of the comparison
@param first_string The first value as a string
@param second The second string or NULL of the comparison
@param second_string The second value as a string
@param function function name
@param file filename
@param line line number
@see cpl_test_eq_string()
@note This function should only be called from cpl_test_eq_string()
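Example of usage of the calling macro (an illustrative sketch):
@code
const char * mystring = "FITS";
cpl_test_eq_string(mystring, "FITS");
@endcode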
*/
/*----------------------------------------------------------------------------*/
void cpl_test_eq_string_macro(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre,
const char * first, const char * first_string,
const char * second, const char * second_string,
const char * function,
const char * file, unsigned line)
{
char * fsquote = first == NULL ? NULL : cpl_sprintf("'%s'", first);
char * ssquote = second == NULL ? NULL : cpl_sprintf("'%s'", second);
const char * fquote = fsquote == NULL ? "NULL" : fsquote;
const char * squote = ssquote == NULL ? "NULL" : ssquote;
char * message = cpl_sprintf("%s = %s; %s = %s", first_string, fquote,
second_string, squote);
cpl_free(fsquote);
cpl_free(ssquote);
cpl_test_one(errnopre, twallpre, flopspre, statepre,
first != NULL && second != NULL && strcmp(first, second) == 0,
message, CPL_FALSE, function, file, line);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if two strings are not equal
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param first The first string or NULL of the comparison
@param first_string The first value as a string
@param second The second string or NULL of the comparison
@param second_string The second value as a string
@param function function name
@param file filename
@param line line number
@see cpl_test_noneq_string()
@note This function should only be called from cpl_test_noneq_string()
*/
/*----------------------------------------------------------------------------*/
void cpl_test_noneq_string_macro(int errnopre, double twallpre,
cpl_flops flopspre, cpl_errorstate statepre,
const char * first, const char * first_string,
const char * second,
const char * second_string,
const char * function,
const char * file, unsigned line)
{
char * fsquote = first == NULL ? NULL : cpl_sprintf("'%s'", first);
char * ssquote = second == NULL ? NULL : cpl_sprintf("'%s'", second);
const char * fquote = fsquote == NULL ? "NULL" : fsquote;
const char * squote = ssquote == NULL ? "NULL" : ssquote;
char * message = cpl_sprintf("%s = %s; %s = %s", first_string, fquote,
second_string, squote);
cpl_free(fsquote);
cpl_free(ssquote);
cpl_test_one(errnopre, twallpre, flopspre, statepre,
first != NULL && second != NULL && strcmp(first, second) != 0,
message, CPL_FALSE, function, file, line);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if a file is valid FITS using an external verification utility
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param filename The file to verify
@param filename_string The file to verify as a string
@param function cpl_func
@param file __FILE__
@param line __LINE__
@see cpl_test_fits()
@note This function should only be called from cpl_test_fits()
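Example of usage of the calling macro (an illustrative sketch; as the
message texts below suggest, an external checker such as fitsverify can
be enabled via the CPL_TEST_FITS environment variable):
@code
cpl_test_fits("myfile.fits");
@endcode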
*/
/*----------------------------------------------------------------------------*/
void cpl_test_fits_macro(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre,
const char * filename, const char * filename_string,
const char * function, const char * file,
unsigned line)
{
const char * checker = getenv(CPL_TEST_FITS);
char * message;
cpl_boolean expression;
if (filename == NULL) {
message = cpl_sprintf(CPL_TEST_FITS " unusable on NULL-file "
"%s", filename_string);
expression = CPL_FALSE; /* Unable to do an actual test */
} else if (checker == NULL) {
message = cpl_test_fits_file(filename, filename_string);
if (message != NULL) {
expression = CPL_FALSE; /* File cannot be FITS */
} else {
/* The previous FITS validation is so primitive that its
success is not reported */
message = cpl_sprintf(CPL_TEST_FITS " undefined for file %s='%s', "
"try: export " CPL_TEST_FITS "=fitsverify",
filename_string, filename);
expression = CPL_TRUE; /* Unable to do an actual test */
}
} else {
const char * redir = cpl_msg_get_level() < CPL_MSG_WARNING ? ""
: (cpl_msg_get_level() < CPL_MSG_ERROR ? " > /dev/null"
: " > /dev/null 2>&1");
char * cmd = cpl_sprintf("%s %s %s", checker, filename, redir);
message = cpl_sprintf(CPL_TEST_FITS " on file %s: %s",
filename_string, cmd);
expression = system(cmd) == 0 ? CPL_TRUE : CPL_FALSE;
cpl_free(cmd);
}
cpl_test_one(errnopre, twallpre, flopspre, statepre, expression, message,
CPL_FALSE, function, file, line);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if two CPL masks are equal
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param first The first mask or NULL of the comparison
@param first_string The first mask as a string
@param second The second mask or NULL of the comparison
@param second_string The second mask as a string
@param function function name
@param file filename
@param line line number
@see cpl_test_eq_mask()
@note This function should only be called from cpl_test_eq_mask()
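Example of usage of the calling macro (an illustrative sketch, assuming
mymask1 and mymask2 are two masks of identical size):
@code
cpl_test_eq_mask(mymask1, mymask2);
@endcode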
*/
/*----------------------------------------------------------------------------*/
void cpl_test_eq_mask_macro(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre,
const cpl_mask * first, const char * first_string,
const cpl_mask * second, const char * second_string,
const char * function, const char * file,
unsigned line)
{
cpl_errorstate mystate = cpl_errorstate_get();
const cpl_size nx1 = cpl_mask_get_size_x(first);
const cpl_size ny1 = cpl_mask_get_size_y(first);
const cpl_size nx2 = cpl_mask_get_size_x(second);
const cpl_size ny2 = cpl_mask_get_size_y(second);
cpl_boolean expression;
char * message;
if (!cpl_errorstate_is_equal(mystate)) {
cpl_error_set(cpl_func, cpl_error_get_code());
expression = CPL_FALSE;
message = cpl_sprintf("%s <=> %s input error",
first_string, second_string);
} else if (nx1 != nx2 || ny1 != ny2) {
expression = CPL_FALSE;
message = cpl_sprintf("%s <=> %s incompatible input, nx: %"
CPL_SIZE_FORMAT " <=> %" CPL_SIZE_FORMAT ", ny: %"
CPL_SIZE_FORMAT " <=> %" CPL_SIZE_FORMAT,
first_string, second_string,
nx1, nx2, ny1, ny2);
} else if (memcmp(cpl_mask_get_data_const(first),
cpl_mask_get_data_const(second), (size_t)(nx1 * ny1))) {
/* The test has failed, now spend extra time to report why */
cpl_size i;
cpl_size k = 0;
cpl_size n = 0;
const cpl_binary * pbpm1 = cpl_mask_get_data_const(first);
const cpl_binary * pbpm2 = cpl_mask_get_data_const(second);
for (i = 0; i < nx1 * ny1; i++) {
if (pbpm1[i] != pbpm2[i]) {
k = i;
n++;
}
}
assert( n != 0 );
expression = CPL_FALSE;
message
= cpl_sprintf("%s(%" CPL_SIZE_FORMAT ",%" CPL_SIZE_FORMAT
") = %u <=> %u = %s(%" CPL_SIZE_FORMAT ",%"
CPL_SIZE_FORMAT ") (%" CPL_SIZE_FORMAT " of %"
CPL_SIZE_FORMAT " x %" CPL_SIZE_FORMAT " "
"differ(s))", first_string, 1+k%nx1, 1+k/nx1,
(unsigned)pbpm1[k], (unsigned)pbpm2[k], second_string,
1+k%nx2, 1+k/nx2, n, nx1, ny1);
} else {
expression = CPL_TRUE;
message = cpl_sprintf("%s == %s", first_string, second_string);
}
cpl_test_one(errnopre, twallpre, flopspre, statepre, expression, message,
CPL_FALSE, function, file, line);
cpl_errorstate_set(mystate);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Evaluate A <= B and update an internal counter if it is not true
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param value The double-precision number to test
@param value_string The value as a string
@param tolerance The double-precision upper limit to compare against
@param tolerance_string The tolerance as a string
@param function cpl_func
@param file __FILE__
@param line __LINE__
@see cpl_test_leq()
@note This function should only be called via the macro cpl_test_leq()
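Example of usage of the calling macro (an illustrative sketch; myresidual
is a hypothetical double):
@code
cpl_test_leq(fabs(myresidual), 1e-5);
@endcode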
*/
/*----------------------------------------------------------------------------*/
void cpl_test_leq_macro(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre,
double value, const char * value_string,
double tolerance, const char * tolerance_string,
const char * function, const char * file, unsigned line)
{
const cpl_boolean expression
= (value <= tolerance) ? CPL_TRUE : CPL_FALSE;
char * message = cpl_sprintf("%s = %g <= %g = %s", value_string,
value, tolerance, tolerance_string);
cpl_test_one(errnopre, twallpre, flopspre, statepre, expression, message,
CPL_FALSE, function, file, line);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Evaluate A < B and update an internal counter if it is not true
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param value The double-precision number to test
@param value_string The value as a string
@param tolerance The double-precision upper limit to compare against
@param tolerance_string The tolerance as a string
@param function cpl_func
@param file __FILE__
@param line __LINE__
@see cpl_test_lt()
@note This function should only be called via the macro cpl_test_lt()
*/
/*----------------------------------------------------------------------------*/
void cpl_test_lt_macro(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre,
double value, const char * value_string,
double tolerance, const char * tolerance_string,
const char * function, const char * file, unsigned line)
{
const cpl_boolean expression
= (value < tolerance) ? CPL_TRUE : CPL_FALSE;
char * message = cpl_sprintf("%s = %g < %g = %s", value_string,
value, tolerance, tolerance_string);
cpl_test_one(errnopre, twallpre, flopspre, statepre, expression, message,
CPL_FALSE, function, file, line);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if two numerical expressions are
within a given (absolute) tolerance
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param first The first value in the comparison
@param first_string The first value as a string
@param second The second value in the comparison
@param second_string The second value as a string
@param tolerance A non-negative tolerance
@param tolerance_string The tolerance as a string
@param function function name
@param file filename
@param line line number
@see cpl_test_abs()
@note This function should only be called from the macro cpl_test_abs()
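Example of usage of the calling macro (an illustrative sketch, assuming
hypothetical doubles mycomputed and myexpected):
@code
cpl_test_abs(mycomputed, myexpected, 1e-10);
@endcode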
*/
/*----------------------------------------------------------------------------*/
void cpl_test_abs_macro(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre,
double first, const char *first_string,
double second, const char *second_string,
double tolerance, const char *tolerance_string,
const char *function, const char *file, unsigned line)
{
const cpl_boolean expression
= (fabs(first - second) <= tolerance) ? CPL_TRUE : CPL_FALSE;
char *message = cpl_sprintf("|%s - %s| = |%g - %g| = |%g| <= %g = %s",
first_string, second_string, first,
second, first - second, tolerance,
tolerance_string);
cpl_test_one(errnopre, twallpre, flopspre, statepre, expression, message,
CPL_FALSE, function, file, line);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if two complex expressions are
within a given (absolute) tolerance
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param first The first value in the comparison
@param first_string The first value as a string
@param second The second value in the comparison
@param second_string The second value as a string
@param tolerance A non-negative tolerance
@param tolerance_string The tolerance as a string
@param function function name
@param file filename
@param line line number
@see cpl_test_abs_complex()
@note This function should only be called from the macro
cpl_test_abs_complex()
*/
/*----------------------------------------------------------------------------*/
void cpl_test_abs_complex_macro(int errnopre, double twallpre,
cpl_flops flopspre, cpl_errorstate statepre,
double complex first,
const char *first_string,
double complex second,
const char *second_string,
double tolerance,
const char *tolerance_string,
const char *function, const char *file,
unsigned line)
{
const cpl_boolean expression
= (cabs(first - second) <= tolerance) ? CPL_TRUE : CPL_FALSE;
char *message =
cpl_sprintf("|%s - %s| = |(%g%+gi) - (%g%+gi)| = "
"|%g%+gi| = %g <= %g = %s",
first_string, second_string, creal(first),
cimag(first), creal(second), cimag(second),
creal(first - second), cimag(first - second),
cabs(first - second),
tolerance, tolerance_string);
cpl_test_one(errnopre, twallpre, flopspre, statepre, expression, message,
CPL_FALSE, function, file, line);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if two numerical expressions are
within a given relative tolerance
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param first The first value in the comparison
@param first_string The first value as a string
@param second The second value in the comparison
@param second_string The second value as a string
@param tolerance A non-negative tolerance
@param tolerance_string The tolerance as a string
@param function function name
@param file filename
@param line line number
@see cpl_test_rel()
@note This function should only be called from the macro cpl_test_rel()
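Example of usage of the calling macro (an illustrative sketch, assuming
hypothetical non-zero doubles mycomputed and myexpected):
@code
cpl_test_rel(mycomputed, myexpected, 0.001);
@endcode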
*/
/*----------------------------------------------------------------------------*/
void cpl_test_rel_macro(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre,
double first, const char *first_string,
double second, const char *second_string,
double tolerance, const char *tolerance_string,
const char *function, const char *file, unsigned line)
{
char * message = NULL; /* Avoid (false) uninit warnings */
cpl_boolean expression;
if (tolerance < 0.0) {
expression = CPL_FALSE;
message = cpl_sprintf("%s = %g; %s = %g. Negative tolerance %s = %g",
first_string, first,
second_string, second,
tolerance_string, tolerance);
} else if (first == second) {
/* Not needed. Used only for prettier messaging */
expression = CPL_TRUE;
message = cpl_sprintf("%s = %g = %s. (Tolerance %s = %g)",
first_string, first, second_string,
tolerance_string, tolerance);
} else if (first == 0.0) {
/* Not needed. Used only for prettier messaging */
expression = CPL_FALSE;
message = cpl_sprintf("%s = zero; %s = non-zero (%g). (Tolerance "
"%s = %g)", first_string, second_string,
second, tolerance_string, tolerance);
} else if (second == 0.0) {
/* Not needed. Used only for prettier messaging */
expression = CPL_FALSE;
message = cpl_sprintf("%s = non-zero (%g); %s = zero. (Tolerance "
"%s = %g)", first_string, first, second_string,
tolerance_string, tolerance);
} else if (fabs(first) < fabs(second)) {
expression = fabs(first - second) <= tolerance * fabs(first)
? CPL_TRUE : CPL_FALSE;
message = cpl_sprintf("|%s - %s|/|%s| = |%g - %g|/|%g| = |%g|/|%g|"
" <= %g = %s", first_string, second_string,
first_string, first, second, first,
first - second, first, tolerance,
tolerance_string);
} else {
/* assert(fabs(second) <= fabs(first)) */
expression = fabs(first - second) <= tolerance * fabs(second)
? CPL_TRUE : CPL_FALSE;
message = cpl_sprintf("|%s - %s|/|%s| = |%g - %g|/|%g| = |%g|/|%g|"
" <= %g = %s", first_string, second_string,
second_string, first, second, second,
first - second, second, tolerance,
tolerance_string);
}
cpl_test_one(errnopre, twallpre, flopspre, statepre, expression, message,
CPL_FALSE, function, file, line);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if two CPL vectors are identical
within a given (absolute) tolerance
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param first The first vector in the comparison
@param first_string The first vector as a string
@param second The second vector of identical size in the comparison
@param second_string The second vector as a string
@param tolerance A non-negative tolerance
@param tolerance_string The tolerance as a string
@param function function name
@param file filename
@param line line number
@see cpl_test_vector_abs()
@note This function should only be called from the macro cpl_test_vector_abs()
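Example of usage of the calling macro (an illustrative sketch):
@code
cpl_vector * myvec1 = cpl_vector_new(5);
cpl_vector * myvec2 = cpl_vector_new(5);
cpl_vector_fill(myvec1, 1.0);
cpl_vector_fill(myvec2, 1.0);
cpl_test_vector_abs(myvec1, myvec2, 0.0);
cpl_vector_delete(myvec1);
cpl_vector_delete(myvec2);
@endcode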
*/
/*----------------------------------------------------------------------------*/
void cpl_test_vector_abs_macro(int errnopre, double twallpre,
cpl_flops flopspre, cpl_errorstate statepre,
const cpl_vector * first,
const char *first_string,
const cpl_vector * second,
const char *second_string,
double tolerance, const char *tolerance_string,
const char *function, const char *file,
unsigned line)
{
cpl_errorstate mystate = cpl_errorstate_get();
cpl_vector * diff = cpl_vector_duplicate(first);
cpl_boolean expression;
char * message;
(void)cpl_vector_subtract(diff, second);
if (!cpl_errorstate_is_equal(mystate)) {
cpl_error_set(cpl_func, cpl_error_get_code());
expression = CPL_FALSE;
message = cpl_sprintf("%s <=> %s (tol=%s) input error:",
first_string, second_string,
tolerance_string);
} else {
const double * pdiff = cpl_vector_get_data_const(diff);
double difval = pdiff[0];
const cpl_size n = cpl_vector_get_size(diff);
cpl_size pos = 0;
cpl_size i;
for (i = 1; i < n; i++) {
if (fabs(pdiff[i]) > fabs(difval)) {
pos = i;
difval = pdiff[i];
}
}
if (cpl_errorstate_is_equal(mystate)) {
const double val1 = cpl_vector_get(first, pos);
const double val2 = cpl_vector_get(second, pos);
expression = (fabs(difval) <= tolerance) ? CPL_TRUE : CPL_FALSE;
message = cpl_sprintf("|%s(%" CPL_SIZE_FORMAT ") - %s(%"
CPL_SIZE_FORMAT ")| = "
"|%g - %g| = |%g| <= %g = %s",
first_string, pos,
second_string, pos,
val1, val2, difval, tolerance,
tolerance_string);
} else {
cpl_error_set(cpl_func, cpl_error_get_code());
expression = CPL_FALSE;
message = cpl_sprintf("%s <=> %s (tol=%s) input error:",
first_string, second_string,
tolerance_string);
}
}
cpl_test_one(errnopre, twallpre, flopspre, statepre, expression, message,
CPL_FALSE, function, file, line);
cpl_errorstate_set(mystate);
cpl_vector_delete(diff);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if two CPL matrices are identical
within a given (absolute) tolerance
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param first The first matrix in the comparison
@param first_string The first matrix as a string
@param second The second matrix of identical size in the comparison
@param second_string The second matrix as a string
@param tolerance A non-negative tolerance
@param tolerance_string The tolerance as a string
@param function function name
@param file filename
@param line line number
@see cpl_test_matrix_abs()
@note This function should only be called from the macro cpl_test_matrix_abs()
*/
/*----------------------------------------------------------------------------*/
void cpl_test_matrix_abs_macro(int errnopre, double twallpre,
cpl_flops flopspre, cpl_errorstate statepre,
const cpl_matrix * first,
const char *first_string,
const cpl_matrix * second,
const char *second_string,
double tolerance, const char *tolerance_string,
const char *function, const char *file,
unsigned line)
{
cpl_errorstate mystate = cpl_errorstate_get();
cpl_matrix * diff = cpl_matrix_duplicate(first);
cpl_boolean expression;
char * message;
(void)cpl_matrix_subtract(diff, second);
if (!cpl_errorstate_is_equal(mystate)) {
cpl_error_set(cpl_func, cpl_error_get_code());
expression = CPL_FALSE;
message = cpl_sprintf("%s <=> %s (tol=%s) input error:",
first_string, second_string,
tolerance_string);
} else {
const double * pdiff = cpl_matrix_get_data_const(diff);
double difval = pdiff[0];
const cpl_size ncol = cpl_matrix_get_ncol(diff);
const cpl_size n = cpl_matrix_get_nrow(diff) * ncol;
cpl_size pos = 0;
cpl_size i;
for (i = 1; i < n; i++) {
if (fabs(pdiff[i]) > fabs(difval)) {
pos = i;
difval = pdiff[i];
}
}
if (cpl_errorstate_is_equal(mystate)) {
const cpl_size irow = pos / ncol;
const cpl_size icol = pos % ncol;
const double val1 = cpl_matrix_get(first, irow, icol);
const double val2 = cpl_matrix_get(second, irow, icol);
expression = (fabs(difval) <= tolerance) ? CPL_TRUE : CPL_FALSE;
message = cpl_sprintf("|%s(%" CPL_SIZE_FORMAT ",%" CPL_SIZE_FORMAT
") - %s(%" CPL_SIZE_FORMAT ",%"
CPL_SIZE_FORMAT ")| = |%g - %g| = "
"|%g| <= %g = %s",
first_string, irow, icol,
second_string, irow, icol,
val1, val2, difval, tolerance,
tolerance_string);
} else {
cpl_error_set(cpl_func, cpl_error_get_code());
expression = CPL_FALSE;
message = cpl_sprintf("%s <=> %s (tol=%s) input error:",
first_string, second_string,
tolerance_string);
}
}
cpl_test_one(errnopre, twallpre, flopspre, statepre, expression, message,
CPL_FALSE, function, file, line);
cpl_errorstate_set(mystate);
cpl_matrix_delete(diff);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if two CPL numerical arrays are identical
within a given (absolute) tolerance
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param first The first array in the comparison
@param first_string The first array as a string
@param second The second array of identical size in the comparison
@param second_string The second array as a string
@param tolerance A non-negative tolerance
@param tolerance_string The tolerance as a string
@param function function name
@param file filename
@param line line number
@see cpl_test_array_abs()
@note This function should only be called from the macro cpl_test_array_abs()
*/
/*----------------------------------------------------------------------------*/
void cpl_test_array_abs_macro(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre,
const cpl_array * first,
const char *first_string,
const cpl_array * second,
const char *second_string,
double tolerance, const char *tolerance_string,
const char *function, const char *file,
unsigned line)
{
/* Modified from cpl_test_image_abs_macro() */
cpl_errorstate mystate = cpl_errorstate_get();
const cpl_type type1 = cpl_array_get_type(first);
const cpl_type type2 = cpl_array_get_type(second);
#ifdef CPL_SIZE_FORMAT
const cpl_size nbad1 = cpl_array_count_invalid(first);
const cpl_size nbad2 = cpl_array_count_invalid(second);
const cpl_size nx = cpl_array_get_size(first);
#else
const int nbad1 = cpl_array_count_invalid(first);
const int nbad2 = cpl_array_count_invalid(second);
const int nx = cpl_array_get_size(first);
#endif
cpl_array * diff = cpl_array_duplicate(first);
cpl_boolean expression;
char * message;
(void)cpl_array_subtract(diff, second);
if (tolerance < 0.0) {
expression = CPL_FALSE;
message = cpl_sprintf("array1=%s; array2=%s. Negative tolerance %s = "
"%g", first_string, second_string,
tolerance_string, tolerance);
} else if (!cpl_errorstate_is_equal(mystate)) {
cpl_error_set(cpl_func, cpl_error_get_code());
expression = CPL_FALSE;
#ifdef CPL_SIZE_FORMAT
message = cpl_sprintf("%s(%" CPL_SIZE_FORMAT ", %s) <=> %s(%"
CPL_SIZE_FORMAT ", %s) (tol=%s) input error:",
first_string, nx, cpl_type_get_name(type1),
second_string, cpl_array_get_size(second),
cpl_type_get_name(type2), tolerance_string);
#else
message = cpl_sprintf("%s(%d, %s) <=> %s(%d, %s) (tol=%s) input error:",
first_string, nx, cpl_type_get_name(type1),
second_string, cpl_array_get_size(second),
cpl_type_get_name(type2), tolerance_string);
#endif
} else if (nbad1 == nbad2 && nbad1 == nx) {
expression = CPL_TRUE;
#ifdef CPL_SIZE_FORMAT
message = cpl_sprintf("%s(%" CPL_SIZE_FORMAT ", %s) <=> %s(%"
CPL_SIZE_FORMAT ", %s) (tol=%s) All elements "
"are bad",
first_string, nx, cpl_type_get_name(type1),
second_string, cpl_array_get_size(second),
cpl_type_get_name(type2), tolerance_string);
#else
message = cpl_sprintf("%s(%d, %s) <=> %s(%d, %s) (tol=%s) "
"All elements are bad", first_string,
nx, cpl_type_get_name(type1),
second_string, cpl_array_get_size(second),
cpl_type_get_name(type2), tolerance_string);
#endif
} else if (cpl_array_count_invalid(diff) == nx) {
expression = CPL_FALSE;
#ifdef CPL_SIZE_FORMAT
message = cpl_sprintf("%s(%" CPL_SIZE_FORMAT ", %s) <=> %s(%"
CPL_SIZE_FORMAT ", %s) (tol=%s) All elements "
"are bad in the first (%" CPL_SIZE_FORMAT
") or second (%" CPL_SIZE_FORMAT "d) array",
first_string, nx, cpl_type_get_name(type1),
second_string, cpl_array_get_size(second),
cpl_type_get_name(type2), tolerance_string,
nbad1, nbad2);
#else
message = cpl_sprintf("%s(%d, %s) <=> %s(%d, %s) (tol=%s) All elements "
"are bad in the first (%d) or second (%d) array",
first_string, nx, cpl_type_get_name(type1),
second_string, cpl_array_get_size(second),
cpl_type_get_name(type2), tolerance_string,
nbad1, nbad2);
#endif
} else {
const double maxdif = cpl_array_get_max(diff);
const double mindif = cpl_array_get_min(diff);
const cpl_boolean is_pos = (maxdif >= -mindif) ? CPL_TRUE : CPL_FALSE;
const double difval = is_pos ? maxdif : mindif;
#ifdef CPL_SIZE_FORMAT
cpl_size posx;
#else
int posx;
#endif
const cpl_error_code error = (is_pos ? cpl_array_get_maxpos
: cpl_array_get_minpos) (diff, &posx);
int is_bad1;
int is_bad2;
const double val1
= (type1 == CPL_TYPE_INT ? (double)cpl_array_get_int(first, posx,
&is_bad1)
: (type1 == CPL_TYPE_FLOAT
? (double)cpl_array_get_float(first, posx, &is_bad1)
: cpl_array_get_double(first, posx, &is_bad1)));
const double val2
= (type2 == CPL_TYPE_INT
? (double)cpl_array_get_int(second, posx, &is_bad2)
: (type2 == CPL_TYPE_FLOAT
? (double)cpl_array_get_float(second, posx, &is_bad2)
: cpl_array_get_double(second, posx, &is_bad2)));
if (!error && cpl_errorstate_is_equal(mystate)) {
const char * rejstr1 = is_bad1 ? " invalid" : " valid";
const char * rejstr2 = is_bad2 ? " invalid" : " valid";
expression = (fabs(difval) <= tolerance) ? CPL_TRUE : CPL_FALSE;
message = cpl_sprintf("|%s(%" CPL_SIZE_FORMAT ",%s, %s) - %s(%"
CPL_SIZE_FORMAT ",%s, %s)| = "
"|%g - %g| = |%g| <= %g = %s",
first_string, posx, rejstr1,
cpl_type_get_name(type1), second_string,
posx, rejstr2, cpl_type_get_name(type2),
val1, val2, difval, tolerance,
tolerance_string);
} else {
cpl_error_set(cpl_func, cpl_error_get_code());
expression = CPL_FALSE;
message = cpl_sprintf("%s <=> %s (tol=%s) input error:",
first_string, second_string,
tolerance_string);
}
}
cpl_test_one(errnopre, twallpre, flopspre, statepre, expression, message,
CPL_FALSE, function, file, line);
if (!expression && cpl_errorstate_is_equal(mystate) &&
cpl_msg_get_level() <= CPL_MSG_ERROR) {
cpl_msg_warning(cpl_func, "Structure of the compared arrays:");
cpl_array_dump_structure(first, stderr);
cpl_array_dump_structure(second, stderr);
}
cpl_errorstate_set(mystate);
cpl_array_delete(diff);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if two CPL images are identical
within a given (absolute) tolerance
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param first The first image in the comparison
@param first_string The first image as a string
@param second The second image of identical size in the comparison
@param second_string The second image as a string
@param tolerance A non-negative tolerance
@param tolerance_string The tolerance as a string
@param function function name
@param file filename
@param line line number
@see cpl_test_image_abs()
@note This function should only be called from the macro cpl_test_image_abs()
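Example of usage of the calling macro (an illustrative sketch, assuming
myimg1 and myimg2 are two images of identical size):
@code
cpl_test_image_abs(myimg1, myimg2, 1e-5);
@endcode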
*/
/*----------------------------------------------------------------------------*/
void cpl_test_image_abs_macro(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre,
const cpl_image * first,
const char *first_string,
const cpl_image * second,
const char *second_string,
double tolerance, const char *tolerance_string,
const char *function, const char *file,
unsigned line)
{
cpl_errorstate mystate = cpl_errorstate_get();
const char * stype1 = cpl_type_get_name(cpl_image_get_type(first));
const char * stype2 = cpl_type_get_name(cpl_image_get_type(second));
const cpl_size nbad1 = cpl_image_count_rejected(first);
const cpl_size nbad2 = cpl_image_count_rejected(second);
const cpl_size nx = cpl_image_get_size_x(first);
const cpl_size ny = cpl_image_get_size_y(first);
cpl_image * cdiff = cpl_image_subtract_create(first, second);
cpl_image * diff = (cpl_image_get_type(cdiff) & CPL_TYPE_COMPLEX) ?
cpl_image_extract_mod(cdiff) : cdiff;
cpl_boolean expression;
char * message;
if (tolerance < 0.0) {
expression = CPL_FALSE;
message = cpl_sprintf("image1=%s; image2=%s. Negative tolerance %s = "
"%g", first_string, second_string,
tolerance_string, tolerance);
} else if (!cpl_errorstate_is_equal(mystate)) {
cpl_error_set(cpl_func, cpl_error_get_code());
expression = CPL_FALSE;
message = cpl_sprintf("%s(%" CPL_SIZE_FORMAT ",%" CPL_SIZE_FORMAT
", %s) <=> %s(%" CPL_SIZE_FORMAT ",%"
CPL_SIZE_FORMAT ", %s) (tol=%s) input error:",
first_string, nx, ny, stype1, second_string,
cpl_image_get_size_x(second),
cpl_image_get_size_y(second), stype2,
tolerance_string);
} else if (nbad1 == nbad2 && nbad1 == nx * ny) {
expression = CPL_TRUE;
message = cpl_sprintf("%s(%" CPL_SIZE_FORMAT ",%" CPL_SIZE_FORMAT
", %s) <=> %s(%" CPL_SIZE_FORMAT ",%"
CPL_SIZE_FORMAT ", %s) (tol=%s) All pixels "
"are bad",
first_string, nx, ny, stype1, second_string,
cpl_image_get_size_x(second),
cpl_image_get_size_y(second), stype2,
tolerance_string);
} else if (cpl_image_count_rejected(diff) == nx * ny) {
expression = CPL_FALSE;
message = cpl_sprintf("%s(%" CPL_SIZE_FORMAT ",%" CPL_SIZE_FORMAT
", %s) <=> %s(%" CPL_SIZE_FORMAT ",%"
CPL_SIZE_FORMAT ", %s) (tol=%s) All pixels "
"are bad in the first (%" CPL_SIZE_FORMAT
") or second (%" CPL_SIZE_FORMAT ") image",
first_string, nx, ny, stype1,
second_string, cpl_image_get_size_x(second),
cpl_image_get_size_y(second), stype2,
tolerance_string, nbad1, nbad2);
} else {
cpl_stats istats;
const cpl_error_code icode =
cpl_stats_fill_from_image(&istats, diff, CPL_STATS_MIN
| CPL_STATS_MAX
| CPL_STATS_MINPOS
| CPL_STATS_MAXPOS);
const cpl_stats * stats = icode ? NULL : &istats;
const double maxdif = cpl_stats_get_max(stats);
const double mindif = cpl_stats_get_min(stats);
const cpl_boolean is_pos = (maxdif >= -mindif) ? CPL_TRUE : CPL_FALSE;
const double difval = is_pos ? maxdif : mindif;
const cpl_size posx
= is_pos ? cpl_stats_get_max_x(stats) : cpl_stats_get_min_x(stats);
const cpl_size posy
= is_pos ? cpl_stats_get_max_y(stats) : cpl_stats_get_min_y(stats);
int is_bad1;
int is_bad2;
if (cpl_errorstate_is_equal(mystate)) {
expression = (fabs(difval) <= tolerance) ? CPL_TRUE : CPL_FALSE;
if (cpl_image_get_type(cdiff) & CPL_TYPE_COMPLEX) {
const double complex val1 =
cpl_image_get_complex(first, posx, posy, &is_bad1);
const double complex val2 =
(cpl_image_get_type(second) & CPL_TYPE_COMPLEX)
? cpl_image_get_complex(second, posx, posy, &is_bad2)
: cpl_image_get(second, posx, posy, &is_bad2);
const char * rejstr1 = is_bad1 ? " bad" : " not bad";
const char * rejstr2 = is_bad2 ? " bad" : " not bad";
message = cpl_sprintf("|%s(%" CPL_SIZE_FORMAT ",%"
CPL_SIZE_FORMAT
",%s, %s) - %s(%" CPL_SIZE_FORMAT ",%"
CPL_SIZE_FORMAT ",%s, %s)| = "
"|%g - %g + I (%g - %g) | = |%g| "
"<= %g = %s",
first_string, posx, posy, rejstr1, stype1,
second_string, posx, posy, rejstr2,
stype2, creal(val1), creal(val2),
cimag(val1), cimag(val2), difval,
tolerance, tolerance_string);
} else {
const double val1 = cpl_image_get(first, posx, posy, &is_bad1);
const double val2 = cpl_image_get(second, posx, posy, &is_bad2);
const char * rejstr1 = is_bad1 ? " bad" : " not bad";
const char * rejstr2 = is_bad2 ? " bad" : " not bad";
message = cpl_sprintf("|%s(%" CPL_SIZE_FORMAT ",%"
CPL_SIZE_FORMAT
",%s, %s) - %s(%" CPL_SIZE_FORMAT ",%"
CPL_SIZE_FORMAT ",%s, %s)| = "
"|%g - %g| = |%g| <= %g = %s",
first_string, posx, posy, rejstr1, stype1,
second_string, posx, posy, rejstr2,
stype2, val1, val2, difval, tolerance,
tolerance_string);
}
if (!expression && cpl_msg_get_level() <= CPL_MSG_ERROR)
cpl_stats_dump(stats, CPL_STATS_MIN
| CPL_STATS_MAX
| CPL_STATS_MINPOS
| CPL_STATS_MAXPOS, stderr);
} else {
cpl_error_set(cpl_func, cpl_error_get_code());
expression = CPL_FALSE;
message = cpl_sprintf("%s(%" CPL_SIZE_FORMAT ",%" CPL_SIZE_FORMAT
", %s) <=> %s(%" CPL_SIZE_FORMAT ",%"
CPL_SIZE_FORMAT ", %s) (tol=%s) input error:",
first_string, nx, ny, stype1,
second_string, cpl_image_get_size_x(second),
cpl_image_get_size_y(second), stype2,
tolerance_string);
}
}
cpl_test_one(errnopre, twallpre, flopspre, statepre, expression, message,
CPL_FALSE, function, file, line);
if (!expression && cpl_errorstate_is_equal(mystate) &&
cpl_msg_get_level() <= CPL_MSG_ERROR) {
cpl_msg_warning(cpl_func, "Structure of the compared images:");
cpl_image_dump_structure(first, stderr);
cpl_image_dump_structure(second, stderr);
}
cpl_errorstate_set(mystate);
cpl_image_delete(cdiff);
if (diff != cdiff) cpl_image_delete(diff);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if two CPL images are identical
within a given (relative) tolerance
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param first The first image in the comparison
@param first_string The first image as a string
@param second The second image of identical size in the comparison
@param second_string The second image as a string
@param tolerance A non-negative tolerance
@param tolerance_string The tolerance as a string
@param function function name
@param file filename
@param line line number
@see cpl_test_image_rel()
@note This function should only be called from the macro cpl_test_image_rel()
*/
/*----------------------------------------------------------------------------*/
void cpl_test_image_rel_macro(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre,
const cpl_image * first,
const char *first_string,
const cpl_image * second,
const char *second_string,
double tolerance, const char *tolerance_string,
const char *function, const char *file,
unsigned line)
{
cpl_errorstate mystate = cpl_errorstate_get();
const char * stype1 = cpl_type_get_name(cpl_image_get_type(first));
const char * stype2 = cpl_type_get_name(cpl_image_get_type(second));
const cpl_size nbad1 = cpl_image_count_rejected(first);
const cpl_size nbad2 = cpl_image_count_rejected(second);
const cpl_size nx = cpl_image_get_size_x(first);
const cpl_size ny = cpl_image_get_size_y(first);
cpl_image * cdiff = cpl_image_subtract_create(first, second);
cpl_image * diff = (cpl_image_get_type(cdiff) & CPL_TYPE_COMPLEX) ?
cpl_image_extract_mod(cdiff) : cdiff;
cpl_boolean expression;
char * message;
if (tolerance < 0.0) {
expression = CPL_FALSE;
message = cpl_sprintf("image1=%s; image2=%s. Negative tolerance %s = "
"%g", first_string, second_string,
tolerance_string, tolerance);
} else if (!cpl_errorstate_is_equal(mystate)) {
cpl_error_set(cpl_func, cpl_error_get_code());
expression = CPL_FALSE;
message = cpl_sprintf("%s(%" CPL_SIZE_FORMAT ",%" CPL_SIZE_FORMAT
", %s) <=> %s(%" CPL_SIZE_FORMAT ",%"
CPL_SIZE_FORMAT ", %s) (tol=%s) input error:",
first_string, nx, ny, stype1, second_string,
cpl_image_get_size_x(second),
cpl_image_get_size_y(second), stype2,
tolerance_string);
} else if (nbad1 == nbad2 && nbad1 == nx * ny) {
expression = CPL_TRUE;
message = cpl_sprintf("%s(%" CPL_SIZE_FORMAT ",%" CPL_SIZE_FORMAT
", %s) <=> %s(%" CPL_SIZE_FORMAT ",%"
CPL_SIZE_FORMAT ", %s) (tol=%s) All pixels "
"are bad",
first_string, nx, ny, stype1, second_string,
cpl_image_get_size_x(second),
cpl_image_get_size_y(second), stype2,
tolerance_string);
} else if (cpl_image_count_rejected(diff) == nx * ny) {
expression = CPL_FALSE;
message = cpl_sprintf("%s(%" CPL_SIZE_FORMAT ",%" CPL_SIZE_FORMAT
", %s) <=> %s(%" CPL_SIZE_FORMAT ",%"
CPL_SIZE_FORMAT ", %s) (tol=%s) All pixels "
"are bad in the first (%" CPL_SIZE_FORMAT
") or second (%" CPL_SIZE_FORMAT ") image",
first_string, nx, ny, stype1,
second_string, cpl_image_get_size_x(second),
cpl_image_get_size_y(second), stype2,
tolerance_string, nbad1, nbad2);
} else {
/* Create real-valued images with the absolute values */
cpl_image * imabs1 = (cpl_image_get_type(first) & CPL_TYPE_COMPLEX) ?
cpl_image_extract_mod(first) : cpl_image_abs_create(first);
cpl_image * imabs2 = (cpl_image_get_type(second) & CPL_TYPE_COMPLEX) ?
cpl_image_extract_mod(second) : cpl_image_abs_create(second);
cpl_image * immin = cpl_image_min_create(imabs1, imabs2);
cpl_error_code error = cpl_image_multiply_scalar(immin, tolerance);
cpl_image * posit = cpl_image_subtract_create(immin, diff);
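        /* posit = tol * min(|first|, |second|) - diff, so the test passes
           iff the minimum of posit is non-negative, i.e. iff
           diff <= tol * min(|first|, |second|) at every pixel */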
cpl_stats istats;
const cpl_error_code icode =
cpl_stats_fill_from_image(&istats, posit, CPL_STATS_MIN
| CPL_STATS_MINPOS);
const cpl_stats * stats = icode ? NULL : &istats;
const double difval = cpl_stats_get_min(stats);
const cpl_size posx = cpl_stats_get_min_x(stats);
const cpl_size posy = cpl_stats_get_min_y(stats);
int is_bad1;
int is_bad2;
if (!error && cpl_errorstate_is_equal(mystate)) {
const double mval = cpl_image_get(immin, posx, posy, &is_bad2);
expression = difval >= 0.0 ? CPL_TRUE : CPL_FALSE;
if (cpl_image_get_type(cdiff) & CPL_TYPE_COMPLEX) {
const double complex val1 =
cpl_image_get_complex(first, posx, posy, &is_bad1);
const double complex val2 =
(cpl_image_get_type(second) & CPL_TYPE_COMPLEX)
? cpl_image_get_complex(second, posx, posy, &is_bad2)
: cpl_image_get(second, posx, posy, &is_bad2);
const char * rejstr1 = is_bad1 ? " bad" : " not bad";
const char * rejstr2 = is_bad2 ? " bad" : " not bad";
message = cpl_sprintf("|%s(%" CPL_SIZE_FORMAT ",%"
CPL_SIZE_FORMAT
",%s, %s) - %s(%" CPL_SIZE_FORMAT ",%"
CPL_SIZE_FORMAT ",%s, %s)| = "
"|%g - %g + I (%g - %g) | = |%g + I %g| "
"<= %g",
first_string, posx, posy, rejstr1, stype1,
second_string, posx, posy, rejstr2,
stype2, creal(val1), creal(val2),
cimag(val1), cimag(val2),
creal(val1) - creal(val2),
cimag(val1) - cimag(val2), mval);
} else {
const double val1 = cpl_image_get(first, posx, posy, &is_bad1);
const double val2 = cpl_image_get(second, posx, posy, &is_bad2);
const char * rejstr1 = is_bad1 ? " bad" : " not bad";
const char * rejstr2 = is_bad2 ? " bad" : " not bad";
message = cpl_sprintf("|%s(%" CPL_SIZE_FORMAT ",%"
CPL_SIZE_FORMAT
",%s, %s) - %s(%" CPL_SIZE_FORMAT ",%"
CPL_SIZE_FORMAT ",%s, %s)| = "
"|%g - %g| = |%g| <= %g",
first_string, posx, posy, rejstr1, stype1,
second_string, posx, posy, rejstr2,
stype2, val1, val2, val1 - val2, mval);
}
if (!expression && cpl_msg_get_level() <= CPL_MSG_ERROR)
cpl_stats_dump(stats, CPL_STATS_MIN
| CPL_STATS_MINPOS, stderr);
} else {
cpl_error_set(cpl_func, cpl_error_get_code());
expression = CPL_FALSE;
message = cpl_sprintf("%s(%" CPL_SIZE_FORMAT ",%" CPL_SIZE_FORMAT
", %s) <=> %s(%" CPL_SIZE_FORMAT ",%"
CPL_SIZE_FORMAT ", %s) (tol=%s) input error:",
first_string, nx, ny, stype1,
second_string, cpl_image_get_size_x(second),
cpl_image_get_size_y(second), stype2,
tolerance_string);
}
cpl_image_delete(imabs1);
cpl_image_delete(imabs2);
cpl_image_delete(immin);
cpl_image_delete(posit);
}
cpl_test_one(errnopre, twallpre, flopspre, statepre, expression, message,
CPL_FALSE, function, file, line);
if (!expression && cpl_errorstate_is_equal(mystate)) {
cpl_msg_warning(cpl_func, "Structure of the compared images:");
if (cpl_msg_get_level() <= CPL_MSG_ERROR) {
cpl_image_dump_structure(first, stderr);
cpl_image_dump_structure(second, stderr);
}
}
cpl_errorstate_set(mystate);
cpl_image_delete(cdiff);
if (diff != cdiff) cpl_image_delete(diff);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if two CPL imagelists are identical
within a given (absolute) tolerance
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param first The first imagelist in the comparison
@param first_string The first imagelist as a string
@param second The second list of identical size in the comparison
@param second_string The second imagelist as a string
@param tolerance A non-negative tolerance
@param tolerance_string The tolerance as a string
@param function function name
@param file filename
@param line line number
@see cpl_test_imagelist_abs()
@note This function should only be called from cpl_test_imagelist_abs()
*/
/*----------------------------------------------------------------------------*/
void cpl_test_imagelist_abs_macro(int errnopre, double twallpre,
cpl_flops flopspre, cpl_errorstate statepre,
const cpl_imagelist * first,
const char *first_string,
const cpl_imagelist * second,
const char *second_string,
double tolerance,
const char *tolerance_string,
const char *function, const char *file,
unsigned line)
{
cpl_errorstate mystate = cpl_errorstate_get();
const cpl_size sz1 = cpl_imagelist_get_size(first);
const cpl_size sz2 = cpl_imagelist_get_size(second);
cpl_boolean expression;
char * message = NULL;
if (!cpl_errorstate_is_equal(mystate)) {
cpl_error_set(cpl_func, cpl_error_get_code());
expression = CPL_FALSE;
message = cpl_sprintf("%s <=> %s (tol=%s) input error:",
first_string, second_string,
tolerance_string);
} else if (sz1 != sz2) {
expression = CPL_FALSE;
message = cpl_sprintf("%s <=> %s (tol=%s) imagelist list sizes differ: "
"%" CPL_SIZE_FORMAT " <=> %" CPL_SIZE_FORMAT,
first_string, second_string,
tolerance_string, sz1, sz2);
} else {
const cpl_size failures = cpl_test_failures;
cpl_size i;
message = cpl_sprintf("|%s(%" CPL_SIZE_FORMAT ") - %s(%" CPL_SIZE_FORMAT
")| <= %g = %s", first_string, sz1,
second_string, sz2,
tolerance, tolerance_string);
for (i = 0; i < sz1; i++) {
const cpl_image * img1 = cpl_imagelist_get_const(first, i);
const cpl_image * img2 = cpl_imagelist_get_const(second, i);
char * img1string = cpl_sprintf("image %" CPL_SIZE_FORMAT
" in first list", 1+i);
char * img2string = cpl_sprintf("image %" CPL_SIZE_FORMAT
" in second list", 1+i);
cpl_test_image_abs_macro(errnopre, twallpre, flopspre, statepre,
img1, img1string, img2, img2string,
tolerance, tolerance_string, function,
file, line);
cpl_free(img1string);
cpl_free(img2string);
}
expression = failures == cpl_test_failures ? CPL_TRUE : CPL_FALSE;
cpl_test_failures = failures; /* Count as only one test ! */
}
cpl_test_one(errnopre, twallpre, flopspre, statepre, expression, message,
CPL_FALSE, function, file, line);
if (!expression && cpl_errorstate_is_equal(mystate)) {
cpl_msg_warning(cpl_func, "Structure of the compared imagelists:");
if (cpl_msg_get_level() <= CPL_MSG_ERROR) {
cpl_imagelist_dump_structure(first, stderr);
cpl_imagelist_dump_structure(second, stderr);
}
}
cpl_errorstate_set(mystate);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if two CPL polynomials are identical
within a given (absolute) tolerance
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param first The first polynomial in the comparison
@param first_string The first polynomial as a string
@param second The second polynomial in the comparison
@param second_string The second polynomial as a string
@param tolerance A non-negative tolerance
@param tolerance_string The tolerance as a string
@param function function name
@param file filename
@param line line number
@see cpl_test_polynomial_abs()
@note This function should only be called from the macro
cpl_test_polynomial_abs()
*/
/*----------------------------------------------------------------------------*/
void cpl_test_polynomial_abs_macro(int errnopre, double twallpre,
cpl_flops flopspre, cpl_errorstate statepre,
const cpl_polynomial * first,
const char *first_string,
const cpl_polynomial * second,
const char *second_string,
double tolerance,
const char *tolerance_string,
const char *function, const char *file,
unsigned line)
{
cpl_errorstate mystate = cpl_errorstate_get();
const int retval = cpl_polynomial_compare(first, second, tolerance);
cpl_boolean expression = retval ? CPL_FALSE : CPL_TRUE;
char * message;
if (cpl_errorstate_is_equal(mystate)) {
const cpl_size dim1 = cpl_polynomial_get_dimension(first);
const cpl_size dim2 = cpl_polynomial_get_dimension(second);
        message = cpl_sprintf("(dimension %d <=> %d. intol degree=%d) |%s - %s| "
                              "<= %g = %s", (int)dim1, (int)dim2,
                              retval ? retval - 1 : 0,
                              first_string, second_string,
                              tolerance, tolerance_string);
    } else {
        cpl_error_set(cpl_func, cpl_error_get_code());
        expression = CPL_FALSE;
message = cpl_sprintf("%s <=> %s (tol=%s) input error:",
first_string, second_string,
tolerance_string);
}
cpl_test_one(errnopre, twallpre, flopspre, statepre, expression, message,
CPL_FALSE, function, file, line);
if (retval > 0) {
cpl_polynomial_dump(first, stderr);
cpl_polynomial_dump(second, stderr);
}
cpl_errorstate_set(mystate);
cpl_free(message);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test and reset the CPL error code
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param errorstate The expected CPL error code (incl. CPL_ERROR_NONE)
@param errorstate_string The CPL error code as a string
@param function cpl_func
@param file __FILE__
@param line __LINE__
@see cpl_test_errorstate
@note This function should only be called from the macro
cpl_test_errorstate()
*/
/*----------------------------------------------------------------------------*/
void cpl_test_errorstate_macro(int errnopre, double twallpre,
cpl_flops flopspre, cpl_errorstate statepre,
cpl_errorstate errorstate,
const char * errorstate_string,
const char * function, const char * file,
unsigned line)
{
/* FIXME: Improve message */
char * message = cpl_sprintf("%s <=> %d (%s)",
errorstate_string,
cpl_error_get_code(),
cpl_error_get_message());
cpl_test_one(errnopre, twallpre, flopspre, statepre,
cpl_errorstate_is_equal(errorstate) ? CPL_TRUE : CPL_FALSE,
message, CPL_TRUE, function, file, line);
cpl_free(message);
cpl_test_reset(errorstate);
return;
}
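/*
 * Example of usage (a minimal sketch, via the corresponding macro):
 *
 *   cpl_errorstate prestate = cpl_errorstate_get();
 *   ... code that is expected to leave the CPL errorstate unchanged ...
 *   cpl_test_errorstate(prestate);
 */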
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test and reset the CPL error code
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param error The expected CPL error code (incl. CPL_ERROR_NONE)
@param error_string The CPL error code as a string
@param function cpl_func
@param file __FILE__
@param line __LINE__
@see cpl_test_error
@note This function should only be called from the macro cpl_test_error()
*/
/*----------------------------------------------------------------------------*/
void cpl_test_error_macro(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre,
cpl_error_code error, const char * error_string,
const char * function, const char * file,
unsigned line)
{
char * message = cpl_sprintf("(%s) = %d (%s) <=> %d (%s)",
error_string, error,
cpl_error_get_message_default(error),
cpl_error_get_code(),
cpl_error_get_message());
cpl_test_one(errnopre, twallpre, flopspre, statepre,
cpl_error_get_code() == error ? CPL_TRUE : CPL_FALSE,
message, CPL_TRUE, function, file, line);
cpl_free(message);
cpl_test_reset(cleanstate);
return;
}
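/*
 * Example of usage (a minimal sketch, via the corresponding macro):
 * provoke an error deliberately and verify that it is the expected one;
 * the macro also resets the CPL error code:
 *
 *   (void)cpl_image_get_size_x(NULL);     // sets CPL_ERROR_NULL_INPUT
 *   cpl_test_error(CPL_ERROR_NULL_INPUT);
 */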
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if two CPL error expressions are equal, also to the CPL error code
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param first The first value in the comparison
@param first_string The first value as a string
@param second The second value in the comparison
@param second_string The second value as a string
@param function cpl_func
@param file __FILE__
@param line __LINE__
@see cpl_test_error
@note This function should only be called from the macro cpl_test_eq_error()
*/
/*----------------------------------------------------------------------------*/
void cpl_test_eq_error_macro(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre,
cpl_error_code first, const char * first_string,
cpl_error_code second, const char * second_string,
const char * function, const char * file,
unsigned line)
{
char * message = cpl_sprintf("(%s) = %d (%s) <=> (%s) = %d (%s) "
"<=> %d (%s)", first_string, first,
cpl_error_get_message_default(first),
second_string, second,
cpl_error_get_message_default(second),
cpl_error_get_code(),
cpl_error_get_message_default
(cpl_error_get_code()));
cpl_test_one(errnopre, twallpre, flopspre, statepre,
(first == second && cpl_error_get_code() == first)
? CPL_TRUE : CPL_FALSE,
message, CPL_TRUE, function, file, line);
cpl_free(message);
cpl_test_reset(cleanstate);
return;
}
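/*
 * Example of usage (a minimal sketch, via the corresponding macro): compare
 * the code returned by a call both with the expected code and with the
 * current CPL error code:
 *
 *   cpl_test_eq_error(cpl_error_set(cpl_func, CPL_ERROR_ILLEGAL_INPUT),
 *                     CPL_ERROR_ILLEGAL_INPUT);
 */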
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test if the memory system is empty
@param errnopre errno prior to expression evaluation
@param flopspre FLOP count prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param statepre CPL errorstate prior to expression evaluation
@param function cpl_func
@param file __FILE__
@param line __LINE__
@see cpl_test_memory_is_empty
@note This function should only be called from the macro
cpl_test_memory_is_empty()
*/
/*----------------------------------------------------------------------------*/
void cpl_test_memory_is_empty_macro(int errnopre, double twallpre,
cpl_flops flopspre, cpl_errorstate statepre,
const char * function, const char * file,
unsigned line)
{
const char * message;
cpl_boolean ok;
if (cpl_memory_is_empty() == -1) {
message = "CPL memory system is empty (not testable)";
ok = CPL_TRUE;
} else {
message = "CPL memory system is empty";
ok = cpl_memory_is_empty() == 0 ? CPL_FALSE : CPL_TRUE;
}
cpl_test_one(errnopre, twallpre, flopspre, statepre, ok, message, CPL_FALSE,
function, file, line);
if (!ok) {
cpl_msg_indent_more();
cpl_memory_dump();
cpl_msg_indent_less();
}
return;
}
/*----------------------------------------------------------------------------*/
/**
@brief Finalize CPL and unit-testing environment and report any failures
@param nfail The number of failures counted apart from cpl_test() et al.
@return @em EXIT_SUCCESS iff the CPL errorstate is clean
@note This function should be used for the final return from a unit test
@see cpl_test_init()
nfail should normally be zero, but may be set to a positive number when it
is necessary to ensure a failure.
nfail should only be negative in the unit test of the unit-test functions
themselves.
Example of usage:
@code
int main (void)
{
cpl_test_init(PACKAGE_BUGREPORT, CPL_MSG_WARNING);
cpl_test(myfunc(&p));
cpl_test(p != NULL);
return cpl_test_end(0);
}
@endcode
*/
/*----------------------------------------------------------------------------*/
int cpl_test_end(cpl_size nfail)
{
const int errnopre = errno;
const cpl_flops nflops = cpl_tools_get_flops();
cpl_boolean ok = CPL_TRUE;
const cpl_size mfail = nfail + (cpl_size)cpl_test_failures;
const double cpl_test_elap = cpl_test_get_walltime() - cpl_test_time_start;
#if defined HAVE_SYS_TIMES_H && defined _SC_CLK_TCK && defined HAVE_SYSCONF
struct tms buf;
const clock_t clocks = times(&buf);
const double cputime =(double)buf.tms_utime
/ (double)sysconf(_SC_CLK_TCK);
const double systime =(double)buf.tms_stime
/ (double)sysconf(_SC_CLK_TCK);
const double chcputime =(double)buf.tms_cutime
/ (double)sysconf(_SC_CLK_TCK);
const double chsystime =(double)buf.tms_cstime
/ (double)sysconf(_SC_CLK_TCK);
errno = 0;
cpl_msg_debug(cpl_func, "Sizeof(clock_t): %u", (unsigned)sizeof(clocks));
cpl_msg_debug(cpl_func, "sysconf(_SC_CLK_TCK): %u",
(unsigned)sysconf(_SC_CLK_TCK));
cpl_msg_info(cpl_func, "User time to test [s]: %g", cputime);
cpl_msg_info(cpl_func, "System time to test [s]: %g", systime);
cpl_msg_debug(cpl_func, "Child user time to test [s]: %g", chcputime);
cpl_msg_debug(cpl_func, "Child system time to test [s]: %g", chsystime);
#else
errno = 0;
#endif
if (cpl_test_state_ == 0) {
cpl_msg_error(cpl_func, "Missing a previous call to cpl_test_init()");
ok = CPL_FALSE;
} else if (cpl_test_state_ == 2) {
cpl_msg_error(cpl_func, "Repeated call to cpl_test_end()");
ok = CPL_FALSE;
}
/* Need to close files here, to deallocate */
cpl_test_zero(cpl_io_fits_end());
if (cpl_test_elap > 0.0) {
cpl_msg_info(cpl_func, "Actual time to test [s]: %g",
cpl_test_elap);
cpl_msg_info(cpl_func, "The computational speed during this test "
"[MFLOP/s]: %g", 1e-6*(double)nflops/cpl_test_elap);
} else {
cpl_msg_info(cpl_func, "Number of MFLOPs in this test: %g",
1e-6*(double)nflops);
}
if (errnopre != 0) {
cpl_msg_warning(cpl_func, "%s() was called with errno=%d: %s",
cpl_func, errnopre, strerror(errnopre));
}
/* Make sure that the failure is written */
if (cpl_msg_get_level() == CPL_MSG_OFF) cpl_msg_set_level(CPL_MSG_ERROR);
if (cpl_error_get_code() != CPL_ERROR_NONE) {
ok = CPL_FALSE;
cpl_msg_error(cpl_func, "The CPL errorstate was set by the unit "
"test(s)");
cpl_msg_indent_more();
cpl_errorstate_dump(cleanstate, CPL_FALSE, NULL);
cpl_msg_indent_less();
}
if (mfail > 0) {
ok = CPL_FALSE;
cpl_msg_error(cpl_func, "%" CPL_SIZE_FORMAT " of %" CPL_SIZE_FORMAT
" test(s) failed", mfail, cpl_test_count);
} else if (mfail < 0) {
ok = CPL_FALSE;
/* This special case is only foreseen to be reached by
the unit test of the CPL unit test module */
cpl_msg_error(cpl_func, "%" CPL_SIZE_FORMAT " of %" CPL_SIZE_FORMAT
" test(s) failed, %" CPL_SIZE_FORMAT " less than the "
"expected %" CPL_SIZE_FORMAT " failure(s)",
cpl_test_failures, cpl_test_count, -mfail, -nfail);
} else {
cpl_msg_info(cpl_func, "All %" CPL_SIZE_FORMAT " test(s) succeeded",
cpl_test_count);
}
if (!cpl_memory_is_empty()) {
ok = CPL_FALSE;
cpl_msg_error(cpl_func, "Memory leak detected:");
cpl_msg_indent_more();
cpl_memory_dump();
cpl_msg_indent_less();
} else if (cpl_msg_get_level() <= CPL_MSG_DEBUG) {
cpl_memory_dump();
}
if (!ok) {
cpl_msg_error(cpl_func, "This failure may indicate a bug in the tested "
"code");
cpl_msg_error(cpl_func, "You can contribute to the improvement of the "
"software by emailing the logfile '%s' and the configure "
"logfile 'config.log' to %s", cpl_msg_get_log_name(),
cpl_test_report ? cpl_test_report : PACKAGE_BUGREPORT);
cpl_msg_error(cpl_func, "System specifics:\n%s",
cpl_test_get_description());
}
cpl_test_dump_status();
if (errno != 0) {
cpl_msg_warning(cpl_func, "%s() set errno=%d: %s", cpl_func, errno,
strerror(errno));
errno = 0;
}
cpl_test_state_ = 2;
cpl_end();
return ok ? EXIT_SUCCESS : EXIT_FAILURE;
}
/**@}*/
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Dump the CPL errorstate and reset it
@param self The errorstate to reset to
@return void
*/
/*----------------------------------------------------------------------------*/
static void cpl_test_reset(cpl_errorstate self)
{
if (!cpl_errorstate_is_equal(self)) {
cpl_errorstate_dump(self, CPL_FALSE, cpl_errorstate_dump_debug);
cpl_errorstate_set(self);
}
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test an expression and update an internal counter if it fails
@param errnopre errno prior to expression evaluation
@param twallpre Wall clock time prior to expression calculation
@param flopspre FLOP count prior to expression evaluation
@param statepre CPL errorstate prior to expression evaluation
@param expression The expression to test (CPL_FALSE means failure)
@param message The text message associated with the expression
@param expect_error Whether a CPL error is expected from the tested code
@param function function name
@param file filename
@param line line number
The CPL FLOP count, the CPL errorstate and errno may have changed prior to
or during the expression evaluation. The logged message therefore covers:
success or failure,
any CPL errorstate change prior to expression evaluation,
any CPL errorstate change during expression evaluation,
any errno change prior to expression evaluation,
any errno change during expression evaluation.
*/
/*----------------------------------------------------------------------------*/
static void cpl_test_one(int errnopre, double twallpre, cpl_flops flopspre,
cpl_errorstate statepre, cpl_boolean expression,
const char *message, cpl_boolean expect_error,
const char *function, const char *file, unsigned line)
{
const int myerrno = errno; /* Local copy, in case errno changes in here */
char * errnopre_txt = errnopre == 0 ? NULL :
cpl_sprintf(" Prior to this test errno=%d: %s.", errnopre,
strerror(errnopre));
char * myerrno_txt = myerrno == errnopre ? NULL :
cpl_sprintf(" This test set errno=%d: %s.", myerrno,
strerror(myerrno));
const char * errnopre_msg = errnopre_txt == NULL ? "" : errnopre_txt;
const char * myerrno_msg = myerrno_txt == NULL ? "" : myerrno_txt;
const char * error_msg = cpl_errorstate_is_equal(cleanstate) ? "" :
(expect_error ?
(cpl_errorstate_is_equal(statepre) ? ""
: "CPL error(s) set during this test.") :
(cpl_errorstate_is_equal(statepre) ?
" CPL error(s) set prior to this test." :
(statepre == cleanstate ? " CPL error(s) set during this test." :
" CPL error(s) set prior to and during this test.")));
char * flopprev_txt = NULL;
char * floptest_txt = NULL;
assert(message != NULL);
assert(function != NULL);
assert(file != NULL);
errno = 0;
#ifdef _OPENMP
#pragma omp atomic
#endif
cpl_test_count++;
#ifdef _OPENMP
#pragma omp master
#endif
{
/* If two cpl_tests would be permitted to enter concurrently here,
then the reported FLOP rate would be meaningless */
const double cpl_test_time_now = cpl_test_get_walltime();
const double cpl_test_time_prev = cpl_test_time_one;
const cpl_flops cpl_test_flops_now = cpl_tools_get_flops();
const cpl_flops cpl_test_flops_prev = cpl_test_flops_one;
cpl_test_time_one = cpl_test_time_now;
cpl_test_flops_one = cpl_test_flops_now;
if (flopspre > cpl_test_flops_prev) {
const double cpl_test_time_between = twallpre - cpl_test_time_prev;
const cpl_flops cpl_test_flops_between = flopspre
- cpl_test_flops_prev;
if (cpl_test_time_between > 0.0) {
flopprev_txt = cpl_sprintf(" (%g FLOPs after the previous "
"test and prior to this one at "
"[MFLOP/s]: %g).",
(double)cpl_test_flops_between,
1e-6*(double)cpl_test_flops_between
/cpl_test_time_between);
} else {
flopprev_txt = cpl_sprintf(" (%g FLOPs after the previous test "
"and prior to this one).",
(double)cpl_test_flops_between);
}
}
if (cpl_test_flops_now > flopspre) {
const double cpl_test_time_during = cpl_test_time_now - twallpre;
const cpl_flops cpl_test_flops_during = cpl_test_flops_now
- flopspre;
if (cpl_test_time_during > 0.0) {
floptest_txt = cpl_sprintf(" (%g FLOPs during this test at "
"[MFLOP/s]: %g).",
(double)cpl_test_flops_during,
1e-6*(double)cpl_test_flops_during
/cpl_test_time_during);
} else {
floptest_txt = cpl_sprintf(" (%g FLOPs during this test).",
(double)cpl_test_flops_during);
}
}
}
if (flopprev_txt == NULL) flopprev_txt = cpl_strdup("");
if (floptest_txt == NULL) floptest_txt = cpl_strdup("");
if (cpl_test_state_ == 0) {
cpl_msg_error(cpl_func, "Missing a previous call to cpl_test_init(): "
"Failure regardless of test");
#ifdef _OPENMP
#pragma omp atomic
#endif
cpl_test_failures++;
} else if (cpl_test_state_ == 2) {
cpl_msg_error(cpl_func, "Test after call to cpl_test_end(): "
"Failure regardless of test");
#ifdef _OPENMP
#pragma omp atomic
#endif
cpl_test_failures++;
} else if (expression) {
cpl_boolean has_error = error_msg[0] ? CPL_TRUE : CPL_FALSE;
(has_error ? cpl_msg_info : cpl_msg_debug)
(function, "Test %" CPL_SIZE_FORMAT " OK at %s:%u: %s.%s%s%s%s%s",
cpl_test_count, file, line, message,
error_msg, errnopre_msg, myerrno_msg, flopprev_txt, floptest_txt);
cpl_errorstate_dump(cleanstate, CPL_FALSE, has_error
? cpl_errorstate_dump_info
: cpl_errorstate_dump_debug);
} else {
cpl_msg_error(function, "Test %" CPL_SIZE_FORMAT " failed at %s:%u: "
"%s.%s%s%s%s%s", cpl_test_count, file, line, message,
error_msg, errnopre_msg, myerrno_msg, flopprev_txt,
floptest_txt);
cpl_errorstate_dump(cleanstate, CPL_FALSE, NULL);
#ifdef _OPENMP
#pragma omp atomic
#endif
cpl_test_failures++;
}
cpl_free(errnopre_txt);
cpl_free(myerrno_txt);
cpl_free(flopprev_txt);
cpl_free(floptest_txt);
if (errno != 0) {
cpl_msg_debug(cpl_func, "%s() set errno=%d: %s", cpl_func, errno,
strerror(errno));
errno = 0;
}
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Dump a single CPL error at debug messaging level
@param self The number of the current error to be dumped
@param first The number of the first error to be dumped
@param last The number of the last error to be dumped
@return void
@see cpl_errorstate_dump_one
*/
/*----------------------------------------------------------------------------*/
static void cpl_errorstate_dump_debug(unsigned self, unsigned first,
unsigned last)
{
const cpl_boolean is_reverse = first > last ? CPL_TRUE : CPL_FALSE;
const unsigned newest = is_reverse ? first : last;
const unsigned oldest = is_reverse ? last : first;
const char * revmsg = is_reverse ? " in reverse order" : "";
assert( oldest <= self );
assert( newest >= self );
if (newest == 0) {
cpl_msg_debug(cpl_func, "No error(s) to dump");
assert( oldest == 0);
} else {
assert( oldest > 0);
assert( newest >= oldest);
if (self == first) {
if (oldest == 1) {
cpl_msg_debug(cpl_func, "Dumping all %u error(s)%s:", newest,
revmsg);
} else {
cpl_msg_debug(cpl_func, "Dumping the %u most recent error(s) "
"out of a total of %u errors%s:",
newest - oldest + 1, newest, revmsg);
}
cpl_msg_indent_more();
}
cpl_msg_debug(cpl_func, "[%u/%u] '%s' (%u) at %s", self, newest,
cpl_error_get_message(), cpl_error_get_code(),
cpl_error_get_where());
if (self == last) cpl_msg_indent_less();
}
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Dump a single CPL error at info messaging level
@param self The number of the current error to be dumped
@param first The number of the first error to be dumped
@param last The number of the last error to be dumped
@return void
@see cpl_errorstate_dump_debug
*/
/*----------------------------------------------------------------------------*/
static void cpl_errorstate_dump_info(unsigned self, unsigned first,
unsigned last)
{
const cpl_boolean is_reverse = first > last ? CPL_TRUE : CPL_FALSE;
const unsigned newest = is_reverse ? first : last;
const unsigned oldest = is_reverse ? last : first;
const char * revmsg = is_reverse ? " in reverse order" : "";
assert( oldest <= self );
assert( newest >= self );
if (newest == 0) {
cpl_msg_info(cpl_func, "No error(s) to dump");
assert( oldest == 0);
} else {
assert( oldest > 0);
assert( newest >= oldest);
if (self == first) {
if (oldest == 1) {
cpl_msg_info(cpl_func, "Dumping all %u error(s)%s:", newest,
revmsg);
} else {
cpl_msg_info(cpl_func, "Dumping the %u most recent error(s) "
"out of a total of %u errors%s:",
newest - oldest + 1, newest, revmsg);
}
cpl_msg_indent_more();
}
cpl_msg_info(cpl_func, "[%u/%u] '%s' (%u) at %s", self, newest,
cpl_error_get_message(), cpl_error_get_code(),
cpl_error_get_where());
if (self == last) cpl_msg_indent_less();
}
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief A string useful in error-reporting
@return Pointer to a string literal useful in error-reporting
*/
/*----------------------------------------------------------------------------*/
static const char * cpl_test_get_description(void)
{
return "CPL version: " PACKAGE_VERSION
#if defined CPL_SIZE_BITS && CPL_SIZE_BITS == 32
" (32-bit cpl_size)"
#else
" (64-bit cpl_size)"
#endif
"\n"
#ifdef CFITSIO_VERSION
"CFITSIO version: " CPL_STRINGIFY(CFITSIO_VERSION) "\n"
#elif defined _FITSIO_H
"CFITSIO version is less than 3.0\n"
#endif
#if defined WCSLIB_VERSION
"WCSLIB version: " CPL_STRINGIFY(WCSLIB_VERSION) "\n"
#elif defined CPL_WCS_INSTALLED && CPL_WCS_INSTALLED == 1
"WCSLIB available\n"
#else
"WCSLIB unavailable\n"
#endif
#if defined CPL_FFTW_INSTALLED && defined CPL_FFTWF_INSTALLED
#if defined CPL_FFTW_VERSION && defined CPL_FFTWF_VERSION
"FFTW (normal precision) version: " CPL_FFTW_VERSION "\n"
"FFTW (single precision) version: " CPL_FFTWF_VERSION "\n"
#else
"FFTW (normal and single precision) available\n"
#endif
#elif defined CPL_FFTW_INSTALLED
#if defined CPL_FFTW_VERSION
"FFTW (normal precision) version: " CPL_FFTW_VERSION "\n"
"FFTW (single precision) unavailable\n"
#else
"FFTW (normal precision) available\n"
"FFTW (single precision) unavailable\n"
#endif
#elif defined CPL_FFTWF_INSTALLED
#if defined CPL_FFTWF_VERSION
"FFTW (normal precision) unavailable\n"
"FFTW (single precision) version: " CPL_FFTWF_VERSION "\n"
#else
"FFTW (normal precision) unavailable\n"
"FFTW (single precision) available\n"
#endif
#else
"FFTW unavailable\n"
#endif
#ifdef CPL_ADD_FLOPS
"CPL FLOP counting is available\n"
#else
"CPL FLOP counting is unavailable, enable with -DCPL_ADD_FLOPS\n"
#endif
#ifdef _OPENMP
CPL_XSTRINGIFY(_OPENMP) ": " CPL_STRINGIFY(_OPENMP) "\n"
#endif
#ifdef SIZEOF_SIZE_T
"SIZEOF_SIZE_T is defined as " CPL_STRINGIFY(SIZEOF_SIZE_T) "\n"
#else
"SIZEOF_SIZE_T is not defined\n"
#endif
#ifdef OFF_T
"OFF_T is defined as " CPL_STRINGIFY(OFF_T) "\n"
#else
"OFF_T is not defined\n"
#endif
#if defined WORDS_BIGENDIAN && WORDS_BIGENDIAN == 1
"This platform is big-endian\n"
#else
"This platform is not big-endian\n"
#endif
#ifdef __DATE__
"Compile date: " __DATE__ "\n"
#endif
#ifdef __TIME__
"Compile time: " __TIME__ "\n"
#endif
#ifdef __STDC__
CPL_XSTRINGIFY(__STDC__) ": " CPL_STRINGIFY(__STDC__) "\n"
#endif
#ifdef __STDC_VERSION__
CPL_XSTRINGIFY(__STDC_VERSION__) ": "
CPL_STRINGIFY(__STDC_VERSION__) "\n"
#endif
#ifdef __STDC_HOSTED__
CPL_XSTRINGIFY(__STDC_HOSTED__) ": " CPL_STRINGIFY(__STDC_HOSTED__) "\n"
#endif
#ifdef __STDC_IEC_559__
CPL_XSTRINGIFY(__STDC_IEC_559__) ": "
CPL_STRINGIFY(__STDC_IEC_559__) "\n"
#endif
#ifdef __STDC_IEC_559_COMPLEX__
CPL_XSTRINGIFY(__STDC_IEC_559_COMPLEX__) ": "
CPL_STRINGIFY(__STDC_IEC_559_COMPLEX__) "\n"
#endif
#ifdef __STRICT_ANSI__
/* gcc and Sun Studio 12.1 supports this */
CPL_XSTRINGIFY(__STRICT_ANSI__) ": " CPL_STRINGIFY(__STRICT_ANSI__) "\n"
#endif
#ifdef __GNUC__
"gcc version (major number): " CPL_STRINGIFY(__GNUC__) "\n"
#ifdef __GNUC_MINOR__
"gcc version (minor number): " CPL_STRINGIFY(__GNUC_MINOR__) "\n"
#endif
#ifdef __GNUC_PATCHLEVEL__
"gcc version (patch level): " CPL_STRINGIFY(__GNUC_PATCHLEVEL__) "\n"
#endif
#ifdef __VERSION__
"Compiler version: " __VERSION__ "\n"
#endif
#ifdef __LP64__
CPL_XSTRINGIFY(__LP64__) ": " CPL_STRINGIFY(__LP64__) "\n"
#endif
#ifdef __PIC__
CPL_XSTRINGIFY(__PIC__) ": " CPL_STRINGIFY(__PIC__) "\n"
#endif
#ifdef __OPTIMIZE__
CPL_XSTRINGIFY(__OPTIMIZE__) ": " CPL_STRINGIFY(__OPTIMIZE__) "\n"
#endif
#ifdef __TIMESTAMP__
"Last modification of " __FILE__ ": " __TIMESTAMP__ "\n"
#endif
#endif
;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Dump various process information
*/
/*----------------------------------------------------------------------------*/
static void cpl_test_dump_status(void)
{
#if defined HAVE_GETPID && defined CPL_TEST_DUMP_STATUS
const pid_t pid = getpid();
char * file = cpl_sprintf("/proc/%u/status", (const unsigned)pid);
FILE * stream = fopen(file, "r");
if (stream != NULL) {
char line[CPL_MAX_MSG_LENGTH];
while (fgets(line, CPL_MAX_MSG_LENGTH, stream) != NULL) {
/* Ignore newline */
char * retpos = memchr(line, '\n', CPL_MAX_MSG_LENGTH);
if (retpos != NULL) *retpos = 0;
cpl_msg_debug(cpl_func, "%s", line);
}
fclose(stream);
}
cpl_free(file);
#endif
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Some simple FITS validation
@param filename The filename
@param filename_string The filename as a string
@note No input validation !
*/
/*----------------------------------------------------------------------------*/
static char * cpl_test_fits_file(const char * filename,
const char * filename_string)
{
char * self = NULL;
#ifdef HAVE_SYS_STAT_H
struct stat buf;
const int error = stat(filename, &buf);
if (error) {
self = cpl_sprintf("%s => %s stat() returned %d: %s",
filename_string, filename, error, strerror(errno));
} else {
const off_t size = buf.st_size;
if (size == 0) {
self = cpl_sprintf("%s => %s has zero size", filename_string,
filename);
} else {
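            /* The FITS standard requires a file to consist of an integer
               number of 2880-byte blocks */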
const off_t rem = size % 2880;
if (rem != 0) {
self = cpl_sprintf("%s => %s has illegal size=%luB with %uB "
"in excess of the 2880B-blocks",
filename_string, filename,
(long unsigned)size, (unsigned)rem);
}
}
}
#endif
return self;
}
|
GB_wait.c
|
//------------------------------------------------------------------------------
// GB_wait: finish all pending computations on a single matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// CALLS: GB_builder
// The matrix A has zombies and/or pending tuples placed there by
// GrB_setElement, GrB_*assign, or GB_mxm. Zombies must now be deleted, and
// pending tuples must now be assembled together and added into the matrix.
// The indices in A might also be jumbled; if so, they are sorted now.
// When the function returns, all pending tuples and zombies have been
// deleted. This is true even if the function fails due to lack of memory (in
// that case, the matrix is cleared as well).
// If A is hypersparse, the time taken is at most O(nnz(A) + t log t), where t
// is the number of pending tuples in A, and nnz(A) includes both zombies and
// live entries. There is no O(m) or O(n) time component, if A is m-by-n.
// If the number of non-empty vectors of A grows too large, then A can be
// converted to non-hypersparse.
// If A is non-hypersparse, then O(n) is added in the worst case, to prune
// zombies and to update the vector pointers for A.
// If A->nvec_nonempty is unknown (-1) it is computed.
// If the method is successful, it does an OpenMP flush just before returning.
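// Example (a sketch): GB_wait is the engine behind the user-level wait
// methods, e.g. GrB_Matrix_wait (A, GrB_MATERIALIZE), which must assemble
// all pending tuples and delete all zombies before the matrix contents can
// be exported or iterated over.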
#define GB_FREE_ALL \
{ \
GB_phbix_free (A) ; \
GB_Matrix_free (&T) ; \
GB_Matrix_free (&S) ; \
GB_Matrix_free (&A1) ; \
}
#include "GB_select.h"
#include "GB_add.h"
#include "GB_Pending.h"
#include "GB_build.h"
#include "GB_jappend.h"
GB_PUBLIC
GrB_Info GB_wait // finish all pending computations
(
GrB_Matrix A, // matrix with pending computations
const char *name, // name of the matrix
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info = GrB_SUCCESS ;
struct GB_Matrix_opaque T_header, A1_header, S_header ;
GrB_Matrix T = NULL, A1 = NULL, S = NULL ;
ASSERT_MATRIX_OK (A, "A to wait", GB_FLIP (GB0)) ;
if (GB_IS_FULL (A) || GB_IS_BITMAP (A))
{
// full and bitmap matrices never have any pending work
ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (!GB_JUMBLED (A)) ;
ASSERT (!GB_PENDING (A)) ;
ASSERT (A->nvec_nonempty >= 0) ;
// ensure the matrix is written to memory
#pragma omp flush
return (GrB_SUCCESS) ;
}
// only sparse and hypersparse matrices can have pending work
ASSERT (GB_IS_SPARSE (A) || GB_IS_HYPERSPARSE (A)) ;
ASSERT (GB_ZOMBIES_OK (A)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
ASSERT (GB_PENDING_OK (A)) ;
//--------------------------------------------------------------------------
// get the zombie and pending count, and burble if work needs to be done
//--------------------------------------------------------------------------
int64_t nzombies = A->nzombies ;
int64_t npending = GB_Pending_n (A) ;
const bool A_iso = A->iso ;
if (nzombies > 0 || npending > 0 || A->jumbled || A->nvec_nonempty < 0)
{
GB_BURBLE_MATRIX (A, "(%swait:%s " GBd " %s, " GBd " pending%s%s) ",
A_iso ? "iso " : "", name, nzombies,
(nzombies == 1) ? "zombie" : "zombies", npending,
A->jumbled ? ", jumbled" : "",
A->nvec_nonempty < 0 ? ", nvec" : "") ;
}
//--------------------------------------------------------------------------
// determine the max # of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// check if only A->nvec_nonempty is needed
//--------------------------------------------------------------------------
if (npending == 0 && nzombies == 0 && !A->jumbled)
{
if (A->nvec_nonempty < 0)
{
A->nvec_nonempty = GB_nvec_nonempty (A, Context) ;
}
return (GrB_SUCCESS) ;
}
//--------------------------------------------------------------------------
// check if A only needs to be unjumbled
//--------------------------------------------------------------------------
if (npending == 0 && nzombies == 0)
{
// A is not conformed, so the sparsity structure of A is not modified.
// That is, if A has no pending tuples and no zombies, but is just
// jumbled, then it stays sparse or hypersparse.
GB_OK (GB_unjumble (A, Context)) ;
ASSERT (GB_IMPLIES (info == GrB_SUCCESS, A->nvec_nonempty >= 0)) ;
return (info) ;
}
//--------------------------------------------------------------------------
// assemble the pending tuples into T
//--------------------------------------------------------------------------
int64_t anz_orig = GB_nnz (A) ;
int64_t asize = A->type->size ;
int64_t tnz = 0 ;
if (npending > 0)
{
//----------------------------------------------------------------------
// construct a new hypersparse matrix T with just the pending tuples
//----------------------------------------------------------------------
// T has the same type as A->type, which can differ from the type of
// the pending tuples, A->Pending->type. The Pending->op can be NULL
// (an implicit SECOND function), or it can be any accum operator. The
// z=accum(x,y) operator can have any types, and it does not have to be
// associative. T is constructed as iso if A is iso.
GB_void *S_input = (A_iso) ? ((GB_void *) A->x) : NULL ;
GrB_Type stype = (A_iso) ? A->type : A->Pending->type ;
GB_CLEAR_STATIC_HEADER (T, &T_header) ;
info = GB_builder (
T, // create T using a static header
A->type, // T->type = A->type
A->vlen, // T->vlen = A->vlen
A->vdim, // T->vdim = A->vdim
A->is_csc, // T->is_csc = A->is_csc
&(A->Pending->i), // iwork_handle, becomes T->i on output
&(A->Pending->i_size),
&(A->Pending->j), // jwork_handle, free on output
&(A->Pending->j_size),
&(A->Pending->x), // Swork_handle, free on output
&(A->Pending->x_size),
A->Pending->sorted, // tuples may or may not be sorted
false, // there might be duplicates; look for them
A->Pending->nmax, // size of Pending->[ijx] arrays
true, // is_matrix: unused
NULL, NULL, S_input, // original I,J,S_input tuples
A_iso, // pending tuples are iso if A is iso
npending, // # of tuples
A->Pending->op, // dup operator for assembling duplicates,
// NULL if A is iso
stype, // type of Pending->x
Context
) ;
//----------------------------------------------------------------------
// free pending tuples
//----------------------------------------------------------------------
// The tuples have been converted to T, which is more compact, and
// duplicates have been removed. The following work needs to be done
// even if the builder fails.
// GB_builder frees A->Pending->j and A->Pending->x. If successful,
// A->Pending->i is now T->i. Otherwise A->Pending->i is freed. In
// both cases, A->Pending->i is NULL.
ASSERT (A->Pending->i == NULL) ;
ASSERT (A->Pending->j == NULL) ;
ASSERT (A->Pending->x == NULL) ;
// free the list of pending tuples
GB_Pending_free (&(A->Pending)) ;
ASSERT (!GB_PENDING (A)) ;
ASSERT_MATRIX_OK (A, "A after moving pending tuples to T", GB0) ;
//----------------------------------------------------------------------
// check the status of the builder
//----------------------------------------------------------------------
        // Finally check the status of the builder. The pending tuples must
        // be freed (just above), whether or not the builder is successful.
if (info != GrB_SUCCESS)
{
// out of memory in GB_builder
GB_FREE_ALL ;
return (info) ;
}
ASSERT_MATRIX_OK (T, "T = hypersparse matrix of pending tuples", GB0) ;
ASSERT (GB_IS_HYPERSPARSE (T)) ;
ASSERT (!GB_ZOMBIES (T)) ;
ASSERT (!GB_JUMBLED (T)) ;
ASSERT (!GB_PENDING (T)) ;
tnz = GB_nnz (T) ;
ASSERT (tnz > 0) ;
}
//--------------------------------------------------------------------------
// delete zombies
//--------------------------------------------------------------------------
    // A zombie is an entry A(i,j) in the matrix that has been marked for
    // deletion, but hasn't been deleted yet. It is marked by "negating" its
    // index i, i.e. replacing it with GB_FLIP (i).
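    // For example (a sketch, with GB_FLIP (i) defined as (-(i)-2)): a zombie
    // at row index 3 is stored with index -5, and GB_UNFLIP recovers the
    // original index if the zombie is brought back to life.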
// TODO: pass tnz to GB_selector, to pad the reallocated A matrix
ASSERT_MATRIX_OK (A, "A before zombies removed", GB0) ;
if (nzombies > 0)
{
// remove all zombies from A
GB_OK (GB_selector (
NULL, // A in-place
GB_NONZOMBIE_selop_code, // use the opcode only
NULL, // no GB_Operator
false, // flipij is false
A, // input/output matrix
0, // ithunk is unused
NULL, // no GrB_Scalar Thunk
Context)) ;
ASSERT (A->nzombies == (anz_orig - GB_nnz (A))) ;
A->nzombies = 0 ;
}
ASSERT_MATRIX_OK (A, "A after zombies removed", GB0) ;
// all the zombies are gone, and pending tuples are now in T
ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
ASSERT (!GB_PENDING (A)) ;
//--------------------------------------------------------------------------
// unjumble the matrix
//--------------------------------------------------------------------------
GB_OK (GB_unjumble (A, Context)) ;
ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (!GB_JUMBLED (A)) ;
ASSERT (!GB_PENDING (A)) ;
//--------------------------------------------------------------------------
// check for pending tuples
//--------------------------------------------------------------------------
if (npending == 0)
{
// conform A to its desired sparsity structure and return result
info = GB_conform (A, Context) ;
ASSERT (GB_IMPLIES (info == GrB_SUCCESS, A->nvec_nonempty >= 0)) ;
#pragma omp flush
return (info) ;
}
//--------------------------------------------------------------------------
// check for quick transplant
//--------------------------------------------------------------------------
int64_t anz = GB_nnz (A) ;
if (anz == 0)
{
// A has no entries so just transplant T into A, then free T and
// conform A to its desired hypersparsity.
info = GB_transplant_conform (A, A->type, &T, Context) ;
ASSERT (GB_IMPLIES (info == GrB_SUCCESS, A->nvec_nonempty >= 0)) ;
#pragma omp flush
return (info) ;
}
//--------------------------------------------------------------------------
// determine the method for A = A+T
//--------------------------------------------------------------------------
// If anz > 0, T is hypersparse, even if A is a GrB_Vector
ASSERT (GB_IS_HYPERSPARSE (T)) ;
ASSERT (tnz > 0) ;
ASSERT (T->nvec > 0) ;
ASSERT (A->nvec > 0) ;
// tjfirst = first vector in T
int64_t tjfirst = T->h [0] ;
int64_t anz0 = 0 ;
int64_t kA = 0 ;
int64_t jlast ;
int64_t *restrict Ap = A->p ;
int64_t *restrict Ah = A->h ;
int64_t *restrict Ai = A->i ;
GB_void *restrict Ax = (GB_void *) A->x ;
int64_t anvec = A->nvec ;
// anz0 = nnz (A0) = nnz (A (:, 0:tjfirst-1)), the region not modified by T
if (A->h != NULL)
{
// find tjfirst in A->h
int64_t pright = anvec - 1 ;
bool found ;
GB_SPLIT_BINARY_SEARCH (tjfirst, A->h, kA, pright, found) ;
// A->h [0 ... kA-1] excludes vector tjfirst. The list
// A->h [kA ... anvec-1] includes tjfirst.
ASSERT (kA >= 0 && kA <= anvec) ;
ASSERT (GB_IMPLIES (kA > 0 && kA < anvec, A->h [kA-1] < tjfirst)) ;
ASSERT (GB_IMPLIES (found, A->h [kA] == tjfirst)) ;
jlast = (kA > 0) ? A->h [kA-1] : (-1) ;
}
else
{
kA = tjfirst ;
jlast = tjfirst - 1 ;
}
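    // Example (a sketch): for hypersparse A with A->h = [0 2 5 9] and
    // tjfirst = 5, the split binary search gives kA = 2 (since A->h [2] ==
    // 5), so vectors 0 and 2 form the unmodified prefix and jlast = 2.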
// anz1 = nnz (A1) = nnz (A (:, kA:end)), the region modified by T
anz0 = A->p [kA] ;
int64_t anz1 = anz - anz0 ;
bool ignore ;
// A + T will have anz_new entries
int64_t anz_new = anz + tnz ; // must have at least this space
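    // Heuristic: if the region of A overlapped by T is less than half the
    // size of the unmodified prefix (2*anz1 < anz0), grow A incrementally by
    // appending the new tuples; otherwise rebuild A with one add, S = A+T.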
if (2 * anz1 < anz0)
{
//----------------------------------------------------------------------
// append new tuples to A
//----------------------------------------------------------------------
// A is growing incrementally. It splits into two parts: A = [A0 A1].
// where A0 = A (:, 0:kA-1) and A1 = A (:, kA:end). The
// first part (A0 with anz0 = nnz (A0) entries) is not modified. The
// second part (A1, with anz1 = nnz (A1) entries) overlaps with T.
// If anz1 is zero, or small compared to anz0, then it is faster to
// leave A0 unmodified, and to update just A1.
// TODO: if A also had zombies, GB_selector could pad A so that
// GB_nnz_max (A) is equal to anz + tnz.
// make sure A has enough space for the new tuples
if (anz_new > GB_nnz_max (A))
{
// double the size if not enough space
GB_OK (GB_ix_realloc (A, 2 * anz_new, Context)) ;
Ai = A->i ;
Ax = (GB_void *) A->x ;
}
//----------------------------------------------------------------------
// T = A1 + T
//----------------------------------------------------------------------
if (anz1 > 0)
{
//------------------------------------------------------------------
// extract A1 = A (:, kA:end) as a shallow copy
//------------------------------------------------------------------
// A1 = [0, A (:, kA:end)], hypersparse with same dimensions as A
GB_CLEAR_STATIC_HEADER (A1, &A1_header) ;
GB_OK (GB_new (&A1, // hyper, existing header
A->type, A->vlen, A->vdim, GB_Ap_malloc, A->is_csc,
GxB_HYPERSPARSE, GB_ALWAYS_HYPER, anvec - kA, Context)) ;
// the A1->i and A1->x content are shallow copies of A(:,kA:end).
// They are not allocated pointers, but point to space inside
// Ai and Ax.
A1->x = (void *) (Ax + (A_iso ? 0 : (asize * anz0))) ;
A1->x_size = (A_iso ? 1 : anz1) * asize ;
A1->x_shallow = true ;
A1->i = Ai + anz0 ;
A1->i_size = anz1 * sizeof (int64_t) ;
A1->i_shallow = true ;
A1->iso = A_iso ; // OK
// fill the column A1->h and A1->p with A->h and A->p, shifted
int64_t *restrict A1p = A1->p ;
int64_t *restrict A1h = A1->h ;
int64_t a1nvec = 0 ;
for (int64_t k = kA ; k < anvec ; k++)
{
// get A (:,k)
int64_t pA_start = Ap [k] ;
int64_t pA_end = Ap [k+1] ;
if (pA_end > pA_start)
{
// add this column to A1 if A (:,k) is not empty
int64_t j = GBH (Ah, k) ;
A1p [a1nvec] = pA_start - anz0 ;
A1h [a1nvec] = j ;
a1nvec++ ;
}
}
// finalize A1
A1p [a1nvec] = anz1 ;
A1->nvec = a1nvec ;
A1->nvec_nonempty = a1nvec ;
A1->magic = GB_MAGIC ;
ASSERT_MATRIX_OK (A1, "A1 slice for GB_wait", GB0) ;
//------------------------------------------------------------------
// S = A1 + T, with no operator or mask
//------------------------------------------------------------------
GB_CLEAR_STATIC_HEADER (S, &S_header) ;
GB_OK (GB_add (S, A->type, A->is_csc, NULL, 0, 0, &ignore, A1, T,
false, NULL, NULL, NULL, Context)) ;
ASSERT_MATRIX_OK (S, "S = A1+T", GB0) ;
// free A1 and T
GB_Matrix_free (&T) ;
GB_Matrix_free (&A1) ;
//------------------------------------------------------------------
// replace T with S
//------------------------------------------------------------------
T = S ;
S = NULL ;
tnz = GB_nnz (T) ;
//------------------------------------------------------------------
// remove A1 from the vectors of A, if A is hypersparse
//------------------------------------------------------------------
if (A->h != NULL)
{
A->nvec = kA ;
}
}
//----------------------------------------------------------------------
// append T to the end of A0
//----------------------------------------------------------------------
const int64_t *restrict Tp = T->p ;
const int64_t *restrict Th = T->h ;
const int64_t *restrict Ti = T->i ;
int64_t tnvec = T->nvec ;
anz = anz0 ;
int64_t anz_last = anz ;
int nthreads = GB_nthreads (tnz, chunk, nthreads_max) ;
// append the indices and values of T to the end of A
GB_memcpy (Ai + anz, Ti, tnz * sizeof (int64_t), nthreads) ;
if (!A_iso)
{
const GB_void *restrict Tx = (GB_void *) T->x ;
GB_memcpy (Ax + anz * asize, Tx, tnz * asize, nthreads) ;
}
// append the vectors of T to the end of A
for (int64_t k = 0 ; k < tnvec ; k++)
{
int64_t j = Th [k] ;
ASSERT (j >= tjfirst) ;
anz += (Tp [k+1] - Tp [k]) ;
GB_OK (GB_jappend (A, j, &jlast, anz, &anz_last, Context)) ;
}
GB_jwrapup (A, jlast, anz) ;
ASSERT (anz == anz_new) ;
// need to recompute the # of non-empty vectors in GB_conform
A->nvec_nonempty = -1 ; // recomputed just below
ASSERT_MATRIX_OK (A, "A after GB_wait:append", GB0) ;
GB_Matrix_free (&T) ;
// conform A to its desired sparsity structure
info = GB_conform (A, Context) ;
}
else
{
//----------------------------------------------------------------------
// A = A+T
//----------------------------------------------------------------------
// The update is not incremental since most of A is changing. Just do
// a single parallel add: S=A+T, free T, and then transplant S back
// into A. The nzmax of A is tight, with no room for future
// incremental growth.
// FUTURE:: if GB_add could tolerate zombies in A, then the initial
// prune of zombies can be skipped.
GB_CLEAR_STATIC_HEADER (S, &S_header) ;
GB_OK (GB_add (S, A->type, A->is_csc, NULL, 0, 0, &ignore, A, T,
false, NULL, NULL, NULL, Context)) ;
GB_Matrix_free (&T) ;
ASSERT_MATRIX_OK (S, "S after GB_wait:add", GB0) ;
info = GB_transplant_conform (A, A->type, &S, Context) ;
}
//--------------------------------------------------------------------------
// flush the matrix and return result
//--------------------------------------------------------------------------
ASSERT (GB_IMPLIES (info == GrB_SUCCESS, A->nvec_nonempty >= 0)) ;
#pragma omp flush
return (info) ;
}
|
par_csr_matvec.c
|
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "_hypre_parcsr_mv.h"
#include <assert.h>
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixMatvecOutOfPlace
 *
 * y = alpha*A*x + beta*b
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixMatvecOutOfPlace( HYPRE_Complex alpha,
hypre_ParCSRMatrix *A,
hypre_ParVector *x,
HYPRE_Complex beta,
hypre_ParVector *b,
hypre_ParVector *y )
{
hypre_ParCSRCommHandle **comm_handle;
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
hypre_Vector *b_local = hypre_ParVectorLocalVector(b);
hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
hypre_Vector *x_tmp;
HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x);
HYPRE_BigInt b_size = hypre_ParVectorGlobalSize(b);
HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y);
HYPRE_Int num_vectors = hypre_VectorNumVectors(x_local);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd);
HYPRE_Int ierr = 0;
HYPRE_Int num_sends, jv;
HYPRE_Int vecstride = hypre_VectorVectorStride( x_local );
HYPRE_Int idxstride = hypre_VectorIndexStride( x_local );
HYPRE_Complex *x_tmp_data, **x_buf_data;
HYPRE_Complex *x_local_data = hypre_VectorData(x_local);
hypre_HandleCudaComputeStreamSyncPush(hypre_handle, 0);
/*---------------------------------------------------------------------
* Check for size compatibility. ParMatvec returns ierr = 11 if
* length of X doesn't equal the number of columns of A,
* ierr = 12 if the length of Y doesn't equal the number of rows
* of A, and ierr = 13 if both are true.
*
* Because temporary vectors are often used in ParMatvec, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
hypre_assert( idxstride>0 );
if (num_cols != x_size)
{
ierr = 11;
}
if (num_rows != y_size || num_rows != b_size)
{
ierr = 12;
}
if (num_cols != x_size && (num_rows != y_size || num_rows != b_size))
{
ierr = 13;
}
hypre_assert( hypre_VectorNumVectors(b_local) == num_vectors );
hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors );
if ( num_vectors == 1 )
{
x_tmp = hypre_SeqVectorCreate( num_cols_offd );
}
else
{
hypre_assert( num_vectors > 1 );
x_tmp = hypre_SeqMultiVectorCreate( num_cols_offd, num_vectors );
}
/*---------------------------------------------------------------------
    * If no CommPkg exists for A, one is generated using an equally
    * load-balanced partitioning
*--------------------------------------------------------------------*/
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) );
hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 );
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif
HYPRE_Int use_persistent_comm = 0;
#ifdef HYPRE_USING_PERSISTENT_COMM
use_persistent_comm = num_vectors == 1;
// JSP TODO: we can use persistent communication for multi-vectors,
// but then we need different communication handles for different
// num_vectors.
hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg);
#endif
}
else
{
comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST);
}
/* x_tmp */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
/* for GPU and single vector, alloc persistent memory for x_tmp (in comm_pkg) and reuse */
if (num_vectors == 1)
{
if (!hypre_ParCSRCommPkgTmpData(comm_pkg))
{
hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE);
}
hypre_VectorData(x_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg);
hypre_SeqVectorSetDataOwner(x_tmp, 0);
}
#else
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_VectorData(x_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle);
hypre_SeqVectorSetDataOwner(x_tmp, 0);
#endif
}
#endif
hypre_SeqVectorInitialize_v2(x_tmp, HYPRE_MEMORY_DEVICE);
x_tmp_data = hypre_VectorData(x_tmp);
   /* x_buf_data */
x_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST);
for (jv = 0; jv < num_vectors; ++jv)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
if (jv == 0)
{
if (!hypre_ParCSRCommPkgBufData(comm_pkg))
{
hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_DEVICE);
}
x_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg);
continue;
}
#endif
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
x_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle);
continue;
#endif
}
x_buf_data[jv] = hypre_TAlloc(HYPRE_Complex,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_DEVICE);
}
   /* The assert is because the following loop only works for 'column'
      storage of a multivector.  This needs to be fixed to work more
      generally, at least for 'row' storage.  That, in turn, means either
      changing CommPkg so that num_sends is no.zones*no.vectors (not
      no.zones) or, less dangerously, putting a stride in the logic of
      CommHandleCreate (stride either from a new argument or a new variable
      inside CommPkg), or putting the num_vector iteration inside
      CommHandleCreate (perhaps as a new multivector variant of it).
   */
hypre_assert( idxstride == 1 );
hypre_SeqVectorPrefetch(x_local, HYPRE_MEMORY_DEVICE);
/* send_map_elmts on device */
hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg);
for (jv = 0; jv < num_vectors; ++jv)
{
HYPRE_Complex *send_data = (HYPRE_Complex *) x_buf_data[jv];
HYPRE_Complex *locl_data = x_local_data + jv * vecstride;
/* if on device, no need to Sync: send_data is on device memory */
#if defined(HYPRE_USING_CUDA)
/* pack send data on device */
HYPRE_THRUST_CALL( gather,
hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg),
hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg) +
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
locl_data,
send_data );
#elif defined(HYPRE_USING_DEVICE_OPENMP)
/* pack send data on device */
HYPRE_Int i;
HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg);
HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#pragma omp target teams distribute parallel for private(i) is_device_ptr(send_data, locl_data, device_send_map_elmts)
for (i = start; i < end; i++)
{
send_data[i] = locl_data[device_send_map_elmts[i]];
}
#else
HYPRE_Int i;
/* pack send data on host */
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
i ++)
{
send_data[i] = locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)];
}
#endif
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
/* nonblocking communication starts */
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_buf_data[0]);
#endif
}
else
{
for ( jv = 0; jv < num_vectors; ++jv )
{
comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg, HYPRE_MEMORY_DEVICE, x_buf_data[jv],
HYPRE_MEMORY_DEVICE, &x_tmp_data[jv*num_cols_offd] );
}
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif
/* overlapped local computation */
hypre_CSRMatrixMatvecOutOfPlace( alpha, diag, x_local, beta, b_local, y_local, 0 );
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
/* nonblocking communication ends */
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_tmp_data);
#endif
}
else
{
for ( jv = 0; jv < num_vectors; ++jv )
{
hypre_ParCSRCommHandleDestroy(comm_handle[jv]);
comm_handle[jv] = NULL;
}
hypre_TFree(comm_handle, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif
/* computation offd part */
if (num_cols_offd)
{
hypre_CSRMatrixMatvec( alpha, offd, x_tmp, 1.0, y_local );
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif
hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL;
if (!use_persistent_comm)
{
for ( jv = 0; jv < num_vectors; ++jv )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
if (jv == 0)
{
continue;
}
#endif
hypre_TFree(x_buf_data[jv], HYPRE_MEMORY_DEVICE);
}
hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST);
}
hypre_HandleCudaComputeStreamSyncPop(hypre_handle);
hypre_SyncCudaComputeStream(hypre_handle);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif
return ierr;
}
HYPRE_Int
hypre_ParCSRMatrixMatvec( HYPRE_Complex alpha,
hypre_ParCSRMatrix *A,
hypre_ParVector *x,
HYPRE_Complex beta,
hypre_ParVector *y )
{
return hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y);
}
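/*
 * A hedged usage sketch (not part of hypre): assembling a small ParCSR
 * system through the standard IJ interface and applying the matvec above.
 * Assumes MPI is already initialized; error checking is omitted, and the
 * exact header set can vary between hypre versions.  The public wrapper
 * HYPRE_ParCSRMatrixMatvec provides the same operation without the
 * internal-type casts used here.
 */
#include <mpi.h>
#include "HYPRE.h"
#include "HYPRE_IJ_mv.h"
#include "_hypre_parcsr_mv.h"

static void matvec_demo(void)
{
   HYPRE_IJMatrix     ij_A;
   HYPRE_IJVector     ij_x, ij_y;
   HYPRE_ParCSRMatrix A;
   HYPRE_ParVector    x, y;
   HYPRE_BigInt  ilower = 0, iupper = 1;            /* two rows, one rank */
   HYPRE_BigInt  rows[2]  = {0, 1};
   HYPRE_Int     ncols[2] = {2, 2};
   HYPRE_BigInt  cols[4]  = {0, 1, 0, 1};
   HYPRE_Complex vals[4]  = {2.0, -1.0, -1.0, 2.0}; /* [2 -1; -1 2] */
   HYPRE_Complex xv[2]    = {1.0, 1.0};
   HYPRE_Complex zero[2]  = {0.0, 0.0};

   HYPRE_IJMatrixCreate(MPI_COMM_WORLD, ilower, iupper, ilower, iupper, &ij_A);
   HYPRE_IJMatrixSetObjectType(ij_A, HYPRE_PARCSR);
   HYPRE_IJMatrixInitialize(ij_A);
   HYPRE_IJMatrixSetValues(ij_A, 2, ncols, rows, cols, vals);
   HYPRE_IJMatrixAssemble(ij_A);
   HYPRE_IJMatrixGetObject(ij_A, (void **) &A);

   HYPRE_IJVectorCreate(MPI_COMM_WORLD, ilower, iupper, &ij_x);
   HYPRE_IJVectorSetObjectType(ij_x, HYPRE_PARCSR);
   HYPRE_IJVectorInitialize(ij_x);
   HYPRE_IJVectorSetValues(ij_x, 2, rows, xv);
   HYPRE_IJVectorAssemble(ij_x);
   HYPRE_IJVectorGetObject(ij_x, (void **) &x);

   HYPRE_IJVectorCreate(MPI_COMM_WORLD, ilower, iupper, &ij_y);
   HYPRE_IJVectorSetObjectType(ij_y, HYPRE_PARCSR);
   HYPRE_IJVectorInitialize(ij_y);
   HYPRE_IJVectorSetValues(ij_y, 2, rows, zero);
   HYPRE_IJVectorAssemble(ij_y);
   HYPRE_IJVectorGetObject(ij_y, (void **) &y);

   /* y = 1.0*A*x + 0.0*y; here A*[1 1]^T = [1 1]^T */
   hypre_ParCSRMatrixMatvec(1.0, (hypre_ParCSRMatrix *) A,
                            (hypre_ParVector *) x, 0.0,
                            (hypre_ParVector *) y);

   HYPRE_IJMatrixDestroy(ij_A);
   HYPRE_IJVectorDestroy(ij_x);
   HYPRE_IJVectorDestroy(ij_y);
}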
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixMatvecT
*
* Performs y <- alpha * A^T * x + beta * y
*
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixMatvecT( HYPRE_Complex alpha,
hypre_ParCSRMatrix *A,
hypre_ParVector *x,
HYPRE_Complex beta,
hypre_ParVector *y )
{
hypre_ParCSRCommHandle **comm_handle;
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
hypre_CSRMatrix *diagT = hypre_ParCSRMatrixDiagT(A);
hypre_CSRMatrix *offdT = hypre_ParCSRMatrixOffdT(A);
hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
hypre_Vector *y_tmp;
HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x);
HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y);
HYPRE_Int num_vectors = hypre_VectorNumVectors(y_local);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd);
HYPRE_Int ierr = 0;
HYPRE_Int num_sends, jv;
HYPRE_Int vecstride = hypre_VectorVectorStride(y_local);
HYPRE_Int idxstride = hypre_VectorIndexStride(y_local);
HYPRE_Complex *y_tmp_data, **y_buf_data;
HYPRE_Complex *y_local_data = hypre_VectorData(y_local);
hypre_HandleCudaComputeStreamSyncPush(hypre_handle, 0);
/*---------------------------------------------------------------------
* Check for size compatibility. MatvecT returns ierr = 1 if
* length of X doesn't equal the number of rows of A,
* ierr = 2 if the length of Y doesn't equal the number of
* columns of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in MatvecT, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
if (num_rows != x_size)
{
ierr = 1;
}
if (num_cols != y_size)
{
ierr = 2;
}
if (num_rows != x_size && num_cols != y_size)
{
ierr = 3;
}
hypre_assert( hypre_VectorNumVectors(x_local) == num_vectors );
hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors );
if ( num_vectors == 1 )
{
y_tmp = hypre_SeqVectorCreate(num_cols_offd);
}
else
{
hypre_assert( num_vectors > 1 );
y_tmp = hypre_SeqMultiVectorCreate(num_cols_offd, num_vectors);
}
/*---------------------------------------------------------------------
    * If no CommPkg exists for A, one is generated using an equally
    * load-balanced partitioning
*--------------------------------------------------------------------*/
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) );
hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 );
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif
HYPRE_Int use_persistent_comm = 0;
#ifdef HYPRE_USING_PERSISTENT_COMM
use_persistent_comm = num_vectors == 1;
// JSP TODO: we can use persistent communication for multi-vectors,
// but then we need different communication handles for different
// num_vectors.
hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(2, comm_pkg);
#endif
}
else
{
comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST);
}
/* y_tmp */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
/* for GPU and single vector, alloc persistent memory for y_tmp (in comm_pkg) and reuse */
if (num_vectors == 1)
{
if (!hypre_ParCSRCommPkgTmpData(comm_pkg))
{
hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE);
}
hypre_VectorData(y_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg);
hypre_SeqVectorSetDataOwner(y_tmp, 0);
}
#else
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_VectorData(y_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle);
hypre_SeqVectorSetDataOwner(y_tmp, 0);
#endif
}
#endif
hypre_SeqVectorInitialize_v2(y_tmp, HYPRE_MEMORY_DEVICE);
y_tmp_data = hypre_VectorData(y_tmp);
/* y_buf_data */
y_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST);
for (jv = 0; jv < num_vectors; ++jv)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
if (jv == 0)
{
if (!hypre_ParCSRCommPkgBufData(comm_pkg))
{
hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_DEVICE);
}
y_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg);
continue;
}
#endif
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
y_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle);
continue;
#endif
}
y_buf_data[jv] = hypre_TAlloc(HYPRE_Complex,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_DEVICE);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif
if (num_cols_offd)
{
if (offdT)
{
// offdT is optional. Used only if it's present
hypre_CSRMatrixMatvec(alpha, offdT, x_local, 0.0, y_tmp);
}
else
{
hypre_CSRMatrixMatvecT(alpha, offd, x_local, 0.0, y_tmp);
}
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_tmp_data);
#endif
}
else
{
for ( jv = 0; jv < num_vectors; ++jv )
{
/* this is where we assume multivectors are 'column' storage */
comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 2, comm_pkg, HYPRE_MEMORY_DEVICE, &y_tmp_data[jv*num_cols_offd],
HYPRE_MEMORY_DEVICE, y_buf_data[jv] );
}
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif
/* overlapped local computation */
if (diagT)
{
// diagT is optional. Used only if it's present.
hypre_CSRMatrixMatvec(alpha, diagT, x_local, beta, y_local);
}
else
{
hypre_CSRMatrixMatvecT(alpha, diag, x_local, beta, y_local);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
/* nonblocking communication ends */
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_buf_data[0]);
#endif
}
else
{
for ( jv = 0; jv < num_vectors; ++jv )
{
hypre_ParCSRCommHandleDestroy(comm_handle[jv]);
comm_handle[jv] = NULL;
}
hypre_TFree(comm_handle, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif
   /* The assert is because the following loop only works for 'column'
      storage of a multivector.  This needs to be fixed to work more
      generally, at least for 'row' storage.  That, in turn, means either
      changing CommPkg so that num_sends is no.zones*no.vectors (not
      no.zones) or, less dangerously, putting a stride in the logic of
      CommHandleCreate (stride either from a new argument or a new variable
      inside CommPkg), or putting the num_vector iteration inside
      CommHandleCreate (perhaps as a new multivector variant of it).
   */
hypre_assert( idxstride == 1 );
/* send_map_elmts on device */
hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg);
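   /* Unpacking is the mirror image of the pack step in the forward matvec:
      the same send map now tells us where received remote contributions to
      locally owned entries belong, so the gather becomes a scatter-add into
      locl_data (map entries may repeat, hence the "+=" / GenScatterAdd
      below). */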
for (jv = 0; jv < num_vectors; ++jv)
{
HYPRE_Complex *recv_data = (HYPRE_Complex *) y_buf_data[jv];
HYPRE_Complex *locl_data = y_local_data + jv * vecstride;
#if defined(HYPRE_USING_CUDA)
/* unpack recv data on device */
hypreDevice_GenScatterAdd(locl_data,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg),
recv_data);
#elif defined(HYPRE_USING_DEVICE_OPENMP)
HYPRE_Int i, j;
/* unpack recv data on device */
for (i = 0; i < num_sends; i++)
{
HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg);
HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1);
#pragma omp target teams distribute parallel for private(j) is_device_ptr(recv_data, locl_data, device_send_map_elmts)
for (j = start; j < end; j++)
{
locl_data[device_send_map_elmts[j]] += recv_data[j];
}
}
#else
HYPRE_Int i;
/* unpack recv data on host, TODO OMP? */
for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
i ++)
{
locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)] += recv_data[i];
}
#endif
}
hypre_SeqVectorDestroy(y_tmp); y_tmp = NULL;
if (!use_persistent_comm)
{
for ( jv = 0; jv < num_vectors; ++jv )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
if (jv == 0)
{
continue;
}
#endif
hypre_TFree(y_buf_data[jv], HYPRE_MEMORY_DEVICE);
}
hypre_TFree(y_buf_data, HYPRE_MEMORY_HOST);
}
hypre_HandleCudaComputeStreamSyncPop(hypre_handle);
hypre_SyncCudaComputeStream(hypre_handle);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixMatvec_FF
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixMatvec_FF( HYPRE_Complex alpha,
hypre_ParCSRMatrix *A,
hypre_ParVector *x,
HYPRE_Complex beta,
hypre_ParVector *y,
HYPRE_Int *CF_marker,
HYPRE_Int fpt )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
hypre_Vector *x_tmp;
HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x);
HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd);
HYPRE_Int ierr = 0;
HYPRE_Int num_sends, i, j, index, start, num_procs;
HYPRE_Int *int_buf_data = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Complex *x_tmp_data = NULL;
HYPRE_Complex *x_buf_data = NULL;
HYPRE_Complex *x_local_data = hypre_VectorData(x_local);
/*---------------------------------------------------------------------
* Check for size compatibility. ParMatvec returns ierr = 11 if
* length of X doesn't equal the number of columns of A,
* ierr = 12 if the length of Y doesn't equal the number of rows
* of A, and ierr = 13 if both are true.
*
* Because temporary vectors are often used in ParMatvec, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
hypre_MPI_Comm_size(comm,&num_procs);
if (num_cols != x_size)
ierr = 11;
if (num_rows != y_size)
ierr = 12;
if (num_cols != x_size && num_rows != y_size)
ierr = 13;
if (num_procs > 1)
{
if (num_cols_offd)
{
x_tmp = hypre_SeqVectorCreate( num_cols_offd );
hypre_SeqVectorInitialize(x_tmp);
x_tmp_data = hypre_VectorData(x_tmp);
}
/*---------------------------------------------------------------------
       * If no CommPkg exists for A, one is generated using an equally
       * load-balanced partitioning
*--------------------------------------------------------------------*/
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
if (num_sends)
x_buf_data = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart
(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
x_buf_data[index++]
= x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle =
hypre_ParCSRCommHandleCreate ( 1, comm_pkg, x_buf_data, x_tmp_data );
}
hypre_CSRMatrixMatvec_FF( alpha, diag, x_local, beta, y_local, CF_marker,
CF_marker, fpt);
if (num_procs > 1)
{
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
if (num_sends)
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart
(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
if (num_cols_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle =
hypre_ParCSRCommHandleCreate(11,comm_pkg,int_buf_data,CF_marker_offd );
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
if (num_cols_offd) hypre_CSRMatrixMatvec_FF( alpha, offd, x_tmp, 1.0, y_local,
CF_marker, CF_marker_offd, fpt);
hypre_SeqVectorDestroy(x_tmp);
x_tmp = NULL;
hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
}
return ierr;
}
|
omp.c
|
#include <stdio.h>
#include <omp.h>
int a, b, tid;
float x;
#pragma omp threadprivate(a, x)
int main(int argc, char *argv[]) {
/* Explicitly turn off dynamic threads */
omp_set_dynamic(0);
printf("1st Parallel Region:\n");
#pragma omp parallel private(b,tid)
{
tid = omp_get_thread_num();
a = tid;
b = tid;
    x = 1.1 * tid + 1.0;
printf("Thread %d: a,b,x= %d %d %f\n",tid,a,b,x);
} /* end of parallel region */
printf("************************************\n");
printf("Master thread doing serial work here\n");
printf("************************************\n");
printf("2nd Parallel Region:\n");
#pragma omp parallel private(tid)
{
tid = omp_get_thread_num();
printf("Thread %d: a,b,x= %d %d %f\n",tid,a,b,x);
} /* end of parallel region */
return 0;
}
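/*
 * Expected behavior (line order may vary): in the first region every thread
 * writes its own copies of the threadprivate variables a and x, plus its
 * private b.  With dynamic threads disabled, the second region reuses the
 * same team, so per OpenMP threadprivate rules each thread still sees its
 * own persisted a and x.  b is neither threadprivate nor privatized in the
 * second region, so it refers to the global b, which was never assigned
 * (the first region's b was private) and is still 0 there.
 */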
|
ocp_nlp_common.c
|
/*
* Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
* Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
* Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
* Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
*
* This file is part of acados.
*
* The 2-Clause BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
*/
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
// blasfeo
#include "blasfeo/include/blasfeo_common.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// hpipm
#include "hpipm/include/hpipm_d_ocp_qp_dim.h"
// acados
#include "acados/utils/mem.h"
/************************************************
* config
************************************************/
int ocp_nlp_config_calculate_size(int N)
{
int ii;
int size = 0;
// self
size += sizeof(ocp_nlp_config);
// qp solver
size += 1 * ocp_qp_xcond_solver_config_calculate_size();
// regularization
size += ocp_nlp_reg_config_calculate_size();
// dynamics
size += N * sizeof(ocp_nlp_dynamics_config *);
for (ii = 0; ii < N; ii++) size += ocp_nlp_dynamics_config_calculate_size();
// cost
size += (N + 1) * sizeof(ocp_nlp_cost_config *);
for (ii = 0; ii <= N; ii++) size += ocp_nlp_cost_config_calculate_size();
// constraints
size += (N + 1) * sizeof(ocp_nlp_constraints_config *);
for (ii = 0; ii <= N; ii++) size += ocp_nlp_constraints_config_calculate_size();
return size;
}
ocp_nlp_config *ocp_nlp_config_assign(int N, void *raw_memory)
{
int ii;
char *c_ptr = (char *) raw_memory;
ocp_nlp_config *config = (ocp_nlp_config *) c_ptr;
c_ptr += sizeof(ocp_nlp_config);
config->N = N;
// qp solver
config->qp_solver = ocp_qp_xcond_solver_config_assign(c_ptr);
c_ptr += ocp_qp_xcond_solver_config_calculate_size();
// regularization
config->regularize = ocp_nlp_reg_config_assign(c_ptr);
c_ptr += ocp_nlp_reg_config_calculate_size();
// dynamics
config->dynamics = (ocp_nlp_dynamics_config **) c_ptr;
c_ptr += N * sizeof(ocp_nlp_dynamics_config *);
for (ii = 0; ii < N; ii++)
{
config->dynamics[ii] = ocp_nlp_dynamics_config_assign(c_ptr);
c_ptr += ocp_nlp_dynamics_config_calculate_size();
}
// cost
config->cost = (ocp_nlp_cost_config **) c_ptr;
c_ptr += (N + 1) * sizeof(ocp_nlp_cost_config *);
for (ii = 0; ii <= N; ii++)
{
config->cost[ii] = ocp_nlp_cost_config_assign(c_ptr);
c_ptr += ocp_nlp_cost_config_calculate_size();
}
// constraints
config->constraints = (ocp_nlp_constraints_config **) c_ptr;
c_ptr += (N + 1) * sizeof(ocp_nlp_constraints_config *);
for (ii = 0; ii <= N; ii++)
{
config->constraints[ii] = ocp_nlp_constraints_config_assign(c_ptr);
c_ptr += ocp_nlp_constraints_config_calculate_size();
}
return config;
}
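/*
 * A minimal usage sketch of the calculate_size/assign idiom used throughout
 * this file: query the exact byte count, allocate one raw block, and let
 * assign() carve it up in the same order.  Since ocp_nlp_config_assign
 * places the struct at the start of the block, free(config) releases
 * everything.  (The helper name is hypothetical.)
 */
static ocp_nlp_config *nlp_config_create(int N)
{
    int size = ocp_nlp_config_calculate_size(N);
    void *mem = malloc(size);  // one block for the config and all submodules
    return mem ? ocp_nlp_config_assign(N, mem) : NULL;
}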
/************************************************
* dims
************************************************/
static int ocp_nlp_dims_calculate_size_self(int N)
{
int size = 0;
size += sizeof(ocp_nlp_dims);
// nlp sizes
size += 6 * (N + 1) * sizeof(int); // nv, nx, nu, ni, nz, ns
// dynamics
size += N * sizeof(void *);
// cost
size += (N + 1) * sizeof(void *);
// constraints
size += (N + 1) * sizeof(void *);
// regularization
size += ocp_nlp_reg_dims_calculate_size(N);
size += sizeof(ocp_nlp_reg_dims);
size += 8; // initial align
return size;
}
int ocp_nlp_dims_calculate_size(void *config_)
{
ocp_nlp_config *config = config_;
int N = config->N;
int ii;
int size = 0;
// self
size += ocp_nlp_dims_calculate_size_self(N);
// dynamics
for (ii = 0; ii < N; ii++)
size += config->dynamics[ii]->dims_calculate_size(config->dynamics[ii]);
// cost
for (ii = 0; ii <= N; ii++) size += config->cost[ii]->dims_calculate_size(config->cost[ii]);
// constraints
for (ii = 0; ii <= N; ii++)
size += config->constraints[ii]->dims_calculate_size(config->constraints[ii]);
// qp solver
size += config->qp_solver->dims_calculate_size(config->qp_solver, N);
return size;
}
static ocp_nlp_dims *ocp_nlp_dims_assign_self(int N, void *raw_memory)
{
char *c_ptr = (char *) raw_memory;
int ii;
// initial align
align_char_to(8, &c_ptr);
// struct
ocp_nlp_dims *dims = (ocp_nlp_dims *) c_ptr;
c_ptr += sizeof(ocp_nlp_dims);
// nv
assign_and_advance_int(N + 1, &dims->nv, &c_ptr);
// nx
assign_and_advance_int(N + 1, &dims->nx, &c_ptr);
// nu
assign_and_advance_int(N + 1, &dims->nu, &c_ptr);
// ni
assign_and_advance_int(N + 1, &dims->ni, &c_ptr);
// nz
assign_and_advance_int(N + 1, &dims->nz, &c_ptr);
// ns
assign_and_advance_int(N + 1, &dims->ns, &c_ptr);
// dynamics
dims->dynamics = (void **) c_ptr;
c_ptr += N * sizeof(void *);
// cost
dims->cost = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
// constraints
dims->constraints = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
// regularization
dims->regularize = ocp_nlp_reg_dims_assign(N, c_ptr);
c_ptr += ocp_nlp_reg_dims_calculate_size(N);
/* initialize qp_solver dimensions */
// dims->qp_solver->N = N;
// for (ii = 0; ii <= N; ii++)
// {
    //     TODO(dimitris): values below are needed for reformulation of QP when soft constraints
    //     are not supported. Make this a bit more transparent as it clashes with nbx/nbu above.
// dims->qp_solver->nsbx[ii] = 0;
// dims->qp_solver->nsbu[ii] = 0;
// dims->qp_solver->nsg[ii] = 0;
// }
// N
dims->N = N;
// initialize dimensions to zero by default
// nv
for(ii=0; ii<=N; ii++)
dims->nv[ii] = 0;
// nx
for(ii=0; ii<=N; ii++)
dims->nx[ii] = 0;
// nu
for(ii=0; ii<=N; ii++)
dims->nu[ii] = 0;
// ni
for(ii=0; ii<=N; ii++)
dims->ni[ii] = 0;
// nz
for(ii=0; ii<=N; ii++)
dims->nz[ii] = 0;
// ns
for(ii=0; ii<=N; ii++)
dims->ns[ii] = 0;
    // TODO: initialize dims to zero by default also in the submodules
// assert
assert((char *) raw_memory + ocp_nlp_dims_calculate_size_self(N) >= c_ptr);
return dims;
}
ocp_nlp_dims *ocp_nlp_dims_assign(void *config_, void *raw_memory)
{
ocp_nlp_config *config = config_;
int N = config->N;
int ii;
char *c_ptr = (char *) raw_memory;
// self
ocp_nlp_dims *dims = ocp_nlp_dims_assign_self(N, c_ptr);
c_ptr += ocp_nlp_dims_calculate_size_self(N);
// dynamics
for (ii = 0; ii < N; ii++)
{
dims->dynamics[ii] = config->dynamics[ii]->dims_assign(config->dynamics[ii], c_ptr);
c_ptr += config->dynamics[ii]->dims_calculate_size(config->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
dims->cost[ii] = config->cost[ii]->dims_assign(config->cost[ii], c_ptr);
c_ptr += config->cost[ii]->dims_calculate_size(config->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
dims->constraints[ii] =
config->constraints[ii]->dims_assign(config->constraints[ii], c_ptr);
c_ptr += config->constraints[ii]->dims_calculate_size(config->constraints[ii]);
}
// qp solver
dims->qp_solver = config->qp_solver->dims_assign(config->qp_solver, N, c_ptr);
c_ptr += config->qp_solver->dims_calculate_size(config->qp_solver, N);
// assert
assert((char *) raw_memory + ocp_nlp_dims_calculate_size(config_) >= c_ptr);
return dims;
}
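/*
 * Dims follow the same idiom one level down, with one caveat: because of
 * the initial 8-byte align in ocp_nlp_dims_assign_self, the returned
 * pointer need not equal the raw block, so the caller must keep the
 * malloc'd pointer for free().  (The helper name is hypothetical.)
 */
static ocp_nlp_dims *nlp_dims_create(ocp_nlp_config *config, void **raw_out)
{
    int size = ocp_nlp_dims_calculate_size(config);
    *raw_out = malloc(size);  // caller frees *raw_out, not the return value
    return *raw_out ? ocp_nlp_dims_assign(config, *raw_out) : NULL;
}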
void ocp_nlp_dims_set_opt_vars(void *config_, void *dims_, const char *field,
const void* value_array)
{
// to set dimension nx, nu, nz, ns (number of slacks = number of soft constraints)
ocp_nlp_config *config = config_;
ocp_nlp_dims *dims = dims_;
int ii;
int N = config->N;
int *int_array = (int *) value_array;
/* set ocp_nlp dimension */
if (!strcmp(field, "nx"))
{
// opt var
for (ii = 0; ii <= N; ii++)
{
// set nx
dims->nx[ii] = int_array[ii];
// update nv
dims->nv[ii] = dims->nu[ii] + dims->nx[ii] + 2 * dims->ns[ii];
}
// cost
for (int i = 0; i <= N; i++)
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], "nx", &int_array[i]);
}
// dynamics
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nx", &int_array[i]);
}
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nx1", &int_array[i+1]);
}
// constraints
for (int i = 0; i <= N; i++)
{
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
"nx", &int_array[i]);
}
// qp solver
for (int i = 0; i <= N; i++)
{
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nx", &int_array[i]);
}
// regularization
for (ii = 0; ii <= N; ii++)
{
config->regularize->dims_set(config->regularize, dims->regularize, ii, "nx", &int_array[ii]);
}
}
else if (!strcmp(field, "nu"))
{
// nlp opt var
for (int ii = 0; ii <= N; ii++)
{
// set nu
dims->nu[ii] = int_array[ii];
// update nv
dims->nv[ii] = dims->nu[ii] + dims->nx[ii] + 2 * dims->ns[ii];
}
// cost
for (int i = 0; i <= N; i++)
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], "nu", &int_array[i]);
}
// dynamics
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nu", &int_array[i]);
}
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nu1", &int_array[i+1]);
}
// constraints
for (int i = 0; i <= N; i++)
{
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
"nu", &int_array[i]);
}
// qp solver
for (int i = 0; i <= N; i++)
{
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nu", &int_array[i]);
}
// regularization
for (ii = 0; ii <= N; ii++)
{
config->regularize->dims_set(config->regularize, dims->regularize, ii, "nu", &int_array[ii]);
}
}
else if (!strcmp(field, "nz"))
{
// nlp opt var
for (int ii = 0; ii <= N; ii++)
{
// set nz
dims->nz[ii] = int_array[ii];
}
// cost
for (int i = 0; i <= N; i++)
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], "nz", &int_array[i]);
}
// dynamics
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nz", &int_array[i]);
}
// constraints
for (int i = 0; i <= N; i++)
{
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
"nz", &int_array[i]);
}
}
else if (!strcmp(field, "ns"))
{
// nlp opt var
for (int ii = 0; ii <= N; ii++)
{
// set ns
dims->ns[ii] = int_array[ii];
// update nv
dims->nv[ii] = dims->nu[ii] + dims->nx[ii] + 2 * dims->ns[ii];
}
// cost
for (int i = 0; i <= N; i++)
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], "ns", &int_array[i]);
}
// qp solver
for (int i = 0; i <= N; i++)
{
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "ns",
&int_array[i]);
}
}
else
{
printf("error: dims type not available in module ocp_nlp: %s", field);
exit(1);
}
#if 0
/* set ocp_nlp submodule dimensions */
if (strcmp(field, "ns")) // dynamics do not contain slack/soft constraints
{
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], field, &int_array[i]);
}
}
if (!strcmp(field, "nu"))
{
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nu1", &int_array[i+1]);
}
}
if (!strcmp(field, "nx"))
{
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nx1", &int_array[i+1]);
}
}
for (int i = 0; i <= N; i++) // cost
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], field, &int_array[i]);
}
for (int i = 0; i <= N; i++) // constraints
{
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
field, &int_array[i]);
}
if (strcmp(field, "nz")) // qp_solver does not contain nz
{
for (int i = 0; i <= N; i++) // qp_solver
{
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field,
&int_array[i]);
}
}
#endif
return;
}
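/*
 * Usage sketch (hypothetical helper; assumes config/dims were created with
 * N == 2): each dimension is passed as one int array over stages 0..N, and
 * nv is kept consistent automatically (nv = nu + nx + 2*ns).
 */
static void set_dims_example(ocp_nlp_config *config, ocp_nlp_dims *dims)
{
    int nx[] = {4, 4, 4};  // one entry per stage 0..N
    int nu[] = {2, 2, 0};  // no controls at the terminal stage
    ocp_nlp_dims_set_opt_vars(config, dims, "nx", nx);
    ocp_nlp_dims_set_opt_vars(config, dims, "nu", nu);
    // with ns still 0: dims->nv[0] == 2 + 4 + 2*0 == 6
}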
void ocp_nlp_dims_set_constraints(void *config_, void *dims_, int stage, const char *field,
const void* value_)
{
// to set dimension nbx, nbu, ng, nh, nq (quadratic over nonlinear)
ocp_nlp_config *config = config_;
ocp_nlp_dims *dims = dims_;
int *int_value = (int *) value_;
int i = stage;
// set in constraint module
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
field, int_value);
// update ni in ocp_nlp dimensions
config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i],
"ni", &dims->ni[i]);
// update qp_solver dims
if ( (!strcmp(field, "nbx")) || (!strcmp(field, "nbu")) )
{
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
// regularization
config->regularize->dims_set(config->regularize, dims->regularize, i, (char *) field, int_value);
}
else if ( (!strcmp(field, "nsbx")) || (!strcmp(field, "nsbu")) )
{
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
}
else if ( (!strcmp(field, "ng")) || (!strcmp(field, "nh")) || (!strcmp(field, "nphi")))
{
// update ng_qp_solver in qp_solver
int ng_qp_solver;
config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i],
"ng_qp_solver", &ng_qp_solver);
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "ng", &ng_qp_solver);
// regularization
config->regularize->dims_set(config->regularize, dims->regularize, i, "ng", &ng_qp_solver);
}
else if ( (!strcmp(field, "nsg")) || (!strcmp(field, "nsh")) || (!strcmp(field, "nsphi")))
{
// update ng_qp_solver in qp_solver
int nsg_qp_solver;
config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i], "nsg_qp_solver", &nsg_qp_solver);
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nsg", &nsg_qp_solver);
}
else if ( (!strcmp(field, "nbxe")) || (!strcmp(field, "nbue")) )
{
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
}
else if ( (!strcmp(field, "nge")) || (!strcmp(field, "nhe")) || (!strcmp(field, "nphie")))
{
// update ng_qp_solver in qp_solver
int ng_qp_solver;
config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i],
"nge_qp_solver", &ng_qp_solver);
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nge", &ng_qp_solver);
}
return;
}
void ocp_nlp_dims_set_cost(void *config_, void *dims_, int stage,
const char *field, const void* value_)
{
// to set dimension ny (output)
ocp_nlp_config *config = config_;
ocp_nlp_dims *dims = dims_;
int *int_value = (int *) value_;
config->cost[stage]->dims_set(config->cost[stage], dims->cost[stage], field, int_value);
}
void ocp_nlp_dims_set_dynamics(void *config_, void *dims_, int stage,
const char *field, const void* value)
{
// mainly for gnsf dimensions
ocp_nlp_config *config = config_;
ocp_nlp_dims *dims = dims_;
int *int_value = (int *) value;
config->dynamics[stage]->dims_set(config->dynamics[stage], dims->dynamics[stage], field, int_value);
}
/************************************************
* in
************************************************/
int ocp_nlp_in_calculate_size_self(int N)
{
int size = sizeof(ocp_nlp_in);
size += N * sizeof(double); // Ts
size += N * sizeof(void *); // dynamics
size += (N + 1) * sizeof(void *); // cost
size += (N + 1) * sizeof(void *); // constraints
return size;
}
int ocp_nlp_in_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims)
{
int ii;
int N = dims->N;
int size = ocp_nlp_in_calculate_size_self(N);
// dynamics
for (ii = 0; ii < N; ii++)
{
size +=
config->dynamics[ii]->model_calculate_size(config->dynamics[ii], dims->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
size += config->cost[ii]->model_calculate_size(config->cost[ii], dims->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
size += config->constraints[ii]->model_calculate_size(config->constraints[ii],
dims->constraints[ii]);
}
size += 8; // initial align
// make_int_multiple_of(64, &size);
return size;
}
ocp_nlp_in *ocp_nlp_in_assign_self(int N, void *raw_memory)
{
char *c_ptr = (char *) raw_memory;
// initial align
align_char_to(8, &c_ptr);
// struct
ocp_nlp_in *in = (ocp_nlp_in *) c_ptr;
c_ptr += sizeof(ocp_nlp_in);
// Ts
in->Ts = (double *) c_ptr;
c_ptr += N * sizeof(double);
// dynamics
in->dynamics = (void **) c_ptr;
c_ptr += N * sizeof(void *);
// cost
in->cost = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
// constraints
in->constraints = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
return in;
}
ocp_nlp_in *ocp_nlp_in_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, void *raw_memory)
{
int ii;
int N = dims->N;
char *c_ptr = (char *) raw_memory;
// struct
ocp_nlp_in *in = ocp_nlp_in_assign_self(N, c_ptr);
c_ptr += ocp_nlp_in_calculate_size_self(N);
// dynamics
for (ii = 0; ii < N; ii++)
{
in->dynamics[ii] =
config->dynamics[ii]->model_assign(config->dynamics[ii], dims->dynamics[ii], c_ptr);
c_ptr +=
config->dynamics[ii]->model_calculate_size(config->dynamics[ii], dims->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
in->cost[ii] = config->cost[ii]->model_assign(config->cost[ii], dims->cost[ii], c_ptr);
c_ptr += config->cost[ii]->model_calculate_size(config->cost[ii], dims->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
in->constraints[ii] = config->constraints[ii]->model_assign(config->constraints[ii],
dims->constraints[ii], c_ptr);
c_ptr += config->constraints[ii]->model_calculate_size(config->constraints[ii],
dims->constraints[ii]);
}
assert((char *) raw_memory + ocp_nlp_in_calculate_size(config, dims) >= c_ptr);
return in;
}
/************************************************
* out
************************************************/
int ocp_nlp_out_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims)
{
// extract dims
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
int *nz = dims->nz;
int size = sizeof(ocp_nlp_out);
size += 4 * (N + 1) * sizeof(struct blasfeo_dvec); // ux, lam, t, z
size += 1 * N * sizeof(struct blasfeo_dvec); // pi
for (int ii = 0; ii < N; ii++)
{
size += 1 * blasfeo_memsize_dvec(nv[ii]); // ux
size += 1 * blasfeo_memsize_dvec(nz[ii]); // z
size += 2 * blasfeo_memsize_dvec(2 * ni[ii]); // lam, t
size += 1 * blasfeo_memsize_dvec(nx[ii + 1]); // pi
}
size += 1 * blasfeo_memsize_dvec(nv[N]); // ux
size += 1 * blasfeo_memsize_dvec(nz[N]); // z
size += 2 * blasfeo_memsize_dvec(2 * ni[N]); // lam, t
size += 8; // initial align
size += 8; // blasfeo_struct align
size += 64; // blasfeo_mem align
// make_int_multiple_of(64, &size);
return size;
}
ocp_nlp_out *ocp_nlp_out_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, void *raw_memory)
{
// loop index
int ii;
// extract sizes
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
int *nz = dims->nz;
char *c_ptr = (char *) raw_memory;
// initial align
align_char_to(8, &c_ptr);
ocp_nlp_out *out = (ocp_nlp_out *) c_ptr;
c_ptr += sizeof(ocp_nlp_out);
// blasfeo_struct align
align_char_to(8, &c_ptr);
// blasfeo_dvec_struct
// ux
assign_and_advance_blasfeo_dvec_structs(N + 1, &out->ux, &c_ptr);
// z
assign_and_advance_blasfeo_dvec_structs(N + 1, &out->z, &c_ptr);
// pi
assign_and_advance_blasfeo_dvec_structs(N, &out->pi, &c_ptr);
// lam
assign_and_advance_blasfeo_dvec_structs(N + 1, &out->lam, &c_ptr);
// t
assign_and_advance_blasfeo_dvec_structs(N + 1, &out->t, &c_ptr);
// blasfeo_mem align
align_char_to(64, &c_ptr);
// blasfeo_dvec
// ux
for (int ii = 0; ii <= N; ++ii)
{
assign_and_advance_blasfeo_dvec_mem(nv[ii], out->ux + ii, &c_ptr);
}
// z
for (int ii = 0; ii <= N; ++ii)
{
assign_and_advance_blasfeo_dvec_mem(nz[ii], out->z + ii, &c_ptr);
}
// pi
for (int ii = 0; ii < N; ++ii)
{
assign_and_advance_blasfeo_dvec_mem(nx[ii + 1], out->pi + ii, &c_ptr);
}
// lam
for (int ii = 0; ii <= N; ++ii)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], out->lam + ii, &c_ptr);
}
// t
for (int ii = 0; ii <= N; ++ii)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], out->t + ii, &c_ptr);
}
// zero solution
for(ii=0; ii<N; ii++)
{
blasfeo_dvecse(nv[ii], 0.0, out->ux+ii, 0);
blasfeo_dvecse(nz[ii], 0.0, out->z+ii, 0);
blasfeo_dvecse(nx[ii+1], 0.0, out->pi+ii, 0);
blasfeo_dvecse(2*ni[ii], 0.0, out->lam+ii, 0);
blasfeo_dvecse(2*ni[ii], 0.0, out->t+ii, 0);
}
ii = N;
blasfeo_dvecse(nv[ii], 0.0, out->ux+ii, 0);
blasfeo_dvecse(nz[ii], 0.0, out->z+ii, 0);
blasfeo_dvecse(2*ni[ii], 0.0, out->lam+ii, 0);
blasfeo_dvecse(2*ni[ii], 0.0, out->t+ii, 0);
assert((char *) raw_memory + ocp_nlp_out_calculate_size(config, dims) >= c_ptr);
return out;
}
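/*
 * The 8- and 64-byte alignment steps above matter because blasfeo kernels
 * expect cache-line (64-byte) aligned matrix/vector memory.  A sketch of
 * what align_char_to presumably does (the real helper lives in
 * acados/utils/mem.*; num is assumed to be a power of two):
 */
#include <stdint.h>

static void align_char_to_sketch(int num, char **c_ptr)
{
    uintptr_t p = (uintptr_t) *c_ptr;
    p = (p + (uintptr_t) (num - 1)) & ~((uintptr_t) (num - 1));  // round up
    *c_ptr = (char *) p;
}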
/************************************************
* options
************************************************/
int ocp_nlp_opts_calculate_size(void *config_, void *dims_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int N = dims->N;
int size = 0;
size += sizeof(ocp_nlp_opts);
size += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);
size += config->regularize->opts_calculate_size();
// dynamics
size += N * sizeof(void *);
for (int ii = 0; ii < N; ii++)
{
size += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]);
}
// cost
size += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
size += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]);
}
// constraints
size += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
size += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]);
}
return size;
}
void *ocp_nlp_opts_assign(void *config_, void *dims_, void *raw_memory)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int N = dims->N;
char *c_ptr = (char *) raw_memory;
ocp_nlp_opts *opts = (ocp_nlp_opts *) c_ptr;
c_ptr += sizeof(ocp_nlp_opts);
opts->qp_solver_opts = qp_solver->opts_assign(qp_solver, dims->qp_solver, c_ptr);
c_ptr += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);
opts->regularize = config->regularize->opts_assign(c_ptr);
c_ptr += config->regularize->opts_calculate_size();
// dynamics
opts->dynamics = (void **) c_ptr;
c_ptr += N * sizeof(void *);
for (int ii = 0; ii < N; ii++)
{
opts->dynamics[ii] = dynamics[ii]->opts_assign(dynamics[ii], dims->dynamics[ii], c_ptr);
c_ptr += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]);
}
// cost
opts->cost = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
opts->cost[ii] = cost[ii]->opts_assign(cost[ii], dims->cost[ii], c_ptr);
c_ptr += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]);
}
// constraints
opts->constraints = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
opts->constraints[ii] =
constraints[ii]->opts_assign(constraints[ii], dims->constraints[ii], c_ptr);
c_ptr += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]);
}
assert((char *) raw_memory + ocp_nlp_opts_calculate_size(config, dims) >= c_ptr);
return opts;
}
void ocp_nlp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_opts *opts = opts_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
ocp_nlp_reg_config *regularize = config->regularize;
int ii;
int N = dims->N;
opts->reuse_workspace = 1;
#if defined(ACADOS_WITH_OPENMP)
opts->num_threads = ACADOS_NUM_THREADS;
#endif
opts->step_length = 1.0;
// submodules opts
// qp solver
qp_solver->opts_initialize_default(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// regularization
regularize->opts_initialize_default(regularize, dims->regularize, opts->regularize);
// dynamics
for (ii = 0; ii < N; ii++)
{
dynamics[ii]->opts_initialize_default(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
cost[ii]->opts_initialize_default(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
constraints[ii]->opts_initialize_default(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
return;
}
void ocp_nlp_opts_update(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_opts *opts = opts_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int ii;
int N = dims->N;
qp_solver->opts_update(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// dynamics
for (ii = 0; ii < N; ii++)
{
dynamics[ii]->opts_update(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
cost[ii]->opts_update(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
constraints[ii]->opts_update(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
return;
}
void ocp_nlp_opts_set(void *config_, void *opts_, const char *field, void* value)
{
ocp_nlp_opts *opts = (ocp_nlp_opts *) opts_;
ocp_nlp_config *config = config_;
int ii;
char module[MAX_STR_LEN];
char *ptr_module = NULL;
int module_length = 0;
// extract module name, i.e. substring in field before '_'
char *char_ = strchr(field, '_');
if (char_!=NULL)
{
module_length = char_-field;
for (ii=0; ii<module_length; ii++)
module[ii] = field[ii];
module[module_length] = '\0'; // add end of string
ptr_module = module;
}
// pass options to QP module
if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
{
config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts,
field+module_length+1, value);
}
// pass options to dynamics module
else // nlp opts
{
if (!strcmp(field, "reuse_workspace"))
{
int* reuse_workspace = (int *) value;
opts->reuse_workspace = *reuse_workspace;
}
else if (!strcmp(field, "num_threads"))
{
int* num_threads = (int *) value;
opts->num_threads = *num_threads;
}
else if (!strcmp(field, "step_length"))
{
double* step_length = (double *) value;
opts->step_length = *step_length;
}
else if (!strcmp(field, "exact_hess"))
{
int N = config->N;
// cost
for (ii=0; ii<=N; ii++)
config->cost[ii]->opts_set(config->cost[ii], opts->cost[ii], "exact_hess", value);
// dynamics
for (ii=0; ii<N; ii++)
config->dynamics[ii]->opts_set(config->dynamics[ii], opts->dynamics[ii], "compute_hess", value);
            // constraints: TODO disabled for now, as it prevents convergence
// for (ii=0; ii<=N; ii++)
// config->constraints[ii]->opts_set(config->constraints[ii], opts->constraints[ii], "compute_hess", value);
}
else
{
printf("\nerror: ocp_nlp_opts_set: wrong field: %s\n", field);
exit(1);
}
}
return;
}
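/*
 * Usage sketch: fields without a recognized module prefix are handled at
 * the nlp level; a "qp_" prefix is stripped and the remainder forwarded to
 * the QP solver's own opts_set.  Which QP fields exist depends on the
 * configured solver -- "warm_start" below is hypothetical.
 */
static void opts_set_example(ocp_nlp_config *config, ocp_nlp_opts *opts)
{
    double step_length = 0.5;
    ocp_nlp_opts_set(config, opts, "step_length", &step_length);  // nlp-level
    int warm_start = 1;
    ocp_nlp_opts_set(config, opts, "qp_warm_start", &warm_start); // -> QP module
}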
void ocp_nlp_opts_set_at_stage(void *config_, void *opts_, int stage, const char *field, void* value)
{
ocp_nlp_opts *opts = (ocp_nlp_opts *) opts_;
ocp_nlp_config *config = config_;
int ii;
char module[MAX_STR_LEN];
char *ptr_module = NULL;
int module_length = 0;
// extract module name
char *char_ = strchr(field, '_');
if (char_!=NULL)
{
module_length = char_-field;
for (ii=0; ii<module_length; ii++)
module[ii] = field[ii];
module[module_length] = '\0'; // add end of string
ptr_module = module;
}
// pass options to dynamics module
if ( ptr_module!=NULL && (!strcmp(ptr_module, "dynamics")) )
{
config->dynamics[stage]->opts_set( config->dynamics[stage], opts->dynamics[stage],
field+module_length+1, value );
}
// pass options to cost module
else if ( ptr_module!=NULL && (!strcmp(ptr_module, "cost")) )
{
config->cost[stage]->opts_set( config->cost[stage], opts->cost[stage],
field+module_length+1, value);
}
// pass options to constraint module
else if ( ptr_module!=NULL && (!strcmp(ptr_module, "constraints")) )
{
config->constraints[stage]->opts_set( config->constraints[stage], opts->constraints[stage],
(char *) field+module_length+1, value);
}
else
{
printf("\nerror: ocp_nlp_opts_set_at_stage: wrong field: %s\n", field);
exit(1);
}
return;
}
/************************************************
* memory
************************************************/
int ocp_nlp_memory_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts)
{
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
// extract dims
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nz = dims->nz;
int *nu = dims->nu;
int *ni = dims->ni;
int size = sizeof(ocp_nlp_memory);
// qp in
size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
// qp out
size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
// qp solver
size += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// regularization
size += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize);
// dynamics
size += N * sizeof(void *);
for (int ii = 0; ii < N; ii++)
{
size += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
size += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
size += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
size += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
size += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
size += (N+1)*sizeof(bool); // set_sim_guess
size += (N+1)*sizeof(struct blasfeo_dmat); // dzduxt
size += 6*(N+1)*sizeof(struct blasfeo_dvec); // cost_grad ineq_fun ineq_adj dyn_adj sim_guess z_alg
size += 1*N*sizeof(struct blasfeo_dvec); // dyn_fun
for (int ii = 0; ii < N; ii++)
{
size += 1*blasfeo_memsize_dmat(nu[ii]+nx[ii], nz[ii]); // dzduxt
size += 1*blasfeo_memsize_dvec(nz[ii]); // z_alg
size += 2*blasfeo_memsize_dvec(nv[ii]); // cost_grad ineq_adj
size += 1*blasfeo_memsize_dvec(nu[ii] + nx[ii]); // dyn_adj
size += 1*blasfeo_memsize_dvec(nx[ii + 1]); // dyn_fun
size += 1*blasfeo_memsize_dvec(2 * ni[ii]); // ineq_fun
size += 1*blasfeo_memsize_dvec(nx[ii] + nz[ii]); // sim_guess
}
size += 1*blasfeo_memsize_dmat(nu[N]+nx[N], nz[N]); // dzduxt
size += 1*blasfeo_memsize_dvec(nz[N]); // z_alg
size += 2*blasfeo_memsize_dvec(nv[N]); // cost_grad ineq_adj
size += 1*blasfeo_memsize_dvec(nu[N] + nx[N]); // dyn_adj
size += 1*blasfeo_memsize_dvec(2 * ni[N]); // ineq_fun
size += 1*blasfeo_memsize_dvec(nx[N] + nz[N]); // sim_guess
size += 8; // initial align
size += 8; // middle align
size += 8; // blasfeo_struct align
size += 64; // blasfeo_mem align
make_int_multiple_of(8, &size);
return size;
}
ocp_nlp_memory *ocp_nlp_memory_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts, void *raw_memory)
{
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
// extract sizes
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nz = dims->nz;
int *nu = dims->nu;
int *ni = dims->ni;
char *c_ptr = (char *) raw_memory;
// initial align
align_char_to(8, &c_ptr);
// struct
ocp_nlp_memory *mem = (ocp_nlp_memory *) c_ptr;
c_ptr += sizeof(ocp_nlp_memory);
// dynamics
mem->dynamics = (void **) c_ptr;
c_ptr += N*sizeof(void *);
// cost
mem->cost = (void **) c_ptr;
c_ptr += (N+1)*sizeof(void *);
// constraints
mem->constraints = (void **) c_ptr;
c_ptr += (N+1)*sizeof(void *);
// middle align
align_char_to(8, &c_ptr);
// qp in
mem->qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
// qp out
mem->qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
// QP solver
mem->qp_solver_mem = qp_solver->memory_assign(qp_solver, dims->qp_solver, opts->qp_solver_opts, c_ptr);
c_ptr += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// regularization
mem->regularize_mem = config->regularize->memory_assign(config->regularize, dims->regularize,
opts->regularize, c_ptr);
c_ptr += config->regularize->memory_calculate_size(config->regularize, dims->regularize,
opts->regularize);
// dynamics
for (int ii = 0; ii < N; ii++)
{
mem->dynamics[ii] = dynamics[ii]->memory_assign(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii], c_ptr);
c_ptr += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (int ii = 0; ii <= N; ii++)
{
mem->cost[ii] = cost[ii]->memory_assign(cost[ii], dims->cost[ii], opts->cost[ii], c_ptr);
c_ptr += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (int ii = 0; ii <= N; ii++)
{
mem->constraints[ii] = constraints[ii]->memory_assign(constraints[ii],
dims->constraints[ii], opts->constraints[ii], c_ptr);
c_ptr += constraints[ii]->memory_calculate_size( constraints[ii], dims->constraints[ii],
opts->constraints[ii]);
}
// set_sim_guess
assign_and_advance_bool(N+1, &mem->set_sim_guess, &c_ptr);
for (int ii = 0; ii <= N; ++ii)
{
mem->set_sim_guess[ii] = false;
}
// blasfeo_struct align
align_char_to(8, &c_ptr);
// dzduxt
mem->dzduxt = (struct blasfeo_dmat *) c_ptr;
c_ptr += (N+1)*sizeof(struct blasfeo_dmat);
// z_alg
mem->z_alg = (struct blasfeo_dvec *) c_ptr;
c_ptr += (N+1)*sizeof(struct blasfeo_dvec);
// cost_grad
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->cost_grad, &c_ptr);
// ineq_fun
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->ineq_fun, &c_ptr);
// ineq_adj
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->ineq_adj, &c_ptr);
// dyn_fun
assign_and_advance_blasfeo_dvec_structs(N, &mem->dyn_fun, &c_ptr);
// dyn_adj
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->dyn_adj, &c_ptr);
// sim_guess
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->sim_guess, &c_ptr);
// blasfeo_mem align
align_char_to(64, &c_ptr);
// dzduxt
for (int ii=0; ii<=N; ii++)
{
blasfeo_create_dmat(nu[ii]+nx[ii], nz[ii], mem->dzduxt+ii, c_ptr);
c_ptr += blasfeo_memsize_dmat(nu[ii]+nx[ii], nz[ii]);
}
// z_alg
for (int ii=0; ii<=N; ii++)
{
blasfeo_create_dvec(nz[ii], mem->z_alg+ii, c_ptr);
c_ptr += blasfeo_memsize_dvec(nz[ii]);
}
// cost_grad
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(nv[ii], mem->cost_grad + ii, &c_ptr);
}
// ineq_fun
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], mem->ineq_fun + ii, &c_ptr);
}
// ineq_adj
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(nv[ii], mem->ineq_adj + ii, &c_ptr);
}
// dyn_fun
for (int ii = 0; ii < N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(nx[ii + 1], mem->dyn_fun + ii, &c_ptr);
}
// dyn_adj
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(nu[ii] + nx[ii], mem->dyn_adj + ii, &c_ptr);
}
// sim_guess
for (int ii = 0; ii <= N; ++ii)
{
assign_and_advance_blasfeo_dvec_mem(nx[ii] + nz[ii], mem->sim_guess + ii, &c_ptr);
// set to 0;
blasfeo_dvecse(nx[ii] + nz[ii], 0.0, mem->sim_guess+ii, 0);
// printf("sim_guess ii %d: %p\n", ii, mem->sim_guess+ii);
}
// printf("created memory %p\n", mem);
return mem;
}
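/* Minimal usage sketch of the calculate_size/assign pattern used throughout
this file (the plain malloc is illustrative; any allocator returning the
requested number of bytes works, since assign re-aligns internally):

    int size = ocp_nlp_memory_calculate_size(config, dims, opts);
    void *raw = malloc(size);
    ocp_nlp_memory *mem = ocp_nlp_memory_assign(config, dims, opts, raw);
*/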
/************************************************
* workspace
************************************************/
int ocp_nlp_workspace_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts)
{
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int ii;
int N = dims->N;
// int *nx = dims->nx;
// int *nu = dims->nu;
// int *nz = dims->nz;
int size = 0;
int size_tmp = 0;
int tmp;
// nlp
size += sizeof(ocp_nlp_workspace);
// tmp_nlp_out
size += ocp_nlp_out_calculate_size(config, dims);
// weights_nlp_out
size += ocp_nlp_out_calculate_size(config, dims);
// array of pointers
// cost
size += (N+1)*sizeof(void *);
// dynamics
size += N*sizeof(void *);
// constraints
size += (N+1)*sizeof(void *);
// module workspace
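// Note: with reuse_workspace (and no OpenMP), one buffer sized to the maximum
// of the per-module workspace sizes is shared, since the modules run one at a
// time; with OpenMP the modules run concurrently, so the per-module sizes are
// summed and each module gets its own region (as in the no-reuse case below).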
if (opts->reuse_workspace)
{
#if defined(ACADOS_WITH_OPENMP)
// qp solver
size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
opts->qp_solver_opts);
// dynamics
for (ii = 0; ii < N; ii++)
{
size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
size += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
#else
// qp solver
tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
// dynamics
for (ii = 0; ii < N; ii++)
{
tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// cost
for (ii = 0; ii <= N; ii++)
{
tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// constraints
for (ii = 0; ii <= N; ii++)
{
tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
size += size_tmp;
#endif
}
else
{
// qp solver
size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
opts->qp_solver_opts);
// dynamics
for (ii = 0; ii < N; ii++)
{
size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
size += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
}
return size;
}
ocp_nlp_workspace *ocp_nlp_workspace_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts, ocp_nlp_memory *mem, void *raw_memory)
{
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int N = dims->N;
// int *nx = dims->nx;
// int *nu = dims->nu;
// int *nz = dims->nz;
char *c_ptr = (char *) raw_memory;
ocp_nlp_workspace *work = (ocp_nlp_workspace *) c_ptr;
c_ptr += sizeof(ocp_nlp_workspace);
// tmp_nlp_out
work->tmp_nlp_out = ocp_nlp_out_assign(config, dims, c_ptr);
c_ptr += ocp_nlp_out_calculate_size(config, dims);
// weights_nlp_out
work->weights_nlp_out = ocp_nlp_out_assign(config, dims, c_ptr);
c_ptr += ocp_nlp_out_calculate_size(config, dims);
// array of pointers
//
work->dynamics = (void **) c_ptr;
c_ptr += N*sizeof(void *);
//
work->cost = (void **) c_ptr;
c_ptr += (N+1)*sizeof(void *);
//
work->constraints = (void **) c_ptr;
c_ptr += (N+1)*sizeof(void *);
if (opts->reuse_workspace)
{
#if defined(ACADOS_WITH_OPENMP)
// qp solver
work->qp_work = (void *) c_ptr;
c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// dynamics
for (int ii = 0; ii < N; ii++)
{
work->dynamics[ii] = c_ptr;
c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (int ii = 0; ii <= N; ii++)
{
work->cost[ii] = c_ptr;
c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (int ii = 0; ii <= N; ii++)
{
work->constraints[ii] = c_ptr;
c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
#else
int size_tmp = 0;
int tmp;
// qp solver
work->qp_work = (void *) c_ptr;
tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
// dynamics
for (int ii = 0; ii < N; ii++)
{
work->dynamics[ii] = c_ptr;
tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// cost
for (int ii = 0; ii <= N; ii++)
{
work->cost[ii] = c_ptr;
tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// constraints
for (int ii = 0; ii <= N; ii++)
{
work->constraints[ii] = c_ptr;
tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
c_ptr += size_tmp;
#endif
}
else
{
// qp solver
work->qp_work = (void *) c_ptr;
c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
opts->qp_solver_opts);
// dynamics
for (int ii = 0; ii < N; ii++)
{
work->dynamics[ii] = c_ptr;
c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (int ii = 0; ii <= N; ii++)
{
work->cost[ii] = c_ptr;
c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (int ii = 0; ii <= N; ii++)
{
work->constraints[ii] = c_ptr;
c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
}
assert((char *) work + ocp_nlp_workspace_calculate_size(config, dims, opts) >= c_ptr);
return work;
}
/************************************************
* functions
************************************************/
void ocp_nlp_initialize_qp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int ii;
int N = dims->N;
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (ii = 0; ii <= N; ii++)
{
// cost
config->cost[ii]->initialize(config->cost[ii], dims->cost[ii], in->cost[ii],
opts->cost[ii], mem->cost[ii], work->cost[ii]);
// dynamics
if (ii < N)
config->dynamics[ii]->initialize(config->dynamics[ii], dims->dynamics[ii],
in->dynamics[ii], opts->dynamics[ii], mem->dynamics[ii], work->dynamics[ii]);
// constraints
config->constraints[ii]->initialize(config->constraints[ii], dims->constraints[ii],
in->constraints[ii], opts->constraints[ii], mem->constraints[ii], work->constraints[ii]);
}
return;
}
void ocp_nlp_approximate_qp_matrices(ocp_nlp_config *config, ocp_nlp_dims *dims,
ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem,
ocp_nlp_workspace *work)
{
int i;
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nu = dims->nu;
int *ni = dims->ni;
/* stage-wise multiple shooting Lagrangian evaluation */
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i = 0; i <= N; i++)
{
// init Hessian to 0
blasfeo_dgese(nu[i] + nx[i], nu[i] + nx[i], 0.0, mem->qp_in->RSQrq+i, 0, 0);
// dynamics
if (i < N)
config->dynamics[i]->update_qp_matrices(config->dynamics[i], dims->dynamics[i],
in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]);
// cost
config->cost[i]->update_qp_matrices(config->cost[i], dims->cost[i], in->cost[i],
opts->cost[i], mem->cost[i], work->cost[i]);
// constraints
config->constraints[i]->update_qp_matrices(config->constraints[i], dims->constraints[i],
in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]);
}
/* collect stage-wise evaluations */
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i=0; i <= N; i++)
{
// nlp mem: cost_grad
struct blasfeo_dvec *cost_grad = config->cost[i]->memory_get_grad_ptr(mem->cost[i]);
blasfeo_dveccp(nv[i], cost_grad, 0, mem->cost_grad + i, 0);
// nlp mem: dyn_fun
if (i < N)
{
struct blasfeo_dvec *dyn_fun
= config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
blasfeo_dveccp(nx[i + 1], dyn_fun, 0, mem->dyn_fun + i, 0);
}
// nlp mem: dyn_adj
if (i < N)
{
struct blasfeo_dvec *dyn_adj
= config->dynamics[i]->memory_get_adj_ptr(mem->dynamics[i]);
blasfeo_dveccp(nu[i] + nx[i], dyn_adj, 0, mem->dyn_adj + i, 0);
}
else
{
blasfeo_dvecse(nu[N] + nx[N], 0.0, mem->dyn_adj + N, 0);
}
if (i > 0)
{
struct blasfeo_dvec *dyn_adj
= config->dynamics[i-1]->memory_get_adj_ptr(mem->dynamics[i-1]);
blasfeo_daxpy(nx[i], 1.0, dyn_adj, nu[i-1]+nx[i-1], mem->dyn_adj+i, nu[i],
mem->dyn_adj+i, nu[i]);
}
// nlp mem: ineq_fun
struct blasfeo_dvec *ineq_fun =
config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
blasfeo_dveccp(2 * ni[i], ineq_fun, 0, mem->ineq_fun + i, 0);
// nlp mem: ineq_adj
struct blasfeo_dvec *ineq_adj =
config->constraints[i]->memory_get_adj_ptr(mem->constraints[i]);
blasfeo_dveccp(nv[i], ineq_adj, 0, mem->ineq_adj + i, 0);
}
for (i = 0; i <= N; i++)
{
// TODO(rien) where should the update happen??? move to qp update ???
// TODO(all): fix and move where appropriate
// if (i<N)
// {
// ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
// sim_opts *opts = dynamics_opts->sim_solver;
// if (opts->scheme != NULL && opts->scheme->type != exact)
// {
// for (int_t j = 0; j < nx; j++)
// BLASFEO_DVECEL(nlp_mem->cost_grad+i, nu+j) += work->sim_out[i]->grad[j];
// for (int_t j = 0; j < nu; j++)
// BLASFEO_DVECEL(nlp_mem->cost_grad+i, j) += work->sim_out[i]->grad[nx+j];
// }
// }
}
return;
}
// update QP rhs for SQP (step prim var, abs dual var)
// TODO(all): move in dynamics, cost, constraints modules ???
void ocp_nlp_approximate_qp_vectors_sqp(ocp_nlp_config *config,
ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts,
ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int i;
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i = 0; i <= N; i++)
{
// g
blasfeo_dveccp(nv[i], mem->cost_grad + i, 0, mem->qp_in->rqz + i, 0);
// b
if (i < N)
blasfeo_dveccp(nx[i + 1], mem->dyn_fun + i, 0, mem->qp_in->b + i, 0);
// d
blasfeo_dveccp(2 * ni[i], mem->ineq_fun + i, 0, mem->qp_in->d + i, 0);
}
return;
}
void ocp_nlp_embed_initial_value(ocp_nlp_config *config, ocp_nlp_dims *dims,
ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts,
ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int *ni = dims->ni;
// constraints
config->constraints[0]->bounds_update(config->constraints[0], dims->constraints[0],
in->constraints[0], opts->constraints[0], mem->constraints[0], work->constraints[0]);
// nlp mem: ineq_fun
struct blasfeo_dvec *ineq_fun =
config->constraints[0]->memory_get_fun_ptr(mem->constraints[0]);
blasfeo_dveccp(2 * ni[0], ineq_fun, 0, mem->ineq_fun, 0);
// d
blasfeo_dveccp(2 * ni[0], mem->ineq_fun, 0, mem->qp_in->d, 0);
return;
}
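// Evaluate an l1-type merit function at the current iterate; in the loops
// below this amounts to
//     merit = sum_i cost_i + sum_i |w_pi_i|'*|dyn_fun_i|
//           + sum_i |w_lam_i|'*max(0, ineq_fun_i)
// where the weights w_pi, w_lam are the multiplier estimates stored in
// work->weights_nlp_out.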
double ocp_nlp_evaluate_merit_fun(ocp_nlp_config *config, ocp_nlp_dims *dims,
ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts,
ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int i, j;
int N = dims->N;
int *nx = dims->nx;
int *ni = dims->ni;
double merit_fun = 0.0;
// compute fun value
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i=0; i<=N; i++)
{
// cost
config->cost[i]->compute_fun(config->cost[i], dims->cost[i], in->cost[i], opts->cost[i], mem->cost[i], work->cost[i]);
}
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i=0; i<N; i++)
{
// dynamics
config->dynamics[i]->compute_fun(config->dynamics[i], dims->dynamics[i], in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]);
}
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i=0; i<=N; i++)
{
// constr
config->constraints[i]->compute_fun(config->constraints[i], dims->constraints[i],
in->constraints[i], opts->constraints[i],
mem->constraints[i], work->constraints[i]);
}
double *tmp_fun;
double tmp;
struct blasfeo_dvec *tmp_fun_vec;
double cost_fun = 0.0;
for(i=0; i<=N; i++)
{
tmp_fun = config->cost[i]->memory_get_fun_ptr(mem->cost[i]);
cost_fun += *tmp_fun;
}
double dyn_fun = 0.0;
for(i=0; i<N; i++)
{
// printf("\ni %d\n", i);
tmp_fun_vec = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
// blasfeo_print_exp_tran_dvec(nx[i+1], tmp_fun_vec, 0);
// blasfeo_print_exp_tran_dvec(nx[i+1], work->weights_nlp_out->pi+i, 0);
for(j=0; j<nx[i+1]; j++)
{
// printf("\n%e %e\n", fabs(BLASFEO_DVECEL(work->weights_nlp_out->pi+i, j)), fabs(BLASFEO_DVECEL(tmp_fun_vec, j)));
dyn_fun += fabs(BLASFEO_DVECEL(work->weights_nlp_out->pi+i, j)) * fabs(BLASFEO_DVECEL(tmp_fun_vec, j));
}
}
double constr_fun = 0.0;
for(i=0; i<=N; i++)
{
// printf("\ni %d\n", i);
tmp_fun_vec = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
// blasfeo_print_exp_tran_dvec(2*ni[i], tmp_fun_vec, 0);
// blasfeo_print_exp_tran_dvec(2*ni[i], work->weights_nlp_out->lam+i, 0);
for(j=0; j<2*ni[i]; j++)
{
tmp = BLASFEO_DVECEL(tmp_fun_vec, j);
tmp = tmp>0.0 ? tmp : 0.0;
// printf("\n%e %e\n", fabs(BLASFEO_DVECEL(work->weights_nlp_out->pi+i, j)), fabs(BLASFEO_DVECEL(tmp_fun_vec, j)));
constr_fun += fabs(BLASFEO_DVECEL(work->weights_nlp_out->lam+i, j)) * tmp;
}
}
merit_fun = cost_fun + dyn_fun + constr_fun;
printf("\n%e %e %e %e\n", merit_fun, cost_fun, dyn_fun, constr_fun);
return merit_fun;
}
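// Apply the SQP step with (fixed) step length alpha: the primal variables ux
// move by alpha times the QP step, while the duals (pi, lam) and slacks (t)
// are updated as the convex combination (1-alpha)*current + alpha*qp_out,
// since the QP solution carries absolute multipliers rather than increments.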
void ocp_nlp_update_variables_sqp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int i;
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nu = dims->nu;
int *ni = dims->ni;
int *nz = dims->nz;
// ocp_nlp_config *config = (ocp_nlp_config *) config_;
// (fixed) step length
double alpha = opts->step_length;
#if 0 // XXX test piece of code
int j;
double tmp0, tmp1;
// current point
for (i = 0; i <= N; i++)
blasfeo_dveccp(nv[i], out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);
for (i = 0; i < N; i++)
blasfeo_dveccp(nx[i+1], out->pi+i, 0, work->tmp_nlp_out->pi+i, 0);
for (i = 0; i <= N; i++)
blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->tmp_nlp_out->lam+i, 0);
// linear update of algebraic variables using state and input sensitivity
// if (i < N)
// {
// blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0);
// }
// initialize weights
if(mem->sqp_iter[0]==0)
{
for (i = 0; i < N; i++)
blasfeo_dveccp(nx[i+1], out->pi+i, 0, work->weights_nlp_out->pi+i, 0);
for (i = 0; i <= N; i++)
blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->weights_nlp_out->lam+i, 0);
}
// update weights
for (i = 0; i < N; i++)
{
for(j=0; j<nx[i+1]; j++)
{
tmp0 = fabs(BLASFEO_DVECEL(work->weights_nlp_out->pi+i, j));
tmp1 = 0.5 * (tmp0 + fabs(BLASFEO_DVECEL(mem->qp_out->pi+i, j)));
BLASFEO_DVECEL(work->weights_nlp_out->pi+i, j) = tmp0>tmp1 ? tmp0 : tmp1;
}
}
for (i = 0; i <= N; i++)
{
for(j=0; j<2*ni[i]; j++)
{
tmp0 = fabs(BLASFEO_DVECEL(work->weights_nlp_out->lam+i, j));
tmp1 = 0.5 * (tmp0 + fabs(BLASFEO_DVECEL(mem->qp_out->lam+i, j)));
BLASFEO_DVECEL(work->weights_nlp_out->lam+i, j) = tmp0>tmp1 ? tmp0 : tmp1;
}
}
printf("\n\nmerit fun value\n");
double merit_fun0 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);
double alpha_min = 0.2;
for (j=0; j<10 && alpha>alpha_min; j++)
{
for (i = 0; i <= N; i++)
blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux+i, 0, out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);
printf("\n%d tmp merit fun value\n", j);
double merit_fun1 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);
if(merit_fun1 < merit_fun0)
{
break;
}
else
{
alpha *= 0.7;
}
}
printf("\nalpha %f\n", alpha);
#endif
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i = 0; i <= N; i++)
{
// (full) step in primal variables
blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux + i, 0, out->ux + i, 0, out->ux + i, 0);
// update dual variables
if (i < N)
{
blasfeo_dvecsc(nx[i+1], 1.0-alpha, out->pi+i, 0);
blasfeo_daxpy(nx[i+1], alpha, mem->qp_out->pi+i, 0, out->pi+i, 0, out->pi+i, 0);
}
blasfeo_dvecsc(2*ni[i], 1.0-alpha, out->lam+i, 0);
blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->lam+i, 0, out->lam+i, 0, out->lam+i, 0);
// update slack values
blasfeo_dvecsc(2*ni[i], 1.0-alpha, out->t+i, 0);
blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->t+i, 0, out->t+i, 0, out->t+i, 0);
// linear update of algebraic variables using state and input sensitivity
if (i < N)
{
blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0);
}
}
return;
}
/************************************************
* residuals
************************************************/
int ocp_nlp_res_calculate_size(ocp_nlp_dims *dims)
{
// extract dims
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
int size = sizeof(ocp_nlp_res);
size += 3 * (N + 1) * sizeof(struct blasfeo_dvec); // res_g res_d res_m
size += 1 * N * sizeof(struct blasfeo_dvec); // res_b
for (int ii = 0; ii < N; ii++)
{
size += 1 * blasfeo_memsize_dvec(nv[ii]); // res_g
size += 1 * blasfeo_memsize_dvec(nx[ii + 1]); // res_b
size += 2 * blasfeo_memsize_dvec(2 * ni[ii]); // res_d res_m
}
size += 1 * blasfeo_memsize_dvec(nv[N]); // res_g
size += 2 * blasfeo_memsize_dvec(2 * ni[N]); // res_d res_m
size += 8; // initial align
size += 8; // blasfeo_struct align
size += 64; // blasfeo_mem align
// make_int_multiple_of(64, &size);
return size;
}
ocp_nlp_res *ocp_nlp_res_assign(ocp_nlp_dims *dims, void *raw_memory)
{
char *c_ptr = (char *) raw_memory;
// extract sizes
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
// initial align
align_char_to(8, &c_ptr);
// struct
ocp_nlp_res *res = (ocp_nlp_res *) c_ptr;
c_ptr += sizeof(ocp_nlp_res);
// blasfeo_struct align
align_char_to(8, &c_ptr);
// res_g
assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_g, &c_ptr);
// res_b
assign_and_advance_blasfeo_dvec_structs(N, &res->res_b, &c_ptr);
// res_d
assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_d, &c_ptr);
// res_m
assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_m, &c_ptr);
// blasfeo_mem align
align_char_to(64, &c_ptr);
// res_g
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(nv[ii], res->res_g + ii, &c_ptr);
}
// res_b
for (int ii = 0; ii < N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(nx[ii + 1], res->res_b + ii, &c_ptr);
}
// res_d
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], res->res_d + ii, &c_ptr);
}
// res_m
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], res->res_m + ii, &c_ptr);
}
res->memsize = ocp_nlp_res_calculate_size(dims);
return res;
}
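// Compute the stage-wise KKT residuals and their infinity norms:
//   res_g = cost_grad - ineq_adj - dyn_adj   (stationarity)
//   res_b = dyn_fun                          (dynamics feasibility)
//   res_d = t + ineq_fun                     (inequality feasibility)
//   res_m = lam .* t                         (complementarity)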
void ocp_nlp_res_compute(ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_res *res,
ocp_nlp_memory *mem)
{
// extract dims
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nu = dims->nu;
int *ni = dims->ni;
double tmp_res;
// res_g
res->inf_norm_res_g = 0.0;
for (int ii = 0; ii <= N; ii++)
{
blasfeo_daxpy(nv[ii], -1.0, mem->ineq_adj + ii, 0, mem->cost_grad + ii, 0, res->res_g + ii,
0);
blasfeo_daxpy(nu[ii] + nx[ii], -1.0, mem->dyn_adj + ii, 0, res->res_g + ii, 0,
res->res_g + ii, 0);
blasfeo_dvecnrm_inf(nv[ii], res->res_g + ii, 0, &tmp_res);
res->inf_norm_res_g = tmp_res > res->inf_norm_res_g ? tmp_res : res->inf_norm_res_g;
}
// res_b
res->inf_norm_res_b = 0.0;
for (int ii = 0; ii < N; ii++)
{
blasfeo_dveccp(nx[ii + 1], mem->dyn_fun + ii, 0, res->res_b + ii, 0);
blasfeo_dvecnrm_inf(nx[ii + 1], res->res_b + ii, 0, &tmp_res);
res->inf_norm_res_b = tmp_res > res->inf_norm_res_b ? tmp_res : res->inf_norm_res_b;
}
// res_d
res->inf_norm_res_d = 0.0;
for (int ii = 0; ii <= N; ii++)
{
blasfeo_daxpy(2 * ni[ii], 1.0, out->t + ii, 0, mem->ineq_fun + ii, 0, res->res_d + ii, 0);
blasfeo_dvecnrm_inf(2 * ni[ii], res->res_d + ii, 0, &tmp_res);
res->inf_norm_res_d = tmp_res > res->inf_norm_res_d ? tmp_res : res->inf_norm_res_d;
}
// res_m
res->inf_norm_res_m = 0.0;
for (int ii = 0; ii <= N; ii++)
{
blasfeo_dvecmul(2 * ni[ii], out->lam + ii, 0, out->t + ii, 0, res->res_m + ii, 0);
blasfeo_dvecnrm_inf(2 * ni[ii], res->res_m + ii, 0, &tmp_res);
res->inf_norm_res_m = tmp_res > res->inf_norm_res_m ? tmp_res : res->inf_norm_res_m;
}
return;
}
|
ThreadPool.h
|
/*
Copyright (c) 2017, Michael Kazhdan
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer. Redistributions in binary form must
reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the
distribution.
Neither the name of the Johns Hopkins University nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef THREADPOOL_H_
#define THREADPOOL_H_
#include <omp.h>
#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <future>
#include <thread>
#include <vector>
struct ThreadPool {
static const size_t chunk_size = 128;
static bool _Close;
static volatile unsigned int _RemainingTasks;
static std::condition_variable _WaitingForWorkOrClose, _DoneWithWork;
static std::vector<std::thread> _Threads;
static std::function<void(unsigned int)> _ThreadFunction;
static void Parallel_for(size_t begin,
size_t end,
const std::function<void(unsigned int, size_t)>
&iterationFunction) {
if (begin >= end) {
return;
}
size_t range = end - begin;
size_t chunks = (range + chunk_size - 1) / chunk_size;
unsigned int threads = (unsigned int)NumThreads();
std::atomic<size_t> index;
index.store(0);
if (range < chunk_size || threads == 1) {
for (size_t i = begin; i < end; i++) {
iterationFunction(0, i);
}
return;
}
auto _ChunkFunction = [&iterationFunction, begin, end](
unsigned int thread, size_t chunk) {
const size_t _begin = begin + chunk_size * chunk;
const size_t _end = std::min<size_t>(end, _begin + chunk_size);
for (size_t i = _begin; i < _end; i++) {
iterationFunction(thread, i);
}
};
_ThreadFunction = [&_ChunkFunction, chunks,
&index](unsigned int thread) {
size_t chunk;
while ((chunk = index.fetch_add(1)) < chunks) {
_ChunkFunction(thread, chunk);
}
};
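// NOTE: in this OpenMP-backed variant _ThreadFunction (and the atomic
// chunk index it captures) is assigned but never invoked; the chunks are
// dispatched directly by the omp parallel for below.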
#pragma omp parallel for
for (size_t c = 0; c < chunks; c++) {
_ChunkFunction(omp_get_thread_num(), c);
}
}
static unsigned int NumThreads(void) {
return (unsigned int)_Threads.size() + 1;
}
static void Init(
unsigned int numThreads = std::thread::hardware_concurrency()) {
if (_Threads.size() && !_Close) {
_Close = true;
_WaitingForWorkOrClose.notify_all();
for (unsigned int t = 0; t < _Threads.size(); t++)
_Threads[t].join();
}
_Close = true;
// one thread is the caller itself (NumThreads() returns _Threads.size()+1);
// guard against hardware_concurrency() reporting 0
if (numThreads == 0) numThreads = 1;
numThreads--;
_Threads.resize(numThreads);
}
static void Terminate(void) {
if (_Threads.size() && !_Close) {
_Close = true;
_WaitingForWorkOrClose.notify_all();
for (unsigned int t = 0; t < _Threads.size(); t++)
_Threads[t].join();
_Threads.resize(0);
}
}
template <typename... Functions>
static void ParallelSections(const Functions &... functions) {
std::vector<std::future<void>> futures(sizeof...(Functions));
_ParallelSections(&futures[0], functions...);
for (size_t t = 0; t < futures.size(); t++) futures[t].get();
}
template <typename Function>
static void _ParallelSections(std::future<void> *futures,
const Function &function) {
*futures = std::async(std::launch::async, function);
}
template <typename Function, typename... Functions>
static void _ParallelSections(std::future<void> *futures,
const Function &function,
const Functions &... functions) {
*futures = std::async(std::launch::async, function);
_ParallelSections(futures + 1, functions...);
}
};
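// Minimal usage sketch (hypothetical caller code; `data`, `taskA` and `taskB`
// are placeholders, not part of this header):
//
//   ThreadPool::Init();
//   std::vector<double> partial(ThreadPool::NumThreads(), 0.0);
//   ThreadPool::Parallel_for(0, data.size(),
//       [&](unsigned int thread, size_t i) { partial[thread] += data[i]; });
//   ThreadPool::ParallelSections([] { taskA(); }, [] { taskB(); });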
// Hack to avoid using the linker for now
bool ThreadPool::_Close;
volatile unsigned int ThreadPool::_RemainingTasks;
std::condition_variable ThreadPool::_WaitingForWorkOrClose;
std::condition_variable ThreadPool::_DoneWithWork;
std::vector<std::thread> ThreadPool::_Threads;
std::function<void(unsigned int)> ThreadPool::_ThreadFunction;
#endif // THREADPOOL_H_
|
GB_unop__tanh_fc32_fc32.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__tanh_fc32_fc32)
// op(A') function: GB (_unop_tran__tanh_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = ctanhf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ctanhf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = ctanhf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TANH || GxB_NO_FC32)
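// (the GxB_NO_* flags are set in GB_control.h, included above; when the
// kernel is disabled the functions below return GrB_NO_VALUE and the caller
// falls back to the generic, typecasting implementation)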
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__tanh_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = ctanhf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = ctanhf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__tanh_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__lxor_uint32.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_uint32)
// A*D function (colscale): GB (_AxD__lxor_uint32)
// D*A function (rowscale): GB (_DxB__lxor_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_uint32)
// C=scalar+B GB (_bind1st__lxor_uint32)
// C=scalar+B' GB (_bind1st_tran__lxor_uint32)
// C=A+scalar GB (_bind2nd__lxor_uint32)
// C=A'+scalar GB (_bind2nd_tran__lxor_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = ((aij != 0) != (bij != 0))
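// (logical xor on the nonzero pattern: e.g. aij=3, bij=0 gives cij=1,
// while aij=3, bij=7 gives cij=0)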
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_UINT32 || GxB_NO_LXOR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lxor_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lxor_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__lxor_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lxor_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lxor_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = Bx [p] ;
Cx [p] = ((x != 0) != (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lxor_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = Ax [p] ;
Cx [p] = ((aij != 0) != (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__lxor_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
band.h
|
// Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file band.h
*
* \brief Contains declaration and partial implementation of sirius::Band class.
*/
#ifndef __BAND_H__
#define __BAND_H__
#include "periodic_function.h"
#include "k_point_set.h"
#include "potential.h"
#include "local_operator.h"
#include "non_local_operator.h"
namespace sirius
{
// TODO: Band problem is a mess and needs more formal organization. We have different basis functions.
// We can do first- and second-variation or a full variation. We can do iterative or exact diagonalization.
// This has to be organized.
/// Setup and solve the eigen value problem.
class Band
{
private:
/// Simulation context.
Simulation_context& ctx_;
/// Alias for the unit cell.
Unit_cell& unit_cell_;
/// BLACS grid for distributed linear algebra operations.
BLACS_grid const& blacs_grid_;
/// Non-zero Gaunt coefficients
std::unique_ptr<Gaunt_coefficients<double_complex>> gaunt_coefs_;
/// Interface to a standard eigen-value solver.
std::unique_ptr<Eigenproblem> std_evp_solver_;
/// Interface to a generalized eigen-value solver.
std::unique_ptr<Eigenproblem> gen_evp_solver_;
std::unique_ptr<Local_operator> local_op_;
/// Apply effective magnetic field to the first-variational state.
/** Must be called first because hpsi is overwritten with B|fv_j>. */
void apply_magnetic_field(wave_functions& fv_states__,
Gvec const& gkvec__,
Periodic_function<double>* effective_magnetic_field__[3],
std::vector<wave_functions>& hpsi__) const;
/// Apply SO correction to the first-variational states.
/** Raising and lowering operators:
* \f[
* L_{\pm} Y_{\ell m}= (L_x \pm i L_y) Y_{\ell m} = \sqrt{\ell(\ell+1) - m(m \pm 1)} Y_{\ell m \pm 1}
* \f]
*/
void apply_so_correction(mdarray<double_complex, 2>& fv_states, mdarray<double_complex, 3>& hpsi);
/// Apply UJ correction to scalar wave functions
template <spin_block_t sblock>
void apply_uj_correction(mdarray<double_complex, 2>& fv_states, mdarray<double_complex, 3>& hpsi);
/// Add interstitial contribution to apw-apw block of Hamiltonian and overlap
inline void set_fv_h_o_it(K_point* kp__,
Potential const& potential__,
matrix<double_complex>& h__,
matrix<double_complex>& o__) const;
inline void set_o_it(K_point* kp, mdarray<double_complex, 2>& o) const;
template <spin_block_t sblock>
inline void set_h_it(K_point* kp,
Periodic_function<double>* effective_potential,
Periodic_function<double>* effective_magnetic_field[3],
matrix<double_complex>& h) const;
/// Setup lo-lo block of Hamiltonian and overlap matrices
inline void set_fv_h_o_lo_lo(K_point* kp, mdarray<double_complex, 2>& h, mdarray<double_complex, 2>& o) const;
template <spin_block_t sblock>
inline void set_h_lo_lo(K_point* kp, mdarray<double_complex, 2>& h) const;
inline void set_o_lo_lo(K_point* kp, mdarray<double_complex, 2>& o) const;
inline void set_o(K_point* kp, mdarray<double_complex, 2>& o);
template <spin_block_t sblock>
inline void set_h(K_point* kp,
Periodic_function<double>* effective_potential,
Periodic_function<double>* effective_magnetic_field[3],
mdarray<double_complex, 2>& h);
inline void diag_fv_full_potential_exact(K_point* kp__,
Potential const& potential__) const;
inline void diag_fv_full_potential_davidson(K_point* kp__) const;
inline void apply_fv_o(K_point* kp__,
bool apw_only__,
bool add_o1__,
int N__,
int n__,
wave_functions& phi__,
wave_functions& ophi__) const;
inline void get_singular_components(K_point* kp__) const;
/// Exact (not iterative) diagonalization of the Hamiltonian.
template <typename T>
inline void diag_pseudo_potential_exact(K_point* kp__,
int ispn__,
D_operator<T>& d_op__,
Q_operator<T>& q_op__) const;
/// Iterative Davidson diagonalization.
template <typename T>
inline void diag_pseudo_potential_davidson(K_point* kp__,
int ispn__,
D_operator<T>& d_op__,
Q_operator<T>& q_op__) const;
/// RMM-DIIS diagonalization.
template <typename T>
inline void diag_pseudo_potential_rmm_diis(K_point* kp__,
int ispn__,
D_operator<T>& d_op__,
Q_operator<T>& q_op__) const;
template <typename T>
inline void diag_pseudo_potential_chebyshev(K_point* kp__,
int ispn__,
D_operator<T>& d_op__,
Q_operator<T>& q_op__,
P_operator<T>& p_op__) const;
template <typename T>
inline void apply_h(K_point* kp__,
int ispn__,
int N__,
int n__,
wave_functions& phi__,
wave_functions& hphi__,
D_operator<T>& d_op) const;
template <typename T>
void apply_h_o(K_point* kp__,
int ispn__,
int N__,
int n__,
wave_functions& phi__,
wave_functions& hphi__,
wave_functions& ophi__,
D_operator<T>& d_op,
Q_operator<T>& q_op) const;
/// Auxiliary function used internally by residuals() function.
inline mdarray<double,1> residuals_aux(K_point* kp__,
int num_bands__,
std::vector<double>& eval__,
wave_functions& hpsi__,
wave_functions& opsi__,
wave_functions& res__,
mdarray<double, 1>& h_diag__,
mdarray<double, 1>& o_diag__) const;
/// Compute residuals.
template <typename T>
inline int residuals(K_point* kp__,
int ispn__,
int N__,
int num_bands__,
std::vector<double>& eval__,
std::vector<double>& eval_old__,
dmatrix<T>& evec__,
wave_functions& hphi__,
wave_functions& ophi__,
wave_functions& hpsi__,
wave_functions& opsi__,
wave_functions& res__,
mdarray<double, 1>& h_diag__,
mdarray<double, 1>& o_diag__) const;
/// Setup the Hermitian subspace matrix.
/** Compute \f$ O_{ii'} = \langle \phi_i | \hat O | \phi_{i'} \rangle \f$ operator matrix
* for the subspace spanned by the wave-functions \f$ \phi_i \f$. The matrix is always returned
* in host (CPU) memory because most of the standard math libraries operate on host data. */
template <typename T>
inline void set_subspace_mtrx(int N__,
int n__,
wave_functions& phi__,
wave_functions& op_phi__,
dmatrix<T>& mtrx__,
dmatrix<T>& mtrx_old__) const
{
PROFILE("sirius::Band::set_subspace_mtrx");
assert(n__ != 0);
if (mtrx_old__.size()) {
assert(&mtrx__.blacs_grid() == &mtrx_old__.blacs_grid());
}
/* copy old N x N distributed matrix */
if (N__ > 0) {
splindex<block_cyclic> spl_row(N__, mtrx__.blacs_grid().num_ranks_row(), mtrx__.blacs_grid().rank_row(), mtrx__.bs_row());
splindex<block_cyclic> spl_col(N__, mtrx__.blacs_grid().num_ranks_col(), mtrx__.blacs_grid().rank_col(), mtrx__.bs_col());
#pragma omp parallel for
for (int i = 0; i < spl_col.local_size(); i++) {
std::memcpy(&mtrx__(0, i), &mtrx_old__(0, i), spl_row.local_size() * sizeof(T));
}
}
/* <{phi,phi_new}|Op|phi_new> */
inner(phi__, 0, N__ + n__, op_phi__, N__, n__, mtrx__, 0, N__);
/* restore lower part */
if (N__ > 0) {
if (mtrx__.blacs_grid().comm().size() == 1) {
#pragma omp parallel for
for (int i = 0; i < N__; i++) {
for (int j = N__; j < N__ + n__; j++) {
mtrx__(j, i) = std::conj(mtrx__(i, j));
}
}
} else {
linalg<CPU>::tranc(n__, N__, mtrx__, 0, N__, mtrx__, N__, 0);
}
}
if (ctx_.control().print_checksum_) {
splindex<block_cyclic> spl_row(N__ + n__, mtrx__.blacs_grid().num_ranks_row(), mtrx__.blacs_grid().rank_row(), mtrx__.bs_row());
splindex<block_cyclic> spl_col(N__ + n__, mtrx__.blacs_grid().num_ranks_col(), mtrx__.blacs_grid().rank_col(), mtrx__.bs_col());
double_complex cs(0, 0);
for (int i = 0; i < spl_col.local_size(); i++) {
for (int j = 0; j < spl_row.local_size(); j++) {
cs += mtrx__(j, i);
}
}
mtrx__.blacs_grid().comm().allreduce(&cs, 1);
DUMP("checksum(subspace_mtrx): %18.10f %18.10f", cs.real(), cs.imag());
}
mtrx__.make_real_diag(N__ + n__);
/* save new matrix */
if (mtrx_old__.size()) {
splindex<block_cyclic> spl_row(N__ + n__, mtrx__.blacs_grid().num_ranks_row(), mtrx__.blacs_grid().rank_row(), mtrx__.bs_row());
splindex<block_cyclic> spl_col(N__ + n__, mtrx__.blacs_grid().num_ranks_col(), mtrx__.blacs_grid().rank_col(), mtrx__.bs_col());
#pragma omp parallel for
for (int i = 0; i < spl_col.local_size(); i++) {
std::memcpy(&mtrx_old__(0, i), &mtrx__(0, i), spl_row.local_size() * sizeof(T));
}
}
}
/// Diagonalize a full-potential Hamiltonian.
void diag_fv_full_potential(K_point* kp__,
Potential const& potential__) const
{
auto& itso = ctx_.iterative_solver_input_section();
if (itso.type_ == "exact") {
diag_fv_full_potential_exact(kp__, potential__);
} else if (itso.type_ == "davidson") {
diag_fv_full_potential_davidson(kp__);
}
}
/// Diagonalize a pseudo-potential Hamiltonian.
template <typename T>
void diag_pseudo_potential(K_point* kp__,
Periodic_function<double>* effective_potential__,
Periodic_function<double>* effective_magnetic_field__[3]) const
{
PROFILE("sirius::Band::diag_pseudo_potential");
local_op_->prepare(kp__->gkvec());
ctx_.fft_coarse().prepare(kp__->gkvec().partition());
D_operator<T> d_op(ctx_, kp__->beta_projectors());
Q_operator<T> q_op(ctx_, kp__->beta_projectors());
auto& itso = ctx_.iterative_solver_input_section();
if (itso.type_ == "exact") {
if (ctx_.num_mag_dims() != 3) {
for (int ispn = 0; ispn < ctx_.num_spins(); ispn++) {
diag_pseudo_potential_exact(kp__, ispn, d_op, q_op);
}
} else {
STOP();
}
} else if (itso.type_ == "davidson") {
if (ctx_.num_mag_dims() != 3) {
for (int ispn = 0; ispn < ctx_.num_spins(); ispn++) {
diag_pseudo_potential_davidson(kp__, ispn, d_op, q_op);
}
} else {
STOP();
}
} else if (itso.type_ == "rmm-diis") {
if (ctx_.num_mag_dims() != 3) {
for (int ispn = 0; ispn < ctx_.num_spins(); ispn++) {
diag_pseudo_potential_rmm_diis(kp__, ispn, d_op, q_op);
}
} else {
STOP();
}
} else if (itso.type_ == "chebyshev") {
P_operator<T> p_op(ctx_, kp__->beta_projectors(), kp__->p_mtrx());
if (ctx_.num_mag_dims() != 3) {
for (int ispn = 0; ispn < ctx_.num_spins(); ispn++) {
diag_pseudo_potential_chebyshev(kp__, ispn, d_op, q_op, p_op);
}
} else {
STOP();
}
} else {
TERMINATE("unknown iterative solver type");
}
ctx_.fft_coarse().dismiss();
}
public:
/// Constructor
Band(Simulation_context& ctx__)
: ctx_(ctx__)
, unit_cell_(ctx__.unit_cell())
, blacs_grid_(ctx__.blacs_grid())
{
PROFILE("sirius::Band::Band");
gaunt_coefs_ = std::unique_ptr<Gaunt_coefficients<double_complex>>(
new Gaunt_coefficients<double_complex>(ctx_.lmax_apw(),
ctx_.lmax_pot(),
ctx_.lmax_apw(),
SHT::gaunt_hybrid));
Eigenproblem* ptr;
/* create standard eigen-value solver */
switch (ctx_.std_evp_solver_type()) {
case ev_lapack: {
ptr = new Eigenproblem_lapack(2 * linalg_base::dlamch('S'));
break;
}
#ifdef __SCALAPACK
case ev_scalapack: {
ptr = new Eigenproblem_scalapack(blacs_grid_, ctx_.cyclic_block_size(), ctx_.cyclic_block_size(), 1e-12);
break;
}
#endif
#ifdef __PLASMA
case ev_plasma: {
ptr = new Eigenproblem_plasma();
break;
}
#endif
#ifdef __MAGMA
case ev_magma: {
ptr = new Eigenproblem_magma();
break;
}
#endif
#ifdef __ELPA
case ev_elpa1: {
ptr = new Eigenproblem_elpa1(blacs_grid_, ctx_.cyclic_block_size());
break;
}
case ev_elpa2: {
ptr = new Eigenproblem_elpa2(blacs_grid_, ctx_.cyclic_block_size());
break;
}
#endif
default: {
TERMINATE("wrong standard eigen-value solver");
}
}
std_evp_solver_ = std::unique_ptr<Eigenproblem>(ptr);
/* create generalized eigen-value solver */
switch (ctx_.gen_evp_solver_type()) {
case ev_lapack: {
ptr = new Eigenproblem_lapack(2 * linalg_base::dlamch('S'));
break;
}
#ifdef __SCALAPACK
case ev_scalapack: {
ptr = new Eigenproblem_scalapack(blacs_grid_, ctx_.cyclic_block_size(), ctx_.cyclic_block_size(), 1e-12);
break;
}
#endif
#ifdef __ELPA
case ev_elpa1: {
ptr = new Eigenproblem_elpa1(blacs_grid_, ctx_.cyclic_block_size());
break;
}
case ev_elpa2: {
ptr = new Eigenproblem_elpa2(blacs_grid_, ctx_.cyclic_block_size());
break;
}
#endif
#ifdef __MAGMA
case ev_magma: {
ptr = new Eigenproblem_magma();
break;
}
#endif
#ifdef __RS_GEN_EIG
case ev_rs_gpu: {
ptr = new Eigenproblem_RS_GPU(blacs_grid_, ctx_.cyclic_block_size(), ctx_.cyclic_block_size());
break;
}
case ev_rs_cpu: {
ptr = new Eigenproblem_RS_CPU(blacs_grid_, ctx_.cyclic_block_size(), ctx_.cyclic_block_size());
break;
}
#endif
default: {
TERMINATE("wrong generalized eigen-value solver");
}
}
gen_evp_solver_ = std::unique_ptr<Eigenproblem>(ptr);
if (std_evp_solver_->parallel() != gen_evp_solver_->parallel()) {
TERMINATE("both eigen-value solvers must be serial or parallel");
}
if (!std_evp_solver_->parallel() && blacs_grid_.comm().size() > 1) {
TERMINATE("eigen-value solvers must be parallel");
}
local_op_ = std::unique_ptr<Local_operator>(new Local_operator(ctx_, ctx_.fft_coarse()));
}
/// Apply the muffin-tin part of the Hamiltonian to the apw basis functions of an atom.
/** The following matrix is computed:
* \f[
* b_{L_2 \nu_2}^{\alpha}({\bf G'}) = \sum_{L_1 \nu_1} \sum_{L_3}
* a_{L_1\nu_1}^{\alpha}({\bf G'})
* \langle u_{\ell_1\nu_1}^{\alpha} | h_{L3}^{\alpha} | u_{\ell_2\nu_2}^{\alpha}
* \rangle \langle Y_{L_1} | R_{L_3} | Y_{L_2} \rangle
* \f]
*/
template <spin_block_t sblock>
void apply_hmt_to_apw(Atom const& atom__,
int num_gkvec__,
mdarray<double_complex, 2>& alm__,
mdarray<double_complex, 2>& halm__) const
{
auto& type = atom__.type();
// TODO: this is k-independent and can in principle be precomputed together with radial integrals if memory is available
// TODO: for spin-collinear case hmt is Hermitian; compute upper triangular part and use zhemm
mdarray<double_complex, 2> hmt(type.mt_aw_basis_size(), type.mt_aw_basis_size());
/* compute the muffin-tin Hamiltonian */
for (int j2 = 0; j2 < type.mt_aw_basis_size(); j2++) {
int lm2 = type.indexb(j2).lm;
int idxrf2 = type.indexb(j2).idxrf;
for (int j1 = 0; j1 < type.mt_aw_basis_size(); j1++) {
int lm1 = type.indexb(j1).lm;
int idxrf1 = type.indexb(j1).idxrf;
hmt(j1, j2) = atom__.radial_integrals_sum_L3<sblock>(idxrf1, idxrf2, gaunt_coefs_->gaunt_vector(lm1, lm2));
}
}
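/* halm__ = alm__ * hmt^{T}: the second matrix is passed transposed to gemm */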
linalg<CPU>::gemm(0, 1, num_gkvec__, type.mt_aw_basis_size(), type.mt_aw_basis_size(), alm__, hmt, halm__);
}
void apply_o1mt_to_apw(Atom const& atom__,
int num_gkvec__,
mdarray<double_complex, 2>& alm__,
mdarray<double_complex, 2>& oalm__) const
{
auto& type = atom__.type();
for (int j = 0; j < type.mt_aw_basis_size(); j++) {
int l = type.indexb(j).l;
int lm = type.indexb(j).lm;
int idxrf = type.indexb(j).idxrf;
for (int order = 0; order < type.aw_order(l); order++) {
int j1 = type.indexb().index_by_lm_order(lm, order);
int idxrf1 = type.indexr().index_by_l_order(l, order);
for (int ig = 0; ig < num_gkvec__; ig++) {
oalm__(ig, j) += atom__.symmetry_class().o1_radial_integral(idxrf, idxrf1) * alm__(ig, j1);
}
}
}
}
/// Set up APW-lo and lo-APW blocks of the Hamiltonian and overlap matrices
void set_fv_h_o_apw_lo(K_point* kp,
Atom_type const& type,
Atom const& atom,
int ia,
mdarray<double_complex, 2>& alm_row,
mdarray<double_complex, 2>& alm_col,
mdarray<double_complex, 2>& h,
mdarray<double_complex, 2>& o) const;
template <spin_block_t sblock>
void set_h_apw_lo(K_point* kp, Atom_type* type, Atom* atom, int ia, mdarray<double_complex, 2>& alm,
mdarray<double_complex, 2>& h);
/// Set APW-lo and lo-APW blocks of the overlap matrix.
void set_o_apw_lo(K_point* kp, Atom_type* type, Atom* atom, int ia, mdarray<double_complex, 2>& alm,
mdarray<double_complex, 2>& o);
/// Set up the Hamiltonian and overlap matrices in the APW+lo basis
/** The Hamiltonian matrix has the following expression:
* \f[
* H_{\mu' \mu}=\langle \varphi_{\mu' } | \hat H | \varphi_{\mu } \rangle =
* \left( \begin{array}{cc}
* H_{\bf G'G} & H_{{\bf G'}j} \\
* H_{j'{\bf G}} & H_{j'j}
* \end{array} \right)
* \f]
* APW-APW block:
* \f{eqnarray*}{
* H_{{\bf G'} {\bf G}}^{\bf k} &=& \sum_{\alpha} \sum_{L'\nu', L\nu} a_{L'\nu'}^{\alpha *}({\bf G'+k})
* \langle u_{\ell' \nu'}^{\alpha}Y_{\ell' m'}|\hat h^{\alpha} | u_{\ell \nu}^{\alpha}Y_{\ell m} \rangle
* a_{L\nu}^{\alpha}({\bf G+k}) + \frac{1}{2}{\bf G'} {\bf G} \cdot \Theta({\bf G - G'}) + \tilde V_{eff}({\bf G - G'}) \\
* &=& \sum_{\alpha} \sum_{\xi' } a_{\xi'}^{\alpha *}({\bf G'+k})
* b_{\xi'}^{\alpha}({\bf G+k}) + \frac{1}{2}{\bf G'} {\bf G} \cdot \Theta({\bf G - G'}) + \tilde V_{eff}({\bf G - G'})
* \f}
* APW-lo block:
* \f[
* H_{{\bf G'} j}^{\bf k} = \sum_{L'\nu'} a_{L'\nu'}^{\alpha_j *}({\bf G'+k})
* \langle u_{\ell' \nu'}^{\alpha_j}Y_{\ell' m'}|\hat h^{\alpha_j} | \phi_{\ell_j}^{\zeta_j \alpha_j} Y_{\ell_j m_j} \rangle
* \f]
* lo-APW block:
* \f[
* H_{j' {\bf G}}^{\bf k} = \sum_{L\nu} \langle \phi_{\ell_{j'}}^{\zeta_{j'} \alpha_{j'}} Y_{\ell_{j'} m_{j'}}
* |\hat h^{\alpha_{j'}} | u_{\ell \nu}^{\alpha_{j'}}Y_{\ell m} \rangle a_{L\nu}^{\alpha_{j'}}({\bf G+k})
* \f]
* lo-lo block:
* \f[
* H_{j' j}^{\bf k} = \langle \phi_{\ell_{j'}}^{\zeta_{j'} \alpha_{j'}} Y_{\ell_{j'} m_{j'}}
* |\hat h^{\alpha_{j}} | \phi_{\ell_j}^{\zeta_j \alpha_j} Y_{\ell_j m_j} \rangle \delta_{\alpha_j \alpha_{j'}}
* \f]
*
* The overlap matrix has the following expression:
* \f[
* O_{\mu' \mu} = \langle \varphi_{\mu'} | \varphi_{\mu} \rangle
* \f]
* APW-APW block:
* \f[
* O_{{\bf G'} {\bf G}}^{\bf k} = \sum_{\alpha} \sum_{L\nu} a_{L\nu}^{\alpha *}({\bf G'+k})
* a_{L\nu}^{\alpha}({\bf G+k}) + \Theta({\bf G-G'})
* \f]
*
* APW-lo block:
* \f[
* O_{{\bf G'} j}^{\bf k} = \sum_{\nu'} a_{\ell_j m_j \nu'}^{\alpha_j *}({\bf G'+k})
* \langle u_{\ell_j \nu'}^{\alpha_j} | \phi_{\ell_j}^{\zeta_j \alpha_j} \rangle
* \f]
*
* lo-APW block:
* \f[
* O_{j' {\bf G}}^{\bf k} =
* \sum_{\nu'} \langle \phi_{\ell_{j'}}^{\zeta_{j'} \alpha_{j'}} | u_{\ell_{j'} \nu'}^{\alpha_{j'}} \rangle
* a_{\ell_{j'} m_{j'} \nu'}^{\alpha_{j'}}({\bf G+k})
* \f]
*
* lo-lo block:
* \f[
* O_{j' j}^{\bf k} = \langle \phi_{\ell_{j'}}^{\zeta_{j'} \alpha_{j'}} |
* \phi_{\ell_{j}}^{\zeta_{j} \alpha_{j}} \rangle \delta_{\alpha_{j'} \alpha_j}
* \delta_{\ell_{j'} \ell_j} \delta_{m_{j'} m_j}
* \f]
*/
template <device_t pu, electronic_structure_method_t basis>
inline void set_fv_h_o(K_point* kp,
Potential const& potential__,
dmatrix<double_complex>& h,
dmatrix<double_complex>& o) const;
/// Apply LAPW Hamiltonian and overlap to the trial wave-functions.
/** Check the documentation of Band::set_fv_h_o() for the expressions of Hamiltonian and overlap
* matrices and \ref basis for the definition of the LAPW+lo basis.
*
* For the set of wave-functions expanded in LAPW+lo basis (k-point index is dropped for simplicity)
* \f[
* \psi_{i} = \sum_{\mu} \phi_{\mu} C_{\mu i}
* \f]
* where \f$ \mu = \{ {\bf G}, j \} \f$ is a combined index of LAPW and local orbitals, we want to construct
* a subspace Hamiltonian and overlap matrices:
* \f[
* H_{i' i} = \langle \psi_{i'} | \hat H | \psi_i \rangle =
* \sum_{\mu' \mu} C_{\mu' i'}^{*} \langle \phi_{\mu'} | \hat H | \phi_{\mu} \rangle C_{\mu i} =
* \sum_{\mu'} C_{\mu' i'}^{*} h_{\mu' i}(\psi)
* \f]
* \f[
* O_{i' i} = \langle \psi_{i'} | \psi_i \rangle =
* \sum_{\mu' \mu} C_{\mu' i'}^{*} \langle \phi_{\mu'} | \phi_{\mu} \rangle C_{\mu i} =
* \sum_{\mu'} C_{\mu' i'}^{*} o_{\mu' i}(\psi)
* \f]
* where
* \f[
* h_{\mu' i}(\psi) = \sum_{\mu} \langle \phi_{\mu'} | \hat H | \phi_{\mu} \rangle C_{\mu i}
* \f]
* and
* \f[
* o_{\mu' i}(\psi) = \sum_{\mu} \langle \phi_{\mu'} | \phi_{\mu} \rangle C_{\mu i}
* \f]
* For the APW block of \f$ h_{\mu' i}(\psi) \f$ and \f$ o_{\mu' i}(\psi) \f$ we have:
* \f[
* h_{{\bf G'} i}(\psi) = \sum_{{\bf G}} \langle \phi_{\bf G'} | \hat H | \phi_{\bf G} \rangle C_{{\bf G} i} +
* \sum_{j} \langle \phi_{\bf G'} | \hat H | \phi_{j} \rangle C_{j i}
* \f]
* \f[
* o_{{\bf G'} i}(\psi) = \sum_{{\bf G}} \langle \phi_{\bf G'} | \phi_{\bf G} \rangle C_{{\bf G} i} +
* \sum_{j} \langle \phi_{\bf G'} | \phi_{j} \rangle C_{j i}
* \f]
* and for the lo block:
* \f[
* h_{j' i}(\psi) = \sum_{{\bf G}} \langle \phi_{j'} | \hat H | \phi_{\bf G} \rangle C_{{\bf G} i} +
* \sum_{j} \langle \phi_{j'} | \hat H | \phi_{j} \rangle C_{j i}
* \f]
* \f[
* o_{j' i}(\psi) = \sum_{{\bf G}} \langle \phi_{j'} | \phi_{\bf G} \rangle C_{{\bf G} i} +
* \sum_{j} \langle \phi_{j'} | \phi_{j} \rangle C_{j i}
* \f]
*
* APW-APW contribution, muffin-tin part:
* \f[
* h_{{\bf G'} i}(\psi) = \sum_{{\bf G}} \langle \phi_{\bf G'} | \hat H | \phi_{\bf G} \rangle C_{{\bf G} i} =
* \sum_{{\bf G}} \sum_{\alpha} \sum_{\xi'} a_{\xi'}^{\alpha *}({\bf G'}) b_{\xi'}^{\alpha}({\bf G})
* C_{{\bf G} i}
* \f]
* \f[
* o_{{\bf G'} i}(\psi) = \sum_{{\bf G}} \langle \phi_{\bf G'} | \phi_{\bf G} \rangle C_{{\bf G} i} =
* \sum_{{\bf G}} \sum_{\alpha} \sum_{\xi'} a_{\xi'}^{\alpha *}({\bf G'}) a_{\xi'}^{\alpha}({\bf G})
* C_{{\bf G} i}
* \f]
* APW-APW contribution, interstitial effective potential part:
* \f[
* h_{{\bf G'} i}(\psi) = \int \Theta({\bf r}) e^{-i{\bf G'}{\bf r}} V({\bf r}) \psi_{i}({\bf r}) d{\bf r}
* \f]
* This is done by transforming \f$ \psi_i({\bf G}) \f$ to real space, multiplying by the effective potential
* and the step function, and transforming the result back to the \f$ {\bf G} \f$ domain.
*
* APW-APW contribution, interstitial kinetic energy part:
* \f[
* h_{{\bf G'} i}(\psi) = \int \Theta({\bf r}) e^{-i{\bf G'}{\bf r}} \Big( -\frac{1}{2} \nabla \Big)
* \Big( \nabla \psi_{i}({\bf r}) \Big) d{\bf r}
* \f]
* and the gradient of the wave-function is computed with FFT as:
* \f[
* \Big( \nabla \psi_{i}({\bf r}) \Big) = \sum_{\bf G} i{\bf G}e^{i{\bf G}{\bf r}}\psi_i({\bf G})
* \f]
*
* APW-APW contribution, interstitial overlap:
* \f[
* o_{{\bf G'} i}(\psi) = \int \Theta({\bf r}) e^{-i{\bf G'}{\bf r}} \psi_{i}({\bf r}) d{\bf r}
* \f]
*
* APW-lo contribution:
* \f[
* h_{{\bf G'} i}(\psi) = \sum_{j} \langle \phi_{\bf G'} | \hat H | \phi_{j} \rangle C_{j i} =
* \sum_{j} C_{j i} \sum_{L'\nu'} a_{L'\nu'}^{\alpha_j *}({\bf G'})
* \langle u_{\ell' \nu'}^{\alpha_j}Y_{\ell' m'}|\hat h^{\alpha_j} | \phi_{\ell_j}^{\zeta_j \alpha_j} Y_{\ell_j m_j} \rangle =
* \sum_{j} C_{j i} \sum_{\xi'} a_{\xi'}^{\alpha_j *}({\bf G'}) h_{\xi' \xi_j}^{\alpha_j}
* \f]
* \f[
* o_{{\bf G'} i}(\psi) = \sum_{j} \langle \phi_{\bf G'} | \phi_{j} \rangle C_{j i} =
* \sum_{j} C_{j i} \sum_{L'\nu'} a_{L'\nu'}^{\alpha_j *}({\bf G'})
* \langle u_{\ell' \nu'}^{\alpha_j}Y_{\ell' m'}| \phi_{\ell_j}^{\zeta_j \alpha_j} Y_{\ell_j m_j} \rangle =
* \sum_{j} C_{j i} \sum_{\nu'} a_{\ell_j m_j \nu'}^{\alpha_j *}({\bf G'}) o_{\nu' \zeta_j \ell_j}^{\alpha_j}
* \f]
* lo-APW contribution:
* \f[
* h_{j' i}(\psi) = \sum_{\bf G} \langle \phi_{j'} | \hat H | \phi_{\bf G} \rangle C_{{\bf G} i} =
* \sum_{\bf G} C_{{\bf G} i} \sum_{L\nu} \langle \phi_{\ell_{j'}}^{\zeta_{j'} \alpha_{j'}} Y_{\ell_{j'} m_{j'}}
* |\hat h^{\alpha_{j'}} | u_{\ell \nu}^{\alpha_{j'}}Y_{\ell m} \rangle a_{L\nu}^{\alpha_{j'}}({\bf G}) =
* \sum_{\bf G} C_{{\bf G} i} \sum_{\xi} h_{\xi_{j'} \xi}^{\alpha_{j'}} a_{\xi}^{\alpha_{j'}}({\bf G})
* \f]
* \f[
* o_{j' i}(\psi) = \sum_{\bf G} \langle \phi_{j'} | \phi_{\bf G} \rangle C_{{\bf G} i} =
* \sum_{\bf G} C_{{\bf G} i} \sum_{L\nu} \langle \phi_{\ell_{j'}}^{\zeta_{j'} \alpha_{j'}} Y_{\ell_{j'} m_{j'}}
* | u_{\ell \nu}^{\alpha_{j'}}Y_{\ell m} \rangle a_{L\nu}^{\alpha_{j'}}({\bf G}) =
* \sum_{\bf G} C_{{\bf G} i} \sum_{\nu} o_{\zeta_{j'} \nu \ell_{j'}}^{\alpha_{j'}} a_{\ell_{j'} m_{j'} \nu}^{\alpha_{j'}}({\bf G})
* \f]
* lo-lo contribution:
* \f[
* h_{j' i}(\psi) = \sum_{j} \langle \phi_{j'} | \hat H | \phi_{j} \rangle C_{j i} = \sum_{j} C_{j i} h_{\xi_{j'} \xi_j}^{\alpha_j}
* \delta_{\alpha_j \alpha_{j'}}
* \f]
* \f[
* o_{j' i}(\psi) = \sum_{j} \langle \phi_{j'} | \phi_{j} \rangle C_{j i} = \sum_{j} C_{j i}
* o_{\zeta_{j'} \zeta_{j} \ell_j}^{\alpha_j}
* \delta_{\alpha_j \alpha_{j'}} \delta_{\ell_j \ell_{j'}} \delta_{m_j m_{j'}}
* \f]
*/
inline void apply_fv_h_o(K_point* kp__,
int nlo,
int N,
int n,
wave_functions& phi__,
wave_functions& hphi__,
wave_functions& ophi__) const;
/// Solve second-variational problem.
inline void solve_sv(K_point* kp,
Periodic_function<double>* effective_magnetic_field[3]) const;
/// Diagonalization of the full Hamiltonian (without second variation).
inline void solve_fd(K_point* kp,
Periodic_function<double>* effective_potential,
Periodic_function<double>* effective_magnetic_field[3]) const;
/// Solve \f$ \hat H \psi = E \psi \f$ and find eigen-states of the Hamiltonian.
inline void solve_for_kset(K_point_set& kset__,
Potential& potential__,
bool precompute__) const;
inline Eigenproblem const& std_evp_solver() const
{
return *std_evp_solver_;
}
inline Eigenproblem const& gen_evp_solver() const
{
return *gen_evp_solver_;
}
/// Get diagonal elements of LAPW Hamiltonian.
inline mdarray<double, 1> get_h_diag(K_point* kp__,
double v0__,
double theta0__) const;
/// Get diagonal elements of LAPW overlap.
inline mdarray<double, 1> get_o_diag(K_point* kp__,
double theta0__) const;
/// Get diagonal elements of pseudopotential Hamiltonian.
template <typename T>
inline mdarray<double, 1> get_h_diag(K_point* kp__,
int ispn__,
double v0__,
D_operator<T>& d_op__) const;
/// Get diagonal elements of pseudopotential overlap matrix.
template <typename T>
inline mdarray<double, 1> get_o_diag(K_point* kp__,
Q_operator<T>& q_op__) const;
inline void initialize_subspace(K_point_set& kset__,
Potential& potential__) const;
/// Initialize the wave-functions subspace.
template <typename T>
inline void initialize_subspace(K_point* kp__,
Periodic_function<double>* effective_potential__,
Periodic_function<double>* effective_magnetic_field[3],
int num_ao__,
std::vector<std::vector<Spline<double>>> const& rad_int__) const;
};
#include "Band/get_h_o_diag.hpp"
#include "Band/apply.hpp"
#include "Band/set_lapw_h_o.hpp"
#include "Band/residuals.hpp"
#include "Band/diagonalize.hpp"
#include "Band/initialize_subspace.hpp"
#include "Band/solve.hpp"
}
#endif // __BAND_H__
|
spmm.h
|
/*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/spmm.h
* \brief SPMM CPU kernel function header.
*/
#ifndef DGL_ARRAY_CPU_SPMM_H_
#define DGL_ARRAY_CPU_SPMM_H_
#include <dgl/array.h>
#include <dgl/bcast.h>
#include <limits>
#include <algorithm>
namespace dgl {
namespace aten {
namespace cpu {
/*!
* \brief CPU kernel of SpMM on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \note It uses a node-parallel strategy: different threads are responsible
*       for different destination nodes (rows of the CSR matrix).
*/
template <typename IdType, typename DType, typename Op>
void SpMMSumCsr(
const BcastOff& bcast,
const CSRMatrix& csr,
NDArray ufeat, NDArray efeat,
NDArray out) {
const bool has_idx = !IsNullArray(csr.data);
const IdType* indptr = csr.indptr.Ptr<IdType>();
const IdType* indices = csr.indices.Ptr<IdType>();
const IdType* edges = csr.data.Ptr<IdType>();
const DType* X = ufeat.Ptr<DType>();
const DType* W = efeat.Ptr<DType>();
int64_t dim = bcast.out_len,
lhs_dim = bcast.lhs_len,
rhs_dim = bcast.rhs_len;
DType* O = out.Ptr<DType>();
#pragma omp parallel for
for (IdType rid = 0; rid < csr.num_rows; ++rid) {
const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
DType *out_off = O + rid * dim;
std::fill(out_off, out_off + dim, 0);
for (IdType j = row_start; j < row_end; ++j) {
const IdType cid = indices[j];
const IdType eid = has_idx ? edges[j] : j;
for (int64_t k = 0; k < dim; ++k) {
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType *lhs_off =
Op::use_lhs ? X + cid * lhs_dim + lhs_add : nullptr;
const DType *rhs_off =
Op::use_rhs ? W + eid * rhs_dim + rhs_add : nullptr;
out_off[k] += Op::Call(lhs_off, rhs_off);
}
}
}
}
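/*!
 * \brief Usage sketch for SpMMSumCsr. This is illustrative only; it assumes
 *        the caller has prepared a CSRMatrix `csr`, the NDArrays `ufeat`,
 *        `efeat` and `out`, and a matching BcastOff `bcast`.
 * \code
 * // out[dst] = sum over incoming edges (src -> dst) of ufeat[src] * efeat[edge]
 * SpMMSumCsr<int32_t, float, op::Mul<float>>(bcast, csr, ufeat, efeat, out);
 * \endcode
 */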
/*!
* \brief CPU kernel of SpMM on Coo format.
* \param bcast Broadcast information.
* \param coo The Coo matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \note It uses an edge-parallel strategy: different threads are responsible
*       for different edges. Since several edges may share a destination node,
*       atomic operations are used in the reduction phase to avoid data hazards.
*/
template <typename IdType, typename DType, typename Op>
void SpMMSumCoo(
const BcastOff& bcast,
const COOMatrix& coo,
NDArray ufeat, NDArray efeat,
NDArray out) {
const bool has_idx = !IsNullArray(coo.data);
const IdType* row = coo.row.Ptr<IdType>();
const IdType* col = coo.col.Ptr<IdType>();
const IdType* edges = coo.data.Ptr<IdType>();
const DType* X = ufeat.Ptr<DType>();
const DType* W = efeat.Ptr<DType>();
int64_t dim = bcast.out_len,
lhs_dim = bcast.lhs_len,
rhs_dim = bcast.rhs_len;
DType* O = out.Ptr<DType>();
const int64_t nnz = coo.row->shape[0];
// fill zero elements
memset(O, 0, out.GetSize());
// spmm
#pragma omp parallel for
for (IdType i = 0; i < nnz; ++i) {
const IdType rid = row[i];
const IdType cid = col[i];
const IdType eid = has_idx? edges[i] : i;
DType* out_off = O + cid * dim;
for (int64_t k = 0; k < dim; ++k) {
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType* lhs_off = Op::use_lhs? X + rid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off = Op::use_rhs? W + eid * rhs_dim + rhs_add : nullptr;
const DType val = Op::Call(lhs_off, rhs_off);
if (val != 0) {
#pragma omp atomic
out_off[k] += val;
}
}
}
}
/*!
* \brief CPU kernel of SpMM-Min/Max on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes: the source node indices that
*        correspond to the minimum/maximum values of the reduction result on
*        destination nodes. Useful for computing gradients of the Min/Max reducer.
* \param arge Arg-Min/Max on edges: the edge indices that correspond to the
*        minimum/maximum values of the reduction result on destination nodes.
*        Useful for computing gradients of the Min/Max reducer.
* \note It uses a node-parallel strategy: different threads are responsible
*       for different destination nodes (rows of the CSR matrix).
* \note The result will contain infinity for zero-degree nodes.
*/
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCsr(
const BcastOff& bcast,
const CSRMatrix& csr,
NDArray ufeat, NDArray efeat,
NDArray out, NDArray argu, NDArray arge) {
const bool has_idx = !IsNullArray(csr.data);
const IdType* indptr = static_cast<IdType*>(csr.indptr->data);
const IdType* indices = static_cast<IdType*>(csr.indices->data);
const IdType* edges = has_idx ? static_cast<IdType*>(csr.data->data) : nullptr;
const DType* X = Op::use_lhs? static_cast<DType*>(ufeat->data) : nullptr;
const DType* W = Op::use_rhs? static_cast<DType*>(efeat->data) : nullptr;
const int64_t dim = bcast.out_len,
lhs_dim = bcast.lhs_len,
rhs_dim = bcast.rhs_len;
DType* O = static_cast<DType*>(out->data);
IdType* argX = Op::use_lhs? static_cast<IdType*>(argu->data) : nullptr;
IdType* argW = Op::use_rhs? static_cast<IdType*>(arge->data) : nullptr;
#pragma omp parallel for
for (IdType rid = 0; rid < csr.num_rows; ++rid) {
const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
DType* out_off = O + rid * dim;
/* argX/argW are nullptr when unused; guard the pointer arithmetic */
IdType* argx_off = Op::use_lhs ? argX + rid * dim : nullptr;
IdType* argw_off = Op::use_rhs ? argW + rid * dim : nullptr;
std::fill(out_off, out_off + dim, Cmp::zero);
if (Op::use_lhs)
std::fill(argx_off, argx_off + dim, 0);
if (Op::use_rhs)
std::fill(argw_off, argw_off + dim, 0);
for (IdType j = row_start; j < row_end; ++j) {
const IdType cid = indices[j];
const IdType eid = has_idx? edges[j] : j;
for (int64_t k = 0; k < dim; ++k) {
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType* lhs_off = Op::use_lhs? X + cid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off = Op::use_rhs? W + eid * rhs_dim + rhs_add : nullptr;
const DType val = Op::Call(lhs_off, rhs_off);
if (Cmp::Call(out_off[k], val)) {
out_off[k] = val;
if (Op::use_lhs)
argx_off[k] = cid;
if (Op::use_rhs)
argw_off[k] = eid;
}
}
}
}
}
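/*!
 * \brief Usage sketch for SpMMCmpCsr. Illustrative only; `bcast`, `csr` and
 *        the NDArrays are assumed to be prepared by the caller, with `argu`
 *        and `arge` shaped like `out`.
 * \code
 * // out[dst] = max over incoming edges of ufeat[src]; argu records the
 * // source node index that attains the maximum (arge is unused for CopyLhs).
 * SpMMCmpCsr<int32_t, float, op::CopyLhs<float>, op::Max<float>>(
 *     bcast, csr, ufeat, efeat, out, argu, arge);
 * \endcode
 */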
/*!
* \brief CPU kernel of SpMM-Min/Max on Coo format.
* \param bcast Broadcast information.
* \param coo The Coo matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes: the source node indices that
*        correspond to the minimum/maximum values of the reduction result on
*        destination nodes. Useful for computing gradients of the Min/Max reducer.
* \param arge Arg-Min/Max on edges: the edge indices that correspond to the
*        minimum/maximum values of the reduction result on destination nodes.
*        Useful for computing gradients of the Min/Max reducer.
* \note It uses an edge-parallel strategy: different threads are responsible
*       for different edges. Since several edges may share a destination node,
*       the compare-and-update of the output is guarded by a critical section.
* \note The result will contain infinity for zero-degree nodes.
*/
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCoo(
const BcastOff& bcast,
const COOMatrix& coo,
NDArray ufeat, NDArray efeat,
NDArray out, NDArray argu, NDArray arge) {
const bool has_idx = !IsNullArray(coo.data);
const IdType* row = static_cast<IdType*>(coo.row->data);
const IdType* col = static_cast<IdType*>(coo.col->data);
const IdType* edges = has_idx? static_cast<IdType*>(coo.data->data) : nullptr;
const DType* X = Op::use_lhs? static_cast<DType*>(ufeat->data) : nullptr;
const DType* W = Op::use_rhs? static_cast<DType*>(efeat->data) : nullptr;
const int64_t dim = bcast.out_len,
lhs_dim = bcast.lhs_len,
rhs_dim = bcast.rhs_len;
DType* O = static_cast<DType*>(out->data);
IdType* argX = Op::use_lhs? static_cast<IdType*>(argu->data) : nullptr;
IdType* argW = Op::use_rhs? static_cast<IdType*>(arge->data) : nullptr;
const int64_t nnz = coo.row->shape[0];
// fill zero elements
std::fill(O, O + out.NumElements(), Cmp::zero);
// spmm
#pragma omp parallel for
for (IdType i = 0; i < nnz; ++i) {
const IdType rid = row[i];
const IdType cid = col[i];
const IdType eid = has_idx? edges[i] : i;
DType* out_off = O + cid * dim;
IdType* argx_off = Op::use_lhs? argX + cid * dim : nullptr;
IdType* argw_off = Op::use_rhs? argW + cid * dim : nullptr;
for (int64_t k = 0; k < dim; ++k) {
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType* lhs_off = Op::use_lhs? X + rid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off = Op::use_rhs? W + eid * rhs_dim + rhs_add : nullptr;
const DType val = Op::Call(lhs_off, rhs_off);
#pragma omp critical
if (Cmp::Call(out_off[k], val)) {
out_off[k] = val;
if (Op::use_lhs)
argx_off[k] = rid;
if (Op::use_rhs)
argw_off[k] = eid;
}
}
}
}
namespace op {
//////////////////////////////// binary operators on CPU ////////////////////////////////
template <typename DType>
struct Add {
static constexpr bool use_lhs = true;
static constexpr bool use_rhs = true;
inline static DType Call(const DType* lhs_off, const DType* rhs_off) {
return *lhs_off + *rhs_off;
}
};
template <typename DType> constexpr bool Add<DType>::use_lhs;
template <typename DType> constexpr bool Add<DType>::use_rhs;
template <typename DType>
struct Sub {
static constexpr bool use_lhs = true;
static constexpr bool use_rhs = true;
inline static DType Call(const DType* lhs_off, const DType* rhs_off) {
return *lhs_off - *rhs_off;
}
};
template <typename DType> constexpr bool Sub<DType>::use_lhs;
template <typename DType> constexpr bool Sub<DType>::use_rhs;
template <typename DType>
struct Mul {
static constexpr bool use_lhs = true;
static constexpr bool use_rhs = true;
inline static DType Call(const DType* lhs_off, const DType* rhs_off) {
return *lhs_off * *rhs_off;
}
};
template <typename DType> constexpr bool Mul<DType>::use_lhs;
template <typename DType> constexpr bool Mul<DType>::use_rhs;
template <typename DType>
struct Div {
static constexpr bool use_lhs = true;
static constexpr bool use_rhs = true;
inline static DType Call(const DType* lhs_off, const DType* rhs_off) {
return *lhs_off / *rhs_off;
}
};
template <typename DType> constexpr bool Div<DType>::use_lhs;
template <typename DType> constexpr bool Div<DType>::use_rhs;
template <typename DType>
struct CopyLhs {
static constexpr bool use_lhs = true;
static constexpr bool use_rhs = false;
inline static DType Call(const DType* lhs_off, const DType* ) {
return *lhs_off;
}
};
template <typename DType> constexpr bool CopyLhs<DType>::use_lhs;
template <typename DType> constexpr bool CopyLhs<DType>::use_rhs;
template <typename DType>
struct CopyRhs {
static constexpr bool use_lhs = false;
static constexpr bool use_rhs = true;
inline static DType Call(const DType* , const DType* rhs_off) {
return *rhs_off;
}
};
template <typename DType> constexpr bool CopyRhs<DType>::use_lhs;
template <typename DType> constexpr bool CopyRhs<DType>::use_rhs;
//////////////////////////////// Reduce operators on CPU ////////////////////////////////
template <typename DType>
struct Max {
static constexpr DType zero = -std::numeric_limits<DType>::infinity();
// return true if accum should be replaced
inline static bool Call(DType accum, DType val) {
return accum < val;
}
};
template <typename DType> constexpr DType Max<DType>::zero;
template <typename DType>
struct Min {
static constexpr DType zero = std::numeric_limits<DType>::infinity();
// return true if accum should be replaced
inline static bool Call(DType accum, DType val) {
return accum > val;
}
};
template <typename DType> constexpr DType Min<DType>::zero;
#define SWITCH_OP(op, Op, ...) \
do { \
if ((op) == "add") { \
typedef dgl::aten::cpu::op::Add<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "sub") { \
typedef dgl::aten::cpu::op::Sub<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "mul") { \
typedef dgl::aten::cpu::op::Mul<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "div") { \
typedef dgl::aten::cpu::op::Div<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "copy_lhs") { \
typedef dgl::aten::cpu::op::CopyLhs<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "copy_rhs") { \
typedef dgl::aten::cpu::op::CopyRhs<DType> Op; \
{ __VA_ARGS__ } \
} else { \
LOG(FATAL) << "Unsupported SpMM binary operator: " << op; \
} \
} while (0)
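/*!
 * \brief Usage sketch for SWITCH_OP, dispatching a runtime op string to a
 *        statically typed kernel. Illustrative only; it assumes `op`, `bcast`,
 *        `csr`, `ufeat`, `efeat`, `out` and the IdType/DType template
 *        parameters are in scope at the call site.
 * \code
 * SWITCH_OP(op, Op, {
 *   SpMMSumCsr<IdType, DType, Op>(bcast, csr, ufeat, efeat, out);
 * });
 * \endcode
 */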
} // namespace op
} // namespace cpu
} // namespace aten
} // namespace dgl
#endif // DGL_ARRAY_CPU_SPMM_H_
|
fopenmp-extensions.c
|
// RUN: %clang_cc1 -verify=ompx -fopenmp %s
// RUN: %clang_cc1 -verify=ompx -fopenmp-simd %s
// RUN: %clang_cc1 -verify=ompx -fopenmp -fopenmp-extensions %s
// RUN: %clang_cc1 -verify=ompx -fopenmp-simd -fopenmp-extensions %s
// RUN: %clang_cc1 -verify=omp -fopenmp -fno-openmp-extensions %s
// RUN: %clang_cc1 -verify=omp -fopenmp-simd -fno-openmp-extensions %s
// RUN: %clang_cc1 -verify=omp -fopenmp \
// RUN: -fopenmp-extensions -fno-openmp-extensions %s
// RUN: %clang_cc1 -verify=omp -fopenmp-simd \
// RUN: -fopenmp-extensions -fno-openmp-extensions %s
// RUN: %clang_cc1 -verify=ompx -fopenmp \
// RUN: -fno-openmp-extensions -fopenmp-extensions %s
// RUN: %clang_cc1 -verify=ompx -fopenmp-simd \
// RUN: -fno-openmp-extensions -fopenmp-extensions %s
void foo() {
int x;
// ompx-no-diagnostics
// omp-error@+1 {{incorrect map type modifier}}
#pragma omp target map(ompx_hold, alloc: x)
;
}
|
ep.c
|
/*[]*/
struct __sFILEX ;
/*[]*/
int printf(const char *restrict , ...);
/*[]*/
extern double log(double );
/*[]*/
extern double fabs(double );
/*[]*/
extern double pow(double , double );
/*[]*/
extern double sqrt(double );
/*[]*/
typedef int boolean;
/*[]*/
extern double randlc(double *, double );
/*[]*/
extern void vranlc(int , double * , double , double *);
/*[]*/
extern void timer_clear(int );
/*[]*/
extern void timer_start(int );
/*[]*/
extern void timer_stop(int );
/*[]*/
extern double timer_read(int );
/*[]*/
extern void c_print_results(char *name, char class , int n1 , int n2 , int n3 , int niter , int nthreads , double t , double mops , char *optype , int passed_verification , char *npbversion , char *compiletime , char *cc , char *clink , char *c_lib , char *c_inc , char *cflags , char *clinkflags , char *rand);
/*[]*/
static double x[2 * (1 << 16)];
#pragma omp threadprivate(x)
/*[]*/
static double q[10];
/*[]*/
/*[]*/
/*[]*/
int main(int argc, char **argv) {
/*[]*/
/*[]*/
double Mops;
/*[]*/
double t1;
/*[]*/
double t2;
/*[]*/
double sx;
/*[]*/
double sy;
/*[]*/
double tm;
/*[]*/
double an;
/*[]*/
double tt;
/*[]*/
double gc;
/*[]*/
double dum[3] = {1.0, 1.0 , 1.0};
/*[]*/
int np;
/*[]*/
int i;
/*[]*/
int k;
/*[]*/
int nit;
/*[]*/
int k_offset;
/*[]*/
int j;
/*[]*/
int nthreads = 1;
/*[]*/
boolean verified;
/*[]*/
char size[13 + 1];
/*[]*/
printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version" " - EP Benchmark\n");
/*[]*/
/*[]*/
int _imopVarPre149;
/*[]*/
double _imopVarPre150;
/*[]*/
int _imopVarPre153;
/*[]*/
int _imopVarPre154;
/*[]*/
unsigned int _imopVarPre155;
/*[]*/
_imopVarPre149 = 24 + 1;
/*[]*/
_imopVarPre150 = pow(2.0, _imopVarPre149);
/*[]*/
/*[]*/
_imopVarPre153 = 2 > 1;
/*[]*/
/*[]*/
if (_imopVarPre153) {
/*[]*/
/*[]*/
_imopVarPre154 = 1;
} else {
/*[]*/
/*[]*/
_imopVarPre154 = 0;
}
/*[]*/
_imopVarPre155 = __builtin_object_size(size, _imopVarPre154);
/*[]*/
/*[]*/
__builtin___sprintf_chk(size, 0, _imopVarPre155, "%12.0f", _imopVarPre150);
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (j = 13; j >= 1; j--) {
/*[]*/
/*[]*/
/*[]*/
if (size[j] == '.') {
/*[]*/
/*[]*/
size[j] = ' ';
}
}
/*[]*/
printf(" Number of random numbers generated: %13s\n", size);
/*[]*/
/*[]*/
verified = 0;
/*[]*/
np = (1 << (24 - 16));
/*[]*/
double *_imopVarPre159;
/*[]*/
double _imopVarPre160;
/*[]*/
double *_imopVarPre161;
/*[]*/
_imopVarPre159 = &(dum[2]);
/*[]*/
_imopVarPre160 = dum[1];
/*[]*/
_imopVarPre161 = &(dum[0]);
/*[]*/
vranlc(0, _imopVarPre161, _imopVarPre160, _imopVarPre159);
/*[]*/
/*[]*/
double _imopVarPre164;
/*[]*/
double *_imopVarPre165;
/*[]*/
double _imopVarPre166;
/*[]*/
_imopVarPre164 = dum[2];
/*[]*/
_imopVarPre165 = &(dum[1]);
/*[]*/
_imopVarPre166 = randlc(_imopVarPre165, _imopVarPre164);
/*[]*/
/*[]*/
dum[0] = _imopVarPre166;
/*[1]*/
#pragma omp parallel default(shared) private(i)
{
/*[1]*/
/*[1]*/
#pragma omp for nowait
/*[1]*/
/*[1]*/
/*[1]*/
for (i = 0; i < 2 * (1 << 16); i++) {
/*[1]*/
/*[1]*/
x[i] = -1.0e99;
}
}
/*[]*/
int _imopVarPre201;
/*[]*/
double _imopVarPre202;
/*[]*/
double _imopVarPre203;
/*[]*/
double _imopVarPre204;
/*[]*/
double _imopVarPre205;
/*[]*/
_imopVarPre201 = (1.0 > 1.0);
/*[]*/
/*[]*/
if (_imopVarPre201) {
/*[]*/
/*[]*/
_imopVarPre202 = 1.0;
} else {
/*[]*/
/*[]*/
_imopVarPre202 = 1.0;
}
/*[]*/
_imopVarPre203 = fabs(_imopVarPre202);
/*[]*/
/*[]*/
_imopVarPre204 = sqrt(_imopVarPre203);
/*[]*/
/*[]*/
_imopVarPre205 = log(_imopVarPre204);
/*[]*/
/*[]*/
Mops = _imopVarPre205;
/*[]*/
timer_clear(1);
/*[]*/
/*[]*/
timer_clear(2);
/*[]*/
/*[]*/
timer_clear(3);
/*[]*/
/*[]*/
timer_start(1);
/*[]*/
/*[]*/
double *_imopVarPre207;
/*[]*/
_imopVarPre207 = &t1;
/*[]*/
vranlc(0, _imopVarPre207, 1220703125.0, x);
/*[]*/
/*[]*/
t1 = 1220703125.0;
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (i = 1; i <= 16 + 1; i++) {
/*[]*/
/*[]*/
double *_imopVarPre209;
/*[]*/
double _imopVarPre210;
/*[]*/
_imopVarPre209 = &t1;
/*[]*/
_imopVarPre210 = randlc(_imopVarPre209, t1);
/*[]*/
/*[]*/
t2 = _imopVarPre210;
}
/*[]*/
an = t1;
/*[]*/
tt = 271828183.0;
/*[]*/
gc = 0.0;
/*[]*/
sx = 0.0;
/*[]*/
sy = 0.0;
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (i = 0; i <= 10 - 1; i++) {
/*[]*/
/*[]*/
q[i] = 0.0;
}
/*[]*/
k_offset = -1;
/*[2]*/
#pragma omp parallel copyin(x)
{
/*[2]*/
/*[2]*/
double t1;
/*[2]*/
double t2;
/*[2]*/
double t3;
/*[2]*/
double t4;
/*[2]*/
double x1;
/*[2]*/
double x2;
/*[2]*/
int kk;
/*[2]*/
int i;
/*[2]*/
int ik;
/*[2]*/
int l;
/*[2]*/
double qq[10];
/*[2]*/
/*[2]*/
/*[2]*/
/*[2]*/
for (i = 0; i < 10; i++) {
/*[2]*/
/*[2]*/
qq[i] = 0.0;
}
/*[2]*/
#pragma omp for reduction(+:sx, sy) schedule(static) nowait
/*[2]*/
/*[2]*/
/*[2]*/
for (k = 1; k <= np; k++) {
/*[2]*/
/*[2]*/
kk = k_offset + k;
/*[2]*/
t1 = 271828183.0;
/*[2]*/
t2 = an;
/*[2]*/
/*[2]*/
/*[2]*/
/*[2]*/
for (i = 1; i <= 100; i++) {
/*[2]*/
/*[2]*/
ik = kk / 2;
/*[2]*/
/*[2]*/
if (2 * ik != kk) {
/*[2]*/
/*[2]*/
double *_imopVarPre212;
/*[2]*/
double _imopVarPre213;
/*[2]*/
_imopVarPre212 = &t1;
/*[2]*/
_imopVarPre213 = randlc(_imopVarPre212, t2);
/*[2]*/
/*[2]*/
t3 = _imopVarPre213;
}
/*[2]*/
/*[2]*/
if (ik == 0) {
/*[2]*/
/*[2]*/
break;
}
/*[2]*/
double *_imopVarPre215;
/*[2]*/
double _imopVarPre216;
/*[2]*/
_imopVarPre215 = &t2;
/*[2]*/
_imopVarPre216 = randlc(_imopVarPre215, t2);
/*[2]*/
/*[2]*/
t3 = _imopVarPre216;
/*[2]*/
kk = ik;
}
/*[2]*/
/*[2]*/
if (0 == 1) {
/*[2]*/
/*[2]*/
timer_start(3);
/*[2]*/
}
/*[2]*/
double *_imopVarPre220;
/*[2]*/
double *_imopVarPre221;
/*[2]*/
int _imopVarPre222;
/*[2]*/
_imopVarPre220 = x - 1;
/*[2]*/
_imopVarPre221 = &t1;
/*[2]*/
_imopVarPre222 = 2 * (1 << 16);
/*[2]*/
vranlc(_imopVarPre222, _imopVarPre221, 1220703125.0, _imopVarPre220);
/*[2]*/
/*[2]*/
/*[2]*/
if (0 == 1) {
/*[2]*/
/*[2]*/
timer_stop(3);
/*[2]*/
}
/*[2]*/
/*[2]*/
if (0 == 1) {
/*[2]*/
/*[2]*/
timer_start(2);
/*[2]*/
}
/*[2]*/
/*[2]*/
/*[2]*/
/*[2]*/
for (i = 0; i < (1 << 16); i++) {
/*[2]*/
/*[2]*/
x1 = 2.0 * x[2 * i] - 1.0;
/*[2]*/
x2 = 2.0 * x[2 * i + 1] - 1.0;
/*[2]*/
t1 = (x1 * x1) + (x2 * x2);
/*[2]*/
/*[2]*/
if (t1 <= 1.0) {
/*[2]*/
/*[2]*/
double _imopVarPre227;
/*[2]*/
double _imopVarPre228;
/*[2]*/
double _imopVarPre229;
/*[2]*/
_imopVarPre227 = log(t1);
/*[2]*/
/*[2]*/
_imopVarPre228 = -2.0 * _imopVarPre227 / t1;
/*[2]*/
_imopVarPre229 = sqrt(_imopVarPre228);
/*[2]*/
/*[2]*/
t2 = _imopVarPre229;
/*[2]*/
t3 = (x1 * t2);
/*[2]*/
t4 = (x2 * t2);
/*[2]*/
double _imopVarPre250;
/*[2]*/
double _imopVarPre251;
/*[2]*/
int _imopVarPre252;
/*[2]*/
double _imopVarPre253;
/*[2]*/
double _imopVarPre255;
/*[2]*/
double _imopVarPre257;
/*[2]*/
_imopVarPre250 = fabs(t3);
/*[2]*/
/*[2]*/
_imopVarPre251 = fabs(t4);
/*[2]*/
/*[2]*/
_imopVarPre252 = (_imopVarPre250 > _imopVarPre251);
/*[2]*/
/*[2]*/
if (_imopVarPre252) {
/*[2]*/
/*[2]*/
_imopVarPre255 = fabs(t3);
/*[2]*/
/*[2]*/
_imopVarPre253 = _imopVarPre255;
} else {
/*[2]*/
/*[2]*/
_imopVarPre257 = fabs(t4);
/*[2]*/
/*[2]*/
_imopVarPre253 = _imopVarPre257;
}
/*[2]*/
l = _imopVarPre253;
/*[2]*/
qq[l] += 1.0;
/*[2]*/
sx = sx + t3;
/*[2]*/
sy = sy + t4;
}
}
/*[2]*/
/*[2]*/
if (0 == 1) {
/*[2]*/
/*[2]*/
timer_stop(2);
/*[2]*/
}
}
/*[2]*/
// #pragma omp dummyFlush CRITICAL_START
/*[2]*/
#pragma omp critical
{
/*[2]*/
/*[2]*/
/*[2]*/
/*[2]*/
/*[2]*/
for (i = 0; i <= 10 - 1; i++) {
/*[2]*/
/*[2]*/
q[i] += qq[i];
}
}
/*[2]*/
// #pragma omp dummyFlush CRITICAL_END
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (i = 0; i <= 10 - 1; i++) {
/*[]*/
/*[]*/
gc = gc + q[i];
}
/*[]*/
timer_stop(1);
/*[]*/
/*[]*/
tm = timer_read(1);
/*[]*/
/*[]*/
nit = 0;
/*[]*/
/*[]*/
if (24 == 24) {
/*[]*/
/*[]*/
double _imopVarPre269;
/*[]*/
double _imopVarPre270;
/*[]*/
int _imopVarPre271;
/*[]*/
double _imopVarPre274;
/*[]*/
double _imopVarPre275;
/*[]*/
_imopVarPre269 = (sx - (-3.247834652034740e3)) / sx;
/*[]*/
_imopVarPre270 = fabs(_imopVarPre269);
/*[]*/
/*[]*/
_imopVarPre271 = (_imopVarPre270 <= 1.0e-8);
/*[]*/
/*[]*/
if (_imopVarPre271) {
/*[]*/
/*[]*/
_imopVarPre274 = (sy - (-6.958407078382297e3)) / sy;
/*[]*/
_imopVarPre275 = fabs(_imopVarPre274);
/*[]*/
/*[]*/
_imopVarPre271 = (_imopVarPre275 <= 1.0e-8);
}
/*[]*/
/*[]*/
if (_imopVarPre271) {
/*[]*/
/*[]*/
verified = 1;
}
} else {
/*[]*/
/*[]*/
/*[]*/
if (24 == 25) {
/*[]*/
/*[]*/
double _imopVarPre287;
/*[]*/
double _imopVarPre288;
/*[]*/
int _imopVarPre289;
/*[]*/
double _imopVarPre292;
/*[]*/
double _imopVarPre293;
/*[]*/
_imopVarPre287 = (sx - (-2.863319731645753e3)) / sx;
/*[]*/
_imopVarPre288 = fabs(_imopVarPre287);
/*[]*/
/*[]*/
_imopVarPre289 = (_imopVarPre288 <= 1.0e-8);
/*[]*/
/*[]*/
if (_imopVarPre289) {
/*[]*/
/*[]*/
_imopVarPre292 = (sy - (-6.320053679109499e3)) / sy;
/*[]*/
_imopVarPre293 = fabs(_imopVarPre292);
/*[]*/
/*[]*/
_imopVarPre289 = (_imopVarPre293 <= 1.0e-8);
}
/*[]*/
/*[]*/
if (_imopVarPre289) {
/*[]*/
/*[]*/
verified = 1;
}
} else {
/*[]*/
/*[]*/
/*[]*/
if (24 == 28) {
/*[]*/
/*[]*/
double _imopVarPre305;
/*[]*/
double _imopVarPre306;
/*[]*/
int _imopVarPre307;
/*[]*/
double _imopVarPre310;
/*[]*/
double _imopVarPre311;
/*[]*/
_imopVarPre305 = (sx - (-4.295875165629892e3)) / sx;
/*[]*/
_imopVarPre306 = fabs(_imopVarPre305);
/*[]*/
/*[]*/
_imopVarPre307 = (_imopVarPre306 <= 1.0e-8);
/*[]*/
/*[]*/
if (_imopVarPre307) {
/*[]*/
/*[]*/
_imopVarPre310 = (sy - (-1.580732573678431e4)) / sy;
/*[]*/
_imopVarPre311 = fabs(_imopVarPre310);
/*[]*/
/*[]*/
_imopVarPre307 = (_imopVarPre311 <= 1.0e-8);
}
/*[]*/
/*[]*/
if (_imopVarPre307) {
/*[]*/
/*[]*/
verified = 1;
}
} else {
/*[]*/
/*[]*/
/*[]*/
if (24 == 30) {
/*[]*/
/*[]*/
double _imopVarPre323;
/*[]*/
double _imopVarPre324;
/*[]*/
int _imopVarPre325;
/*[]*/
double _imopVarPre328;
/*[]*/
double _imopVarPre329;
/*[]*/
_imopVarPre323 = (sx - 4.033815542441498e4) / sx;
/*[]*/
_imopVarPre324 = fabs(_imopVarPre323);
/*[]*/
/*[]*/
_imopVarPre325 = (_imopVarPre324 <= 1.0e-8);
/*[]*/
/*[]*/
if (_imopVarPre325) {
/*[]*/
/*[]*/
_imopVarPre328 = (sy - (-2.660669192809235e4)) / sy;
/*[]*/
_imopVarPre329 = fabs(_imopVarPre328);
/*[]*/
/*[]*/
_imopVarPre325 = (_imopVarPre329 <= 1.0e-8);
}
/*[]*/
/*[]*/
if (_imopVarPre325) {
/*[]*/
/*[]*/
verified = 1;
}
} else {
/*[]*/
/*[]*/
/*[]*/
if (24 == 32) {
/*[]*/
/*[]*/
double _imopVarPre341;
/*[]*/
double _imopVarPre342;
/*[]*/
int _imopVarPre343;
/*[]*/
double _imopVarPre346;
/*[]*/
double _imopVarPre347;
/*[]*/
_imopVarPre341 = (sx - 4.764367927995374e4) / sx;
/*[]*/
_imopVarPre342 = fabs(_imopVarPre341);
/*[]*/
/*[]*/
_imopVarPre343 = (_imopVarPre342 <= 1.0e-8);
/*[]*/
/*[]*/
if (_imopVarPre343) {
/*[]*/
/*[]*/
_imopVarPre346 = (sy - (-8.084072988043731e4)) / sy;
/*[]*/
_imopVarPre347 = fabs(_imopVarPre346);
/*[]*/
/*[]*/
_imopVarPre343 = (_imopVarPre347 <= 1.0e-8);
}
/*[]*/
/*[]*/
if (_imopVarPre343) {
/*[]*/
/*[]*/
verified = 1;
}
}
}
}
}
}
/*[]*/
int _imopVarPre350;
/*[]*/
double _imopVarPre351;
/*[]*/
_imopVarPre350 = 24 + 1;
/*[]*/
_imopVarPre351 = pow(2.0, _imopVarPre350);
/*[]*/
/*[]*/
Mops = _imopVarPre351 / tm / 1000000.0;
/*[]*/
printf("EP Benchmark Results: \n" "CPU Time = %10.4f\n" "N = 2^%5d\n" "No. Gaussian Pairs = %15.0f\n" "Sums = %25.15e %25.15e\n" "Counts:\n", tm, 24, gc, sx, sy);
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (i = 0; i <= 10 - 1; i++) {
/*[]*/
/*[]*/
double _imopVarPre353;
/*[]*/
_imopVarPre353 = q[i];
/*[]*/
printf("%3d %15.0f\n", i, _imopVarPre353);
/*[]*/
}
/*[]*/
int _imopVarPre355;
/*[]*/
_imopVarPre355 = 24 + 1;
/*[]*/
c_print_results("EP", 'S', _imopVarPre355, 0, 0, nit, nthreads, tm, Mops, "Random numbers generated", verified, "3.0 structured", "21 Jul 2017", "gcc", "gcc", "(none)", "-I../common", "-O3 -fopenmp", "-O3 -fopenmp", "randdp");
/*[]*/
/*[]*/
/*[]*/
if (0 == 1) {
/*[]*/
/*[]*/
double _imopVarPre357;
/*[]*/
_imopVarPre357 = timer_read(1);
/*[]*/
/*[]*/
printf("Total time: %f", _imopVarPre357);
/*[]*/
/*[]*/
double _imopVarPre359;
/*[]*/
_imopVarPre359 = timer_read(2);
/*[]*/
/*[]*/
printf("Gaussian pairs: %f", _imopVarPre359);
/*[]*/
/*[]*/
double _imopVarPre361;
/*[]*/
_imopVarPre361 = timer_read(3);
/*[]*/
/*[]*/
printf("Random numbers: %f", _imopVarPre361);
/*[]*/
}
}
|
commondraw.c
|
/********************************************************************[libaroma]*
* Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*______________________________________________________________________________
*
* Filename : commondraw.c
* Description : common drawing
*
* + This is part of libaroma, an embedded ui toolkit.
* + 06/04/15 - Author(s): Ahmad Amarullah
*
*/
#ifndef __libaroma_commondraw_c__
#define __libaroma_commondraw_c__
#include <aroma_internal.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Function : libaroma_draw_limit
* Return Value: int
* Descriptions: get limit position
*/
int libaroma_draw_limit(
int x, int max) {
if (x<0) {
return 0;
}
if (x>=max) {
return max-1;
}
return x;
} /* End of libaroma_draw_limit */
/*
* Function : libaroma_draw_limited
* Return Value: byte
* Descriptions: is draw position limited/overflow
*/
byte libaroma_draw_limited(
int x, int max) {
return ((x < 0) || (x >= max) ? 1 : 0);
} /* End of libaroma_draw_limited */
/*
* Function : libaroma_draw_ex2
* Return Value: byte
* Descriptions: canvas drawing
*/
byte libaroma_draw_ex2(
LIBAROMA_CANVASP dst,
LIBAROMA_CANVASP src,
int dx, int dy,
int sx, int sy,
int sw, int sh,
byte draw_flags,
byte opacity,
byte ismask,
word maskcolor
) {
if (src == NULL) {
ALOGW("libaroma_draw_ex1 src = NULL");
return 0;
}
if (dst == NULL) {
dst = libaroma_fb()->canvas;
}
if ((dx >= dst->w) || (dy >= dst->h)) {
ALOGW("libaroma_draw_ex1 dx/dy bigger that destination size");
return 0;
}
if (opacity==0) {
return 1; /* No Need Any Process */
}
byte useAlpha = (draw_flags&LIBAROMA_DRAW_WITH_ALPHA)?1:0;
byte noDither = (draw_flags&LIBAROMA_DRAW_NODITHER)?1:0;
byte toBlack = (draw_flags&LIBAROMA_DRAW_TO_BLACK)?1:0;
/* fix positions */
if (sx < 0) {
dx += abs(sx);
sw -= abs(sx);
sx = 0;
}
if (sy < 0) {
dy += abs(sy);
sh -= abs(sy);
sy = 0;
}
/* fix size */
if (sw + sx >= src->w) {
sw -= (sw + sx) - src->w;
}
if (sh + sy >= src->h) {
sh -= (sh + sy) - src->h;
}
if ((sw <= 0) || (sh <= 0)) {
ALOGW("libaroma_draw_ex1 calculated sw/sh < 1");
return 0;
}
/* set calculated units */
int sr_w = sw;
int sr_h = sh;
int sr_x = sx;
int sr_y = sy;
int ds_x = dx;
int ds_y = dy;
/* fix destination */
if (dx < 0) {
int ndx = abs(dx);
sr_x += ndx; /* ndx is already non-negative */
sr_w -= ndx;
ds_x = 0;
}
if (dy < 0) {
int ndy = abs(dy);
sr_y += ndy;
sr_h -= ndy;
ds_y = 0;
}
/* fix source size */
if (sr_w + dx > dst->w) {
sr_w -= (sr_w + dx) - dst->w;
}
if (sr_h + dy > dst->h) {
sr_h -= (sr_h + dy) - dst->h;
}
/* prepare loop data */
int y;
int pos_sr_x = sr_x * 2;
int pos_ds_x = ds_x * 2;
int pos_sc_w = src->l * 2;
int pos_dc_w = dst->l * 2;
int copy_sz = sr_w * 2;
byte * src_data = ((byte *) src->data);
byte * dst_data = ((byte *) dst->data);
if (useAlpha) {
if (src->alpha == NULL) {
useAlpha = 0;
}
}
if (!useAlpha){
ismask=0;
}
if (opacity == 0xff) {
if (useAlpha) {
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (y = 0; y < sr_h; y++) {
wordp dst_mem = (wordp) (dst_data+((ds_y + y)*pos_dc_w)+pos_ds_x);
if (ismask){
libaroma_alpha_mono(
sr_w, dst_mem, dst_mem, maskcolor,
(bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
}
else{
wordp src_mem = (wordp) (src_data+((sr_y + y)*pos_sc_w)+pos_sr_x);
if (noDither){
libaroma_alpha_px(
sr_w, dst_mem, dst_mem,
src_mem, (bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
}
else{
libaroma_alpha_px_line(
y, sr_w, dst_mem, dst_mem,
src_mem, (bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
}
}
}
}
else {
/* Copy Data Directly */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (y = 0; y < sr_h; y++) {
memcpy(
dst_data + ((ds_y + y)*pos_dc_w) + pos_ds_x,
src_data + ((sr_y + y)*pos_sc_w) + pos_sr_x,
copy_sz
);
}
}
}
else {
if (useAlpha) {
/* Blend Destination with Source */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (y = 0; y < sr_h; y++) {
wordp tmp_dst = (wordp) malloc(sr_w * 2);
wordp dst_mem = (wordp) (dst_data + ((ds_y + y) * pos_dc_w) + pos_ds_x);
if (ismask){
libaroma_alpha_mono(
sr_w, tmp_dst, dst_mem, maskcolor,
(bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
libaroma_alpha_const(
sr_w, dst_mem, dst_mem, tmp_dst, opacity
);
}
else{
wordp src_mem = (wordp) (src_data+((sr_y + y)*pos_sc_w)+pos_sr_x);
if (toBlack){
libaroma_alpha_px(
sr_w, tmp_dst, dst_mem, src_mem,
(bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
libaroma_alpha_black(sr_w, dst_mem, tmp_dst, opacity);
}
else if (noDither){
libaroma_alpha_px(
sr_w, tmp_dst, dst_mem, src_mem,
(bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
libaroma_alpha_const(
sr_w, dst_mem, dst_mem, tmp_dst, opacity
);
}
else{
libaroma_alpha_px_line(
y, sr_w, tmp_dst, dst_mem, src_mem,
(bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
libaroma_alpha_const_line(
y, sr_w, dst_mem, dst_mem, tmp_dst, opacity
);
}
}
free(tmp_dst);
}
}
else {
/* Blend Data Directly */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (y = 0; y < sr_h; y++) {
wordp dst_mem = (wordp) (dst_data + ((ds_y + y) * pos_dc_w) + pos_ds_x);
wordp src_mem = (wordp) (src_data + ((sr_y + y) * pos_sc_w) + pos_sr_x);
if (toBlack){
libaroma_alpha_black(sr_w, dst_mem, src_mem, opacity);
}
else if (noDither){
libaroma_alpha_const(
sr_w, dst_mem, dst_mem, src_mem, opacity
);
}
else{
libaroma_alpha_const_line(
y, sr_w, dst_mem, dst_mem, src_mem, opacity
);
}
}
}
}
return 1;
} /* End of libaroma_draw_ex2 */
/*
* Function : libaroma_draw_rect
* Return Value: byte
* Descriptions: draw filled rectangle
*/
byte libaroma_draw_rect(
LIBAROMA_CANVASP dst,
int x, int y, int w, int h,
word color, byte alpha) {
if (dst == NULL) {
dst = libaroma_fb()->canvas;
}
if (x < 0) {
x = 0;
}
if (y < 0) {
y = 0;
}
/* check for valid x/y */
if (x > dst->w || y > dst->h){
ALOGW("libaroma_draw_rect x/y (%d/%d) greater than dest size (%dx%d)", x, y, dst->w, dst->h);
return 0;
}
/* fix position */
int x2 = x + w;
int y2 = y + h;
if (x2 > dst->w) {
x2 = dst->w;
}
if (y2 > dst->h) {
y2 = dst->h;
}
/* fixed size */
w = x2 - x;
h = y2 - y;
/* draw */
int dy;
if (alpha == 0xff) {
wordp datapos = dst->data + x;
#ifdef libaroma_memset16
for (dy = y; dy < y2; dy++) {
wordp linepos = datapos + (dy * dst->l);
libaroma_color_set(linepos,color,w);
}
#else
int w2=w*2;
wordp firstline = datapos + (y * dst->l);
libaroma_color_set(firstline, color, w);
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (dy = y+1; dy < y2; dy++) {
wordp linepos = datapos + (dy * dst->l);
memcpy(linepos,firstline,w2);
}
#endif
}
else {
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (dy = y; dy < y2; dy++) {
wordp linepos = dst->data + (dy * dst->l) + x;
#ifdef __engine_have_libaroma_alpha_rgba_fill
libaroma_alpha_rgba_fill_line(dy, w, linepos, linepos, color, alpha);
#else
libaroma_alpha_rgba_fill(w, linepos, linepos, color, alpha);
#endif
}
}
return 1;
} /* End of libaroma_draw_rect */
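/*
 * Example : minimal libaroma_draw_rect usage (a sketch only; it assumes
 * the framebuffer was initialized so libaroma_fb()->canvas is valid):
 *
 *   // fill a half-transparent 100x40 bar at (10,10) on the active canvas
 *   // (0x001F is blue in RGB565)
 *   libaroma_draw_rect(NULL, 10, 10, 100, 40, (word) 0x001F, 0x80);
 */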
/*
* Function : libaroma_draw_rectangle
* Return Value: byte
* Descriptions: draw non-filled rectangle
*/
byte libaroma_draw_rectangle(
LIBAROMA_CANVASP dest,
int x, int y, int w, int h,
int thickness, int roundsize,
word color, byte alpha, byte aliased
){
if (dest==NULL) dest=libaroma_fb()->canvas;
if (w<1 || h<1) return 0;
int cornerw=0;
int cornerh=0;
if (roundsize){
cornerw=cornerh=roundsize;
int maxsz=(MIN(w, h))/2;
if (cornerw>maxsz) cornerw=maxsz;
if (cornerh>maxsz) cornerh=maxsz;
}
byte sizeOverflows=0;
if (x+thickness > ((x+w)-(thickness*2))){
sizeOverflows = 1;
}
if (y+thickness > ((y+h)-(thickness*2))){
sizeOverflows = 1;
}
if (sizeOverflows){ //just draw a filled rectangle
libaroma_draw_rect(dest, x, y, w, h, color, alpha);
return 1;
}
if (w > (cornerw*2) && h > (cornerh*2)){ /* draw lines */
/* top */
libaroma_draw_rect(dest, x+cornerw, y, w-(cornerw*2), thickness, color, alpha);
/* left */
libaroma_draw_rect(dest, x, y+cornerh, thickness, h-(cornerh*2), color, alpha);
/* right */
libaroma_draw_rect(dest, (x+w)-thickness, y+cornerh, thickness, h-(cornerh*2), color, alpha);
/* bottom */
libaroma_draw_rect(dest, x+cornerw, (y+h)-thickness, w-(cornerw*2), thickness, color, alpha);
}
if (cornerw && cornerh){ /* draw corners */
/* top, left */
libaroma_draw_arc(dest, cornerw+1, cornerh+1, cornerw+1, cornerh+1, thickness, 180.0, 270.0, color, alpha, 0, aliased?0.5:0.0);
/* top, right */
libaroma_draw_arc(dest, (x+w)-cornerw, cornerh+1, cornerw, cornerh+1, thickness, 270.0, 360.0, color, alpha, 0, aliased?0.5:0.0);
/* no antialiasing makes height more precise in bottom corners */
if (!aliased) cornerh+=1;
/* bottom, left */
libaroma_draw_arc(dest, cornerw+1, (y+h)-cornerh-1, cornerw+1, cornerh+1, thickness, 90.0, 180.0, color, alpha, 0, aliased?0.5:0.0);
/* bottom, right */
libaroma_draw_arc(dest, (x+w)-cornerw, (y+h)-cornerh-1, cornerw, cornerh+1, thickness, 0.0, 90.0, color, alpha, 0, aliased?0.5:0.0);
}
return 1;
} /* End of libaroma_draw_rectangle */
/*
* Function : libaroma_draw_skewed_rect
* Return Value: byte
* Descriptions: draw skewed rectangle
*/
byte libaroma_draw_skewed_rect(
LIBAROMA_CANVASP dest,
LIBAROMA_CANVASP src,
int x0, int y0, int x1, int y1,
int x2, int y2, int x3, int y3,
word color
){
if (!dest) dest=libaroma_fb()->canvas;
int ret=0;
LIBAROMA_PATHP path=libaroma_path(x0, y0);
if (path==NULL){
ALOGW("libaroma_draw_skewed_rect failed to alloc path");
return 0;
}
if (!libaroma_path_add(path, x1, y1)) goto exit;
if (!libaroma_path_add(path, x2, y2)) goto exit;
if (!libaroma_path_add(path, x3, y3)) goto exit;
if (!libaroma_path_draw_filled(dest, src, path, color, 0xFF, 0, 1)){
ALOGW("libaroma_draw_skewed_rect failed to draw path");
goto exit;
}
ret=1;
exit:
libaroma_path_free(path);
return ret;
} /* End of libaroma_draw_skewed_rect */
/*
* Function : libaroma_draw_pixel
* Return Value: byte
* Descriptions: draw pixel
*/
byte libaroma_draw_pixel(
LIBAROMA_CANVASP dest,
int dx, int dy,
word color,
byte alpha
){
if (!dest){
dest=libaroma_fb()->canvas;
}
if ((dx<0)||(dy<0)||(dy>=dest->h)||(dx>=dest->w)){
return 0;
}
wordp d=&dest->data[dest->l * dy + dx];
if (alpha==0xff){
*d = color;
}
else if (alpha>0){
*d = libaroma_alpha(*d,color,alpha);
}
return 1;
} /* End of libaroma_draw_pixel */
/*
* Function : libaroma_draw_alphapixel
* Return Value: byte
* Descriptions: set alpha pixel
*/
byte libaroma_draw_alphapixel(
LIBAROMA_CANVASP dest,
int dx, int dy,
byte alpha
){
if (!dest){
dest=libaroma_fb()->canvas;
}
if ((dx<0)||(dy<0)||(dy>=dest->h)||(dx>=dest->w)){
return 0;
}
if (dest->alpha==NULL){
return 0;
}
dest->alpha[dest->l * dy + dx] = alpha;
return 1;
} /* End of libaroma_draw_alphapixel */
/*
* Function : libaroma_draw_copypixel
* Return Value: byte
* Descriptions: copy pixel color
*/
byte libaroma_draw_copypixel(
LIBAROMA_CANVASP dest, LIBAROMA_CANVASP src,
int dx, int dy, int sx, int sy
){
if (!dest || !src) return 0;
if ((dx<0)||(dy<0)||(sx<0)||(sy<0)||
(dy>=dest->h)||(dx>=dest->w)||(sy>=src->h)||(sx>=src->w)
){
return 0;
}
dest->data[(dest->l*dy)+dx] = src->data[(src->l*sy)+sx];
dest->data[(dest->l*dy)+dx+1] = src->data[(src->l*sy)+sx+1];
dest->data[(dest->l*dy)+dx+2] = src->data[(src->l*sy)+sx+2];
return 1;
} /* End of libaroma_draw_copypixel */
/*
* Function : libaroma_draw_copyalphapixel
* Return Value: byte
* Descriptions: copy pixel alpha
*/
byte libaroma_draw_copyalphapixel(
LIBAROMA_CANVASP dest, LIBAROMA_CANVASP src,
int dx, int dy, int sx, int sy
){
if (!dest || !src) return 0;
if (!src->alpha) return 0;
if (!dest->alpha) //initialize alpha for target canvas
dest->alpha = calloc(dest->s, 1);
if ((dx<0)||(dy<0)||(sx<0)||(sy<0)||
(dy>=dest->h)||(dx>=dest->w)||(sy>=src->h)||(sx>=src->w)
){
return 0;
}
dest->alpha[(dest->l*dy)+dx] = src->alpha[(src->l*sy)+sx];
return 1;
} /* End of libaroma_draw_copyalphapixel */
/*
* Function : libaroma_draw_line
* Return Value: byte
* Descriptions: draw line
*/
byte libaroma_draw_line(
LIBAROMA_CANVASP dest,
int x0, int y0, int x1, int y1,
float wd,
word color,
byte alpha,
byte is_mask){
#define __DRAW_PIX(x,y,a) \
if (is_mask==1){ \
if (!libaroma_draw_alphapixel( \
dest, x, y, \
MIN(alpha,MAX(0, alpha * (1-(a)))) \
)) { break; } \
} \
else if (is_mask==2){ \
if (!libaroma_draw_alphapixel( \
dest, x, y, \
MIN(0xff,MAX(0, 255 * (a))) \
)) { break; } \
} \
else{ \
if (!libaroma_draw_pixel( \
dest, x, y, color, \
MIN(0xff,MAX(0, alpha * (1-(a)))) \
)) { break; } \
}
if (!dest){
dest=libaroma_fb()->canvas;
}
int dx = abs(x1-x0), sx = x0 < x1 ? 1 : -1;
int dy = abs(y1-y0), sy = y0 < y1 ? 1 : -1;
int err = dx-dy, e2, x2, y2;
float ed = dx+dy == 0 ? 1 : sqrt((float)dx*dx+(float)dy*dy);
for (wd = (wd+1)/2; ; ) {
if ((x0>=0)&&(y0>=0)){
__DRAW_PIX(x0,y0,
abs(err-dx+dy)/ed-wd+1
);
}
e2 = err; x2 = x0;
if (2*e2 >= -dx) {
for (e2 += dy, y2 = y0; e2 < ed*wd && (y1 != y2 || dx > dy); e2 += dx){
if ((x0>=0)&&(y2>=0)){
__DRAW_PIX(x0, y2+=sy,
abs(e2)/ed-wd+1
);
}
}
if (x0==x1){
break;
}
e2 = err; err -= dy; x0 += sx;
}
if (2*e2 <= dy){
for (e2 = dx-e2; e2 < ed*wd && (x1 != x2 || dx < dy); e2 += dy){
if ((x2>=0)&&(y0>=0)){
__DRAW_PIX(x2 += sx, y0,
abs(e2)/ed-wd+1
);
}
}
if (y0==y1){
break;
}
err += dx; y0 += sy;
}
}
#undef __DRAW_PIX
return 1;
} /* End of libaroma_draw_line */
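/*
 * Example : minimal libaroma_draw_line usage (a sketch only; it assumes
 * an initialized framebuffer canvas):
 *
 *   // 1.5px-wide opaque line from (0,0) to (100,50), no alpha masking
 *   // (0xF800 is red in RGB565)
 *   libaroma_draw_line(NULL, 0, 0, 100, 50, 1.5, (word) 0xF800, 0xff, 0);
 */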
/*
* Function : libaroma_draw_subpixel
* Return Value: byte
* Descriptions: draw subpixel
*/
byte libaroma_draw_subpixel(
LIBAROMA_CANVASP dest,
float dx, float dy, float thickness,
word color,
byte alpha){
if (!dest){
dest=libaroma_fb()->canvas;
}
if ((dx<=-1)||(dy<=-1)||(dy>=dest->h)||(dx>=dest->w)){
return 0;
}
int x, y;
float px, py;
float ht=(thickness-1.0)/2;
for (y=floor(dy-ht);y<=ceil(dy+ht);y++){
if ((y>=0)&&(y<dest->h)){
int pos = y * dest->l;
for (x=floor(dx-ht);x<=ceil(dx+ht);x++){
if ((x>=0)&&(x<dest->w)){
px = fabs(dx - x) / ht; /* fabs: the operands are floats, int abs() would truncate */
py = fabs(dy - y) / ht;
int alp = MIN(0xff,MAX((1-(px+py)) * 0xff,0));
wordp d = dest->data + pos + x;
word cl = libaroma_alpha(*d, color, alp);
if (alpha!=0xff){
cl=libaroma_alpha(*d,cl,alpha);
}
*d=cl;
}
}
}
}
return 1;
} /* End of libaroma_draw_subpixel */
/*
* Function : libaroma_draw_mask_circle
* Return Value: byte
* Descriptions: draw masked circle
*/
byte libaroma_draw_mask_circle(
LIBAROMA_CANVASP dst,
LIBAROMA_CANVASP src,
int dx, int dy,
int sx, int sy,
int sz,
byte alpha){
if (dst == NULL) {
dst = libaroma_fb()->canvas;
}
if (src == NULL) {
return 0;
}
if (sz<2){
return 1;
}
int radius = sz/2;
int rad = radius * radius;
int y;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for(y=-radius; y<=radius; y++){
int pdy = dy + y;
int psy = sy + y;
if ((pdy<dst->h)&&(pdy>=0)&&(psy<src->h)&&(psy>=0)){
int pos_d = pdy * dst->l;
int pos_s = psy * src->l;
int x = sqrt(rad-y*y);
int w = x*2;
if (sx-x<0){
w-=abs(sx-x);
x=sx;
}
if (dx-x<0){
w-=abs(dx-x);
x=dx;
}
int pdx = dx-x;
int sdx = sx-x;
if (sdx+w>src->w){
w=src->w-sdx;
}
if (pdx+w>dst->w){
w=dst->w-pdx;
}
if (w>0){
wordp dd = dst->data + pos_d + pdx;
wordp sd = src->data + pos_s + sdx;
if (alpha==0xff){
memcpy(dd,sd,w*2);
}
else{
//libaroma_alpha_const_line(pdy,w,dd,dd,sd,alpha);
libaroma_alpha_const(w,dd,dd,sd,alpha);
}
}
}
}
return 1;
} /* End of libaroma_draw_mask_circle */
/*
* Function : libaroma_draw_circle
* Return Value: byte
* Descriptions: draw filled circle
*/
byte libaroma_draw_circle(
LIBAROMA_CANVASP dst,
word color,
int dx, int dy,
int sz,
byte alpha){
if (dst == NULL) {
dst = libaroma_fb()->canvas;
}
if (sz<2){
return 1;
}
int radius = sz/2;
int rad = radius * radius;
int y;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for(y=-radius; y<=radius; y++){
int pdy = dy + y;
if ((pdy<dst->h)&&(pdy>=0)){
int pos_d = pdy * dst->l;
int x = sqrt(rad-y*y);
int w = x*2;
if (dx-x<0){
w-=abs(dx-x);
x=dx;
}
int pdx = dx-x;
if (pdx+w>dst->w){
w=dst->w-pdx;
}
if (w>0){
wordp dd = dst->data + pos_d + pdx;
if (alpha==0xff){
libaroma_color_set(dd,color,w);
}
else{
#ifdef __engine_have_libaroma_alpha_rgba_fill
libaroma_alpha_rgba_fill_line(pdy,w,dd, dd,color,alpha);
#else
libaroma_alpha_rgba_fill(w,dd, dd,color,alpha);
#endif
}
}
}
}
return 1;
} /* End of libaroma_draw_circle */
/*
* Function : libaroma_draw_alpha_circle
* Return Value: byte
* Descriptions: draw alpha-filled circle
*/
byte libaroma_draw_alpha_circle(
LIBAROMA_CANVASP dst,
int dx, int dy,
int sz,
byte alpha){
if (sz<2){
return 1;
}
int radius = sz/2;
int rad = radius * radius;
int y;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for(y=-radius; y<=radius; y++){
int pdy = dy + y;
if ((pdy<dst->h)&&(pdy>=0)){
int pos_d = pdy * dst->l;
int x = sqrt(rad-y*y);
int w = x*2;
if (dx-x<0){
w-=abs(dx-x);
x=dx;
}
int pdx = dx-x;
if (pdx+w>dst->w){
w=dst->w-pdx;
}
if (w>0){
int curx =pos_d + pdx;
int j;
for (j=0; j<w; j++){
dst->alpha[curx+j]=alpha;
}
}
}
}
return 1;
} /* End of libaroma_draw_alpha_circle */
/*
* Function : libaroma_draw_line_width
* Return Value: byte
* Descriptions: draw line with width
*/
byte libaroma_draw_line_width(
LIBAROMA_CANVASP dest,
float x1, float y1, float x2, float y2,
float wd,
word color,
byte alpha,
byte is_mask,
float aliasing){
if (!dest){
dest=libaroma_fb()->canvas;
}
if ((is_mask)&&(dest->alpha==NULL)){
return 0;
}
if ((!is_mask)&&(alpha<1)){
return 1;
}
float angle = atan2(y2 - y1, x2 - x1);
float t2sina1 = wd / 2 * sin(angle);
float t2cosa1 = wd / 2 * cos(angle);
float t2sina2 = wd / 2 * sin(angle);
float t2cosa2 = wd / 2 * cos(angle);
LIBAROMA_PATHP path=libaroma_path(x1 + t2sina1, y1 - t2cosa1);
libaroma_path_add(path, x2 + t2sina2, y2 - t2cosa2);
  libaroma_path_add(path, x2 - t2sina2, y2 + t2cosa2);
libaroma_path_add(path, x1 - t2sina1, y1 + t2cosa1);
libaroma_path_add(path, x1 + t2sina1, y1 - t2cosa1);
byte res=libaroma_path_draw(
dest,
path,
color,
alpha,
is_mask,
aliasing);
libaroma_path_free(path);
return res;
} /* End of libaroma_draw_line_width */
/*
* Function : _libaroma_draw_arc_findpoint
* Return Value: byte
* Descriptions: find arc point
*/
byte _libaroma_draw_arc_findpoint(
LIBAROMA_PATHP path,
float dx, float dy,
float radius_w, float radius_h,
float xt0, float yt0,
float xt1, float yt1,
double start, double end
){
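  /* recursive angular bisection: emit the midpoint of the span, then
     subdivide each half until consecutive points are within ~2px,
     producing an adaptive polyline approximation of the arc */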
double radian;
if (start==end){
return 0;
}
else if (start<end){
radian = start + ((end - start) / 2.0);
}
else{
radian = end + ((start - end) / 2.0);
}
float xt = dx + radius_w*cos(radian);
float yt = dy + radius_h*sin(radian);
  if ((fabsf(xt-xt0)>=2)||(fabsf(yt-yt0)>=2)) {
_libaroma_draw_arc_findpoint(
path, dx, dy, radius_w, radius_h,
xt0, yt0, xt, yt,
start, radian
);
}
libaroma_path_add(path, xt, yt);
  if ((fabsf(xt-xt1)>=2)||(fabsf(yt-yt1)>=2)) {
_libaroma_draw_arc_findpoint(
path, dx, dy, radius_w, radius_h,
xt, yt, xt1, yt1,
radian, end
);
}
libaroma_path_add(path, xt1, yt1);
return 1;
} /* End of _libaroma_draw_arc_findpoint */
/*
* Function : libaroma_draw_arc
* Return Value: byte
* Descriptions: draw arc into canvas
*/
byte libaroma_draw_arc(
LIBAROMA_CANVASP dest,
float dx, float dy,
float radius_w, float radius_h,
float width,
float start_angle, float end_angle,
word color,byte alpha,byte is_mask,float aliasing
){
if (!dest){
dest=libaroma_fb()->canvas;
}
if ((is_mask)&&(dest->alpha==NULL)){
return 0;
}
if ((!is_mask)&&(alpha<1)){
return 1;
}
if (start_angle==end_angle){
/* no draw needed */
return 1;
}
/*
start_angle=fmod(start_angle,360);
end_angle=fmod(end_angle,360);
*/
/*
start_angle=360-start_angle;
end_angle=360-end_angle;
*/
if (start_angle>end_angle){
float tmp=start_angle;
start_angle=end_angle;
end_angle=tmp;
}
double start_radian = start_angle* __PI / 180.0;
double end_radian = end_angle * __PI / 180.0;
float start_x = dx + radius_w*cos(start_radian);
float start_y = dy + radius_h*sin(start_radian);
float end_x = dx + radius_w*cos(end_radian);
float end_y = dy + radius_h*sin(end_radian);
LIBAROMA_PATHP path=libaroma_path(start_x, start_y);
_libaroma_draw_arc_findpoint(
path, dx, dy, radius_w, radius_h,
start_x, start_y, end_x, end_y,
start_radian, end_radian
);
libaroma_path_add(path, end_x, end_y);
if ((width>0)&&(width<radius_w/2)&&(width<radius_h/2)) {
radius_w -= width;
radius_h -= width;
/* roll */
start_x = dx + radius_w*cos(end_radian);
start_y = dy + radius_h*sin(end_radian);
end_x = dx + radius_w*cos(start_radian);
end_y = dy + radius_h*sin(start_radian);
libaroma_path_add(path, start_x, start_y);
_libaroma_draw_arc_findpoint(
path, dx, dy, radius_w, radius_h,
start_x, start_y, end_x, end_y,
end_radian, start_radian
);
}
byte res=libaroma_path_draw(
dest,
path,
color,
alpha,
is_mask,
aliasing);
libaroma_path_free(path);
return res;
} /* End of libaroma_draw_arc */
#ifdef __cplusplus
}
#endif
#endif /* __libaroma_commondraw_c__ */
|
main.c
|
#include <stdio.h>
#include <math.h>
#include <omp.h>
#define mm 15
#define npart 4*mm*mm*mm
/*
* Function declarations
*/
void
dfill(int,double,double[],int);
void
domove(int,double[],double[],double[],double);
void
dscal(int,double,double[],int);
void
fcc(double[],int,int,double);
void
forces(int,double[],double[],double,double);
double
mkekin(int,double[],double[],double,double);
void
mxwell(double[],int,double,double);
void
prnout(int,double,double,double,double,double,double,int,double);
double
velavg(int,double[],double,double);
double
secnds(void);
/*
* Variable declarations
*/
double epot;
double vir;
double count;
/*
* Main program : Molecular Dynamics simulation.
*/
int main(){
int move;
double x[npart*3], vh[npart*3], f[npart*3];
double ekin;
double vel;
double sc;
double start, time;
/*
* Parameter definitions
*/
double den = 0.83134;
double side = pow((double)npart/den,0.3333333);
double tref = 0.722;
double rcoff = (double)mm/4.0;
double h = 0.064;
int irep = 10;
int istop = 20;
int iprint = 5;
int movemx = 20;
double a = side/(double)mm;
double hsq = h*h;
double hsq2 = hsq*0.5;
double tscale = 16.0/((double)npart-1.0);
double vaver = 1.13*sqrt(tref/24.0);
/*
* Initial output
*/
printf(" Molecular Dynamics Simulation example program\n");
printf(" ---------------------------------------------\n");
printf(" number of particles is ............ %6d\n",npart);
printf(" side length of the box is ......... %13.6f\n",side);
printf(" cut off is ........................ %13.6f\n",rcoff);
printf(" reduced temperature is ............ %13.6f\n",tref);
printf(" basic timestep is ................. %13.6f\n",h);
printf(" temperature scale interval ........ %6d\n",irep);
printf(" stop scaling at move .............. %6d\n",istop);
printf(" print interval .................... %6d\n",iprint);
printf(" total no. of steps ................ %6d\n",movemx);
/*
* Generate fcc lattice for atoms inside box
*/
fcc(x, npart, mm, a);
/*
* Initialise velocities and forces (which are zero in fcc positions)
*/
mxwell(vh, 3*npart, h, tref);
dfill(3*npart, 0.0, f, 1);
/*
* Start of md
*/
printf("\n i ke pe e temp "
" pres vel rp\n ----- ---------- ----------"
" ---------- -------- -------- -------- ----\n");
start = secnds();
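  /*
   * The parallel region is entered once and kept alive across all
   * timesteps: the serial position/velocity updates run inside omp single
   * (whose implicit barriers keep threads in step), while forces() is
   * assumed to contain its own worksharing loop over particle interactions.
   */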
#pragma omp parallel default(shared) private(move)
{
for (move=1; move<=movemx; move++) {
/*
* Move the particles and partially update velocities
*/
#pragma omp single
{
domove(3*npart, x, vh, f, side);
}
/*
* Compute forces in the new positions and accumulate the virial
* and potential energy.
*/
forces(npart, x, f, side, rcoff);
/*
* Scale forces, complete update of velocities and compute k.e.
*/
#pragma omp single
{
ekin=mkekin(npart, f, vh, hsq2, hsq);
/*
* Average the velocity and temperature scale if desired
*/
vel=velavg(npart, vh, vaver, h);
if (move<istop && fmod(move, irep)==0) {
sc=sqrt(tref/(tscale*ekin));
dscal(3*npart, sc, vh, 1);
ekin=tref/tscale;
}
/*
* Sum to get full potential energy and virial
*/
if (fmod(move, iprint)==0)
prnout(move, ekin, epot, tscale, vir, vel, count, npart, den);
}
}
}
time = secnds() - start;
printf("Time = %f\n",(float) time);
  return 0;
}
double secnds()
{
return omp_get_wtime();
}
|
threshold.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD %
% T H H R R E SS H H O O L D D %
% T HHHHH RRRR EEE SSS HHHHH O O L D D %
% T H H R R E SS H H O O L D D %
% T H H R R EEEEE SSSSS H H OOO LLLLL DDDD %
% %
% %
% MagickCore Image Threshold Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/shear.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
Define declarations.
*/
#define ThresholdsFilename "thresholds.xml"
/*
Typedef declarations.
*/
struct _ThresholdMap
{
char
*map_id,
*description;
size_t
width,
height;
ssize_t
divisor,
*levels;
};
/*
Static declarations.
*/
#if MAGICKCORE_ZERO_CONFIGURATION_SUPPORT
#include "MagickCore/threshold-map.h"
#else
static const char *const
BuiltinMap=
"<?xml version=\"1.0\"?>"
"<thresholds>"
" <threshold map=\"threshold\" alias=\"1x1\">"
" <description>Threshold 1x1 (non-dither)</description>"
" <levels width=\"1\" height=\"1\" divisor=\"2\">"
" 1"
" </levels>"
" </threshold>"
" <threshold map=\"checks\" alias=\"2x1\">"
" <description>Checkerboard 2x1 (dither)</description>"
" <levels width=\"2\" height=\"2\" divisor=\"3\">"
" 1 2"
" 2 1"
" </levels>"
" </threshold>"
"</thresholds>";
#endif
/*
Forward declarations.
*/
static ThresholdMap
*GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveThresholdImage() selects an individual threshold for each pixel
% based on the range of intensity values in its local neighborhood. This
% allows for thresholding of an image whose global intensity histogram
% doesn't contain distinctive peaks.
%
% The format of the AdaptiveThresholdImage method is:
%
% Image *AdaptiveThresholdImage(const Image *image,const size_t width,
% const size_t height,const double bias,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the local neighborhood.
%
% o height: the height of the local neighborhood.
%
% o bias: the mean bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
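/*
  Usage sketch (illustrative; not part of this file): local adaptive
  thresholding with a 15x15 neighborhood and a small positive bias given in
  quantum units.  The filenames are placeholders and error checks are
  abbreviated.

    ExceptionInfo *exception = AcquireExceptionInfo();
    ImageInfo *image_info = AcquireImageInfo();
    (void) CopyMagickString(image_info->filename,"input.png",MagickPathExtent);
    Image *image = ReadImage(image_info,exception);
    Image *lat = AdaptiveThresholdImage(image,15,15,0.05*QuantumRange,
      exception);
    if (lat != (Image *) NULL)
      {
        (void) CopyMagickString(lat->filename,"output.png",MagickPathExtent);
        (void) WriteImage(image_info,lat,exception);
        lat=DestroyImage(lat);
      }
    image=DestroyImage(image);
    image_info=DestroyImageInfo(image_info);
    exception=DestroyExceptionInfo(exception);
*/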
MagickExport Image *AdaptiveThresholdImage(const Image *image,
const size_t width,const size_t height,const double bias,
ExceptionInfo *exception)
{
#define AdaptiveThresholdImageTag "AdaptiveThreshold/Image"
CacheView
*image_view,
*threshold_view;
Image
*threshold_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickSizeType
number_pixels;
ssize_t
y;
/*
Initialize threshold image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
threshold_image=CloneImage(image,0,0,MagickTrue,exception);
if (threshold_image == (Image *) NULL)
return((Image *) NULL);
if ((width == 0) || (height == 0))
return(threshold_image);
status=SetImageStorageClass(threshold_image,DirectClass,exception);
if (status == MagickFalse)
{
threshold_image=DestroyImage(threshold_image);
return((Image *) NULL);
}
/*
Threshold image.
*/
status=MagickTrue;
progress=0;
number_pixels=(MagickSizeType) width*height;
image_view=AcquireVirtualCacheView(image,exception);
threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,threshold_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
channel_bias[MaxPixelChannels],
channel_sum[MaxPixelChannels];
const Quantum
*magick_restrict p,
*magick_restrict pixels;
Quantum
*magick_restrict q;
ssize_t
i,
x;
ssize_t
center,
u,
v;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
(height/2L),image->columns+width,height,exception);
q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,
1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+
GetPixelChannels(image)*(width/2);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(threshold_traits == UndefinedPixelTrait))
continue;
if ((threshold_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(threshold_image,channel,p[center+i],q);
continue;
}
pixels=p;
channel_bias[channel]=0.0;
channel_sum[channel]=0.0;
for (v=0; v < (ssize_t) height; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
if (u == (ssize_t) (width-1))
channel_bias[channel]+=pixels[i];
channel_sum[channel]+=pixels[i];
pixels+=GetPixelChannels(image);
}
pixels+=GetPixelChannels(image)*image->columns;
}
}
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
mean;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(threshold_traits == UndefinedPixelTrait))
continue;
if ((threshold_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(threshold_image,channel,p[center+i],q);
continue;
}
channel_sum[channel]-=channel_bias[channel];
channel_bias[channel]=0.0;
pixels=p;
for (v=0; v < (ssize_t) height; v++)
{
channel_bias[channel]+=pixels[i];
pixels+=(width-1)*GetPixelChannels(image);
channel_sum[channel]+=pixels[i];
pixels+=GetPixelChannels(image)*(image->columns+1);
}
mean=(double) (channel_sum[channel]/number_pixels+bias);
SetPixelChannel(threshold_image,channel,(Quantum) ((double)
p[center+i] <= mean ? 0 : QuantumRange),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(threshold_image);
}
if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
threshold_image->type=image->type;
threshold_view=DestroyCacheView(threshold_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
threshold_image=DestroyImage(threshold_image);
return(threshold_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoThresholdImage() automatically performs image thresholding
% dependent on which method you specify.
%
% The format of the AutoThresholdImage method is:
%
% MagickBooleanType AutoThresholdImage(Image *image,
% const AutoThresholdMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-threshold.
%
% o method: choose from Kapur, OTSU, or Triangle.
%
% o exception: return any errors or warnings in this structure.
%
*/
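/*
  Usage sketch (illustrative): apply one of the estimators in place; image
  acquisition and cleanup are as in the sketch above.

    if (AutoThresholdImage(image,OTSUThresholdMethod,exception) == MagickFalse)
      return(MagickFalse);

  KapurThresholdMethod and TriangleThresholdMethod select the entropy- and
  geometry-based estimators defined below.
*/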
static double KapurThreshold(const Image *image,const double *histogram,
ExceptionInfo *exception)
{
#define MaxIntensity 255
double
*black_entropy,
*cumulative_histogram,
entropy,
epsilon,
maximum_entropy,
*white_entropy;
ssize_t
i,
j;
size_t
threshold;
/*
    Compute optimal threshold from the entropy of the histogram.
*/
cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*cumulative_histogram));
black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*black_entropy));
white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*white_entropy));
if ((cumulative_histogram == (double *) NULL) ||
(black_entropy == (double *) NULL) || (white_entropy == (double *) NULL))
{
if (white_entropy != (double *) NULL)
white_entropy=(double *) RelinquishMagickMemory(white_entropy);
if (black_entropy != (double *) NULL)
black_entropy=(double *) RelinquishMagickMemory(black_entropy);
if (cumulative_histogram != (double *) NULL)
cumulative_histogram=(double *)
RelinquishMagickMemory(cumulative_histogram);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(-1.0);
}
/*
Entropy for black and white parts of the histogram.
*/
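  /*
    With P(t) = sum_{i<=t} h[i] (the cumulative histogram), the two
    entropies computed below are

      H_b(t) = -sum_{i<=t} (h[i]/P(t)) log(h[i]/P(t))
      H_w(t) = -sum_{i>t}  (h[i]/(1-P(t))) log(h[i]/(1-P(t)))

    and Kapur's threshold is the t maximizing H_b(t)+H_w(t).
  */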
cumulative_histogram[0]=histogram[0];
for (i=1; i <= MaxIntensity; i++)
cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i];
epsilon=MagickMinimumValue;
for (j=0; j <= MaxIntensity; j++)
{
/*
Black entropy.
*/
black_entropy[j]=0.0;
if (cumulative_histogram[j] > epsilon)
{
entropy=0.0;
for (i=0; i <= j; i++)
if (histogram[i] > epsilon)
entropy-=histogram[i]/cumulative_histogram[j]*
log(histogram[i]/cumulative_histogram[j]);
black_entropy[j]=entropy;
}
/*
White entropy.
*/
white_entropy[j]=0.0;
if ((1.0-cumulative_histogram[j]) > epsilon)
{
entropy=0.0;
for (i=j+1; i <= MaxIntensity; i++)
if (histogram[i] > epsilon)
entropy-=histogram[i]/(1.0-cumulative_histogram[j])*
log(histogram[i]/(1.0-cumulative_histogram[j]));
white_entropy[j]=entropy;
}
}
/*
Find histogram bin with maximum entropy.
*/
maximum_entropy=black_entropy[0]+white_entropy[0];
threshold=0;
for (j=1; j <= MaxIntensity; j++)
if ((black_entropy[j]+white_entropy[j]) > maximum_entropy)
{
maximum_entropy=black_entropy[j]+white_entropy[j];
threshold=(size_t) j;
}
/*
Free resources.
*/
white_entropy=(double *) RelinquishMagickMemory(white_entropy);
black_entropy=(double *) RelinquishMagickMemory(black_entropy);
cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram);
return(100.0*threshold/MaxIntensity);
}
static double OTSUThreshold(const Image *image,const double *histogram,
ExceptionInfo *exception)
{
double
max_sigma,
*myu,
*omega,
*probability,
*sigma,
threshold;
ssize_t
i;
/*
Compute optimal threshold from maximization of inter-class variance.
*/
myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu));
omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega));
probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*probability));
sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma));
if ((myu == (double *) NULL) || (omega == (double *) NULL) ||
(probability == (double *) NULL) || (sigma == (double *) NULL))
{
if (sigma != (double *) NULL)
sigma=(double *) RelinquishMagickMemory(sigma);
if (probability != (double *) NULL)
probability=(double *) RelinquishMagickMemory(probability);
if (omega != (double *) NULL)
omega=(double *) RelinquishMagickMemory(omega);
if (myu != (double *) NULL)
myu=(double *) RelinquishMagickMemory(myu);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(-1.0);
}
/*
Calculate probability density.
*/
for (i=0; i <= (ssize_t) MaxIntensity; i++)
probability[i]=histogram[i];
/*
Generate probability of graylevels and mean value for separation.
*/
omega[0]=probability[0];
myu[0]=0.0;
for (i=1; i <= (ssize_t) MaxIntensity; i++)
{
omega[i]=omega[i-1]+probability[i];
myu[i]=myu[i-1]+i*probability[i];
}
/*
    Maximize the inter-class variance (sigma) to find the optimal threshold.
*/
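  /*
    With omega(t) the cumulative probability and myu(t) the cumulative mean,
    the between-class variance evaluated below is

      sigma_B^2(t) = (myu(T)*omega(t)-myu(t))^2 / (omega(t)*(1-omega(t)))

    and Otsu's threshold is the t maximizing it.
  */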
threshold=0;
max_sigma=0.0;
for (i=0; i < (ssize_t) MaxIntensity; i++)
{
sigma[i]=0.0;
if ((omega[i] != 0.0) && (omega[i] != 1.0))
sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0-
omega[i]));
if (sigma[i] > max_sigma)
{
max_sigma=sigma[i];
threshold=(double) i;
}
}
/*
Free resources.
*/
myu=(double *) RelinquishMagickMemory(myu);
omega=(double *) RelinquishMagickMemory(omega);
probability=(double *) RelinquishMagickMemory(probability);
sigma=(double *) RelinquishMagickMemory(sigma);
return(100.0*threshold/MaxIntensity);
}
static double TriangleThreshold(const double *histogram)
{
double
a,
b,
c,
count,
distance,
inverse_ratio,
max_distance,
segment,
x1,
x2,
y1,
y2;
ssize_t
i;
ssize_t
end,
max,
start,
threshold;
/*
Compute optimal threshold with triangle algorithm.
*/
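  /*
    Geometrically: a line is drawn from the histogram peak (max,h[max]) to
    the far non-empty end of the histogram, and the threshold is the bin
    with the largest perpendicular distance from that line.  Note the
    normalization below folds c*c into the norm, unlike the textbook
    1/sqrt(a*a+b*b); this scales all distances uniformly, so the argmax
    is unaffected.
  */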
start=0; /* find start bin, first bin not zero count */
for (i=0; i <= (ssize_t) MaxIntensity; i++)
if (histogram[i] > 0.0)
{
start=i;
break;
}
end=0; /* find end bin, last bin not zero count */
for (i=(ssize_t) MaxIntensity; i >= 0; i--)
if (histogram[i] > 0.0)
{
end=i;
break;
}
max=0; /* find max bin, bin with largest count */
count=0.0;
for (i=0; i <= (ssize_t) MaxIntensity; i++)
if (histogram[i] > count)
{
max=i;
count=histogram[i];
}
/*
Compute threshold at split point.
*/
x1=(double) max;
y1=histogram[max];
x2=(double) end;
if ((max-start) >= (end-max))
x2=(double) start;
y2=0.0;
a=y1-y2;
b=x2-x1;
c=(-1.0)*(a*x1+b*y1);
inverse_ratio=1.0/sqrt(a*a+b*b+c*c);
threshold=0;
max_distance=0.0;
if (x2 == (double) start)
for (i=start; i < max; i++)
{
segment=inverse_ratio*(a*i+b*histogram[i]+c);
distance=sqrt(segment*segment);
if ((distance > max_distance) && (segment > 0.0))
{
threshold=i;
max_distance=distance;
}
}
else
for (i=end; i > max; i--)
{
segment=inverse_ratio*(a*i+b*histogram[i]+c);
distance=sqrt(segment*segment);
if ((distance > max_distance) && (segment < 0.0))
{
threshold=i;
max_distance=distance;
}
}
return(100.0*threshold/MaxIntensity);
}
MagickExport MagickBooleanType AutoThresholdImage(Image *image,
const AutoThresholdMethod method,ExceptionInfo *exception)
{
CacheView
*image_view;
char
property[MagickPathExtent];
double
gamma,
*histogram,
sum,
threshold;
MagickBooleanType
status;
ssize_t
i;
ssize_t
y;
/*
Form histogram.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*histogram));
if (histogram == (double *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=MagickTrue;
(void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
double intensity = GetPixelIntensity(image,p);
histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
/*
Normalize histogram.
*/
sum=0.0;
for (i=0; i <= (ssize_t) MaxIntensity; i++)
sum+=histogram[i];
gamma=PerceptibleReciprocal(sum);
for (i=0; i <= (ssize_t) MaxIntensity; i++)
histogram[i]=gamma*histogram[i];
/*
Discover threshold from histogram.
*/
switch (method)
{
case KapurThresholdMethod:
{
threshold=KapurThreshold(image,histogram,exception);
break;
}
case OTSUThresholdMethod:
default:
{
threshold=OTSUThreshold(image,histogram,exception);
break;
}
case TriangleThresholdMethod:
{
threshold=TriangleThreshold(histogram);
break;
}
}
histogram=(double *) RelinquishMagickMemory(histogram);
if (threshold < 0.0)
status=MagickFalse;
if (status == MagickFalse)
return(MagickFalse);
/*
Threshold image.
*/
(void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
(void) SetImageProperty(image,"auto-threshold:threshold",property,exception);
if (IsStringTrue(GetImageArtifact(image,"auto-threshold:verbose")) != MagickFalse)
(void) FormatLocaleFile(stdout,"%.*g%%\n",GetMagickPrecision(),threshold);
return(BilevelImage(image,QuantumRange*threshold/100.0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B i l e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BilevelImage() changes the value of individual pixels based on the
% intensity of each pixel channel. The result is a high-contrast image.
%
% More precisely, each channel value of the image is 'thresholded' so that if
% it is equal to or less than the given value it is set to zero, while any
% value greater than that is set to its maximum, QuantumRange.
%
% This function is what is used to implement the "-threshold" operator for
% the command line API.
%
% If the default channel setting is given the image is thresholded using just
% the gray 'intensity' of the image, rather than the individual channels.
%
% The format of the BilevelImage method is:
%
% MagickBooleanType BilevelImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold values.
%
% o exception: return any errors or warnings in this structure.
%
% Aside: You can get the same results as this operator using LevelImages()
% with the 'threshold' value for both the black_point and the white_point.
%
*/
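/*
  Usage sketch (illustrative): the equivalent of the command-line
  "-threshold 50%" is

    (void) BilevelImage(image,0.5*QuantumRange,exception);

  Channel values at or below the threshold become 0, the rest QuantumRange.
*/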
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold,
ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) == MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
/*
Bilevel threshold image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
pixel;
ssize_t
i;
pixel=GetPixelIntensity(image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (image->channel_mask != DefaultChannels)
pixel=(double) q[i];
q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l a c k T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlackThresholdImage() is like ThresholdImage() but forces all pixels below
% the threshold into black while leaving all pixels at or above the threshold
% unchanged.
%
% The format of the BlackThresholdImage method is:
%
% MagickBooleanType BlackThresholdImage(Image *image,
% const char *threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
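/*
  Usage sketch (illustrative): thresholds are parsed with ParseGeometry(),
  so percent forms work directly.  For example,

    (void) BlackThresholdImage(image,"20%",exception);

  forces every updated channel value below 20% of QuantumRange to zero.
*/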
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
GeometryInfo
geometry_info;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
threshold;
MagickStatusType
flags;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (thresholds == (const char *) NULL)
return(MagickTrue);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
GetPixelInfo(image,&threshold);
flags=ParseGeometry(thresholds,&geometry_info);
threshold.red=geometry_info.rho;
threshold.green=geometry_info.rho;
threshold.blue=geometry_info.rho;
threshold.black=geometry_info.rho;
threshold.alpha=100.0;
if ((flags & SigmaValue) != 0)
threshold.green=geometry_info.sigma;
if ((flags & XiValue) != 0)
threshold.blue=geometry_info.xi;
if ((flags & PsiValue) != 0)
threshold.alpha=geometry_info.psi;
if (threshold.colorspace == CMYKColorspace)
{
if ((flags & PsiValue) != 0)
threshold.black=geometry_info.psi;
if ((flags & ChiValue) != 0)
threshold.alpha=geometry_info.chi;
}
if ((flags & PercentValue) != 0)
{
threshold.red*=(MagickRealType) (QuantumRange/100.0);
threshold.green*=(MagickRealType) (QuantumRange/100.0);
threshold.blue*=(MagickRealType) (QuantumRange/100.0);
threshold.black*=(MagickRealType) (QuantumRange/100.0);
threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
}
/*
    Black threshold image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
pixel;
ssize_t
i;
pixel=GetPixelIntensity(image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (image->channel_mask != DefaultChannels)
pixel=(double) q[i];
if (pixel < GetPixelInfoChannel(&threshold,channel))
q[i]=(Quantum) 0;
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ThresholdImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l a m p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClampImage() sets each pixel whose value is below zero to zero and each
% pixel whose value is above the quantum range to the quantum range (e.g.
% 65535); otherwise the pixel value remains unchanged.
%
% The format of the ClampImage method is:
%
% MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag "Clamp/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
{
ssize_t
i;
PixelInfo
*magick_restrict q;
q=image->colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
q->red=(double) ClampPixel(q->red);
q->green=(double) ClampPixel(q->green);
q->blue=(double) ClampPixel(q->blue);
q->alpha=(double) ClampPixel(q->alpha);
q++;
}
return(SyncImage(image,exception));
}
/*
Clamp image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampPixel((MagickRealType) q[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ClampImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorThresholdImage() forces all pixels in the color range to white
% otherwise black.
%
% The format of the ColorThresholdImage method is:
%
% MagickBooleanType ColorThresholdImage(Image *image,
% const PixelInfo *start_color,const PixelInfo *stop_color,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o start_color, stop_color: define the start and stop color range. Any
% pixel within the range returns white otherwise black.
%
% o exception: return any errors or warnings in this structure.
%
*/
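/*
  Usage sketch (illustrative): select mid-tone grays.  The color strings are
  placeholders resolved with QueryColorCompliance():

    PixelInfo start, stop;
    (void) QueryColorCompliance("gray(25%)",AllCompliance,&start,exception);
    (void) QueryColorCompliance("gray(75%)",AllCompliance,&stop,exception);
    (void) ColorThresholdImage(image,&start,&stop,exception);
*/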
MagickExport MagickBooleanType ColorThresholdImage(Image *image,
const PixelInfo *start_color,const PixelInfo *stop_color,
ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
const char
*artifact;
IlluminantType
illuminant = D65Illuminant;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
start,
stop;
ssize_t
y;
/*
Color threshold image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=AcquireImageColormap(image,2,exception);
if (status == MagickFalse)
return(status);
artifact=GetImageArtifact(image,"color:illuminant");
if (artifact != (const char *) NULL)
{
illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions,
MagickFalse,artifact);
if ((ssize_t) illuminant < 0)
illuminant=UndefinedIlluminant;
}
start=(*start_color);
stop=(*stop_color);
switch (image->colorspace)
{
case HCLColorspace:
{
ConvertRGBToHCL(start_color->red,start_color->green,start_color->blue,
&start.red,&start.green,&start.blue);
ConvertRGBToHCL(stop_color->red,stop_color->green,stop_color->blue,
&stop.red,&stop.green,&stop.blue);
break;
}
case HSBColorspace:
{
ConvertRGBToHSB(start_color->red,start_color->green,start_color->blue,
&start.red,&start.green,&start.blue);
ConvertRGBToHSB(stop_color->red,stop_color->green,stop_color->blue,
&stop.red,&stop.green,&stop.blue);
break;
}
case HSLColorspace:
{
ConvertRGBToHSL(start_color->red,start_color->green,start_color->blue,
&start.red,&start.green,&start.blue);
ConvertRGBToHSL(stop_color->red,stop_color->green,stop_color->blue,
&stop.red,&stop.green,&stop.blue);
break;
}
case HSVColorspace:
{
ConvertRGBToHSV(start_color->red,start_color->green,start_color->blue,
&start.red,&start.green,&start.blue);
ConvertRGBToHSV(stop_color->red,stop_color->green,stop_color->blue,
&stop.red,&stop.green,&stop.blue);
break;
}
case HWBColorspace:
{
ConvertRGBToHWB(start_color->red,start_color->green,start_color->blue,
&start.red,&start.green,&start.blue);
ConvertRGBToHWB(stop_color->red,stop_color->green,stop_color->blue,
&stop.red,&stop.green,&stop.blue);
break;
}
case LabColorspace:
{
ConvertRGBToLab(start_color->red,start_color->green,start_color->blue,
illuminant,&start.red,&start.green,&start.blue);
ConvertRGBToLab(stop_color->red,stop_color->green,stop_color->blue,
illuminant,&stop.red,&stop.green,&stop.blue);
break;
}
default:
{
start.red*=QuantumScale;
start.green*=QuantumScale;
start.blue*=QuantumScale;
stop.red*=QuantumScale;
stop.green*=QuantumScale;
stop.blue*=QuantumScale;
break;
}
}
start.red*=QuantumRange;
start.green*=QuantumRange;
start.blue*=QuantumRange;
stop.red*=QuantumRange;
stop.green*=QuantumRange;
stop.blue*=QuantumRange;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickBooleanType
foreground = MagickTrue;
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if ((q[i] < GetPixelInfoChannel(&start,channel)) ||
(q[i] > GetPixelInfoChannel(&stop,channel)))
foreground=MagickFalse;
}
SetPixelIndex(image,(Quantum) (foreground != MagickFalse ? 1 : 0),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ThresholdImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
image->colorspace=sRGBColorspace;
return(SyncImage(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyThresholdMap() de-allocates the given ThresholdMap.
%
% The format of the DestroyThresholdMap method is:
%
%      ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
%
% A description of each parameter follows.
%
% o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
assert(map != (ThresholdMap *) NULL);
if (map->map_id != (char *) NULL)
map->map_id=DestroyString(map->map_id);
if (map->description != (char *) NULL)
map->description=DestroyString(map->description);
if (map->levels != (ssize_t *) NULL)
map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
map=(ThresholdMap *) RelinquishMagickMemory(map);
return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMap() loads and searches one or more threshold map files for the
% map matching the given name or alias.
%
% The format of the GetThresholdMap method is:
%
% ThresholdMap *GetThresholdMap(const char *map_id,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o map_id: ID of the map to look for.
%
% o exception: return any errors or warnings in this structure.
%
*/
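/*
  Usage sketch (illustrative): look up a map by name or alias and release it
  when done ("checks" is defined in the built-in list above):

    ThresholdMap *map = GetThresholdMap("checks",exception);
    if (map != (ThresholdMap *) NULL)
      map=DestroyThresholdMap(map);
*/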
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
ExceptionInfo *exception)
{
ThresholdMap
*map;
map=GetThresholdMapFile(BuiltinMap,"built-in",map_id,exception);
if (map != (ThresholdMap *) NULL)
return(map);
#if !MAGICKCORE_ZERO_CONFIGURATION_SUPPORT
{
const StringInfo
*option;
LinkedListInfo
*options;
options=GetConfigureOptions(ThresholdsFilename,exception);
option=(const StringInfo *) GetNextValueInLinkedList(options);
while (option != (const StringInfo *) NULL)
{
map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
GetStringInfoPath(option),map_id,exception);
if (map != (ThresholdMap *) NULL)
break;
option=(const StringInfo *) GetNextValueInLinkedList(options);
}
options=DestroyConfigureOptions(options);
}
#endif
return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMapFile() looks for a given threshold map name or alias in the
% given XML file data, and returns the allocated map when found.
%
% The format of the GetThresholdMapFile method is:
%
%     ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
%       const char *map_id,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o map_id: ID of the map to look for in XML list.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
const char *map_id,ExceptionInfo *exception)
{
char
*p;
const char
*attribute,
*content;
double
value;
ssize_t
i;
ThresholdMap
*map;
XMLTreeInfo
*description,
*levels,
*threshold,
*thresholds;
(void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
"Loading threshold map file \"%s\" ...",filename);
map=(ThresholdMap *) NULL;
thresholds=NewXMLTree(xml,exception);
if (thresholds == (XMLTreeInfo *) NULL)
return(map);
for (threshold=GetXMLTreeChild(thresholds,"threshold");
threshold != (XMLTreeInfo *) NULL;
threshold=GetNextXMLTreeTag(threshold))
{
attribute=GetXMLTreeAttribute(threshold,"map");
if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
break;
attribute=GetXMLTreeAttribute(threshold,"alias");
if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
break;
}
if (threshold == (XMLTreeInfo *) NULL)
{
thresholds=DestroyXMLTree(thresholds);
return(map);
}
description=GetXMLTreeChild(threshold,"description");
if (description == (XMLTreeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement", "<description>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
return(map);
}
levels=GetXMLTreeChild(threshold,"levels");
if (levels == (XMLTreeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement", "<levels>, map \"%s\"", map_id);
thresholds=DestroyXMLTree(thresholds);
return(map);
}
map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map));
map->map_id=(char *) NULL;
map->description=(char *) NULL;
map->levels=(ssize_t *) NULL;
attribute=GetXMLTreeAttribute(threshold,"map");
if (attribute != (char *) NULL)
map->map_id=ConstantString(attribute);
content=GetXMLTreeContent(description);
if (content != (char *) NULL)
map->description=ConstantString(content);
attribute=GetXMLTreeAttribute(levels,"width");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
map->width=StringToUnsignedLong(attribute);
if (map->width == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
attribute=GetXMLTreeAttribute(levels,"height");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<levels height>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
map->height=StringToUnsignedLong(attribute);
if (map->height == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
attribute=GetXMLTreeAttribute(levels,"divisor");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
map->divisor=(ssize_t) StringToLong(attribute);
if (map->divisor < 2)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
content=GetXMLTreeContent(levels);
if (content == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingContent", "<levels>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
sizeof(*map->levels));
if (map->levels == (ssize_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
for (i=0; i < (ssize_t) (map->width*map->height); i++)
{
map->levels[i]=(ssize_t) strtol(content,&p,10);
if (p == content)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidContent", "<level> too few values, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
(double) map->levels[i],map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
content=p;
}
value=(double) strtol(content,&p,10);
(void) value;
if (p != content)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidContent", "<level> too many values, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
thresholds=DestroyXMLTree(thresholds);
return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L i s t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMapFile() lists the threshold maps and their descriptions
% in the given XML file data.
%
% The format of the ListThresholdMapFile method is:
%
%     MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
%       const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: A pointer to the output FILE.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
const char *filename,ExceptionInfo *exception)
{
const char
*alias,
*content,
*map;
XMLTreeInfo
*description,
*threshold,
*thresholds;
assert( xml != (char *) NULL );
assert( file != (FILE *) NULL );
(void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
"Loading threshold map file \"%s\" ...",filename);
thresholds=NewXMLTree(xml,exception);
if ( thresholds == (XMLTreeInfo *) NULL )
return(MagickFalse);
(void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
(void) FormatLocaleFile(file,
"----------------------------------------------------\n");
threshold=GetXMLTreeChild(thresholds,"threshold");
for ( ; threshold != (XMLTreeInfo *) NULL;
threshold=GetNextXMLTreeTag(threshold))
{
map=GetXMLTreeAttribute(threshold,"map");
if (map == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<map>");
thresholds=DestroyXMLTree(thresholds);
return(MagickFalse);
}
alias=GetXMLTreeAttribute(threshold,"alias");
description=GetXMLTreeChild(threshold,"description");
if (description == (XMLTreeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement", "<description>, map \"%s\"",map);
thresholds=DestroyXMLTree(thresholds);
return(MagickFalse);
}
content=GetXMLTreeContent(description);
if (content == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingContent", "<description>, map \"%s\"", map);
thresholds=DestroyXMLTree(thresholds);
return(MagickFalse);
}
(void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
content);
}
thresholds=DestroyXMLTree(thresholds);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i s t T h r e s h o l d M a p s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMaps() lists the threshold maps and their descriptions
% as defined by "threshold.xml" to a file.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: A pointer to the output FILE.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
ExceptionInfo *exception)
{
const StringInfo
*option;
LinkedListInfo
*options;
MagickStatusType
status;
status=MagickTrue;
if (file == (FILE *) NULL)
file=stdout;
options=GetConfigureOptions(ThresholdsFilename,exception);
(void) FormatLocaleFile(file,
"\n Threshold Maps for Ordered Dither Operations\n");
option=(const StringInfo *) GetNextValueInLinkedList(options);
while (option != (const StringInfo *) NULL)
{
(void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
GetStringInfoPath(option),exception);
option=(const StringInfo *) GetNextValueInLinkedList(options);
}
options=DestroyConfigureOptions(options);
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O r d e r e d D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OrderedDitherImage() performs an ordered dither based on a number of
% pre-defined dithering threshold maps, but over multiple intensity levels,
% which can be different for different channels, according to the input
% argument.
%
% The format of the OrderedDitherImage method is:
%
% MagickBooleanType OrderedDitherImage(Image *image,
% const char *threshold_map,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold_map: A string containing the name of the threshold dither
% map to use, followed by zero or more numbers representing the number
% of color levels to dither between.
%
% Any level number less than 2 will be equivalent to 2, and means only
% binary dithering will be applied to each color channel.
%
% No numbers also means a 2 level (bitmap) dither will be applied to all
% channels, while a single number is the number of levels applied to each
% channel in sequence. More numbers will be applied in turn to each of
% the color channels.
%
%    For example: "o3x3,6" will generate a 6-level posterization of the
%    image with an ordered 3x3 diffused pixel dither being applied between
%    each level, while "checker,8,8,4" will produce a 332 colormapped image
%    with only a single checkerboard hash pattern (50% grey) between each
%    color level, basically doubling the number of color levels with
%    a bare minimum of dithering.
%
% o exception: return any errors or warnings in this structure.
%
*/
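/*
  Usage sketch (illustrative): the equivalent of the command-line
  "-ordered-dither checks,4" -- a 4-level posterization with the built-in
  checkerboard map applied between levels:

    (void) OrderedDitherImage(image,"checks,4",exception);
*/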
MagickExport MagickBooleanType OrderedDitherImage(Image *image,
const char *threshold_map,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"
CacheView
*image_view;
char
token[MagickPathExtent];
const char
*p;
double
levels[CompositePixelChannel];
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
i,
y;
ThresholdMap
*map;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (threshold_map == (const char *) NULL)
return(MagickTrue);
p=(char *) threshold_map;
while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) &&
(*p != '\0'))
p++;
threshold_map=p;
while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) &&
(*p != '\0'))
{
if ((p-threshold_map) >= (MagickPathExtent-1))
break;
token[p-threshold_map]=(*p);
p++;
}
token[p-threshold_map]='\0';
map=GetThresholdMap(token,exception);
if (map == (ThresholdMap *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","ordered-dither",threshold_map);
return(MagickFalse);
}
for (i=0; i < MaxPixelChannels; i++)
levels[i]=2.0;
p=strchr((char *) threshold_map,',');
if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0))
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
for (i=0; (i < MaxPixelChannels); i++)
levels[i]=StringToDouble(token,(char **) NULL);
for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
levels[i]=StringToDouble(token,(char **) NULL);
}
}
for (i=0; i < MaxPixelChannels; i++)
if (fabs(levels[i]) >= 1)
levels[i]-=1.0;
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
j,
n;
n=0;
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
{
ssize_t
level,
threshold;
PixelChannel channel = GetPixelChannelChannel(image,j);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (fabs(levels[n]) < MagickEpsilon)
{
n++;
continue;
}
threshold=(ssize_t) (QuantumScale*q[j]*(levels[n]*(map->divisor-1)+1));
level=threshold/(map->divisor-1);
threshold-=level*(map->divisor-1);
q[j]=ClampToQuantum((double) (level+(threshold >=
map->levels[(x % map->width)+map->width*(y % map->height)]))*
QuantumRange/levels[n]);
n++;
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,DitherImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
map=DestroyThresholdMap(map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P e r c e p t i b l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PerceptibleImage() sets each pixel whose value is less than |epsilon| to
% epsilon or -epsilon (whichever is closer); otherwise the pixel value
% remains unchanged.
%
% The format of the PerceptibleImage method is:
%
% MagickBooleanType PerceptibleImage(Image *image,const double epsilon,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
% o exception: return any errors or warnings in this structure.
%
*/
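/*
  A worked example (illustrative): with epsilon = 1.0e-9, a channel value
  of 1.0e-12 becomes 1.0e-9, a value of -1.0e-12 becomes -1.0e-9, and any
  value whose magnitude is at least 1.0e-9 is left unchanged.
*/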
static inline Quantum PerceptibleThreshold(const Quantum quantum,
const double epsilon)
{
double
sign;
sign=(double) quantum < 0.0 ? -1.0 : 1.0;
if ((sign*quantum) >= epsilon)
return(quantum);
return((Quantum) (sign*epsilon));
}
MagickExport MagickBooleanType PerceptibleImage(Image *image,
const double epsilon,ExceptionInfo *exception)
{
#define PerceptibleImageTag "Perceptible/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
{
ssize_t
i;
PixelInfo
*magick_restrict q;
q=image->colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red),
epsilon);
q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green),
epsilon);
q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue),
epsilon);
q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha),
epsilon);
q++;
}
return(SyncImage(image,exception));
}
/*
Perceptible image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=PerceptibleThreshold(q[i],epsilon);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,PerceptibleImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n d o m T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RandomThresholdImage() changes the value of individual pixels based on the
% intensity of each pixel compared to a random threshold. The result is a
% low-contrast, two-color image.
%
% The format of the RandomThresholdImage method is:
%
% MagickBooleanType RandomThresholdImage(Image *image,
%        const double min_threshold,const double max_threshold,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o min_threshold,max_threshold: Specify the low and high thresholds.
%      These values range from 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
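/*
  Summary of the rule implemented below (an illustrative restatement):
  channel values below min_threshold always become 0 (black), values above
  max_threshold always become QuantumRange (white), and values in between
  are compared against a uniformly random threshold in [0, QuantumRange].
*/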
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const double min_threshold,const double max_threshold,
  ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
/*
Random threshold image.
*/
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
threshold;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if ((double) q[i] < min_threshold)
threshold=min_threshold;
else
if ((double) q[i] > max_threshold)
threshold=max_threshold;
else
threshold=(double) (QuantumRange*
GetPseudoRandomValue(random_info[id]));
q[i]=(double) q[i] <= threshold ? 0 : QuantumRange;
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ThresholdImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n g e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RangeThresholdImage() applies soft and hard thresholding.
%
% The format of the RangeThresholdImage method is:
%
% MagickBooleanType RangeThresholdImage(Image *image,
% const double low_black,const double low_white,const double high_white,
% const double high_black,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o low_black: Define the minimum black threshold value.
%
% o low_white: Define the minimum white threshold value.
%
% o high_white: Define the maximum white threshold value.
%
% o high_black: Define the maximum black threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
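/*
  Restating the transfer function implemented below (x = pixel intensity):

    x <  low_black                  -> 0
    low_black  <= x <  low_white    -> ramp from 0 up to QuantumRange
    low_white  <= x <= high_white   -> QuantumRange
    high_white <  x <= high_black   -> ramp from QuantumRange down to 0
    x >  high_black                 -> 0
*/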
MagickExport MagickBooleanType RangeThresholdImage(Image *image,
const double low_black,const double low_white,const double high_white,
const double high_black,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) TransformImageColorspace(image,sRGBColorspace,exception);
/*
Range threshold image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
pixel;
ssize_t
i;
pixel=GetPixelIntensity(image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (image->channel_mask != DefaultChannels)
pixel=(double) q[i];
if (pixel < low_black)
q[i]=(Quantum) 0;
else
if ((pixel >= low_black) && (pixel < low_white))
q[i]=ClampToQuantum(QuantumRange*
PerceptibleReciprocal(low_white-low_black)*(pixel-low_black));
else
if ((pixel >= low_white) && (pixel <= high_white))
q[i]=QuantumRange;
else
if ((pixel > high_white) && (pixel <= high_black))
q[i]=ClampToQuantum(QuantumRange*PerceptibleReciprocal(
high_black-high_white)*(high_black-pixel));
            else
              q[i]=(Quantum) 0;
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ThresholdImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W h i t e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
% the threshold into white while leaving all pixels at or below the threshold
% unchanged.
%
% The format of the WhiteThresholdImage method is:
%
% MagickBooleanType WhiteThresholdImage(Image *image,
%        const char *thresholds,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o thresholds: Define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
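/*
  Usage sketch (illustrative, not part of the original source), assuming
  `image` and `exception` exist.  The threshold string is parsed with
  ParseGeometry(), so "50%" forces every channel value above half of
  QuantumRange to white:

    (void) WhiteThresholdImage(image,"50%",exception);
*/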
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
GeometryInfo
geometry_info;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
threshold;
MagickStatusType
flags;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (thresholds == (const char *) NULL)
return(MagickTrue);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) TransformImageColorspace(image,sRGBColorspace,exception);
GetPixelInfo(image,&threshold);
flags=ParseGeometry(thresholds,&geometry_info);
threshold.red=geometry_info.rho;
threshold.green=geometry_info.rho;
threshold.blue=geometry_info.rho;
threshold.black=geometry_info.rho;
threshold.alpha=100.0;
if ((flags & SigmaValue) != 0)
threshold.green=geometry_info.sigma;
if ((flags & XiValue) != 0)
threshold.blue=geometry_info.xi;
if ((flags & PsiValue) != 0)
threshold.alpha=geometry_info.psi;
if (threshold.colorspace == CMYKColorspace)
{
if ((flags & PsiValue) != 0)
threshold.black=geometry_info.psi;
if ((flags & ChiValue) != 0)
threshold.alpha=geometry_info.chi;
}
if ((flags & PercentValue) != 0)
{
threshold.red*=(MagickRealType) (QuantumRange/100.0);
threshold.green*=(MagickRealType) (QuantumRange/100.0);
threshold.blue*=(MagickRealType) (QuantumRange/100.0);
threshold.black*=(MagickRealType) (QuantumRange/100.0);
threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
}
/*
White threshold image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
pixel;
ssize_t
i;
pixel=GetPixelIntensity(image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (image->channel_mask != DefaultChannels)
pixel=(double) q[i];
if (pixel > GetPixelInfoChannel(&threshold,channel))
q[i]=QuantumRange;
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
|
GeneralMatrixMatrix.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
typedef gebp_traits<RhsScalar,LhsScalar> Traits;
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsStride,
ResScalar* res, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar,LhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
// transpose the product such that the result is column major
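    // (a row-major C = A*B occupies the same memory as the column-major
    //  C^T = B^T * A^T, so we swap and transpose the operands and forward
    //  to the column-major specialization)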
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
ColMajor>
::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
}
};
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsStride,
ResScalar* _res, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar,RhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
LhsMapper lhs(_lhs,lhsStride);
RhsMapper rhs(_rhs,rhsStride);
ResMapper res(_res, resStride);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction
gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
Index tid = omp_get_thread_num();
Index threads = omp_get_num_threads();
LhsScalar* blockA = blocking.blockA();
eigen_internal_assert(blockA!=0);
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
        const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B' and cols of A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing B'.
pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);
// Pack A_k to A' in a parallel fashion:
// each thread packs the sub block A_k,i to A'_i where i is the thread id.
// However, before copying to A'_i, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
while(info[tid].users!=0) {}
info[tid].users += threads;
pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);
// Notify the other threads that the part A'_i is ready to go.
info[tid].sync = k;
// Computes C_i += A' * B' per A'_i
for(Index shift=0; shift<threads; ++shift)
{
Index i = (tid+shift)%threads;
// At this point we have to make sure that A'_i has been updated by the thread i,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if (shift>0) {
while(info[i].sync!=k) {
}
}
gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
}
// Then keep going as usual with the remaining B'
for(Index j=nc; j<cols; j+=nc)
{
const Index actual_nc = (std::min)(j+nc,cols)-j;
// pack B_k,j to B'
pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);
// C_j += A' * B'
gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
}
// Release all the sub blocks A'_i of A' for the current thread,
// i.e., we simply decrement the number of users by 1
#pragma omp critical
{
for(Index i=0; i<threads; ++i)
#pragma omp atomic
--(info[i].users);
}
}
}
else
#endif // EIGEN_HAS_OPENMP
{
EIGEN_UNUSED_VARIABLE(info);
// this is the sequential version!
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
for(Index i2=0; i2<rows; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,rows)-i2;
for(Index k2=0; k2<depth; k2+=kc)
{
const Index actual_kc = (std::min)(k2+kc,depth)-k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
// Note that this panel will be read as many times as the number of blocks in the rhs's
// horizontal panel which is, in practice, a very low number.
pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);
// For each kc x nc block of the rhs's horizontal panel...
for(Index j2=0; j2<cols; j2+=nc)
{
const Index actual_nc = (std::min)(j2+nc,cols)-j2;
// We pack the rhs's block into a sequential chunk of memory (L2 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
// micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
if((!pack_rhs_once) || i2==0)
pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);
// Everything is packed, we can now call the panel * block kernel:
gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
}
}
}
}
}
};
/*********************************************************************************
* Specialization of generic_product_impl for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
{}
void initParallelSession(Index num_threads) const
{
m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
m_blocking.allocateA();
}
void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
{
if(cols==-1)
cols = m_rhs.cols();
Gemm::run(rows, cols, m_lhs.cols(),
&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
(Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
typedef typename Gemm::Traits Traits;
protected:
const Lhs& m_lhs;
const Rhs& m_rhs;
Dest& m_dest;
Scalar m_actualAlpha;
BlockingType& m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
typedef _LhsScalar LhsScalar;
typedef _RhsScalar RhsScalar;
protected:
LhsScalar* m_blockA;
RhsScalar* m_blockB;
Index m_mc;
Index m_nc;
Index m_kc;
public:
level3_blocking()
: m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
{}
inline Index mc() const { return m_mc; }
inline Index nc() const { return m_nc; }
inline Index kc() const { return m_kc; }
inline LhsScalar* blockA() { return m_blockA; }
inline RhsScalar* blockB() { return m_blockB; }
};
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor,
ActualRows = Transpose ? MaxCols : MaxRows,
ActualCols = Transpose ? MaxRows : MaxCols
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
enum {
SizeA = ActualRows * MaxDepth,
SizeB = ActualCols * MaxDepth
};
EIGEN_ALIGN_DEFAULT LhsScalar m_staticA[SizeA];
EIGEN_ALIGN_DEFAULT RhsScalar m_staticB[SizeB];
public:
gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
{
this->m_mc = ActualRows;
this->m_nc = ActualCols;
this->m_kc = MaxDepth;
this->m_blockA = m_staticA;
this->m_blockB = m_staticB;
}
void initParallel(Index, Index, Index, Index)
{}
inline void allocateA() {}
inline void allocateB() {}
inline void allocateAll() {}
};
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
Index m_sizeA;
Index m_sizeB;
public:
gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
if(l3_blocking)
{
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
}
else // no l3 blocking
{
Index m = this->m_mc;
Index n = this->m_nc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, n, num_threads);
}
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
void initParallel(Index rows, Index cols, Index depth, Index num_threads)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
Index m = this->m_mc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
void allocateA()
{
if(this->m_blockA==0)
this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
}
void allocateB()
{
if(this->m_blockB==0)
this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
}
void allocateAll()
{
allocateA();
allocateB();
}
~gemm_blocking_space()
{
aligned_delete(this->m_blockA, m_sizeA);
aligned_delete(this->m_blockB, m_sizeB);
}
};
} // end namespace internal
namespace internal {
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;
typedef internal::blas_traits<Rhs> RhsBlasTraits;
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;
enum {
MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
};
typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;
template<typename Dst>
static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
lazyproduct::evalTo(dst, lhs, rhs);
else
{
dst.setZero();
scaleAndAddTo(dst, lhs, rhs, Scalar(1));
}
}
template<typename Dst>
static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
lazyproduct::addTo(dst, lhs, rhs);
else
scaleAndAddTo(dst,lhs, rhs, Scalar(1));
}
template<typename Dst>
static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
lazyproduct::subTo(dst, lhs, rhs);
else
scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
}
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
{
eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
return;
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
* RhsBlasTraits::extractScalarFactor(a_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
typedef internal::gemm_functor<
Scalar, Index,
internal::general_matrix_matrix_product<
Index,
LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), Dest::Flags&RowMajorBit);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
private.c
|
#include <omp.h>
#include <stdio.h>
int alpha[10],beta[10],i;
#pragma omp threadprivate(alpha)
int main(void)
{
  /* first parallel region: every thread fills its own copy of the
     threadprivate array alpha and of the private array beta */
  #pragma omp parallel private(i,beta)
  {
    int id = omp_get_thread_num();
    for (i = 0; i < 10; i++)
      alpha[i] = beta[i] = id * i;
  }
  /* second parallel region: alpha keeps each thread's values from the
     first region (threadprivate data persists when the thread count is
     unchanged and dynamic threads are disabled), while beta was private
     to the first region, so the file-scope beta read here is still 0 */
  #pragma omp parallel
  printf("I am thread %d: alpha[3] = %d and beta[3] = %d\n",
    omp_get_thread_num(), alpha[3], beta[3]);
  return 0;
}
|
laplace2d.c
|
#include <stdio.h>
#include <math.h>
#include <string.h>
#include "timer.h"
#define NN 4096
#define NM 4096
double A[NN][NM];
double Anew[NN][NM];
int main(int argc, char** argv)
{
const int n = NN;
const int m = NM;
const int iter_max = 1000;
const double tol = 1.0e-6;
double error = 1.0;
memset(A, 0, n * m * sizeof(double));
memset(Anew, 0, n * m * sizeof(double));
for (int j = 0; j < n; j++)
{
A[j][0] = 1.0;
Anew[j][0] = 1.0;
}
printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m);
StartTimer();
int iter = 0;
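    // The OpenACC data region below keeps both grids resident on the device
    // for the whole solve: copy(A) transfers A in on entry and out on exit,
    // while create(Anew) only allocates device memory for the scratch array.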
#pragma acc data copy(A), create(Anew)
while ( error > tol && iter < iter_max )
{
error = 0.0;
//#pragma omp parallel for shared(m, n, Anew, A)
#pragma acc kernels
for( int j = 1; j < n-1; j++)
{
for( int i = 1; i < m-1; i++ )
{
Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1]
+ A[j-1][i] + A[j+1][i]);
error = fmax( error, fabs(Anew[j][i] - A[j][i]));
}
}
//#pragma omp parallel for shared(m, n, Anew, A)
#pragma acc kernels
for( int j = 1; j < n-1; j++)
{
for( int i = 1; i < m-1; i++ )
{
A[j][i] = Anew[j][i];
}
}
if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error);
iter++;
}
double runtime = GetTimer();
printf(" total: %f s\n", runtime / 1000);
    return 0;
}
|
Example_copyin.1.c
|
/*
* @@name: copyin.1c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
*/
#include <stdlib.h>
float* work;
int size;
float tol;
#pragma omp threadprivate(work,size,tol)
void build()
{
int i;
work = (float*)malloc( sizeof(float)*size );
for( i = 0; i < size; ++i ) work[i] = tol;
}
void copyin_example( float t, int n )
{
tol = t;
size = n;
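   /* copyin(tol,size) initializes each thread's copy of the threadprivate
      variables tol and size from the primary thread's values on entry to
      the parallel region, so every thread's build() sees t and n. */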
#pragma omp parallel copyin(tol,size)
{
build();
}
}
|
gmm.c
|
/*
* gmm.c
*
* Contains definitions of functions for training
* Gaussian Mixture Models
*
* Copyright (C) 2015 Sai Nitish Satyavolu
*/
#include "gmm.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef MEX_COMPILE
#include "mex.h"
#define IPrintf mexPrintf
#else
#define IPrintf printf
#endif
#define PI 3.14159265359
GMM* gmm_new(int M, int D, const char *cov_type)
{
GMM *gmm = malloc(sizeof(GMM));
// Set GMM settings
gmm->M = M;
gmm->D = D;
gmm->num_max_iter = 1000;
gmm->converged = 0;
gmm->tol = 0.000001;
gmm->reg = 0.001;
gmm->init_method = RANDOM;
if (strcmp(cov_type, "diagonal") == 0)
gmm->cov_type = DIAGONAL;
else if (strcmp(cov_type, "spherical") == 0)
gmm->cov_type = SPHERICAL;
else
{
IPrintf("WARNING: Invalid cov_type specified. Defaulting to DIAGONAL.\n");
gmm->cov_type = DIAGONAL;
}
// Allocate memory for GMM parameters
gmm->weights = malloc(gmm->M*sizeof(double));
gmm->means = malloc(gmm->M*sizeof(double *));
gmm->covars = malloc(gmm->M*sizeof(double *));
for (int k=0; k<gmm->M; k++)
{
gmm->means[k] = malloc(gmm->D*sizeof(double));
if (gmm->cov_type == DIAGONAL)
gmm->covars[k] = malloc(gmm->D*sizeof(double));
else if (gmm->cov_type == SPHERICAL)
gmm->covars[k] = malloc(1*sizeof(double));
}
return gmm;
}
void gmm_set_max_iter(GMM *gmm, int num_max_iter)
{
gmm->num_max_iter = num_max_iter;
}
void gmm_set_convergence_tol(GMM *gmm, double tol)
{
gmm->tol = tol;
}
void gmm_set_regularization_value(GMM *gmm, double reg)
{
gmm->reg = reg;
}
void gmm_set_initialization_method(GMM *gmm, const char *method)
{
if (strcmp(method, "random") == 0)
gmm->init_method = RANDOM;
else if (strcmp(method, "kmeans") == 0)
gmm->init_method = KMEANS;
else
{
IPrintf("WARNING: Invalid init_method specified. Defaulting to RANDOM.\n");
gmm->init_method = RANDOM;
}
}
void gmm_fit(GMM *gmm, const double *X, int N)
{
// Initialize GMM parameters
_gmm_init_params(gmm, X, N);
// Allocate memory for storing membership probabilities P(k | x_t)
gmm->P_k_giv_xt = malloc(gmm->M*sizeof(double *));
for (int k = 0; k < gmm->M; k++)
gmm->P_k_giv_xt[k] = malloc(N*sizeof(double));
// EM iterations
double llh = 0, llh_prev = 0;
for (int i_iter = 0; i_iter < gmm->num_max_iter; i_iter++)
{
// Perform one EM step
llh_prev = llh;
llh = _gmm_em_step(gmm, X, N);
		// if (i_iter % 20 == 0)
		IPrintf("Iter = %d, LLH = %lf\n", i_iter+1, llh);
		if (isnan(llh)) /* EM produced NaN, e.g. after a collapsed component */
{
IPrintf("WARNING: Encountered NaN value at iteration: %d\n", i_iter+1);
gmm_print_params(gmm);
break;
}
// Check for convergence
if (i_iter > 2 && fabs((llh - llh_prev)/llh_prev) < gmm->tol)
{
gmm->converged = 1;
IPrintf("EM algorithm converged after %d iterations.\n", i_iter+1);
break;
}
}
// Free memory used for storing membership probabilities
for (int k = 0; k < gmm->M; k++)
free(gmm->P_k_giv_xt[k]);
free(gmm->P_k_giv_xt);
}
double gmm_score(GMM *gmm, const double *X, int N)
{
// Allocate memory for storing membership probabilities P(k | x_t)
gmm->P_k_giv_xt = malloc(gmm->M*sizeof(double *));
for (int k = 0; k < gmm->M; k++)
gmm->P_k_giv_xt[k] = malloc(N*sizeof(double));
	// Compute log likelihood
double llh = _gmm_compute_membership_prob(gmm, X, N);
// Free memory used for storing membership probabilities
for (int k = 0; k < gmm->M; k++)
free(gmm->P_k_giv_xt[k]);
free(gmm->P_k_giv_xt);
return llh;
}
// TODO: Other initialization methods
void _gmm_init_params(GMM *gmm, const double *X, int N)
{
if (gmm->init_method == RANDOM)
{
// Random initialization
_gmm_init_params_random(gmm, X, N);
}
else if (gmm->init_method == KMEANS)
{
// K-means initialization
_gmm_init_params_kmeans(gmm, X, N);
}
else
{
// Default is random initialization
_gmm_init_params_random(gmm, X, N);
}
}
// TODO: Unique sampling of data points for initializing component means
void _gmm_init_params_random(GMM *gmm, const double *X, int N)
{
// Initialize means to randomly chosen samples
srand(time(NULL));
for (int k=0; k<gmm->M; k++)
{
int r = rand()%N;
memcpy(gmm->means[k], &X[gmm->D*r], gmm->D*sizeof(double));
}
// Initialize component weights to same value
for (int k=0; k<gmm->M; k++)
gmm->weights[k] = 1.0/gmm->M;
// Initialize component variances to data variance
double *mean = calloc(gmm->D, sizeof(double));
for (int t=0; t<N; t++)
_gmm_vec_add(mean, &X[gmm->D*t], 1, 1, gmm->D);
_gmm_vec_divide_by_scalar(mean, N, gmm->D);
if (gmm->cov_type == DIAGONAL)
{
double *vars = malloc(gmm->D*sizeof(double));
for (int i=0; i<gmm->D; i++)
{
vars[i] = 0;
for (int t=0; t<N; t++)
vars[i] += _gmm_pow2(X[gmm->D*t+i] - mean[i]);
vars[i] = vars[i]/N;
}
for (int k=0; k<gmm->M; k++)
memcpy(gmm->covars[k], vars, gmm->D*sizeof(double));
free(vars);
}
else if (gmm->cov_type == SPHERICAL)
{
double var = 0;
for (int t=0; t<N; t++)
var += _gmm_pow2(_gmm_vec_l2_dist(&X[gmm->D*t], mean, gmm->D));
var = var/(N*gmm->D);
for (int k=0; k<gmm->M; k++)
gmm->covars[k][0] = var;
}
	// Free memory used for storing mean
free(mean);
}
// TODO: Handle empty clusters in K-means
// TODO: Unique sampling of data points for initializing component means
// TODO: Make K-means more efficient
void _gmm_init_params_kmeans(GMM *gmm, const double *X, int N)
{
const int num_iter = 10;
// Initialize means to randomly chosen samples
srand(time(NULL));
for (int k=0; k<gmm->M; k++)
{
int r = rand()%N;
memcpy(gmm->means[k], &X[gmm->D*r], gmm->D*sizeof(double));
}
// K-means iterative algorithm
int *associations = malloc(N*sizeof(int));
for (int i_iter = 0; i_iter < num_iter; i_iter++)
{
IPrintf(".");
		// Find the association of each data point
for (int t = 0; t < N; t++)
{
double min_dist = _gmm_vec_l2_dist(&X[gmm->D*t], gmm->means[0], gmm->D);
associations[t] = 0;
for (int k=1; k<gmm->M; k++)
{
double dist = _gmm_vec_l2_dist(&X[gmm->D*t], gmm->means[k], gmm->D);
if (dist < min_dist)
{
min_dist = dist;
associations[t] = k;
}
}
}
// Update mean of each cluster
for (int k=0; k<gmm->M; k++)
{
memset(gmm->means[k], 0, gmm->D*sizeof(double));
int nk = 0;
for (int t=0; t<N; t++)
{
if (associations[t] == k)
{
nk++;
_gmm_vec_add(gmm->means[k], &X[gmm->D*t], 1, 1, gmm->D);
}
}
_gmm_vec_divide_by_scalar(gmm->means[k], nk, gmm->D);
}
}
IPrintf("\n");
// Initialize component weights to fraction of associations
memset(gmm->weights, 0, gmm->M*sizeof(double));
for (int t=0; t<N; t++)
gmm->weights[associations[t]] += 1.0/N;
// Initialize component variances to variances in each cluster
for (int k=0; k<gmm->M; k++)
{
int nk = 0;
if (gmm->cov_type == SPHERICAL)
gmm->covars[k][0] = 0;
else if (gmm->cov_type == DIAGONAL)
memset(gmm->covars[k], 0, gmm->D*sizeof(double));
for (int t=0; t<N; t++)
{
if (associations[t] == k)
{
nk++;
if (gmm->cov_type == SPHERICAL)
gmm->covars[k][0] += _gmm_pow2(_gmm_vec_l2_dist(&X[gmm->D*t], gmm->means[k], gmm->D));
else if (gmm->cov_type == DIAGONAL)
{
for (int i=0; i<gmm->D; i++)
gmm->covars[k][i] += _gmm_pow2(X[gmm->D*t+i] - gmm->means[k][i]);
}
}
}
if (gmm->cov_type == SPHERICAL)
{
gmm->covars[k][0] = gmm->covars[k][0]/(nk*gmm->D);
if (gmm->covars[k][0] < gmm->reg)
gmm->covars[k][0] = gmm->reg;
}
else if (gmm->cov_type == DIAGONAL)
{
_gmm_vec_divide_by_scalar(gmm->covars[k], nk, gmm->D);
for (int i=0; i<gmm->D; i++)
{
if (gmm->covars[k][i] < gmm->reg)
gmm->covars[k][i] = gmm->reg;
}
}
}
	// Free memory used for storing associations
free(associations);
}
double _gmm_em_step(GMM *gmm, const double *X, int N)
{
double llh;
/* ---------------------------------------------- Expectation step */
// Compute membership probabilities
llh = _gmm_compute_membership_prob(gmm, X, N);
/* --------------------------------------------- Maximization step */
// Update GMM parameters
_gmm_update_params(gmm, X, N);
return llh;
}
double _gmm_compute_membership_prob(GMM *gmm, const double *X, int N)
{
double llh = 0;
// Populate the matrix with log(P(k | xt, gmm))
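	// Per sample the responsibilities are computed with the log-sum-exp
	// trick: log(sum_k exp(v_k)) = m + log(sum_k exp(v_k - m)), m = max_k v_k,
	// which keeps the exponentials from underflowing.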
#pragma omp parallel for reduction(+:llh)
for (int t = 0; t < N; t++)
{
		double max = -INFINITY; /* true maximum of the per-component log terms */
for (int k = 0; k < gmm->M; k++)
{
gmm->P_k_giv_xt[k][t] = log(gmm->weights[k]) + _gmm_log_gaussian_pdf(&X[gmm->D*t], gmm->means[k], gmm->covars[k], gmm->D, gmm->cov_type);
if (gmm->P_k_giv_xt[k][t] > max)
max = gmm->P_k_giv_xt[k][t];
}
double llh_t = 0;
for (int k=0; k<gmm->M; k++)
llh_t += exp(gmm->P_k_giv_xt[k][t] - max);
llh_t = max + log(llh_t);
for (int k = 0; k < gmm->M; k++)
{
gmm->P_k_giv_xt[k][t] = exp(gmm->P_k_giv_xt[k][t] - llh_t);
}
llh += llh_t/N;
}
return llh;
}
void _gmm_update_params(GMM *gmm, const double *X, int N)
{
if (gmm->cov_type == SPHERICAL)
{
#pragma omp parallel for
for (int k=0; k<gmm->M; k++)
{
double sum_P_k = 0;
double sum_xxP_k = 0;
memset(gmm->means[k], 0, gmm->D*sizeof(double));
for (int t=0; t<N; t++)
{
sum_P_k += gmm->P_k_giv_xt[k][t];
sum_xxP_k += _gmm_vec_dot_prod(&X[gmm->D*t], &X[gmm->D*t], gmm->D) * gmm->P_k_giv_xt[k][t];
_gmm_vec_add(gmm->means[k], &X[gmm->D*t], 1, gmm->P_k_giv_xt[k][t], gmm->D);
}
_gmm_vec_divide_by_scalar(gmm->means[k], sum_P_k, gmm->D);
gmm->weights[k] = sum_P_k/N;
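			/* spherical variance from the identity Var[x] = E[|x|^2] - |mu|^2,
			   averaged over the D dimensions */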
gmm->covars[k][0] = (sum_xxP_k/sum_P_k - _gmm_vec_dot_prod(gmm->means[k], gmm->means[k], gmm->D))/gmm->D;
if (gmm->covars[k][0] < gmm->reg)
gmm->covars[k][0] = gmm->reg;
}
}
else if (gmm->cov_type == DIAGONAL)
{
#pragma omp parallel for
for (int k=0; k<gmm->M; k++)
{
double sum_P_k = 0;
memset(gmm->means[k], 0, gmm->D*sizeof(double));
for (int t=0; t<N; t++)
{
sum_P_k += gmm->P_k_giv_xt[k][t];
_gmm_vec_add(gmm->means[k], &X[gmm->D*t], 1, gmm->P_k_giv_xt[k][t], gmm->D);
}
gmm->weights[k] = sum_P_k/N;
_gmm_vec_divide_by_scalar(gmm->means[k], sum_P_k, gmm->D);
memset(gmm->covars[k], 0, gmm->D*sizeof(double));
for (int t=0; t<N; t++)
{
for (int i=0; i<gmm->D; i++)
gmm->covars[k][i] += gmm->P_k_giv_xt[k][t]*_gmm_pow2(X[gmm->D*t+i] - gmm->means[k][i]);
}
_gmm_vec_divide_by_scalar(gmm->covars[k], sum_P_k, gmm->D);
for (int i=0; i<gmm->D; i++)
{
if (gmm->covars[k][i] < gmm->reg)
gmm->covars[k][i] = gmm->reg;
}
}
}
}
// TODO: Pre-compute det of covariance matrix
double _gmm_log_gaussian_pdf(const double *x, const double *mean, const double *covar, int D, CovType cov_type)
{
double result = 0;
if (cov_type == SPHERICAL)
result = -0.5 * D * log(2*PI*covar[0]) - _gmm_pow2(_gmm_vec_l2_dist(x, mean, D))/(2*covar[0]);
else if (cov_type == DIAGONAL)
{
double det = 1;
for (int i=0; i<D; ++i)
det *= covar[i];
result = -0.5 * D * log(2*PI) - 0.5 * log(det);
for (int i=0; i<D; ++i)
result -= _gmm_pow2(x[i] - mean[i])/(2*covar[i]);
}
return result;
}
double _gmm_vec_l2_dist(const double *x, const double *y, int D)
{
double l2_dist_sq = 0;
for (int i=0; i<D; i++)
{
l2_dist_sq += _gmm_pow2(x[i] - y[i]);
}
return(sqrt(l2_dist_sq));
}
void _gmm_vec_add(double *x, const double *y, double a, double b, int D)
{
for (int i=0; i<D; i++)
x[i] = a*x[i] + b*y[i];
}
void _gmm_vec_divide_by_scalar(double *x, double a, int D)
{
for (int i=0; i<D; i++)
x[i] = x[i]/a;
}
double _gmm_vec_dot_prod(const double *x, const double *y, int D)
{
double prod = 0;
for (int i=0; i<D; i++)
prod += x[i]*y[i];
return prod;
}
double _gmm_pow2(double x)
{
return x*x;
}
void gmm_print_params(const GMM *gmm)
{
for (int k=0; k<gmm->M; k++)
{
IPrintf("Component: %d\n", k+1);
IPrintf("Weight: %lf\n", gmm->weights[k]);
if (gmm->D < 50)
{
IPrintf("Mean: ");
for (int i=0; i<gmm->D; i++)
IPrintf("%lf, ", gmm->means[k][i]);
IPrintf("\n");
}
if (gmm->cov_type == SPHERICAL)
IPrintf("Var: %lf\n", gmm->covars[k][0]);
else if (gmm->cov_type == DIAGONAL)
{
IPrintf("Var: ");
int i=0;
for (; i<5 && i<gmm->D; i++)
IPrintf("%lf, ", gmm->covars[k][i]);
if (i < gmm->D)
IPrintf("...");
IPrintf("\n");
}
IPrintf("\n");
}
}
void gmm_free(GMM *gmm)
{
free(gmm->weights);
for (int k=0; k<gmm->M; k++)
{
free(gmm->means[k]);
free(gmm->covars[k]);
}
free(gmm->means);
free(gmm->covars);
free(gmm);
}
|
image.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% %
% MagickCore Image Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/delegate.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/magick-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/timer.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/token.h"
#include "MagickCore/token-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#include "MagickCore/xwindow-private.h"
/*
Constant declaration.
*/
const char
BackgroundColor[] = "#ffffff", /* white */
BorderColor[] = "#dfdfdf", /* gray */
DefaultTileFrame[] = "15x15+3+3",
DefaultTileGeometry[] = "120x120+4+3>",
DefaultTileLabel[] = "%f\n%G\n%b",
ForegroundColor[] = "#000", /* black */
LoadImageTag[] = "Load/Image",
LoadImagesTag[] = "Load/Images",
MatteColor[] = "#bdbdbd", /* gray */
PSDensityGeometry[] = "72.0x72.0",
PSPageGeometry[] = "612x792",
SaveImageTag[] = "Save/Image",
SaveImagesTag[] = "Save/Images",
TransparentColor[] = "#00000000"; /* transparent black */
const double
DefaultResolution = 72.0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImage() returns a pointer to an image structure initialized to
% default values.
%
% The format of the AcquireImage method is:
%
% Image *AcquireImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o exception: return any errors or warnings in this structure.
%
*/
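/*
  Usage sketch (illustrative, not part of the original source):

    ExceptionInfo *exception = AcquireExceptionInfo();
    ImageInfo *image_info = AcquireImageInfo();
    Image *image = AcquireImage(image_info,exception);
*/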
MagickExport Image *AcquireImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
const char
*option;
Image
*image;
MagickStatusType
flags;
/*
Allocate image structure.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
image=(Image *) AcquireCriticalMemory(sizeof(*image));
(void) memset(image,0,sizeof(*image));
/*
Initialize Image structure.
*/
(void) CopyMagickString(image->magick,"MIFF",MagickPathExtent);
image->storage_class=DirectClass;
image->depth=MAGICKCORE_QUANTUM_DEPTH;
image->colorspace=sRGBColorspace;
image->rendering_intent=PerceptualIntent;
image->gamma=1.000f/2.200f;
image->chromaticity.red_primary.x=0.6400f;
image->chromaticity.red_primary.y=0.3300f;
image->chromaticity.red_primary.z=0.0300f;
image->chromaticity.green_primary.x=0.3000f;
image->chromaticity.green_primary.y=0.6000f;
image->chromaticity.green_primary.z=0.1000f;
image->chromaticity.blue_primary.x=0.1500f;
image->chromaticity.blue_primary.y=0.0600f;
image->chromaticity.blue_primary.z=0.7900f;
image->chromaticity.white_point.x=0.3127f;
image->chromaticity.white_point.y=0.3290f;
image->chromaticity.white_point.z=0.3583f;
image->interlace=NoInterlace;
image->ticks_per_second=UndefinedTicksPerSecond;
image->compose=OverCompositeOp;
(void) QueryColorCompliance(MatteColor,AllCompliance,&image->matte_color,
exception);
(void) QueryColorCompliance(BackgroundColor,AllCompliance,
&image->background_color,exception);
(void) QueryColorCompliance(BorderColor,AllCompliance,&image->border_color,
exception);
(void) QueryColorCompliance(TransparentColor,AllCompliance,
&image->transparent_color,exception);
GetTimerInfo(&image->timer);
image->cache=AcquirePixelCache(0);
image->channel_mask=DefaultChannels;
image->channel_map=AcquirePixelChannelMap();
image->blob=CloneBlobInfo((BlobInfo *) NULL);
image->timestamp=GetMagickTime();
image->debug=IsEventLogging();
image->reference_count=1;
image->semaphore=AcquireSemaphoreInfo();
image->signature=MagickCoreSignature;
if (image_info == (ImageInfo *) NULL)
return(image);
/*
Transfer image info.
*/
SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
MagickFalse);
(void) CopyMagickString(image->filename,image_info->filename,
MagickPathExtent);
(void) CopyMagickString(image->magick_filename,image_info->filename,
MagickPathExtent);
(void) CopyMagickString(image->magick,image_info->magick,MagickPathExtent);
if (image_info->size != (char *) NULL)
{
(void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
image->columns=image->extract_info.width;
image->rows=image->extract_info.height;
image->offset=image->extract_info.x;
image->extract_info.x=0;
image->extract_info.y=0;
}
if (image_info->extract != (char *) NULL)
{
RectangleInfo
geometry;
(void) memset(&geometry,0,sizeof(geometry));
flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
{
image->extract_info=geometry;
Swap(image->columns,image->extract_info.width);
Swap(image->rows,image->extract_info.height);
}
}
image->compression=image_info->compression;
image->quality=image_info->quality;
image->endian=image_info->endian;
image->interlace=image_info->interlace;
image->units=image_info->units;
if (image_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
flags=ParseGeometry(image_info->density,&geometry_info);
if ((flags & RhoValue) != 0)
image->resolution.x=geometry_info.rho;
image->resolution.y=image->resolution.x;
if ((flags & SigmaValue) != 0)
image->resolution.y=geometry_info.sigma;
}
if (image_info->page != (char *) NULL)
{
char
*geometry;
image->page=image->extract_info;
geometry=GetPageGeometry(image_info->page);
(void) ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
if (image_info->depth != 0)
image->depth=image_info->depth;
image->dither=image_info->dither;
image->matte_color=image_info->matte_color;
image->background_color=image_info->background_color;
image->border_color=image_info->border_color;
image->transparent_color=image_info->transparent_color;
image->ping=image_info->ping;
image->progress_monitor=image_info->progress_monitor;
image->client_data=image_info->client_data;
if (image_info->cache != (void *) NULL)
ClonePixelCacheMethods(image->cache,image_info->cache);
/*
Set all global options that map to per-image settings.
*/
(void) SyncImageSettings(image_info,image,exception);
/*
Global options that are only set for new images.
*/
option=GetImageOption(image_info,"delay");
if (option != (const char *) NULL)
{
GeometryInfo
geometry_info;
flags=ParseGeometry(option,&geometry_info);
if ((flags & GreaterValue) != 0)
{
if (image->delay > (size_t) floor(geometry_info.rho+0.5))
image->delay=(size_t) floor(geometry_info.rho+0.5);
}
else
if ((flags & LessValue) != 0)
{
if (image->delay < (size_t) floor(geometry_info.rho+0.5))
              image->delay=(size_t) floor(geometry_info.rho+0.5);
}
else
image->delay=(size_t) floor(geometry_info.rho+0.5);
if ((flags & SigmaValue) != 0)
image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
}
option=GetImageOption(image_info,"dispose");
if (option != (const char *) NULL)
image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
MagickFalse,option);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageInfo() allocates the ImageInfo structure.
%
% The format of the AcquireImageInfo method is:
%
% ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
ImageInfo
*image_info;
image_info=(ImageInfo *) AcquireCriticalMemory(sizeof(*image_info));
GetImageInfo(image_info);
return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% The format of the AcquireNextImage method is:
%
% void AcquireNextImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
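%  A sketch of the idiom used by image coders when decoding multi-frame
%  files (assumes another frame follows the current one):
%
%      AcquireNextImage(image_info,image,exception);
%      if (GetNextImageInList(image) == (Image *) NULL)
%        return(DestroyImageList(image));
%      image=SyncNextImageInList(image);
%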
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
/*
Allocate image structure.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->next=AcquireImage(image_info,exception);
if (GetNextImageInList(image) == (Image *) NULL)
return;
(void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
MagickPathExtent);
if (image_info != (ImageInfo *) NULL)
(void) CopyMagickString(GetNextImageInList(image)->filename,
image_info->filename,MagickPathExtent);
DestroyBlob(GetNextImageInList(image));
image->next->blob=ReferenceBlob(image->blob);
image->next->endian=image->endian;
image->next->scene=image->scene+1;
image->next->previous=image;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A p p e n d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AppendImages() takes all images from the current image pointer to the end
% of the image list and appends them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting affects how the image is justified in the
% final image.
%
% The format of the AppendImages method is:
%
% Image *AppendImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o exception: return any errors or warnings in this structure.
%
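%  A minimal usage sketch (`images' is assumed to be a previously read
%  image list; error handling is elided):
%
%      append_image=AppendImages(images,MagickTrue,exception);
%      if (append_image != (Image *) NULL)
%        append_image=DestroyImage(append_image);
%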
*/
MagickExport Image *AppendImages(const Image *images,
const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag "Append/Image"
CacheView
*append_view;
Image
*append_image;
MagickBooleanType
homogeneous_colorspace,
status;
MagickOffsetType
n;
PixelTrait
alpha_trait;
RectangleInfo
geometry;
register const Image
*next;
size_t
depth,
height,
number_images,
width;
ssize_t
x_offset,
y,
y_offset;
/*
Compute maximum area of appended area.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
alpha_trait=images->alpha_trait;
number_images=1;
width=images->columns;
height=images->rows;
depth=images->depth;
homogeneous_colorspace=MagickTrue;
next=GetNextImageInList(images);
for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
{
if (next->depth > depth)
depth=next->depth;
if (next->colorspace != images->colorspace)
homogeneous_colorspace=MagickFalse;
if (next->alpha_trait != UndefinedPixelTrait)
alpha_trait=BlendPixelTrait;
number_images++;
if (stack != MagickFalse)
{
if (next->columns > width)
width=next->columns;
height+=next->rows;
continue;
}
width+=next->columns;
if (next->rows > height)
height=next->rows;
}
/*
Append images.
*/
append_image=CloneImage(images,width,height,MagickTrue,exception);
if (append_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(append_image,DirectClass,exception) == MagickFalse)
{
append_image=DestroyImage(append_image);
return((Image *) NULL);
}
if (homogeneous_colorspace == MagickFalse)
(void) SetImageColorspace(append_image,sRGBColorspace,exception);
append_image->depth=depth;
append_image->alpha_trait=alpha_trait;
append_image->page=images->page;
(void) SetImageBackgroundColor(append_image,exception);
status=MagickTrue;
x_offset=0;
y_offset=0;
next=images;
append_view=AcquireAuthenticCacheView(append_image,exception);
for (n=0; n < (MagickOffsetType) number_images; n++)
{
CacheView
*image_view;
MagickBooleanType
proceed;
SetGeometry(append_image,&geometry);
GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
if (stack != MagickFalse)
x_offset-=geometry.x;
else
y_offset-=geometry.y;
image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(next,next,next->rows,1)
#endif
for (y=0; y < (ssize_t) next->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
next->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
GetPixelInfo(next,&pixel);
for (x=0; x < (ssize_t) next->columns; x++)
{
GetPixelInfoPixel(next,p,&pixel);
SetPixelViaPixelInfo(append_image,&pixel,q);
p+=GetPixelChannels(next);
q+=GetPixelChannels(append_image);
}
sync=SyncCacheViewAuthenticPixels(append_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (stack == MagickFalse)
{
x_offset+=(ssize_t) next->columns;
y_offset=0;
}
else
{
x_offset=0;
y_offset+=(ssize_t) next->rows;
}
proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
if (proceed == MagickFalse)
break;
next=GetNextImageInList(next);
}
append_view=DestroyCacheView(append_view);
if (status == MagickFalse)
append_image=DestroyImage(append_image);
return(append_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a t c h I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CatchImageException() returns UndefinedException if no exceptions are
%  found in the image sequence; otherwise it determines the most severe
%  exception and reports it as a warning or error, depending on the severity.
%
% The format of the CatchImageException method is:
%
% ExceptionType CatchImageException(Image *image)
%
% A description of each parameter follows:
%
% o image: An image sequence.
%
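%  A minimal usage sketch (the list disposal on error is illustrative):
%
%      severity=CatchImageException(image);
%      if (severity >= ErrorException)
%        image=DestroyImageList(image);
%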
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
ExceptionInfo
*exception;
ExceptionType
severity;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
exception=AcquireExceptionInfo();
CatchException(exception);
severity=exception->severity;
exception=DestroyExceptionInfo(exception);
return(severity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClipImagePath() sets the image clip mask based on any clipping path
%  information, if it exists.
%
% The format of the ClipImagePath method is:
%
% MagickBooleanType ClipImagePath(Image *image,const char *pathname,
% const MagickBooleanType inside,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
% o exception: return any errors or warnings in this structure.
%
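%  A minimal usage sketch clipping to the first embedded path ("#1" is the
%  conventional default path name):
%
%      status=ClipImagePath(image,"#1",MagickTrue,exception);
%      if (status == MagickFalse)
%        CatchException(exception);
%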
*/
MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception)
{
return(ClipImagePath(image,"#1",MagickTrue,exception));
}
MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
const MagickBooleanType inside,ExceptionInfo *exception)
{
#define ClipImagePathTag "ClipPath/Image"
char
*property;
const char
*value;
Image
*clip_mask;
ImageInfo
*image_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pathname != NULL);
property=AcquireString(pathname);
(void) FormatLocaleString(property,MagickPathExtent,"8BIM:1999,2998:%s",
pathname);
value=GetImageProperty(image,property,exception);
property=DestroyString(property);
if (value == (const char *) NULL)
{
ThrowFileException(exception,OptionError,"NoClipPathDefined",
image->filename);
return(MagickFalse);
}
image_info=AcquireImageInfo();
(void) CopyMagickString(image_info->filename,image->filename,
MagickPathExtent);
(void) ConcatenateMagickString(image_info->filename,pathname,
MagickPathExtent);
clip_mask=BlobToImage(image_info,value,strlen(value),exception);
image_info=DestroyImageInfo(image_info);
if (clip_mask == (Image *) NULL)
return(MagickFalse);
if (clip_mask->storage_class == PseudoClass)
{
(void) SyncImage(clip_mask,exception);
if (SetImageStorageClass(clip_mask,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (inside == MagickFalse)
(void) NegateImage(clip_mask,MagickFalse,exception);
(void) FormatLocaleString(clip_mask->magick_filename,MagickPathExtent,
"8BIM:1999,2998:%s\nPS",pathname);
(void) SetImageMask(image,WritePixelMask,clip_mask,exception);
clip_mask=DestroyImage(clip_mask);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImage() copies an image and returns the copy as a new image object.
%
%  If the specified columns and rows are 0, an exact copy of the image is
% returned, otherwise the pixel data is undefined and must be initialized
% with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
% failure, a NULL image is returned and exception describes the reason for the
% failure.
%
% The format of the CloneImage method is:
%
% Image *CloneImage(const Image *image,const size_t columns,
%        const size_t rows,const MagickBooleanType detach,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the cloned image.
%
% o rows: the number of rows in the cloned image.
%
% o detach: With a value other than 0, the cloned image is detached from
% its parent I/O stream.
%
% o exception: return any errors or warnings in this structure.
%
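%  A minimal usage sketch: an exact, attached copy versus a 512x512 shell
%  whose pixels must still be initialized (names and sizes are illustrative):
%
%      copy_image=CloneImage(image,0,0,MagickFalse,exception);
%      shell_image=CloneImage(image,512,512,MagickTrue,exception);
%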
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
Image
*clone_image;
double
scale;
size_t
length;
/*
Clone the image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((image->columns == 0) || (image->rows == 0))
{
(void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
"NegativeOrZeroImageSize","`%s'",image->filename);
return((Image *) NULL);
}
clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image));
(void) memset(clone_image,0,sizeof(*clone_image));
clone_image->signature=MagickCoreSignature;
clone_image->storage_class=image->storage_class;
clone_image->number_channels=image->number_channels;
clone_image->number_meta_channels=image->number_meta_channels;
clone_image->metacontent_extent=image->metacontent_extent;
clone_image->colorspace=image->colorspace;
clone_image->alpha_trait=image->alpha_trait;
clone_image->channels=image->channels;
clone_image->mask_trait=image->mask_trait;
clone_image->columns=image->columns;
clone_image->rows=image->rows;
clone_image->dither=image->dither;
clone_image->image_info=CloneImageInfo(image->image_info);
(void) CloneImageProfiles(clone_image,image);
(void) CloneImageProperties(clone_image,image);
(void) CloneImageArtifacts(clone_image,image);
GetTimerInfo(&clone_image->timer);
if (image->ascii85 != (void *) NULL)
Ascii85Initialize(clone_image);
clone_image->extent=image->extent;
clone_image->magick_columns=image->magick_columns;
clone_image->magick_rows=image->magick_rows;
clone_image->type=image->type;
clone_image->channel_mask=image->channel_mask;
clone_image->channel_map=ClonePixelChannelMap(image->channel_map);
(void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
MagickPathExtent);
(void) CopyMagickString(clone_image->magick,image->magick,MagickPathExtent);
(void) CopyMagickString(clone_image->filename,image->filename,
MagickPathExtent);
clone_image->progress_monitor=image->progress_monitor;
clone_image->client_data=image->client_data;
clone_image->reference_count=1;
clone_image->next=image->next;
clone_image->previous=image->previous;
clone_image->list=NewImageList();
if (detach == MagickFalse)
clone_image->blob=ReferenceBlob(image->blob);
else
{
clone_image->next=NewImageList();
clone_image->previous=NewImageList();
clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
}
clone_image->ping=image->ping;
clone_image->debug=IsEventLogging();
clone_image->semaphore=AcquireSemaphoreInfo();
if (image->colormap != (PixelInfo *) NULL)
{
/*
Allocate and copy the image colormap.
*/
clone_image->colors=image->colors;
length=(size_t) image->colors;
clone_image->colormap=(PixelInfo *) AcquireQuantumMemory(length+1,
sizeof(*clone_image->colormap));
if (clone_image->colormap == (PixelInfo *) NULL)
{
clone_image=DestroyImage(clone_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memcpy(clone_image->colormap,image->colormap,length*
sizeof(*clone_image->colormap));
}
if ((columns == 0) || (rows == 0))
{
if (image->montage != (char *) NULL)
(void) CloneString(&clone_image->montage,image->montage);
if (image->directory != (char *) NULL)
(void) CloneString(&clone_image->directory,image->directory);
clone_image->cache=ReferencePixelCache(image->cache);
return(clone_image);
}
scale=1.0;
if (image->columns != 0)
scale=(double) columns/(double) image->columns;
clone_image->page.width=(size_t) floor(scale*image->page.width+0.5);
clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5);
clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5);
scale=1.0;
if (image->rows != 0)
scale=(double) rows/(double) image->rows;
clone_image->page.height=(size_t) floor(scale*image->page.height+0.5);
clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5);
clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5);
clone_image->cache=ClonePixelCache(image->cache);
if (SetImageExtent(clone_image,columns,rows,exception) == MagickFalse)
clone_image=DestroyImage(clone_image);
return(clone_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageInfo() makes a copy of the given image info structure. If
% NULL is specified, a new image info structure is created initialized to
% default values.
%
% The format of the CloneImageInfo method is:
%
% ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
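%  A minimal usage sketch cloning settings to read a second file without
%  disturbing the caller's image info (the filename is illustrative):
%
%      read_info=CloneImageInfo(image_info);
%      (void) CopyMagickString(read_info->filename,"mask.png",
%        MagickPathExtent);
%      mask_image=ReadImage(read_info,exception);
%      read_info=DestroyImageInfo(read_info);
%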
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
ImageInfo
*clone_info;
clone_info=AcquireImageInfo();
if (image_info == (ImageInfo *) NULL)
return(clone_info);
clone_info->compression=image_info->compression;
clone_info->temporary=image_info->temporary;
clone_info->adjoin=image_info->adjoin;
clone_info->antialias=image_info->antialias;
clone_info->scene=image_info->scene;
clone_info->number_scenes=image_info->number_scenes;
clone_info->depth=image_info->depth;
if (image_info->size != (char *) NULL)
(void) CloneString(&clone_info->size,image_info->size);
if (image_info->extract != (char *) NULL)
(void) CloneString(&clone_info->extract,image_info->extract);
if (image_info->scenes != (char *) NULL)
(void) CloneString(&clone_info->scenes,image_info->scenes);
if (image_info->page != (char *) NULL)
(void) CloneString(&clone_info->page,image_info->page);
clone_info->interlace=image_info->interlace;
clone_info->endian=image_info->endian;
clone_info->units=image_info->units;
clone_info->quality=image_info->quality;
if (image_info->sampling_factor != (char *) NULL)
(void) CloneString(&clone_info->sampling_factor,
image_info->sampling_factor);
if (image_info->server_name != (char *) NULL)
(void) CloneString(&clone_info->server_name,image_info->server_name);
if (image_info->font != (char *) NULL)
(void) CloneString(&clone_info->font,image_info->font);
if (image_info->texture != (char *) NULL)
(void) CloneString(&clone_info->texture,image_info->texture);
if (image_info->density != (char *) NULL)
(void) CloneString(&clone_info->density,image_info->density);
clone_info->pointsize=image_info->pointsize;
clone_info->fuzz=image_info->fuzz;
clone_info->matte_color=image_info->matte_color;
clone_info->background_color=image_info->background_color;
clone_info->border_color=image_info->border_color;
clone_info->transparent_color=image_info->transparent_color;
clone_info->dither=image_info->dither;
clone_info->monochrome=image_info->monochrome;
clone_info->colorspace=image_info->colorspace;
clone_info->type=image_info->type;
clone_info->orientation=image_info->orientation;
clone_info->ping=image_info->ping;
clone_info->verbose=image_info->verbose;
clone_info->progress_monitor=image_info->progress_monitor;
clone_info->client_data=image_info->client_data;
clone_info->cache=image_info->cache;
if (image_info->cache != (void *) NULL)
clone_info->cache=ReferencePixelCache(image_info->cache);
if (image_info->profile != (void *) NULL)
clone_info->profile=(void *) CloneStringInfo((StringInfo *)
image_info->profile);
SetImageInfoFile(clone_info,image_info->file);
SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
clone_info->stream=image_info->stream;
clone_info->custom_stream=image_info->custom_stream;
(void) CopyMagickString(clone_info->magick,image_info->magick,
MagickPathExtent);
(void) CopyMagickString(clone_info->unique,image_info->unique,
MagickPathExtent);
(void) CopyMagickString(clone_info->filename,image_info->filename,
MagickPathExtent);
clone_info->channel=image_info->channel;
(void) CloneImageOptions(clone_info,image_info);
clone_info->debug=IsEventLogging();
clone_info->signature=image_info->signature;
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o p y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CopyImagePixels() copies pixels from the source image, as defined by the
%  geometry, to the destination image at the specified offset.
%
% The format of the CopyImagePixels method is:
%
% MagickBooleanType CopyImagePixels(Image *image,const Image *source_image,
% const RectangleInfo *geometry,const OffsetInfo *offset,
% ExceptionInfo *exception);
%
% A description of each parameter follows:
%
% o image: the destination image.
%
% o source_image: the source image.
%
% o geometry: define the dimensions of the source pixel rectangle.
%
% o offset: define the offset in the destination image.
%
% o exception: return any errors or warnings in this structure.
%
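%  A minimal usage sketch copying a 64x64 tile from the source origin to
%  offset (16,16) in the destination (the sizes and offsets are
%  illustrative):
%
%      SetGeometry(source_image,&geometry);
%      geometry.width=64;
%      geometry.height=64;
%      offset.x=16;
%      offset.y=16;
%      status=CopyImagePixels(image,source_image,&geometry,&offset,
%        exception);
%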
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
const Image *source_image,const RectangleInfo *geometry,
const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag "Copy/Image"
CacheView
*image_view,
*source_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(source_image != (Image *) NULL);
assert(geometry != (RectangleInfo *) NULL);
assert(offset != (OffsetInfo *) NULL);
if ((offset->x < 0) || (offset->y < 0) ||
((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
/*
Copy image pixels.
*/
status=MagickTrue;
progress=0;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,source_image,geometry->height,1)
#endif
for (y=0; y < (ssize_t) geometry->height; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
geometry->width,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
geometry->width,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) geometry->width; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
if ((traits == UndefinedPixelTrait) ||
((traits & UpdatePixelTrait) == 0) ||
(source_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CopyImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImage() dereferences an image, deallocating memory associated with
% the image if the reference count becomes zero.
%
% The format of the DestroyImage method is:
%
% Image *DestroyImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
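%  Images are reference counted, so every ReferenceImage() must be paired
%  with a DestroyImage(); the pixels are released only when the count
%  reaches zero.  Here the first DestroyImage() merely drops the count
%  from 2 to 1 and returns NULL:
%
%      image=ReferenceImage(image);
%      image=DestroyImage(image);
%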
*/
MagickExport Image *DestroyImage(Image *image)
{
MagickBooleanType
destroy;
/*
Dereference image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
destroy=MagickFalse;
LockSemaphoreInfo(image->semaphore);
image->reference_count--;
if (image->reference_count == 0)
destroy=MagickTrue;
UnlockSemaphoreInfo(image->semaphore);
if (destroy == MagickFalse)
return((Image *) NULL);
/*
Destroy image.
*/
DestroyImagePixels(image);
image->channel_map=DestroyPixelChannelMap(image->channel_map);
if (image->montage != (char *) NULL)
image->montage=DestroyString(image->montage);
if (image->directory != (char *) NULL)
image->directory=DestroyString(image->directory);
if (image->colormap != (PixelInfo *) NULL)
image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
if (image->geometry != (char *) NULL)
image->geometry=DestroyString(image->geometry);
DestroyImageProfiles(image);
DestroyImageProperties(image);
DestroyImageArtifacts(image);
if (image->ascii85 != (Ascii85Info *) NULL)
image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
if (image->image_info != (ImageInfo *) NULL)
image->image_info=DestroyImageInfo(image->image_info);
DestroyBlob(image);
if (image->semaphore != (SemaphoreInfo *) NULL)
RelinquishSemaphoreInfo(&image->semaphore);
image->signature=(~MagickCoreSignature);
image=(Image *) RelinquishMagickMemory(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageInfo() deallocates memory associated with an ImageInfo
% structure.
%
% The format of the DestroyImageInfo method is:
%
% ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
if (image_info->size != (char *) NULL)
image_info->size=DestroyString(image_info->size);
if (image_info->extract != (char *) NULL)
image_info->extract=DestroyString(image_info->extract);
if (image_info->scenes != (char *) NULL)
image_info->scenes=DestroyString(image_info->scenes);
if (image_info->page != (char *) NULL)
image_info->page=DestroyString(image_info->page);
if (image_info->sampling_factor != (char *) NULL)
image_info->sampling_factor=DestroyString(
image_info->sampling_factor);
if (image_info->server_name != (char *) NULL)
image_info->server_name=DestroyString(
image_info->server_name);
if (image_info->font != (char *) NULL)
image_info->font=DestroyString(image_info->font);
if (image_info->texture != (char *) NULL)
image_info->texture=DestroyString(image_info->texture);
if (image_info->density != (char *) NULL)
image_info->density=DestroyString(image_info->density);
if (image_info->cache != (void *) NULL)
image_info->cache=DestroyPixelCache(image_info->cache);
if (image_info->profile != (StringInfo *) NULL)
image_info->profile=(void *) DestroyStringInfo((StringInfo *)
image_info->profile);
DestroyImageOptions(image_info);
image_info->signature=(~MagickCoreSignature);
image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s a s s o c i a t e I m a g e S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DisassociateImageStream() disassociates the image stream. It checks if the
% blob of the specified image is referenced by other images. If the reference
%  count is higher than 1, a new blob is assigned to the specified image.
%
% The format of the DisassociateImageStream method is:
%
%      void DisassociateImageStream(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
DisassociateBlob(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfo() initializes image_info to default values.
%
% The format of the GetImageInfo method is:
%
% void GetImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
char
*synchronize;
ExceptionInfo
*exception;
/*
File and image dimension members.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image_info != (ImageInfo *) NULL);
(void) memset(image_info,0,sizeof(*image_info));
image_info->adjoin=MagickTrue;
image_info->interlace=NoInterlace;
image_info->channel=DefaultChannels;
image_info->quality=UndefinedCompressionQuality;
image_info->antialias=MagickTrue;
image_info->dither=MagickTrue;
synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (synchronize != (const char *) NULL)
{
image_info->synchronize=IsStringTrue(synchronize);
synchronize=DestroyString(synchronize);
}
exception=AcquireExceptionInfo();
(void) QueryColorCompliance(BackgroundColor,AllCompliance,
&image_info->background_color,exception);
(void) QueryColorCompliance(BorderColor,AllCompliance,
&image_info->border_color,exception);
(void) QueryColorCompliance(MatteColor,AllCompliance,&image_info->matte_color,
exception);
(void) QueryColorCompliance(TransparentColor,AllCompliance,
&image_info->transparent_color,exception);
exception=DestroyExceptionInfo(exception);
image_info->debug=IsEventLogging();
image_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfoFile() returns the image info file member.
%
% The format of the GetImageInfoFile method is:
%
% FILE *GetImageInfoFile(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
return(image_info->file);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMask() returns the mask associated with the image.
%
% The format of the GetImageMask method is:
%
% Image *GetImageMask(const Image *image,const PixelMask type,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
%    o exception: return any errors or warnings in this structure.
%
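%  A minimal usage sketch extracting the read mask as a grayscale image
%  (NULL is returned when no such mask is set):
%
%      mask_image=GetImageMask(image,ReadPixelMask,exception);
%      if (mask_image != (Image *) NULL)
%        mask_image=DestroyImage(mask_image);
%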
*/
MagickExport Image *GetImageMask(const Image *image,const PixelMask type,
ExceptionInfo *exception)
{
CacheView
*mask_view,
*image_view;
Image
*mask_image;
MagickBooleanType
status;
ssize_t
y;
/*
Get image mask.
*/
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
switch (type)
{
case ReadPixelMask:
{
if ((image->channels & ReadMaskChannel) == 0)
return((Image *) NULL);
break;
}
case WritePixelMask:
{
if ((image->channels & WriteMaskChannel) == 0)
return((Image *) NULL);
break;
}
default:
{
if ((image->channels & CompositeMaskChannel) == 0)
return((Image *) NULL);
break;
}
}
mask_image=AcquireImage((ImageInfo *) NULL,exception);
status=SetImageExtent(mask_image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImage(mask_image));
status=MagickTrue;
mask_image->alpha_trait=UndefinedPixelTrait;
(void) SetImageColorspace(mask_image,GRAYColorspace,exception);
image_view=AcquireVirtualCacheView(image,exception);
mask_view=AcquireAuthenticCacheView(mask_image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
switch (type)
{
case ReadPixelMask:
{
SetPixelGray(mask_image,GetPixelReadMask(image,p),q);
break;
}
case WritePixelMask:
{
SetPixelGray(mask_image,GetPixelWriteMask(image,p),q);
break;
}
default:
{
SetPixelGray(mask_image,GetPixelCompositeMask(image,p),q);
break;
}
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(mask_image);
}
if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse)
status=MagickFalse;
}
mask_view=DestroyCacheView(mask_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
mask_image=DestroyImage(mask_image);
return(mask_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e R e f e r e n c e C o u n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageReferenceCount() returns the image reference count.
%
%  The format of the GetImageReferenceCount method is:
%
% ssize_t GetImageReferenceCount(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
ssize_t
reference_count;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
LockSemaphoreInfo(image->semaphore);
reference_count=image->reference_count;
UnlockSemaphoreInfo(image->semaphore);
return(reference_count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
% image. A virtual pixel is any pixel access that is outside the boundaries
% of the image cache.
%
% The format of the GetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
return(GetPixelCacheVirtualMethod(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageFilename() interprets embedded characters in an image filename.
% The filename length is returned.
%
% The format of the InterpretImageFilename method is:
%
% size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
% const char *format,int value,char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
%    o image_info: the image info.
%
% o image: the image.
%
% o format: A filename describing the format to use to write the numeric
% argument. Only the first numeric format identifier is replaced.
%
% o value: Numeric value to substitute into format filename.
%
% o filename: return the formatted filename in this character buffer.
%
% o exception: return any errors or warnings in this structure.
%
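%  A minimal usage sketch: with the format "frame_%03d.png" and value 42,
%  `filename' receives "frame_042.png" (the buffer is assumed to hold
%  MagickPathExtent characters):
%
%      length=InterpretImageFilename(image_info,image,"frame_%03d.png",42,
%        filename,exception);
%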
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
Image *image,const char *format,int value,char *filename,
ExceptionInfo *exception)
{
char
*q;
int
c;
MagickBooleanType
canonical;
register const char
*p;
ssize_t
field_width,
offset;
canonical=MagickFalse;
offset=0;
(void) CopyMagickString(filename,format,MagickPathExtent);
for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
{
q=(char *) p+1;
if (*q == '%')
{
p=q+1;
continue;
}
field_width=0;
if (*q == '0')
field_width=(ssize_t) strtol(q,&q,10);
switch (*q)
{
case 'd':
case 'o':
case 'x':
{
q++;
c=(*q);
*q='\0';
(void) FormatLocaleString(filename+(p-format-offset),(size_t)
(MagickPathExtent-(p-format-offset)),p,value);
offset+=(4-field_width);
*q=c;
(void) ConcatenateMagickString(filename,q,MagickPathExtent);
canonical=MagickTrue;
if (*(q-1) != '%')
break;
p++;
break;
}
case '[':
{
char
pattern[MagickPathExtent];
const char
*option;
register char
*r;
register ssize_t
i;
ssize_t
depth;
/*
Image option.
*/
if (strchr(p,']') == (char *) NULL)
break;
depth=1;
r=q+1;
for (i=0; (i < (MagickPathExtent-1L)) && (*r != '\0'); i++)
{
if (*r == '[')
depth++;
if (*r == ']')
depth--;
if (depth <= 0)
break;
pattern[i]=(*r++);
}
pattern[i]='\0';
if (LocaleNCompare(pattern,"filename:",9) != 0)
break;
option=(const char *) NULL;
if (image != (Image *) NULL)
option=GetImageProperty(image,pattern,exception);
if ((option == (const char *) NULL) && (image != (Image *) NULL))
option=GetImageArtifact(image,pattern);
if ((option == (const char *) NULL) &&
(image_info != (ImageInfo *) NULL))
option=GetImageOption(image_info,pattern);
if (option == (const char *) NULL)
break;
q--;
c=(*q);
*q='\0';
(void) CopyMagickString(filename+(p-format-offset),option,(size_t)
(MagickPathExtent-(p-format-offset)));
offset+=strlen(pattern)-strlen(option)+3;
*q=c;
(void) ConcatenateMagickString(filename,r+1,MagickPathExtent);
canonical=MagickTrue;
if (*(q-1) != '%')
break;
p++;
break;
}
default:
break;
}
}
for (q=filename; *q != '\0'; q++)
if ((*q == '%') && (*(q+1) == '%'))
{
(void) CopyMagickString(q,q+1,(size_t) (MagickPathExtent-(q-filename)));
canonical=MagickTrue;
}
if (canonical == MagickFalse)
(void) CopyMagickString(filename,format,MagickPathExtent);
return(strlen(filename));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s H i g h D y n a m i c R a n g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
%  non-integer or exceeds the bounds of the quantum depth (e.g. for Q16,
%  0..65535).
%
% The format of the IsHighDynamicRangeImage method is:
%
% MagickBooleanType IsHighDynamicRangeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
(void) image;
(void) exception;
return(MagickFalse);
#else
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
pixel;
PixelTrait
traits;
traits=GetPixelChannelTraits(image,(PixelChannel) i);
if (traits == UndefinedPixelTrait)
continue;
pixel=(double) p[i];
if ((pixel < 0.0) || (pixel > QuantumRange) ||
(pixel != (double) ((QuantumAny) pixel)))
break;
}
p+=GetPixelChannels(image);
if (i < (ssize_t) GetPixelChannels(image))
status=MagickFalse;
}
if (x < (ssize_t) image->columns)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O b j e c t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageObject() returns MagickTrue if the image sequence contains a valid
% set of image objects.
%
% The format of the IsImageObject method is:
%
% MagickBooleanType IsImageObject(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
register const Image
*p;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
if (p->signature != MagickCoreSignature)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s T a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsTaintImage() returns MagickTrue if any pixel in the image has been
%  altered since it was first constituted.
%
% The format of the IsTaintImage method is:
%
% MagickBooleanType IsTaintImage(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
char
magick[MagickPathExtent],
filename[MagickPathExtent];
register const Image
*p;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
(void) CopyMagickString(magick,image->magick,MagickPathExtent);
(void) CopyMagickString(filename,image->filename,MagickPathExtent);
for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
{
if (p->taint != MagickFalse)
return(MagickTrue);
if (LocaleCompare(p->magick,magick) != 0)
return(MagickTrue);
if (LocaleCompare(p->filename,filename) != 0)
return(MagickTrue);
}
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModifyImage() ensures that there is only a single reference to the image
% to be modified, updating the provided image pointer to point to a clone of
% the original image if necessary.
%
% The format of the ModifyImage method is:
%
% MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
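%  A copy-on-write sketch before mutating a possibly shared image (the
%  image pointer may be replaced by a clone; the alpha call is
%  illustrative):
%
%      if (ModifyImage(&image,exception) != MagickFalse)
%        (void) SetImageAlpha(image,OpaqueAlpha,exception);
%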
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
ExceptionInfo *exception)
{
Image
*clone_image;
assert(image != (Image **) NULL);
assert(*image != (Image *) NULL);
assert((*image)->signature == MagickCoreSignature);
if ((*image)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
if (GetImageReferenceCount(*image) <= 1)
return(MagickTrue);
clone_image=CloneImage(*image,0,0,MagickTrue,exception);
LockSemaphoreInfo((*image)->semaphore);
(*image)->reference_count--;
UnlockSemaphoreInfo((*image)->semaphore);
*image=clone_image;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w M a g i c k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewMagickImage() creates a blank image canvas of the specified size and
% background color.
%
% The format of the NewMagickImage method is:
%
% Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
% const size_t height,const PixelInfo *background,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o width: the image width.
%
% o height: the image height.
%
% o background: the image color.
%
% o exception: return any errors or warnings in this structure.
%
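%  A minimal usage sketch creating a 640x480 canvas from a queried color
%  (the color name and size are illustrative):
%
%      (void) QueryColorCompliance("red",AllCompliance,&background,
%        exception);
%      canvas=NewMagickImage(image_info,640,480,&background,exception);
%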
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
const size_t width,const size_t height,const PixelInfo *background,
ExceptionInfo *exception)
{
CacheView
*image_view;
Image
*image;
MagickBooleanType
status;
ssize_t
y;
assert(image_info != (const ImageInfo *) NULL);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image_info->signature == MagickCoreSignature);
assert(background != (const PixelInfo *) NULL);
image=AcquireImage(image_info,exception);
image->columns=width;
image->rows=height;
image->colorspace=background->colorspace;
image->alpha_trait=background->alpha_trait;
image->fuzz=background->fuzz;
image->depth=background->depth;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelViaPixelInfo(image,background,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e f e r e n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferenceImage() increments the reference count associated with an image
% returning a pointer to the image.
%
% The format of the ReferenceImage method is:
%
% Image *ReferenceImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
LockSemaphoreInfo(image->semaphore);
image->reference_count++;
UnlockSemaphoreInfo(image->semaphore);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePage() resets the image page canvas and position.
%
% The format of the ResetImagePage method is:
%
% MagickBooleanType ResetImagePage(Image *image,const char *page)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o page: the relative page specification.
%
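%  A minimal usage sketch: "0x0+0+0" clears the page geometry entirely,
%  while a specification such as "+10+20" sets only the page offsets:
%
%      (void) ResetImagePage(image,"0x0+0+0");
%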
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
MagickStatusType
flags;
RectangleInfo
geometry;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
flags=ParseAbsoluteGeometry(page,&geometry);
if ((flags & WidthValue) != 0)
{
if ((flags & HeightValue) == 0)
geometry.height=geometry.width;
image->page.width=geometry.width;
image->page.height=geometry.height;
}
if ((flags & AspectValue) != 0)
{
if ((flags & XValue) != 0)
image->page.x+=geometry.x;
if ((flags & YValue) != 0)
image->page.y+=geometry.y;
}
else
{
if ((flags & XValue) != 0)
{
image->page.x=geometry.x;
if ((image->page.width == 0) && (geometry.x > 0))
image->page.width=image->columns+geometry.x;
}
if ((flags & YValue) != 0)
{
image->page.y=geometry.y;
if ((image->page.height == 0) && (geometry.y > 0))
image->page.height=image->rows+geometry.y;
}
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImagePixels() resets the image pixels, that is, all the pixel
%  components are zeroed.
%
%  The format of the ResetImagePixels method is:
%
% MagickBooleanType ResetImagePixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResetImagePixels(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
size_t
length;
ssize_t
y;
void
*pixels;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
pixels=AcquirePixelCachePixels(image,&length,exception);
if (pixels != (void *) NULL)
{
/*
Reset in-core image pixels.
*/
(void) memset(pixels,0,length);
return(MagickTrue);
}
/*
Reset image pixels.
*/
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
(void) memset(q,0,GetPixelChannels(image)*sizeof(Quantum));
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlpha() sets the alpha levels of the image.
%
% The format of the SetImageAlpha method is:
%
% MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha: the level of transparency: 0 is fully transparent and QuantumRange
% is fully opaque.
%
% o exception: return any errors or warnings in this structure.
%
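%  A minimal usage sketch making an image uniformly half transparent:
%
%      (void) SetImageAlpha(image,QuantumRange/2,exception);
%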
*/
MagickExport MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
image->alpha_trait=BlendPixelTrait;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelAlpha(image,alpha,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e B a c k g r o u n d C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageBackgroundColor() initializes the image pixels to the image
% background color. The background color is defined by the background_color
% member of the image structure.
%
%  The format of the SetImageBackgroundColor method is:
%
% MagickBooleanType SetImageBackgroundColor(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
PixelInfo
background;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if ((image->background_color.alpha != OpaqueAlpha) &&
(image->alpha_trait == UndefinedPixelTrait))
(void) SetImageAlphaChannel(image,OnAlphaChannel,exception);
ConformPixelInfo(image,&image->background_color,&background,exception);
/*
Set image background color.
*/
status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelViaPixelInfo(image,&background,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelMask() sets the image channel mask from the specified channel
% mask.
%
% The format of the SetImageChannelMask method is:
%
% ChannelType SetImageChannelMask(Image *image,
% const ChannelType channel_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel_mask: the channel mask.
%
*/
MagickExport ChannelType SetImageChannelMask(Image *image,
const ChannelType channel_mask)
{
return(SetPixelChannelMask(image,channel_mask));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColor() sets the entire image canvas to the specified color.
%
% The format of the SetImageColor method is:
%
% MagickBooleanType SetImageColor(Image *image,const PixelInfo *color,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o color: the image color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
const PixelInfo *color,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
assert(color != (const PixelInfo *) NULL);
image->colorspace=color->colorspace;
image->alpha_trait=color->alpha_trait;
image->fuzz=color->fuzz;
image->depth=color->depth;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelViaPixelInfo(image,color,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageStorageClass() sets the image class: DirectClass for true color
% images or PseudoClass for colormapped images.
%
% The format of the SetImageStorageClass method is:
%
% MagickBooleanType SetImageStorageClass(Image *image,
% const ClassType storage_class,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o storage_class: The image class.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
const ClassType storage_class,ExceptionInfo *exception)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image->storage_class=storage_class;
return(SyncImagePixelCache(image,exception));
}
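/*
  Usage sketch (illustrative, not part of the original source): promote a
  colormapped image to DirectClass before modifying pixels directly; this is
  the same pattern SmushImages() uses later in this file.
*/
#if 0
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
  return(MagickFalse);
#endif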
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageExtent() sets the image size (i.e. columns & rows).
%
% The format of the SetImageExtent method is:
%
% MagickBooleanType SetImageExtent(Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: The image width in pixels.
%
% o rows: The image height in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
const size_t rows,ExceptionInfo *exception)
{
if ((columns == 0) || (rows == 0))
ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename);
image->columns=columns;
image->rows=rows;
if ((image->depth == 0) || (image->depth > (8*sizeof(MagickSizeType))))
ThrowBinaryException(ImageError,"ImageDepthNotSupported",image->filename);
return(SyncImagePixelCache(image,exception));
}
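/*
  Usage sketch (illustrative, not part of the original source): size a
  freshly acquired canvas; SetImageExtent() also syncs the pixel cache.
  Assumes 'image_info' and 'exception' are valid.
*/
#if 0
{
  Image
    *canvas;

  canvas=AcquireImage(image_info,exception);
  if (SetImageExtent(canvas,640,480,exception) == MagickFalse)
    canvas=DestroyImage(canvas);
}
#endif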
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfo() initializes the 'magick' field of the ImageInfo structure.
% It is set to a type of image format based on the prefix or suffix of the
% filename. For example, 'ps:image' returns PS indicating a PostScript image.
% JPEG is returned for this filename: 'image.jpg'. The filename prefix has
% precedence over the suffix. Use an optional index enclosed in brackets
% after a file name to specify a desired scene of a multi-resolution image
% format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value
% indicates success.
%
% The format of the SetImageInfo method is:
%
% MagickBooleanType SetImageInfo(ImageInfo *image_info,
% const unsigned int frames,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o frames: the number of images you intend to write.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
const unsigned int frames,ExceptionInfo *exception)
{
char
component[MagickPathExtent],
magic[MagickPathExtent],
*q;
const MagicInfo
*magic_info;
const MagickInfo
*magick_info;
ExceptionInfo
*sans_exception;
Image
*image;
MagickBooleanType
status;
register const char
*p;
ssize_t
count;
/*
Look for 'image.format' in filename.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
*component='\0';
GetPathComponent(image_info->filename,SubimagePath,component);
if (*component != '\0')
{
/*
Look for scene specification (e.g. img0001.pcd[4]).
*/
if (IsSceneGeometry(component,MagickFalse) == MagickFalse)
{
if (IsGeometry(component) != MagickFalse)
(void) CloneString(&image_info->extract,component);
}
else
{
size_t
first,
last;
(void) CloneString(&image_info->scenes,component);
image_info->scene=StringToUnsignedLong(image_info->scenes);
image_info->number_scenes=image_info->scene;
p=image_info->scenes;
for (q=(char *) image_info->scenes; *q != '\0'; p++)
{
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
p++;
first=(size_t) strtol(p,&q,10);
last=first;
while (isspace((int) ((unsigned char) *q)) != 0)
q++;
if (*q == '-')
last=(size_t) strtol(q+1,&q,10);
if (first > last)
Swap(first,last);
if (first < image_info->scene)
image_info->scene=first;
if (last > image_info->number_scenes)
image_info->number_scenes=last;
p=q;
}
image_info->number_scenes-=image_info->scene-1;
}
}
*component='\0';
if (*image_info->magick == '\0')
GetPathComponent(image_info->filename,ExtensionPath,component);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if (*component != '\0')
if ((LocaleCompare(component,"gz") == 0) ||
(LocaleCompare(component,"Z") == 0) ||
(LocaleCompare(component,"svgz") == 0) ||
(LocaleCompare(component,"wmz") == 0))
{
char
path[MagickPathExtent];
(void) CopyMagickString(path,image_info->filename,MagickPathExtent);
path[strlen(path)-strlen(component)-1]='\0';
GetPathComponent(path,ExtensionPath,component);
}
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
if (*component != '\0')
if (LocaleCompare(component,"bz2") == 0)
{
char
path[MagickPathExtent];
(void) CopyMagickString(path,image_info->filename,MagickPathExtent);
path[strlen(path)-strlen(component)-1]='\0';
GetPathComponent(path,ExtensionPath,component);
}
#endif
image_info->affirm=MagickFalse;
sans_exception=AcquireExceptionInfo();
if ((*component != '\0') && (IsGlob(component) == MagickFalse))
{
MagickFormatType
format_type;
register ssize_t
i;
static const char
*format_type_formats[] =
{
"AUTOTRACE",
"BROWSE",
"DCRAW",
"EDIT",
"LAUNCH",
"MPEG:DECODE",
"MPEG:ENCODE",
"PRINT",
"PS:ALPHA",
"PS:CMYK",
"PS:COLOR",
"PS:GRAY",
"PS:MONO",
"SCAN",
"SHOW",
"WIN",
(char *) NULL
};
/*
User specified image format.
*/
(void) CopyMagickString(magic,component,MagickPathExtent);
LocaleUpper(magic);
/*
Look for explicit image formats.
*/
format_type=UndefinedFormatType;
magick_info=GetMagickInfo(magic,sans_exception);
if ((magick_info != (const MagickInfo *) NULL) &&
(magick_info->format_type != UndefinedFormatType))
format_type=magick_info->format_type;
i=0;
while ((format_type == UndefinedFormatType) &&
(format_type_formats[i] != (char *) NULL))
{
if ((*magic == *format_type_formats[i]) &&
(LocaleCompare(magic,format_type_formats[i]) == 0))
format_type=ExplicitFormatType;
i++;
}
if (format_type == UndefinedFormatType)
(void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
else
if (format_type == ExplicitFormatType)
{
image_info->affirm=MagickTrue;
(void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
}
if (LocaleCompare(magic,"RGB") == 0)
image_info->affirm=MagickFalse; /* maybe SGI disguised as RGB */
}
/*
Look for explicit 'format:image' in filename.
*/
*magic='\0';
GetPathComponent(image_info->filename,MagickPath,magic);
if (*magic == '\0')
{
(void) CopyMagickString(magic,image_info->magick,MagickPathExtent);
magick_info=GetMagickInfo(magic,sans_exception);
if (frames == 0)
GetPathComponent(image_info->filename,CanonicalPath,component);
else
GetPathComponent(image_info->filename,SubcanonicalPath,component);
(void) CopyMagickString(image_info->filename,component,MagickPathExtent);
}
else
{
const DelegateInfo
*delegate_info;
/*
User specified image format.
*/
LocaleUpper(magic);
magick_info=GetMagickInfo(magic,sans_exception);
delegate_info=GetDelegateInfo(magic,"*",sans_exception);
if (delegate_info == (const DelegateInfo *) NULL)
delegate_info=GetDelegateInfo("*",magic,sans_exception);
if (((magick_info != (const MagickInfo *) NULL) ||
(delegate_info != (const DelegateInfo *) NULL)) &&
(IsMagickConflict(magic) == MagickFalse))
{
image_info->affirm=MagickTrue;
(void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
GetPathComponent(image_info->filename,CanonicalPath,component);
(void) CopyMagickString(image_info->filename,component,
MagickPathExtent);
}
}
sans_exception=DestroyExceptionInfo(sans_exception);
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
if ((image_info->adjoin != MagickFalse) && (frames > 1))
{
/*
Test for multiple image support (e.g. image%02d.png).
*/
(void) InterpretImageFilename(image_info,(Image *) NULL,
image_info->filename,(int) image_info->scene,component,exception);
if ((LocaleCompare(component,image_info->filename) != 0) &&
(strchr(component,'%') == (char *) NULL))
image_info->adjoin=MagickFalse;
}
if ((image_info->adjoin != MagickFalse) && (frames > 0))
{
/*
Some image formats do not support multiple frames per file.
*/
magick_info=GetMagickInfo(magic,exception);
if (magick_info != (const MagickInfo *) NULL)
if (GetMagickAdjoin(magick_info) == MagickFalse)
image_info->adjoin=MagickFalse;
}
if (image_info->affirm != MagickFalse)
return(MagickTrue);
if (frames == 0)
{
unsigned char
*magick;
size_t
magick_size;
/*
Determine the image format from the first few bytes of the file.
*/
magick_size=GetMagicPatternExtent(exception);
if (magick_size == 0)
return(MagickFalse);
image=AcquireImage(image_info,exception);
(void) CopyMagickString(image->filename,image_info->filename,
MagickPathExtent);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
if ((IsBlobSeekable(image) == MagickFalse) ||
(IsBlobExempt(image) != MagickFalse))
{
/*
Copy image to seekable temporary file.
*/
*component='\0';
status=ImageToFile(image,component,exception);
(void) CloseBlob(image);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
SetImageInfoFile(image_info,(FILE *) NULL);
(void) CopyMagickString(image->filename,component,MagickPathExtent);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
(void) CopyMagickString(image_info->filename,component,
MagickPathExtent);
image_info->temporary=MagickTrue;
}
magick=(unsigned char *) AcquireMagickMemory(magick_size);
if (magick == (unsigned char *) NULL)
{
(void) CloseBlob(image);
image=DestroyImage(image);
return(MagickFalse);
}
(void) memset(magick,0,magick_size);
count=ReadBlob(image,magick_size,magick);
(void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
(void) CloseBlob(image);
image=DestroyImage(image);
/*
Check magic cache.
*/
sans_exception=AcquireExceptionInfo();
magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
magick=(unsigned char *) RelinquishMagickMemory(magick);
if ((magic_info != (const MagicInfo *) NULL) &&
(GetMagicName(magic_info) != (char *) NULL))
{
/*
Try to use magick_info that was determined earlier by the extension
*/
if ((magick_info != (const MagickInfo *) NULL) &&
(GetMagickUseExtension(magick_info) != MagickFalse) &&
(LocaleCompare(magick_info->module,GetMagicName(
magic_info)) == 0))
(void) CopyMagickString(image_info->magick,magick_info->name,
MagickPathExtent);
else
{
(void) CopyMagickString(image_info->magick,GetMagicName(
magic_info),MagickPathExtent);
magick_info=GetMagickInfo(image_info->magick,sans_exception);
}
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
sans_exception=DestroyExceptionInfo(sans_exception);
return(MagickTrue);
}
magick_info=GetMagickInfo(image_info->magick,sans_exception);
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
sans_exception=DestroyExceptionInfo(sans_exception);
}
return(MagickTrue);
}
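/*
  Usage sketch (illustrative, not part of the original source): resolve the
  image format implied by a filename.  With frames != 0 the file contents are
  not probed; pass frames == 0 to fall back to the magic-bytes test above.
  The filename "image.jpg" is a placeholder.
*/
#if 0
{
  ImageInfo
    *info;

  info=AcquireImageInfo();
  (void) CopyMagickString(info->filename,"image.jpg",MagickPathExtent);
  if (SetImageInfo(info,1,exception) != MagickFalse)
    (void) fprintf(stdout,"format: %s\n",info->magick);
  info=DestroyImageInfo(info);
}
#endif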
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoBlob() sets the image info blob member.
%
% The format of the SetImageInfoBlob method is:
%
% void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
% const size_t length)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob: the blob.
%
% o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
const size_t length)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
image_info->blob=(void *) blob;
image_info->length=length;
}
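/*
  Usage sketch (illustrative, not part of the original source): attach an
  in-memory encoded image so the next read comes from memory instead of a
  file.  BlobToImage() wraps this same pattern; 'blob', 'length', and the
  "PNG" format hint are assumptions here.
*/
#if 0
{
  Image
    *image;

  (void) CopyMagickString(image_info->magick,"PNG",MagickPathExtent);
  SetImageInfoBlob(image_info,blob,length);
  image=ReadImage(image_info,exception);
}
#endif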
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o C u s t o m S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoCustomStream() sets the image info custom stream handlers.
%
% The format of the SetImageInfoCustomStream method is:
%
% void SetImageInfoCustomStream(ImageInfo *image_info,
% CustomStreamInfo *custom_stream)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o custom_stream: your custom stream methods.
%
*/
MagickExport void SetImageInfoCustomStream(ImageInfo *image_info,
CustomStreamInfo *custom_stream)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
image_info->custom_stream=(CustomStreamInfo *) custom_stream;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoFile() sets the image info file member.
%
% The format of the SetImageInfoFile method is:
%
% void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
image_info->file=file;
}
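/*
  Usage sketch (illustrative, not part of the original source): read from an
  already-open stdio stream.  The "PNG" format hint and the path are
  placeholders; a format hint is assumed since no filename is probed.
*/
#if 0
{
  FILE
    *file;
  Image
    *image;

  file=fopen("image.png","rb");
  if (file != (FILE *) NULL)
    {
      (void) CopyMagickString(image_info->magick,"PNG",MagickPathExtent);
      SetImageInfoFile(image_info,file);
      image=ReadImage(image_info,exception);
      (void) fclose(file);
    }
}
#endif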
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMask() associates a mask with the image. The mask must be the same
% dimensions as the image.
%
% The format of the SetImageMask method is:
%
% MagickBooleanType SetImageMask(Image *image,const PixelMask type,
% const Image *mask,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
% o mask: the image mask.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const PixelMask type,
const Image *mask,ExceptionInfo *exception)
{
CacheView
*mask_view,
*image_view;
MagickBooleanType
status;
ssize_t
y;
/*
Set image mask.
*/
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (mask == (const Image *) NULL)
{
switch (type)
{
case ReadPixelMask:
{
image->channels=(ChannelType) (image->channels & ~ReadMaskChannel);
break;
}
case WritePixelMask:
{
image->channels=(ChannelType) (image->channels & ~WriteMaskChannel);
break;
}
default:
{
image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel);
break;
}
}
return(SyncImagePixelCache(image,exception));
}
switch (type)
{
case ReadPixelMask:
{
image->channels=(ChannelType) (image->channels | ReadMaskChannel);
break;
}
case WritePixelMask:
{
image->channels=(ChannelType) (image->channels | WriteMaskChannel);
break;
}
default:
{
image->channels=(ChannelType) (image->channels | CompositeMaskChannel);
break;
}
}
if (SyncImagePixelCache(image,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
image->mask_trait=UpdatePixelTrait;
mask_view=AcquireVirtualCacheView(mask,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(mask,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(mask_view,0,y,mask->columns,1,exception);
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
intensity;
intensity=0.0;
if ((x < (ssize_t) mask->columns) && (y < (ssize_t) mask->rows))
intensity=GetPixelIntensity(mask,p);
switch (type)
{
case ReadPixelMask:
{
SetPixelReadMask(image,ClampToQuantum(intensity),q);
break;
}
case WritePixelMask:
{
SetPixelWriteMask(image,ClampToQuantum(intensity),q);
break;
}
default:
{
SetPixelCompositeMask(image,ClampToQuantum(intensity),q);
break;
}
}
p+=GetPixelChannels(mask);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image->mask_trait=UndefinedPixelTrait;
mask_view=DestroyCacheView(mask_view);
image_view=DestroyCacheView(image_view);
return(status);
}
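/*
  Usage sketch (illustrative, not part of the original source): attach a
  grayscale mask the same size as 'image', then detach it by passing NULL.
  'mask' is assumed to have been read or composed elsewhere.
*/
#if 0
(void) SetImageMask(image,ReadPixelMask,mask,exception);
/* ... masked operations ... */
(void) SetImageMask(image,ReadPixelMask,(const Image *) NULL,exception);
#endif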
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e R e g i o n M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageRegionMask() associates a mask with the image as defined by the
% specified region.
%
% The format of the SetImageRegionMask method is:
%
% MagickBooleanType SetImageRegionMask(Image *image,const PixelMask type,
% const RectangleInfo *region,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
% o geometry: the mask region.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageRegionMask(Image *image,
const PixelMask type,const RectangleInfo *region,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
/*
Set image mask as defined by the region.
*/
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (region == (const RectangleInfo *) NULL)
{
switch (type)
{
case ReadPixelMask:
{
image->channels=(ChannelType) (image->channels & ~ReadMaskChannel);
break;
}
case WritePixelMask:
{
image->channels=(ChannelType) (image->channels & ~WriteMaskChannel);
break;
}
default:
{
image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel);
break;
}
}
return(SyncImagePixelCache(image,exception));
}
switch (type)
{
case ReadPixelMask:
{
image->channels=(ChannelType) (image->channels | ReadMaskChannel);
break;
}
case WritePixelMask:
{
image->channels=(ChannelType) (image->channels | WriteMaskChannel);
break;
}
default:
{
image->channels=(ChannelType) (image->channels | CompositeMaskChannel);
break;
}
}
if (SyncImagePixelCache(image,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
image->mask_trait=UpdatePixelTrait;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
pixel;
pixel=QuantumRange;
if (((x >= region->x) && (x < (region->x+(ssize_t) region->width))) &&
((y >= region->y) && (y < (region->y+(ssize_t) region->height))))
pixel=(Quantum) 0;
switch (type)
{
case ReadPixelMask:
{
SetPixelReadMask(image,pixel,q);
break;
}
case WritePixelMask:
{
SetPixelWriteMask(image,pixel,q);
break;
}
default:
{
SetPixelCompositeMask(image,pixel,q);
break;
}
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image->mask_trait=UndefinedPixelTrait;
image_view=DestroyCacheView(image_view);
return(status);
}
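/*
  Usage sketch (illustrative, not part of the original source): mask a
  100x50 rectangle at (10,10).  Per the loop above, pixels inside the region
  receive 0 and pixels outside receive QuantumRange.
*/
#if 0
{
  RectangleInfo
    region;

  region.x=10;
  region.y=10;
  region.width=100;
  region.height=50;
  (void) SetImageRegionMask(image,WritePixelMask,&region,exception);
}
#endif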
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
% image and returns the previous setting. A virtual pixel is any pixel access
% that is outside the boundaries of the image cache.
%
% The format of the SetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
return(SetPixelCacheVirtualMethod(image,virtual_pixel_method,exception));
}
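/*
  Usage sketch (illustrative, not part of the original source): have
  out-of-bounds accesses replicate the nearest edge pixel, e.g. before an
  operation that samples past the image border.
*/
#if 0
(void) SetImageVirtualPixelMethod(image,EdgeVirtualPixelMethod,exception);
#endif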
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S m u s h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SmushImages() takes all images from the current image pointer to the end
% of the image list and smushes them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting affects how the image is justified in the
% final image.
%
% The format of the SmushImages method is:
%
% Image *SmushImages(const Image *images,const MagickBooleanType stack,
% const ssize_t offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o offset: minimum distance in pixels between images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
CacheView
*left_view,
*right_view;
const Image
*left_image,
*right_image;
RectangleInfo
left_geometry,
right_geometry;
register const Quantum
*p;
register ssize_t
i,
y;
size_t
gap;
ssize_t
x;
if (images->previous == (Image *) NULL)
return(0);
right_image=images;
SetGeometry(smush_image,&right_geometry);
GravityAdjustGeometry(right_image->columns,right_image->rows,
right_image->gravity,&right_geometry);
left_image=images->previous;
SetGeometry(smush_image,&left_geometry);
GravityAdjustGeometry(left_image->columns,left_image->rows,
left_image->gravity,&left_geometry);
gap=right_image->columns;
left_view=AcquireVirtualCacheView(left_image,exception);
right_view=AcquireVirtualCacheView(right_image,exception);
for (y=0; y < (ssize_t) smush_image->rows; y++)
{
for (x=(ssize_t) left_image->columns-1; x > 0; x--)
{
p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(left_image,p) != TransparentAlpha) ||
((left_image->columns-x-1) >= gap))
break;
}
i=(ssize_t) left_image->columns-x-1;
for (x=0; x < (ssize_t) right_image->columns; x++)
{
p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(right_image,p) != TransparentAlpha) ||
((x+i) >= (ssize_t) gap))
break;
}
if ((x+i) < (ssize_t) gap)
gap=(size_t) (x+i);
}
right_view=DestroyCacheView(right_view);
left_view=DestroyCacheView(left_view);
if (y < (ssize_t) smush_image->rows)
return(offset);
return((ssize_t) gap-offset);
}
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
CacheView
*bottom_view,
*top_view;
const Image
*bottom_image,
*top_image;
RectangleInfo
bottom_geometry,
top_geometry;
register const Quantum
*p;
register ssize_t
i,
x;
size_t
gap;
ssize_t
y;
if (images->previous == (Image *) NULL)
return(0);
bottom_image=images;
SetGeometry(smush_image,&bottom_geometry);
GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
bottom_image->gravity,&bottom_geometry);
top_image=images->previous;
SetGeometry(smush_image,&top_geometry);
GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
&top_geometry);
gap=bottom_image->rows;
top_view=AcquireVirtualCacheView(top_image,exception);
bottom_view=AcquireVirtualCacheView(bottom_image,exception);
for (x=0; x < (ssize_t) smush_image->columns; x++)
{
for (y=(ssize_t) top_image->rows-1; y > 0; y--)
{
p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(top_image,p) != TransparentAlpha) ||
((top_image->rows-y-1) >= gap))
break;
}
i=(ssize_t) top_image->rows-y-1;
for (y=0; y < (ssize_t) bottom_image->rows; y++)
{
p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(bottom_image,p) != TransparentAlpha) ||
((y+i) >= (ssize_t) gap))
break;
}
if ((y+i) < (ssize_t) gap)
gap=(size_t) (y+i);
}
bottom_view=DestroyCacheView(bottom_view);
top_view=DestroyCacheView(top_view);
if (x < (ssize_t) smush_image->columns)
return(offset);
return((ssize_t) gap-offset);
}
MagickExport Image *SmushImages(const Image *images,
const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag "Smush/Image"
const Image
*image;
Image
*smush_image;
MagickBooleanType
proceed,
status;
MagickOffsetType
n;
PixelTrait
alpha_trait;
RectangleInfo
geometry;
register const Image
*next;
size_t
height,
number_images,
width;
ssize_t
x_offset,
y_offset;
/*
Compute maximum area of smushed area.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=images;
alpha_trait=image->alpha_trait;
number_images=1;
width=image->columns;
height=image->rows;
next=GetNextImageInList(image);
for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
{
if (next->alpha_trait != UndefinedPixelTrait)
alpha_trait=BlendPixelTrait;
number_images++;
if (stack != MagickFalse)
{
if (next->columns > width)
width=next->columns;
height+=next->rows;
if (next->previous != (Image *) NULL)
height+=offset;
continue;
}
width+=next->columns;
if (next->previous != (Image *) NULL)
width+=offset;
if (next->rows > height)
height=next->rows;
}
/*
Smush images.
*/
smush_image=CloneImage(image,width,height,MagickTrue,exception);
if (smush_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(smush_image,DirectClass,exception) == MagickFalse)
{
smush_image=DestroyImage(smush_image);
return((Image *) NULL);
}
smush_image->alpha_trait=alpha_trait;
(void) SetImageBackgroundColor(smush_image,exception);
status=MagickTrue;
x_offset=0;
y_offset=0;
for (n=0; n < (MagickOffsetType) number_images; n++)
{
SetGeometry(smush_image,&geometry);
GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
if (stack != MagickFalse)
{
x_offset-=geometry.x;
y_offset-=SmushYGap(smush_image,image,offset,exception);
}
else
{
x_offset-=SmushXGap(smush_image,image,offset,exception);
y_offset-=geometry.y;
}
status=CompositeImage(smush_image,image,OverCompositeOp,MagickTrue,x_offset,
y_offset,exception);
proceed=SetImageProgress(image,SmushImageTag,n,number_images);
if (proceed == MagickFalse)
break;
if (stack == MagickFalse)
{
x_offset+=(ssize_t) image->columns;
y_offset=0;
}
else
{
x_offset=0;
y_offset+=(ssize_t) image->rows;
}
image=GetNextImageInList(image);
}
if (stack == MagickFalse)
smush_image->columns=(size_t) x_offset;
else
smush_image->rows=(size_t) y_offset;
if (status == MagickFalse)
smush_image=DestroyImage(smush_image);
return(smush_image);
}
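/*
  Usage sketch (illustrative, not part of the original source): smush a
  sequence left-to-right with a minimum 10-pixel gap.  'images' is assumed to
  hold at least two frames.
*/
#if 0
{
  Image
    *smushed;

  smushed=SmushImages(images,MagickFalse,10,exception);
  if (smushed != (Image *) NULL)
    smushed=DestroyImage(smushed);
}
#endif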
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StripImage() strips an image of all profiles and comments.
%
% The format of the StripImage method is:
%
% MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
{
MagickBooleanType
status;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
(void) exception;
DestroyImageProfiles(image);
(void) DeleteImageProperty(image,"comment");
(void) DeleteImageProperty(image,"date:create");
(void) DeleteImageProperty(image,"date:modify");
status=SetImageArtifact(image,"png:exclude-chunk",
"bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date");
return(status);
}
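/*
  Usage sketch (illustrative, not part of the original source): strip
  profiles and comments before writing, to minimize output size.
*/
#if 0
(void) StripImage(image,exception);
(void) WriteImage(image_info,image,exception);
#endif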
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImage() initializes the red, green, and blue intensities of each pixel
% as defined by the colormap index.
%
% The format of the SyncImage method is:
%
% MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PushColormapIndex(Image *image,const Quantum index,
MagickBooleanType *range_exception)
{
if ((size_t) index < image->colors)
return(index);
*range_exception=MagickTrue;
return((Quantum) 0);
}
MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
range_exception,
status,
taint;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (image->ping != MagickFalse)
return(MagickTrue);
if (image->storage_class != PseudoClass)
return(MagickFalse);
assert(image->colormap != (PixelInfo *) NULL);
range_exception=MagickFalse;
status=MagickTrue;
taint=image->taint;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(range_exception,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
index;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception);
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->taint=taint;
if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
return(status);
}
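/*
  Usage sketch (illustrative, not part of the original source): edit the
  colormap of a PseudoClass image, then push the new palette out to the
  pixel array.
*/
#if 0
{
  ssize_t
    i;

  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].green=0.0;  /* drop the green component */
  (void) SyncImage(image,exception);
}
#endif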
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e S e t t i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageSettings() syncs any image_info global options into per-image
% attributes.
%
% Note: in IMv6 free-form 'options' were always copied into per-image
% 'artifacts' so that operations and coders could find such settings. In
% IMv7, if a desired per-image artifact is not set, the lookup falls back
% directly to the global option, so the copy is no longer needed; only the
% link is set up.
%
% The format of the SyncImageSettings method is:
%
% MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
% MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
% Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
Image *images,ExceptionInfo *exception)
{
Image
*image;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
image=images;
for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
(void) SyncImageSettings(image_info,image,exception);
(void) DeleteImageOption(image_info,"page");
return(MagickTrue);
}
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
const char
*option;
GeometryInfo
geometry_info;
MagickStatusType
flags;
ResolutionType
units;
/*
Sync image options.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
option=GetImageOption(image_info,"background");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->background_color,
exception);
option=GetImageOption(image_info,"black-point-compensation");
if (option != (const char *) NULL)
image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
MagickBooleanOptions,MagickFalse,option);
option=GetImageOption(image_info,"blue-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
}
option=GetImageOption(image_info,"bordercolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->border_color,
exception);
/* FUTURE: do not sync compose to per-image compose setting here */
option=GetImageOption(image_info,"compose");
if (option != (const char *) NULL)
image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,option);
/* -- */
option=GetImageOption(image_info,"compress");
if (option != (const char *) NULL)
image->compression=(CompressionType) ParseCommandOption(
MagickCompressOptions,MagickFalse,option);
option=GetImageOption(image_info,"debug");
if (option != (const char *) NULL)
image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"density");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->resolution.x=geometry_info.rho;
image->resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->resolution.y=image->resolution.x;
}
option=GetImageOption(image_info,"depth");
if (option != (const char *) NULL)
image->depth=StringToUnsignedLong(option);
option=GetImageOption(image_info,"endian");
if (option != (const char *) NULL)
image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
MagickFalse,option);
option=GetImageOption(image_info,"filter");
if (option != (const char *) NULL)
image->filter=(FilterType) ParseCommandOption(MagickFilterOptions,
MagickFalse,option);
option=GetImageOption(image_info,"fuzz");
if (option != (const char *) NULL)
image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
option=GetImageOption(image_info,"gravity");
if (option != (const char *) NULL)
image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(image_info,"green-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
}
option=GetImageOption(image_info,"intent");
if (option != (const char *) NULL)
image->rendering_intent=(RenderingIntent) ParseCommandOption(
MagickIntentOptions,MagickFalse,option);
option=GetImageOption(image_info,"intensity");
if (option != (const char *) NULL)
image->intensity=(PixelIntensityMethod) ParseCommandOption(
MagickPixelIntensityOptions,MagickFalse,option);
option=GetImageOption(image_info,"interlace");
if (option != (const char *) NULL)
image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
MagickFalse,option);
option=GetImageOption(image_info,"interpolate");
if (option != (const char *) NULL)
image->interpolate=(PixelInterpolateMethod) ParseCommandOption(
MagickInterpolateOptions,MagickFalse,option);
option=GetImageOption(image_info,"loop");
if (option != (const char *) NULL)
image->iterations=StringToUnsignedLong(option);
option=GetImageOption(image_info,"mattecolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->matte_color,
exception);
option=GetImageOption(image_info,"orient");
if (option != (const char *) NULL)
image->orientation=(OrientationType) ParseCommandOption(
MagickOrientationOptions,MagickFalse,option);
option=GetImageOption(image_info,"page");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"quality");
if (option != (const char *) NULL)
image->quality=StringToUnsignedLong(option);
option=GetImageOption(image_info,"red-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.red_primary.x=geometry_info.rho;
image->chromaticity.red_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
}
if (image_info->quality != UndefinedCompressionQuality)
image->quality=image_info->quality;
option=GetImageOption(image_info,"scene");
if (option != (const char *) NULL)
image->scene=StringToUnsignedLong(option);
option=GetImageOption(image_info,"taint");
if (option != (const char *) NULL)
image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"tile-offset");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"transparent-color");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->transparent_color,
exception);
option=GetImageOption(image_info,"type");
if (option != (const char *) NULL)
image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
option);
option=GetImageOption(image_info,"units");
units=image_info->units;
if (option != (const char *) NULL)
units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
MagickFalse,option);
if (units != UndefinedResolution)
{
if (image->units != units)
switch (image->units)
{
case PixelsPerInchResolution:
{
if (units == PixelsPerCentimeterResolution)
{
image->resolution.x/=2.54;
image->resolution.y/=2.54;
}
break;
}
case PixelsPerCentimeterResolution:
{
if (units == PixelsPerInchResolution)
{
image->resolution.x=(double) ((size_t) (100.0*2.54*
image->resolution.x+0.5))/100.0;
image->resolution.y=(double) ((size_t) (100.0*2.54*
image->resolution.y+0.5))/100.0;
}
break;
}
default:
break;
}
image->units=units;
option=GetImageOption(image_info,"density");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->resolution.x=geometry_info.rho;
image->resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->resolution.y=image->resolution.x;
}
}
option=GetImageOption(image_info,"virtual-pixel");
if (option != (const char *) NULL)
(void) SetImageVirtualPixelMethod(image,(VirtualPixelMethod)
ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,option),
exception);
option=GetImageOption(image_info,"white-point");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.white_point.y=image->chromaticity.white_point.x;
}
/*
Set up a link so that a per-image artifact lookup can fall back to a global
option setting/define. This saves duplicating every global option as a
per-image artifact, while ensuring that only explicitly set per-image
artifacts are preserved when a parenthesis ends.
*/
if (image->image_info != (ImageInfo *) NULL)
image->image_info=DestroyImageInfo(image->image_info);
image->image_info=CloneImageInfo(image_info);
return(MagickTrue);
}
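/*
  Usage sketch (illustrative, not part of the original source): set a global
  option and sync it into the per-image attribute.
*/
#if 0
(void) SetImageOption(image_info,"compress","Zip");
(void) SyncImageSettings(image_info,image,exception);
/* image->compression is now ZipCompression */
#endif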
|
mm.best-par.c
|
#include <stdio.h>
#include <sys/time.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define M NCONT
#define N NCONT
#define K CONT
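/*
  M, N, K and REPS are expected to be supplied at compile time; an
  illustrative build command (the flag values are assumptions, not part of
  the original) is:
    cc -O3 -fopenmp -DCONT=2048 -DNCONT=2048 -DREPS=5 mm.best-par.c -lm -o mm
*/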
double A[M][K];
double B[K][N];
double C[M][N];
void init_arrays()
{
int i1, i2;
for (i1=0; i1<M; i1++)
for (i2=0; i2<K; i2++)
A[i1][i2] = (i1+i2) % 5 + 1;
for (i1=0; i1<K; i1++)
for (i2=0; i2<N; i2++)
B[i1][i2] = (i1+i2) % 5 + 1;
for (i1=0; i1<M; i1++)
for (i2=0; i2<N; i2++)
C[i1][i2] = 0;
}
double rtclock()
{
struct timezone tzp;
struct timeval tp;
(void) gettimeofday (&tp, &tzp);
return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
int main()
{
init_arrays();
double annot_t_start=0, annot_t_end=0, annot_t_total=0;
int annot_i;
for (annot_i=0; annot_i<REPS; annot_i++)
{
annot_t_start = rtclock();
int i, j, k;
int ii, jj, kk;
int iii, jjj, kkk;
{
double C_copy[128][64];
double B_copy;
double A_copy[256][128];
register int cbv_1;
cbv_1=K-1;
#pragma omp parallel for private(iii,jjj,kkk,ii,jj,kk,i,j,k,A_copy,B_copy,C_copy)
for (kkk=0; kkk<=cbv_1; kkk=kkk+512) {
for (iii=0; iii<=M-1; iii=iii+256) {
for (jjj=0; jjj<=N-1; jjj=jjj+1024) {
for (kk=kkk; kk<=min(K-1,kkk+256); kk=kk+256) {
for (ii=iii; ii<=min(M-1,iii+128); ii=ii+128) {
for (k=kk; k<=min(K-1,kk+255); k=k+1)
for (i=ii; i<=min(M-1,ii+127); i=i+1)
A_copy[(k-kk)][(i-ii)]=A[i][k];
for (jj=jjj; jj<=min(N-1,jjj+960); jj=jj+64) {
for (i=ii; i<=min(M-1,ii+127); i=i+1)
for (j=jj; j<=min(N-1,jj+63); j=j+1)
C_copy[(i-ii)][(j-jj)]=C[i][j];
for (k=kk; k<=min(K-1,kk+255)-7; k=k+8) {
for (i=ii; i<=min(M-1,ii+127)-7; i=i+8) {
register int cbv_2;
cbv_2=min(N-1,jj+63);
#pragma ivdep
#pragma vector always
for (j=jj; j<=cbv_2; j=j+1) {
double scv_1, scv_2, scv_3, scv_4, scv_5, scv_6, scv_7, scv_8;
double scv_9, scv_10, scv_11, scv_12, scv_13, scv_14, scv_15, scv_16;
scv_1=B[k][j];
scv_2=B[(k+6)][j];
scv_3=B[(k+5)][j];
scv_4=C_copy[(i-ii+4)][(j-jj)];
scv_5=C_copy[(i-ii+2)][(j-jj)];
scv_6=B[(k+4)][j];
scv_7=C_copy[(i-ii+3)][(j-jj)];
scv_8=C_copy[(i-ii+6)][(j-jj)];
scv_9=B[(k+3)][j];
scv_10=C_copy[(i-ii+5)][(j-jj)];
scv_11=C_copy[(i-ii+1)][(j-jj)];
scv_12=B[(k+1)][j];
scv_13=C_copy[(i-ii+7)][(j-jj)];
scv_14=B[(k+2)][j];
scv_15=B[(k+7)][j];
scv_16=C_copy[(i-ii)][(j-jj)];
scv_16=scv_16+A_copy[(k-kk)][(i-ii)]*scv_1;
scv_11=scv_11+A_copy[(k-kk)][(i-ii+1)]*scv_1;
scv_5=scv_5+A_copy[(k-kk)][(i-ii+2)]*scv_1;
scv_7=scv_7+A_copy[(k-kk)][(i-ii+3)]*scv_1;
scv_4=scv_4+A_copy[(k-kk)][(i-ii+4)]*scv_1;
scv_10=scv_10+A_copy[(k-kk)][(i-ii+5)]*scv_1;
scv_8=scv_8+A_copy[(k-kk)][(i-ii+6)]*scv_1;
scv_13=scv_13+A_copy[(k-kk)][(i-ii+7)]*scv_1;
scv_16=scv_16+A_copy[(k-kk+1)][(i-ii)]*scv_12;
scv_11=scv_11+A_copy[(k-kk+1)][(i-ii+1)]*scv_12;
scv_5=scv_5+A_copy[(k-kk+1)][(i-ii+2)]*scv_12;
scv_7=scv_7+A_copy[(k-kk+1)][(i-ii+3)]*scv_12;
scv_4=scv_4+A_copy[(k-kk+1)][(i-ii+4)]*scv_12;
scv_10=scv_10+A_copy[(k-kk+1)][(i-ii+5)]*scv_12;
scv_8=scv_8+A_copy[(k-kk+1)][(i-ii+6)]*scv_12;
scv_13=scv_13+A_copy[(k-kk+1)][(i-ii+7)]*scv_12;
scv_16=scv_16+A_copy[(k-kk+2)][(i-ii)]*scv_14;
scv_11=scv_11+A_copy[(k-kk+2)][(i-ii+1)]*scv_14;
scv_5=scv_5+A_copy[(k-kk+2)][(i-ii+2)]*scv_14;
scv_7=scv_7+A_copy[(k-kk+2)][(i-ii+3)]*scv_14;
scv_4=scv_4+A_copy[(k-kk+2)][(i-ii+4)]*scv_14;
scv_10=scv_10+A_copy[(k-kk+2)][(i-ii+5)]*scv_14;
scv_8=scv_8+A_copy[(k-kk+2)][(i-ii+6)]*scv_14;
scv_13=scv_13+A_copy[(k-kk+2)][(i-ii+7)]*scv_14;
scv_16=scv_16+A_copy[(k-kk+3)][(i-ii)]*scv_9;
scv_11=scv_11+A_copy[(k-kk+3)][(i-ii+1)]*scv_9;
scv_5=scv_5+A_copy[(k-kk+3)][(i-ii+2)]*scv_9;
scv_7=scv_7+A_copy[(k-kk+3)][(i-ii+3)]*scv_9;
scv_4=scv_4+A_copy[(k-kk+3)][(i-ii+4)]*scv_9;
scv_10=scv_10+A_copy[(k-kk+3)][(i-ii+5)]*scv_9;
scv_8=scv_8+A_copy[(k-kk+3)][(i-ii+6)]*scv_9;
scv_13=scv_13+A_copy[(k-kk+3)][(i-ii+7)]*scv_9;
scv_16=scv_16+A_copy[(k-kk+4)][(i-ii)]*scv_6;
scv_11=scv_11+A_copy[(k-kk+4)][(i-ii+1)]*scv_6;
scv_5=scv_5+A_copy[(k-kk+4)][(i-ii+2)]*scv_6;
scv_7=scv_7+A_copy[(k-kk+4)][(i-ii+3)]*scv_6;
scv_4=scv_4+A_copy[(k-kk+4)][(i-ii+4)]*scv_6;
scv_10=scv_10+A_copy[(k-kk+4)][(i-ii+5)]*scv_6;
scv_8=scv_8+A_copy[(k-kk+4)][(i-ii+6)]*scv_6;
scv_13=scv_13+A_copy[(k-kk+4)][(i-ii+7)]*scv_6;
scv_16=scv_16+A_copy[(k-kk+5)][(i-ii)]*scv_3;
scv_11=scv_11+A_copy[(k-kk+5)][(i-ii+1)]*scv_3;
scv_5=scv_5+A_copy[(k-kk+5)][(i-ii+2)]*scv_3;
scv_7=scv_7+A_copy[(k-kk+5)][(i-ii+3)]*scv_3;
scv_4=scv_4+A_copy[(k-kk+5)][(i-ii+4)]*scv_3;
scv_10=scv_10+A_copy[(k-kk+5)][(i-ii+5)]*scv_3;
scv_8=scv_8+A_copy[(k-kk+5)][(i-ii+6)]*scv_3;
scv_13=scv_13+A_copy[(k-kk+5)][(i-ii+7)]*scv_3;
scv_16=scv_16+A_copy[(k-kk+6)][(i-ii)]*scv_2;
scv_11=scv_11+A_copy[(k-kk+6)][(i-ii+1)]*scv_2;
scv_5=scv_5+A_copy[(k-kk+6)][(i-ii+2)]*scv_2;
scv_7=scv_7+A_copy[(k-kk+6)][(i-ii+3)]*scv_2;
scv_4=scv_4+A_copy[(k-kk+6)][(i-ii+4)]*scv_2;
scv_10=scv_10+A_copy[(k-kk+6)][(i-ii+5)]*scv_2;
scv_8=scv_8+A_copy[(k-kk+6)][(i-ii+6)]*scv_2;
scv_13=scv_13+A_copy[(k-kk+6)][(i-ii+7)]*scv_2;
scv_16=scv_16+A_copy[(k-kk+7)][(i-ii)]*scv_15;
scv_11=scv_11+A_copy[(k-kk+7)][(i-ii+1)]*scv_15;
scv_5=scv_5+A_copy[(k-kk+7)][(i-ii+2)]*scv_15;
scv_7=scv_7+A_copy[(k-kk+7)][(i-ii+3)]*scv_15;
scv_4=scv_4+A_copy[(k-kk+7)][(i-ii+4)]*scv_15;
scv_10=scv_10+A_copy[(k-kk+7)][(i-ii+5)]*scv_15;
scv_8=scv_8+A_copy[(k-kk+7)][(i-ii+6)]*scv_15;
scv_13=scv_13+A_copy[(k-kk+7)][(i-ii+7)]*scv_15;
C_copy[(i-ii+4)][(j-jj)]=scv_4;
C_copy[(i-ii+2)][(j-jj)]=scv_5;
C_copy[(i-ii+3)][(j-jj)]=scv_7;
C_copy[(i-ii+6)][(j-jj)]=scv_8;
C_copy[(i-ii+5)][(j-jj)]=scv_10;
C_copy[(i-ii+1)][(j-jj)]=scv_11;
C_copy[(i-ii+7)][(j-jj)]=scv_13;
C_copy[(i-ii)][(j-jj)]=scv_16;
}
}
for (; i<=min(M-1,ii+127); i=i+1) {
register int cbv_3;
cbv_3=min(N-1,jj+63);
#pragma ivdep
#pragma vector always
for (j=jj; j<=cbv_3; j=j+1) {
double scv_17;
scv_17=C_copy[(i-ii)][(j-jj)];
scv_17=scv_17+A_copy[(k-kk)][(i-ii)]*B[k][j];
scv_17=scv_17+A_copy[(k-kk+1)][(i-ii)]*B[(k+1)][j];
scv_17=scv_17+A_copy[(k-kk+2)][(i-ii)]*B[(k+2)][j];
scv_17=scv_17+A_copy[(k-kk+3)][(i-ii)]*B[(k+3)][j];
scv_17=scv_17+A_copy[(k-kk+4)][(i-ii)]*B[(k+4)][j];
scv_17=scv_17+A_copy[(k-kk+5)][(i-ii)]*B[(k+5)][j];
scv_17=scv_17+A_copy[(k-kk+6)][(i-ii)]*B[(k+6)][j];
scv_17=scv_17+A_copy[(k-kk+7)][(i-ii)]*B[(k+7)][j];
C_copy[(i-ii)][(j-jj)]=scv_17;
}
}
}
for (; k<=min(K-1,kk+255); k=k+1) {
for (i=ii; i<=min(M-1,ii+127)-7; i=i+8) {
register int cbv_4;
cbv_4=min(N-1,jj+63);
#pragma ivdep
#pragma vector always
for (j=jj; j<=cbv_4; j=j+1) {
double scv_18, scv_19, scv_20, scv_21, scv_22, scv_23, scv_24, scv_25;
double scv_26;
scv_18=C_copy[(i-ii+7)][(j-jj)];
scv_19=B[k][j];
scv_20=C_copy[(i-ii+5)][(j-jj)];
scv_21=C_copy[(i-ii+3)][(j-jj)];
scv_22=C_copy[(i-ii+4)][(j-jj)];
scv_23=C_copy[(i-ii+6)][(j-jj)];
scv_24=C_copy[(i-ii+1)][(j-jj)];
scv_25=C_copy[(i-ii)][(j-jj)];
scv_26=C_copy[(i-ii+2)][(j-jj)];
scv_25=scv_25+A_copy[(k-kk)][(i-ii)]*scv_19;
scv_24=scv_24+A_copy[(k-kk)][(i-ii+1)]*scv_19;
scv_26=scv_26+A_copy[(k-kk)][(i-ii+2)]*scv_19;
scv_21=scv_21+A_copy[(k-kk)][(i-ii+3)]*scv_19;
scv_22=scv_22+A_copy[(k-kk)][(i-ii+4)]*scv_19;
scv_20=scv_20+A_copy[(k-kk)][(i-ii+5)]*scv_19;
scv_23=scv_23+A_copy[(k-kk)][(i-ii+6)]*scv_19;
scv_18=scv_18+A_copy[(k-kk)][(i-ii+7)]*scv_19;
C_copy[(i-ii+7)][(j-jj)]=scv_18;
C_copy[(i-ii+5)][(j-jj)]=scv_20;
C_copy[(i-ii+3)][(j-jj)]=scv_21;
C_copy[(i-ii+4)][(j-jj)]=scv_22;
C_copy[(i-ii+6)][(j-jj)]=scv_23;
C_copy[(i-ii+1)][(j-jj)]=scv_24;
C_copy[(i-ii)][(j-jj)]=scv_25;
C_copy[(i-ii+2)][(j-jj)]=scv_26;
}
}
for (; i<=min(M-1,ii+127); i=i+1) {
register int cbv_5;
cbv_5=min(N-1,jj+63);
#pragma ivdep
#pragma vector always
for (j=jj; j<=cbv_5; j=j+1) {
double scv_27;
scv_27=C_copy[(i-ii)][(j-jj)];
scv_27=scv_27+A_copy[(k-kk)][(i-ii)]*B[k][j];
C_copy[(i-ii)][(j-jj)]=scv_27;
}
}
}
for (i=ii; i<=min(M-1,ii+127); i=i+1)
for (j=jj; j<=min(N-1,jj+63); j=j+1)
C[i][j]=C_copy[(i-ii)][(j-jj)];
}
}
}
}
}
}
}
annot_t_end = rtclock();
annot_t_total += annot_t_end - annot_t_start;
}
annot_t_total = annot_t_total / REPS;
printf("%f\n", annot_t_total);
return 0;
}
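/*
  Optional correctness check (illustrative sketch, not in the original
  benchmark): compare C against a naive triple loop.  Assumes REPS=1 so C
  holds exactly one accumulation of A*B; the name check_result is ours.
*/
#if 0
static int check_result(void)
{
  int i, j, k;
  for (i = 0; i < M; i++)
    for (j = 0; j < N; j++) {
      double ref = 0.0;
      for (k = 0; k < K; k++)
        ref += A[i][k] * B[k][j];
      if (fabs(C[i][j] - ref) > 1e-6 * fabs(ref))
        return 0;  /* mismatch */
    }
  return 1;  /* all entries agree */
}
#endif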
|
convolution_1x1_pack4_fp16s.h
|
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
const int size = w * h;
Mat bottom_im2col = bottom_blob;
bottom_im2col.w = size;
bottom_im2col.h = 1;
im2col_sgemm_pack4_fp16sa_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
static void conv1x1s2_sgemm_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
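// tailstep: after reading 2*outw pack4 pixels from a row, skip the
// remainder of that row plus the entire next row that stride-2 discards;
// the factor 4 converts pack4 pixels to __fp16 element counts.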
const int tailstep = (w - 2 * outw + w) * 4;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const __fp16* r0 = bottom_blob.channel(p);
__fp16* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
float16x4_t _v0 = vld1_f16(r0);
float16x4_t _v1 = vld1_f16(r0 + 8);
float16x4_t _v2 = vld1_f16(r0 + 16);
float16x4_t _v3 = vld1_f16(r0 + 24);
float16x8_t _v01 = vcombine_f16(_v0, _v1);
float16x8_t _v23 = vcombine_f16(_v2, _v3);
vst1q_f16(outptr, _v01);
vst1q_f16(outptr + 8, _v23);
r0 += 32;
outptr += 16;
}
for (; j + 1 < outw; j += 2)
{
float16x4_t _v0 = vld1_f16(r0);
float16x4_t _v1 = vld1_f16(r0 + 8);
float16x8_t _v = vcombine_f16(_v0, _v1);
vst1q_f16(outptr, _v);
r0 += 16;
outptr += 8;
}
for (; j < outw; j++)
{
float16x4_t _v = vld1_f16(r0);
vst1_f16(outptr, _v);
r0 += 8;
outptr += 4;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack4_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
fig4.72-num-threads-clause.c
|
/*
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
Copyright 2009 Sun Microsystems, Inc. All rights reserved.
The contents of this file are subject to the terms of the BSD License("BSD")(the "License").
You can obtain a copy of the License at: http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt
The BSD License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistribution of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistribution in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Sun Microsystems, Inc. or the names of
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
This software is provided "AS IS," without a warranty of any kind. ALL
EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR
NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND
ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A
RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES.
IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT
OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR
PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS
BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
You acknowledge that this software is not designed, licensed or intended for
use in the design, construction, operation or maintenance of any nuclear facility.
*/
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#define TRUE 1
#define FALSE 0
#else
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#endif
int main()
{
int TID;
#ifdef _OPENMP
(void) omp_set_dynamic(FALSE);
if (omp_get_dynamic()) {printf("Warning: dynamic adjustment of threads has been set\n");}
(void) omp_set_num_threads(4);
#endif
for (int n=5; n<11; n+=5)
{
#pragma omp parallel if (n > 5) num_threads(n) default(none) \
private(TID) shared(n)
{
TID = omp_get_thread_num();
#pragma omp single
{
printf("Value of n = %d\n",n);
printf("Number of threads in parallel region: %d\n",
omp_get_num_threads());
}
printf("Print statement executed by thread %d\n",TID);
} /*-- End of parallel region --*/
}
return(0);
}
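/*
  Expected behavior (illustrative; actual line interleaving varies from run
  to run): for n=5 the if(n > 5) clause is false, so the parallel region
  executes with a single thread; for n=10 it executes with ten threads, each
  printing one "Print statement executed by thread %d" line.
*/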
|
fourier.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF OOO U U RRRR IIIII EEEEE RRRR %
% F O O U U R R I E R R %
% FFF O O U U RRRR I EEE RRRR %
% F O O U U R R I E R R %
% F OOO UUU R R IIIII EEEEE R R %
% %
% %
% MagickCore Discrete Fourier Transform Methods %
% %
% Software Design %
% Sean Burke %
% Fred Weinhaus %
% Cristy %
% July 2009 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/fourier.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#if defined(MAGICKCORE_FFTW_DELEGATE)
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
#include <complex.h>
#endif
#include <fftw3.h>
#if !defined(MAGICKCORE_HAVE_CABS)
#define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1]))
#endif
#if !defined(MAGICKCORE_HAVE_CARG)
#define carg(z) (atan2(cimag(z),creal(z)))
#endif
#if !defined(MAGICKCORE_HAVE_CIMAG)
#define cimag(z) (z[1])
#endif
#if !defined(MAGICKCORE_HAVE_CREAL)
#define creal(z) (z[0])
#endif
#endif
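/*
  Note on the fallbacks above: when C99 <complex.h> is unavailable, FFTW
  defines fftw_complex as double[2], with element [0] the real part and
  element [1] the imaginary part, which is exactly what the cabs/cimag/creal
  macros index into.
*/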
/*
Typedef declarations.
*/
typedef struct _FourierInfo
{
ChannelType
channel;
MagickBooleanType
modulus;
size_t
width,
height;
ssize_t
center;
} FourierInfo;
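/*
  Note: center holds width/2+1, the number of complex samples per row in
  FFTW's real-to-complex layout; only the non-redundant half of the
  conjugate-symmetric spectrum is stored.
*/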
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p l e x I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ComplexImages() performs complex mathematics on an image sequence.
%
% The format of the ComplexImages method is:
%
% Image *ComplexImages(const Image *images,const ComplexOperator op,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o op: A complex operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op,
ExceptionInfo *exception)
{
#define ComplexImageTag "Complex/Image"
CacheView
*Ai_view,
*Ar_view,
*Bi_view,
*Br_view,
*Ci_view,
*Cr_view;
const char
*artifact;
const Image
*Ai_image,
*Ar_image,
*Bi_image,
*Br_image;
double
snr;
Image
*Ci_image,
*complex_images,
*Cr_image,
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
size_t
columns,
rows;
ssize_t
y;
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (images->next == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",images->filename);
return((Image *) NULL);
}
image=CloneImage(images,0,0,MagickTrue,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
image=DestroyImageList(image);
return(image);
}
image->depth=32UL;
complex_images=NewImageList();
AppendImageToList(&complex_images,image);
image=CloneImage(images->next,0,0,MagickTrue,exception);
if (image == (Image *) NULL)
{
complex_images=DestroyImageList(complex_images);
return(complex_images);
}
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
image=DestroyImageList(image);
return(image);
}
image->depth=32UL;
AppendImageToList(&complex_images,image);
/*
Apply complex mathematics to image pixels.
*/
artifact=GetImageArtifact(image,"complex:snr");
snr=0.0;
if (artifact != (const char *) NULL)
snr=StringToDouble(artifact,(char **) NULL);
Ar_image=images;
Ai_image=images->next;
Br_image=images;
Bi_image=images->next;
if ((images->next->next != (Image *) NULL) &&
(images->next->next->next != (Image *) NULL))
{
Br_image=images->next->next;
Bi_image=images->next->next->next;
}
Cr_image=complex_images;
Ci_image=complex_images->next;
Ar_view=AcquireVirtualCacheView(Ar_image,exception);
Ai_view=AcquireVirtualCacheView(Ai_image,exception);
Br_view=AcquireVirtualCacheView(Br_image,exception);
Bi_view=AcquireVirtualCacheView(Bi_image,exception);
Cr_view=AcquireAuthenticCacheView(Cr_image,exception);
Ci_view=AcquireAuthenticCacheView(Ci_image,exception);
status=MagickTrue;
progress=0;
columns=MagickMin(Cr_image->columns,Ci_image->columns);
rows=MagickMin(Cr_image->rows,Ci_image->rows);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(Cr_image,complex_images,rows,1L)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
const PixelPacket
*magick_restrict Ai,
*magick_restrict Ar,
*magick_restrict Bi,
*magick_restrict Br;
PixelPacket
*magick_restrict Ci,
*magick_restrict Cr;
ssize_t
x;
if (status == MagickFalse)
continue;
Ar=GetCacheViewVirtualPixels(Ar_view,0,y,columns,1,exception);
Ai=GetCacheViewVirtualPixels(Ai_view,0,y,columns,1,exception);
Br=GetCacheViewVirtualPixels(Br_view,0,y,columns,1,exception);
Bi=GetCacheViewVirtualPixels(Bi_view,0,y,columns,1,exception);
Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,columns,1,exception);
Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,columns,1,exception);
if ((Ar == (const PixelPacket *) NULL) ||
(Ai == (const PixelPacket *) NULL) ||
(Br == (const PixelPacket *) NULL) ||
(Bi == (const PixelPacket *) NULL) ||
(Cr == (PixelPacket *) NULL) || (Ci == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) columns; x++)
{
switch (op)
{
case AddComplexOperator:
{
Cr->red=Ar->red+Br->red;
Ci->red=Ai->red+Bi->red;
Cr->green=Ar->green+Br->green;
Ci->green=Ai->green+Bi->green;
Cr->blue=Ar->blue+Br->blue;
Ci->blue=Ai->blue+Bi->blue;
if (images->matte != MagickFalse)
{
Cr->opacity=Ar->opacity+Br->opacity;
Ci->opacity=Ai->opacity+Bi->opacity;
}
break;
}
case ConjugateComplexOperator:
default:
{
Cr->red=Ar->red;
Ci->red=(-Bi->red);
Cr->green=Ar->green;
Ci->green=(-Bi->green);
Cr->blue=Ar->blue;
Ci->blue=(-Bi->blue);
if (images->matte != MagickFalse)
{
Cr->opacity=Ar->opacity;
Ci->opacity=(-Bi->opacity);
}
break;
}
case DivideComplexOperator:
{
double
gamma;
gamma=QuantumRange*PerceptibleReciprocal(QuantumScale*Br->red*Br->red+
QuantumScale*Bi->red*Bi->red+snr);
Cr->red=gamma*(QuantumScale*Ar->red*Br->red+QuantumScale*Ai->red*
Bi->red);
Ci->red=gamma*(QuantumScale*Ai->red*Br->red-QuantumScale*Ar->red*
Bi->red);
gamma=QuantumRange*PerceptibleReciprocal(QuantumScale*Br->green*
Br->green+QuantumScale*Bi->green*Bi->green+snr);
Cr->green=gamma*(QuantumScale*Ar->green*Br->green+QuantumScale*
Ai->green*Bi->green);
Ci->green=gamma*(QuantumScale*Ai->green*Br->green-QuantumScale*
Ar->green*Bi->green);
gamma=QuantumRange*PerceptibleReciprocal(QuantumScale*Br->blue*
Br->blue+QuantumScale*Bi->blue*Bi->blue+snr);
Cr->blue=gamma*(QuantumScale*Ar->blue*Br->blue+QuantumScale*
Ai->blue*Bi->blue);
Ci->blue=gamma*(QuantumScale*Ai->blue*Br->blue-QuantumScale*
Ar->blue*Bi->blue);
if (images->matte != MagickFalse)
{
gamma=QuantumRange*PerceptibleReciprocal(QuantumScale*Br->opacity*
Br->opacity+QuantumScale*Bi->opacity*Bi->opacity+snr);
Cr->opacity=gamma*(QuantumScale*Ar->opacity*Br->opacity+
QuantumScale*Ai->opacity*Bi->opacity);
Ci->opacity=gamma*(QuantumScale*Ai->opacity*Br->opacity-
QuantumScale*Ar->opacity*Bi->opacity);
}
break;
}
case MagnitudePhaseComplexOperator:
{
Cr->red=sqrt(QuantumScale*Ar->red*Ar->red+QuantumScale*
Ai->red*Ai->red);
Ci->red=atan2((double) Ai->red,(double) Ar->red)/(2.0*MagickPI)+0.5;
Cr->green=sqrt(QuantumScale*Ar->green*Ar->green+QuantumScale*
Ai->green*Ai->green);
Ci->green=atan2((double) Ai->green,(double) Ar->green)/
(2.0*MagickPI)+0.5;
Cr->blue=sqrt(QuantumScale*Ar->blue*Ar->blue+QuantumScale*
Ai->blue*Ai->blue);
Ci->blue=atan2((double) Ai->blue,(double) Ar->blue)/
(2.0*MagickPI)+0.5;
if (images->matte != MagickFalse)
{
Cr->opacity=sqrt(QuantumScale*Ar->opacity*Ar->opacity+
QuantumScale*Ai->opacity*Ai->opacity);
Ci->opacity=atan2((double) Ai->opacity,(double) Ar->opacity)/
(2.0*MagickPI)+0.5;
}
break;
}
case MultiplyComplexOperator:
{
Cr->red=(QuantumScale*Ar->red*Br->red-QuantumScale*
Ai->red*Bi->red);
Ci->red=(QuantumScale*Ai->red*Br->red+QuantumScale*
Ar->red*Bi->red);
Cr->green=(QuantumScale*Ar->green*Br->green-QuantumScale*
Ai->green*Bi->green);
Ci->green=(QuantumScale*Ai->green*Br->green+QuantumScale*
Ar->green*Bi->green);
Cr->blue=(QuantumScale*Ar->blue*Br->blue-QuantumScale*
Ai->blue*Bi->blue);
Ci->blue=(QuantumScale*Ai->blue*Br->blue+QuantumScale*
Ar->blue*Bi->blue);
if (images->matte != MagickFalse)
{
Cr->opacity=(QuantumScale*Ar->opacity*Br->opacity-
QuantumScale*Ai->opacity*Bi->opacity);
Ci->opacity=(QuantumScale*Ai->opacity*Br->opacity+
QuantumScale*Ar->opacity*Bi->opacity);
}
break;
}
case RealImaginaryComplexOperator:
{
Cr->red=Ar->red*cos(2.0*MagickPI*(Ai->red-0.5));
Ci->red=Ar->red*sin(2.0*MagickPI*(Ai->red-0.5));
Cr->green=Ar->green*cos(2.0*MagickPI*(Ai->green-0.5));
Ci->green=Ar->green*sin(2.0*MagickPI*(Ai->green-0.5));
Cr->blue=Ar->blue*cos(2.0*MagickPI*(Ai->blue-0.5));
Ci->blue=Ar->blue*sin(2.0*MagickPI*(Ai->blue-0.5));
if (images->matte != MagickFalse)
{
Cr->opacity=Ar->opacity*cos(2.0*MagickPI*(Ai->opacity-0.5));
Ci->opacity=Ar->opacity*sin(2.0*MagickPI*(Ai->opacity-0.5));
}
break;
}
case SubtractComplexOperator:
{
Cr->red=Ar->red-Br->red;
Ci->red=Ai->red-Bi->red;
Cr->green=Ar->green-Br->green;
Ci->green=Ai->green-Bi->green;
Cr->blue=Ar->blue-Br->blue;
Ci->blue=Ai->blue-Bi->blue;
if (Cr_image->matte != MagickFalse)
{
Cr->opacity=Ar->opacity-Br->opacity;
Ci->opacity=Ai->opacity-Bi->opacity;
}
break;
}
}
Ar++;
Ai++;
Br++;
Bi++;
Cr++;
Ci++;
}
if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse)
status=MagickFalse;
if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(images,ComplexImageTag,progress,images->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
Cr_view=DestroyCacheView(Cr_view);
Ci_view=DestroyCacheView(Ci_view);
Br_view=DestroyCacheView(Br_view);
Bi_view=DestroyCacheView(Bi_view);
Ar_view=DestroyCacheView(Ar_view);
Ai_view=DestroyCacheView(Ai_view);
if (status == MagickFalse)
complex_images=DestroyImageList(complex_images);
return(complex_images);
}
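/*
  A minimal usage sketch for ComplexImages() (an illustrative fragment,
  kept disabled: it assumes the caller prepared image_info, needs
  magick/constitute.h for ReadImage(), and elides error reporting):
*/
#if 0
static Image *ComplexMultiplyExample(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  Image
    *images,
    *product;

  /* the list must hold at least two frames: real part, then imaginary */
  images=ReadImage(image_info,exception);
  if (images == (Image *) NULL)
    return((Image *) NULL);
  product=ComplexImages(images,MultiplyComplexOperator,exception);
  images=DestroyImageList(images);
  return(product);
}
#endif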
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r w a r d F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ForwardFourierTransformImage() implements the discrete Fourier transform
% (DFT) of the image either as a magnitude / phase or real / imaginary image
% pair.
%
% The format of the ForwardFourierTransformImage method is:
%
% Image *ForwardFourierTransformImage(const Image *image,
% const MagickBooleanType modulus,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulus: if true, return the transform as a magnitude / phase pair,
% otherwise as a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
static MagickBooleanType RollFourier(const size_t width,const size_t height,
const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels)
{
double
*source_pixels;
MemoryInfo
*source_info;
ssize_t
i,
x;
ssize_t
u,
v,
y;
/*
Move zero frequency (DC, average color) from (0,0) to (width/2,height/2).
*/
source_info=AcquireVirtualMemory(width,height*sizeof(*source_pixels));
if (source_info == (MemoryInfo *) NULL)
return(MagickFalse);
source_pixels=(double *) GetVirtualMemoryBlob(source_info);
i=0L;
for (y=0L; y < (ssize_t) height; y++)
{
if (y_offset < 0L)
v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset;
else
v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height :
y+y_offset;
for (x=0L; x < (ssize_t) width; x++)
{
if (x_offset < 0L)
u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset;
else
u=((x+x_offset) > ((ssize_t) width-1L)) ? x+x_offset-(ssize_t) width :
x+x_offset;
source_pixels[v*width+u]=roll_pixels[i++];
}
}
(void) memcpy(roll_pixels,source_pixels,height*width*sizeof(*source_pixels));
source_info=RelinquishVirtualMemory(source_info);
return(MagickTrue);
}
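/*
  Worked example: for width=4, height=1 and x_offset=2 the loop above
  maps the value at roll index x to destination index (x+2) mod 4, i.e.
  {0,1,2,3} -> {2,3,0,1}, so the DC term moves from position 0 to the
  center position width/2.
*/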
static MagickBooleanType ForwardQuadrantSwap(const size_t width,
const size_t height,double *source_pixels,double *forward_pixels)
{
MagickBooleanType
status;
ssize_t
x;
ssize_t
center,
y;
/*
Swap quadrants.
*/
center=(ssize_t) (width/2L)+1L;
status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,
source_pixels);
if (status == MagickFalse)
return(MagickFalse);
for (y=0L; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L); x++)
forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x];
for (y=1; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L); x++)
forward_pixels[(height-y)*width+width/2L-x-1L]=
source_pixels[y*center+x+1L];
for (x=0L; x < (ssize_t) (width/2L); x++)
forward_pixels[width/2L-x-1L]=source_pixels[x+1L];
return(MagickTrue);
}
static void CorrectPhaseLHS(const size_t width,const size_t height,
double *fourier_pixels)
{
ssize_t
x;
ssize_t
y;
for (y=0L; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L); x++)
fourier_pixels[y*width+x]*=(-1.0);
}
static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info,
Image *image,double *magnitude,double *phase,ExceptionInfo *exception)
{
CacheView
*magnitude_view,
*phase_view;
double
*magnitude_pixels,
*phase_pixels;
Image
*magnitude_image,
*phase_image;
MagickBooleanType
status;
MemoryInfo
*magnitude_info,
*phase_info;
IndexPacket
*indexes;
PixelPacket
*q;
ssize_t
x;
ssize_t
i,
y;
magnitude_image=GetFirstImageInList(image);
phase_image=GetNextImageInList(image);
if (phase_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",image->filename);
return(MagickFalse);
}
/*
Create "Fourier Transform" image from constituent arrays.
*/
magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*magnitude_pixels));
phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*phase_pixels));
if ((magnitude_info == (MemoryInfo *) NULL) ||
(phase_info == (MemoryInfo *) NULL))
{
if (phase_info != (MemoryInfo *) NULL)
phase_info=RelinquishVirtualMemory(phase_info);
if (magnitude_info != (MemoryInfo *) NULL)
magnitude_info=RelinquishVirtualMemory(magnitude_info);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
(void) memset(magnitude_pixels,0,fourier_info->width*
fourier_info->height*sizeof(*magnitude_pixels));
phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
(void) memset(phase_pixels,0,fourier_info->width*
fourier_info->height*sizeof(*phase_pixels));
status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,
magnitude,magnitude_pixels);
if (status != MagickFalse)
status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase,
phase_pixels);
CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
if (fourier_info->modulus != MagickFalse)
{
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
phase_pixels[i]/=(2.0*MagickPI);
phase_pixels[i]+=0.5;
i++;
}
}
magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception);
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->width,1UL,
exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
SetPixelRed(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
break;
}
case GreenChannel:
{
SetPixelGreen(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
break;
}
case BlueChannel:
{
SetPixelBlue(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
break;
}
case OpacityChannel:
{
SetPixelOpacity(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
break;
}
case IndexChannel:
{
SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
magnitude_pixels[i]));
break;
}
case GrayChannels:
{
SetPixelGray(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
break;
}
}
i++;
q++;
}
status=SyncCacheViewAuthenticPixels(magnitude_view,exception);
if (status == MagickFalse)
break;
}
magnitude_view=DestroyCacheView(magnitude_view);
i=0L;
phase_view=AcquireAuthenticCacheView(phase_image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->width,1UL,
exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(phase_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
SetPixelRed(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
break;
}
case GreenChannel:
{
SetPixelGreen(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
break;
}
case BlueChannel:
{
SetPixelBlue(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
break;
}
case OpacityChannel:
{
SetPixelOpacity(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
break;
}
case IndexChannel:
{
SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*phase_pixels[i]));
break;
}
case GrayChannels:
{
SetPixelGray(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
break;
}
}
i++;
q++;
}
status=SyncCacheViewAuthenticPixels(phase_view,exception);
if (status == MagickFalse)
break;
}
phase_view=DestroyCacheView(phase_view);
phase_info=RelinquishVirtualMemory(phase_info);
magnitude_info=RelinquishVirtualMemory(magnitude_info);
return(status);
}
static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info,
const Image *image,double *magnitude_pixels,double *phase_pixels,
ExceptionInfo *exception)
{
CacheView
*image_view;
const char
*value;
double
*source_pixels;
fftw_complex
*forward_pixels;
fftw_plan
fftw_r2c_plan;
MemoryInfo
*forward_info,
*source_info;
const IndexPacket
*indexes;
const PixelPacket
*p;
ssize_t
i,
x;
ssize_t
y;
/*
Generate the forward Fourier transform.
*/
source_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*source_pixels));
if (source_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
source_pixels=(double *) GetVirtualMemoryBlob(source_info);
memset(source_pixels,0,fourier_info->width*fourier_info->height*
sizeof(*source_pixels));
i=0L;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL,
exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
source_pixels[i]=QuantumScale*GetPixelRed(p);
break;
}
case GreenChannel:
{
source_pixels[i]=QuantumScale*GetPixelGreen(p);
break;
}
case BlueChannel:
{
source_pixels[i]=QuantumScale*GetPixelBlue(p);
break;
}
case OpacityChannel:
{
source_pixels[i]=QuantumScale*GetPixelOpacity(p);
break;
}
case IndexChannel:
{
source_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
break;
}
case GrayChannels:
{
source_pixels[i]=QuantumScale*GetPixelGray(p);
break;
}
}
i++;
p++;
}
}
image_view=DestroyCacheView(image_view);
forward_info=AcquireVirtualMemory((size_t) fourier_info->width,
(fourier_info->height/2+1)*sizeof(*forward_pixels));
if (forward_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
return(MagickFalse);
}
forward_pixels=(fftw_complex *) GetVirtualMemoryBlob(forward_info);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ForwardFourierTransform)
#endif
fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height,
source_pixels,forward_pixels,FFTW_ESTIMATE);
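/*
  fftw_plan_dft_r2c_2d(n0,n1,...) takes the slowest-varying dimension
  first and yields n0 x (n1/2+1) complex outputs; passing width,height is
  safe here only because this module forces the transform extent to be
  square (width == height), which also matches the width*(height/2+1)
  elements allocated for forward_pixels above.
*/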
fftw_execute_dft_r2c(fftw_r2c_plan,source_pixels,forward_pixels);
fftw_destroy_plan(fftw_r2c_plan);
source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
value=GetImageArtifact(image,"fourier:normalize");
if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0))
{
double
gamma;
/*
Normalize forward transform.
*/
i=0L;
gamma=PerceptibleReciprocal((double) fourier_info->width*
fourier_info->height);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
forward_pixels[i]*=gamma;
#else
forward_pixels[i][0]*=gamma;
forward_pixels[i][1]*=gamma;
#endif
i++;
}
}
/*
Generate magnitude and phase (or real and imaginary).
*/
i=0L;
if (fourier_info->modulus != MagickFalse)
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
magnitude_pixels[i]=cabs(forward_pixels[i]);
phase_pixels[i]=carg(forward_pixels[i]);
i++;
}
else
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
magnitude_pixels[i]=creal(forward_pixels[i]);
phase_pixels[i]=cimag(forward_pixels[i]);
i++;
}
forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info);
return(MagickTrue);
}
static MagickBooleanType ForwardFourierTransformChannel(const Image *image,
const ChannelType channel,const MagickBooleanType modulus,
Image *fourier_image,ExceptionInfo *exception)
{
double
*magnitude_pixels,
*phase_pixels;
FourierInfo
fourier_info;
MagickBooleanType
status;
MemoryInfo
*magnitude_info,
*phase_info;
fourier_info.width=image->columns;
fourier_info.height=image->rows;
if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
((image->rows % 2) != 0))
{
size_t extent=image->columns < image->rows ? image->rows : image->columns;
fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
}
fourier_info.height=fourier_info.width;
fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
fourier_info.channel=channel;
fourier_info.modulus=modulus;
magnitude_info=AcquireVirtualMemory((size_t) fourier_info.width,
(fourier_info.height/2+1)*sizeof(*magnitude_pixels));
phase_info=AcquireVirtualMemory((size_t) fourier_info.width,
(fourier_info.height/2+1)*sizeof(*phase_pixels));
if ((magnitude_info == (MemoryInfo *) NULL) ||
(phase_info == (MemoryInfo *) NULL))
{
if (phase_info != (MemoryInfo *) NULL)
phase_info=RelinquishVirtualMemory(phase_info);
if (magnitude_info != (MemoryInfo *) NULL)
magnitude_info=RelinquishVirtualMemory(magnitude_info);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels,
phase_pixels,exception);
if (status != MagickFalse)
status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels,
phase_pixels,exception);
phase_info=RelinquishVirtualMemory(phase_info);
magnitude_info=RelinquishVirtualMemory(magnitude_info);
return(status);
}
#endif
MagickExport Image *ForwardFourierTransformImage(const Image *image,
const MagickBooleanType modulus,ExceptionInfo *exception)
{
Image
*fourier_image;
fourier_image=NewImageList();
#if !defined(MAGICKCORE_FFTW_DELEGATE)
(void) modulus;
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
image->filename);
#else
{
Image
*magnitude_image;
size_t
height,
width;
width=image->columns;
height=image->rows;
if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
((image->rows % 2) != 0))
{
size_t extent=image->columns < image->rows ? image->rows :
image->columns;
width=(extent & 0x01) == 1 ? extent+1UL : extent;
}
height=width;
magnitude_image=CloneImage(image,width,height,MagickTrue,exception);
if (magnitude_image != (Image *) NULL)
{
Image
*phase_image;
magnitude_image->storage_class=DirectClass;
magnitude_image->depth=32UL;
phase_image=CloneImage(image,width,height,MagickTrue,exception);
if (phase_image == (Image *) NULL)
magnitude_image=DestroyImage(magnitude_image);
else
{
MagickBooleanType
is_gray,
status;
phase_image->storage_class=DirectClass;
phase_image->depth=32UL;
AppendImageToList(&fourier_image,magnitude_image);
AppendImageToList(&fourier_image,phase_image);
status=MagickTrue;
is_gray=IsGrayImage(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel sections
#endif
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
if (is_gray != MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
GrayChannels,modulus,fourier_image,exception);
else
thread_status=ForwardFourierTransformChannel(image,RedChannel,
modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
GreenChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
BlueChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (image->matte != MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
OpacityChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (image->colorspace == CMYKColorspace)
thread_status=ForwardFourierTransformChannel(image,
IndexChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
}
if (status == MagickFalse)
fourier_image=DestroyImageList(fourier_image);
fftw_cleanup();
}
}
}
#endif
return(fourier_image);
}
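/*
  A round-trip sketch (an illustrative fragment, kept disabled; error
  handling elided). With the fourier:normalize artifact left unset, the
  forward pass is normalized by 1/(width*height) and the unnormalized
  FFTW inverse restores the original scale, so the reconstruction matches
  the input up to padding and rounding:
*/
#if 0
static Image *FourierRoundTripExample(const Image *image,
  ExceptionInfo *exception)
{
  Image
    *pair,
    *restored;

  pair=ForwardFourierTransformImage(image,MagickTrue,exception);
  if (pair == (Image *) NULL)
    return((Image *) NULL);
  restored=InverseFourierTransformImage(pair,pair->next,MagickTrue,
    exception);
  pair=DestroyImageList(pair);
  return(restored);
}
#endif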
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v e r s e F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InverseFourierTransformImage() implements the inverse discrete Fourier
% transform (DFT) of the image either as a magnitude / phase or real /
% imaginary image pair.
%
% The format of the InverseFourierTransformImage method is:
%
% Image *InverseFourierTransformImage(const Image *magnitude_image,
% const Image *phase_image,const MagickBooleanType modulus,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o magnitude_image: the magnitude or real image.
%
% o phase_image: the phase or imaginary image.
%
% o modulus: if true, return the transform as a magnitude / phase pair,
% otherwise as a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
static MagickBooleanType InverseQuadrantSwap(const size_t width,
const size_t height,const double *source,double *destination)
{
ssize_t
x;
ssize_t
center,
y;
/*
Swap quadrants.
*/
center=(ssize_t) (width/2L)+1L;
for (y=1L; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L+1L); x++)
destination[(height-y)*center-x+width/2L]=source[y*width+x];
for (y=0L; y < (ssize_t) height; y++)
destination[y*center]=source[y*width+width/2L];
for (x=0L; x < center; x++)
destination[x]=source[center-x-1L];
return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination));
}
static MagickBooleanType InverseFourier(FourierInfo *fourier_info,
const Image *magnitude_image,const Image *phase_image,
fftw_complex *fourier_pixels,ExceptionInfo *exception)
{
CacheView
*magnitude_view,
*phase_view;
double
*inverse_pixels,
*magnitude_pixels,
*phase_pixels;
MagickBooleanType
status;
MemoryInfo
*inverse_info,
*magnitude_info,
*phase_info;
const IndexPacket
*indexes;
const PixelPacket
*p;
ssize_t
i,
x;
ssize_t
y;
/*
Inverse Fourier - read the images and break them down into double arrays.
*/
magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*magnitude_pixels));
phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*phase_pixels));
inverse_info=AcquireVirtualMemory((size_t) fourier_info->width,
(fourier_info->height/2+1)*sizeof(*inverse_pixels));
if ((magnitude_info == (MemoryInfo *) NULL) ||
(phase_info == (MemoryInfo *) NULL) ||
(inverse_info == (MemoryInfo *) NULL))
{
if (magnitude_info != (MemoryInfo *) NULL)
magnitude_info=RelinquishVirtualMemory(magnitude_info);
if (phase_info != (MemoryInfo *) NULL)
phase_info=RelinquishVirtualMemory(phase_info);
if (inverse_info != (MemoryInfo *) NULL)
inverse_info=RelinquishVirtualMemory(inverse_info);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
magnitude_image->filename);
return(MagickFalse);
}
magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info);
i=0L;
magnitude_view=AcquireVirtualCacheView(magnitude_image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL,
exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
magnitude_pixels[i]=QuantumScale*GetPixelRed(p);
break;
}
case GreenChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelGreen(p);
break;
}
case BlueChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelBlue(p);
break;
}
case OpacityChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelOpacity(p);
break;
}
case IndexChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
break;
}
case GrayChannels:
{
magnitude_pixels[i]=QuantumScale*GetPixelGray(p);
break;
}
}
i++;
p++;
}
}
magnitude_view=DestroyCacheView(magnitude_view);
status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
magnitude_pixels,inverse_pixels);
(void) memcpy(magnitude_pixels,inverse_pixels,fourier_info->height*
fourier_info->center*sizeof(*magnitude_pixels));
i=0L;
phase_view=AcquireVirtualCacheView(phase_image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1,
exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(phase_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
phase_pixels[i]=QuantumScale*GetPixelRed(p);
break;
}
case GreenChannel:
{
phase_pixels[i]=QuantumScale*GetPixelGreen(p);
break;
}
case BlueChannel:
{
phase_pixels[i]=QuantumScale*GetPixelBlue(p);
break;
}
case OpacityChannel:
{
phase_pixels[i]=QuantumScale*GetPixelOpacity(p);
break;
}
case IndexChannel:
{
phase_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
break;
}
case GrayChannels:
{
phase_pixels[i]=QuantumScale*GetPixelGray(p);
break;
}
}
i++;
p++;
}
}
if (fourier_info->modulus != MagickFalse)
{
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
phase_pixels[i]-=0.5;
phase_pixels[i]*=(2.0*MagickPI);
i++;
}
}
phase_view=DestroyCacheView(phase_view);
CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
if (status != MagickFalse)
status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
phase_pixels,inverse_pixels);
(void) memcpy(phase_pixels,inverse_pixels,fourier_info->height*
fourier_info->center*sizeof(*phase_pixels));
inverse_info=RelinquishVirtualMemory(inverse_info);
/*
Merge the two arrays into complex Fourier coefficients.
*/
i=0L;
if (fourier_info->modulus != MagickFalse)
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I*
magnitude_pixels[i]*sin(phase_pixels[i]);
#else
fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]);
fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]);
#endif
i++;
}
else
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i];
#else
fourier_pixels[i][0]=magnitude_pixels[i];
fourier_pixels[i][1]=phase_pixels[i];
#endif
i++;
}
magnitude_info=RelinquishVirtualMemory(magnitude_info);
phase_info=RelinquishVirtualMemory(phase_info);
return(status);
}
static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info,
fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
double
*source_pixels;
const char
*value;
fftw_plan
fftw_c2r_plan;
MemoryInfo
*source_info;
IndexPacket
*indexes;
PixelPacket
*q;
ssize_t
i,
x;
ssize_t
y;
source_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*source_pixels));
if (source_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
source_pixels=(double *) GetVirtualMemoryBlob(source_info);
value=GetImageArtifact(image,"fourier:normalize");
if (LocaleCompare(value,"inverse") == 0)
{
double
gamma;
/*
Normalize inverse transform.
*/
i=0L;
gamma=PerceptibleReciprocal((double) fourier_info->width*
fourier_info->height);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier_pixels[i]*=gamma;
#else
fourier_pixels[i][0]*=gamma;
fourier_pixels[i][1]*=gamma;
#endif
i++;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_InverseFourierTransform)
#endif
fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height,
fourier_pixels,source_pixels,FFTW_ESTIMATE);
fftw_execute_dft_c2r(fftw_c2r_plan,fourier_pixels,source_pixels);
fftw_destroy_plan(fftw_c2r_plan);
i=0L;
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
if (y >= (ssize_t) image->rows)
break;
q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width >
image->columns ? image->columns : fourier_info->width,1UL,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
if (x < (ssize_t) image->columns)
switch (fourier_info->channel)
{
case RedChannel:
default:
{
SetPixelRed(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
case GreenChannel:
{
SetPixelGreen(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
case BlueChannel:
{
SetPixelBlue(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
case OpacityChannel:
{
SetPixelOpacity(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
case IndexChannel:
{
SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
source_pixels[i]));
break;
}
case GrayChannels:
{
SetPixelGray(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
}
i++;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
source_info=RelinquishVirtualMemory(source_info);
return(MagickTrue);
}
static MagickBooleanType InverseFourierTransformChannel(
const Image *magnitude_image,const Image *phase_image,
const ChannelType channel,const MagickBooleanType modulus,
Image *fourier_image,ExceptionInfo *exception)
{
fftw_complex
*inverse_pixels;
FourierInfo
fourier_info;
MagickBooleanType
status;
MemoryInfo
*inverse_info;
fourier_info.width=magnitude_image->columns;
fourier_info.height=magnitude_image->rows;
if ((magnitude_image->columns != magnitude_image->rows) ||
((magnitude_image->columns % 2) != 0) ||
((magnitude_image->rows % 2) != 0))
{
size_t extent=magnitude_image->columns < magnitude_image->rows ?
magnitude_image->rows : magnitude_image->columns;
fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
}
fourier_info.height=fourier_info.width;
fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
fourier_info.channel=channel;
fourier_info.modulus=modulus;
inverse_info=AcquireVirtualMemory((size_t) fourier_info.width,
(fourier_info.height/2+1)*sizeof(*inverse_pixels));
if (inverse_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
magnitude_image->filename);
return(MagickFalse);
}
inverse_pixels=(fftw_complex *) GetVirtualMemoryBlob(inverse_info);
status=InverseFourier(&fourier_info,magnitude_image,phase_image,
inverse_pixels,exception);
if (status != MagickFalse)
status=InverseFourierTransform(&fourier_info,inverse_pixels,fourier_image,
exception);
inverse_info=RelinquishVirtualMemory(inverse_info);
return(status);
}
#endif
MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image,
const Image *phase_image,const MagickBooleanType modulus,
ExceptionInfo *exception)
{
Image
*fourier_image;
assert(magnitude_image != (Image *) NULL);
assert(magnitude_image->signature == MagickCoreSignature);
if (magnitude_image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
magnitude_image->filename);
if (phase_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",magnitude_image->filename);
return((Image *) NULL);
}
#if !defined(MAGICKCORE_FFTW_DELEGATE)
fourier_image=(Image *) NULL;
(void) modulus;
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
magnitude_image->filename);
#else
{
fourier_image=CloneImage(magnitude_image,magnitude_image->columns,
magnitude_image->rows,MagickTrue,exception);
if (fourier_image != (Image *) NULL)
{
MagickBooleanType
is_gray,
status;
status=MagickTrue;
is_gray=IsGrayImage(magnitude_image,exception);
if (is_gray != MagickFalse)
is_gray=IsGrayImage(phase_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel sections
#endif
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
if (is_gray != MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,GrayChannels,modulus,fourier_image,exception);
else
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,RedChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,GreenChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,BlueChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (magnitude_image->matte != MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,OpacityChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (magnitude_image->colorspace == CMYKColorspace)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,IndexChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
}
if (status == MagickFalse)
fourier_image=DestroyImage(fourier_image);
}
fftw_cleanup();
}
#endif
return(fourier_image);
}
|
GB_unop__frexpe_fp32_fp32.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__frexpe_fp32_fp32
// op(A') function: GB_unop_tran__frexpe_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = GB_frexpef (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_frexpef (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = GB_frexpef (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FREXPE || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__frexpe_fp32_fp32
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = GB_frexpef (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = GB_frexpef (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
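// Worked example: GB_frexpef returns the binary exponent e from frexpf,
// where x = f * 2^e with f in [0.5,1), so frexpe(8.0f) = 4.0f (8 = 0.5*2^4)
// and frexpe(0.75f) = 0.0f (0.75 = 0.75*2^0).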
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__frexpe_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__max_int8.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__max_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__max_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__max_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_int8)
// A*D function (colscale): GB (_AxD__max_int8)
// D*A function (rowscale): GB (_DxB__max_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__max_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__max_int8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_int8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_int8)
// C=scalar+B GB (_bind1st__max_int8)
// C=scalar+B' GB (_bind1st_tran__max_int8)
// C=A+scalar GB (_bind2nd__max_int8)
// C=A'+scalar GB (_bind2nd_tran__max_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_IMAX (aij, bij)
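// (GB_IMAX is GraphBLAS's integer max macro, essentially
// ((x) > (y)) ? (x) : (y), so no typecasting is involved for int8_t.)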
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMAX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_INT8 || GxB_NO_MAX_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__max_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__max_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__max_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__max_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__max_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__max_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__max_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__max_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__max_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__max_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__max_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__max_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IMAX (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__max_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IMAX (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
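// Semantics sketch for the kernel above: with y = 3 it computes
// Cx [p] = GB_IMAX (Ax [p], 3) for every entry present per the bitmap Ab
// (GBB (Ab, p) is true when Ab is NULL or Ab [p] is nonzero), so
// Ax = {-5, 0, 7} yields Cx = {3, 3, 7}.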
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__max_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__max_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
utils.c
|
#include <cdnn/utils.h>
float cache;
int return_cache;
int nn_threads;
/**!
* Creates a matrix filled with zeros.
* @param dims An array of matrix dimensions (int)[rows,columns]
* @result A pointer to the created matrix.
* @return A pointer to the created matrix.
*/
dARRAY * zeros(int * dims){
dARRAY * matrix = (dARRAY*)malloc(sizeof(dARRAY));
matrix->matrix = (float*)calloc(dims[0]*dims[1],sizeof(float));
matrix->shape[0] = dims[0];
matrix->shape[1] = dims[1];
return matrix;
}
/**!
* Creates a matrix filled with ones.
* @param dims An array of matrix dimensions (int)[rows,columns]
* @result A pointer to the created matrix.
* @return A pointer to the created matrix.
*/
dARRAY * ones(int * dims){
dARRAY * matrix = (dARRAY*)malloc(sizeof(dARRAY));
matrix->matrix = (float*)malloc(sizeof(float)*(dims[0]*dims[1]));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads)
for(int i=0;i<dims[0]*dims[1];i++){
matrix->matrix[i]=1;
}
matrix->shape[0] = dims[0];
matrix->shape[1] = dims[1];
return matrix;
}
/**!
* Creates an identity matrix.
* @param dims An array of matrix dimensions (int)[rows,columns]
* @result A pointer of identity matrix.
* @return A pointer of identity matrix.
*/
dARRAY * eye(int * dims){
dARRAY * matrix = (dARRAY*)malloc(sizeof(dARRAY));
matrix->matrix = (float*)calloc((dims[0]*dims[1]),sizeof(float));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) collapse(1)
for(int i=0;i<dims[0]; i++){
for(int j=0;j<dims[1];j++)
matrix->matrix[i*dims[1]+j] = i==j ? 1: 0;
}
matrix->shape[0] = dims[0];
matrix->shape[1] = dims[1];
return matrix;
}
/**!
* Finds the transpose of the given matrix.
* @param Matrix The input Matrix of dARRAY Object
* @result A pointer to the result of Transpose(Matrix)
* @return A pointer to the result of Transpose(Matrix)
*/
dARRAY * transpose(dARRAY * restrict Matrix){
if(Matrix==NULL){
printf("\033[1;31mError:\033[93m Matrix is empty. Call transpose() only after intializing dARRAY object.\033[0m\n");
exit(EXIT_FAILURE);
}
if(Matrix->shape[0]==1 && Matrix->shape[1]==1) return Matrix;
dARRAY * matrix = (dARRAY*)malloc(sizeof(dARRAY));
matrix->matrix = (float*)calloc(Matrix->shape[0]*Matrix->shape[1],sizeof(float));
  //cblas_somatcopy (an OpenBLAS/MKL extension) does the out-of-place transpose;
  //it runs synchronously so matrix->matrix is fully written before the function returns
  cblas_somatcopy(CblasRowMajor,CblasTrans,Matrix->shape[0],Matrix->shape[1],1,Matrix->matrix,Matrix->shape[1],matrix->matrix,Matrix->shape[0]);
matrix->shape[0] = Matrix->shape[1];
matrix->shape[1] = Matrix->shape[0];
return matrix;
}
/**!
* Finds the transpose of the given matrix (legacy implementation leaving it here for reference (fast transpose without using CBLAS)).
* @param Matrix The input Matrix of dARRAY Object
* @result A pointer to the result of Transpose_my(Matrix)
* @return A pointer to the result of Transpose_my(Matrix)
*/
dARRAY * transpose_my(dARRAY * restrict Matrix){
if(Matrix==NULL){
printf("\033[1;31mError:\033[93m Matrix is empty. Call transpose() only after intializing dARRAY object.\033[0m\n");
return NULL;
}
if(Matrix->shape[0]==1 && Matrix->shape[1]==1) return Matrix;
dARRAY * matrix = (dARRAY*)malloc(sizeof(dARRAY));
matrix->matrix = (float*)calloc(Matrix->shape[0]*Matrix->shape[1],sizeof(float));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(Matrix,matrix) schedule(static)
for(int i=0;i<Matrix->shape[0];i++)
for(int j=0;j<Matrix->shape[1];j++)
matrix->matrix[j*Matrix->shape[0]+i] = Matrix->matrix[i*Matrix->shape[1]+j];
matrix->shape[0] = Matrix->shape[1];
matrix->shape[1] = Matrix->shape[0];
return matrix;
}
/**!
* Finds the dot product (Matrix Multiplication) of two matrices.
* @param MatrixA First Matrix
* @param MatrixB Second Matrix
* @result Returns a pointer to the result of dot(MatrixA,MatrixB)
* @return A pointer to the result of dot(MatrixA,MatrixB)
*/
dARRAY * dot(dARRAY * MatrixA, dARRAY * MatrixB){
  //validate inputs before dereferencing them for the shape check
  if(MatrixA == NULL){
    printf("\033[1;31mError:\033[93m MatrixA is empty. Call dot() only after initializing dARRAY object\033[0m\n");
    exit(EXIT_FAILURE);
  }
  if(MatrixB == NULL){
    printf("\033[1;31mError:\033[93m MatrixB is empty. Call dot() only after initializing dARRAY object\033[0m\n");
    exit(EXIT_FAILURE);
  }
  if(MatrixA->shape[1]!=MatrixB->shape[0]){
    printf("\033[1;31mError:\033[93m Shape error while performing dot(). Matrix dimensions do not align. %d(dim1) != %d(dim0)\033[0m\n",MatrixA->shape[1],MatrixB->shape[0]);
    exit(EXIT_FAILURE);
  }
long long int m,n,k;
m = MatrixA->shape[0];
n = MatrixB->shape[1];
k = MatrixB->shape[0];
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(m*n,sizeof(float));
  //sgemm runs synchronously so result->matrix is fully written before returning
cblas_sgemm(CblasRowMajor,\
CblasNoTrans,\
CblasNoTrans,\
m,n,k,\
1,\
MatrixA->matrix,\
k,\
MatrixB->matrix,\
n,\
0,\
result->matrix,\
n);
result->shape[0] = MatrixA->shape[0];
result->shape[1] = MatrixB->shape[1];
return result;
}
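/* Usage sketch (illustrative, guarded by the hypothetical macro
 * CDNN_UTILS_EXAMPLES; assumes the declarations from cdnn/utils.h):
 * a (2,3) x (3,1) product that yields a (2,1) column. */
#ifdef CDNN_UTILS_EXAMPLES
static void example_dot(void){
  int a_dims[2] = {2,3};
  int b_dims[2] = {3,1};
  dARRAY * A = ones(a_dims);
  dARRAY * B = ones(b_dims);
  dARRAY * C = dot(A,B); //every entry is a row sum of ones: 3.0f
  shape(C);              //prints (2,1)
  free2d(A); free2d(B); free2d(C);
}
#endif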
/**!
* Finds the dot product (Matrix Multiplication) of two matrices (legacy implementation leaving here for reference (fast matrix multiplication without using CBLAS)).
* @param MatrixA First Matrix
* @param MatrixB Second Matrix
* @result Returns a pointer to the result of dot_my(MatrixA,MatrixB)
* @return A pointer to the result of dot_my(MatrixA,MatrixB)
*/
dARRAY * dot_my(dARRAY * MatrixA, dARRAY * MatrixB){
  //validate inputs before dereferencing them for the shape check
  if(MatrixB == NULL || MatrixA == NULL){
    printf("\033[1;31mError:\033[93m One of the input matrices is empty. Call dot() only after initializing dARRAY object\033[0m\n");
    return NULL;
  }
  if(MatrixA->shape[1]!=MatrixB->shape[0]){
    printf("\033[1;31mError:\033[93m Shape error while performing dot(). Matrix dimensions do not align. %d(dim1) != %d(dim0)\033[0m\n",MatrixA->shape[1],MatrixB->shape[0]);
    return NULL;
  }
dARRAY * BT = NULL;
dARRAY * result = NULL;
result = (dARRAY *)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(MatrixA->shape[0]*MatrixB->shape[1],sizeof(float));
BT = transpose(MatrixB);
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) collapse(1) schedule(static)
for(int i=0;i<MatrixA->shape[0];i++){
for(int j=0;j<MatrixB->shape[1];j++){
for(int k=0;k<MatrixB->shape[0];k++){
result->matrix[i * MatrixB->shape[1]+j] += MatrixA->matrix[i*MatrixA->shape[1]+k] * BT->matrix[j*MatrixB->shape[0]+k];
}
}
}
free2d(BT);
BT = NULL;
result->shape[0] = MatrixA->shape[0];
result->shape[1] = MatrixB->shape[1];
return result;
}
/**!
* Function performs element-wise multiplication on two matrices.
* @param MatrixA First Matrix
* @param MatrixB Second Matrix
* @result Returns a pointer to the result of multiply(MatrixA,MatrixB)
* @return A pointer to the result of multiply(MatrixA,MatrixB)
*/
dARRAY * multiply(dARRAY * restrict MatrixA, dARRAY * restrict MatrixB){
if(MatrixA == NULL){
printf("\033[1;31mError:\033[93m MatrixA is empty. Call multiply() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
if(MatrixB == NULL){
printf("\033[1;31mError:\033[93m MatrixB is empty. Call multiply() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * temp = NULL;
  int x = size(MatrixA);
  int y = size(MatrixB);
int flag = 0;
if(x>y){
temp = b_cast(MatrixA,MatrixB);
flag=1;
}
else if(x<y){
temp = b_cast(MatrixB,MatrixA);
flag=1;
}
if(temp==NULL && flag){
printf("\033[1;31mError:\033[93m Could not perform multiply(). Please check shape of input matrices.\033[0m\n");
return NULL;
}
  //the output takes the shape of the larger operand; when the sizes match, MatrixA's shape is used
  int out_m = x>=y ? MatrixA->shape[0] : MatrixB->shape[0];
  int out_n = x>=y ? MatrixA->shape[1] : MatrixB->shape[1];
  dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
  result->matrix = (float*)calloc(out_m*out_n,sizeof(float));
if(x==y){
omp_set_num_threads(nn_threads);
int i = 0;
int m = MatrixA->shape[0];
int n = MatrixA->shape[1];
float * matrixA, *matrixB,*res_matrix;
matrixA = MatrixA->matrix;
matrixB = MatrixB->matrix;
res_matrix = result->matrix;
#pragma omp parallel for num_threads(nn_threads) shared(matrixA,matrixB,res_matrix,m,n) private(i) schedule(static)
for(i=0;i<m*n;i++)
res_matrix[i] = matrixA[i] * matrixB[i];
}
else{
omp_set_num_threads(nn_threads);
int i = 0;
    int m = out_m;
    int n = out_n;
float * matrixA, *matrixB,*res_matrix,*temp_matrix;
matrixA = MatrixA->matrix;
matrixB = MatrixB->matrix;
temp_matrix = temp->matrix;
res_matrix = result->matrix;
#pragma omp parallel for num_threads(nn_threads) shared(matrixA,matrixB,res_matrix,temp_matrix,m,n,x,y) private(i) schedule(static)
for(i=0;i<m*n;i++)
res_matrix[i] = x>y ? matrixA[i] * temp_matrix[i] : temp_matrix[i] * matrixB[i];
}
if(temp!=NULL)
free2d(temp);
temp = NULL;
  result->shape[0] = out_m;
  result->shape[1] = out_n;
return result;
}
/**!
 * Function performs element-wise division on two matrices.
* @param MatrixA First Matrix
* @param MatrixB Second Matrix
* @result Returns a pointer to the result of divison(MatrixA,MatrixB)
* @return A pointer to the result of divison(MatrixA,MatrixB)
*/
dARRAY * divison(dARRAY * restrict MatrixA, dARRAY * restrict MatrixB){
if(MatrixA == NULL){
printf("\033[1;31mError:\033[93m MatrixA is empty. Call divison() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
if(MatrixB == NULL){
printf("\033[1;31mError:\033[93m MatrixB is empty. Call divison() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * temp = NULL;
int x = size(MatrixA);
int y = size(MatrixB);
int flag=0;
if(x>y){
temp = b_cast(MatrixA,MatrixB);
flag=1;
}
else if(x<y){
temp = b_cast(MatrixB,MatrixA);
flag=1;
}
if(temp==NULL && flag){
printf("\033[1;31mError:\033[93m Could not perform divison(). Please check shape of input matrices.\033[0m\n");
return NULL;
}
  //the output takes the shape of the larger operand; when the sizes match, MatrixA's shape is used
  int out_m = x>=y ? MatrixA->shape[0] : MatrixB->shape[0];
  int out_n = x>=y ? MatrixA->shape[1] : MatrixB->shape[1];
  dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
  result->matrix = (float*)calloc(out_m*out_n,sizeof(float));
if(x==y){
omp_set_num_threads(nn_threads);
int i = 0;
int m = MatrixA->shape[0];
int n = MatrixA->shape[1];
float * matrixA, *matrixB,*res_matrix;
matrixA = MatrixA->matrix;
matrixB = MatrixB->matrix;
res_matrix = result->matrix;
#pragma omp parallel for num_threads(nn_threads) shared(matrixA,matrixB,res_matrix,m,n) private(i) schedule(static)
for(i=0;i<m*n;i++){
res_matrix[i] = matrixA[i] / matrixB[i];
}
}
else{
omp_set_num_threads(nn_threads);
int i = 0;
    int m = out_m;
    int n = out_n;
float * matrixA, *matrixB,*res_matrix,*temp_matrix;
matrixA = MatrixA->matrix;
matrixB = MatrixB->matrix;
temp_matrix = temp->matrix;
res_matrix = result->matrix;
#pragma omp parallel for num_threads(nn_threads) shared(matrixA,matrixB,res_matrix,temp_matrix,m,n,x,y) private(i) schedule(static)
for(i=0;i<m*n;i++)
res_matrix[i] = x>y ? matrixA[i] / temp_matrix[i] : temp_matrix[i] / matrixB[i];
}
if(temp!=NULL)
free2d(temp);
temp = NULL;
  result->shape[0] = out_m;
  result->shape[1] = out_n;
return result;
}
/**!
* Function performs element-wise addition on two matrices.
* @param MatrixA First Matrix
* @param MatrixB Second Matrix
* @result Returns a pointer to the result of add(MatrixA,MatrixB)
* @return A pointer to the result of add(MatrixA,MatrixB)
*/
dARRAY * add(dARRAY * MatrixA, dARRAY * MatrixB){
if(MatrixA == NULL){
printf("\033[1;31mError:\033[93m MatrixA is empty. Call add() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
if(MatrixB == NULL){
printf("\033[1;31mError:\033[93m MatrixB is empty. Call add() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * bcast_arr = NULL;
int x = size(MatrixA);
int y = size(MatrixB);
int flag=0;
if(x>y){
bcast_arr = b_cast(MatrixA,MatrixB);
flag=1;
}
else if(x<y){
bcast_arr = b_cast(MatrixB,MatrixA);
flag=1;
}
if(bcast_arr==NULL && flag){
printf("\033[1;31mError:\033[93m Could not perform add(). Please check shape of input matrices.\033[0m\n");
return NULL;
}
  //the output takes the shape of the larger operand; when the sizes match, MatrixA's shape is used
  int out_m = x>=y ? MatrixA->shape[0] : MatrixB->shape[0];
  int out_n = x>=y ? MatrixA->shape[1] : MatrixB->shape[1];
  dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
  result->matrix = (float*)calloc(out_m*out_n,sizeof(float));
if(x==y){
cblas_scopy(MatrixB->shape[0]*MatrixB->shape[1],MatrixB->matrix,1,result->matrix,1);
cblas_saxpy(MatrixA->shape[0]*MatrixA->shape[1],1,MatrixA->matrix,1,result->matrix,1);
}
else{
if(x>y){
cblas_scopy(MatrixA->shape[0]*MatrixA->shape[1],bcast_arr->matrix,1,result->matrix,1);
cblas_saxpy(MatrixA->shape[0]*MatrixA->shape[1],1,MatrixA->matrix,1,result->matrix,1);
}
else{
cblas_scopy(MatrixB->shape[0]*MatrixB->shape[1],MatrixB->matrix,1,result->matrix,1);
      cblas_saxpy(MatrixB->shape[0]*MatrixB->shape[1],1,bcast_arr->matrix,1,result->matrix,1);
}
}
if(bcast_arr!=NULL)
free2d(bcast_arr);
  result->shape[0] = out_m;
  result->shape[1] = out_n;
return result;
}
/**!
* Function performs element-wise subtraction on two matrices.
* @param MatrixA First Matrix
* @param MatrixB Second Matrix
* @result Returns a pointer to the result of subtract(MatrixA,MatrixB)
* @return A pointer to the result of subtract(MatrixA,MatrixB)
*/
dARRAY * subtract(dARRAY * MatrixA, dARRAY * MatrixB){
if(MatrixA == NULL){
printf("\033[1;31mError:\033[93m MatrixA is empty. Call subtract() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
if(MatrixB == NULL){
printf("\033[1;31mError:\033[93m MatrixB is empty. Call subtract() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * bcast_arr = NULL;
int x = size(MatrixA);
int y = size(MatrixB);
int flag=0;
  if(x>y){
    bcast_arr = b_cast(MatrixA,MatrixB);
    flag=1;
  }
else if(x<y){
bcast_arr = b_cast(MatrixB,MatrixA);
flag=1;
}
if(bcast_arr==NULL && flag==1){
printf("\033[1;31mError:\033[93m Could not perform subtract(). Please check shape of input matrices.\033[0m\n");
return NULL;
}
  //the output takes the shape of the larger operand; when the sizes match, MatrixA's shape is used
  int out_m = x>=y ? MatrixA->shape[0] : MatrixB->shape[0];
  int out_n = x>=y ? MatrixA->shape[1] : MatrixB->shape[1];
  dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
  result->matrix = (float*)calloc(out_m*out_n,sizeof(float));
if(x==y){
cblas_scopy(MatrixA->shape[0]*MatrixA->shape[1],MatrixA->matrix,1,result->matrix,1);
cblas_saxpy(MatrixB->shape[0]*MatrixB->shape[1],-1,MatrixB->matrix,1,result->matrix,1);
}
else{
if(x>y){
cblas_scopy(MatrixA->shape[0]*MatrixA->shape[1],MatrixA->matrix,1,result->matrix,1);
cblas_saxpy(MatrixA->shape[0]*MatrixA->shape[1],-1,bcast_arr->matrix,1,result->matrix,1);
}
else{
cblas_scopy(bcast_arr->shape[0]*bcast_arr->shape[1],bcast_arr->matrix,1,result->matrix,1);
      cblas_saxpy(MatrixB->shape[0]*MatrixB->shape[1],-1,MatrixB->matrix,1,result->matrix,1);
}
}
if(bcast_arr!=NULL)
free2d(bcast_arr);
bcast_arr = NULL;
  result->shape[0] = out_m;
  result->shape[1] = out_n;
return result;
}
/**!
* Function Adds a scalar value to each element of a matrix.
* @param matrix A matrix of dARRAY Object.
* @param scalar A scalar value that needs to be added to each element of matrix.
* @result A pointer to the result of addScalar(matrix,scalar)
* @return A pointer to the result of addScalar(matrix,scalar)
*/
dARRAY * addScalar(dARRAY * matrix, float scalar){
if(matrix == NULL){
printf("\033[1;31mError:\033[93m matrix is empty. Call addScalar() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(matrix->shape[0]*matrix->shape[1],sizeof(float));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrix,result,scalar) schedule(static)
for(int i=0; i<matrix->shape[0]*matrix->shape[1]; i++){
result->matrix[i] = matrix->matrix[i] + scalar;
}
result->shape[0] = matrix->shape[0];
result->shape[1] = matrix->shape[1];
return result;
}
/**!
* Function subtracts a scalar value from each element of a matrix.
* @param matrix A matrix of dARRAY Object.
* @param scalar A scalar value that needs to be subtracted from each element of matrix.
* @result A pointer to the result of subScalar(matrix,scalar)
* @return A pointer to the result of subScalar(matrix,scalar)
*/
dARRAY * subScalar(dARRAY * matrix, float scalar){
if(matrix == NULL){
printf("\033[1;31mError:\033[93m matrix is empty. Call subScalar() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(matrix->shape[0]*matrix->shape[1],sizeof(float));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrix,result,scalar) schedule(static)
for(int i=0; i<matrix->shape[0]*matrix->shape[1]; i++){
result->matrix[i] = matrix->matrix[i] - scalar;
}
result->shape[0] = matrix->shape[0];
result->shape[1] = matrix->shape[1];
return result;
}
/**!
* Function multiplies a scalar value with each element of a matrix.
* @param matrix A matrix of dARRAY Object
* @param scalar A scalar value that needs to be multiplied with each element of matrix.
* @result A pointer to the result of mulScalar(matrix,scalar)
* @return A pointer to the result of mulScalar(matrix,scalar)
*/
dARRAY * mulScalar(dARRAY * matrix, float scalar){
if(matrix == NULL){
printf("\033[1;31mError:\033[93m matrix is empty. Call mulScalar() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(matrix->shape[0]*matrix->shape[1],sizeof(float));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrix,result,scalar) schedule(static)
for(int i=0; i<matrix->shape[0]*matrix->shape[1]; i++){
result->matrix[i] = matrix->matrix[i] * scalar;
}
result->shape[0] = matrix->shape[0];
result->shape[1] = matrix->shape[1];
return result;
}
dARRAY * mulScalarm(dARRAY * matrix, float scalar){
  if(matrix==NULL){
    printf("\033[1;31mError:\033[93m Matrix is empty. Call mulScalarm() only after initializing dARRAY object.\033[0m\n");
    return NULL;
  }
  //copy first and scale the copy, so the input matrix is left unmodified
  float * scaled_mat = (float*)calloc(matrix->shape[0]*matrix->shape[1],sizeof(float));
  cblas_scopy(matrix->shape[0]*matrix->shape[1],matrix->matrix,1,scaled_mat,1);
  cblas_sscal(matrix->shape[0]*matrix->shape[1],scalar,scaled_mat,1);
  dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
  result->matrix = scaled_mat;
  result->shape[0] = matrix->shape[0];
  result->shape[1] = matrix->shape[1];
  return result;
}
/**!
* Function divides a scalar value with each element of a matrix.
* @param matrix A matrix of dARRAY Object.
* @param scalar A scalar value that needs to be divided with each element of matrix.
* @result A pointer to the result of divScalar(matrix,scalar)
* @return A pointer to the result of divScalar(matrix,scalar)
*/
dARRAY * divScalar(dARRAY * matrix, float scalar){
if(matrix == NULL){
printf("\033[1;31mError:\033[93m matrix is empty. Call divScalar() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(matrix->shape[0]*matrix->shape[1],sizeof(float));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrix,result,scalar) schedule(static)
for(int i=0; i<matrix->shape[0]*matrix->shape[1]; i++){
result->matrix[i] = matrix->matrix[i] / scalar;
}
result->shape[0] = matrix->shape[0];
result->shape[1] = matrix->shape[1];
return result;
}
dARRAY * divScalarm(dARRAY * matrix, float scalar){
  if(matrix==NULL){
    printf("\033[1;31mError:\033[93m Matrix is empty. Call divScalarm() only after initializing dARRAY object.\033[0m\n");
    return NULL;
  }
  //copy first and scale the copy by 1/scalar, so the input matrix is left unmodified
  float * div_mat = (float*)calloc(matrix->shape[0]*matrix->shape[1],sizeof(float));
  cblas_scopy(matrix->shape[0]*matrix->shape[1],matrix->matrix,1,div_mat,1);
  cblas_sscal(matrix->shape[0]*matrix->shape[1],(1.0f/scalar),div_mat,1);
  dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
  result->matrix = div_mat;
  result->shape[0] = matrix->shape[0];
  result->shape[1] = matrix->shape[1];
  return result;
}
/**!
* Function raises the elements of a matrix to the specified power.
* @param matrix A matrix of dARRAY Object
* @param power A value to which each element in matrix must be raised.
* @result A pointer to the result of power(matrix,power)
* @return A pointer to the result of power(matrix,power)
*/
dARRAY * power(dARRAY * matrix, float power){
if(matrix == NULL){
printf("\033[1;31mError:\033[93m matrix is empty. Call power() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(matrix->shape[0]*matrix->shape[1],sizeof(float));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrix,result,power) schedule(static)
for(int i=0; i<matrix->shape[0]*matrix->shape[1]; i++){
result->matrix[i] = (float)pow(matrix->matrix[i],power);
}
result->shape[0] = matrix->shape[0];
result->shape[1] = matrix->shape[1];
return result;
}
/**!
* Function finds the sqrt() of the elements of a matrix.
* @param matrix A matrix of dARRAY Object
* @result A pointer to the result of squareroot(matrix)
* @return A pointer to the result of squareroot(matrix)
*/
dARRAY * squareroot(dARRAY * matrix){
if(matrix == NULL){
printf("\033[1;31mError:\033[93m matrix is empty. Call squareroot() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(matrix->shape[0]*matrix->shape[1],sizeof(float));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrix,result) schedule(static)
for(int i=0; i<matrix->shape[0]*matrix->shape[1]; i++){
result->matrix[i] = (float)sqrt(matrix->matrix[i]);
}
result->shape[0] = matrix->shape[0];
result->shape[1] = matrix->shape[1];
return result;
}
/**!
* Function finds the exp() of the elements of a matrix.
* @param matrix A matrix of dARRAY Object
* @result A pointer to the result of exponential(matrix)
* @return A pointer to the result of exponential(matrix)
*/
dARRAY * exponentional(dARRAY * matrix){
if(matrix == NULL){
printf("\033[1;31mError:\033[93m matrix is empty. Call exponential() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(matrix->shape[0]*matrix->shape[1],sizeof(float));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrix,result) schedule(static)
for(int i=0; i<matrix->shape[0]*matrix->shape[1]; i++){
result->matrix[i] = exp(matrix->matrix[i]);
}
result->shape[0] = matrix->shape[0];
result->shape[1] = matrix->shape[1];
return result;
}
/**!
* Function performs broadcasting of matrices
 * The semantics follow NumPy-style broadcasting; see www.numpy.org for a
 * detailed explanation.
* @param MatrixA Matrix of dARRAY Object
* @param MatrixB Matrix of dARRAY Object
* @result A pointer to the broadcasted matrix
* @return A pointer to the broadcasted matrix
*/
dARRAY * b_cast(dARRAY * MatrixA, dARRAY * MatrixB){
dARRAY * b_castArr = NULL;
if(MatrixA->shape[1]==MatrixB->shape[1] && MatrixB->shape[0]==1 && MatrixA->shape[0]>MatrixB->shape[0]){
//B matrix has the shape of (1,n)
//we need to copy B m times
//M(5,4) B(1,4) repeat 5 * 4 = 20 times
b_castArr = (dARRAY*)malloc(sizeof(dARRAY));
b_castArr->matrix = (float*)calloc(MatrixA->shape[0]*MatrixA->shape[1],sizeof(float));
float * bcast_matrix, *matrixB;
bcast_matrix = b_castArr->matrix;
matrixB = MatrixB->matrix;
int m = MatrixA->shape[0];
int n = MatrixB->shape[1];
int i = 0;
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrixB,bcast_matrix,m,n) private(i) schedule(static,8)
for(i=0;i<m*n;i++){
bcast_matrix[i] = matrixB[(i%n)];
}
b_castArr->shape[0] = MatrixA->shape[0];
b_castArr->shape[1] = MatrixB->shape[1];
}
else if(MatrixA->shape[0]==MatrixB->shape[0] && MatrixB->shape[1]==1 && MatrixA->shape[1]>MatrixB->shape[1]){
//B is of the form (m,1)
//A is of (m,n)
//copy column wise.
b_castArr = (dARRAY*)malloc(sizeof(dARRAY));
b_castArr->matrix = (float*)calloc(MatrixA->shape[0]*MatrixA->shape[1],sizeof(float));
int k=0;
float * bcast_matrix, *matrixB;
bcast_matrix = b_castArr->matrix;
matrixB = MatrixB->matrix;
int m = MatrixA->shape[0];
int n = MatrixA->shape[1];
int i = 0;
int j = 0;
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrixB,bcast_matrix,m,n,k) private(i,j) schedule(static,8)
for(i=0;i<m;i++){
//copy b n times
for(j=0;j<n;j++){
bcast_matrix[k] = matrixB[i];
k++;
}
}
b_castArr->shape[0] = MatrixA->shape[0];
b_castArr->shape[1] = MatrixA->shape[1];
}
return b_castArr;
}
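/* Usage sketch (illustrative, guarded by the hypothetical macro
 * CDNN_UTILS_EXAMPLES): adding a (1,n) bias row to an (m,n) matrix; b_cast()
 * replicates the row m times inside add(). */
#ifdef CDNN_UTILS_EXAMPLES
static void example_broadcast_add(void){
  int a_dims[2] = {4,3};
  int b_dims[2] = {1,3};
  dARRAY * activations = zeros(a_dims);
  dARRAY * bias = ones(b_dims);
  dARRAY * out = add(activations,bias); //the bias row is replicated over all 4 rows
  shape(out);                           //prints (4,3)
  free2d(activations); free2d(bias); free2d(out);
}
#endif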
/**!
* Function finds the sum of elements of matrix.
* @param matrix A matrix of dARRAY Object
* @param axis If axis == 1, then sums all elements in a row. If axis == 0, then sums all the elements in a column.
* @result A pointer to the result of sum(matrix,axis)
* @return A pointer to the result of sum(matrix,axis)
*/
dARRAY * sum(dARRAY * matrix, int axis){
if(axis!=0 && axis!=1){
printf("\033[1;31mError:\033[93m axis=%d not supported. Instead use axis=0 or axis=1\033[0m\n",axis);
return NULL;
}
// if(matrix->shape[0]==1 || matrix->shape[1]==1) return matrix;
dARRAY * new = (dARRAY*)malloc(sizeof(dARRAY));
new->matrix = NULL;
if(axis==0){
new->matrix = (float*)calloc(matrix->shape[1],sizeof(float));
dARRAY * temp = transpose(matrix);
float sum_ = 0.0f;
int i = 0;
int j = 0;
omp_set_num_threads(nn_threads);
    //sum_ is a per-row accumulator, so it must be private rather than a reduction variable
    #pragma omp parallel for num_threads(nn_threads) shared(temp,new) private(i,j,sum_)
for(i=0;i<temp->shape[0];i++){
sum_=0.0;
for(j=0;j<temp->shape[1];j++){
sum_+= temp->matrix[i*temp->shape[1]+j];
}
new->matrix[i] = sum_;
}
new->shape[0] = 1;
new->shape[1] = matrix->shape[1];
free2d(temp);
temp=NULL;
}
else if(axis==1){
new->matrix = (float*)calloc(matrix->shape[0],sizeof(float));
omp_set_num_threads(nn_threads);
int j = 0, i = 0;
float temp = 0.0f;
    //temp is a per-row accumulator, so it must be private rather than a reduction variable
    #pragma omp parallel for num_threads(nn_threads) shared(matrix,new) private(i,j,temp)
for(i=0;i<matrix->shape[0];i++){
temp = 0.0;
for(j=0;j<matrix->shape[1];j++){
temp += matrix->matrix[i*matrix->shape[1]+j];
}
new->matrix[i] = temp;
}
new->shape[0] = matrix->shape[0];
new->shape[1] = 1;
}
return new;
}
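/* Usage sketch (illustrative, guarded by the hypothetical macro
 * CDNN_UTILS_EXAMPLES): axis semantics of sum() on a (2,3) matrix of ones. */
#ifdef CDNN_UTILS_EXAMPLES
static void example_sum_axes(void){
  int dims[2] = {2,3};
  dARRAY * M = ones(dims);
  dARRAY * row_sums = sum(M,1); //(2,1): each row of three ones sums to 3
  dARRAY * col_sums = sum(M,0); //(1,3): each column of two ones sums to 2
  free2d(row_sums); free2d(col_sums); free2d(M);
}
#endif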
/**!
 * Function finds the squared Frobenius norm (the sum of squared elements) of a matrix.
 * @param matrix A matrix of dARRAY Object
 * @result The squared Frobenius norm of the matrix
 * @return The squared Frobenius norm of the matrix
 */
float frobenius_norm(dARRAY * matrix){
float frobenius_norm = 0.0;
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrix) reduction(+:frobenius_norm) schedule(static)
for(int i=0;i<matrix->shape[0]*matrix->shape[1];i++){
frobenius_norm += pow(matrix->matrix[i],2);
}
return frobenius_norm;
}
/**!
* Function finds the Manhattan_distance of matrix.
* @param matrix A matrix of dARRAY Object
* @result Result of Manhattan_distance(matrix)
* @return Result of Manhattan_distance(matrix)
*/
float Manhattan_distance(dARRAY * matrix){
float dist = 0.0;
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrix) reduction(+:dist) schedule(static)
for(int i=0;i<matrix->shape[0]*matrix->shape[1];i++){
    dist += fabsf(matrix->matrix[i]); //fabsf: abs() would truncate the float to an integer
}
return dist;
}
/**!
* Function generates a matrix of specified dimensions filled with random variables
* from normal distribution with mean 0 and unit standard deviation.
* @param dims An array of matrix dimensions [rows,columns]
* @result A pointer to the generated matrix.
* @return A pointer to the generated matrix.
*/
dARRAY * randn(int * dims){
dARRAY * matrix = (dARRAY*)malloc(sizeof(dARRAY));
matrix->matrix = (float*)malloc(sizeof(float)*dims[0]*dims[1]);
  //rand_norm() relies on the globals cache/return_cache and on drand48(), none of
  //which are thread-safe, so the matrix is filled sequentially
for(int i=0;i<dims[0];i++){
for(int j=0;j<dims[1];j++){
matrix->matrix[i*dims[1]+j] = rand_norm(0.0,1.0);
}
}
matrix->shape[0] = dims[0];
matrix->shape[1] = dims[1];
return matrix;
}
/**!
* Function creates an array that contains shuffled indices
* @param length Number of elements in the array to be shuffled
* @result Pointer to the array containing shuffled indices
* @return Pointer to the array containing shuffled indices
*/
int * permutation(int length){
int * permute_arr = (int*)malloc(sizeof(int)*length);
#pragma omp parallel for num_threads(nn_threads) shared(permute_arr)
for(int i=0;i<length;i++){
permute_arr[i] = i;
}
srand(time(NULL));
  //Fisher-Yates shuffle: every swap depends on the previous ones and rand() is
  //not thread-safe, so this loop must stay sequential
for(int i = length-1;i>0;i--){
int j = rand()%(i+1);
int temp = permute_arr[i];
permute_arr[i] = permute_arr[j];
permute_arr[j] = temp;
}
return permute_arr;
}
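/* Usage sketch (illustrative, guarded by the hypothetical macro
 * CDNN_UTILS_EXAMPLES): shuffling sample indices, e.g. to pick a minibatch order. */
#ifdef CDNN_UTILS_EXAMPLES
static void example_minibatch_order(void){
  int n = 8;
  int * order = permutation(n); //a shuffled arrangement of 0..7
  for(int i=0;i<n;i++)
    printf("%d ",order[i]);
  printf("\n");
  free(order);
}
#endif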
/**!
* Function reshapes a given matrix to specified dimensions
* @param matrix Matrix to be reshaped
* @param dims An array of matrix dimension [rows,columns]
* @result Pointer to the reshaped matrix
* @return Pointer to the reshaped matrix
*/
dARRAY * reshape(dARRAY * matrix, int * dims){
if(matrix==NULL){
printf("\033[1;31mError:\033[93m Matrix is empty. Call reshape() only after intializing dARRAY object.\033[0m\n");
return NULL;
}
if(size(matrix)!=(dims[0]*dims[1])){
printf("\033[1;31mError:\033[93m Shape Error. Matrix could not be reshaped to the specified dims.\033[0m\n");
return matrix;
}
matrix->shape[0] = dims[0];
matrix->shape[1] = dims[1];
return matrix;
}
/**!
 * Function finds the mean of a matrix.
* @param matrix A matrix of dARRAY Object
* @result Mean of a matrix
* @return Mean of a matrix
*/
float mean(dARRAY * matrix){
if(matrix==NULL){
printf("\033[1;31mError:\033[93m Cannot find mean of empty matrix. Call mean() only after intializing dARRAY object.\033[0m\n");
return (float)0;
}
float sum = 0;
for(int i=0; i<matrix->shape[0]*matrix->shape[1];i++)
sum += matrix->matrix[i];
return sum/(matrix->shape[0]*matrix->shape[1]);
}
/**!
* Function finds the variance of a matrix.
* @param matrix A matrix of dARRAY Object
* @param type if type=='sample' then function finds the sample variance else it finds the population variance.
* @result Variance of the matrix
* @return Variance of the matrix
*/
float var(dARRAY * matrix, char * type){
if(matrix==NULL){
printf("\033[1;31mError:\033[93m Cannot find variance of empty matrix. Call var() only after intializing dARRAY object.\033[0m\n");
return (float)0;
}
float errorSum = 0;
float xbar = mean(matrix);
for(int i=0;i<matrix->shape[0]*matrix->shape[1];i++){
errorSum += pow((matrix->matrix[i]-xbar),2);
}
if(!strcmp(type,(const char *)"sample"))
return errorSum/(matrix->shape[0]*matrix->shape[1]-1);
else if(!strcmp(type,(const char *)"population"))
return errorSum/(matrix->shape[0]*matrix->shape[1]);
else{
printf("\033[1;31mError:\033[93m \"type\" parameter can only take values \"sample\" or \"population\".\033[0m\n");
return (float)0;
}
}
/**!
* Function finds the standard deviation of matrix.
* @param matrix A matrix of dARRAY Object
* @param type if type=='sample' then function finds the sample std else it finds the population std.
* @result Standard deviation of matrix
* @return Standard deviation of matrix
*/
float std(dARRAY * matrix, char * type){
return pow(var(matrix,type), 0.5);
}
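/* Worked example (illustrative, guarded by the hypothetical macro
 * CDNN_UTILS_EXAMPLES): for the samples 1..5 the mean is 3 and the sum of
 * squared deviations is 4+1+0+1+4 = 10, so the population variance is
 * 10/5 = 2 and the sample variance is 10/4 = 2.5. */
#ifdef CDNN_UTILS_EXAMPLES
static void example_var_std(void){
  int dims[2] = {1,5};
  dARRAY * m = zeros(dims);
  for(int i=0;i<5;i++) m->matrix[i] = (float)(i+1); //1 2 3 4 5
  printf("population var = %f\n", var(m,"population")); //2.000000
  printf("sample var     = %f\n", var(m,"sample"));     //2.500000
  printf("population std = %f\n", std(m,"population")); //sqrt(2), about 1.414214
  free2d(m);
}
#endif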
/**!
* Helper function of gaussRandom()
* Function generates a random variable with normal distribution.
* @param cache A pointer to the cache value
* @param return_cache A pointer to check if cache has a value.
* @result A random variable of normal distribution.
* @return A random variable of normal distribution.
*/
float gaussGenerator(float * cache, int * return_cache){
if(*return_cache){
*return_cache = 0;
return *cache;
}
//use drand48 to generate random values from uniform distribution
float u = 2.0 * drand48() - 1.0;
float v = 2.0 * drand48() - 1.0;
float r = u*u + v*v;
if(r==0.0 || r>1) return gaussGenerator(cache,return_cache);
float c = sqrt(-2*log(r)/r);
*cache = c*v; //store this in cache
*return_cache = 1;
return u*c;
}
/**!
* Function generates a random variable with normal distribution.
* @result A random variable of normal distribution.
* @return A random variable of normal distribution.
*/
float gaussRandom(){
cache=0.0;
return_cache = 0;
return gaussGenerator(&cache,&return_cache);
}
/**!
* Function generates a random variable with normal distribution with specified mean and standard deviation.
* @param mu Mean
* @param std Standard Deviation
* @result A random variable of normal distribution [X ~ N(mu,std*std)].
* @return A random variable of normal distribution [X ~ N(mu,std*std)].
*/
float rand_norm(float mu, float std){
return mu+gaussRandom()*std;
}
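/* Usage sketch (illustrative, guarded by the hypothetical macro
 * CDNN_UTILS_EXAMPLES): sampling from N(5,4); roughly 95% of the draws should
 * land within two standard deviations, i.e. in [1,9]. */
#ifdef CDNN_UTILS_EXAMPLES
static void example_rand_norm(void){
  for(int i=0;i<5;i++)
    printf("%f\n",rand_norm(5.0f,2.0f));
}
#endif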
/**!
* Function deallocates a 2D Matrix.
* @param matrix Matrix that needs to be freed.
* @result void
* @return void
*/
void free2d(dARRAY * matrix){
if(matrix==NULL) {
printf("\033[1;93mWarning:\033[93m Matrix is Empty. No need for deallocation.\033[0m\n");
return;
}
free(matrix->matrix);
free(matrix);
// matrix = NULL;
return;
}
/**!
* Function returns the size of the matrix
* @param A Matrix of type dARRAY Object
* @result Total size of the matrix
* @return Total size of the matrix
*/
int size(dARRAY * A){
if(A==NULL){
printf("\033[1;31mError:\033[93m Matrix is Empty. Call size() only after intializing dARRAY object.\033[0m\n");
return 0;
}
return A->shape[0]*A->shape[1];
}
/**!
* Function displays the shape of the matrix
* @param A Matrix of type dARRAY Object
* @result Prints the shape of input matrix
* @return void
*/
void shape(dARRAY * A){
if(A==NULL){
printf("\033[1;31mError:\033[93m Matrix is Empty. Call shape() only after intializing dARRAY object.\033[0m\n");
return;
}
//printf("first element of matrix is : %f\n",A->matrix[0]);
printf("(%d,%d)\n",A->shape[0],A->shape[1]);
}
//Function to create a time delay by busy-waiting. Mimics Thread.sleep() of Java,
//but time(0) only has one-second resolution, so the delay rounds down to whole seconds.
void sleep_my(int milliseconds) {
unsigned int duration = time(0) + (milliseconds/1000);
while(time(0)<duration);
}
//This function is used instead of fflush(stdin) as it is a bad practice to use it
//due to undefined behaviour.
void cleanSTDIN() {
int ch;
while ((ch = getchar()) != '\n' && ch != EOF){}
}
/**!
 * Function calculates the safe number of threads to use.
* @return void
*/
void get_safe_nn_threads(){
int num_cpu_cores = sysconf(_SC_NPROCESSORS_CONF);
if(num_cpu_cores<=4){
nn_threads = num_cpu_cores*2;
}
else if(num_cpu_cores>=8){
nn_threads = num_cpu_cores/2;
}
else nn_threads = num_cpu_cores;
}
|
MLFDeserializer.h
|
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
#pragma once
#include <boost/noncopyable.hpp>
#include "HTKDeserializer.h"
#include "CorpusDescriptor.h"
#include "MLFUtils.h"
#include "FileWrapper.h"
#include "Index.h"
namespace CNTK
{
static float s_oneFloat = 1.0;
static double s_oneDouble = 1.0;
// A constant used in 1-hot vectors to identify the first frame of a phone.
// Used only in CTC-type training.
static float s_phoneBoundary = 2.0f;
// Sparse labels for an utterance.
template <class ElemType>
struct MLFSequenceData : SparseSequenceData
{
vector<ElemType> m_values;
vector<IndexType> m_indexBuffer;
const NDShape& m_frameShape;
MLFSequenceData(size_t numberOfSamples, const NDShape& frameShape)
: m_values(numberOfSamples, 1), m_frameShape(frameShape)
{
if (numberOfSamples > numeric_limits<IndexType>::max())
{
RuntimeError("Number of samples in an MLFSequenceData (%zu) "
"exceeds the maximum allowed value (%zu)\n",
numberOfSamples, (size_t) numeric_limits<IndexType>::max());
}
m_indexBuffer.resize(numberOfSamples);
m_nnzCounts.resize(numberOfSamples, static_cast<IndexType>(1));
m_numberOfSamples = (uint32_t) numberOfSamples;
m_totalNnzCount = static_cast<IndexType>(numberOfSamples);
m_indices = &m_indexBuffer[0];
}
MLFSequenceData(size_t numberOfSamples, const vector<size_t>& phoneBoundaries, const NDShape& frameShape)
: MLFSequenceData(numberOfSamples, frameShape)
{
for (auto boundary : phoneBoundaries)
m_values[boundary] = s_phoneBoundary;
}
const void* GetDataBuffer() override
{
return m_values.data();
}
const NDShape& GetSampleShape() override
{
return m_frameShape;
}
};
// Class represents an MLF deserializer.
// Provides a set of chunks/sequences to the upper layers.
class MLFDeserializer : public DataDeserializerBase, boost::noncopyable
{
public:
// Expects new configuration.
MLFDeserializer(CorpusDescriptorPtr corpus, const ConfigParameters& config, bool primary);
        // TODO: Should be removed when all old readers go away; expects configuration in a legacy mode.
MLFDeserializer(CorpusDescriptorPtr corpus, const ConfigParameters& config, const std::wstring& streamName);
MLFDeserializer(CorpusDescriptorPtr corpus, bool primary);
// Retrieves sequence description by its key. Used for deserializers that are not in "primary"/"driving" mode.
bool GetSequenceInfoByKey(const SequenceKey& key, SequenceInfo& s) override;
// Gets description of all chunks.
virtual std::vector<ChunkInfo> ChunkInfos() override;
// Get sequence descriptions of a particular chunk.
virtual void SequenceInfosForChunk(ChunkIdType chunkId, std::vector<SequenceInfo>& s) override;
// Retrieves a chunk with data.
virtual ChunkPtr GetChunk(ChunkIdType) override;
static inline bool LessByFirstItem(const std::tuple<size_t, size_t, size_t>& a, const std::tuple<size_t, size_t, size_t>& b)
{
return std::get<0>(a) < std::get<0>(b);
}
// Base class for chunks in frame and sequence mode.
// The lifetime is always less than the lifetime of the parent deserializer.
class ChunkBase : public Chunk
{
public:
vector<vector<MLFFrameRange>> m_sequences; // Each sequence is a vector of sequential frame ranges.
ChunkBase(const MLFDeserializer& deserializer, const ChunkDescriptor& descriptor, const wstring& fileName, const StateTablePtr& states)
: m_parser(states),
m_descriptor(descriptor),
m_deserializer(deserializer)
{
if (descriptor.NumberOfSequences() == 0 || descriptor.SizeInBytes() == 0)
LogicError("Empty chunks are not supported.");
auto f = FileWrapper::OpenOrDie(fileName, L"rbS");
size_t sizeInBytes = descriptor.SizeInBytes();
// Make sure we always have 0 at the end for buffer overrun.
m_buffer.resize(sizeInBytes + 1);
m_buffer[sizeInBytes] = 0;
// Seek and read chunk into memory.
f.SeekOrDie(descriptor.StartOffset(), SEEK_SET);
f.ReadOrDie(m_buffer.data(), sizeInBytes, 1);
// all sequences are valid by default.
m_valid.resize(m_descriptor.NumberOfSequences(), true);
}
string KeyOf(const SequenceDescriptor& s)
{
return m_deserializer.m_corpus->IdToKey(s.m_key);
}
void CleanBuffer()
{
// Make sure we do not keep unnecessary memory after sequences have been parsed.
vector<char> tmp;
m_buffer.swap(tmp);
}
void GetSequence(size_t sequenceIndex, vector<SequenceDataPtr>& result) override
{
if (m_deserializer.m_elementType == DataType::Float)
return GetSequence<float>(sequenceIndex, result);
else
{
assert(m_deserializer.m_elementType == DataType::Double);
return GetSequence<double>(sequenceIndex, result);
}
}
template <class ElementType>
void GetSequence(size_t sequenceIndex, vector<SequenceDataPtr>& result)
{
if (!m_valid[sequenceIndex])
{
SparseSequenceDataPtr s = make_shared<MLFSequenceData<ElementType>>(0, m_deserializer.m_streams.front().m_sampleLayout);
s->m_isValid = false;
result.push_back(s);
return;
}
const auto& utterance = m_sequences[sequenceIndex];
const auto& sequence = m_descriptor.Sequences()[sequenceIndex];
// Packing labels for the utterance into sparse sequence.
vector<size_t> sequencePhoneBoundaries(m_deserializer.m_withPhoneBoundaries ? utterance.size() : 0);
if (m_deserializer.m_withPhoneBoundaries)
{
for (size_t i = 0; i < utterance.size(); ++i)
sequencePhoneBoundaries[i] = utterance[i].FirstFrame();
}
auto s = make_shared<MLFSequenceData<ElementType>>(sequence.m_numberOfSamples, sequencePhoneBoundaries, m_deserializer.m_streams.front().m_sampleLayout);
auto* startRange = s->m_indices;
for (const auto& range : utterance)
{
if (range.ClassId() >= m_deserializer.m_dimension)
// TODO: Possibly set m_valid to false, but currently preserving the old behavior.
RuntimeError("Class id '%ud' exceeds the model output dimension '%d'.", range.ClassId(), (int) m_deserializer.m_dimension);
// Filling all range of frames with the corresponding class id.
fill(startRange, startRange + range.NumFrames(), static_cast<IndexType>(range.ClassId()));
startRange += range.NumFrames();
}
result.push_back(s);
}
vector<char> m_buffer; // Buffer for the whole chunk
vector<bool> m_valid; // Bit mask whether the parsed sequence is valid.
MLFUtteranceParser m_parser;
const MLFDeserializer& m_deserializer;
const ChunkDescriptor& m_descriptor; // Current chunk descriptor.
};
// MLF chunk when operating in sequence mode.
class SequenceChunk : public ChunkBase
{
public:
SequenceChunk(const MLFDeserializer& parent, const ChunkDescriptor& descriptor, const wstring& fileName, StateTablePtr states)
: ChunkBase(parent, descriptor, fileName, states)
{
this->m_sequences.resize(m_descriptor.Sequences().size());
#pragma omp parallel for schedule(dynamic)
for (int i = 0; i < descriptor.Sequences().size(); ++i)
CacheSequence(descriptor.Sequences()[i], i);
CleanBuffer();
}
void CacheSequence(const SequenceDescriptor& sequence, size_t index)
{
auto start = m_buffer.data() + sequence.OffsetInChunk();
auto end = start + sequence.SizeInBytes();
vector<MLFFrameRange> utterance;
auto absoluteOffset = m_descriptor.StartOffset() + sequence.OffsetInChunk();
bool parsed = m_parser.Parse(boost::make_iterator_range(start, end), utterance, absoluteOffset);
if (!parsed) // cannot parse
{
fprintf(stderr, "WARNING: Cannot parse the utterance '%s'\n", KeyOf(sequence).c_str());
m_valid[index] = false;
return;
}
m_sequences[index] = move(utterance);
}
};
// MLF chunk when operating in frame mode.
    // Implementation is different because frames of the same sequence can be accessed
    // in parallel by the randomizer; all parsing/preprocessing is therefore done during
    // sequence caching, so that GetSequence only touches read-only data structures.
class FrameChunk : public ChunkBase
{
// Actual values of frames.
vector<ClassIdType> m_classIds;
//For each sequence this vector contains the sequence offset in samples from the beginning of the chunk.
std::vector<uint32_t> m_sequenceOffsetInChunkInSamples;
public:
FrameChunk(const MLFDeserializer& parent, const ChunkDescriptor& descriptor, const wstring& fileName, StateTablePtr states)
: ChunkBase(parent, descriptor, fileName, states)
{
uint32_t numSamples = static_cast<uint32_t>(m_descriptor.NumberOfSamples());
// The current assumption is that the number of samples in a chunk fits in uint32,
// therefore we can save 4 bytes per sequence, storing offsets in samples as uint32.
if (numSamples != m_descriptor.NumberOfSamples())
RuntimeError("Exceeded maximum number of samples in a chunk");
// Preallocate a big array for filling in class ids for the whole chunk.
m_classIds.resize(numSamples);
m_sequenceOffsetInChunkInSamples.resize(m_descriptor.NumberOfSequences());
uint32_t offset = 0;
for (auto i = 0; i < m_descriptor.NumberOfSequences(); ++i)
{
m_sequenceOffsetInChunkInSamples[i] = offset;
offset += descriptor[i].m_numberOfSamples;
}
if (numSamples != offset)
RuntimeError("Unexpected number of samples in a FrameChunk.");
// Parse the data on different threads to avoid locking during GetSequence calls.
#pragma omp parallel for schedule(dynamic)
for (auto i = 0; i < m_descriptor.NumberOfSequences(); ++i)
CacheSequence(descriptor[i], i);
CleanBuffer();
}
// Get utterance by the absolute frame index in chunk.
// Uses the upper bound to do the binary search among sequences of the chunk.
size_t GetUtteranceForChunkFrameIndex(size_t frameIndex) const
{
auto result = upper_bound(
m_sequenceOffsetInChunkInSamples.begin(),
m_sequenceOffsetInChunkInSamples.end(),
frameIndex,
[](size_t fi, const size_t& a) { return fi < a; });
return result - 1 - m_sequenceOffsetInChunkInSamples.begin();
}
void GetSequence(size_t sequenceIndex, vector<SequenceDataPtr>& result) override
{
size_t utteranceId = GetUtteranceForChunkFrameIndex(sequenceIndex);
if (!m_valid[utteranceId])
{
SparseSequenceDataPtr s = make_shared<MLFSequenceData<float>>(0, m_deserializer.m_streams.front().m_sampleLayout);
s->m_isValid = false;
result.push_back(s);
return;
}
size_t label = m_classIds[sequenceIndex];
assert(label < m_deserializer.m_categories.size());
result.push_back(m_deserializer.m_categories[label]);
}
// Parses and caches sequence in the buffer for GetSequence fast retrieval.
void CacheSequence(const SequenceDescriptor& sequence, size_t index)
{
auto start = m_buffer.data() + sequence.OffsetInChunk();
auto end = start + sequence.SizeInBytes();
vector<MLFFrameRange> utterance;
auto absoluteOffset = m_descriptor.StartOffset() + sequence.OffsetInChunk();
bool parsed = m_parser.Parse(boost::make_iterator_range(start, end), utterance, absoluteOffset);
if (!parsed)
{
m_valid[index] = false;
fprintf(stderr, "WARNING: Cannot parse the utterance %s\n", KeyOf(sequence).c_str());
return;
}
auto startRange = m_classIds.begin() + m_sequenceOffsetInChunkInSamples[index];
for (size_t i = 0; i < utterance.size(); ++i)
{
const auto& range = utterance[i];
if (range.ClassId() >= m_deserializer.m_dimension)
// TODO: Possibly set m_valid to false, but currently preserving the old behavior.
RuntimeError("Class id '%ud' exceeds the model output dimension '%d'.", range.ClassId(), (int) m_deserializer.m_dimension);
fill(startRange, startRange + range.NumFrames(), range.ClassId());
startRange += range.NumFrames();
}
}
};
// Initializes reader params.
std::wstring InitializeReaderParams(const ConfigParameters& cfg, bool primary);
// Initializes chunk descriptions.
void InitializeChunkInfos(CorpusDescriptorPtr corpus, const ConfigHelper& config, const wstring& stateListPath);
// Initializes a single stream this deserializer exposes.
void InitializeStream(const std::wstring& name);
// In frame mode initializes data for all categories/labels in order to
// avoid memory copy.
void InitializeReadOnlyArrayOfLabels();
        // Sorted vector that maps SequenceKey.m_sequence to its chunk and utterance index (or type max() if the key is not assigned).
std::vector<std::tuple<size_t, ChunkIdType, uint32_t>> m_keyToChunkLocation;
        // Type of the data this deserializer provides.
DataType m_elementType;
// Array of available categories.
        // We do not allocate data for all input sequences; we only return a pointer to an existing category.
std::vector<SparseSequenceDataPtr> m_categories;
// A list of category indices
// (a list of numbers from 0 to N, where N = (number of categories -1))
std::vector<IndexType> m_categoryIndices;
        // Flag that indicates whether single speech frames should be exposed as sequences.
bool m_frameMode;
CorpusDescriptorPtr m_corpus;
std::vector<const ChunkDescriptor*> m_chunks;
std::map<const ChunkDescriptor*, size_t> m_chunkToFileIndex;
size_t m_dimension;
size_t m_chunkSizeBytes;
// Track phone boundaries
bool m_withPhoneBoundaries;
StateTablePtr m_stateTable;
std::vector<std::shared_ptr<Index>> m_indices;
std::vector<std::wstring> m_mlfFiles;
bool m_textReader;
};
}
|
simd_utils.h
|
/*
* Project : SIMD_Utils
* Version : 0.2.2
* Author : JishinMaster
* Licence : BSD-2
*/
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
#define MAJOR_VERSION 0
#define MINOR_VERSION 2
#define SUB_VERSION 2
#ifdef OMP
#include <omp.h>
#endif
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include "simd_utils_constants.h"
#include "mysincosf.h"
/* if the user ensures that all of their pointers are aligned,
 * they can define ALWAYS_ALIGNED to get a minor speedup on small vectors
 */
static inline int isAligned(uintptr_t ptr, size_t alignment)
{
#ifndef ALWAYS_ALIGNED
#ifndef ARM // ARM handles misaligned accesses in hardware
if (((uintptr_t) (ptr) % alignment) == 0)
return 1;
return 0;
#else
return 1;
#endif
#else
return 1;
#endif
}
static inline int areAligned2(uintptr_t ptr1, uintptr_t ptr2, size_t alignment)
{
#ifndef ALWAYS_ALIGNED
#ifndef ARM // ARM handles misaligned accesses in hardware
if (((uintptr_t) (ptr1) % alignment) == 0)
if (((uintptr_t) (ptr2) % alignment) == 0)
return 1;
return 0;
#else
return 1;
#endif
#else
return 1;
#endif
}
static inline int areAligned3(uintptr_t ptr1, uintptr_t ptr2, uintptr_t ptr3, size_t alignment)
{
#ifndef ALWAYS_ALIGNED
#ifndef ARM // ARM handles misaligned accesses in hardware
if (((uintptr_t) (ptr1) % alignment) == 0)
if (((uintptr_t) (ptr2) % alignment) == 0)
if (((uintptr_t) (ptr3) % alignment) == 0)
return 1;
return 0;
#else
return 1;
#endif
#else
return 1;
#endif
}
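/* Usage sketch (illustrative): the guard macro SIMD_UTILS_EXAMPLES is
 * hypothetical, so this block is not compiled by default. A typical pattern is
 * to probe alignment once and branch to the aligned or unaligned load/store path.
 */
#ifdef SIMD_UTILS_EXAMPLES
static inline int example_use_aligned_path(const float *src, float *dst)
{
    // 16 bytes is the alignment the SSE _mm_load_ps/_mm_store_ps paths expect
    return areAligned2((uintptr_t) src, (uintptr_t) dst, 16);
}
#endif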
static inline void simd_utils_get_version(void)
{
printf("Simd Utils Version : %d.%d.%d\n", MAJOR_VERSION, MINOR_VERSION, SUB_VERSION);
}
#ifdef SSE
#ifdef NO_SSE3
static inline __m128 _mm_movehdup_ps(__m128 __X)
{
return _mm_shuffle_ps(__X, __X, 0xF5);
}
static inline __m128 _mm_moveldup_ps(__m128 __X)
{
return _mm_shuffle_ps(__X, __X, 0xA0);
}
#endif
#ifdef NO_SSE4
static inline __m128i _mm_cmpeq_epi64(__m128i __X, __m128i __Y)
{
int64_t *ptr_x = (int64_t *) &__X;
int64_t *ptr_y = (int64_t *) &__Y;
__m128i ret;
int64_t *ptr_ret = (int64_t *) &ret;
ptr_ret[0] = (ptr_x[0] == ptr_y[0]) ? 0xFFFFFFFFFFFFFFFF : 0;
ptr_ret[1] = (ptr_x[1] == ptr_y[1]) ? 0xFFFFFFFFFFFFFFFF : 0;
return ret;
}
static inline __m128d _mm_blendv_pd(__m128d __X, __m128d __Y, __m128d __M)
{
__m128d b_tmp = _mm_and_pd(__Y, __M);
__m128d a_tmp = _mm_and_pd(__X, _mm_cmpeq_pd(__M, *(__m128d *) _pd_zero));
return _mm_or_pd(a_tmp, b_tmp);
}
static inline __m128 _mm_blendv_ps(__m128 __X, __m128 __Y, __m128 __M)
{
__m128 b_tmp = _mm_and_ps(__Y, __M);
__m128 a_tmp = _mm_and_ps(__X, _mm_cmpeq_ps(__M, *(__m128 *) _ps_zero));
return _mm_or_ps(a_tmp, b_tmp);
}
static inline __m128i _mm_stream_load_si128(__m128i *__X)
{
return _mm_load_si128(__X);
}
static inline __m128 _mm_round_ps(__m128 X, int mode)
{
__m128 ret;
__m128i reti;
unsigned int old_mode = _MM_GET_ROUNDING_MODE();
switch (mode) {
case _MM_FROUND_TRUNC:
case _MM_ROUND_TOWARD_ZERO:
case ROUNDTOZERO:
_MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
break;
case ROUNDTOCEIL:
case _MM_ROUND_UP:
_MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
break;
case ROUNDTOFLOOR:
case _MM_ROUND_DOWN:
_MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
break;
default:
//_MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
break;
}
reti = _mm_cvtps_epi32(X);
ret = _mm_cvtepi32_ps(reti);
_MM_SET_ROUNDING_MODE(old_mode);
return ret;
}
/* not exact: packs with signed saturation instead of unsigned, which
   does the trick for most cases where the full range is not needed */
static inline __m128i _mm_packus_epi32(__m128i a, __m128i b)
{
return _mm_packs_epi32(a, b);
}
#endif
#ifndef ARM
#include "sse_mathfun.h"
#else /* ARM */
#include "neon_mathfun.h"
#endif /* ARM */
static inline v4sfx2 _mm_load2_ps(float const *mem_addr)
{
#ifdef ARM
return vld2q_f32(mem_addr);
#else
v4sf tmp1 = _mm_load_ps(mem_addr);
v4sf tmp2 = _mm_load_ps(mem_addr + SSE_LEN_FLOAT);
v4sfx2 ret;
ret.val[0] = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(2, 0, 2, 0));
ret.val[1] = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(3, 1, 3, 1));
return ret;
#endif
}
static inline v4sfx2 _mm_load2u_ps(float const *mem_addr)
{
#ifdef ARM
return vld2q_f32(mem_addr);
#else
v4sf tmp1 = _mm_loadu_ps(mem_addr);
v4sf tmp2 = _mm_loadu_ps(mem_addr + SSE_LEN_FLOAT);
v4sfx2 ret;
ret.val[0] = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(2, 0, 2, 0));
ret.val[1] = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(3, 1, 3, 1));
return ret;
#endif
}
static inline void _mm_store2_ps(float *mem_addr, v4sfx2 a)
{
#ifdef ARM
vst2q_f32(mem_addr, a);
#else
v4sf tmp1 = _mm_unpacklo_ps(a.val[0], a.val[1]);
v4sf tmp2 = _mm_unpackhi_ps(a.val[0], a.val[1]);
_mm_store_ps(mem_addr, tmp1);
_mm_store_ps(mem_addr + SSE_LEN_FLOAT, tmp2);
#endif
}
static inline void _mm_store2u_ps(float *mem_addr, v4sfx2 a)
{
#ifdef ARM
vst2q_f32(mem_addr, a);
#else
v4sf tmp1 = _mm_unpacklo_ps(a.val[0], a.val[1]);
v4sf tmp2 = _mm_unpackhi_ps(a.val[0], a.val[1]);
_mm_storeu_ps(mem_addr, tmp1);
_mm_storeu_ps(mem_addr + SSE_LEN_FLOAT, tmp2);
#endif
}
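/* Usage sketch (illustrative, guarded by the hypothetical macro
 * SIMD_UTILS_EXAMPLES): de-interleave four complex (re,im) pairs into separate
 * real and imaginary lanes and interleave them back, a round trip that leaves
 * the buffer unchanged.
 */
#ifdef SIMD_UTILS_EXAMPLES
static inline void example_deinterleave_roundtrip(void)
{
    float buf[8] = {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f};  // re,im,re,im,...
    v4sfx2 split = _mm_load2u_ps(buf);  // split.val[0] = {0,2,4,6}, split.val[1] = {1,3,5,7}
    _mm_store2u_ps(buf, split);         // interleaves back to the original order
}
#endif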
static inline __m128 _mm_fmadd_ps_custom(__m128 a, __m128 b, __m128 c)
{
// Haswell comes with avx2 and fma
// ARM has vmla instead of fma in 32bits
#if defined(ARM) || defined(FMA)
return _mm_fmadd_ps(a, b, c);
#else
return _mm_add_ps(_mm_mul_ps(a, b), c);
#endif
}
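/* Usage sketch (illustrative, guarded by the hypothetical macro
 * SIMD_UTILS_EXAMPLES): one vector step of d = a*b + c; the custom wrapper
 * compiles to a fused multiply-add when FMA (or ARM) is available and to
 * mul+add otherwise.
 */
#ifdef SIMD_UTILS_EXAMPLES
static inline void example_fmadd(const float *a, const float *b, const float *c, float *d)
{
    v4sf va = _mm_loadu_ps(a);
    v4sf vb = _mm_loadu_ps(b);
    v4sf vc = _mm_loadu_ps(c);
    _mm_storeu_ps(d, _mm_fmadd_ps_custom(va, vb, vc));
}
#endif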
static inline __m128 _mm_fmaddsub_ps_custom(__m128 a, __m128 b, __m128 c)
{
#ifndef FMA // Haswell comes with avx2 and fma
return _mm_addsub_ps(_mm_mul_ps(a, b), c);
#else /* FMA */
return _mm_fmaddsub_ps(a, b, c);
#endif /* FMA */
}
static inline __m128 _mm_fmsubadd_ps_custom(__m128 a, __m128 b, __m128 c)
{
#if !defined(FMA) || defined(ARM)
v4sf d = _mm_mul_ps(*(v4sf *) _ps_conj_mask, c);
return _mm_addsub_ps(_mm_mul_ps(a, b), d);
#else /* FMA */
return _mm_fmsubadd_ps(a, b, c);
#endif /* FMA */
}
static inline __m128 _mm_fnmadd_ps_custom(__m128 a, __m128 b, __m128 c)
{
// Haswell comes with avx2 and fma
// ARM has vmla instead of fma in 32bits
#if defined(ARM) || defined(FMA)
return _mm_fnmadd_ps(a, b, c);
#else
return _mm_sub_ps(c, _mm_mul_ps(a, b));
#endif
}
static inline __m128d _mm_fmadd_pd_custom(__m128d a, __m128d b, __m128d c)
{
#ifndef FMA // Haswell comes with avx2 and fma
return _mm_add_pd(_mm_mul_pd(a, b), c);
#else /* FMA */
return _mm_fmadd_pd(a, b, c);
#endif /* FMA */
}
static inline __m128d _mm_fnmadd_pd_custom(__m128d a, __m128d b, __m128d c)
{
#ifndef FMA // Haswell comes with avx2 and fma
return _mm_sub_pd(c, _mm_mul_pd(a, b));
#else /* FMA */
return _mm_fnmadd_pd(a, b, c);
#endif /* FMA */
}
#include "simd_utils_sse_double.h"
#include "simd_utils_sse_float.h"
#include "simd_utils_sse_int32.h"
#endif /* SSE */
#ifdef AVX
#ifndef __clang__
#ifndef __INTEL_COMPILER
#ifndef __cplusplus // TODO : it seems to be defined with G++ 9.2 and not GCC 9.2
static inline __m256 _mm256_set_m128(__m128 H, __m128 L) // not present on every GCC version
{
return _mm256_insertf128_ps(_mm256_castps128_ps256(L), H, 1);
}
#endif
#endif
#endif /* __clang__ */
static inline __m256 _mm256_fmadd_ps_custom(__m256 a, __m256 b, __m256 c)
{
#ifndef FMA // Haswell comes with avx2 and fma
return _mm256_add_ps(_mm256_mul_ps(a, b), c);
#else /* FMA */
return _mm256_fmadd_ps(a, b, c);
#endif /* FMA */
}
static inline __m256 _mm256_fmaddsub_ps_custom(__m256 a, __m256 b, __m256 c)
{
#ifndef FMA // Haswell comes with avx2 and fma
return _mm256_addsub_ps(_mm256_mul_ps(a, b), c);
#else /* FMA */
return _mm256_fmaddsub_ps(a, b, c);
#endif /* FMA */
}
static inline __m256 _mm256_fnmadd_ps_custom(__m256 a, __m256 b, __m256 c)
{
#ifndef FMA // Haswell comes with avx2 and fma
return _mm256_sub_ps(c, _mm256_mul_ps(a, b));
#else /* FMA */
return _mm256_fnmadd_ps(a, b, c);
#endif /* FMA */
}
static inline __m256d _mm256_fmadd_pd_custom(__m256d a, __m256d b, __m256d c)
{
#ifndef FMA // Haswell comes with avx2 and fma
return _mm256_add_pd(_mm256_mul_pd(a, b), c);
#else /* FMA */
return _mm256_fmadd_pd(a, b, c);
#endif /* FMA */
}
static inline __m256d _mm256_fnmadd_pd_custom(__m256d a, __m256d b, __m256d c)
{
#ifndef FMA // Haswell comes with avx2 and fma
return _mm256_sub_pd(c, _mm256_mul_pd(a, b));
#else /* FMA */
return _mm256_fnmadd_pd(a, b, c);
#endif /* FMA */
}
#include "avx_mathfun.h"
static inline v8sfx2 _mm256_load2_ps(float const *mem_addr)
{
v4sfx2 src_1 = _mm_load2_ps(mem_addr);
v4sfx2 src_2 = _mm_load2_ps(mem_addr + 2 * SSE_LEN_FLOAT);
v8sfx2 ret;
ret.val[0] = _mm256_set_m128(src_2.val[0], src_1.val[0]);
ret.val[1] = _mm256_set_m128(src_2.val[1], src_1.val[1]);
return ret;
}
static inline v8sfx2 _mm256_load2u_ps(float const *mem_addr)
{
v4sfx2 src_1 = _mm_load2u_ps(mem_addr);
v4sfx2 src_2 = _mm_load2u_ps(mem_addr + 2 * SSE_LEN_FLOAT);
v8sfx2 ret;
ret.val[0] = _mm256_set_m128(src_2.val[0], src_1.val[0]);
ret.val[1] = _mm256_set_m128(src_2.val[1], src_1.val[1]);
return ret;
}
static inline void _mm256_store2_ps(float *mem_addr, v8sfx2 a)
{
v8sf cplx0 = _mm256_unpacklo_ps(a.val[0], a.val[1]);
v8sf cplx1 = _mm256_unpackhi_ps(a.val[0], a.val[1]);
    v8sf perm0 = _mm256_permute2f128_ps(cplx0, cplx1, 0x20);  // permute mask [cplx1(127:0), cplx0(127:0)]
    v8sf perm1 = _mm256_permute2f128_ps(cplx0, cplx1, 0x31);  // permute mask [cplx1(255:128), cplx0(255:128)]
_mm256_store_ps(mem_addr, perm0);
_mm256_store_ps(mem_addr + AVX_LEN_FLOAT, perm1);
}
static inline void _mm256_store2u_ps(float *mem_addr, v8sfx2 a)
{
v8sf cplx0 = _mm256_unpacklo_ps(a.val[0], a.val[1]);
v8sf cplx1 = _mm256_unpackhi_ps(a.val[0], a.val[1]);
    v8sf perm0 = _mm256_permute2f128_ps(cplx0, cplx1, 0x20);  // 0x20 selects [cplx1(127:0), cplx0(127:0)]
    v8sf perm1 = _mm256_permute2f128_ps(cplx0, cplx1, 0x31);  // 0x31 selects [cplx1(255:128), cplx0(255:128)]
_mm256_storeu_ps(mem_addr, perm0);
_mm256_storeu_ps(mem_addr + AVX_LEN_FLOAT, perm1);
}
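// Layout note for the load2/store2 helpers above: memory holds interleaved
// complex data re0,im0,re1,im1,... while v8sfx2 keeps a split layout with
// val[0] = re0..re7 and val[1] = im0..im7. _mm256_load2_ps deinterleaves;
// _mm256_store2_ps (unpack + 128-bit lane permute) is its exact inverse.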
#include "simd_utils_avx_double.h"
#include "simd_utils_avx_float.h"
#include "simd_utils_avx_int32.h"
#endif /* AVX */
#ifdef AVX512
static const float _ps512_conj_mask[16] __attribute__((aligned(64))) = {1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f,
1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f};
static inline __m512 _mm512_fmadd_ps_custom(__m512 a, __m512 b, __m512 c)
{
return _mm512_fmadd_ps(a, b, c);
}
static inline __m512 _mm512_fmaddsub_ps_custom(__m512 a, __m512 b, __m512 c)
{
return _mm512_fmaddsub_ps(a, b, c);
}
static inline __m512 _mm512_fnmadd_ps_custom(__m512 a, __m512 b, __m512 c)
{
return _mm512_fnmadd_ps(a, b, c);
}
static inline __m512d _mm512_fmadd_pd_custom(__m512d a, __m512d b, __m512d c)
{
return _mm512_fmadd_pd(a, b, c);
}
static inline __m512d _mm512_fnmadd_pd_custom(__m512d a, __m512d b, __m512d c)
{
return _mm512_fnmadd_pd(a, b, c);
}
#include "avx512_mathfun.h"
static inline v16sfx2 _mm512_load2_ps(float const *mem_addr)
{
    v16sf vec1 = _mm512_load_ps(mem_addr);                     // loads elements 0 .. 15
    v16sf vec2 = _mm512_load_ps(mem_addr + AVX512_LEN_FLOAT);  // loads elements 16 .. 31
v16sfx2 ret;
ret.val[0] = _mm512_permutex2var_ps(vec2, *(v16si *) _pi32_512_idx_re, vec1);
ret.val[1] = _mm512_permutex2var_ps(vec2, *(v16si *) _pi32_512_idx_im, vec1);
return ret;
}
static inline v16sfx2 _mm512_load2u_ps(float const *mem_addr)
{
    v16sf vec1 = _mm512_loadu_ps(mem_addr);                     // loads elements 0 .. 15
    v16sf vec2 = _mm512_loadu_ps(mem_addr + AVX512_LEN_FLOAT);  // loads elements 16 .. 31
v16sfx2 ret;
ret.val[0] = _mm512_permutex2var_ps(vec2, *(v16si *) _pi32_512_idx_re, vec1);
ret.val[1] = _mm512_permutex2var_ps(vec2, *(v16si *) _pi32_512_idx_im, vec1);
return ret;
}
static inline void _mm512_store2_ps(float *mem_addr, v16sfx2 a)
{
v16sf tmp1 = _mm512_permutex2var_ps(a.val[1], *(v16si *) _pi32_512_idx_cplx_lo, a.val[0]);
v16sf tmp2 = _mm512_permutex2var_ps(a.val[1], *(v16si *) _pi32_512_idx_cplx_hi, a.val[0]);
_mm512_store_ps(mem_addr, tmp1);
_mm512_store_ps(mem_addr + AVX512_LEN_FLOAT, tmp2);
}
static inline void _mm512_store2u_ps(float *mem_addr, v16sfx2 a)
{
v16sf tmp1 = _mm512_permutex2var_ps(a.val[1], *(v16si *) _pi32_512_idx_cplx_lo, a.val[0]);
v16sf tmp2 = _mm512_permutex2var_ps(a.val[1], *(v16si *) _pi32_512_idx_cplx_hi, a.val[0]);
_mm512_storeu_ps(mem_addr, tmp1);
_mm512_storeu_ps(mem_addr + AVX512_LEN_FLOAT, tmp2);
}
#include "simd_utils_avx512_double.h"
#include "simd_utils_avx512_float.h"
#include "simd_utils_avx512_int32.h"
#endif /* AVX512 */
#ifdef ICC
#include "simd_utils_svml.h"
#endif
#ifdef RISCV /* RISCV */
#include "simd_utils_riscv.h"
#endif /* RISCV */
#ifdef ALTIVEC
#include "simd_utils_altivec_float.h"
#endif /* ALTIVEC */
#ifdef CUSTOM_MALLOC
// Thanks to jpommier's pffft: https://bitbucket.org/jpommier/pffft/src/default/pffft.c
static inline int posix_memalign(void **pointer, size_t len, int alignement)
{
void *p, *p0 = malloc(len + alignement);
if (!p0)
        return -1;  // allocation failed; return a non-zero error code (the function returns int)
p = (void *) (((size_t) p0 + alignement) & (~((size_t) (alignement - 1))));
*((void **) p - 1) = p0;
*pointer = p;
return 0;
}
static inline void *aligned_malloc(size_t len, int alignement)
{
void *p, *p0 = malloc(len + alignement);
if (!p0)
return (void *) NULL;
p = (void *) (((size_t) p0 + alignement) & (~((size_t) (alignement - 1))));
*((void **) p - 1) = p0;
return p;
}
// Work in progress
static inline void aligned_free(void *p)
{
if (p)
free(*((void **) p - 1));
}
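// Usage sketch (illustrative): the allocators above over-allocate by
// 'alignement' bytes, round the pointer up to the requested power-of-two
// boundary, and stash the original malloc() pointer in the slot just before
// the returned one, which is what aligned_free() reads back:
//   float *buf = (float *) aligned_malloc(1024 * sizeof(float), 64);
//   if (buf) {
//       /* ... use the 64-byte aligned buffer ... */
//       aligned_free(buf);
//   }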
#endif /* CUSTOM_MALLOC */
////////// C Test functions ////////////////
static inline void log10f_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++)
dst[i] = log10f(src[i]);
}
static inline void log2f_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++)
dst[i] = log2f(src[i]);
}
static inline void lnf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++)
dst[i] = logf(src[i]);
}
static inline void expf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = expf(src[i]);
}
}
static inline void cbrtf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = cbrtf(src[i]);
}
}
static inline void fabsf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = fabsf(src[i]);
}
}
static inline void setf_C(float *dst, float value, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = value;
}
}
static inline void zerof_C(float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = 0.0f;
}
}
static inline void copyf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = src[i];
}
}
static inline void addcf_C(float *src, float value, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = src[i] + value;
}
}
static inline void addcs_C(int32_t *src, int32_t value, int32_t *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = src[i] + value;
}
}
static inline void mulf_C(float *src1, float *src2, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = src1[i] * src2[i];
}
}
static inline void mulcf_C(float *src, float value, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = src[i] * value;
}
}
static inline void muladdf_C(float *_a, float *_b, float *_c, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = (_a[i] * _b[i]) + _c[i];
}
}
static inline void mulcaddf_C(float *_a, float _b, float *_c, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = (_a[i] * _b) + _c[i];
}
}
static inline void mulcaddcf_C(float *_a, float _b, float _c, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = (_a[i] * _b) + _c;
}
}
static inline void muladdcf_C(float *_a, float *_b, float _c, float *dst, int len)
{
for (int i = 0; i < len; i++) {
dst[i] = _a[i] * _b[i] + _c;
}
}
static inline void muls_c(int32_t *a, int32_t *b, int32_t *c, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
c[i] = a[i] * b[i];
}
}
static inline void divf_C(float *src1, float *src2, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = src1[i] / src2[i];
}
}
static inline void cplxtorealf_C(complex32_t *src, float *dstRe, float *dstIm, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dstRe[i] = src[i].re;
dstIm[i] = src[i].im;
}
}
static inline void realtocplx_C(float *srcRe, float *srcIm, complex32_t *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i].re = srcRe[i];
dst[i].im = srcIm[i];
}
}
static inline void convert_64f32f_C(double *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = (float) src[i];
}
}
static inline void convert_32f64f_C(float *src, double *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = (double) src[i];
}
}
static inline void convertFloat32ToU8_C(float *src, uint8_t *dst, int len, int rounding_mode, int scale_factor)
{
float scale_fact_mult = 1.0f / (float) (1 << scale_factor);
if (rounding_mode == RndZero) {
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
float tmp = floorf(src[i] * scale_fact_mult);
dst[i] = (uint8_t) (tmp > 255.0f ? 255.0f : tmp);
}
} else if (rounding_mode == RndNear) {
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
float tmp = roundf(src[i] * scale_fact_mult);
dst[i] = (uint8_t) (tmp > 255.0f ? 255.0f : tmp);
}
} else if (rounding_mode == RndFinancial) {
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
float tmp = (roundf(src[i] * scale_fact_mult * 0.5f) / 2.0f);
dst[i] = (uint8_t) (tmp > 255.0f ? 255.0f : tmp);
}
} else {
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
float tmp = src[i] * scale_fact_mult;
dst[i] = (uint8_t) (tmp > 255.0f ? 255.0f : tmp);
}
}
}
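// Worked example (editor's note): with scale_factor = 1 an input of 5.0f
// scales to 2.5f before rounding:
//   RndZero -> floorf(2.5f) = 2;  RndNear -> roundf(2.5f) = 3.
// The RndFinancial branch computes roundf(x * 0.5f) / 2.0f; a true
// round-half-to-even on integers would multiply, not divide, the rounded
// half by 2, so this branch effectively quantizes to multiples of 0.5.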
static inline void convertFloat32ToI16_C(float *src, int16_t *dst, int len, int rounding_mode, int scale_factor)
{
float scale_fact_mult = 1.0f / (float) (1 << scale_factor);
    // RndFinancial approximates banker's rounding (round half to even)
if (rounding_mode == RndFinancial) {
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
float tmp = (roundf(src[i] * scale_fact_mult * 0.5f) / 2.0f);
            dst[i] = (int16_t) (tmp > 32767.0f ? 32767.0f : tmp);  // financial path computes roundf(x/2)/2 (see note above)
}
} else {
if (rounding_mode == RndZero) {
fesetround(FE_TOWARDZERO);
} else {
fesetround(FE_TONEAREST);
}
        // nearbyintf() honors the rounding direction selected via fesetround() above
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
float tmp = nearbyintf(src[i] * scale_fact_mult);
dst[i] = (int16_t) (tmp > 32767.0f ? 32767.0f : tmp);
}
}
}
static inline void convertFloat32ToU16_C(float *src, uint16_t *dst, int len, int rounding_mode, int scale_factor)
{
float scale_fact_mult = 1.0f / (float) (1 << scale_factor);
    // RndFinancial approximates banker's rounding (round half to even)
if (rounding_mode == RndFinancial) {
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
float tmp = (roundf(src[i] * scale_fact_mult * 0.5f) / 2.0f);
            dst[i] = (uint16_t) (tmp > 65535.0f ? 65535.0f : tmp);  // financial path computes roundf(x/2)/2 (see note above)
}
} else {
if (rounding_mode == RndZero) {
fesetround(FE_TOWARDZERO);
} else {
fesetround(FE_TONEAREST);
}
        // nearbyintf() honors the rounding direction selected via fesetround() above
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
float tmp = nearbyintf(src[i] * scale_fact_mult);
dst[i] = (uint16_t) (tmp > 65535.0f ? 65535.0f : tmp);
}
}
}
static inline void convertInt16ToFloat32_C(int16_t *src, float *dst, int len, int scale_factor)
{
float scale_fact_mult = 1.0f / (float) (1 << scale_factor);
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = (float) src[i] * scale_fact_mult;
}
}
static inline void threshold_gt_f_C(float *src, float *dst, int len, float value)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = src[i] < value ? src[i] : value;
}
}
static inline void threshold_gtabs_f_C(float *src, float *dst, int len, float value)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
if (src[i] >= 0.0f) {
dst[i] = src[i] > value ? value : src[i];
} else {
dst[i] = src[i] < (-value) ? (-value) : src[i];
}
}
}
static inline void threshold_lt_f_C(float *src, float *dst, int len, float value)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = src[i] > value ? src[i] : value;
}
}
static inline void threshold_ltabs_f_C(float *src, float *dst, int len, float value)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
if (src[i] >= 0.0f) {
dst[i] = src[i] < value ? value : src[i];
} else {
dst[i] = src[i] > (-value) ? (-value) : src[i];
}
}
}
static inline void threshold_ltval_gtval_f_C(float *src, float *dst, int len, float ltlevel, float ltvalue, float gtlevel, float gtvalue)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = src[i] < ltlevel ? ltvalue : src[i];
dst[i] = dst[i] > gtlevel ? gtvalue : dst[i];
}
}
static inline void magnitudef_C_interleaved(complex32_t *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = sqrtf(src[i].re * src[i].re + (src[i].im * src[i].im));
}
}
static inline void magnitudef_C_split(float *srcRe, float *srcIm, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = sqrtf(srcRe[i] * srcRe[i] + (srcIm[i] * srcIm[i]));
}
}
static inline void powerspectf_C_split(float *srcRe, float *srcIm, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = srcRe[i] * srcRe[i] + (srcIm[i] * srcIm[i]);
}
}
static inline void powerspectf_C_interleaved(complex32_t *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = src[i].re * src[i].re + (src[i].im * src[i].im);
}
}
static inline void meanf_C(float *src, float *dst, int len)
{
float acc = 0.0f;
int i;
#ifdef OMP
#pragma omp simd reduction(+ \
: acc)
#endif
for (i = 0; i < len; i++) {
acc += src[i];
}
acc = acc / (float) len;
*dst = acc;
}
static inline void sumf_C(float *src, float *dst, int len)
{
float tmp_acc = 0.0f;
for (int i = 0; i < len; i++) {
tmp_acc += src[i];
}
*dst = tmp_acc;
}
static inline void maxlocf_C(float *src, float *max, int *idx, int len)
{
float max_val = src[0];
int i;
    int max_idx = 0;  // default to the first element so idx is always defined
for (i = 1; i < len; i++) {
if (src[i] > max_val) {
max_val = src[i];
max_idx = i;
}
}
*idx = max_idx;
*max = max_val;
}
static inline void flipf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[len - i - 1] = src[i];
}
}
static inline void asinf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = asinf(src[i]);
}
}
static inline void asin_C(double *src, double *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = asin(src[i]);
}
}
static inline void tanf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = tanf(src[i]);
}
}
static inline void tanhf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = tanhf(src[i]);
}
}
static inline void sinhf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = sinhf(src[i]);
}
}
static inline void coshf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = coshf(src[i]);
}
}
static inline void atanhf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = atanhf(src[i]);
}
}
static inline void asinhf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = asinhf(src[i]);
}
}
static inline void acoshf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = acoshf(src[i]);
}
}
static inline void atan_C(double *src, double *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = atan(src[i]);
}
}
static inline void atanf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = atanf(src[i]);
}
}
static inline void atan2f_C(float *src1, float *src2, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = atan2f(src1[i], src2[i]);
}
}
static inline void atan2f_interleaved_C(complex32_t *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = atan2f(src[i].im, src[i].re);
}
}
static inline void sinf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = sinf(src[i]);
}
}
static inline void cosf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = cosf(src[i]);
}
}
static inline void sincosf_C(float *src, float *dst_sin, float *dst_cos, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
mysincosf(src[i], dst_sin + i, dst_cos + i);
}
}
static inline void sincosd_C(double *src, double *dst_sin, double *dst_cos, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst_sin[i] = sin(src[i]);
dst_cos[i] = cos(src[i]);
}
}
// e^ix = cos(x) + i*sin(x)
static inline void sincosf_C_interleaved(float *src, complex32_t *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
mysincosf(src[i], &(dst[i].re), &(dst[i].im));
}
}
static inline void sqrtf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = sqrtf(src[i]);
}
}
static inline void floorf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = floorf(src[i]);
}
}
static inline void ceilf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = ceilf(src[i]);
}
}
static inline void roundf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = roundf(src[i]);
}
}
static inline void truncf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = truncf(src[i]);
}
}
static inline void floord_C(double *src, double *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = floor(src[i]);
}
}
static inline void ceild_C(double *src, double *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = ceil(src[i]);
}
}
static inline void roundd_C(double *src, double *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = round(src[i]);
}
}
static inline void truncd_C(double *src, double *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = trunc(src[i]);
}
}
static inline void cplxvecdiv_C(complex32_t *src1, complex32_t *src2, complex32_t *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
float c2d2 = src2[i].re * src2[i].re + src2[i].im * src2[i].im;
dst[i].re = (src1[i].re * src2[i].re + (src1[i].im * src2[i].im)) / c2d2;
dst[i].im = (-src1[i].re * src2[i].im + (src2[i].re * src1[i].im)) / c2d2;
}
}
static inline void cplxvecmul_C(complex32_t *src1, complex32_t *src2, complex32_t *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i].re = (src1[i].re * src2[i].re) - src1[i].im * src2[i].im;
dst[i].im = src1[i].re * src2[i].im + (src2[i].re * src1[i].im);
}
}
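// Reference identity used above: (a + bi)(c + di) = (ac - bd) + (ad + bc)i.
// Example: (1 + 2i)(3 + 4i) = (3 - 8) + (4 + 6)i = -5 + 10i.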
static inline void cplxvecmul_C_unrolled8(complex32_t *src1, complex32_t *src2, complex32_t *dst, int len)
{
int stop_len = len / 8;
stop_len *= 8;
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < stop_len; i += 8) {
dst[i].re = (src1[i].re * src2[i].re) - src1[i].im * src2[i].im;
dst[i].im = src1[i].re * src2[i].im + (src2[i].re * src1[i].im);
dst[i + 1].re = (src1[i + 1].re * src2[i + 1].re) - src1[i + 1].im * src2[i + 1].im;
dst[i + 1].im = src1[i + 1].re * src2[i + 1].im + (src2[i + 1].re * src1[i + 1].im);
dst[i + 2].re = (src1[i + 2].re * src2[i + 2].re) - src1[i + 2].im * src2[i + 2].im;
dst[i + 2].im = src1[i + 2].re * src2[i + 2].im + (src2[i + 2].re * src1[i + 2].im);
dst[i + 3].re = (src1[i + 3].re * src2[i + 3].re) - src1[i + 3].im * src2[i + 3].im;
dst[i + 3].im = src1[i + 3].re * src2[i + 3].im + (src2[i + 3].re * src1[i + 3].im);
dst[i + 4].re = (src1[i + 4].re * src2[i + 4].re) - src1[i + 4].im * src2[i + 4].im;
dst[i + 4].im = src1[i + 4].re * src2[i + 4].im + (src2[i + 4].re * src1[i + 4].im);
dst[i + 5].re = (src1[i + 5].re * src2[i + 5].re) - src1[i + 5].im * src2[i + 5].im;
dst[i + 5].im = src1[i + 5].re * src2[i + 5].im + (src2[i + 5].re * src1[i + 5].im);
dst[i + 6].re = (src1[i + 6].re * src2[i + 6].re) - src1[i + 6].im * src2[i + 6].im;
dst[i + 6].im = src1[i + 6].re * src2[i + 6].im + (src2[i + 6].re * src1[i + 6].im);
dst[i + 7].re = (src1[i + 7].re * src2[i + 7].re) - src1[i + 7].im * src2[i + 7].im;
dst[i + 7].im = src1[i + 7].re * src2[i + 7].im + (src2[i + 7].re * src1[i + 7].im);
}
for (int i = stop_len; i < len; i++) {
dst[i].re = (src1[i].re * src2[i].re) - src1[i].im * src2[i].im;
dst[i].im = src1[i].re * src2[i].im + (src2[i].re * src1[i].im);
}
}
static inline void cplxvecmul_C2(complex32_t *src1, complex32_t *src2, complex32_t *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i].re = (float) ((double) src1[i].re * (double) src2[i].re - (double) src1[i].im * (double) src2[i].im);
dst[i].im = (float) ((double) src1[i].re * (double) src2[i].im + (double) src2[i].re * (double) src1[i].im);
}
}
static inline void cplxvecmul_C_split(float *src1Re, float *src1Im, float *src2Re, float *src2Im, float *dstRe, float *dstIm, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dstRe[i] = (src1Re[i] * src2Re[i]) - src1Im[i] * src2Im[i];
dstIm[i] = src1Re[i] * src2Im[i] + (src2Re[i] * src1Im[i]);
}
}
static inline void cplxconjvecmul_C(complex32_t *src1, complex32_t *src2, complex32_t *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i].re = src1[i].re * src2[i].re + (src1[i].im * src2[i].im);
dst[i].im = (src2[i].re * src1[i].im) - src1[i].re * src2[i].im;
}
}
static inline void cplxconjvecmul_C2(complex32_t *src1, complex32_t *src2, complex32_t *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i].re = (float) ((double) src1[i].re * (double) src2[i].re + (double) src1[i].im * (double) src2[i].im);
dst[i].im = (float) ((double) src2[i].re * (double) src1[i].im - (double) src1[i].re * (double) src2[i].im);
}
}
static inline void cplxconjvecmul_C_split(float *src1Re, float *src1Im, float *src2Re, float *src2Im, float *dstRe, float *dstIm, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dstRe[i] = src1Re[i] * src2Re[i] + (src1Im[i] * src2Im[i]);
dstIm[i] = (src2Re[i] * src1Im[i]) - src1Re[i] * src2Im[i];
}
}
static inline void cplxconjvecmul_C_split2(float *src1Re, float *src1Im, float *src2Re, float *src2Im, float *dstRe, float *dstIm, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dstRe[i] = (float) ((double) src1Re[i] * (double) src2Re[i] + (double) src1Im[i] * (double) src2Im[i]);
dstIm[i] = (float) ((double) src2Re[i] * (double) src1Im[i] - (double) src1Re[i] * (double) src2Im[i]);
}
}
static inline void cplxconj_C(complex32_t *src, complex32_t *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i].re = src[i].re;
dst[i].im = -src[i].im;
}
}
static inline void vectorSlopef_C(float *dst, int len, float offset, float slope)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = (float) i * slope + offset;
}
}
static inline void vectorSloped_C(double *dst, int len, double offset, double slope)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = (double) i * slope + offset;
}
}
static inline void vectorSlopes_C(int32_t *dst, int len, int32_t offset, int32_t slope)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = (int32_t) i * slope + offset;
}
}
static inline void maxeveryf_c(float *src1, float *src2, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = src1[i] > src2[i] ? src1[i] : src2[i];
}
}
static inline void mineveryf_c(float *src1, float *src2, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = src1[i] < src2[i] ? src1[i] : src2[i];
}
}
static inline void minmaxf_c(float *src, int len, float *min_value, float *max_value)
{
float min_tmp = src[0];
float max_tmp = src[0];
#ifdef OMP
#pragma omp simd reduction(max : max_tmp) reduction(min : min_tmp)
#endif
for (int i = 1; i < len; i++) {
max_tmp = max_tmp > src[i] ? max_tmp : src[i];
min_tmp = min_tmp < src[i] ? min_tmp : src[i];
}
*max_value = max_tmp;
*min_value = min_tmp;
}
static inline void addf_c(float *a, float *b, float *c, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
c[i] = a[i] + b[i];
}
}
static inline void adds_c(int32_t *a, int32_t *b, int32_t *c, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
c[i] = a[i] + b[i];
}
}
static inline void subf_c(float *a, float *b, float *c, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
c[i] = a[i] - b[i];
}
}
static inline void subcrevf_C(float *src, float value, float *dst, int len)
{
for (int i = 0; i < len; i++) {
dst[i] = value - src[i];
}
}
static inline void subs_c(int32_t *a, int32_t *b, int32_t *c, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
c[i] = a[i] - b[i];
}
}
/*static inline void orf_c(float *a, float *b, float *c, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
c[i] = a[i] | b[i];
}
}*/
static inline void setd_C(double *dst, double value, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = value;
}
}
static inline void zerod_C(double *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = 0.0;
}
}
static inline void copyd_C(double *src, double *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = src[i];
}
}
static inline void copys_C(int32_t *src, int32_t *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = src[i];
}
}
static inline void sqrtd_C(double *src, double *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = sqrt(src[i]);
}
}
static inline void addd_c(double *a, double *b, double *c, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
c[i] = a[i] + b[i];
}
}
static inline void muld_c(double *a, double *b, double *c, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
c[i] = a[i] * b[i];
}
}
static inline void subd_c(double *a, double *b, double *c, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
c[i] = a[i] - b[i];
}
}
static inline void divd_c(double *a, double *b, double *c, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
c[i] = a[i] / b[i];
}
}
static inline void mulcd_C(double *src, double value, double *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = src[i] * value;
}
}
static inline void muladdd_C(double *_a, double *_b, double *_c, double *dst, int len)
{
for (int i = 0; i < len; i++) {
dst[i] = _a[i] * _b[i] + _c[i];
}
}
static inline void mulcaddd_C(double *_a, double _b, double *_c, double *dst, int len)
{
for (int i = 0; i < len; i++) {
dst[i] = _a[i] * _b + _c[i];
}
}
static inline void mulcaddcd_C(double *_a, double _b, double _c, double *dst, int len)
{
for (int i = 0; i < len; i++) {
dst[i] = _a[i] * _b + _c;
}
}
static inline void muladdcd_C(double *_a, double *_b, double _c, double *dst, int len)
{
for (int i = 0; i < len; i++) {
dst[i] = _a[i] * _b[i] + _c;
}
}
static inline void addcd_C(double *src, double value, double *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = src[i] + value;
}
}
static inline void ors_C(int32_t *a, int32_t *b, int32_t *c, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
c[i] = a[i] | b[i];
}
}
/*static inline void andf_C(float *a, float *b, float *c, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
c[i] = a[i] & b[i];
}
}*/
static inline void ands_C(int32_t *a, int32_t *b, int32_t *c, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
c[i] = a[i] & b[i];
}
}
static inline void sigmoidf_C(float *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = 1.0f / (1.0f + expf(-src[i]));
}
}
// parametric ReLU
// simple ReLU can be expressed as threshold_lt with value = 0
static inline void PReluf_C(float *src, float *dst, float alpha, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
if (src[i] > 0.0f)
dst[i] = src[i];
else
dst[i] = alpha * src[i];
}
}
static inline void softmaxf_C(float *src, float *dst, int len)
{
float acc = 0.0f;
#ifdef OMP
#pragma omp simd reduction(+ \
: acc)
#endif
for (int i = 0; i < len; i++) {
dst[i] = expf(src[i]);
acc += dst[i];
}
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] /= acc;
}
}
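// Numerical note (editor's sketch): expf() overflows to +inf for inputs
// above ~88.7f, so a common, mathematically equivalent variant subtracts
// the maximum first, since exp(x - m) / sum_j exp(x_j - m) == softmax(x):
//   float m = src[0];
//   for (int i = 1; i < len; i++)
//       m = src[i] > m ? src[i] : m;
//   /* then use expf(src[i] - m) in place of expf(src[i]) above */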
static inline void absdiff16s_c(int16_t *a, int16_t *b, int16_t *c, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
c[i] = abs(a[i] - b[i]);
}
}
static inline void powerspect16s_c_interleaved(complex16s_t *src, int32_t *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
dst[i] = (int32_t) src[i].re * (int32_t) src[i].re + (int32_t) src[i].im * (int32_t) src[i].im;
}
}
/*
x = r × cos( θ )
y = r × sin( θ )
*/
static inline void pol2cart2Df_C(float *r, float *theta, float *x, float *y, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
float sin_tmp, cos_tmp;
mysincosf(theta[i], &sin_tmp, &cos_tmp);
x[i] = r[i] * cos_tmp;
y[i] = r[i] * sin_tmp;
}
}
//https://fr.mathworks.com/help/matlab/ref/cart2pol.html
static inline void cart2pol2Df_C(float *x, float *y, float *r, float *theta, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
float y_square = y[i] * y[i];
r[i] = sqrtf(x[i] * x[i] + y_square);
//theta[i] = atanf(y[i] / x[i]);
theta[i] = atan2f(y[i], x[i]);
}
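// Example: x = 1, y = 1 gives r = sqrtf(2.0f) ~ 1.4142f and
// theta = atan2f(1, 1) = pi/4. atan2f() is preferred over atanf(y/x)
// because it preserves the quadrant and handles x == 0 without dividing
// by zero.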
}
#if 0
/*
theta angle to X axis, rho angle to Z axis
x = r * sin(theta) * cos(rho)
y = r * sin(theta) * sin(rho)
z = r * cos(theta)
*/
static inline void pol2cart3Df_C(float *r, float *theta, float *rho, float *x, float *y, float *z, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
float sin_tmp_rho, cos_tmp_rho, sin_tmp_t, cos_tmp_t;
        mysincosf(rho[i], &sin_tmp_rho, &cos_tmp_rho);
mysincosf(theta[i], &sin_tmp_t, &cos_tmp_t);
x[i] = r[i] * sin_tmp_t * cos_tmp_rho;
y[i] = r[i] * sin_tmp_t * sin_tmp_rho;
z[i] = r[i] * cos_tmp_t;
}
}
/*
r = sqrtf(x * x + y * y + z * z)
rho = acosf(x / sqrtf(x * x + y * y)) * (y < 0 ? -1 : 1)
theta = acosf(z / r)
*/
static inline void cart2pol3Df_C(float *x, float *y, float *z, float *r, float *theta, float *rho, int len)
{
#ifdef OMP
#pragma omp simd
#endif
for (int i = 0; i < len; i++) {
float x_square = x[i]*x[i];
float xy_square_sum = y[i]*y[i] + x_square;
float r_tmp = sqrtf(xy_square_sum + z[i]*z[i]);
r[i] = r_tmp;
float tmp = sqrtf(xy_square_sum);
tmp = acosf(x[i]/tmp);
        rho[i] = (y[i] < 0.0f) ? -tmp : tmp;
theta[i] = acosf(z[i]/r_tmp);
}
}
#endif
#ifdef __cplusplus
}
#endif
OMPIRBuilder.h
//===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the OpenMPIRBuilder class and helpers used as a convenient
// way to create LLVM instructions for OpenMP directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Allocator.h"
#include <forward_list>
namespace llvm {
class CanonicalLoopInfo;
/// An interface to create LLVM-IR for OpenMP directives.
///
/// Each OpenMP directive has a corresponding public generator method.
class OpenMPIRBuilder {
public:
/// Create a new OpenMPIRBuilder operating on the given module \p M. This will
/// not have an effect on \p M (see initialize).
OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {}
~OpenMPIRBuilder();
/// Initialize the internal state; this will put structure types and
/// potentially other helpers into the underlying module. Must be called
/// before any other method and only once!
void initialize();
/// Finalize the underlying module, e.g., by outlining regions.
/// \param Fn The function to be finalized. If not used,
/// all functions are finalized.
void finalize(Function *Fn = nullptr);
/// Add attributes known for \p FnID to \p Fn.
void addAttributes(omp::RuntimeFunction FnID, Function &Fn);
/// Type used throughout for insertion points.
using InsertPointTy = IRBuilder<>::InsertPoint;
/// Callback type for variable finalization (think destructors).
///
/// \param CodeGenIP is the insertion point at which the finalization code
/// should be placed.
///
/// A finalize callback knows about all objects that need finalization, e.g.,
/// destruction, when the scope of the currently generated construct is left
/// at the time, and location, at which the callback is invoked.
using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>;
struct FinalizationInfo {
/// The finalization callback provided by the last in-flight invocation of
/// createXXXX for the directive of kind DK.
FinalizeCallbackTy FiniCB;
/// The directive kind of the innermost directive that has an associated
/// region which might require finalization when it is left.
omp::Directive DK;
/// Flag to indicate if the directive is cancellable.
bool IsCancellable;
};
/// Push a finalization callback on the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void pushFinalizationCB(const FinalizationInfo &FI) {
FinalizationStack.push_back(FI);
}
/// Pop the last finalization callback from the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void popFinalizationCB() { FinalizationStack.pop_back(); }
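// Usage sketch (illustrative, names hypothetical): a frontend pushes a
// FinalizationInfo before emitting a cancellable region and pops it after:
//   OMPBuilder.pushFinalizationCB(
//       {[&](OpenMPIRBuilder::InsertPointTy IP) { /* emit dtors at IP */ },
//        omp::Directive::OMPD_parallel, /*IsCancellable=*/true});
//   /* ... emit the region ... */
//   OMPBuilder.popFinalizationCB();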
/// Callback type for body (=inner region) code generation
///
/// The callback takes code locations as arguments, each describing a
/// location at which code might need to be generated or a location that is
/// the target of control transfer.
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the body code should be
/// placed.
/// \param ContinuationBB is the basic block target to leave the body.
///
/// Note that all blocks pointed to by the arguments have terminators.
using BodyGenCallbackTy =
function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
// This is created primarily for the sections construct, as llvm::function_ref
// (BodyGenCallbackTy) is not storable (as described in the comments of the
// function_ref class: function_ref contains a non-owning reference to the
// callable).
using StorableBodyGenCallbackTy =
std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
/// Callback type for loop body code generation.
///
/// \param CodeGenIP is the insertion point where the loop's body code must be
/// placed. This will be a dedicated BasicBlock with a
/// conditional branch from the loop condition check and
/// terminated with an unconditional branch to the loop
/// latch.
/// \param IndVar is the induction variable usable at the insertion point.
using LoopBodyGenCallbackTy =
function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>;
/// Callback type for variable privatization (think copy & default
/// constructor).
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the privatization code
/// should be placed.
/// \param Original The value being copied/created, should not be used in the
/// generated IR.
/// \param Inner The equivalent of \p Original that should be used in the
/// generated IR; this is equal to \p Original if the value is
/// a pointer and can thus be passed directly, otherwise it is
/// an equivalent but different value.
/// \param ReplVal The replacement value, thus a copy or new created version
/// of \p Inner.
///
/// \returns The new insertion point where code generation continues and
/// \p ReplVal the replacement value.
using PrivatizeCallbackTy = function_ref<InsertPointTy(
InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
Value &Inner, Value *&ReplVal)>;
/// Description of a LLVM-IR insertion point (IP) and a debug/source location
/// (filename, line, column, ...).
struct LocationDescription {
LocationDescription(const IRBuilderBase &IRB)
: IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}
LocationDescription(const InsertPointTy &IP) : IP(IP) {}
LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
: IP(IP), DL(DL) {}
InsertPointTy IP;
DebugLoc DL;
};
/// Emitter methods for OpenMP directives.
///
///{
/// Generator for '#omp barrier'
///
/// \param Loc The location where the barrier directive was encountered.
/// \param DK The kind of directive that caused the barrier.
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK,
bool ForceSimpleCall = false,
bool CheckCancelFlag = true);
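// Example (editor's sketch, assuming an OpenMPIRBuilder OMPBuilder and an
// IRBuilder<> Builder): emitting an explicit barrier at the current position:
//   OpenMPIRBuilder::LocationDescription Loc(Builder);
//   Builder.restoreIP(
//       OMPBuilder.createBarrier(Loc, omp::Directive::OMPD_barrier));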
/// Generator for '#omp cancel'
///
/// \param Loc The location where the directive was encountered.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param CanceledDirective The kind of directive that is canceled.
///
/// \returns The insertion point after the cancellation.
InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition,
omp::Directive CanceledDirective);
/// Generator for '#omp parallel'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param BodyGenCB Callback that will generate the region code.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param NumThreads The evaluated 'num_threads' clause expression, if any.
/// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind).
/// \param IsCancellable Flag to indicate a cancellable parallel region.
///
/// \returns The insertion position *after* the parallel.
IRBuilder<>::InsertPoint
createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP,
BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, Value *IfCondition,
Value *NumThreads, omp::ProcBindKind ProcBind,
bool IsCancellable);
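// Usage sketch (illustrative; InsertPointTy abbreviates
// OpenMPIRBuilder::InsertPointTy, privatization and error handling elided):
//   auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
//                        BasicBlock &ContBB) { /* emit the region body */ };
//   auto PrivCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
//                     Value &Orig, Value &Inner, Value *&ReplVal) {
//     ReplVal = &Inner;  // share all variables by reference
//     return CodeGenIP;
//   };
//   auto FiniCB = [&](InsertPointTy CodeGenIP) {};
//   Builder.restoreIP(OMPBuilder.createParallel(
//       Loc, AllocaIP, BodyGenCB, PrivCB, FiniCB, /*IfCondition=*/nullptr,
//       /*NumThreads=*/nullptr, omp::ProcBindKind::OMP_PROC_BIND_default,
//       /*IsCancellable=*/false));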
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// This generator operates on the logical iteration space of the loop, i.e.
/// the caller only has to provide a loop trip count of the loop as defined by
/// base language semantics. The trip count is interpreted as an unsigned
/// integer. The induction variable passed to \p BodyGenCB will be of the same
/// type and run from 0 to \p TripCount - 1. It is up to the callback to
/// convert the logical iteration variable to the loop counter variable in the
/// loop body.
///
/// \param Loc The insert and source location description. The insert
/// location can be between two instructions or the end of a
/// degenerate block (e.g. a BB under construction).
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param TripCount Number of iterations the loop body is executed.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *TripCount,
const Twine &Name = "loop");
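// Example (editor's sketch): a canonical loop over the logical iteration
// space 0..99:
//   Value *TripCount = Builder.getInt32(100);
//   CanonicalLoopInfo *CLI = OMPBuilder.createCanonicalLoop(
//       Loc,
//       [&](OpenMPIRBuilder::InsertPointTy CodeGenIP, Value *IV) {
//         /* emit the body; IV runs from 0 to 99 */
//       },
//       TripCount);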
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// Instead of a logical iteration space, this allows specifying user-defined
/// loop counter values using increment, upper- and lower bounds. To
/// disambiguate the terminology when counting downwards, instead of lower
/// bounds we use \p Start for the loop counter value in the first body
/// iteration.
///
/// Consider the following limitations:
///
/// * A loop counter space over all integer values of its bit-width cannot be
/// represented. E.g. using uint8_t, a loop over all 256 values has a trip
/// count of 256, which cannot be stored in an 8-bit integer:
///
/// DO I = 0, 255, 1
///
/// * Unsigned wrapping is only supported when wrapping only "once"; E.g.
/// effectively counting downwards:
///
/// for (uint8_t i = 100u; i > 0; i += 127u)
///
///
/// TODO: May need to add additional parameters to represent:
///
/// * Allow representing downcounting with unsigned integers.
///
/// * Sign of the step and the comparison operator might disagree:
///
/// for (int i = 0; i < 42; i -= 1u)
///
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param Start Value of the loop counter for the first iterations.
/// \param Stop Loop counter values past this will stop the loop.
/// \param Step Loop counter increment after each iteration; negative
/// means counting down.
/// \param IsSigned Whether Start, Stop and Step are signed integers.
/// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
/// counter.
/// \param ComputeIP Insertion point for instructions computing the trip
/// count. Can be used to ensure the trip count is available
/// at the outermost loop of a loop nest. If not set,
/// defaults to the preheader of the generated loop.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *Start, Value *Stop, Value *Step,
bool IsSigned, bool InclusiveStop,
InsertPointTy ComputeIP = {},
const Twine &Name = "loop");
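// Example (editor's note): the C loop `for (int i = 0; i < 42; i += 3)` maps
// to Start=0, Stop=42, Step=3, IsSigned=true, InclusiveStop=false, giving a
// logical trip count of ceil((42 - 0) / 3) = 14 iterations.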
/// Collapse a loop nest into a single loop.
///
/// Merges loops of a loop nest into a single CanonicalLoopNest representation
/// that has the same number of innermost loop iterations as the origin loop
/// nest. The induction variables of the input loops are derived from the
/// collapsed loop's induction variable. This is intended to be used to
/// implement OpenMP's collapse clause. Before applying a directive,
/// collapseLoops normalizes a loop nest to contain only a single loop and the
/// directive's implementation does not need to handle multiple loops itself.
/// This does not remove the need to handle all loop nest handling by
/// directives, such as the ordered(<n>) clause or the simd schedule-clause
/// modifier of the worksharing-loop directive.
///
/// Example:
/// \code
/// for (int i = 0; i < 7; ++i) // Canonical loop "i"
/// for (int j = 0; j < 9; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After collapsing with Loops={i,j}, the loop is changed to
/// \code
/// for (int ij = 0; ij < 63; ++ij) {
/// int i = ij / 9;
/// int j = ij % 9;
/// body(i, j);
/// }
/// \endcode
///
/// In the current implementation, the following limitations apply:
///
/// * All input loops have an induction variable of the same type.
///
/// * The collapsed loop will have the same trip count integer type as the
/// input loops. Therefore it is possible that the collapsed loop cannot
/// represent all iterations of the input loops. For instance, assuming a
/// 32 bit integer type, and two input loops both iterating 2^16 times, the
/// theoretical trip count of the collapsed loop would be 2^32 iterations,
/// which cannot be represented in a 32-bit integer. Behavior is undefined
/// in this case.
///
/// * The trip counts of every input loop must be available at \p ComputeIP.
/// Non-rectangular loops are not yet supported.
///
/// * At each nest level, code between a surrounding loop and its nested loop
/// is hoisted into the loop body, and such code will be executed more
/// often than before collapsing (or not at all if any inner loop iteration
/// has a trip count of 0). This is permitted by the OpenMP specification.
///
/// \param DL Debug location for instructions added for collapsing,
/// such as instructions to compute/derive the input loop's
/// induction variables.
/// \param Loops Loops in the loop nest to collapse. Loops are specified
/// from outermost-to-innermost and every control flow of a
/// loop's body must pass through its directly nested loop.
/// \param ComputeIP Where the additional instructions that compute the
/// collapsed trip count are inserted. If not set, defaults
/// to before the generated loop.
///
/// \returns The CanonicalLoopInfo object representing the collapsed loop.
CanonicalLoopInfo *collapseLoops(DebugLoc DL,
ArrayRef<CanonicalLoopInfo *> Loops,
InsertPointTy ComputeIP);
/// Modifies the canonical loop to be a statically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
bool NeedsBarrier);
/// Modifies the canonical loop to be a statically-scheduled workshare loop
/// with a
/// user-specified chunk size.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in
/// the preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after the
/// loop.
/// \param ChunkSize The user-specified chunk size.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyStaticChunkedWorkshareLoop(DebugLoc DL,
CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
bool NeedsBarrier,
Value *ChunkSize);
/// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it emits calls to an OpenMP
/// runtime function in the preheader to initialize the schedule, and in each
/// iteration to update the loop counter.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param SchedType Type of scheduling to be passed to the init function.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
omp::OMPScheduleType SchedType,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// The concrete transformation is done by applyStaticWorkshareLoop,
/// applyStaticChunkedWorkshareLoop, or applyDynamicWorkshareLoop, depending
/// on the value of \p SchedKind and \p ChunkSize.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param SchedKind Scheduling algorithm to use.
/// \param ChunkSize The chunk size for the inner loop.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyWorkshareLoop(
DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
bool NeedsBarrier,
llvm::omp::ScheduleKind SchedKind = llvm::omp::OMP_SCHEDULE_Default,
Value *ChunkSize = nullptr);
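// Usage sketch (illustrative): lowering `#pragma omp for schedule(static)`
// on a previously created canonical loop CLI:
//   InsertPointTy AfterIP = OMPBuilder.applyWorkshareLoop(
//       DL, CLI, AllocaIP, /*NeedsBarrier=*/true,
//       llvm::omp::OMP_SCHEDULE_Static);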
/// Tile a loop nest.
///
/// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
/// \p Loops must be perfectly nested, from outermost to innermost loop
/// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
/// of every loop and every tile sizes must be usable in the outermost
/// loop's preheader. This implies that the loop nest is rectangular.
///
/// Example:
/// \code
/// for (int i = 0; i < 15; ++i) // Canonical loop "i"
/// for (int j = 0; j < 14; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to
/// \code
/// for (int i1 = 0; i1 < 3; ++i1)
/// for (int j1 = 0; j1 < 2; ++j1)
/// for (int i2 = 0; i2 < 5; ++i2)
/// for (int j2 = 0; j2 < 7; ++j2)
/// body(i1*5+i2, j1*7+j2);
/// \endcode
///
/// The returned vector contains the loops {i1,j1,i2,j2}. The loops i1 and j1
/// are the floor loops, and the loops i2 and j2 are the tile loops. Tiling also
/// handles non-constant trip counts, non-constant tile sizes and trip counts
/// that are not multiples of the tile size. In the latter case the tile loop
/// of the last floor-loop iteration will have fewer iterations than specified
/// as its tile size.
///
///
/// @param DL Debug location for instructions added by tiling, for
/// instance the floor- and tile trip count computation.
/// @param Loops Loops to tile. The CanonicalLoopInfo objects are
/// invalidated by this method, i.e. should not be used after
/// tiling.
/// @param TileSizes For each loop in \p Loops, the tile size for that
/// dimension.
///
/// \returns A list of generated loops. Contains twice as many loops as the
/// input loop nest; the first half are the floor loops and the
/// second half are the tile loops.
std::vector<CanonicalLoopInfo *>
tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
ArrayRef<Value *> TileSizes);
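// Usage sketch (illustrative): tiling a single canonical loop by 4; the
// result holds the floor loop followed by the tile loop:
//   std::vector<CanonicalLoopInfo *> Tiled =
//       OMPBuilder.tileLoops(DL, {CLI}, {Builder.getInt64(4)});
//   CanonicalLoopInfo *Floor = Tiled[0], *Tile = Tiled[1];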
/// Fully unroll a loop.
///
/// Instead of unrolling the loop immediately (and duplicating its body
/// instructions), it is deferred to LLVM's LoopUnrollPass by adding loop
/// metadata.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Fully or partially unroll a loop. How the loop is unrolled is determined
/// using LLVM's LoopUnrollPass.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Partially unroll a loop.
///
/// The CanonicalLoopInfo of the unrolled loop for use with chained
/// loop-associated directives can be requested using \p UnrolledCLI. Not
/// needing the CanonicalLoopInfo allows more efficient code generation by
/// deferring the actual unrolling to the LoopUnrollPass using loop metadata.
/// A loop-associated directive applied to the unrolled loop needs to know the
/// new trip count which means that if using a heuristically determined unroll
/// factor (\p Factor == 0), that factor must be computed immediately. We are
/// using the same logic as the LoopUnrollPass to derive the unroll factor,
/// which, however, assumes that some canonicalization has taken place (e.g.
/// Mem2Reg, LICM, GVN, Inlining, etc.). That is, the heuristic will perform
/// better when the unrolled loop's CanonicalLoopInfo is not needed.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
/// \param Factor The factor to unroll the loop by. A factor of 0
/// indicates that a heuristic should be used to determine
/// the unroll-factor.
/// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the
/// partially unrolled loop. Otherwise, uses loop metadata
/// to defer unrolling to the LoopUnrollPass.
void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor,
CanonicalLoopInfo **UnrolledCLI);
/// Add metadata to simd-ize a loop.
///
/// \param DL Debug location for instructions added by this transformation.
/// \param Loop The loop to simd-ize.
void applySimd(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Generator for '#omp flush'
///
/// \param Loc The location where the flush directive was encountered
void createFlush(const LocationDescription &Loc);
/// Generator for '#omp taskwait'
///
/// \param Loc The location where the taskwait directive was encountered.
void createTaskwait(const LocationDescription &Loc);
/// Generator for '#omp taskyield'
///
/// \param Loc The location where the taskyield directive was encountered.
void createTaskyield(const LocationDescription &Loc);
/// Functions used to generate reductions. Such functions take two Values
/// representing LHS and RHS of the reduction, respectively, and a reference
/// to the value that is updated to refer to the reduction result.
using ReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>;
/// Functions used to generate atomic reductions. Such functions take two
/// Values representing pointers to LHS and RHS of the reduction, as well as
/// the element type of these pointers. They are expected to atomically
/// update the LHS to the reduced value.
using AtomicReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Type *, Value *, Value *)>;
/// Information about an OpenMP reduction.
struct ReductionInfo {
ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable,
ReductionGenTy ReductionGen,
AtomicReductionGenTy AtomicReductionGen)
: ElementType(ElementType), Variable(Variable),
PrivateVariable(PrivateVariable), ReductionGen(ReductionGen),
AtomicReductionGen(AtomicReductionGen) {
assert(cast<PointerType>(Variable->getType())
->isOpaqueOrPointeeTypeMatches(ElementType) && "Invalid elem type");
}
/// Reduction element type, must match pointee type of variable.
Type *ElementType;
/// Reduction variable of pointer type.
Value *Variable;
/// Thread-private partial reduction variable.
Value *PrivateVariable;
/// Callback for generating the reduction body. The IR produced by this will
/// be used to combine two values in a thread-safe context, e.g., under
/// lock or within the same thread, and therefore need not be atomic.
ReductionGenTy ReductionGen;
/// Callback for generating the atomic reduction body, may be null. The IR
/// produced by this will be used to atomically combine two values during
/// reduction. If null, the implementation will use the non-atomic version
/// along with the appropriate synchronization mechanisms.
AtomicReductionGenTy AtomicReductionGen;
};
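// Example (editor's sketch): describing a float sum reduction over SumVar;
// SumReduction and SumAtomicReduction are hypothetical callbacks emitting
// `LHS + RHS` non-atomically resp. as an atomic update:
//   OpenMPIRBuilder::ReductionInfo RI(
//       Builder.getFloatTy(), SumVar, PrivSumVar, SumReduction,
//       SumAtomicReduction);
//   OMPBuilder.createReductions(Loc, AllocaIP, {RI});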
// TODO: provide atomic and non-atomic reduction generators for reduction
// operators defined by the OpenMP specification.
/// Generator for '#omp reduction'.
///
/// Emits the IR instructing the runtime to perform the specific kind of
/// reductions. Expects reduction variables to have been privatized and
/// initialized to reduction-neutral values separately. Emits the calls to
/// runtime functions as well as the reduction function and the basic blocks
/// performing the reduction atomically and non-atomically.
///
/// The code emitted for the following:
///
/// \code
/// type var_1;
/// type var_2;
/// #pragma omp <directive> reduction(reduction-op:var_1,var_2)
/// /* body */;
/// \endcode
///
/// corresponds to the following sketch.
///
/// \code
/// void _outlined_par() {
/// // N is the number of different reductions.
/// void *red_array[] = {privatized_var_1, privatized_var_2, ...};
/// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array,
/// _omp_reduction_func,
/// _gomp_critical_user.reduction.var)) {
/// case 1: {
/// var_1 = var_1 <reduction-op> privatized_var_1;
/// var_2 = var_2 <reduction-op> privatized_var_2;
/// // ...
/// __kmpc_end_reduce(...);
/// break;
/// }
/// case 2: {
/// _Atomic<ReductionOp>(var_1, privatized_var_1);
/// _Atomic<ReductionOp>(var_2, privatized_var_2);
/// // ...
/// break;
/// }
/// default: break;
/// }
/// }
///
/// void _omp_reduction_func(void **lhs, void **rhs) {
/// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0];
/// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1];
/// // ...
/// }
/// \endcode
///
/// \param Loc The location where the reduction was
/// encountered. Must be within the associated
/// directive and after the last local access to the
/// reduction variables.
/// \param AllocaIP An insertion point suitable for allocas usable
/// in reductions.
/// \param ReductionInfos A list of info on each reduction variable.
/// \param IsNoWait A flag set if the reduction is marked as nowait.
InsertPointTy createReductions(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<ReductionInfo> ReductionInfos,
bool IsNoWait = false);
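// Hypothetical invocation (sketch, reusing the SumGen/SumAtomicGen lambdas
// illustrated above): reduce a single float variable without nowait.
//
//   OpenMPIRBuilder::ReductionInfo RI(Builder.getFloatTy(), Var, PrivVar,
//                                     SumGen, SumAtomicGen);
//   InsertPointTy AfterIP =
//       OMPBuilder.createReductions(Loc, AllocaIP, {RI});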
///}
/// Return the insertion point used by the underlying IRBuilder.
InsertPointTy getInsertionPoint() { return Builder.saveIP(); }
/// Update the internal location to \p Loc.
bool updateToLocation(const LocationDescription &Loc) {
Builder.restoreIP(Loc.IP);
Builder.SetCurrentDebugLocation(Loc.DL);
return Loc.IP.getBlock() != nullptr;
}
/// Return the function declaration for the runtime function with \p FnID.
FunctionCallee getOrCreateRuntimeFunction(Module &M,
omp::RuntimeFunction FnID);
Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID);
/// Return the (LLVM-IR) string describing the source location \p LocStr.
Constant *getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the default source location.
Constant *getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the source location identified by
/// the arguments.
Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
unsigned Line, unsigned Column,
uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as
/// fallback if \p DL does not specify the function name.
Constant *getOrCreateSrcLocStr(DebugLoc DL, uint32_t &SrcLocStrSize,
Function *F = nullptr);
/// Return the (LLVM-IR) string describing the source location \p Loc.
Constant *getOrCreateSrcLocStr(const LocationDescription &Loc,
uint32_t &SrcLocStrSize);
/// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
/// TODO: Create an enum class for the Reserve2Flags
Constant *getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize,
omp::IdentFlag Flags = omp::IdentFlag(0),
unsigned Reserve2Flags = 0);
/// Create a hidden global flag \p Name in the module with initial value \p
/// Value.
GlobalValue *createGlobalFlag(unsigned Value, StringRef Name);
/// Generate control flow and cleanup for cancellation.
///
/// \param CancelFlag Flag indicating if the cancellation is performed.
/// \param CanceledDirective The kind of directive that is canceled.
/// \param ExitCB Extra code to be generated in the exit block.
void emitCancelationCheckImpl(Value *CancelFlag,
omp::Directive CanceledDirective,
FinalizeCallbackTy ExitCB = {});
/// Generate a barrier runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
/// \param DK The directive which caused the barrier
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy emitBarrierImpl(const LocationDescription &Loc,
omp::Directive DK, bool ForceSimpleCall,
bool CheckCancelFlag);
/// Generate a flush runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitFlush(const LocationDescription &Loc);
/// The finalization stack made up of finalize callbacks currently in-flight,
/// wrapped into FinalizationInfo objects that reference also the finalization
/// target block and the kind of cancellable directive.
SmallVector<FinalizationInfo, 8> FinalizationStack;
/// Return true if the last entry in the finalization stack is of kind \p DK
/// and cancellable.
bool isLastFinalizationInfoCancellable(omp::Directive DK) {
return !FinalizationStack.empty() &&
FinalizationStack.back().IsCancellable &&
FinalizationStack.back().DK == DK;
}
/// Generate a taskwait runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskwaitImpl(const LocationDescription &Loc);
/// Generate a taskyield runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskyieldImpl(const LocationDescription &Loc);
/// Return the current thread ID.
///
/// \param Ident The ident (ident_t*) describing the query origin.
Value *getOrCreateThreadID(Value *Ident);
/// The underlying LLVM-IR module
Module &M;
/// The LLVM-IR Builder used to create IR.
IRBuilder<> Builder;
/// Map to remember source location strings
StringMap<Constant *> SrcLocStrMap;
/// Map to remember existing ident_t*.
DenseMap<std::pair<Constant *, uint64_t>, Constant *> IdentMap;
/// Helper that contains information about regions we need to outline
/// during finalization.
struct OutlineInfo {
using PostOutlineCBTy = std::function<void(Function &)>;
PostOutlineCBTy PostOutlineCB;
BasicBlock *EntryBB, *ExitBB, *OuterAllocaBB;
SmallVector<Value *, 2> ExcludeArgsFromAggregate;
/// Collect all blocks in between EntryBB and ExitBB in both the given
/// vector and set.
void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet,
SmallVectorImpl<BasicBlock *> &BlockVector);
/// Return the function that contains the region to be outlined.
Function *getFunction() const { return EntryBB->getParent(); }
};
/// Collection of regions that need to be outlined during finalization.
SmallVector<OutlineInfo, 16> OutlineInfos;
/// Collection of owned canonical loop objects that eventually need to be
/// free'd.
std::forward_list<CanonicalLoopInfo> LoopInfos;
/// Add a new region that will be outlined later.
void addOutlineInfo(OutlineInfo &&OI) { OutlineInfos.emplace_back(OI); }
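// Illustrative sketch (hypothetical block names): register a region for
// deferred outlining with a callback that runs on the outlined function.
//
//   OutlineInfo OI;
//   OI.EntryBB = RegionEntryBB;
//   OI.ExitBB = RegionExitBB;
//   OI.OuterAllocaBB = OuterAllocaBB;
//   OI.PostOutlineCB = [](Function &F) { F.addFnAttr("omp.outlined"); };
//   addOutlineInfo(std::move(OI));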
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for the cache of threadprivate
/// variables.
StringMap<AssertingVH<Constant>, BumpPtrAllocator> InternalVars;
/// Create the global variable holding the offload mappings information.
GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
std::string VarName);
/// Create the global variable holding the offload names information.
GlobalVariable *
createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
std::string VarName);
struct MapperAllocas {
AllocaInst *ArgsBase = nullptr;
AllocaInst *Args = nullptr;
AllocaInst *ArgSizes = nullptr;
};
/// Create the alloca instructions used in calls to mapper functions.
void createMapperAllocas(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumOperands,
struct MapperAllocas &MapperAllocas);
/// Create the call for the target mapper function.
/// \param Loc The source location description.
/// \param MapperFunc Function to be called.
/// \param SrcLocInfo Source location information global.
/// \param MaptypesArg The argument types.
/// \param MapnamesArg The argument names.
/// \param MapperAllocas The AllocaInst used for the call.
/// \param DeviceID Device ID for the call.
/// \param NumOperands Number of operands in the call.
void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc,
Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg,
struct MapperAllocas &MapperAllocas, int64_t DeviceID,
unsigned NumOperands);
public:
/// Generator for __kmpc_copyprivate
///
/// \param Loc The source location description.
/// \param BufSize Number of elements in the buffer.
/// \param CpyBuf List of pointers to data to be copied.
/// \param CpyFn function to call for copying data.
/// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise.
///
/// \return The insertion position *after* the CopyPrivate call.
InsertPointTy createCopyPrivate(const LocationDescription &Loc,
llvm::Value *BufSize, llvm::Value *CpyBuf,
llvm::Value *CpyFn, llvm::Value *DidIt);
/// Generator for '#omp single'
///
/// \param Loc The source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param DidIt Local variable used as a flag to indicate 'single' thread
///
/// \returns The insertion position *after* the single call.
InsertPointTy createSingle(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, llvm::Value *DidIt);
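// Hypothetical callbacks (sketch; assumes the two-argument
// (AllocaIP, CodeGenIP) form of BodyGenCallbackTy used by this builder):
//
//   auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP) {
//     Builder.restoreIP(CodeGenIP);
//     // ... emit the body of the single region ...
//   };
//   auto FiniCB = [&](InsertPointTy IP) { /* finalize variable copies */ };
//   OMPBuilder.createSingle(Loc, BodyGenCB, FiniCB, /*DidIt=*/nullptr);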
/// Generator for '#omp master'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the master.
InsertPointTy createMaster(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generator for '#omp masked'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the masked.
InsertPointTy createMasked(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, Value *Filter);
/// Generator for '#omp critical'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \param CriticalName name of the lock used by the critical directive
/// \param HintInst Hint Instruction for hint clause associated with critical
///
/// \returns The insertion position *after* the critical.
InsertPointTy createCritical(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
StringRef CriticalName, Value *HintInst);
/// Generator for '#omp ordered depend (source | sink)'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion point to be used for alloca instructions.
/// \param NumLoops The number of loops in depend clause.
/// \param StoreValues The values to be stored in the vector addresses.
/// \param Name The name of alloca instruction.
/// \param IsDependSource If true, depend source; otherwise, depend sink.
///
/// \return The insertion position *after* the ordered.
InsertPointTy createOrderedDepend(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumLoops,
ArrayRef<llvm::Value *> StoreValues,
const Twine &Name, bool IsDependSource);
/// Generator for '#omp ordered [threads | simd]'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param IsThreads If true, with the threads clause or without any clause;
/// otherwise, with the simd clause.
///
/// \returns The insertion position *after* the ordered.
InsertPointTy createOrderedThreadsSimd(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
bool IsThreads);
/// Generator for '#omp sections'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param SectionCBs Callbacks that will generate body of each section.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IsCancellable Flag to indicate a cancellable parallel region.
/// \param IsNowait If true, the barrier that ensures all sections have
/// executed before moving forward is not generated.
/// \returns The insertion position *after* the sections.
InsertPointTy createSections(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<StorableBodyGenCallbackTy> SectionCBs,
PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, bool IsCancellable,
bool IsNowait);
/// Generator for '#omp section'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \returns The insertion position *after* the section.
InsertPointTy createSection(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generate conditional branch and relevant BasicBlocks through which private
/// threads copy the 'copyin' variables from Master copy to threadprivate
/// copies.
///
/// \param IP insertion block for the copyin conditional
/// \param MasterAddr a pointer to the master variable
/// \param PrivateAddr a pointer to the threadprivate variable
/// \param IntPtrTy Pointer size type
/// \param BranchtoEnd Create a branch between the copyin.not.master blocks
/// and the copy.in.end block
///
/// \returns The insertion point where the copying operation is to be emitted.
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr,
Value *PrivateAddr,
llvm::IntegerType *IntPtrTy,
bool BranchtoEnd = true);
/// Create a runtime call for kmpc_Alloc
///
/// \param Loc The insert and source location description.
/// \param Size Size of allocated memory space
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_alloc
///
/// \returns CallInst to the OMP_Alloc call
CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_free
///
/// \param Loc The insert and source location description.
/// \param Addr Address of memory space to be freed
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_Free
///
/// \returns CallInst to the OMP_Free call
CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_threadprivate_cached
///
/// \param Loc The insert and source location description.
/// \param Pointer pointer to data to be cached
/// \param Size size of data to be cached
/// \param Name Name of call Instruction for callinst
///
/// \returns CallInst to the thread private cache call.
CallInst *createCachedThreadPrivate(const LocationDescription &Loc,
llvm::Value *Pointer,
llvm::ConstantInt *Size,
const llvm::Twine &Name = Twine(""));
/// Create a runtime call for __tgt_interop_init
///
/// \param Loc The insert and source location description.
/// \param InteropVar variable to be allocated
/// \param InteropType type of interop operation
/// \param Device device to which offloading will occur
/// \param NumDependences number of dependence variables
/// \param DependenceAddress pointer to dependence variables
/// \param HaveNowaitClause does nowait clause exist
///
/// \returns CallInst to the __tgt_interop_init call
CallInst *createOMPInteropInit(const LocationDescription &Loc,
Value *InteropVar,
omp::OMPInteropType InteropType, Value *Device,
Value *NumDependences,
Value *DependenceAddress,
bool HaveNowaitClause);
/// Create a runtime call for __tgt_interop_destroy
///
/// \param Loc The insert and source location description.
/// \param InteropVar variable to be allocated
/// \param Device device to which offloading will occur
/// \param NumDependences number of dependence variables
/// \param DependenceAddress pointer to dependence variables
/// \param HaveNowaitClause does nowait clause exist
///
/// \returns CallInst to the __tgt_interop_destroy call
CallInst *createOMPInteropDestroy(const LocationDescription &Loc,
Value *InteropVar, Value *Device,
Value *NumDependences,
Value *DependenceAddress,
bool HaveNowaitClause);
/// Create a runtime call for __tgt_interop_use
///
/// \param Loc The insert and source location description.
/// \param InteropVar variable to be allocated
/// \param Device device to which offloading will occur
/// \param NumDependences number of dependence variables
/// \param DependenceAddress pointer to dependence variables
/// \param HaveNowaitClause does nowait clause exist
///
/// \returns CallInst to the __tgt_interop_use call
CallInst *createOMPInteropUse(const LocationDescription &Loc,
Value *InteropVar, Value *Device,
Value *NumDependences, Value *DependenceAddress,
bool HaveNowaitClause);
/// The `omp target` interface
///
/// For more information about the usage of this interface,
/// \see openmp/libomptarget/deviceRTLs/common/include/target.h
///
///{
/// Create a runtime call for kmpc_target_init
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD,
bool RequiresFullRuntime);
/// Create a runtime call for kmpc_target_deinit
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD,
bool RequiresFullRuntime);
///}
/// Declarations for LLVM-IR types (simple, array, function and structure) are
/// generated below. Their names are defined and used in OpenMPKinds.def. Here
/// we provide the declarations, the initializeTypes function will provide the
/// values.
///
///{
#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
ArrayType *VarName##Ty = nullptr; \
PointerType *VarName##PtrTy = nullptr;
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
FunctionType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#define OMP_STRUCT_TYPE(VarName, StrName, ...) \
StructType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
///}
private:
/// Create all simple and struct types exposed by the runtime and remember
/// the llvm::PointerTypes of them for easy access later.
void initializeTypes(Module &M);
/// Common interface for generating entry calls for OMP Directives.
/// If the directive has a region/body, it will set the insertion
/// point to the body.
///
/// \param OMPD Directive to generate entry blocks for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitBB block where the region ends.
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
BasicBlock *ExitBB,
bool Conditional = false);
/// Common interface to finalize the region
///
/// \param OMPD Directive to generate exiting code for
/// \param FinIP Insertion point for emitting Finalization code and exit call
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD,
InsertPointTy FinIP,
Instruction *ExitCall,
bool HasFinalize = true);
/// Common Interface to generate OMP inlined regions
///
/// \param OMPD Directive to generate inlined region for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param BodyGenCB Body code generation callback.
/// \param FiniCB Finalization Callback. Will be called when finalizing region
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
/// \param IsCancellable if HasFinalize is set to true, indicate if the
/// directive should be cancellable.
/// \return The insertion point after the region
InsertPointTy
EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall,
Instruction *ExitCall, BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, bool Conditional = false,
bool HasFinalize = true, bool IsCancellable = false);
/// Get the platform-specific name separator.
/// \param Parts different parts of the final name that need separation
/// \param FirstSeparator First separator used between the initial two
/// parts of the name.
/// \param Separator separator used between all of the rest consecutive
/// parts of the name
static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
StringRef FirstSeparator,
StringRef Separator);
/// Gets (if a variable with the given name already exists) or creates an
/// internal global variable with the specified Name. The created variable has
/// CommonLinkage by default and is initialized with a null value.
/// \param Ty Type of the global variable. If it already exists, the type
/// must be the same.
/// \param Name Name of the variable.
Constant *getOrCreateOMPInternalVariable(Type *Ty, const Twine &Name,
unsigned AddressSpace = 0);
/// Returns the corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created; otherwise a
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
Value *getOMPCriticalRegionLock(StringRef CriticalName);
/// Callback type for Atomic Expression update
/// ex:
/// \code{.cpp}
/// unsigned x = 0;
/// #pragma omp atomic update
/// x = Expr(x_old); //Expr() is any legal operation
/// \endcode
///
/// \param XOld the value of the atomic memory address to use for update
/// \param IRB reference to the IRBuilder to use
///
/// \returns Value to update X to.
using AtomicUpdateCallbackTy =
const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>;
private:
enum AtomicKind { Read, Write, Update, Capture, Compare };
/// Determine whether to emit flush or not
///
/// \param Loc The insert and source location description.
/// \param AO The required atomic ordering
/// \param AK The OpenMP atomic operation kind used.
///
/// \returns whether a flush was emitted or not
bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc,
AtomicOrdering AO, AtomicKind AK);
/// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param AllocaIP The insertion point to be used for alloca
/// instructions.
/// \param X The target atomic pointer to be updated
/// \param XElemTy The element type of the atomic pointer.
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
/// \param RMWOp The binary operation used for update. If the
/// operation is not supported by atomicRMW,
/// or belongs to {FADD, FSUB, BAD_BINOP},
/// then a `cmpExch`-based atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param VolatileX true if \a X is volatile.
/// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \returns A pair of the old value of X before the update, and the value
/// used for the update.
std::pair<Value *, Value *>
emitAtomicUpdate(InsertPointTy AllocaIP, Value *X, Type *XElemTy, Value *Expr,
AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp, bool VolatileX,
bool IsXBinopExpr);
/// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 .
///
/// \Return The instruction
Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
AtomicRMWInst::BinOp RMWOp);
public:
/// a struct to pack relevant information while generating atomic Ops
struct AtomicOpValue {
Value *Var = nullptr;
Type *ElemTy = nullptr;
bool IsSigned = false;
bool IsVolatile = false;
};
/// Emit atomic Read for : V = X --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically read
/// \param V Memory address where to store atomically read
/// value
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic read IR.
InsertPointTy createAtomicRead(const LocationDescription &Loc,
AtomicOpValue &X, AtomicOpValue &V,
AtomicOrdering AO);
/// Emit atomic write for : X = Expr --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically written to
/// \param Expr The value to store.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic Write IR.
InsertPointTy createAtomicWrite(const LocationDescription &Loc,
AtomicOpValue &X, Value *Expr,
AtomicOrdering AO);
/// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion point to be used for alloca instructions.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions.
/// \param RMWOp The binary operation used for update. If the operation
/// is not supported by atomicRMW, or belongs to
/// {FADD, FSUB, BAD_BINOP}, then a `cmpExch`-based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \return Insertion point after generated atomic update IR.
InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
InsertPointTy AllocaIP, AtomicOpValue &X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool IsXBinopExpr);
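// Hypothetical use (sketch): emit '#pragma omp atomic update' for
// 'x = x * expr'. Multiplication is not an atomicrmw operation, so the
// UpdateOp callback supplies the combined value and a cmpxchg-based update
// is generated.
//
//   OpenMPIRBuilder::AtomicOpValue X{XPtr, Builder.getInt32Ty(),
//                                    /*IsSigned=*/false,
//                                    /*IsVolatile=*/false};
//   auto UpdateOp = [&](Value *XOld, IRBuilder<> &IRB) {
//     return IRB.CreateMul(XOld, Expr);
//   };
//   OMPBuilder.createAtomicUpdate(Loc, AllocaIP, X, Expr,
//                                 AtomicOrdering::Monotonic,
//                                 AtomicRMWInst::BAD_BINOP, UpdateOp,
//                                 /*IsXBinopExpr=*/true);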
/// Emit atomic update for constructs: --- Only Scalar data types
/// V = X; X = X BinOp Expr ,
/// X = X BinOp Expr; V = X,
/// V = X; X = Expr BinOp X,
/// X = Expr BinOp X; V = X,
/// V = X; X = UpdateOp(X),
/// X = UpdateOp(X); V = X,
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion point to be used for alloca instructions.
/// \param X The target atomic pointer to be updated
/// \param V Memory address where to store captured value
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions
/// \param RMWOp The binary operation used for update. If the
/// operation is not supported by atomicRMW, or belongs to
/// {FADD, FSUB, BAD_BINOP}, then a cmpExch-based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param UpdateExpr true if X is an in place update of the form
/// X = X BinOp Expr or X = Expr BinOp X
/// \param IsXBinopExpr true if X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
/// \param IsPostfixUpdate true if original value of 'x' must be stored in
/// 'v', not an updated one.
///
/// \return Insertion point after generated atomic capture IR.
InsertPointTy
createAtomicCapture(const LocationDescription &Loc, InsertPointTy AllocaIP,
AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
bool IsPostfixUpdate, bool IsXBinopExpr);
/// Emit atomic compare for constructs: --- Only scalar data types
/// cond-update-atomic:
/// x = x ordop expr ? expr : x;
/// x = expr ordop x ? expr : x;
/// x = x == e ? d : x;
/// x = e == x ? d : x; (this one is not in the spec)
/// cond-update-stmt:
/// if (x ordop expr) { x = expr; }
/// if (expr ordop x) { x = expr; }
/// if (x == e) { x = d; }
/// if (e == x) { x = d; } (this one is not in the spec)
///
/// \param Loc The insert and source location description.
/// \param X The target atomic pointer to be updated.
/// \param E The expected value ('e') for forms that use an
/// equality comparison or an expression ('expr') for
/// forms that use 'ordop' (logically an atomic maximum or
/// minimum).
/// \param D The desired value for forms that use an equality
/// comparison. For forms that use 'ordop', it should be
/// \p nullptr.
/// \param AO Atomic ordering of the generated atomic instructions.
/// \param Op Atomic compare operation. It can only be ==, <, or >.
/// \param IsXBinopExpr True if the conditional statement is in the form where
/// x is on LHS. It only matters for < or >.
///
/// \return Insertion point after generated atomic capture IR.
InsertPointTy createAtomicCompare(const LocationDescription &Loc,
AtomicOpValue &X, Value *E, Value *D,
AtomicOrdering AO,
omp::OMPAtomicCompareOp Op,
bool IsXBinopExpr);
/// Create the control flow structure of a canonical OpenMP loop.
///
/// The emitted loop will be disconnected, i.e. no edge to the loop's
/// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
/// IRBuilder location is not preserved.
///
/// \param DL DebugLoc used for the instructions in the skeleton.
/// \param TripCount Value to be used for the trip count.
/// \param F Function in which to insert the BasicBlocks.
/// \param PreInsertBefore Where to insert BBs that execute before the body,
/// typically the body itself.
/// \param PostInsertBefore Where to insert BBs that execute after the body.
/// \param Name Base name used to derive BB
/// and instruction names.
///
/// \returns The CanonicalLoopInfo that represents the emitted loop.
CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
Function *F,
BasicBlock *PreInsertBefore,
BasicBlock *PostInsertBefore,
const Twine &Name = {});
};
/// Class to represent the control flow structure of an OpenMP canonical loop.
///
/// The control-flow structure is standardized for easy consumption by
/// directives associated with loops. For instance, the worksharing-loop
/// construct may change this control flow such that each loop iteration is
/// executed on only one thread. The constraints of a canonical loop in brief
/// are:
///
/// * The number of loop iterations must have been computed before entering the
/// loop.
///
/// * Has an (unsigned) logical induction variable that starts at zero and
/// increments by one.
///
/// * The loop's CFG itself has no side-effects. The OpenMP specification
/// itself allows side-effects, but the order in which they happen, including
/// how often or whether at all, is unspecified. We expect that the frontend
/// will emit those side-effect instructions somewhere (e.g. before the loop)
/// such that the CanonicalLoopInfo itself can be side-effect free.
///
/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
/// execution of a loop body that satisfies these constraints. It does NOT
/// represent arbitrary SESE regions that happen to contain a loop. Do not use
/// CanonicalLoopInfo for such purposes.
///
/// The control flow can be described as follows:
///
///     Preheader
///       |
///  /-> Header
///  |     |
///  |    Cond---\
///  |     |     |
///  |    Body   |
///  |    | |    |
///  |   <...>   |
///  |    | |    |
///   \--Latch   |
///              |
///             Exit
///              |
///            After
///
/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
/// inclusive) and end at AfterIP (at the After's first instruction, exclusive).
/// That is, instructions in the Preheader and After blocks (except the
/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
/// side-effects. Typically, the Preheader is used to compute the loop's trip
/// count. The instructions from BodyIP (at the Body block's first instruction,
/// inclusive) until the Latch are also considered outside CanonicalLoopInfo's
/// control and thus can have side-effects.
/// point into the loop body, which may contain arbitrary control flow as long
/// as all control paths eventually branch to the Latch block.
///
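/// For instance (an illustrative sketch, assuming a valid CanonicalLoopInfo
/// *CLI is at hand), trip-count setup and per-iteration code would be emitted
/// at the insertion points exposed below:
/// \code{.cpp}
///   Builder.restoreIP(CLI->getPreheaderIP()); // runs once, before the loop
///   // ... compute values the loop body depends on ...
///   Builder.restoreIP(CLI->getBodyIP());      // start of each iteration
///   // ... emit side-effecting body instructions ...
/// \endcode
///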
/// TODO: Consider adding another standardized BasicBlock between Body CFG and
/// Latch to guarantee that there is only a single edge to the latch. It would
/// make loop transformations easier by not requiring them to consider multiple
/// predecessors of the latch (see redirectAllPredecessorsTo) and would give us
/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
/// executes after each body iteration.
///
/// There must be no loop-carried dependencies through llvm::Values. This is
/// equivalent to saying that the Latch has no PHINode and the Header's only
/// PHINode is for the induction variable.
///
/// All code in Header, Cond, Latch and Exit (plus the terminator of the
/// Preheader) is CanonicalLoopInfo's responsibility, and its build-up is
/// checked by assertOK(). It is expected to not be modified unless explicitly
/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
/// anymore as its underlying control flow may not exist anymore.
/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
/// may also return a new CanonicalLoopInfo that can be passed to other
/// loop-associated construct implementing methods. These loop-transforming
/// methods may either create a new CanonicalLoopInfo, usually using
/// createLoopSkeleton, and invalidate the input CanonicalLoopInfo, or reuse and
/// modify one of the input CanonicalLoopInfos and return it as representing the
/// modified loop. What is done is an implementation detail of the
/// transformation-implementing method, and callers should always assume that
/// the CanonicalLoopInfo passed to it is invalidated and a new object is
/// returned. Returned CanonicalLoopInfos have the same structure and
/// guarantees as the one created by createCanonicalLoop, such that
/// transforming methods do not have to special-case where the
/// CanonicalLoopInfo originated from.
///
/// Generally, methods consuming CanonicalLoopInfo do not need an
/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
/// any InsertPoint in the Preheader or After block can still be used after
/// calling such a method.
///
/// TODO: Provide mechanisms for exception handling and cancellation points.
///
/// Defined outside OpenMPIRBuilder because nested classes cannot be
/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
class CanonicalLoopInfo {
friend class OpenMPIRBuilder;
private:
BasicBlock *Header = nullptr;
BasicBlock *Cond = nullptr;
BasicBlock *Latch = nullptr;
BasicBlock *Exit = nullptr;
/// Add the control blocks of this loop to \p BBs.
///
/// This does not include any block from the body, including the one returned
/// by getBody().
///
/// FIXME: This currently includes the Preheader and After blocks even though
/// their content is (mostly) not under CanonicalLoopInfo's control.
/// Re-evaluate whether this makes sense.
void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);
/// Sets the number of loop iterations to the given value. This value must be
/// valid in the condition block (i.e., defined in the preheader) and is
/// interpreted as an unsigned integer.
void setTripCount(Value *TripCount);
/// Replace all uses of the canonical induction variable in the loop body with
/// a new one.
///
/// The intended use case is to update the induction variable for an updated
/// iteration space such that it can stay normalized in the 0...tripcount-1
/// range.
///
/// The \p Updater is called with the (presumably updated) current normalized
/// induction variable and is expected to return the value that uses of the
/// pre-updated induction values should use instead, typically dependent on
/// the new induction variable. This is a lambda (instead of e.g. just passing
/// the new value) to be able to distinguish the uses of the pre-updated
/// induction variable from uses of the induction variable to compute the
/// updated induction variable value.
void mapIndVar(llvm::function_ref<Value *(Instruction *)> Updater);
public:
/// Returns whether this object currently represents the IR of a loop. If
/// returning false, it may have been consumed by a loop transformation or not
/// been initialized. Do not use it in this case.
bool isValid() const { return Header; }
/// The preheader ensures that there is only a single edge entering the loop.
/// Code that must be executed before any loop iteration can be emitted here,
/// such as computing the loop trip count and begin-lifetime markers. Code in
/// the preheader is not considered part of the canonical loop.
BasicBlock *getPreheader() const;
/// The header is the entry for each iteration. In the canonical control flow,
/// it only contains the PHINode for the induction variable.
BasicBlock *getHeader() const {
assert(isValid() && "Requires a valid canonical loop");
return Header;
}
/// The condition block computes whether there is another loop iteration. If
/// yes, branches to the body; otherwise to the exit block.
BasicBlock *getCond() const {
assert(isValid() && "Requires a valid canonical loop");
return Cond;
}
/// The body block is the single entry for a loop iteration and not controlled
/// by CanonicalLoopInfo. It can contain arbitrary control flow but must
/// eventually branch to the \p Latch block.
BasicBlock *getBody() const {
assert(isValid() && "Requires a valid canonical loop");
return cast<BranchInst>(Cond->getTerminator())->getSuccessor(0);
}
/// Reaching the latch indicates the end of the loop body code. In the
/// canonical control flow, it only contains the increment of the induction
/// variable.
BasicBlock *getLatch() const {
assert(isValid() && "Requires a valid canonical loop");
return Latch;
}
/// Reaching the exit indicates no more iterations are being executed.
BasicBlock *getExit() const {
assert(isValid() && "Requires a valid canonical loop");
return Exit;
}
/// The after block is intended for clean-up code such as lifetime end
/// markers. It is separate from the exit block to ensure that, analogous to
/// the preheader, it has just a single entry edge and is free from PHI nodes
/// should there be multiple loop exits (such as from break
/// statements/cancellations).
BasicBlock *getAfter() const {
assert(isValid() && "Requires a valid canonical loop");
return Exit->getSingleSuccessor();
}
/// Returns the llvm::Value containing the number of loop iterations. It must
/// be valid in the preheader and always interpreted as an unsigned integer of
/// any bit-width.
Value *getTripCount() const {
assert(isValid() && "Requires a valid canonical loop");
Instruction *CmpI = &Cond->front();
assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
return CmpI->getOperand(1);
}
/// Returns the instruction representing the current logical induction
/// variable. Always unsigned, always starting at 0 with an increment of one.
Instruction *getIndVar() const {
assert(isValid() && "Requires a valid canonical loop");
Instruction *IndVarPHI = &Header->front();
assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
return IndVarPHI;
}
/// Return the type of the induction variable (and the trip count).
Type *getIndVarType() const {
assert(isValid() && "Requires a valid canonical loop");
return getIndVar()->getType();
}
/// Return the insertion point for user code before the loop.
OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
assert(isValid() && "Requires a valid canonical loop");
BasicBlock *Preheader = getPreheader();
return {Preheader, std::prev(Preheader->end())};
};
/// Return the insertion point for user code in the body.
OpenMPIRBuilder::InsertPointTy getBodyIP() const {
assert(isValid() && "Requires a valid canonical loop");
BasicBlock *Body = getBody();
return {Body, Body->begin()};
};
/// Return the insertion point for user code after the loop.
OpenMPIRBuilder::InsertPointTy getAfterIP() const {
assert(isValid() && "Requires a valid canonical loop");
BasicBlock *After = getAfter();
return {After, After->begin()};
};
Function *getFunction() const {
assert(isValid() && "Requires a valid canonical loop");
return Header->getParent();
}
/// Consistency self-check.
void assertOK() const;
/// Invalidate this loop. That is, the underlying IR does not fulfill the
/// requirements of an OpenMP canonical loop anymore.
void invalidate();
};
} // end namespace llvm
#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
|
versaoA.c
|
// Fernanda Lyra Alves
// Ivan Dos Santos Muniz
// Concurrent and Distributed Programming - 2020.2
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
static const int cel_morta = 0;
static const int cel_viva = 1;
static const unsigned int num_geracoes = 2000;
static const unsigned int srand_value = 1985;
static const unsigned int n_threads = 8;
int tabuleiro_n = 2048;
int coord_lim(int coord);
void copia_tabuleiro(int **origem, int **destino);
int vivos(int **tab);
int vizinhos(int **tab, int l, int c);
void decide_vida(int **tab, int **tab_novo, int l, int c);
int main()
{
struct timeval inicio_exe;
gettimeofday(&inicio_exe, NULL);
// Declare and initialize the boards' memory
int **tabuleiro = NULL;
int **tabuleiro_novo = NULL;
tabuleiro = calloc(tabuleiro_n, sizeof(int *));
for (int col = 0; col < tabuleiro_n; col++)
tabuleiro[col] = calloc(tabuleiro_n, sizeof(int));
tabuleiro_novo = calloc(tabuleiro_n, sizeof(int *));
for (int col = 0; col < tabuleiro_n; col++)
tabuleiro_novo[col] = calloc(tabuleiro_n, sizeof(int));
// Generate the initial random board
srand(srand_value);
for (int i = 0; i < tabuleiro_n; i++)
for (int j = 0; j < tabuleiro_n; j++)
tabuleiro[i][j] = rand() % 2;
printf("Condição inicial: %d\n", vivos(tabuleiro));
struct timeval inicio_ger;
gettimeofday(&inicio_ger, NULL);
// Run the generations
for (unsigned int g = 0; g < num_geracoes; g++)
{
int l = 0;
int c = 0;
#pragma omp parallel private(l, c) shared(tabuleiro, tabuleiro_novo) num_threads(n_threads)
{
#pragma omp for
for (l = 0; l < tabuleiro_n; l++)
for (c = 0; c < tabuleiro_n; c++)
decide_vida(tabuleiro, tabuleiro_novo, l, c);
}
copia_tabuleiro(tabuleiro_novo, tabuleiro);
printf("Geração %u: %d\n", g + 1, vivos(tabuleiro));
}
// Free the boards' memory
for (int col = 0; col < tabuleiro_n; col++)
free(tabuleiro[col]);
free(tabuleiro);
for (int col = 0; col < tabuleiro_n; col++)
free(tabuleiro_novo[col]);
free(tabuleiro_novo);
struct timeval fim;
gettimeofday(&fim, NULL);
printf("Tempo de execução total: %lf\nTempo de execução das gerações: %lf\n",
(double)(fim.tv_usec - inicio_exe.tv_usec)/1000000 + (double)(fim.tv_sec - inicio_exe.tv_sec),
(double)(fim.tv_usec - inicio_ger.tv_usec)/1000000 + (double)(fim.tv_sec - inicio_ger.tv_sec));
return 0;
}
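// coord_lim wraps a coordinate around the board edge (toroidal board):
// -1 maps to tabuleiro_n - 1, and tabuleiro_n maps back to 0.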
int coord_lim(int coord)
{
int r;
if (coord >= 0)
r = coord % tabuleiro_n;
else
r = tabuleiro_n + coord;
return r;
}
void copia_tabuleiro(int **origem, int **destino)
{
int l = 0;
#pragma omp parallel shared(origem, destino) private(l) num_threads(n_threads)
{
#pragma omp for
for (l = 0; l < tabuleiro_n; l++)
memcpy(destino[l], origem[l], sizeof(int) * tabuleiro_n);
}
}
int vivos(int **tab)
{
int n_vivos = 0;
int l = 0;
int c = 0;
#pragma omp parallel shared(tab) private(l, c) reduction(+ : n_vivos) num_threads(n_threads)
{
#pragma omp for
for (l = 0; l < tabuleiro_n; l++)
for (c = 0; c < tabuleiro_n; c++)
n_vivos += tab[l][c];
}
return n_vivos;
}
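// vizinhos counts the live cells among the 8 neighbors of cell (l, c),
// wrapping around the edges via coord_lim.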
int vizinhos(int **tab, int l, int c)
{
int vizinhos_linhaacima = tab[coord_lim(l - 1)][coord_lim(c - 1)] + tab[coord_lim(l - 1)][coord_lim(c)] + tab[coord_lim(l - 1)][coord_lim(c + 1)];
int vizinhos_linhaatual = tab[coord_lim(l)][coord_lim(c - 1)] + tab[coord_lim(l)][coord_lim(c + 1)];
int vizinhos_linhaabaixo = tab[coord_lim(l + 1)][coord_lim(c - 1)] + tab[coord_lim(l + 1)][coord_lim(c)] + tab[coord_lim(l + 1)][coord_lim(c + 1)];
return vizinhos_linhaabaixo + vizinhos_linhaatual + vizinhos_linhaacima;
}
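// decide_vida applies Conway's rules to cell (l, c): a live cell with fewer
// than 2 or more than 3 live neighbors dies; a dead cell with exactly 3 live
// neighbors is born; every other cell keeps its state.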
void decide_vida(int **tab, int **tab_novo, int l, int c)
{
int vizinhos_celula = vizinhos(tab, l, c);
if (tab[l][c] == cel_viva && (vizinhos_celula < 2 || vizinhos_celula >= 4))
tab_novo[l][c] = cel_morta;
else if (tab[l][c] == cel_morta && vizinhos_celula == 3)
tab_novo[l][c] = cel_viva;
else
tab_novo[l][c] = tab[l][c];
}
|
GB_builder.c
|
//------------------------------------------------------------------------------
// GB_builder: build a matrix from tuples
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// CALLED BY: GB_build, GB_wait, and GB_transpose
// CALLS: Generated/GB_red_build__* workers
// This function is called by GB_build to build a matrix T for GrB_Matrix_build
// or GrB_Vector_build, by GB_wait to build a matrix T from the list of pending
// tuples, and by GB_transpose to transpose a matrix or vector. Duplicates can
// appear if called by GB_build or GB_wait, but not GB_transpose.
// The indices are provided either as (I_input,J_input) or (I_work,J_work), not
// both. The values are provided as S_input or S_work, not both. On return,
// the *work arrays are either transplanted into T, or freed, since they are
// temporary workspaces.
// The work is done in 5 major Steps, some of which can be skipped, depending
// on how the tuples are provided (*_work or *_input), and whether or not they
// are sorted, or have duplicates. If vdim <= 1, some work is skipped (for
// GrB_Vectors, and single-vector GrB_Matrices). Let e be the # of tuples on
// input. Let p be the # of threads used.
// STEP 1: copy user input. O(e/p) read/write per thread, or skipped.
// STEP 2: sort the tuples. Time: O((e log e)/p), read/write, or skipped if
// the tuples are already sorted.
// STEP 3: count vectors and duplicates. O(e/p) reads, per thread, if no
// duplicates, or skipped if already done. O(e/p) read/writes
// per thread if duplicates appear.
// STEP 4: construct T->h and T->p. O(e/p) reads per thread, or skipped if
// T is a vector.
// STEP 5: assemble the tuples. O(e/p) read/writes per thread, or O(1) if the
// values can be transplanted into T as-is.
// For GrB_Matrix_build: If the input (I_input, J_input, S_input) is already
// sorted with no duplicates, and no typecasting needs to be done, then Step 1
// still must be done (each thread does O(e/p) reads of (I_input,J_input) and
// writes to I_work), but Step 1 also does the work for Step 3. Step 2 and 3
// are skipped. Step 4 does O(e/p) reads per thread (J_input only). Then
// I_work is transplanted into T->i. Step 5 does O(e/p) read/writes per thread
// to copy S into T->x.
// For GrB_Vector_build: as GrB_Matrix_build, Step 1 does O(e/p) read/writes
// per thread. The input is always a vector, so vdim == 1 always holds. Step
// 2 is skipped if the indices are already sorted, and Step 3 does no work at
// all unless duplicates appear. Step 4 takes no time, for any vector. Step 5
// does O(e/p) reads/writes per thread.
// For GrB_reduce_to_vector: like GrB_Vector_build, but many duplicates are
// likely, and the indices will not be sorted. The input is always a single
// vector (vdim == 1). Step 1 only does a parallel memcpy, from I_input to
// I_work. Step 2 takes O((e log e)/p) time to sort the (i,k) tuples. Step 3
// does O(e/p) read/writes. Step 4 takes no time. Step 5 does O(e/p)
// read/writes per thread.
// For GB_wait: the pending tuples are provided as I_work, J_work, and S_work,
// so Step 1 is skipped (no need to check for invalid indices). The input
// J_work may be null (vdim can be anything, since GB_wait is used for both
// vectors and matrices). The tuples might be in sorted order already, which
// is known precisely from A->Pending->sorted. Step 2 does O((e log e)/p)
// work to sort the tuples. Duplicates may appear, and out-of-order tuples are
// likely. Step 3 does O(e/p) read/writes. Step 4 does O(e/p) reads per
// thread of (I_work,J_work), or just I_work. Step 5 does O(e/p) read/writes
// per thread, or O(1) time if S_work can be transplanted into T->x.
// For GB_transpose: uses I_work, J_work, and either S_input (if no op applied
// to the values) or S_work (if an op was applied to the A->x values). This is
// only done for matrices, not vectors, so vdim > 1 will always hold. The
// indices are valid so Step 1 is skipped. The tuples are not sorted, so Step
// 2 takes O((e log e)/p) time to do the sort. There are no duplicates, so
// Step 3 only does O(e/p) reads of J_work to count the vectors in each slice.
// Step 4 only does O(e/p) reads of J_work to compute T->h and T->p. Step 5
// does O(e/p) read/writes per thread, but it uses the simpler case in
// GB_reduce_build_template since no duplicates can appear. It is unlikely
// that S_work can be transplanted into T->x since the input will almost
// always be unsorted.
#include "GB_build.h"
#include "GB_sort.h"
#ifndef GBCOMPACT
#include "GB_red__include.h"
#endif
#define GB_I_WORK(t) (((t) < 0) ? -1 : I_work [t])
#define GB_J_WORK(t) (((t) < 0) ? -1 : ((J_work == NULL) ? 0 : J_work [t]))
#define GB_K_WORK(t) (((t) < 0) ? -1 : ((K_work == NULL) ? t : K_work [t]))
#define GB_FREE_WORK \
{ \
GB_FREE_MEMORY (tstart_slice, nthreads+1, sizeof (int64_t)) ; \
GB_FREE_MEMORY (tnvec_slice, nthreads+1, sizeof (int64_t)) ; \
GB_FREE_MEMORY (tnz_slice, nthreads+1, sizeof (int64_t)) ; \
GB_FREE_MEMORY (kbad, nthreads, sizeof (int64_t)) ; \
GB_FREE_MEMORY (ilast_slice, nthreads, sizeof (int64_t)) ; \
GB_FREE_MEMORY (*I_work_handle, ijslen, sizeof (int64_t)) ; \
GB_FREE_MEMORY (*J_work_handle, ijslen, sizeof (int64_t)) ; \
GB_FREE_MEMORY (*S_work_handle, ijslen, ssize) ; \
GB_FREE_MEMORY (K_work, nvals, sizeof (int64_t)) ; \
GB_FREE_MEMORY (W0, nvals, sizeof (int64_t)) ; \
GB_FREE_MEMORY (W1, nvals, sizeof (int64_t)) ; \
GB_FREE_MEMORY (W2, nvals, sizeof (int64_t)) ; \
}
//------------------------------------------------------------------------------
// GB_builder
//------------------------------------------------------------------------------
GrB_Info GB_builder // build a matrix from tuples
(
GrB_Matrix *Thandle, // matrix T to build
const GrB_Type ttype, // type of output matrix T
const int64_t vlen, // length of each vector of T
const int64_t vdim, // number of vectors in T
const bool is_csc, // true if T is CSC, false if CSR
int64_t **I_work_handle, // for (i,k) or (j,i,k) tuples
int64_t **J_work_handle, // for (j,i,k) tuples
GB_void **S_work_handle, // array of values of tuples, size ijslen
bool known_sorted, // true if tuples known to be sorted
bool known_no_duplicates, // true if tuples known to not have dupl
int64_t ijslen, // size of I_work and J_work arrays
const bool is_matrix, // true if T a GrB_Matrix, false if vector
const bool ijcheck, // true if I_input,J_input must be checked
const int64_t *restrict I_input,// original indices, size nvals
const int64_t *restrict J_input,// original indices, size nvals
const GB_void *restrict S_input,// array of values of tuples, size nvals
const int64_t nvals, // number of tuples, and size of K_work
const GrB_BinaryOp dup, // binary function to assemble duplicates,
// if NULL use the SECOND operator to
// keep the most recent duplicate.
const GB_Type_code scode, // GB_Type_code of S_work or S_input array
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (Thandle != NULL) ;
ASSERT (nvals >= 0) ;
ASSERT (scode <= GB_UDT_code) ;
ASSERT_OK (GB_check (ttype, "ttype for builder", GB0)) ;
ASSERT_OK_OR_NULL (GB_check (dup, "dup for builder", GB0)) ;
ASSERT (I_work_handle != NULL) ;
ASSERT (J_work_handle != NULL) ;
ASSERT (S_work_handle != NULL) ;
//--------------------------------------------------------------------------
// get S
//--------------------------------------------------------------------------
GB_void *restrict S_work = (*S_work_handle) ;
const GB_void *restrict S = (S_work == NULL) ? S_input : S_work ;
size_t tsize = ttype->size ;
size_t ssize = GB_code_size (scode, tsize) ;
ASSERT (S != NULL) ;
//==========================================================================
// symbolic phase of the build =============================================
//==========================================================================
// The symbolic phase sorts the tuples and finds any duplicates. The
// output matrix T is constructed (not including T->i and T->x), and T->h
// and T->p are computed. Then I_work is transplanted into T->i, or T->i is
// allocated. T->x is then allocated. It is not computed until the
// numeric phase.
// When this function returns, I_work is either freed or transplanted into
// T->i. J_work is freed, and the I_work and J_work pointers (in the
// caller) are set to NULL by setting their handles to NULL. Note that
// J_work may already be NULL on input, if T has one or zero vectors
// (J_work_handle is always non-NULL however).
GrB_Info info ;
GrB_Matrix T = NULL ;
(*Thandle) = NULL ;
int64_t *restrict I_work = (*I_work_handle) ;
int64_t *restrict J_work = (*J_work_handle) ;
int64_t *restrict K_work = NULL ;
int64_t *restrict W0 = NULL ;
int64_t *restrict W1 = NULL ;
int64_t *restrict W2 = NULL ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (nvals, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
int64_t *restrict tstart_slice = NULL ; // size nthreads+1
int64_t *restrict tnvec_slice = NULL ; // size nthreads+1
int64_t *restrict tnz_slice = NULL ; // size nthreads+1
int64_t *restrict kbad = NULL ; // size nthreads
int64_t *restrict ilast_slice = NULL ; // size [nthreads]
GB_CALLOC_MEMORY (tstart_slice, nthreads+1, sizeof (int64_t)) ;
GB_CALLOC_MEMORY (tnvec_slice, nthreads+1, sizeof (int64_t)) ;
GB_CALLOC_MEMORY (tnz_slice, nthreads+1, sizeof (int64_t)) ;
GB_CALLOC_MEMORY (kbad, nthreads, sizeof (int64_t)) ;
GB_CALLOC_MEMORY (ilast_slice, nthreads, sizeof (int64_t)) ;
if (tstart_slice == NULL || tnvec_slice == NULL || tnz_slice == NULL ||
kbad == NULL || ilast_slice == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// partition the tuples for the threads
//--------------------------------------------------------------------------
// Thread tid handles tuples tstart_slice [tid] to tstart_slice [tid+1]-1.
// Each thread handles about the same number of tuples. This partition
// depends only on nvals.
tstart_slice [0] = 0 ;
for (int tid = 1 ; tid < nthreads ; tid++)
{
tstart_slice [tid] = GB_PART (tid, nvals, nthreads) ;
}
tstart_slice [nthreads] = nvals ;
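    // Illustrative example (not in the original source): with nvals = 10 and
    // nthreads = 3, GB_PART would give tstart_slice = {0, 3, 6, 10}, so
    // thread 0 handles tuples 0..2, thread 1 handles 3..5, and thread 2
    // handles 6..9.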
// tstart_slice [tid]: first tuple in slice tid
// tnvec_slice [tid]: # of vectors that start in a slice. If a vector
// starts in one slice and ends in another, it is
// counted as being in the first slice.
// tnz_slice [tid]: # of entries in a slice after removing duplicates
// sentinel values for the final cumulative sum
tnvec_slice [nthreads] = 0 ;
tnz_slice [nthreads] = 0 ;
// this becomes true if the first pass computes tnvec_slice and tnz_slice,
// and if the (I_input,J_input) tuples were found to be already sorted with
// no duplicates present.
bool tnvec_and_tnz_slice_computed = false ;
//--------------------------------------------------------------------------
// STEP 1: copy user input and check if valid
//--------------------------------------------------------------------------
// If the indices are provided by (I_input,J_input), then import them into
// (I_work,J_work) and check if they are valid, and sorted. If the input
// happens to be already sorted, then duplicates are detected and the # of
// vectors in each slice is counted.
if (I_work == NULL)
{
//----------------------------------------------------------------------
// allocate I_work
//----------------------------------------------------------------------
// allocate workspace to load and sort the index tuples:
// vdim <= 1: I_work and K_work for (i,k) tuples, where i = I_input [k]
// vdim > 1: also J_work for (j,i,k) tuples where i = I_input [k] and
// j = J_input [k]. If the tuples are found to be already sorted on
// input, then J_work is not allocated, and J_input is used instead.
// The k value in the tuple gives the position in the original set of
// tuples: I_input [k] and S [k] when vdim <= 1, and also J_input [k]
// for matrices with vdim > 1.
// The workspace I_work and J_work are allocated here but freed (or
// transplanted) inside GB_builder. K_work is allocated, used, and
// freed in GB_builder.
ASSERT (J_work == NULL) ;
GB_MALLOC_MEMORY (I_work, nvals, sizeof (int64_t)) ;
(*I_work_handle) = I_work ;
ijslen = nvals ;
if (I_work == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// create the tuples to sort, and check for any invalid indices
//----------------------------------------------------------------------
known_sorted = true ;
bool no_duplicates_found = true ;
if (nvals == 0)
{
//------------------------------------------------------------------
// nothing to do
//------------------------------------------------------------------
;
}
else if (is_matrix)
{
//------------------------------------------------------------------
// C is a matrix; check both I_input and J_input
//------------------------------------------------------------------
ASSERT (J_input != NULL) ;
ASSERT (I_work != NULL) ;
ASSERT (vdim >= 0) ;
ASSERT (I_input != NULL) ;
#pragma omp parallel for num_threads(nthreads) schedule(static) \
reduction(&&:known_sorted) reduction(&&:no_duplicates_found)
for (int tid = 0 ; tid < nthreads ; tid++)
{
kbad [tid] = -1 ;
int64_t my_tnvec = 0 ;
int64_t kstart = tstart_slice [tid] ;
int64_t kend = tstart_slice [tid+1] ;
int64_t ilast = (kstart == 0) ? -1 : I_input [kstart-1] ;
int64_t jlast = (kstart == 0) ? -1 : J_input [kstart-1] ;
for (int64_t k = kstart ; k < kend ; k++)
{
// get k-th index from user input: (i,j)
int64_t i = I_input [k] ;
int64_t j = J_input [k] ;
if (i < 0 || i >= vlen || j < 0 || j >= vdim)
{
// halt if out of bounds
kbad [tid] = k ;
break ;
}
// check if the tuples are already sorted
known_sorted = known_sorted &&
((jlast < j) || (jlast == j && ilast <= i)) ;
// check if this entry is a duplicate of the one before it
no_duplicates_found = no_duplicates_found &&
(!(jlast == j && ilast == i)) ;
// copy the tuple into I_work. J_work is done later.
I_work [k] = i ;
if (j > jlast)
{
// vector j starts in this slice (but this is
// valid only if J_input is sorted on input)
my_tnvec++ ;
}
// log the last index seen
ilast = i ; jlast = j ;
}
// these are valid only if I_input and J_input are sorted on
// input, with no duplicates present.
tnvec_slice [tid] = my_tnvec ;
tnz_slice [tid] = kend - kstart ;
}
// collect the report from each thread
for (int tid = 0 ; tid < nthreads ; tid++)
{
if (kbad [tid] >= 0)
{
// invalid index
int64_t i = I_input [kbad [tid]] ;
int64_t j = J_input [kbad [tid]] ;
int64_t row = is_csc ? i : j ;
int64_t col = is_csc ? j : i ;
int64_t nrows = is_csc ? vlen : vdim ;
int64_t ncols = is_csc ? vdim : vlen ;
GB_FREE_WORK ;
return (GB_ERROR (GrB_INDEX_OUT_OF_BOUNDS, (GB_LOG,
"index ("GBd","GBd") out of bounds,"
" must be < ("GBd", "GBd")", row, col, nrows, ncols))) ;
}
}
// if the tuples were found to be already in sorted order, and if
// no duplicates were found, then tnvec_slice and tnz_slice are now
        // valid. Otherwise, they can only be computed after sorting.
tnvec_and_tnz_slice_computed = known_sorted && no_duplicates_found ;
//------------------------------------------------------------------
// allocate J_work, if needed
//------------------------------------------------------------------
if (vdim > 1 && !known_sorted)
{
// copy J_input into J_work, so the tuples can be sorted
GB_MALLOC_MEMORY (J_work, nvals, sizeof (int64_t)) ;
(*J_work_handle) = J_work ;
if (J_work == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
GB_memcpy (J_work, J_input, nvals * sizeof (int64_t), nthreads);
}
else
{
// J_work is a shallow copy of J_input. The pointer is not
// copied into (*J_work_handle), so it will not be freed.
// J_input is not modified, even though it is typecast to the
// int64_t *J_work, since J_work is not modified in this case.
J_work = (int64_t *) J_input ;
}
}
else if (ijcheck)
{
//------------------------------------------------------------------
// C is a typecasted GrB_Vector; check only I_input
//------------------------------------------------------------------
ASSERT (I_input != NULL) ;
ASSERT (J_input == NULL) ;
ASSERT (vdim == 1) ;
#pragma omp parallel for num_threads(nthreads) schedule(static) \
reduction(&&:known_sorted) reduction(&&:no_duplicates_found)
for (int tid = 0 ; tid < nthreads ; tid++)
{
kbad [tid] = -1 ;
int64_t kstart = tstart_slice [tid] ;
int64_t kend = tstart_slice [tid+1] ;
int64_t ilast = (kstart == 0) ? -1 : I_input [kstart-1] ;
for (int64_t k = kstart ; k < kend ; k++)
{
// get k-th index from user input: (i)
int64_t i = I_input [k] ;
if (i < 0 || i >= vlen)
{
// halt if out of bounds
kbad [tid] = k ;
break ;
}
// check if the tuples are already sorted
known_sorted = known_sorted && (ilast <= i) ;
// check if this entry is a duplicate of the one before it
no_duplicates_found = no_duplicates_found &&
(!(ilast == i)) ;
// copy the tuple into the work arrays to be sorted
I_work [k] = i ;
// log the last index seen
ilast = i ;
}
}
// collect the report from each thread
for (int tid = 0 ; tid < nthreads ; tid++)
{
if (kbad [tid] >= 0)
{
// invalid index
int64_t i = I_input [kbad [tid]] ;
GB_FREE_WORK ;
return (GB_ERROR (GrB_INDEX_OUT_OF_BOUNDS, (GB_LOG,
"index ("GBd") out of bounds, must be < ("GBd")",
i, vlen))) ;
}
}
}
else
{
//------------------------------------------------------------------
// GB_reduce_to_vector: do not check I_input, assume not sorted
//------------------------------------------------------------------
// Many duplicates are possible, since the tuples are being used to
// construct a single vector. For a CSC format, each entry A(i,j)
// becomes an (i,aij) tuple, with the vector index j discarded. All
// entries in a single row i are reduced to a single entry in the
// vector. The input is unlikely to be sorted, so do not bother to
// check.
GB_memcpy (I_work, I_input, nvals * sizeof (int64_t), nthreads) ;
known_sorted = false ;
}
//----------------------------------------------------------------------
// determine if duplicates are possible
//----------------------------------------------------------------------
// The input is now known to be sorted, or not. If it is sorted, and
// if no duplicates were found, then it is known to have no duplicates.
// Otherwise, duplicates might appear, but a sort is required first to
// check for duplicates.
known_no_duplicates = known_sorted && no_duplicates_found ;
}
//--------------------------------------------------------------------------
// STEP 2: sort the tuples in ascending order
//--------------------------------------------------------------------------
// If the tuples are known to already be sorted, Step 2 is skipped. In
// that case, K_work is NULL (not allocated), which implicitly means that
// K_work [k] = k for all k = 0:nvals-1.
if (!known_sorted)
{
// create the k part of each tuple
GB_MALLOC_MEMORY (K_work, nvals, sizeof (int64_t)) ;
if (K_work == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
// The k part of each tuple (i,k) or (j,i,k) records the original
// position of the tuple in the input list. This allows an unstable
        // sorting algorithm to be used. Since k is unique, it forces the
// result of the sort to be stable regardless of whether or not the
// sorting algorithm is stable. It also keeps track of where the
// numerical value of the tuple can be found; it is in S[k] for the
// tuple (i,k) or (j,i,k), regardless of where the tuple appears in the
// list after it is sorted.
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < nvals ; k++)
{
K_work [k] = k ;
}
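        // Illustrative example (not in the original source): two duplicate
        // tuples with the same (j,i) but k = 4 and k = 7 sort as (j,i,4)
        // followed by (j,i,7), so ties break by original position and the
        // sort behaves as if it were stable.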
// sort all the tuples
if (vdim > 1)
{
//------------------------------------------------------------------
// sort a set of (j,i,k) tuples
//------------------------------------------------------------------
if (nthreads == 1)
{
//--------------------------------------------------------------
// sequential quicksort
//--------------------------------------------------------------
GB_qsort_3 (J_work, I_work, K_work, nvals) ;
}
else
{
//--------------------------------------------------------------
// parallel mergesort
//--------------------------------------------------------------
GB_MALLOC_MEMORY (W0, nvals, sizeof (int64_t)) ;
GB_MALLOC_MEMORY (W1, nvals, sizeof (int64_t)) ;
GB_MALLOC_MEMORY (W2, nvals, sizeof (int64_t)) ;
if (W0 == NULL || W1 == NULL || W2 == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
GB_msort_3 (J_work, I_work, K_work, W0, W1, W2, nvals,
nthreads) ;
GB_FREE_MEMORY (W0, nvals, sizeof (int64_t)) ;
GB_FREE_MEMORY (W1, nvals, sizeof (int64_t)) ;
GB_FREE_MEMORY (W2, nvals, sizeof (int64_t)) ;
}
}
else
{
//------------------------------------------------------------------
// sort a set of (i,k) tuples
//------------------------------------------------------------------
if (nthreads == 1)
{
//--------------------------------------------------------------
// sequential quicksort
//--------------------------------------------------------------
GB_qsort_2 (I_work, K_work, nvals) ;
}
else
{
//--------------------------------------------------------------
// parallel mergesort
//--------------------------------------------------------------
GB_MALLOC_MEMORY (W0, nvals, sizeof (int64_t)) ;
GB_MALLOC_MEMORY (W1, nvals, sizeof (int64_t)) ;
if (W0 == NULL || W1 == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
GB_msort_2 (I_work, K_work, W0, W1, nvals, nthreads) ;
GB_FREE_MEMORY (W0, nvals, sizeof (int64_t)) ;
GB_FREE_MEMORY (W1, nvals, sizeof (int64_t)) ;
}
}
}
//--------------------------------------------------------------------------
// STEP 3: count vectors and duplicates in each slice
//--------------------------------------------------------------------------
// Duplicates are located, counted and their indices negated. The # of
// vectors in each slice is counted. If the indices are known to not have
// duplicates, then only the vectors are counted. Counting the # of
// vectors is skipped if already done by Step 1.
if (known_no_duplicates)
{
//----------------------------------------------------------------------
// no duplicates: just count # vectors in each slice
//----------------------------------------------------------------------
// This is much faster, particularly if the # of vectors in each slice
// has already been computed.
#ifdef GB_DEBUG
{
// assert that there are no duplicates
int64_t ilast = -1, jlast = -1 ;
for (int64_t t = 0 ; t < nvals ; t++)
{
int64_t i = GB_I_WORK (t), j = GB_J_WORK (t) ;
bool is_duplicate = (i == ilast && j == jlast) ;
ASSERT (!is_duplicate) ;
ilast = i ; jlast = j ;
}
}
#endif
if (vdim <= 1)
{
// all tuples appear in at most one vector, and there are no
// duplicates, so there is no need to scan I_work or J_work.
for (int tid = 0 ; tid < nthreads ; tid++)
{
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
tnvec_slice [tid] = 0 ;
tnz_slice [tid] = tend - tstart ;
}
tnvec_slice [0] = (nvals == 0) ? 0 : 1 ;
}
else
{
// count the # of unique vector indices in J_work. No need to scan
// I_work since there are no duplicates to be found. Also no need
// to compute them if already found in Step 1.
if (!tnvec_and_tnz_slice_computed)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnvec = 0 ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t j = J_work [t] ;
if (j > jlast)
{
// vector j starts in this slice
my_tnvec++ ;
jlast = j ;
}
}
tnvec_slice [tid] = my_tnvec ;
tnz_slice [tid] = tend - tstart ;
}
}
}
}
else
{
//----------------------------------------------------------------------
// look for duplicates and count # vectors in each slice
//----------------------------------------------------------------------
for (int tid = 0 ; tid < nthreads ; tid++)
{
int64_t tstart = tstart_slice [tid] ;
ilast_slice [tid] = GB_I_WORK (tstart-1) ;
}
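        // (comment added for clarity) ilast_slice is filled sequentially
        // before the parallel loop below: each thread reads the index just
        // before its slice, and that entry could be overwritten with the
        // duplicate flag (-1) by the neighboring thread if it were read
        // inside the parallel region.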
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnvec = 0 ;
int64_t my_ndupl = 0 ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t ilast = ilast_slice [tid] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t i = I_work [t] ;
int64_t j = GB_J_WORK (t) ;
// tuples are now sorted but there may be duplicates
ASSERT ((jlast < j) || (jlast == j && ilast <= i)) ;
// check if (j,i,k) is a duplicate
if (i == ilast && j == jlast)
{
// flag the tuple as a duplicate
I_work [t] = -1 ;
my_ndupl++ ;
                    // the sort places earlier duplicate tuples (with smaller
                    // k) before later ones (with larger k).
ASSERT (GB_K_WORK (t-1) < GB_K_WORK (t)) ;
}
else
{
// this is a new tuple
if (j > jlast)
{
// vector j starts in this slice
my_tnvec++ ;
jlast = j ;
}
ilast = i ;
}
}
tnvec_slice [tid] = my_tnvec ;
tnz_slice [tid] = (tend - tstart) - my_ndupl ;
}
}
//--------------------------------------------------------------------------
// find total # of vectors and duplicates in all tuples
//--------------------------------------------------------------------------
// Replace tnvec_slice with its cumulative sum, after which each slice tid
// will be responsible for the # vectors in T that range from tnvec_slice
// [tid] to tnvec_slice [tid+1]-1.
GB_cumsum (tnvec_slice, nthreads, NULL, 1) ;
int64_t tnvec = tnvec_slice [nthreads] ;
// Replace tnz_slice with its cumulative sum
GB_cumsum (tnz_slice, nthreads, NULL, 1) ;
// find the total # of final entries, after assembling duplicates
int64_t tnz = tnz_slice [nthreads] ;
int64_t ndupl = nvals - tnz ;
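    // Illustrative example (not in the original source): with nthreads = 3
    // and per-slice vector counts {2, 1, 3} plus the 0 sentinel, the
    // cumulative sum gives tnvec_slice = {0, 2, 3, 6} and tnvec = 6; slice
    // tid then writes its vectors starting at position tnvec_slice [tid].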
//--------------------------------------------------------------------------
// allocate T; always hypersparse
//--------------------------------------------------------------------------
// [ allocate T; allocate T->p and T->h but do not initialize them.
// T is always hypersparse.
GB_NEW (&T, ttype, vlen, vdim, GB_Ap_malloc, is_csc, GB_FORCE_HYPER,
GB_ALWAYS_HYPER, tnvec, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_WORK ;
return (info) ;
}
ASSERT (T->is_hyper) ;
ASSERT (T->nzmax == 0) ; // T->i and T->x not yet allocated
ASSERT (T->x == NULL) ;
ASSERT (T->i == NULL) ;
(*Thandle) = T ;
//--------------------------------------------------------------------------
// STEP 4: construct the vector pointers and hyperlist for T
//--------------------------------------------------------------------------
// Step 4 scans the J_work indices and constructs T->h and T->p.
int64_t *restrict Th = T->h ;
int64_t *restrict Tp = T->p ;
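    // Illustrative example (not in the original source): for sorted vector
    // indices J = {0, 0, 2, 2, 2, 5} with no duplicates, Step 4 produces
    // Th = {0, 2, 5} and Tp = {0, 2, 5}; Tp [tnvec] = 6 is set after the
    // scan, below.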
if (vdim <= 1)
{
//----------------------------------------------------------------------
// special case for vectors
//----------------------------------------------------------------------
ASSERT (tnvec == 0 || tnvec == 1) ;
if (tnvec > 0)
{
Th [0] = 0 ;
Tp [0] = 0 ;
}
}
else if (ndupl == 0)
{
//----------------------------------------------------------------------
// no duplicates appear
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnvec = tnvec_slice [tid] ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t j = GB_J_WORK (t) ;
if (j > jlast)
{
// vector j starts in this slice
Th [my_tnvec] = j ;
Tp [my_tnvec] = t ;
my_tnvec++ ;
jlast = j ;
}
}
}
}
else
{
//----------------------------------------------------------------------
// it is known that at least one duplicate appears
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnz = tnz_slice [tid] ;
int64_t my_tnvec = tnvec_slice [tid] ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t i = I_work [t] ;
int64_t j = GB_J_WORK (t) ;
if (i >= 0)
{
// this is a new tuple
if (j > jlast)
{
// vector j starts in this slice
Th [my_tnvec] = j ;
Tp [my_tnvec] = my_tnz ;
my_tnvec++ ;
jlast = j ;
}
my_tnz++ ;
}
}
}
}
// log the end of the last vector
T->nvec_nonempty = tnvec ;
T->nvec = tnvec ;
Tp [tnvec] = tnz ;
ASSERT (T->nvec == T->plen) ;
T->magic = GB_MAGIC ; // T->p and T->h are now valid ]
//--------------------------------------------------------------------------
// free J_work if it exists
//--------------------------------------------------------------------------
ASSERT (J_work_handle != NULL) ;
GB_FREE_MEMORY (*J_work_handle, ijslen, sizeof (int64_t)) ;
J_work = NULL ;
//--------------------------------------------------------------------------
// allocate T->i
//--------------------------------------------------------------------------
T->nzmax = GB_IMAX (tnz, 1) ;
if (ndupl == 0)
{
// shrink I_work from size ijslen to size T->nzmax
if (T->nzmax < ijslen)
{
// this cannot fail since the size is shrinking.
bool ok ;
GB_REALLOC_MEMORY (I_work, T->nzmax, ijslen, sizeof (int64_t), &ok);
ASSERT (ok) ;
}
// transplant I_work into T->i
T->i = I_work ;
I_work = NULL ;
(*I_work_handle) = NULL ;
}
else
{
// duplicates exist, so allocate a new T->i. I_work must be freed later
GB_MALLOC_MEMORY (T->i, tnz, sizeof (int64_t)) ;
if (T->i == NULL)
{
// out of memory
GB_MATRIX_FREE (&T) ;
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
}
int64_t *restrict Ti = T->i ;
//==========================================================================
// numerical phase of the build: assemble any duplicates
//==========================================================================
// The tuples have been sorted. Assemble any duplicates with a switch
// factory of built-in workers, or four generic workers. The vector
// pointers T->p and hyperlist T->h (if hypersparse) have already been
// computed.
// If there are no duplicates, T->i holds the row indices of the tuple.
// Otherwise, the row indices are still in I_work. K_work holds the
// positions of each tuple in the array S. The tuples are sorted so that
// duplicates are adjacent to each other and they appear in the order they
// appeared in the original tuples. This method assembles the duplicates
    // and computes T->i and T->x from I_work, K_work, and S. If no
    // duplicates appear, T->i is already computed, and S just
// needs to be copied and permuted into T->x.
// The (i,k,S[k]) tuples are held in two integer arrays: (1) I_work or T->i,
// and (2) K_work, and an array S of numerical values. S has not been
// sorted, nor even accessed yet. It is identical to the original unsorted
// tuples. The (i,k,S[k]) tuple holds the row index i, the position k, and
// the value S [k]. This entry becomes T(i,j) = S [k] in the matrix T, and
// duplicates (if any) are assembled via the dup operator.
//--------------------------------------------------------------------------
// get opcodes and check types
//--------------------------------------------------------------------------
// With GB_build, there can be 1 to 2 different types.
// T->type is identical to the types of x,y,z for z=dup(x,y).
// dup is never NULL and all its three types are the same
    //      The type of S (scode) can be different but must be compatible
// with T->type
// With GB_wait, there can be 1 to 5 different types:
// The pending tuples are in S, of type scode which must be
// compatible with dup->ytype and T->type
// z = dup (x,y): can be NULL or have 1 to 3 different types
// T->type: must be compatible with all above types.
    // dup may be NULL, in which case it is assumed to be the implicit SECOND
// operator, with all three types equal to T->type
GrB_Type xtype, ytype, ztype ;
GxB_binary_function fdup ;
#ifndef GBCOMPACT
GB_Opcode opcode ;
#endif
GB_Type_code tcode = ttype->code ;
bool op_2nd ;
ASSERT_OK (GB_check (ttype, "ttype for build_factorize", GB0)) ;
if (dup == NULL)
{
//----------------------------------------------------------------------
// dup is the implicit SECOND operator
//----------------------------------------------------------------------
// z = SECOND (x,y) where all three types are the same as ttype
// T(i,j) = (ttype) S(k) will be done for all tuples.
#ifndef GBCOMPACT
opcode = GB_SECOND_opcode ;
#endif
ASSERT (GB_op_is_second (dup, ttype)) ;
xtype = ttype ;
ytype = ttype ;
ztype = ttype ;
fdup = NULL ;
op_2nd = true ;
}
else
{
//----------------------------------------------------------------------
// dup is an explicit operator
//----------------------------------------------------------------------
// T(i,j) = (ttype) S[k] will be done for the first tuple.
// for subsequent tuples: T(i,j) += S[k], via the dup operator and
// typecasting:
//
// y = (dup->ytype) S[k]
// x = (dup->xtype) T(i,j)
// z = (dup->ztype) dup (x,y)
// T(i,j) = (ttype) z
ASSERT_OK (GB_check (dup, "dup for build_factory", GB0)) ;
#ifndef GBCOMPACT
opcode = dup->opcode ;
#endif
xtype = dup->xtype ;
ytype = dup->ytype ;
ztype = dup->ztype ;
fdup = dup->function ;
op_2nd = GB_op_is_second (dup, ttype) ;
}
//--------------------------------------------------------------------------
// get the sizes and codes of each type
//--------------------------------------------------------------------------
GB_Type_code zcode = ztype->code ;
GB_Type_code xcode = xtype->code ;
GB_Type_code ycode = ytype->code ;
ASSERT (GB_code_compatible (tcode, scode)) ; // T(i,j) = (ttype) S
ASSERT (GB_code_compatible (ycode, scode)) ; // y = (ytype) S
ASSERT (GB_Type_compatible (xtype, ttype)) ; // x = (xtype) T(i,j)
ASSERT (GB_Type_compatible (ttype, ztype)) ; // T(i,j) = (ttype) z
size_t zsize = ztype->size ;
size_t xsize = xtype->size ;
size_t ysize = ytype->size ;
// so that tcode can match scode
GB_Type_code tcode2 = (tcode == GB_UCT_code) ? GB_UDT_code : tcode ;
GB_Type_code scode2 = (scode == GB_UCT_code) ? GB_UDT_code : scode ;
// no typecasting if all 5 types are the same
bool nocasting = (tcode2 == scode2) &&
(ttype == xtype) && (ttype == ytype) && (ttype == ztype) ;
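    // Illustrative example (not in the original source): building a GrB_FP64
    // matrix from double tuples with dup = GrB_PLUS_FP64 gives tcode2 ==
    // scode2 and xtype == ytype == ztype == ttype, so nocasting is true and
    // the typecasting workers below are skipped.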
//--------------------------------------------------------------------------
// STEP 5: assemble the tuples
//--------------------------------------------------------------------------
bool copy_S_into_T = (nocasting && known_sorted && ndupl == 0) ;
if (copy_S_into_T && S_work != NULL)
{
//----------------------------------------------------------------------
// transplant S_work into T->x
//----------------------------------------------------------------------
// No typecasting is needed, the tuples were originally in sorted
// order, and no duplicates appear. All that is required is to copy S
// into Tx. S can be directly transplanted into T->x since S is
// provided as S_work. GB_builder must either transplant or free
// S_work. The transplant can be used by GB_wait (whenever the tuples
// are already sorted, with no duplicates, and no typecasting is
// needed, since S_work is always A->Pending->x). This transplant can
// rarely be used for GB_transpose (when op is NULL and the transposed
// tuples happen to be sorted, which is unlikely).
T->x = S_work ;
S_work = NULL ;
(*S_work_handle) = NULL ;
}
else
{
//----------------------------------------------------------------------
// allocate T->x
//----------------------------------------------------------------------
GB_MALLOC_MEMORY (T->x, tnz, ttype->size) ;
if (T->x == NULL)
{
// out of memory
GB_MATRIX_FREE (&T) ;
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
GB_void *restrict Tx = T->x ;
if (copy_S_into_T)
{
//------------------------------------------------------------------
// copy S into T->x
//------------------------------------------------------------------
// No typecasting is needed, the tuples were originally in sorted
// order, and no duplicates appear. All that is required is to
// copy S into Tx. S cannot be transplanted into T->x since
// S_work is NULL and S_input cannot be modified by GB_builder.
ASSERT (S_work == NULL) ;
ASSERT (S == S_input) ;
GB_memcpy (Tx, S, nvals * tsize, nthreads) ;
}
else if (nocasting)
{
//------------------------------------------------------------------
// assemble the values, S, into T, no typecasting needed
//------------------------------------------------------------------
// S (either S_work or S_input) must be permuted and copied into
// T->x, since the tuples had to be sorted, or duplicates appear.
// Any duplicates are now assembled.
// There are 44 common cases of this function for built-in types
// and 8 associative operators: MIN, MAX, PLUS, TIMES for 10 types
            // (all but boolean), and OR, AND, XOR, and EQ for boolean.
// In addition, the FIRST and SECOND operators are hard-coded, for
// another 22 workers, since SECOND is used by GB_wait and since
// FIRST is useful for keeping the first tuple seen. It is
// controlled by the GB_INCLUDE_SECOND_OPERATOR definition, so they
// do not appear in GB_reduce_to_* where the FIRST and SECOND
// operators are not needed.
// Early exit cannot be exploited, so the terminal is ignored.
#define GB_INCLUDE_SECOND_OPERATOR
bool done = false ;
#define GB_red(opname,aname) GB_red_build_ ## opname ## aname
#define GB_RED_WORKER(opname,aname,atype) \
{ \
info = GB_red (opname, aname) ((atype *) Tx, Ti, \
(atype *) S, nvals, ndupl, I_work, K_work, tstart_slice,\
tnz_slice, nthreads) ; \
done = (info != GrB_NO_VALUE) ; \
} \
break ;
//------------------------------------------------------------------
// launch the switch factory
//------------------------------------------------------------------
#ifndef GBCOMPACT
// controlled by opcode and typecode
GB_Type_code typecode = tcode ;
#include "GB_red_factory.c"
#endif
//------------------------------------------------------------------
// generic worker
//------------------------------------------------------------------
if (!done)
{
//--------------------------------------------------------------
// no typecasting, but use the fdup function pointer and memcpy
//--------------------------------------------------------------
// Either the fdup operator or type of S and T are
// user-defined, or fdup is not an associative operator handled
// by the GB_red_factory, or some combination of these
// conditions. User-defined types cannot be typecasted, so
// this handles all user-defined types.
// Tx [p] = (ttype) S [k], but with no typecasting
#define GB_CAST_ARRAY_TO_ARRAY(Tx,p,S,k) \
memcpy (Tx +((p)*tsize), S +((k)*tsize), tsize) ;
if (op_2nd)
{
//----------------------------------------------------------
// dup is the SECOND operator, with no typecasting
//----------------------------------------------------------
// Tx [p] += (ttype) S [k], but 2nd op and no typecasting
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,S,k) \
GB_CAST_ARRAY_TO_ARRAY(Tx,p,S,k)
#include "GB_reduce_build_template.c"
}
else
{
//----------------------------------------------------------
// dup is another operator, with no typecasting needed
//----------------------------------------------------------
// Tx [p] += (ttype) S [k], but with no typecasting
#undef GB_ADD_CAST_ARRAY_TO_ARRAY
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,S,k) \
fdup (Tx +((p)*tsize), Tx +((p)*tsize), S +((k)*tsize));
#include "GB_reduce_build_template.c"
}
}
}
else
{
//------------------------------------------------------------------
// assemble the values S into T, typecasting as needed
//------------------------------------------------------------------
// S (either S_work or S_input) must be permuted and copied into
// T->x, since the tuples had to be sorted, or duplicates appear.
// Any duplicates are now assembled. Not all of the 5 types are
// the same, but all of them are built-in since user-defined types
// cannot be typecasted.
GB_cast_function cast_S_to_T = GB_cast_factory (tcode, scode) ;
GB_cast_function cast_S_to_Y = GB_cast_factory (ycode, scode) ;
GB_cast_function cast_T_to_X = GB_cast_factory (xcode, tcode) ;
GB_cast_function cast_Z_to_T = GB_cast_factory (tcode, zcode) ;
ASSERT (scode <= GB_FP64_code) ;
ASSERT (tcode <= GB_FP64_code) ;
ASSERT (xcode <= GB_FP64_code) ;
ASSERT (ycode <= GB_FP64_code) ;
ASSERT (zcode <= GB_FP64_code) ;
// Tx [p] = (ttype) S [k], with typecasting
#undef GB_CAST_ARRAY_TO_ARRAY
#define GB_CAST_ARRAY_TO_ARRAY(Tx,p,S,k) \
cast_S_to_T (Tx +((p)*tsize), S +((k)*ssize), ssize) ;
if (op_2nd)
{
//--------------------------------------------------------------
// dup operator is the SECOND operator, with typecasting
//--------------------------------------------------------------
// Tx [p] += (ttype) S [k], but 2nd op, with typecasting
#undef GB_ADD_CAST_ARRAY_TO_ARRAY
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,S,k) \
GB_CAST_ARRAY_TO_ARRAY(Tx,p,S,k)
#include "GB_reduce_build_template.c"
}
else
{
//--------------------------------------------------------------
// dup is another operator, with typecasting required
//--------------------------------------------------------------
// Tx [p] += S [k], with typecasting
#undef GB_ADD_CAST_ARRAY_TO_ARRAY
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,S,k) \
{ \
/* ywork = (ytype) S [k] */ \
GB_void ywork [GB_PGI(ysize)] ; \
cast_S_to_Y (ywork, S +((k)*ssize), ssize) ; \
/* xwork = (xtype) Tx [p] */ \
GB_void xwork [GB_PGI(xsize)] ; \
cast_T_to_X (xwork, Tx +((p)*tsize), tsize) ; \
/* zwork = f (xwork, ywork) */ \
GB_void zwork [GB_PGI(zsize)] ; \
fdup (zwork, xwork, ywork) ; \
                    /* Tx [p] = (ttype) zwork */                            \
cast_Z_to_T (Tx +((p)*tsize), zwork, zsize) ; \
}
#include "GB_reduce_build_template.c"
}
}
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK ;
ASSERT_OK (GB_check (T, "T built", GB0)) ;
return (GrB_SUCCESS) ;
}
|
schedbench.c
|
/****************************************************************************
* *
* OpenMP MicroBenchmark Suite - Version 3.1 *
* *
* produced by *
* *
* Mark Bull, Fiona Reid and Nix Mc Donnell *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: [email protected] or [email protected] *
* *
* *
* This version copyright (c) The University of Edinburgh, 2015. *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
#include "common.h"
#include "schedbench.h"
// mjc: local pthread_t matching the pthreads-win32 handle layout, plus
// prototypes for the two entry points used below, so pthread.h (whose
// pthread_t would clash with this definition) is not included here.
typedef struct
{
    void * p;           /* Pointer to actual object */
    unsigned int x;     /* Extra information - reuse count etc */
} pthread_t;
extern int pthread_create(pthread_t *thread, const void *attr,
                          void *(*start_routine)(void *), void *arg);
extern int pthread_join(pthread_t thread, void **retval);
int cksz, itersperthr = 128;
char testName[32];
static void refer(void);
static void testpthread(void);
static void testpthreadintegrated(void);
static void dummypthread(void);
int schedbench_main(int argc, char **argv) {
ompbench_init(argc, argv);
/* GENERATE REFERENCE TIME */
reference("reference time", &refer);
/*REFERENCE PTHREAD*/
// benchmark("pthreadrecreate", &testpthread);
benchmark("dummypthread", &dummypthread);
benchmark("pthreadonce",&testpthreadintegrated);
/* TEST STATIC */
benchmark("STATIC", &teststatic);
/* TEST STATIC,n */
cksz = 1;
while (cksz <= itersperthr) {
sprintf(testName, "STATIC %d", cksz);
benchmark(testName, &teststaticn);
cksz *= 2;
}
/* TEST DYNAMIC,n */
cksz = 1;
while (cksz <= itersperthr) {
sprintf(testName, "DYNAMIC %d", cksz);
benchmark(testName, &testdynamicn);
cksz *= 2;
}
/* TEST GUIDED,n */
cksz = 1;
while (cksz <= itersperthr / nthreads) {
sprintf(testName, "GUIDED %d", cksz);
benchmark(testName, &testguidedn);
cksz *= 2;
}
finalise();
return EXIT_SUCCESS;
}
void *integratedloop(void *arg){
    (void)arg;
    for(int j=0;j<innerreps;j++){
        for(int i=0;i<itersperthr;i++){
            delay(delaylength);
        }
    }
    return NULL;
}
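/* (comment added for clarity) integratedloop keeps all innerreps repetitions
   inside a single thread, so the "pthreadonce" benchmark pays for one
   create/join pair per measurement pass, while testpthread recreates the
   threads on every repetition. */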
void *runloop(void *arg){
    (void)arg;
    for(int i=0;i<itersperthr;i++){
        delay(delaylength);
    }
    return NULL;
}
void *dummy(void *arg){
    (void)arg;
    return NULL;
}
static void dummypthread(void){
    int i;
    pthread_t threads[nthreads];
    for(i=0; i<nthreads; i++){
        pthread_create(&threads[i], NULL, dummy, NULL);
    }
    for(i=0; i<nthreads; i++){
        pthread_join(threads[i], NULL);
    }
}
static void testpthreadintegrated(void){
    int i;
    pthread_t threads[nthreads];
    for(i=0; i<nthreads; i++){
        pthread_create(&threads[i], NULL, integratedloop, NULL);
    }
    for(i=0; i<nthreads; i++){
        pthread_join(threads[i], NULL);
    }
}
static void testpthread(void){
    int i, j;
    pthread_t threads[nthreads];
    for(j=0; j < innerreps; j++){
        for(i=0; i<nthreads; i++){
            pthread_create(&threads[i], NULL, runloop, NULL);
        }
        for(i=0; i<nthreads; i++){
            pthread_join(threads[i], NULL);
        }
    }
}
static void refer(void) {
int i, j;
for (j = 0; j < innerreps; j++) {
for (i = 0; i < itersperthr; i++) {
delay(delaylength);
}
}
}
void teststatic() {
int i, j;
#pragma omp parallel private(j)
{
for (j = 0; j < innerreps; j++) {
#pragma omp for schedule(static)
for (i = 0; i < itersperthr * nthreads; i++) {
delay(delaylength);
}
}
}
}
void teststaticn() {
int i, j;
#pragma omp parallel private(j)
{
for (j = 0; j < innerreps; j++) {
#pragma omp for schedule(static,cksz)
for (i = 0; i < itersperthr * nthreads; i++) {
delay(delaylength);
}
}
}
}
void testdynamicn() {
int i, j;
#pragma omp parallel private(j)
{
for (j = 0; j < innerreps; j++) {
#pragma omp for schedule(dynamic,cksz)
for (i = 0; i < itersperthr * nthreads; i++) {
delay(delaylength);
}
}
}
}
void testguidedn() {
int i, j;
#pragma omp parallel private(j)
{
for (j = 0; j < innerreps; j++) {
#pragma omp for schedule(guided,cksz)
for (i = 0; i < itersperthr * nthreads; i++) {
delay(delaylength);
}
}
}
}
|
whirlpool_fmt_plug.c
|
/* whirlpool cracker patch for JtR. Hacked together during April of 2013 by Dhiru
* Kholia <dhiru at openwall.com>.
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
* it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_whirlpool_0;
extern struct fmt_main fmt_whirlpool_1;
extern struct fmt_main fmt_whirlpool;
#elif FMT_REGISTERS_H
john_register_one(&fmt_whirlpool_0);
john_register_one(&fmt_whirlpool_1);
john_register_one(&fmt_whirlpool);
#else
#include <string.h>
#include "arch.h"
#include "openssl_local_overrides.h"
#include "sph_whirlpool.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include <openssl/opensslv.h>
#if (AC_BUILT && HAVE_WHIRLPOOL) || \
(!AC_BUILT && OPENSSL_VERSION_NUMBER >= 0x10000000 && !HAVE_NO_SSL_WHIRLPOOL)
#include <openssl/whrlpool.h>
#endif
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 256
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL            "Whirlpool"
#define FORMAT_NAME ""
#define FORMAT_TAG "$whirlpool$"
#define TAG_LENGTH 11
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define CIPHERTEXT_LENGTH 128
#define BINARY_SIZE 64
#define SALT_SIZE 0
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_ALIGN 4
#define SALT_ALIGN 1
static struct fmt_tests whirlpool_0_tests[] = {
{"B3E1AB6EAF640A34F784593F2074416ACCD3B8E62C620175FCA0997B1BA2347339AA0D79E754C308209EA36811DFA40C1C32F1A2B9004725D987D3635165D3C8", ""},
// repeat hash in exactly the same form that is used in john.pot
{FORMAT_TAG "B3E1AB6EAF640A34F784593F2074416ACCD3B8E62C620175FCA0997B1BA2347339AA0D79E754C308209EA36811DFA40C1C32F1A2B9004725D987D3635165D3C8", ""},
{NULL}
};
static struct fmt_tests whirlpool_1_tests[] = {
{"470F0409ABAA446E49667D4EBE12A14387CEDBD10DD17B8243CAD550A089DC0FEEA7AA40F6C2AAAB71C6EBD076E43C7CFCA0AD32567897DCB5969861049A0F5A", ""},
// repeat hash in exactly the same form that is used in john.pot
{FORMAT_TAG "470F0409ABAA446E49667D4EBE12A14387CEDBD10DD17B8243CAD550A089DC0FEEA7AA40F6C2AAAB71C6EBD076E43C7CFCA0AD32567897DCB5969861049A0F5A", ""},
{NULL}
};
static struct fmt_tests whirlpool_tests[] = {
{"19FA61D75522A4669B44E39C1D2E1726C530232130D407F89AFEE0964997F7A73E83BE698B288FEBCF88E3E03C4F0757EA8964E59B63D93708B138CC42A66EB3", ""},
// repeat hash in exactly the same form that is used in john.pot
{FORMAT_TAG "19FA61D75522A4669B44E39C1D2E1726C530232130D407F89AFEE0964997F7A73E83BE698B288FEBCF88E3E03C4F0757EA8964E59B63D93708B138CC42A66EB3", ""},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
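	/* Scale the per-call key batch by the OpenMP thread count (and by
	   OMP_SCALE) so every thread gets a share of candidates per crypt. */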
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p;
int extra;
p = ciphertext;
if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
p += TAG_LENGTH;
if (hexlen(p, &extra) != CIPHERTEXT_LENGTH || extra)
return 0;
return 1;
}
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1];
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
ciphertext += TAG_LENGTH;
memcpy(out, FORMAT_TAG, TAG_LENGTH);
memcpy(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1);
strupr(out + TAG_LENGTH);
return out;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
p = ciphertext + TAG_LENGTH;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
static int crypt_0(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
sph_whirlpool0_context ctx;
sph_whirlpool0_init(&ctx);
sph_whirlpool0(&ctx, saved_key[index], strlen(saved_key[index]));
sph_whirlpool0_close(&ctx, (unsigned char*)crypt_out[index]);
}
return count;
}
static int crypt_1(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
sph_whirlpool1_context ctx;
sph_whirlpool1_init(&ctx);
sph_whirlpool1(&ctx, saved_key[index], strlen(saved_key[index]));
sph_whirlpool1_close(&ctx, (unsigned char*)crypt_out[index]);
}
return count;
}
static int crypt_2(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
#if (AC_BUILT && HAVE_WHIRLPOOL) || \
(!AC_BUILT && OPENSSL_VERSION_NUMBER >= 0x10000000 && !HAVE_NO_SSL_WHIRLPOOL)
WHIRLPOOL_CTX ctx;
WHIRLPOOL_Init(&ctx);
WHIRLPOOL_Update(&ctx, saved_key[index], strlen(saved_key[index]));
WHIRLPOOL_Final((unsigned char*)crypt_out[index], &ctx);
#else
sph_whirlpool_context ctx;
sph_whirlpool_init(&ctx);
sph_whirlpool(&ctx, saved_key[index], strlen(saved_key[index]));
sph_whirlpool_close(&ctx, (unsigned char*)crypt_out[index]);
#endif
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void whirlpool_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_whirlpool_0 = {
{
"whirlpool0",
"",
"WHIRLPOOL-0 " ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD |
FMT_SPLIT_UNIFIES_CASE,
{ NULL },
{ FORMAT_TAG },
whirlpool_0_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
fmt_default_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
whirlpool_set_key,
get_key,
fmt_default_clear_keys,
crypt_0,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
struct fmt_main fmt_whirlpool_1 = {
{
"whirlpool1",
"",
"WHIRLPOOL-1 " ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD |
FMT_SPLIT_UNIFIES_CASE,
{ NULL },
{ FORMAT_TAG },
whirlpool_1_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
fmt_default_salt,
		{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
whirlpool_set_key,
get_key,
fmt_default_clear_keys,
crypt_1,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
struct fmt_main fmt_whirlpool = {
{
"whirlpool",
"",
"WHIRLPOOL " ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD |
FMT_SPLIT_UNIFIES_CASE,
{ NULL },
{ FORMAT_TAG },
whirlpool_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
fmt_default_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
whirlpool_set_key,
get_key,
fmt_default_clear_keys,
crypt_2,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
pr85956.c
|
/* PR middle-end/85956 */
/* { dg-do compile } */
/* { dg-additional-options "-O2 -Wall" } */
void
foo (int n, void *p)
{
int (*a)[n] = (int (*)[n]) p;
#pragma omp parallel shared(a) default(none)
#pragma omp master
a[-1][-1] = 42; /* { dg-warning "array subscript -1 is below array bounds" } */
}
|
nowait.c
|
#include <stdio.h>
#include <omp.h>
#define N 1000
#define CHUNKSIZE 100
int main(int argc, char *argv[]) {
int i, chunk;
float a[N], b[N], c[N];
/* Some initializations */
for (i = 0; i < N; i++)
{
a[i] = b[i] = i;
}
chunk = CHUNKSIZE;
for (i = 0; i < N; i++)
{
printf("%d\n",a[i]);
}
for (i = 0; i < N; i++)
{
printf("%d\n",b[i]);
}
#pragma omp parallel shared(a,b,c,chunk) private(i)
{
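    /* nowait removes the implied barrier at the end of the worksharing loop:
       each thread may leave the loop as soon as its own chunks are done. */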
#pragma omp for schedule(dynamic,chunk) nowait
for (i = 0; i < N; i++)
c[i] = a[i] + b[i];
} /* end of parallel region */
// for (i = 0; i < N; i++)
// {
// printf("%d\n",c[i]);
// }
return 0;
}
|
GeneralMatrixMatrix.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
typedef gebp_traits<RhsScalar,LhsScalar> Traits;
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsStride,
ResScalar* res, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar,LhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
// transpose the product such that the result is column major
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
ColMajor>
::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
}
};
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsStride,
ResScalar* _res, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar,RhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
LhsMapper lhs(_lhs,lhsStride);
RhsMapper rhs(_rhs,rhsStride);
ResMapper res(_res, resStride);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction
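    // (comment added for clarity) Goto's blocking processes the product in
    // kc-deep slabs: an mc x kc block of the lhs and kc x nc blocks of the
    // rhs are packed into contiguous buffers sized for the caches before the
    // gebp micro-kernel runs on them.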
gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
Index tid = omp_get_thread_num();
Index threads = omp_get_num_threads();
LhsScalar* blockA = blocking.blockA();
eigen_internal_assert(blockA!=0);
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing B'.
pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);
// Pack A_k to A' in a parallel fashion:
// each thread packs the sub block A_k,i to A'_i where i is the thread id.
// However, before copying to A'_i, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
while(info[tid].users!=0) {}
info[tid].users += threads;
pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);
// Notify the other threads that the part A'_i is ready to go.
info[tid].sync = k;
// Computes C_i += A' * B' per A'_i
for(Index shift=0; shift<threads; ++shift)
{
Index i = (tid+shift)%threads;
// At this point we have to make sure that A'_i has been updated by the thread i,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if (shift>0) {
while(info[i].sync!=k) {
}
}
gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
}
// Then keep going as usual with the remaining B'
for(Index j=nc; j<cols; j+=nc)
{
const Index actual_nc = (std::min)(j+nc,cols)-j;
// pack B_k,j to B'
pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);
// C_j += A' * B'
gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
}
// Release all the sub blocks A'_i of A' for the current thread,
// i.e., we simply decrement the number of users by 1
#pragma omp critical
{
for(Index i=0; i<threads; ++i)
#pragma omp atomic
--(info[i].users);
}
}
}
else
#endif // EIGEN_HAS_OPENMP
{
EIGEN_UNUSED_VARIABLE(info);
// this is the sequential version!
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
for(Index i2=0; i2<rows; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,rows)-i2;
for(Index k2=0; k2<depth; k2+=kc)
{
const Index actual_kc = (std::min)(k2+kc,depth)-k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
// Note that this panel will be read as many times as the number of blocks in the rhs's
// horizontal panel which is, in practice, a very low number.
pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);
// For each kc x nc block of the rhs's horizontal panel...
for(Index j2=0; j2<cols; j2+=nc)
{
const Index actual_nc = (std::min)(j2+nc,cols)-j2;
// We pack the rhs's block into a sequential chunk of memory (L2 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
// micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
if((!pack_rhs_once) || i2==0)
pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);
// Everything is packed, we can now call the panel * block kernel:
gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
}
}
}
}
}
};
/*********************************************************************************
* Specialization of GeneralProduct<> for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
template<typename Lhs, typename Rhs>
struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
: traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
{};
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
{}
void initParallelSession(Index num_threads) const
{
m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
m_blocking.allocateA();
}
void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
{
if(cols==-1)
cols = m_rhs.cols();
Gemm::run(rows, cols, m_lhs.cols(),
&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
(Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
typedef typename Gemm::Traits Traits;
protected:
const Lhs& m_lhs;
const Rhs& m_rhs;
Dest& m_dest;
Scalar m_actualAlpha;
BlockingType& m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
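// (comment added for clarity) FiniteAtCompileTime selects the specialization
// below that holds the packing buffers as fixed-size static arrays; the
// Dynamic case instead allocates them lazily via allocateA()/allocateB().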
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
typedef _LhsScalar LhsScalar;
typedef _RhsScalar RhsScalar;
protected:
LhsScalar* m_blockA;
RhsScalar* m_blockB;
Index m_mc;
Index m_nc;
Index m_kc;
public:
level3_blocking()
: m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
{}
inline Index mc() const { return m_mc; }
inline Index nc() const { return m_nc; }
inline Index kc() const { return m_kc; }
inline LhsScalar* blockA() { return m_blockA; }
inline RhsScalar* blockB() { return m_blockB; }
};
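// Blocking for problems whose sizes are finite at compile time: the block sizes
// are compile-time constants and the packing buffers live on the stack.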
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor,
ActualRows = Transpose ? MaxCols : MaxRows,
ActualCols = Transpose ? MaxRows : MaxCols
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
enum {
SizeA = ActualRows * MaxDepth,
SizeB = ActualCols * MaxDepth
};
EIGEN_ALIGN_DEFAULT LhsScalar m_staticA[SizeA];
EIGEN_ALIGN_DEFAULT RhsScalar m_staticB[SizeB];
public:
gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
{
this->m_mc = ActualRows;
this->m_nc = ActualCols;
this->m_kc = MaxDepth;
this->m_blockA = m_staticA;
this->m_blockB = m_staticB;
}
void initParallel(Index, Index, Index, Index)
{}
inline void allocateA() {}
inline void allocateB() {}
inline void allocateAll() {}
};
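// Blocking for dynamically-sized problems: block sizes are computed at runtime
// and the packing buffers are heap-allocated on demand.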
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
Index m_sizeA;
Index m_sizeB;
public:
gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
if(l3_blocking)
{
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
}
else // no l3 blocking
{
Index m = this->m_mc;
Index n = this->m_nc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, n, num_threads);
}
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
void initParallel(Index rows, Index cols, Index depth, Index num_threads)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
Index m = this->m_mc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
void allocateA()
{
if(this->m_blockA==0)
this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
}
void allocateB()
{
if(this->m_blockB==0)
this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
}
void allocateAll()
{
allocateA();
allocateB();
}
~gemm_blocking_space()
{
aligned_delete(this->m_blockA, m_sizeA);
aligned_delete(this->m_blockB, m_sizeB);
}
};
} // end namespace internal
namespace internal {
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;
typedef internal::blas_traits<Rhs> RhsBlasTraits;
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;
enum {
MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
};
typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;
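// For very small products the packing/blocking machinery costs more than it
// saves, so the functions below fall back to the coefficient-based (lazy) product.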
template<typename Dst>
static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
lazyproduct::evalTo(dst, lhs, rhs);
else
{
dst.setZero();
scaleAndAddTo(dst, lhs, rhs, Scalar(1));
}
}
template<typename Dst>
static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
lazyproduct::addTo(dst, lhs, rhs);
else
scaleAndAddTo(dst,lhs, rhs, Scalar(1));
}
template<typename Dst>
static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
lazyproduct::subTo(dst, lhs, rhs);
else
scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
}
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
{
eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
return;
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
* RhsBlasTraits::extractScalarFactor(a_rhs);
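// Scalar factors embedded in the operands (e.g. (s*A)*B) are pulled out by
// blas_traits and folded into a single alpha, so the kernel multiplies the
// bare matrices and applies the scale once.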
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
typedef internal::gemm_functor<
Scalar, Index,
internal::general_matrix_matrix_product<
Index,
LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), Dest::Flags&RowMajorBit);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
forest.c
|
#include "forest.h"
#ifdef USING_MPI
const MPI_Comm comm = MPI_COMM_WORLD;
int myrank, size;
#endif
int main(int argc, char** argv) {
int i, y, seed;
args* myArgs;
forest* f;
cell** swapGrid;
struct random_data* rand_state;
char* rand_buf;
int startX = 0, startY = 0;
int endX = 0, endY = 0;
#ifdef USING_OMP
int threads = omp_get_max_threads();
#else
int threads = 1;
#endif
#ifdef USING_MPI
//MPI_Request request[3];
//MPI_Status* status = (MPI_Status*) malloc(sizeof(MPI_Status) * 3);
MPI_Status status[3];
memset(status, 0, sizeof(status));
MPI_Init(&argc, &argv);
MPI_Comm_rank(comm, &myrank);
MPI_Comm_size(comm, &size);
if (size != 4) {
printf("Unfortunately this program has been hardcoded to run on 4 nodes.\nExiting...\n");
MPI_Finalize();
exit(1);
}
#endif
myArgs = parse_args(argc, argv);
f = alloc_forest(myArgs);
// set up which parts of the grid we're going to iterate over
#ifdef USING_MPI
int r0startX = 0;
int r0startY = 0;
//int r0endX = f->dimX/2;
int r0endY = f->dimY/2;
//int r1startX = (f->dimX/2) + 1;
int r1startX = (f->dimX/2);
int r1startY = 0;
//int r1endX = f->dimX;
int r1endY = f->dimY/2;
int r2startX = 0;
//int r2startY = (f->dimY/2) + 1;
int r2startY = (f->dimY/2);
//int r2endX = f->dimX/2;
int r2endY = f->dimY;
//int r3startX = (f->dimX/2) + 1;
int r3startX = (f->dimX/2);
//int r3startY = (f->dimY/2) + 1;
int r3startY = (f->dimY/2);
//int r3endX = f->dimX;
int r3endY = f->dimY;
switch(myrank) {
case 0:
startX = 0;
startY = 0;
endX = f->dimX/2;
endY = f->dimY/2;
break;
case 1:
//startX = (f->dimX/2) + 1;
startX = (f->dimX/2);
startY = 0;
endX = f->dimX;
endY = f->dimY/2;
break;
case 2:
startX = 0;
//startY = (f->dimY/2) + 1;
startY = (f->dimY/2);
endX = f->dimX/2;
endY = f->dimY;
break;
case 3:
//startX = (f->dimX/2) + 1;
startX = (f->dimX/2);
//startY = (f->dimY/2) + 1;
startY = (f->dimY/2);
endX = f->dimX;
endY = f->dimY;
break;
}
#else
startX = 0;
startY = 0;
endX = f->dimX;
endY = f->dimY;
#endif
// ncurses requires some setup
if (myArgs->output == NCURSES) {
init_ncurses(endX, endY);
}
if (myArgs->logging) {
myArgs->log = fopen("forest.log","w");
}
rand_state = (struct random_data*) malloc(sizeof(struct random_data) * threads);
if (rand_state == NULL) {
fprintf(stderr,"malloc failed!\n");
exit(17);
}
memset(rand_state, 0, sizeof(struct random_data) * threads);
// initstate_r() keeps using the buffer it is handed, so every thread needs
// its own 256-byte state buffer rather than one shared array
rand_buf = (char*) malloc((size_t)256 * threads);
memset(rand_buf, 'q', (size_t)256 * threads);
for (int q = 0; q < threads; q++) {
if (myArgs->output == VERIFY) {
seed = 5; //deterministic
} else {
#ifdef USING_MPI
seed = time(NULL) ^ (q + myrank);
#else
seed = time(NULL) ^ q; //non-deterministic
#endif
}
initstate_r(seed, &rand_buf[256 * q], 256, &rand_state[q]);
srandom_r(seed, &rand_state[q]);
}
#ifdef USING_MPI
//fprintf(stderr,"rank %d is working on x %d to %d and y %d to %d\n",myrank, startX, endX, startY, endY);
#endif
// main loop
for (i = 0; i < f->simLength; i++) {
//fprintf(stdout,"rank %d got to timestep %d\n",myrank,f->time);
//fflush();
// give each thread its own scratch buffer: a private copy of the heap
// pointer would be uninitialised in every thread
#pragma omp parallel for
for (y = startY; y < endY; y++) {
int rands[4];
for (int x = startX; x < endX; x+=4) {
#ifdef USING_OMP
random_r(&rand_state[omp_get_thread_num()], &rands[0]);
random_r(&rand_state[omp_get_thread_num()], &rands[1]);
random_r(&rand_state[omp_get_thread_num()], &rands[2]);
random_r(&rand_state[omp_get_thread_num()], &rands[3]);
#else
random_r(&rand_state[0], &rands[0]);
random_r(&rand_state[0], &rands[1]);
random_r(&rand_state[0], &rands[2]);
random_r(&rand_state[0], &rands[3]);
#endif
cell_auto(f, x, y, f->oldGrid[y][x].status, &rands[0]);
cell_auto(f, x+1, y, f->oldGrid[y][x+1].status, &rands[1]);
cell_auto(f, x+2, y, f->oldGrid[y][x+2].status, &rands[2]);
cell_auto(f, x+3, y, f->oldGrid[y][x+3].status, &rands[3]);
}
}
if (myArgs->logging) {
fprintf(myArgs->log, "%d %d %d\n", i, f->treeCount, f->burnCount);
}
#ifdef USING_MPI
int z = 0;
int sendOne = 0, sendTwo = 0, sendThree = 0;
//int err = 0;
//size_t xferSize = sizeof(cell) * (f->dimX/2) * (f->dimY/2);
int xferSize = (int)sizeof(cell) * (f->dimX/2) * (f->dimY/2);
size_t rowSize = sizeof(cell) * (f->dimX/2);
//fprintf(stderr,"dimx = %d, dimy = %d, xfersize = %d, rowsize = %d\n",f->dimX, f->dimY, (int)xferSize, (int)rowSize);
//fprintf(stderr, "size of a cell is %d bytes\n",(int)sizeof(cell));
//fprintf(stderr, "there should be %d cells in the sendGrid.\n",((f->dimX/2) * (f->dimY/2)));
//fprintf(stderr, "but we are copying in %d times %d cells.\n", (endY - startY), f->dimX/2);
// copy worked-on part of grid to sendGrid for sending
for (int e = startY; e < endY; e++) {
memcpy(f->sendGrid[z], &f->newGrid[e][startX], rowSize);
z++;
}
switch(myrank) {
case 0:
sendOne = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 1, 0, *f->recvGrid1, xferSize, MPI_BYTE, 1, 0, comm, &status[0]);
sendTwo = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 2, 0, *f->recvGrid2, xferSize, MPI_BYTE, 2, 0, comm, &status[1]);
sendThree = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 3, 0, *f->recvGrid3, xferSize, MPI_BYTE, 3, 0, comm, &status[2]);
//checksum_grid(f->recvGrid1, f->dimX/2, f->dimY/2);
// copy from recvgrids
z = 0;
for (int e = r1startY; e < r1endY; e++) {
memcpy(&f->newGrid[e][r1startX], f->recvGrid1[z], rowSize);
z++;
}
z = 0;
for (int e = r2startY; e < r2endY; e++) {
memcpy(&f->newGrid[e][r2startX], f->recvGrid2[z], rowSize);
z++;
}
z = 0;
for (int e = r3startY; e < r3endY; e++) {
memcpy(&f->newGrid[e][r3startX], f->recvGrid3[z], rowSize);
z++;
}
break;
case 1:
sendOne = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 0, 0, *f->recvGrid1, xferSize, MPI_BYTE, 0, 0, comm, &status[0]);
sendTwo = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 3, 0, *f->recvGrid2, xferSize, MPI_BYTE, 3, 0, comm, &status[1]);
sendThree = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 2, 0, *f->recvGrid3, xferSize, MPI_BYTE, 2, 0, comm, &status[2]);
//checksum_grid(f->sendGrid, f->dimX/2, f->dimY/2);
// copy from recvgrids
z = 0;
for (int e = r0startY; e < r0endY; e++) {
memcpy(&f->newGrid[e][r0startX], f->recvGrid1[z], rowSize);
z++;
}
z = 0;
for (int e = r3startY; e < r3endY; e++) {
memcpy(&f->newGrid[e][r3startX], f->recvGrid2[z], rowSize);
z++;
}
z = 0;
for (int e = r2startY; e < r2endY; e++) {
memcpy(&f->newGrid[e][r2startX], f->recvGrid3[z], rowSize);
z++;
}
break;
case 2:
sendOne = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 3, 0, *f->recvGrid1, xferSize, MPI_BYTE, 3, 0, comm, &status[0]);
sendTwo = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 0, 0, *f->recvGrid2, xferSize, MPI_BYTE, 0, 0, comm, &status[1]);
sendThree = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 1, 0, *f->recvGrid3, xferSize, MPI_BYTE, 1, 0, comm, &status[2]);
// copy from recvgrids
z = 0;
for (int e = r3startY; e < r3endY; e++) {
memcpy(&f->newGrid[e][r3startX], f->recvGrid1[z], rowSize);
z++;
}
z = 0;
for (int e = r0startY; e < r0endY; e++) {
memcpy(&f->newGrid[e][r0startX], f->recvGrid2[z], rowSize);
z++;
}
z = 0;
for (int e = r1startY; e < r1endY; e++) {
memcpy(&f->newGrid[e][r1startX], f->recvGrid3[z], rowSize);
z++;
}
break;
case 3:
sendOne = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 2, 0, *f->recvGrid1, xferSize, MPI_BYTE, 2, 0, comm, &status[0]);
sendTwo = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 1, 0, *f->recvGrid2, xferSize, MPI_BYTE, 1, 0, comm, &status[1]);
sendThree = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 0, 0, *f->recvGrid3, xferSize, MPI_BYTE, 0, 0, comm, &status[2]);
// copy from recvgrids
z = 0;
for (int e = r2startY; e < r2endY; e++) {
memcpy(&f->newGrid[e][r2startX], f->recvGrid1[z], rowSize);
z++;
}
z = 0;
for (int e = r1startY; e < r1endY; e++) {
memcpy(&f->newGrid[e][r1startX], f->recvGrid2[z], rowSize);
z++;
}
z = 0;
for (int e = r0startY; e < r0endY; e++) {
memcpy(&f->newGrid[e][r0startX], f->recvGrid3[z], rowSize);
z++;
}
break;
default:
fprintf(stderr,"your switch case is broke\n");
break;
}
if (sendOne || sendTwo || sendThree) {
fprintf(stderr,"i'm %d, %d %d %d\n",myrank, sendOne, sendTwo, sendThree);
}
// sanity check
/*
for (int zz = 0; zz < f->dimY; zz++) {
f->newGrid[25][zz].status = BURN;
f->newGrid[26][zz].status = BURN;
f->newGrid[27][zz].status = BURN;
f->newGrid[28][zz].status = BURN;
}
*/
// only one rank needs to output
if (myrank == 0) {
myArgs->out(f);
}
#else
myArgs->out(f); // it's a function pointer!
#endif
f->time++;
swapGrid = f->oldGrid;
f->oldGrid = f->newGrid;
f->newGrid = swapGrid;
}
if (myArgs->output == NCURSES) {
endwin();
}
if (myArgs->logging) {
fclose(myArgs->log);
}
#ifdef USING_MPI
text_output(f->newGrid, f->dimX, f->dimY);
//checksum_grid(f->newGrid, f->dimX, f->dimY);
MPI_Finalize();
#endif
return 0;
}
void text_output(cell** grid, int x, int y) {
char* out = (char*) malloc(sizeof(char) * x * y + (y + 1));
int c = 0;
int arr;
for (int s = 0; s < y; s++) {
for (int t = 0; t < x; t++) {
switch(grid[s][t].status) {
case EMPTY:
out[c++] = ' ';
break;
case TREE:
out[c++] = 'T';
break;
case BURN:
out[c++] = 'B';
break;
default:
fprintf(stderr,"well that's not good - %d %d\n",s,t);
break;
}
}
out[c++] = '\n';
}
out[c++] = '\0';
#ifdef USING_MPI
arr = myrank;
#else
arr = 0;
#endif
fprintf(stderr,"hi i'm %d and this is what i have for you\n%s\n",arr,out);
free(out);
}
void out_null(forest* f) {
return;
}
void out_verify(forest* f) {
int x, y;
int treeCheck = 0;
int burnCheck = 0;
char* strGrid;
char salt[] = "$1$cosc3500";
char* res;
if (f->time == f->simLength) {
fprintf(stdout,"Calculating grid integrity...\n");
strGrid = (char*)malloc((sizeof(char) * f->dimX * f->dimY) + 1);
if (strGrid == NULL) {
fprintf(stderr,"malloc failed!\n");
exit(17);
}
strGrid[f->dimX * f->dimY] = '\0';
for (y = 0; y < f->dimY; y++) {
for (x = 0; x < f->dimX; x++) {
switch(f->newGrid[y][x].status) {
case EMPTY:
strGrid[(y * f->dimX) + x] = ' ';
break;
case TREE:
strGrid[(y * f->dimX) + x] = 'T';
treeCheck++;
break;
case BURN:
strGrid[(y * f->dimX) + x] = 'B';
burnCheck++;
break;
default:
fprintf(stderr, "bad tree status at %d %d\n",x,y);
break;
}
}
}
if (treeCheck == f->treeCount && burnCheck == f->burnCount) {
fprintf(stdout, "treeCount and burnCount okay\n");
} else {
fprintf(stdout, "error: treeCount = %d, treeCheck = %d\n", f->treeCount, treeCheck);
fprintf(stdout, "error: burnCount = %d, burnCheck = %d\n", f->burnCount, burnCheck);
}
res = crypt(strGrid, salt);
fprintf(stdout,"Grid checksum is: %s\n",res);
}
return;
}
void checksum_grid(cell** grid, int x, int y) {
int i, j;
int e = 0, t = 0, b = 0;
char salt[] = "$1$cosc3500";
char* res;
char* strGrid = (char*)malloc((sizeof(char) * x * y) + 1);
if (strGrid == NULL) {
fprintf(stderr,"malloc failed!\n");
exit(17);
}
strGrid[x * y] = '\0';
int c = 0;
for (i = 0; i < y; i++) {
for (j = 0; j < x; j++) {
switch(grid[i][j].status) {
case EMPTY:
strGrid[c++] = ' ';
e++;
break;
case TREE:
strGrid[c++] = 'T';
t++;
break;
case BURN:
strGrid[c++] = 'B';
b++;
break;
default:
strGrid[c++] = '.';
fprintf(stderr, "bad tree status at %d %d, status is %d\n",x,y,grid[i][j].status);
break;
}
}
}
strGrid[c++] = '\0';
res = crypt(strGrid, salt);
#ifdef USING_MPI
fprintf(stderr,"rank %d, e=%d, t=%d, b=%d, %s\n", myrank, e, t, b, res);
//fprintf(stderr,"strgrid is %d chars long and c is %d \n",(int)strlen(strGrid),c);
#endif
free(strGrid);
}
/* perform cellular automata rules on each cell in the forest */
void cell_auto(forest* f, int x, int y, int mode, int* rand) {
int dx,dy;
int rx,ry;
cell* c = &f->oldGrid[y][x];
cell* n = &f->newGrid[y][x];
switch(mode) {
case EMPTY:
if ( (*rand % 10000) < GROWCHANCE) {
n->status = TREE;
n->age = 0;
#pragma omp atomic
f->treeCount++;
} else {
n->status = EMPTY;
}
return;
case TREE:
// chance of catching on fire from neighbouring trees
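// older trees ignite more easily: the test (*rand % 720) < age passes more
// often as age grows (aging stops once age reaches 240, see below)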
for (dx = -1; dx <= 1; dx++) {
for (dy = -1; dy <= 1; dy++) {
if (dx == 0 && dy == 0) { // don't examine yourself
continue;
}
rx = x + dx;
ry = y + dy;
if (rx >= 0 && rx < f->dimX &&
ry >= 0 && ry < f->dimY) { // bounds checking
if (f->oldGrid[ry][rx].status == BURN &&
(*rand % 720) < c->age) {
n->status = BURN;
n->age = 0;
n->burnTime = BURNLENGTH;
#pragma omp atomic
f->burnCount++;
#pragma omp atomic
f->treeCount--;
return; // nothing else to do
}
}
}
}
// chance of bursting into flames spontaneously
if ( (*rand % 150000) < BURNCHANCE) {
n->status = BURN;
n->burnTime = BURNLENGTH;
#pragma omp atomic
f->burnCount++;
#pragma omp atomic
f->treeCount--;
return;
}
// if we get here, it's a tree and is still a tree...?
n->status = TREE;
if (c->age < 240) {
n->age = c->age + 4;
}
return;
case BURN:
if (c->burnTime == 0) {
n->status = EMPTY;
n->age = 0;
#pragma omp atomic
f->burnCount--;
} else {
n->status = BURN;
n->burnTime = c->burnTime - 1;
}
return;
default:
fprintf(stderr,"unitialised cell at %d,%d, cell has status %d\n", x, y, mode);
return;
}
}
/* allocate and populate the forest struct, including initial trees */
forest* alloc_forest(args* myArgs) {
bitmap_t* png;
int x = myArgs->dimX;
int y = myArgs->dimY;
forest* f = (forest*)malloc(sizeof(forest));
if (f == NULL) {
fprintf(stderr,"malloc failed!\n");
exit(17);
}
f->newGrid = alloc_grid(x, y);
f->oldGrid = alloc_grid(x, y);
#ifdef USING_MPI
f->sendGrid = alloc_2d_grid(x/2, y/2);
f->recvGrid1 = alloc_2d_grid(x/2, y/2);
f->recvGrid2 = alloc_2d_grid(x/2, y/2);
f->recvGrid3 = alloc_2d_grid(x/2, y/2);
#endif
if (myArgs->output == PNG) {
// alloc pixel array
png = (bitmap_t*)malloc(sizeof(bitmap_t));
if (png == NULL) {
fprintf(stderr,"malloc failed!\n");
exit(17);
}
png->pixels = calloc(x * y, sizeof(pixel_t));
png->width = x;
png->height = y;
f->png = png;
}
f->dimX = x;
f->dimY = y;
f->treeCount = 0;
f->burnCount = 0;
f->time = 1;
f->simLength = myArgs->simLength;
return f;
}
cell** alloc_grid(int x, int y) {
int i;
cell** grid = (cell**)malloc(sizeof(cell*) * y);
if (grid == NULL) {
fprintf(stderr,"malloc failed!\n");
exit(17);
}
// i'm so sorry valgrind
memset(grid, 0, sizeof(cell*) * y);
for (i = 0; i < y; i++) {
grid[i] = (cell*)malloc(sizeof(cell) * x);
if (grid[i] == NULL) {
fprintf(stderr,"malloc failed!\n");
exit(17);
}
memset(grid[i], 0, sizeof(cell) * x);
for (int j = 0; j < x; j++) {
grid[i][j].status = EMPTY;
}
}
return grid;
}
// returns a 2d array that is allocated as one contiguous
// block of memory. cool trick!
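// (MPI_Sendrecv below ships each grid as one contiguous byte buffer, so the
// row pointers must all index into a single allocation.)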
cell** alloc_2d_grid(int x, int y) {
cell* data = (cell*)malloc(sizeof(cell) * x * y);
if (data == NULL) {
fprintf(stderr,"malloc failed!\n");
exit(17);
}
// according to valgrind i am a horrible person
memset(data, 0, sizeof(cell) * x * y);
cell** array = (cell**)malloc(sizeof(cell*) * y);
if (array == NULL) {
fprintf(stderr,"malloc failed!\n");
exit(17);
}
for (int i = 0; i < y; i++) {
array[i] = &(data[x * i]);
}
return array;
}
/* parse argv for valid parameters and return as a struct */
args* parse_args(int argc, char** argv) {
long len;
args* myArgs = (args*)malloc(sizeof(args));
if (myArgs == NULL) {
fprintf(stderr,"malloc failed!\n");
exit(17);
}
char usage[] = "Usage: forest dimensionX dimensionY output simlength [log]\n";
myArgs->logging = 0;
switch(argc) {
case 6:
if (!strcmp(argv[5],"log")) {
myArgs->logging = 1;
} else {
printf("%s", usage);
}
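// fall through: "log" has been consumed, parse the remaining arguments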
case 5:
myArgs->dimX = atoi(argv[1]);
myArgs->dimY = atoi(argv[2]);
if (myArgs->dimX % 8 != 0 || myArgs->dimY % 8 != 0) {
printf("Error: please use mod8 grid dimensions\n");
printf("%s", usage);
exit(1);
}
errno = 0;
len = strtol(argv[4], NULL, 10);
if ((errno == EINVAL) || (errno == ERANGE) || (len < 1) || (len > INT_MAX)) {
printf("Error: invalid simlength, please enter an integer between 1 and %d\n", INT_MAX);
printf("%s", usage);
exit(2);
}
myArgs->simLength = len;
if (!strcmp(argv[3],"ncurses")) {
myArgs->output = NCURSES;
myArgs->out = out_ncurses;
} else if (!strcmp(argv[3],"png")) {
myArgs->output = PNG;
myArgs->out = out_png;
} else if (!strcmp(argv[3],"null")) {
myArgs->output = NULLOUT;
myArgs->out = out_null;
} else if (!strcmp(argv[3],"verify")) {
myArgs->output = VERIFY;
myArgs->out = out_verify;
} else {
printf("Error: output should be one of 'png' 'ncurses' 'null' 'verify'\n");
printf("%s", usage);
exit(1);
}
return myArgs;
default:
printf("%s", usage);
exit(1);
}
return myArgs;
}
|
loop1.c
|
#include <stdio.h>
#include <omp.h>
int main()
{
int i,j;
// int innerreps = 100;
#pragma omp parallel private(j)
{
// for (j=0; j<innerreps; j++)
{
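// schedule(static,2) deals iterations to threads round-robin in chunks of two:
// with T threads, thread t executes i = 2t, 2t+1, 2t+2T, 2t+1+2T, ...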
#pragma omp for schedule(static,2)
for (i=0; i<32; i++)
{
printf ("thread %d is executing %d \n",omp_get_thread_num(),i);
// delay(500);
}
}
}
return 0;
}
|
t.simple.c
|
#include <stdlib.h>
#include <stdio.h>
#include "omp.h"
#pragma offload_attribute(push, target(mic))
#include "mkl.h"
#pragma offload_attribute(pop)
int manual_sync;
omp_lock_t offload_lock;
__declspec(target(mic))
void local_dgemm(int N, int LD, double *A, double *B, double *C)
{
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
N, N, N, 1.0, A, LD, B, LD, 1.0, C, LD);
}
double offload_dgemm(int N, int LD, double *A, double *B, double *C)
{
double t;
static int first_run = 1;
t = dsecnd();
/* Allocate memory on the card only on the first offload to improve
* performance. The memory is released only when the process exits. This is
* only suitable for benchmarking. */
#pragma offload target(mic:0) in(N, LD) \
in(A: length(N*LD) alloc_if(first_run) free_if(0)) \
in(B: length(N*LD) alloc_if(first_run) free_if(0)) \
inout(C: length(N*LD) alloc_if(first_run) free_if(0))
{
local_dgemm(N, LD, A, B, C);
}
t = dsecnd() - t;
first_run = 0;
return t;
}
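/* Host path: the same cblas_dgemm runs on the host, or is handed to the
 * coprocessor by MKL Automatic Offload when AO is enabled in the environment
 * (e.g. MKL_MIC_ENABLE=1); hence the "Host/AO" label printed below. */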
double host_ao_dgemm(int N, int LD, double *A, double *B, double *C)
{
double t = dsecnd();
local_dgemm(N, LD, A, B, C);
return dsecnd() - t;
}
void bench_dgemm(int use_offload, int N)
{
/* Choose such leading dimension that there is no cache aliasing. */
int LD = (N % 512) ? N : N + 128;
/* Allocate memory using MKL function to make sure the addresses are
* properly aligned. */
double *A = mkl_malloc(sizeof(double) * N * LD, 4096);
double *B = mkl_malloc(sizeof(double) * N * LD, 4096);
double *C = mkl_malloc(sizeof(double) * N * LD, 4096);
/* Select DGEMM kind: offload or host/Automatic Offload. */
double (*dgemm_func)(int, int, double *, double *, double *);
dgemm_func = (use_offload) ? offload_dgemm : host_ao_dgemm;
#pragma omp barrier
double t = 0.0;
const int NITERS = 3;
for (int i = 0; i < NITERS + 1; i++) {
double t_tmp = dgemm_func(N, LD, A, B, C);
/* Discard performance obtained on the warmup iteration. */
if (i > 0) t += t_tmp;
}
mkl_free(A);
mkl_free(B);
mkl_free(C);
const double NOPS = 2.0 * N * N * N;
double gflops = NOPS / (t * 1E9 / NITERS);
printf("%s %dx%d DGEMM: %8.2f GFlops\n",
(use_offload) ? "Offload" : "Host/AO", N, N, gflops);
}
int main(int argc, char **argv)
{
if (argc != 3) {
printf("Usage: %s <concurrent coprocessor access=0|1> <N>\n", argv[0]);
return -1;
}
int concurrent = atoi(argv[1]);
int N = atoi(argv[2]);
printf("Coprocessor access: %s\n", concurrent ? "concurrent" : "serial");
printf("N: %d\n", N);
if (concurrent) {
/* The following settings will make MKL use OpenMP even when called
* from an OpenMP region. */
mkl_set_dynamic(0);
omp_set_nested(1);
mkl_set_num_threads(omp_get_max_threads());
}
#pragma omp parallel for num_threads(2) if (concurrent)
for (int i = 0; i < 2; i++)
bench_dgemm(i, N);
return 0;
}
|
3d7pt_var.lbpar.c
|
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc < 5) {
printf("Usage: %s Nx Ny Nz Nt\n", argv[0]);
return 1;
}
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 32;
tile_size[3] = 64;
tile_size[4] = -1;
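// The trailing -1 terminates the list; the four values correspond to the
// 24/24/32/64 block strides visible in the generated loop nest below.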
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,12);t1++) {
lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(3*t1-7,8)),ceild(24*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(12*t1+Ny+21,32)),floord(24*t2+Ny+20,32)),floord(24*t1-24*t2+Nz+Ny+19,32));t3++) {
for (t4=max(max(max(0,ceild(3*t1-15,16)),ceild(24*t2-Nz-60,64)),ceild(32*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(12*t1+Nx+21,64)),floord(24*t2+Nx+20,64)),floord(32*t3+Nx+28,64)),floord(24*t1-24*t2+Nz+Nx+19,64));t4++) {
for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),32*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),32*t3+30),64*t4+62),24*t1-24*t2+Nz+21);t5++) {
for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
lbv=max(64*t4,t5+1);
ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[(t5+1) % 2][(-t5+t6)][(-t5+t7)][(-t5+t8)] =
(coef[0][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5 % 2][(-t5+t6)][(-t5+t7)][(-t5+t8)])
+ (coef[1][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5 % 2][(-t5+t6) - 1][(-t5+t7)][(-t5+t8)])
+ (coef[2][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5 % 2][(-t5+t6)][(-t5+t7) - 1][(-t5+t8)])
+ (coef[3][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5 % 2][(-t5+t6)][(-t5+t7)][(-t5+t8) - 1])
+ (coef[4][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5 % 2][(-t5+t6) + 1][(-t5+t7)][(-t5+t8)])
+ (coef[5][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5 % 2][(-t5+t6)][(-t5+t7) + 1][(-t5+t8)])
+ (coef[6][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5 % 2][(-t5+t6)][(-t5+t7)][(-t5+t8) + 1]);
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
BlockOps.h
|
/*****************************************************************************
*
* Copyright (c) 2003-2018 by The University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Apache License, version 2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014 by Centre for Geoscience Computing (GeoComp)
*
*****************************************************************************/
#ifndef __PASO_BLOCKOPS_H__
#define __PASO_BLOCKOPS_H__
#include "Paso.h"
#include "PasoException.h"
#include <cstring> // memcpy
#ifdef ESYS_HAVE_LAPACK
#ifdef ESYS_MKL_LAPACK
#include <mkl_lapack.h>
#include <mkl_cblas.h>
#else
extern "C" {
#include <clapack.h>
#include <cblas.h>
}
#endif
#endif
namespace paso {
inline void BlockOps_Cpy_N(dim_t N, double* R, const double* V)
{
memcpy((void*)R, (void*)V, N*sizeof(double));
}
/// performs operation R=R-mat*V (V and R are not overlapping) - 2x2
inline void BlockOps_SMV_2(double* R, const double* mat, const double* V)
{
const double S1 = V[0];
const double S2 = V[1];
const double A11 = mat[0];
const double A12 = mat[2];
const double A21 = mat[1];
const double A22 = mat[3];
R[0] -= A11 * S1 + A12 * S2;
R[1] -= A21 * S1 + A22 * S2;
}
/// performs operation R=R-mat*V (V and R are not overlapping) - 3x3
inline void BlockOps_SMV_3(double* R, const double* mat, const double* V)
{
const double S1 = V[0];
const double S2 = V[1];
const double S3 = V[2];
const double A11 = mat[0];
const double A21 = mat[1];
const double A31 = mat[2];
const double A12 = mat[3];
const double A22 = mat[4];
const double A32 = mat[5];
const double A13 = mat[6];
const double A23 = mat[7];
const double A33 = mat[8];
R[0] -= A11 * S1 + A12 * S2 + A13 * S3;
R[1] -= A21 * S1 + A22 * S2 + A23 * S3;
R[2] -= A31 * S1 + A32 * S2 + A33 * S3;
}
#define PASO_MISSING_CLAPACK throw PasoException("You need to install a LAPACK version to enable operations on block sizes > 3.")
/// performs operation R=R-mat*V (V and R are not overlapping) - NxN
inline void BlockOps_SMV_N(dim_t N, double* R, const double* mat, const double* V)
{
#ifdef ESYS_HAVE_LAPACK
cblas_dgemv(CblasColMajor,CblasNoTrans, N, N, -1., mat, N, V, 1, 1., R, 1);
#else
PASO_MISSING_CLAPACK;
#endif
}
inline void BlockOps_MV_N(dim_t N, double* R, const double* mat, const double* V)
{
#ifdef ESYS_HAVE_LAPACK
cblas_dgemv(CblasColMajor,CblasNoTrans, N, N, 1., mat, N, V, 1, 0., R, 1);
#else
PASO_MISSING_CLAPACK;
#endif
}
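/// computes invA = A^{-1} for a 2x2 column-major matrix via the adjugate; sets *failed if A is singular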
inline void BlockOps_invM_2(double* invA, const double* A, int* failed)
{
const double A11 = A[0];
const double A12 = A[2];
const double A21 = A[1];
const double A22 = A[3];
double D = A11*A22-A12*A21;
if (std::abs(D) > 0) {
D = 1./D;
invA[0] = A22*D;
invA[1] = -A21*D;
invA[2] = -A12*D;
invA[3] = A11*D;
} else {
*failed = 1;
}
}
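/// computes invA = A^{-1} for a 3x3 column-major matrix via the adjugate; sets *failed if A is singular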
inline void BlockOps_invM_3(double* invA, const double* A, int* failed)
{
const double A11 = A[0];
const double A21 = A[1];
const double A31 = A[2];
const double A12 = A[3];
const double A22 = A[4];
const double A32 = A[5];
const double A13 = A[6];
const double A23 = A[7];
const double A33 = A[8];
double D = A11*(A22*A33-A23*A32) +
A12*(A31*A23-A21*A33) +
A13*(A21*A32-A31*A22);
if (std::abs(D) > 0) {
D = 1./D;
invA[0] = (A22*A33-A23*A32)*D;
invA[1] = (A31*A23-A21*A33)*D;
invA[2] = (A21*A32-A31*A22)*D;
invA[3] = (A13*A32-A12*A33)*D;
invA[4] = (A11*A33-A31*A13)*D;
invA[5] = (A12*A31-A11*A32)*D;
invA[6] = (A12*A23-A13*A22)*D;
invA[7] = (A13*A21-A11*A23)*D;
invA[8] = (A11*A22-A12*A21)*D;
} else {
*failed = 1;
}
}
/// LU factorization of NxN matrix mat with partial pivoting
inline void BlockOps_invM_N(dim_t N, double* mat, index_t* pivot, int* failed)
{
#ifdef ESYS_HAVE_LAPACK
#ifdef ESYS_MKL_LAPACK
int res = 0;
dgetrf(&N, &N, mat, &N, pivot, &res);
if (res != 0)
*failed = 1;
#else
int res = clapack_dgetrf(CblasColMajor, N, N, mat, N, pivot);
if (res != 0)
*failed = 1;
#endif // ESYS_MKL_LAPACK
#else
PASO_MISSING_CLAPACK;
#endif
}
/// solves system of linear equations A*X=B
inline void BlockOps_solve_N(dim_t N, double* X, double* mat, index_t* pivot, int* failed)
{
#ifdef ESYS_HAVE_LAPACK
#ifdef ESYS_MKL_LAPACK
int res = 0;
int ONE = 1;
dgetrs("N", &N, &ONE, mat, &N, pivot, X, &N, &res);
if (res != 0)
*failed = 1;
#else
int res = clapack_dgetrs(CblasColMajor, CblasNoTrans, N, 1, mat, N, pivot, X, N);
if (res != 0)
*failed = 1;
#endif // ESYS_MKL_LAPACK
#else
PASO_MISSING_CLAPACK;
#endif
}
/// inplace matrix vector product - order 2
inline void BlockOps_MViP_2(const double* mat, double* V)
{
const double S1 = V[0];
const double S2 = V[1];
const double A11 = mat[0];
const double A12 = mat[2];
const double A21 = mat[1];
const double A22 = mat[3];
V[0] = A11 * S1 + A12 * S2;
V[1] = A21 * S1 + A22 * S2;
}
/// inplace matrix vector product - order 3
inline void BlockOps_MViP_3(const double* mat, double* V)
{
const double S1 = V[0];
const double S2 = V[1];
const double S3 = V[2];
const double A11 = mat[0];
const double A21 = mat[1];
const double A31 = mat[2];
const double A12 = mat[3];
const double A22 = mat[4];
const double A32 = mat[5];
const double A13 = mat[6];
const double A23 = mat[7];
const double A33 = mat[8];
V[0] = A11 * S1 + A12 * S2 + A13 * S3;
V[1] = A21 * S1 + A22 * S2 + A23 * S3;
V[2] = A31 * S1 + A32 * S2 + A33 * S3;
}
inline void BlockOps_solveAll(dim_t n_block, dim_t n, double* D,
index_t* pivot, double* x)
{
if (n_block == 1) {
#pragma omp parallel for
for (dim_t i=0; i<n; ++i)
x[i] *= D[i];
} else if (n_block == 2) {
#pragma omp parallel for
for (dim_t i=0; i<n; ++i)
BlockOps_MViP_2(&D[4*i], &x[2*i]);
} else if (n_block == 3) {
#pragma omp parallel for
for (dim_t i=0; i<n; ++i)
BlockOps_MViP_3(&D[9*i], &x[3*i]);
} else {
int failed = 0;
#pragma omp parallel for
for (dim_t i=0; i<n; ++i) {
const dim_t block_size = n_block*n_block;
BlockOps_solve_N(n_block, &x[n_block*i], &D[block_size*i], &pivot[n_block*i], &failed);
}
if (failed > 0) {
throw PasoException("BlockOps_solveAll: solution failed.");
}
}
}
} // namespace paso
#endif // __PASO_BLOCKOPS_H__
|
laplace2d.c
|
/*
* Copyright 2012 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <openacc.h>
#include "timer.h"
#define NN 4096
#define NM 4096
double A[NN][NM];
double Anew[NN][NM];
int main(int argc, char** argv)
{
const int n = NN;
const int m = NM;
const int iter_max = 1000;
const double tol = 1.0e-6;
double error = 1.0;
memset(A, 0, n * m * sizeof(double));
memset(Anew, 0, n * m * sizeof(double));
for (int j = 0; j < n; j++)
{
A[j][0] = 1.0;
Anew[j][0] = 1.0;
}
printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m);
StartTimer();
int iter = 0;
while ( error > tol && iter < iter_max )
{
error = 0.0;
#pragma omp parallel for shared(m, n, Anew, A) reduction(max:error)
for( int j = 1; j < n-1; j++)
{
for( int i = 1; i < m-1; i++ )
{
Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1]
+ A[j-1][i] + A[j+1][i]);
error = fmax( error, fabs(Anew[j][i] - A[j][i]));
}
}
#pragma omp parallel for shared(m, n, Anew, A)
for( int j = 1; j < n-1; j++)
{
for( int i = 1; i < m-1; i++ )
{
A[j][i] = Anew[j][i];
}
}
if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error);
iter++;
}
double runtime = GetTimer();
printf(" total: %f s\n", runtime / 1000);
}
|
Vec.h
|
/*************************************************************************
* Copyright (c) 2014 Zhang Dongdong
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**************************************************************************/
#ifndef VEC_H
#define VEC_H
/*
Szymon Rusinkiewicz
Princeton University
Vec.h
Class for a constant-length vector
Supports the following operations:
vec v1; // Initialized to (0,0,0)
vec v2(1,2,3); // Initialized to (1,2,3)
vec v3(v2); // Copy constructor
float farray[3];
vec v4 = vec(farray); // Explicit: "v4 = farray" won't work
Vec<3,double> vd; // The "vec" used above is Vec<3,float>
point p1, p2, p3; // Same as vec
v3 = v1 + v2; // Also -, *, / (all componentwise)
v3 = 3.5f * v1; // Also vec * scalar, vec / scalar
// NOTE: scalar has to be the same type:
// it won't work to do double * vec<float>
v1 = min(v2,v3); // Componentwise min/max
v1 = sin(v2); // Componentwise - all the usual functions...
swap(v1,v2); // In-place swap
v3 = v1 DOT v2; // Actually operator^
v3 = v1 CROSS v2; // Actually operator%
float f = v1[0]; // Subscript
float *fp = v1; // Implicit conversion to float *
f = len(v1); // Length (also len2 == squared length)
f = dist(p1, p2); // Distance (also dist2 == squared distance)
normalize(v1); // Normalize (i.e., make it unit length)
// normalize(vec(0,0,0)) => vec(1,0,0)
v1 = trinorm(p1,p2,p3); // Normal of triangle (area-weighted)
cout << v1 << endl; // iostream output in the form (1,2,3)
cin >> v2; // iostream input using the same syntax
Also defines the utility functions sqr, cube, sgn, fract, clamp, mix,
step, smoothstep, faceforward, reflect, and refract
*/
// Windows defines min and max as macros, which prevents us from using the
// type-safe versions from std::, as well as interfering with method defns.
// Also define NOMINMAX, which prevents future bad definitions.
#ifdef min
# undef min
#endif
#ifdef max
# undef max
#endif
#ifndef NOMINMAX
# define NOMINMAX
#endif
#include <cmath>
#include <iostream>
#include <algorithm>
#include <cstddef>
// Let gcc optimize conditional branches a bit better...
#ifndef likely
# if !defined(__GNUC__) || (__GNUC__ == 2 && __GNUC_MINOR__ < 96)
# define likely(x) (x)
# define unlikely(x) (x)
# else
# define likely(x) (__builtin_expect((x), 1))
# define unlikely(x) (__builtin_expect((x), 0))
# endif
#endif
// Boost-like compile-time assertion checking
template <bool X> struct VEC_STATIC_ASSERTION_FAILURE;
template <> struct VEC_STATIC_ASSERTION_FAILURE<true>
{ void operator () () {} };
#define VEC_STATIC_CHECK(expr) VEC_STATIC_ASSERTION_FAILURE<bool(expr)>()
template <int D, class T = float>
class Vec {
protected:
T v[D];
public:
// Constructor for no arguments. Everything initialized to 0.
Vec() { for (int i = 0; i < D; i++) v[i] = T(0); }
// Uninitialized constructor - meant mostly for internal use
#define VEC_UNINITIALIZED ((void *) 0)
Vec(void *) {}
// Constructors for 2-4 arguments
Vec(T x, T y)
{ VEC_STATIC_CHECK(D == 2); v[0] = x; v[1] = y; }
Vec(T x, T y, T z)
{ VEC_STATIC_CHECK(D == 3); v[0] = x; v[1] = y; v[2] = z; }
Vec(T x, T y, T z, T w)
{ VEC_STATIC_CHECK(D == 4); v[0] = x; v[1] = y; v[2] = z; v[3] = w; }
// Constructor from anything that can be accessed using []
// Pretty aggressive, so marked as explicit.
template <class S> explicit Vec(const S &x)
{ for (int i = 0; i < D; i++) v[i] = T(x[i]); }
// No destructor or assignment operator needed
// Array reference and conversion to pointer - no bounds checking
const T &operator [] (int i) const
{ return v[i]; }
T &operator [] (int i)
{ return v[i]; }
operator const T * () const
{ return v; }
operator const T * ()
{ return v; }
operator T * ()
{ return v; }
// Member operators
Vec<D,T> &operator += (const Vec<D,T> &x)
{
for (int i = 0; i < D; i++)
#pragma omp atomic
v[i] += x[i];
return *this;
}
Vec<D,T> &operator -= (const Vec<D,T> &x)
{
for (int i = 0; i < D; i++)
#pragma omp atomic
v[i] -= x[i];
return *this;
}
Vec<D,T> &operator *= (const Vec<D,T> &x)
{
for (int i = 0; i < D; i++)
#pragma omp atomic
v[i] *= x[i];
return *this;
}
Vec<D,T> &operator *= (const T &x)
{
for (int i = 0; i < D; i++)
#pragma omp atomic
v[i] *= x;
return *this;
}
Vec<D,T> &operator /= (const Vec<D,T> &x)
{
for (int i = 0; i < D; i++)
#pragma omp atomic
v[i] /= x[i];
return *this;
}
Vec<D,T> &operator /= (const T &x)
{
for (int i = 0; i < D; i++)
#pragma omp atomic
v[i] /= x;
return *this;
}
// Set each component to min/max of this and the other vector
Vec<D,T> &min(const Vec<D,T> &x)
{
#pragma omp critical
for (int i = 0; i < D; i++)
if (x[i] < v[i]) v[i] = x[i];
return *this;
}
Vec<D,T> &max(const Vec<D,T> &x)
{
#pragma omp critical
for (int i = 0; i < D; i++)
if (x[i] > v[i]) v[i] = x[i];
return *this;
}
// Swap with another vector. (Also exists as a global function.)
void swap(Vec<D,T> &x)
{
using namespace std;
#pragma omp critical
for (int i = 0; i < D; i++) swap(v[i], x[i]);
}
// Outside of class: + - * / % ^ << >>
// Some partial compatibility with std::vector
typedef T value_type;
typedef T *pointer;
typedef const T *const_pointer;
typedef T *iterator;
typedef const T *const_iterator;
typedef T &reference;
typedef const T &const_reference;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
size_t size() const
{ return D; }
T *begin()
{ return &(v[0]); }
const T *begin() const
{ return &(v[0]); }
T *end()
{ return begin() + D; }
const T *end() const
{ return begin() + D; }
// clear() and empty() - set to zero or check for all zero
void clear()
{ for (int i = 0; i < D; i++) v[i] = T(0); }
bool empty() const
{
for (int i = 0; i < D; i++)
if (v[i]) return false;
return true;
}
// Some partial compatibility with std::valarray, plus generalizations
T sum() const
{
T total = v[0];
for (int i = 1; i < D; i++)
total += v[i];
return total;
}
T sumabs() const
{
T total = fabs(v[0]);
for (int i = 1; i < D; i++)
total += fabs(v[i]);
return total;
}
T avg() const
{ return sum() / D; }
T product() const
{
T total = v[0];
for (int i = 1; i < D; i++)
total *= v[i];
return total;
}
T min() const
{
T m = v[0];
for (int i = 1; i < D; i++)
if (v[i] < m) m = v[i];
return m;
}
T max() const
{
T m = v[0];
for (int i = 1; i < D; i++)
if (v[i] > m) m = v[i];
return m;
}
Vec<D,T> apply(T func(T)) const
{
Vec<D,T> result(VEC_UNINITIALIZED);
for (int i = 0; i < D; i++) result[i] = func(v[i]);
return result;
}
Vec<D,T> apply(T func(const T&)) const
{
Vec<D,T> result(VEC_UNINITIALIZED);
for (int i = 0; i < D; i++) result[i] = func(v[i]);
return result;
}
Vec<D,T> cshift(int n) const
{
Vec<D,T> result(VEC_UNINITIALIZED);
if (n < 0)
n = (n % D) + D;
for (int i = 0; i < D; i++)
result[i] = v[(i+n)%D];
return result;
}
Vec<D,T> shift(int n) const
{
if (abs(n) >= D)
return Vec<D,T>();
Vec<D,T> result; // Must be initialized to zero
int start = n < T(0) ? -n : 0;
int stop = n > T(0) ? D - n : D;
for (int i = start; i < stop; i++) result[i] = v[i+n];
return result;
}
};
// Shorthands for particular flavors of Vecs
typedef Vec<3,float> vec;
typedef Vec<3,float> point;
typedef Vec<2,float> vec2;
typedef Vec<3,float> vec3;
typedef Vec<4,float> vec4;
typedef Vec<2,int> ivec2;
typedef Vec<3,int> ivec3;
typedef Vec<4,int> ivec4;
typedef Vec<3, unsigned short int> uvec3;
// Nonmember operators that take two Vecs
template <int D, class T>
static inline const Vec<D,T> operator + (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
Vec<D,T> result(VEC_UNINITIALIZED);
for (int i = 0; i < D; i++)
result[i] = v1[i] + v2[i];
return result;
}
template <int D, class T>
static inline const Vec<D,T> operator - (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
Vec<D,T> result(VEC_UNINITIALIZED);
for (int i = 0; i < D; i++)
result[i] = v1[i] - v2[i];
return result;
}
template <int D, class T>
static inline const Vec<D,T> operator * (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
Vec<D,T> result(VEC_UNINITIALIZED);
for (int i = 0; i < D; i++)
result[i] = v1[i] * v2[i];
return result;
}
template <int D, class T>
static inline const Vec<D,T> operator / (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
Vec<D,T> result(VEC_UNINITIALIZED);
for (int i = 0; i < D; i++)
result[i] = v1[i] / v2[i];
return result;
}
// Dot product
template <int D, class T>
static inline const T operator ^ (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
T sum = v1[0] * v2[0];
for (int i = 1; i < D; i++)
sum += v1[i] * v2[i];
return sum;
}
#define DOT ^
// Cross product - only in 3 dimensions
template <class T>
static inline const Vec<3,T> operator % (const Vec<3,T> &v1, const Vec<3,T> &v2)
{
return Vec<3,T>(v1[1]*v2[2] - v1[2]*v2[1],
v1[2]*v2[0] - v1[0]*v2[2],
v1[0]*v2[1] - v1[1]*v2[0]);
}
#define CROSS %
// Component-wise equality and inequality (#include the usual caveats
// about comparing floats for equality...)
template <int D, class T>
static inline bool operator == (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
for (int i = 0; i < D; i++)
if (v1[i] != v2[i])
return false;
return true;
}
template <int D, class T>
static inline bool operator != (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
for (int i = 0; i < D; i++)
if (v1[i] != v2[i])
return true;
return false;
}
// Unary operators
template <int D, class T>
static inline const Vec<D,T> &operator + (const Vec<D,T> &v)
{
return v;
}
template <int D, class T>
static inline const Vec<D,T> operator - (const Vec<D,T> &v)
{
Vec<D,T> result(VEC_UNINITIALIZED);
for (int i = 0; i < D; i++)
result[i] = -v[i];
return result;
}
template <int D, class T>
static inline bool operator ! (const Vec<D,T> &v)
{
return v.empty();
}
// Vec/scalar operators
template <int D, class T>
static inline const Vec<D,T> operator * (const T &x, const Vec<D,T> &v)
{
Vec<D,T> result(VEC_UNINITIALIZED);
for (int i = 0; i < D; i++)
result[i] = x * v[i];
return result;
}
template <int D, class T>
static inline const Vec<D,T> operator * (const Vec<D,T> &v, const T &x)
{
Vec<D,T> result(VEC_UNINITIALIZED);
for (int i = 0; i < D; i++)
result[i] = v[i] * x;
return result;
}
template <int D, class T>
static inline const Vec<D,T> operator / (const T &x, const Vec<D,T> &v)
{
Vec<D,T> result(VEC_UNINITIALIZED);
for (int i = 0; i < D; i++)
result[i] = x / v[i];
return result;
}
template <int D, class T>
static inline const Vec<D,T> operator / (const Vec<D,T> &v, const T &x)
{
Vec<D,T> result(VEC_UNINITIALIZED);
for (int i = 0; i < D; i++)
result[i] = v[i] / x;
return result;
}
// iostream operators
template <int D, class T>
static inline std::ostream &operator << (std::ostream &os, const Vec<D,T> &v)
{
os << "(";
for (int i = 0; i < D-1; i++)
os << v[i] << ", ";
return os << v[D-1] << ")";
}
template <int D, class T>
static inline std::istream &operator >> (std::istream &is, Vec<D,T> &v)
{
char c1 = 0, c2 = 0;
is >> c1;
if (c1 == '(' || c1 == '[') {
is >> v[0] >> std::ws >> c2;
for (int i = 1; i < D; i++) {
if (c2 == ',')
is >> v[i] >> std::ws >> c2;
else
is.setstate(std::ios::failbit);
}
}
if (c1 == '(' && c2 != ')')
is.setstate(std::ios::failbit);
else if (c1 == '[' && c2 != ']')
is.setstate(std::ios::failbit);
return is;
}
// Swap two Vecs. Not atomic, unlike class method.
namespace std {
template <int D, class T>
static inline void swap(Vec<D,T> &v1, Vec<D,T> &v2)
{
for (int i = 0; i < D; i++)
swap(v1[i], v2[i]);
}
}
// Squared length
template <int D, class T>
static inline const T len2(const Vec<D,T> &v)
{
T l2 = v[0] * v[0];
for (int i = 1; i < D; i++)
l2 += v[i] * v[i];
return l2;
}
// Utility functions for square and cube, to go along with sqrt and cbrt
template <class T>
static inline T sqr(const T &x)
{
return x*x;
}
template <class T>
static inline T cube(const T &x)
{
return x*x*x;
}
// Length
template <int D, class T>
static inline const T len(const Vec<D,T> &v)
{
return std::sqrt(len2(v));
}
// Squared distance
template <int D, class T>
static inline const T dist2(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
T d2 = sqr(v2[0]-v1[0]);
for (int i = 1; i < D; i++)
d2 += sqr(v2[i]-v1[i]);
return d2;
}
// Distance
template <int D, class T>
static inline const T dist(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
return std::sqrt(dist2(v1,v2));
}
// In-place normalization to unit length
template <int D, class T>
static inline Vec<D,T> normalize(Vec<D,T> &v)
{
T l = len(v);
if (unlikely(l <= T(0))) {
v[0] = T(1);
for (int i = 1; i < D; i++)
v[i] = T(0);
return v;
}
l = T(1) / l;
for (int i = 0; i < D; i++)
v[i] *= l;
return v;
}
// Area-weighted triangle face normal
template <class T>
static inline T trinorm(const T &v0, const T &v1, const T &v2)
{
return (typename T::value_type) 0.5 * ((v1 - v0) CROSS (v2 - v0));
}
// Sign of a scalar. Note that sgn(0) == 1.
template <class T>
static inline T sgn(const T &x)
{
return (x < T(0)) ? T(-1) : T(1);
}
// Utility functions based on GLSL
template <class T>
static inline T fract(const T &x)
{
return x - floor(x);
}
template <class T>
static inline T clamp(const T &x, const T &a, const T &b)
{
return x > a ? x < b ? x : b : a; // returns a on NaN
}
template <class T, class S>
static inline T mix(const T &x, const T &y, const S &a)
{
return (S(1)-a) * x + a * y;
}
template <class T>
static inline T step(const T &x, const T &a)
{
return x < a ? T(0) : T(1);
}
template <class T>
static inline T smoothstep(const T &a, const T &b, const T &x)
{
if (b <= a) return step(x,a);
T t = (x - a) / (b - a);
return t <= T(0) ? T(0) : t >= T(1) ? T(1) : t * t * (T(3) - T(2) * t);
}
template <int D, class T>
static inline T faceforward(const Vec<D,T> &N, const Vec<D,T> &I,
const Vec<D,T> &Nref)
{
return ((Nref DOT I) < T(0)) ? N : -N;
}
template <int D, class T>
static inline T reflect(const Vec<D,T> &I, const Vec<D,T> &N)
{
return I - (T(2) * (N DOT I)) * N;
}
template <int D, class T>
static inline T refract(const Vec<D,T> &I, const Vec<D,T> &N,
const T &eta)
{
T NdotI = N DOT I;
T k = T(1) - sqr(eta) * (T(1) - sqr(NdotI));
return (k < T(0)) ? T(0) : eta * I - (eta * NdotI * std::sqrt(k)) * N;
}
// C99 compatibility functions for MSVS
#ifdef _WIN32
#ifdef cbrt
# undef cbrt
#endif
inline float cbrt(float x)
{
return (x < 0.0f) ? -std::pow(-x, 1.0f / 3.0f) : std::pow(x, 1.0f / 3.0f);
}
inline double cbrt(double x)
{
return (x < 0.0) ? -std::pow(-x, 1.0 / 3.0) : std::pow(x, 1.0 / 3.0);
}
inline long double cbrt(long double x)
{
return (x < 0.0L) ? -std::pow(-x, 1.0L / 3.0L) : std::pow(x, 1.0L / 3.0L);
}
#ifdef round
# undef round
#endif
inline float round(float x)
{
return (x < 0.0f) ? float(int(x - 0.5f)) : float(int(x + 0.5f));
}
inline double round(double x)
{
return (x < 0.0) ? double(int(x - 0.5)) : double(int(x + 0.5));
}
inline long double round(long double x)
{
return (x < 0.0L) ? (long double)(int(x - 0.5L)) : (long double)(int(x + 0.5L));
}
#ifdef trunc
# undef trunc
#endif
inline float trunc(float x)
{
return (x < 0.0f) ? float(int(x)) : float(int(x));
}
inline double trunc(double x)
{
return (x < 0.0f) ? double(int(x)) : double(int(x));
}
inline long double trunc(long double x)
{
return (x < 0.0f) ? (long double)(int(x)) : (long double)(int(x));
}
#endif
// Generic macros for declaring 1-, 2-, and 3- argument
// componentwise functions on vecs
#define VEC_DECLARE_ONEARG(name) \
template <int D, class T> \
static inline Vec<D,T> name(const Vec<D,T> &v) \
{ \
using namespace std; \
Vec<D,T> result(VEC_UNINITIALIZED); \
for (int i = 0; i < D; i++) \
result[i] = name(v[i]); \
return result; \
}
#define VEC_DECLARE_TWOARG(name) \
template <int D, class T> \
static inline Vec<D,T> name(const Vec<D,T> &v, const T &w) \
{ \
using namespace std; \
Vec<D,T> result(VEC_UNINITIALIZED); \
for (int i = 0; i < D; i++) \
result[i] = name(v[i], w); \
return result; \
} \
template <int D, class T> \
static inline Vec<D,T> name(const Vec<D,T> &v, const Vec<D,T> &w) \
{ \
using namespace std; \
Vec<D,T> result(VEC_UNINITIALIZED); \
for (int i = 0; i < D; i++) \
result[i] = name(v[i], w[i]); \
return result; \
}
#define VEC_DECLARE_THREEARG(name) \
template <int D, class T> \
static inline Vec<D,T> name(const Vec<D,T> &v, const T &w, const T &x) \
{ \
using namespace std; \
Vec<D,T> result(VEC_UNINITIALIZED); \
for (int i = 0; i < D; i++) \
result[i] = name(v[i], w, x); \
return result; \
} \
template <int D, class T> \
static inline Vec<D,T> name(const Vec<D,T> &v, const Vec<D,T> &w, const Vec<D,T> &x) \
{ \
using namespace std; \
Vec<D,T> result(VEC_UNINITIALIZED); \
for (int i = 0; i < D; i++) \
result[i] = name(v[i], w[i], x[i]); \
return result; \
}
VEC_DECLARE_ONEARG(fabs)
VEC_DECLARE_ONEARG(floor)
VEC_DECLARE_ONEARG(ceil)
VEC_DECLARE_ONEARG(round)
VEC_DECLARE_ONEARG(trunc)
VEC_DECLARE_ONEARG(sin)
VEC_DECLARE_ONEARG(asin)
VEC_DECLARE_ONEARG(sinh)
VEC_DECLARE_ONEARG(cos)
VEC_DECLARE_ONEARG(acos)
VEC_DECLARE_ONEARG(cosh)
VEC_DECLARE_ONEARG(tan)
VEC_DECLARE_ONEARG(atan)
VEC_DECLARE_ONEARG(tanh)
VEC_DECLARE_ONEARG(exp)
VEC_DECLARE_ONEARG(log)
VEC_DECLARE_ONEARG(sqrt)
VEC_DECLARE_ONEARG(sqr)
VEC_DECLARE_ONEARG(cbrt)
VEC_DECLARE_ONEARG(cube)
VEC_DECLARE_ONEARG(sgn)
VEC_DECLARE_TWOARG(atan2)
VEC_DECLARE_TWOARG(pow)
VEC_DECLARE_TWOARG(fmod)
VEC_DECLARE_TWOARG(step)
namespace std {
VEC_DECLARE_TWOARG(min)
VEC_DECLARE_TWOARG(max)
}
VEC_DECLARE_THREEARG(smoothstep)
VEC_DECLARE_THREEARG(clamp)
#undef VEC_DECLARE_ONEARG
#undef VEC_DECLARE_TWOARG
#undef VEC_DECLARE_THREEARG
// Inject into std namespace
namespace std {
using ::fabs;
using ::floor;
using ::ceil;
using ::round;
using ::trunc;
using ::sin;
using ::asin;
using ::sinh;
using ::cos;
using ::acos;
using ::cosh;
using ::tan;
using ::atan;
using ::tanh;
using ::exp;
using ::log;
using ::sqrt;
using ::cbrt;
using ::atan2;
using ::pow;
using ::fmod;
}
// Both valarrays and GLSL use abs() on a vector to mean fabs().
// Let's do the same...
template <int D, class T>
static inline Vec<D,T> abs(const Vec<D,T> &v)
{
return fabs(v);
}
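// A brief usage sketch of the componentwise wrappers generated above
// (comment only; Vec's element-wise constructor is assumed from this header):
//
//   Vec<3,float> v(-1.5f, 0.25f, 2.75f);
//   abs(v);               // (1.5, 0.25, 2.75)
//   floor(v);             // (-2, 0, 2)
//   clamp(v, 0.0f, 1.0f); // (0, 0.25, 1)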
#endif
|
transition_matrix.h
|
/*
* Created on: Mar 22, 2016
* Author: Steffen Rechner <[email protected]>
*
* This file is part of the marathon software.
*
* Copyright (c) 2016, Steffen Rechner
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef INCLUDE_MARATHON_TRANSITIONMATRIX_H_
#define INCLUDE_MARATHON_TRANSITIONMATRIX_H_
#include "state_graph.h"
#ifdef USE_ARMADILLO
#include <armadillo>
#endif
#ifdef USE_BLAS
#include <cblas.h>
#endif
namespace marathon {
/**
* Virtual Base Class for Transition Matrix.
*/
template<class T=double>
class TransitionMatrix {
protected:
size_t N; // number of rows and columns
size_t ld; // lead dimension (upper bound on n)
std::vector<T> data; // actual data array
public:
/**
* Standard Constructor. Create a zero-initialized transition matrix of size N times N.
* @param N number of rows or columns
*/
TransitionMatrix(const size_t N) :
N(N),
ld(((N + 255) / 256) * 256) // lead dimension is the next multiple of 256
{
data.resize(N * ld, 0);
}
/**
* Constructor. Create Transition Matrix from State Graph.
* @param sg Pointer to state graph object.
*/
TransitionMatrix(const StateGraph &sg) :
TransitionMatrix(sg.getNumStates()) {
for (const Transition *t : sg.getArcs()) {
this->data[t->from * ld + t->to] = t->weight.convert_to<T>();
}
}
/**
* Return size of the matrix.
*/
size_t getDimension() const {
return N;
}
/**
* Return lead dimension of the matrix.
*/
size_t getLeadDimension() const {
return ld;
}
/**
* Return a const reference to the underlying data vector.
*/
const std::vector<T> &getData() const {
return data;
}
/**
* Return P[i,j].
* @param i row index
* @param j column index
* @return P[i,j]
*/
T get(size_t i, size_t j) const {
return data[i * ld + j];
}
/**
* Set P[i,j] to x.
* @param i row index.
* @param j column index.
* @param x value of type T
*/
void set(size_t i, size_t j, T x) {
data[i * ld + j] = x;
}
/**
* Overwrite the current matrix with zeroes.
*/
virtual void clear() {
data.assign(N * ld, T(0)); // assign() zeroes existing entries, which resize() would leave untouched
}
/**
* Compute P^k, where P denotes this matrix.
* @param k Exponent.
* @return P^k
*/
TransitionMatrix<T> pow(unsigned int k) const {
// init matrix
if (k == 0) {
return eye(N);
}
// create binary representation of k
int bin[32];
memset(bin, 0, 32 * sizeof(int));
int l = 31;
unsigned int kk = k;
while (kk > 0) {
bin[l] = kk % 2;
kk >>= 1;
l--;
}
l += 2;
#ifdef DEBUG
std::cout << "bin: ";
for (int i = 0; i < 32; i++) {
std::cout << bin[i];
}
std::cout << " l=" << l << std::endl;
#endif
TransitionMatrix<T> A(*this); // will be returned
// binary exponentiation, left to right (see D. E. Knuth: Seminumerical Algorithms, Vol. 2, p. 461)
while (l < 32) {
// square
A = A * A;
// multiply
if (bin[l] == 1)
A = A * *this;
l++;
}
return A;
}
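/*
 * Worked example (comment only): for k = 13 = 0b1101 the loop above fills
 * bin[28..31] = 1,1,0,1 and l ends up pointing just past the most
 * significant bit. Starting from A = P (the leading 1-bit), the
 * left-to-right scan computes
 *   A = A*A * P -> P^3  (bit 1)
 *   A = A*A     -> P^6  (bit 0)
 *   A = A*A * P -> P^13 (bit 1)
 * i.e. 3 squarings and 2 extra multiplications instead of 12 plain ones.
 */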
/**
* Matrix multiplication.
* @param P Transition matrix.
* @return this * P
*/
TransitionMatrix<T> operator*(const TransitionMatrix<T> &P) const {
TransitionMatrix<T> X(N); // will be returned
#pragma omp parallel for
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
T p_ij = 0;
for (size_t k = 0; k < N; k++) {
p_ij += this->get(i, k) * P.get(k, j);
}
X.set(i, j, p_ij);
}
}
return X;
}
/**
* Return a string that represents the matrix.
*/
virtual std::string to_string() const {
std::stringstream ss;
ss << "\n";
for (size_t i = 0; i < this->N; i++) {
ss << " ";
for (size_t j = 0; j < this->N - 1; j++) {
ss << std::setprecision(std::numeric_limits<T>::digits10) << std::fixed
<< this->data[i * this->ld + j] << " ";
}
ss << std::setprecision(std::numeric_limits<T>::digits10) << std::fixed
<< this->data[i * this->ld + this->N - 1];
ss << "\n";
}
return ss.str();
}
/**
* To output into streams.
*/
friend inline std::ostream &operator<<(std::ostream &out,
const TransitionMatrix<T> &s) {
out << s.to_string();
return out;
}
/**
* Return the identity matrix with N rows and columns.
* @param N Number of rows and columns.
* @return Identity matrix.
*/
static TransitionMatrix<T> eye(size_t N) {
TransitionMatrix<T> P(N);
for (size_t i = 0; i < N; i++)
P.set(i,i,1);
return P;
}
};
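/**
 * Minimal usage sketch (illustrative only; assumes a StateGraph `sg`
 * constructed elsewhere with the marathon library):
 *
 *   marathon::TransitionMatrix<double> P(sg); // P[i][j] = arc weight i->j
 *   auto P8 = P.pow(8);                       // three squarings
 *   double p = P8.get(0, 0);                  // 8-step return probability
 */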
/***********************************************************************
* template specializations
**********************************************************************/
#ifdef USE_BLAS
template<>
inline TransitionMatrix<float> TransitionMatrix<float>::operator*(const TransitionMatrix<float> &P) const {
const float alpha = 1.0f;
const float beta = 0.0f;
TransitionMatrix<float> X(N);
// use cblas: X = this * P, matching the generic operator* above
cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, N, N, N, alpha,
&data[0], ld, &P.data[0], P.ld, beta, &X.data[0], X.ld);
return X;
}
template<>
inline TransitionMatrix<double> TransitionMatrix<double>::operator*(const TransitionMatrix<double> &P) const {
const double alpha = 1.0;
const double beta = 0.0;
TransitionMatrix<double> X(N);
// use cblas: X = this * P, matching the generic operator* above
cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, N, N, N, alpha,
&data[0], ld, &P.data[0], P.ld, beta, &X.data[0], X.ld);
return X;
}
#endif
}
#endif /* INCLUDE_MARATHON_TRANSITIONMATRIX_H_ */
|
extern_init.c
|
#include <stdio.h>
#include "assert.h"
#include <unistd.h>
#define NZ 3
#pragma omp declare target
int colstat[NZ]={1,2,3};
#pragma omp end declare target
int main(){
#pragma omp target map(alloc:colstat[0:NZ])
{
colstat[1] = 1111;
}
fprintf(stderr, "BEFORE colstat[0..2] %d %d %d \n", colstat[0], colstat[1], colstat[2]);
#pragma omp target update from(colstat)
fprintf(stderr, "AFTER colstat[0..2] %d %d %d \n", colstat[0], colstat[1], colstat[2]);
if (colstat[0] == 1 && colstat[1] == 1111 && colstat[2] == 3)
printf("Success\n");
else
printf("Fail!\n");
return (colstat[0] == 1 && colstat[1] == 1111 && colstat[2] == 3) ? 0 : 1 ;
}
|
SoaDistanceTableAA.h
|
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, [email protected], Intel Corp.
// Amrita Mathuriya, [email protected], Intel Corp.
//
// File created by: Jeongnim Kim, [email protected], Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_DTDIMPL_AA_H
#define QMCPLUSPLUS_DTDIMPL_AA_H
#include "simd/algorithm.hpp"
namespace qmcplusplus
{
/**@ingroup nnlist
* @brief A class derived from DistanceTableData, specialized for the dense case
*/
template<typename T, unsigned D, int SC>
struct SoaDistanceTableAA: public DTD_BConds<T,D,SC>, public DistanceTableData
{
int Ntargets;
int Ntargets_padded;
SoaDistanceTableAA(ParticleSet& target)
: DTD_BConds<T,D,SC>(target.Lattice), DistanceTableData(target,target)
{
resize(target.getTotalNum());
}
#if (__cplusplus >= 201103L)
SoaDistanceTableAA()=delete;
SoaDistanceTableAA(const SoaDistanceTableAA&)=delete;
#endif
~SoaDistanceTableAA() {}
size_t compute_size(int N)
{
const size_t N_padded = getAlignedSize<T>(N);
const size_t Alignment = getAlignment<T>();
return (N_padded*(2*N-N_padded+1)+(Alignment-1)*N_padded)/2;
}
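// Explanatory note (a sketch of the layout, not original documentation):
// compute_size(i) is reused in resize() as the per-dimension offset of row i
// inside memoryPool, and compute_size(Ntargets) as the per-dimension pool
// size; the (Alignment-1)*N_padded term over-allocates so each packed row of
// the lower triangle can start on an aligned boundary.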
void resize(int n)
{
N[SourceIndex]=N[VisitorIndex]=Ntargets=n;
Ntargets_padded=getAlignedSize<T>(n);
Distances.resize(Ntargets,Ntargets_padded);
const size_t total_size = compute_size(Ntargets);
memoryPool.resize(total_size*D);
Displacements.resize(Ntargets);
for(int i=0; i<Ntargets; ++i)
Displacements[i].attachReference(i,total_size,memoryPool.data()+compute_size(i));
Temp_r.resize(Ntargets);
Temp_dr.resize(Ntargets);
}
inline void evaluate(ParticleSet& P)
{
constexpr T BigR= std::numeric_limits<T>::max();
//P.RSoA.copyIn(P.R);
for(int iat=0; iat<Ntargets; ++iat)
{
DTD_BConds<T,D,SC>::computeDistances(P.R[iat], P.RSoA, Distances[iat], Displacements[iat], 0, Ntargets, iat);
Distances[iat][iat]=BigR; //assign big distance
}
}
inline void evaluate(ParticleSet& P, IndexType jat)
{
DTD_BConds<T,D,SC>::computeDistances(P.R[jat], P.RSoA, Distances[jat], Displacements[jat], 0, Ntargets, jat);
Distances[jat][jat]=std::numeric_limits<T>::max(); //assign a big number
}
inline void moveOnSphere(const ParticleSet& P, const PosType& rnew)
{
DTD_BConds<T,D,SC>::computeDistances(rnew, P.RSoA, Temp_r.data(),Temp_dr, 0, Ntargets, P.activePtcl);
}
///evaluate the temporary pair relations
inline void move(const ParticleSet& P, const PosType& rnew)
{
//#pragma omp master
moveOnSphere(P,rnew);
}
int get_first_neighbor(IndexType iat, RealType& r, PosType& dr, bool newpos) const
{
RealType min_dist = std::numeric_limits<RealType>::max();
int index=-1;
if(newpos)
{
for(int jat=0; jat<Ntargets; ++jat)
if(Temp_r[jat]<min_dist && jat!=iat)
{
min_dist = Temp_r[jat];
index = jat;
}
if(index>=0) dr=Temp_dr[index];
}
else
{
for(int jat=0; jat<Ntargets; ++jat)
if(Distances[iat][jat]<min_dist && jat!=iat)
{
min_dist = Distances[iat][jat];
index = jat;
}
if(index>=0) dr=Displacements[iat][index];
}
r=min_dist;
return index;
}
///update the iat-th row, i.e., the distances to particles jat = [0, iat)
inline void update(IndexType iat)
{
if(iat==0) return;
//update by a cache line
const int nupdate=getAlignedSize<T>(iat);
simd::copy_n(Temp_r.data(),nupdate,Distances[iat]);
for(int idim=0;idim<D; ++idim)
simd::copy_n(Temp_dr.data(idim),nupdate,Displacements[iat].data(idim));
}
};
}
#endif
|
GB_unaryop__lnot_bool_fp32.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_bool_fp32
// op(A') function: GB_tran__lnot_bool_fp32
// C type: bool
// A type: float
// cast: bool cij = (bool) aij
// unaryop: cij = !aij
#define GB_ATYPE \
float
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_bool_fp32
(
bool *restrict Cx,
const float *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_bool_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp-loop03.c
|
extern void abort (void);
int a;
void
foo ()
{
int i;
a = 30;
#pragma omp barrier
#pragma omp for lastprivate (a)
for (i = 0; i < 1024; i++)
{
a = i;
}
if (a != 1023)
abort ();
}
int
main (void)
{
#pragma omp parallel num_threads (64)
foo ();
return 0;
}
|
GB_unaryop__ainv_int32_fp32.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int32_fp32
// op(A') function: GB_tran__ainv_int32_fp32
// C type: int32_t
// A type: float
// cast: int32_t cij ; GB_CAST_SIGNED(cij,aij,32)
// unaryop: cij = -aij
#define GB_ATYPE \
float
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
int32_t z ; GB_CAST_SIGNED(z,aij,32) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT32 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_int32_fp32
(
int32_t *Cx, // Cx and Ax may be aliased
float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_int32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ops.h
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#pragma once
#ifndef OPS_H_
#define OPS_H_
#include <op_boilerplate.h>
#include <array/DataTypeUtils.h>
#include <helpers/shape.h>
#include <vector>
#include <Environment.h>
#include <loops/summarystatsreduce.h>
#include <loops/ReduceType.h>
#define MIN_V 1e-12
#define MAX_FLOAT 1e37
#define MIN_FLOAT 1e-37
#define MAX_INT 2147483647
#define MIN_CUTFOFF -3.79297773665f
#define FLOAT_MIN_NORMAL 1.17549435e-38
#define EPS 1e-5
#define AFFINITY close
#define DOUBLE_PI_T T(2.0 * 3.14159265358979323846)
#define DOUBLE_PI_X X(2.0 * 3.14159265358979323846)
#define no_op_exec_special_any static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_bool static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_same static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, Z *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#define no_op_exec_special_accumulation_long static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#define no_op_exec_special_accumulation_same static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#ifdef __CUDACC__
#define no_op_exec_special_any_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_bool_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_same_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, X *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer,Z *result, Nd4jLong *resultShapeBuffer,Z *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_same_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, X *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_long_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#else
// hacky fix for isnan/isinf being out of scope
//#ifdef IOS
//#define isinf(x) 0 // this isn't right. But std::isinf fails
//#define isnan(x) 0
//#else
//#define isnan std::isnan
//#define isinf std::isinf
//#endif
#define no_op_exec_special_cuda
#define no_op_exec_special_accumulation_cuda
#define no_op_exec_special_accumulation_same_cuda
#define no_op_exec_special_accumulation_long_cuda
#define no_op_exec_special_any_cuda
#define no_op_exec_special_bool_cuda
#define no_op_exec_special_same_cuda
#endif
#define SELU_ALPHA 1.6732632423543772848170429916717
#define SELU_LAMBDA 1.0507009873554804934193349852946
#ifdef _OPENMP
#pragma omp declare reduction(maxTF : float,double,float16,bfloat16 : \
omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=-MAX_FLOAT)
#pragma omp declare reduction(minTF : float,double,float16,bfloat16 : \
omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=MAX_FLOAT)
#pragma omp declare reduction(maxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=0)
#pragma omp declare reduction(minT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=0)
#pragma omp declare reduction(amaxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_max(nd4j::math::nd4j_abs(omp_in), nd4j::math::nd4j_abs(omp_out)) )
#pragma omp declare reduction(aminT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_min(nd4j::math::nd4j_abs(omp_in), nd4j::math::nd4j_abs(omp_out)) )
#pragma omp declare reduction(asumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_abs(omp_in) + nd4j::math::nd4j_abs(omp_out))\
initializer (omp_priv=0)
#pragma omp declare reduction(sumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in + omp_out)\
initializer (omp_priv=0)
#pragma omp declare reduction(prodT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in * omp_out)\
initializer (omp_priv=1)
#endif
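// A brief usage sketch for the reductions declared above (the identifiers
// `buffer` and `length` are illustrative, not part of this header):
//
//   float total = 0.f;
//   #pragma omp parallel for reduction(sumT : total)
//   for (Nd4jLong e = 0; e < length; e++)
//       total += buffer[e];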
namespace functions {
namespace indexreduce {
template <typename T>
struct IndexValue {
T value;
Nd4jLong index;
_CUDA_HD IndexValue() = default;
_CUDA_HD IndexValue(const T val, const Nd4jLong ind): value(val), index(ind) {}
};
}
namespace summarystats {
template <typename T>
class SummaryStatsData;
}
}
namespace simdOps {
template <typename X, typename Y, typename Z>
class Add {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 + d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 + d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 + params[0]);
}
op_def static X startingValue() {
return static_cast<X>(0.f);
}
};
template <typename X, typename Y>
class NewAdd {
public:
op_def static X op(X d1, Y d2, X *params) {
return d1 + d2;
}
};
template <typename X, typename Y, typename Z>
class Subtract {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 - d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 - d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 - params[0]);
}
};
template <typename X, typename Y, typename Z>
class SquaredSubtract {
public:
op_def static Z op(X d1, Y d2) {
auto d = static_cast<Z>(d1 - d2);
return d * d;
}
op_def static Z op(X d1, Y d2, Z *params) {
auto d = static_cast<Z>(d1 - d2);
return d * d;
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto d = static_cast<Z>(d1 - params[0]);
return d * d;
}
};
template <typename X, typename Y, typename Z>
class SquaredReverseSubtract {
public:
op_def static Z op(X d1, Y d2) {
auto d = static_cast<Z>(d2 - d1);
return d * d;
}
op_def static Z op(X d1, Y d2, Z *params) {
auto d = static_cast<Z>(d2 - d1);
return d * d;
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto d = static_cast<Z>(params[0] - d1);
return d * d;
}
};
template <typename X, typename Y, typename Z>
class ReverseSubtract {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 - d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2 - d1);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(params[0] - d1);
}
};
template <typename X, typename Y, typename Z>
class LogPoissonLossFull {
public:
op_def static Z op(X z, Y c) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)));
}
op_def static Z op(X z, Y c, Z *params) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)));
}
op_def static Z op(X z) {
auto zz = static_cast<Z>(z);
return (zz * nd4j::math::nd4j_log<Y, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz));
}
// op for MetaOps
op_def static X op(X z, Y *params) {
return (nd4j::math::nd4j_exp<X, X>(params[0]) - z * params[0] + (z * nd4j::math::nd4j_log<X, Z>(z) - z + static_cast<X>(0.5f) * nd4j::math::nd4j_log<X, Z>(DOUBLE_PI_X * z)));
}
};
template <typename X, typename Y, typename Z>
class LogPoissonLoss {
public:
op_def static Z op(X z, Y c) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc);
}
op_def static Z op(X z, Y c, Z *params) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc);
}
op_def static Z op(X z) {
return static_cast<Z>(z);
}
// op for MetaOps
op_def static Z op(X z, Y *params) {
return (nd4j::math::nd4j_exp<Y, Z>(params[0]) - static_cast<Z>(z) * static_cast<Z>(params[0]));
}
};
template <typename X, typename Y, typename Z>
class Multiply {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 * d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 * d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 * params[0]);
}
op_def static X startingValue() {
return static_cast<X>(1.f);
}
};
template <typename X, typename Y, typename Z>
class Divide {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 / params[0]);
}
op_def static X startingValue() {
return static_cast<X>(1);
}
};
template <typename X, typename Y, typename Z>
class SafeDivide {
public:
op_def static Z op(X d1, Y d2) {
if(d2 == static_cast<Y>(0))
return static_cast<Z>(0);
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
if(d2 == static_cast<Y>(0))
return static_cast<Z>(0);
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
if(params[0] == static_cast<Y>(0))
return static_cast<Z>(0);
return static_cast<Z>(d1 / params[0]);
}
};
template <typename X, typename Y, typename Z>
class FloorDiv {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2));
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2));
}
op_def static Z op(X d1) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1));
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / params[0]));
}
};
template <typename X, typename Y, typename Z>
class TruncateDiv {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 / i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 / i2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(params[0]);
return static_cast<Z>(i1 / i2);
}
};
template <typename X, typename Y, typename Z>
class TruncateMod {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 % i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 % i2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(params[0]);
return static_cast<Z>(i1 % i2);
}
};
template<typename X, typename Y, typename Z>
class Remainder {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_remainder<X, Y, Z>(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
class FMod {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_fmod<X, Y, Z>(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
class FloorMod {
public:
op_def static Z op(X d1, Y d2) {
auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
return (d1 < static_cast<X>(0)) == (d2 < static_cast<Y>(0)) ? m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
return (d1 < static_cast<X>(0.0f)) == (d2 < static_cast<Y>(0)) ? m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
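// Worked example (comment only): FloorMod implements Python-style modulo.
// op(-3, 5) first computes fmod(-3, 5) = -3; since the operands' signs
// differ, the result becomes fmod(-3 + 5, 5) = 2, whereas op(3, 5) keeps the
// plain fmod result 3.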
template <typename X, typename Y, typename Z>
class ReverseDivide {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 / d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2 / d1);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(params[0] / d1);
}
};
template <typename X, typename Y, typename Z>
class CopyPws {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1);
}
};
template <typename X>
class Copy {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Y, typename Z>
class Copy2 {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1);
}
};
template <typename X, typename Y, typename Z>
class Axpy {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 + d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto alpha = params[0];
return alpha * static_cast<Z>(d1) + static_cast<Z>(d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
};
template <typename X, typename Z>
class Assign {
public:
no_op_exec_special_any
no_op_exec_special_any_cuda
op_def static Z op(X d1, X *params) {
return static_cast<Z>(d1);
}
};
template <typename X, typename Z>
class And {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
return d1 != comp && d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return (b1 && b2) ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, X *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Z>
class Or {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
return d1 != comp || d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return b1 || b2 ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, X *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Z>
class Xor {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
return ((d1 == comp && d2 != comp) || (d1 != comp && d2 == comp)) ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return (!b1 && b2 )||(b1 && !b2) ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
op_def static Z op(X d1) {
return d1;
}
};
template <typename X, typename Z>
class Not {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return static_cast<Z>(0);
}
op_def static Z op(X d1, X d2, X *params) {
return d1 != d2 ? static_cast<Z>(1) : static_cast<Z>(0);
}
// this transform op should run only on boolean input
op_def static Z op(X d1, X *params) {
auto b1 = static_cast<bool>(d1);
return !b1;
}
};
template <typename X, typename Y, typename Z>
class LogicalNot {
public:
op_def static Z op(X d1, Y d2) {
return !((int) d1 && (int) d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(!(static_cast<int>(d1) && static_cast<int>(d2)));
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Y, typename Z>
class LogicalXor {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return (i1 | i2) &~ (i1 & i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Y, typename Z>
class LogicalAnd {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) & static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(Y d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Y, typename Z>
class LogicalOr {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) | static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Y, typename Z>
class Mod {
public:
/*
// just an optional note, feel free to remove later
op_def static half op(half d1, half d2, half *params) {
return __float2half(simdOps::Mod<float>::op(__half2float(d1), __half2float(d2), nullptr));
}
*/
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) % static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOp
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
class ReverseMod {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d2) % static_cast<int>(d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOp
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
/**
* Whether 2 elements in an array
* are epsilon equal
*/
template <typename X, typename Z>
class Epsilon {
public:
op_def static Z op(X d1, X d2) {
X diff = d1 - d2;
X absDiff = nd4j::math::nd4j_abs<X>(diff);
if (absDiff <= static_cast<X>(MIN_V))
return static_cast<Z>(1);
return static_cast<Z>(0);
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class EqualTo {
public:
op_def static Z op(X d1, X d2) {
return d1 == d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class NotEqualTo {
public:
op_def static Z op(X d1, X d2) {
return d1 != d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class GreaterThanOrEqual {
public:
op_def static Z op(X d1, X d2) {
return d1 >= d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// FIXME: this signature clashes with MetaOp stuff
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class GreaterThan {
public:
op_def static Z op(X d1, X d2) {
return d1 > d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// FIXME: this signature clashes with MetaOp stuff
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class LessThan {
public:
op_def static Z op(X d1, X d2) {
return d1 < d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class LessThanOrEqual {
public:
op_def static Z op(X d1, X d2) {
return d1 <= d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X>
class Abs {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_abs<X>(d1);
}
};
template <typename X>
class Ceiling {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_ceil<X,X>(d1);
}
};
template <typename X>
class Cosine {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_cos<X,X>(d1);
}
};
template <typename X>
class Exp {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_exp<X, X>(d1);
}
};
template <typename X>
class HardTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return ((d1 >= static_cast<X>(-1.f) && d1 <= static_cast<X>(1.f)) ? static_cast<X>(1.f) : static_cast<X>(0.f));
}
};
template <typename X>
class HardTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 < static_cast<X>(-1))
return static_cast<X>(-1);
else if (d1 > static_cast<X>(1))
return static_cast<X>(1);
else
return d1;
}
};
template <typename X>
class Floor {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_floor<X,X>(d1);
}
};
template <typename X>
class Log {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_log<X, X>(d1);
}
};
template <typename X>
class Log1p {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_log<X, X>(1 + d1);
}
};
template <typename X, typename Y, typename Z>
class LogX {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_log<X, Z>(d1) / nd4j::math::nd4j_log<Y, Z>(d2) ;
}
};
template <typename X>
class StabilizeFP16 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 <= static_cast<X>(0))
return static_cast<X>(nd4j::DataTypeUtils::min<float16>());
else return d1;
}
};
template <typename X>
class StabilizeX {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 <= static_cast<X>(0))
return nd4j::DataTypeUtils::min<X>();
else return d1;
}
};
template <typename X>
class SpecialDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * (static_cast<X>(1.f) - d1);
}
};
template <typename X>
class Neg {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return -d1;
}
};
template <typename X>
class Erf {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_erf<X,X>(d1);
}
};
template <typename X>
class Erfc {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_erfc<X,X>(d1);
}
};
template <typename X>
class Reciprocal {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
// op_def static T op(T d1) {
// return (T(1.0f) / d1);
// }
// op for MetaOps
op_def static X op(X d1, X *params) {
return (static_cast<X>(1) / d1);
}
};
template <typename X, typename Z>
class Sqr {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2));
}
op_def static Z op(X d1) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2));
}
};
template <typename X, typename Y, typename Z>
class RelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_re<X>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
template <typename X, typename Y, typename Z>
class BinaryRelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
X threshold = params[0];
return nd4j::math::nd4j_re<X>(d1, d2) > threshold ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
template <typename X, typename Y, typename Z>
class BinaryMinimumAbsoluteRelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, X *params) {
X d2 = params[0];
X thresholdRelative = params[1];
X thresholdAbsolute = params[2];
return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
}
op_def static Z op(X d1, Y d2, Z *params) {
X thresholdRelative = params[0];
X thresholdAbsolute = params[1];
return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
template <typename X, typename Y, typename Z>
class ReversePow {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_pow<X, X, Z>(params[0], d1);
}
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_pow<X, Y, Z>(d2, d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_pow<X, Y, Z>(d2, d1);
}
op_def static Z op(X d1) {
return d1;
}
};
template <typename X, typename Y, typename Z>
class Pow {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, params[0]);
}
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
};
template <typename X, typename Y, typename Z>
class PowDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return params[0] * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(params[0]) - static_cast<Z>(1.f));
}
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f));
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f));
}
op_def static Z op(X d1) {
return d1;
}
};
template <typename X>
class Round {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_round<X,X>(d1);
}
};
template <typename X, typename Z>
class IsNan {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isnan(d1) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X>
class Expm1 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_exp<X, X>(d1) - static_cast<X>(1);
}
};
template <typename X, typename Z>
class IsPositive {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return d1 > (X)0.f;
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class IsInf {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isinf<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class IsInfOrNan{
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(0) : static_cast<Z>(1);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class IsFinite {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X>
class ClipByValue {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 > params[1])
return params[1];
if (d1 < params[0])
return params[0];
return d1;
}
};
template <typename X, typename Y, typename Z>
class LstmClip {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
X _v = (X) d2;
if (d1 > _v)
return _v;
else if (d1 < -_v)
return -_v;
else return d1;
}
};
template <typename X>
class Swish {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * nd4j::math::nd4j_sigmoid<X,X>(d1);
}
};
template <typename X>
class GELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * nd4j::math::nd4j_sigmoid<X,X>(static_cast<X>(1.702f) * d1);
}
};
template <typename X>
class PreciseGELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto sp = nd4j::math::nd4j_sqrt<X, X>(static_cast<X>(2) / static_cast<X>(M_PI));
auto xp = d1 + static_cast<X>(0.044715) * nd4j::math::nd4j_pow<X, X, X>(d1, static_cast<X>(3)); // x + 0.044715 * x^3
return (d1 / static_cast<X>(2)) * (static_cast<X>(1) + nd4j::math::nd4j_tanh<X, X>(sp * xp));
}
};
template <typename X>
class GELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto x17 = static_cast<X>(1.702f) * d1;
auto ep = nd4j::math::nd4j_pow<X,X,X>(static_cast<X>(M_E), x17);
// (E^(1.702 x) (1. + E^(1.702 x) + 1.702 x))/(1. + E^(1.702 x))^2
return (ep * (static_cast<X>(1.f) + ep + x17)) / nd4j::math::nd4j_pow<X, int, X>((static_cast<X>(1.f) + ep), 2);
}
};
template <typename X>
class PreciseGELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto x79 = static_cast<X>(0.797885) * d1;
auto x03 = static_cast<X>(0.0356774) * nd4j::math::nd4j_pow<X, int, X>(d1, 3); // 0.0356774 * x^3, per the formula below
auto x39 = static_cast<X>(0.398942) * d1;
auto x05 = static_cast<X>(0.0535161) * nd4j::math::nd4j_pow<X, int, X>(d1, 3); // 0.0535161 * x^3, per the formula below
auto scz = nd4j::math::nd4j_sech<X, X>(x79 + x03);
// 0.5 + (0.398942 x + 0.0535161 x^3) Sech[0.797885 x + 0.0356774 x^3]^2 + 0.5 Tanh[0.797885 x + 0.0356774 x^3]
return static_cast<X>(0.5) + (x39 + x05) * (scz * scz) + static_cast<X>(0.5) * nd4j::math::nd4j_tanh<X, X>(x79 + x03);
}
};
template <typename X>
class SwishDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X ex = nd4j::math::nd4j_pow<X, X, X>(static_cast<X>(M_E), d1);
return (ex * (d1 + ex + static_cast<X>(1.f))) / nd4j::math::nd4j_pow<X, X, X>((ex + static_cast<X>(1.f)) , static_cast<X>(2.f));
}
};
template <typename X>
class LogSigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_log<X, X>(nd4j::math::nd4j_sigmoid<X, X>(d1));
}
};
template <typename X>
class LogSigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X ex = nd4j::math::nd4j_pow<X, X, X>(M_E, d1);
return static_cast<X>(1.f) / (ex + static_cast<X>(1.f));
}
};
template <typename X>
class Sigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sigmoid<X, X>(d1);
}
};
template <typename X>
class SigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sigmoidderivative<X, X>(d1);
}
};
template <typename X>
class HardSigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_min<X>(static_cast<X>(1), nd4j::math::nd4j_max<X>(static_cast<X>(0), (static_cast<X>(0.2f)) * d1 + static_cast<X>(0.5f)));
}
};
template <typename X>
class HardSigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 < static_cast<X>(-2.5f) || d1 > static_cast<X>(2.5f) ? static_cast<X>(0.f) : static_cast<X>(0.2f);
}
};
/**
* Scale to be between a min and max
*/
template <typename X>
class SetRange {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto min = params[0];
auto max = params[1];
if (d1 >= min && d1 <= max)
return d1;
if (min == static_cast<X>(0) && max == static_cast<X>(1)) {
auto val = static_cast<X>(1) / (static_cast<X>(1) + nd4j::math::nd4j_exp<X, X>(-d1));
return (nd4j::math::nd4j_floor<X,X>(val * (max - min)) + min);
}
return (nd4j::math::nd4j_floor<X,X>(d1 * (max - min)) + min);
}
};
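// Usage sketch for SetRange (hypothetical, assuming a float instantiation):
// params carries {min, max}; values already inside the range pass through unchanged.
//   float range[2] = {0.f, 10.f};
//   float a = simdOps::SetRange<float>::op(3.5f, range);   // 3.5f, already in range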
template <typename X>
class Sin {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sin<X,X>(d1);
}
};
template <typename X>
class Square {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * d1;
}
};
template <typename X, typename Z>
class Sqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_sqrt<X, Z>(d1);
}
};
template <typename X, typename Z>
class RSqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return static_cast<Z>(1) / nd4j::math::nd4j_sqrt<X, Z>(d1);
}
};
template <typename X>
class Rint {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_rint<X,X>(d1);
}
};
template <typename X>
class SoftPlus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::softplus<X, X>(d1);
}
};
template <typename X>
class Sign {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return (d1 > static_cast<X>(0)) - (d1 < static_cast<X>(0));
}
};
template <typename X>
class TimesOneMinus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * (static_cast<X>(1) - d1);
}
};
template <typename X>
class RationalTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
// keep 2/3 as runtime variable, to match precision
auto dis = (static_cast<X>(2) / static_cast<X>(3)) * d1;
auto tanh = nd4j::math::nd4j_sgn<X,X>(dis) * (static_cast<X>(1) - (static_cast<X>(1) / (static_cast<X>(1) + static_cast<X>(nd4j::math::nd4j_abs<X>(dis)) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4)) )));
return static_cast<X>(1.7159f) * tanh;
}
};
template <typename X>
class RationalTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto dis = (static_cast<X>(2.f) / static_cast<X>(3.f)) * d1;
auto a = static_cast<X>(1.f) + nd4j::math::nd4j_abs<X>(dis) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2.f)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4));
auto tDeriv = (static_cast<X>(1.f) + nd4j::math::nd4j_sign<X,X>(dis) * (static_cast<X>(2.f) * dis + static_cast<X>(4.f) * static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(3)))) / (a * a);
return static_cast<X>(1.7159f) * (static_cast<X>(2.f) / static_cast<X>(3.f)) * tDeriv;
}
};
template <typename X>
class Tanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_tanh<X, X>(d1);
}
};
template <typename X>
class RectifiedTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_max<X>(static_cast<X>(0), nd4j::math::nd4j_tanh<X,X>(d1));
}
};
template <typename X>
class RectifiedTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.f) ? nd4j::math::nd4j_tanhderivative<X,X>(d1) : static_cast<X>(0.f);
}
};
template <typename X>
class ATanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_atanh<X,X>(d1);
}
};
template <typename X>
class TanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_tanhderivative<X,X>(d1);
}
};
template <typename X>
class Cube {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * d1 * d1;
}
};
template <typename X>
class CubeDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(3) * d1 * d1;
}
};
template <typename X>
class ACos {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_acos<X, X>(d1);
}
};
template <typename X>
class ASinh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_asinh<X, X>(d1);
}
};
template <typename X>
class ASinhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(nd4j::math::nd4j_pow<X, X, X>(d1, static_cast<X>(2.f)) + static_cast<X>(1.f)));
}
};
template <typename X>
class ACosh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_acosh<X, X>(d1);
}
};
template <typename X>
class ACoshDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(d1 - static_cast<X>(1.f)) * nd4j::math::nd4j_sqrt<X, X>(d1 + static_cast<X>(1.f)));
}
};
template <typename X>
class Ones {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.0f);
}
};
template <typename X>
class SoftSign {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_softsign<X, X>(d1);
}
};
template <typename X>
class SoftSignDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_softsignderivative<X,X>(d1);
}
};
template <typename X, typename Z>
class MatchConditionBool {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
// this op returns 1.0 if the condition is met, 0.0 otherwise
op_def static Z op(X d1, X *extraParams) {
X compare = extraParams[0];
X eps = extraParams[1];
auto mode = static_cast<int>(extraParams[2]);
//nd4j_printf("value: %f; comp: %f; eps: %f; mode: %i;\n", d1, compare, eps, mode);
switch (mode) {
case 0: // equals
return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps;
case 1: // not equals
return nd4j::math::nd4j_abs<X>(d1 - compare) > eps;
case 2: // less_than
return d1 < compare;
case 3: // greater_than
return d1 > compare;
case 4: // less_or_equals_than
return d1 <= compare;
case 5: // greater_or_equals_than
return d1 >= compare;
case 6: // abs_less_than
return nd4j::math::nd4j_abs<X>(d1) < compare;
case 7: // abs_greater_than
return nd4j::math::nd4j_abs<X>(d1) > compare;
case 8: // is inf
return nd4j::math::nd4j_isinf(d1);
case 9: // is nan
return nd4j::math::nd4j_isnan(d1);
case 10:
return d1 == compare;
case 11:
return d1 != compare;
case 12: // abs_greater_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) >= compare;
case 13: // abs_less_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) <= compare;
case 14:
// isFinite
return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1));
case 15:
// isInfinite
return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1);
default:
printf("Undefined match condition: [%i]\n", mode);
}
return d1;
}
};
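// Example of the extraParams layout shared by the two match-condition ops
// (a sketch, assuming a float instantiation): {compare, eps, mode}.
//   float ep[3] = {5.f, 1e-6f, 2.f};  // mode 2: less_than
//   bool hit = simdOps::MatchConditionBool<float, bool>::op(3.f, ep);  // true, since 3 < 5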
template <typename X, typename Z>
class MatchCondition {
public:
no_op_exec_special
no_op_exec_special_cuda
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return old + opOutput;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return old + opOutput;
}
// this op returns 1.0 if the condition is met, 0.0 otherwise
op_def static Z op(X d1, X *extraParams) {
X compare = extraParams[0];
X eps = extraParams[1];
auto mode = static_cast<int>(extraParams[2]);
//printf("value: %f; comp: %f; eps: %f; mode: %i;\n", (float) d1, (float) compare, (float) eps, mode);
switch (mode) {
case 0: // equals
return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? 1 : 0;
case 1: // not equals
return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? 1 : 0;
case 2: // less_than
return d1 < compare ? 1 : 0;
case 3: // greater_than
return d1 > compare ? 1 : 0;
case 4: // less_or_equals_than
return d1 <= compare ? 1 : 0;
case 5: // greater_or_equals_than
return d1 >= compare ? 1 : 0;
case 6: // abs_less_than
return nd4j::math::nd4j_abs<X>(d1) < compare ? 1 : 0;
case 7: // abs_greater_than
return nd4j::math::nd4j_abs<X>(d1) > compare ? 1 : 0;
case 8: // is inf
return nd4j::math::nd4j_isinf(d1) ? 1 : 0;
case 9: // is nan
return nd4j::math::nd4j_isnan(d1) ? 1 : 0;
case 10:
return (d1 == compare) ? 1 : 0;
case 11:
return (d1 != compare) ? 1 : 0;
case 12: // abs_greater_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) >= compare ? 1 : 0;
case 13: // abs_less_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) <= compare ? 1 : 0;
case 14:
// isFinite
return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1)) ? 1 : 0;
case 15:
// isInfinite
return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1) ? 1 : 0;
default:
printf("Undefined match condition: [%i]\n", mode);
}
return d1;
}
op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X>
class ELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_elu<X,X>(d1);
}
};
template <typename X>
class ELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_eluderivative<X,X>(d1);
}
};
template <typename X, typename Y, typename Z>
class RELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
auto xt = static_cast<Z>(d1);
auto xf = static_cast<Z>(d2);
return xt < xf ? xf : xt;
}
};
template <typename X, typename Y, typename Z>
class SXELogitsSmoother {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 * (static_cast<X>(1.f) - static_cast<X>(d2)) + static_cast<X>(0.5f) * static_cast<X>(d2));
}
};
template <typename X, typename Y, typename Z>
class RELU6 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
auto relu = simdOps::RELU<X,Y,Z>::op(d1, d2, params);
return relu < static_cast<Z>(6) ? relu : static_cast<Z>(6);
}
};
template <typename X, typename Y, typename Z>
class LeakyRELU {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
auto val = static_cast<Z>(d1);
auto alpha = static_cast<Z>(d2);
return val < static_cast<Z>(0) ? alpha * val : val;
}
};
template <typename X>
class SELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.0f) ? static_cast<X>(SELU_LAMBDA) * static_cast<X>(d1) : static_cast<X>(SELU_LAMBDA) * (static_cast<X>(SELU_ALPHA) * nd4j::math::nd4j_exp<X, X>(d1) - static_cast<X>(SELU_ALPHA));
}
};
template <typename X>
class SELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.f) ? static_cast<X>(SELU_LAMBDA) : static_cast<X>(SELU_ALPHA) * static_cast<X>(SELU_LAMBDA) * nd4j::math::nd4j_exp<X, X>(d1);
}
};
template <typename X, typename Y, typename Z>
class LeakyRELUDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
if (d1 >= static_cast<X>(0))
return static_cast<Z>(1);
else
return static_cast<Z>(d2);
}
};
template <typename X>
class ASin {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_asin<X,X>(d1);
}
};
template <typename X>
class Sinh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sinh<X,X>(d1);
}
};
template <typename X>
class SinhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_cosh<X, X>(d1);
}
};
template <typename X>
class Cosh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_cosh<X,X>(d1);
}
};
template <typename X>
class Tan {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_tan<X,X>(d1);
}
};
template <typename X>
class TanDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / nd4j::math::nd4j_pow<X, X, X>(nd4j::math::nd4j_cos<X, X>(d1), static_cast<X>(2.0f));
}
};
template <typename X>
class ATan {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_atan<X, X>(d1);
}
};
template <typename X, typename Y, typename Z>
class Atan2 {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_atan2<X, Z>(d2, d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
template <typename X>
class Identity {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1;
}
};
template <typename X>
class Stabilize {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X k = params[0];
if (d1 * k > static_cast<X>(- MIN_CUTFOFF))
return static_cast<X>(- MIN_CUTFOFF) / k;
else if (d1 * k < static_cast<X>(MIN_CUTFOFF))
return static_cast<X>(MIN_CUTFOFF) / k;
return d1;
}
};
template <typename X, typename Y, typename Z>
class Step {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
return (d1 > static_cast<X>(d2) ? static_cast<Z>(1) : static_cast<Z>(0));
}
};
template <typename X>
class OneMinus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1) - d1;
}
};
template <typename X>
class Sum {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
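// The accumulation contract used by Sum and the reductions below, as a sketch:
// start from startingValue, fold each element through op/update, combine partial
// results with merge, and finish with postProcess. A hypothetical serial driver,
// given float x[n]:
//   float acc = simdOps::Sum<float>::startingValue(x);
//   for (Nd4jLong i = 0; i < n; i++)
//       acc = simdOps::Sum<float>::update(acc, simdOps::Sum<float>::op(x[i], nullptr), nullptr);
//   float total = simdOps::Sum<float>::postProcess(acc, n, nullptr);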
template <typename X>
class ReduceSameBenchmarkOp {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X op(X d1, X *extraParams) {
auto f1 = static_cast<float>(d1);
return static_cast<X>(nd4j::math::nd4j_pow<float,float,float>(f1, 3)
+ nd4j::math::nd4j_log<float,float>(f1) * nd4j::math::nd4j_sin<float,float>(f1)
/ nd4j::math::nd4j_tanh<float,float>(static_cast<float>(M_E) * static_cast<float>(M_PI) * f1)
* nd4j::math::nd4j_sqrt<float,float>(static_cast<float>(M_PI) / f1)
- nd4j::math::nd4j_atan<float,float>(static_cast<float>(M_E) / f1));
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class ShannonEntropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
auto p = d1 * d1;
return static_cast<Z>(p) * nd4j::math::nd4j_log<X, Z>(p);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return -reduction;
}
};
template <typename X, typename Z>
class LogEntropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
//entropy is -sum(p(x) * log(p(x))); log entropy is log of this
return nd4j::math::nd4j_log<Z, Z>(-reduction);
}
};
template <typename X, typename Z>
class Entropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return static_cast<Z>(-reduction); //entropy is -sum(p(x) * log(p(x)))
}
};
template <typename X>
class ASum {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::ASUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static X op(X d1, X *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction);
}
};
template <typename X, typename Z>
class CountNonZero {
public:
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::ASUM;
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, X *extraParams) {
return d1 == static_cast<X>(0.0f) ? static_cast<Z>(0.0f) : static_cast<Z>(1.0f);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class CountZero {
public:
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0.0f);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, X *extraParams) {
return d1 == static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return static_cast<Z>(reduction);
}
};
template <typename X>
class Prod {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::PRODUCT;
op_def static X startingValue(const X *input) {
return static_cast<X>(1);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static X update(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class Any {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, X *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
}
};
template <typename X, typename Z>
class All {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::PRODUCT;
op_def static X startingValue(const X *input) {
return static_cast<X>(1);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static Z op(X d1, X *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
}
};
template <typename X, typename Z>
class Mean {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return d1;
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return reduction / static_cast<Z>(n);
}
};
template <typename X, typename Z>
class ReduceFloatBenchmarkOp {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
auto f1 = static_cast<float>(d1);
return static_cast<Z>(nd4j::math::nd4j_pow<float,float,float>(f1, 3)
+ nd4j::math::nd4j_log<float,float>(f1) * nd4j::math::nd4j_sin<float,float>(f1)
/ nd4j::math::nd4j_tanh<float,float>(static_cast<float>(M_E) * static_cast<float>(M_PI) * f1)
* nd4j::math::nd4j_sqrt<float,float>(static_cast<float>(M_PI) / f1)
- nd4j::math::nd4j_atan<float,float>(static_cast<float>(M_E) / f1));
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return reduction / static_cast<Z>(n);
}
};
template <typename X, typename Z>
class AMean {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return nd4j::math::nd4j_abs<Z>(opOutput) + nd4j::math::nd4j_abs<Z>(old);
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_abs<Z>(reduction) / static_cast<Z>(n);
}
};
template <typename X>
class Max {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::MAX;
op_def static X startingValue(const X *input) {
return -nd4j::DataTypeUtils::infOrMax<X>();
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(old, opOutput);
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(opOutput, old);
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_max<X>(d1, d2);
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_max<X>(d1, d2);
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Y, typename Z>
class AMaxPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1, Y d2) {
auto z1 = static_cast<Z>(d1);
auto z2 = static_cast<Z>(d2);
if (nd4j::math::nd4j_abs<Z>(z1) > nd4j::math::nd4j_abs<Z>(z2))
return z1;
else
return z2;
}
};
template <typename X, typename Y, typename Z>
class AMinPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1, Y d2) {
auto z1 = static_cast<Z>(d1);
auto z2 = static_cast<Z>(d2);
if (nd4j::math::nd4j_abs<Z>(z1) < nd4j::math::nd4j_abs<Z>(z2))
return z1;
else
return z2;
}
};
template <typename X, typename Y, typename Z>
class MaxPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
};
template <typename X, typename Y, typename Z>
class MinPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
};
template <typename X>
class AMax {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::AMAX;
op_def static X startingValue(const X *input) {
return input[0];
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput));
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old));
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_abs<X>(d1) > nd4j::math::nd4j_abs<X>(d2) ? d1 : d2;
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction);
}
};
template <typename X>
class AMin {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::AMIN;
op_def static X startingValue(const X *input) {
return input[0];
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput));
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old));
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction);
}
};
template <typename X>
class Min {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::MIN;
op_def static X startingValue(const X *input) {
return nd4j::DataTypeUtils::infOrMax<X>();
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(old, opOutput);
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(opOutput, old);
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_min<X>(d1, d2);
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_min<X>(d1, d2);
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class Norm1 {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(nd4j::math::nd4j_abs<X>(d1));
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class Norm2 {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_sqrt<Z, Z>(reduction);
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1 * d1);
}
};
template <typename X, typename Z>
class SquaredNorm {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1 * d1);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class NormFrobenius {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
X v = nd4j::math::nd4j_abs<X>(d1);
return static_cast<Z>(v * v);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_sqrt<Z, Z>(reduction);
}
};
template <typename X, typename Z>
class NormP {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return nd4j::math::nd4j_pow<X, Z, Z>(nd4j::math::nd4j_abs<X>(d1), extraParams[0]);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_pow<Z, Z, Z>(reduction, static_cast<Z>(1.0f) / extraParams[0]);
}
};
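// Worked example for NormP (a sketch, with p stored in extraParams[0]):
// for x = {3, 4} and p = 2, op accumulates |3|^2 + |4|^2 = 25 and postProcess
// returns 25^(1/2) = 5, i.e. the Euclidean norm falls out as a special case.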
template <typename X, typename Z>
class NormMax {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return nd4j::math::nd4j_max<Z>(nd4j::math::nd4j_abs<Z>(old),
nd4j::math::nd4j_abs<Z>(opOutput));
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_abs<Z>(reduction);
}
};
template <typename X, typename Z>
class Variance {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static X op(X d1, Z *extraParams) {
X mean = static_cast<X>(extraParams[0]);
X ret = d1 - mean;
return ret * ret;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
// T bias = extraParams[1];
// return (reduction - (nd4j::math::nd4j_pow<T>(bias, static_cast<T>(2.0f)) / static_cast<T>(n))) / (n - 1)
return static_cast<Z>(reduction) / static_cast<Z>(n - 1);
}
};
/**
* Standard deviation of a buffer
*/
template <typename X, typename Z>
class StandardDeviation {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static Z op(X d1, Z *extraParams) {
X mean = extraParams[0];
X ret = d1 - mean;
return ret * ret;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
Z ret = Variance<X,Z>::postProcess(reduction, n, extraParams);
Z sqrtRet = nd4j::math::nd4j_sqrt<Z, Z>(ret);
return sqrtRet;
}
};
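// Note on the two ops above: postProcess divides by (n - 1), i.e. the
// bias-corrected sample variance. Worked example (not library code):
// for x = {1, 2, 3} with mean 2 the squared deviations sum to 2, so
// Variance yields 2 / (3 - 1) = 1 and StandardDeviation yields sqrt(1) = 1.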
template <typename X, typename Y>
class CosineSimilarity {
public:
static const int extraParamsLen = 2;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1]));
}
op_def static Y op(X d1, X d2, Y *extraParams) {
extraParams[0] += static_cast<Y>(d1 * d1);
extraParams[1] += static_cast<Y>(d2 * d2);
return static_cast<Y>(d1 * d2);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],static_cast<Y>(d1 * d1));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1],static_cast<Y>(d2 * d2));
return static_cast<Y>(d1 * d2);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
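// Sketch of how CosineSimilarity uses extraParams (hypothetical float
// instantiation, given float x[3]): extraParams[0] and extraParams[1]
// accumulate the squared norms of the two inputs while the reduction
// accumulates the dot product, so for identical non-zero vectors
// postProcess returns dot / (||x|| * ||x||) = 1.
//   float ep[2] = {0.f, 0.f};
//   float dot = 0.f;
//   for (int i = 0; i < 3; i++)
//       dot += simdOps::CosineSimilarity<float, float>::op(x[i], x[i], ep);
//   float sim = simdOps::CosineSimilarity<float, float>::postProcess(dot, 3, ep); // 1.0f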
template <typename X, typename Y>
class JaccardDistance {
public:
static const int extraParamsLen = 2;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
// num / denom
return (static_cast<Y>(1.0f)) - (extraParams[0] / extraParams[1]);
}
op_def static Y num(X d1, X d2) {
return nd4j::math::nd4j_min<X>(d1, d2);
}
op_def static Y denom(X d1, X d2) {
return nd4j::math::nd4j_max<X>(d1, d2);
}
op_def static Y op(X d1, X d2, Y *extraParams) {
extraParams[0] += static_cast<Y>(num(d1, d2));
extraParams[1] += static_cast<Y>(denom(d1, d2));
return static_cast<Y>(0.0f);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],num(d1, d2));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], denom(d1, d2));
return static_cast<Y>(0.0f);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
template <typename X, typename Y>
class SimpleHammingDistance {
public:
static const int extraParamsLen = 0;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return static_cast<Y>(reduction / n);
}
op_def static Y op(X d1, X d2, Y *extraParams) {
return (d1 == d2) ? static_cast<Y>(0.0f) : static_cast<Y>(1.0f);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParams) {
return op(d1, d2, extraParams);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
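// Worked example for SimpleHammingDistance: op contributes 1 per mismatching
// pair and postProcess divides by n, so {1, 2, 3} vs {1, 0, 3} gives 1/3.
// (Illustration only; the kernels drive op/update/postProcess as usual.)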
template <typename X, typename Y>
class CosineDistance {
public:
static const int extraParamsLen = 2;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return (static_cast<Y>(1.0f)) - (reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1])));
}
op_def static Y op(X d1, X d2, Y *extraParams) {
extraParams[0] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d1) * nd4j::math::nd4j_abs<X>(d1));
extraParams[1] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d2) * nd4j::math::nd4j_abs<X>(d2));
return (d1 * d2);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0], static_cast<Y>(nd4j::math::nd4j_abs<X>(d1) * nd4j::math::nd4j_abs<X>(d1)));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], static_cast<Y>(nd4j::math::nd4j_abs<X>(d2) * nd4j::math::nd4j_abs<X>(d2)));
return (d1 * d2);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
/**
* Dot product between 2 arrays
*/
template <typename X, typename Y>
class Dot {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
//delete[] * extraParamsRef;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
return reduction;
}
op_def static Y op(X d1, X d2, Y *extraParamsRef) {
return static_cast<Y>(d1 * d2);
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
return opOutput + old;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {}
};
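// Dot follows the same reduction contract sketched after Sum: op contributes
// x[i] * y[i], update/merge add the partials, and postProcess is the identity.
// Hence, for x = {1, 2, 3} and y = {4, 5, 6} the reduction ends at 4 + 10 + 18 = 32.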
/**
* Op to check equality within arrays
*/
template <typename X, typename Z>
class EqualsWithEps {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
}
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0.0f);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParamsRef) {
return reduction;
}
op_def static Z op(X d1, X d2, Z *extraParamsRef) {
double eps = nd4j::math::nd4j_abs<double>(extraParamsRef[2]);
return static_cast<Z>(!nd4j::math::nd4j_eq<X>(d1, d2, eps));
}
#ifdef __CUDACC__
__device__
static inline Z opAtomic(X d1, X d2, Z *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static Z update(Z old, Z opOutput, Z *extraParamsRef) {
return opOutput + old;
}
op_def static Z merge(X old, Z opOutput, Z *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(Z *extraParamsTotal, Z *extraParamsLocal) {}
};
template <typename X, typename Y>
class EuclideanDistance {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
return nd4j::math::nd4j_sqrt<Y, Y>(reduction);
}
op_def static Y op(X d1, X d2, Y *extraParamsRef) {
X ret = d1 - d2;
return static_cast<Y>(ret * ret);
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
return opOutput + old;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {}
};
template <typename X, typename Y>
class ManhattanDistance {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
return reduction;
}
op_def static Y op(X d1, X d2, Y *extraParamsRef) {
return nd4j::math::nd4j_abs<X>(d1 - d2);
}
op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
return old + opOutput;
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
#ifndef __clang__
#pragma omp declare simd uniform(extraParamsRef)
#endif
op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
};
template <typename X>
class IndexAbsoluteMax {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
val.value = nd4j::math::nd4j_abs<X>(val.value);
return val;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value);
old.value = nd4j::math::nd4j_abs<X>(old.value);
if (opOutput.value > old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (nd4j::math::nd4j_abs<X>(f1.value) > nd4j::math::nd4j_abs<X>(f2.value))
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline X startingValue(const X *input) {
return 0;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X>
class FirstIndex {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
#ifdef __CUDACC__
if (opOutput.index < 0)
return old;
#endif
auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams);
//printf("res: %f; oldIdx: %i; newIdx: %i\n", res, old.index, opOutput.index);
if (res == static_cast<X>(0))
return old;
if (old.index < 0)
return opOutput;
if (old.index > opOutput.index)
return opOutput;
return old;
}
static _CUDA_HD inline X startingValue(const X *input) {
return -nd4j::DataTypeUtils::infOrMax<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = -1;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.index > f2.index)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
};
template <typename X>
class LastIndex {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
#ifdef __CUDACC__
if (opOutput.index < 0)
return old;
#endif
auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams);
if (res == static_cast<X>(0))
return old;
if (old.index < 0)
return opOutput;
if (old.index < opOutput.index)
return opOutput;
return old;
}
static _CUDA_HD inline X startingValue(const X *input) {
return -nd4j::DataTypeUtils::infOrMax<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = -1;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.index < f2.index)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
};
template <typename X>
class IndexMax {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
if (opOutput.value > old.value) {
return opOutput;
}
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.value > f2.value)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline X startingValue(const X *input) {
return -nd4j::DataTypeUtils::infOrMax<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X>
class IndexAbsoluteMin {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(
functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD inline X startingValue(const X *input) {
return nd4j::DataTypeUtils::infOrMax<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value);
old.value = nd4j::math::nd4j_abs<X>(old.value);
if (opOutput.value < old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (nd4j::math::nd4j_abs<X>(f1.value) < nd4j::math::nd4j_abs<X>(f2.value))
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X>
class IndexMin {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(
functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD inline X startingValue(const X *input) {
return nd4j::DataTypeUtils::infOrMax<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
if (opOutput.value < old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.value < f2.value)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X, typename Z>
class SummaryStatsVariance {
public:
static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) {
if (biasCorrected) {
Z ret = static_cast<Z>(val.varianceBiasCorrected());
if (ret < static_cast<Z>(0.0f))
return static_cast<Z>(val.variance());
return ret;
}
return static_cast<Z>(val.variance());
}
static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) {
return d1;
}
};
template <typename X, typename Z>
class SummaryStatsStandardDeviation {
public:
static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) {
if (biasCorrected) {
auto ret = static_cast<Z>(val.varianceBiasCorrected());
if (ret < static_cast<Z>(0.0f))
return nd4j::math::nd4j_sqrt<double, Z>(val.variance());
else
return nd4j::math::nd4j_sqrt<double, Z>(ret);
}
return nd4j::math::nd4j_sqrt<double, Z>(val.variance());
}
static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) {
return d1;
}
};
template <typename X>
class DropOut {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
inline _CUDA_D static X op(X d1, X *params) {
X prob = params[0];
#ifdef __CUDACC__
X length = params[1];
X tid = blockIdx.x * blockDim.x + threadIdx.x;
X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid)));
#else
X rnd = static_cast<X>(rand()) / static_cast<X>(RAND_MAX);
#endif
return rnd >= prob ? static_cast<X>(0.0f) : d1;
}
};
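// Behaviour sketch for DropOut (host path, assuming a float instantiation):
// rnd is uniform in [0, 1], so an element survives with probability prob
// (params[0]) and is zeroed otherwise:
//   float p[1] = {0.8f};
//   float kept = simdOps::DropOut<float>::op(2.f, p);  // 2.f with probability 0.8, else 0.f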
template <typename X, typename Y, typename Z>
class DropOutInverted {
public:
no_op_exec_special
no_op_exec_special_cuda
#ifdef __CUDACC__
__device__
#endif
inline static Z op(X d1, Y d2, Z *params) {
Y prob = d2;
#ifdef __CUDACC__
X length = params[1];
X tid = blockIdx.x * blockDim.x + threadIdx.x;
X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid)));
#else
// divide as floating point: rand() / RAND_MAX in integer arithmetic is almost always 0
X rnd = static_cast<X>(rand()) / static_cast<X>(RAND_MAX);
#endif
// static_cast, not reinterpret_cast: this is a value conversion between arithmetic types
return rnd >= static_cast<X>(prob) ? static_cast<Z>(0.0f) : static_cast<Z>(d1 / static_cast<X>(prob));
}
};
template <typename X, typename Y, typename Z>
class ReplaceNans {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_isnan(d1) ? static_cast<Z>(d2) : static_cast<Z>(d1) ;
}
};
// this op is used for conditional pairwise transforms only
template <typename X, typename Y, typename Z>
class CompareAndReplace{
public:
// op definition for PairWise Transform
op_def static Z op(X d1, Y d2, Z *params) {
auto zd1 = static_cast<Z>(d1);
auto zd2 = static_cast<Z>(d2);
auto compare = params[0];
auto eps = params[2];
int mode = (int) params[3];
if (mode == 0) // equals
if (nd4j::math::nd4j_abs<Z>(zd1 - compare) <= eps)
return zd2;
else
return zd1;
else if (mode == 1) // not equals eps
if (nd4j::math::nd4j_abs<Z>(zd1 - compare) > eps)
return zd2;
else
return zd1;
else if (mode == 2) // less_than eps
if (zd1 < compare)
return zd2;
else
return zd1;
else if (mode ==3) // greater_than
if (zd1 > compare)
return zd2;
else
return zd1;
else if (mode == 4) // less_or_equals_than
if (zd1 <= compare)
return zd2;
else
return zd1;
else if (mode == 5) // greater_or_equals_than
if (zd1 >= compare)
return zd2;
else
return zd1;
else if (mode == 6) // abs_less_than
if (nd4j::math::nd4j_abs<Z>(zd1) < compare)
return zd2;
else
return zd1;
else if (mode == 7) // abs_greater_than
if (nd4j::math::nd4j_abs<Z>(zd1) > compare)
return zd2;
else
return zd1;
else if (mode == 8) // is inf
if (nd4j::math::nd4j_isinf(zd1))
return zd2;
else
return zd1;
else if (mode == 9) // is nan
if (nd4j::math::nd4j_isnan(zd1))
return zd2;
else
return zd1;
else if (mode == 10)
if (zd1 == compare)
return zd2;
else
return zd1;
else if (mode == 11)
if (zd1 != compare)
return zd2;
else
return zd1;
else if (mode == 12) // abs_greater_or_equals_than
if (nd4j::math::nd4j_abs<Z>(zd1) >= compare)
return zd2;
else
return zd1;
else if (mode == 13) // abs_less_or_equals_than
if (nd4j::math::nd4j_abs<Z>(zd1) <= compare)
return zd2;
else
return zd1;
else
printf("Undefined boolean operation: [%i]\n", mode);
return zd1;
}
};
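// Usage sketch (illustrative, not part of the library): params packs the
// comparison constant at params[0], eps at params[2] and the mode at
// params[3] (params[1] is unused here); CompareAndSet below uses the same
// layout. With mode 2 (less_than), assuming float data:
//   float params[4] = {0.5f, 0.0f, 1e-6f, 2.0f};
//   float z = CompareAndReplace<float, float, float>::op(0.25f, 9.0f, params);
//   // z == 9.0f, since 0.25f < 0.5f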
template <typename X, typename Y, typename Z>
class CompareAndSet {
public:
// op definition for PairWise Transform
op_def static Z op(X dX, Y dY, Z *params) {
auto d1 = static_cast<Z>(dX);
auto d2 = static_cast<Z>(dY);
auto compare = params[0];
auto eps = params[2];
auto mode = static_cast<int>(params[3]);
if (mode == 0) // equals
if (nd4j::math::nd4j_abs<Z>(d2 - compare) <= eps)
return d2;
else
return d1;
else if (mode == 1) // not equals
if (nd4j::math::nd4j_abs<Z>(d2 - compare) > eps)
return d2;
else
return d1;
else if (mode == 2) // less_than
if (d2 < compare)
return d2;
else
return d1;
else if (mode ==3) // greater_than
if (d2 > compare)
return d2;
else
return d1;
else if (mode == 4) // less_or_equals_than
if (d2 <= compare)
return d2;
else
return d1;
else if (mode == 5) // greater_or_equals_than
if (d2 >= compare)
return d2;
else
return d1;
else if (mode == 6) // abs_less_than
if (nd4j::math::nd4j_abs<Z>(d2) < compare)
return d2;
else
return d1;
else if (mode == 7) // abs_greater_than
if (nd4j::math::nd4j_abs<Z>(d2) > compare)
return d2;
else
return d1;
else if (mode == 8) // is inf
if (nd4j::math::nd4j_isinf(d2))
return d2;
else
return d1;
else if (mode == 9) // is nan
if (nd4j::math::nd4j_isnan(d2))
return d2;
else
return d1;
else if (mode == 10)
if (d2 == compare)
return d2;
else
return d1;
else if (mode == 11)
if (d2 != compare)
return d2;
else
return d1;
else if (mode == 12) // abs_greater_or_equals_than
if (nd4j::math::nd4j_abs<Z>(d1) >= compare)
return d2;
else
return d1;
else if (mode == 13) // abs_less_or_equals_than
if (nd4j::math::nd4j_abs<Z>(d1) <= compare)
return d2;
else
return d1;
else
printf("Undefined boolean operation: [%i]\n", mode);
return d1;
}
};
template <typename X>
class CompareAndSetTransform {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
// op definition for Transform
op_def static X op(X d1, X *params) {
auto compare = params[0];
auto set = params[1];
auto eps = params[2];
// with mode == 0 we set the value when d1 equals compare; with mode == 1, when it differs
int mode = (int) params[3];
if (mode == 0) // equals
if (nd4j::math::nd4j_abs<X>(d1 - compare) <= eps)
return set;
else
return d1;
//return nd4j::math::nd4j_abs<T>(d1 - compare) <= eps ? set : d1;
else if (mode == 1) // not equals
if (nd4j::math::nd4j_abs<X>(d1 - compare) > eps)
return set;
else
return d1;
//return nd4j::math::nd4j_abs<T>(d1 - compare) > eps ? set : d1;
else if (mode == 2) // less_than
if (d1 < compare)
return set;
else
return d1;
else if (mode ==3) // greater_than
if (d1 > compare)
return set;
else
return d1;
else if (mode == 4) // less_or_equals_than
if (d1 <= compare)
return set;
else
return d1;
else if (mode == 5) // greater_or_equals_than
if (d1 >= compare)
return set;
else
return d1;
else if (mode == 6) // abs_less_than
if (nd4j::math::nd4j_abs<X>(d1) < compare)
return set;
else
return d1;
else if (mode == 7) // abs_greater_than
if (nd4j::math::nd4j_abs<X>(d1) > compare)
return set;
else
return d1;
else if (mode == 8) // is inf
if (nd4j::math::nd4j_isinf(d1))
return set;
else
return d1;
else if (mode == 9) // is nan
if (nd4j::math::nd4j_isnan(d1))
return set;
else
return d1;
else if (mode == 10)
if (d1 == compare)
return set;
else
return d1;
else if (mode == 11)
if (d1 != compare)
return set;
else
return d1;
else if (mode == 12) // abs_greater_or_equals_than
if (nd4j::math::nd4j_abs<X>(d1) >= compare)
return set;
else
return d1;
else if (mode == 13) // abs_less_or_equals_than
if (nd4j::math::nd4j_abs<X>(d1) <= compare)
return set;
else
return d1;
else
printf("Undefined boolean operation: [%i]\n", mode);
return d1;
}
};
}
#endif
|
GB_assign_zombie5.c
|
//------------------------------------------------------------------------------
// GB_assign_zombie5: delete entries in C for C_replace_phase
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// For GrB_Matrix_assign, C(I,J)<M,repl>=..., if C_replace is true and the
// mask M is present, then any entry C(i,j) outside IxJ must be deleted if
// M(i,j)=0.
// See also GB_assign_zombie3 and GB_assign_zombie4.
// C must be sparse or hypersparse.
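// Entries are not removed immediately: a deleted entry becomes a "zombie",
// encoded by flipping the sign of its row index in C->i via GB_FLIP, and all
// zombies are pruned later when pending work on the matrix is finished.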
#include "GB_assign.h"
#include "GB_assign_zombie.h"
#include "GB_subassign_methods.h"
#include "GB_ek_slice.h"
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_WERK_POP (C_ek_slicing, int64_t) ; \
}
GrB_Info GB_assign_zombie5
(
GrB_Matrix C, // the matrix C, or a copy
const GrB_Matrix M,
const bool Mask_comp,
const bool Mask_struct,
const GrB_Index *I,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (!GB_IS_FULL (C)) ;
ASSERT (!GB_IS_BITMAP (C)) ;
ASSERT (GB_ZOMBIES_OK (C)) ;
ASSERT (GB_JUMBLED_OK (C)) ;
ASSERT (!GB_PENDING (C)) ;
ASSERT (!GB_ZOMBIES (M)) ;
ASSERT (!GB_JUMBLED (M)) ; // binary search on M
ASSERT (!GB_PENDING (M)) ;
ASSERT (!GB_aliased (C, M)) ; // NO ALIAS of C==M
//--------------------------------------------------------------------------
// get C
//--------------------------------------------------------------------------
const int64_t *restrict Ch = C->h ;
const int64_t *restrict Cp = C->p ;
// const int64_t Cnvec = C->nvec ;
int64_t *restrict Ci = C->i ;
int64_t nzombies = C->nzombies ;
const int64_t zvlen = C->vlen ;
//--------------------------------------------------------------------------
// get M
//--------------------------------------------------------------------------
const int64_t *restrict Mp = M->p ;
const int64_t *restrict Mh = M->h ;
const int8_t *restrict Mb = M->b ;
const int64_t *restrict Mi = M->i ;
const GB_void *restrict Mx = (GB_void *) (Mask_struct ? NULL : (M->x)) ;
const size_t msize = M->type->size ;
const int64_t Mnvec = M->nvec ;
const int64_t Mvlen = M->vlen ;
const bool M_is_hyper = GB_IS_HYPERSPARSE (M) ;
const bool M_is_bitmap = GB_IS_BITMAP (M) ;
const bool M_is_full = GB_IS_FULL (M) ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// slice the entries for each task
//--------------------------------------------------------------------------
int C_ntasks, C_nthreads ;
GB_WERK_DECLARE (C_ek_slicing, int64_t) ;
GB_SLICE_MATRIX (C, 64, chunk) ;
//--------------------------------------------------------------------------
// each task creates its own zombies
//--------------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(C_nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (tid = 0 ; tid < C_ntasks ; tid++)
{
//----------------------------------------------------------------------
// get the task description
//----------------------------------------------------------------------
int64_t kfirst = kfirst_Cslice [tid] ;
int64_t klast = klast_Cslice [tid] ;
//----------------------------------------------------------------------
// scan vectors kfirst to klast for entries to delete
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get C(:,j) and determine if j is outside the list J
//------------------------------------------------------------------
int64_t j = GBH (Ch, k) ;
// j_outside is true if column j is outside the C(I,J) submatrix
bool j_outside = !GB_ij_is_in_list (J, nJ, j, Jkind, Jcolon) ;
int64_t pC_start, pC_end ;
GB_get_pA (&pC_start, &pC_end, tid, k,
kfirst, klast, pstart_Cslice, Cp, zvlen) ;
//------------------------------------------------------------------
// get M(:,j)
//------------------------------------------------------------------
// this works for M with any sparsity structure
int64_t pM_start, pM_end ;
int64_t pright = Mnvec - 1 ;
int64_t pleft = 0 ;
GB_lookup (M_is_hyper, Mh, Mp, Mvlen, &pleft, pright, j,
&pM_start, &pM_end) ;
bool mjdense = (pM_end - pM_start) == Mvlen ;
//------------------------------------------------------------------
// iterate over all entries in C(:,j)
//------------------------------------------------------------------
for (int64_t pC = pC_start ; pC < pC_end ; pC++)
{
//--------------------------------------------------------------
// consider C(i,j)
//--------------------------------------------------------------
// C(i,j) is outside the C(I,J) submatrix if either i is
// not in the list I, or j is not in J, or both.
int64_t i = Ci [pC] ;
if (!GB_IS_ZOMBIE (i) &&
(j_outside || !GB_ij_is_in_list (I, nI, i, Ikind, Icolon)))
{
//----------------------------------------------------------
// C(i,j) is a live entry not in the C(I,J) submatrix
//----------------------------------------------------------
// Check the mask M to see if it should be deleted.
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (i) ;
if (Mask_comp)
{
// negate the mask if Mask_comp is true
mij = !mij ;
}
if (!mij)
{
// delete C(i,j) by marking it as a zombie
nzombies++ ;
Ci [pC] = GB_FLIP (i) ;
}
}
}
}
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
C->nzombies = nzombies ;
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
}
|
NeighborhoodGraph.h
|
#ifndef _SPTAG_COMMON_NG_H_
#define _SPTAG_COMMON_NG_H_
#include "../VectorIndex.h"
#include "CommonUtils.h"
#include "Dataset.h"
#include "FineGrainedLock.h"
#include "QueryResultSet.h"
namespace SPTAG
{
namespace COMMON
{
class NeighborhoodGraph
{
public:
NeighborhoodGraph(): m_iTPTNumber(32),
m_iTPTLeafSize(2000),
m_iSamples(1000),
m_numTopDimensionTPTSplit(5),
m_iNeighborhoodSize(32),
m_iNeighborhoodScale(16),
m_iCEFScale(4),
m_iRefineIter(0),
m_iCEF(1000),
m_iMaxCheckForRefineGraph(10000) {}
~NeighborhoodGraph() {}
virtual void InsertNeighbors(VectorIndex* index, const int node, int insertNode, float insertDist) = 0;
virtual void RebuildNeighbors(VectorIndex* index, const int node, int* nodes, const BasicResult* queryResults, const int numResults) = 0;
virtual float GraphAccuracyEstimation(VectorIndex* index, const int samples, const std::unordered_map<int, int>* idmap = nullptr) = 0;
template <typename T>
void BuildGraph(VectorIndex* index, const std::unordered_map<int, int>* idmap = nullptr)
{
std::cout << "build RNG graph!" << std::endl;
m_iGraphSize = index->GetNumSamples();
m_iNeighborhoodSize = m_iNeighborhoodSize * m_iNeighborhoodScale;
m_pNeighborhoodGraph.Initialize(m_iGraphSize, m_iNeighborhoodSize);
m_dataUpdateLock.resize(m_iGraphSize);
if (m_iGraphSize < 1000) {
RefineGraph<T>(index, idmap);
std::cout << "Build RNG Graph end!" << std::endl;
return;
}
{
COMMON::Dataset<float> NeighborhoodDists(m_iGraphSize, m_iNeighborhoodSize);
std::vector<std::vector<int>> TptreeDataIndices(m_iTPTNumber, std::vector<int>(m_iGraphSize));
std::vector<std::vector<std::pair<int, int>>> TptreeLeafNodes(m_iTPTNumber, std::vector<std::pair<int, int>>());
for (int i = 0; i < m_iGraphSize; i++)
for (int j = 0; j < m_iNeighborhoodSize; j++)
(NeighborhoodDists)[i][j] = MaxDist;
std::cout << "Parallel TpTree Partition begin " << std::endl;
#pragma omp parallel for schedule(dynamic)
for (int i = 0; i < m_iTPTNumber; i++)
{
Sleep(i * 100); std::srand(clock());
for (int j = 0; j < m_iGraphSize; j++) TptreeDataIndices[i][j] = j;
std::random_shuffle(TptreeDataIndices[i].begin(), TptreeDataIndices[i].end());
PartitionByTptree<T>(index, TptreeDataIndices[i], 0, m_iGraphSize - 1, TptreeLeafNodes[i]);
std::cout << "Finish Getting Leaves for Tree " << i << std::endl;
}
std::cout << "Parallel TpTree Partition done" << std::endl;
for (int i = 0; i < m_iTPTNumber; i++)
{
#pragma omp parallel for schedule(dynamic)
for (int j = 0; j < (int)TptreeLeafNodes[i].size(); j++)
{
int start_index = TptreeLeafNodes[i][j].first;
int end_index = TptreeLeafNodes[i][j].second;
if (omp_get_thread_num() == 0) std::cout << "\rProcessing Tree " << i << ' ' << j * 100 / TptreeLeafNodes[i].size() << '%';
for (int x = start_index; x < end_index; x++)
{
for (int y = x + 1; y <= end_index; y++)
{
int p1 = TptreeDataIndices[i][x];
int p2 = TptreeDataIndices[i][y];
float dist = index->ComputeDistance(index->GetSample(p1), index->GetSample(p2));
if (idmap != nullptr) {
p1 = (idmap->find(p1) == idmap->end()) ? p1 : idmap->at(p1);
p2 = (idmap->find(p2) == idmap->end()) ? p2 : idmap->at(p2);
}
COMMON::Utils::AddNeighbor(p2, dist, (m_pNeighborhoodGraph)[p1], (NeighborhoodDists)[p1], m_iNeighborhoodSize);
COMMON::Utils::AddNeighbor(p1, dist, (m_pNeighborhoodGraph)[p2], (NeighborhoodDists)[p2], m_iNeighborhoodSize);
}
}
}
TptreeDataIndices[i].clear();
TptreeLeafNodes[i].clear();
std::cout << std::endl;
}
TptreeDataIndices.clear();
TptreeLeafNodes.clear();
}
if (m_iMaxCheckForRefineGraph > 0) {
RefineGraph<T>(index, idmap);
}
}
template <typename T>
void RefineGraph(VectorIndex* index, const std::unordered_map<int, int>* idmap = nullptr)
{
m_iCEF *= m_iCEFScale;
m_iMaxCheckForRefineGraph *= m_iCEFScale;
#pragma omp parallel for schedule(dynamic)
for (int i = 0; i < m_iGraphSize; i++)
{
RefineNode<T>(index, i, false);
}
std::cout << "Refine RNG, graph acc:" << GraphAccuracyEstimation(index, 100, idmap) << std::endl;
m_iCEF /= m_iCEFScale;
m_iMaxCheckForRefineGraph /= m_iCEFScale;
m_iNeighborhoodSize /= m_iNeighborhoodScale;
#pragma omp parallel for schedule(dynamic)
for (int i = 0; i < m_iGraphSize; i++)
{
RefineNode<T>(index, i, false);
}
std::cout << "Refine RNG, graph acc:" << GraphAccuracyEstimation(index, 100, idmap) << std::endl;
if (idmap != nullptr) {
for (auto iter = idmap->begin(); iter != idmap->end(); iter++)
if (iter->first < 0)
{
m_pNeighborhoodGraph[-1 - iter->first][m_iNeighborhoodSize - 1] = -2 - iter->second;
}
}
}
template <typename T>
ErrorCode RefineGraph(VectorIndex* index, std::vector<int>& indices, std::vector<int>& reverseIndices,
std::string graphFileName, const std::unordered_map<int, int>* idmap = nullptr)
{
int R = (int)indices.size();
#pragma omp parallel for schedule(dynamic)
for (int i = 0; i < R; i++)
{
RefineNode<T>(index, indices[i], false);
int* nodes = m_pNeighborhoodGraph[indices[i]];
for (int j = 0; j < m_iNeighborhoodSize; j++)
{
if (nodes[j] < 0) nodes[j] = -1;
else nodes[j] = reverseIndices[nodes[j]];
}
if (idmap == nullptr || idmap->find(-1 - indices[i]) == idmap->end()) continue;
nodes[m_iNeighborhoodSize - 1] = -2 - idmap->at(-1 - indices[i]);
}
std::ofstream graphOut(graphFileName, std::ios::binary);
if (!graphOut.is_open()) return ErrorCode::FailedCreateFile;
graphOut.write((char*)&R, sizeof(int));
graphOut.write((char*)&m_iNeighborhoodSize, sizeof(int));
for (int i = 0; i < R; i++) {
graphOut.write((char*)m_pNeighborhoodGraph[indices[i]], sizeof(int) * m_iNeighborhoodSize);
}
graphOut.close();
return ErrorCode::Success;
}
template <typename T>
void RefineNode(VectorIndex* index, const int node, bool updateNeighbors)
{
COMMON::QueryResultSet<T> query((const T*)index->GetSample(node), m_iCEF + 1);
index->SearchIndex(query);
RebuildNeighbors(index, node, m_pNeighborhoodGraph[node], query.GetResults(), m_iCEF + 1);
if (updateNeighbors) {
// update neighbors
for (int j = 0; j <= m_iCEF; j++)
{
BasicResult* item = query.GetResult(j);
if (item->VID < 0) break;
if (item->VID == node) continue;
std::lock_guard<std::mutex> lock(m_dataUpdateLock[item->VID]);
InsertNeighbors(index, item->VID, node, item->Dist);
}
}
}
template <typename T>
void PartitionByTptree(VectorIndex* index, std::vector<int>& indices, const int first, const int last,
std::vector<std::pair<int, int>> & leaves)
{
if (last - first <= m_iTPTLeafSize)
{
leaves.push_back(std::make_pair(first, last));
}
else
{
std::vector<float> Mean(index->GetFeatureDim(), 0);
int iIteration = 100;
int end = min(first + m_iSamples, last);
int count = end - first + 1;
// calculate the mean of each dimension
for (int j = first; j <= end; j++)
{
const T* v = (const T*)index->GetSample(indices[j]);
for (int k = 0; k < index->GetFeatureDim(); k++)
{
Mean[k] += v[k];
}
}
for (int k = 0; k < index->GetFeatureDim(); k++)
{
Mean[k] /= count;
}
std::vector<BasicResult> Variance;
Variance.reserve(index->GetFeatureDim());
for (int j = 0; j < index->GetFeatureDim(); j++)
{
Variance.push_back(BasicResult(j, 0));
}
// calculate the variance of each dimension
for (int j = first; j <= end; j++)
{
const T* v = (const T*)index->GetSample(indices[j]);
for (int k = 0; k < index->GetFeatureDim(); k++)
{
float dist = v[k] - Mean[k];
Variance[k].Dist += dist*dist;
}
}
std::sort(Variance.begin(), Variance.end(), COMMON::Compare);
std::vector<int> indexs(m_numTopDimensionTPTSplit);
std::vector<float> weight(m_numTopDimensionTPTSplit), bestweight(m_numTopDimensionTPTSplit);
float bestvariance = Variance[index->GetFeatureDim() - 1].Dist;
for (int i = 0; i < m_numTopDimensionTPTSplit; i++)
{
indexs[i] = Variance[index->GetFeatureDim() - 1 - i].VID;
bestweight[i] = 0;
}
bestweight[0] = 1;
float bestmean = Mean[indexs[0]];
std::vector<float> Val(count);
for (int i = 0; i < iIteration; i++)
{
float sumweight = 0;
for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
{
weight[j] = float(rand() % 10000) / 5000.0f - 1.0f;
sumweight += weight[j] * weight[j];
}
sumweight = sqrt(sumweight);
for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
{
weight[j] /= sumweight;
}
float mean = 0;
for (int j = 0; j < count; j++)
{
Val[j] = 0;
const T* v = (const T*)index->GetSample(indices[first + j]);
for (int k = 0; k < m_numTopDimensionTPTSplit; k++)
{
Val[j] += weight[k] * v[indexs[k]];
}
mean += Val[j];
}
mean /= count;
float var = 0;
for (int j = 0; j < count; j++)
{
float dist = Val[j] - mean;
var += dist * dist;
}
if (var > bestvariance)
{
bestvariance = var;
bestmean = mean;
for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
{
bestweight[j] = weight[j];
}
}
}
int i = first;
int j = last;
// decide which child one point belongs
while (i <= j)
{
float val = 0;
const T* v = (const T*)index->GetSample(indices[i]);
for (int k = 0; k < m_numTopDimensionTPTSplit; k++)
{
val += bestweight[k] * v[indexs[k]];
}
if (val < bestmean)
{
i++;
}
else
{
std::swap(indices[i], indices[j]);
j--;
}
}
// if all the points in the node are equal, split the node equally in two
if ((i == first) || (i == last + 1))
{
i = (first + last + 1) / 2;
}
Mean.clear();
Variance.clear();
Val.clear();
indexs.clear();
weight.clear();
bestweight.clear();
PartitionByTptree<T>(index, indices, first, i - 1, leaves);
PartitionByTptree<T>(index, indices, i, last, leaves);
}
}
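// Each recorded leaf is an inclusive [first, last] range into `indices`;
// BuildGraph then runs a brute-force all-pairs distance pass inside each
// leaf to seed the neighborhood lists.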
bool LoadGraph(std::string sGraphFilename)
{
std::cout << "Load Graph From " << sGraphFilename << std::endl;
FILE * fp = fopen(sGraphFilename.c_str(), "rb");
if (fp == NULL) return false;
fread(&m_iGraphSize, sizeof(int), 1, fp);
fread(&m_iNeighborhoodSize, sizeof(int), 1, fp);
m_pNeighborhoodGraph.Initialize(m_iGraphSize, m_iNeighborhoodSize);
m_dataUpdateLock.resize(m_iGraphSize);
for (int i = 0; i < m_iGraphSize; i++)
{
fread((m_pNeighborhoodGraph)[i], sizeof(int), m_iNeighborhoodSize, fp);
}
fclose(fp);
std::cout << "Load Graph (" << m_iGraphSize << "," << m_iNeighborhoodSize << ") Finish!" << std::endl;
return true;
}
bool SetGraph(char* pGraphMemFile)
{
m_iGraphSize = *((int*)pGraphMemFile);
pGraphMemFile += sizeof(int);
m_iNeighborhoodSize = *((int*)pGraphMemFile);
pGraphMemFile += sizeof(int);
m_pNeighborhoodGraph.Initialize(m_iGraphSize, m_iNeighborhoodSize, (int*)pGraphMemFile);
m_dataUpdateLock.resize(m_iGraphSize);
return true;
}
bool SaveGraph(std::string sGraphFilename) const
{
std::cout << "Save Graph To " << sGraphFilename << std::endl;
FILE *fp = fopen(sGraphFilename.c_str(), "wb");
if (fp == NULL) return false;
fwrite(&m_iGraphSize, sizeof(int), 1, fp);
fwrite(&m_iNeighborhoodSize, sizeof(int), 1, fp);
for (int i = 0; i < m_iGraphSize; i++)
{
fwrite((m_pNeighborhoodGraph)[i], sizeof(int), m_iNeighborhoodSize, fp);
}
fclose(fp);
std::cout << "Save Graph (" << m_iGraphSize << "," << m_iNeighborhoodSize << ") Finish!" << std::endl;
return true;
}
inline void AddBatch(int num) { m_pNeighborhoodGraph.AddBatch(num); m_iGraphSize += num; m_dataUpdateLock.resize(m_iGraphSize); }
inline int* operator[](int index) { return m_pNeighborhoodGraph[index]; }
inline const int* operator[](int index) const { return m_pNeighborhoodGraph[index]; }
inline void SetR(int rows) { m_pNeighborhoodGraph.SetR(rows); m_iGraphSize = rows; m_dataUpdateLock.resize(m_iGraphSize); }
inline int R() const { return m_iGraphSize; }
static std::shared_ptr<NeighborhoodGraph> CreateInstance(std::string type);
protected:
// Graph structure
int m_iGraphSize;
COMMON::Dataset<int> m_pNeighborhoodGraph;
COMMON::FineGrainedLock m_dataUpdateLock; // protect one row of the graph
public:
int m_iTPTNumber, m_iTPTLeafSize, m_iSamples, m_numTopDimensionTPTSplit;
int m_iNeighborhoodSize, m_iNeighborhoodScale, m_iCEFScale, m_iRefineIter, m_iCEF, m_iMaxCheckForRefineGraph;
};
}
}
#endif
|
DRB050-functionparameter-orig-yes.c
|
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: [email protected], [email protected], [email protected],
[email protected], [email protected])
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
/*
Arrays passed as function parameters
*/
void foo1(double o1[], double c[], int len) {
int i;
long long int AI1[6];
AI1[0] = len + -1;
AI1[1] = 8 * AI1[0];
AI1[2] = AI1[1] + 8;
AI1[3] = AI1[2] / 8;
AI1[4] = (AI1[3] > 0);
AI1[5] = (AI1[4] ? AI1[3] : 0);
char RST_AI1 = 0;
RST_AI1 |= !(((void*) (c + 0) > (void*) (o1 + AI1[5]))
|| ((void*) (o1 + 0) > (void*) (c + AI1[5])));
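// RST_AI1 is nonzero when the o1 and c ranges overlap in memory, in which
// case the if(!RST_AI1) clauses skip the device data mapping (aliased
// arrays cannot be mapped as independent copies). main() calls
// foo1(&o1[1], &o1[0], 100), so o1 and c alias with offset 1 and the
// parallel loop below carries the data race this benchmark is seeded with.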
#pragma omp target data map(to: c[0:AI1[5]]) map(tofrom: o1[0:AI1[5]]) if(!RST_AI1)
{
#pragma omp target parallel for
for (i = 0; i < len; ++i) {
double volnew_o8 = 0.5 * c[i];
o1[i] = volnew_o8;
}
}
}
int main() {
double o1[101];
double c[101];
int i;
int len = 100;
char RST_AI1 = 0;
#pragma omp target data map(tofrom: c[0:101],o1[0:101]) if(!RST_AI1)
{
#pragma omp target parallel for
for (i = 0; i < len; ++i) {
c[i] = i + 1.01;
o1[i] = i + 1.01;
}
}
foo1(&o1[1], &o1[0], 100);
for (i = 0; i < len; ++i) {
printf("%lf\n", o1[i]);
}
return 0;
}
|
GB_binop__lor_int8.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__lor_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__lor_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_int8)
// A*D function (colscale): GB (_AxD__lor_int8)
// D*A function (rowscale): GB (_DxB__lor_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_int8)
// C=scalar+B GB (_bind1st__lor_int8)
// C=scalar+B' GB (_bind1st_tran__lor_int8)
// C=A+scalar GB (_bind2nd__lor_int8)
// C=A'+scalar GB (_bind2nd_tran__lor_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) || (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_INT8 || GxB_NO_LOR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__lor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lor_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lor_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lor_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lor_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__lor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__lor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lor_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lor_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = ((x != 0) || (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lor_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = ((aij != 0) || (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__lor_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__lor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_hyper_prune.c
|
//------------------------------------------------------------------------------
// GB_hyper_prune: remove empty vectors from a hypersparse Ap, Ah list
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Removes empty vectors from a hypersparse list. On input, *Ap and *Ah are
// assumed to be NULL. The input arrays Ap_old and Ah_old are not modified,
// and thus can be shallow content from another matrix. New hyperlists Ap and
// Ah are allocated, for nvec vectors, all nonempty.
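// Example (illustrative): with nvec_old = 4, Ap_old = [0,0,2,2,5], and
// Ah_old = [0,3,5,9], vectors 0 and 2 are empty, so the result is
// Ap = [0,2,5], Ah = [3,9], and nvec = 2.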
#include "GB.h"
GrB_Info GB_hyper_prune
(
// output, not allocated on input:
int64_t *restrict *p_Ap, size_t *p_Ap_size, // size nvec+1
int64_t *restrict *p_Ah, size_t *p_Ah_size, // size nvec
int64_t *p_nvec, // # of vectors, all nonempty
// input, not modified
const int64_t *Ap_old, // size nvec_old+1
const int64_t *Ah_old, // size nvec_old
const int64_t nvec_old, // original number of vectors
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (p_Ap != NULL) ;
ASSERT (p_Ah != NULL) ;
ASSERT (p_nvec != NULL) ;
ASSERT (Ap_old != NULL) ;
ASSERT (Ah_old != NULL) ;
ASSERT (nvec_old >= 0) ;
(*p_Ap) = NULL ; (*p_Ap_size) = 0 ;
(*p_Ah) = NULL ; (*p_Ah_size) = 0 ;
(*p_nvec) = -1 ;
int64_t *restrict W = NULL ; size_t W_size = 0 ;
int64_t *restrict Ap = NULL ; size_t Ap_size = 0 ;
int64_t *restrict Ah = NULL ; size_t Ah_size = 0 ;
//--------------------------------------------------------------------------
// determine the # of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (nvec_old, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
W = GB_MALLOC_WERK (nvec_old+1, int64_t, &W_size) ;
if (W == NULL)
{
// out of memory
return (GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// count the # of nonempty vectors
//--------------------------------------------------------------------------
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < nvec_old ; k++)
{
// W [k] = 1 if the kth vector is nonempty; 0 if empty
W [k] = (Ap_old [k] < Ap_old [k+1]) ;
}
int64_t nvec ;
GB_cumsum (W, nvec_old, &nvec, nthreads, Context) ;
//--------------------------------------------------------------------------
// allocate the result
//--------------------------------------------------------------------------
Ap = GB_MALLOC (nvec+1, int64_t, &Ap_size) ;
Ah = GB_MALLOC (nvec , int64_t, &Ah_size) ;
if (Ap == NULL || Ah == NULL)
{
// out of memory
GB_FREE_WERK (&W, W_size) ;
GB_FREE (&Ap, Ap_size) ;
GB_FREE (&Ah, Ah_size) ;
return (GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// create the Ap and Ah result
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < nvec_old ; k++)
{
if (Ap_old [k] < Ap_old [k+1])
{
int64_t knew = W [k] ;
Ap [knew] = Ap_old [k] ;
Ah [knew] = Ah_old [k] ;
}
}
Ap [nvec] = Ap_old [nvec_old] ;
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WERK (&W, W_size) ;
(*p_Ap) = Ap ; (*p_Ap_size) = Ap_size ;
(*p_Ah) = Ah ; (*p_Ah_size) = Ah_size ;
(*p_nvec) = nvec ;
return (GrB_SUCCESS) ;
}
|
GB_unaryop__lnot_uint32_uint64.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint32_uint64
// op(A') function: GB_tran__lnot_uint32_uint64
// C type: uint32_t
// A type: uint64_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint32_uint64
(
uint32_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint32_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
bli_gemm_ref.c
|
/*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2014, The University of Texas at Austin
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name(s) of the copyright holder(s) nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "blis.h"
// Completely generic gemm ukr implementation which checks MR/NR at
// runtime. Very slow, but has to be used in certain cases.
#undef GENTFUNC
#define GENTFUNC( ctype, ch, opname, arch, suf ) \
\
static void PASTEMAC3(ch,opname,arch,suf) \
( \
dim_t m, \
dim_t n, \
dim_t k, \
ctype* restrict alpha, \
ctype* restrict a, \
ctype* restrict b, \
ctype* restrict beta, \
ctype* restrict c, inc_t rs_c, inc_t cs_c, \
auxinfo_t* data, \
cntx_t* cntx \
) \
{ \
const num_t dt = PASTEMAC(ch,type); \
\
const inc_t packmr = bli_cntx_get_blksz_max_dt( dt, BLIS_MR, cntx ); \
const inc_t packnr = bli_cntx_get_blksz_max_dt( dt, BLIS_NR, cntx ); \
\
const inc_t rs_a = bli_cntx_get_blksz_def_dt( dt, BLIS_BBM, cntx ); \
const inc_t cs_a = packmr; \
\
const inc_t rs_b = packnr; \
const inc_t cs_b = bli_cntx_get_blksz_def_dt( dt, BLIS_BBN, cntx ); \
\
ctype ab[ BLIS_STACK_BUF_MAX_SIZE \
/ sizeof( ctype ) ] \
__attribute__((aligned(BLIS_STACK_BUF_ALIGN_SIZE))); \
const inc_t rs_ab = 1; \
const inc_t cs_ab = m; \
\
dim_t l, j, i; \
\
ctype ai; \
ctype bj; \
\
\
/* Initialize the accumulator elements in ab to zero. */ \
for ( i = 0; i < m * n; ++i ) \
{ \
PASTEMAC(ch,set0s)( *(ab + i) ); \
} \
\
/* Perform a series of k rank-1 updates into ab. */ \
for ( l = 0; l < k; ++l ) \
{ \
ctype* restrict abij = ab; \
\
/* In an optimized implementation, these two loops over MR and NR
are typically fully unrolled. */ \
for ( j = 0; j < n; ++j ) \
{ \
bj = *(b + j*cs_b); \
\
for ( i = 0; i < m; ++i ) \
{ \
ai = *(a + i*rs_a); \
\
PASTEMAC(ch,dots)( ai, bj, *abij ); \
\
abij += rs_ab; \
} \
} \
\
a += cs_a; \
b += rs_b; \
} \
\
/* Scale the result in ab by alpha. */ \
for ( i = 0; i < m * n; ++i ) \
{ \
PASTEMAC(ch,scals)( *alpha, *(ab + i) ); \
} \
\
/* If beta is zero, overwrite c with the scaled result in ab. Otherwise,
scale by beta and then add the scaled result in ab. */ \
if ( PASTEMAC(ch,eq0)( *beta ) ) \
{ \
PASTEMAC(ch,copys_mxn) \
( \
m, \
n, \
ab, rs_ab, cs_ab, \
c, rs_c, cs_c \
); \
} \
else \
{ \
PASTEMAC(ch,xpbys_mxn) \
( \
m, \
n, \
ab, rs_ab, cs_ab, \
beta, \
c, rs_c, cs_c \
); \
} \
}
INSERT_GENTFUNC_BASIC2( gemm_gen, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX )
// An implementation that attempts to facilitate emission of vectorized
// instructions via constant loop bounds + #pragma omp simd directives.
// If compile-time MR/NR are not available (indicated by BLIS_[MN]R_x = -1),
// then the non-unrolled version (above) is used.
#undef GENTFUNC
#define GENTFUNC( ctype, ch, opname, arch, suf ) \
\
void PASTEMAC3(ch,opname,arch,suf) \
( \
dim_t m, \
dim_t n, \
dim_t k, \
ctype* restrict alpha, \
ctype* restrict a, \
ctype* restrict b, \
ctype* restrict beta, \
ctype* restrict c, inc_t rs_c, inc_t cs_c, \
auxinfo_t* data, \
cntx_t* cntx \
) \
{ \
\
const dim_t mr = PASTECH(BLIS_MR_,ch); \
const dim_t nr = PASTECH(BLIS_NR_,ch); \
\
if ( mr == -1 || nr == -1 ) \
{ \
PASTEMAC3(ch,gemm_gen,arch,suf) \
( \
m, \
n, \
k, \
alpha, \
a, \
b, \
beta, \
c, rs_c, cs_c, \
data, \
cntx \
); \
return; \
} \
\
ctype ab[ BLIS_STACK_BUF_MAX_SIZE \
/ sizeof( ctype ) ] \
__attribute__((aligned(BLIS_STACK_BUF_ALIGN_SIZE))); \
const inc_t rs_ab = nr; \
const inc_t cs_ab = 1; \
\
const inc_t rs_a = PASTECH(BLIS_BBM_,ch); \
const inc_t cs_a = PASTECH(BLIS_PACKMR_,ch); \
const inc_t rs_b = PASTECH(BLIS_PACKNR_,ch); \
const inc_t cs_b = PASTECH(BLIS_BBN_,ch); \
\
\
/* Initialize the accumulator elements in ab to zero. */ \
PRAGMA_SIMD \
for ( dim_t i = 0; i < mr * nr; ++i ) \
{ \
PASTEMAC(ch,set0s)( ab[ i ] ); \
} \
\
/* Perform a series of k rank-1 updates into ab. */ \
for ( dim_t l = 0; l < k; ++l ) \
{ \
for ( dim_t i = 0; i < mr; ++i ) \
{ \
PRAGMA_SIMD \
for ( dim_t j = 0; j < nr; ++j ) \
{ \
PASTEMAC(ch,dots) \
( \
a[ i*rs_a ], \
b[ j*cs_b ], \
ab[ i*rs_ab + j*cs_ab ] \
); \
} \
} \
\
a += cs_a; \
b += rs_b; \
} \
\
/* Scale the result in ab by alpha. */ \
PRAGMA_SIMD \
for ( dim_t i = 0; i < mr * nr; ++i ) \
{ \
PASTEMAC(ch,scals)( *alpha, ab[ i ] ); \
} \
\
/* Output/accumulate intermediate result ab based on the storage
of c and the value of beta. */ \
if ( cs_c == 1 ) \
{ \
/* C is row-stored. */ \
\
if ( PASTEMAC(ch,eq0)( *beta ) ) \
{ \
for ( dim_t i = 0; i < m; ++i ) \
for ( dim_t j = 0; j < n; ++j ) \
PASTEMAC(ch,copys) \
( \
ab[ i*rs_ab + j*cs_ab ], \
c [ i*rs_c + j*1 ] \
); \
} \
else \
{ \
for ( dim_t i = 0; i < m; ++i ) \
for ( dim_t j = 0; j < n; ++j ) \
PASTEMAC(ch,xpbys) \
( \
ab[ i*rs_ab + j*cs_ab ], \
*beta, \
c [ i*rs_c + j*1 ] \
); \
} \
} \
else \
{ \
/* C is column-stored or general-stored. */ \
\
if ( PASTEMAC(ch,eq0)( *beta ) ) \
{ \
for ( dim_t j = 0; j < n; ++j ) \
for ( dim_t i = 0; i < m; ++i ) \
PASTEMAC(ch,copys) \
( \
ab[ i*rs_ab + j*cs_ab ], \
c [ i*rs_c + j*cs_c ] \
); \
} \
else \
{ \
for ( dim_t j = 0; j < n; ++j ) \
for ( dim_t i = 0; i < m; ++i ) \
PASTEMAC(ch,xpbys) \
( \
ab[ i*rs_ab + j*cs_ab ], \
*beta, \
c [ i*rs_c + j*cs_c ] \
); \
} \
} \
}
INSERT_GENTFUNC_BASIC2( gemm, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX )
|
utils.h
|
#pragma once
#ifdef WIN32
#define _USE_MATH_DEFINES
#include <math.h>
#else
#include <math.h>
#endif
#include <float.h>
#include <omp.h>
#include <string.h>
#include <algorithm>
#include <chrono>
#include <memory>
#include <mutex>
#include <numeric>
#include <random>
#include <vector>
#include <open3d/geometry/Geometry.h>
#include <open3d/geometry/PointCloud.h>
#include <open3d/pipelines/registration/Feature.h>
#include <Eigen/Dense>
namespace misc3d {
typedef std::shared_ptr<open3d::geometry::Geometry> GeometryPtr;
typedef std::shared_ptr<open3d::geometry::PointCloud> PointCloudPtr;
typedef std::shared_ptr<open3d::geometry::TriangleMesh> TriangleMeshPtr;
typedef std::shared_ptr<open3d::pipelines::registration::Feature> FeaturePtr;
/**
* @brief Timer for duration measurement.
*
*/
class Timer {
public:
void Start() { t0_ = std::chrono::high_resolution_clock::now(); }
double Stop() {
const double timestamp =
std::chrono::duration<double>(
std::chrono::high_resolution_clock::now() - t0_)
.count();
return timestamp;
}
private:
std::chrono::high_resolution_clock::time_point t0_;
};
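// Usage sketch (illustrative):
//   Timer timer;
//   timer.Start();
//   // ... code to measure ...
//   const double seconds = timer.Stop();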
/**
* @brief base sampler class
*
* @tparam T
*/
template <typename T>
class Sampler {
public:
/**
* @brief pure virtual operator, which defines the I/O of this sampler
*
* @param sample_size
* @return std::vector<T>
*/
virtual std::vector<T> operator()(size_t sample_size) = 0;
};
/**
* @brief Extract a random sample of given sample_size from the input indices
*
* @tparam T
*/
template <typename T>
class RandomSampler : public Sampler<T> {
public:
explicit RandomSampler(const size_t size) : Sampler<T>(), size_(size) {
std::random_device rd;
rng_ = std::mt19937(rd());
}
// This operator is usually used inside a for loop to sample a small subset
// of the original indices
std::vector<T> operator()(size_t sample_size) override {
// Lock this operation when using OpenMP to ensure synchronization
std::lock_guard<std::mutex> guard(mutex_);
std::vector<T> sample;
sample.reserve(sample_size);
size_t valid_sample = 0;
while (valid_sample < sample_size) {
size_t idx = rng_() % size_;
if (std::find(sample.begin(), sample.end(), idx) == sample.end()) {
sample.push_back(idx);
valid_sample++;
}
}
return sample;
}
// This function is usually called once to sample more than half of the
// original indices
std::vector<T> SampleWithoutDuplicate(size_t sample_size) {
std::vector<T> indices(size_);
std::iota(indices.begin(), indices.end(), 0);
for (size_t i = 0; i < sample_size; ++i) {
std::swap(indices[i], indices[rng_() % size_]);
}
std::vector<T> sample;
sample.reserve(sample_size);
for (size_t idx = 0; idx < sample_size; ++idx) {
sample.push_back(indices[idx]);
}
return sample;
}
private:
size_t size_;
std::mt19937 rng_;
std::mutex mutex_;
};
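// Usage sketch (illustrative): draw 3 distinct indices from [0, 100).
//   RandomSampler<size_t> sampler(100);
//   const std::vector<size_t> picks = sampler(3);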
/**
* @brief Perform normal consistency. Assumes the point clouds are expressed
* in the camera coordinate frame.
*
* @param pc
*/
inline void NormalConsistent(open3d::geometry::PointCloud &pc) {
if (!pc.HasNormals()) {
return;
} else {
const int size = pc.points_.size();
#pragma omp parallel for
for (int i = 0; i < size; i++) {
if (pc.points_[i].dot(pc.normals_[i]) > 0) {
pc.normals_[i] *= -1;
}
pc.normals_[i].normalize();
}
}
}
/**
* @brief extract data by indices
*
* @param src
* @param index
* @param dst
*/
template <typename T>
inline void GetVectorByIndex(const std::vector<T> &src,
const std::vector<size_t> &index,
std::vector<T> &dst) {
const size_t num = index.size();
dst.resize(num);
#pragma omp parallel for
for (int i = 0; i < num; i++) {
dst[i] = src[index[i]];
}
}
/**
* @brief Get Eigen matrix from vector
*
* @param src
* @param index
* @param dst
*/
template <typename T>
inline void GetMatrixByIndex(const std::vector<Eigen::Matrix<T, 3, 1>> &src,
const std::vector<size_t> &index,
Eigen::Matrix<T, 3, Eigen::Dynamic> &dst) {
const size_t num = index.size();
dst.setZero(3, num);
if (src.size() == 0) {
return;
}
#pragma omp parallel for
for (int i = 0; i < num; i++) {
dst.col(i) = src[index[i]];
}
}
/**
 * @brief Convert an N x 3 Eigen matrix of points into a vector of 3D points.
 *
 * @param pc
 * @param new_pc
 */
template <typename T>
inline void EigenMatrixToVector(const Eigen::Matrix<T, Eigen::Dynamic, 3> &pc,
std::vector<Eigen::Matrix<T, 3, 1>> &new_pc) {
const size_t num = pc.rows();
const size_t data_length = sizeof(T) * 3;
new_pc.resize(num);
#pragma omp parallel for
for (int i = 0; i < num; i++) {
const Eigen::Matrix<T, 3, 1> &p = pc.row(i);
memcpy(new_pc[i].data(), p.data(), data_length);
}
}
/**
 * @brief Convert N x 3 point and normal matrices into a vector of stacked
 * 6D (point, normal) vectors.
 *
 * @param pc
 * @param normal
 * @param new_pc
 */
template <typename T>
inline void EigenMatrixToVector(
const Eigen::Matrix<T, Eigen::Dynamic, 3> &pc,
const Eigen::Matrix<T, Eigen::Dynamic, 3> &normal,
std::vector<Eigen::Matrix<T, 6, 1>> &new_pc) {
const size_t num = pc.rows();
const size_t data_length = sizeof(T) * 3;
new_pc.resize(num);
#pragma omp parallel for
for (int i = 0; i < num; i++) {
const Eigen::Matrix<T, 3, 1> &p = pc.row(i);
const Eigen::Matrix<T, 3, 1> &n = normal.row(i);
memcpy(new_pc[i].data(), p.data(), data_length);
memcpy(new_pc[i].data() + 3, n.data(), data_length);
}
}
/**
 * @brief Convert a vector of 3D points into a 3 x N Eigen matrix.
 *
 * @param pc
 * @param new_pc
 */
template <typename T>
inline void VectorToEigenMatrix(const std::vector<Eigen::Matrix<T, 3, 1>> &pc,
Eigen::Matrix<T, 3, Eigen::Dynamic> &new_pc) {
const size_t num = pc.size();
new_pc.setZero(3, num);
#pragma omp parallel for
for (int i = 0; i < num; i++) {
new_pc.col(i) = pc[i];
}
}
/**
 * @brief Convert a vector of 6D (point, normal) vectors into a 6 x N Eigen
 * matrix.
 *
 * @param pc
 * @param new_pc
 */
template <typename T>
inline void VectorToEigenMatrix(const std::vector<Eigen::Matrix<T, 6, 1>> &pc,
Eigen::Matrix<T, 6, Eigen::Dynamic> &new_pc) {
const size_t num = pc.size();
new_pc.setZero(6, num);
#pragma omp parallel for
for (int i = 0; i < num; i++) {
new_pc.col(i) = pc[i];
}
}
/**
 * @brief Compute the rigid transform of a target coordinate frame defined by
 * three points expressed in the original frame.
 *
 * @param x_head Point on the target frame's x-axis
 * @param origin Origin of the target frame
 * @param ref Point in the target frame's x-y plane
 * @return Eigen::Matrix<T, 4, 4>
 */
template <typename T>
inline Eigen::Matrix<T, 4, 4> CalcCoordinateTransform(
const Eigen::Matrix<T, 3, 1> &x_head, const Eigen::Matrix<T, 3, 1> &origin,
const Eigen::Matrix<T, 3, 1> &ref) {
const Eigen::Matrix<T, 3, 1> x_axis =
(x_head - origin) / (x_head - origin).norm();
const Eigen::Matrix<T, 3, 1> tmp_axis =
(ref - origin) / (ref - origin).norm();
Eigen::Matrix<T, 3, 1> z_axis = x_axis.cross(tmp_axis);
if (z_axis.dot(Eigen::Matrix<T, 3, 1>(0, 0, 1)) > 0) {
z_axis /= z_axis.norm();
} else {
z_axis /= -z_axis.norm();
}
Eigen::Matrix<T, 3, 1> y_axis = z_axis.cross(x_axis);
y_axis /= y_axis.norm();
Eigen::Matrix<T, 4, 4> transform;
transform << x_axis(0), y_axis(0), z_axis(0), origin(0), x_axis(1),
y_axis(1), z_axis(1), origin(1), x_axis(2), y_axis(2), z_axis(2),
origin(2), 0, 0, 0, 1;
return transform;
}
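// Example (illustration only): build the pose of a frame whose x-axis points
// from origin toward x_head, with ref fixing the x-y plane; the returned
// matrix maps coordinates in that frame back to the original frame.
//
//   const Eigen::Vector3d x_head(1, 0, 0), origin(0, 0, 0), ref(0, 1, 0);
//   const Eigen::Matrix4d pose =
//       CalcCoordinateTransform<double>(x_head, origin, ref);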
/**
 * @brief Compute the distance from a query point to the line through point1
 * and point2: d = |(query - point1) x (query - point2)| / |point2 - point1|.
 *
 * @tparam T
 * @param query
 * @param point1
 * @param point2
 * @return T
 */
template <typename T>
inline T CalcPoint2LineDistance(const Eigen::Matrix<T, 3, 1> &query,
const Eigen::Matrix<T, 3, 1> &point1,
const Eigen::Matrix<T, 3, 1> &point2) {
const Eigen::Matrix<T, 3, 1> a = query - point1;
const Eigen::Matrix<T, 3, 1> b = query - point2;
const Eigen::Matrix<T, 3, 1> c = point2 - point1;
return a.cross(b).norm() / c.norm();
}
/**
 * @brief Convert degrees to radians.
 *
 * @param angle_deg
 * @return T
 */
template <typename T>
inline T Deg2Rad(const T angle_deg) {
return angle_deg / 180 * M_PI;
}
/**
 * @brief Convert radians to degrees.
 *
 * @param angle_rad
 * @return T
 */
template <typename T>
inline T Rad2Deg(const T angle_rad) {
return angle_rad / M_PI * 180;
}
/**
 * @brief Convert a vector of stacked 6D (point, normal) vectors into an
 * Open3D point cloud with points and normals.
 *
 * @param pc
 * @param new_pc
 */
inline void VectorToO3dPointCloud(const std::vector<Eigen::Vector6d> &pc,
open3d::geometry::PointCloud &new_pc) {
const int n_pt = pc.size();
const size_t data_length = sizeof(double) * 3;
new_pc.points_.resize(n_pt);
new_pc.normals_.resize(n_pt);
#pragma omp parallel for
for (int i = 0; i < n_pt; i++) {
memcpy(new_pc.points_[i].data(), pc[i].data(), data_length);
memcpy(new_pc.normals_[i].data(), pc[i].data() + 3, data_length);
}
}
/**
 * @brief Convert a 4x4 matrix to a 16-element array in row-major order.
 *
 * @tparam T
 * @param mat
 * @param array
 */
template <typename T>
inline void EigenMat4x4ToArray(const Eigen::Matrix<T, 4, 4> &mat,
std::array<T, 16> &array) {
for (size_t i = 0; i < 4; i++) {
for (size_t j = 0; j < 4; j++) {
array[i * 4 + j] = mat(i, j);
}
}
}
/**
 * @brief Convert a 16-element array in row-major order to a 4x4 matrix.
 *
 * @tparam T
 * @param array
 * @param mat
 */
template <typename T>
inline void ArrayToEigenMat4x4(const std::array<T, 16> &array,
Eigen::Matrix<T, 4, 4> &mat) {
for (size_t i = 0; i < 4; i++) {
for (size_t j = 0; j < 4; j++) {
mat(i, j) = array[i * 4 + j];
}
}
}
/**
 * @brief Get a new Eigen matrix containing the columns of mat selected by
 * indices.
 *
 * @param mat
 * @param indices
 * @return Eigen::Matrix3Xd
 */
inline Eigen::Matrix3Xd SelectByIndexEigenMat(const Eigen::Matrix3Xd &mat,
const std::vector<size_t> &indices) {
Eigen::Matrix3Xd new_mat;
new_mat.resize(3, indices.size());
#pragma omp parallel for
for (int i = 0; i < indices.size(); i++) {
new_mat.col(i) = mat.col(indices[i]);
}
return new_mat;
}
} // namespace misc3d
|
omp_for_schedule_guided.c
|
// RUN: %libomp-compile-and-run
/* Test for guided scheduling
 * Ensure threads first receive chunks in an interleaved fashion,
 * then check that the chunk sizes decrease to a stable value.
* Modified by Chunhua Liao
* For example, 100 iteration on 2 threads, chunksize 7
* one line for each dispatch, 0/1 means thread id
* 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 24
* 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 18
* 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14
* 1 1 1 1 1 1 1 1 1 1 10
* 0 0 0 0 0 0 0 0 8
* 1 1 1 1 1 1 1 7
* 0 0 0 0 0 0 0 7
* 1 1 1 1 1 1 1 7
* 0 0 0 0 0 5
*/
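/* Sketch of the expectation being checked (illustration only, assuming the
 * classic guided heuristic): each dispatched chunk is roughly the remaining
 * iteration count divided by the thread count, clamped below by the minimum
 * chunk size, so sizes decay geometrically toward a constant:
 *
 *   remaining = N;
 *   while (remaining > 0) {
 *     chunk = max(remaining / threads, min_chunk);
 *     remaining -= chunk;
 *   }
 */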
#include <stdio.h>
#include <stdlib.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
#define CFSMAX_SIZE 1000
#define MAX_TIME 0.005
#ifdef SLEEPTIME
#undef SLEEPTIME
#define SLEEPTIME 0.0001
#endif
int test_omp_for_schedule_guided()
{
int * tids;
int * chunksizes;
int notout;
int maxiter;
int threads;
int i;
int result;
tids = (int *) malloc (sizeof (int) * (CFSMAX_SIZE + 1));
maxiter = 0;
result = 1;
notout = 1;
/* Testing if enough threads are available for this check. */
#pragma omp parallel
{
#pragma omp single
{
threads = omp_get_num_threads();
}
}
/* ensure there are at least two threads */
if (threads < 2) {
omp_set_num_threads(2);
threads = 2;
}
/* Now the real parallel work:
* Each thread will start immediately with the first chunk.
*/
#pragma omp parallel shared(tids,maxiter)
{ /* begin of parallel */
double count;
int tid;
int j;
tid = omp_get_thread_num ();
#pragma omp for nowait schedule(guided)
for(j = 0; j < CFSMAX_SIZE; ++j) {
count = 0.;
#pragma omp flush(maxiter)
if (j > maxiter) {
#pragma omp critical
{
maxiter = j;
}
}
/*printf ("thread %d sleeping\n", tid);*/
#pragma omp flush(maxiter,notout)
while (notout && (count < MAX_TIME) && (maxiter == j)) {
#pragma omp flush(maxiter,notout)
my_sleep (SLEEPTIME);
count += SLEEPTIME;
#ifdef VERBOSE
printf(".");
#endif
}
#ifdef VERBOSE
if (count > 0.) printf(" waited %lf s\n", count);
#endif
/*printf ("thread %d awake\n", tid);*/
tids[j] = tid;
#ifdef VERBOSE
printf("%d finished by %d\n",j,tid);
#endif
} /* end of for */
notout = 0;
#pragma omp flush(maxiter,notout)
} /* end of parallel */
/*******************************************************
* evaluation of the values *
*******************************************************/
{
int determined_chunksize = 1;
int last_threadnr = tids[0];
int global_chunknr = 0;
int openwork = CFSMAX_SIZE;
int expected_chunk_size;
int* local_chunknr = (int*)malloc(threads * sizeof(int));
double c = 1;
for (i = 0; i < threads; i++)
local_chunknr[i] = 0;
tids[CFSMAX_SIZE] = -1;
/*
* determine the number of global chunks
*/
// fprintf(stderr,"# global_chunknr thread local_chunknr chunksize\n");
for(i = 1; i <= CFSMAX_SIZE; ++i) {
if (last_threadnr==tids[i]) {
determined_chunksize++;
} else {
/* fprintf(stderr, "%d\t%d\t%d\t%d\n", global_chunknr,
last_threadnr, local_chunknr[last_threadnr], m); */
global_chunknr++;
local_chunknr[last_threadnr]++;
last_threadnr = tids[i];
determined_chunksize = 1;
}
}
/* now allocate the memory for saving the sizes of the global chunks */
chunksizes = (int*)malloc(global_chunknr * sizeof(int));
/*
* Evaluate the sizes of the global chunks
*/
global_chunknr = 0;
determined_chunksize = 1;
last_threadnr = tids[0];
for (i = 1; i <= CFSMAX_SIZE; ++i) {
    /* If the thread number is the same as before, increase the detected
     * chunk size for this chunk; otherwise store the finished chunk,
     * reset the detected chunk size to one, and remember the next
     * thread in last_threadnr.
     */
if (last_threadnr == tids[i]) {
determined_chunksize++;
} else {
chunksizes[global_chunknr] = determined_chunksize;
global_chunknr++;
local_chunknr[last_threadnr]++;
last_threadnr = tids[i];
determined_chunksize = 1;
}
}
#ifdef VERBOSE
fprintf(stderr, "found\texpected\tconstant\n");
#endif
/* identify the constant c for the exponential
decrease of the chunksize */
expected_chunk_size = openwork / threads;
c = (double) chunksizes[0] / expected_chunk_size;
for (i = 0; i < global_chunknr; i++) {
/* calculate the new expected chunksize */
if (expected_chunk_size > 1)
expected_chunk_size = c * openwork / threads;
#ifdef VERBOSE
fprintf(stderr, "%8d\t%8d\t%lf\n", chunksizes[i],
expected_chunk_size, c * chunksizes[i]/expected_chunk_size);
#endif
/* check if chunksize is inside the rounding errors */
if (abs (chunksizes[i] - expected_chunk_size) >= 2) {
result = 0;
#ifndef VERBOSE
fprintf(stderr, "Chunksize differed from expected "
"value: %d instead of %d\n", chunksizes[i],
expected_chunk_size);
return 0;
#endif
} /* end if */
#ifndef VERBOSE
if (expected_chunk_size - chunksizes[i] < 0)
fprintf(stderr, "Chunksize did not decrease: %d"
" instead of %d\n", chunksizes[i],expected_chunk_size);
#endif
/* calculating the remaining amount of work */
openwork -= chunksizes[i];
    }
    free(local_chunknr);
    free(chunksizes);
  }
  free(tids);
  return result;
}
int main()
{
int i;
int num_failed=0;
for(i = 0; i < REPETITIONS; i++) {
if(!test_omp_for_schedule_guided()) {
num_failed++;
}
}
return num_failed;
}
|
blas_l1_kernel.c
|
/*******************************************************************************
* Copyright 2019 UChicago Argonne, LLC.
* (c.f. AUTHORS, LICENSE)
*
* This file is part of the AML project.
* For more info, see https://github.com/anlsys/aml
*
* SPDX-License-Identifier: BSD-3-Clause
******************************************************************************/
/*
* This is a benchmark for the BLAS Level 1 operations for AML.
*/
#include "blas_l1_kernel.h"
/* Look into another way to define these */
#define sign(a) (((a) > 0) ? 1 : (((a) < 0) ? -1 : 0))
double dasum(size_t n, double *a, double *b, double *c, double scalar)
{
	(void)b;
	(void)c;
	(void)scalar;
size_t i;
	double sum = 0;
	for (i = 0; i < n; i++) {
		sum = sum + fabs(a[i]);
	}
	return sum;
}
double daxpy(size_t n, double *a, double *b, double *c, double scalar)
{
size_t i;
#pragma omp parallel for
for (i = 0; i < n; i++)
c[i] = b[i] + scalar * a[i];
return 1;
}
double dcopy(size_t n, double *a, double *b, double *c, double scalar)
{
	(void)c;
	(void)scalar;
size_t i;
#pragma omp parallel for
for (i = 0; i < n; i++)
b[i] = a[i];
return 1;
}
double ddot(size_t n, double *a, double *b, double *c, double scalar)
{
	(void)c;
	(void)scalar;
size_t i;
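	/* Accumulate in extended precision; the OpenMP reduction keeps a
	   private partial sum per thread and combines them at the join. */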
long double dot = 0.0;
#pragma omp parallel for reduction(+ : dot)
for (i = 0; i < n; i++) {
long double temp;
temp = a[i] * b[i];
dot += temp;
}
return (double)dot;
}
double dnrm2(size_t n, double *a, double *b, double *c, double scalar)
{
	(void)b;
	(void)c;
	(void)scalar;
size_t i;
double scale, ssq, temp;
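	/* LAPACK-style scaled sum of squares: the loop maintains
	   scale^2 * ssq == sum of a[i]^2 processed so far, which avoids
	   overflow and underflow for inputs with extreme magnitudes. */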
scale = 0.0;
ssq = 1.0;
for (i = 0; i < n; i++) {
if (a[i] != 0.0) {
temp = fabs(a[i]);
if (scale < temp) {
ssq = 1.0 + ssq * pow(scale / temp, 2);
scale = temp;
} else
ssq = ssq + pow(temp / scale, 2);
}
}
return scale * sqrt(ssq);
}
double dscal(size_t n, double *a, double *b, double *c, double scalar)
{
	(void)c;
size_t i;
#pragma omp parallel for
for (i = 0; i < n; i++)
b[i] = scalar * a[i];
return 1;
}
double dswap(size_t n, double *a, double *b, double *c, double scalar)
{
	(void)c;
	(void)scalar;
size_t i;
#pragma omp parallel for
for (i = 0; i < n; i++) {
double temp = a[i];
a[i] = b[i];
b[i] = temp;
}
return 1;
}
double idmax(size_t n, double *a, double *b, double *c, double scalar)
{
	(void)b;
	(void)c;
	(void)scalar;
if (n == 1)
return 0;
size_t i;
	double max;
	size_t id_max = 0;
	/* Track the largest absolute value, per BLAS idamax semantics. */
	max = fabs(a[0]);
for (i = 1; i < n; i++) {
if (fabs(a[i]) > max) {
id_max = i;
max = fabs(a[i]);
}
}
return id_max;
}
/* The rotations. Not included in the array of functions because of their
parameters */
/* Plane rotation */
void drot(size_t n, double *a, double *b, double x, double y)
{
size_t i;
#pragma omp parallel for
for (i = 0; i < n; i++) {
double temp = x * a[i] + y * b[i];
b[i] = x * b[i] - y * a[i];
a[i] = temp;
}
}
/* Create a plane (Givens) rotation, following reference BLAS drotg: on
   return c and s hold the rotation, x holds r and y holds z. Out-params are
   pointers so the results are visible to the caller (this assumes the
   declaration in blas_l1_kernel.h is kept in sync). TODO: Verify */
void drotg(double *x, double *y, double *c, double *s)
{
	double r, roe, scale, z;
	roe = *y;
	if (fabs(*x) > fabs(*y))
		roe = *x;
	scale = fabs(*x) + fabs(*y);
	if (scale == 0.0) {
		*c = 1.0;
		*s = 0.0;
		r = 0.0;
		z = 0.0;
	} else {
		r = scale * sqrt(pow(*x / scale, 2) + pow(*y / scale, 2));
		r = sign(roe) * r;
		*c = *x / r;
		*s = *y / r;
		z = 1.0;
		if (fabs(*x) > fabs(*y))
			z = *s;
		if (fabs(*y) >= fabs(*x) && *c != 0.0)
			z = 1.0 / *c;
	}
	*x = r;
	*y = z;
}
void drotm(size_t n, double *a, double *b, double *param)
{
double flag, h11, h12, h21, h22;
size_t i;
flag = param[0];
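	/* param[0] selects the form of the 2x2 matrix H: flag < 0 uses all
	   four stored entries, flag == 0 implies a unit diagonal, and
	   flag > 0 implies unit off-diagonal entries (h12 = 1, h21 = -1). */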
if (flag < 0.0) {
h11 = param[1];
h12 = param[3];
h21 = param[2];
h22 = param[4];
} else {
if (flag == 0) {
h11 = 1.0;
h12 = param[3];
h21 = param[2];
h22 = 1.0;
} else {
h11 = param[1];
h12 = 1.0;
h21 = -1.0;
h22 = param[4];
}
}
#pragma omp parallel for
for (i = 0; i < n; i++) {
double w = a[i];
double z = b[i];
a[i] = w * h11 + z * h12;
b[i] = w * h21 + z * h22;
}
}
/* TODO: Verify. Note that reference BLAS drotmg also updates d1, d2 and x
   in place; here they are passed by value, so only param is written back. */
void drotmg(double d1, double d2, double x, double y, double *param)
{
double flag, h11, h12, h21, h22, p1, p2, q1, q2, temp, u, gam, gamsq,
rgamsq;
gam = 4096.0;
gamsq = 16777216.0;
rgamsq = 5.9604645e-8;
/* default initialization */
h11 = 0.0;
h12 = 0.0;
h21 = 0.0;
h22 = 0.0;
if (d1 < 0) {
flag = -1.0;
d1 = 0.0;
d2 = 0.0;
x = 0.0;
} else {
p2 = d2 * y;
		if (p2 == 0) {
			/* Reference BLAS returns immediately with flag -2
			   (identity H) when d2 * y is zero. */
			flag = -2.0;
			param[0] = flag;
			return;
		}
p1 = d1 * x;
q2 = p2 * y;
q1 = p1 * x;
if (fabs(q1) > fabs(q2)) {
h21 = -y / x;
h12 = p2 / p1;
u = 1.0 - h12 * h21;
if (u > 0) {
flag = 0.0;
d1 = d1 / u;
d2 = d2 / u;
x = x * u;
}
} else {
if (q2 < 0.0) {
flag = -1.0;
d1 = 0.0;
d2 = 0.0;
x = 0.0;
} else {
flag = 1.0;
h11 = p1 / p2;
h22 = x / y;
u = 1.0 + h11 * h22;
temp = d2 / u;
d2 = d1 / u;
d1 = temp;
x = y * u;
}
}
if (d1 != 0.0) {
while (fabs(d1) <= rgamsq || d1 >= gamsq) {
if (flag == 0.0) {
h11 = 1.0;
h22 = 1.0;
} else {
h21 = -1.0;
h12 = 1.0;
}
flag = -1.0;
if (d1 <= rgamsq) {
d1 = d1 * pow(gam, 2);
x = x / gam;
h11 = h11 / gam;
h12 = h12 / gam;
} else {
d1 = d1 / pow(gam, 2);
x = x * gam;
h11 = h11 * gam;
h12 = h12 * gam;
}
}
}
if (d2 != 0) {
while (fabs(d2) <= rgamsq || fabs(d2) >= gamsq) {
if (flag == 0.0) {
h11 = 1.0;
h22 = 1.0;
} else {
h21 = -1.0;
h12 = 1.0;
}
flag = -1.0;
if (fabs(d2) <= rgamsq) {
d2 = d2 * pow(gam, 2);
h21 = h21 / gam;
h22 = h22 / gam;
} else {
d2 = d2 / pow(gam, 2);
h21 = h21 * gam;
h22 = h22 * gam;
}
}
}
}
param[1] = h11;
param[2] = h21;
param[3] = h12;
param[4] = h22;
param[0] = flag;
}
|
image.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% %
% MagickCore Image Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/delegate.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/magick-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/timer.h"
#include "MagickCore/token.h"
#include "MagickCore/token-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#include "MagickCore/xwindow-private.h"
/*
Constant declaration.
*/
const char
BackgroundColor[] = "#ffffff", /* white */
BorderColor[] = "#dfdfdf", /* gray */
DefaultTileFrame[] = "15x15+3+3",
DefaultTileGeometry[] = "120x120+4+3>",
DefaultTileLabel[] = "%f\n%G\n%b",
ForegroundColor[] = "#000", /* black */
LoadImageTag[] = "Load/Image",
LoadImagesTag[] = "Load/Images",
MatteColor[] = "#bdbdbd", /* gray */
PSDensityGeometry[] = "72.0x72.0",
PSPageGeometry[] = "612x792",
SaveImageTag[] = "Save/Image",
SaveImagesTag[] = "Save/Images",
TransparentColor[] = "#00000000"; /* transparent black */
const double
DefaultResolution = 72.0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImage() returns a pointer to an image structure initialized to
% default values.
%
% The format of the AcquireImage method is:
%
% Image *AcquireImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
const char
*option;
Image
*image;
MagickStatusType
flags;
/*
Allocate image structure.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
image=(Image *) AcquireCriticalMemory(sizeof(*image));
(void) memset(image,0,sizeof(*image));
/*
Initialize Image structure.
*/
(void) CopyMagickString(image->magick,"MIFF",MagickPathExtent);
image->storage_class=DirectClass;
image->depth=MAGICKCORE_QUANTUM_DEPTH;
image->colorspace=sRGBColorspace;
image->rendering_intent=PerceptualIntent;
image->gamma=1.000f/2.200f;
image->chromaticity.red_primary.x=0.6400f;
image->chromaticity.red_primary.y=0.3300f;
image->chromaticity.red_primary.z=0.0300f;
image->chromaticity.green_primary.x=0.3000f;
image->chromaticity.green_primary.y=0.6000f;
image->chromaticity.green_primary.z=0.1000f;
image->chromaticity.blue_primary.x=0.1500f;
image->chromaticity.blue_primary.y=0.0600f;
image->chromaticity.blue_primary.z=0.7900f;
image->chromaticity.white_point.x=0.3127f;
image->chromaticity.white_point.y=0.3290f;
image->chromaticity.white_point.z=0.3583f;
image->interlace=NoInterlace;
image->ticks_per_second=UndefinedTicksPerSecond;
image->compose=OverCompositeOp;
(void) QueryColorCompliance(MatteColor,AllCompliance,&image->matte_color,
exception);
(void) QueryColorCompliance(BackgroundColor,AllCompliance,
&image->background_color,exception);
(void) QueryColorCompliance(BorderColor,AllCompliance,&image->border_color,
exception);
(void) QueryColorCompliance(TransparentColor,AllCompliance,
&image->transparent_color,exception);
GetTimerInfo(&image->timer);
image->cache=AcquirePixelCache(0);
image->channel_mask=DefaultChannels;
image->channel_map=AcquirePixelChannelMap();
image->blob=CloneBlobInfo((BlobInfo *) NULL);
image->timestamp=time((time_t *) NULL);
image->debug=IsEventLogging();
image->reference_count=1;
image->semaphore=AcquireSemaphoreInfo();
image->signature=MagickCoreSignature;
if (image_info == (ImageInfo *) NULL)
return(image);
/*
Transfer image info.
*/
SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
MagickFalse);
(void) CopyMagickString(image->filename,image_info->filename,
MagickPathExtent);
(void) CopyMagickString(image->magick_filename,image_info->filename,
MagickPathExtent);
(void) CopyMagickString(image->magick,image_info->magick,MagickPathExtent);
if (image_info->size != (char *) NULL)
{
(void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
image->columns=image->extract_info.width;
image->rows=image->extract_info.height;
image->offset=image->extract_info.x;
image->extract_info.x=0;
image->extract_info.y=0;
}
if (image_info->extract != (char *) NULL)
{
RectangleInfo
geometry;
(void) memset(&geometry,0,sizeof(geometry));
flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
{
image->extract_info=geometry;
Swap(image->columns,image->extract_info.width);
Swap(image->rows,image->extract_info.height);
}
}
image->compression=image_info->compression;
image->quality=image_info->quality;
image->endian=image_info->endian;
image->interlace=image_info->interlace;
image->units=image_info->units;
if (image_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
flags=ParseGeometry(image_info->density,&geometry_info);
if ((flags & RhoValue) != 0)
image->resolution.x=geometry_info.rho;
image->resolution.y=image->resolution.x;
if ((flags & SigmaValue) != 0)
image->resolution.y=geometry_info.sigma;
}
if (image_info->page != (char *) NULL)
{
char
*geometry;
image->page=image->extract_info;
geometry=GetPageGeometry(image_info->page);
(void) ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
if (image_info->depth != 0)
image->depth=image_info->depth;
image->dither=image_info->dither;
image->matte_color=image_info->matte_color;
image->background_color=image_info->background_color;
image->border_color=image_info->border_color;
image->transparent_color=image_info->transparent_color;
image->ping=image_info->ping;
image->progress_monitor=image_info->progress_monitor;
image->client_data=image_info->client_data;
if (image_info->cache != (void *) NULL)
ClonePixelCacheMethods(image->cache,image_info->cache);
/*
Set all global options that map to per-image settings.
*/
(void) SyncImageSettings(image_info,image,exception);
/*
Global options that are only set for new images.
*/
option=GetImageOption(image_info,"delay");
if (option != (const char *) NULL)
{
GeometryInfo
geometry_info;
flags=ParseGeometry(option,&geometry_info);
if ((flags & GreaterValue) != 0)
{
if (image->delay > (size_t) floor(geometry_info.rho+0.5))
image->delay=(size_t) floor(geometry_info.rho+0.5);
}
else
if ((flags & LessValue) != 0)
{
if (image->delay < (size_t) floor(geometry_info.rho+0.5))
image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
}
else
image->delay=(size_t) floor(geometry_info.rho+0.5);
if ((flags & SigmaValue) != 0)
image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
}
option=GetImageOption(image_info,"dispose");
if (option != (const char *) NULL)
image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
MagickFalse,option);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageInfo() allocates the ImageInfo structure.
%
% The format of the AcquireImageInfo method is:
%
% ImageInfo *AcquireImageInfo(void)
%
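% The returned structure should eventually be released with
% DestroyImageInfo(), for example:
%
% image_info=AcquireImageInfo();
% ...
% image_info=DestroyImageInfo(image_info);
%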
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
ImageInfo
*image_info;
image_info=(ImageInfo *) AcquireCriticalMemory(sizeof(*image_info));
GetImageInfo(image_info);
return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% The format of the AcquireNextImage method is:
%
% void AcquireNextImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
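% A typical decoder-style usage sketch (assuming the MagickCore list helpers
% DestroyImageList() and SyncNextImageInList()):
%
% AcquireNextImage(image_info,image,exception);
% if (GetNextImageInList(image) == (Image *) NULL)
% return(DestroyImageList(image));
% image=SyncNextImageInList(image);
%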
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
/*
Allocate image structure.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->next=AcquireImage(image_info,exception);
if (GetNextImageInList(image) == (Image *) NULL)
return;
(void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
MagickPathExtent);
if (image_info != (ImageInfo *) NULL)
(void) CopyMagickString(GetNextImageInList(image)->filename,
image_info->filename,MagickPathExtent);
DestroyBlob(GetNextImageInList(image));
image->next->blob=ReferenceBlob(image->blob);
image->next->endian=image->endian;
image->next->scene=image->scene+1;
image->next->previous=image;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A p p e n d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AppendImages() takes all images from the current image pointer to the end
% of the image list and appends them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting affects how the image is justified in the
% final image.
%
% The format of the AppendImages method is:
%
% Image *AppendImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o exception: return any errors or warnings in this structure.
%
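% For example, to append an image sequence top-to-bottom into a single
% image:
%
% append=AppendImages(images,MagickTrue,exception);
%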
*/
MagickExport Image *AppendImages(const Image *images,
const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag "Append/Image"
CacheView
*append_view;
Image
*append_image;
MagickBooleanType
homogeneous_colorspace,
status;
MagickOffsetType
n;
PixelTrait
alpha_trait;
RectangleInfo
geometry;
register const Image
*next;
size_t
depth,
height,
number_images,
width;
ssize_t
x_offset,
y,
y_offset;
/*
Compute maximum area of appended area.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
alpha_trait=images->alpha_trait;
number_images=1;
width=images->columns;
height=images->rows;
depth=images->depth;
homogeneous_colorspace=MagickTrue;
next=GetNextImageInList(images);
for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
{
if (next->depth > depth)
depth=next->depth;
if (next->colorspace != images->colorspace)
homogeneous_colorspace=MagickFalse;
if (next->alpha_trait != UndefinedPixelTrait)
alpha_trait=BlendPixelTrait;
number_images++;
if (stack != MagickFalse)
{
if (next->columns > width)
width=next->columns;
height+=next->rows;
continue;
}
width+=next->columns;
if (next->rows > height)
height=next->rows;
}
/*
Append images.
*/
append_image=CloneImage(images,width,height,MagickTrue,exception);
if (append_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(append_image,DirectClass,exception) == MagickFalse)
{
append_image=DestroyImage(append_image);
return((Image *) NULL);
}
if (homogeneous_colorspace == MagickFalse)
(void) SetImageColorspace(append_image,sRGBColorspace,exception);
append_image->depth=depth;
append_image->alpha_trait=alpha_trait;
append_image->page=images->page;
(void) SetImageBackgroundColor(append_image,exception);
status=MagickTrue;
x_offset=0;
y_offset=0;
next=images;
append_view=AcquireAuthenticCacheView(append_image,exception);
for (n=0; n < (MagickOffsetType) number_images; n++)
{
CacheView
*image_view;
MagickBooleanType
proceed;
SetGeometry(append_image,&geometry);
GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
if (stack != MagickFalse)
x_offset-=geometry.x;
else
y_offset-=geometry.y;
image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(next,next,next->rows,1)
#endif
for (y=0; y < (ssize_t) next->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
next->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
GetPixelInfo(next,&pixel);
for (x=0; x < (ssize_t) next->columns; x++)
{
GetPixelInfoPixel(next,p,&pixel);
SetPixelViaPixelInfo(append_image,&pixel,q);
p+=GetPixelChannels(next);
q+=GetPixelChannels(append_image);
}
sync=SyncCacheViewAuthenticPixels(append_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (stack == MagickFalse)
{
x_offset+=(ssize_t) next->columns;
y_offset=0;
}
else
{
x_offset=0;
y_offset+=(ssize_t) next->rows;
}
proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
if (proceed == MagickFalse)
break;
next=GetNextImageInList(next);
}
append_view=DestroyCacheView(append_view);
if (status == MagickFalse)
append_image=DestroyImage(append_image);
return(append_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a t c h I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CatchImageException() returns if no exceptions are found in the image
% sequence, otherwise it determines the most severe exception and reports
% it as a warning or error depending on the severity.
%
% The format of the CatchImageException method is:
%
% ExceptionType CatchImageException(Image *image)
%
% A description of each parameter follows:
%
% o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
ExceptionInfo
*exception;
ExceptionType
severity;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
exception=AcquireExceptionInfo();
CatchException(exception);
severity=exception->severity;
exception=DestroyExceptionInfo(exception);
return(severity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipImagePath() sets the image clip mask based on any clipping path
% information, if it exists.
%
% The format of the ClipImagePath method is:
%
% MagickBooleanType ClipImagePath(Image *image,const char *pathname,
% const MagickBooleanType inside,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception)
{
return(ClipImagePath(image,"#1",MagickTrue,exception));
}
MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
const MagickBooleanType inside,ExceptionInfo *exception)
{
#define ClipImagePathTag "ClipPath/Image"
char
*property;
const char
*value;
Image
*clip_mask;
ImageInfo
*image_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pathname != NULL);
property=AcquireString(pathname);
(void) FormatLocaleString(property,MagickPathExtent,"8BIM:1999,2998:%s",
pathname);
value=GetImageProperty(image,property,exception);
property=DestroyString(property);
if (value == (const char *) NULL)
{
ThrowFileException(exception,OptionError,"NoClipPathDefined",
image->filename);
return(MagickFalse);
}
image_info=AcquireImageInfo();
(void) CopyMagickString(image_info->filename,image->filename,
MagickPathExtent);
(void) ConcatenateMagickString(image_info->filename,pathname,
MagickPathExtent);
clip_mask=BlobToImage(image_info,value,strlen(value),exception);
image_info=DestroyImageInfo(image_info);
if (clip_mask == (Image *) NULL)
return(MagickFalse);
if (clip_mask->storage_class == PseudoClass)
{
(void) SyncImage(clip_mask,exception);
if (SetImageStorageClass(clip_mask,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (inside == MagickFalse)
(void) NegateImage(clip_mask,MagickFalse,exception);
(void) FormatLocaleString(clip_mask->magick_filename,MagickPathExtent,
"8BIM:1999,2998:%s\nPS",pathname);
(void) SetImageMask(image,WritePixelMask,clip_mask,exception);
clip_mask=DestroyImage(clip_mask);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImage() copies an image and returns the copy as a new image object.
%
% If the specified columns and rows are 0, an exact copy of the image is
% returned, otherwise the pixel data is undefined and must be initialized
% with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
% failure, a NULL image is returned and exception describes the reason for the
% failure.
%
% The format of the CloneImage method is:
%
% Image *CloneImage(const Image *image,const size_t columns,
% const size_t rows,const MagickBooleanType detach,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the cloned image.
%
% o rows: the number of rows in the cloned image.
%
% o detach: With a value other than 0, the cloned image is detached from
% its parent I/O stream.
%
% o exception: return any errors or warnings in this structure.
%
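% For example, an exact copy detached from the source image's I/O stream
% (a usage sketch) is obtained with:
%
% clone=CloneImage(image,0,0,MagickTrue,exception);
%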
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
Image
*clone_image;
double
scale;
size_t
length;
/*
Clone the image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((image->columns == 0) || (image->rows == 0))
{
(void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
"NegativeOrZeroImageSize","`%s'",image->filename);
return((Image *) NULL);
}
clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image));
(void) memset(clone_image,0,sizeof(*clone_image));
clone_image->signature=MagickCoreSignature;
clone_image->storage_class=image->storage_class;
clone_image->number_channels=image->number_channels;
clone_image->number_meta_channels=image->number_meta_channels;
clone_image->metacontent_extent=image->metacontent_extent;
clone_image->colorspace=image->colorspace;
clone_image->alpha_trait=image->alpha_trait;
clone_image->channels=image->channels;
clone_image->mask_trait=image->mask_trait;
clone_image->columns=image->columns;
clone_image->rows=image->rows;
clone_image->dither=image->dither;
clone_image->image_info=CloneImageInfo(image->image_info);
(void) CloneImageProfiles(clone_image,image);
(void) CloneImageProperties(clone_image,image);
(void) CloneImageArtifacts(clone_image,image);
GetTimerInfo(&clone_image->timer);
if (image->ascii85 != (void *) NULL)
Ascii85Initialize(clone_image);
clone_image->extent=image->extent;
clone_image->magick_columns=image->magick_columns;
clone_image->magick_rows=image->magick_rows;
clone_image->type=image->type;
clone_image->channel_mask=image->channel_mask;
clone_image->channel_map=ClonePixelChannelMap(image->channel_map);
(void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
MagickPathExtent);
(void) CopyMagickString(clone_image->magick,image->magick,MagickPathExtent);
(void) CopyMagickString(clone_image->filename,image->filename,
MagickPathExtent);
clone_image->progress_monitor=image->progress_monitor;
clone_image->client_data=image->client_data;
clone_image->reference_count=1;
clone_image->next=image->next;
clone_image->previous=image->previous;
clone_image->list=NewImageList();
if (detach == MagickFalse)
clone_image->blob=ReferenceBlob(image->blob);
else
{
clone_image->next=NewImageList();
clone_image->previous=NewImageList();
clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
}
clone_image->ping=image->ping;
clone_image->debug=IsEventLogging();
clone_image->semaphore=AcquireSemaphoreInfo();
if (image->colormap != (PixelInfo *) NULL)
{
/*
Allocate and copy the image colormap.
*/
clone_image->colors=image->colors;
length=(size_t) image->colors;
clone_image->colormap=(PixelInfo *) AcquireQuantumMemory(length+1,
sizeof(*clone_image->colormap));
if (clone_image->colormap == (PixelInfo *) NULL)
{
clone_image=DestroyImage(clone_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memcpy(clone_image->colormap,image->colormap,length*
sizeof(*clone_image->colormap));
}
if ((columns == 0) || (rows == 0))
{
if (image->montage != (char *) NULL)
(void) CloneString(&clone_image->montage,image->montage);
if (image->directory != (char *) NULL)
(void) CloneString(&clone_image->directory,image->directory);
clone_image->cache=ReferencePixelCache(image->cache);
return(clone_image);
}
scale=1.0;
if (image->columns != 0)
scale=(double) columns/(double) image->columns;
clone_image->page.width=(size_t) floor(scale*image->page.width+0.5);
clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5);
clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5);
scale=1.0;
if (image->rows != 0)
scale=(double) rows/(double) image->rows;
clone_image->page.height=(size_t) floor(scale*image->page.height+0.5);
clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5);
clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5);
clone_image->cache=ClonePixelCache(image->cache);
if (SetImageExtent(clone_image,columns,rows,exception) == MagickFalse)
clone_image=DestroyImage(clone_image);
return(clone_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageInfo() makes a copy of the given image info structure. If
% NULL is specified, a new image info structure is created initialized to
% default values.
%
% The format of the CloneImageInfo method is:
%
% ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
ImageInfo
*clone_info;
clone_info=AcquireImageInfo();
if (image_info == (ImageInfo *) NULL)
return(clone_info);
clone_info->compression=image_info->compression;
clone_info->temporary=image_info->temporary;
clone_info->adjoin=image_info->adjoin;
clone_info->antialias=image_info->antialias;
clone_info->scene=image_info->scene;
clone_info->number_scenes=image_info->number_scenes;
clone_info->depth=image_info->depth;
if (image_info->size != (char *) NULL)
(void) CloneString(&clone_info->size,image_info->size);
if (image_info->extract != (char *) NULL)
(void) CloneString(&clone_info->extract,image_info->extract);
if (image_info->scenes != (char *) NULL)
(void) CloneString(&clone_info->scenes,image_info->scenes);
if (image_info->page != (char *) NULL)
(void) CloneString(&clone_info->page,image_info->page);
clone_info->interlace=image_info->interlace;
clone_info->endian=image_info->endian;
clone_info->units=image_info->units;
clone_info->quality=image_info->quality;
if (image_info->sampling_factor != (char *) NULL)
(void) CloneString(&clone_info->sampling_factor,
image_info->sampling_factor);
if (image_info->server_name != (char *) NULL)
(void) CloneString(&clone_info->server_name,image_info->server_name);
if (image_info->font != (char *) NULL)
(void) CloneString(&clone_info->font,image_info->font);
if (image_info->texture != (char *) NULL)
(void) CloneString(&clone_info->texture,image_info->texture);
if (image_info->density != (char *) NULL)
(void) CloneString(&clone_info->density,image_info->density);
clone_info->pointsize=image_info->pointsize;
clone_info->fuzz=image_info->fuzz;
clone_info->matte_color=image_info->matte_color;
clone_info->background_color=image_info->background_color;
clone_info->border_color=image_info->border_color;
clone_info->transparent_color=image_info->transparent_color;
clone_info->dither=image_info->dither;
clone_info->monochrome=image_info->monochrome;
clone_info->colorspace=image_info->colorspace;
clone_info->type=image_info->type;
clone_info->orientation=image_info->orientation;
clone_info->ping=image_info->ping;
clone_info->verbose=image_info->verbose;
clone_info->progress_monitor=image_info->progress_monitor;
clone_info->client_data=image_info->client_data;
clone_info->cache=image_info->cache;
if (image_info->cache != (void *) NULL)
clone_info->cache=ReferencePixelCache(image_info->cache);
if (image_info->profile != (void *) NULL)
clone_info->profile=(void *) CloneStringInfo((StringInfo *)
image_info->profile);
SetImageInfoFile(clone_info,image_info->file);
SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
clone_info->stream=image_info->stream;
clone_info->custom_stream=image_info->custom_stream;
(void) CopyMagickString(clone_info->magick,image_info->magick,
MagickPathExtent);
(void) CopyMagickString(clone_info->unique,image_info->unique,
MagickPathExtent);
(void) CopyMagickString(clone_info->filename,image_info->filename,
MagickPathExtent);
clone_info->channel=image_info->channel;
(void) CloneImageOptions(clone_info,image_info);
clone_info->debug=IsEventLogging();
clone_info->signature=image_info->signature;
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o p y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CopyImagePixels() copies pixels from the source image as defined by the
% geometry to the destination image at the specified offset.
%
% The format of the CopyImagePixels method is:
%
% MagickBooleanType CopyImagePixels(Image *image,const Image *source_image,
% const RectangleInfo *geometry,const OffsetInfo *offset,
% ExceptionInfo *exception);
%
% A description of each parameter follows:
%
% o image: the destination image.
%
% o source_image: the source image.
%
% o geometry: define the dimensions of the source pixel rectangle.
%
% o offset: define the offset in the destination image.
%
% o exception: return any errors or warnings in this structure.
%
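% For example, a sketch copying a 256x256 region from the top-left corner of
% source_image into image at offset (64,64):
%
% RectangleInfo geometry = { 256, 256, 0, 0 };
% OffsetInfo offset = { 64, 64 };
% (void) CopyImagePixels(image,source_image,&geometry,&offset,exception);
%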
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
const Image *source_image,const RectangleInfo *geometry,
const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag "Copy/Image"
CacheView
*image_view,
*source_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(source_image != (Image *) NULL);
assert(geometry != (RectangleInfo *) NULL);
assert(offset != (OffsetInfo *) NULL);
if ((offset->x < 0) || (offset->y < 0) ||
((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
/*
Copy image pixels.
*/
status=MagickTrue;
progress=0;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,source_image,geometry->height,1)
#endif
for (y=0; y < (ssize_t) geometry->height; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
geometry->width,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
geometry->width,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) geometry->width; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
if ((traits == UndefinedPixelTrait) ||
((traits & UpdatePixelTrait) == 0) ||
(source_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CopyImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImage() dereferences an image, deallocating memory associated with
% the image if the reference count becomes zero.
%
% The format of the DestroyImage method is:
%
% Image *DestroyImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
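% DestroyImage() always returns (Image *) NULL, so the conventional usage
% clears the caller's reference in the same statement:
%
% image=DestroyImage(image);
%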
*/
MagickExport Image *DestroyImage(Image *image)
{
MagickBooleanType
destroy;
/*
Dereference image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
destroy=MagickFalse;
LockSemaphoreInfo(image->semaphore);
image->reference_count--;
if (image->reference_count == 0)
destroy=MagickTrue;
UnlockSemaphoreInfo(image->semaphore);
if (destroy == MagickFalse)
return((Image *) NULL);
/*
Destroy image.
*/
DestroyImagePixels(image);
image->channel_map=DestroyPixelChannelMap(image->channel_map);
if (image->montage != (char *) NULL)
image->montage=DestroyString(image->montage);
if (image->directory != (char *) NULL)
image->directory=DestroyString(image->directory);
if (image->colormap != (PixelInfo *) NULL)
image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
if (image->geometry != (char *) NULL)
image->geometry=DestroyString(image->geometry);
DestroyImageProfiles(image);
DestroyImageProperties(image);
DestroyImageArtifacts(image);
if (image->ascii85 != (Ascii85Info *) NULL)
image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
if (image->image_info != (ImageInfo *) NULL)
image->image_info=DestroyImageInfo(image->image_info);
DestroyBlob(image);
if (image->semaphore != (SemaphoreInfo *) NULL)
RelinquishSemaphoreInfo(&image->semaphore);
image->signature=(~MagickCoreSignature);
image=(Image *) RelinquishMagickMemory(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageInfo() deallocates memory associated with an ImageInfo
% structure.
%
% The format of the DestroyImageInfo method is:
%
% ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
if (image_info->size != (char *) NULL)
image_info->size=DestroyString(image_info->size);
if (image_info->extract != (char *) NULL)
image_info->extract=DestroyString(image_info->extract);
if (image_info->scenes != (char *) NULL)
image_info->scenes=DestroyString(image_info->scenes);
if (image_info->page != (char *) NULL)
image_info->page=DestroyString(image_info->page);
if (image_info->sampling_factor != (char *) NULL)
image_info->sampling_factor=DestroyString(
image_info->sampling_factor);
if (image_info->server_name != (char *) NULL)
image_info->server_name=DestroyString(
image_info->server_name);
if (image_info->font != (char *) NULL)
image_info->font=DestroyString(image_info->font);
if (image_info->texture != (char *) NULL)
image_info->texture=DestroyString(image_info->texture);
if (image_info->density != (char *) NULL)
image_info->density=DestroyString(image_info->density);
if (image_info->cache != (void *) NULL)
image_info->cache=DestroyPixelCache(image_info->cache);
if (image_info->profile != (StringInfo *) NULL)
image_info->profile=(void *) DestroyStringInfo((StringInfo *)
image_info->profile);
DestroyImageOptions(image_info);
image_info->signature=(~MagickCoreSignature);
image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s a s s o c i a t e I m a g e S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DisassociateImageStream() disassociates the image stream. It checks if the
% blob of the specified image is referenced by other images. If the reference
% count is higher than 1, a new blob is assigned to the specified image.
%
% The format of the DisassociateImageStream method is:
%
% void DisassociateImageStream(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
DisassociateBlob(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfo() initializes image_info to default values.
%
% The format of the GetImageInfo method is:
%
% void GetImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
char
*synchronize;
ExceptionInfo
*exception;
/*
File and image dimension members.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image_info != (ImageInfo *) NULL);
(void) memset(image_info,0,sizeof(*image_info));
image_info->adjoin=MagickTrue;
image_info->interlace=NoInterlace;
image_info->channel=DefaultChannels;
image_info->quality=UndefinedCompressionQuality;
image_info->antialias=MagickTrue;
image_info->dither=MagickTrue;
synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (synchronize != (const char *) NULL)
{
image_info->synchronize=IsStringTrue(synchronize);
synchronize=DestroyString(synchronize);
}
exception=AcquireExceptionInfo();
(void) QueryColorCompliance(BackgroundColor,AllCompliance,
&image_info->background_color,exception);
(void) QueryColorCompliance(BorderColor,AllCompliance,
&image_info->border_color,exception);
(void) QueryColorCompliance(MatteColor,AllCompliance,&image_info->matte_color,
exception);
(void) QueryColorCompliance(TransparentColor,AllCompliance,
&image_info->transparent_color,exception);
exception=DestroyExceptionInfo(exception);
image_info->debug=IsEventLogging();
image_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfoFile() returns the image info file member.
%
% The format of the GetImageInfoFile method is:
%
% FILE *GetImageInfoFile(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
return(image_info->file);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMask() returns the mask associated with the image.
%
% The format of the GetImageMask method is:
%
% Image *GetImageMask(const Image *image,const PixelMask type,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GetImageMask(const Image *image,const PixelMask type,
ExceptionInfo *exception)
{
CacheView
*mask_view,
*image_view;
Image
*mask_image;
MagickBooleanType
status;
ssize_t
y;
/*
Get image mask.
*/
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
switch (type)
{
case ReadPixelMask:
{
if ((image->channels & ReadMaskChannel) == 0)
return((Image *) NULL);
break;
}
case WritePixelMask:
{
if ((image->channels & WriteMaskChannel) == 0)
return((Image *) NULL);
break;
}
default:
{
if ((image->channels & CompositeMaskChannel) == 0)
return((Image *) NULL);
break;
}
}
mask_image=AcquireImage((ImageInfo *) NULL,exception);
status=SetImageExtent(mask_image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImage(mask_image));
status=MagickTrue;
mask_image->alpha_trait=UndefinedPixelTrait;
(void) SetImageColorspace(mask_image,GRAYColorspace,exception);
image_view=AcquireVirtualCacheView(image,exception);
mask_view=AcquireAuthenticCacheView(mask_image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
switch (type)
{
case ReadPixelMask:
{
SetPixelGray(mask_image,GetPixelReadMask(image,p),q);
break;
}
case WritePixelMask:
{
SetPixelGray(mask_image,GetPixelWriteMask(image,p),q);
break;
}
default:
{
SetPixelGray(mask_image,GetPixelCompositeMask(image,p),q);
break;
}
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(mask_image);
}
if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse)
status=MagickFalse;
}
mask_view=DestroyCacheView(mask_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
mask_image=DestroyImage(mask_image);
return(mask_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e R e f e r e n c e C o u n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageReferenceCount() returns the image reference count.
%
% The format of the GetImageReferenceCount method is:
%
% ssize_t GetImageReferenceCount(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
ssize_t
reference_count;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
LockSemaphoreInfo(image->semaphore);
reference_count=image->reference_count;
UnlockSemaphoreInfo(image->semaphore);
return(reference_count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
% image. A virtual pixel is any pixel access that is outside the boundaries
% of the image cache.
%
% The format of the GetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
return(GetPixelCacheVirtualMethod(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageFilename() interprets embedded characters in an image filename
% (e.g. a printf-style numeric pattern such as %03d). The length of the
% formatted filename is returned.
%
% The format of the InterpretImageFilename method is:
%
% size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
% const char *format,int value,char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
%    o image_info: the image info.
%
% o image: the image.
%
% o format: A filename describing the format to use to write the numeric
% argument. Only the first numeric format identifier is replaced.
%
% o value: Numeric value to substitute into format filename.
%
% o filename: return the formatted filename in this character buffer.
%
% o exception: return any errors or warnings in this structure.
%
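%  For example, a sketch of the numeric substitution (image_info, image, and
%  exception are assumed to be acquired elsewhere):
%
%      char
%        filename[MagickPathExtent];
%
%      (void) InterpretImageFilename(image_info,image,"frame%03d.png",7,
%        filename,exception);
%      /* filename now holds "frame007.png" */
%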
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
Image *image,const char *format,int value,char *filename,
ExceptionInfo *exception)
{
char
*q;
int
c;
MagickBooleanType
canonical;
register const char
*p;
ssize_t
field_width,
offset;
canonical=MagickFalse;
offset=0;
(void) CopyMagickString(filename,format,MagickPathExtent);
for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
{
q=(char *) p+1;
if (*q == '%')
{
p=q+1;
continue;
}
field_width=0;
if (*q == '0')
field_width=(ssize_t) strtol(q,&q,10);
switch (*q)
{
case 'd':
case 'o':
case 'x':
{
q++;
c=(*q);
*q='\0';
(void) FormatLocaleString(filename+(p-format-offset),(size_t)
(MagickPathExtent-(p-format-offset)),p,value);
offset+=(4-field_width);
*q=c;
(void) ConcatenateMagickString(filename,q,MagickPathExtent);
canonical=MagickTrue;
if (*(q-1) != '%')
break;
p++;
break;
}
case '[':
{
char
pattern[MagickPathExtent];
const char
*option;
register char
*r;
register ssize_t
i;
ssize_t
depth;
/*
Image option.
*/
if (strchr(p,']') == (char *) NULL)
break;
depth=1;
r=q+1;
for (i=0; (i < (MagickPathExtent-1L)) && (*r != '\0'); i++)
{
if (*r == '[')
depth++;
if (*r == ']')
depth--;
if (depth <= 0)
break;
pattern[i]=(*r++);
}
pattern[i]='\0';
if (LocaleNCompare(pattern,"filename:",9) != 0)
break;
option=(const char *) NULL;
if (image != (Image *) NULL)
option=GetImageProperty(image,pattern,exception);
if ((option == (const char *) NULL) && (image != (Image *) NULL))
option=GetImageArtifact(image,pattern);
if ((option == (const char *) NULL) &&
(image_info != (ImageInfo *) NULL))
option=GetImageOption(image_info,pattern);
if (option == (const char *) NULL)
break;
q--;
c=(*q);
*q='\0';
(void) CopyMagickString(filename+(p-format-offset),option,(size_t)
(MagickPathExtent-(p-format-offset)));
offset+=strlen(pattern)-strlen(option)+3;
*q=c;
(void) ConcatenateMagickString(filename,r+1,MagickPathExtent);
canonical=MagickTrue;
if (*(q-1) != '%')
break;
p++;
break;
}
default:
break;
}
}
for (q=filename; *q != '\0'; q++)
if ((*q == '%') && (*(q+1) == '%'))
{
(void) CopyMagickString(q,q+1,(size_t) (MagickPathExtent-(q-filename)));
canonical=MagickTrue;
}
if (canonical == MagickFalse)
(void) CopyMagickString(filename,format,MagickPathExtent);
return(strlen(filename));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s H i g h D y n a m i c R a n g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
% non-integer or exceeds the bounds of the quantum depth (e.g. 0..65535
% for Q16).
%
% The format of the IsHighDynamicRangeImage method is:
%
% MagickBooleanType IsHighDynamicRangeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
(void) image;
(void) exception;
return(MagickFalse);
#else
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
pixel;
PixelTrait
traits;
traits=GetPixelChannelTraits(image,(PixelChannel) i);
if (traits == UndefinedPixelTrait)
continue;
pixel=(double) p[i];
if ((pixel < 0.0) || (pixel > QuantumRange) ||
(pixel != (double) ((QuantumAny) pixel)))
break;
}
p+=GetPixelChannels(image);
      if (i < (ssize_t) GetPixelChannels(image))
        break;
}
if (x < (ssize_t) image->columns)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O b j e c t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageObject() returns MagickTrue if the image sequence contains a valid
% set of image objects.
%
% The format of the IsImageObject method is:
%
% MagickBooleanType IsImageObject(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
register const Image
*p;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
if (p->signature != MagickCoreSignature)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s T a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsTaintImage() returns MagickTrue if any pixel in the image has been altered
% since it was first constituted.
%
% The format of the IsTaintImage method is:
%
% MagickBooleanType IsTaintImage(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
char
magick[MagickPathExtent],
filename[MagickPathExtent];
register const Image
*p;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
(void) CopyMagickString(magick,image->magick,MagickPathExtent);
(void) CopyMagickString(filename,image->filename,MagickPathExtent);
for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
{
if (p->taint != MagickFalse)
return(MagickTrue);
if (LocaleCompare(p->magick,magick) != 0)
return(MagickTrue);
if (LocaleCompare(p->filename,filename) != 0)
return(MagickTrue);
}
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModifyImage() ensures that there is only a single reference to the image
% to be modified, updating the provided image pointer to point to a clone of
% the original image if necessary.
%
% The format of the ModifyImage method is:
%
% MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
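%  For example, a copy-on-write sketch (image and exception are assumed to be
%  acquired elsewhere); destroy `q' when done:
%
%      Image
%        *q;
%
%      q=ReferenceImage(image);  /* reference count is now greater than 1 */
%      if (ModifyImage(&q,exception) != MagickFalse)
%        {
%          /* q now points to a private clone that is safe to alter */
%        }
%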
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
ExceptionInfo *exception)
{
Image
*clone_image;
assert(image != (Image **) NULL);
assert(*image != (Image *) NULL);
assert((*image)->signature == MagickCoreSignature);
if ((*image)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
if (GetImageReferenceCount(*image) <= 1)
return(MagickTrue);
clone_image=CloneImage(*image,0,0,MagickTrue,exception);
LockSemaphoreInfo((*image)->semaphore);
(*image)->reference_count--;
UnlockSemaphoreInfo((*image)->semaphore);
*image=clone_image;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w M a g i c k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewMagickImage() creates a blank image canvas of the specified size and
% background color.
%
% The format of the NewMagickImage method is:
%
% Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
% const size_t height,const PixelInfo *background,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o width: the image width.
%
% o height: the image height.
%
% o background: the image color.
%
% o exception: return any errors or warnings in this structure.
%
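%  For example, a sketch that creates a 640x480 red canvas (image_info and
%  exception are assumed to be acquired elsewhere):
%
%      Image
%        *canvas;
%
%      PixelInfo
%        background;
%
%      (void) QueryColorCompliance("red",AllCompliance,&background,exception);
%      canvas=NewMagickImage(image_info,640,480,&background,exception);
%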
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
const size_t width,const size_t height,const PixelInfo *background,
ExceptionInfo *exception)
{
CacheView
*image_view;
Image
*image;
MagickBooleanType
status;
ssize_t
y;
assert(image_info != (const ImageInfo *) NULL);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image_info->signature == MagickCoreSignature);
assert(background != (const PixelInfo *) NULL);
image=AcquireImage(image_info,exception);
image->columns=width;
image->rows=height;
image->colorspace=background->colorspace;
image->alpha_trait=background->alpha_trait;
image->fuzz=background->fuzz;
image->depth=background->depth;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelViaPixelInfo(image,background,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e f e r e n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferenceImage() increments the reference count associated with an image
% returning a pointer to the image.
%
% The format of the ReferenceImage method is:
%
% Image *ReferenceImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
LockSemaphoreInfo(image->semaphore);
image->reference_count++;
UnlockSemaphoreInfo(image->semaphore);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePage() resets the image page canvas and position.
%
% The format of the ResetImagePage method is:
%
% MagickBooleanType ResetImagePage(Image *image,const char *page)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o page: the relative page specification.
%
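%  For example, "0x0+0+0" resets the page canvas and position entirely, which
%  is how the +repage command-line option behaves:
%
%      (void) ResetImagePage(image,"0x0+0+0");
%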
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
MagickStatusType
flags;
RectangleInfo
geometry;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
flags=ParseAbsoluteGeometry(page,&geometry);
if ((flags & WidthValue) != 0)
{
if ((flags & HeightValue) == 0)
geometry.height=geometry.width;
image->page.width=geometry.width;
image->page.height=geometry.height;
}
if ((flags & AspectValue) != 0)
{
if ((flags & XValue) != 0)
image->page.x+=geometry.x;
if ((flags & YValue) != 0)
image->page.y+=geometry.y;
}
else
{
if ((flags & XValue) != 0)
{
image->page.x=geometry.x;
if ((image->page.width == 0) && (geometry.x > 0))
image->page.width=image->columns+geometry.x;
}
if ((flags & YValue) != 0)
{
image->page.y=geometry.y;
if ((image->page.height == 0) && (geometry.y > 0))
image->page.height=image->rows+geometry.y;
}
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePixels() resets the image pixels, that is, all the pixel
% components are zeroed.
%
% The format of the ResetImagePixels method is:
%
% MagickBooleanType ResetImagePixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResetImagePixels(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
size_t
length;
ssize_t
y;
void
*pixels;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
pixels=AcquirePixelCachePixels(image,&length,exception);
if (pixels != (void *) NULL)
{
/*
Reset in-core image pixels.
*/
(void) memset(pixels,0,length);
return(MagickTrue);
}
/*
Reset image pixels.
*/
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
(void) memset(q,0,GetPixelChannels(image)*sizeof(Quantum));
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlpha() sets the alpha levels of the image.
%
% The format of the SetImageAlpha method is:
%
% MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha: the level of transparency: 0 is fully transparent and QuantumRange
% is fully opaque.
%
% o exception: return any errors or warnings in this structure.
%
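%  For example, a sketch that makes the whole image half transparent
%  (exception is assumed to be acquired elsewhere):
%
%      (void) SetImageAlpha(image,QuantumRange/2,exception);
%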
*/
MagickExport MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
image->alpha_trait=BlendPixelTrait;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelAlpha(image,alpha,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e B a c k g r o u n d C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageBackgroundColor() initializes the image pixels to the image
% background color. The background color is defined by the background_color
% member of the image structure.
%
% The format of the SetImageBackgroundColor method is:
%
% MagickBooleanType SetImageBackgroundColor(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
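%  For example, a hedged sketch that fills the canvas with white (exception
%  is assumed to be acquired elsewhere):
%
%      (void) QueryColorCompliance("white",AllCompliance,
%        &image->background_color,exception);
%      (void) SetImageBackgroundColor(image,exception);
%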
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
PixelInfo
background;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if ((image->background_color.alpha != OpaqueAlpha) &&
(image->alpha_trait == UndefinedPixelTrait))
(void) SetImageAlphaChannel(image,OnAlphaChannel,exception);
ConformPixelInfo(image,&image->background_color,&background,exception);
/*
Set image background color.
*/
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelViaPixelInfo(image,&background,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelMask() sets the image channel mask from the specified channel
% mask.
%
% The format of the SetImageChannelMask method is:
%
% ChannelType SetImageChannelMask(Image *image,
% const ChannelType channel_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel_mask: the channel mask.
%
*/
MagickExport ChannelType SetImageChannelMask(Image *image,
const ChannelType channel_mask)
{
return(SetPixelChannelMask(image,channel_mask));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColor() sets the entire image canvas to the specified color.
%
% The format of the SetImageColor method is:
%
%      MagickBooleanType SetImageColor(Image *image,const PixelInfo *color,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image: the image.
%
%    o color: the image color.
%
% o exception: return any errors or warnings in this structure.
%
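%  For example, a sketch that paints the canvas sky blue (exception is
%  assumed to be acquired elsewhere):
%
%      PixelInfo
%        color;
%
%      (void) QueryColorCompliance("skyblue",AllCompliance,&color,exception);
%      (void) SetImageColor(image,&color,exception);
%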
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
const PixelInfo *color,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
assert(color != (const PixelInfo *) NULL);
image->colorspace=color->colorspace;
image->alpha_trait=color->alpha_trait;
image->fuzz=color->fuzz;
image->depth=color->depth;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelViaPixelInfo(image,color,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageStorageClass() sets the image class: DirectClass for true color
% images or PseudoClass for colormapped images.
%
% The format of the SetImageStorageClass method is:
%
% MagickBooleanType SetImageStorageClass(Image *image,
% const ClassType storage_class,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o storage_class: The image class.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
const ClassType storage_class,ExceptionInfo *exception)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image->storage_class=storage_class;
return(SyncImagePixelCache(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageExtent() sets the image size (i.e. columns & rows).
%
% The format of the SetImageExtent method is:
%
% MagickBooleanType SetImageExtent(Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: The image width in pixels.
%
% o rows: The image height in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
const size_t rows,ExceptionInfo *exception)
{
if ((columns == 0) || (rows == 0))
ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename);
image->columns=columns;
image->rows=rows;
if ((image->depth == 0) || (image->depth > (8*sizeof(MagickSizeType))))
ThrowBinaryException(ImageError,"ImageDepthNotSupported",image->filename);
return(SyncImagePixelCache(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfo() initializes the 'magick' field of the ImageInfo structure.
% It is set to a type of image format based on the prefix or suffix of the
% filename. For example, 'ps:image' returns PS indicating a PostScript image.
% JPEG is returned for this filename: 'image.jpg'. The filename prefix has
% precedence over the suffix. Use an optional index enclosed in brackets
% after a file name to specify a desired scene of a multi-resolution image
% format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value
% indicates success.
%
% The format of the SetImageInfo method is:
%
% MagickBooleanType SetImageInfo(ImageInfo *image_info,
% const unsigned int frames,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o frames: the number of images you intend to write.
%
% o exception: return any errors or warnings in this structure.
%
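%  For example, a sketch that resolves the format from a filename suffix
%  (assuming a readable image.jpg exists and image_info and exception were
%  acquired elsewhere):
%
%      (void) CopyMagickString(image_info->filename,"image.jpg",
%        MagickPathExtent);
%      if (SetImageInfo(image_info,0,exception) != MagickFalse)
%        {
%          /* image_info->magick now identifies the JPEG coder */
%        }
%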
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
const unsigned int frames,ExceptionInfo *exception)
{
char
component[MagickPathExtent],
magic[MagickPathExtent],
*q;
const MagicInfo
*magic_info;
const MagickInfo
*magick_info;
ExceptionInfo
*sans_exception;
Image
*image;
MagickBooleanType
status;
register const char
*p;
ssize_t
count;
/*
Look for 'image.format' in filename.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
*component='\0';
GetPathComponent(image_info->filename,SubimagePath,component);
if (*component != '\0')
{
/*
Look for scene specification (e.g. img0001.pcd[4]).
*/
if (IsSceneGeometry(component,MagickFalse) == MagickFalse)
{
if (IsGeometry(component) != MagickFalse)
(void) CloneString(&image_info->extract,component);
}
else
{
size_t
first,
last;
(void) CloneString(&image_info->scenes,component);
image_info->scene=StringToUnsignedLong(image_info->scenes);
image_info->number_scenes=image_info->scene;
p=image_info->scenes;
for (q=(char *) image_info->scenes; *q != '\0'; p++)
{
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
p++;
first=(size_t) strtol(p,&q,10);
last=first;
while (isspace((int) ((unsigned char) *q)) != 0)
q++;
if (*q == '-')
last=(size_t) strtol(q+1,&q,10);
if (first > last)
Swap(first,last);
if (first < image_info->scene)
image_info->scene=first;
if (last > image_info->number_scenes)
image_info->number_scenes=last;
p=q;
}
image_info->number_scenes-=image_info->scene-1;
}
}
*component='\0';
if (*image_info->magick == '\0')
GetPathComponent(image_info->filename,ExtensionPath,component);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if (*component != '\0')
if ((LocaleCompare(component,"gz") == 0) ||
(LocaleCompare(component,"Z") == 0) ||
(LocaleCompare(component,"svgz") == 0) ||
(LocaleCompare(component,"wmz") == 0))
{
char
path[MagickPathExtent];
(void) CopyMagickString(path,image_info->filename,MagickPathExtent);
path[strlen(path)-strlen(component)-1]='\0';
GetPathComponent(path,ExtensionPath,component);
}
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
if (*component != '\0')
if (LocaleCompare(component,"bz2") == 0)
{
char
path[MagickPathExtent];
(void) CopyMagickString(path,image_info->filename,MagickPathExtent);
path[strlen(path)-strlen(component)-1]='\0';
GetPathComponent(path,ExtensionPath,component);
}
#endif
image_info->affirm=MagickFalse;
sans_exception=AcquireExceptionInfo();
if ((*component != '\0') && (IsGlob(component) == MagickFalse))
{
MagickFormatType
format_type;
register ssize_t
i;
static const char
*format_type_formats[] =
{
"AUTOTRACE",
"BROWSE",
"DCRAW",
"EDIT",
"LAUNCH",
"MPEG:DECODE",
"MPEG:ENCODE",
"PRINT",
"PS:ALPHA",
"PS:CMYK",
"PS:COLOR",
"PS:GRAY",
"PS:MONO",
"SCAN",
"SHOW",
"WIN",
(char *) NULL
};
/*
User specified image format.
*/
(void) CopyMagickString(magic,component,MagickPathExtent);
LocaleUpper(magic);
/*
Look for explicit image formats.
*/
format_type=UndefinedFormatType;
magick_info=GetMagickInfo(magic,sans_exception);
if ((magick_info != (const MagickInfo *) NULL) &&
(magick_info->format_type != UndefinedFormatType))
format_type=magick_info->format_type;
i=0;
while ((format_type == UndefinedFormatType) &&
(format_type_formats[i] != (char *) NULL))
{
if ((*magic == *format_type_formats[i]) &&
(LocaleCompare(magic,format_type_formats[i]) == 0))
format_type=ExplicitFormatType;
i++;
}
if (format_type == UndefinedFormatType)
(void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
else
if (format_type == ExplicitFormatType)
{
image_info->affirm=MagickTrue;
(void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
}
if (LocaleCompare(magic,"RGB") == 0)
image_info->affirm=MagickFalse; /* maybe SGI disguised as RGB */
}
/*
Look for explicit 'format:image' in filename.
*/
*magic='\0';
GetPathComponent(image_info->filename,MagickPath,magic);
if (*magic == '\0')
{
(void) CopyMagickString(magic,image_info->magick,MagickPathExtent);
magick_info=GetMagickInfo(magic,sans_exception);
if (frames == 0)
GetPathComponent(image_info->filename,CanonicalPath,component);
else
GetPathComponent(image_info->filename,SubcanonicalPath,component);
(void) CopyMagickString(image_info->filename,component,MagickPathExtent);
}
else
{
const DelegateInfo
*delegate_info;
/*
User specified image format.
*/
LocaleUpper(magic);
magick_info=GetMagickInfo(magic,sans_exception);
delegate_info=GetDelegateInfo(magic,"*",sans_exception);
if (delegate_info == (const DelegateInfo *) NULL)
delegate_info=GetDelegateInfo("*",magic,sans_exception);
if (((magick_info != (const MagickInfo *) NULL) ||
(delegate_info != (const DelegateInfo *) NULL)) &&
(IsMagickConflict(magic) == MagickFalse))
{
image_info->affirm=MagickTrue;
(void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
GetPathComponent(image_info->filename,CanonicalPath,component);
(void) CopyMagickString(image_info->filename,component,
MagickPathExtent);
}
}
sans_exception=DestroyExceptionInfo(sans_exception);
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
if ((image_info->adjoin != MagickFalse) && (frames > 1))
{
/*
Test for multiple image support (e.g. image%02d.png).
*/
(void) InterpretImageFilename(image_info,(Image *) NULL,
image_info->filename,(int) image_info->scene,component,exception);
if ((LocaleCompare(component,image_info->filename) != 0) &&
(strchr(component,'%') == (char *) NULL))
image_info->adjoin=MagickFalse;
}
if ((image_info->adjoin != MagickFalse) && (frames > 0))
{
/*
Some image formats do not support multiple frames per file.
*/
magick_info=GetMagickInfo(magic,exception);
if (magick_info != (const MagickInfo *) NULL)
if (GetMagickAdjoin(magick_info) == MagickFalse)
image_info->adjoin=MagickFalse;
}
if (image_info->affirm != MagickFalse)
return(MagickTrue);
if (frames == 0)
{
unsigned char
*magick;
size_t
magick_size;
/*
Determine the image format from the first few bytes of the file.
*/
magick_size=GetMagicPatternExtent(exception);
if (magick_size == 0)
return(MagickFalse);
image=AcquireImage(image_info,exception);
(void) CopyMagickString(image->filename,image_info->filename,
MagickPathExtent);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
if ((IsBlobSeekable(image) == MagickFalse) ||
(IsBlobExempt(image) != MagickFalse))
{
/*
Copy image to seekable temporary file.
*/
*component='\0';
status=ImageToFile(image,component,exception);
(void) CloseBlob(image);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
SetImageInfoFile(image_info,(FILE *) NULL);
(void) CopyMagickString(image->filename,component,MagickPathExtent);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
(void) CopyMagickString(image_info->filename,component,
MagickPathExtent);
image_info->temporary=MagickTrue;
}
magick=(unsigned char *) AcquireMagickMemory(magick_size);
if (magick == (unsigned char *) NULL)
{
(void) CloseBlob(image);
image=DestroyImage(image);
return(MagickFalse);
}
(void) memset(magick,0,magick_size);
count=ReadBlob(image,magick_size,magick);
(void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
(void) CloseBlob(image);
image=DestroyImage(image);
/*
Check magic cache.
*/
sans_exception=AcquireExceptionInfo();
magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
magick=(unsigned char *) RelinquishMagickMemory(magick);
if ((magic_info != (const MagicInfo *) NULL) &&
(GetMagicName(magic_info) != (char *) NULL))
{
/*
Try to use magick_info that was determined earlier by the extension
*/
if ((magick_info != (const MagickInfo *) NULL) &&
(GetMagickUseExtension(magick_info) != MagickFalse) &&
(LocaleCompare(magick_info->module,GetMagicName(
magic_info)) == 0))
(void) CopyMagickString(image_info->magick,magick_info->name,
MagickPathExtent);
else
{
(void) CopyMagickString(image_info->magick,GetMagicName(
magic_info),MagickPathExtent);
magick_info=GetMagickInfo(image_info->magick,sans_exception);
}
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
sans_exception=DestroyExceptionInfo(sans_exception);
return(MagickTrue);
}
magick_info=GetMagickInfo(image_info->magick,sans_exception);
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
sans_exception=DestroyExceptionInfo(sans_exception);
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoBlob() sets the image info blob member.
%
% The format of the SetImageInfoBlob method is:
%
% void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
% const size_t length)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob: the blob.
%
% o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
const size_t length)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
image_info->blob=(void *) blob;
image_info->length=length;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o C u s t o m S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoCustomStream() sets the image info custom stream handlers.
%
% The format of the SetImageInfoCustomStream method is:
%
% void SetImageInfoCustomStream(ImageInfo *image_info,
% CustomStreamInfo *custom_stream)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o custom_stream: your custom stream methods.
%
*/
MagickExport void SetImageInfoCustomStream(ImageInfo *image_info,
CustomStreamInfo *custom_stream)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
image_info->custom_stream=(CustomStreamInfo *) custom_stream;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoFile() sets the image info file member.
%
% The format of the SetImageInfoFile method is:
%
% void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
image_info->file=file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMask() associates a mask with the image. The mask must be the same
% dimensions as the image.
%
% The format of the SetImageMask method is:
%
% MagickBooleanType SetImageMask(Image *image,const PixelMask type,
% const Image *mask,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
% o mask: the image mask.
%
% o exception: return any errors or warnings in this structure.
%
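%  For example, a hedged sketch that installs and later removes a read mask
%  (assuming `mask' has the same dimensions as `image' and exception was
%  acquired elsewhere):
%
%      (void) SetImageMask(image,ReadPixelMask,mask,exception);
%      /* ...operations that honor the read mask... */
%      (void) SetImageMask(image,ReadPixelMask,(Image *) NULL,exception);
%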
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const PixelMask type,
const Image *mask,ExceptionInfo *exception)
{
CacheView
*mask_view,
*image_view;
MagickBooleanType
status;
ssize_t
y;
/*
Set image mask.
*/
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (mask == (const Image *) NULL)
{
switch (type)
{
case ReadPixelMask:
{
image->channels=(ChannelType) (image->channels & ~ReadMaskChannel);
break;
}
case WritePixelMask:
{
        image->channels=(ChannelType) (image->channels & ~WriteMaskChannel);
        break;
      }
default:
{
image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel);
break;
}
}
return(SyncImagePixelCache(image,exception));
}
switch (type)
{
case ReadPixelMask:
{
image->channels=(ChannelType) (image->channels | ReadMaskChannel);
break;
}
case WritePixelMask:
{
image->channels=(ChannelType) (image->channels | WriteMaskChannel);
break;
}
default:
{
image->channels=(ChannelType) (image->channels | CompositeMaskChannel);
break;
}
}
if (SyncImagePixelCache(image,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
image->mask_trait=UpdatePixelTrait;
mask_view=AcquireVirtualCacheView(mask,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(mask,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(mask_view,0,y,mask->columns,1,exception);
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
intensity;
intensity=0.0;
if ((x < (ssize_t) mask->columns) && (y < (ssize_t) mask->rows))
intensity=GetPixelIntensity(mask,p);
switch (type)
{
case ReadPixelMask:
{
SetPixelReadMask(image,ClampToQuantum(intensity),q);
break;
}
case WritePixelMask:
{
SetPixelWriteMask(image,ClampToQuantum(intensity),q);
break;
}
default:
{
SetPixelCompositeMask(image,ClampToQuantum(intensity),q);
break;
}
}
p+=GetPixelChannels(mask);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image->mask_trait=UndefinedPixelTrait;
mask_view=DestroyCacheView(mask_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e R e g i o n M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageRegionMask() associates a mask with the image as defined by the
% specified region.
%
% The format of the SetImageRegionMask method is:
%
% MagickBooleanType SetImageRegionMask(Image *image,const PixelMask type,
% const RectangleInfo *region,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
%    o region: the mask region.
%
% o exception: return any errors or warnings in this structure.
%
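%  For example, a hedged sketch that installs a write mask over a 100x80
%  region anchored at +10+10, so that subsequent updates are confined by the
%  mask (exception is assumed to be acquired elsewhere):
%
%      RectangleInfo
%        region;
%
%      region.x=10;
%      region.y=10;
%      region.width=100;
%      region.height=80;
%      (void) SetImageRegionMask(image,WritePixelMask,&region,exception);
%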
*/
MagickExport MagickBooleanType SetImageRegionMask(Image *image,
const PixelMask type,const RectangleInfo *region,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
/*
Set image mask as defined by the region.
*/
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (region == (const RectangleInfo *) NULL)
{
switch (type)
{
case ReadPixelMask:
{
image->channels=(ChannelType) (image->channels & ~ReadMaskChannel);
break;
}
case WritePixelMask:
{
image->channels=(ChannelType) (image->channels & ~WriteMaskChannel);
break;
}
default:
{
image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel);
break;
}
}
return(SyncImagePixelCache(image,exception));
}
switch (type)
{
case ReadPixelMask:
{
image->channels=(ChannelType) (image->channels | ReadMaskChannel);
break;
}
case WritePixelMask:
{
image->channels=(ChannelType) (image->channels | WriteMaskChannel);
break;
}
default:
{
image->channels=(ChannelType) (image->channels | CompositeMaskChannel);
break;
}
}
if (SyncImagePixelCache(image,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
image->mask_trait=UpdatePixelTrait;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
pixel;
pixel=QuantumRange;
if (((x >= region->x) && (x < (region->x+(ssize_t) region->width))) &&
((y >= region->y) && (y < (region->y+(ssize_t) region->height))))
pixel=(Quantum) 0;
switch (type)
{
case ReadPixelMask:
{
SetPixelReadMask(image,pixel,q);
break;
}
case WritePixelMask:
{
SetPixelWriteMask(image,pixel,q);
break;
}
default:
{
SetPixelCompositeMask(image,pixel,q);
break;
}
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image->mask_trait=UndefinedPixelTrait;
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
% image and returns the previous setting. A virtual pixel is any pixel access
% that is outside the boundaries of the image cache.
%
% The format of the SetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
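%  For example, a sketch that makes out-of-bounds accesses replicate the
%  nearest edge pixel, saving the previous method so it can be restored:
%
%      VirtualPixelMethod
%        previous;
%
%      previous=SetImageVirtualPixelMethod(image,EdgeVirtualPixelMethod,
%        exception);
%      /* ...operations that sample beyond the image bounds... */
%      (void) SetImageVirtualPixelMethod(image,previous,exception);
%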
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
return(SetPixelCacheVirtualMethod(image,virtual_pixel_method,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S m u s h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SmushImages() takes all images from the current image pointer to the end
% of the image list and smushes them together, top-to-bottom if the stack
% parameter is true, otherwise left-to-right.
%
% The current gravity setting affects how each image is justified in the
% final image.
%
% The format of the SmushImages method is:
%
%      Image *SmushImages(const Image *images,const MagickBooleanType stack,
%        const ssize_t offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o offset: minimum distance in pixels between images.
%
% o exception: return any errors or warnings in this structure.
%
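%  For example, a sketch that smushes a list into a single vertical strip
%  with a minimum distance of 2 pixels between neighbors (images and
%  exception are assumed to be acquired elsewhere):
%
%      Image
%        *smushed;
%
%      smushed=SmushImages(images,MagickTrue,2,exception);
%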
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
CacheView
*left_view,
*right_view;
const Image
*left_image,
*right_image;
RectangleInfo
left_geometry,
right_geometry;
register const Quantum
*p;
register ssize_t
i,
y;
size_t
gap;
ssize_t
x;
if (images->previous == (Image *) NULL)
return(0);
right_image=images;
SetGeometry(smush_image,&right_geometry);
GravityAdjustGeometry(right_image->columns,right_image->rows,
right_image->gravity,&right_geometry);
left_image=images->previous;
SetGeometry(smush_image,&left_geometry);
GravityAdjustGeometry(left_image->columns,left_image->rows,
left_image->gravity,&left_geometry);
gap=right_image->columns;
left_view=AcquireVirtualCacheView(left_image,exception);
right_view=AcquireVirtualCacheView(right_image,exception);
for (y=0; y < (ssize_t) smush_image->rows; y++)
{
for (x=(ssize_t) left_image->columns-1; x > 0; x--)
{
p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(left_image,p) != TransparentAlpha) ||
((left_image->columns-x-1) >= gap))
break;
}
i=(ssize_t) left_image->columns-x-1;
for (x=0; x < (ssize_t) right_image->columns; x++)
{
p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(right_image,p) != TransparentAlpha) ||
((x+i) >= (ssize_t) gap))
break;
}
if ((x+i) < (ssize_t) gap)
gap=(size_t) (x+i);
}
right_view=DestroyCacheView(right_view);
left_view=DestroyCacheView(left_view);
if (y < (ssize_t) smush_image->rows)
return(offset);
return((ssize_t) gap-offset);
}
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
CacheView
*bottom_view,
*top_view;
const Image
*bottom_image,
*top_image;
RectangleInfo
bottom_geometry,
top_geometry;
register const Quantum
*p;
register ssize_t
i,
x;
size_t
gap;
ssize_t
y;
if (images->previous == (Image *) NULL)
return(0);
bottom_image=images;
SetGeometry(smush_image,&bottom_geometry);
GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
bottom_image->gravity,&bottom_geometry);
top_image=images->previous;
SetGeometry(smush_image,&top_geometry);
GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
&top_geometry);
gap=bottom_image->rows;
top_view=AcquireVirtualCacheView(top_image,exception);
bottom_view=AcquireVirtualCacheView(bottom_image,exception);
for (x=0; x < (ssize_t) smush_image->columns; x++)
{
for (y=(ssize_t) top_image->rows-1; y > 0; y--)
{
p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(top_image,p) != TransparentAlpha) ||
((top_image->rows-y-1) >= gap))
break;
}
i=(ssize_t) top_image->rows-y-1;
for (y=0; y < (ssize_t) bottom_image->rows; y++)
{
p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(bottom_image,p) != TransparentAlpha) ||
((y+i) >= (ssize_t) gap))
break;
}
if ((y+i) < (ssize_t) gap)
gap=(size_t) (y+i);
}
bottom_view=DestroyCacheView(bottom_view);
top_view=DestroyCacheView(top_view);
if (x < (ssize_t) smush_image->columns)
return(offset);
return((ssize_t) gap-offset);
}
MagickExport Image *SmushImages(const Image *images,
const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag "Smush/Image"
const Image
*image;
Image
*smush_image;
MagickBooleanType
proceed,
status;
MagickOffsetType
n;
PixelTrait
alpha_trait;
RectangleInfo
geometry;
register const Image
*next;
size_t
height,
number_images,
width;
ssize_t
x_offset,
y_offset;
/*
Compute maximum area of smushed area.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=images;
alpha_trait=image->alpha_trait;
number_images=1;
width=image->columns;
height=image->rows;
next=GetNextImageInList(image);
for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
{
if (next->alpha_trait != UndefinedPixelTrait)
alpha_trait=BlendPixelTrait;
number_images++;
if (stack != MagickFalse)
{
if (next->columns > width)
width=next->columns;
height+=next->rows;
if (next->previous != (Image *) NULL)
height+=offset;
continue;
}
width+=next->columns;
if (next->previous != (Image *) NULL)
width+=offset;
if (next->rows > height)
height=next->rows;
}
/*
Smush images.
*/
smush_image=CloneImage(image,width,height,MagickTrue,exception);
if (smush_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(smush_image,DirectClass,exception) == MagickFalse)
{
smush_image=DestroyImage(smush_image);
return((Image *) NULL);
}
smush_image->alpha_trait=alpha_trait;
(void) SetImageBackgroundColor(smush_image,exception);
status=MagickTrue;
x_offset=0;
y_offset=0;
for (n=0; n < (MagickOffsetType) number_images; n++)
{
SetGeometry(smush_image,&geometry);
GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
if (stack != MagickFalse)
{
x_offset-=geometry.x;
y_offset-=SmushYGap(smush_image,image,offset,exception);
}
else
{
x_offset-=SmushXGap(smush_image,image,offset,exception);
y_offset-=geometry.y;
}
status=CompositeImage(smush_image,image,OverCompositeOp,MagickTrue,x_offset,
y_offset,exception);
proceed=SetImageProgress(image,SmushImageTag,n,number_images);
if (proceed == MagickFalse)
break;
if (stack == MagickFalse)
{
x_offset+=(ssize_t) image->columns;
y_offset=0;
}
else
{
x_offset=0;
y_offset+=(ssize_t) image->rows;
}
image=GetNextImageInList(image);
}
if (stack == MagickFalse)
smush_image->columns=(size_t) x_offset;
else
smush_image->rows=(size_t) y_offset;
if (status == MagickFalse)
smush_image=DestroyImage(smush_image);
return(smush_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StripImage() strips an image of all profiles and comments.
%
% The format of the StripImage method is:
%
% MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
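%  For example, a minimal sketch; note the method also sets the
%  png:exclude-chunk artifact so a subsequent PNG encode omits metadata
%  chunks:
%
%      (void) StripImage(image,exception);
%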
*/
MagickExport MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
{
MagickBooleanType
status;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
(void) exception;
DestroyImageProfiles(image);
(void) DeleteImageProperty(image,"comment");
(void) DeleteImageProperty(image,"date:create");
(void) DeleteImageProperty(image,"date:modify");
status=SetImageArtifact(image,"png:exclude-chunk",
"bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImage() initializes the red, green, and blue intensities of each pixel
% as defined by the colormap index.
%
% The format of the SyncImage method is:
%
% MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
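/*
  A minimal usage sketch (assumes image is a PseudoClass image whose
  colormap was just modified in place, e.g. image->colormap[0].red set to
  QuantumRange):
    if (SyncImage(image,exception) == MagickFalse)
      CatchException(exception);
  Every pixel is then rewritten from its colormap index so the pixel data
  matches the updated colormap.
*/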
static inline Quantum PushColormapIndex(Image *image,const Quantum index,
MagickBooleanType *range_exception)
{
if ((size_t) index < image->colors)
return(index);
*range_exception=MagickTrue;
return((Quantum) 0);
}
MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
range_exception,
status,
taint;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (image->ping != MagickFalse)
return(MagickTrue);
if (image->storage_class != PseudoClass)
return(MagickFalse);
assert(image->colormap != (PixelInfo *) NULL);
range_exception=MagickFalse;
status=MagickTrue;
taint=image->taint;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(range_exception,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
index;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception);
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->taint=taint;
if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e S e t t i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageSettings() syncs any image_info global options into per-image
% attributes.
%
% Note: in IMv6 free-form 'options' were always copied into 'artifacts' so
% that operations and coders could find such settings. In IMv7, if a
% desired per-image artifact is not set, the lookup falls back directly to
% the global option, so this copy is no longer needed; only the link needs
% to be set up.
%
% The format of the SyncImageSettings method is:
%
% MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%      MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
%        Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
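/*
  A minimal usage sketch (assumes the global options were set beforehand,
  e.g. with SetImageOption(image_info,"quality","85")):
    (void) SyncImagesSettings(image_info,images,exception);
  Each image in the list then has its per-image attributes (background,
  compression, density, quality, ...) refreshed from the global options.
*/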
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
Image *images,ExceptionInfo *exception)
{
Image
*image;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
image=images;
for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
(void) SyncImageSettings(image_info,image,exception);
(void) DeleteImageOption(image_info,"page");
return(MagickTrue);
}
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
const char
*option;
GeometryInfo
geometry_info;
MagickStatusType
flags;
ResolutionType
units;
/*
Sync image options.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
option=GetImageOption(image_info,"background");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->background_color,
exception);
option=GetImageOption(image_info,"black-point-compensation");
if (option != (const char *) NULL)
image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
MagickBooleanOptions,MagickFalse,option);
option=GetImageOption(image_info,"blue-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
}
option=GetImageOption(image_info,"bordercolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->border_color,
exception);
/* FUTURE: do not sync compose to per-image compose setting here */
option=GetImageOption(image_info,"compose");
if (option != (const char *) NULL)
image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,option);
/* -- */
option=GetImageOption(image_info,"compress");
if (option != (const char *) NULL)
image->compression=(CompressionType) ParseCommandOption(
MagickCompressOptions,MagickFalse,option);
option=GetImageOption(image_info,"debug");
if (option != (const char *) NULL)
image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"density");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->resolution.x=geometry_info.rho;
image->resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->resolution.y=image->resolution.x;
}
option=GetImageOption(image_info,"depth");
if (option != (const char *) NULL)
image->depth=StringToUnsignedLong(option);
option=GetImageOption(image_info,"endian");
if (option != (const char *) NULL)
image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
MagickFalse,option);
option=GetImageOption(image_info,"filter");
if (option != (const char *) NULL)
image->filter=(FilterType) ParseCommandOption(MagickFilterOptions,
MagickFalse,option);
option=GetImageOption(image_info,"fuzz");
if (option != (const char *) NULL)
image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
option=GetImageOption(image_info,"gravity");
if (option != (const char *) NULL)
image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(image_info,"green-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
}
option=GetImageOption(image_info,"intent");
if (option != (const char *) NULL)
image->rendering_intent=(RenderingIntent) ParseCommandOption(
MagickIntentOptions,MagickFalse,option);
option=GetImageOption(image_info,"intensity");
if (option != (const char *) NULL)
image->intensity=(PixelIntensityMethod) ParseCommandOption(
MagickPixelIntensityOptions,MagickFalse,option);
option=GetImageOption(image_info,"interlace");
if (option != (const char *) NULL)
image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
MagickFalse,option);
option=GetImageOption(image_info,"interpolate");
if (option != (const char *) NULL)
image->interpolate=(PixelInterpolateMethod) ParseCommandOption(
MagickInterpolateOptions,MagickFalse,option);
option=GetImageOption(image_info,"loop");
if (option != (const char *) NULL)
image->iterations=StringToUnsignedLong(option);
option=GetImageOption(image_info,"mattecolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->matte_color,
exception);
option=GetImageOption(image_info,"orient");
if (option != (const char *) NULL)
image->orientation=(OrientationType) ParseCommandOption(
MagickOrientationOptions,MagickFalse,option);
option=GetImageOption(image_info,"page");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"quality");
if (option != (const char *) NULL)
image->quality=StringToUnsignedLong(option);
option=GetImageOption(image_info,"red-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.red_primary.x=geometry_info.rho;
image->chromaticity.red_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
}
if (image_info->quality != UndefinedCompressionQuality)
image->quality=image_info->quality;
option=GetImageOption(image_info,"scene");
if (option != (const char *) NULL)
image->scene=StringToUnsignedLong(option);
option=GetImageOption(image_info,"taint");
if (option != (const char *) NULL)
image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"tile-offset");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"transparent-color");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->transparent_color,
exception);
option=GetImageOption(image_info,"type");
if (option != (const char *) NULL)
image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
option);
option=GetImageOption(image_info,"units");
units=image_info->units;
if (option != (const char *) NULL)
units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
MagickFalse,option);
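  /*
    Convert the stored resolution when the unit system changes: for example
    72 pixels-per-inch becomes 72/2.54 (about 28.35) pixels-per-centimeter,
    while the reverse conversion is rounded to two decimal places.
  */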
if (units != UndefinedResolution)
{
if (image->units != units)
switch (image->units)
{
case PixelsPerInchResolution:
{
if (units == PixelsPerCentimeterResolution)
{
image->resolution.x/=2.54;
image->resolution.y/=2.54;
}
break;
}
case PixelsPerCentimeterResolution:
{
if (units == PixelsPerInchResolution)
{
image->resolution.x=(double) ((size_t) (100.0*2.54*
image->resolution.x+0.5))/100.0;
image->resolution.y=(double) ((size_t) (100.0*2.54*
image->resolution.y+0.5))/100.0;
}
break;
}
default:
break;
}
image->units=units;
}
option=GetImageOption(image_info,"virtual-pixel");
if (option != (const char *) NULL)
(void) SetImageVirtualPixelMethod(image,(VirtualPixelMethod)
ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,option),
exception);
option=GetImageOption(image_info,"white-point");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.white_point.y=image->chromaticity.white_point.x;
}
  /*
    Keep a pointer to the image_info so that the lookup of a per-image
    artifact can fall back to a global option setting/define.  This avoids
    duplicating every global option into per-image artifacts, while
    ensuring that only specifically set per-image artifacts are preserved
    when a parenthesis ends.
  */
if (image->image_info != (ImageInfo *) NULL)
image->image_info=DestroyImageInfo(image->image_info);
image->image_info=CloneImageInfo(image_info);
return(MagickTrue);
}
|
data_augmentation.h
|
/*
Copyright (c) 2019, Sanaxen
All rights reserved.
Use of this source code is governed by a MIT license that can be found
in the LICENSE file.
*/
#ifndef _DATA_AUGMENTATION_H
#define _DATA_AUGMENTATION_H
#include <algorithm>
#include <cstdio>
#include <random>
#include <string>
#include <vector>
namespace cpp_torch
{
namespace test
{
void Image3CannelDataAugment(std::vector<tiny_dnn::vec_t>& train_images, std::vector<tiny_dnn::label_t>& train_labels, const float_t mean, const float_t stddiv, const int image_height, const int image_width, int extend_factor=2, float channel_range = CHANNEL_RANGE)
{
std::random_device rnd;
std::mt19937 mt(rnd());
std::uniform_int_distribution<> rand(0, 5);
std::uniform_int_distribution<> rand_index(0, train_images.size() - 1);
const size_t sz = train_images.size();
		for (size_t i = 0; i < sz * extend_factor; i++)
{
const int index = rand_index(mt);
tiny_dnn::vec_t& u = train_images[index];
//{
// cpp_torch::Image& bmp = cpp_torch::vec_t2image(u, 3, image_height, image_width);
// cpp_torch::ImageWrite("aaa.bmp", &bmp);
// //exit(0);
//}
std::string func = "";
switch (rand(mt))
{
case 0:func = "GAMMA"; break;
case 1:func = "RL"; break;
case 2:func = "COLOR_NOIZE"; break;
case 3:func = "NOIZE"; break;
case 4:func = "ROTATION"; break;
case 5:func = "SIFT"; break;
}
cpp_torch::ImageAugmentation(u, image_height, image_width, func);
tiny_dnn::vec_t v(u.size());
			std::transform(u.begin(), u.end(), v.begin(),
				[=](float_t c) {return (c / channel_range); }
			);
train_images.push_back(v);
train_labels.push_back(train_labels[index]);
//{
// tiny_dnn::vec_t v2(v.size());
// transform(v.begin(), v.end(), v2.begin(),
// [=](float_t c) {return (c * channel_range); }
// );
// cpp_torch::Image& bmp = cpp_torch::vec_t2image(v2, 3, image_height, image_width);
// cpp_torch::ImageWrite("bbb.bmp", &bmp);
// exit(0);
//}
}
const size_t sz2 = train_images.size();
#pragma omp parallel for
		for (int i = 0; i < (int)sz2; i++)
		{
			for (size_t j = 0; j < train_images[i].size(); j++)
			{
				train_images[i][j] = (train_images[i][j] - mean) / stddiv;
			}
		}
		printf("Augmentation:%zu -> %zu\n", sz, sz2);
}
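		// Minimal usage sketch (hypothetical sizes and statistics; assumes the
		// images were loaded elsewhere as 3-channel data in [0, CHANNEL_RANGE]):
		//   std::vector<tiny_dnn::vec_t> train_images;   // filled by a loader
		//   std::vector<tiny_dnn::label_t> train_labels; // filled by a loader
		//   // double the set, then standardize with mean 0.5 and stddev 0.25
		//   Image3CannelDataAugment(train_images, train_labels, 0.5f, 0.25f,
		//                           32, 32, 2);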
}
}
#endif
|
GB_binop__isge_uint32.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__isge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__isge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint32)
// A*D function (colscale): GB (_AxD__isge_uint32)
// D*A function (rowscale): GB (_DxB__isge_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint32)
// C=scalar+B GB (_bind1st__isge_uint32)
// C=scalar+B' GB (_bind1st_tran__isge_uint32)
// C=A+scalar GB (_bind2nd__isge_uint32)
// C=A'+scalar GB (_bind2nd_tran__isge_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij >= bij)
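// For example, isge(3,2) = (3 >= 2) = 1 and isge(2,3) = 0, with the 0/1
// result stored as a uint32_t.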
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_UINT32 || GxB_NO_ISGE_UINT32)
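// When GB_DISABLE is true (GraphBLAS was compiled with GxB_NO_ISGE,
// GxB_NO_UINT32, or GxB_NO_ISGE_UINT32), every function below returns
// GrB_NO_VALUE and the generic, non-specialized kernel is used instead.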
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isge_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isge_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__isge_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isge_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
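// For the ISGE operator this computes Cx [p] = (x >= Bx [p]) for every
// entry p present in B; bitmap entries with GBB (Bb, p) == 0 are skipped.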
GrB_Info GB (_bind1st__isge_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isge_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__isge_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
triplet_kpoint.c
|
/* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* These codes were originally parts of spglib, but only developed */
/* and used for phono3py. Therefore these were moved from spglib to */
/* phono3py. This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <stddef.h>
#include <stdlib.h>
#include <mathfunc.h>
#include <kpoint.h>
#include <kgrid.h>
#include <triplet_h/triplet.h>
#include <triplet_h/triplet_kpoint.h>
#define KPT_NUM_BZ_SEARCH_SPACE 125
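/* The 125 = 5 * 5 * 5 entries below enumerate every combination of */
/* reciprocal-lattice shifts with components in {0, 1, 2, -2, -1}. */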
static int bz_search_space[KPT_NUM_BZ_SEARCH_SPACE][3] = {
{ 0, 0, 0},
{ 0, 0, 1},
{ 0, 0, 2},
{ 0, 0, -2},
{ 0, 0, -1},
{ 0, 1, 0},
{ 0, 1, 1},
{ 0, 1, 2},
{ 0, 1, -2},
{ 0, 1, -1},
{ 0, 2, 0},
{ 0, 2, 1},
{ 0, 2, 2},
{ 0, 2, -2},
{ 0, 2, -1},
{ 0, -2, 0},
{ 0, -2, 1},
{ 0, -2, 2},
{ 0, -2, -2},
{ 0, -2, -1},
{ 0, -1, 0},
{ 0, -1, 1},
{ 0, -1, 2},
{ 0, -1, -2},
{ 0, -1, -1},
{ 1, 0, 0},
{ 1, 0, 1},
{ 1, 0, 2},
{ 1, 0, -2},
{ 1, 0, -1},
{ 1, 1, 0},
{ 1, 1, 1},
{ 1, 1, 2},
{ 1, 1, -2},
{ 1, 1, -1},
{ 1, 2, 0},
{ 1, 2, 1},
{ 1, 2, 2},
{ 1, 2, -2},
{ 1, 2, -1},
{ 1, -2, 0},
{ 1, -2, 1},
{ 1, -2, 2},
{ 1, -2, -2},
{ 1, -2, -1},
{ 1, -1, 0},
{ 1, -1, 1},
{ 1, -1, 2},
{ 1, -1, -2},
{ 1, -1, -1},
{ 2, 0, 0},
{ 2, 0, 1},
{ 2, 0, 2},
{ 2, 0, -2},
{ 2, 0, -1},
{ 2, 1, 0},
{ 2, 1, 1},
{ 2, 1, 2},
{ 2, 1, -2},
{ 2, 1, -1},
{ 2, 2, 0},
{ 2, 2, 1},
{ 2, 2, 2},
{ 2, 2, -2},
{ 2, 2, -1},
{ 2, -2, 0},
{ 2, -2, 1},
{ 2, -2, 2},
{ 2, -2, -2},
{ 2, -2, -1},
{ 2, -1, 0},
{ 2, -1, 1},
{ 2, -1, 2},
{ 2, -1, -2},
{ 2, -1, -1},
{-2, 0, 0},
{-2, 0, 1},
{-2, 0, 2},
{-2, 0, -2},
{-2, 0, -1},
{-2, 1, 0},
{-2, 1, 1},
{-2, 1, 2},
{-2, 1, -2},
{-2, 1, -1},
{-2, 2, 0},
{-2, 2, 1},
{-2, 2, 2},
{-2, 2, -2},
{-2, 2, -1},
{-2, -2, 0},
{-2, -2, 1},
{-2, -2, 2},
{-2, -2, -2},
{-2, -2, -1},
{-2, -1, 0},
{-2, -1, 1},
{-2, -1, 2},
{-2, -1, -2},
{-2, -1, -1},
{-1, 0, 0},
{-1, 0, 1},
{-1, 0, 2},
{-1, 0, -2},
{-1, 0, -1},
{-1, 1, 0},
{-1, 1, 1},
{-1, 1, 2},
{-1, 1, -2},
{-1, 1, -1},
{-1, 2, 0},
{-1, 2, 1},
{-1, 2, 2},
{-1, 2, -2},
{-1, 2, -1},
{-1, -2, 0},
{-1, -2, 1},
{-1, -2, 2},
{-1, -2, -2},
{-1, -2, -1},
{-1, -1, 0},
{-1, -1, 1},
{-1, -1, 2},
{-1, -1, -2},
{-1, -1, -1}
};
static void grid_point_to_address_double(int address_double[3],
const size_t grid_point,
const int mesh[3],
const int is_shift[3]);
static size_t get_ir_triplets_at_q(size_t *map_triplets,
size_t *map_q,
int (*grid_address)[3],
const size_t grid_point,
const int mesh[3],
const MatINT * rot_reciprocal,
const int swappable);
static size_t get_BZ_triplets_at_q(size_t (*triplets)[3],
const size_t grid_point,
TPLCONST int (*bz_grid_address)[3],
const size_t *bz_map,
const size_t *map_triplets,
const size_t num_map_triplets,
const int mesh[3]);
static int get_third_q_of_triplets_at_q(int bz_address[3][3],
const int q_index,
const size_t *bz_map,
const int mesh[3],
const int bzmesh[3]);
static void modulo_i3(int v[3], const int m[3]);
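/* tpk_get_ir_triplets_at_q: map_triplets and map_q receive one entry per */
/* grid point of the mesh; the return value is the number of irreducible */
/* (q, q', q'') triplets found at the fixed grid_point. */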
size_t tpk_get_ir_triplets_at_q(size_t *map_triplets,
size_t *map_q,
int (*grid_address)[3],
const int grid_point,
const int mesh[3],
const int is_time_reversal,
const MatINT * rotations,
const int swappable)
{
int num_ir;
MatINT *rot_reciprocal;
rot_reciprocal = kpt_get_point_group_reciprocal(rotations, is_time_reversal);
num_ir = get_ir_triplets_at_q(map_triplets,
map_q,
grid_address,
grid_point,
mesh,
rot_reciprocal,
swappable);
mat_free_MatINT(rot_reciprocal);
return num_ir;
}
size_t tpk_get_BZ_triplets_at_q(size_t (*triplets)[3],
const size_t grid_point,
TPLCONST int (*bz_grid_address)[3],
const size_t *bz_map,
const size_t *map_triplets,
const size_t num_map_triplets,
const int mesh[3])
{
return get_BZ_triplets_at_q(triplets,
grid_point,
bz_grid_address,
bz_map,
map_triplets,
num_map_triplets,
mesh);
}
static size_t get_ir_triplets_at_q(size_t *map_triplets,
size_t *map_q,
int (*grid_address)[3],
const size_t grid_point,
const int mesh[3],
const MatINT * rot_reciprocal,
const int swappable)
{
size_t i, j, num_grid, q_2, num_ir_q, num_ir_triplets, ir_grid_point;
int mesh_double[3], is_shift[3];
int address_double0[3], address_double1[3], address_double2[3];
size_t *ir_grid_points, *third_q;
double tolerance;
double stabilizer_q[1][3];
MatINT *rot_reciprocal_q;
ir_grid_points = NULL;
third_q = NULL;
rot_reciprocal_q = NULL;
tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]);
num_grid = mesh[0] * mesh[1] * (size_t)mesh[2];
for (i = 0; i < 3; i++) {
/* Only consider the gamma-point */
is_shift[i] = 0;
mesh_double[i] = mesh[i] * 2;
}
/* Search irreducible q-points (map_q) with a stabilizer */
/* q */
grid_point_to_address_double(address_double0, grid_point, mesh, is_shift);
for (i = 0; i < 3; i++) {
stabilizer_q[0][i] =
(double)address_double0[i] / mesh_double[i] - (address_double0[i] > mesh[i]);
}
rot_reciprocal_q = kpt_get_point_group_reciprocal_with_q(rot_reciprocal,
tolerance,
1,
stabilizer_q);
num_ir_q = kpt_get_dense_irreducible_reciprocal_mesh(grid_address,
map_q,
mesh,
is_shift,
rot_reciprocal_q);
mat_free_MatINT(rot_reciprocal_q);
rot_reciprocal_q = NULL;
third_q = (size_t*) malloc(sizeof(size_t) * num_ir_q);
ir_grid_points = (size_t*) malloc(sizeof(size_t) * num_ir_q);
num_ir_q = 0;
for (i = 0; i < num_grid; i++) {
if (map_q[i] == i) {
ir_grid_points[num_ir_q] = i;
num_ir_q++;
}
}
for (i = 0; i < num_grid; i++) {
map_triplets[i] = num_grid; /* When not found, map_triplets == num_grid */
}
#pragma omp parallel for private(j, address_double1, address_double2)
for (i = 0; i < num_ir_q; i++) {
grid_point_to_address_double(address_double1,
ir_grid_points[i],
mesh,
is_shift); /* q' */
for (j = 0; j < 3; j++) { /* q'' */
address_double2[j] = - address_double0[j] - address_double1[j];
}
third_q[i] = kgd_get_dense_grid_point_double_mesh(address_double2, mesh);
}
num_ir_triplets = 0;
if (swappable) { /* search q1 <-> q2 */
for (i = 0; i < num_ir_q; i++) {
ir_grid_point = ir_grid_points[i];
q_2 = third_q[i];
if (map_triplets[map_q[q_2]] < num_grid) {
map_triplets[ir_grid_point] = map_triplets[map_q[q_2]];
} else {
map_triplets[ir_grid_point] = ir_grid_point;
num_ir_triplets++;
}
}
} else {
for (i = 0; i < num_ir_q; i++) {
ir_grid_point = ir_grid_points[i];
map_triplets[ir_grid_point] = ir_grid_point;
num_ir_triplets++;
}
}
#pragma omp parallel for
for (i = 0; i < num_grid; i++) {
map_triplets[i] = map_triplets[map_q[i]];
}
free(third_q);
third_q = NULL;
free(ir_grid_points);
ir_grid_points = NULL;
return num_ir_triplets;
}
static size_t get_BZ_triplets_at_q(size_t (*triplets)[3],
const size_t grid_point,
TPLCONST int (*bz_grid_address)[3],
const size_t *bz_map,
const size_t *map_triplets,
const size_t num_map_triplets,
const int mesh[3])
{
size_t i, num_ir;
int j, k;
int bz_address[3][3], bz_address_double[3], bzmesh[3];
size_t *ir_grid_points;
ir_grid_points = NULL;
for (i = 0; i < 3; i++) {
bzmesh[i] = mesh[i] * 2;
}
num_ir = 0;
ir_grid_points = (size_t*) malloc(sizeof(size_t) * num_map_triplets);
for (i = 0; i < num_map_triplets; i++) {
if (map_triplets[i] == i) {
ir_grid_points[num_ir] = i;
num_ir++;
}
}
#pragma omp parallel for private(j, k, bz_address, bz_address_double)
for (i = 0; i < num_ir; i++) {
for (j = 0; j < 3; j++) {
bz_address[0][j] = bz_grid_address[grid_point][j];
bz_address[1][j] = bz_grid_address[ir_grid_points[i]][j];
bz_address[2][j] = - bz_address[0][j] - bz_address[1][j];
}
for (j = 2; j > -1; j--) {
if (get_third_q_of_triplets_at_q(bz_address,
j,
bz_map,
mesh,
bzmesh) == 0) {
break;
}
}
for (j = 0; j < 3; j++) {
for (k = 0; k < 3; k++) {
bz_address_double[k] = bz_address[j][k] * 2;
}
triplets[i][j] =
bz_map[kgd_get_dense_grid_point_double_mesh(bz_address_double, bzmesh)];
}
}
free(ir_grid_points);
ir_grid_points = NULL;
return num_ir;
}
static int get_third_q_of_triplets_at_q(int bz_address[3][3],
const int q_index,
const size_t *bz_map,
const int mesh[3],
const int bzmesh[3])
{
int i, j, smallest_g, smallest_index, sum_g, delta_g[3];
size_t prod_bzmesh;
size_t bzgp[KPT_NUM_BZ_SEARCH_SPACE];
int bz_address_double[3];
prod_bzmesh = (size_t)bzmesh[0] * bzmesh[1] * bzmesh[2];
modulo_i3(bz_address[q_index], mesh);
for (i = 0; i < 3; i++) {
delta_g[i] = 0;
for (j = 0; j < 3; j++) {
delta_g[i] += bz_address[j][i];
}
delta_g[i] /= mesh[i];
}
for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) {
for (j = 0; j < 3; j++) {
bz_address_double[j] = (bz_address[q_index][j] +
bz_search_space[i][j] * mesh[j]) * 2;
}
bzgp[i] = bz_map[kgd_get_dense_grid_point_double_mesh(bz_address_double,
bzmesh)];
}
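  /* If every candidate maps outside the BZ (bzgp[i] == prod_bzmesh for */
  /* all i), control falls through to the escape label below and the */
  /* address is left unchanged, since bz_search_space[0] is {0, 0, 0}. */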
for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) {
if (bzgp[i] != prod_bzmesh) {
goto escape;
}
}
escape:
smallest_g = 4;
smallest_index = 0;
for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) {
if (bzgp[i] < prod_bzmesh) { /* q'' is in BZ */
sum_g = (abs(delta_g[0] + bz_search_space[i][0]) +
abs(delta_g[1] + bz_search_space[i][1]) +
abs(delta_g[2] + bz_search_space[i][2]));
if (sum_g < smallest_g) {
smallest_index = i;
smallest_g = sum_g;
}
}
}
for (i = 0; i < 3; i++) {
bz_address[q_index][i] += bz_search_space[smallest_index][i] * mesh[i];
}
return smallest_g;
}
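/* Example (without GRID_ORDER_XYZ, mesh = {2, 2, 2}, is_shift = {0, 0, 0}): */
/* grid_point = 5 gives address = {1, 0, 1} and address_double = {2, 0, 2}. */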
static void grid_point_to_address_double(int address_double[3],
const size_t grid_point,
const int mesh[3],
const int is_shift[3])
{
int i;
int address[3];
#ifndef GRID_ORDER_XYZ
address[2] = grid_point / (mesh[0] * mesh[1]);
address[1] = (grid_point - address[2] * mesh[0] * mesh[1]) / mesh[0];
address[0] = grid_point % mesh[0];
#else
address[0] = grid_point / (mesh[1] * mesh[2]);
address[1] = (grid_point - address[0] * mesh[1] * mesh[2]) / mesh[2];
address[2] = grid_point % mesh[2];
#endif
for (i = 0; i < 3; i++) {
address_double[i] = address[i] * 2 + is_shift[i];
}
}
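/* modulo_i3 maps each component into the range [0, m[i]): for example */
/* v = {-1, 3, 2} with m = {2, 2, 2} becomes {1, 1, 0}. */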
static void modulo_i3(int v[3], const int m[3])
{
int i;
for (i = 0; i < 3; i++) {
v[i] = v[i] % m[i];
if (v[i] < 0) {
v[i] += m[i];
}
}
}
|
ast-dump-openmp-target-parallel-for.c
|
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) {
#pragma omp target parallel for
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) {
#pragma omp target parallel for
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) {
#pragma omp target parallel for collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) {
#pragma omp target parallel for collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) {
#pragma omp target parallel for collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-parallel-for.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTargetParallelForDirective {{.*}} <line:4:9, col:32>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:4:9) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTargetParallelForDirective {{.*}} <line:10:9, col:32>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:10:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTargetParallelForDirective {{.*}} <line:17:9, col:44>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:33, col:43>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:42> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:42> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:17:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTargetParallelForDirective {{.*}} <line:24:9, col:44>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:33, col:43>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:42> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:42> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:24:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTargetParallelForDirective {{.*}} <line:31:9, col:44>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:33, col:43>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:42> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:42> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for.c:31:9) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
p99.h
|
/* This may look like nonsense, but it really is -*- mode: C -*- */
/* */
/* Except for parts copied from previous work and as explicitly stated below, */
/* the authors and copyright holders for this work are as follows: */
/* (C) copyright 2010-2012 Jens Gustedt, INRIA, France */
/* (C) copyright 2012 William Morris */
/* */
/* This file is free software; it is part of the P99 project. */
/* You can redistribute it and/or modify it under the terms of the QPL as */
/* given in the file LICENSE. It is distributed without any warranty; */
/* without even the implied warranty of merchantability or fitness for a */
/* particular purpose. */
/* */
#ifndef P99_H_
# define P99_H_
/**
** @file
** @brief Use this to include all of P99.
**/
/**
** @mainpage P99 - Preprocessor macros and functions for C99 and C11
**
** P99 is a suite of macro and function definitions that ease
** programming in modern C, aka C99. By using new facilities in C99 we
** implement default arguments for functions, scope bound resource
** management, transparent allocation and initialization, ...
**
** By using special features of some compilers and operating systems,
** we also are able to provide an almost feature-complete emulation
** of the new C standard, C11.
**
** @section introduction Macros and inline functions working together
**
** In C, functions (whether @c inline or not) and macros fulfill
** different purposes. Their difference should not be seen as
** ideological as some seem to take it, and what is even more
** important, they may work nicely together.
**
** Macros are text replacements performed at compile time, and they
** can do things like ::P99_SIGNED(EXPR), which is defined by P99. That
** macro takes an expression as an argument and tells at compile time
** whether or not the integral type of @c EXPR is
** signed. Additionally it guarantees that @c EXPR itself is not
** evaluated at run time (so there are no side effects);
** only its type is taken into consideration.
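**
** As a minimal sketch of how such a macro can work (an illustration,
** not necessarily P99's exact definition), the promotion rules of the
** conditional operator can be exploited so that @c EXPR contributes
** only its type, never its value:
**
** @code
** // The controlling expression is 0, so EXPR is never evaluated; the
** // result has the promoted common type of EXPR and int. For a signed
** // type (after integer promotion) the value is -1 (< 0); for an
** // unsigned type it wraps around to the maximum value (>= 0).
** #define MY_SIGNED(EXPR) ((0 ? (EXPR) : -1) < 0)
** @endcode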
**
** Such an example shows that macros are ideally used when the type
** of an expression is to be determined and you want to act
** accordingly to that type. On the other hand, the pitfall with
** macros is that their arguments may be evaluated several times,
** which is bad because of side effects.
**
** Functions on the other hand are typed, which makes them more
** strict or, phrased negatively, less flexible. Consider the
** function
**
** @code
** inline
** uintmax_t p00_abs_signed(intmax_t a) {
** return (a < 0) ? -(uintmax_t)a : (uintmax_t)a;
** }
** @endcode
**
** It takes a signed integer value @c a and computes its absolute
** value. Observe that the return type of this function is
** unsigned. This has to be so, since otherwise not all valid values
** could be realized.
**
** @c p00_abs_signed would not be a good candidate for a macro, since
** @c a is evaluated twice in the expression; once in the controlling
** expression and once for returning its value or its negation.
**
** We may use this function with any integral type, but then the
** result would probably not be what a naive programmer would expect if
** the argument is a large unsigned value. The argument will be
** promoted to @c intmax_t. If the value @c X that is
** passed to the call is positive and greater than <code> INTMAX_MAX
** = 2<sup>N</sup> -1</code>, the result is probably not what we'd
** expect.@fntm 1@efntm If the conversion to
** @c intmax_t doesn't result in a range error thrown by the run time
** system (it would be permitted to do so), the argument @a a of the
** function would receive the negative value @c -C where @c C is @c
** 2<sup>N</sup> - X. The result of the function call would then be
** @c C and not @c X.
**
** With the following macro we get rid of these restrictions by
** combining the macro and the function:
**
** @code
** #define P99_ABS(EXPR) (P99_SIGNED(EXPR) ? p00_abs_signed(EXPR) : (EXPR))
** @endcode
**
** This has the following properties
**
** <ul>
** <li>For any integral type it returns the correct result.
** @fntm 2
** @efntm
** </li>
** <li>The argument @c EXPR is evaluated exactly once.</li>
** <li>Any recent and decent compiler will create
** @ref inline "optimal code" for that combined macro.
** </li>
** </ul>
**
** In that spirit, P99 aims to provide utilities that often combine
** macros and @c inline functions and that are only possible with the
** @ref c99 "features"
** that come with C99 (read ANSI C as normalized in 1999)
** and that were absent in C89. The features include among others
** - @ref variadic
** - @ref inline
** - @ref initializers
** - @ref compound
** - @ref hide
** - @ref pragma
**
** On some platforms, P99 is also able to emulate the main features
** that come with @link C11, the newest C standard, C11:@endlink
** - @ref generic
** - @ref atomic
** - @ref threads
**
** With all these features it implements @ref utilities "utilities" that
** previously had not been possible to program in C (or C macros) or
** that were very difficult to implement
** - @ref defaults
** - @ref blocks
** - @ref condi
** - @ref alloc
**
** P99 also provides numerous facilities for
** @ref programming "macro programming."
**
** P99 is not a C library in the classical sense but merely a
** collection of include files:
** - There is no binary library to be linked to your
** executable. The few functions that are provided are small
** wrappers that are compiled directly into your code.
** - There is nothing to configure, P99 include files should work
** out of the box with any conforming C99 compiler.
**
** @fnt 1 @efnt
** Here <code>N+1</code> is the width of @c uintmax_t,
** assuming most common representations of signed integers.
** @fnt 2 @efnt
** Well, there is exactly one exception to that: on systems where @c
** -INTMAX_MIN is not representable in @c uintmax_t, this same value
** may cause problems.
**
** @section credits Credits and Rights
** @subsection author Author and Maintainer
** @author <a href="http://www.loria.fr/~gustedt/">Jens Gustedt</a>
** @date 2010 - 2012
**
** @subsection contributor Contribution
** @author <a href="http://basyl.co.uk/">William Morris</a> proof reading
** @date 2012
**
** @subsection version Version
**
** The version this documentation describes can be identified
** via the macros ::P99_VERSION_DATE, namely $Format:%cd$. It also is
** tagged with a hexadecimal ID tag that is given in
** ::P99_VERSION_ID, namely $Format:%H$.
**
** @subsection copyright Copyright
** Copyright © 2010-2012 Jens Gustedt, INRIA, France, http://www.inria.fr/
**
** @htmlinclude SHORTLICENCE-open.txt
**
** @subsection license License
**
** <pre>
**@verbinclude LICENSE-QPL.txt
** </pre>
**/
/**
** @page conventions Programming conventions
**
** P99 uses some programming conventions that might be interesting
** for projects that include its header files.
**
** -# @ref standardconformance
** -# @ref OSindependence
** -# @ref prefixes
** -# @ref variableInit
** -# @ref temporaries
**
** @section standardconformance Standard conformance
**
** Where we can, we try to conform to the C99 standard and to
** mark extensions clearly, if we use them.
**
** @subsection UB Undefined behavior
**
** The C specification has many places where it explicitly says that
** under certain circumstances the behavior of the resulting code is
** undefined. Generally this means that a conforming C implementation
** is not obliged to capture such circumstances and for code that
** uses such undefined behavior might do anything, from
** do-the-right-thing or crashing to eating your hard drive.
**
** P99 should not produce any such undefined behavior.
**
** @subsection IB Implementation specific behavior
**
** In other places the standard leaves room for C implementations to
** specify certain behavior.
**
** P99 tries not to use any special feature that might be the result of
** such implementation specific behavior. This concerns in particular
** arithmetic on integer types. Here the standard allows certain
** variations:
**
** - padding bits: integer types may have padding bits that do not
** count towards their width (# of significant bits) but do count towards
**   their size (storage requirement). So generally we have to be careful
**   not to use @c sizeof expressions for shifts.
**
** - encoding of signed types: C99 allows three different encodings
** for signed integers. We do not assume any of these encodings
** but build macros that are valid for all of them.
**
** - signed under- and overflow: arithmetic on signed integer types
** may under- or overflow and C99 leaves it to the implementation
** whether or not this silently wraps around or triggers a
** signal. All expressions that involve signed types should be
** such that they avoid this implementation specific behavior. E.g
** to compute the absolute value of a negative @c int @c a we
** would use @c -(unsigned)a. This expression guarantees
** that the result is well defined even for corner cases (here @c
** a being @c INT_MIN in two's complement representation) and will
** never trigger a range error.
**
** - We do not suppose the presence of the @c typedefs @c uintptr_t
** or @c intptr_t since they are optional in C. In particular we
** may not assume that there is any sensible conversion between
** pointers and integer types.
**
** @section prefixes Defining identifiers
**
** Macro names that implement the functionality of P99 are generally
** uppercase. Exceptions to that rule are @ref hide.
** All other identifiers are lowercase.
**
** P99 uses the common prefixes @c P99_ and @c p99_ for macros and
** other identifiers, respectively.
** Future P99 versions could define new identifiers with
** these prefixes. If you include any of the P99 files,
** avoid using these prefixes for your own identifiers.
**
** The same rule holds for the prefixes @c P00_ and @c p00_ which are used
** for auxiliary identifiers that need not be
** documented. Such identifiers are ignored in the doxygen
** documentation.
**
** @section OSindependence Operating system independence
**
** The P99 macros and functions as such should be independent of the
** execution system and compiler. Nevertheless, for the time being
** they are only tested on POSIX systems, namely Linux. So if
** problems are discovered with other systems, please let
** us know.
**
** In contrast to that general policy, there is @em one file that is
** dependent on the system, p99_posix_default.h. As the name
** indicates it is designed for POSIX systems and provides default
** arguments for some POSIX functions.
**
** Also, some of the examples throughout this documentation are taken
** from programs that would typically run on POSIX systems. We hope
** that such examples are obvious and don't need
** explanation for programmers of other systems.
**
** @section variableInit Variable initialization
**
** Where possible, P99 uses initializers to initialize variables. For
** each type @c T where such an initialization is possible, there
** should be a macro @c T_INITIALIZER that does a standard
** initialization. Such a macro should use the @ref initializers
** scheme.
**
** @code
** typedef struct toto toto;
** struct toto { double a; unsigned b; };
** #define TOTO_INITIALIZER { .a = 0.0, .b = 0u }
** @endcode
**
** In case you want the default behavior of C, namely that
** all fields are recursively initialized with @c 0 then you could
** just use
** @code
** #define TOTO_INITIALIZER P99_INIT
** @endcode
** to make this choice explicit.
**
** Such initializers can easily be assembled together
** @code
** typedef struct tutu tutu;
** struct tutu { toto A; bool c; };
** #define TUTU_INITIALIZER(VAL) { .A = TOTO_INITIALIZER, .c = (VAL) }
** @endcode
**
** As you can see in this example, @c INITIALIZER can be a `normal'
** macro or a function like macro.
**
** For dynamic initialization we assume that an `init' function
** exists that
** - takes a pointer as a first argument
** - tests for the validity of that pointer, and
** - returns exactly the same pointer
** @code
** toto* toto_init(toto* t) {
** // assign from a compound literal
** if (t) *t = (toto)TOTO_INITIALIZER;
** return t;
** }
** tutu* tutu_init(tutu* t, bool val) {
** if (t) {
** toto_init(&(t->A));
** t->c = val;
** }
** return t;
** }
** @endcode
**
** @section temporaries Use of temporary lvalues
**
** Often when programming utilities for C that are supposed to return
** a pointer to an array or structure, the question of who is
** allocating the space arises: the caller or the callee.
**
** P99 goes a different way, in that it tries to remove
** most of the burden from the programmer of both caller and callee.
** Let us look at the hypothetical function
** @code
** char const* hostname(char buffer[], size_t len);
** @endcode
**
** which could be defined as being similar to the POSIX @c
** gethostname function, only that it doesn't return an error
** indicator but a pointer to the name or a null pointer if it fails. An
** old-time (and dangerous!) calling convention for such a function would
** perhaps have been to return a statically allocated buffer in case
** that the @c buffer argument is a null pointer.
**
** P99 lets you define more convenient and less dangerous calling
** conventions:
** @ref defaults
** allows us to define a
** @ref hide "macro of the same name"
** that uses a
** @ref compound "compound literal"
** if no argument is given to the same function.
**
** @code
** #define hostname(...) P99_CALL_DEFARG(hostname, 2, __VA_ARGS__)
** #define hostname_defarg_0() P99_LVAL(char[HOST_NAME_MAX])
** #define hostname_defarg_1() HOST_NAME_MAX
** @endcode
**
** This defines three different macros. One that is used where
** the programmer places a call to @c hostname. The other two, @c
** hostname_defarg_0 and @c hostname_defarg_1, are used by the macro
** @c hostname when the respective arguments are left out.
**
** Now @c hostname can be used in three different ways.
** <ol>
** <li>Such that
** the caller is responsible and obtains space on the heap:
** @code
** char const*const host = hostname(malloc(mylen), mylen);
** .
** free((void*)host);
** @endcode
** </li>
** <li>Such that the caller initializes its own variable that has a
** storage class that best fits its needs:
** @code
** char host[mylen];
** .
** hostname(host, mylen);
** @endcode
** </li>
** <li>
** Or such that the space is allocated on the stack of the current
** call scope:
** @code
** char const*const host = hostname();
** @endcode
** </li>
** </ol>
**
** The latter is then equivalent to
** @code
** char tmp[HOST_NAME_MAX] = { 0 };
** char const*const host = hostname(tmp, HOST_NAME_MAX);
** @endcode
** but without leaving a non-const access to the contents of @c tmp.
**
**
** It uses a temporary value that is only valid inside the block in
** which the @c hostname macro is expanded. The handling of this
** temporary is implicit; neither the caller nor the callee has to
** worry about allocating or deallocating it. On the calling side this
** convention is simple to use without having the callee expose a
** static buffer.
**
** In P99, it is currently applied in a few places, in particular in the
** header file "p99_posix_default.h". Its use will probably
** grow in future releases.
**/
/**
** @page utilities Implemented utilities
**
** P99 implements many different features through macros and
** functions, too many to mention explicitly in such an overview. You
** will find a structured hierarchy of descriptions below the
** "Modules" tag and the documentation of individual items under
** "Files" -> "Globals". Here we will introduce some main features:
**
** -# @ref defaults
** -# @ref blocks
** -# @ref for
** -# @ref condi
** -# @ref alloc
** -# @ref secC11
** -# @ref secGeneric
** -# @ref secAtomic
** -# @ref secThreads
**
** @section defaults Default arguments to functions
**
** In section @ref temporaries we saw a way to provide default
** arguments to functions by overloading them with macros. The
** general declaration pattern here is as follows
**
** @code
** #define NAME(...) P99_CALL_DEFARG(NAME, N, __VA_ARGS__)
** @endcode
**
** Where @c NAME becomes the name of a macro and where we also
** suppose that there is already a function of the same name @c NAME.
**
** The default value for the ::hostname macro above was produced by a
** macro, namely @c hostname_defarg_0. The evaluation of the
** default value is done in the context of the call and not in the
** context of the declaration. For default arguments that are not
** constants but expressions that have to be evaluated this is a
** major difference to C++. There, default arguments are always
** evaluated in the context of the declaration.
**
** The convention here is simple:
** - when called, ::P99_CALL_DEFARG replaces each argument M
** (counting starts at 0) that is not
** provided by the tokens
** @code
** NAME ## _defarg_ ## M ()
** @endcode
** that is a concatenation of @c NAME with the token @c _defarg_
** and the decimal number @c M
** - "not provided" here means either
** - leaving an empty place in an argument list
** - giving fewer arguments than @c N
** - to be valid C code this name must then either
** -# itself be a macro that is then expanded
** -# be a valid function call that can be interpreted by the
** compiler
**
** As we have seen in the example, (a) is computed in the context of
** the caller. This lets us simply use a temporary (here a local
** @ref compound "compound literal") that was thus valid in that context.
**
** To obtain the same behavior as for C++, namely to provide a
** default argument that is evaluated at the place of declaration and
** not at the place of the call we have to use (b), a function call.
** This will be as efficient as a macro call if we use @ref inline for
** that purpose.
**
** To ease the programming of this functional approach, P99 provides
** some machinery. We need three things as in the following example:
** @code
** P99_PROTOTYPE(rand48_t *, rand48_t_init, rand48_t*, unsigned short, unsigned short, unsigned short);
** #define rand48_t_init(...) P99_CALL_DEFARG(rand48_t_init, 4, __VA_ARGS__)
** P99_DECLARE_DEFARG(rand48_t_init,
** ,
** useconds(),
** getpid(),
**                    atomic_fetch_add(&rand48_counter, 1)
** );
** @endcode
**
** Namely
** - a "prototype" of the underlying function, such that P99
** knows the name of the function, the return type and the types
** of the arguments.
** - the macro definition as we have already seen
** - a declaration of the default arguments.
**
** Here in the example, a default argument is provided for positions 1 to 3
** but not for position 0. All three defaults have the type
** <code>unsigned short</code>. The above code leads to the automatic generation of three @c
** inline functions that look something like:
**
** @code
** inline
** unsigned short
** rand48_t_init_defarg_1(void) {
** return useconds();
** }
** inline
** unsigned short
** rand48_t_init_defarg_2(void) {
** return getpid();
** }
** inline
** unsigned short
** rand48_t_init_defarg_3(void) {
**   return atomic_fetch_add(&rand48_counter, 1);
** }
** @endcode
**
** This declaration and definition is placed in the context of the
** above declaration and not in the context of the caller. Thus
** the expression is evaluated in that context, and not in the
** context of the caller. In particular for the third function, this
** fixes the variable @c rand48_counter to the one that is visible at
** the point of declaration.
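**
** As a small usage sketch (assuming only the declarations above), a
** call site may now omit any of the defaulted arguments:
**
** @code
** rand48_t seed;
** // positions 1 to 3 are filled in by the generated _defarg_ functions
** rand48_t_init(&seed);
** // position 1 is given explicitly, positions 2 and 3 are defaulted
** rand48_t_init(&seed, 42u);
** @endcode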
**
** @section blocks Scope-bound resource management with for-statements
**
** Resource management can be tedious in C. <em>E.g</em> to protect a
** critical block from simultaneous execution in a @link threads
** threaded environment @endlink you'd have to place a lock / unlock
** pair before and after that block:
** @code
** mtx_t guard;
** mtx_init(&guard, mtx_plain);
**
** mtx_lock(&guard);
** // critical block comes here
** mtx_unlock(&guard);
** @endcode
** This is error prone as locking calls must be provided
** for each critical block. If the block is longer than a few
** lines it becomes increasingly difficult to ensure the unlocking of the
** resource, since the lock /
** unlock calls are spread at the same level as other code.
**
** Within C99 (and equally in C++, BTW) it is possible to extend the
** language in order to make this more easily visible
** and to guarantee that lock / unlock calls match. Below,
** we will give an example of a macro that will help us to write
** something like
**
** @code
** P99_PROTECTED_BLOCK(mtx_lock(&guard),
** mtx_unlock(&guard)) {
** // critical block comes here
** }
** @endcode
**
** To make this even more comfortable we have
**
** @code
** P99_MUTUAL_EXCLUDE(&guard) {
** // critical block comes here
** }
** @endcode
**
** There is an equivalent block protection that uses an ::atomic_flag
** as a spin lock. Such a spin lock uses only @link atomic atomic
** operations @endlink and can be much more efficient than
** protection through a ::mtx_t, @b if the code inside the critical
** section is really small and fast:
**
** @code
** P99_SPIN_EXCLUDE(&cat) {
** // critical block comes here
** }
** @endcode
**
** For cases where the ::atomic_flag variable would be specific to
** the block, you don't even have to define it yourself:
**
** @code
** P99_CRITICAL {
** // critical block comes here
** }
** @endcode
**
** Generally there should be no run-time performance cost for using such a
** macro. Any decent compiler will detect that the dependent code is
** executed exactly once, and thus optimize out all the control that
** has to do with our specific implementation of these blocks.
**
** Other such block macros that can be implemented with such a technique are:
** - pre- and post-conditions
** - ensuring that some dynamic initialization of a static variable is performed exactly once
** - code instrumentation
**
** An even more sophisticated tool for scope-bound resource
** management is provided by the macro ::P99_UNWIND_PROTECT
** @code
** double toto(double x) {
** P99_UNWIND_PROTECT {
** // do something
** while (cond0) {
** for (;cond1;) {
** if (cond2) P99_UNWIND(-1);
** // preliminary return
** if (cond3) P99_UNWIND_RETURN 5.7777E-30;
** }
** }
** P99_PROTECT :
** // do some cleanup here
** // if everything went well ::p99_unwind_code has value 0 otherwise it
** // receives a value from P99_UNWIND
** }
** // regular return
** return x * x;
** }
** @endcode
**
** In this code fragment the statement ::P99_UNWIND will ensure that
** the two levels of loops are broken and that execution
** continues at the special label ::P99_PROTECT.
**
** ::P99_UNWIND_RETURN goes one step further. As for ::P99_UNWIND, it
** executes the clause after ::P99_PROTECT, but when it reaches the
** end of the ::P99_UNWIND_PROTECT scope it will return to the caller
** with a return value as specified after ::P99_UNWIND_RETURN, here
** the value @c 5.7777E-30.
**
** On certain platforms that implement enough of C11 we even now have
** try-catch clauses that are entirely implemented within
** C. ::P99_TRY and ::P99_CATCH can be used as follows
** @code
** double toto(double x) {
** P99_TRY {
** // do something
** while (cond0) {
** for (;cond1;) {
** if (cond2) P99_THROW -1;
** // preliminary return
** if (cond3) P99_UNWIND_RETURN 5.7777E-30;
** }
** }
** } P99_CATCH(int code) {
** // do some cleanup here
** // if everything went well "code" has value 0 otherwise it
** // receives a value from ::P99_TRY
** }
** // regular return
** return x * x;
** }
** @endcode
**
** The advantage of ::P99_TRY over ::P99_UNWIND is that P99_THROW
** will also work from other functions that are called within the
** try-block.
**
** @section for Multidimensional arrays and parallel loops
**
** We provide some utilities to ease the programming of loop
** iterations in one or multiple dimensions. The simplest to use
** is ::P99_DO, that closely resembles a @c do loop in Fortran. It
** fixes the bounds of the iteration once, before entering the
** iteration itself.
**
** @code
** P99_DO(size_t, i, a, n, inc) {
**    A[i] *= B[i-1];
** }
** @endcode
**
**
** ::P99_FORALL allows the generation of nested for-loops over
** an arbitrary number of dimensions:
** @code
** size_t const D[3] = { 20, 17, 31 };
** P99_FORALL(D, i, j, k) {
** A[i][j][k] *= B[i][j][k];
** }
** @endcode
**
** will iterate over all combinations of @c i, @c j, @c k in the
** bounds specified by @c D.
**
** ::P99_PARALLEL_FOR, where available, will provide a parallelized
** version of a simple @c for-loop, and ::P99_PARALLEL_DO and
** ::P99_PARALLEL_FORALL implement nested parallel loops with
** otherwise the same semantics as for ::P99_DO or ::P99_FORALL,
** respectively.
**
** @section condi Preprocessor conditionals and loops
**
** P99 provides you with macro features that can become handy if you
** have to generate code repetition that might later be subject to changes.
** As examples suppose that you'd have to code something like
**
** @code
** tata = A[0]; tete = A[1]; titi = A[2]; toto = A[3];
** typedef int hui_0; typedef unsigned hui_1; typedef double hui_2;
** @endcode
**
** If, over time, there are many additions and removals to these lists,
** maintaining such code will not really be a pleasure. In P99 you
** may write equivalent statements and declarations just as
**
** @code
** P99_VASSIGNS(A, tata, tete, titi, toto);
** P99_TYPEDEFS(hui, int, unsigned, double);
** @endcode
**
** There are a handful of such predefined macros that you may look up
** under @ref statement_lists. Under the hood they all use a more
** general macro that you may yourself use to define your own macros:
** ::P99_FOR. The use of this will be described in more detail under
** @ref programming.
**
** The predefined macros from above are also able to avoid the nasty
** special case that the variadic part of the argument list is
** empty. Something like
**
** @code
** P99_VASSIGNS(A);
** P99_TYPEDEFS(hui);
** @endcode
**
** would at least cause a warning with conforming preprocessors if
** the macros were implemented directly with something like
**
** @code
** #define P99_VASSIGNS(NAME, ...) do_something_here
** #define P99_TYPEDEFS(NAME, ...) do_something_else_here
** @endcode
**
** since the variable-length part must not be empty, according to
** the standard. With P99 you don't have this sort of problem; the
** above should just result in empty statements or declarations that
** are even capable of swallowing the then-superfluous semicolon at the
** end.
**
** P99 avoids this by testing for the length of the argument list as
** a whole with ::P99_NARG and by using a macro conditional
** controlled by that length. Such conditionals like ::P99_IF_EMPTY
** ensure that the preprocessor decides which of two different code
** variants the compiler will see. The fragment
**
** @code
** P99_IF_EMPTY(BLA)(special_version)(general_version)
** @endcode
**
** will expand to either @c special_version or @c general_version
** according to @c BLA. If it expands to an empty token, the first
** variant is produced; if there is at least one non-empty token the
** second version results.
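**
** For instance, following the semantics just described:
**
** @code
** P99_IF_EMPTY()(special_version)(general_version)  ==> special_version
** P99_IF_EMPTY(x)(special_version)(general_version) ==> general_version
** @endcode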
**
** P99 also implements logical and arithmetic operations in the
** preprocessor. Logical operations just evaluate to the tokens @c 0
** or @c 1. Arithmetic is restricted to small decimal numbers, less
** than ::P99_MAX_NUMBER. Some examples
**
** @code
** P99_IS_EQ(int, double) ==> 0
** P99_IS_EQ(static, static) ==> 1
** P99_ADD(4, 5) ==> 9
** @endcode
**
** See @ref preprocessor_operators for more about that.
**
** @section alloc Allocation and initialization facilities
**
** Consistent initialization of variables is an important issue in
** C. P99 provides some tools to help with that, most importantly a
** macro ::P99_NEW. Therefore we have to rely on some assumptions
** that are specified in @ref variableInit, in particular that there
** is an `init' function for each type that we want to use with ::P99_NEW.
**
** For the example type of a circular list element
**
** @code
** // Forward declare struct elem and elem
** typedef struct elem elem;
** .
** .
** .
** struct elem { elem* pred; elem* succ; };
** @endcode
**
** we might want to ensure that the fields @c pred and @c succ are
** always properly initialized. An initializer macro could look as follows:
** @code
** #define ELEM_INITIALIZER(HERE, PRED, SUCC) {  \
**  .pred = (PRED) ? (PRED) : (HERE),            \
**  .succ = (SUCC) ? (SUCC) : (HERE),            \
** }
** @endcode
**
** A static initialization of a 4-element list in file scope can then be done as
** @code
** extern elem * head;
** .
** .
** static elem L0;
** static elem L1;
** static elem L2;
** static elem L3;
** static elem L0 = ELEM_INITIALIZER(&L0, &L3, &L1);
** static elem L1 = ELEM_INITIALIZER(&L1, &L0, &L2);
** static elem L2 = ELEM_INITIALIZER(&L2, &L1, &L3);
** static elem L3 = ELEM_INITIALIZER(&L3, &L2, &L0);
** elem * head = &L0;
** @endcode
**
** Dynamic initialization of a 4-element list on the stack in function scope
** @code
** elem L[4] = {
**   [0] = ELEM_INITIALIZER(&L[0], &L[3], &L[1]),
** [1] = ELEM_INITIALIZER(&L[1], &L[0], &L[2]),
** [2] = ELEM_INITIALIZER(&L[2], &L[1], &L[3]),
** [3] = ELEM_INITIALIZER(&L[3], &L[2], &L[0]),
** };
** @endcode
**
** For dynamic initialization we would then define something like this:
** @code
** elem * elem_init(elem* here, elem* there) {
** if (here) {
** if (there) {
** here->pred = there;
** here->succ = there->succ;
** there->succ = here;
** here->succ->pred = here;
** } else {
** here->pred = here;
** here->succ = here;
** }
** }
** return here;
** }
** @endcode
**
** Initializations of this type of heap variables in function scope
** can now simply look like this
** @code
** elem * a = P99_NEW(elem, P99_0(elem*));
** elem * b = P99_NEW(elem, a);
** elem * c = P99_NEW(elem, b);
** @endcode
** or
** @code
** elem * head = P99_NEW(elem, P99_NEW(elem, P99_NEW(elem, P99_0(elem*))));
** @endcode
**
** Each of these snippets defines a cyclic list of 3 elements, well
** initialized and ready to go.
**
** In fact, the ::P99_NEW macro takes a list of arguments that may be
** arbitrarily@fntm 3@efntm
** long. It just needs the first, which must be
** the type of the object that is to be created. The others are then
** passed as supplementary arguments to the `init' function, here the
** parameter @c there.
**
** If the `init' function accepts default arguments to some
** parameters, so will ::P99_NEW. With @ref default_arguments, calls
** to ::P99_NEW may then omit the second argument:
**
** @code
** #define elem_init(...) P99_CALL_DEFARG(elem_init, 2, __VA_ARGS__)
** #define elem_init_defarg_1() P99_0(elem*)
** .
** .
** .
** elem * a = P99_NEW(elem);
** elem * head = P99_NEW(elem, P99_NEW(elem, P99_NEW(elem)));
** @endcode
**
** @fnt 3@efnt
** The number of arguments might be restricted by your compiler
** implementation. Also most of the P99 macros are limited to
** ::P99_MAX_NUMBER.
**
** @section secC11 Emulating features of C11
**
** The new C standard C11 (published in December 2011) introduces
** some new features that are already present in many compilers or
** OS, but sometimes with different syntax or interfaces. We provide
** interfaces to some of them with the intention that once compilers
** that implement C11 come out these interfaces can directly relate
** to the C11 feature.
**
** With these emulated interfaces you can already program almost as
** if you had a native C11 compiler (which doesn't yet exist) and
** take advantage of the improvements that C11 makes to the language,
** without giving up on portability in the real world of today's
** compilers.
**
** @subsection secGeneric Type generic macros
**
** C11 provides a new feature to "overload" macros and, more generally,
** the result of any type of expression: @c _Generic. It allows us to
** write template-like expressions with the macro preprocessor. The
** idea generalizes the <em>type generic mathematical functions</em>
** that had already been present in C99:
**
** If you include the "tgmath.h" header you have a macro @c sin that
** implements calls to the family of sine functions, e.g
**
** @code
** double complex z0 = sin(1.0); // invokes the @em function @c sin
** double complex z1 = sin(2.0 + 3*I); // invokes the function @c csin
** @endcode
**
** At compile time, these type generic macros decide from the @em type of
** the argument which function call to emit.
**
** The new concept of @c _Generic expressions generalizes this
** idea. Of the freely available compilers at the time of
** this writing (April 2012), only @c clang implements this feature
** already. On the other hand, @c gcc has extensions that can be used
** to emulate it, and such an emulation is provided through
** ::P99_GENERIC.
**
** @subsection secAtomic Atomic operations
**
** Atomic operations are an important contribution of the new
** standard; these operations are implemented on all commodity CPUs
** nowadays, but a direct interface in higher programming languages
** was missing.
**
** These operations give guarantees on the coherence of data accesses
** and other primitive operations, even in the presence of @em
** races. Such races may occur between different threads (see below)
** of the same application or when a program execution is
** interrupted, e.g for a signal handler or a @c longjmp call. Since
** most instructions on modern CPUs are composed of several
** micro-instructions, in such a context an instruction may only
** succeed partially and data may end up in an intermediate state.
**
** In this example
**
** @code
** static _Atomic(size_t) n = 0;
** atomic_fetch_add(&n, 1);
** // do something in here
** atomic_fetch_sub(&n, 1);
** @endcode
**
** the variable @c n is always in a clean state: either the addition
** of @c 1 has taken place or it has not. Multiple threads can execute
** this code without locking a mutex; the value of @c n will
** always be well defined.
**
** One of the interesting concepts that come with C11 is
** ::atomic_flag, a simple interface that can implement
** spinlocks quite efficiently.
**
** @subsection secThreads Threads
**
** Atomic operations have their major advantage in the presence of
** threads, that is, multiple entities that compute concurrently
** inside the same application, sharing a common address space. C11
** provides an optional interface for threads and the principal data
** structures that are needed for them (::mtx_t and
** ::cnd_t). This thread interface is a bit simpler than POSIX
** threads, but implements the main features.
**
** P99 provides a shallow wrapper on top of POSIX threads that
** provides all the interfaces that are required by C11.
**/
/**
** @page programming Macro programming with P99
**
** Most macros and features for macro programming with P99 are
** defined in @ref meta_programming. This allows operations such as
** <dl>
** <dt>@ref arg_counting</dt>
** <dt>rudimentary argument list processing</dt>
** <dd>to obtain e.g the length of the argument list (::P99_NARG) or reverse an
** argument list (::P99_REVS)</dd>
** <dt>@ref unrolling</dt>
** <dd>not restricted to usual @c for loops but also e.g to produce a
** sequence of declarations with initializers (::P99_VASSIGNS)</dd>
** <dt>constant generation</dt>
** <dd>to compose @c double constants</dd>
** <dt>type and keyword classification</dt>
** <dt>@ref blocks</dt>
** </dl>
**
** @section arg_counting Argument List Counting
**
** To implement macros in which evaluation depends upon the number of
** arguments received, we will need to determine how many
** arguments are received. This can be achieved with something like
** the following:
**
** @code
** #define P00_ARG2(_0, _1, _2, ...) _2
** #define NARG2(...) P00_ARG2(__VA_ARGS__, 2, 1, 0)
** @endcode
**
** If NARG2 is called with two arguments the @c 2 of its expansion is in
** third position and we will see this @c 2. If it is called with just
** one argument the 1 will be in that place and thus be the result of
** the expansion. You can probably imagine an extension of that
** macro to treat more arguments, look into the P99 sources to see a
** version for ::P99_MAX_NUMBER.
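**
** As a toy sketch of such an extension (an illustration, not the
** generated P99 code itself), a variant for up to four arguments just
** extends both lists:
**
** @code
** #define P00_ARG4(_0, _1, _2, _3, _4, ...) _4
** #define NARG4(...) P00_ARG4(__VA_ARGS__, 4, 3, 2, 1, 0)
** @endcode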
**
** The toy macro @c NARG2 has an important property, namely that it
** swallows @em all its arguments when called correctly (say with 0,
** 1 or 2 arguments) and just returns a token that corresponds to the
** number. Such a property is important for macro programming since
** we don't want to have the compiler itself see the same expressions
** multiple times.
**
** The @c NARG2 macro has a major disadvantage: it is unable to
** detect an empty argument list. This is due to a fundamental
** difference between C and its preprocessor. For C, a parenthesis @c ()
** is empty and contains no argument. For the preprocessor it
** contains just one argument, and this argument is the empty token
** <code> </code>.
**
** So in fact @c NARG2 cheats. It doesn't count the number of
** arguments that it receives, but returns the number of commas plus
** one. In particular, even if it receives an empty argument list it
** will return @c 1. The macro ::P99_NARG deals with that and returns
** the token @c 0, if the list is empty.
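**
** Following that description:
**
** @code
** P99_NARG()     ==> 0
** P99_NARG(a)    ==> 1
** P99_NARG(a, b) ==> 2
** @endcode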
**
** Other macros are then programmed with tricks similar to those used for @c
** NARG2: the variable argument list is positioned at the
** beginning of a new argument list, which is then completed by the
** tokens needed to pad the given list, if necessary.
**
** A second trick is then to paste the name of another macro with
** that number together. Look e.g at ::P99_DUPL. When called as follows
**
** @code
** (P99_DUPL(3, A)) ==> (P00_DUPL3(A))
** @endcode
**
** this then is replaced @fntm 4@efntm
** as follows
** @code
** (P00_DUPL3(A)) ==> (A, P00_DUPL2(A)) ==> (A, A, P00_DUPL1(A)) ==> (A, A, A)
** @endcode
**
** The example of ::P99_DUPL together with ::P00_DUPL1, ... in file
** p99_generated.h shows a general strategy to overcome the lack of
** control structures or recursion in the preprocessor. But generally
** it would be tedious and error prone to have to copy similar
** definitions over and over again. Therefore P99 implements some of
** these with a script and collects them in the above-mentioned
** include file. This is probably not something you should do
** yourself. Section @ref unrolling will show how to
** avoid this with higher level preprocessor programming constructs.
**
**
** The next step in macro programming is then to use ::P99_NARG to
** obtain the length of a list and to use this numeric token to
** derive a macro name with the number in it
**
** @code
** P99_PASTE(t, o, k, e, n)
** ==>
** P00_PASTE(P00_NARG(t, o, k, e, n), t, o, k, e, n)
** ==>
** P00_PASTE(5, t, o, k, e, n)
** ==>
** P00__PASTE(P99_PASTE, 5, t, o, k, e, n)
** ==>
** P99_PASTE ## 5(t, o, k, e, n)
** ==>
** P99_PASTE5(t, o, k, e, n)
** ==>
** .
** ==>
** token
** @endcode
**
** @fnt 4 @efnt
** The actual definition is a bit more complicated to capture special
** cases.
**
** @section unrolling Code Unrolling
**
** Code unrolling is a generalization of what classically would be
** left to the compiler (and not the preprocessor): loop
** unrolling. Suppose that in some context you have a loop of fixed
** length
**
** @code
** for (unsigned i = 0; i < 4; ++i)
** a[i] = b[i];
** @endcode
**
** There is a good chance that an optimizing compiler would unroll
** this loop
**
** @code
** a[0] = b[0];
** a[1] = b[1];
** a[2] = b[2];
** a[3] = b[3];
** @endcode
**
** This has the advantage of sparing a CPU register otherwise used for
** @c i, and also that the addressing of the individual
** elements can now be done with constant offsets from the
** base pointers of @c a and @c b.
**
** We will see below how to achieve such loop unrolling directly
** with the preprocessor, avoiding reliance on the compiler.
**
** But code unrolling can do more than that. It may be helpful where
** we have repeated pieces of code for which there is no loop
** construct in C, for example in a declaration:
** @code
** signed a[4] = { b[0], b[1], b[2], b[3] };
** @endcode
**
** With P99 you can write this simply as
** @code
** signed a[4] = { P99_ACCESSORS(b, 4) };
** @endcode
**
** If the length of your vectors might change during the
** development of your program, you can make the length of the
** array a compile-time parameter
** @code
** #define N 4
** .
** signed a[N] = { P99_ACCESSORS(b, N) };
** @endcode
**
** You'd later just change @c N and the rest of your code remains consistent.
**
** As another example of flexibility take another assignment example,
** namely the "deserialization" of a @c struct. Suppose that we have
** a variable of a @c struct type to which we want to assign values
** that we receive through an array @c b:
** @code
** A.x = b[0];
** A.y0 = b[1];
** A.o7 = b[2];
** A.k = b[3];
** @endcode
**
** Here the pattern is somewhat regular, you assign the elements of
** @c b in order, but the left hand sides are arbitrary. P99 can do
** that for you
** @code
** P99_VASSIGNS(b, A.x, A.y0, A.o7, A.k);
** @endcode
**
** BTW, with the two P99 macros that we just introduced we can now
** perform the loop unrolling from the beginning:
** @code
** P99_VASSIGNS(b, P99_ACCESSORS(a, 4));
** @endcode
**
** The "control structure" that is behind these two macros is called
** ::P99_FOR. This is by itself a macro that takes at least 4 parameters,
** named @c NAME, @c N, @c OP, @c FUNC, but generally will have more,
** namely a list <code>A0, A1, ... , A{N-1}</code> of extra
** arguments. The idea behind this is simple:
** - the argument @c FUNC is supposed to be a macro name that is
** going to be applied to each of the @c A{i}
** - argument @c OP in turn is a macro that is called to control the
** "glue" between the different occurrences of the @c FUNC invocations.
**
** An example of @c FUNC would be something like <code>NAME[i]</code>
** for an accessor. For @c OP we provide simple choices such as ::P00_SEQ,
** which just puts commas between the occurrences, or ::P00_SEP, which does
** so with semicolons. For more exact examples and the syntax for @c
** OP and @c FUNC please refer to the documentation of ::P99_FOR.
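**
** For instance, an accessor @c FUNC combined with comma "glue" such as
** ::P00_SEQ yields exactly the behavior of ::P99_ACCESSORS seen
** earlier:
**
** @code
** P99_ACCESSORS(b, 4) ==> b[0], b[1], b[2], b[3]
** @endcode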
**/
/**
** @page c99 C99 features
**
** As extensions to C89, C99 offers some features that
** improve the possibilities of programming efficiently and portably
** at the same time. There are five of these new concepts that are
** particularly important for P99, and without them P99 wouldn't be
** possible.
** - @ref variadic
** - @ref inline
** - @ref initializers
** - @ref compound
** - @ref pragma
**
** @section variadic Variadic macros
**
** The preprocessor now has a feature that previously only C
** functions had: a macro may accept an argument list that may
** vary in size. As with functions, such a macro is defined with `...'
** in the argument list to indicate a list following the initial,
** named, arguments:
**
** @code
** #define TOTO(NAME, ...) NAME[__VA_ARGS__]
** @endcode
**
** The variable-length list is then referred to using the reserved
** identifier @c __VA_ARGS__.
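**
** With the definition above, for example:
**
** @code
** TOTO(A, i + 1) ==> A[i + 1]
** @endcode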
**
** This functionality of C99 allows us e.g to implement macros for
** @ref defaults and to perform @ref unrolling.
**
** @section inline Inline functions
**
** The new keyword @c inline is borrowed from C++ with slightly
** changed semantics. The important part for P99 is that functions
** can be @em defined in a header file (and not only declared) when
** specified as @c inline.
**
** This allows us to define small wrappers in header files using
** macros, without generating conflicts in different compilation
** units. By that we can avoid one of the major drawbacks of C macro
** programming: a macro cannot @em define another macro. In addition,
** functions, when compared to macros, have other advantages
** - they are typesafe
** - their arguments are evaluated exactly once
**
**
** @section initializers Designated initializers
**
** In C89, initialization of structures can be tedious and error prone:
** @code
** typedef struct toto toto;
** struct toto {
** unsigned a;
** double b;
** };
** .
** .
** toto A = { 0, 1 };
** @endcode
**
** Components are initialized in the order of the type
** declaration. Here the @c 0 in the initializer is used to
** initialize the component @c A.a and the @c 1 for @c A.b.
**
** Whenever the structure @c toto changes during the development
** process, we would have to revisit @em all initializations to see
** whether or not they remain consistent:
** - if the order of components @c a and @c b changes, the order of
** the expressions must be inverted
** - if we insert an element before @c a or @c b, the initialization
** of @c b by @c 1 in the example is replaced by the default
** initialization, namely @c 0.0.
**
** Keeping track of these may be particularly difficult if the
** components are of similar types, such that an initializer for one
** is valid for the other.
**
** With designated initializers this situation changes substantially:
** @code
** toto A = { .a = 0, .b = 1 };
** @endcode
**
** By this means we avoid all of the problems mentioned
** above. This scheme is robust against reordering and insertion of
** components. In a certain sense it is also robust against the
** renaming of components: all initializations will then simply fail
** at compile time, so it is easy to identify problems.
**
** For a more detailed discussion of initialization and P99 see @ref
** variableInit.
**
** @section compound Compound literals
**
** A compound literal is syntactically given as a compound
** initializer and a cast such as
** @code
** (int[2]){ 4, 5 }
** (T){ .d = 1, .a = 10 }
** @endcode
**
** It is best seen as defining a temporary object of the requested
** type, initialized using the same rules that apply to a named variable of
** that type.
**
** - Such unnamed temporary objects can be initialized on the fly, e.g
** as the arguments to functions, and they live until the end of
** the block in which they are defined.
** - They define an lvalue from which an address can be taken.
** - Unless the type of the cast is defined with `const' the content
** of such a variable is modifiable.
**
** Example: The following code returns the pointer to a character array
** that is initialized with all @c `a' and a terminating @c 0 character.
** The array
** is a valid object until the program leaves the current block.
** @code
** char const*const hui = memset((char[256]){0}, 'a', 255);
** @endcode
**
** It would be equivalent to the following
** @code
** char tmp[256] = { 0 };
** char const*const hui = memset(tmp, 'a', 255);
** @endcode
**
** Using the compound literal here has the advantage that no other
** non-const reference to the temporary is exposed.
**
** The compound literal syntax is not always very easy to read; in
** fact it might even hurt your eyes. P99 gives you a shortcut for
** compound literals that are initialized from the all @c 0
** initializer. With that the above could have been written:
**
** @code
** char const*const hui = memset(P99_LVAL(char[256]), 'a', 255);
** @endcode
**
** @section hide Macros that hide a function
**
** Per se, this is not a new feature of C99 but had been present
** before. The preprocessor has two special rules, one that applies
** generally to macros and the other that applies only to functional macros:
**
** -# If during expansion of a macro XXX the token XXX is found, it
** is not expanded. So there is no recursion in C macros.
** -# If a functional macro YYY is found without a following
** opening parenthesis it is not expanded.
**
** These features can be used to define a macro and another
** identifier that have the same name. This is sometimes used
** to test whether some functionality is present on a platform. E.g on
** my computer I have
**
** @code
** #define stdin stdin
** @endcode
**
** This can be used as follows
** @code
** #ifdef stdin
** // Do something for a hosted environment
** // Use stdin as usual
** #else
** // Do something for a free standing environment
** // We don't have stdin at all, write to a log file or so.
** #endif
** @endcode
**
** But we may equally use this technique for a function symbol. POSIX
** explicitly allows this for example for the functions in @em stdio.h
**
** <center>
** <em>
** The following shall be declared as functions and may also be
** defined as macros.<br />
** Function prototypes shall be provided.
** </em>
** </center>
**
** Let's have a look at a randomly selected function from stdio and suppose it
** were given as follows:
** @code
** int putc(int, FILE *);
** #define putc(C, F) (is_it_special(C) ? do_something_clever(C, F) : putc(C, F) )
** @endcode
**
** (Yes this evaluates @c C twice.) With that, these uses of @c
** putc are still valid:
** @code
** // Use the macro and implicitly the function, relies on rule 1
** putc('A', stdout);
**
** // Just use the function not the macro, relies on rule 2
** (putc)('A', stdout);
**
** // Get the address of putc and store it in my_putc, relies on rule 2
** int (*my_putc)(int, FILE*) = &putc;
** @endcode
**
** The example above with @c putc has a particular pitfall if we have
** the above definitions in a header file and then include this file
** at the place where we define the function:
**
** @code
** #include <stdio.h>
**
** int putc(int c, FILE *f) {
** // do the right thing here
** }
** @endcode
**
** This will simply explode since the preprocessor will expand the
** functional reference to @c putc. This can be explicitly avoided
** by undefining the macro before the definition, but for this the
** implementor of @c putc has to know that it is also a macro.
**
** With P99, we use this technique to @em overload a function to
** provide it with @ref defaults. A macro defined in that way will
** avoid this pitfall: if it is called with the same number of
** arguments (or more) that are all non-empty, it will produce the
** same token sequence as if the macro had not been defined.
**
** @section pragma Pragmas inside macros
**
** The traditional approach in C had been to specify meta information
** for the compiler in so called pragmas:
** @code
** #pragma omp parallel for
** for (size_t i = 0; i < n; ++i) c[i] += a[i] * b[i];
** @endcode
**
** The inconvenience of such a construct is that it always has to be
** on a line of its own and cannot be placed in a macro. For that
** reason most compilers provided extensions that let the programmer
** place meta information more precisely at some specific point of
** the code, e.g gcc has an @c __attribute__ extension for that.
**
** C99 adds an operator to overcome that difficulty and to normalize
** the link between macros and @c #pragma: @c _Pragma.
** @code
** _Pragma("omp parallel for") for (size_t i = 0; i < n; ++i) c[i] += a[i] * b[i];
** @endcode
** P99 uses this feature for extensions concerning OpenMP, in
** particular the ::P99_PARALLEL_FOR and ::P99_PARALLEL_FORALL
** macros.
**
**/
#include "p99_choice.h"
#include "p99_defarg.h"
#include "p99_enum.h"
#include "p99_new.h"
#include "p99_double.h"
#include "p99_swap.h"
#include "p99_generic.h"
#endif /* !P99_H_ */
|
trsm_x_sky_n_hi_row.c
|
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
#ifdef _OPENMP
#include <omp.h>
#endif
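/*
 * Skyline (SKY) triangular solve with multiple right-hand sides:
 * treating A as upper triangular, for each of the `columns`
 * right-hand-side columns of x this kernel computes
 * y = alpha * inv(A) * x by backward substitution; x and y are stored
 * with leading dimensions ldx and ldy.
 */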
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
ALPHA_INT m = A->rows;
ALPHA_Number diag[m];
memset(diag, '\0', m * sizeof(ALPHA_Number));
int num_thread = alpha_get_thread_num();
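    // In skyline storage the last value of each pointer range is the
    // diagonal element, so diag[r] = values[pointers[r + 1] - 1].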
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for (ALPHA_INT r = 0; r < m; r++)
{
const ALPHA_INT indx = A->pointers[r + 1] - 1;
diag[r] = A->values[indx];
}
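    // Backward substitution, independent per right-hand-side column:
    // y[c] = (alpha * x[c] - sum_{ic > c} A[c][ic] * y[ic]) / diag[c],
    // where A[c][ic] lives at values[pointers[ic + 1] - (ic - c) - 1]
    // whenever it falls inside column ic's skyline.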
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
{
for (ALPHA_INT c = A->cols - 1; c >= 0; c--)
{
ALPHA_Number temp;
alpha_setzero(temp);
for (ALPHA_INT ic = A->cols - 1; ic > c; ic--)
{
ALPHA_INT start = A->pointers[ic];
ALPHA_INT end = A->pointers[ic + 1];
ALPHA_INT eles_num = ic - c;
if(end - eles_num - 1 >= start)
alpha_madde(temp, A->values[end - eles_num - 1], y[ic * ldy + out_y_col]);
}
ALPHA_Number t;
alpha_mul(t, alpha, x[c * ldx + out_y_col]);
alpha_sub(t, t, temp);
alpha_div(y[c * ldy + out_y_col], t, diag[c]);
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
bitmap.h
|
/*!
* Copyright 2014 by Contributors
* \file bitmap.h
 * \brief a simple implementation of a bitmap
* NOTE: bitmap is only threadsafe per word access, remember this when using bitmap
* \author Tianqi Chen
*/
#ifndef XGBOOST_COMMON_BITMAP_H_
#define XGBOOST_COMMON_BITMAP_H_
#include <dmlc/omp.h>
#include <vector>
namespace xgboost {
namespace common {
/*! \brief bit map that contains set of bit indicators */
struct BitMap {
/*! \brief internal data structure */
std::vector<uint32_t> data;
/*!
* \brief resize the bitmap to be certain size
* \param size the size of bitmap
*/
inline void Resize(size_t size) {
data.resize((size + 31U) >> 5, 0);
}
/*!
* \brief query the i-th position of bitmap
* \param i the position in
*/
inline bool Get(size_t i) const {
return (data[i >> 5] >> (i & 31U)) & 1U;
}
/*!
* \brief set i-th position to true
* \param i position index
*/
inline void SetTrue(size_t i) {
    data[i >> 5] |= (1U << (i & 31U));
}
  /*! \brief initialize the value of the bit map from a vector of int used as 0/1 flags */
inline void InitFromBool(const std::vector<int>& vec) {
this->Resize(vec.size());
// parallel over the full cases
auto nsize = static_cast<bst_omp_uint>(vec.size() / 32);
#pragma omp parallel for schedule(static)
for (bst_omp_uint i = 0; i < nsize; ++i) {
uint32_t res = 0;
for (int k = 0; k < 32; ++k) {
int bit = vec[(i << 5) | k];
res |= (bit << k);
}
data[i] = res;
}
    // handle the remaining bits of a partial trailing word; nsize counts
    // full 32-bit words, so the tail starts at bit nsize * 32
    if (static_cast<size_t>(nsize) * 32 != vec.size()) data.back() = 0;
    for (size_t i = static_cast<size_t>(nsize) * 32; i < vec.size(); ++i) {
      if (vec[i]) this->SetTrue(i);
    }
  }
}
/*! \brief clear the bitmap, set all places to false */
inline void Clear() {
std::fill(data.begin(), data.end(), 0U);
}
};
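// Usage sketch (illustrative only, not part of the original header):
//   BitMap bm;
//   bm.Resize(100);            // allocates (100 + 31) / 32 = 4 words
//   bm.SetTrue(42);
//   bool is_set = bm.Get(42);  // true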
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_BITMAP_H_
|
CGOpenMPRuntime.h
|
//===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#include "CGValue.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/AtomicOrdering.h"
namespace llvm {
class ArrayType;
class Constant;
class FunctionType;
class GlobalVariable;
class StructType;
class Type;
class Value;
class OpenMPIRBuilder;
} // namespace llvm
namespace clang {
class Expr;
class OMPDependClause;
class OMPExecutableDirective;
class OMPLoopDirective;
class VarDecl;
class OMPDeclareReductionDecl;
class IdentifierInfo;
namespace CodeGen {
class Address;
class CodeGenFunction;
class CodeGenModule;
/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
/// region.
class PrePostActionTy {
public:
explicit PrePostActionTy() {}
virtual void Enter(CodeGenFunction &CGF) {}
virtual void Exit(CodeGenFunction &CGF) {}
virtual ~PrePostActionTy() {}
};
/// Class that provides a way to call the simple version of codegen for an
/// OpenMP region, or an advanced one with possible pre|post-actions in codegen.
class RegionCodeGenTy final {
intptr_t CodeGen;
typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &);
CodeGenTy Callback;
mutable PrePostActionTy *PrePostAction;
RegionCodeGenTy() = delete;
RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete;
template <typename Callable>
static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF,
PrePostActionTy &Action) {
return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action);
}
public:
template <typename Callable>
RegionCodeGenTy(
Callable &&CodeGen,
std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>,
RegionCodeGenTy>::value> * = nullptr)
: CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
Callback(CallbackFn<std::remove_reference_t<Callable>>),
PrePostAction(nullptr) {}
void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
void operator()(CodeGenFunction &CGF) const;
};
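// An illustrative sketch (not from the original header) of how a region
// codegen callback and a pre|post-action are typically wired together,
// assuming a CodeGenFunction &CGF is in scope:
//
//   struct NoteAction final : PrePostActionTy {
//     void Enter(CodeGenFunction &CGF) override { /* emit prologue */ }
//     void Exit(CodeGenFunction &CGF) override { /* emit epilogue */ }
//   };
//   auto &&BodyGen = [](CodeGenFunction &CGF, PrePostActionTy &Action) {
//     Action.Enter(CGF);
//     // ... emit the region body ...
//     Action.Exit(CGF);
//   };
//   RegionCodeGenTy RCG(BodyGen);
//   NoteAction Action;
//   RCG.setAction(Action);
//   RCG(CGF); // runs the callback with the attached action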
struct OMPTaskDataTy final {
SmallVector<const Expr *, 4> PrivateVars;
SmallVector<const Expr *, 4> PrivateCopies;
SmallVector<const Expr *, 4> FirstprivateVars;
SmallVector<const Expr *, 4> FirstprivateCopies;
SmallVector<const Expr *, 4> FirstprivateInits;
SmallVector<const Expr *, 4> LastprivateVars;
SmallVector<const Expr *, 4> LastprivateCopies;
SmallVector<const Expr *, 4> ReductionVars;
SmallVector<const Expr *, 4> ReductionOrigs;
SmallVector<const Expr *, 4> ReductionCopies;
SmallVector<const Expr *, 4> ReductionOps;
struct DependData {
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
const Expr *IteratorExpr = nullptr;
SmallVector<const Expr *, 4> DepExprs;
explicit DependData() = default;
DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr)
: DepKind(DepKind), IteratorExpr(IteratorExpr) {}
};
SmallVector<DependData, 4> Dependences;
llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule;
llvm::PointerIntPair<llvm::Value *, 1, bool> Priority;
llvm::Value *Reductions = nullptr;
unsigned NumberOfParts = 0;
bool Tied = true;
bool Nogroup = false;
bool IsReductionWithTaskMod = false;
bool IsWorksharingReduction = false;
};
/// Class intended to support codegen of all kinds of reduction clauses.
class ReductionCodeGen {
private:
/// Data required for codegen of reduction clauses.
struct ReductionData {
/// Reference to the item shared between tasks to reduce into.
const Expr *Shared = nullptr;
/// Reference to the original item.
const Expr *Ref = nullptr;
/// Helper expression for generation of private copy.
const Expr *Private = nullptr;
  /// Helper expression for generation of the reduction operation.
const Expr *ReductionOp = nullptr;
ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private,
const Expr *ReductionOp)
: Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) {
}
};
/// List of reduction-based clauses.
SmallVector<ReductionData, 4> ClausesData;
/// List of addresses of shared variables/expressions.
SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses;
/// List of addresses of original variables/expressions.
SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses;
/// Sizes of the reduction items in chars.
SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes;
/// Base declarations for the reduction items.
SmallVector<const VarDecl *, 4> BaseDecls;
/// Emits lvalue for shared expression.
LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E);
/// Emits upper bound for shared expression (if array section).
LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E);
/// Performs aggregate initialization.
/// \param N Number of reduction item in the common list.
/// \param PrivateAddr Address of the corresponding private item.
/// \param SharedLVal Address of the original shared variable.
/// \param DRD Declare reduction construct used for reduction item.
void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N,
Address PrivateAddr, LValue SharedLVal,
const OMPDeclareReductionDecl *DRD);
public:
ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> ReductionOps);
/// Emits lvalue for the shared and original reduction item.
/// \param N Number of the reduction item.
void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N);
/// Emits the code for the variable-modified type, if required.
/// \param N Number of the reduction item.
void emitAggregateType(CodeGenFunction &CGF, unsigned N);
/// Emits the code for the variable-modified type, if required.
/// \param N Number of the reduction item.
/// \param Size Size of the type in chars.
void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size);
/// Performs initialization of the private copy for the reduction item.
/// \param N Number of the reduction item.
/// \param PrivateAddr Address of the corresponding private item.
/// \param DefaultInit Default initialization sequence that should be
/// performed if no reduction specific initialization is found.
/// \param SharedLVal Address of the original shared variable.
void
emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr,
LValue SharedLVal,
llvm::function_ref<bool(CodeGenFunction &)> DefaultInit);
/// Returns true if the private copy requires cleanups.
bool needCleanups(unsigned N);
/// Emits cleanup code for the reduction item.
/// \param N Number of the reduction item.
/// \param PrivateAddr Address of the corresponding private item.
void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr);
  /// Adjusts \p PrivateAddr for using instead of the original variable
/// address in normal operations.
/// \param N Number of the reduction item.
/// \param PrivateAddr Address of the corresponding private item.
Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
Address PrivateAddr);
/// Returns LValue for the reduction item.
LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; }
/// Returns LValue for the original reduction item.
LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; }
/// Returns the size of the reduction item (in chars and total number of
/// elements in the item), or nullptr, if the size is a constant.
std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const {
return Sizes[N];
}
/// Returns the base declaration of the reduction item.
const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; }
/// Returns the base declaration of the reduction item.
const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; }
/// Returns true if the initialization of the reduction item uses initializer
/// from declare reduction construct.
bool usesReductionInitializer(unsigned N) const;
};
class CGOpenMPRuntime {
public:
  /// Allows disabling the automatic handling of functions used in target
  /// regions as those marked as `omp declare target`.
class DisableAutoDeclareTargetRAII {
CodeGenModule &CGM;
bool SavedShouldMarkAsGlobal;
public:
DisableAutoDeclareTargetRAII(CodeGenModule &CGM);
~DisableAutoDeclareTargetRAII();
};
/// Manages list of nontemporal decls for the specified directive.
class NontemporalDeclsRAII {
CodeGenModule &CGM;
const bool NeedToPush;
public:
NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S);
~NontemporalDeclsRAII();
};
  /// Maps the expression for the lastprivate variable to the global copy used
  /// to store the new value, because original variables are not mapped in
  /// inner parallel regions. Only private copies are captured, but we also
  /// need to store the private copy at a shared address.
  /// Also stores the expression for the private loop counter and its
  /// threadprivate name.
struct LastprivateConditionalData {
llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>>
DeclToUniqueName;
LValue IVLVal;
llvm::Function *Fn = nullptr;
bool Disabled = false;
};
/// Manages list of lastprivate conditional decls for the specified directive.
class LastprivateConditionalRAII {
enum class ActionToDo {
DoNotPush,
PushAsLastprivateConditional,
DisableLastprivateConditional,
};
CodeGenModule &CGM;
ActionToDo Action = ActionToDo::DoNotPush;
/// Check and try to disable analysis of inner regions for changes in
/// lastprivate conditional.
void tryToDisableInnerAnalysis(const OMPExecutableDirective &S,
llvm::DenseSet<CanonicalDeclPtr<const Decl>>
&NeedToAddForLPCsAsDisabled) const;
LastprivateConditionalRAII(CodeGenFunction &CGF,
const OMPExecutableDirective &S);
public:
explicit LastprivateConditionalRAII(CodeGenFunction &CGF,
const OMPExecutableDirective &S,
LValue IVLVal);
static LastprivateConditionalRAII disable(CodeGenFunction &CGF,
const OMPExecutableDirective &S);
~LastprivateConditionalRAII();
};
llvm::OpenMPIRBuilder &getOMPBuilder() { return OMPBuilder; }
protected:
CodeGenModule &CGM;
StringRef FirstSeparator, Separator;
/// Constructor allowing to redefine the name separator for the variables.
explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
StringRef Separator);
/// Creates offloading entry for the provided entry ID \a ID,
/// address \a Addr, size \a Size, and flags \a Flags.
virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
uint64_t Size, int32_t Flags,
llvm::GlobalValue::LinkageTypes Linkage);
/// Helper to emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Lambda codegen specific to an accelerator device.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emits object of ident_t type with info for source location.
/// \param Flags Flags for OpenMP location.
///
llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
unsigned Flags = 0);
/// Returns pointer to ident_t type.
llvm::Type *getIdentTyPointerTy();
/// Gets thread id value for the current thread.
///
llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);
/// Get the function name of an outlined region.
// The name can be customized depending on the target.
//
virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }
/// Emits \p Callee function call with arguments \p Args with location \p Loc.
void emitCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee Callee,
ArrayRef<llvm::Value *> Args = llvm::None) const;
  /// Emits the address of the word in memory where the current thread id is
  /// stored.
virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);
void setLocThreadIdInsertPt(CodeGenFunction &CGF,
bool AtCurrentPoint = false);
void clearLocThreadIdInsertPt(CodeGenFunction &CGF);
/// Check if the default location must be constant.
/// Default is false to support OMPT/OMPD.
virtual bool isDefaultLocationConstant() const { return false; }
/// Returns additional flags that can be stored in reserved_2 field of the
/// default location.
virtual unsigned getDefaultLocationReserved2Flags() const { return 0; }
  /// Returns default flags for the barriers depending on the directive, for
  /// which this barrier is going to be emitted.
static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind);
/// Get the LLVM type for the critical name.
llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;}
/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
llvm::Value *getCriticalRegionLock(StringRef CriticalName);
private:
/// An OpenMP-IR-Builder instance.
llvm::OpenMPIRBuilder OMPBuilder;
/// Map for SourceLocation and OpenMP runtime library debug locations.
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy;
OpenMPDebugLocMapTy OpenMPDebugLocMap;
/// The type for a microtask which gets passed to __kmpc_fork_call().
/// Original representation is:
/// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
llvm::FunctionType *Kmpc_MicroTy = nullptr;
/// Stores debug location and ThreadID for the function.
struct DebugLocThreadIdTy {
llvm::Value *DebugLoc;
llvm::Value *ThreadID;
/// Insert point for the service instructions.
llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr;
};
/// Map of local debug location, ThreadId and functions.
typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
OpenMPLocThreadIDMapTy;
OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
/// Map of UDRs and corresponding combiner/initializer.
typedef llvm::DenseMap<const OMPDeclareReductionDecl *,
std::pair<llvm::Function *, llvm::Function *>>
UDRMapTy;
UDRMapTy UDRMap;
/// Map of functions and locally defined UDRs.
typedef llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareReductionDecl *, 4>>
FunctionUDRMapTy;
FunctionUDRMapTy FunctionUDRMap;
/// Map from the user-defined mapper declaration to its corresponding
/// functions.
llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap;
/// Map of functions and their local user-defined mappers.
using FunctionUDMMapTy =
llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareMapperDecl *, 4>>;
FunctionUDMMapTy FunctionUDMMap;
/// Maps local variables marked as lastprivate conditional to their internal
/// types.
llvm::DenseMap<llvm::Function *,
llvm::DenseMap<CanonicalDeclPtr<const Decl>,
std::tuple<QualType, const FieldDecl *,
const FieldDecl *, LValue>>>
LastprivateConditionalToTypes;
/// Type kmp_critical_name, originally defined as typedef kmp_int32
/// kmp_critical_name[8];
llvm::ArrayType *KmpCriticalNameTy;
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
InternalVars;
/// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
llvm::Type *KmpRoutineEntryPtrTy = nullptr;
QualType KmpRoutineEntryPtrQTy;
/// Type typedef struct kmp_task {
/// void * shareds; /**< pointer to block of pointers to
/// shared vars */
/// kmp_routine_entry_t routine; /**< pointer to routine to call for
/// executing task */
/// kmp_int32 part_id; /**< part id for the task */
/// kmp_routine_entry_t destructors; /* pointer to function to invoke
/// deconstructors of firstprivate C++ objects */
/// } kmp_task_t;
QualType KmpTaskTQTy;
/// Saved kmp_task_t for task directive.
QualType SavedKmpTaskTQTy;
/// Saved kmp_task_t for taskloop-based directive.
QualType SavedKmpTaskloopTQTy;
/// Type typedef struct kmp_depend_info {
/// kmp_intptr_t base_addr;
/// size_t len;
/// struct {
/// bool in:1;
/// bool out:1;
/// } flags;
/// } kmp_depend_info_t;
QualType KmpDependInfoTy;
/// Type typedef struct kmp_task_affinity_info {
/// kmp_intptr_t base_addr;
/// size_t len;
/// struct {
/// bool flag1 : 1;
/// bool flag2 : 1;
/// kmp_int32 reserved : 30;
/// } flags;
/// } kmp_task_affinity_info_t;
QualType KmpTaskAffinityInfoTy;
/// struct kmp_dim { // loop bounds info casted to kmp_int64
/// kmp_int64 lo; // lower
/// kmp_int64 up; // upper
/// kmp_int64 st; // stride
/// };
QualType KmpDimTy;
/// Type struct __tgt_offload_entry{
/// void *addr; // Pointer to the offload entry info.
/// // (function or global)
/// char *name; // Name of the function or global.
  ///   size_t      size;       // Size of the entry info (0 if it is a function).
/// int32_t flags;
/// int32_t reserved;
/// };
QualType TgtOffloadEntryQTy;
/// Entity that registers the offloading constants that were emitted so
/// far.
class OffloadEntriesInfoManagerTy {
CodeGenModule &CGM;
/// Number of entries registered so far.
unsigned OffloadingEntriesNum = 0;
public:
/// Base class of the entries info.
class OffloadEntryInfo {
public:
/// Kind of a given entry.
enum OffloadingEntryInfoKinds : unsigned {
/// Entry is a target region.
OffloadingEntryInfoTargetRegion = 0,
/// Entry is a declare target variable.
OffloadingEntryInfoDeviceGlobalVar = 1,
/// Invalid entry info.
OffloadingEntryInfoInvalid = ~0u
};
protected:
OffloadEntryInfo() = delete;
explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
uint32_t Flags)
: Flags(Flags), Order(Order), Kind(Kind) {}
~OffloadEntryInfo() = default;
public:
bool isValid() const { return Order != ~0u; }
unsigned getOrder() const { return Order; }
OffloadingEntryInfoKinds getKind() const { return Kind; }
uint32_t getFlags() const { return Flags; }
void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
llvm::Constant *getAddress() const {
return cast_or_null<llvm::Constant>(Addr);
}
void setAddress(llvm::Constant *V) {
assert(!Addr.pointsToAliveValue() && "Address has been set before!");
Addr = V;
}
static bool classof(const OffloadEntryInfo *Info) { return true; }
private:
/// Address of the entity that has to be mapped for offloading.
llvm::WeakTrackingVH Addr;
/// Flags associated with the device global.
uint32_t Flags = 0u;
/// Order this entry was emitted.
unsigned Order = ~0u;
OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
};
  /// Return true if there are no entries defined.
bool empty() const;
/// Return number of entries defined so far.
unsigned size() const { return OffloadingEntriesNum; }
OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {}
//
// Target region entries related.
//
/// Kind of the target registry entry.
enum OMPTargetRegionEntryKind : uint32_t {
/// Mark the entry as target region.
OMPTargetRegionEntryTargetRegion = 0x0,
/// Mark the entry as a global constructor.
OMPTargetRegionEntryCtor = 0x02,
/// Mark the entry as a global destructor.
OMPTargetRegionEntryDtor = 0x04,
};
/// Target region entries info.
class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
/// Address that can be used as the ID of the entry.
llvm::Constant *ID = nullptr;
public:
OffloadEntryInfoTargetRegion()
: OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
explicit OffloadEntryInfoTargetRegion(unsigned Order,
llvm::Constant *Addr,
llvm::Constant *ID,
OMPTargetRegionEntryKind Flags)
: OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
ID(ID) {
setAddress(Addr);
}
llvm::Constant *getID() const { return ID; }
void setID(llvm::Constant *V) {
assert(!ID && "ID has been set before!");
ID = V;
}
static bool classof(const OffloadEntryInfo *Info) {
return Info->getKind() == OffloadingEntryInfoTargetRegion;
}
};
/// Initialize target region entry.
void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
unsigned Order);
/// Register target region entry.
void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
llvm::Constant *Addr, llvm::Constant *ID,
OMPTargetRegionEntryKind Flags);
/// Return true if a target region entry with the provided information
/// exists.
bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum) const;
  /// Applies action \a Action on all registered entries.
typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
const OffloadEntryInfoTargetRegion &)>
OffloadTargetRegionEntryInfoActTy;
void actOnTargetRegionEntriesInfo(
const OffloadTargetRegionEntryInfoActTy &Action);
//
// Device global variable entries related.
//
  /// Kind of the global variable entry.
enum OMPTargetGlobalVarEntryKind : uint32_t {
  /// Mark the entry as a declare target 'to' variable.
OMPTargetGlobalVarEntryTo = 0x0,
  /// Mark the entry as a declare target 'link' variable.
OMPTargetGlobalVarEntryLink = 0x1,
};
/// Device global variable entries info.
class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
  /// Size of the global variable.
CharUnits VarSize;
llvm::GlobalValue::LinkageTypes Linkage;
public:
OffloadEntryInfoDeviceGlobalVar()
: OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {}
explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
OMPTargetGlobalVarEntryKind Flags)
: OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {}
explicit OffloadEntryInfoDeviceGlobalVar(
unsigned Order, llvm::Constant *Addr, CharUnits VarSize,
OMPTargetGlobalVarEntryKind Flags,
llvm::GlobalValue::LinkageTypes Linkage)
: OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags),
VarSize(VarSize), Linkage(Linkage) {
setAddress(Addr);
}
CharUnits getVarSize() const { return VarSize; }
void setVarSize(CharUnits Size) { VarSize = Size; }
llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; }
static bool classof(const OffloadEntryInfo *Info) {
return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
}
};
/// Initialize device global variable entry.
void initializeDeviceGlobalVarEntryInfo(StringRef Name,
OMPTargetGlobalVarEntryKind Flags,
unsigned Order);
/// Register device global variable entry.
void
registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
CharUnits VarSize,
OMPTargetGlobalVarEntryKind Flags,
llvm::GlobalValue::LinkageTypes Linkage);
/// Checks if the variable with the given name has been registered already.
bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
}
/// Applies action \a Action on all registered entries.
typedef llvm::function_ref<void(StringRef,
const OffloadEntryInfoDeviceGlobalVar &)>
OffloadDeviceGlobalVarEntryInfoActTy;
void actOnDeviceGlobalVarEntriesInfo(
const OffloadDeviceGlobalVarEntryInfoActTy &Action);
private:
  // Storage for target region entries. The storage is to be indexed by
  // device ID, file ID, parent function name and line number.
typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
OffloadEntriesTargetRegionPerLine;
typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
OffloadEntriesTargetRegionPerParentName;
typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
OffloadEntriesTargetRegionPerFile;
typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
OffloadEntriesTargetRegionPerDevice;
typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
  /// Storage for device global variable entries. The storage is to be
/// indexed by mangled name.
typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar>
OffloadEntriesDeviceGlobalVarTy;
OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
};
OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
bool ShouldMarkAsGlobal = true;
/// List of the emitted declarations.
llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls;
/// List of the global variables with their addresses that should not be
/// emitted for the target.
llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables;
/// List of variables that can become declare target implicitly and, thus,
/// must be emitted.
llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;
using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>;
/// Stack for list of declarations in current context marked as nontemporal.
/// The set is the union of all current stack elements.
llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack;
/// Stack for list of addresses of declarations in current context marked as
/// lastprivate conditional. The set is the union of all current stack
/// elements.
llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;
/// Flag for keeping track of whether a requires unified_shared_memory
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false;
/// Atomic ordering from the omp requires directive.
llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
/// Flag for keeping track of whether a target region has been emitted.
bool HasEmittedTargetRegion = false;
/// Flag for keeping track of whether a declare target region (device
/// routine) has been emitted.
bool HasEmittedDeclareTargetRegion = false;
/// Loads all the offload entries information from the host IR
/// metadata.
void loadOffloadInfoMetadata();
/// Returns __tgt_offload_entry type.
QualType getTgtOffloadEntryQTy();
/// Start scanning from statement \a S and emit all target regions
/// found along the way.
/// \param S Starting statement.
/// \param ParentName Name of the function declaration that is being scanned.
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);
/// Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);
/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();
/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_fini_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
bool IVSigned);
/// If the specified mangled name is not in the module, create and
/// return threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
/// \param VD Threadprivate variable.
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);
/// Gets (if a variable with the given name already exists) or creates an
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by a null value.
/// \param Ty Type of the global variable. If it already exists, the type
/// must be the same.
/// \param Name Name of the variable.
llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
const llvm::Twine &Name,
unsigned AddressSpace = 0);
/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;
/// Set of declare target variables with the generated initializer.
llvm::StringSet<> DeclareTargetWithDefinition;
/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
llvm::Value *Ctor, llvm::Value *CopyCtor,
llvm::Value *Dtor, SourceLocation Loc);
/// Emit the array initialization or deletion portion for user-defined mapper
/// code generation.
void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF,
llvm::Value *Handle, llvm::Value *BasePtr,
llvm::Value *Ptr, llvm::Value *Size,
llvm::Value *MapType, CharUnits ElementSize,
llvm::BasicBlock *ExitBB, bool IsInit);
struct TaskResultTy {
llvm::Value *NewTask = nullptr;
llvm::Function *TaskEntry = nullptr;
llvm::Value *NewTaskNewTaskTTy = nullptr;
LValue TDBase;
const RecordDecl *KmpTaskTQTyRD = nullptr;
llvm::Value *TaskDupFn = nullptr;
};
/// Emit task region for the task directive. The task region is emitted in
/// several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const OMPTaskDataTy &Data);
/// Returns default address space for the constant firstprivates, 0 by
/// default.
virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; }
/// Emit code that pushes the trip count of loops associated with constructs
/// 'target teams distribute' and 'teams distribute parallel for'.
/// \param SizeEmitter Emits the int64 value for the number of iterations of
/// the associated loop.
void emitTargetNumIterationsCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Value *DeviceID,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit update for lastprivate conditional data.
void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal,
StringRef UniqueDeclName, LValue LVal,
SourceLocation Loc);
/// Returns the number of elements and the address of the depobj
/// dependency array.
/// \return Number of elements in depobj array and the pointer to the array of
/// dependencies.
std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF,
LValue DepobjLVal,
SourceLocation Loc);
public:
explicit CGOpenMPRuntime(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM, ".", ".") {}
virtual ~CGOpenMPRuntime() {}
virtual void clear();
/// Emits code for OpenMP 'if' clause using specified \a CodeGen
/// function. Here is the logic:
/// if (Cond) {
/// ThenGen();
/// } else {
/// ElseGen();
/// }
void emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
const RegionCodeGenTy &ThenGen,
const RegionCodeGenTy &ElseGen);
/// Checks if the \p Body is the \a CompoundStmt and returns its child
/// statement iff there is only one that is not evaluatable at compile
/// time.
static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body);
/// Get the platform-specific name separator.
std::string getName(ArrayRef<StringRef> Parts) const;
/// Emit code for the specified user defined reduction construct.
virtual void emitUserDefinedReduction(CodeGenFunction *CGF,
const OMPDeclareReductionDecl *D);
/// Get combiner/initializer for the specified user-defined reduction, if any.
virtual std::pair<llvm::Function *, llvm::Function *>
getUserDefinedReduction(const OMPDeclareReductionDecl *D);
/// Emit the function for the user defined mapper construct.
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
CodeGenFunction *CGF = nullptr);
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitTeamsOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
virtual llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts);
/// Cleans up references to the objects in finished function.
///
virtual void functionFinished(CodeGenFunction &CGF);
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond);
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr);
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
virtual void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc);
/// Emits code for a taskyield directive.
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
virtual void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc);
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
virtual void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen,
SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps);
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
virtual void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads);
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true if a simple barrier call must be emitted,
/// false if the runtime class decides which one to emit (simple or with
/// cancellation checks).
///
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind,
bool EmitChecks = true,
bool ForceSimpleCall = false);
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of distribute directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is dynamic.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.
///
virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;
/// struct with the values to be passed to the dispatch runtime function
struct DispatchRTInput {
/// Loop lower bound
llvm::Value *LB = nullptr;
/// Loop upper bound
llvm::Value *UB = nullptr;
/// Chunk size specified using 'schedule' clause (nullptr if chunk
/// was not specified)
llvm::Value *Chunk = nullptr;
DispatchRTInput() = default;
DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk)
: LB(LB), UB(UB), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before the start
/// of the loop.
/// This is used for non-static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before the start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, a chunk of 1 will be used.
///
virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues);
/// Struct with the values to be passed to the static runtime function
struct StaticRTInput {
/// Size of the iteration variable in bits.
unsigned IVSize = 0;
/// Sign of the iteration variable.
bool IVSigned = false;
/// true if loop is ordered, false otherwise.
bool Ordered = false;
/// Address of the output variable in which the flag of the last iteration
/// is returned.
Address IL = Address::invalid();
/// Address of the output variable in which the lower iteration number is
/// returned.
Address LB = Address::invalid();
/// Address of the output variable in which the upper iteration number is
/// returned.
Address UB = Address::invalid();
/// Address of the output variable in which the stride value, needed to
/// generate the static_chunked scheduled loop, is returned.
Address ST = Address::invalid();
/// Value of the chunk for the static_chunked scheduled loop. For the
/// default (nullptr) value, a chunk of 1 will be used.
llvm::Value *Chunk = nullptr;
StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL,
Address LB, Address UB, Address ST,
llvm::Value *Chunk = nullptr)
: IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB),
UB(UB), ST(ST), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before the start
/// of the loop.
///
/// This is used only in the case of a static schedule, when the user did not
/// specify an ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before the start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values);
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitDistributeStaticInit(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values);
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF,
SourceLocation Loc, unsigned IVSize,
bool IVSigned);
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind);
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned,
Address IL, Address LB,
Address UB, Address ST);
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
virtual void emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc);
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
virtual void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc);
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
const VarDecl *VD,
Address VDAddr,
SourceLocation Loc);
/// Returns the address of the variable marked as declare target with link
/// clause OR as declare target with to clause and unified memory.
virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD);
/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
/// threadprivate variable (if it is not constant) and registers destructor
/// for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
virtual llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr);
/// Emit a code for initialization of declare target variable.
/// \param VD Declare target variable.
/// \param Addr Address of the global variable \a VD.
/// \param PerformInit true if initialization expression is not constant.
virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD,
llvm::GlobalVariable *Addr,
bool PerformInit);
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name);
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc, llvm::AtomicOrdering AO);
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data);
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D,
llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds,
const Expr *IfCond, const OMPTaskDataTy &Data);
/// Emit code for the directive that does not require outlining.
///
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param HasCancel true if region has inner cancel directive, false
/// otherwise.
virtual void emitInlinedDirective(CodeGenFunction &CGF,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen,
bool HasCancel = false);
/// Emits reduction function.
/// \param ArgsType Array type containing pointers to reduction variables.
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
llvm::Function *emitReductionFunction(SourceLocation Loc,
llvm::Type *ArgsType,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps);
/// Emits single reduction combiner
void emitSingleReductionCombiner(CodeGenFunction &CGF,
const Expr *ReductionOp,
const Expr *PrivateRef,
const DeclRefExpr *LHS,
const DeclRefExpr *RHS);
struct ReductionOptionsTy {
bool WithNowait;
bool SimpleReduction;
OpenMPDirectiveKind ReductionKind;
};
/// Emit a code for reduction clause. Next code should be emitted for
/// reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options);
/// Emit a code for initialization of task reduction clause. Next code
/// should be emitted for reduction:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
/// For reduction clause with task modifier it emits the next call:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
/// red_data);
/// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data);
/// Emits the following code for reduction clause with task modifier:
/// \code
/// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
/// \endcode
virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
bool IsWorksharingReduction);
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions.
  /// \param RCG Allows reusing existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N);
  /// Get the address (of type `void *`) of the private copy of the reduction
  /// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal);
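  // Illustrative sketch (an assumption about the emitted IR, not part of this
  // interface): the lookup produced here is typically a runtime call of the
  // form
  //   void *priv = __kmpc_task_reduction_get_th_data(<gtid>, ReductionsPtr,
  //                                                  &<original item>);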
/// Emit code for 'taskwait' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
virtual void emitCancellationPointCall(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDirectiveKind CancelRegion);
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion);
  /// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
  /// Emit the target offloading code associated with \a D. The emitted
  /// code attempts to offload the execution to the device; in the event of
  /// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
  /// \param Device Expression evaluated in the device clause associated with
  /// the target directive (or null if no device clause is used), paired with
  /// the device clause modifier.
/// \param SizeEmitter Callback to emit number of iterations for loop-based
/// directives.
virtual void emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
virtual bool emitTargetFunctions(GlobalDecl GD);
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
virtual bool emitTargetGlobalVariable(GlobalDecl GD);
  /// Checks if the provided global decl \a VD is a declare target variable
  /// and registers it when emitting code for the host.
virtual void registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr);
/// Registers provided target firstprivate variable as global on the
/// target.
llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF,
const VarDecl *VD);
  /// Emit the global \a GD if it is meaningful for the target. Returns true
  /// if it was emitted successfully.
/// \param GD Global to scan.
virtual bool emitTargetGlobal(GlobalDecl GD);
  /// Creates and returns a registration function for when at least one
  /// requires directive was used in the current module.
llvm::Function *emitRequiresDirectiveRegFun();
/// Creates all the offload entries in the current compilation unit
/// along with the associated metadata.
void createOffloadEntriesAndInfoMetadata();
/// Emits code for teams call of the \a OutlinedFn with
  /// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
virtual void emitTeamsCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars);
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc);
/// Struct that keeps all the relevant information that should be kept
/// throughout a 'target data' region.
class TargetDataInfo {
    /// Set to true if device pointer information has to be obtained.
bool RequiresDevicePointerInfo = false;
public:
/// The array of base pointer passed to the runtime library.
llvm::Value *BasePointersArray = nullptr;
/// The array of section pointers passed to the runtime library.
llvm::Value *PointersArray = nullptr;
/// The array of sizes passed to the runtime library.
llvm::Value *SizesArray = nullptr;
/// The array of map types passed to the runtime library.
llvm::Value *MapTypesArray = nullptr;
/// The total number of pointers passed to the runtime library.
unsigned NumberOfPtrs = 0u;
    /// Map between a declaration of a capture and the corresponding base
/// pointer address where the runtime returns the device pointers.
llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;
explicit TargetDataInfo() {}
explicit TargetDataInfo(bool RequiresDevicePointerInfo)
: RequiresDevicePointerInfo(RequiresDevicePointerInfo) {}
/// Clear information about the data arrays.
void clearArrayInfo() {
BasePointersArray = nullptr;
PointersArray = nullptr;
SizesArray = nullptr;
MapTypesArray = nullptr;
NumberOfPtrs = 0u;
}
/// Return true if the current target data information has valid arrays.
bool isValid() {
return BasePointersArray && PointersArray && SizesArray &&
MapTypesArray && NumberOfPtrs;
}
bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
};
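  // Usage sketch (hypothetical caller, for illustration only): a 'target
  // data' region typically constructs one of these, lets emitTargetDataCalls
  // populate the arrays, and then consults it while the region is open:
  //   CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true);
  //   emitTargetDataCalls(CGF, D, IfCond, Device, CodeGen, Info);
  //   if (Info.isValid()) { /* arrays are ready to hand to the runtime */ }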
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
  /// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
virtual void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond, const Expr *Device,
const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info);
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device);
/// Marks function \a Fn with properly mangled versions of vector functions.
/// \param FD Function marked as 'declare simd'.
/// \param Fn LLVM function that must be marked with 'declare simd'
/// attributes.
virtual void emitDeclareSimdFunction(const FunctionDecl *FD,
llvm::Function *Fn);
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations);
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C);
  /// Translates the native parameter of the outlined function if this is
  /// required for the target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
virtual const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const {
return NativeParam;
}
  /// Gets the address of the native argument based on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
virtual Address getParameterAddress(CodeGenFunction &CGF,
const VarDecl *NativeParam,
const VarDecl *TargetParam) const;
/// Choose default schedule type and chunk value for the
/// dist_schedule clause.
virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind,
llvm::Value *&Chunk) const {}
/// Choose default schedule type and chunk value for the
/// schedule clause.
virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
const Expr *&ChunkExpr) const;
/// Emits call of the outlined function with the provided arguments,
/// translating these arguments to correct target-specific arguments.
virtual void
emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee OutlinedFn,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits OpenMP-specific function prolog.
/// Required for device constructs.
virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D);
/// Gets the OpenMP-specific address of the local variable.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD);
  /// Marks the declaration as already emitted for the device code and returns
  /// true if it was already marked, and false otherwise.
bool markAsGlobalTarget(GlobalDecl GD);
  /// Emit declare target variables that were marked for deferred emission.
void emitDeferredTargetDecls() const;
/// Adjust some parameters for the target-based directives, like addresses of
/// the variables captured by reference in lambdas.
virtual void
adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF,
const OMPExecutableDirective &D) const;
  /// Performs a check on the requires decl to ensure that the target
  /// architecture supports unified addressing.
virtual void processRequiresDirective(const OMPRequiresDecl *D);
/// Gets default memory ordering as specified in requires directive.
llvm::AtomicOrdering getDefaultMemoryOrdering() const;
/// Checks if the variable has associated OMPAllocateDeclAttr attribute with
/// the predefined allocator and translates it into the corresponding address
/// space.
virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS);
/// Return whether the unified_shared_memory has been specified.
bool hasRequiresUnifiedSharedMemory() const;
/// Checks if the \p VD variable is marked as nontemporal declaration in
/// current context.
bool isNontemporalDecl(const ValueDecl *VD) const;
/// Create specialized alloca to handle lastprivate conditionals.
Address emitLastprivateConditionalInit(CodeGenFunction &CGF,
const VarDecl *VD);
/// Checks if the provided \p LVal is lastprivate conditional and emits the
/// code to update the value of the original variable.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;
/// lp_a = ...;
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// \endcode
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
const Expr *LHS);
/// Checks if the lastprivate conditional was updated in inner region and
/// writes the value.
/// \code
/// lastprivate(conditional: a)
/// ...
  /// <type> a; bool Fired = false;
/// #pragma omp ... shared(a)
/// {
/// lp_a = ...;
/// Fired = true;
/// }
/// if (Fired) {
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// Fired = false;
/// }
/// \endcode
virtual void checkAndEmitSharedLastprivateConditional(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);
  /// Emits the final update of the original variable from the global copy
  /// used for the lastprivate conditional update, if any.
/// \param PrivLVal LValue for the private copy.
/// \param VD Original lastprivate declaration.
virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
LValue PrivLVal,
const VarDecl *VD,
SourceLocation Loc);
  /// Emits the list of dependencies based on the provided data (array of
  /// dependence/expression pairs).
  /// \returns Pointer to the first element of the array cast to VoidPtr type.
std::pair<llvm::Value *, Address>
emitDependClause(CodeGenFunction &CGF,
ArrayRef<OMPTaskDataTy::DependData> Dependencies,
SourceLocation Loc);
  /// Emits the list of dependencies based on the provided data (array of
  /// dependence/expression pairs) for the depobj construct. In this case, the
  /// variable is allocated dynamically.
  /// \returns Pointer to the first element of the array cast to VoidPtr type.
Address emitDepobjDependClause(CodeGenFunction &CGF,
const OMPTaskDataTy::DependData &Dependencies,
SourceLocation Loc);
/// Emits the code to destroy the dependency object provided in depobj
/// directive.
void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
SourceLocation Loc);
/// Updates the dependency kind in the specified depobj object.
/// \param DepobjLVal LValue for the main depobj object.
/// \param NewDepKind New dependency kind.
void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
OpenMPDependClauseKind NewDepKind, SourceLocation Loc);
/// Initializes user defined allocators specified in the uses_allocators
/// clauses.
void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator,
const Expr *AllocatorTraits);
/// Destroys user defined allocators specified in the uses_allocators clause.
void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator);
};
/// Class that supports emission of SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
~CGOpenMPSIMDRuntime() override {}
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitParallelOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts) override;
/// Emits code for parallel or serial call of the \a OutlinedFn with
  /// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond) override;
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr) override;
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc) override;
/// Emits code for a taskyield directive.
void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc) override;
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen, SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps) override;
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads) override;
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
  /// \param ForceSimpleCall true if a simple barrier call must be emitted, false if
/// runtime class decides which one to emit (simple or with cancellation
/// checks).
///
void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind, bool EmitChecks = true,
bool ForceSimpleCall = false) override;
  /// This is used for non-static scheduled types and when the ordered
  /// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
  /// For the default (nullptr) value, a chunk of 1 will be used.
///
void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues) override;
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
  /// This is used only in the case of a static schedule, when the user did
  /// not specify an ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values) override;
  /// Call the appropriate runtime routine to initialize the distribute loop
  /// before its start.
  ///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values) override;
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned) override;
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind) override;
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned, Address IL,
Address LB, Address UB, Address ST) override;
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads,
SourceLocation Loc) override;
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc) override;
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD,
Address VDAddr, SourceLocation Loc) override;
  /// Emit code for the initialization of a threadprivate variable. It emits
  /// a call to the runtime library which adds the initial value to the newly
  /// created threadprivate variable (if it is not constant) and registers a
  /// destructor for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr) override;
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name) override;
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc, llvm::AtomicOrdering AO) override;
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
  /// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
  /// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
  /// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
  /// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D, llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
  /// Emit code for the reduction clause. The following code should be
  /// emitted for the reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options) override;
  /// Emit code for the initialization of the task reduction clause. The
  /// following code should be emitted for the reduction:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
  /// For a reduction clause with the task modifier it emits the following call:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
/// red_data);
/// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data) override;
/// Emits the following code for reduction clause with task modifier:
/// \code
/// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
/// \endcode
void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
bool IsWorksharingReduction) override;
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
  /// initializer/combiner/finalizer functions, and emits a threadprivate variable to
/// store the pointer to the original reduction item for the custom
/// initializer defined by declare reduction construct.
  /// \param RCG Allows reusing existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N) override;
  /// Get the address (of type `void *`) of the private copy of the reduction
  /// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal) override;
/// Emit code for 'taskwait' directive.
void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind CancelRegion) override;
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion) override;
  /// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) override;
  /// Emit the target offloading code associated with \a D. The emitted
  /// code attempts to offload the execution to the device; in the event of
  /// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
  /// \param Device Expression evaluated in the device clause associated with
  /// the target directive (or null if no device clause is used), paired with
  /// the device clause modifier.
void emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter) override;
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
bool emitTargetFunctions(GlobalDecl GD) override;
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
bool emitTargetGlobalVariable(GlobalDecl GD) override;
  /// Emit the global \a GD if it is meaningful for the target. Returns true
  /// if it was emitted successfully.
/// \param GD Global to scan.
bool emitTargetGlobal(GlobalDecl GD) override;
/// Emits code for teams call of the \a OutlinedFn with
  /// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars) override;
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc) override;
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
  /// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D, const Expr *IfCond,
const Expr *Device, const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info) override;
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device) override;
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations) override;
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C) override;
  /// Translates the native parameter of the outlined function if this is
  /// required for the target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const override;
  /// Gets the address of the native argument based on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam,
const VarDecl *TargetParam) const override;
/// Gets the OpenMP-specific address of the local variable.
Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) override {
return Address::invalid();
}
};
} // namespace CodeGen
} // namespace clang
#endif
|
Forza.h
|
#ifndef Forza_h__
#define Forza_h__
// Headers for the CRT functions and intrinsics used below; the Windows types
// (LPVOID, PBYTE, DWORD) and the BenchBase/REGISTER harness are assumed to be
// provided by the including translation unit.
#include <cstdint>
#include <cstring>
#include <climits>
#include <iostream>
#include <immintrin.h>
#include <intrin.h>
struct PatternData
{
uint32_t Count;
uint32_t Size;
uint32_t Length[16];
uint32_t Skip[16];
__m128i Value[16];
};
void GeneratePattern(const char* Signature, const char* Mask, PatternData* Out)
{
auto l = strlen(Mask);
Out->Count = 0;
for (auto i = 0; i < l; i++)
{
if (Mask[i] == '?')
continue;
auto ml = 0, sl = 0;
for (auto j = i; j < l; j++)
{
if (Mask[j] == '?' || sl >= 16)
break;
sl++;
}
for (auto j = i + sl; j < l; j++)
{
if (Mask[j] != '?')
break;
ml++;
}
auto c = Out->Count;
Out->Length[c] = sl;
Out->Skip[c] = sl + ml;
Out->Value[c] = _mm_loadu_si128((const __m128i*)((uint8_t*)Signature + i));
Out->Count++;
i += sl - 1;
}
Out->Size = l;
}
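// Worked example (illustrative): with Mask "xxxxxxx????xxx" (14 bytes, the
// Tests::First mask below), GeneratePattern produces two blocks:
//   Length[0] = 7, Skip[0] = 11  (7 fixed bytes followed by 4 wildcards)
//   Length[1] = 3, Skip[1] = 3   (the trailing 3 fixed bytes)
// with Out->Count = 2 and Out->Size = 14.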
__forceinline bool Matches(const uint8_t* Data, PatternData* Patterns)
{
	// The caller locates the first block before calling, so a pattern with a
	// single block has nothing further to check.
	if (Patterns->Count < 2)
		return true;
	auto k = Data + Patterns->Skip[0];
	for (uint32_t i = 1; i < Patterns->Count; i++)
{
auto l = Patterns->Length[i];
if (_mm_cmpestri(Patterns->Value[i], l, _mm_loadu_si128((const __m128i*)k), l, _SIDD_CMP_EQUAL_EACH | _SIDD_MASKED_NEGATIVE_POLARITY) != l)
break;
if (i + 1 == Patterns->Count)
return true;
k += Patterns->Skip[i];
}
return false;
}
uint8_t* FindEx(const uint8_t* Data, const uint32_t Length, const char* Signature, const char* Mask)
{
PatternData d;
GeneratePattern(Signature, Mask, &d);
auto out = static_cast<uint8_t*>(nullptr);
auto end = Data + Length - d.Size;
#pragma omp parallel for
for (intptr_t i = Length - 32; i >= 0; i -= 32)
{
		// 'break' is not permitted inside an OpenMP for loop; once a match
		// is found, the remaining iterations fall through cheaply instead.
		if (out != nullptr)
			continue;
auto p = Data + i;
auto b = _mm256_loadu_si256((const __m256i*)p);
if (_mm256_test_all_zeros(b, b) == 1)
continue;
auto f = _mm_cmpestri(d.Value[0], d.Length[0], _mm256_extractf128_si256(b, 0), 16, _SIDD_CMP_EQUAL_ORDERED);
if (f == 16)
{
f += _mm_cmpestri(d.Value[0], d.Length[0], _mm256_extractf128_si256(b, 1), 16, _SIDD_CMP_EQUAL_ORDERED);
if (f == 32)
continue;
}
PossibleMatch:
p += f;
if (p + d.Size > end)
{
			for (uint32_t j = 0; j < d.Size && j + i + f < Length; j++)
{
if (Mask[j] == 'x' && (uint8_t)Signature[j] != p[j])
break;
if (j + 1 == d.Size)
out = (uint8_t*)p;
}
continue;
}
if (Matches(p, &d))
out = (uint8_t*)p;
		if (out != nullptr)
			continue;
p++;
f = _mm_cmpestri(d.Value[0], d.Length[0], _mm_loadu_si128((const __m128i*)p), 16, _SIDD_CMP_EQUAL_ORDERED);
if (f < 16)
goto PossibleMatch;
}
return out;
}
void FindLargestArray(const char* Signature, const char* Mask, int Out[2])
{
uint32_t t1 = 0;
uint32_t t2 = strlen(Signature);
uint32_t len = strlen(Mask);
for (auto j = t2; j < len; j++)
{
if (Mask[j] != 'x')
continue;
auto count = strlen(&Signature[j]);
if (count > t2)
{
t1 = j;
t2 = count;
}
j += (count - 1);
}
Out[0] = t1;
Out[1] = t2;
}
uint8_t* Find(const uint8_t* Data, const uint32_t Length, const char* Signature, const char* Mask)
{
int d[2] = { 0 };
FindLargestArray(Signature, Mask, d);
const uint8_t len = static_cast<uint8_t>(strlen(Mask));
const uint8_t mbeg = static_cast<uint8_t>(d[0]);
const uint8_t mlen = static_cast<uint8_t>(d[1]);
uint8_t wildcard[UCHAR_MAX + 1] = { 0 };
for (auto i = mbeg; i < mbeg + mlen; i++)
wildcard[(uint8_t)Signature[i]] = 1;
	uint8_t mfirst = (uint8_t)Signature[mbeg];
for (int i = Length - len; i >= 0; i--)
{
uint8_t c = Data[i];
uint8_t w = wildcard[c];
auto k = 0;
while (w == 0 && i > mlen)
{
i -= mlen;
w = wildcard[Data[i]];
k = 1;
}
if (k == 1)
{
i++;
continue;
}
if (c != mfirst)
continue;
if (i - mbeg < 0 || i - mbeg + len > Length)
return nullptr;
		// Verify every fixed byte of the pattern against this candidate,
		// including the final byte.
		bool match = true;
		for (uint8_t j = 0; j < len; j++)
		{
			if (j == mbeg || Mask[j] != 'x')
				continue;
			if (Data[i - mbeg + j] != (uint8_t)Signature[j])
			{
				match = false;
				break;
			}
		}
		if (match)
			return (uint8_t*)(Data + i - mbeg);
}
return nullptr;
}
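// Usage sketch (hypothetical buffer and pattern, for illustration): both Find
// and FindEx take a raw byte range plus a signature/mask pair, where 'x'
// marks a byte that must match and '?' a wildcard:
//   uint8_t* hit = Find(buffer, bufferSize,
//                       "\x45\x43\x00\x00\x21", "xx??x");
//   if (hit != nullptr) { /* pattern found at hit */ }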
struct ForzaSIMD : public BenchBase
{
virtual void init(Tests test) override
{
switch (test)
{
case Tests::First:
Pattern = "\x45\x43\x45\x55\x33\x9a\xfa\x00\x00\x00\x00\x45\x68\x21";
Mask = "xxxxxxx????xxx";
break;
case Tests::Second:
Pattern = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xbb\xaa\x00\x00\x00\x00\x45\x68\x21";
Mask = "xxxxxxxxxxx????xxx";
break;
default:
break;
}
CPUSupport = Supported();
}
virtual LPVOID runOne(PBYTE baseAddress, DWORD size) override
{
if (CPUSupport)
return FindEx((const uint8_t*)baseAddress, size, Pattern, Mask);
if (!Init)
{
std::cout << "Your CPU does not support SIMD instructions, replacing with Boyer-Moore variant." << std::endl;
Init = true;
}
return Find((const uint8_t*)baseAddress, size, Pattern, Mask);
}
virtual const char* name() const override
{
return "Forza (SIMD With OpenMP)";
}
virtual bool BackwardsSearch() const override
{
return true;
}
bool Supported()
{
int id[4] = { 0 };
__cpuid(id, 1);
		// CPUID leaf 1: SSE4.2 is ECX bit 20 (needed for _mm_cmpestri);
		// AVX is ECX bit 28 (needed for the 256-bit loads in FindEx).
		bool sse42 = (id[2] & (1 << 20)) != 0;
		bool avx = (id[2] & (1 << 28)) != 0;
return (sse42 && avx);
}
bool Init = false;
bool CPUSupport;
	const char* Pattern;
	const char* Mask;
};
struct Forza : public BenchBase
{
virtual void init(Tests test) override
{
switch (test)
{
case Tests::First:
Pattern = "\x45\x43\x45\x55\x33\x9a\xfa\x00\x00\x00\x00\x45\x68\x21";
Mask = "xxxxxxx????xxx";
break;
case Tests::Second:
Pattern = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xbb\xaa\x00\x00\x00\x00\x45\x68\x21";
Mask = "xxxxxxxxxxx????xxx";
break;
default:
break;
}
}
virtual LPVOID runOne(PBYTE baseAddress, DWORD size) override
{
return Find((const uint8_t*)baseAddress, size, Pattern, Mask);
}
virtual const char* name() const override
{
return "Forza (Boyer-Moore Variant)";
}
virtual bool BackwardsSearch() const override
{
return true;
}
	const char* Pattern;
	const char* Mask;
};
REGISTER(Forza);
REGISTER(ForzaSIMD);
#endif // Forza_h__
|
elemwise_binary_scalar_op.h
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file elemwise_binary_scalar_op.h
* \brief Function definition of elementwise binary scalar operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#include <mxnet/operator_util.h>
#include <vector>
#include <utility>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "elemwise_unary_op.h"
namespace mxnet {
namespace op {
class BinaryScalarOp : public UnaryOp {
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType>
static void ComputeExDenseResultRsp(mshadow::Stream<cpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
const double alpha = nnvm::get<double>(attrs.parsed);
CHECK_EQ(output.shape(), input.shape());
const int64_t row_count = output.shape()[0];
const int64_t items_per_row = output.shape().Size() / row_count;
const DType result_for_zero = OP::Map(DType(0), DType(alpha));
mshadow::Tensor<cpu, 1, DType> input_data = input.data().FlatTo1D<cpu, DType>(stream);
mshadow::Tensor<cpu, 1, DType> output_data = output.data().FlatTo1D<cpu, DType>(stream);
const int64_t sparse_row_count = input.aux_shape(rowsparse::kIdx).Size();
if (sparse_row_count != row_count) {
mshadow::Tensor<cpu, 1, IType> row_indexes = input.aux_data(
rowsparse::kIdx).FlatTo1D<cpu, IType>(stream);
int64_t input_iter = 0;
int64_t output_row = 0;
IType next_input_row = 0;
while (output_row < row_count) {
next_input_row = input_iter < sparse_row_count ? int64_t(row_indexes[input_iter])
: row_count;
// Split up into blocks of contiguous data and do those together
// Do contiguous dense blocks
const int64_t dense_block_count = next_input_row - output_row;
if (dense_block_count > 0) {
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, cpu>::Launch(
stream,
items_per_row * dense_block_count,
output_data.dptr_ + items_per_row * output_row,
result_for_zero);
});
output_row += dense_block_count;
continue;
}
// Do contiguous sparse blocks
int64_t next_non_contiguous_sparse = input_iter;
while (next_non_contiguous_sparse < sparse_row_count - 1) {
if (row_indexes[next_non_contiguous_sparse + 1]
!= row_indexes[next_non_contiguous_sparse] + 1) {
break;
}
++next_non_contiguous_sparse;
}
const int64_t sparse_block_count = next_non_contiguous_sparse - input_iter + 1;
if (sparse_block_count > 0) {
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
stream,
items_per_row * sparse_block_count,
&output_data.dptr_[items_per_row * output_row],
&input_data.dptr_[items_per_row * input_iter],
DType(alpha));
});
output_row += sparse_block_count;
input_iter += sparse_block_count;
continue;
}
}
} else {
      // All rows exist, so the kernel can be launched over the whole tensor
      // without having to consult the row indices.
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
stream,
items_per_row * row_count,
output_data.dptr_,
input_data.dptr_,
DType(alpha));
});
}
}
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType>
static void ComputeExDenseResultRsp(mshadow::Stream<gpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
LOG(FATAL) << "NOT IMPLEMENTED";
}
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType, typename CType>
static void ComputeExDenseResultCsr(mshadow::Stream<cpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
CHECK_EQ(output.shape(), input.shape());
const double alpha = nnvm::get<double>(attrs.parsed);
const DType dense_fill_val = OP::Map(DType(0), DType(alpha));
const TBlob column_indexes = input.aux_data(csr::kIdx);
const size_t item_count = column_indexes.Size();
// Pre-fill dense with 0-input/output value
FillDense<DType>(stream, output.shape().Size(), dense_fill_val,
req, output.data().dptr<DType>());
mshadow::Tensor<cpu, 2, DType> out = AsRowise2D<DType>(stream, output.data());
if (item_count) {
const DType *in = input.data().dptr<DType>();
const IType *column_indexes_ptr = column_indexes.dptr<IType>();
const auto row_count = static_cast<size_t>(input.shape()[0]);
const TBlob row_starts = input.aux_data(csr::kIndPtr);
const CType *row_starts_ptr = row_starts.dptr<CType>();
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(row_count); ++i) {
const bool last_row = i == static_cast<int>(row_count) - 1;
// Split up into blocks of contiguous data and do those together
const size_t row_item_start_iter = row_starts_ptr[i];
const size_t input_items_this_row = !last_row
? static_cast<size_t>(row_starts_ptr[i + 1])
- row_item_start_iter
: item_count - row_item_start_iter;
if (input_items_this_row) {
const IType *this_row_column_indexes = column_indexes_ptr + row_item_start_iter;
const DType *row_data_start = in + row_item_start_iter;
DType *output_this_row = out[i].dptr_;
// More overhead to use OMP for small loops, so don't
if (input_items_this_row > 1000) {
#pragma omp parallel for
for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
const IType col = this_row_column_indexes[j];
const DType val = row_data_start[j];
output_this_row[col] = OP::Map(val, DType(alpha));
}
} else {
for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
const IType col = this_row_column_indexes[j];
const DType val = row_data_start[j];
output_this_row[col] = OP::Map(val, DType(alpha));
}
}
}
}
}
}
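  // CSR recap (illustration, assuming the standard CSR convention used
  // here): for row i the nonzero values occupy
  // data[row_starts[i] .. row_starts[i+1]) and their column positions
  // column_indexes[row_starts[i] .. row_starts[i+1]); the loop above expands
  // each such run into the pre-filled dense output row.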
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType, typename CType>
static void ComputeExDenseResultCsr(mshadow::Stream<gpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
LOG(FATAL) << "NOT IMPLEMENTED";
}
template<typename xpu, typename OP, typename DType, typename IType>
static void ComputeExDenseResult(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
                                   const NDArray &output) {
mshadow::Stream<xpu> *stream = ctx.get_stream<xpu>();
CHECK_EQ(output.storage_type(), kDefaultStorage);
switch (input.storage_type()) {
case kRowSparseStorage: {
ComputeExDenseResultRsp<OP, DType, IType>(stream, attrs, ctx, input, req, output);
break;
}
case kCSRStorage: {
MSHADOW_IDX_TYPE_SWITCH(input.aux_data(csr::kIndPtr).type_flag_, CType, {
ComputeExDenseResultCsr<OP, DType, IType, CType>(stream, attrs, ctx, input, req, output);
});
break;
}
default:
CHECK(false) << "Unsupported sparse storage type";
break;
}
}
public:
template<typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.get_stream<xpu>();
const double alpha = nnvm::get<double>(attrs.parsed);
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), DType(alpha));
});
});
}
template<typename xpu, typename OP>
static void ComputeInt(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.get_stream<xpu>();
const double alpha = nnvm::get<double>(attrs.parsed);
MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), DType(alpha));
});
});
}
template<typename xpu, typename OP>
static void ComputeLogic(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.get_stream<xpu>();
const double alpha = nnvm::get<double>(attrs.parsed);
MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
s, inputs[0].Size(), outputs[0].dptr<bool>(), inputs[0].dptr<DType>(), DType(alpha));
});
});
}
template<typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
const auto in_stype = inputs[0].storage_type();
const auto out_stype = outputs[0].storage_type();
if (req[0] == kNullOp) {
return;
}
if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
(in_stype == kCSRStorage && out_stype == kCSRStorage)) {
// csr -> csr, or rsp -> rsp
UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
} else if (out_stype == kDefaultStorage &&
(in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
MSHADOW_TYPE_SWITCH(outputs[0].data().type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
ComputeExDenseResult<xpu, OP, DType, IType>(attrs, ctx, inputs[0], req[0], outputs[0]);
});
});
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
template<typename xpu, typename OP>
static void LogicComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
const auto in_stype = inputs[0].storage_type();
const auto out_stype = outputs[0].storage_type();
if (req[0] == kNullOp) {
return;
}
if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
(in_stype == kCSRStorage && out_stype == kCSRStorage)) {
// csr -> csr, or rsp -> rsp
UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
template<typename xpu, typename OP>
static void Backward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.get_stream<xpu>();
const double alpha = nnvm::get<double>(attrs.parsed);
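// inputs[0] is the incoming (output) gradient and inputs[1] the forward input;
// backward_grad_tuned<OP> applies the chain rule, multiplying the incoming
// gradient by OP::Map(input, alpha).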
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet::op::mxnet_op::Kernel<mxnet::op::mxnet_op::op_with_req<
mxnet::op::mxnet_op::backward_grad_tuned<OP>, Req>, xpu>::
Launch(s, inputs[0].Size(), outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>(),
DType(alpha));
});
});
}
};
#define MXNET_OPERATOR_REGISTER_BINARY_SCALAR(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(1) \
.set_num_outputs(1) \
.set_attr_parser([](NodeAttrs* attrs) { \
attrs->parsed = std::stod(attrs->dict["scalar"]); \
}) \
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \
.set_attr<mxnet::alm::FChangeLayout>("FChangeLayout", ElemwiseChangeLayout) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}}; \
}) \
.add_argument("data", "NDArray-or-Symbol", "source input") \
.add_argument("scalar", "float", "scalar input")
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
|
3503.c
|
/* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <[email protected]>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "covariance.h"
/* Array initialization. */
static
void init_array (int m, int n,
DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
int i, j;
*float_n = 1.2;
for (i = 0; i < M; i++)
for (j = 0; j < N; j++)
data[i][j] = ((DATA_TYPE) i*j) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int m,
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
int i, j;
for (i = 0; i < m; i++)
for (j = 0; j < m; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]);
if ((i * m + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_covariance(int m, int n,
DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
DATA_TYPE POLYBENCH_1D(mean,M,m))
{
int i, j, j1, j2;
#pragma scop
/* Determine mean of column vectors of input data matrix */
#pragma omp parallel private(i, j, j2) num_threads(2)
{
#pragma omp for schedule(static, 16)
for (j = 0; j < _PB_M; j++)
{
mean[j] = 0.0;
for (i = 0; i < _PB_N; i++)
mean[j] += data[i][j];
mean[j] /= float_n;
}
/* Center the column vectors. */
#pragma omp for schedule(static, 16)
for (i = 0; i < _PB_N; i++)
for (j = 0; j < _PB_M; j++)
data[i][j] -= mean[j];
/* Calculate the m * m covariance matrix. */
#pragma omp for schedule(static, 16)
for (j1 = 0; j1 < _PB_M; j1++)
for (j2 = j1; j2 < _PB_M; j2++)
{
symmat[j1][j2] = 0.0;
for (i = 0; i < _PB_N; i++)
symmat[j1][j2] += data[i][j1] * data[i][j2];
symmat[j2][j1] = symmat[j1][j2];
}
}
#pragma endscop
}
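/* In formula form: each column j is first centered by its mean,
 *   mean[j] = (1/float_n) * sum_i data[i][j],
 * after which the kernel accumulates the unnormalized covariance
 *   symmat[j1][j2] = sum_i data[i][j1] * data[i][j2],
 * mirrored into symmat[j2][j1]; note that no 1/(N-1) factor is applied. */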
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_covariance (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(mean));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(mean);
return 0;
}
|
displacement_lagrangemultiplier_frictional_contact_criteria.h
|
// KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_FRICTIONAL_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_FRICTIONAL_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "utilities/color_utilities.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "custom_utilities/active_set_utilities.h"
#include "utilities/constraint_utilities.h"
#include "custom_utilities/contact_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class DisplacementLagrangeMultiplierFrictionalContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* @details This class implements a convergence control based on nodal displacement and
* lagrange multiplier values. The error is evaluated separately for each of them, and
* relative and absolute tolerances for both must be specified.
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
class TDenseSpace >
class DisplacementLagrangeMultiplierFrictionalContactCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of DisplacementLagrangeMultiplierFrictionalContactCriteria
KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierFrictionalContactCriteria );
/// Local Flags
KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT );
KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
KRATOS_DEFINE_LOCAL_FLAG( ROTATION_DOF_IS_CONSIDERED );
KRATOS_DEFINE_LOCAL_FLAG( PURE_SLIP );
/// The base class definition (and it subclasses)
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// The sparse space used
typedef TSparseSpace SparseSpaceType;
/// The table stream definition. TODO: Replace by logger
typedef TableStreamUtility::Pointer TablePrinterPointerType;
/// The index type definition
typedef std::size_t IndexType;
/// The key type definition
typedef std::size_t KeyType;
/// The epsilon tolerance definition
static constexpr double Tolerance = std::numeric_limits<double>::epsilon();
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor.
* @param DispRatioTolerance Relative tolerance for displacement error
* @param DispAbsTolerance Absolute tolerance for displacement error
* @param RotRatioTolerance Relative tolerance for rotation error
* @param RotAbsTolerance Absolute tolerance for rotation error
* @param LMNormalRatioTolerance Relative tolerance for the normal lagrange multiplier error
* @param LMNormalAbsTolerance Absolute tolerance for the normal lagrange multiplier error
* @param LMTangentStickRatioTolerance Relative tolerance for the tangent (stick) lagrange multiplier error
* @param LMTangentStickAbsTolerance Absolute tolerance for the tangent (stick) lagrange multiplier error
* @param LMTangentSlipRatioTolerance Relative tolerance for the tangent (slip) lagrange multiplier error
* @param LMTangentSlipAbsTolerance Absolute tolerance for the tangent (slip) lagrange multiplier error
* @param NormalTangentRatio Ratio between the normal and tangent components that will be accepted as converged
* @param EnsureContact If set, losing contact (a vanishing normal LM norm) triggers an error
* @param PureSlip If set, a pure slip state is assumed for all frictional nodes
* @param PrintingOutput If the output is going to be printed in a txt file
*/
explicit DisplacementLagrangeMultiplierFrictionalContactCriteria(
const TDataType DispRatioTolerance,
const TDataType DispAbsTolerance,
const TDataType RotRatioTolerance,
const TDataType RotAbsTolerance,
const TDataType LMNormalRatioTolerance,
const TDataType LMNormalAbsTolerance,
const TDataType LMTangentStickRatioTolerance,
const TDataType LMTangentStickAbsTolerance,
const TDataType LMTangentSlipRatioTolerance,
const TDataType LMTangentSlipAbsTolerance,
const TDataType NormalTangentRatio,
const bool EnsureContact = false,
const bool PureSlip = false,
const bool PrintingOutput = false
)
: BaseType()
{
// Set local flags
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT, EnsureContact);
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT, PrintingOutput);
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP, PureSlip);
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED, false);
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);
// The displacement solution
mDispRatioTolerance = DispRatioTolerance;
mDispAbsTolerance = DispAbsTolerance;
// The rotation solution
mRotRatioTolerance = RotRatioTolerance;
mRotAbsTolerance = RotAbsTolerance;
// The normal contact solution
mLMNormalRatioTolerance = LMNormalRatioTolerance;
mLMNormalAbsTolerance = LMNormalAbsTolerance;
// The tangent contact solution
mLMTangentStickRatioTolerance = LMTangentStickRatioTolerance;
mLMTangentStickAbsTolerance = LMTangentStickAbsTolerance;
mLMTangentSlipRatioTolerance = LMTangentSlipRatioTolerance;
mLMTangentSlipAbsTolerance = LMTangentSlipAbsTolerance;
// We get the ratio between the normal and tangent that will be accepted as converged
mNormalTangentRatio = NormalTangentRatio;
}
/**
* @brief Default constructor (parameters)
* @param ThisParameters The configuration parameters
*/
explicit DisplacementLagrangeMultiplierFrictionalContactCriteria( Parameters ThisParameters = Parameters(R"({})"))
: BaseType()
{
// Validate and assign defaults
ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
this->AssignSettings(ThisParameters);
}
/// Copy constructor.
DisplacementLagrangeMultiplierFrictionalContactCriteria( DisplacementLagrangeMultiplierFrictionalContactCriteria const& rOther )
:BaseType(rOther)
,mOptions(rOther.mOptions)
,mDispRatioTolerance(rOther.mDispRatioTolerance)
,mDispAbsTolerance(rOther.mDispAbsTolerance)
,mRotRatioTolerance(rOther.mRotRatioTolerance)
,mRotAbsTolerance(rOther.mRotAbsTolerance)
,mLMNormalRatioTolerance(rOther.mLMNormalRatioTolerance)
,mLMNormalAbsTolerance(rOther.mLMNormalAbsTolerance)
,mLMTangentStickRatioTolerance(rOther.mLMTangentStickRatioTolerance)
,mLMTangentStickAbsTolerance(rOther.mLMTangentStickAbsTolerance)
,mLMTangentSlipRatioTolerance(rOther.mLMTangentSlipRatioTolerance)
,mLMTangentSlipAbsTolerance(rOther.mLMTangentSlipAbsTolerance)
,mNormalTangentRatio(rOther.mNormalTangentRatio)
{
}
/// Destructor.
~DisplacementLagrangeMultiplierFrictionalContactCriteria() override = default;
///@}
///@name Operators
///@{
/**
* @brief Compute relative and absolute error.
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
* @return true if convergence is achieved, false otherwise
*/
bool PostCriteria(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
if (SparseSpaceType::Size(rDx) != 0) { //if we are solving for something
// Getting process info
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// Initialize
TDataType disp_solution_norm = 0.0, rot_solution_norm = 0.0, normal_lm_solution_norm = 0.0,
tangent_lm_stick_solution_norm = 0.0, tangent_lm_slip_solution_norm = 0.0,
disp_increase_norm = 0.0, rot_increase_norm = 0.0, normal_lm_increase_norm = 0.0,
tangent_lm_stick_increase_norm = 0.0, tangent_lm_slip_increase_norm = 0.0;
IndexType disp_dof_num(0), rot_dof_num(0), lm_dof_num(0), lm_stick_dof_num(0), lm_slip_dof_num(0);
// First iterator
const auto it_dof_begin = rDofSet.begin();
// The nodes array
auto& r_nodes_array = rModelPart.Nodes();
// Auxiliary values
std::size_t dof_id = 0;
TDataType dof_value = 0.0, dof_incr = 0.0;
// The number of active dofs
const std::size_t number_active_dofs = rb.size();
// Auxiliary displacement DoF check
const std::function<bool(const VariableData&)> check_without_rot =
[](const VariableData& rCurrVar) -> bool {return true;};
const std::function<bool(const VariableData&)> check_with_rot =
[](const VariableData& rCurrVar) -> bool {return ((rCurrVar == DISPLACEMENT_X) || (rCurrVar == DISPLACEMENT_Y) || (rCurrVar == DISPLACEMENT_Z));};
const auto* p_check_disp = (mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? &check_with_rot : &check_without_rot;
// Loop over Dofs
#pragma omp parallel for firstprivate(dof_id, dof_value, dof_incr) reduction(+:disp_solution_norm, rot_solution_norm, normal_lm_solution_norm, tangent_lm_slip_solution_norm, tangent_lm_stick_solution_norm, disp_increase_norm, rot_increase_norm, normal_lm_increase_norm, tangent_lm_slip_increase_norm, tangent_lm_stick_increase_norm, disp_dof_num, rot_dof_num, lm_dof_num, lm_stick_dof_num, lm_slip_dof_num)
for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
auto it_dof = it_dof_begin + i;
dof_id = it_dof->EquationId();
// Check dof id is solved
if (dof_id < number_active_dofs) {
if (mActiveDofs[dof_id] == 1) {
dof_value = it_dof->GetSolutionStepValue(0);
dof_incr = rDx[dof_id];
const auto& r_curr_var = it_dof->GetVariable();
if (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_X || r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y || r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) {
// The normal of the node (TODO: how to solve this without accessing the database all the time?)
const auto it_node = r_nodes_array.find(it_dof->Id());
const double mu = it_node->GetValue(FRICTION_COEFFICIENT);
if (mu < std::numeric_limits<double>::epsilon()) {
normal_lm_solution_norm += std::pow(dof_value, 2);
normal_lm_increase_norm += std::pow(dof_incr, 2);
} else {
const double normal = it_node->FastGetSolutionStepValue(NORMAL)[r_curr_var.GetComponentIndex()];
const TDataType normal_dof_value = dof_value * normal;
const TDataType normal_dof_incr = dof_incr * normal;
normal_lm_solution_norm += std::pow(normal_dof_value, 2);
normal_lm_increase_norm += std::pow(normal_dof_incr, 2);
if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) {
tangent_lm_slip_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_slip_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_slip_dof_num;
} else {
tangent_lm_stick_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_stick_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_stick_dof_num;
}
}
++lm_dof_num;
} else if ((*p_check_disp)(r_curr_var)) {
disp_solution_norm += std::pow(dof_value, 2);
disp_increase_norm += std::pow(dof_incr, 2);
++disp_dof_num;
} else { // We will assume it is a rotation dof
KRATOS_DEBUG_ERROR_IF_NOT((r_curr_var == ROTATION_X) || (r_curr_var == ROTATION_Y) || (r_curr_var == ROTATION_Z)) << "Variable must be a ROTATION and it is: " << r_curr_var.Name() << std::endl;
rot_solution_norm += std::pow(dof_value, 2);
rot_increase_norm += std::pow(dof_incr, 2);
++rot_dof_num;
}
}
}
}
if(disp_increase_norm < Tolerance) disp_increase_norm = 1.0;
if(rot_increase_norm < Tolerance) rot_increase_norm = 1.0;
if(normal_lm_increase_norm < Tolerance) normal_lm_increase_norm = 1.0;
if(tangent_lm_stick_increase_norm < Tolerance) tangent_lm_stick_increase_norm = 1.0;
if(tangent_lm_slip_increase_norm < Tolerance) tangent_lm_slip_increase_norm = 1.0;
if(disp_solution_norm < Tolerance) disp_solution_norm = 1.0;
KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT) && normal_lm_solution_norm < Tolerance) << "WARNING::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;
const TDataType disp_ratio = std::sqrt(disp_increase_norm/disp_solution_norm);
const TDataType rot_ratio = std::sqrt(rot_increase_norm/rot_solution_norm);
const TDataType normal_lm_ratio = normal_lm_solution_norm > Tolerance ? std::sqrt(normal_lm_increase_norm/normal_lm_solution_norm) : 0.0;
const TDataType tangent_lm_stick_ratio = tangent_lm_stick_solution_norm > Tolerance ? std::sqrt(tangent_lm_stick_increase_norm/tangent_lm_stick_solution_norm) : 0.0;
const TDataType tangent_lm_slip_ratio = tangent_lm_slip_solution_norm > Tolerance ? std::sqrt(tangent_lm_slip_increase_norm/tangent_lm_slip_solution_norm) : 0.0;
const TDataType disp_abs = std::sqrt(disp_increase_norm)/ static_cast<TDataType>(disp_dof_num);
const TDataType rot_abs = std::sqrt(rot_increase_norm)/ static_cast<TDataType>(rot_dof_num);
const TDataType normal_lm_abs = std::sqrt(normal_lm_increase_norm)/ static_cast<TDataType>(lm_dof_num);
const TDataType tangent_lm_stick_abs = lm_stick_dof_num > 0 ? std::sqrt(tangent_lm_stick_increase_norm)/ static_cast<TDataType>(lm_stick_dof_num) : 0.0;
const TDataType tangent_lm_slip_abs = lm_slip_dof_num > 0 ? std::sqrt(tangent_lm_slip_increase_norm)/ static_cast<TDataType>(lm_slip_dof_num) : 0.0;
const TDataType normal_tangent_stick_ratio = tangent_lm_stick_abs/normal_lm_abs;
const TDataType normal_tangent_slip_ratio = tangent_lm_slip_abs/normal_lm_abs;
// We print the results // TODO: Replace for the new log
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
std::cout.precision(4);
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& Table = p_table->GetTable();
if (mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
Table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance << rot_ratio << mRotRatioTolerance << rot_abs << mRotAbsTolerance << normal_lm_ratio << mLMNormalRatioTolerance << normal_lm_abs << mLMNormalAbsTolerance << tangent_lm_stick_ratio << mLMTangentStickRatioTolerance << tangent_lm_stick_abs << mLMTangentStickAbsTolerance << tangent_lm_slip_ratio << mLMTangentSlipRatioTolerance << tangent_lm_slip_abs << mLMTangentSlipAbsTolerance;
} else {
Table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance << normal_lm_ratio << mLMNormalRatioTolerance << normal_lm_abs << mLMNormalAbsTolerance << tangent_lm_stick_ratio << mLMTangentStickRatioTolerance << tangent_lm_stick_abs << mLMTangentStickAbsTolerance << tangent_lm_slip_ratio << mLMTangentSlipRatioTolerance << tangent_lm_slip_abs << mLMTangentSlipAbsTolerance;
}
} else {
std::cout.precision(4);
if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT)) {
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("DoF ONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
if (mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tROTATION: RATIO = ") << rot_ratio << BOLDFONT(" EXP.RATIO = ") << mRotRatioTolerance << BOLDFONT(" ABS = ") << rot_abs << BOLDFONT(" EXP.ABS = ") << mRotAbsTolerance << std::endl;
}
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT(" NORMAL LAGRANGE MUL:\tRATIO = ") << normal_lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMNormalRatioTolerance << BOLDFONT(" ABS = ") << normal_lm_abs << BOLDFONT(" EXP.ABS = ") << mLMNormalAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT(" STICK LAGRANGE MUL:\tRATIO = ") << tangent_lm_stick_ratio << BOLDFONT(" EXP.RATIO = ") << mLMTangentStickRatioTolerance << BOLDFONT(" ABS = ") << tangent_lm_stick_abs << BOLDFONT(" EXP.ABS = ") << mLMTangentStickAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT(" SLIP LAGRANGE MUL:\tRATIO = ") << tangent_lm_slip_ratio << BOLDFONT(" EXP.RATIO = ") << mLMTangentSlipRatioTolerance << BOLDFONT(" ABS = ") << tangent_lm_slip_abs << BOLDFONT(" EXP.ABS = ") << mLMTangentSlipAbsTolerance << std::endl;
} else {
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "DoF ONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tDISPLACEMENT: RATIO = " << disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
if (mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tROTATION: RATIO = " << rot_ratio << " EXP.RATIO = " << mRotRatioTolerance << " ABS = " << rot_abs << " EXP.ABS = " << mRotAbsTolerance << std::endl;
}
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << " NORMAL LAGRANGE MUL:\tRATIO = " << normal_lm_ratio << " EXP.RATIO = " << mLMNormalRatioTolerance << " ABS = " << normal_lm_abs << " EXP.ABS = " << mLMNormalAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << " STICK LAGRANGE MUL:\tRATIO = " << tangent_lm_stick_ratio << " EXP.RATIO = " << mLMTangentStickRatioTolerance << " ABS = " << tangent_lm_stick_abs << " EXP.ABS = " << mLMTangentStickAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << " SLIP LAGRANGE MUL:\tRATIO = " << tangent_lm_slip_ratio << " EXP.RATIO = " << mLMTangentSlipRatioTolerance << " ABS = " << tangent_lm_slip_abs << " EXP.ABS = " << mLMTangentSlipAbsTolerance << std::endl;
}
}
}
// We check if converged
const bool disp_converged = (disp_ratio <= mDispRatioTolerance || disp_abs <= mDispAbsTolerance);
const bool rot_converged = (mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? (rot_ratio <= mRotRatioTolerance || rot_abs <= mRotAbsTolerance) : true;
const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT) && normal_lm_solution_norm < Tolerance) ? true :
(normal_lm_ratio <= mLMNormalRatioTolerance || normal_lm_abs <= mLMNormalAbsTolerance) &&
(tangent_lm_stick_ratio <= mLMTangentStickRatioTolerance || tangent_lm_stick_abs <= mLMTangentStickAbsTolerance || normal_tangent_stick_ratio <= mNormalTangentRatio) &&
(tangent_lm_slip_ratio <= mLMTangentSlipRatioTolerance || tangent_lm_slip_abs <= mLMTangentSlipAbsTolerance || normal_tangent_slip_ratio <= mNormalTangentRatio);
if (disp_converged && rot_converged && lm_converged) {
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT))
r_table << BOLDFONT(FGRN(" Achieved"));
else
r_table << "Achieved";
} else {
if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT))
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
else
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tDoF convergence is achieved" << std::endl;
}
}
return true;
} else {
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT))
r_table << BOLDFONT(FRED(" Not achieved"));
else
r_table << "Not achieved";
} else {
if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT))
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
else
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tDoF convergence is not achieved" << std::endl;
}
}
return false;
}
}
else // In this case all the displacements are imposed!
return true;
}
/**
* This function initialize the convergence criteria
* @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
*/
void Initialize( ModelPart& rModelPart ) override
{
// Initialize
BaseType::mConvergenceCriteriaIsInitialized = true;
// Check rotation dof
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED, ContactUtilities::CheckModelPartHasRotationDoF(rModelPart));
// Initialize header
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
r_table.AddColumn("DP RATIO", 10);
r_table.AddColumn("EXP. RAT", 10);
r_table.AddColumn("ABS", 10);
r_table.AddColumn("EXP. ABS", 10);
if (mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
r_table.AddColumn("RT RATIO", 10);
r_table.AddColumn("EXP. RAT", 10);
r_table.AddColumn("ABS", 10);
r_table.AddColumn("EXP. ABS", 10);
}
r_table.AddColumn("N.LM RATIO", 10);
r_table.AddColumn("EXP. RAT", 10);
r_table.AddColumn("ABS", 10);
r_table.AddColumn("EXP. ABS", 10);
if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) {
r_table.AddColumn("STI. RATIO", 10);
r_table.AddColumn("EXP. RAT", 10);
r_table.AddColumn("ABS", 10);
r_table.AddColumn("EXP. ABS", 10);
}
r_table.AddColumn("SLIP RATIO", 10);
r_table.AddColumn("EXP. RAT", 10);
r_table.AddColumn("ABS", 10);
r_table.AddColumn("EXP. ABS", 10);
r_table.AddColumn("CONVERGENCE", 15);
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED, true);
}
}
/**
* @brief This function initializes the solution step
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
*/
void InitializeSolutionStep(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// Filling mActiveDofs when MPCs exist
ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet);
}
/**
* @brief This function finalizes the non-linear iteration
* @param rModelPart Reference to the ModelPart containing the problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual + reactions)
*/
void FinalizeNonLinearIteration(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// Calling base criteria
BaseType::FinalizeNonLinearIteration(rModelPart, rDofSet, rA, rDx, rb);
// The current process info
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
r_process_info.SetValue(ACTIVE_SET_COMPUTED, false);
}
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
* @return The default parameters
*/
Parameters GetDefaultParameters() const override
{
Parameters default_parameters = Parameters(R"(
{
"name" : "displacement_lagrangemultiplier_frictional_contact_criteria",
"ensure_contact" : false,
"pure_slip" : false,
"print_convergence_criterion" : false,
"displacement_relative_tolerance" : 1.0e-4,
"displacement_absolute_tolerance" : 1.0e-9,
"rotation_relative_tolerance" : 1.0e-4,
"rotation_absolute_tolerance" : 1.0e-9,
"contact_displacement_relative_tolerance" : 1.0e-4,
"contact_displacement_absolute_tolerance" : 1.0e-9,
"frictional_stick_contact_displacement_relative_tolerance" : 1.0e-4,
"frictional_stick_contact_displacement_absolute_tolerance" : 1.0e-9,
"frictional_slip_contact_displacement_relative_tolerance" : 1.0e-4,
"frictional_slip_contact_displacement_absolute_tolerance" : 1.0e-9,
"ratio_normal_tangent_threshold" : 1.0e-4
})");
// Getting base class default parameters
const Parameters base_default_parameters = BaseType::GetDefaultParameters();
default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
return default_parameters;
}
/**
* @brief Returns the name of the class as used in the settings (snake_case format)
* @return The name of the class
*/
static std::string Name()
{
return "displacement_lagrangemultiplier_frictional_contact_criteria";
}
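/**
 * A minimal construction sketch (the space typedefs below are hypothetical
 * placeholders; in practice they come from the solving strategy setup):
 *
 *   Parameters settings(R"({"displacement_relative_tolerance" : 1.0e-5})");
 *   auto p_criteria = Kratos::make_shared<
 *       DisplacementLagrangeMultiplierFrictionalContactCriteria<SparseSpaceType, LocalSpaceType>>(settings);
 */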
///@}
///@name Operations
///@{
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief This method assigns settings to member variables
* @param ThisParameters Parameters that are assigned to the member variables
*/
void AssignSettings(const Parameters ThisParameters) override
{
BaseType::AssignSettings(ThisParameters);
// The displacement solution
mDispRatioTolerance = ThisParameters["displacement_relative_tolerance"].GetDouble();
mDispAbsTolerance = ThisParameters["displacement_absolute_tolerance"].GetDouble();
// The rotation solution
mRotRatioTolerance = ThisParameters["rotation_relative_tolerance"].GetDouble();
mRotAbsTolerance = ThisParameters["rotation_absolute_tolerance"].GetDouble();
// The normal contact solution
mLMNormalRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble();
mLMNormalAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble();
// The tangent contact solution
mLMTangentStickRatioTolerance = ThisParameters["frictional_stick_contact_displacement_relative_tolerance"].GetDouble();
mLMTangentStickAbsTolerance = ThisParameters["frictional_stick_contact_displacement_absolute_tolerance"].GetDouble();
mLMTangentSlipRatioTolerance = ThisParameters["frictional_slip_contact_displacement_relative_tolerance"].GetDouble();
mLMTangentSlipAbsTolerance = ThisParameters["frictional_slip_contact_displacement_absolute_tolerance"].GetDouble();
// We get the ratio between the normal and tangent that will be accepted as converged
mNormalTangentRatio = ThisParameters["ratio_normal_tangent_threshold"].GetDouble();
// Set local flags
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool());
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED, false);
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP, ThisParameters["pure_slip"].GetBool());
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
Flags mOptions; /// Local flags
TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement
TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement
TDataType mRotRatioTolerance; /// The ratio threshold for the norm of the rotation
TDataType mRotAbsTolerance; /// The absolute value threshold for the norm of the rotation
TDataType mLMNormalRatioTolerance; /// The ratio threshold for the norm of the LM (normal)
TDataType mLMNormalAbsTolerance; /// The absolute value threshold for the norm of the LM (normal)
TDataType mLMTangentStickRatioTolerance; /// The ratio threshold for the norm of the LM (tangent-stick)
TDataType mLMTangentStickAbsTolerance; /// The absolute value threshold for the norm of the LM (tangent-stick)
TDataType mLMTangentSlipRatioTolerance; /// The ratio threshold for the norm of the LM (tangent-slip)
TDataType mLMTangentSlipAbsTolerance; /// The absolute value threshold for the norm of the LM (tangent-slip)
TDataType mNormalTangentRatio; /// The ratio to accept a non-converged tangent component in case the normal component has converged
std::vector<int> mActiveDofs; /// This vector contains the dofs that are active
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Serialization
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Unaccessible methods
///@{
///@}
}; // Kratos DisplacementLagrangeMultiplierFrictionalContactCriteria
///@name Local flags creation
///@{
/// Local Flags
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::ROTATION_DOF_IS_CONSIDERED(Kratos::Flags::Create(3));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::PURE_SLIP(Kratos::Flags::Create(4));
} // namespace Kratos
#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_FRICTIONAL_CONTACT_CRITERIA_H */
|
util_test.c
|
/*
* This file is part of ABCDK.
*
* MIT License
*
*/
#include <stdio.h>
#include <assert.h>
#include <unistd.h>
#include <string.h>
#include <ctype.h>
#include <linux/serial.h>
#include "abcdk-util/general.h"
#include "abcdk-util/getargs.h"
#include "abcdk-util/geometry.h"
#include "abcdk-util/ffmpeg.h"
#include "abcdk-util/bmp.h"
#include "abcdk-util/freeimage.h"
#include "abcdk-util/uri.h"
#include "abcdk-util/html.h"
#include "abcdk-util/clock.h"
#include "abcdk-util/crc32.h"
#include "abcdk-util/robots.h"
#include "abcdk-util/dirent.h"
#include "abcdk-util/socket.h"
#include "abcdk-util/hexdump.h"
#include "abcdk-util/termios.h"
#include "abcdk-mp4/demuxer.h"
#include "abcdk-util/video.h"
#include "abcdk-util/lz4.h"
#include "abcdk-util/openssl.h"
#include "abcdk-util/redis.h"
#include "abcdk-comm/comm.h"
#include "abcdk-comm/message.h"
#include "abcdk-comm/queue.h"
#include "abcdk-comm/waiter.h"
#include "abcdk-util/json.h"
#include "abcdk-comm/easy.h"
#ifdef HAVE_FUSE
#define FUSE_USE_VERSION 29
#include <fuse.h>
#endif //
#ifdef HAVE_LIBNM
#include <libnm/NetworkManager.h>
#endif
#ifdef HAVE_MPI
#include <mpi.h>
#endif
#ifdef HAVE_ARCHIVE
#include <archive.h>
#include <archive_entry.h>
#endif
#ifdef HAVE_MODBUS
#include <modbus.h>
#endif
#ifdef HAVE_LIBUSB
#include <libusb.h>
#endif
#ifdef HAVE_MQTT
#include <mosquitto.h>
#endif
#ifdef HAVE_BLKID
#include <blkid/blkid.h>
#endif
void test_log(abcdk_tree_t *args)
{
abcdk_openlog(NULL,LOG_DEBUG,1);
for(int i = LOG_EMERG ;i<= LOG_DEBUG;i++)
syslog(i,"haha-%d",i);
}
void test_ffmpeg(abcdk_tree_t *args)
{
#ifdef HAVE_FFMPEG
for(int i = 0;i<1000;i++)
{
enum AVPixelFormat pixfmt = (enum AVPixelFormat)i;
int bits = abcdk_av_image_pixfmt_bits(pixfmt,0);
int bits_pad = abcdk_av_image_pixfmt_bits(pixfmt,1);
const char *name = abcdk_av_image_pixfmt_name(pixfmt);
printf("%s(%d): %d/%d bits.\n",name,i,bits,bits_pad);
}
#if 0
abcdk_image_t src = {AV_PIX_FMT_YUV420P,{NULL,NULL,NULL,NULL},{0,0,0,0},1920,1080};
abcdk_image_t dst = {AV_PIX_FMT_YUV420P,{NULL,NULL,NULL,NULL},{0,0,0,0},1920,1080};
abcdk_image_t dst2 = {AV_PIX_FMT_BGR32,{NULL,NULL,NULL,NULL},{0,0,0,0},1920,1080};
int src_heights[4]={0}, dst_heights[4]={0}, dst2_heights[4]={0};
abcdk_av_image_fill_heights(src_heights,src.height,src.pixfmt);
abcdk_av_image_fill_heights(dst_heights,dst.height,dst.pixfmt);
abcdk_av_image_fill_heights(dst2_heights,dst2.height,dst2.pixfmt);
abcdk_av_image_fill_strides2(&src,16);
abcdk_av_image_fill_strides2(&dst,10);
abcdk_av_image_fill_strides2(&dst2,1);
void *src_buf = abcdk_heap_alloc(abcdk_av_image_size3(&src));
void *dst_buf = abcdk_heap_alloc(abcdk_av_image_size3(&dst));
void *dst2_buf = abcdk_heap_alloc(abcdk_av_image_size3(&dst2));
abcdk_av_image_fill_pointers2(&src,src_buf);
abcdk_av_image_fill_pointers2(&dst,dst_buf);
abcdk_av_image_fill_pointers2(&dst2,dst2_buf);
abcdk_av_image_copy2(&dst,&src);
struct SwsContext *ctx = abcdk_sws_alloc2(&src,&dst2,0);
int h = sws_scale(ctx,(const uint8_t *const *)src.datas,src.strides,0,src.height,dst2.datas,dst2.strides);
//int h = sws_scale(ctx,(const uint8_t *const *)src.datas,src.strides,100,src.height,dst2.datas,dst2.strides);
printf("h = %d\n",h);
uint8_t *tmp = dst2.datas[0];
for (int i = 0; i < dst2.height; i++)
{
for (int j = 0; j < dst2.width*4; j += 4)
{
tmp[j+0] = 0;
tmp[j+1] = 0;
tmp[j+2] = 255;
}
tmp += dst2.strides[0];
}
int chk = abcdk_bmp_save2("/tmp/test_bmp.bmp",dst2.datas[0],dst2.strides[0],dst2.width,dst2.height,32);
assert(chk==0);
abcdk_sws_free(&ctx);
abcdk_heap_free(src_buf);
abcdk_heap_free(dst_buf);
abcdk_heap_free(dst2_buf);
#endif
#endif //
}
void test_bmp(abcdk_tree_t *args)
{
const char *src_file = abcdk_option_get(args,"--src-file",0,"");
const char *dst_file = abcdk_option_get(args,"--dst-file",0,"");
uint32_t stride = 0;
uint32_t width = 0;
int32_t height = 0;
uint8_t bits = 0;
int chk = abcdk_bmp_load2(src_file, NULL, 0, 13, &stride, &width, &height, &bits);
assert(chk == 0);
printf("s=%u,w=%u,h=%d,b=%hhu\n",stride,width,height,bits);
uint8_t *data = abcdk_heap_alloc(stride*height);
chk = abcdk_bmp_load2(src_file, data, stride*height, 1, &stride, &width, &height, &bits);
assert(chk == 0);
chk = abcdk_bmp_save2(dst_file, data, stride, width, height, bits);
assert(chk == 0);
abcdk_heap_free(data);
}
void test_freeimage(abcdk_tree_t *args)
{
#ifdef FREEIMAGE_H
abcdk_fi_init(1);
abcdk_fi_init(1);//test run once.
abcdk_fi_log2syslog();
const char *src_file = abcdk_option_get(args,"--src-file",0,"");
const char *dst_file = abcdk_option_get(args,"--dst-file",0,"");
uint8_t *data = NULL;
uint32_t stride = 0;
uint32_t width = 0;
uint32_t height = 0;
uint8_t bits = 0;
uint32_t xbytes = 0;
FREE_IMAGE_FORMAT src_fmt = FreeImage_GetFileType(src_file,0);
FIBITMAP *dib = abcdk_fi_load2(src_fmt,0,src_file);
assert(dib!=NULL);
width = FreeImage_GetWidth(dib);
height = FreeImage_GetHeight(dib);
abcdk_resize_t r = {0};
int dst_w = 500;
int dst_h = 1100;
abcdk_resize_ratio_2d(&r,width,height,dst_w,dst_h,0);
FIBITMAP *dib2 = FreeImage_RescaleRect(dib,r.x_factor *width,r.y_factor*height,0,0,width,height,FILTER_BICUBIC,0);
if(dib2)
{
FreeImage_Unload(dib);
dib = dib2;
}
dib2 = FreeImage_ConvertTo24Bits(dib);
if(dib2)
{
FreeImage_Unload(dib);
dib = dib2;
}
int left = abcdk_resize_src2dst_2d(&r,0,1);
int top = abcdk_resize_src2dst_2d(&r,0,0);
dib2 = FreeImage_Allocate(dst_w,dst_h,24,0,0,0);
FreeImage_Paste(dib2,dib,left,top,1000);
if(dib2)
{
FreeImage_Unload(dib);
dib = dib2;
}
data = FreeImage_GetBits(dib);
stride = FreeImage_GetPitch(dib);
width = FreeImage_GetWidth(dib);
height = FreeImage_GetHeight(dib);
bits = FreeImage_GetBPP(dib);
xbytes = FreeImage_GetLine(dib);
// FreeImage_FlipHorizontal(dib);
// FreeImage_FlipVertical(dib);
//FreeImage_AdjustBrightness(dib,100);
FreeImage_Invert(dib);
#if 1
int chk = abcdk_fi_save2(FIF_JPEG,JPEG_QUALITYGOOD,dst_file, data, stride, width, height, bits);
assert(chk == 0);
#else
BOOL chk = FreeImage_Save(FIF_JPEG,dib,dst_file,JPEG_QUALITYGOOD);
assert(chk);
#endif
FreeImage_Unload(dib);
abcdk_fi_uninit();
abcdk_fi_uninit();//test run once.
#endif //FREEIMAGE_H
}
void test_uri(abcdk_tree_t *args)
{
const char *uri = abcdk_option_get(args,"--uri",0,"");
abcdk_allocator_t * alloc = abcdk_uri_split(uri);
assert(alloc);
for(size_t i = 0;i<alloc->numbers;i++)
printf("[%zu]: %s\n",i,alloc->pptrs[i]);
abcdk_allocator_unref(&alloc);
}
void test_strrep(abcdk_tree_t *args)
{
char buf[]={"abcab| |cabcabc"};
char *p = abcdk_strrep(buf," ","",1);
printf("%s\n",p);
abcdk_heap_free(p);
}
/**/
const char *_test_html_cntrl_replace(char *text, char c)
{
if(!text)
return "";
char *tmp = text;
while (*tmp)
{
if (iscntrl((unsigned char)*tmp))
*tmp = c;
tmp += 1;
}
return text;
}
static int _test_html_dump_cb(size_t deep, abcdk_tree_t *node, void *opaque)
{
if(deep==0)
{
abcdk_tree_fprintf(stderr,deep,node,"%s\n",".");
}
else
{
abcdk_tree_fprintf(stderr, deep, node, "%s:<%s>\n",
ABCDK_PTR2I8PTR(node->alloc->pptrs[ABCDK_HTML_KEY], 0),
_test_html_cntrl_replace(ABCDK_PTR2I8PTR(node->alloc->pptrs[ABCDK_HTML_VALUE], 0), ' '));
}
return 1;
}
void test_html(abcdk_tree_t *args)
{
const char *file = abcdk_option_get(args,"--file",0,"");
// abcdk_clock_dot(NULL);
abcdk_tree_t *t = abcdk_html_parse_file(file);
// printf("%lu\n",abcdk_clock_step(NULL));
abcdk_tree_iterator_t it = {0,_test_html_dump_cb,NULL};
abcdk_tree_scan(t,&it);
abcdk_tree_free(&t);
}
void test_fnmatch(abcdk_tree_t *args)
{
// char str[]={"abcd?*Qcde"};
// char wd[]={"abc?\\?\\*q*****e"};
char str[]={"/gp/aag/mainA?123456seller=ABVFEJU8LS620"};
char wd[]={"/gp/aag/main\\?\\?*seller=ABVFEJU8LS620"};
int chk = abcdk_fnmatch(str,wd,0,0);
assert(chk==0);
}
void test_crc32(abcdk_tree_t *args)
{
// uint32_t sum = abcdk_crc32_sum("abc",3,0);
// printf("%u\n",sum);
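// For reference: 891568578 == 0x352441C2, the standard IEEE CRC-32 of "abc".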
#pragma omp parallel for num_threads(30)
for (int i = 0; i < 300000000; i++)
{
uint32_t sum2 = abcdk_crc32_sum("abc",3,0);
assert(891568578 ==sum2);
}
}
typedef struct _robots_match
{
int flag;
const char *path;
}robots_match_t;
static int _test_robots_dump_cb(size_t deep, abcdk_tree_t *node, void *opaque)
{
if (deep == 0)
{
abcdk_tree_fprintf(stderr,deep, node, "%s\n", ".");
}
else
{
if (opaque)
{
robots_match_t *m = (robots_match_t*)opaque;
int chk = abcdk_fnmatch(m->path,ABCDK_PTR2I8PTR(node->alloc->pptrs[ABCDK_ROBOTS_VALUE], 0),0,0);
if(chk==0)
{
if(abcdk_strcmp(ABCDK_PTR2I8PTR(node->alloc->pptrs[ABCDK_ROBOTS_KEY], 0),"Disallow",0)==0)
m->flag = 2;
if(abcdk_strcmp(ABCDK_PTR2I8PTR(node->alloc->pptrs[ABCDK_ROBOTS_KEY], 0),"Allow",0)==0)
m->flag = 1;
}
else
{
// m->flag = -1;
}
}
else
{
abcdk_tree_fprintf(stderr,deep, node, "%s: %s\n",
ABCDK_PTR2I8PTR(node->alloc->pptrs[ABCDK_ROBOTS_KEY], 0),
ABCDK_PTR2I8PTR(node->alloc->pptrs[ABCDK_ROBOTS_VALUE], 0));
}
}
return 1;
}
void test_robots(abcdk_tree_t *args)
{
const char *file = abcdk_option_get(args,"--file",0,"");
const char *agent = abcdk_option_get(args,"--agent",0,"*");
robots_match_t m = {0};
m.path = abcdk_option_get(args,"--path",0,"");
abcdk_tree_t *t = abcdk_robots_parse_file(file,agent);
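// The first scan (opaque==NULL) dumps the parsed rule tree; the second scan
// matches --path against the Allow/Disallow rules and records the result in m.flag.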
abcdk_tree_iterator_t it = {0,_test_robots_dump_cb,NULL};
abcdk_tree_scan(t,&it);
it.opaque = &m;
abcdk_tree_scan(t,&it);
printf("flag=%d\n",m.flag);
abcdk_tree_free(&t);
}
#ifdef _FUSE_H_
#define MP4_PATH "/home/devel/job/tmp/"
/**/
int fuse_open(const char *file, struct fuse_file_info *info)
{
syslog(LOG_INFO,"%s(%d): %s",__FUNCTION__,__LINE__,file);
char tmp[PATH_MAX]={0};
abcdk_dirdir(tmp,MP4_PATH);
abcdk_dirdir(tmp,file);
int fd = abcdk_open(tmp, 0, 0, 0);
if (fd < 0)
return -errno;
info->fh = fd;
info->direct_io = 1;
info->keep_cache = 0;
return 0;
}
int fuse_read(const char *file, char *buffer, size_t size, off_t offset, struct fuse_file_info *info)
{
syslog(LOG_INFO, "%s(%d): %s (fd=%lu)", __FUNCTION__, __LINE__, file, info->fh);
syslog(LOG_INFO, "%s(%d): size=%lu off=%ld", __FUNCTION__, __LINE__, size, offset);
assert(info->fh != -1);
int fd = info->fh;
ssize_t rlen = pread(fd, buffer, size, offset);
if(rlen != size)
sleep(10);
else
usleep(40*1000);
return (rlen >= 0 ? rlen : -errno);
}
int fuse_release(const char* file, struct fuse_file_info *info)
{
syslog(LOG_INFO, "%s(%d): %s (fd=%lu)", __FUNCTION__, __LINE__, file, info->fh);
assert(info->fh != -1);
int fd = info->fh;
abcdk_closep(&fd);
return 0;
}
int fuse_getattr(const char *file, struct stat* attr)
{
syslog(LOG_INFO,"%s(%d): %s",__FUNCTION__,__LINE__,file);
// if (abcdk_strcmp(file, "/") == 0)
// {
// }
// else
// {
char tmp[PATH_MAX] = {0};
abcdk_dirdir(tmp, MP4_PATH);
abcdk_dirdir(tmp, file);
int chk = lstat(tmp, attr);
if (chk != 0)
return -errno;
attr->st_dev = 1000;
clock_gettime(CLOCK_REALTIME, &attr->st_ctim);
attr->st_mtim = attr->st_ctim;
attr->st_size = INTMAX_MAX;
// }
return 0;
}
int fuse_fgetattr(const char* file, struct stat* attr, struct fuse_file_info * info)
{
syslog(LOG_INFO, "%s(%d): %s (fd=%lu)", __FUNCTION__, __LINE__, file, info->fh);
assert(info->fh != -1);
int fd = info->fh;
int chk = fstat(fd,attr);
if(chk != 0 )
return -errno;
attr->st_dev = 1000;
attr->st_size = INTMAX_MAX;
return 0;
}
#endif //_FUSE_H_
void test_fuse(abcdk_tree_t *args)
{
#ifdef _FUSE_H_
const char *name_p = abcdk_option_get(args,"--name",0,"test_fuse");
const char *mpoint_p = abcdk_option_get(args,"--mpoint",0,"");
if (strlen(name_p) <= 0)
{
syslog(LOG_ERR, "--name must have parameters.");
return;
}
if (access(mpoint_p, R_OK) != 0)
{
syslog(LOG_ERR, "--mpoint must have parameters and exist.");
return;
}
static struct fuse_operations opts = {0};
opts.read = fuse_read;
opts.open = fuse_open;
opts.release = fuse_release;
opts.getattr = fuse_getattr;
opts.fgetattr = fuse_fgetattr;
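// Build a minimal argv for fuse_main(): argv[0] is the program name, argv[1]
// the mountpoint, and "-o allow_other,auto_cache,kernel_cache" passes mount
// options through to FUSE.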
int fuse_argc = 4;
char **fuse_argv = (char**)abcdk_heap_alloc(fuse_argc*sizeof(char*));
fuse_argv[0] = abcdk_heap_clone(name_p,strlen(name_p));
fuse_argv[1] = abcdk_heap_clone(mpoint_p,strlen(mpoint_p));
fuse_argv[2] = abcdk_heap_clone("-o",2);
fuse_argv[3] = abcdk_heap_clone("allow_other,auto_cache,kernel_cache",35);
fuse_main(fuse_argc, fuse_argv, &opts, NULL);
#endif //_FUSE_H_
}
#if 0
int _mp4_read(abcdk_buffer_t *buf, void *data, size_t size)
{
ssize_t r = abcdk_buffer_read(buf, data, size);
if (r <= 0)
return -2;
else if (r != size)
return -1;
return 0;
}
int _mp4_read_u16(abcdk_buffer_t *buf, uint16_t *data)
{
if (_mp4_read(buf, data, sizeof(uint16_t)))
return -1;
*data = abcdk_endian_b_to_h16(*data);
return 0;
}
int _mp4_read_u24(abcdk_buffer_t *buf, uint8_t *data)
{
if (_mp4_read(buf, data, sizeof(uint8_t)*3))
return -1;
abcdk_endian_b_to_h(data,3);
return 0;
}
int _mp4_read_u32(abcdk_buffer_t *buf, uint32_t *data)
{
if (_mp4_read(buf, data, sizeof(uint32_t)))
return -1;
*data = abcdk_endian_b_to_h32(*data);
return 0;
}
int _mp4_read_u64(abcdk_buffer_t *buf,uint64_t *data)
{
if (_mp4_read(buf, data, sizeof(uint64_t)))
return -1;
*data = abcdk_endian_b_to_h64(*data);
return 0;
}
int _mp4_skip_size(abcdk_buffer_t *buf,uint64_t size)
{
size_t all = 0;
char tmp[1000];
while(all<size)
{
size_t s = ABCDK_MIN(1000,size-all);
if (_mp4_read(buf, tmp, s))
return -1;
all += s;
}
return 0;
}
void _mp4_dump_ftyp(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_ftyp_t *cont = (abcdk_mp4_atom_ftyp_t *)&atom->data;
fprintf(stdout, "major='%c%c%c%c',", cont->major.u8[0], cont->major.u8[1], cont->major.u8[2], cont->major.u8[3] );
fprintf(stdout, "minor='%d',", cont->minor);
fprintf(stdout, "compatible=");
for (size_t i = 0; i < cont->compat->numbers; i++)
{
abcdk_mp4_tag_t *brand = (abcdk_mp4_tag_t *)cont->compat->pptrs[i];
if(!brand->u32)
continue;
fprintf(stdout, "'%c%c%c%c' ", brand->u8[0], brand->u8[1], brand->u8[2], brand->u8[3]);
}
}
void _mp4_dump_mvhd(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_mvhd_t *cont = (abcdk_mp4_atom_mvhd_t *)&atom->data;
fprintf(stdout, "version=%hhu,",cont->version);
if(cont->ctime>=0x7C25B080)
cont->ctime -= 0x7C25B080;
struct tm t;
gmtime_r(&cont->ctime,&t);
fprintf(stdout, "ctime=%d-%02d-%02d %02d:%02d:%02d,",t.tm_year+1900,t.tm_mon+1,t.tm_mday,t.tm_hour,t.tm_min,t.tm_sec);
if(cont->mtime>=0x7C25B080)
cont->mtime -= 0x7C25B080;
struct tm t2;
gmtime_r(&cont->mtime,&t2);
fprintf(stdout, "mtime=%d-%02d-%02d %02d:%02d:%02d,",t2.tm_year+1900,t2.tm_mon+1,t2.tm_mday,t2.tm_hour,t2.tm_min,t2.tm_sec);
fprintf(stdout, "timescale=%u,",cont->timescale);
fprintf(stdout, "duration=%lu,",cont->duration);
fprintf(stdout, "rate=%hu.%hu,",cont->rate>>16,cont->rate&0xffff);
fprintf(stdout, "long=%lu(sec),",cont->duration/cont->timescale);
fprintf(stdout, "nexttrackid=%u,",cont->nexttrackid);
}
void _mp4_dump_tkhd(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_tkhd_t *cont = (abcdk_mp4_atom_tkhd_t *)&atom->data;
fprintf(stdout, "version=%hhu,",cont->version);
if(cont->ctime>=0x7C25B080)
cont->ctime -= 0x7C25B080;
struct tm t;
gmtime_r(&cont->ctime,&t);
fprintf(stdout, "ctime=%d-%02d-%02d %02d:%02d:%02d,",t.tm_year+1900,t.tm_mon+1,t.tm_mday,t.tm_hour,t.tm_min,t.tm_sec);
if(cont->mtime>=0x7C25B080)
cont->mtime -= 0x7C25B080;
struct tm t2;
gmtime_r(&cont->mtime,&t2);
fprintf(stdout, "mtime=%d-%02d-%02d %02d:%02d:%02d,",t2.tm_year+1900,t2.tm_mon+1,t2.tm_mday,t2.tm_hour,t2.tm_min,t2.tm_sec);
fprintf(stdout, "trackid=%u,",cont->trackid);
fprintf(stdout, "duration=%lu,",cont->duration);
fprintf(stdout, "width=%hu.%hu,",cont->width>>16,cont->width&0xffff);
fprintf(stdout, "height=%hu.%hu,",cont->height>>16,cont->height&0xffff);
}
void _mp4_dump_mdhd(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_mdhd_t *cont = (abcdk_mp4_atom_mdhd_t *)&atom->data;
fprintf(stdout, "version=%hhu,",cont->version);
if(cont->ctime>=0x7C25B080)
cont->ctime -= 0x7C25B080;
struct tm t;
gmtime_r(&cont->ctime,&t);
fprintf(stdout, "ctime=%d-%02d-%02d %02d:%02d:%02d,",t.tm_year+1900,t.tm_mon+1,t.tm_mday,t.tm_hour,t.tm_min,t.tm_sec);
if(cont->mtime>=0x7C25B080)
cont->mtime -= 0x7C25B080;
struct tm t2;
gmtime_r(&cont->mtime,&t2);
fprintf(stdout, "mtime=%d-%02d-%02d %02d:%02d:%02d,",t2.tm_year+1900,t2.tm_mon+1,t2.tm_mday,t2.tm_hour,t2.tm_min,t2.tm_sec);
fprintf(stdout, "timescale=%u,",cont->timescale);
fprintf(stdout, "duration=%lu,",cont->duration);
fprintf(stdout, "lang=%hu,",cont->language);
fprintf(stdout, "quality=%hu,",cont->quality);
}
void _mp4_dump_hdlr(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_hdlr_t *cont = (abcdk_mp4_atom_hdlr_t *)&atom->data;
fprintf(stdout,"type=%c%c%c%c, ",
cont->type.u8[0], cont->type.u8[1], cont->type.u8[2], cont->type.u8[3]);
fprintf(stdout,"subtype=%c%c%c%c, ",
cont->subtype.u8[0], cont->subtype.u8[1], cont->subtype.u8[2], cont->subtype.u8[3]);
if(cont->name)
fprintf(stdout,"name='%s' ",cont->name->pptrs[0]);
}
void _mp4_dump_vmhd(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_vmhd_t *cont = (abcdk_mp4_atom_vmhd_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "mode=%u,",cont->mode);
fprintf(stdout, "opcolor=%hu,%hu,%hu",cont->opcolor[0],cont->opcolor[1],cont->opcolor[2]);
}
void _mp4_dump_stts(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_stts_t *cont = (abcdk_mp4_atom_stts_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
if(!cont->tables)
return;
for(size_t i= 0 ;i<cont->tables->numbers;i++)
{
fprintf(stdout, "count=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],0));
fprintf(stdout, "duration=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],4));
}
}
void _mp4_dump_ctts(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_ctts_t *cont = (abcdk_mp4_atom_ctts_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
if(!cont->tables)
return;
for(size_t i= 0 ;i<cont->tables->numbers && i<10;i++)
{
fprintf(stdout, "count=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],0));
fprintf(stdout, "offset=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],4));
}
}
void _mp4_dump_stsc(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_stsc_t *cont = (abcdk_mp4_atom_stsc_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
if(!cont->tables)
return;
for(size_t i= 0 ;i<cont->tables->numbers && i<10;i++)
{
fprintf(stdout, "Firstchunk=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],0));
fprintf(stdout, "perchunk=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],4));
fprintf(stdout, "ID=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],8));
}
}
void _mp4_dump_stsz(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_stsz_t *cont = (abcdk_mp4_atom_stsz_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "[samplesize=%u],",cont->samplesize);
if(!cont->tables)
return;
for(size_t i= 0 ;i<cont->tables->numbers && i<10;i++)
{
fprintf(stdout, "%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],0));
}
}
void _mp4_dump_stco(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_stco_t *cont = (abcdk_mp4_atom_stco_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
if(!cont->tables)
return;
for(size_t i= 0 ;i<cont->tables->numbers && i<10;i++)
{
fprintf(stdout, "%lu,",ABCDK_PTR2U64(cont->tables->pptrs[i],0));
}
}
void _mp4_dump_stss(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_stss_t *cont = (abcdk_mp4_atom_stss_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
if(!cont->tables)
return;
for(size_t i= 0 ;i<cont->tables->numbers && i < 10;i++)
{
fprintf(stdout, "%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],0));
}
}
void _mp4_dump_smhd(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_smhd_t *cont = (abcdk_mp4_atom_smhd_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "balance=%hhu.%hhu",cont->balance>>8,cont->balance&0xff);
}
void _mp4_dump_elst(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_elst_t *cont = (abcdk_mp4_atom_elst_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
if(!cont->tables)
return;
for(size_t i= 0 ;i<cont->tables->numbers && i<10;i++)
{
fprintf(stdout, "duration=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],0));
fprintf(stdout, "time=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],4));
fprintf(stdout, "rate=%hu.%hu,",
ABCDK_PTR2U32(cont->tables->pptrs[i],8)>>16,
ABCDK_PTR2U32(cont->tables->pptrs[i],8)&&0xffff);
}
}
void _mp4_dump_mehd(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_mehd_t *cont = (abcdk_mp4_atom_mehd_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "duration=%lu",cont->duration);
}
void _mp4_dump_trex(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_trex_t *cont = (abcdk_mp4_atom_trex_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "trackid=%u,",cont->trackid);
fprintf(stdout, "sample_desc_index=%u,",cont->default_sample_desc_index);
fprintf(stdout, "duration=%lu,",cont->default_duration);
fprintf(stdout, "sample_sample_size=%u,",cont->default_samplesize);
fprintf(stdout, "sample_flags=%08x",cont->default_sampleflags);
}
void _mp4_dump_mfhd(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_mfhd_t *cont = (abcdk_mp4_atom_mfhd_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "sn=%lu,",cont->sn);
}
void _mp4_dump_tfhd(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_tfhd_t *cont = (abcdk_mp4_atom_tfhd_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "trackid=%u,",cont->trackid);
fprintf(stdout, "base_data_offset=%lu,",cont->base_data_offset);
fprintf(stdout, "sample_desc_index=%u,",cont->sample_desc_index);
fprintf(stdout, "duration=%lu,",cont->default_duration);
fprintf(stdout, "sample_sample_size=%u,",cont->default_samplesize);
fprintf(stdout, "sample_flags=%08x",cont->default_sampleflags);
}
void _mp4_dump_tfdt(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_tfdt_t *cont = (abcdk_mp4_atom_tfdt_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "time=%lu,",cont->base_decode_time);
}
void _mp4_dump_trun(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_trun_t *cont = (abcdk_mp4_atom_trun_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "offset=%u,",cont->data_offset);
fprintf(stdout, "flags=%08x,",cont->first_sample_flags);
if(!cont->tables)
return;
for(size_t i= 0 ;i<cont->tables->numbers;i++)
{
fprintf(stdout, "[%ld]={",i);
fprintf(stdout, "duration=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],0));
fprintf(stdout, "size=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],4));
fprintf(stdout, "flags=%08x,",ABCDK_PTR2U32(cont->tables->pptrs[i],8));
fprintf(stdout, "offset=%u",ABCDK_PTR2U32(cont->tables->pptrs[i],12));
fprintf(stdout, "},");
}
}
void _mp4_dump_mfro(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_mfro_t *cont = (abcdk_mp4_atom_mfro_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "size=%lu,",cont->size);
}
void _mp4_dump_tfra(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_tfra_t *cont = (abcdk_mp4_atom_tfra_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "trackid=%u,",cont->trackid);
fprintf(stdout, "size_traf_num=%hhu,",cont->length_size_traf_num);
fprintf(stdout, "size_trun_num=%hhu,",cont->length_size_trun_num);
fprintf(stdout, "size_sample_num=%hhu,",cont->length_size_sample_num);
if(!cont->tables)
return;
for(size_t i= 0 ;i<cont->tables->numbers;i++)
{
fprintf(stdout, "[%ld]={",i);
fprintf(stdout, "time=%lu,",ABCDK_PTR2U64(cont->tables->pptrs[i],0));
fprintf(stdout, "moof offset=%lu,",ABCDK_PTR2U64(cont->tables->pptrs[i],8));
fprintf(stdout, "traf=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],16));
fprintf(stdout, "trun=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],20));
fprintf(stdout, "sample=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],24));
fprintf(stdout, "},");
}
}
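/*
 * mp4_dump_cb() below drives abcdk_tree_scan(): depth 0 is the virtual root
 * node, a depth of (size_t)-1 appears to signal a scan error, and every real
 * atom gets one "offset,size,type" line followed by a type-specific dump via
 * the _mp4_dump_* helpers above. Returning -1 aborts the scan.
 */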
static int atoms =0;
int mp4_dump_cb(size_t deep, abcdk_tree_t *node, void *opaque)
{
if (deep == -1)
return -1;
atoms += 1;
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
if (deep == 0)
{
abcdk_tree_fprintf(stdout, deep, node, ".\n");
}
else
{
abcdk_tree_fprintf(stdout, deep, node, "offset=%lu,size=%lu,type=%c%c%c%c: ",
atom->off_head, atom->size, atom->type.u8[0], atom->type.u8[1], atom->type.u8[2], atom->type.u8[3]);
if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_FTYP)
_mp4_dump_ftyp(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_MVHD)
_mp4_dump_mvhd(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_TKHD)
_mp4_dump_tkhd(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_MDHD)
_mp4_dump_mdhd(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_HDLR)
_mp4_dump_hdlr(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_VMHD)
_mp4_dump_vmhd(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_STTS)
_mp4_dump_stts(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_CTTS)
_mp4_dump_ctts(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_STSC)
_mp4_dump_stsc(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_STSZ)
_mp4_dump_stsz(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_STCO)
_mp4_dump_stco(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_STSS)
_mp4_dump_stss(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_SMHD)
_mp4_dump_smhd(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_ELST)
_mp4_dump_elst(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_MEHD)
_mp4_dump_mehd(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_TREX)
_mp4_dump_trex(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_MFHD)
_mp4_dump_mfhd(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_TFHD)
_mp4_dump_tfhd(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_TFDT)
_mp4_dump_tfdt(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_TRUN)
_mp4_dump_trun(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_MFRO)
_mp4_dump_mfro(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_TFRA)
_mp4_dump_tfra(deep, node, opaque);
fprintf(stdout, " \n");
// if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_MOOF)
// return -1;
}
// if(atoms>70)
// return -1;
return 1;
}
#endif
void show_mp4_info(int fd)
{
abcdk_tree_t *root = abcdk_mp4_read_probe(fd,0,-1UL, NULL);
abcdk_tree_t *video_p = abcdk_mp4_find2(root,ABCDK_MP4_ATOM_TYPE_TRAK,1,1);
abcdk_tree_t *avc1_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_AVC1,1,1);
abcdk_tree_t *avcc_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_AVCC,1,1);
abcdk_mp4_atom_t *avc1 = (abcdk_mp4_atom_t*)avc1_p->alloc->pptrs[0];
abcdk_mp4_atom_t *avcc = (abcdk_mp4_atom_t*)avcc_p->alloc->pptrs[0];
#ifdef HAVE_FFMPEG
AVCodecContext *enc_ctx = abcdk_avcodec_alloc(abcdk_avcodec_find2(AV_CODEC_ID_H264,0));
enc_ctx->extradata_size = avcc->data.avcc.extradata->sizes[0];
enc_ctx->extradata = av_mallocz(avcc->data.avcc.extradata->sizes[0]);
memcpy(enc_ctx->extradata,avcc->data.avcc.extradata->pptrs[0],avcc->data.avcc.extradata->sizes[0]);
enc_ctx->width = avc1->data.sample_desc.detail.video.width;
enc_ctx->height = avc1->data.sample_desc.detail.video.height;
enc_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
assert(abcdk_avcodec_open(enc_ctx,NULL)==0);
AVFrame *frame_p = av_frame_alloc();
AVPacket packet = {0};
av_init_packet(&packet);
packet.data = 0;
packet.size = 0;
packet.stream_index = 0;
assert(abcdk_avcodec_decode(enc_ctx,frame_p,&packet)>=0);
av_frame_free(&frame_p);
av_packet_unref(&packet);
abcdk_avcodec_free(&enc_ctx);
#endif //HAVE_FFMPEG
abcdk_tree_free(&root);
}
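/*
 * collect_fmp4_video() walks a fragmented MP4. Global defaults live under
 * moov/mvex (mehd: whole-movie duration, trex: per-track defaults), while
 * every moof fragment carries mfhd (sequence number), tfhd (track id plus
 * fragment-level defaults), tfdt (base decode time) and trun (the per-sample
 * table). A sample's DTS is tfdt.base_decode_time plus the sum of the
 * preceding sample durations, and its PTS adds the trun composition offset;
 * the print loop below does exactly that arithmetic.
 */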
void collect_fmp4_video(int fd)
{
int fd2 = abcdk_open("/tmp/abcdk2.h264",1,0,1);
// ftruncate(fd2,0);
lseek(fd2,0,SEEK_END);
abcdk_mp4_tag_t a;
a.u32 = ABCDK_MP4_ATOM_MKTAG('\0','\0','\0','\1');
char *buf= abcdk_heap_alloc(1024*1024*16);
abcdk_tree_t *root = abcdk_mp4_read_probe2(fd,0,-1UL, 0);
abcdk_tree_t *moov_p = abcdk_mp4_find2(root,ABCDK_MP4_ATOM_TYPE_MOOV,1,1);
abcdk_tree_t *mvex_p = abcdk_mp4_find2(root,ABCDK_MP4_ATOM_TYPE_MVEX,1,1);
abcdk_mp4_dump(stdout,moov_p);
abcdk_tree_t *avcc_p = abcdk_mp4_find2(moov_p,ABCDK_MP4_ATOM_TYPE_AVCC,1,1);
abcdk_mp4_atom_t *avcc = (abcdk_mp4_atom_t*)avcc_p->alloc->pptrs[0];
abcdk_tree_t *mehd_p = abcdk_mp4_find2(mvex_p,ABCDK_MP4_ATOM_TYPE_MEHD,1,1);
abcdk_mp4_atom_t *mehd = (abcdk_mp4_atom_t*)mehd_p->alloc->pptrs[0];
abcdk_tree_t *moof_p = abcdk_tree_child(root,1);
while (moof_p)
{
abcdk_mp4_atom_t *moof = (abcdk_mp4_atom_t*)moof_p->alloc->pptrs[0];
if(moof->type.u32 == ABCDK_MP4_ATOM_TYPE_MOOF)
{
abcdk_tree_t *mfhd_p = abcdk_mp4_find2(moof_p, ABCDK_MP4_ATOM_TYPE_MFHD, 1, 1);
abcdk_tree_t *tfhd_p = abcdk_mp4_find2(moof_p, ABCDK_MP4_ATOM_TYPE_TFHD, 1, 1);
abcdk_tree_t *tfdt_p = abcdk_mp4_find2(moof_p, ABCDK_MP4_ATOM_TYPE_TFDT, 1, 1);
abcdk_tree_t *trun_p = abcdk_mp4_find2(moof_p, ABCDK_MP4_ATOM_TYPE_TRUN, 1, 1);
abcdk_mp4_atom_t *mfhd = (abcdk_mp4_atom_t *)mfhd_p->alloc->pptrs[0];
abcdk_mp4_atom_t *tfhd = (abcdk_mp4_atom_t *)tfhd_p->alloc->pptrs[0];
abcdk_mp4_atom_t *tfdt = (abcdk_mp4_atom_t *)tfdt_p->alloc->pptrs[0];
abcdk_mp4_atom_t *trun = (abcdk_mp4_atom_t *)trun_p->alloc->pptrs[0];
#if 1
printf("-----------------------------------mfhd---------------------------------------\n");
printf("Sequence_Number: %lu\n", mfhd->data.mfhd.sequence_number);
printf("-----------------------------------mfhd---------------------------------------\n");
printf("-----------------------------------tfhd---------------------------------------\n");
printf("TrackID: %u\n", tfhd->data.tfhd.trackid);
printf("Base_Data_Offset: %lu\n", tfhd->data.tfhd.base_data_offset);
printf("Sample_Desc_Index: %u\n", tfhd->data.tfhd.sample_desc_idx);
printf("-----------------------------------tfhd---------------------------------------\n");
printf("-----------------------------------tfdt---------------------------------------\n");
printf("base_decode_time: %lu\n", tfdt->data.tfdt.base_decode_time);
printf("-----------------------------------tfdt---------------------------------------\n");
printf("-----------------------------------trun---------------------------------------\n");
printf("Data_Offset: %u\n", trun->data.trun.data_offset);
printf("First_Sample_Flags: %08x\n", trun->data.trun.first_sample_flags);
printf("Numbers: %u\n", trun->data.trun.numbers);
uint64_t duration_start = tfdt->data.tfdt.base_decode_time;
for (size_t i = 0; i < trun->data.trun.numbers; i++)
{
                    uint64_t duration = tfhd->data.tfhd.sample_duration;
if(trun->data.trun.flags & ABCDK_MP4_TRUN_FLAG_SAMPLE_DURATION_PRESENT)
duration = trun->data.trun.tables[i].sample_duration;
duration_start += duration;
printf("Size: %u,PTS: %lu(%lu+%d), DUR: %lu\n",
trun->data.trun.tables[i].sample_size,
duration_start+trun->data.trun.tables[i].composition_offset,
duration_start,
trun->data.trun.tables[i].composition_offset,
duration);
}
printf("-----------------------------------trun---------------------------------------\n");
#else
if (tfhd->data.tfhd.trackid == 1)
{
lseek(fd, moof->off_head + trun->data.trun.data_offset, SEEK_SET);
for (size_t i = 0; i < trun->data.trun.numbers; i++)
{
abcdk_mp4_read(fd, buf, trun->data.trun.tables[i].sample_size);
abcdk_write(fd2, &a.u32, 4);
abcdk_write(fd2, avcc->data.avcc.sps->pptrs[0], avcc->data.avcc.sps->sizes[0]);
abcdk_write(fd2, &a.u32, 4);
abcdk_write(fd2, avcc->data.avcc.pps->pptrs[0], avcc->data.avcc.pps->sizes[0]);
memcpy(buf, &a.u32, 4); //替换长度
abcdk_write(fd2, buf, trun->data.trun.tables[i].sample_size);
}
}
#endif
}
moof_p = abcdk_tree_sibling(moof_p,0);
}
abcdk_heap_free(buf);
abcdk_tree_free(&root);
abcdk_closep(&fd2);
}
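/*
 * collect_mp4_video() uses the classic sample tables of the first 'trak':
 * stsc maps a sample number to its chunk (and in-chunk position), stco gives
 * the chunk's absolute file offset, and stsz the sample size, so reading
 * sample i boils down to lseek(stco[chunk-1] + offset2) + read(size). The
 * disabled branch shows how stts/ctts would yield DTS/CTS and stss the key
 * frames. Extracted AVC samples are turned into an Annex-B stream by
 * prepending start-coded SPS/PPS and rewriting the leading NAL length field.
 */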
void collect_mp4_video(int fd)
{
abcdk_tree_t *root = abcdk_mp4_read_probe2(fd,0,-1UL, 0);
abcdk_mp4_dump(stdout,root);
abcdk_tree_t *video_p = abcdk_mp4_find2(root,ABCDK_MP4_ATOM_TYPE_TRAK,1,1);
abcdk_tree_t *stsz_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STSZ,1,1);
abcdk_tree_t *stss_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STSS,1,1);
abcdk_tree_t *stts_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STTS,1,1);
abcdk_tree_t *ctts_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_CTTS,1,1);
abcdk_tree_t *stsc_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STSC,1,1);
abcdk_tree_t *stco_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STCO,1,1);
abcdk_tree_t *avc1_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_AVC1,1,1);
abcdk_tree_t *avcc_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_AVCC,1,1);
abcdk_mp4_atom_t *stsz = (abcdk_mp4_atom_t*)stsz_p->alloc->pptrs[0];
abcdk_mp4_atom_t *stss = (abcdk_mp4_atom_t*)stss_p->alloc->pptrs[0];
abcdk_mp4_atom_t *stts = (abcdk_mp4_atom_t*)stts_p->alloc->pptrs[0];
abcdk_mp4_atom_t *ctts = (abcdk_mp4_atom_t*)ctts_p->alloc->pptrs[0];
abcdk_mp4_atom_t *stco = (abcdk_mp4_atom_t*)stco_p->alloc->pptrs[0];
abcdk_mp4_atom_t *stsc = (abcdk_mp4_atom_t*)stsc_p->alloc->pptrs[0];
abcdk_mp4_atom_t *avc1 = (abcdk_mp4_atom_t*)avc1_p->alloc->pptrs[0];
abcdk_mp4_atom_t *avcc = (abcdk_mp4_atom_t*)avcc_p->alloc->pptrs[0];
char sps[200] = {0};
abcdk_bin2hex(sps,avcc->data.avcc.sps->pptrs[0],avcc->data.avcc.sps->sizes[0],0);
printf("SPS:[%s]\n",sps);
char pps[200] = {0};
abcdk_bin2hex(pps,avcc->data.avcc.pps->pptrs[0],avcc->data.avcc.pps->sizes[0],0);
printf("PPS:[%s]\n",pps);
#if 0
printf("-----------------------------------stsz---------------------------------------\n");
printf("Size: %u\n",stsz->data.stsz.sample_size);
printf("Numbers: %u\n",stsz->data.stsz.numbers);
for (size_t i = 0; i < stsz->data.stsz.numbers; i++)
{
uint64_t dts = 0;
uint32_t dur = 0;
int32_t cts = 0;
abcdk_mp4_stts_tell(&stts->data.stts,i+1,&dts,&dur);
abcdk_mp4_ctts_tell(&ctts->data.ctts,i+1,&cts);
printf("Size[%lu]: %u, PTS: %lu(%lu+%d) DUR: %u, KEY: %s\n",
i+1,stsz->data.stsz.tables[i].size,dts+cts,dts,cts,dur,
(abcdk_mp4_stss_tell(&stss->data.stss,i+1)?"No":"Yes") );
}
printf("-----------------------------------stsz---------------------------------------\n");
printf("-----------------------------------stss---------------------------------------\n");
printf("Numbers: %u\n",stss->data.stss.numbers);
for (size_t i = 0; i < stss->data.stss.numbers; i++)
{
printf("KeyFrame[%lu]: %u\n",i+1,stss->data.stss.tables[i].sync);
}
printf("-----------------------------------stss---------------------------------------\n");
printf("-----------------------------------stts---------------------------------------\n");
printf("Numbers: %u\n",stts->data.stts.numbers);
for (size_t i = 0; i < stts->data.stts.numbers; i++)
{
printf("Count[%lu]: %u\n",i+1,stts->data.stts.tables[i].sample_count);
printf("Duration[%lu]: %u\n",i+1,stts->data.stts.tables[i].sample_duration);
}
printf("-----------------------------------stts---------------------------------------\n");
printf("-----------------------------------ctts---------------------------------------\n");
printf("Numbers: %u\n",ctts->data.ctts.numbers);
for (size_t i = 0; i < ctts->data.ctts.numbers; i++)
{
printf("Count[%lu]: %u\n",i+1,ctts->data.ctts.tables[i].sample_count);
printf("Offset[%lu]: %u\n",i+1,ctts->data.ctts.tables[i].composition_offset);
}
printf("-----------------------------------ctts---------------------------------------\n");
printf("-----------------------------------stco---------------------------------------\n");
printf("Numbers: %u\n",stco->data.stco.numbers);
for (size_t i = 0; i < stco->data.stco.numbers; i++)
{
printf("Offset[%lu]: %lu\n",i+1,stco->data.stco.tables[i].offset);
}
printf("-----------------------------------stco---------------------------------------\n");
printf("-----------------------------------stsc---------------------------------------\n");
printf("Numbers: %u\n",stsc->data.stsc.numbers);
for(size_t i= 0 ;i<stsc->data.stsc.numbers;i++)
{
printf("First_Chunk: %u\n",stsc->data.stsc.tables[i].first_chunk);
printf("PerChunk: %u\n",stsc->data.stsc.tables[i].samples_perchunk);
printf("ID: %u\n",stsc->data.stsc.tables[i].sample_desc_id);
}
printf("-----------------------------------stsc---------------------------------------\n");
#else
int fd2 = abcdk_open("/tmp/abcdk.h264",1,0,1);
// ftruncate(fd2,0);
lseek(fd2,0,SEEK_END);
// abcdk_write(fd2,avcc->data.avcc.extradata->pptrs[0],avcc->data.avcc.extradata->sizes[0]);
char *buf= abcdk_heap_alloc(1024*1024*16);
abcdk_mp4_tag_t a;
a.u32 = ABCDK_MP4_ATOM_MKTAG('\0','\0','\0','\1');
for(size_t i = 1 ;i<=stsz->data.stsz.numbers;i++)
{
uint32_t chunk=0, offset=0, id=0;
abcdk_mp4_stsc_tell(&stsc->data.stsc,i,&chunk,&offset,&id);
printf("[%lu]={chunk=%u,offset=%u,id=%u}\n",i,chunk,offset,id);
uint32_t offset2=0, size = 0;
abcdk_mp4_stsz_tell(&stsz->data.stsz,offset,i,&offset2,&size);
printf("[%lu]={offset2=%u,size=%u}\n",i,offset2,size);
lseek(fd,stco->data.stco.tables[chunk-1].offset + offset2,SEEK_SET);
abcdk_mp4_read(fd,buf,size);
abcdk_write(fd2,&a.u32,4);
abcdk_write(fd2,avcc->data.avcc.sps->pptrs[0],avcc->data.avcc.sps->sizes[0]);
abcdk_write(fd2,&a.u32,4);
abcdk_write(fd2,avcc->data.avcc.pps->pptrs[0],avcc->data.avcc.pps->sizes[0]);
memcpy(buf,&a.u32,4);//替换长度
abcdk_write(fd2,buf,size);
}
abcdk_closep(&fd2);
abcdk_heap_free(buf);
#endif
abcdk_tree_free(&root);
}
#define ADTS_HEADER_SIZE 7
typedef struct _adtsctx
{
int write_adts;
int objecttype;
int sample_rate_index;
int channel_conf;
} adtsctx;
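/*
 * aac_decode_extradata() below parses the leading bits of an MPEG-4
 * AudioSpecificConfig: a 5-bit audio object type (31 escapes to a 6-bit
 * extension, giving AOT = 32 + ext), a 4-bit sampling-frequency index (15
 * means an explicit 24-bit frequency follows, pushing the channel field 3
 * bytes further in), then the 4-bit channel configuration. Only the fields
 * needed to rebuild an ADTS header are kept.
 */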
int aac_decode_extradata(adtsctx *adts, unsigned char *pbuf, int bufsize)
{
int aot, aotext, samfreindex;
int i, channelconfig;
unsigned char *p = pbuf;
if (!adts || !pbuf || bufsize < 2)
{
return -1;
}
aot = (p[0] >> 3) & 0x1f;
if (aot == 31)
{
aotext = (p[0]<<3 | (p[1]>>5)) & 0x3f;
aot = 32 + aotext;
samfreindex = (p[1] >> 1) & 0x0f;
if (samfreindex == 0x0f)
{
channelconfig = ((p[4] << 3) | (p[5] >> 5)) & 0x0f;
}
else
{
channelconfig = ((p[1] << 3) | (p[2] >> 5)) & 0x0f;
}
}
else
{
samfreindex = ((p[0] << 1) | p[1] >> 7) & 0x0f;
if (samfreindex == 0x0f)
{
channelconfig = (p[4] >> 3) & 0x0f;
}
else
{
channelconfig = (p[1] >> 3) & 0x0f;
}
}
#ifdef AOT_PROFILE_CTRL
if (aot < 2)
aot = 2;
#endif
adts->objecttype = aot-1;
adts->sample_rate_index = samfreindex;
adts->channel_conf = channelconfig;
adts->write_adts = 1;
return 0;
}
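/*
 * aac_set_adts_head() packs the 7-byte ADTS header (syncword 0xFFF, MPEG-4,
 * no CRC, hence 0xFF 0xF1):
 *   buf[2]: 2-bit profile | 4-bit sampling-frequency index | private bit (0) | channel bit 2
 *   buf[3]: channel bits 1..0 | frame-length bits 12..11
 *   buf[4]: frame-length bits 10..3
 *   buf[5]: frame-length bits 2..0 | buffer-fullness bits 10..6
 *   buf[6]: buffer-fullness bits 5..0 | raw-data-blocks-minus-one (0)
 * The 13-bit frame length counts header plus payload ('size' is the raw AAC
 * payload length), and the buffer fullness is pinned at 0x7FF (VBR).
 */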
int aac_set_adts_head(adtsctx *acfg, unsigned char *buf, int size)
{
unsigned char byte;
if (size < ADTS_HEADER_SIZE)
return -1;
buf[0] = 0xff;
buf[1] = 0xf1;
byte = 0;
byte |= (acfg->objecttype & 0x03) << 6;
byte |= (acfg->sample_rate_index & 0x0f) << 2;
byte |= (acfg->channel_conf & 0x07) >> 2;
buf[2] = byte;
byte = 0;
byte |= (acfg->channel_conf & 0x07) << 6;
byte |= (ADTS_HEADER_SIZE + size) >> 11;
buf[3] = byte;
byte = 0;
byte |= (ADTS_HEADER_SIZE + size) >> 3;
buf[4] = byte;
byte = 0;
byte |= ((ADTS_HEADER_SIZE + size) & 0x7) << 5;
byte |= (0x7ff >> 6) & 0x1f;
buf[5] = byte;
byte = 0;
byte |= (0x7ff & 0x3f) << 2;
buf[6] = byte;
return 0;
}
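/*
 * A minimal, uncalled sketch exercising the two helpers above; the values
 * assume AAC-LC (AOT 2, packed profile 1), 44.1 kHz (index 4) and stereo
 * (channel configuration 2), and exist only as a reference.
 */
static void aac_adts_example(void)
{
    /* AudioSpecificConfig 0x12 0x10 = AOT 2, frequency index 4, 2 channels. */
    unsigned char asc[2] = {0x12, 0x10};
    adtsctx adts = {0};
    assert(aac_decode_extradata(&adts, asc, sizeof(asc)) == 0);
    unsigned char hdr[ADTS_HEADER_SIZE] = {0};
    assert(aac_set_adts_head(&adts, hdr, 1024) == 0); /* 1024-byte raw payload. */
    assert(hdr[0] == 0xff && hdr[1] == 0xf1); /* ADTS syncword, MPEG-4, no CRC. */
}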
void collect_mp4_sound(int fd)
{
abcdk_tree_t *root = abcdk_mp4_read_probe2(fd,0,-1UL, 0);
abcdk_mp4_dump(stdout,root);
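    /* NOTE: the second 'trak' box is assumed to be the audio track here. */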
abcdk_tree_t *video_p = abcdk_mp4_find2(root,ABCDK_MP4_ATOM_TYPE_TRAK,2,1);
abcdk_tree_t *stsz_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STSZ,1,1);
abcdk_tree_t *stss_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STSS,1,1);
abcdk_tree_t *stts_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STTS,1,1);
abcdk_tree_t *ctts_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_CTTS,1,1);
abcdk_tree_t *stsc_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STSC,1,1);
abcdk_tree_t *stco_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STCO,1,1);
abcdk_tree_t *mp4a_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_MP4A,1,1);
abcdk_tree_t *esds_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_ESDS,1,1);
abcdk_mp4_atom_t *stsz = (abcdk_mp4_atom_t*)stsz_p->alloc->pptrs[0];
// abcdk_mp4_atom_t *stss = (abcdk_mp4_atom_t*)stss_p->alloc->pptrs[0];
abcdk_mp4_atom_t *stts = (abcdk_mp4_atom_t*)stts_p->alloc->pptrs[0];
// abcdk_mp4_atom_t *ctts = (abcdk_mp4_atom_t*)ctts_p->alloc->pptrs[0];
abcdk_mp4_atom_t *stco = (abcdk_mp4_atom_t*)stco_p->alloc->pptrs[0];
abcdk_mp4_atom_t *stsc = (abcdk_mp4_atom_t*)stsc_p->alloc->pptrs[0];
abcdk_mp4_atom_t *mp4a = (abcdk_mp4_atom_t*)mp4a_p->alloc->pptrs[0];
abcdk_mp4_atom_t *esds = (abcdk_mp4_atom_t*)esds_p->alloc->pptrs[0];
int fd2 = abcdk_open("/tmp/abcdk.acc",1,0,1);
// ftruncate(fd2,0);
lseek(fd2,0,SEEK_END);
char *buf= abcdk_heap_alloc(1024*1024*16);
adtsctx adts={0};
aac_decode_extradata(&adts,esds->data.esds.dec_sp_info.extradata->pptrs[0],esds->data.esds.dec_sp_info.extradata->sizes[0]);
for(size_t i = 1 ;i<=stsz->data.stsz.numbers;i++)
{
uint32_t chunk=0, offset=0, id=0;
abcdk_mp4_stsc_tell(&stsc->data.stsc,i,&chunk,&offset,&id);
printf("[%lu]={chunk=%u,offset=%u,id=%u}\n",i,chunk,offset,id);
uint32_t offset2=0, size = 0;
abcdk_mp4_stsz_tell(&stsz->data.stsz,offset,i,&offset2,&size);
printf("[%lu]={offset2=%u,size=%u}\n",i,offset2,size);
lseek(fd,stco->data.stco.tables[chunk-1].offset + offset2,SEEK_SET);
abcdk_mp4_read(fd,buf,size);
        char hdr[ADTS_HEADER_SIZE]={0};
        aac_set_adts_head(&adts,(unsigned char*)hdr,size);
        abcdk_write(fd2,hdr,ADTS_HEADER_SIZE);
abcdk_write(fd2,buf,size);
}
abcdk_closep(&fd2);
abcdk_heap_free(buf);
abcdk_tree_free(&root);
}
void test_mp4(abcdk_tree_t *args)
{
const char *name_p = abcdk_option_get(args,"--file",0,"");
#if 0
abcdk_allocator_t *t = abcdk_mmap2(name_p,0,0);
if(!t)
return;
abcdk_buffer_t *buf = abcdk_buffer_alloc(t);
if(!buf)
{
abcdk_allocator_unref(&t);
return;
}
buf->wsize = t->sizes[0];
while (1)
{
uint32_t size2 = 0;
uint64_t size = 0;
if (_mp4_read_u32(buf, &size2))
break;
uint32_t type = 0;
if (_mp4_read(buf, &type, sizeof(uint32_t)))
break;
for (int i = 0; i < 4; i++)
printf("%c", ABCDK_PTR2I8(&type, i));
printf("\n");
if (size2 == 0)
break;
else if (size2 == 1)
{
if (_mp4_read_u64(buf, &size))
break;
}
        else
            size = size2;
size_t hsize = (size2==1?16:8);
/*skip data*/
if(_mp4_skip_size(buf,size-hsize))
break;
}
abcdk_buffer_free(&buf);
#else
int fd = abcdk_open(name_p,0,0,0);
if(fd<0)
return;
#if 0
abcdk_tree_t *root = abcdk_mp4_read_probe(fd,0,-1UL, NULL);
abcdk_tree_t *ftyp = abcdk_mp4_find2(root,ABCDK_MP4_ATOM_TYPE_FTYP,1,0);
abcdk_tree_t *moov = abcdk_mp4_find2(root,ABCDK_MP4_ATOM_TYPE_MOOV,1,0);
abcdk_tree_iterator_t it = {0,mp4_dump_cb,(void*)(int64_t)fd};
abcdk_tree_scan(root,&it);
printf("\natoms:%d\n",atoms);
abcdk_tree_free(&root);
#else
// show_mp4_info(fd);
//collect_mp4_video(fd);
collect_mp4_sound(fd);
//collect_fmp4_video(fd);
#endif
abcdk_closep(&fd);
#endif
}
void test_dirent(abcdk_tree_t *args)
{
const char *path_p = abcdk_option_get(args,"--path",0,"");
abcdk_tree_t *t = abcdk_tree_alloc3(1);
abcdk_dirent_open(t,path_p);
for(;;)
{
char file[PATH_MAX]={0};
int chk = abcdk_dirent_read(t,file);
if(chk != 0)
break;
printf("%s\n",file);
abcdk_dirent_open(t,file);
}
}
void test_netlink(abcdk_tree_t *args)
{
const char *ap = abcdk_option_get(args,"--i",0,"");
int flag = 0;
int chk = abcdk_netlink_fetch(ap,&flag);
if (chk == 0)
{
printf("%s: UP=%s,BCAST=%s,MCAST=%s,LOOP=%s,P2P=%s,RUN=%s\n", ap,
(flag & IFF_UP) ? "Yes" : "No",
(flag & IFF_BROADCAST) ? "Yes" : "No",
(flag & IFF_MULTICAST) ? "Yes" : "No",
(flag & IFF_LOOPBACK) ? "Yes" : "No",
(flag & IFF_POINTOPOINT) ? "Yes" : "No",
(flag & IFF_RUNNING) ? "Yes" : "No");
}
else
printf("%s: %s\n", ap, strerror(errno));
}
#ifdef HAVE_LIBNM
void request_rescan_cb (GObject *object, GAsyncResult *result, gpointer user_data)
{
NMClient *cli = (NMClient *) user_data;
GError *error = NULL;
nm_device_wifi_request_scan_finish (NM_DEVICE_WIFI (object), result, &error);
// if (error) {
// g_string_printf (nmc->return_text, _("Error: %s."), error->message);
// nmc->return_value = NMC_RESULT_ERROR_UNKNOWN;
// g_error_free (error);
// }
}
static int
compare_devices (const void *a, const void *b)
{
NMDevice *da = *(NMDevice **)a;
NMDevice *db = *(NMDevice **)b;
int cmp;
/* Sort by later device states first */
cmp = nm_device_get_state (db) - nm_device_get_state (da);
if (cmp != 0)
return cmp;
cmp = g_strcmp0 (nm_device_get_type_description (da),
nm_device_get_type_description (db));
if (cmp != 0)
return cmp;
return g_strcmp0 (nm_device_get_iface (da),
nm_device_get_iface (db));
}
static NMDevice **
get_devices_sorted (NMClient *client)
{
const GPtrArray *devs;
NMDevice **sorted;
devs = nm_client_get_devices (client);
sorted = g_new (NMDevice *, devs->len + 1);
memcpy (sorted, devs->pdata, devs->len * sizeof (NMDevice *));
sorted[devs->len] = NULL;
qsort (sorted, devs->len, sizeof (NMDevice *), compare_devices);
return sorted;
}
#endif //HAVE_LIBNM
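/*
 * iw_essid_escape() renders a raw ESSID printable: control bytes, non-ASCII
 * bytes, and literal backslashes that would read as an escape sequence are
 * emitted as \xHH. Worst case every input byte expands to four output bytes,
 * so the destination must hold at least 4*slen + 1 bytes; see the
 * 4 * IW_ESSID_MAX_SIZE + 1 buffer in the caller below.
 */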
void
iw_essid_escape(char * dest,
const char * src,
const int slen)
{
const unsigned char * s = (const unsigned char *) src;
const unsigned char * e = s + slen;
char * d = dest;
/* Look every character of the string */
while(s < e)
{
int isescape;
/* Escape the escape to avoid ambiguity.
* We do a fast path test for performance reason. Compiler will
* optimise all that ;-) */
if(*s == '\\')
{
/* Check if we would confuse it with an escape sequence */
if((e-s) > 4 && (s[1] == 'x')
&& (isxdigit(s[2])) && (isxdigit(s[3])))
{
isescape = 1;
}
else
isescape = 0;
}
else
isescape = 0;
/* Is it a non-ASCII character ??? */
if(isescape || !isascii(*s) || iscntrl(*s))
{
/* Escape */
sprintf(d, "\\x%02X", *s);
d += 4;
}
else
{
/* Plain ASCII, just copy */
*d = *s;
d++;
}
s++;
}
/* NUL terminate destination */
*d = '\0';
}
void test_iwscan(abcdk_tree_t *args)
{
#if 0
abcdk_allocator_t * k = abcdk_allocator_alloc(NULL,1,0);
abcdk_allocator_t * p = abcdk_allocator_alloc(NULL,1,0);
k->pptrs[0] = "GH";
k->sizes[0] = 2;
p->pptrs[0] = ABCDK_ANSI_COLOR_RED;
//
int sock = socket(AF_INET, SOCK_DGRAM, 0);
struct iw_scan_req scan_req = {0};
// scan_req.scan_type = IW_SCAN_TYPE_ACTIVE;
//scan_req.flags = ;
struct iwreq req = {0};
strncpy(req.ifr_ifrn.ifrn_name, "wlx70f11c3c3500", IFNAMSIZ);
//req.u.data.pointer = &scan_req;
//req.u.data.length = sizeof(struct iw_scan_req);
//req.u.data.flags = IW_SCAN_DEFAULT;
// int sock = socket(AF_INET, SOCK_DGRAM, 0);
// int chk = abcdk_socket_ioctl(SIOCSIWSCAN,&req);
int chk = ioctl(sock, SIOCSIWSCAN,&req);
abcdk_allocator_t * scan_rsp = abcdk_allocator_alloc2(100000);
/* Forever */
while (1)
{
struct iwreq rsp = {0};
strncpy(rsp.ifr_ifrn.ifrn_name, "wlx70f11c3c3500", IFNAMSIZ);
rsp.u.data.pointer = scan_rsp->pptrs[0];
rsp.u.data.length = scan_rsp->sizes[0];
rsp.u.data.flags = 0;
//chk = abcdk_socket_ioctl(SIOCGIWSCAN,&rsp);
chk = ioctl(sock,SIOCGIWSCAN,&rsp);
if (chk !=0)
{
if(errno == EAGAIN)
continue;
else
goto END;
}
abcdk_hexdump_option_t opt = {0};
if(rsp.u.data.length)
abcdk_hexdump(stderr,rsp.u.data.pointer,rsp.u.data.length,0,&opt);
void *p = rsp.u.data.pointer;
for (;p - rsp.u.data.pointer < rsp.u.data.length;)
{
struct iw_event *event = ABCDK_PTR2PTR(struct iw_event, p, 0);
printf("cmd = %04X,len = %hu\n", event->cmd, event->len);
switch (event->cmd)
{
case SIOCGIWAP:
{
struct ether_addr *eth = ABCDK_PTR2PTR(struct ether_addr, event->u.addr.sa_data, 0);
printf("address: %02X:%02X:%02X:%02X:%02X:%02X\n",
eth->ether_addr_octet[0], eth->ether_addr_octet[1],
eth->ether_addr_octet[2], eth->ether_addr_octet[3],
eth->ether_addr_octet[4], eth->ether_addr_octet[5]);
}
break;
case SIOCGIWNWID:
{
if (event->u.nwid.disabled)
printf("\tNWID: off/any\n");
else
printf(" NWID: %X\n", event->u.nwid.value);
}
break;
case SIOCGIWFREQ:
{
printf("\tchannel: %f\n",((double) event->u.freq.m) * pow(10,event->u.freq.e));
}
break;
case SIOCGIWESSID:
{
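                    /* NOTE: this early break leaves the ESSID parsing below
                     * unreachable (apparently disabled on purpose). */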
break;
event->u.essid.pointer = p+4+sizeof(struct iw_point);
event->u.essid.length = event->len-4-sizeof(struct iw_point);
char essid[4 * IW_ESSID_MAX_SIZE + 1];
memset(essid, '\0', sizeof(essid));
if ((event->u.essid.pointer) && (event->u.essid.length))
iw_essid_escape(essid,event->u.essid.pointer, event->u.essid.length);
if (event->u.essid.flags)
{
if ((event->u.essid.flags & IW_ENCODE_INDEX) > 1)
printf("\tESSID: %s [%d]\n",essid,(event->u.essid.flags & IW_ENCODE_INDEX));
else
printf("\tESSID: %s\n",essid);
}
else
{
printf("\tESSID: off/any/hidden\n");
}
}
break;
default:
break;
}
p = p+event->len;
}
goto END;
}
END:
abcdk_allocator_unref(&scan_rsp);
abcdk_allocator_unref(&k);
abcdk_allocator_unref(&p);
abcdk_closep(&sock);
#else
#ifdef HAVE_LIBNM
GError *err = NULL;
NMClient *cli = nm_client_new(NULL,&err);
// NMDevice *dev = nm_client_get_device_by_iface(cli,"wlx70f11c3c3500");
// gboolean chk = nm_device_wifi_request_scan(NM_DEVICE_WIFI(dev),NULL,&err);
// nm_device_wifi_request_scan_async (NM_DEVICE_WIFI (dev),
// NULL, request_rescan_cb, cli);
// //nm_device_wifi_request_scan_finish(&device,&cancellable,&err);
// g_error_free(err);
NMDevice **devices = get_devices_sorted (cli);
for (int i = 0; devices[i]; i++)
{
NMDevice *dev = devices[i];
if (!NM_IS_DEVICE_WIFI (dev))
continue;
NMAccessPoint * ap = nm_device_wifi_get_active_access_point(NM_DEVICE_WIFI (dev));
const char * ssid = ap? nm_access_point_get_bssid (ap):"";
printf("ssid: %s\n",ssid);
}
#endif //HAVE_LIBNM
#endif
}
void test_hexdump(abcdk_tree_t *args)
{
const char *file_p = abcdk_option_get(args,"--file",0,"");
abcdk_allocator_t * m = abcdk_mmap2(file_p,0,0);
abcdk_hexdump_option_t opt = {0};
if(abcdk_option_exist(args,"--show-addr"))
opt.flag |= ABCDK_HEXDEMP_SHOW_ADDR;
if(abcdk_option_exist(args,"--show-char"))
opt.flag |= ABCDK_HEXDEMP_SHOW_CHAR;
opt.width = abcdk_option_get_int(args,"--width",0,16);
opt.keyword = abcdk_allocator_alloc(NULL,4,0);
opt.palette = abcdk_allocator_alloc(NULL,3,0);
opt.keyword->pptrs[0] = "mvhd";
opt.keyword->sizes[0] = 4;
opt.keyword->pptrs[1] = "ftyp";
opt.keyword->sizes[1] = 4;
opt.keyword->pptrs[2] = "moov";
opt.keyword->sizes[2] = 4;
opt.keyword->pptrs[3] = "mdat";
opt.keyword->sizes[3] = 4;
opt.palette->pptrs[0] = ABCDK_ANSI_COLOR_RED;
opt.palette->pptrs[1] = ABCDK_ANSI_COLOR_GREEN;
opt.palette->pptrs[2] = ABCDK_ANSI_COLOR_BLUE;
if(m)
{
//ssize_t w = abcdk_hexdump(stdout,m->pptrs[0],m->sizes[0],0,&opt);
ssize_t w = abcdk_hexdump(stdout,m->pptrs[0],1000,0,&opt);
fprintf(stderr,"w=%ld",w);
}
abcdk_allocator_unref(&m);
abcdk_allocator_unref(&opt.keyword);
abcdk_allocator_unref(&opt.palette);
}
void test_video(abcdk_tree_t *args)
{
#ifdef HAVE_FFMPEG
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
int chk;
const char *src_file_p = abcdk_option_get(args,"--src",0,"");
const char *dst_file_p = abcdk_option_get(args,"--dst",0,"");
AVDictionary *dict = NULL;
#if 1
av_dict_set(&dict,"framerate","120",0);
//av_dict_set(&dict,"video_size","1920x1080",0);
av_dict_set(&dict,"video_size","640x480",0);
//av_dict_set(&dict,"input_format","mjpeg",0);
av_dict_set(&dict,"input_format","yuyv422",0);
#endif
abcdk_video_t *src = abcdk_video_open_capture(NULL,src_file_p,-1UL,1,dict);
av_dict_free(&dict);
//abcdk_avformat_show_options(src->ctx);
//int dst = abcdk_open(dst_file_p,1,0,1);
abcdk_video_t *dst = abcdk_video_open_writer(NULL,dst_file_p,NULL);
int stream_index = abcdk_video_find_stream(src,1);
double fps = abcdk_video_get_fps(src,stream_index);
int width = abcdk_video_get_width(src,stream_index);
int height = abcdk_video_get_height(src,stream_index);
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58,35,100)
enum AVCodecID id = src->ctx->streams[stream_index]->codec->codec_id;
#else
enum AVCodecID id = src->ctx->streams[stream_index]->codecpar->codec_id;
#endif
//int stream_index2 = abcdk_video_add_stream(dst,fps,width,height,id,NULL,0,0);
int stream_index2 = abcdk_video_add_stream(dst,fps,width,height,AV_CODEC_ID_H264,NULL,0,0);
// int stream_index2 = abcdk_video_add_stream(dst, fps, width, height, id,
// src->ctx->streams[stream_index]->codec->extradata,
// src->ctx->streams[stream_index]->codec->extradata_size,
// 1);
uint64_t c = 0;
uint64_t s = 0;
abcdk_video_write_header(dst,0,1);
printf("LONG: %f\n",abcdk_video_get_duration(src,stream_index));
printf("FPS: %f\n",abcdk_video_get_fps(src,stream_index));
AVPacket pkt;
av_init_packet(&pkt);
AVFrame *fae = av_frame_alloc();
AVFrame *fae2 = av_frame_alloc();
fae2->format = dst->codec_ctx[0]->pix_fmt;
fae2->height = height;
fae2->width = width;
av_frame_get_buffer(fae2,1);
struct SwsContext *sws = NULL;
for(int i =0;i<1000;i++)
{
// chk = abcdk_video_read(src,&pkt,stream_index,0,1);
chk = abcdk_video_read2(src,fae,stream_index,0);
if(chk < 0)
break;
printf("DTS: %f ,PTS: %f\n",
// abcdk_video_ts2sec(src, pkt.stream_index, pkt.dts),
// abcdk_video_ts2sec(src, pkt.stream_index, pkt.pts));
abcdk_video_ts2sec(src, chk, fae->pkt_dts),
abcdk_video_ts2sec(src, chk, fae->pkt_pts));
// abcdk_write(dst,pkt.data,pkt.size);
// chk = abcdk_video_write3(dst,stream_index2,pkt.data,pkt.size);
if(!sws)
sws = abcdk_sws_alloc2(fae, fae2, 0);
abcdk_sws_scale(sws,fae,fae2);
chk = abcdk_video_write2(dst,stream_index2,fae2);
if(chk < 0)
break;
s = abcdk_clock(c, &c) / 1000;
if (s < (1000 / fps))
usleep(((1000 / fps) - s) * 1000);
}
av_frame_free(&fae);
av_packet_unref(&pkt);
abcdk_video_write_trailer(dst);
// abcdk_closep(&dst);
abcdk_video_close(dst);
abcdk_video_close(src);
#pragma GCC diagnostic pop
#endif //
}
void test_com(abcdk_tree_t *args)
{
const char *port = abcdk_option_get(args,"--port",0,"");
int fd = open(port,O_RDWR|O_NOCTTY);
// assert(isatty(fd)==0);
#if 0
struct termios opt = {0};
int chk = tcgetattr(fd,&opt);
// tcflush(fd, TCIOFLUSH);
cfsetispeed(&opt,B9600);
cfsetospeed(&opt,B9600);
opt.c_cflag |=(CLOCAL|CREAD);
opt.c_cflag &= ~PARENB;
opt.c_cflag &= ~CSTOPB;
opt.c_cflag &= ~CSIZE;
    opt.c_cflag |= CS8;
opt.c_cc[VTIME] = 0;
opt.c_cc[VMIN] = 0;
tcflush(fd,TCIOFLUSH);
//cfsetispeed(&opt,B4800);
assert(tcsetattr(fd,TCSANOW,&opt)==0);
struct serial_rs485 conf = {0};
conf.flags |= SER_RS485_ENABLED;
// conf.flags |= SER_RS485_RX_DURING_TX;
// assert(ioctl(fd,TIOCSRS485,&conf)==0);
#else
assert(abcdk_tcattr_serial(fd, 9600, 8, 0, 1,NULL)== 0);
#endif
uint64_t s = 0,s1 = 0,s2 = 0;
char buf1[18]={0};
char buf2[18]={0};
for(int i = 0;i<999999999;i++)
{
int chk = abcdk_poll(fd,0x01,-1);
assert(chk>0);
abcdk_read(fd,buf1,17);
s1 = abcdk_clock(s,&s);
s2 += s1;
if(memcmp(buf1,buf2,17)!=0)
s2 = 0;
else if(s2 >= 1000000)
s2 = 0;
if(s2 ==0 )
{
memcpy(buf2,buf1,17);
char buf3[35]={0};
abcdk_bin2hex(buf3,buf1,17,0);
printf("[%d]: '%s' '%s'\n",i,buf1,buf3);
}
}
abcdk_closep(&fd);
}
void test_mpi(abcdk_tree_t *args)
{
#ifdef HAVE_MPI
// int argc = 1;
// char *argv[1] = {
// abcdk_option_get(args,"--",0,""),
// };
int rank,size;
// MPI_Init(&argc, &argv);
MPI_Init(NULL,NULL);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
printf("Hello World from thread %d of %d\n", rank, size);
MPI_Finalize();
#endif
}
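/*
 * test_lz4() assumes a tiny ad-hoc container: the source file starts with the
 * decompressed size as a 4-byte big-endian integer, followed by one raw LZ4
 * block. It decompresses that block, recompresses it with the default
 * encoder, decompresses again, and checks the round trip is lossless.
 */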
void test_lz4(abcdk_tree_t *args)
{
#ifdef HAVE_LZ4
const char *src = abcdk_option_get(args,"--src",0,"");
const char *dst = abcdk_option_get(args,"--dst",0,"");
abcdk_allocator_t *s = abcdk_mmap2(src,0,0);
size_t dsize = abcdk_endian_b_to_h32(ABCDK_PTR2U32(s->pptrs[0],0));
abcdk_allocator_t *d = abcdk_allocator_alloc2(dsize);
//LZ4_decompress_fast(s->pptrs[0]+4,d->pptrs[0],dsize);
int m = abcdk_lz4_dec_fast(d->pptrs[0],dsize,s->pptrs[0]+4);
abcdk_allocator_t *q = abcdk_allocator_alloc2(2000);
int n = abcdk_lz4_enc_default(q->pptrs[0],q->sizes[0],d->pptrs[0],dsize);
//assert(memcmp(q->pptrs[0],s->pptrs[0]+4,s->sizes[0]-4)==0);
abcdk_allocator_t *p = abcdk_allocator_alloc2(dsize);
int m2 = abcdk_lz4_dec_fast(p->pptrs[0],dsize,q->pptrs[0]);
assert(memcmp(p->pptrs[0],d->pptrs[0],d->sizes[0])==0);
abcdk_allocator_unref(&q);
abcdk_allocator_unref(&p);
int fd = abcdk_open(dst,1,0,1);
ftruncate(fd,0);
abcdk_write(fd,d->pptrs[0],dsize);
abcdk_closep(&fd);
abcdk_allocator_unref(&s);
abcdk_allocator_unref(&d);
#endif
}
void test_archive(abcdk_tree_t *args)
{
#ifdef HAVE_ARCHIVE
const char *src = abcdk_option_get(args,"--src",0,"");
const char *dst = abcdk_option_get(args,"--dst",0,"");
struct archive *a = archive_write_new();
struct archive_entry *entry = archive_entry_new();
// archive_write_add_filter_bzip2(a);
// archive_write_set_format_zip(a);
// archive_write_add_filter_gzip(a);
// archive_write_set_format_pax_restricted(a); // Note 1
archive_write_set_format_gnutar(a);
archive_write_open_filename(a, dst);
int fd = abcdk_open(src,0,0,0);
struct stat st = {0};
fstat(fd,&st);
archive_entry_copy_pathname(entry,src+10);
#if 0
archive_entry_set_size(entry, st.st_size); // Note 3
archive_entry_set_filetype(entry, AE_IFREG);
archive_entry_set_perm(entry, 0644);
#else
archive_entry_copy_stat(entry,&st);
#endif
archive_write_header(a, entry);
char buf[500];
for(;;)
{
ssize_t r = abcdk_read(fd,buf,500);
if(r<=0)
break;
archive_write_data(a,buf,r);
}
archive_write_finish_entry(a);
archive_entry_free(entry);
abcdk_closep(&fd);
archive_write_close(a);
archive_write_free(a);
#endif
}
void test_modbus(abcdk_tree_t *args)
{
#ifdef HAVE_MODBUS
const char *port = abcdk_option_get(args,"--port",0,"");
modbus_t *m = modbus_new_rtu(port, 9600, 'N', 8, 1);
modbus_set_debug(m, 0);
modbus_set_slave(m,1);
modbus_connect(m);
struct timeval t;
t.tv_sec = 10;
t.tv_usec = 0;
modbus_set_response_timeout(m, &t);
//int chk = modbus_rtu_set_serial_mode(m,MODBUS_RTU_RS232);
int f2 = 0;
while(1)
{
uint16_t buf[20]={0};
int regs = modbus_read_registers(m,3,2,buf);
int f = ABCDK_PTR2OBJ(float, buf, 0) * 1000;
if (f != f2)
{
printf("%f\n", (float)f / 1000);
f2 = f;
}
usleep(1000);
}
modbus_close(m);
modbus_free(m);
#endif
}
#ifdef HAVE_LIBUSB
static void print_devs(libusb_device **devs)
{
libusb_device *dev;
int i = 0, j = 0;
uint8_t path[8];
while ((dev = devs[i++]) != NULL) {
struct libusb_device_descriptor desc;
int r = libusb_get_device_descriptor(dev, &desc);
if (r < 0) {
fprintf(stderr, "failed to get device descriptor");
return;
}
printf("%04x:%04x (bus %d, device %d)",
desc.idVendor, desc.idProduct,
libusb_get_bus_number(dev), libusb_get_device_address(dev));
r = libusb_get_port_numbers(dev, path, sizeof(path));
if (r > 0) {
printf(" path: %d", path[0]);
for (j = 1; j < r; j++)
printf(".%d", path[j]);
}
printf("\n");
}
}
#endif
int test_libusb(abcdk_tree_t *args)
{
#ifdef HAVE_LIBUSB
libusb_device **devs;
int r;
ssize_t cnt;
r = libusb_init(NULL);
if (r < 0)
return r;
cnt = libusb_get_device_list(NULL, &devs);
if (cnt < 0){
libusb_exit(NULL);
return (int) cnt;
}
print_devs(devs);
libusb_free_device_list(devs, 1);
libusb_exit(NULL);
#endif
return 0;
}
#ifdef HAVE_OPENSSL
void test_openssl_server(abcdk_tree_t *args)
{
#if OPENSSL_VERSION_NUMBER <= 0x100020bfL
const SSL_METHOD *method = TLSv1_2_server_method();
#else
const SSL_METHOD *method = TLS_server_method();
#endif
SSL_CTX * ctx = SSL_CTX_new(method);
int chk;
const char *capath = abcdk_option_get(args,"--ca-path",0,NULL);
if (capath)
{
        /* When loading CA certificates from a directory, the hashed symlink
         * names must be generated first with: c_rehash <CApath> */
chk = SSL_CTX_load_verify_locations(ctx, NULL, capath);
assert(chk == 1);
X509_VERIFY_PARAM *param = SSL_CTX_get0_param(ctx);
//X509_VERIFY_PARAM_set_purpose(param, X509_PURPOSE_ANY);
X509_VERIFY_PARAM_set_flags(param, X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL);
}
chk = abcdk_openssl_ssl_ctx_load_crt(ctx, abcdk_option_get(args, "--crt-file", 0, NULL),
abcdk_option_get(args, "--key-file", 0, NULL),
abcdk_option_get(args, "--key-pwd", 0, NULL));
SSL* s = abcdk_openssl_ssl_alloc(ctx);
SSL_set_verify(s,SSL_VERIFY_PEER,NULL);
abcdk_sockaddr_t addr = {0};
//abcdk_sockaddr_from_string(&addr,"0.0.0.0:12345",0);
addr.family = ABCDK_UNIX;
strcpy(addr.addr_un.sun_path,"/tmp/abcdk.txt2");
int l = abcdk_socket(addr.family,0);
int flag = 1;
abcdk_sockopt_option_int(l, SOL_SOCKET, SO_REUSEPORT, &flag, 2);
abcdk_sockopt_option_int(l, SOL_SOCKET, SO_REUSEADDR, &flag, 2);
unlink(addr.addr_un.sun_path);
assert(abcdk_bind(l,&addr)==0);
assert(listen(l, SOMAXCONN)==0);
abcdk_sockaddr_t addr2 = {0};
int c = abcdk_accept(l,&addr2);
assert(abcdk_openssl_ssl_handshake(c,s,1,10000)==0);
    long chk2 = SSL_get_verify_result(s);
    printf("chk2 = %ld\n",chk2);
//assert(X509_V_OK == chk2);
char buf[100]={0};
SSL_read(s,buf,5);
printf("{%s}\n",buf);
SSL_write(s,"abcdk",5);
abcdk_closep(&c);
abcdk_closep(&l);
abcdk_openssl_ssl_free(&s);
SSL_CTX_free(ctx);
}
void test_openssl_client(abcdk_tree_t *args)
{
#if OPENSSL_VERSION_NUMBER <= 0x100020bfL
const SSL_METHOD *method = TLSv1_2_client_method();
#else
const SSL_METHOD *method = TLS_client_method();
#endif
int chk ;
SSL_CTX * ctx = SSL_CTX_new(method);
const char *capath = abcdk_option_get(args,"--ca-path",0,NULL);
if (capath)
{
chk = SSL_CTX_load_verify_locations(ctx, NULL, capath);
assert(chk == 1);
X509_VERIFY_PARAM *param = SSL_CTX_get0_param(ctx);
//X509_VERIFY_PARAM_set_purpose(param, X509_PURPOSE_ANY);
X509_VERIFY_PARAM_set_flags(param, X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL);
}
chk = abcdk_openssl_ssl_ctx_load_crt(ctx, abcdk_option_get(args, "--crt-file", 0, NULL),
abcdk_option_get(args, "--key-file", 0, NULL),
abcdk_option_get(args, "--key-pwd", 0, NULL));
assert(chk == 0);
SSL* s = abcdk_openssl_ssl_alloc(ctx);
// void *p = SSL_get_app_data(s);
// printf("p = %p\n",p);
SSL_set_verify(s,SSL_VERIFY_PEER,NULL);
abcdk_sockaddr_t addr = {0};
// abcdk_sockaddr_from_string(&addr,
// abcdk_option_get(args, "--server-addr", 0, "localhost:12345"),
// 1);
addr.family = ABCDK_UNIX;
strcpy(addr.addr_un.sun_path,"/tmp/abcdk.txt2");
int c = abcdk_socket(addr.family,0);
assert(abcdk_connect(c,&addr,10000)==0);
assert(abcdk_openssl_ssl_handshake(c,s,0,10000)==0);
    long chk2 = SSL_get_verify_result(s);
    printf("chk2 = %ld\n",chk2);
//assert(X509_V_OK == chk2);
SSL_write(s,"abcdk",5);
char buf[100]={0};
SSL_read(s,buf,100);
printf("{%s}\n",buf);
abcdk_closep(&c);
abcdk_openssl_ssl_free(&s);
SSL_CTX_free(ctx);
}
#endif
int test_openssl(abcdk_tree_t *args)
{
int sub_func = abcdk_option_get_int(args, "--sub-func", 0, 0);
#ifdef HAVE_OPENSSL
if (sub_func == 1)
test_openssl_server(args);
else if (sub_func == 2)
test_openssl_client(args);
#endif
return 0;
}
#ifdef HAVE_MQTT
void my_message_callback(struct mosquitto *mosq, void *userdata, const struct mosquitto_message *message)
{
if (message->payloadlen)
{
printf("%s %s\n", message->topic, (char*)message->payload);
}
else
{
printf("%s (null)\n", message->topic);
}
fflush(stdout);
}
void my_connect_callback(struct mosquitto *mosq, void *userdata, int result)
{
int i;
if (!result)
{
/* Subscribe to broker information topics on successful connect. */
//mosquitto_subscribe(mosq, NULL, "$SYS/#", 2);
mosquitto_subscribe(mosq, NULL, "hello", 2);
}
else
{
fprintf(stderr, "Connect failed\n");
}
}
void my_subscribe_callback(struct mosquitto *mosq, void *userdata, int mid, int qos_count, const int *granted_qos)
{
int i;
printf("Subscribed (mid: %d): %d", mid, granted_qos[0]);
for (i = 1; i < qos_count; i++)
{
printf(", %d", granted_qos[i]);
}
printf("\n");
}
void my_log_callback(struct mosquitto *mosq, void *userdata, int level, const char *str)
{
    /* Print all log messages regardless of level. */
printf("%s\n", str);
}
#endif
int test_mqtt(abcdk_tree_t *args)
{
#ifdef HAVE_MQTT
int i;
char *host = "localhost";
int port = 1883;
int keepalive = 60;
bool clean_session = true;
struct mosquitto *mosq = NULL;
mosquitto_lib_init();
mosq = mosquitto_new(NULL, clean_session, NULL);
if (!mosq)
{
fprintf(stderr, "Error: Out of memory.\n");
return 1;
}
mosquitto_log_callback_set(mosq, my_log_callback);
mosquitto_connect_callback_set(mosq, my_connect_callback);
mosquitto_message_callback_set(mosq, my_message_callback);
mosquitto_subscribe_callback_set(mosq, my_subscribe_callback);
if (mosquitto_connect(mosq, host, port, keepalive))
{
fprintf(stderr, "Unable to connect.\n");
return 1;
}
mosquitto_loop_forever(mosq, -1, 1);
mosquitto_destroy(mosq);
mosquitto_lib_cleanup();
#endif
return 0;
}
void test_http(abcdk_tree_t *args)
{
int s = abcdk_socket(ABCDK_IPV4,0);
    abcdk_sockaddr_t a = {0};
a.family = ABCDK_IPV4;
abcdk_sockaddr_from_string(&a,"0.0.0.0:12345",0);
abcdk_bind(s,&a);
listen(s,10);
int c = abcdk_accept(s,NULL);
char buf[10]={0};
printf("--->>>\r\n");
while(read(c,buf,1)>0)
{
printf("%s",buf);
}
printf("<<<---\r\n");
abcdk_closep(&c);
abcdk_closep(&s);
}
void test_redis(abcdk_tree_t *args)
{
#ifdef __HIREDIS_H
const char *server = abcdk_option_get(args, "--server", 0, "127.0.0.1");
int port = abcdk_option_get_int(args, "--port", 0, 6379);
redisContext *c = abcdk_redis_connect(server, port, 20);
if (!c)
return;
//printf("%s\n", c->errstr);
int chk = abcdk_redis_auth(c,"12345678");
assert(chk==0);
char buf[128]={0};
abcdk_redis_get_auth(c,buf);
printf("{%s}\n",buf);
chk = abcdk_redis_set_auth(c,"12345678");
assert(chk==0);
chk = abcdk_redis_auth(c,"12345678");
assert(chk==0);
char buf2[128]={0};
abcdk_redis_get_auth(c,buf2);
printf("{%s}\n",buf2);
redisFree(c);
#endif //
}
void test_cert_verify(abcdk_tree_t *args)
{
#ifdef HAVE_OPENSSL
const char *user = abcdk_option_get(args, "--user-crt", 0, "");
//SSLeay_add_all_algorithms();
X509 *cert = abcdk_openssl_load_crt(user,NULL);
//PEM_read_X509_CRL()
X509_STORE *store = X509_STORE_new();
for(int i = 0;i<100;i++)
{
const char *ca = abcdk_option_get(args,"--ca-crt",i,NULL);
if(!ca)
break;
abcdk_openssl_load_crt2store(store,ca,NULL);
}
for(int i = 0;i<100;i++)
{
const char *ca = abcdk_option_get(args,"--ca-crl",i,NULL);
if(!ca)
break;
abcdk_openssl_load_crl2store(store,ca,NULL);
}
X509_STORE_CTX *store_ctx = abcdk_openssl_verify_crt_prepare(store,cert);
X509_VERIFY_PARAM *param = X509_STORE_CTX_get0_param(store_ctx);
// X509_VERIFY_PARAM_set_purpose(param, X509_PURPOSE_ANY);
    /*
     * X509_V_FLAG_CRL_CHECK only checks whether the leaf certificate has been
     * revoked, and only requires a CRL to exist for the leaf's issuer.
     * X509_V_FLAG_CRL_CHECK_ALL verifies the whole chain and requires CRLs for
     * every issuing certificate (except the root).
     *
     * X509_V_FLAG_CRL_CHECK_ALL has no effect on its own; it must be enabled
     * together with X509_V_FLAG_CRL_CHECK.
     */
X509_VERIFY_PARAM_set_flags(param,X509_V_FLAG_CRL_CHECK|X509_V_FLAG_CRL_CHECK_ALL);
// X509_VERIFY_PARAM_set_flags(param,X509_V_FLAG_CRL_CHECK);
int chk = X509_verify_cert(store_ctx);
assert(chk == 1);
//X509_VERIFY_PARAM_free(param);
X509_free(cert);
X509_STORE_free(store);
X509_STORE_CTX_free(store_ctx);
#endif
}
void test_json(abcdk_tree_t *args)
{
#ifdef _json_h_
const char *src = abcdk_option_get(args,"--src",0,NULL);
json_object *src_obj = json_object_from_file(src);
abcdk_json_readable(stdout,1,0,src_obj);
abcdk_json_unref(&src_obj);
#endif //_json_h_
}
void test_refer_count(abcdk_tree_t *args)
{
int user = abcdk_option_get_int(args,"--user",0,10);
abcdk_allocator_t * p= abcdk_allocator_alloc2(100);
#pragma omp parallel for num_threads(user)
for (int i = 0; i < 100000; i++)
{
abcdk_allocator_t *q = abcdk_allocator_refer(p);
usleep(10*1000);
abcdk_allocator_unref(&q);
}
abcdk_allocator_unref(&p);
}
typedef struct _one_node
{
int id;
abcdk_comm_message_t *in_buffer;
abcdk_comm_message_t *out_buffer;
abcdk_comm_queue_t *out_queue;
abcdk_comm_node_t *node;
abcdk_comm_waiter_t *rsp;
}one_node_t;
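/*
 * smb_protocol() is the framing callback used by both comm tests: the first
 * 4 bytes of a frame carry its total length (header included) in big-endian.
 * It returns 0 while more bytes are needed (growing the buffer once the
 * length field is readable) and 1 once the frame is complete.
 */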
int smb_protocol(abcdk_comm_node_t *node, abcdk_comm_message_t *msg)
{
size_t off = abcdk_comm_message_offset(msg);
if (off < 4)
return 0;
size_t len = abcdk_endian_b_to_h32(ABCDK_PTR2U32(abcdk_comm_message_data(msg), 0));
if (len != abcdk_comm_message_size(msg))
{
abcdk_comm_message_realloc(msg, len);
return 0;
}
else if (len != abcdk_comm_message_offset(msg))
{
return 0;
}
return 1;
}
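/*
 * _output_event() drains the per-connection output queue without blocking:
 * a negative send result is treated as fatal (schedule a timeout so the node
 * gets closed), zero means the socket buffer is full (re-arm the write watch
 * and return), and a positive result frees the message and loops to the next.
 */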
void _output_event(one_node_t *one)
{
int chk;
NEXT_MSG:
if (!one->out_buffer)
{
one->out_buffer = abcdk_comm_queue_pop(one->out_queue);
if (!one->out_buffer)
return;
}
chk = abcdk_comm_message_send(one->node, one->out_buffer);
if (chk < 0)
{
abcdk_comm_set_timeout(one->node, 1);
return;
}
else if (chk == 0)
{
abcdk_comm_write_watch(one->node);
return;
}
    /* Release the sent message buffer and continue with the next one. */
abcdk_comm_message_unref(&one->out_buffer);
goto NEXT_MSG;
}
void test_comm_message_cb(abcdk_comm_node_t *node, uint32_t event)
{
one_node_t *one = (one_node_t *)abcdk_comm_get_userdata(node);
switch (event)
{
case ABCDK_COMM_EVENT_ACCEPT:
{
assert(one == NULL);
one = (one_node_t*)abcdk_heap_alloc(sizeof(one_node_t));
one->out_queue = abcdk_comm_queue_alloc();
one->node = abcdk_comm_node_refer(node);
abcdk_comm_set_userdata(node,one);
abcdk_comm_read_watch(node);
}
break;
case ABCDK_COMM_EVENT_INPUT:
{
if(!one->in_buffer)
{
one->in_buffer = abcdk_comm_message_alloc(4);
abcdk_comm_message_protocol_set(one->in_buffer,smb_protocol);
}
int chk = abcdk_comm_message_recv(node,one->in_buffer);
if(chk < 0)
{
abcdk_comm_set_timeout(node,1);
}
else if(chk == 0)
{
abcdk_comm_read_watch(node);
}
else
{
abcdk_comm_message_t *msg_copy = abcdk_comm_message_refer(one->in_buffer);
abcdk_comm_message_unref(&one->in_buffer);
abcdk_comm_read_watch(node);
// usleep(rand()%10000+1000);
abcdk_comm_message_reset(msg_copy);
abcdk_comm_queue_push(one->out_queue,msg_copy);
abcdk_comm_write_watch(one->node);
}
}
break;
case ABCDK_COMM_EVENT_OUTPUT:
_output_event(one);
break;
case ABCDK_COMM_EVENT_CLOSE:
default:
{
abcdk_sockaddr_t sockname, peername;
abcdk_comm_get_sockname(node, &sockname);
abcdk_comm_get_peername(node, &peername);
char sockname_str[100] = {0}, peername_str[100] = {0};
if (sockname.family)
abcdk_sockaddr_to_string(sockname_str, &sockname);
if (peername.family)
abcdk_sockaddr_to_string(peername_str, &peername);
printf("Socket: %s -> %s Disconnected.\n", sockname_str, peername_str);
if(one)
{
abcdk_comm_message_unref(&one->in_buffer);
abcdk_comm_queue_free(&one->out_queue);
abcdk_comm_node_unref(&one->node);
abcdk_heap_free(one);
}
}
break;
}
}
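/*
 * test_send_msg() correlates requests with responses through a message id
 * derived from a clock (abcdk_time_clock2kind_with(0,6), apparently
 * microseconds): the id is registered with the waiter before the request is
 * queued, stamped into bytes 4..11 of the 128-byte frame, and the sender then
 * blocks in abcdk_comm_waiter_wait2() until the INPUT handler posts the
 * matching response or the wait times out.
 */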
void *test_send_msg(void *args)
{
one_node_t *one = (one_node_t *)args;
for (int i = 0; i < 1000; i++)
{
// usleep(10);
abcdk_comm_message_t *msg = abcdk_comm_message_alloc(128);
uint64_t mid = abcdk_time_clock2kind_with(0, 6);
abcdk_comm_waiter_request2(one->rsp,&mid);
ABCDK_PTR2U32(abcdk_comm_message_data(msg), 0) = abcdk_endian_h_to_b32(128);
ABCDK_PTR2U64(abcdk_comm_message_data(msg), 4) = abcdk_endian_h_to_b64(mid);
ABCDK_PTR2U32(abcdk_comm_message_data(msg), 12) = abcdk_endian_h_to_b32(i+1);
abcdk_comm_queue_push(one->out_queue, msg);
abcdk_comm_write_watch(one->node);
abcdk_comm_queue_t * q = abcdk_comm_waiter_wait2(one->rsp,&mid,1,10);
if(!q)
continue;
uint64_t a = abcdk_time_clock2kind_with(0,6);
printf("mid(%lu),timeout(%lu), count(%lu)\n",mid,a-mid,abcdk_comm_queue_count(q));
abcdk_comm_queue_free(&q);
}
return NULL;
}
void test_comm_message2_cb(abcdk_comm_node_t *node, uint32_t event)
{
one_node_t *one = (one_node_t *)abcdk_comm_get_userdata(node);
switch (event)
{
case ABCDK_COMM_EVENT_CONNECT:
{
one->out_queue = abcdk_comm_queue_alloc();
one->rsp = abcdk_comm_waiter_alloc();
one->node = abcdk_comm_node_refer(node);
// abcdk_comm_set_userdata(node,one);
abcdk_comm_read_watch(node);
abcdk_thread_t t;
t.routine = test_send_msg;
t.opaque = one;
abcdk_thread_create(&t,0);
}
break;
case ABCDK_COMM_EVENT_INPUT:
{
if(!one->in_buffer)
{
one->in_buffer = abcdk_comm_message_alloc(4);
abcdk_comm_message_protocol_set(one->in_buffer,smb_protocol);
}
int chk = abcdk_comm_message_recv(node,one->in_buffer);
if(chk != 1)
{
abcdk_comm_read_watch(node);
}
else
{
abcdk_comm_message_t *msg_copy = abcdk_comm_message_refer(one->in_buffer);
abcdk_comm_message_unref(&one->in_buffer);
abcdk_comm_read_watch(node);
size_t len = abcdk_endian_b_to_h32(ABCDK_PTR2U32(abcdk_comm_message_data(msg_copy),0));
uint64_t mid = abcdk_endian_b_to_h64(ABCDK_PTR2U64(abcdk_comm_message_data(msg_copy),4));
uint32_t id = abcdk_endian_b_to_h32(ABCDK_PTR2U32(abcdk_comm_message_data(msg_copy), 12));
uint64_t a = abcdk_time_clock2kind_with(0,3);
//printf("mid=%lu,id=%u,time=%lu\n",mid,id,a-mid);
abcdk_comm_waiter_response2(one->rsp,&mid,msg_copy);
// abcdk_comm_message_unref(&msg_copy);
}
}
break;
case ABCDK_COMM_EVENT_OUTPUT:
_output_event(one);
break;
default:
{
abcdk_sockaddr_t sockname, peername;
abcdk_comm_get_sockname(node, &sockname);
abcdk_comm_get_peername(node, &peername);
char sockname_str[100] = {0}, peername_str[100] = {0};
if (sockname.family)
abcdk_sockaddr_to_string(sockname_str, &sockname);
if (peername.family)
abcdk_sockaddr_to_string(peername_str, &peername);
printf("Socket: %s -> %s Disconnected.\n", sockname_str, peername_str);
abcdk_comm_message_unref(&one->in_buffer);
abcdk_comm_queue_free(&one->out_queue);
abcdk_comm_node_unref(&one->node);
abcdk_comm_waiter_free(&one->rsp);
abcdk_heap_free(one);
}
break;
}
}
void test_comm(abcdk_tree_t *args)
{
    signal(SIGPIPE,SIG_IGN);
abcdk_comm_start(0);
SSL_CTX *server_ssl_ctx = NULL;
SSL_CTX *client_ssl_ctx = NULL;
#ifdef HAVE_OPENSSL
const char *capath = abcdk_option_get(args,"--ca-path",0,NULL);
if (capath)
{
server_ssl_ctx = abcdk_openssl_ssl_ctx_alloc(1, NULL, capath, 2);
abcdk_openssl_ssl_ctx_load_crt(server_ssl_ctx, abcdk_option_get(args, "--crt-file", 0, NULL),
abcdk_option_get(args, "--key-file", 0, NULL),
abcdk_option_get(args, "--key-pwd", 0, NULL));
// SSL_CTX_set_verify(server_ssl_ctx, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, NULL);
// SSL_CTX_set_verify(server_ssl_ctx, SSL_VERIFY_PEER, NULL);
client_ssl_ctx = abcdk_openssl_ssl_ctx_alloc(0, NULL, capath, 2);
abcdk_openssl_ssl_ctx_load_crt(client_ssl_ctx, abcdk_option_get(args, "--crt2-file", 0, NULL),
abcdk_option_get(args, "--key2-file", 0, NULL),
abcdk_option_get(args, "--key2-pwd", 0, NULL));
// SSL_CTX_set_verify(client_ssl_ctx, SSL_VERIFY_PEER, NULL);
}
#endif //HAVE_OPENSSL
abcdk_sockaddr_t addr = {0};
abcdk_sockaddr_t addr2 = {0};
const char *listen_p = abcdk_option_get(args,"--listen",0,"0.0.0.0:12345");
abcdk_sockaddr_from_string(&addr,listen_p,0);
abcdk_comm_listen(server_ssl_ctx,&addr,test_comm_message_cb,NULL);
const char *connect_p = abcdk_option_get(args,"--connect",0,"127.0.0.1:12345");
abcdk_sockaddr_from_string(&addr2,connect_p,0);
abcdk_comm_connect(client_ssl_ctx,&addr2,test_comm_message2_cb,abcdk_heap_alloc(sizeof(one_node_t)));
while (getchar() != 'Q')
;
abcdk_comm_stop();
}
void test_easy_request_cb(abcdk_comm_easy_t *easy, const void *data, size_t len)
{
abcdk_sockaddr_t sockname, peername;
abcdk_comm_easy_get_sockname(easy, &sockname);
abcdk_comm_easy_get_peername(easy, &peername);
char sockname_str[100] = {0}, peername_str[100] = {0};
if (sockname.family)
abcdk_sockaddr_to_string(sockname_str, &sockname);
if (peername.family)
abcdk_sockaddr_to_string(peername_str, &peername);
// printf("Server(%s -> %s): ", sockname_str, peername_str);
if(!data)
{
printf(" Disconnected.\n");
}
else
{
uint64_t a = abcdk_time_clock2kind_with(CLOCK_MONOTONIC, 6);
uint64_t b = atoll((char*)data);
// printf("%lu-%lu=%lu\n",a,b,a-b);
usleep(rand()%10000+1000);
abcdk_comm_easy_response(easy,data,len);
abcdk_comm_easy_request(easy,data,len,NULL,0);
}
}
void test_easy_request2_cb(abcdk_comm_easy_t *easy, const void *data, size_t len)
{
abcdk_sockaddr_t sockname, peername;
abcdk_comm_easy_get_sockname(easy, &sockname);
abcdk_comm_easy_get_peername(easy, &peername);
char sockname_str[100] = {0}, peername_str[100] = {0};
if (sockname.family)
abcdk_sockaddr_to_string(sockname_str, &sockname);
if (peername.family)
abcdk_sockaddr_to_string(peername_str, &peername);
// printf("Client(%s -> %s): ", sockname_str, peername_str);
if(!data)
{
printf(" Disconnected.\n");
}
else
{
// printf(" %s\n",(char*)data);
}
}
void test_easy(abcdk_tree_t *args)
{
signal(SIGPIPE, SIG_IGN); // ignore SIGPIPE so a peer closing the socket does not kill the process
abcdk_comm_start(0);
SSL_CTX *server_ssl_ctx = NULL;
SSL_CTX *client_ssl_ctx[4] = {NULL};
#ifdef HAVE_OPENSSL
const char *capath = abcdk_option_get(args,"--ca-path",0,NULL);
if (capath)
{
server_ssl_ctx = abcdk_openssl_ssl_ctx_alloc(1, NULL, capath, 2);
abcdk_openssl_ssl_ctx_load_crt(server_ssl_ctx, abcdk_option_get(args, "--crt-file", 0, NULL),
abcdk_option_get(args, "--key-file", 0, NULL),
abcdk_option_get(args, "--key-pwd", 0, NULL));
// SSL_CTX_set_verify(server_ssl_ctx, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, NULL);
SSL_CTX_set_verify(server_ssl_ctx, SSL_VERIFY_PEER, NULL);
for(int i =0;i<4;i++)
{
client_ssl_ctx[i] = abcdk_openssl_ssl_ctx_alloc(0, NULL, capath, 2);
abcdk_openssl_ssl_ctx_load_crt(client_ssl_ctx[i], abcdk_option_get(args, "--crt2-file", i, NULL),
abcdk_option_get(args, "--key2-file", i, NULL),
abcdk_option_get(args, "--key2-pwd", i, NULL));
SSL_CTX_set_verify(client_ssl_ctx[i], SSL_VERIFY_PEER, NULL);
}
}
#endif //HAVE_OPENSSL
abcdk_sockaddr_t addr = {0};
abcdk_sockaddr_t addr2 = {0};
const char *listen_p = abcdk_option_get(args,"--listen",0,"0.0.0.0:12345");
abcdk_sockaddr_from_string(&addr,listen_p,0);
abcdk_comm_easy_t *easy_listen = abcdk_comm_easy_listen(server_ssl_ctx,&addr,test_easy_request_cb,NULL);
const char *connect_p = abcdk_option_get(args,"--connect",0,"127.0.0.1:12345");
abcdk_sockaddr_from_string(&addr2,connect_p,0);
abcdk_comm_easy_t *easy_client[4] = {NULL};
for (int i = 0; i < 4; i++)
easy_client[i] = abcdk_comm_easy_connect(client_ssl_ctx[i], &addr2, test_easy_request2_cb, NULL);
uint64_t d = 0,s = 0;
s = abcdk_clock(d,&d);
#pragma omp parallel for num_threads(4)
for(int i = 0;i<1000000;i++)
{
uint64_t d = 0,s = 0;
s = abcdk_clock(d,&d);
int len = 10000;
char *req= (char*)abcdk_heap_alloc(len);
abcdk_comm_message_t *rsp= NULL;
sprintf(req,"%lu",abcdk_time_clock2kind_with(CLOCK_MONOTONIC, 6));
abcdk_comm_easy_request(easy_client[i%4],req,len,&rsp,1000);
if (rsp)
{
// printf("%d=%s\n",i,(char*)abcdk_comm_message_data(rsp));
abcdk_comm_message_unref(&rsp);
}
else
{
printf("Pipe(%d) %s timeout\n",i%4,req);
}
abcdk_heap_free(req);
s = abcdk_clock(d,&d);
// printf("[%d]:s = %lu,d = %lu\n",i,s,d);
}
s = abcdk_clock(d,&d);
printf("s = %lu,d = %lu\n",s,d);
// abcdk_comm_easy_set_timeout(easy_listen,1);
// abcdk_comm_easy_unref(&easy_listen);
for(int i = 0;i<4;i++)
abcdk_comm_easy_unref(&easy_client[i]);
while (getchar() != 'Q')
;
abcdk_comm_stop();
}
int test_blkid(abcdk_tree_t *args)
{
#ifdef HAVE_BLKID
int i, nparts;
char *devname;
blkid_probe pr;
blkid_partlist ls;
blkid_parttable root_tab;
devname = (char*)abcdk_option_get(args,"--dev",0,"");
pr = blkid_new_probe_from_filename(devname);
if (!pr)
return 1;
/* Binary interface */
ls = blkid_probe_get_partitions(pr);
if (!ls)
return 1;
/*
* Print info about the primary (root) partition table
*/
root_tab = blkid_partlist_get_table(ls);
if (!root_tab)
return 1;
printf("size: %jd, sector size: %u, PT: %s, offset: %jd, id=%s\n---\n",
blkid_probe_get_size(pr),
blkid_probe_get_sectorsize(pr),
blkid_parttable_get_type(root_tab),
blkid_parttable_get_offset(root_tab),
blkid_parttable_get_id(root_tab));
/*
* List partitions
*/
nparts = blkid_partlist_numof_partitions(ls);
if (!nparts)
goto done;
for (i = 0; i < nparts; i++) {
const char *p;
blkid_partition par = blkid_partlist_get_partition(ls, i);
blkid_parttable tab = blkid_partition_get_table(par);
printf("#%d: %10llu %10llu 0x%x",
blkid_partition_get_partno(par),
(unsigned long long) blkid_partition_get_start(par),
(unsigned long long) blkid_partition_get_size(par),
blkid_partition_get_type(par));
if (root_tab != tab)
/* subpartition (BSD, Minix, ...) */
printf(" (%s)", blkid_parttable_get_type(tab));
p = blkid_partition_get_name(par);
if (p)
printf(" name='%s'", p);
p = blkid_partition_get_uuid(par);
if (p)
printf(" uuid='%s'", p);
p = blkid_partition_get_type_string(par);
if (p)
printf(" type='%s'", p);
putc('\n', stdout);
}
done:
blkid_free_probe(pr);
#endif
return EXIT_SUCCESS;
}
int main(int argc, char **argv)
{
abcdk_openlog(NULL,LOG_DEBUG,1);
srand(time(NULL));
abcdk_tree_t *args = abcdk_tree_alloc3(1);
abcdk_getargs(args,argc,argv,"--");
abcdk_option_fprintf(stderr,args,NULL);
const char *func = abcdk_option_get(args,"--func",0,"");
// abcdk_clock_reset();
int a = 0x112233;
int b = 0;
char a8[3] = {0};
abcdk_endian_h_to_b24(a8,a);
b = abcdk_endian_b_to_h24(a8);
assert(a == b);
abcdk_endian_h_to_l24(a8,a);
b = abcdk_endian_l_to_h24(a8);
assert(a == b);
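/* Sketch of the byte layouts being round-tripped (assuming the usual abcdk
naming, h = host, b = big-endian, l = little-endian):
abcdk_endian_h_to_b24(a8, 0x112233) -> a8 = {0x11, 0x22, 0x33}
abcdk_endian_h_to_l24(a8, 0x112233) -> a8 = {0x33, 0x22, 0x11}
The asserts above only verify the round trip, not the layout itself. */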
uint64_t c = 1234567890987654321;
uint64_t d = 0,e = 0;
d = abcdk_endian_h_to_b64(c);
e = abcdk_endian_b_to_h64(d);
assert(c == e);
uint64_t f = 0,g = 0;
g = abcdk_clock(f,&f);
for(int i = 0;i<100000;i++)
{
void *p = abcdk_heap_alloc(1024);
abcdk_heap_free(p);
}
g = abcdk_clock(f,&f);
printf("g = %lu,f = %lu\n",g,f);
for (int i = 0; i < 10000; i++)
{
int v = rand() % 127;
int k = ABCDK_CLAMP(v,33,126);
assert(k >= 33 && k <= 126);
}
#ifdef HAVE_OPENSSL
SSL_library_init();
OpenSSL_add_all_algorithms();
SSL_load_error_strings();
#endif //HAVE_OPENSSL
if(abcdk_strcmp(func,"test_ffmpeg",0)==0)
test_ffmpeg(args);
if(abcdk_strcmp(func,"test_bmp",0)==0)
test_bmp(args);
if(abcdk_strcmp(func,"test_freeimage",0)==0)
test_freeimage(args);
if(abcdk_strcmp(func,"test_uri",0)==0)
test_uri(args);
if (abcdk_strcmp(func, "test_strrep", 0) == 0)
test_strrep(args);
if (abcdk_strcmp(func, "test_html", 0) == 0)
test_html(args);
if (abcdk_strcmp(func, "test_fnmatch", 0) == 0)
test_fnmatch(args);
if (abcdk_strcmp(func, "test_crc32", 0) == 0)
test_crc32(args);
if (abcdk_strcmp(func, "test_robots", 0) == 0)
test_robots(args);
if (abcdk_strcmp(func, "test_fuse", 0) == 0)
test_fuse(args);
if (abcdk_strcmp(func, "test_mp4", 0) == 0)
test_mp4(args);
if (abcdk_strcmp(func, "test_dirent", 0) == 0)
test_dirent(args);
if (abcdk_strcmp(func, "test_netlink", 0) == 0)
test_netlink(args);
if (abcdk_strcmp(func, "test_iwscan", 0) == 0)
test_iwscan(args);
if (abcdk_strcmp(func, "test_hexdump", 0) == 0)
test_hexdump(args);
if (abcdk_strcmp(func, "test_video", 0) == 0)
test_video(args);
if (abcdk_strcmp(func, "test_com", 0) == 0)
test_com(args);
if (abcdk_strcmp(func, "test_mpi", 0) == 0)
test_mpi(args);
if (abcdk_strcmp(func, "test_lz4", 0) == 0)
test_lz4(args);
if (abcdk_strcmp(func, "test_archive", 0) == 0)
test_archive(args);
if (abcdk_strcmp(func, "test_modbus", 0) == 0)
test_modbus(args);
if (abcdk_strcmp(func, "test_libusb", 0) == 0)
test_libusb(args);
if (abcdk_strcmp(func, "test_openssl", 0) == 0)
test_openssl(args);
if (abcdk_strcmp(func, "test_mqtt", 0) == 0)
test_mqtt(args);
if (abcdk_strcmp(func, "test_http", 0) == 0)
test_http(args);
if (abcdk_strcmp(func, "test_redis", 0) == 0)
test_redis(args);
if (abcdk_strcmp(func, "test_cert_verify", 0) == 0)
test_cert_verify(args);
if (abcdk_strcmp(func, "test_json", 0) == 0)
test_json(args);
if (abcdk_strcmp(func, "test_refer_count", 0) == 0)
test_refer_count(args);
if (abcdk_strcmp(func, "test_comm", 0) == 0)
test_comm(args);
if (abcdk_strcmp(func, "test_easy", 0) == 0)
test_easy(args);
if (abcdk_strcmp(func, "test_blkid", 0) == 0)
test_blkid(args);
abcdk_tree_free(&args);
return 0;
}
|
GB_unop__log1p_fp32_fp32.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log1p_fp32_fp32)
// op(A') function: GB (_unop_tran__log1p_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = log1pf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = log1pf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = log1pf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG1P || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__log1p_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = log1pf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = log1pf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
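// Usage sketch (not part of the generated kernel itself): for a dense,
// non-bitmap array of n floats and a single thread,
//
// float x [4] = {0.f, 1.f, 2.f, 3.f} ;
// float y [4] ;
// GB (_unop_apply__log1p_fp32_fp32) (y, x, NULL, 4, 1) ;
//
// leaves y [p] == log1pf (x [p]) for p = 0..3.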
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__log1p_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__bget_uint16.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bget_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__bget_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__bget_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__bget_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_uint16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bget_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__bget_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_uint16)
// C=scalar+B GB (_bind1st__bget_uint16)
// C=scalar+B' GB (_bind1st_tran__bget_uint16)
// C=A+scalar GB (_bind2nd__bget_uint16)
// C=A'+scalar GB (_bind2nd_tran__bget_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = GB_BITGET (aij, bij, uint16_t, 16)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITGET (x, y, uint16_t, 16) ;
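// Note (sketch, not defined in this file): GB_BITGET implements GxB_BGET,
// i.e. z is bit y of x, roughly z = (x >> y) & 1 for 0 <= y < 16; the
// actual macro is defined centrally in GB.h.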
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BGET || GxB_NO_UINT16 || GxB_NO_BGET_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__bget_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bget_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bget_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bget_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bget_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bget_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bget_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bget_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bget_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_BITGET (x, bij, uint16_t, 16) ;
}
return (GrB_SUCCESS) ;
#endif
}
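// Usage sketch (hypothetical buffers): binding x = 5 as the first operand
// over a full (non-bitmap) array of n entries, single-threaded:
//
// uint16_t x = 5, Bx [3] = {0, 1, 2}, Cx [3] ;
// GB (_bind1st__bget_uint16) ((GB_void *) Cx, (const GB_void *) &x,
// (const GB_void *) Bx, NULL, 3, 1) ;
//
// leaves Cx [p] = bitget (5, Bx [p]), i.e. bit Bx [p] of the value 5.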
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bget_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_BITGET (aij, y, uint16_t, 16) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (x, aij, uint16_t, 16) ; \
}
GrB_Info GB (_bind1st_tran__bget_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (aij, y, uint16_t, 16) ; \
}
GrB_Info GB (_bind2nd_tran__bget_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
rumi-64-128-18r.c
|
/*
* Date: 11 December 2015
* Contact: Thomas Peyrin - [email protected]
*/
/*
* Simulation of boomerang analysis for Skinny
* Date: March 21, 2020
* Author: Hosein Hadipour
* Contact: [email protected]
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
// using namespace std;
typedef unsigned long long int UINT64;
// #define DEBUG 1
#define Nthreads 1
#define STEP ((1 << 10) - 1)
#define PROGRAMNUMBER 1
// Table that encodes the parameters of the various Skinny versions:
// (block size, key size, number of rounds)
//Skinny-64-64: 32 rounds
//Skinny-64-128: 36 rounds
//Skinny-64-192: 40 rounds
//Skinny-128-128: 40 rounds
//Skinny-128-256: 48 rounds
//Skinny-128-384: 56 rounds
int versions[6][3] = {{64, 64, 32}, {64, 128, 36}, {64, 192, 40}, {128, 128, 40}, {128, 256, 48}, {128, 384, 56}};
// Packing of data is done as follows (state[i][j] stands for row i and column j):
// 0 1 2 3
// 4 5 6 7
// 8 9 10 11
//12 13 14 15
// 4-bit Sbox
const unsigned char sbox_4[16] = {12, 6, 9, 0, 1, 10, 2, 11, 3, 8, 5, 13, 4, 14, 7, 15};
const unsigned char sbox_4_inv[16] = {3, 4, 6, 8, 12, 10, 1, 14, 9, 2, 5, 7, 0, 11, 13, 15};
// 8-bit Sbox
const unsigned char sbox_8[256] = {0x65, 0x4c, 0x6a, 0x42, 0x4b, 0x63, 0x43, 0x6b, 0x55, 0x75, 0x5a, 0x7a, 0x53, 0x73, 0x5b, 0x7b, 0x35, 0x8c, 0x3a, 0x81, 0x89, 0x33, 0x80, 0x3b, 0x95, 0x25, 0x98, 0x2a, 0x90, 0x23, 0x99, 0x2b, 0xe5, 0xcc, 0xe8, 0xc1, 0xc9, 0xe0, 0xc0, 0xe9, 0xd5, 0xf5, 0xd8, 0xf8, 0xd0, 0xf0, 0xd9, 0xf9, 0xa5, 0x1c, 0xa8, 0x12, 0x1b, 0xa0, 0x13, 0xa9, 0x05, 0xb5, 0x0a, 0xb8, 0x03, 0xb0, 0x0b, 0xb9, 0x32, 0x88, 0x3c, 0x85, 0x8d, 0x34, 0x84, 0x3d, 0x91, 0x22, 0x9c, 0x2c, 0x94, 0x24, 0x9d, 0x2d, 0x62, 0x4a, 0x6c, 0x45, 0x4d, 0x64, 0x44, 0x6d, 0x52, 0x72, 0x5c, 0x7c, 0x54, 0x74, 0x5d, 0x7d, 0xa1, 0x1a, 0xac, 0x15, 0x1d, 0xa4, 0x14, 0xad, 0x02, 0xb1, 0x0c, 0xbc, 0x04, 0xb4, 0x0d, 0xbd, 0xe1, 0xc8, 0xec, 0xc5, 0xcd, 0xe4, 0xc4, 0xed, 0xd1, 0xf1, 0xdc, 0xfc, 0xd4, 0xf4, 0xdd, 0xfd, 0x36, 0x8e, 0x38, 0x82, 0x8b, 0x30, 0x83, 0x39, 0x96, 0x26, 0x9a, 0x28, 0x93, 0x20, 0x9b, 0x29, 0x66, 0x4e, 0x68, 0x41, 0x49, 0x60, 0x40, 0x69, 0x56, 0x76, 0x58, 0x78, 0x50, 0x70, 0x59, 0x79, 0xa6, 0x1e, 0xaa, 0x11, 0x19, 0xa3, 0x10, 0xab, 0x06, 0xb6, 0x08, 0xba, 0x00, 0xb3, 0x09, 0xbb, 0xe6, 0xce, 0xea, 0xc2, 0xcb, 0xe3, 0xc3, 0xeb, 0xd6, 0xf6, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0xfb, 0x31, 0x8a, 0x3e, 0x86, 0x8f, 0x37, 0x87, 0x3f, 0x92, 0x21, 0x9e, 0x2e, 0x97, 0x27, 0x9f, 0x2f, 0x61, 0x48, 0x6e, 0x46, 0x4f, 0x67, 0x47, 0x6f, 0x51, 0x71, 0x5e, 0x7e, 0x57, 0x77, 0x5f, 0x7f, 0xa2, 0x18, 0xae, 0x16, 0x1f, 0xa7, 0x17, 0xaf, 0x01, 0xb2, 0x0e, 0xbe, 0x07, 0xb7, 0x0f, 0xbf, 0xe2, 0xca, 0xee, 0xc6, 0xcf, 0xe7, 0xc7, 0xef, 0xd2, 0xf2, 0xde, 0xfe, 0xd7, 0xf7, 0xdf, 0xff};
const unsigned char sbox_8_inv[256] = {0xac, 0xe8, 0x68, 0x3c, 0x6c, 0x38, 0xa8, 0xec, 0xaa, 0xae, 0x3a, 0x3e, 0x6a, 0x6e, 0xea, 0xee, 0xa6, 0xa3, 0x33, 0x36, 0x66, 0x63, 0xe3, 0xe6, 0xe1, 0xa4, 0x61, 0x34, 0x31, 0x64, 0xa1, 0xe4, 0x8d, 0xc9, 0x49, 0x1d, 0x4d, 0x19, 0x89, 0xcd, 0x8b, 0x8f, 0x1b, 0x1f, 0x4b, 0x4f, 0xcb, 0xcf, 0x85, 0xc0, 0x40, 0x15, 0x45, 0x10, 0x80, 0xc5, 0x82, 0x87, 0x12, 0x17, 0x42, 0x47, 0xc2, 0xc7, 0x96, 0x93, 0x03, 0x06, 0x56, 0x53, 0xd3, 0xd6, 0xd1, 0x94, 0x51, 0x04, 0x01, 0x54, 0x91, 0xd4, 0x9c, 0xd8, 0x58, 0x0c, 0x5c, 0x08, 0x98, 0xdc, 0x9a, 0x9e, 0x0a, 0x0e, 0x5a, 0x5e, 0xda, 0xde, 0x95, 0xd0, 0x50, 0x05, 0x55, 0x00, 0x90, 0xd5, 0x92, 0x97, 0x02, 0x07, 0x52, 0x57, 0xd2, 0xd7, 0x9d, 0xd9, 0x59, 0x0d, 0x5d, 0x09, 0x99, 0xdd, 0x9b, 0x9f, 0x0b, 0x0f, 0x5b, 0x5f, 0xdb, 0xdf, 0x16, 0x13, 0x83, 0x86, 0x46, 0x43, 0xc3, 0xc6, 0x41, 0x14, 0xc1, 0x84, 0x11, 0x44, 0x81, 0xc4, 0x1c, 0x48, 0xc8, 0x8c, 0x4c, 0x18, 0x88, 0xcc, 0x1a, 0x1e, 0x8a, 0x8e, 0x4a, 0x4e, 0xca, 0xce, 0x35, 0x60, 0xe0, 0xa5, 0x65, 0x30, 0xa0, 0xe5, 0x32, 0x37, 0xa2, 0xa7, 0x62, 0x67, 0xe2, 0xe7, 0x3d, 0x69, 0xe9, 0xad, 0x6d, 0x39, 0xa9, 0xed, 0x3b, 0x3f, 0xab, 0xaf, 0x6b, 0x6f, 0xeb, 0xef, 0x26, 0x23, 0xb3, 0xb6, 0x76, 0x73, 0xf3, 0xf6, 0x71, 0x24, 0xf1, 0xb4, 0x21, 0x74, 0xb1, 0xf4, 0x2c, 0x78, 0xf8, 0xbc, 0x7c, 0x28, 0xb8, 0xfc, 0x2a, 0x2e, 0xba, 0xbe, 0x7a, 0x7e, 0xfa, 0xfe, 0x25, 0x70, 0xf0, 0xb5, 0x75, 0x20, 0xb0, 0xf5, 0x22, 0x27, 0xb2, 0xb7, 0x72, 0x77, 0xf2, 0xf7, 0x2d, 0x79, 0xf9, 0xbd, 0x7d, 0x29, 0xb9, 0xfd, 0x2b, 0x2f, 0xbb, 0xbf, 0x7b, 0x7f, 0xfb, 0xff};
// ShiftAndSwitchRows permutation
const unsigned char P[16] = {0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12};
const unsigned char P_inv[16] = {0, 1, 2, 3, 5, 6, 7, 4, 10, 11, 8, 9, 15, 12, 13, 14};
// Tweakey permutation
const unsigned char TWEAKEY_P[16] = {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7};
const unsigned char TWEAKEY_P_inv[16] = {8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1};
// round constants
const unsigned char RC[62] = {
0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F,
0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, 0x1D, 0x3A, 0x35, 0x2B,
0x16, 0x2C, 0x18, 0x30, 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E,
0x1C, 0x38, 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A,
0x34, 0x29, 0x12, 0x24, 0x08, 0x11, 0x22, 0x04, 0x09, 0x13,
0x26, 0x0c, 0x19, 0x32, 0x25, 0x0a, 0x15, 0x2a, 0x14, 0x28,
0x10, 0x20};
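// The RC table above can be regenerated with the Skinny 6-bit LFSR
// (sketch; reproduces RC[0..61] exactly):
//
// unsigned char rc = 0;
// for (int t = 0; t < 62; t++) {
// rc = ((rc << 1) & 0x3E) | (((rc >> 5) ^ (rc >> 4) ^ 1) & 1);
// // RC[t] == rc
// }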
FILE *fic;
void string_state(unsigned char state[16], int ver)
{
for (int i = 0; i < (versions[ver][0] >> 3); i++)
{
printf("%02x", state[i]);
}
}
void string_tweak(unsigned char state[16], int ver)
{
for (int i = 0; i < (versions[ver][1] >> 3); i++)
{
printf("%02x", state[i]);
}
}
void display_matrix(unsigned char state[4][4], int ver)
{
int i;
unsigned char input[16];
if (versions[ver][0] == 64)
{
for (i = 0; i < 8; i++)
input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
for (i = 0; i < 8; i++)
fprintf(fic, "%02x", input[i]);
}
else if (versions[ver][0] == 128)
{
for (i = 0; i < 16; i++)
input[i] = state[i >> 2][i & 0x3] & 0xFF;
for (i = 0; i < 16; i++)
fprintf(fic, "%02x", input[i]);
}
}
void display_cipher_state(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
int k;
fprintf(fic, "S = ");
display_matrix(state, ver);
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
fprintf(fic, " - TK%i = ", k + 1);
display_matrix(keyCells[k], ver);
}
}
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state
void AddKey(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
int i, j, k;
unsigned char pos;
unsigned char keyCells_tmp[3][4][4];
// apply the subtweakey to the internal state
for (i = 0; i <= 1; i++)
{
for (j = 0; j < 4; j++)
{
state[i][j] ^= keyCells[0][i][j];
if (2 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j];
else if (3 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
}
}
// update the subtweakey states with the permutation
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
//application of the TWEAKEY permutation
pos = TWEAKEY_P[j + 4 * i];
keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
}
}
}
// update the subtweakey states with the LFSRs
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i <= 1; i++)
{
for (j = 0; j < 4; j++)
{
//application of LFSRs for TK updates
if (k == 1)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
}
else if (k == 2)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j]) & 0x8) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
}
}
}
}
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
keyCells[k][i][j] = keyCells_tmp[k][i][j];
}
}
}
}
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state (inverse function)
void AddKey_inv(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
int i, j, k;
unsigned char pos;
unsigned char keyCells_tmp[3][4][4];
// update the subtweakey states with the permutation
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
//application of the inverse TWEAKEY permutation
pos = TWEAKEY_P_inv[j + 4 * i];
keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
}
}
}
// update the subtweakey states with the LFSRs
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 2; i <= 3; i++)
{
for (j = 0; j < 4; j++)
{
//application of inverse LFSRs for TK updates
if (k == 1)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8) ^ ((keyCells_tmp[k][i][j]) & 0x8);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
}
else if (k == 2)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
}
}
}
}
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
keyCells[k][i][j] = keyCells_tmp[k][i][j];
}
}
}
// apply the subtweakey to the internal state
for (i = 0; i <= 1; i++)
{
for (j = 0; j < 4; j++)
{
state[i][j] ^= keyCells[0][i][j];
if (2 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j];
else if (3 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
}
}
}
// Apply the constants: using a LFSR counter on 6 bits, we XOR the 6 bits to the first 6 bits of the internal state
void AddConstants(unsigned char state[4][4], int r)
{
state[0][0] ^= (RC[r] & 0xf);
state[1][0] ^= ((RC[r] >> 4) & 0x3);
state[2][0] ^= 0x2;
}
// apply the 4-bit Sbox
void SubCell4(unsigned char state[4][4])
{
int i, j;
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++)
state[i][j] = sbox_4[state[i][j]];
}
// apply the 4-bit inverse Sbox
void SubCell4_inv(unsigned char state[4][4])
{
int i, j;
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++)
state[i][j] = sbox_4_inv[state[i][j]];
}
// apply the 8-bit Sbox
void SubCell8(unsigned char state[4][4])
{
int i, j;
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++)
state[i][j] = sbox_8[state[i][j]];
}
// apply the 8-bit inverse Sbox
void SubCell8_inv(unsigned char state[4][4])
{
int i, j;
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++)
state[i][j] = sbox_8_inv[state[i][j]];
}
// Apply the ShiftRows function
void ShiftRows(unsigned char state[4][4])
{
int i, j, pos;
unsigned char state_tmp[4][4];
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
//application of the ShiftRows permutation
pos = P[j + 4 * i];
state_tmp[i][j] = state[pos >> 2][pos & 0x3];
}
}
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
state[i][j] = state_tmp[i][j];
}
}
}
// Apply the inverse ShiftRows function
void ShiftRows_inv(unsigned char state[4][4])
{
int i, j, pos;
unsigned char state_tmp[4][4];
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
//application of the inverse ShiftRows permutation
pos = P_inv[j + 4 * i];
state_tmp[i][j] = state[pos >> 2][pos & 0x3];
}
}
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
state[i][j] = state_tmp[i][j];
}
}
}
// Apply the linear diffusion matrix
//M =
//1 0 1 1
//1 0 0 0
//0 1 1 0
//1 0 1 0
void MixColumn(unsigned char state[4][4])
{
int j;
unsigned char temp;
for (j = 0; j < 4; j++)
{
state[1][j] ^= state[2][j];
state[2][j] ^= state[0][j];
state[3][j] ^= state[2][j];
temp = state[3][j];
state[3][j] = state[2][j];
state[2][j] = state[1][j];
state[1][j] = state[0][j];
state[0][j] = temp;
}
}
// Apply the inverse linear diffusion matrix
void MixColumn_inv(unsigned char state[4][4])
{
int j;
unsigned char temp;
for (j = 0; j < 4; j++)
{
temp = state[3][j];
state[3][j] = state[0][j];
state[0][j] = state[1][j];
state[1][j] = state[2][j];
state[2][j] = temp;
state[3][j] ^= state[2][j];
state[2][j] ^= state[0][j];
state[1][j] ^= state[2][j];
}
}
// decryption function of Skinny
void dec(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
unsigned char state[4][4];
unsigned char dummy[4][4] = {{0}};
unsigned char keyCells[3][4][4];
int i;
memset(keyCells, 0, 48);
for (i = 0; i < 16; i++)
{
if (versions[ver][0] == 64)
{
if (i & 1)
{
state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
}
else
{
state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
}
}
else if (versions[ver][0] == 128)
{
state[i >> 2][i & 0x3] = input[i] & 0xFF;
keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
if (versions[ver][1] >= 256)
keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
if (versions[ver][1] >= 384)
keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
}
}
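// fast-forward the tweakey schedule through r rounds on a dummy state
// (only the key cells matter here), so the loop below can unwind it backwards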
for (i = r - 1; i >= 0; i--)
{
AddKey(dummy, keyCells, ver);
}
#ifdef DEBUG
fprintf(fic, "DEC - initial state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
for (i = r - 1; i >= 0; i--)
{
MixColumn_inv(state);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after MixColumn_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
ShiftRows_inv(state);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after ShiftRows_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddKey_inv(state, keyCells, ver);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after AddKey_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddConstants(state, i);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after AddConstants_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
if (versions[ver][0] == 64)
SubCell4_inv(state);
else
SubCell8_inv(state);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after SubCell_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
}
#ifdef DEBUG
fprintf(fic, "DEC - final state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
if (versions[ver][0] == 64)
{
for (i = 0; i < 8; i++)
input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
}
else if (versions[ver][0] == 128)
{
for (i = 0; i < 16; i++)
input[i] = state[i >> 2][i & 0x3] & 0xFF;
}
}
// encryption function of Skinny
void enc(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
unsigned char state[4][4];
unsigned char keyCells[3][4][4];
int i;
memset(keyCells, 0, 48);
for (i = 0; i < 16; i++)
{
if (versions[ver][0] == 64)
{
if (i & 1)
{
state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
}
else
{
state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
}
}
else if (versions[ver][0] == 128)
{
state[i >> 2][i & 0x3] = input[i] & 0xFF;
keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
if (versions[ver][1] >= 256)
keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
if (versions[ver][1] >= 384)
keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
}
}
#ifdef DEBUG
fprintf(fic, "ENC - initial state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
for (i = 0; i < r; i++)
{
if (versions[ver][0] == 64)
SubCell4(state);
else
SubCell8(state);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after SubCell: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddConstants(state, i);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after AddConstants: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddKey(state, keyCells, ver);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after AddKey: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
ShiftRows(state);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after ShiftRows: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
MixColumn(state);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after MixColumn: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
} //The last subtweakey should not be added
#ifdef DEBUG
fprintf(fic, "ENC - final state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
if (versions[ver][0] == 64)
{
for (i = 0; i < 8; i++)
input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
}
else if (versions[ver][0] == 128)
{
for (i = 0; i < 16; i++)
input[i] = state[i >> 2][i & 0x3] & 0xFF;
}
}
// generate random test vectors for the given Skinny version
void TestVectors(int ver)
{
unsigned char p[16];
unsigned char c[16];
unsigned char k[48];
int n;
for (n = 1; n < 10; n++)
{
int i;
for (i = 0; i < (versions[ver][0] >> 3); i++)
c[i] = p[i] = rand() & 0xff;
for (i = 0; i < (versions[ver][0] >> 3); i++)
printf("%02x", p[i]);
printf("\n");
for (i = 0; i < (versions[ver][1] >> 3); i++)
k[i] = rand() & 0xff;
fprintf(fic, "TK = ");
for (i = 0; i < (versions[ver][1] >> 3); i++)
fprintf(fic, "%02x", k[i]);
fprintf(fic, "\n");
fprintf(fic, "P = ");
for (i = 0; i < (versions[ver][0] >> 3); i++)
fprintf(fic, "%02x", p[i]);
fprintf(fic, "\n");
enc(c, k, ver, 10);
fprintf(fic, "C = ");
for (i = 0; i < (versions[ver][0] >> 3); i++)
fprintf(fic, "%02x", c[i]);
fprintf(fic, "\n");
dec(c, k, ver, 10);
fprintf(fic, "P' = ");
for (i = 0; i < (versions[ver][0] >> 3); i++)
fprintf(fic, "%02x", c[i]);
fprintf(fic, "\n\n");
}
}
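/*
 * Boomerang quartet sampling (as implemented below):
 *   k2 = k1 ^ dk1, k3 = k1 ^ dk2, k4 = k2 ^ dk2
 *   p2 = p1 ^ dp,  c1 = E_k1(p1), c2 = E_k2(p2)
 *   c3 = c1 ^ dc,  c4 = c2 ^ dc,  p3 = D_k3(c3), p4 = D_k4(c4)
 * A quartet is right iff p3 ^ p4 == dp; right quartets are counted and
 * printed together with all four keys.
 */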
int boomerang(int r, int ver, unsigned long long N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
int i;
unsigned char p1[16], p2[16];
unsigned char p1_old[16], p2_old[16];
unsigned char c3_old[16], c4_old[16];
unsigned char c3[16], c4[16];
unsigned char k1[48], k2[48], k3[48], k4[48];
// randomly choose k1
for (i = 0; i < (versions[ver][1] >> 3); i++)
k1[i] = rand() & 0xff;
// derive k2
for (i = 0; i < (versions[ver][1] >> 3); i++)
k2[i] = k1[i] ^ dk1[i];
// derive k3
for (i = 0; i < (versions[ver][1] >> 3); i++)
k3[i] = k1[i] ^ dk2[i];
// derive k4
for (i = 0; i < (versions[ver][1] >> 3); i++)
k4[i] = k2[i] ^ dk2[i];
int num = 0;
for (UINT64 t = 0; t < N3; t++)
{
// randomly choose p1
for (i = 0; i < (versions[ver][0] >> 3); i++)
{
p1[i] = rand() & 0xff;
p1_old[i] = p1[i];
}
// derive p2
for (i = 0; i < (versions[ver][0] >> 3); i++)
{
p2[i] = p1[i] ^ dp[i];
p2_old[i] = p2[i];
}
enc(p1, k1, ver, r);
enc(p2, k2, ver, r);
// derive c3
for (i = 0; i < (versions[ver][0] >> 3); i++)
{
c3[i] = p1[i] ^ dc[i];
c3_old[i] = c3[i];
}
// derive c4
for (i = 0; i < (versions[ver][0] >> 3); i++)
{
c4[i] = p2[i] ^ dc[i];
c4_old[i] = c4[i];
}
dec(c3, k3, ver, r);
dec(c4, k4, ver, r);
bool flag = true;
for (i = 0; i < (versions[ver][0] >> 3); i++)
if ((c3[i] ^ c4[i]) != dp[i])
flag = 0;
if (flag)
{
num++;
printf("%s\n", "A right quartet found :)\n");
printf("p1: ");
string_state(p1_old, ver);
printf("\n");
printf("p2: ");
string_state(p2_old, ver);
printf("\n");
printf("p3: ");
string_state(c3, ver);
printf("\n");
printf("p4: ");
string_state(c4, ver);
printf("\n");
printf("c1: ");
string_state(p1, ver);
printf("\n");
printf("c2: ");
string_state(p2, ver);
printf("\n");
printf("c3: ");
string_state(c3_old, ver);
printf("\n");
printf("c4: ");
string_state(c4_old, ver);
printf("\n");
printf("k1: ");
string_tweak(k1, ver);
printf("\n");
printf("k2: ");
string_tweak(k2, ver);
printf("\n");
printf("k3: ");
string_tweak(k3, ver);
printf("\n");
printf("k4: ");
string_tweak(k4, ver);
printf("\n");
}
}
return num;
}
double send_boomerangs(int R, int ver, int N1, UINT64 N2, UINT64 N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
// Parallel execution
int NUM[N1];
int counter;
printf("#Rounds: %d rounds\n", R);
printf("#Total Queries = (#Parallel threads) * (#Bunches per thread) * (#Queries per bunch) = %d * %llu * %llu = 2^(%f)\n", N1, N2, N3, log(N1 * N2 * N3) / log(2));
printf("#Queries per thread = (#Bunches per thread) * (#Queries per bunch) = %llu * %llu = 2^(%f)\n", N2, N3, log(N2 * N3) / log(2));
clock_t clock_timer;
double wall_timer;
clock_timer = clock();
wall_timer = omp_get_wtime();
omp_set_num_threads(N1);
#pragma omp parallel for
for (counter = 0; counter < N1; counter++)
{
int num = 0;
int ID = omp_get_thread_num();
for (UINT64 j = 0; j < N2; j++)
{
num += boomerang(R, ver, N3, dp, dc, dk1, dk2);
if ((j & STEP) == 0){
printf("PID: %d \t Bunch Number: %llu/%llu\n", ID, j, N2);
}
}
NUM[ID] = num;
}
printf("%s: %0.4f\n", "time on clock", (double)(clock() - clock_timer) / CLOCKS_PER_SEC);
printf("%s: %0.4f\n", "time on wall", omp_get_wtime() - wall_timer);
double sum = 0;
double sum_temp = 1;
for (int i = 0; i < N1; i++)
sum += NUM[i];
printf("sum = %f\n", sum);
sum_temp = (double)(N1 * N2 * N3) / sum;
printf("2^(-%f)\n\n", log(sum_temp) / log(2));
printf("##########################\n");
return sum;
}
void convert_hexstr_to_statearray(int ver, char hex_str[], unsigned char dx[16])
{
for (int i = 0; i < (versions[ver][0] >> 3); i++)
{
char hex[2];
hex[0] = hex_str[2 * i];
hex[1] = hex_str[2 * i + 1];
dx[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
}
}
void convert_hexstr_to_tweakarray(int ver, char hex_str[], unsigned char dt[48])
{
for (int i = 0; i < (versions[ver][1] >> 3); i++)
{
char hex[2];
hex[0] = hex_str[2 * i];
hex[1] = hex_str[2 * i + 1];
dt[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
}
}
void init_prng(int offset) {
//int initial_seed = 0x5EC7F2B0;
//int initial_seed = 0x30051991; My birthday!
unsigned int initial_seed = time(NULL) + offset*1000000;
srand(initial_seed); // seed the PRNG once per process
printf("[+] PRNG initialized to 0x%08X\n", initial_seed);
}
int main(int argc, char *argv[])
{
//srand((unsigned)time(NULL)); // Initialization, should only be called once. int r = rand();
int offset = (argc > 1) ? atoi(argv[1]) : 0; // optional PRNG-offset argument
init_prng(offset);
// //test all versions of Skinny
// for (i = 0; i < (sizeof(versions) / sizeof(*versions)); i++)
// {
// sprintf(name, "test_vectors_%i_%i.txt", versions[i][0], versions[i][1]);
// fic = fopen(name, "w");
// fprintf(fic, "\n\nSkinny-%i/%i: \n", versions[i][0], versions[i][1]);
// TestVectors(i);
// fclose(fic);
// printf("Generating test vectors for Skinny-%i/%i - saved in file test_vectors_%i_%i.txt \n", versions[i][0], versions[i][1], versions[i][0], versions[i][1]);
// }
unsigned char dp[16];
unsigned char dc[16];
unsigned char dk1[48];
unsigned char dk2[48];
// #######################################################################################################
// #######################################################################################################
// ############################## User must change only the following lines ##############################
int n = 1; // Number of independent experiments
int R = 18; // Number of rounds
int ver = 1; // Determine the version:
// [0 = Skinny-64-64]
// [1 = Skinny-64-128]
// [2 = Skinny-64-192]
// [3 = Skinny-128-128]
// [4 = Skinny-128-256]
// [5 = Skinny-128-384]
char dp_str[] = "0000000000000008";
char dc_str[] = "0454000404070404";
char dk1_str[] = "00000000C000000000000000F0000000";
char dk2_str[] = "00000000000040000000000000007000";
// #######################################################################################################
// #######################################################################################################
convert_hexstr_to_statearray(ver, dp_str, dp);
convert_hexstr_to_statearray(ver, dc_str, dc);
convert_hexstr_to_tweakarray(ver, dk1_str, dk1);
convert_hexstr_to_tweakarray(ver, dk2_str, dk2);
//########################## Number of queries #########################
int N1 = Nthreads; // Number of parallel threads : N1
int deg = 16;
UINT64 N2 = 1ULL << deg; // Number of bunches per thread : N2 = 2^(deg)
UINT64 N3 = 1ULL << 16; // Number of queries per bunch : N3
//################### Number of total queries : N1*N2*N3 ###############
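// With the defaults above (Nthreads = 1, deg = 16): N1 * N2 * N3 =
// 1 * 2^16 * 2^16 = 2^32 total adaptive queries.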
double sum = 0;
for (int i = 0; i < n; i++)
{
sum += send_boomerangs(R, ver, N1, N2, N3, dp, dc, dk1, dk2);
}
printf("Program number = %d", PROGRAMNUMBER);
printf("\nAverage = 2^(-%0.4f)\n", (log(n) + log(N1) + log(N2) + log(N3) - log(sum))/log(2));
// sum = (double)(n * N1 * N2 * N3) / sum;
// printf("\nAverage = 2^(-%0.2f)\n", log(sum) / log(2));
return 0;
}
|
GeneralMatrixMatrix.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsStride,
ResScalar* res, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar,LhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
// transpose the product such that the result is column major
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
ColMajor>
::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
}
};
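// (A row-major product C = A*B is computed as the column-major product
// C^T = B^T * A^T, which is why rows/cols and lhs/rhs are swapped in the
// run() call above.)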
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsStride,
ResScalar* res, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar,RhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
//Index nc = blocking.nc(); // cache block size along the N direction
gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
Index tid = omp_get_thread_num();
Index threads = omp_get_num_threads();
std::size_t sizeA = kc*mc;
std::size_t sizeW = kc*Traits::WorkSpaceFactor;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0);
ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0);
RhsScalar* blockB = blocking.blockB();
eigen_internal_assert(blockB!=0);
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing A'.
pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);
// Pack B_k to B' in a parallel fashion:
// each thread packs the sub block B_k,j to B'_j where j is the thread id.
// However, before copying to B'_j, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
while(info[tid].users!=0) {}
info[tid].users += threads;
pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length);
// Notify the other threads that the part B'_j is ready to go.
info[tid].sync = k;
// Computes C_i += A' * B' per B'_j
for(Index shift=0; shift<threads; ++shift)
{
Index j = (tid+shift)%threads;
// At this point we have to make sure that B'_j has been updated by the thread j,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if(shift>0)
while(info[j].sync!=k) {}
gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w);
}
// Then keep going as usual with the remaining A'
for(Index i=mc; i<rows; i+=mc)
{
const Index actual_mc = (std::min)(i+mc,rows)-i;
// pack A_i,k to A'
pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);
// C_i += A' * B'
gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w);
}
// Release all the sub blocks B'_j of B' for the current thread,
// i.e., we simply decrement the number of users by 1
for(Index j=0; j<threads; ++j)
#pragma omp atomic
--(info[j].users);
}
}
else
#endif // EIGEN_HAS_OPENMP
{
EIGEN_UNUSED_VARIABLE(info);
// this is the sequential version!
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*cols;
std::size_t sizeW = kc*Traits::WorkSpaceFactor;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW());
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
// (==GEMM_VAR1)
for(Index k2=0; k2<depth; k2+=kc)
{
const Index actual_kc = (std::min)(k2+kc,depth)-k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack rhs's panel into a sequential chunk of memory (L2 caching)
// Note that this panel will be read as many times as the number of blocks in the lhs's
// vertical panel which is, in practice, a very low number.
pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);
// For each mc x kc block of the lhs's vertical panel...
// (==GEPP_VAR1)
for(Index i2=0; i2<rows; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,rows)-i2;
// We pack the lhs's block into a sequential chunk of memory (L1 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
        // micro vertical panels of the large rhs's panel (e.g., cols/4 times).
pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc);
// Everything is packed, we can now call the block * panel kernel:
gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
}
}
}
}
};
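// Stripped of packing, vectorization and the parallel path, the sequential
// algorithm above reduces to the following minimal sketch (illustrative only;
// plain C, column-major, leading dimensions equal to the number of rows):
//
//   // C (rows x cols) += alpha * A (rows x depth) * B (depth x cols)
//   for (k2 = 0; k2 < depth; k2 += kc)                  // GEMM_VAR1
//     for (i2 = 0; i2 < rows; i2 += mc)                 // GEPP_VAR1
//       // the gebp kernel: (mc x kc) block of A times (kc x cols) panel of B
//       for (j = 0; j < cols; j++)
//         for (i = i2; i < min(i2+mc, rows); i++)
//           for (k = k2; k < min(k2+kc, depth); k++)
//             C[i + j*rows] += alpha * A[i + k*rows] * B[k + j*depth];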
/*********************************************************************************
* Specialization of GeneralProduct<> for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
template<typename Lhs, typename Rhs>
struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
: traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
{};
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, Scalar actualAlpha,
BlockingType& blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
{}
void initParallelSession() const
{
m_blocking.allocateB();
}
void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
{
if(cols==-1)
cols = m_rhs.cols();
Gemm::run(rows, cols, m_lhs.cols(),
/*(const Scalar*)*/&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
/*(const Scalar*)*/&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
(Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
protected:
const Lhs& m_lhs;
const Rhs& m_rhs;
Dest& m_dest;
Scalar m_actualAlpha;
BlockingType& m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
typedef _LhsScalar LhsScalar;
typedef _RhsScalar RhsScalar;
protected:
LhsScalar* m_blockA;
RhsScalar* m_blockB;
RhsScalar* m_blockW;
DenseIndex m_mc;
DenseIndex m_nc;
DenseIndex m_kc;
public:
level3_blocking()
: m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0)
{}
inline DenseIndex mc() const { return m_mc; }
inline DenseIndex nc() const { return m_nc; }
inline DenseIndex kc() const { return m_kc; }
inline LhsScalar* blockA() { return m_blockA; }
inline RhsScalar* blockB() { return m_blockB; }
inline RhsScalar* blockW() { return m_blockW; }
};
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor,
ActualRows = Transpose ? MaxCols : MaxRows,
ActualCols = Transpose ? MaxRows : MaxCols
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
enum {
SizeA = ActualRows * MaxDepth,
SizeB = ActualCols * MaxDepth,
SizeW = MaxDepth * Traits::WorkSpaceFactor
};
EIGEN_ALIGN16 LhsScalar m_staticA[SizeA];
EIGEN_ALIGN16 RhsScalar m_staticB[SizeB];
EIGEN_ALIGN16 RhsScalar m_staticW[SizeW];
public:
gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/)
{
this->m_mc = ActualRows;
this->m_nc = ActualCols;
this->m_kc = MaxDepth;
this->m_blockA = m_staticA;
this->m_blockB = m_staticB;
this->m_blockW = m_staticW;
}
inline void allocateA() {}
inline void allocateB() {}
inline void allocateW() {}
inline void allocateAll() {}
};
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
DenseIndex m_sizeA;
DenseIndex m_sizeB;
DenseIndex m_sizeW;
public:
gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc);
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
m_sizeW = this->m_kc*Traits::WorkSpaceFactor;
}
void allocateA()
{
if(this->m_blockA==0)
this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
}
void allocateB()
{
if(this->m_blockB==0)
this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
}
void allocateW()
{
if(this->m_blockW==0)
this->m_blockW = aligned_new<RhsScalar>(m_sizeW);
}
void allocateAll()
{
allocateA();
allocateB();
allocateW();
}
~gemm_blocking_space()
{
aligned_delete(this->m_blockA, m_sizeA);
aligned_delete(this->m_blockB, m_sizeB);
aligned_delete(this->m_blockW, m_sizeW);
}
};
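// Design note: the FiniteAtCompileTime flag of the primary template above
// selects between these two specializations.  When MaxRows, MaxCols and
// MaxDepth are all known at compile time, the 'true' specialization is used
// and the packing buffers are the EIGEN_ALIGN16 static arrays, so no heap
// allocation ever happens; if any dimension is Dynamic, the 'false'
// specialization computes the block sizes at run time with
// computeProductBlockingSizes() and allocates the buffers lazily via
// aligned_new<>(), releasing them in its destructor.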
} // end namespace internal
template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, GemmProduct>
: public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs>
{
enum {
MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
};
public:
EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef Scalar ResScalar;
GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
{
typedef internal::scalar_product_op<LhsScalar,RhsScalar> BinOp;
EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar);
}
template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
{
eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
* RhsBlasTraits::extractScalarFactor(m_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
typedef internal::gemm_functor<
Scalar, Index,
internal::general_matrix_matrix_product<
Index,
LhsScalar, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
RhsScalar, (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
_ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit);
}
};
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
GB_unop__identity_fp32_fc32.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp32_fc32)
// op(A') function: GB (_unop_tran__identity_fp32_fc32)
// C type: float
// A type: GxB_FC32_t
// cast: float cij = (float) crealf (aij)
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
float z = (float) crealf (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (float) crealf (aij) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_fp32_fc32)
(
float *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
float z = (float) crealf (aij) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
float z = (float) crealf (aij) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
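// Illustrative usage (not part of the library; CMPLXF is the C11 complex
// constructor from <complex.h>): applying this kernel to a small dense array,
// with Ab == NULL since the matrix is not bitmap:
//
//      GxB_FC32_t Ax [3] = { CMPLXF (1,2), CMPLXF (3,4), CMPLXF (5,6) } ;
//      float Cx [3] ;
//      GB (_unop_apply__identity_fp32_fc32) (Cx, Ax, NULL, 3, 1) ;
//      // Cx is now { 1, 3, 5 } : the real parts of Ax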
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_fp32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_for_schedule_dynamic.c
|
// RUN: %libomp-compile-and-run
/*
 * Test for dynamic scheduling with chunk size.
 * Method: count how many times the iteration space is dispatched
 *         and check that each dispatch has the requested chunk size,
 *         except possibly the last one.
 * Two adjacent chunks may be assigned to the same thread.
* Modified by Chunhua Liao
*/
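/*
 * Worked example: with CFDMAX_SIZE = 100 and chunk_size = 7 the schedule
 * should produce 14 full dispatches of 7 iterations plus a final dispatch
 * of 100 % 7 = 2 iterations.  Hence every intermediate chunk recorded in
 * tmp[] must be a multiple of 7 (adjacent chunks handed to the same thread
 * merge into one entry), and the last entry must be congruent to 2 mod 7.
 */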
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include "omp_testsuite.h"
#define CFDMAX_SIZE 100
const int chunk_size = 7;
int test_omp_for_schedule_dynamic()
{
int tid;
int *tids;
int i;
int tidsArray[CFDMAX_SIZE];
int count = 0;
int tmp_count = 0; /*dispatch times*/
int *tmp; /*store chunk size for each dispatch*/
int result = 0;
tids = tidsArray;
#pragma omp parallel private(tid) shared(tids)
  { /* begin of parallel */
    tid = omp_get_thread_num ();
#pragma omp for schedule(dynamic,chunk_size)
for (i = 0; i < CFDMAX_SIZE; i++) {
tids[i] = tid;
}
}
for (i = 0; i < CFDMAX_SIZE - 1; ++i) {
if (tids[i] != tids[i + 1]) {
count++;
}
}
tmp = (int *) malloc (sizeof (int) * (count + 1));
tmp[0] = 1;
for (i = 0; i < CFDMAX_SIZE - 1; ++i) {
if (tmp_count > count) {
printf ("--------------------\nTestinternal Error: List too small!!!\n--------------------\n"); /* Error handling */
break;
}
if (tids[i] != tids[i + 1]) {
tmp_count++;
tmp[tmp_count] = 1;
} else {
tmp[tmp_count]++;
}
}
/* is dynamic statement working? */
for (i = 0; i < count; i++) {
if ((tmp[i]%chunk_size)!=0) {
/* it is possible for 2 adjacent chunks assigned to a same thread */
result++;
fprintf(stderr,"The intermediate dispatch has wrong chunksize.\n");
/* result += ((tmp[i] / chunk_size) - 1); */
}
}
if ((tmp[count]%chunk_size)!=(CFDMAX_SIZE%chunk_size)) {
result++;
fprintf(stderr,"the last dispatch has wrong chunksize.\n");
}
  /* for (int i=0;i<count+1;++i) printf("%d\t:=\t%d\n",i+1,tmp[i]); */
  free (tmp);
  return (result==0);
}
int main()
{
int i;
int num_failed=0;
for(i = 0; i < REPETITIONS; i++) {
if(!test_omp_for_schedule_dynamic()) {
num_failed++;
}
}
return num_failed;
}
|
GB_unop__identity_int64_int64.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int64_int64
// op(A') function: GB_unop_tran__identity_int64_int64
// C type: int64_t
// A type: int64_t
// cast: int64_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
1
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_int64_int64
(
int64_t *Cx, // Cx and Ax may be aliased
const int64_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
int64_t z = aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int64_t aij = Ax [p] ;
int64_t z = aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_int64_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
black_kernel.h
|
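// Red-black successive over-relaxation (SOR) sweep over the "black" cells of
// a checkerboard-split pressure grid: each black cell is relaxed against its
// four red neighbours.  Ignoring the checkerboard index arithmetic, the
// update below has the classical SOR form (illustrative notation):
//
//   p_new = (1 - omega) * p_old
//         + omega * ( (p_E + p_W)/dx^2 + (p_N + p_S)/dy^2 - rhs )
//                 / ( 2/dx^2 + 2/dy^2 )
//
// where rhs is the divergence of the provisional velocities (F, G) divided
// by dt.  Real, pres_black, pres_red, F, G, NUM, BLOCK_SIZE, ONE, TWO,
// omega, dx, dy and dt are assumed to be defined by the including file.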
#pragma omp target teams distribute parallel for collapse(2) thread_limit(BLOCK_SIZE)
for (int col = 1; col < NUM+1; col++)
{
for (int row = 1; row < NUM/2+1; row++)
{
int NUM_2 = NUM >> 1;
Real p_ij = pres_black(col, row);
Real p_im1j = pres_red(col - 1, row);
Real p_ip1j = pres_red(col + 1, row);
Real p_ijm1 = pres_red(col, row - ((col + 1) & 1));
Real p_ijp1 = pres_red(col, row + (col & 1));
// right-hand side
Real rhs = (((F(col, (2 * row) - ((col + 1) & 1))
- F(col - 1, (2 * row) - ((col + 1) & 1))) / dx)
+ ((G(col, (2 * row) - ((col + 1) & 1))
- G(col, (2 * row) - ((col + 1) & 1) - 1)) / dy)) / dt;
pres_black(col, row) = p_ij * (ONE - omega) + omega *
(((p_ip1j + p_im1j) / (dx * dx)) + ((p_ijp1 + p_ijm1) / (dy * dy)) -
rhs) / ((TWO / (dx * dx)) + (TWO / (dy * dy)));
}
}
|
segment.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS EEEEE GGGG M M EEEEE N N TTTTT %
% SS E G MM MM E NN N T %
% SSS EEE G GGG M M M EEE N N N T %
% SS E G G M M E N NN T %
% SSSSS EEEEE GGGG M M EEEEE N N T %
% %
% %
% MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means %
% %
% Software Design %
% Cristy %
% April 1993 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Segment segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% c-means technique. The scale-space filter analyzes the histograms of
% the three color components of the image and identifies a set of
% classes. The extents of each class are used to coarsely segment the
% image with thresholding. The color associated with each class is
% determined by the mean color of all pixels within the extents of a
% particular class. Finally, any unclassified pixels are assigned to
% the closest class with the fuzzy c-means technique.
%
% The fuzzy c-Means algorithm can be summarized as follows:
%
% o Build a histogram, one for each color component of the image.
%
% o For each histogram, successively apply the scale-space filter and
% build an interval tree of zero crossings in the second derivative
% at each scale. Analyze this scale-space ``fingerprint'' to
% determine which peaks and valleys in the histogram are most
% predominant.
%
% o The fingerprint defines intervals on the axis of the histogram.
% Each interval contains either a minima or a maxima in the original
% signal. If each color component lies within the maxima interval,
% that pixel is considered ``classified'' and is assigned a unique
% class number.
%
% o Any pixel that fails to be classified in the above thresholding
% pass is classified using the fuzzy c-Means technique. It is
% assigned to one of the classes discovered in the histogram analysis
% phase.
%
% The fuzzy c-Means technique attempts to cluster a pixel by finding
% a local minimum of the generalized within-group sum of squared error
% objective function. A pixel is assigned to the closest class of
% which the fuzzy membership has a maximum value.
%
% Segment is strongly based on software written by Andy Gallo,
% University of Delaware.
%
% The following reference was used in creating this program:
%
% Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation
% Algorithm Based on the Thresholding and the Fuzzy c-Means
% Techniques", Pattern Recognition, Volume 23, Number 9, pages
% 935-952, 1990.
%
%
*/
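/*
  The classification pass (see Classify() below) uses the standard c-means
  membership.  In illustrative notation, with m the weighting exponent and
  d_j the squared distance from a pixel to the center of class j, the
  membership of the pixel in class j is

      u_j = 1 / sum_k ( d_j / d_k )^( 1/(m-1) )

  and the pixel is assigned to the class of maximum membership.  In the code,
  SegmentPower() supplies the (d_j/d_k)^(1/(m-1)) term, sum accumulates the
  denominator, and 1.0/sum is maximized as local_minima.
*/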
#include "magick/studio.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#define MaxDimension 3
#define DeltaTau 0.5f
#if defined(FastClassify)
#define WeightingExponent 2.0
#define SegmentPower(ratio) (ratio)
#else
#define WeightingExponent 2.5
#define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0)))
#endif
#define Tau 5.2f
/*
Typedef declarations.
*/
typedef struct _ExtentPacket
{
MagickRealType
center;
ssize_t
index,
left,
right;
} ExtentPacket;
typedef struct _Cluster
{
struct _Cluster
*next;
ExtentPacket
red,
green,
blue;
ssize_t
count,
id;
} Cluster;
typedef struct _IntervalTree
{
MagickRealType
tau;
ssize_t
left,
right;
MagickRealType
mean_stability,
stability;
struct _IntervalTree
*sibling,
*child;
} IntervalTree;
typedef struct _ZeroCrossing
{
MagickRealType
tau,
histogram[256];
short
crossings[256];
} ZeroCrossing;
/*
Constant declarations.
*/
static const int
Blue = 2,
Green = 1,
Red = 0,
SafeMargin = 3,
TreeLength = 600;
/*
Method prototypes.
*/
static MagickRealType
OptimalTau(const ssize_t *,const double,const double,const double,
const double,short *);
static ssize_t
DefineRegion(const short *,ExtentPacket *);
static void
FreeNodes(IntervalTree *),
InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
ScaleSpace(const ssize_t *,const MagickRealType,MagickRealType *),
ZeroCrossHistogram(MagickRealType *,const MagickRealType,short *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Classify() defines one or more classes. Each pixel is thresholded to
% determine which class it belongs to. If the class is not identified it is
% assigned to the closest class based on the fuzzy c-Means technique.
%
% The format of the Classify method is:
%
% MagickBooleanType Classify(Image *image,short **extrema,
% const MagickRealType cluster_threshold,
% const MagickRealType weighting_exponent,
% const MagickBooleanType verbose)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o cluster_threshold: This MagickRealType represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o weighting_exponent: Specifies the membership weighting exponent.
%
% o verbose: A value greater than zero prints detailed information about
% the identified classes.
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
const MagickRealType cluster_threshold,
const MagickRealType weighting_exponent,const MagickBooleanType verbose)
{
#define SegmentImageTag "Segment/Image"
CacheView
*image_view;
Cluster
*cluster,
*head,
*last_cluster,
*next_cluster;
ExceptionInfo
*exception;
ExtentPacket
blue,
green,
red;
MagickOffsetType
progress;
MagickRealType
*free_squares;
MagickStatusType
status;
register ssize_t
i;
register MagickRealType
*squares;
size_t
number_clusters;
ssize_t
count,
y;
/*
Form clusters.
*/
cluster=(Cluster *) NULL;
head=(Cluster *) NULL;
(void) memset(&red,0,sizeof(red));
(void) memset(&green,0,sizeof(green));
(void) memset(&blue,0,sizeof(blue));
while (DefineRegion(extrema[Red],&red) != 0)
{
green.index=0;
while (DefineRegion(extrema[Green],&green) != 0)
{
blue.index=0;
while (DefineRegion(extrema[Blue],&blue) != 0)
{
/*
Allocate a new class.
*/
if (head != (Cluster *) NULL)
{
cluster->next=(Cluster *) AcquireMagickMemory(
sizeof(*cluster->next));
cluster=cluster->next;
}
else
{
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
head=cluster;
}
if (cluster == (Cluster *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
}
}
}
if (head == (Cluster *) NULL)
{
/*
        No classes were identified; create one.
*/
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
if (cluster == (Cluster *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
head=cluster;
}
/*
Count the pixels for each cluster.
*/
status=MagickTrue;
count=0;
progress=0;
exception=(&image->exception);
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >=
(cluster->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <=
(cluster->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >=
(cluster->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <=
(cluster->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >=
(cluster->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <=
(cluster->blue.right+SafeMargin)))
{
/*
Count this pixel.
*/
count++;
cluster->red.center+=(MagickRealType) ScaleQuantumToChar(GetPixelRed(p));
cluster->green.center+=(MagickRealType)
ScaleQuantumToChar(GetPixelGreen(p));
cluster->blue.center+=(MagickRealType) ScaleQuantumToChar(GetPixelBlue(p));
cluster->count++;
break;
}
p++;
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_Classify)
#endif
proceed=SetImageProgress(image,SegmentImageTag,progress++,
2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
/*
Remove clusters that do not meet minimum cluster threshold.
*/
count=0;
last_cluster=head;
next_cluster=head;
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
if ((cluster->count > 0) &&
(cluster->count >= (count*cluster_threshold/100.0)))
{
/*
Initialize cluster.
*/
cluster->id=count;
cluster->red.center/=cluster->count;
cluster->green.center/=cluster->count;
cluster->blue.center/=cluster->count;
count++;
last_cluster=cluster;
continue;
}
/*
Delete cluster.
*/
if (cluster == head)
head=next_cluster;
else
last_cluster->next=next_cluster;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
number_clusters=(size_t) count;
if (verbose != MagickFalse)
{
/*
Print cluster statistics.
*/
(void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
(void) FormatLocaleFile(stdout,"===================\n\n");
(void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
cluster_threshold);
(void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
weighting_exponent);
(void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
(double) number_clusters);
/*
Print the total number of points per cluster.
*/
(void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
(void) FormatLocaleFile(stdout,"=============================\n\n");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
(void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
cluster->id,(double) cluster->count);
/*
Print the cluster extents.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,
"%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
cluster->red.left,(double) cluster->red.right,(double)
cluster->green.left,(double) cluster->green.right,(double)
cluster->blue.left,(double) cluster->blue.right);
}
/*
Print the cluster center values.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"=====================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
cluster->red.center,(double) cluster->green.center,(double)
cluster->blue.center);
}
(void) FormatLocaleFile(stdout,"\n");
}
if (number_clusters > 256)
ThrowBinaryException(ImageError,"TooManyClusters",image->filename);
/*
Speed up distance calculations.
*/
squares=(MagickRealType *) AcquireQuantumMemory(513UL,sizeof(*squares));
if (squares == (MagickRealType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
squares+=255;
for (i=(-255); i <= 255; i++)
squares[i]=(MagickRealType) i*(MagickRealType) i;
/*
Allocate image colormap.
*/
if (AcquireImageColormap(image,number_clusters) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
i=0;
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
image->colormap[i].red=ScaleCharToQuantum((unsigned char)
(cluster->red.center+0.5));
image->colormap[i].green=ScaleCharToQuantum((unsigned char)
(cluster->green.center+0.5));
image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
(cluster->blue.center+0.5));
i++;
}
/*
    Do coarse-grain classification.
*/
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Cluster
*cluster;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(indexes+x,0);
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
if (((ssize_t) ScaleQuantumToChar(q->red) >=
(cluster->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->red) <=
(cluster->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->green) >=
(cluster->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->green) <=
(cluster->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->blue) >=
(cluster->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->blue) <=
(cluster->blue.right+SafeMargin)))
{
/*
Classify this pixel.
*/
SetPixelIndex(indexes+x,cluster->id);
break;
}
}
if (cluster == (Cluster *) NULL)
{
MagickRealType
distance_squared,
local_minima,
numerator,
ratio,
sum;
register ssize_t
j,
k;
/*
Compute fuzzy membership.
*/
local_minima=0.0;
for (j=0; j < (ssize_t) image->colors; j++)
{
sum=0.0;
p=image->colormap+j;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)-
(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+
squares[(ssize_t) ScaleQuantumToChar(q->green)-
(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+
squares[(ssize_t) ScaleQuantumToChar(q->blue)-
(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))];
numerator=distance_squared;
for (k=0; k < (ssize_t) image->colors; k++)
{
p=image->colormap+k;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)-
(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+
squares[(ssize_t) ScaleQuantumToChar(q->green)-
(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+
squares[(ssize_t) ScaleQuantumToChar(q->blue)-
(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))];
ratio=numerator/distance_squared;
sum+=SegmentPower(ratio);
}
if ((sum != 0.0) && ((1.0/sum) > local_minima))
{
/*
Classify this pixel.
*/
local_minima=1.0/sum;
SetPixelIndex(indexes+x,j);
}
}
}
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_Classify)
#endif
proceed=SetImageProgress(image,SegmentImageTag,progress++,
2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
status&=SyncImage(image);
/*
Relinquish resources.
*/
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
squares-=255;
free_squares=squares;
free_squares=(MagickRealType *) RelinquishMagickMemory(free_squares);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C r o s s i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCrossings() guarantees that an even number of zero crossings
% always lies between two crossings, so that the scale-space fingerprints
% form lines rather than loops.
%
% The format of the ConsolidateCrossings method is:
%
% ConsolidateCrossings(ZeroCrossing *zero_crossing,
% const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
const size_t number_crossings)
{
register ssize_t
i,
j,
k,
l;
ssize_t
center,
correct,
count,
left,
right;
/*
Consolidate zero crossings.
*/
for (i=(ssize_t) number_crossings-1; i >= 0; i--)
for (j=0; j <= 255; j++)
{
if (zero_crossing[i].crossings[j] == 0)
continue;
/*
Find the entry that is closest to j and still preserves the
property that there are an even number of crossings between
intervals.
*/
for (k=j-1; k > 0; k--)
if (zero_crossing[i+1].crossings[k] != 0)
break;
left=MagickMax(k,0);
center=j;
for (k=j+1; k < 255; k++)
if (zero_crossing[i+1].crossings[k] != 0)
break;
right=MagickMin(k,255);
/*
K is the zero crossing just left of j.
*/
for (k=j-1; k > 0; k--)
if (zero_crossing[i].crossings[k] != 0)
break;
if (k < 0)
k=0;
/*
Check center for an even number of crossings between k and j.
*/
correct=(-1);
if (zero_crossing[i+1].crossings[j] != 0)
{
count=0;
for (l=k+1; l < center; l++)
if (zero_crossing[i+1].crossings[l] != 0)
count++;
if (((count % 2) == 0) && (center != k))
correct=center;
}
/*
Check left for an even number of crossings between k and j.
*/
if (correct == -1)
{
count=0;
for (l=k+1; l < left; l++)
if (zero_crossing[i+1].crossings[l] != 0)
count++;
if (((count % 2) == 0) && (left != k))
correct=left;
}
/*
Check right for an even number of crossings between k and j.
*/
if (correct == -1)
{
count=0;
for (l=k+1; l < right; l++)
if (zero_crossing[i+1].crossings[l] != 0)
count++;
if (((count % 2) == 0) && (right != k))
correct=right;
}
l=(ssize_t) zero_crossing[i].crossings[j];
zero_crossing[i].crossings[j]=0;
if (correct != -1)
zero_crossing[i].crossings[correct]=(short) l;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineRegion() defines the left and right boundaries of a peak region.
%
% The format of the DefineRegion method is:
%
% ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
% A description of each parameter follows.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
%    o extents: This pointer to an ExtentPacket represents the extents
%      of a particular peak or valley of a color component.
%
*/
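/*
  For example (illustrative values): with extrema = { 0, 0, +5, +5, +5, -3, ... }
  and extents->index starting at 0, the first scan stops at index 2
  (extents->left = 2) and the second at index 5, giving extents->right = 4;
  a subsequent call resumes at index 5 and locates the next region, until
  the index runs past 255 and MagickFalse is returned.
*/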
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
/*
Initialize to default values.
*/
extents->left=0;
extents->center=0.0;
extents->right=255;
/*
Find the left side (maxima).
*/
for ( ; extents->index <= 255; extents->index++)
if (extrema[extents->index] > 0)
break;
if (extents->index > 255)
return(MagickFalse); /* no left side - no region exists */
extents->left=extents->index;
/*
Find the right side (minima).
*/
for ( ; extents->index <= 255; extents->index++)
if (extrema[extents->index] < 0)
break;
extents->right=extents->index-1;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e r i v a t i v e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DerivativeHistogram() determines the derivative of the histogram using
% central differencing.
%
% The format of the DerivativeHistogram method is:
%
% DerivativeHistogram(const MagickRealType *histogram,
% MagickRealType *derivative)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of MagickRealTypes representing the number
% of pixels for each intensity of a particular color component.
%
% o derivative: This array of MagickRealTypes is initialized by
% DerivativeHistogram to the derivative of the histogram using central
% differencing.
%
*/
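/*
  The endpoint formulas below are the one-sided three-point stencils obtained
  by differentiating the quadratic through the first (respectively last)
  three samples:

      h'(0) = (-3*h[0] + 4*h[1] - h[2])/2 = -1.5*h[0] + 2.0*h[1] - 0.5*h[2]
      h'(n) = ( h[n-2] - 4*h[n-1] + 3*h[n])/2

  both second-order accurate, matching the central difference
  (h[i+1] - h[i-1])/2 used in the interior.
*/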
static void DerivativeHistogram(const MagickRealType *histogram,
MagickRealType *derivative)
{
register ssize_t
i,
n;
/*
Compute endpoints using second order polynomial interpolation.
*/
n=255;
derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]);
/*
Compute derivative using central differencing.
*/
for (i=1; i < n; i++)
derivative[i]=(histogram[i+1]-histogram[i-1])/2.0;
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e D y n a m i c T h r e s h o l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
% The format of the GetImageDynamicThreshold method is:
%
% MagickBooleanType GetImageDynamicThreshold(const Image *image,
% const double cluster_threshold,const double smooth_threshold,
% MagickPixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cluster_threshold: This MagickRealType represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o pixel: return the dynamic threshold here.
%
% o exception: return any errors or warnings in this structure.
%
*/
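/*
  Illustrative call (parameter values hypothetical): compute the dynamic
  threshold color with a 1% cluster threshold and a smoothing threshold of
  1.5:

      MagickPixelPacket pixel;
      if (GetImageDynamicThreshold(image,1.0,1.5,&pixel,exception) != MagickFalse)
        ... use pixel.red, pixel.green, pixel.blue as the threshold color ...
*/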
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
const double cluster_threshold,const double smooth_threshold,
MagickPixelPacket *pixel,ExceptionInfo *exception)
{
Cluster
*background,
*cluster,
*object,
*head,
*last_cluster,
*next_cluster;
ExtentPacket
blue,
green,
red;
MagickBooleanType
proceed;
MagickRealType
threshold;
register const PixelPacket
*p;
register ssize_t
i,
x;
short
*extrema[MaxDimension];
ssize_t
count,
*histogram[MaxDimension],
y;
/*
Allocate histogram and extrema.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
GetMagickPixelPacket(image,pixel);
for (i=0; i < MaxDimension; i++)
{
histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram));
if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
{
for (i-- ; i >= 0; i--)
{
extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
}
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
}
/*
Initialize histogram.
*/
InitializeHistogram(image,histogram,exception);
(void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
(smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
(void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
(smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
(void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
(smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
/*
Form clusters.
*/
cluster=(Cluster *) NULL;
head=(Cluster *) NULL;
(void) memset(&red,0,sizeof(red));
(void) memset(&green,0,sizeof(green));
(void) memset(&blue,0,sizeof(blue));
while (DefineRegion(extrema[Red],&red) != 0)
{
green.index=0;
while (DefineRegion(extrema[Green],&green) != 0)
{
blue.index=0;
while (DefineRegion(extrema[Blue],&blue) != 0)
{
/*
Allocate a new class.
*/
if (head != (Cluster *) NULL)
{
cluster->next=(Cluster *) AcquireMagickMemory(
sizeof(*cluster->next));
cluster=cluster->next;
}
else
{
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
head=cluster;
}
if (cluster == (Cluster *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
return(MagickFalse);
}
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
}
}
}
if (head == (Cluster *) NULL)
{
/*
        No classes were identified; create one.
*/
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
if (cluster == (Cluster *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
head=cluster;
}
/*
Count the pixels for each cluster.
*/
count=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >=
(cluster->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <=
(cluster->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >=
(cluster->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <=
(cluster->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >=
(cluster->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <=
(cluster->blue.right+SafeMargin)))
{
/*
Count this pixel.
*/
count++;
cluster->red.center+=(MagickRealType)
ScaleQuantumToChar(GetPixelRed(p));
cluster->green.center+=(MagickRealType)
ScaleQuantumToChar(GetPixelGreen(p));
cluster->blue.center+=(MagickRealType)
ScaleQuantumToChar(GetPixelBlue(p));
cluster->count++;
break;
}
p++;
}
proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
2*image->rows);
if (proceed == MagickFalse)
break;
}
/*
Remove clusters that do not meet minimum cluster threshold.
*/
count=0;
last_cluster=head;
next_cluster=head;
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
if ((cluster->count > 0) &&
(cluster->count >= (count*cluster_threshold/100.0)))
{
/*
Initialize cluster.
*/
cluster->id=count;
cluster->red.center/=cluster->count;
cluster->green.center/=cluster->count;
cluster->blue.center/=cluster->count;
count++;
last_cluster=cluster;
continue;
}
/*
Delete cluster.
*/
if (cluster == head)
head=next_cluster;
else
last_cluster->next=next_cluster;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
object=head;
background=head;
if (count > 1)
{
object=head->next;
for (cluster=object; cluster->next != (Cluster *) NULL; )
{
if (cluster->count < object->count)
object=cluster;
cluster=cluster->next;
}
background=head->next;
for (cluster=background; cluster->next != (Cluster *) NULL; )
{
if (cluster->count > background->count)
background=cluster;
cluster=cluster->next;
}
}
if (background != (Cluster *) NULL)
{
threshold=(background->red.center+object->red.center)/2.0;
pixel->red=(MagickRealType) ScaleCharToQuantum((unsigned char)
(threshold+0.5));
threshold=(background->green.center+object->green.center)/2.0;
pixel->green=(MagickRealType) ScaleCharToQuantum((unsigned char)
(threshold+0.5));
threshold=(background->blue.center+object->blue.center)/2.0;
pixel->blue=(MagickRealType) ScaleCharToQuantum((unsigned char)
(threshold+0.5));
}
/*
Relinquish resources.
*/
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
for (i=0; i < MaxDimension; i++)
{
extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeHistogram() computes the histogram for an image.
%
% The format of the InitializeHistogram method is:
%
% InitializeHistogram(const Image *image,ssize_t **histogram)
%
% A description of each parameter follows.
%
% o image: Specifies a pointer to an Image structure; returned from
% ReadImage.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
ExceptionInfo *exception)
{
register const PixelPacket
*p;
register ssize_t
i,
x;
ssize_t
y;
/*
Initialize histogram.
*/
for (i=0; i <= 255; i++)
{
histogram[Red][i]=0;
histogram[Green][i]=0;
histogram[Blue][i]=0;
}
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]++;
histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]++;
histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]++;
p++;
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e I n t e r v a l T r e e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeIntervalTree() initializes an interval tree from the lists of
% zero crossings.
%
% The format of the InitializeIntervalTree method is:
%
%      IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
IntervalTree *node)
{
if (node == (IntervalTree *) NULL)
return;
if (node->child == (IntervalTree *) NULL)
list[(*number_nodes)++]=node;
InitializeList(list,number_nodes,node->sibling);
InitializeList(list,number_nodes,node->child);
}
static void MeanStability(IntervalTree *node)
{
register IntervalTree
*child;
if (node == (IntervalTree *) NULL)
return;
node->mean_stability=0.0;
child=node->child;
if (child != (IntervalTree *) NULL)
{
register ssize_t
count;
register MagickRealType
sum;
sum=0.0;
count=0;
for ( ; child != (IntervalTree *) NULL; child=child->sibling)
{
sum+=child->stability;
count++;
}
node->mean_stability=sum/(MagickRealType) count;
}
MeanStability(node->sibling);
MeanStability(node->child);
}
static void Stability(IntervalTree *node)
{
if (node == (IntervalTree *) NULL)
return;
if (node->child == (IntervalTree *) NULL)
node->stability=0.0;
else
node->stability=node->tau-(node->child)->tau;
Stability(node->sibling);
Stability(node->child);
}
static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
const size_t number_crossings)
{
IntervalTree
*head,
**list,
*node,
*root;
register ssize_t
i;
ssize_t
j,
k,
left,
number_nodes;
/*
Allocate interval tree.
*/
list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
sizeof(*list));
if (list == (IntervalTree **) NULL)
return((IntervalTree *) NULL);
/*
The root is the entire histogram.
*/
root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root));
root->child=(IntervalTree *) NULL;
root->sibling=(IntervalTree *) NULL;
root->tau=0.0;
root->left=0;
root->right=255;
root->mean_stability=0.0;
root->stability=0.0;
(void) memset(list,0,TreeLength*sizeof(*list));
for (i=(-1); i < (ssize_t) number_crossings; i++)
{
/*
Initialize list with all nodes with no children.
*/
number_nodes=0;
InitializeList(list,&number_nodes,root);
/*
Split list.
*/
for (j=0; j < number_nodes; j++)
{
head=list[j];
left=head->left;
node=head;
for (k=head->left+1; k < head->right; k++)
{
if (zero_crossing[i+1].crossings[k] != 0)
{
if (node == head)
{
node->child=(IntervalTree *) AcquireMagickMemory(
sizeof(*node->child));
node=node->child;
}
else
{
node->sibling=(IntervalTree *) AcquireMagickMemory(
sizeof(*node->sibling));
node=node->sibling;
}
if (node == (IntervalTree *) NULL)
{
list=(IntervalTree **) RelinquishMagickMemory(list);
FreeNodes(root);
return((IntervalTree *) NULL);
}
node->tau=zero_crossing[i+1].tau;
node->child=(IntervalTree *) NULL;
node->sibling=(IntervalTree *) NULL;
node->left=left;
node->right=k;
left=k;
}
}
if (left != head->left)
{
node->sibling=(IntervalTree *) AcquireMagickMemory(
sizeof(*node->sibling));
node=node->sibling;
if (node == (IntervalTree *) NULL)
{
list=(IntervalTree **) RelinquishMagickMemory(list);
FreeNodes(root);
return((IntervalTree *) NULL);
}
node->tau=zero_crossing[i+1].tau;
node->child=(IntervalTree *) NULL;
node->sibling=(IntervalTree *) NULL;
node->left=left;
node->right=head->right;
}
}
}
/*
    Determine the stability: the difference between a node's tau and its child's.
*/
Stability(root->child);
MeanStability(root->child);
list=(IntervalTree **) RelinquishMagickMemory(list);
return(root);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p t i m a l T a u %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OptimalTau() finds the optimal tau for each band of the histogram.
%
% The format of the OptimalTau method is:
%
% MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
% const double min_tau,const double delta_tau,
% const double smooth_threshold,short *extrema)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
*/
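/*
  With the values passed by GetImageDynamicThreshold() (max_tau = Tau = 5.2,
  min_tau = 0.2, delta_tau = DeltaTau = 0.5), OptimalTau() below smooths the
  histogram at tau = 5.2, 4.7, ..., 0.2 (11 scales) and appends one entry for
  the raw histogram, matching its allocation
  count = (size_t) ((5.2 - 0.2)/0.5) + 2 = 12.
*/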
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
IntervalTree *node)
{
if (node == (IntervalTree *) NULL)
return;
if (node->stability >= node->mean_stability)
{
list[(*number_nodes)++]=node;
ActiveNodes(list,number_nodes,node->sibling);
}
else
{
ActiveNodes(list,number_nodes,node->sibling);
ActiveNodes(list,number_nodes,node->child);
}
}
static void FreeNodes(IntervalTree *node)
{
if (node == (IntervalTree *) NULL)
return;
FreeNodes(node->sibling);
FreeNodes(node->child);
node=(IntervalTree *) RelinquishMagickMemory(node);
}
static MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
const double min_tau,const double delta_tau,const double smooth_threshold,
short *extrema)
{
IntervalTree
**list,
*node,
*root;
MagickBooleanType
peak;
MagickRealType
average_tau,
*derivative,
*second_derivative,
tau,
value;
register ssize_t
i,
x;
size_t
count,
number_crossings;
ssize_t
index,
j,
k,
number_nodes;
ZeroCrossing
*zero_crossing;
/*
Allocate interval tree.
*/
list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
sizeof(*list));
if (list == (IntervalTree **) NULL)
return(0.0);
/*
Allocate zero crossing list.
*/
count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
sizeof(*zero_crossing));
if (zero_crossing == (ZeroCrossing *) NULL)
{
list=(IntervalTree **) RelinquishMagickMemory(list);
return(0.0);
}
for (i=0; i < (ssize_t) count; i++)
zero_crossing[i].tau=(-1.0);
/*
Initialize zero crossing list.
*/
derivative=(MagickRealType *) AcquireCriticalMemory(256*sizeof(*derivative));
second_derivative=(MagickRealType *) AcquireCriticalMemory(256*
sizeof(*second_derivative));
i=0;
for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
{
zero_crossing[i].tau=tau;
ScaleSpace(histogram,tau,zero_crossing[i].histogram);
DerivativeHistogram(zero_crossing[i].histogram,derivative);
DerivativeHistogram(derivative,second_derivative);
ZeroCrossHistogram(second_derivative,smooth_threshold,
zero_crossing[i].crossings);
i++;
}
/*
Add an entry for the original histogram.
*/
zero_crossing[i].tau=0.0;
for (j=0; j <= 255; j++)
zero_crossing[i].histogram[j]=(MagickRealType) histogram[j];
DerivativeHistogram(zero_crossing[i].histogram,derivative);
DerivativeHistogram(derivative,second_derivative);
ZeroCrossHistogram(second_derivative,smooth_threshold,
zero_crossing[i].crossings);
number_crossings=(size_t) i;
derivative=(MagickRealType *) RelinquishMagickMemory(derivative);
second_derivative=(MagickRealType *)
RelinquishMagickMemory(second_derivative);
/*
Ensure the scale-space fingerprints form lines in scale-space, not loops.
*/
ConsolidateCrossings(zero_crossing,number_crossings);
/*
Force endpoints to be included in the interval.
*/
for (i=0; i <= (ssize_t) number_crossings; i++)
{
for (j=0; j < 255; j++)
if (zero_crossing[i].crossings[j] != 0)
break;
zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
for (j=255; j > 0; j--)
if (zero_crossing[i].crossings[j] != 0)
break;
zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
}
/*
Initialize interval tree.
*/
root=InitializeIntervalTree(zero_crossing,number_crossings);
if (root == (IntervalTree *) NULL)
{
zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
list=(IntervalTree **) RelinquishMagickMemory(list);
return(0.0);
}
/*
Find active nodes: stability is greater (or equal) to the mean stability of
its children.
*/
number_nodes=0;
ActiveNodes(list,&number_nodes,root->child);
/*
Initialize extrema.
*/
for (i=0; i <= 255; i++)
extrema[i]=0;
for (i=0; i < number_nodes; i++)
{
/*
Find this tau in zero crossings list.
*/
k=0;
node=list[i];
for (j=0; j <= (ssize_t) number_crossings; j++)
if (zero_crossing[j].tau == node->tau)
k=j;
/*
Find the value of the peak.
*/
peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
MagickFalse;
index=node->left;
value=zero_crossing[k].histogram[index];
for (x=node->left; x <= node->right; x++)
{
if (peak != MagickFalse)
{
if (zero_crossing[k].histogram[x] > value)
{
value=zero_crossing[k].histogram[x];
index=x;
}
}
else
if (zero_crossing[k].histogram[x] < value)
{
value=zero_crossing[k].histogram[x];
index=x;
}
}
for (x=node->left; x <= node->right; x++)
{
if (index == 0)
index=256;
if (peak != MagickFalse)
extrema[x]=(short) index;
else
extrema[x]=(short) (-index);
}
}
/*
Determine the average tau.
*/
average_tau=0.0;
for (i=0; i < number_nodes; i++)
average_tau+=list[i]->tau;
average_tau/=(MagickRealType) number_nodes;
/*
Relinquish resources.
*/
FreeNodes(root);
zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
list=(IntervalTree **) RelinquishMagickMemory(list);
return(average_tau);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S c a l e S p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleSpace() performs a scale-space filter on the 1D histogram.
%
% The format of the ScaleSpace method is:
%
% ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
% MagickRealType *scale_histogram)
%
% A description of each parameter follows.
%
%  o histogram: Specifies an array of ssize_t values representing the number
%    of pixels for each intensity of a particular color component.
%
*/
static void ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
MagickRealType *scale_histogram)
{
double
alpha,
beta,
*gamma,
sum;
register ssize_t
u,
x;
gamma=(double *) AcquireQuantumMemory(256,sizeof(*gamma));
if (gamma == (double *) NULL)
ThrowFatalException(ResourceLimitFatalError,"UnableToAllocateGammaMap");
alpha=1.0/(tau*sqrt(2.0*MagickPI));
beta=(-1.0/(2.0*tau*tau));
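  /*
    alpha and beta define a Gaussian of standard deviation tau:
    alpha*exp(beta*x*x) = exp(-x*x/(2.0*tau*tau))/(tau*sqrt(2.0*pi)).  The
    loops below convolve the histogram with this kernel to produce its
    scale-space representation at scale tau.
  */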
for (x=0; x <= 255; x++)
gamma[x]=0.0;
for (x=0; x <= 255; x++)
{
gamma[x]=exp((double) beta*x*x);
if (gamma[x] < MagickEpsilon)
break;
}
for (x=0; x <= 255; x++)
{
sum=0.0;
for (u=0; u <= 255; u++)
sum+=(double) histogram[u]*gamma[MagickAbsoluteValue(x-u)];
scale_histogram[x]=(MagickRealType) (alpha*sum);
}
gamma=(double *) RelinquishMagickMemory(gamma);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e g m e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SegmentImage() segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% C-means technique.
%
% The format of the SegmentImage method is:
%
% MagickBooleanType SegmentImage(Image *image,
% const ColorspaceType colorspace,const MagickBooleanType verbose,
% const double cluster_threshold,const double smooth_threshold)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o colorspace: Indicate the colorspace.
%
% o verbose: Set to MagickTrue to print detailed information about the
% identified classes.
%
% o cluster_threshold: This represents the minimum number of pixels
% contained in a hexahedra before it can be considered valid (expressed
% as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
const ColorspaceType colorspace,const MagickBooleanType verbose,
const double cluster_threshold,const double smooth_threshold)
{
ColorspaceType
previous_colorspace;
MagickBooleanType
status;
register ssize_t
i;
short
*extrema[MaxDimension];
ssize_t
*histogram[MaxDimension];
/*
Allocate histogram and extrema.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
for (i=0; i < MaxDimension; i++)
{
histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
{
for (i-- ; i >= 0; i--)
{
extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
}
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename)
}
}
/*
Initialize histogram.
*/
previous_colorspace=image->colorspace;
(void) TransformImageColorspace(image,colorspace);
InitializeHistogram(image,histogram,&image->exception);
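  /*
    Compute the optimal tau per channel; a zero smoothing threshold is
    promoted to 1.0 so ZeroCrossHistogram() can still suppress numerical
    noise in the second derivative.
  */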
(void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
(void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
(void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
/*
Classify using the fuzzy c-Means technique.
*/
status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose);
(void) TransformImageColorspace(image,previous_colorspace);
/*
Relinquish resources.
*/
for (i=0; i < MaxDimension; i++)
{
extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Z e r o C r o s s H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() finds the zero crossings in a histogram and marks
%  directions as: 1 is negative to positive; 0 is no crossing; and -1 is
%  positive to negative.
%
% The format of the ZeroCrossHistogram method is:
%
% ZeroCrossHistogram(MagickRealType *second_derivative,
% const MagickRealType smooth_threshold,short *crossings)
%
% A description of each parameter follows.
%
% o second_derivative: Specifies an array of MagickRealTypes representing the
% second derivative of the histogram of a particular color component.
%
%  o crossings: This array of integers is initialized with -1, 0, or 1
%    representing the slope of the first derivative of the histogram of a
%    particular color component.
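%
%    For example (ignoring the smoothing threshold), a second-derivative
%    sequence of {+2, +1, -3, -1, +4} yields crossings {0, 0, -1, 0, 1}:
%    the sign change at index 2 is positive-to-negative and the one at
%    index 4 is negative-to-positive.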
%
*/
static void ZeroCrossHistogram(MagickRealType *second_derivative,
const MagickRealType smooth_threshold,short *crossings)
{
register ssize_t
i;
ssize_t
parity;
/*
Merge low numbers to zero to help prevent noise.
*/
for (i=0; i <= 255; i++)
if ((second_derivative[i] < smooth_threshold) &&
(second_derivative[i] >= -smooth_threshold))
second_derivative[i]=0.0;
/*
Mark zero crossings.
*/
parity=0;
for (i=0; i <= 255; i++)
{
crossings[i]=0;
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);
        parity=(-1);
      }
    else
      if (second_derivative[i] > 0.0)
        {
          if (parity < 0)
            crossings[i]=1;
          parity=1;
        }
}
}
|
lcpi_logic.c
|
/*
* Copyright (c) 2011-2016 University of Texas at Austin. All rights reserved.
*
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* This file is part of PerfExpert.
*
* PerfExpert is free software: you can redistribute it and/or modify it under
* the terms of the The University of Texas at Austin Research License
*
* PerfExpert is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE.
*
* Authors: Antonio Gomez-Iglesias, Leonardo Fialho and Ashay Rane
*
* $HEADER$
*/
#ifdef __cplusplus
extern "C" {
#endif
/* System standard headers */
#include <string.h>
/* Utility headers */
#include <matheval.h>
/* Modules headers */
#include "lcpi.h"
#include "lcpi_types.h"
/* PerfExpert common headers */
#include "common/perfexpert_alloc.h"
#include "common/perfexpert_constants.h"
#include "common/perfexpert_hash.h"
#include "common/perfexpert_list.h"
#include "common/perfexpert_output.h"
#include <omp.h>
#include <sched.h>
/* logic_lcpi_compute */
int logic_lcpi_compute(lcpi_profile_t *profile) {
lcpi_metric_t *h_lcpi = NULL, *l = NULL, *t = NULL;
lcpi_hotspot_t *h = NULL;
double *values = NULL;
lcpi_hound_t *hound_info;
char **names = NULL;
int count = 0, i = 0;
int mpi_tasks, threads, num_threads;
int task, thread, my_thread;
sqlite3 *db[MAX_THREADS];
OUTPUT_VERBOSE((4, "%s", _YELLOW("Calculating LCPI metrics")));
if (my_module_globals.hound_info==NULL) {
if (PERFEXPERT_SUCCESS != import_hound (my_module_globals.hound_info)) {
OUTPUT((_ERROR("importing hound")));
return PERFEXPERT_ERROR;
}
}
    /* Use up to MAX_THREADS worker threads for the per-thread DB handles. */
    num_threads = omp_get_max_threads();
    if (num_threads > MAX_THREADS)
        num_threads = MAX_THREADS;
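    /* One SQLite handle per OpenMP worker: the tasks below index db[] by
       omp_get_thread_num(), so a handle is never shared across threads. */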
for (i = 0; i < num_threads; ++i) {
if (PERFEXPERT_SUCCESS != perfexpert_database_connect(&(db[i]),globals.dbfile)) {
OUTPUT(("ERROR creating DB number %d", i));
return PERFEXPERT_ERROR;
}
}
mpi_tasks = database_get_mpi_tasks();
threads = database_get_threads();
for (task = 0; task < mpi_tasks; task++) {
/* Iterate over all threads */
for (thread = 0; thread < threads; thread++) {
/* With this if statement we don't need a break at the end -> good for OpenMP */
if ((globals.output_mode != SERIAL_OUTPUT) || ((globals.output_mode == SERIAL_OUTPUT) && (task == 0) && (thread == 0))) {
//#pragma omp parallel private (h, task, thread, count, l, hound_info, values, h_lcpi, names, i, t) num_threads(num_threads)
/* For each LCPI definition... */
perfexpert_hash_iter_str(my_module_globals.metrics_by_name, l, t) {
/* Get the list of variables and their values */
evaluator_get_variables(l->expression, &names, &count);
if (count <= 0) {
continue;
}
/* For each hotspot in this profile... */
            #pragma omp parallel private(i, h_lcpi, values, hound_info, my_thread, h) \
                default(none) shared(profile, names, count, l, task, thread, \
                my_module_globals, db) num_threads(num_threads)
{
#pragma omp single nowait
{
perfexpert_list_for(h, &(profile->hotspots), lcpi_hotspot_t) {
#pragma omp task
{
my_thread = omp_get_thread_num();
OUTPUT_VERBOSE((10, " %s (%s:%d@%s)", _YELLOW(h->name),
h->file, h->line, h->module->name));
PERFEXPERT_ALLOC(lcpi_metric_t, h_lcpi, sizeof(lcpi_metric_t));
strcpy(h_lcpi->name_md5, l->name_md5);
h_lcpi->expression = l->expression;
h_lcpi->value = l->value;
h_lcpi->name = l->name;
h_lcpi->mpi_task = task;
h_lcpi->thread_id = thread;
                        PERFEXPERT_ALLOC(double, values, (sizeof(double) * count));
/* Iterate over all the events of each metric */
for (i = 0; i < count; i++) {
/* Check if the current event is in hound */
perfexpert_hash_find_str(my_module_globals.hound_info, perfexpert_md5_string(names[i]), hound_info);
if (hound_info) {
/* If in hound, use that value */
values[i] = hound_info->value;
OUTPUT_VERBOSE((10, " Found name %s = %g", names[i], values[i]));
} else {
/* If not in hound, look that event in the DB for the task and thread */
values[i] = database_get_event(db[my_thread], names[i], h->id, task, thread);
if (values[i] != -1.0) {
OUTPUT_VERBOSE((10, " [%d] Found name %s = %g", h->id, names[i], values[i]));
}
else {
/* Value not found */
values[i] = 0.0;
}
}
}
/* Evaluate the LCPI expression */
#pragma omp critical
{
h_lcpi->value = evaluator_evaluate(h_lcpi->expression, count,
names, values);
}
#pragma omp critical
{
/* Add the LCPI to the hotspot's list of LCPIs */
perfexpert_hash_add_str(h->metrics_by_name, name_md5, h_lcpi);
}
OUTPUT_VERBOSE((10, " %s (%d - %d) = [%g]", h_lcpi->name, h_lcpi->mpi_task, h_lcpi->thread_id, h_lcpi->value));
PERFEXPERT_DEALLOC(values);
} //task
}
} // single
} //parallel
// if it's serial (aggregated), we don't need to continue
//if (my_module_globals.output == SERIAL_OUTPUT)
// break;
}
}
} //thread
}//mpi
for (i = 0; i < num_threads; ++i) {
if (PERFEXPERT_SUCCESS != perfexpert_database_disconnect(db[i])) {
OUTPUT(("ERROR disconnecting DB number %d", i));
return PERFEXPERT_ERROR;
}
}
// PERFEXPERT_DEALLOC(db);
return PERFEXPERT_SUCCESS;
}
#ifdef __cplusplus
}
#endif
// EOF
|
p4.c
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <omp.h>
#define ISIZE 1000
#define JSIZE 1000
int main(int argc, char **argv) {
    double *a = (double *)malloc(ISIZE*JSIZE*sizeof(double));
    double *A = (double *)malloc(ISIZE*JSIZE*sizeof(double));
FILE *ff;
int nth = 1;
if (argc == 2) nth = atoi(argv[1]);
omp_set_dynamic(0);
omp_set_num_threads(nth);
// Initialization
for (int i = 0; i < ISIZE; ++i) {
for (int j = 0; j < JSIZE; ++j) {
a[i*JSIZE+j] = 10 * i + j;
}
}
for (int i = 0; i < ISIZE; ++i) {
A[i*JSIZE+JSIZE-2] = 10 * i + JSIZE-2;
A[i*JSIZE+JSIZE-1] = 10 * i + JSIZE-1;
}
double t = omp_get_wtime();
for (int k = 0; k < 1000; ++k){
// Parallelize
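        // Iterations are independent (A[i][j] reads only a[i][j+2]), so
        // collapse(2) exposes all ISIZE*(JSIZE-2) iterations to the team.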
#pragma omp parallel for collapse(2)
for (int j = 0; j < JSIZE - 2; ++j) {
for (int i = 0; i < ISIZE; ++i) {
A[i*JSIZE+j] = sin(0.00001 * a[i*JSIZE+j+2]);
}
}
}
t = omp_get_wtime() - t;
printf("Time: %f\n", t);
ff = fopen("p4.out", "w");
for (int i = 0; i < ISIZE; ++i) {
for (int j = 0; j < JSIZE; ++j) {
fprintf(ff, "%f\n", A[i*JSIZE+j]);
}
fprintf(ff, "\n");
}
fclose(ff);
    free(a);
    free(A);
return 0;
}
|
elemproduct.c
|
#include <stdio.h>
#include <omp.h>
void elemprod(double *c, double *a, double *b, int len){
int i;
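    /* Each iteration writes a distinct c[i], so the loop parallelizes
       cleanly; printf output order is nondeterministic across threads. */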
#pragma omp parallel for
for(i=0;i<len;i++){
c[i] = a[i] * b[i];
printf("%f, %f, %f\n", a[i],b[i],c[i]);
}
}
int main(int argc, char *argv[]){
double v1[10] = {1,2,3,4,5,6,7,8,9,10};
double v2[10] = {1,2,3,4,5,6,7,8,9,10};
double v3[10];
elemprod(v3, v1, v2, 10);
int j;
for (j=0;j<10;j++)
printf("%f\n", v3[j]);
return 0;
}
|
bml_submatrix_ellpack.c
|
#include "../../macros.h"
#include "../bml_logger.h"
#include "../bml_submatrix.h"
#include "../bml_types.h"
#include "../dense/bml_types_dense.h"
#include "bml_submatrix_ellpack.h"
#include "bml_types_ellpack.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Determine element indices for submatrix, given a set of nodes/orbitals.
*
* \ingroup submatrix_group_C
*
* \param A Hamiltonian matrix A
* \param B Graph matrix B
 * \param nodelist List of node/orbital indices
 * \param nsize Size of nodelist
 * \param core_halo_index List of core+halo indices
* \param vsize Size of core_halo_index and core_pos
* \param double_jump_flag Flag to use double jump (0=no, 1=yes)
*/
void
bml_matrix2submatrix_index_ellpack(
bml_matrix_ellpack_t * A,
bml_matrix_ellpack_t * B,
int *nodelist,
int nsize,
int *core_halo_index,
int *vsize,
int double_jump_flag)
{
switch (A->matrix_precision)
{
case single_real:
bml_matrix2submatrix_index_ellpack_single_real(A, B, nodelist,
nsize,
core_halo_index,
vsize,
double_jump_flag);
break;
case double_real:
bml_matrix2submatrix_index_ellpack_double_real(A, B, nodelist,
nsize,
core_halo_index,
vsize,
double_jump_flag);
break;
case single_complex:
bml_matrix2submatrix_index_ellpack_single_complex(A, B, nodelist,
nsize,
core_halo_index,
vsize,
double_jump_flag);
break;
case double_complex:
bml_matrix2submatrix_index_ellpack_double_complex(A, B, nodelist,
nsize,
core_halo_index,
vsize,
double_jump_flag);
break;
default:
LOG_ERROR("unknown precision\n");
break;
}
}
/** Determine element indices for submatrix, given a set of nodes/orbitals.
*
* \ingroup submatrix_group_C
*
* \param B Graph matrix B
 * \param nodelist List of node/orbital indices
 * \param nsize Size of nodelist
 * \param core_halo_index List of core+halo indices
* \param vsize Size of core_halo_index and core_pos
* \param double_jump_flag Flag to use double jump (0=no, 1=yes)
*/
void
bml_matrix2submatrix_index_graph_ellpack(
bml_matrix_ellpack_t * B,
int *nodelist,
int nsize,
int *core_halo_index,
int *vsize,
int double_jump_flag)
{
switch (B->matrix_precision)
{
case single_real:
bml_matrix2submatrix_index_graph_ellpack_single_real(B, nodelist,
nsize,
core_halo_index,
vsize,
double_jump_flag);
break;
    case double_real:
        bml_matrix2submatrix_index_graph_ellpack_double_real(B, nodelist,
                                                             nsize,
                                                             core_halo_index,
                                                             vsize,
                                                             double_jump_flag);
        break;
    case single_complex:
        bml_matrix2submatrix_index_graph_ellpack_single_complex(B, nodelist,
                                                                nsize,
                                                                core_halo_index,
                                                                vsize,
                                                                double_jump_flag);
        break;
case double_complex:
bml_matrix2submatrix_index_graph_ellpack_double_complex(B,
nodelist,
nsize,
core_halo_index,
vsize,
double_jump_flag);
break;
default:
LOG_ERROR("unknown precision\n");
break;
}
}
/** Extract a submatrix from a matrix given a set of core+halo rows.
*
* \ingroup submatrix_group_C
*
* \param A Matrix A
* \param B Submatrix B
 * \param core_halo_index Set of row indices for submatrix
 * \param lsize Number of indices
*/
void
bml_matrix2submatrix_ellpack(
bml_matrix_ellpack_t * A,
bml_matrix_dense_t * B,
int *core_halo_index,
int lsize)
{
switch (A->matrix_precision)
{
case single_real:
bml_matrix2submatrix_ellpack_single_real(A, B, core_halo_index,
lsize);
break;
case double_real:
bml_matrix2submatrix_ellpack_double_real(A, B, core_halo_index,
lsize);
break;
case single_complex:
bml_matrix2submatrix_ellpack_single_complex(A, B, core_halo_index,
lsize);
break;
case double_complex:
bml_matrix2submatrix_ellpack_double_complex(A, B, core_halo_index,
lsize);
break;
default:
LOG_ERROR("unknown precision\n");
break;
}
}
/** Assemble submatrix into a full matrix based on core+halo indeces.
*
* \ingroup submatrix_group_C
*
* \param A Submatrix A
* \param B Matrix B
 * \param core_halo_index Set of submatrix row indices
 * \param lsize Number of indices
* \param llsize Number of core positions
* \param threshold Threshold for elements
*/
void
bml_submatrix2matrix_ellpack(
bml_matrix_dense_t * A,
bml_matrix_ellpack_t * B,
int *core_halo_index,
int lsize,
int llsize,
double threshold)
{
switch (A->matrix_precision)
{
case single_real:
bml_submatrix2matrix_ellpack_single_real(A, B, core_halo_index,
lsize, llsize,
threshold);
break;
case double_real:
bml_submatrix2matrix_ellpack_double_real(A, B, core_halo_index,
lsize, llsize,
threshold);
break;
case single_complex:
bml_submatrix2matrix_ellpack_single_complex(A, B, core_halo_index,
lsize,
llsize, threshold);
break;
case double_complex:
bml_submatrix2matrix_ellpack_double_complex(A, B, core_halo_index,
lsize,
llsize, threshold);
break;
default:
LOG_ERROR("unknown precision\n");
break;
}
}
/** Get vector from matrix.
*
* \ingroup submatrix_group_C
*
* \param A Matrix A
* \param jj Index set
* \param irow Which row
* \param colCnt Number of columns
* \param rvalue Returned vector
*/
void *
bml_getVector_ellpack(
bml_matrix_ellpack_t * A,
int *jj,
int irow,
int colCnt)
{
switch (A->matrix_precision)
{
case single_real:
return bml_getVector_ellpack_single_real(A, jj, irow, colCnt);
break;
case double_real:
return bml_getVector_ellpack_double_real(A, jj, irow, colCnt);
break;
case single_complex:
return bml_getVector_ellpack_single_complex(A, jj, irow, colCnt);
break;
case double_complex:
return bml_getVector_ellpack_double_complex(A, jj, irow, colCnt);
break;
default:
LOG_ERROR("unknown precision\n");
break;
}
return NULL;
}
/** Assemble matrix based on groups of rows from a matrix.
*
* \ingroup submatrix_group_C
*
* \param A Matrix A
 * \param hindex Indices of nodes
* \param ngroups Number of groups
* \param threshold Threshold for graph
*/
bml_matrix_ellpack_t *
bml_group_matrix_ellpack(
bml_matrix_ellpack_t * A,
int *hindex,
int ngroups,
double threshold)
{
switch (A->matrix_precision)
{
case single_real:
return bml_group_matrix_ellpack_single_real(A, hindex, ngroups,
threshold);
break;
case double_real:
return bml_group_matrix_ellpack_double_real(A, hindex, ngroups,
threshold);
break;
case single_complex:
return bml_group_matrix_ellpack_single_complex(A, hindex, ngroups,
threshold);
break;
case double_complex:
return bml_group_matrix_ellpack_double_complex(A, hindex, ngroups,
threshold);
break;
default:
LOG_ERROR("unknown precision\n");
break;
}
return NULL;
}
int
sortById(
    const void *a,
    const void *b)
{
    int aId = *((const int *) a);
    int bId = *((const int *) b);
    if (aId < bId)
        return -1;
    else if (aId == bId)
        return 0;
    else
        return 1;
}
/** Assemble adjacency structure from matrix.
*
* \ingroup submatrix_group_C
*
* \param A Matrix A
* \param xadj Index of each row in adjncy
* \param adjncy Adjacency vector
* \param base_flag Return 0- or 1-based
*/
void
bml_adjacency_ellpack(
bml_matrix_ellpack_t * A,
int *xadj,
int *adjncy,
int base_flag)
{
int A_N = A->N;
int A_M = A->M;
int *A_nnz = A->nnz;
int *A_index = A->index;
int j;
int check;
xadj[0] = 0;
// Check if diagonal elements are included
check = 0;
for (int i = 0; i < A_nnz[0]; i++)
{
if (A_index[ROWMAJOR(0, i, A_N, A_M)] == 0)
{
check = 1;
break;
}
}
for (int i = 1; i < A_N + 1; i++)
{
if (check == 1)
xadj[i] = xadj[i - 1] + A_nnz[i - 1] - 1;
else
xadj[i] = xadj[i - 1] + A_nnz[i - 1];
}
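    /* xadj is now a CSR-style prefix sum: row i's off-diagonal neighbors
       occupy adjncy[xadj[i]] .. adjncy[xadj[i+1]-1] after the fill below. */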
#pragma omp parallel for \
private(j) \
shared(A_N, A_M, A_index, A_nnz, xadj, adjncy)
for (int i = 0; i < A_N; i++)
{
j = xadj[i];
for (int jj = 0; jj < A_nnz[i]; jj++)
{
if (A_index[ROWMAJOR(i, jj, A_N, A_M)] != i)
{
adjncy[j] = A_index[ROWMAJOR(i, jj, A_N, A_M)];
j++;
}
}
//assert(j == xadj[i+1]);
}
#pragma omp parallel for \
shared(A_N, xadj, adjncy)
for (int i = 0; i < A_N; i++)
{
qsort(&adjncy[xadj[i]], xadj[i + 1] - xadj[i], sizeof(int), sortById);
}
// Add 1 for 1-based
if (base_flag == 1)
{
#pragma omp parallel for \
shared(xadj, A_N, adjncy)
for (int i = 0; i < A_N; i++)
{
for (int j = xadj[i]; j < xadj[i + 1]; j++)
{
adjncy[j] += 1;
}
}
#pragma omp parallel for \
shared(xadj, A_N)
for (int i = 0; i < A_N + 1; i++)
{
xadj[i] += 1;
}
}
}
/** Assemble adjacency structure from matrix based on groups of rows.
*
* \ingroup submatrix_group_C
*
* \param A Matrix A
 * \param hindex Indices of nodes
* \param nnodes Number of groups
* \param xadj Index of each row in adjncy
* \param adjncy Adjacency vector
* \param base_flag Return 0- or 1-based
*/
void
bml_adjacency_group_ellpack(
bml_matrix_ellpack_t * A,
int *hindex,
int nnodes,
int *xadj,
int *adjncy,
int base_flag)
{
int A_N = A->N;
int A_M = A->M;
int *A_nnz = A->nnz;
int *A_index = A->index;
int *hnode = malloc(nnodes * sizeof(int));
for (int i = 0; i < nnodes; i++)
{
hnode[i] = hindex[i] - 1;
}
// Determine number of adjacent atoms per atom
xadj[0] = 0;
for (int i = 1; i < nnodes + 1; i++)
{
int hcount = 0;
for (int j = 0; j < nnodes; j++)
{
for (int k = 0; k < A_nnz[hnode[i - 1]]; k++)
{
if (hnode[j] == A_index[ROWMAJOR(hnode[i - 1], k, A_N, A_M)])
{
hcount++;
break;
}
}
}
xadj[i] = xadj[i - 1] + hcount;
}
// Fill in adjacent atoms
#pragma omp parallel for \
shared(A_N, A_M, A_index, A_nnz) \
shared(xadj, adjncy, hnode)
for (int i = 0; i < nnodes; i++)
{
int ll = xadj[i];
for (int j = 0; j < nnodes; j++)
{
for (int k = 0; k < A_nnz[hnode[i]]; k++)
{
if (hnode[j] == A_index[ROWMAJOR(hnode[i], k, A_N, A_M)])
{
//adjncy[ll] = hnode[j];
adjncy[ll] = j;
ll++;
break;
}
}
}
}
// Add 1 for 1-based
if (base_flag == 1)
{
#pragma omp parallel for \
shared(xadj, A_N, adjncy)
        for (int i = 0; i < xadj[nnodes]; i++)
{
adjncy[i] += 1;
}
#pragma omp parallel for \
shared(xadj, A_N)
for (int i = 0; i < nnodes + 1; i++)
{
xadj[i] += 1;
}
    }
    free(hnode);
}
|
GridInit.c
|
/*******************************************************************************
Copyright (c) 2016 Advanced Micro Devices, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#include "XSbench_header.h"
#ifdef MPI
#include<mpi.h>
#endif
// Generates randomized energy grid for each nuclide
// Note that this is done as part of initialization (serial), so
// rand() is used.
void generate_grids(long n_isotopes, long n_gridpoints,
NuclideGridPoint **nuclide_grids)
{
for(long i = 0; i < n_isotopes; i++)
for(long j = 0; j < n_gridpoints; j++)
{
nuclide_grids[i][j].energy = ((double)rand()/(double)RAND_MAX);
nuclide_grids[i][j].total_xs = ((double)rand()/(double)RAND_MAX);
nuclide_grids[i][j].elastic_xs = ((double)rand()/(double)RAND_MAX);
nuclide_grids[i][j].absorbtion_xs = ((double)rand()/(double)RAND_MAX);
nuclide_grids[i][j].fission_xs = ((double)rand()/(double)RAND_MAX);
nuclide_grids[i][j].nu_fission_xs = ((double)rand()/(double)RAND_MAX);
}
}
// Verification version of this function (tighter control over RNG)
void generate_grids_v( long n_isotopes, long n_gridpoints,
NuclideGridPoint **nuclide_grids)
{
for(long i = 0; i < n_isotopes; i++)
for(long j = 0; j < n_gridpoints; j++)
{
nuclide_grids[i][j].energy = rn_v();
nuclide_grids[i][j].total_xs = rn_v();
nuclide_grids[i][j].elastic_xs = rn_v();
nuclide_grids[i][j].absorbtion_xs = rn_v();
nuclide_grids[i][j].fission_xs = rn_v();
nuclide_grids[i][j].nu_fission_xs = rn_v();
}
}
// Sorts the nuclide grids by energy (lowest -> highest)
void sort_nuclide_grids(long n_isotopes, long n_gridpoints,
NuclideGridPoint **nuclide_grids)
{
int (*cmp) (const void *, const void *);
cmp = NGP_compare;
for(long i = 0; i < n_isotopes; i++)
qsort(nuclide_grids[i], n_gridpoints, sizeof(NuclideGridPoint), cmp);
// error debug check
/*
for( int i = 0; i < n_isotopes; i++ )
{
printf("NUCLIDE %d ==============================\n", i);
for( int j = 0; j < n_gridpoints; j++ )
printf("E%d = %lf\n", j, nuclide_grids[i][j].energy);
}
*/
}
// allocate pointer grid
int * generate_ptr_grid(int n_isotopes, int n_gridpoints)
{
int * grid_ptrs = (int *) malloc(n_isotopes * n_isotopes
* n_gridpoints * sizeof(int));
return grid_ptrs;
}
// Allocates unionized energy grid, and assigns union of energy levels
// from nuclide grids to it.
GridPoint * generate_energy_grid( long n_isotopes, long n_gridpoints,
NuclideGridPoint **nuclide_grids, int * grid_ptrs)
{
int mype = 0;
#ifdef MPI
MPI_Comm_rank(MPI_COMM_WORLD, &mype);
#endif
if( mype == 0 ) printf("Generating Unionized Energy Grid...\n");
long n_unionized_grid_points = n_isotopes*n_gridpoints;
int (*cmp) (const void *, const void *);
cmp = NGP_compare;
GridPoint * energy_grid = (GridPoint *)malloc( n_unionized_grid_points
* sizeof( GridPoint ) );
if( mype == 0 ) printf("Copying and Sorting all nuclide grids...\n");
NuclideGridPoint ** n_grid_sorted = gpmatrix( n_isotopes, n_gridpoints );
memcpy( n_grid_sorted[0], nuclide_grids[0], n_isotopes*n_gridpoints*
sizeof( NuclideGridPoint ) );
qsort( &n_grid_sorted[0][0], n_unionized_grid_points,
sizeof(NuclideGridPoint), cmp);
if( mype == 0 ) printf("Assigning energies to unionized grid...\n");
for( long i = 0; i < n_unionized_grid_points; i++ )
energy_grid[i].energy = n_grid_sorted[0][i].energy;
gpmatrix_free(n_grid_sorted);
//int * full = (int *) malloc( n_isotopes * n_unionized_grid_points
// * sizeof(int) );
for( long i = 0; i < n_unionized_grid_points; i++ )
energy_grid[i].xs_ptrs = n_isotopes * i;
// debug error checking
/*
for( int i = 0; i < n_unionized_grid_points; i++ )
printf("E%d = %lf\n", i, energy_grid[i].energy);
*/
return energy_grid;
}
// Searches each nuclide grid for the closest energy level and assigns
// pointer from unionized grid to the correct spot in the nuclide grid.
// This process is time consuming, as the number of binary searches
// required is: binary searches = n_gridpoints * n_isotopes^2
void set_grid_ptrs( GridPoint * energy_grid, int * grid_ptrs, long n_isotopes,
long n_gridpoints, NuclideGridPoint **nuclide_grids)
{
int mype = 0;
#ifdef MPI
MPI_Comm_rank(MPI_COMM_WORLD, &mype);
#endif
if( mype == 0 ) printf("Assigning pointers to Unionized Energy Grid...\n");
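	/* grid_ptrs holds an n_isotopes-wide table per unionized grid point:
	   entry [energy_grid[i].xs_ptrs + j] records where energy i falls in
	   nuclide j's grid, so later lookups can reuse these indices instead
	   of repeating the binary searches. */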
#pragma omp parallel for default(none) \
shared( energy_grid, nuclide_grids, grid_ptrs, n_isotopes, n_gridpoints, mype )
for( long i = 0; i < n_isotopes * n_gridpoints ; i++ ){
double quarry = energy_grid[i].energy;
if( INFO && mype == 0 && omp_get_thread_num() == 0 && i % 200 == 0 )
printf("\rAligning Unionized Grid...(%.0lf%% complete)",
100.0 * (double) i / (n_isotopes*n_gridpoints /
omp_get_num_threads()) );
for( long j = 0; j < n_isotopes; j++ ){
// j is the nuclide i.d.
// log n binary search
grid_ptrs[energy_grid[i].xs_ptrs + j] =
binary_search( nuclide_grids[j], quarry, (uint)n_gridpoints );
}
}
if( mype == 0 ) printf("\n");
//test
/*
for( int i=0; i < n_isotopes * n_gridpoints; i++ )
for( int j = 0; j < n_isotopes; j++ )
printf("E = %.4lf\tNuclide %d->%p->%.4lf\n",
energy_grid[i].energy,
j,
energy_grid[i].xs_ptrs[j],
(energy_grid[i].xs_ptrs[j])->energy
);
*/
}
|
salted_sha1_fmt_plug.c
|
/*
* generic salted-sha1 support for LDAP style password storage
*
* Copyright (c) 2003 Simon Marechal, salt length fixes (c) 2012 magnum
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_saltedsha;
#elif FMT_REGISTERS_H
john_register_one(&fmt_saltedsha);
#else
#define MAX_SALT_LEN 16 // bytes, the base64 representation is longer
#include <string.h>
#include "misc.h"
#include "formats.h"
#include "arch.h"
#include "options.h"
#include "johnswap.h"
#ifdef MMX_COEF
#define NBKEYS (MMX_COEF * SHA1_SSE_PARA)
#endif
#include "sse-intrinsics.h"
#include "common.h"
#include "sha.h"
#include "base64.h"
#ifdef _OPENMP
#ifdef MMX_COEF_SHA512
#define OMP_SCALE 1024
#else
#define OMP_SCALE 2048
#endif
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Salted-SHA1"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH (55-MAX_SALT_LEN)
#define BINARY_SIZE 20 // this is 28 chars of base64
#define BINARY_ALIGN 4
#define SALT_SIZE (MAX_SALT_LEN + sizeof(unsigned int))
#define SALT_ALIGN 4
#define CIPHERTEXT_LENGTH ((BINARY_SIZE + 1 + MAX_SALT_LEN + 2) / 3 * 4)
#ifdef MMX_COEF
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define GETPOS(i, index) ( (index&(MMX_COEF-1))*4 + ((i)&(0xffffffff-3))*MMX_COEF + (3-((i)&3)) + (index>>(MMX_COEF>>1))*SHA_BUF_SIZ*4*MMX_COEF ) //for endianity conversion
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define NSLDAP_MAGIC "{ssha}"
#define NSLDAP_MAGIC_LENGTH 6
#define BASE64_ALPHABET \
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
struct s_salt
{
unsigned int len;
union {
unsigned char c[MAX_SALT_LEN];
ARCH_WORD_32 w32;
} data;
};
static struct s_salt *saved_salt;
static struct fmt_tests tests[] = {
// Test hashes originally(?) in OPENLDAPS_fmt (openssha) (salt length 4)
{"{SSHA}bPXG4M1KkwZh2Hbgnuoszvpat0T/OS86", "thales"},
{"{SSHA}hHSEPW3qeiOo5Pl2MpHQCXh0vgfyVR/X", "test1"},
{"{SSHA}pXp4yIiRmppvKYn7cKCT+lngG4qELq4h", "test2"},
{"{SSHA}Bv8tu3wB8WTMJj3tcOsl1usm5HzGwEmv", "test3"},
{"{SSHA}kXyh8wLCKbN+QRbL2F2aUbkP62BJ/bRg", "lapin"},
{"{SSHA}rnMVxsf1YJPg0L5CBhbVLIsJF+o/vkoE", "canard"},
{"{SSHA}Uf2x9YxSWZZNAi2t1QXbG2PmT07AtURl", "chien"},
{"{SSHA}XXGLZ7iKpYSBpF6EwoeTl27U0L/kYYsY", "hibou"},
{"{SSHA}HYRPmcQIIzIIg/c1L8cZKlYdNpyeZeml", "genou"},
{"{SSHA}Zm/0Wll7rLNpBU4HFUKhbASpXr94eSTc", "caillou"},
{"{SSHA}Qc9OB+aEFA/mJ5MNy0AB4hRIkNiAbqDb", "doudou"},
// Test vectors originally in NSLDAPS_fmt (ssha) (salt length 8)
{"{SSHA}WTT3B9Jjr8gOt0Q7WMs9/XvukyhTQj0Ns0jMKQ==", "Password9"},
{"{SSHA}ypkVeJKLzbXakEpuPYbn+YBnQvFmNmB+kQhmWQ==", "qVv3uQ45"},
{"{SSHA}cKFVqtf358j0FGpPsEIK1xh3T0mtDNV1kAaBNg==", "salles"},
{"{SSHA}W3ipFGmzS3+j6/FhT7ZC39MIfqFcct9Ep0KEGA==", "asddsa123"},
{"{SSHA}YbB2R1D2AlzYc9wk/YPtslG7NoiOWaoMOztLHA==", "ripthispassword"},
/*
* These two were found in john-1.6-nsldaps4.diff.gz
*/
{"{SSHA}/EExmSfmhQSPHDJaTxwQSdb/uPpzYWx0ZXI=", "secret"},
{"{SSHA}gVK8WC9YyFT1gMsQHTGCgT3sSv5zYWx0", "secret"},
{NULL}
};
#ifdef MMX_COEF
static ARCH_WORD_32 (*saved_key)[SHA_BUF_SIZ*NBKEYS];
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE/4*NBKEYS];
static unsigned int *saved_len;
static unsigned char out[PLAINTEXT_LENGTH + 1];
static int last_salt_size;
#else
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / 4];
#endif
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt = omp_t * MIN_KEYS_PER_CRYPT;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt = omp_t * MAX_KEYS_PER_CRYPT;
#endif
#ifndef MMX_COEF
saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_key = mem_calloc_tiny(sizeof(*crypt_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
#else
saved_len = mem_calloc_tiny(sizeof(*saved_len) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt/NBKEYS, MEM_ALIGN_SIMD);
crypt_key = mem_calloc_tiny(sizeof(*crypt_key) * self->params.max_keys_per_crypt/NBKEYS, MEM_ALIGN_SIMD);
#endif
}
static void * binary(char *ciphertext) {
static char *realcipher;
if (!realcipher) realcipher = mem_alloc_tiny(BINARY_SIZE + 1 + SALT_SIZE, MEM_ALIGN_WORD);
ciphertext += NSLDAP_MAGIC_LENGTH;
memset(realcipher, 0, BINARY_SIZE);
base64_decode(ciphertext, strlen(ciphertext), realcipher);
#ifdef MMX_COEF
alter_endianity((unsigned char *)realcipher, BINARY_SIZE);
#endif
return (void *)realcipher;
}
static int valid(char *ciphertext, struct fmt_main *self)
{
int len;
if (strncasecmp(ciphertext, NSLDAP_MAGIC, NSLDAP_MAGIC_LENGTH))
return 0;
ciphertext += NSLDAP_MAGIC_LENGTH;
len = strspn(ciphertext, BASE64_ALPHABET);
if (len < (BINARY_SIZE+1+2)/3*4-2)
return 0;
len = strspn(ciphertext, BASE64_ALPHABET "=");
if (len != strlen(ciphertext))
return 0;
if (len & 3 || len > CIPHERTEXT_LENGTH)
return 0;
return 1;
}
static void set_key(char *key, int index)
{
#ifdef MMX_COEF
const ARCH_WORD_32 *wkey = (ARCH_WORD_32*)key;
ARCH_WORD_32 *keybuffer = &((ARCH_WORD_32*)saved_key)[(index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*SHA_BUF_SIZ*MMX_COEF];
ARCH_WORD_32 *keybuf_word = keybuffer;
unsigned int len;
ARCH_WORD_32 temp;
len = 0;
while((unsigned char)(temp = *wkey++)) {
if (!(temp & 0xff00))
{
*keybuf_word = JOHNSWAP((temp & 0xff) | (0x80 << 8));
len++;
goto key_cleaning;
}
if (!(temp & 0xff0000))
{
*keybuf_word = JOHNSWAP((temp & 0xffff) | (0x80 << 16));
len+=2;
goto key_cleaning;
}
if (!(temp & 0xff000000))
{
*keybuf_word = JOHNSWAP(temp | (0x80 << 24));
len+=3;
goto key_cleaning;
}
*keybuf_word = JOHNSWAP(temp);
len += 4;
keybuf_word += MMX_COEF;
}
*keybuf_word = 0x80000000;
key_cleaning:
keybuf_word += MMX_COEF;
while(*keybuf_word) {
*keybuf_word = 0;
keybuf_word += MMX_COEF;
}
saved_len[index] = len;
#else
strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
#endif
}
static void * get_salt(char * ciphertext)
{
static struct s_salt cursalt;
char *p;
char realcipher[CIPHERTEXT_LENGTH];
int len;
ciphertext += NSLDAP_MAGIC_LENGTH;
memset(realcipher, 0, sizeof(realcipher));
memset(&cursalt, 0, sizeof(struct s_salt));
len = strlen(ciphertext);
base64_decode(ciphertext, len, realcipher);
// We now support any salt length up to SALT_SIZE
cursalt.len = (len + 3) / 4 * 3 - BINARY_SIZE;
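	// base64 decodes 3 bytes per 4 characters; subtracting the 20-byte
	// SHA-1 digest (BINARY_SIZE) gives the raw salt length, and the loop
	// below trims one byte per '=' padding character.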
p = &ciphertext[len];
while (*--p == '=')
cursalt.len--;
memcpy(cursalt.data.c, realcipher+BINARY_SIZE, cursalt.len);
return &cursalt;
}
static char *get_key(int index) {
#ifdef MMX_COEF
unsigned int i,s;
s = saved_len[index];
for(i=0;i<s;i++)
out[i] = ((char*)saved_key)[GETPOS(i, index)];
out[i] = 0;
return (char *) out;
#else
return saved_key[index];
#endif
}
static int cmp_all(void *binary, int count) {
int index;
for (index = 0; index < count; index++)
#ifdef MMX_COEF
if (((ARCH_WORD_32 *) binary)[0] == ((ARCH_WORD_32*)crypt_key)[(index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*5*MMX_COEF])
#else
if ( ((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)&(crypt_key[index][0]))[0] )
#endif
return 1;
return 0;
}
static int cmp_exact(char *source, int count){
return (1);
}
static int cmp_one(void * binary, int index)
{
#ifdef MMX_COEF
int i;
for (i = 0; i < BINARY_SIZE/sizeof(ARCH_WORD_32); i++)
if (((ARCH_WORD_32 *) binary)[i] != ((ARCH_WORD_32*)crypt_key)[(index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*5*MMX_COEF+i*MMX_COEF])
return 0;
return 1;
#else
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}
static void set_salt(void *salt) {
saved_salt = salt;
}
#ifdef MMX_COEF
static inline void set_onesalt(int index)
{
unsigned int i, idx=index%NBKEYS;
unsigned char *sk = (unsigned char*)&saved_key[index/NBKEYS];
for(i=0;i<saved_salt->len;++i)
sk[GETPOS(i+saved_len[index], idx)] = saved_salt->data.c[i];
sk[GETPOS(i+saved_len[index], idx)] = 0x80;
while (++i <= last_salt_size)
sk[GETPOS(i+saved_len[index], idx)] = 0;
((unsigned int*)sk)[15*MMX_COEF + (index&3) + ((idx)>>2)*SHA_BUF_SIZ*MMX_COEF] = (saved_salt->len + saved_len[index])<<3;
}
#endif
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#ifdef MMX_COEF
int inc = NBKEYS;
#else
int inc = 1;
#endif
#pragma omp parallel for
for (index=0; index < count; index += inc)
#endif
{
#ifdef MMX_COEF
unsigned int i;
for(i=0;i<NBKEYS;i++)
set_onesalt(i+index);
SSESHA1body(saved_key[index/NBKEYS], crypt_key[index/NBKEYS], NULL, SSEi_MIXED_IN);
#else
SHA_CTX ctx;
SHA1_Init( &ctx );
SHA1_Update( &ctx, (unsigned char *) saved_key[index], strlen( saved_key[index] ) );
SHA1_Update( &ctx, (unsigned char *) saved_salt->data.c, saved_salt->len);
SHA1_Final( (unsigned char *)crypt_key[index], &ctx);
#endif
}
#ifdef MMX_COEF
last_salt_size = saved_salt->len;
#endif
return count;
}
#ifdef MMX_COEF
#define HASH_OFFSET (index&(MMX_COEF-1))+((index%NBKEYS)/MMX_COEF)*MMX_COEF*5
static int get_hash_0(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xf; }
static int get_hash_1(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xff; }
static int get_hash_2(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xfff; }
static int get_hash_3(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xffff; }
static int get_hash_4(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xfffff; }
static int get_hash_5(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xffffff; }
static int get_hash_6(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0x7ffffff; }
#else
static int get_hash_0(int index) { return crypt_key[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_key[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_key[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_key[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_key[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_key[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_key[index][0] & 0x7ffffff; }
#endif
static int salt_hash(void *salt)
{
struct s_salt * mysalt = salt;
return mysalt->data.w32 & (SALT_HASH_SIZE - 1);
}
struct fmt_main fmt_saltedsha = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
tests
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
binary,
get_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
jacobi_cpu_omp_kernel.c
|
#include <homp.h>
#define REAL float
void jacobi_cpu_omp_wrapper2(omp_offloading_t *off, long n,long m,REAL *u,REAL *uold,long uold_n, long uold_m, int uold_0_offset, int uold_1_offset)
{
int i, j;
int num_omp_threads = off->dev->num_cores;
#pragma omp parallel for private(j,i) shared(m,n,uold,u,uold_0_offset,uold_1_offset, uold_m) num_threads(num_omp_threads)
for (i=0; i < n; i++) {
/* since uold has halo region, here we need to adjust index to reflect the new offset */
REAL * tmp_uold = &uold[(i + uold_0_offset) * uold_m + uold_1_offset];
REAL * tmp_u = &u[i*m];
#pragma omp simd
for (j = 0; j < m; j++) {
*tmp_uold = *tmp_u;
tmp_uold ++;
tmp_u++;
}
}
}
void jacobi_cpu_omp_wrapper1(omp_offloading_t *off, long n,long m,REAL omega,REAL ax,REAL ay,REAL b,REAL *u,REAL *f, \
REAL *uold, long uold_m, int uold_0_offset, int uold_1_offset, int i_start, int j_start, REAL *error) {
int num_omp_threads = off->dev->num_cores;
#if CORRECTNESS_CHECK
BEGIN_SERIALIZED_PRINTF(off->devseqid);
printf("udev: dev: %d, %dX%d\n", off->devseqid, n, m);
print_array_dev("udev", off->devseqid, "u",(REAL*)u, n, m);
printf("uolddev: dev: %d, %dX%d\n", off->devseqid, uold_0_length, uold_1_length);
print_array_dev("uolddev", off->devseqid, "uold",(REAL*)uold, uold_0_length, uold_1_length);
printf("i_start: %d, j_start: %d, n: %d, m: %d, uold_0_offset: %d, uold_1_offset: %d\n", i_start, j_start, n, m, uold_0_offset, uold_1_offset);
print_array_dev("f", off->devseqid, "f",(REAL*)f, map_f->map_dim[0], map_f->map_dim[1]);
END_SERIALIZED_PRINTF();
#endif
int i, j;
REAL er = 0.0;
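    /* Five-point stencil sweep: resid is the scaled residual of the discrete
       operator at (i,j), each point is relaxed by omega*resid, and the
       squared residuals are reduced into er for the convergence test. */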
#pragma omp parallel for private(j,i) reduction(+:er) num_threads(num_omp_threads)
for (i = i_start; i < n; i++) {
REAL * tmp_uold = &uold[(i + uold_0_offset)* uold_m + uold_1_offset+j_start];
REAL * tmp_f = &f[i*m+j_start];
REAL * tmp_u = &u[i*m+j_start];
#pragma omp simd
for (j = j_start; j < m; j++) {
            REAL resid = (ax * (tmp_uold[uold_m] + tmp_uold[-uold_m]) + ay * (tmp_uold[-1] + tmp_uold[1]) + b * tmp_uold[0] - *tmp_f)/b;
            *tmp_u = *tmp_uold - omega * resid;
er = er + resid * resid;
tmp_uold++;
tmp_f++;
tmp_u++;
}
}
*error = er;
}
|
LSH_query_batch.c
|
/* AUTORIGHTS
Copyright (C) 2007 Princeton University
This file is part of Ferret Toolkit.
Ferret Toolkit is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <cass.h>
#include <cass_timer.h>
#include "LSH.h"
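/* Returns the first odd prime >= n, found by trial division up to sqrt(n);
   used below to size the open-addressed bucket hash table. */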
static inline int prime (int n)
{
long g, j;
double k;
if (n % 2 == 0) n++;
for (;;)
{
g = sqrt((double)n);
j = 3;
k = (double)n / (double)j;
while ((k != (double)(long)k) && (j<=g)) {
j += 2;
k = (double)n / (double)j;
}
if (j>g) return n;
n += 2;
}
assert(0);
return 0;
}
static inline void LSH_hash2_L (LSH_t *lsh, const unsigned **hash, unsigned *hash2, int L)
{
int i, j;
for (i = 0; i < L; i++)
{
hash2[i] = 0;
for (j = 0; j < lsh->M; j++)
{
hash2[i] += lsh->rnd[i][j] * hash[i][j];
}
hash2[i] %= lsh->H;
}
}
static inline void LSH_hash_L (LSH_t *lsh, const float *pnt, unsigned **hash, int L)
{
float s;
int i, j, k, l;
l = 0;
for (i = 0; i < L; i++)
{
for (j = 0; j < lsh->M; j++)
{
s = lsh->betas[l];
for (k = 0; k < lsh->D; k++)
{
s += pnt[k] * lsh->alphas[l][k];
}
s /= lsh->W[i];
hash[i][j] = floor(s);
l++;
}
}
}
void LSH_query_batch (const LSH_query_t *query, int N, const float **point, cass_list_entry_t **topk)
{
LSH_t *lsh = query->lsh;
int max_th;
int D = lsh->D;
int T = query->T;
unsigned ***_tmp = NULL;
unsigned **_tmp2 = NULL;
int i, L = query->L, K = query->K, M = lsh->M;
ptb_vec_t ***_score = NULL;
ptb_vec_t **_vec = NULL;
#ifdef _OPENMP
max_th = omp_get_max_threads();
#else
max_th = 1;
#endif
// fprintf(stderr, "#TH = %d\n", max_th);
_tmp = type_matrix3_alloc(unsigned, max_th, L, M);
_tmp2 = type_matrix_alloc(unsigned, max_th, L);
if (T > 0)
{
_score = type_matrix3_alloc(ptb_vec_t, max_th, L, M * 2);
_vec = type_matrix_alloc(ptb_vec_t, max_th, T);
}
#pragma omp parallel for schedule(guided, 1) default(shared)
for (i = 0; i < N; i++)
{
#ifdef _OPENMP
int tid = omp_get_thread_num();
#else
int tid = 0;
#endif
assert(tid < max_th);
unsigned **tmp = _tmp[tid];
unsigned *tmp2 = _tmp2[tid];
ptb_vec_t **score = T > 0 ? _score[tid] : NULL;
ptb_vec_t *vec = T > 0 ? _vec[tid] : NULL;
cass_list_entry_t entry;
int j;
unsigned h;
if (query->T == 0)
{
LSH_hash_L(lsh, point[i], tmp, L);
}
else
{
LSH_hash_score(lsh, L, point[i], tmp, score);
}
LSH_hash2_noperturb(lsh, tmp, tmp2, L);
TOPK_INIT(topk[i], dist, K, HUGE);
for (j = 0; j < L; j++)
{
int k;
ptb_vec_t ptb;
ARRAY_BEGIN_FOREACH(lsh->hash[j].bucket[tmp2[j]], uint32_t id) {
cass_vec_t *vec = DATASET_VEC(query->ds, id);
entry.id = id;
entry.dist = dist_L2_float(D, vec->u.float_data, point[i]);
TOPK_INSERT_MIN_UNIQ(topk[i], dist, id, K, entry);
}
ARRAY_END_FOREACH;
if (T == 0) continue;
ptb_qsort(score[j], M * 2);
map_perturb_vector(query->ptb_set, vec, score[j], M, T);
for (k = 0; k < T; k++)
{
ptb = vec[k];
LSH_hash2_perturb(lsh, tmp, &h, &ptb, j);
ARRAY_BEGIN_FOREACH(lsh->hash[j].bucket[h], uint32_t id) {
cass_vec_t *vec = DATASET_VEC(query->ds, id);
entry.id = id;
entry.dist = dist_L2_float(D, vec->u.float_data, point[i]);
TOPK_INSERT_MIN_UNIQ(topk[i], dist, id, K, entry);
}
ARRAY_END_FOREACH;
}
}
}
if (_vec != NULL) matrix_free(_vec);
if (_score != NULL) matrix3_free(_score);
matrix3_free(_tmp);
matrix_free(_tmp2);
}
struct b2s {
unsigned bucket;
int qry;
int t;
struct b2s *next;
};
struct b2s_r {
int qry;
int t;
};
static inline void LSH_hash2_b2s_L (LSH_t *lsh, unsigned **hash, struct b2s **hash2, int L, int qry)
{
int i, j;
unsigned h2;
for (i = 0; i < L; i++)
{
h2 = 0;
for (j = 0; j < lsh->M; j++)
{
h2 += lsh->rnd[i][j] * hash[i][j];
}
// hash2[i].L = i;
hash2[i][0].qry = qry;
hash2[i][0].t = -1;
hash2[i][0].bucket = h2 % lsh->H;
hash2[i][0].next = NULL;
}
}
void LSH_query_batch_ca (const LSH_query_t *query, int N, const float **point, cass_list_entry_t **topk)
{
// stimer_t tmr;
LSH_t *lsh = query->lsh;
int max_th;
int D = lsh->D;
unsigned ***_tmp = NULL;
int i, l;
int L = query->L, K = query->K, T = query->T, M = lsh->M;
struct b2s ***hash;
struct b2s ***b2s;
ptb_vec_t ***_score = NULL;
ptb_vec_t **_vec = NULL;
ARRAY_TYPE(struct b2s_r) *_2scan;
size_t B2S_SIZE;
cass_list_entry_t ***ptopk;
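    /* Cache-aware batch variant: stage 1 hashes every query (plus its T
       perturbations) into b2s records; stage 2 chains records that hit the
       same bucket through an open-addressed table, so each bucket is
       scanned once for all queries that touch it. */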
b2s = type_matrix3_alloc(struct b2s, N, L, T + 1);
#ifdef _OPENMP
max_th = omp_get_max_threads();
#else
max_th = 1;
#endif
// fprintf(stderr, "#TH = %d\n", max_th);
_tmp = type_matrix3_alloc(unsigned, max_th, L, M);
if (T > 0)
{
_score = type_matrix3_alloc(ptb_vec_t, max_th, L, M * 2);
_vec = type_matrix_alloc(ptb_vec_t, max_th, T);
}
/* hashing */
//stimer_tick(&tmr);
#pragma omp parallel for schedule(guided, 1) default(shared)
for (i = 0; i < N; i++)
{
#ifdef _OPENMP
int tid = omp_get_thread_num();
#else
int tid = 0;
#endif
unsigned **tmp;
int j, k;
ptb_vec_t **score = T > 0 ? _score[tid] : NULL;
ptb_vec_t *vec = T > 0 ? _vec[tid] : NULL;
assert(tid < max_th);
tmp = _tmp[tid];
if (T == 0)LSH_hash_L(lsh, point[i], tmp, L);
else LSH_hash_score(lsh, L, point[i], tmp, score);
LSH_hash2_b2s_L(query->lsh, tmp, b2s[i], L, i);
if (T == 0) continue;
for (j = 0; j < L; j++)
{
ptb_qsort(score[j], M * 2);
map_perturb_vector(query->ptb_set, vec, score[j], M, T);
for (k = 0; k < T; k++)
{
unsigned h;
LSH_hash2_perturb(lsh, tmp, &h, &vec[k], j);
b2s[i][j][k+1].qry = i;
b2s[i][j][k+1].t = k;
b2s[i][j][k+1].bucket = h;
b2s[i][j][k+1].next = NULL;
}
}
}
matrix3_free(_tmp);
if (_score != NULL) matrix3_free(_score);
if (_vec != NULL) matrix_free(_vec);
//stimer_tuck(&tmr, "Stage-1");
//stimer_tick(&tmr);
/* hash to bucket */
B2S_SIZE = prime(N * (T + 1) * 5);
hash = type_matrix_alloc(struct b2s *, L, B2S_SIZE);
#pragma omp parallel for schedule(guided, 1) default(shared)
for (i = 0; i < L; i++)
{
int j, t;
for (j = 0; j < N; j++)
for (t = 0; t <= T; t++)
{
struct b2s *b = &b2s[j][i][t];
unsigned k = b->bucket % B2S_SIZE;
for (;;)
{
if (hash[i][k] == NULL) break;
if (hash[i][k]->bucket == b->bucket) break;
k = (k + 1) % B2S_SIZE;
}
b->next = hash[i][k];
hash[i][k] = b;
}
}
/* scan the bucket */
_2scan = malloc(max_th * sizeof (*_2scan));
for (i = 0; i < max_th; i++) ARRAY_INIT(_2scan[i]);
ptopk = NULL;
if (T > 0) ptopk = type_matrix3_alloc(cass_list_entry_t, N, T, K);
#pragma omp parallel for schedule(guided, 1) default(shared)
for (i = 0; i < N; i++)
{
int j;
TOPK_INIT(topk[i], dist, K, HUGE);
for (j = 0; j < T; j++)
TOPK_INIT(ptopk[i][j], dist, K, HUGE);
}
//stimer_tuck(&tmr, "Stage-2");
// stimer_tick(&tmr);
// double p = 0;
for (l = 0; l < L; l++)
#pragma omp parallel for schedule(guided, 1) default(shared) //reduction(+:p)
for (i = 0; i < B2S_SIZE; i++)
if (hash[l][i] != NULL)
{
struct b2s *tmp;
#ifdef _OPENMP
int tid = omp_get_thread_num();
#else
int tid = 0;
#endif
assert(tid < max_th);
unsigned bucket = 0;
cass_list_entry_t entry;
ARRAY_TRUNC(_2scan[tid]);
tmp = hash[l][i];
bucket = tmp->bucket;
while (tmp != NULL)
{
struct b2s_r t = {.qry = tmp->qry, .t = tmp->t};
ARRAY_APPEND(_2scan[tid], t);
tmp = tmp->next;
}
/*
if (lsh->hash[l].bucket[bucket].len == 0) continue;
int id = lsh->hash[l].bucket[bucket].data[0];
*/
ARRAY_BEGIN_FOREACH(lsh->hash[l].bucket[bucket], uint32_t id) {
ARRAY_BEGIN_FOREACH_P(_2scan[tid], struct b2s_r *b)
{
cass_vec_t *vec = DATASET_VEC(query->ds, id);
entry.id = id;
entry.dist = dist_L2_float(D, vec->u.float_data, point[b->qry]);
if (b->t == -1)
{
TOPK_INSERT_MIN_UNIQ(topk[b->qry], dist, id, K, entry);
}
else
{
TOPK_INSERT_MIN_UNIQ(ptopk[b->qry][b->t], dist, id, K, entry);
}
}
ARRAY_END_FOREACH;
}
ARRAY_END_FOREACH;
}
for (i = 0; i < max_th; i++) ARRAY_CLEANUP(_2scan[i]);
free(_2scan);
matrix_free(hash);
matrix_free(b2s);
// stimer_tuck(&tmr, "Stage-2");
//stimer_tick(&tmr);
if (T > 0)
#pragma omp parallel for schedule(guided, 1) default(shared)
for (i = 0; i < N; i++)
{
int j, k;
for (j = 0; j < T; j++)
{
for (k = 0; k < K; k++)
{
TOPK_INSERT_MIN_UNIQ(topk[i], dist, id, K, ptopk[i][j][k]);
}
}
}
if (ptopk != NULL) matrix3_free(ptopk);
//stimer_tuck(&tmr, "Stage-4");
}
|
GB_unaryop__lnot_int32_uint16.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int32_uint16
// op(A') function: GB_tran__lnot_int32_uint16
// C type: int32_t
// A type: uint16_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT32 || GxB_NO_UINT16)
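// The apply kernel below evaluates cij = !(aij != 0) independently for each
// entry; a static schedule splits the anz entries evenly across nthreads,
// and Cx may alias Ax since each output depends only on its own input.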
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_int32_uint16
(
int32_t *Cx, // Cx and Ax may be aliased
uint16_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_int32_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
divsufsort.c
|
/*
* divsufsort.c for libdivsufsort
* Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "divsufsort_private.h"
#ifdef _OPENMP
# include <omp.h>
#endif
/*- Private Functions -*/
/* Sorts suffixes of type B*. */
static
saidx_t
sort_typeBstar(const sauchar_t *T, saidx_t *SA,
saidx_t *bucket_A, saidx_t *bucket_B,
saidx_t n) {
saidx_t *PAb, *ISAb, *buf;
#ifdef _OPENMP
saidx_t *curbuf;
saidx_t l;
#endif
saidx_t i, j, k, t, m, bufsize;
saint_t c0, c1;
#ifdef _OPENMP
saint_t d0, d1;
int tmp;
#endif
/* Initialize bucket arrays. */
for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; }
for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; }
/* Count the number of occurrences of the first one or two characters of each
type A, B and B* suffix. Moreover, store the beginning position of all
type B* suffixes into the array SA. */
for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) {
/* type A suffix. */
do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1));
if(0 <= i) {
/* type B* suffix. */
++BUCKET_BSTAR(c0, c1);
SA[--m] = i;
/* type B suffix. */
for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) {
++BUCKET_B(c0, c1);
}
}
}
m = n - m;
/*
note:
A type B* suffix is lexicographically smaller than a type B suffix that
begins with the same first two characters.
*/
/* Calculate the index of start/end point of each bucket. */
for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) {
t = i + BUCKET_A(c0);
BUCKET_A(c0) = i + j; /* start point */
i = t + BUCKET_B(c0, c0);
for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) {
j += BUCKET_BSTAR(c0, c1);
BUCKET_BSTAR(c0, c1) = j; /* end point */
i += BUCKET_B(c0, c1);
}
}
if(0 < m) {
/* Sort the type B* suffixes by their first two characters. */
PAb = SA + n - m; ISAb = SA + m;
for(i = m - 2; 0 <= i; --i) {
t = PAb[i], c0 = T[t], c1 = T[t + 1];
SA[--BUCKET_BSTAR(c0, c1)] = i;
}
t = PAb[m - 1], c0 = T[t], c1 = T[t + 1];
SA[--BUCKET_BSTAR(c0, c1)] = m - 1;
/* Sort the type B* substrings using sssort. */
#ifdef _OPENMP
tmp = omp_get_max_threads();
buf = SA + m, bufsize = (n - (2 * m)) / tmp;
c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m;
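  /* Threads cooperatively drain the B* buckets: the critical section hands
     the next non-trivial bucket range [k, l) to a requesting thread, which
     sorts it with its private slice of the shared buffer. */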
#pragma omp parallel default(shared) private(curbuf, k, l, d0, d1, tmp)
{
tmp = omp_get_thread_num();
curbuf = buf + tmp * bufsize;
k = 0;
for(;;) {
#pragma omp critical(sssort_lock)
{
if(0 < (l = j)) {
d0 = c0, d1 = c1;
do {
k = BUCKET_BSTAR(d0, d1);
if(--d1 <= d0) {
d1 = ALPHABET_SIZE - 1;
if(--d0 < 0) { break; }
}
} while(((l - k) <= 1) && (0 < (l = k)));
c0 = d0, c1 = d1, j = k;
}
}
if(l == 0) { break; }
sssort_par(T, PAb, SA + k, SA + l,
curbuf, bufsize, 2, n, *(SA + k) == (m - 1));
}
}
#else
buf = SA + m, bufsize = n - (2 * m);
for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
i = BUCKET_BSTAR(c0, c1);
if(1 < (j - i)) {
sssort_par(T, PAb, SA + i, SA + j,
buf, bufsize, 2, n, *(SA + i) == (m - 1));
}
}
}
#endif
/* Compute ranks of type B* substrings. */
for(i = m - 1; 0 <= i; --i) {
if(0 <= SA[i]) {
j = i;
do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i]));
SA[i + 1] = i - j;
if(i <= 0) { break; }
}
j = i;
do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0);
ISAb[SA[i]] = j;
}
/* Construct the inverse suffix array of type B* suffixes using trsort. */
trsort_par(ISAb, SA, m, 1);
/* Set the sorted order of type B* suffixes. */
for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) {
for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { }
if(0 <= i) {
t = i;
for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { }
SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t;
}
}
/* Calculate the index of start/end point of each bucket. */
BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */
for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) {
i = BUCKET_A(c0 + 1) - 1;
for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) {
t = i - BUCKET_B(c0, c1);
BUCKET_B(c0, c1) = i; /* end point */
/* Move all type B* suffixes to the correct position. */
for(i = t, j = BUCKET_BSTAR(c0, c1);
j <= k;
--i, --k) { SA[i] = SA[k]; }
}
BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */
BUCKET_B(c0, c0) = i; /* end point */
}
}
return m;
}
/* Constructs the suffix array by using the sorted order of type B* suffixes. */
static
void
construct_SA(const sauchar_t *T, saidx_t *SA,
saidx_t *bucket_A, saidx_t *bucket_B,
saidx_t n, saidx_t m) {
saidx_t *i, *j, *k;
saidx_t s;
saint_t c0, c1, c2;
if(0 < m) {
/* Construct the sorted order of type B suffixes by using
the sorted order of type B* suffixes. */
for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
/* Scan the suffix array from right to left. */
for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
i <= j;
--j) {
if(0 < (s = *j)) {
assert(T[s] == c1);
assert(((s + 1) < n) && (T[s] <= T[s + 1]));
assert(T[s - 1] <= T[s]);
*j = ~s;
c0 = T[--s];
if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
if(c0 != c2) {
if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
k = SA + BUCKET_B(c2 = c0, c1);
}
assert(k < j);
*k-- = s;
} else {
assert(((s == 0) && (T[s] == c1)) || (s < 0));
*j = ~s;
}
}
}
}
/* Construct the suffix array by using
the sorted order of type B suffixes. */
k = SA + BUCKET_A(c2 = T[n - 1]);
*k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1);
/* Scan the suffix array from left to right. */
for(i = SA, j = SA + n; i < j; ++i) {
if(0 < (s = *i)) {
assert(T[s - 1] >= T[s]);
c0 = T[--s];
if((s == 0) || (T[s - 1] < c0)) { s = ~s; }
if(c0 != c2) {
BUCKET_A(c2) = k - SA;
k = SA + BUCKET_A(c2 = c0);
}
assert(i < k);
*k++ = s;
} else {
assert(s < 0);
*i = ~s;
}
}
}
/* Constructs the Burrows-Wheeler transformed string directly
by using the sorted order of type B* suffixes. */
static
saidx_t
construct_BWT(const sauchar_t *T, saidx_t *SA,
saidx_t *bucket_A, saidx_t *bucket_B,
saidx_t n, saidx_t m) {
saidx_t *i, *j, *k, *orig;
saidx_t s;
saint_t c0, c1, c2;
if(0 < m) {
/* Construct the sorted order of type B suffixes by using
the sorted order of type B* suffixes. */
for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
/* Scan the suffix array from right to left. */
for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
i <= j;
--j) {
if(0 < (s = *j)) {
assert(T[s] == c1);
assert(((s + 1) < n) && (T[s] <= T[s + 1]));
assert(T[s - 1] <= T[s]);
c0 = T[--s];
*j = ~((saidx_t)c0);
if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
if(c0 != c2) {
if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
k = SA + BUCKET_B(c2 = c0, c1);
}
assert(k < j);
*k-- = s;
} else if(s != 0) {
*j = ~s;
#ifndef NDEBUG
} else {
assert(T[s] == c1);
#endif
}
}
}
}
/* Construct the BWTed string by using
the sorted order of type B suffixes. */
k = SA + BUCKET_A(c2 = T[n - 1]);
*k++ = (T[n - 2] < c2) ? ~((saidx_t)T[n - 2]) : (n - 1);
/* Scan the suffix array from left to right. */
for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
if(0 < (s = *i)) {
assert(T[s - 1] >= T[s]);
c0 = T[--s];
*i = c0;
if((0 < s) && (T[s - 1] < c0)) { s = ~((saidx_t)T[s - 1]); }
if(c0 != c2) {
BUCKET_A(c2) = k - SA;
k = SA + BUCKET_A(c2 = c0);
}
assert(i < k);
*k++ = s;
} else if(s != 0) {
*i = ~s;
} else {
orig = i;
}
}
return orig - SA;
}
/*---------------------------------------------------------------------------*/
/*- Function -*/
saint_t
divsufsort_par(const sauchar_t *T, saidx_t *SA, saidx_t n) {
saidx_t *bucket_A, *bucket_B;
saidx_t m;
saint_t err = 0;
/* Check arguments. */
if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; }
else if(n == 0) { return 0; }
else if(n == 1) { SA[0] = 0; return 0; }
else if(n == 2) { m = (T[0] < T[1]); SA[m ^ 1] = 0, SA[m] = 1; return 0; }
bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t));
bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t));
/* Suffixsort. */
if((bucket_A != NULL) && (bucket_B != NULL)) {
m = sort_typeBstar(T, SA, bucket_A, bucket_B, n);
construct_SA(T, SA, bucket_A, bucket_B, n, m);
} else {
err = -2;
}
free(bucket_B);
free(bucket_A);
return err;
}
saidx_t
divbwt_par(const sauchar_t *T, sauchar_t *U, saidx_t *A, saidx_t n) {
saidx_t *B;
saidx_t *bucket_A, *bucket_B;
saidx_t m, pidx, i;
/* Check arguments. */
if((T == NULL) || (U == NULL) || (n < 0)) { return -1; }
else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; }
if((B = A) == NULL) { B = (saidx_t *)malloc((size_t)(n + 1) * sizeof(saidx_t)); }
bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t));
bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t));
/* Burrows-Wheeler Transform. */
if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) {
m = sort_typeBstar(T, B, bucket_A, bucket_B, n);
pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m);
/* Copy to output string. */
U[0] = T[n - 1];
for(i = 0; i < pidx; ++i) { U[i + 1] = (sauchar_t)B[i]; }
for(i += 1; i < n; ++i) { U[i] = (sauchar_t)B[i]; }
pidx += 1;
} else {
pidx = -2;
}
free(bucket_B);
free(bucket_A);
if(A == NULL) { free(B); }
return pidx;
}
const char *
divsufsort_par_version(void) {
return PROJECT_VERSION_FULL;
}
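/*
 * A minimal usage sketch (not from the original file) for the two entry
 * points above. The "divsufsort.h" header name and the concrete width of
 * saidx_t are build-configuration assumptions; only divsufsort_par() and
 * divbwt_par() themselves come from the code above.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "divsufsort.h" /* assumed header declaring divsufsort_par()/divbwt_par() */
int main(void) {
const sauchar_t T[] = "abracadabra";
saidx_t n = (saidx_t)strlen((const char *)T);
saidx_t *SA = (saidx_t *)malloc(n * sizeof(saidx_t));
sauchar_t *U = (sauchar_t *)malloc(n);
saidx_t i, pidx;
if((SA == NULL) || (U == NULL)) { free(SA); free(U); return 1; }
/* Suffix array: SA[i] is the start of the i-th smallest suffix of T. */
if(divsufsort_par(T, SA, n) == 0) {
for(i = 0; i < n; ++i) { printf("SA[%d] = %d\n", (int)i, (int)SA[i]); }
}
/* BWT: with A == NULL a work buffer is allocated internally; per the code
   above the return value is the primary index + 1, or a negative error. */
pidx = divbwt_par(T, U, NULL, n);
if(0 <= pidx) { printf("BWT primary index: %d\n", (int)pidx); }
free(U);
free(SA);
return 0;
}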
|
wow_srp_fmt_plug.c
|
/*
* This software was written by Jim Fougeron jfoug AT cox dot net
* in 2012. No copyright is claimed, and the software is hereby
* placed in the public domain. In case this attempt to disclaim
* copyright and place the software in the public domain is deemed
* null and void, then the software is Copyright (c) 2012 Jim Fougeron
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*
*
* This implements the SRP protocol, with Blizzard's (battlenet) documented
* implementation specifics.
*
* U = username in upper case
* P = password in upper case
* s = random salt value.
*
* x = SHA1(s . SHA1(U . ":" . P));
* v = 47^x % 112624315653284427036559548610503669920632123929604336254260115573677366691719
*
* v is the 'verifier' value (256 bit value).
*
* Added OMP. Added 'default' oSSL BigNum exponentiation.
 * GMP exponentiation (faster) is optional, and controlled with HAVE_LIBGMP in autoconfig.h
*
 * NOTE, bug fix required. The incoming binary may be 64 hex bytes OR LESS,
 * and it may also have left-padded 0's. We have to adjust several things to
 * handle this properly. First, valid() must handle it. Then binary() and
 * salt() both must handle this. Also, crypt must handle this. NOTE,
 * the string 'could' be an odd length. If so, then only 1 hex digit is put
 * into the first binary byte. All of these problems were found once I got
 * jtrts.pl working with wowsrp. There now are 2 input files for wowsrp. One
 * strips the leading 0's (so if a number has, say, only 61 hex digits of
 * precision, then only 61 bytes will be in the string). The other
 * file left pads the numbers with 0's to an even 64 bytes long, so all are
 * 64 bytes. The format MUST handle both, since at this moment, we are not
 * exactly sure which type will be seen in the wild. NOTE, the byte swapped
 * method (GMP) within is no longer valid, and was removed.
 * NOTE, we need to add split() to canonicalize this format (remove LPad 0's).
 */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_blizzard;
#elif FMT_REGISTERS_H
john_register_one(&fmt_blizzard);
#else
#if AC_BUILT
/* we need to know if HAVE_LIBGMP is defined */
#include "autoconfig.h"
#endif
#include <string.h>
#include "sha.h"
#include "sha2.h"
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "unicode.h" /* For encoding-aware uppercasing */
#ifdef HAVE_LIBGMP
#if HAVE_GMP_GMP_H
#include <gmp/gmp.h>
#else
#include <gmp.h>
#endif
#define EXP_STR " GMP-exp"
#else
#include <openssl/bn.h>
#define EXP_STR " oSSL-exp"
#endif
#include "johnswap.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "WoWSRP"
#define FORMAT_NAME "Battlenet"
#define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR EXP_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define WOWSIG "$WoWSRP$"
#define WOWSIGLEN (sizeof(WOWSIG)-1)
// min plaintext len is 8; PW's are only alpha-num uppercase
#define PLAINTEXT_LENGTH 16
#define CIPHERTEXT_LENGTH 64
#define BINARY_SIZE 4
#define BINARY_ALIGN 4
#define FULL_BINARY_SIZE 32
#define SALT_SIZE (64+3)
#define SALT_ALIGN 1
#define USERNAMELEN 32
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 4
// salt is in hex (salt and salt2)
static struct fmt_tests tests[] = {
{WOWSIG"6D00CD214C8473C7F4E9DC77AE8FC6B3944298C48C7454E6BB8296952DCFE78D$73616C74", "PASSWORD", {"SOLAR"}},
{WOWSIG"A35DCC134159A34F1D411DA7F38AB064B617D5DBDD9258FE2F23D5AB1CF3F685$73616C7432", "PASSWORD2", {"DIZ"}},
{WOWSIG"A35DCC134159A34F1D411DA7F38AB064B617D5DBDD9258FE2F23D5AB1CF3F685$73616C7432*DIZ", "PASSWORD2"},
// this one has a leading 0
{"$WoWSRP$01C7F618E4589F3229D764580FDBF0D579D7CB1C071F11C856BDDA9E41946530$36354172646F744A366A7A58386D4D6E*JOHN", "PASSWORD"},
// same hash, but without 0 (only 63 byte hash).
{"$WoWSRP$1C7F618E4589F3229D764580FDBF0D579D7CB1C071F11C856BDDA9E41946530$36354172646F744A366A7A58386D4D6E*JOHN", "PASSWORD"},
{NULL}
};
#ifdef HAVE_LIBGMP
typedef struct t_SRP_CTX {
mpz_t z_mod, z_base, z_exp, z_rop;
} SRP_CTX;
#else
typedef struct t_SRP_CTX {
BIGNUM *z_mod, *z_base, *z_exp, *z_rop;
BN_CTX *BN_ctx;
}SRP_CTX;
#endif
static SRP_CTX *pSRP_CTX;
static unsigned char saved_salt[SALT_SIZE];
static unsigned char user_id[USERNAMELEN];
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[8];
static int max_keys_per_crypt;
static void init(struct fmt_main *self)
{
int i;
#if defined (_OPENMP)
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
pSRP_CTX = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*pSRP_CTX));
max_keys_per_crypt = self->params.max_keys_per_crypt;
for (i = 0; i < max_keys_per_crypt; ++i) {
#ifdef HAVE_LIBGMP
mpz_init_set_str(pSRP_CTX[i].z_mod, "112624315653284427036559548610503669920632123929604336254260115573677366691719", 10);
mpz_init_set_str(pSRP_CTX[i].z_base, "47", 10);
mpz_init_set_str(pSRP_CTX[i].z_exp, "1", 10);
mpz_init(pSRP_CTX[i].z_rop);
// Now, properly initialize z_exp so it is 'large enough' to hold any SHA1 value
// we need to put into it. Then we simply need to copy in the data, and possibly set
// the limb count size.
mpz_mul_2exp(pSRP_CTX[i].z_exp, pSRP_CTX[i].z_exp, 159);
#else
pSRP_CTX[i].z_mod=BN_new();
BN_dec2bn(&pSRP_CTX[i].z_mod, "112624315653284427036559548610503669920632123929604336254260115573677366691719");
pSRP_CTX[i].z_base=BN_new();
BN_set_word(pSRP_CTX[i].z_base, 47);
pSRP_CTX[i].z_exp=BN_new();
pSRP_CTX[i].z_rop=BN_new();
pSRP_CTX[i].BN_ctx = BN_CTX_new();
#endif
}
}
static void done(void)
{
int i;
for (i = 0; i < max_keys_per_crypt; ++i) {
#ifdef HAVE_LIBGMP
mpz_clear(pSRP_CTX[i].z_mod);
mpz_clear(pSRP_CTX[i].z_base);
mpz_clear(pSRP_CTX[i].z_exp);
mpz_clear(pSRP_CTX[i].z_rop);
#else
BN_clear_free(pSRP_CTX[i].z_mod);
BN_clear_free(pSRP_CTX[i].z_base);
BN_clear_free(pSRP_CTX[i].z_exp);
BN_clear_free(pSRP_CTX[i].z_rop);
BN_CTX_free(pSRP_CTX[i].BN_ctx);
#endif
}
MEM_FREE(pSRP_CTX);
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *q;
if (strncmp(ciphertext, WOWSIG, WOWSIGLEN))
return 0;
q = p = &ciphertext[WOWSIGLEN];
while (atoi16[ARCH_INDEX(*q)] != 0x7F)
q++;
if (q-p > CIPHERTEXT_LENGTH)
return 0;
if (*q != '$')
return 0;
++q;
p = strchr(q, '*');
if (!p)
return 0;
if (((p - q) & 1))
return 0;
if (p - q >= 2 * SALT_SIZE)
return 0;
while (atoi16[ARCH_INDEX(*q)] != 0x7F)
q++;
if (q != p)
return 0;
if (strlen(&p[1]) > USERNAMELEN)
return 0;
return 1;
}
/*
 * Copy at most ct2_size bytes (including the terminating NUL) to ct2,
 * to avoid a buffer overflow
 */
static void StripZeros(const char *ct, char *ct2, const int ct2_size) {
int i;
for (i = 0; i < WOWSIGLEN && i < (ct2_size - 1); ++i)
*ct2++ = *ct++;
while (*ct == '0')
++ct;
while (*ct && i < (ct2_size - 1)) {
*ct2++ = *ct++;
i++;
}
*ct2 = 0;
}
static char *prepare(char *split_fields[10], struct fmt_main *pFmt) {
// if user name not there, then add it
static char ct[128+32+1];
char *cp;
if (!split_fields[1][0] || strncmp(split_fields[1], WOWSIG, WOWSIGLEN))
return split_fields[1];
cp = strchr(split_fields[1], '*');
if (cp) {
if (split_fields[1][WOWSIGLEN] == '0') {
StripZeros(split_fields[1], ct, sizeof(ct));
return ct;
}
return split_fields[1];
}
strnzcpy(ct, split_fields[1], 128);
cp = &ct[strlen(ct)];
*cp++ = '*';
strnzcpy(cp, split_fields[0], USERNAMELEN);
// upcase user name
enc_strupper(cp);
// Ok, if there are leading 0's on that resulting binary value, then remove them.
if (ct[WOWSIGLEN] == '0') {
char ct2[128+32+1];
StripZeros(ct, ct2, sizeof(ct2));
strcpy(ct, ct2);
}
return ct;
}
static char *split(char *ciphertext, int index, struct fmt_main *pFmt) {
static char ct[128+32+1];
char *cp;
strnzcpy(ct, ciphertext, 128+32+1);
cp = strchr(ct, '*');
if (cp) *cp = 0;
strupr(&ct[WOWSIGLEN]);
if (cp) *cp = '*';
if (ct[WOWSIGLEN] == '0') {
char ct2[128+32+1];
StripZeros(ct, ct2, sizeof(ct2));
strcpy(ct, ct2);
}
return ct;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char b[FULL_BINARY_SIZE];
ARCH_WORD_32 dummy[1];
} out;
char *p, *q;
int i;
p = &ciphertext[WOWSIGLEN];
q = strchr(p, '$');
memset(out.b, 0, sizeof(out.b));
while (*p == '0')
++p;
if ((q-p)&1) {
out.b[0] = atoi16[ARCH_INDEX(*p)];
++p;
} else {
out.b[0] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
for (i = 1; i < FULL_BINARY_SIZE; i++) {
out.b[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
if (p >= q)
break;
}
//dump_stuff_msg("binary", out.b, 32);
return out.b;
}
static void *get_salt(char *ciphertext)
{
static union {
unsigned char b[SALT_SIZE];
ARCH_WORD_32 dummy;
} out;
char *p;
int length=0;
memset(out.b, 0, SALT_SIZE);
p = strchr(&ciphertext[WOWSIGLEN], '$') + 1;
// We need to know if this is odd length or not.
while (atoi16[ARCH_INDEX(*p++)] != 0x7f)
length++;
p = strchr(&ciphertext[WOWSIGLEN], '$') + 1;
// handle odd length hex (yes there can be odd length in these SRP files).
if ((length&1)&&atoi16[ARCH_INDEX(*p)] != 0x7f) {
length=0;
out.b[++length] = atoi16[ARCH_INDEX(*p)];
++p;
} else
length = 0;
while (atoi16[ARCH_INDEX(*p)] != 0x7f && atoi16[ARCH_INDEX(p[1])] != 0x7f) {
out.b[++length] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
out.b[0] = length;
if (*p) {
++p;
memcpy(out.b + length+1, p, strlen(p)+1);
}
return out.b;
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
static int salt_hash(void *salt)
{
unsigned int hash = 0;
char *p = (char *)salt;
while (*p) {
hash <<= 1;
hash += (unsigned char)*p++;
if (hash >> SALT_HASH_LOG) {
hash ^= hash >> SALT_HASH_LOG;
hash &= (SALT_HASH_SIZE - 1);
}
}
hash ^= hash >> SALT_HASH_LOG;
hash &= (SALT_HASH_SIZE - 1);
return hash;
}
static void set_salt(void *salt)
{
unsigned char *cp = (unsigned char*)salt;
memcpy(saved_salt, &cp[1], *cp);
saved_salt[*cp] = 0;
strcpy((char*)user_id, (char*)&cp[*cp+1]);
}
static void set_key(char *key, int index)
{
strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH+1);
enc_strupper(saved_key[index]);
}
static char *get_key(int index)
{
return saved_key[index];
}
// x = SHA1(s . SHA1(U . ":" . P));
// v = 47^x % 112624315653284427036559548610503669920632123929604336254260115573677366691719
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int j;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j = 0; j < count; ++j) {
SHA_CTX ctx;
unsigned char Tmp[20];
memset(crypt_out[j], 0, sizeof(crypt_out[j]));
SHA1_Init(&ctx);
SHA1_Update(&ctx, user_id, strlen((char*)user_id));
SHA1_Update(&ctx, ":", 1);
SHA1_Update(&ctx, saved_key[j], strlen(saved_key[j]));
SHA1_Final(Tmp, &ctx);
SHA1_Init(&ctx);
SHA1_Update(&ctx, saved_salt, strlen((char*)saved_salt));
SHA1_Update(&ctx, Tmp, 20);
SHA1_Final(Tmp, &ctx);
// Ok, now Tmp is x (the exponent; v = 47^x mod N is computed below)
//if (!strcmp(saved_key[j], "ENTERNOW__1") && !strcmp((char*)user_id, "DIP")) {
// printf ("salt=%s user=%s pass=%s, ", (char*)saved_salt, (char*)user_id, saved_key[j]);
// dump_stuff_msg("sha$h ", Tmp, 20);
//}
#ifdef HAVE_LIBGMP
{
unsigned char HashStr[80], *p;
int i, todo;
p = HashStr;
for (i = 0; i < 20; ++i) {
*p++ = itoa16[Tmp[i]>>4];
*p++ = itoa16[Tmp[i]&0xF];
}
*p = 0;
mpz_set_str(pSRP_CTX[j].z_exp, (char*)HashStr, 16);
mpz_powm (pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod );
mpz_get_str ((char*)HashStr, 16, pSRP_CTX[j].z_rop);
p = HashStr;
todo = strlen((char*)p);
if (todo&1) {
((unsigned char*)(crypt_out[j]))[0] = atoi16[ARCH_INDEX(*p)];
++p;
--todo;
} else {
((unsigned char*)(crypt_out[j]))[0] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
todo -= 2;
}
todo >>= 1;
for (i = 1; i <= todo; i++) {
((unsigned char*)(crypt_out[j]))[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
//if (!strcmp(saved_key[j], "ENTERNOW__1") && !strcmp((char*)user_id, "DIP")) {
// dump_stuff_msg("crypt ", crypt_out[j], 32);
//}
}
#else
// using oSSL's BN to do expmod.
pSRP_CTX[j].z_exp = BN_bin2bn(Tmp,20,pSRP_CTX[j].z_exp);
BN_mod_exp(pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod, pSRP_CTX[j].BN_ctx);
BN_bn2bin(pSRP_CTX[j].z_rop, (unsigned char*)(crypt_out[j]));
//if (!strcmp(saved_key[j], "ENTERNOW__1") && !strcmp((char*)user_id, "DIP")) {
// dump_stuff_msg("crypt ", crypt_out[j], 32);
//}
#endif
}
return count;
}
static int cmp_all(void *binary, int count)
{
int i;
for (i = 0; i < count; ++i) {
if (*((ARCH_WORD_32*)binary) == *((ARCH_WORD_32*)(crypt_out[i])))
return 1;
}
return 0;
}
static int cmp_one(void *binary, int index)
{
return *((ARCH_WORD_32*)binary) == *((ARCH_WORD_32*)(crypt_out[index]));
}
static int cmp_exact(char *source, int index)
{
return !memcmp(get_binary(source), crypt_out[index], BINARY_SIZE);
}
struct fmt_main fmt_blizzard = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
8,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
{ NULL },
{ WOWSIG },
tests
}, {
init,
done,
fmt_default_reset,
prepare,
valid,
split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
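/*
 * A standalone sketch (not part of the plugin) of the verifier computation
 * that crypt_all() above performs on its non-GMP path:
 * x = SHA1(s . SHA1(U . ":" . P)), v = 47^x mod N. It uses only OpenSSL;
 * the inputs mirror the first tests[] entry (its salt field "73616C74" is
 * hex for the ASCII string "salt"), so the printed value should match that
 * entry's hash field up to leading 0's.
 */
#include <stdio.h>
#include <string.h>
#include <openssl/bn.h>
#include <openssl/sha.h>
int main(void)
{
static const char *N_dec =
"112624315653284427036559548610503669920632123929604336254260115573677366691719";
const unsigned char user[] = "SOLAR", pass[] = "PASSWORD", salt[] = "salt";
unsigned char x[SHA_DIGEST_LENGTH];
SHA_CTX ctx;
BN_CTX *bnctx;
BIGNUM *mod = NULL, *base, *xbn, *v;
char *hex;
/* inner hash: SHA1(U . ":" . P), both already upper-cased */
SHA1_Init(&ctx);
SHA1_Update(&ctx, user, sizeof(user) - 1);
SHA1_Update(&ctx, ":", 1);
SHA1_Update(&ctx, pass, sizeof(pass) - 1);
SHA1_Final(x, &ctx);
/* outer hash: x = SHA1(s . inner) */
SHA1_Init(&ctx);
SHA1_Update(&ctx, salt, sizeof(salt) - 1);
SHA1_Update(&ctx, x, sizeof(x));
SHA1_Final(x, &ctx);
/* v = 47^x mod N, exactly as in the oSSL branch of crypt_all() */
bnctx = BN_CTX_new();
base = BN_new(); v = BN_new();
xbn = BN_bin2bn(x, sizeof(x), NULL);
BN_dec2bn(&mod, N_dec);
BN_set_word(base, 47);
BN_mod_exp(v, base, xbn, mod, bnctx);
hex = BN_bn2hex(v);
printf("v = %s\n", hex);
OPENSSL_free(hex);
BN_free(v); BN_free(xbn); BN_free(base); BN_free(mod);
BN_CTX_free(bnctx);
return 0;
}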
|
DRB030-truedep1-var-yes.c
|
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: [email protected], [email protected], [email protected],
[email protected], [email protected])
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This program has data races due to true dependence within a loop.
Data race pair: a[i+1]@68:5 vs. a[i]@68:12
*/
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char* argv[])
{
int i;
int len=100;
if (argc>1)
len = atoi(argv[1]);
int a[len];
for (i=0;i<len;i++)
a[i]=i;
#pragma omp parallel for schedule(dynamic)
for (i=0;i<len-1;i++)
a[i+1]=a[i]+1;
return 0;
}
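/*
 * A sketch (not part of the benchmark) of one way to order the true
 * dependence above, assuming an OpenMP 4.5+ compiler: the doacross form
 * makes iteration i wait for iteration i-1, so the read of a[i] always
 * sees the write from the previous iteration. This removes the race but
 * serializes the recurrence, so it is a correctness fix, not a speedup.
 */
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char* argv[])
{
int i;
int len=100;
if (argc>1)
len = atoi(argv[1]);
int a[len];
for (i=0;i<len;i++)
a[i]=i;
#pragma omp parallel for ordered(1)
for (i=0;i<len-1;i++)
{
#pragma omp ordered depend(sink: i-1)
a[i+1]=a[i]+1;
#pragma omp ordered depend(source)
}
printf("a[%d]=%d\n", len-1, a[len-1]);
return 0;
}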
|
test_1.c
|
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: [email protected], [email protected], [email protected],
[email protected], [email protected])
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A loop with loop-carried anti-dependence.
Data race pair: a[i+1]@67:10 vs. a[i]@67:5
*/
#include "omprace.h"
#include <omp.h>
#include <stdlib.h>
int main(int argc, char* argv[])
{
omprace_init();
int i;
int len = 1000;
if (argc>1)
len = atoi(argv[1]);
int a[len];
for (i=0; i<len; i++)
a[i]= i;
//#pragma omp parallel for schedule(dynamic,1)
//#pragma omp parallel for schedule(static,1)
#pragma omp parallel for //num_threads(4)
for (i=0;i< len -1 ;i++)
a[i]=a[i]+1;
omprace_fini();
return 0;
}
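/*
 * A sketch (not part of the benchmark) of the standard fix for the
 * loop-carried anti-dependence this test's header describes (a write to
 * a[i] racing with the next iteration's read of a[i+1]). Note the body
 * shown above, a[i]=a[i]+1, carries no dependence, so this sketch uses
 * the anti-dependent form a[i]=a[i+1]+1 from the header comment: reading
 * from an unchanging snapshot makes every iteration independent.
 */
#include <stdlib.h>
#include <string.h>
int main(int argc, char* argv[])
{
int i;
int len = 1000;
if (argc>1)
len = atoi(argv[1]);
int a[len], b[len];
for (i=0; i<len; i++)
a[i]= i;
memcpy(b, a, sizeof(a)); /* private snapshot removes the anti-dependence */
#pragma omp parallel for
for (i=0;i< len -1 ;i++)
a[i]=b[i+1]+1;
return 0;
}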
|
MINDSSCbox.h
|
void boxfilter(float* input,float* temp1,float* temp2,int hw,int m,int n,int o){
int sz=m*n*o;
for(int i=0;i<sz;i++){
temp1[i]=input[i];
}
for(int k=0;k<o;k++){
for(int j=0;j<n;j++){
for(int i=1;i<m;i++){
temp1[i+j*m+k*m*n]+=temp1[(i-1)+j*m+k*m*n];
}
}
}
for(int k=0;k<o;k++){
for(int j=0;j<n;j++){
for(int i=0;i<(hw+1);i++){
temp2[i+j*m+k*m*n]=temp1[(i+hw)+j*m+k*m*n];
}
for(int i=(hw+1);i<(m-hw);i++){
temp2[i+j*m+k*m*n]=temp1[(i+hw)+j*m+k*m*n]-temp1[(i-hw-1)+j*m+k*m*n];
}
for(int i=(m-hw);i<m;i++){
temp2[i+j*m+k*m*n]=temp1[(m-1)+j*m+k*m*n]-temp1[(i-hw-1)+j*m+k*m*n];
}
}
}
for(int k=0;k<o;k++){
for(int j=1;j<n;j++){
for(int i=0;i<m;i++){
temp2[i+j*m+k*m*n]+=temp2[i+(j-1)*m+k*m*n];
}
}
}
for(int k=0;k<o;k++){
for(int i=0;i<m;i++){
for(int j=0;j<(hw+1);j++){
temp1[i+j*m+k*m*n]=temp2[i+(j+hw)*m+k*m*n];
}
for(int j=(hw+1);j<(n-hw);j++){
temp1[i+j*m+k*m*n]=temp2[i+(j+hw)*m+k*m*n]-temp2[i+(j-hw-1)*m+k*m*n];
}
for(int j=(n-hw);j<n;j++){
temp1[i+j*m+k*m*n]=temp2[i+(n-1)*m+k*m*n]-temp2[i+(j-hw-1)*m+k*m*n];
}
}
}
for(int k=1;k<o;k++){
for(int j=0;j<n;j++){
for(int i=0;i<m;i++){
temp1[i+j*m+k*m*n]+=temp1[i+j*m+(k-1)*m*n];
}
}
}
for(int j=0;j<n;j++){
for(int i=0;i<m;i++){
for(int k=0;k<(hw+1);k++){
input[i+j*m+k*m*n]=temp1[i+j*m+(k+hw)*m*n];
}
for(int k=(hw+1);k<(o-hw);k++){
input[i+j*m+k*m*n]=temp1[i+j*m+(k+hw)*m*n]-temp1[i+j*m+(k-hw-1)*m*n];
}
for(int k=(o-hw);k<o;k++){
input[i+j*m+k*m*n]=temp1[i+j*m+(o-1)*m*n]-temp1[i+j*m+(k-hw-1)*m*n];
}
}
}
}
void imshift(float* input,float* output,int dx,int dy,int dz,int m,int n,int o){
for(int k=0;k<o;k++){
for(int j=0;j<n;j++){
for(int i=0;i<m;i++){
if(i+dy>=0&&i+dy<m&&j+dx>=0&&j+dx<n&&k+dz>=0&&k+dz<o)
output[i+j*m+k*m*n]=input[i+dy+(j+dx)*m+(k+dz)*m*n];
else
output[i+j*m+k*m*n]=input[i+j*m+k*m*n];
}
}
}
}
/*void *distances(void *threadarg)
{
struct mind_data *my_data;
my_data = (struct mind_data *) threadarg;
float* im1=my_data->im1;
float* d1=my_data->d1;
int qs=my_data->qs;
int ind_d1=my_data->ind_d1;
int m=image_m;
int n=image_n;
int o=image_o;*/
void distances(float* im1,float* d1,int m,int n,int o,int qs,int l){
int sz1=m*n*o;
float* w1=new float[sz1];
int len1=6;
float* temp1=new float[sz1]; float* temp2=new float[sz1];
int dx[6]={+qs,+qs,-qs,+0,+qs,+0};
int dy[6]={+qs,-qs,+0,-qs,+0,+qs};
int dz[6]={0,+0,+qs,+qs,+qs,+qs};
imshift(im1,w1,dx[l],dy[l],dz[l],m,n,o);
for(int i=0;i<sz1;i++){
w1[i]=(w1[i]-im1[i])*(w1[i]-im1[i]);
}
boxfilter(w1,temp1,temp2,qs,m,n,o);
for(int i=0;i<sz1;i++){
d1[i+l*sz1]=w1[i];
}
delete[] temp1; delete[] temp2; delete[] w1; // arrays from new[] need delete[]
}
//__builtin_popcountll(left[i]^right[i]); absolute hamming distances
void descriptor(uint64_t* mindq,float* im1,int m,int n,int o,int qs){
timeval time1,time2;
//MIND with self-similarity context
int dx[6]={+qs,+qs,-qs,+0,+qs,+0};
int dy[6]={+qs,-qs,+0,-qs,+0,+qs};
int dz[6]={0,+0,+qs,+qs,+qs,+qs};
int sx[12]={-qs,+0,-qs,+0,+0,+qs,+0,+0,+0,-qs,+0,+0};
int sy[12]={+0,-qs,+0,+qs,+0,+0,+0,+qs,+0,+0,+0,-qs};
int sz[12]={+0,+0,+0,+0,-qs,+0,-qs,+0,-qs,+0,-qs,+0};
int index[12]={0,0,1,1,2,2,3,3,4,4,5,5};
float sigma=0.75;//1.0;//0.75;//1.5;
int rho=ceil(sigma*1.5)*2+1;
int len1=6;
const int len2=12;
image_d=12;
int d=12;
int sz1=m*n*o;
pthread_t thread1, thread2, thread3;
//============== DISTANCES USING BOXFILTER ===================
float* d1=new float[sz1*len1];
gettimeofday(&time1, NULL);
#pragma omp parallel for
for(int l=0;l<len1;l++){
distances(im1,d1,m,n,o,qs,l);
}
gettimeofday(&time2, NULL);
float timeMIND1=time2.tv_sec+time2.tv_usec/1e6-(time1.tv_sec+time1.tv_usec/1e6);
gettimeofday(&time1, NULL);
//quantisation table
const int val=6;
const unsigned long long power=32;
#pragma omp parallel for
for(int k=0;k<o;k++){
unsigned int tablei[6]={0,1,3,7,15,31};
float compare[val-1];
for(int i=0;i<val-1;i++){
compare[i]=-log((i+1.5f)/val);
}
float mind1[12];
for(int j=0;j<n;j++){
for(int i=0;i<m;i++){
for(int l=0;l<len2;l++){
if(i+sy[l]>=0&&i+sy[l]<m&&j+sx[l]>=0&&j+sx[l]<n&&k+sz[l]>=0&&k+sz[l]<o){
mind1[l]=d1[i+sy[l]+(j+sx[l])*m+(k+sz[l])*m*n+index[l]*sz1];
}
else{
mind1[l]=d1[i+j*m+k*m*n+index[l]*sz1];
}
}
float minval=*min_element(mind1,mind1+len2);
float sumnoise=0.0f;
for(int l=0;l<len2;l++){
mind1[l]-=minval;
sumnoise+=mind1[l];
}
float noise1=max(sumnoise/(float)len2,1e-6f);
for(int l=0;l<len2;l++){
mind1[l]/=noise1;
}
unsigned long long accum=0;
unsigned long long tabled1=1;
for(int l=0;l<len2;l++){
//mind1[l]=exp(-mind1[l]);
int mind1val=0;
for(int c=0;c<val-1;c++){
mind1val+=compare[c]>mind1[l]?1:0;
}
//int mind1val=min(max((int)(mind1[l]*val-0.5f),0),val-1);
accum+=tablei[mind1val]*tabled1;
tabled1*=power;
}
mindq[i+j*m+k*m*n]=accum;
}
}
}
gettimeofday(&time2, NULL);
float timeMIND2=time2.tv_sec+time2.tv_usec/1e6-(time1.tv_sec+time1.tv_usec/1e6);
delete[] d1; // array from new[] needs delete[]
}
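/*
 * A minimal 1-D sketch (standalone C, made-up driver) of the running-sum
 * trick boxfilter() applies along each axis above: one inclusive prefix
 * sum, then each window sum is the difference of the taps at i+hw and
 * i-hw-1, with the same right-edge clamping as the 3-D version. This is
 * O(n) per axis regardless of the window half-width hw.
 */
#include <stdio.h>
static void boxfilter1d(const float* in, float* out, float* pre, int n, int hw){
int i;
pre[0]=in[0];
for(i=1;i<n;i++) pre[i]=pre[i-1]+in[i]; /* inclusive prefix sums */
for(i=0;i<n;i++){
int hi=(i+hw<n-1)?i+hw:n-1; /* clamp the right edge, as the 3-D code does */
int lo=i-hw-1; /* index just before the window */
out[i]=pre[hi]-((lo>=0)?pre[lo]:0.0f);
}
}
int main(void){
float in[8]={1,1,1,1,1,1,1,1}, out[8], pre[8];
int i;
boxfilter1d(in,out,pre,8,2);
for(i=0;i<8;i++) printf("%g ",out[i]); /* interior windows of width 5 sum to 5 */
printf("\n");
return 0;
}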
|