GB_binop__ge_int32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__ge_int32
// A.*B function (eWiseMult): GB_AemultB__ge_int32
// A*D function (colscale): GB_AxD__ge_int32
// D*A function (rowscale): GB_DxB__ge_int32
// C+=B function (dense accum): GB_Cdense_accumB__ge_int32
// C+=b function (dense accum): GB_Cdense_accumb__ge_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ge_int32
// C=scalar+B GB_bind1st__ge_int32
// C=scalar+B' GB_bind1st_tran__ge_int32
// C=A+scalar GB_bind2nd__ge_int32
// C=A'+scalar GB_bind2nd_tran__ge_int32
// C type: bool
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij >= bij)
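// Editor's note (not generated code): an illustrative scalar form of this
// operator; the helper name below is hypothetical and only restates the
// comment above, cij = (aij >= bij), with the C, A, and B types listed.
static inline bool GB_ge_int32_scalar_example (int32_t aij, int32_t bij)
{
    return (aij >= bij) ;
}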
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_INT32 || GxB_NO_GE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__ge_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__ge_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__ge_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__ge_int32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__ge_int32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__ge_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__ge_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__ge_int32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = Bx [p] ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__ge_int32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = Ax [p] ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB_bind1st_tran__ge_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB_bind2nd_tran__ge_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
pstbsm.c
/**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/pztbsm.c, normal z -> s, Fri Sep 28 17:38:14 2018
*
**/
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <plasma_core_blas.h>
#define A(m,n) (float*)plasma_tile_addr(A, m, n)
#define B(m,n) (float*)plasma_tile_addr(B, m, n)
/***************************************************************************//**
* Parallel tile triangular solve - dynamic scheduling
**/
void plasma_pstbsm(plasma_enum_t side, plasma_enum_t uplo,
plasma_enum_t trans, plasma_enum_t diag,
float alpha, plasma_desc_t A,
plasma_desc_t B,
const int *ipiv,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Return if failed sequence.
if (sequence->status != PlasmaSuccess)
return;
if (side == PlasmaLeft) {
if (uplo == PlasmaUpper) {
if (trans == PlasmaNoTrans) {
// ==========================================
// PlasmaLeft / PlasmaUpper / PlasmaNoTrans
// ==========================================
for (int k = 0; k < B.mt; k++) {
int mvbk = plasma_tile_mview(B, B.mt-k-1);
int ldak = plasma_tile_mmain(A, B.mt-k-1);
int ldbk = plasma_tile_mmain(B, B.mt-k-1);
float lalpha = k == 0 ? alpha : 1.0;
for (int n = 0; n < B.nt; n++) {
int nvbn = plasma_tile_nview(B, n);
plasma_core_omp_strsm(
side, uplo, trans, diag,
mvbk, nvbn,
lalpha, A(B.mt-k-1, B.mt-k-1), ldak,
B(B.mt-k-1, n), ldbk,
sequence, request);
}
for (int m = imax(0, (B.mt-k-1)-A.kut+1); m < B.mt-k-1; m++) {
int ldam = plasma_tile_mmain(A, m);
int ldbm = plasma_tile_mmain(B, m);
for (int n = 0; n < B.nt; n++) {
int nvbn = plasma_tile_nview(B, n);
plasma_core_omp_sgemm(
PlasmaNoTrans, PlasmaNoTrans,
B.mb, nvbn, mvbk,
-1.0, A(m, B.mt-k-1), ldam,
B(B.mt-k-1, n), ldbk,
lalpha, B(m, n ), ldbm,
sequence, request);
}
}
}
}
else {
// ==============================================
// PlasmaLeft / PlasmaUpper / Plasma[Conj]Trans
// ==============================================
for (int k = 0; k < B.mt; k++) {
int mvbk = plasma_tile_mview(B, k);
int ldak = plasma_tile_mmain(A, k);
int ldbk = plasma_tile_mmain(B, k);
float lalpha = k == 0 ? alpha : 1.0;
for (int n = 0; n < B.nt; n++) {
int nvbn = plasma_tile_nview(B, n);
plasma_core_omp_strsm(
side, uplo, trans, diag,
mvbk, nvbn,
lalpha, A(k, k), ldak,
B(k, n), ldbk,
sequence, request);
}
for (int m = k+1; m < imin(A.mt, k+A.kut); m++) {
int mvbm = plasma_tile_mview(B, m);
int ldbm = plasma_tile_mmain(B, m);
for (int n = 0; n < B.nt; n++) {
int nvbn = plasma_tile_nview(B, n);
plasma_core_omp_sgemm(
trans, PlasmaNoTrans,
mvbm, nvbn, B.mb,
-1.0, A(k, m), ldak,
B(k, n), ldbk,
lalpha, B(m, n), ldbm,
sequence, request);
}
}
}
}
}
else {
if (trans == PlasmaNoTrans) {
// ==========================================
// PlasmaLeft / PlasmaLower / PlasmaNoTrans
// ==========================================
for (int k = 0; k < B.mt; k++) {
int mvbk = plasma_tile_mview(B, k);
int ldak = plasma_tile_mmain(A, k);
int ldbk = plasma_tile_mmain(B, k);
float lalpha = k == 0 ? alpha : 1.0;
for (int n = 0; n < B.nt; n++) {
int nvbn = plasma_tile_nview(B, n);
if (ipiv != NULL) {
plasma_desc_t view = plasma_desc_view(B,
0, n*A.nb,
A.m, nvbn);
view.type = PlasmaGeneral;
// TODO: nested parallelization like getrf
#pragma omp taskwait
if (sequence->status == PlasmaSuccess) {
plasma_core_sgeswp(PlasmaRowwise, view, k*A.nb+1, k*A.nb+mvbk, ipiv, 1);
}
}
plasma_core_omp_strsm(
side, uplo, trans, diag,
mvbk, nvbn,
lalpha, A(k, k), ldak,
B(k, n), ldbk,
sequence, request);
}
for (int m = k+1; m < imin(k+A.klt, A.mt); m++) {
int mvbm = plasma_tile_mview(B, m);
int ldam = plasma_tile_mmain(A, m);
int ldbm = plasma_tile_mmain(B, m);
for (int n = 0; n < B.nt; n++) {
int nvbn = plasma_tile_nview(B, n);
plasma_core_omp_sgemm(
PlasmaNoTrans, PlasmaNoTrans,
mvbm, nvbn, B.mb,
-1.0, A(m, k), ldam,
B(k, n), ldbk,
lalpha, B(m, n), ldbm,
sequence, request);
}
}
}
}
else {
// ==============================================
// PlasmaLeft / PlasmaLower / Plasma[Conj]Trans
// ==============================================
for (int k = 0; k < B.mt; k++) {
int mvbk = plasma_tile_mview(B, B.mt-k-1);
int ldak = plasma_tile_mmain(A, B.mt-k-1);
int ldbk = plasma_tile_mmain(B, B.mt-k-1);
float lalpha = k == 0 ? alpha : 1.0;
for (int m = (B.mt-k-1)+1; m < imin((B.mt-k-1)+A.klt, A.mt); m++) {
int mvbm = plasma_tile_mview(B, m);
int ldam = plasma_tile_mmain(A, m);
int ldbm = plasma_tile_mmain(B, m);
for (int n = 0; n < B.nt; n++) {
int nvbn = plasma_tile_nview(B, n);
plasma_core_omp_sgemm(
trans, PlasmaNoTrans,
mvbk, nvbn, mvbm,
-1.0, A(m, B.mt-k-1), ldam,
B(m, n ), ldbm,
lalpha, B(B.mt-k-1, n), ldbk,
sequence, request);
}
}
for (int n = 0; n < B.nt; n++) {
int nvbn = plasma_tile_nview(B, n);
plasma_core_omp_strsm(
side, uplo, trans, diag,
mvbk, nvbn,
lalpha, A(B.mt-k-1, B.mt-k-1), ldak,
B(B.mt-k-1, n), ldbk,
sequence, request);
if (ipiv != NULL) {
int k1 = 1+(B.mt-k-1)*A.nb;
int k2 = k1+mvbk-1;
plasma_desc_t view = plasma_desc_view(B,
0, n*A.nb,
A.m, nvbn);
view.type = PlasmaGeneral;
#pragma omp taskwait
if (sequence->status == PlasmaSuccess) {
plasma_core_sgeswp(PlasmaRowwise, view, k1, k2, ipiv, -1);
}
}
}
}
}
}
}
else {
/*
* TODO: triangular-solve from right.
*/
}
return;
}
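/*
 * Editor's sketch, not part of PLASMA: a plain scalar reference for what the
 * PlasmaLeft / PlasmaUpper / PlasmaNoTrans branch above computes, ignoring
 * tiling, pivoting, and task scheduling. It assumes (for illustration only)
 * that U is an n-by-n non-unit upper-triangular matrix with kut superdiagonals
 * stored as an ordinary column-major dense array, and solves U*X = alpha*B in
 * place, bottom-up, just like the k loop over B.mt above.
 */
static void tbsm_upper_notrans_reference(int n, int nrhs, int kut, float alpha,
                                         const float *U, int ldu,
                                         float *B, int ldb)
{
    for (int j = 0; j < nrhs; j++) {
        for (int i = n-1; i >= 0; i--) {
            float s = alpha*B[i + j*ldb];
            // only superdiagonals inside the band can contribute
            int last = (i + kut < n-1) ? i + kut : n-1;
            for (int k = i+1; k <= last; k++)
                s -= U[i + k*ldu]*B[k + j*ldb];
            B[i + j*ldb] = s/U[i + i*ldu];
        }
    }
}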
convolution_5x5.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv5x5s1_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias) {
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float *kernel = _kernel;
const float *bias = _bias;
#pragma omp parallel for
for (int p = 0; p < outch; p++) {
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q = 0; q < inch; q++) {
float *outptr = out;
float *outptr2 = outptr + outw;
const float *img0 = bottom_blob.channel(q);
const float *kernel0 = kernel + p * inch * 25 + q * 25;
const float *r0 = img0;
const float *r1 = img0 + w;
const float *r2 = img0 + w * 2;
const float *r3 = img0 + w * 3;
const float *r4 = img0 + w * 4;
const float *r5 = img0 + w * 5;
const float *k0 = kernel0;
const float *k1 = kernel0 + 5;
const float *k2 = kernel0 + 10;
const float *k3 = kernel0 + 15;
const float *k4 = kernel0 + 20;
int i = 0;
for (; i + 1 < outh; i += 2) {
int remain = outw;
for (; remain > 0; remain--) {
float sum = 0;
float sum2 = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r1[3] * k0[3];
sum2 += r1[4] * k0[4];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r2[3] * k1[3];
sum2 += r2[4] * k1[4];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
sum2 += r3[3] * k2[3];
sum2 += r3[4] * k2[4];
sum2 += r4[0] * k3[0];
sum2 += r4[1] * k3[1];
sum2 += r4[2] * k3[2];
sum2 += r4[3] * k3[3];
sum2 += r4[4] * k3[4];
sum2 += r5[0] * k4[0];
sum2 += r5[1] * k4[1];
sum2 += r5[2] * k4[2];
sum2 += r5[3] * k4[3];
sum2 += r5[4] * k4[4];
*outptr += sum;
*outptr2 += sum2;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
outptr++;
outptr2++;
}
r0 += 4 + w;
r1 += 4 + w;
r2 += 4 + w;
r3 += 4 + w;
r4 += 4 + w;
r5 += 4 + w;
outptr += outw;
outptr2 += outw;
}
for (; i < outh; i++) {
int remain = outw;
for (; remain > 0; remain--) {
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
*outptr += sum;
r0++;
r1++;
r2++;
r3++;
r4++;
outptr++;
}
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
}
}
}
}
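// Editor's note: with stride 1 and no padding the caller is expected to size
// the output as outw = w - 4 and outh = h - 4, and the accumulation above is
//   top[p][y][x] = bias[p] + sum over q, ky, kx of
//                  bottom[q][y+ky][x+kx] * kernel[p][q][ky][kx]
// for ky, kx in 0..4; the i += 2 loop merely produces two output rows per
// pass so that the four shared input rows r1..r4 are reused.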
GB_unop__bnot_int8_int8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__bnot_int8_int8)
// op(A') function: GB (_unop_tran__bnot_int8_int8)
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = ~(aij)
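// Editor's note (not generated code): an illustrative scalar form of this
// operator; the helper name is hypothetical and only restates cij = ~(aij)
// with int8_t input and output.
static inline int8_t GB_bnot_int8_scalar_example (int8_t aij)
{
    return ((int8_t) ~(aij)) ;
}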
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ~(x) ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = ~(z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BNOT || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__bnot_int8_int8)
(
int8_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = ~(z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = ~(z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__bnot_int8_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
force.h
#pragma once
namespace STD{
class CalcDensity{
kernel_t kernel;
public:
void operator () (const EPI::Dens* const ep_i, const PS::S32 Nip, const EPJ::Dens* const ep_j, const PS::S32 Njp, RESULT::Dens* const dens){
for(PS::S32 i = 0 ; i < Nip ; ++ i){
const EPI::Dens& ith = ep_i[i];
for(PS::S32 j = 0 ; j < Njp ; ++ j){
const EPJ::Dens& jth = ep_j[j];
const PS::F64vec dr = jth.pos - ith.pos;
dens[i].dens += jth.mass * kernel.W(dr, ith.smth);
}
#ifdef FLAG_GI
dens[i].dens = std::max(5.0, dens[i].dens);
#endif
dens[i].smth = PARAM::SMTH * pow(ith.mass / dens[i].dens, 1.0/(PS::F64)(PARAM::Dim));
}
}
};
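// Editor's note: the loops above evaluate the standard SPH density estimate
//   dens_i = sum_j m_j * W(r_i - r_j, h_i)
// and then reset the smoothing length from the fixed-mass relation
//   smth_i = SMTH * (m_i / dens_i)^(1/Dim),
// which is exactly what the two assignments to dens[i] express.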
void CalcPressure(PS::ParticleSystem<STD::RealPtcl>& sph_system){
#pragma omp parallel for
for(PS::S32 i = 0 ; i < sph_system.getNumberOfParticleLocal() ; ++ i){
sph_system[i].pres = sph_system[i].EoS->Pressure(sph_system[i].dens, sph_system[i].eng);
sph_system[i].snds = sph_system[i].EoS->SoundSpeed(sph_system[i].dens, sph_system[i].eng);
}
}
class CalcDerivative{
kernel_t kernel;
public:
void operator () (const EPI::Drvt* ep_i, const PS::S32 Nip, const EPJ::Drvt* ep_j, const PS::S32 Njp, RESULT::Drvt* const drvt){
for(PS::S32 i = 0; i < Nip ; ++ i){
const EPI::Drvt& ith = ep_i[i];
for(PS::S32 j = 0; j < Njp ; ++ j){
const EPJ::Drvt& jth = ep_j[j];
const PS::F64vec dr = ith.pos - jth.pos;
const PS::F64vec dv = ith.vel - jth.vel;
drvt[i].div_v += - jth.mass * dv * kernel.gradW(dr, ith.smth);
drvt[i].rot_v += - jth.mass * dv ^ kernel.gradW(dr, ith.smth);
drvt[i].grad_smth -= jth.mass / ith.smth * (PARAM::Dim * kernel.W(dr, ith.smth) + dr * kernel.gradW(dr, ith.smth));
}
drvt[i].grad_smth = 1.0 / (1.0 + ith.smth * drvt[i].grad_smth / (PARAM::Dim * ith.dens));
drvt[i].div_v /= ith.dens;
drvt[i].rot_v /= ith.dens;
}
}
};
class CalcHydroForce{
const kernel_t kernel;
public:
void operator () (const EPI::Hydro* const ep_i, const PS::S32 Nip, const EPJ::Hydro* const ep_j, const PS::S32 Njp, RESULT::Hydro* const hydro){
for(PS::S32 i = 0; i < Nip ; ++ i){
PS::F64 v_sig_max = 0.0;
const EPI::Hydro& ith = ep_i[i];
for(PS::S32 j = 0; j < Njp ; ++ j){
const EPJ::Hydro& jth = ep_j[j];
const PS::F64vec dr = ith.pos - jth.pos;
const PS::F64vec dv = ith.vel - jth.vel;
const PS::F64 w_ij = (dv * dr < 0) ? dv * dr / sqrt(dr * dr) : 0;
const PS::F64 v_sig = ith.snds + jth.snds - 3.0 * w_ij;
v_sig_max = std::max(v_sig_max, v_sig);
PS::F64 AV = - PARAM::AV_STRENGTH * 0.5 * v_sig * w_ij / (0.5 * (ith.dens + jth.dens)) * 0.5 * (ith.Bal + jth.Bal);
if(PARAM::FLAG_R00 == true){
AV *= 0.5 * (ith.AVa + jth.AVa);
}
#if 1
const PS::F64vec gradW = 0.5 * (kernel.gradW(dr, ith.smth) * ith.grad_smth + kernel.gradW(dr, jth.smth) * jth.grad_smth);
hydro[i].acc -= jth.mass * (ith.grad_smth * ith.pres / (ith.dens * ith.dens) * kernel.gradW(dr, ith.smth) + jth.grad_smth * jth.pres / (jth.dens * jth.dens) * kernel.gradW(dr, jth.smth) + AV * gradW);
hydro[i].eng_dot += jth.mass * (ith.grad_smth * ith.pres / (ith.dens * ith.dens) + 0.5 * AV) * dv * gradW;
#else
const PS::F64vec gradW = 0.5 * (kernel.gradW(dr, ith.smth) + kernel.gradW(dr, jth.smth));
hydro[i].acc -= jth.mass * (ith.pres / (ith.dens * ith.dens) + jth.pres / (jth.dens * jth.dens) + AV) * gradW;
hydro[i].eng_dot += jth.mass * (ith.pres / (ith.dens * ith.dens) + 0.5 * AV) * dv * gradW;
#endif
}
hydro[i].dt = PARAM::C_CFL * 2.0 * ith.smth / v_sig_max;
}
}
};
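// Editor's note: in the loop above, w_ij is the approaching part of the
// relative velocity along r_ij, v_sig = c_i + c_j - 3*w_ij is the pairwise
// signal velocity, AV is a signal-velocity artificial-viscosity term scaled
// by the Bal factors (presumably Balsara-type switches), and the time step
// is the CFL-style bound dt_i = C_CFL * 2*h_i / max_j v_sig.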
template <class TPtclJ> class CalcGravityForce{
static const double G;
public:
void operator () (const EPI::Grav* const __restrict ep_i, const PS::S32 Nip, const TPtclJ* const __restrict ep_j, const PS::S32 Njp, RESULT::Grav* const grav){
for(PS::S32 i = 0; i < Nip ; ++ i){
const EPI::Grav& ith = ep_i[i];
for(PS::S32 j = 0; j < Njp ; ++ j){
const TPtclJ& jth = ep_j[j];
const PS::F64vec dr = ith.pos - jth.pos;
const PS::F64 dr2 = dr * dr;
const PS::F64 dr_inv = 1.0 / sqrt(dr2 + ith.getEps2());
const PS::F64 m_dr3_inv = jth.mass * math::pow3(dr_inv);
grav[i].acc -= G * m_dr3_inv * dr;
grav[i].pot -= G * jth.mass * dr_inv;
}
}
}
};
template <class TPtclJ>
const double CalcGravityForce<TPtclJ>::G = 6.67e-11;
}
trmm_x_sky_n_hi_row.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
ALPHA_INT num_threads = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for (ALPHA_INT i = 0; i < mat->rows; i++)
for(ALPHA_INT j = 0; j < columns; j++)
alpha_mul(y[index2(i, j, ldy)], y[index2(i, j, ldy)], beta);
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for (ALPHA_INT cc = 0; cc < columns; ++cc)
{
for (ALPHA_INT ac = 0; ac < mat->cols; ++ac)
{
ALPHA_INT start = mat->pointers[ac];
ALPHA_INT end = mat->pointers[ac + 1];
ALPHA_INT idx = 1;
ALPHA_INT eles_num = end - start;
for (ALPHA_INT ai = start; ai < end; ++ai)
{
ALPHA_INT cr = ac - eles_num + idx;
if (ac >= cr)
{
ALPHA_Number t;
alpha_mul(t, alpha, mat->values[ai]);
alpha_madde(y[index2(cr, cc, ldy)], t, x[index2(ac, cc, ldx)]);
}
idx++;
}
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
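/* Editor's note: a worked example of the skyline layout the loops above decode
 * (inferred from the index arithmetic, so treat it as an illustration). For the
 * 3x3 upper-triangular matrix
 *     [ a  b  0 ]
 *     [ 0  c  d ]
 *     [ 0  0  e ]
 * each column stores a contiguous run of entries ending at the diagonal:
 *     values   = { a, b, c, d, e }
 *     pointers = { 0, 1, 3, 5 }
 * so for column ac = 1, start = 1, end = 3, eles_num = 2, and the rows
 * recovered by cr = ac - eles_num + idx are 0 (b) and 1 (c), matching the
 * loop above. */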
atomic-1.c
/* { dg-do run } */
/* { dg-options "-O2 -march=pentium" { target { { i?86-*-* x86_64-*-* } && ilp32 } } } */
#ifdef __i386__
#include "cpuid.h"
#endif
extern void abort (void);
double d;
struct
{
int i;
double e;
int j;
} x;
void
f1 (void)
{
#pragma omp atomic
d += 7.5;
#pragma omp atomic
d *= 2.5;
#pragma omp atomic
d /= 0.25;
}
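/* With d initialized to 1.0 in main, the three atomic updates above give
   (1.0 + 7.5) * 2.5 / 0.25 = 8.5 * 2.5 / 0.25 = 21.25 / 0.25 = 85.0,
   which is the value main checks; f2 performs the same arithmetic on the
   struct member x.e, and main only runs the test on ia32 when CMPXCHG8B is
   available, since the 64-bit atomic updates need it there.  */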
void
f2 (void)
{
#pragma omp atomic
x.e += 7.5;
#pragma omp atomic
x.e *= 2.5;
#pragma omp atomic
x.e /= 0.25;
}
int
main (void)
{
#ifdef __i386__
unsigned int eax, ebx, ecx, edx;
if (!__get_cpuid (1, &eax, &ebx, &ecx, &edx))
return 0;
if (!(edx & bit_CMPXCHG8B))
return 0;
#endif
d = 1.0;
f1 ();
if (d != 85.0)
abort ();
x.e = 1.0;
f2 ();
if (x.i != 0 || x.e != 85.0 || x.j != 0)
abort ();
return 0;
}
for_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp=libiomp5 -verify %s
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for simd'}}
#pragma omp for simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for simd'}}
#pragma omp for simd foo
void test_no_clause() {
int i;
#pragma omp for simd
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp for simd' must be a for loop}}
#pragma omp for simd
++i;
}
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp parallel
#pragma omp for simd
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause() {
int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd foo bar
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers() {
int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd;
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd linear(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd private(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
void test_safelen() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp for simd safelen
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp for simd safelen()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for simd safelen 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp for simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp for simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4, 8)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd safelen(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd safelen(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a positive integer value}}
#pragma omp for simd safelen(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a positive integer value}}
#pragma omp for simd safelen(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a positive integer value}}
#pragma omp for simd safelen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_collapse() {
int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp for simd collapse
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd collapse(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd collapse()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd collapse(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd collapse(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for simd collapse 4)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
#pragma omp for simd collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd collapse(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd collapse(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp for simd collapse(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp for simd collapse(0)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp for simd collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for simd collapse(2)
for (i = 0; i < 16; ++i)
// expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for simd' directive into a parallel or another task region?}}
for (int j = 0; j < 16; ++j)
// expected-error@+2 {{private variable cannot be reduction}}
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp for simd reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
void test_linear() {
int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp for simd linear(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp for simd linear(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp for simd linear(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp for simd linear(x, y, z)
for (i = 0; i < 16; ++i)
;
int x, y;
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear(x :)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(x :, )
for (i = 0; i < 16; ++i)
;
#pragma omp for simd linear(x : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp for simd linear(x : 2 * 2)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(x : 1, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(x : 1, y, z : 1)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be linear}}
#pragma omp for simd linear(x) linear(x)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as private}}
// expected-error@+1 {{private variable cannot be linear}}
#pragma omp for simd private(x) linear(x)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be private}}
#pragma omp for simd linear(x) private(x)
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}}
#pragma omp for simd linear(x, y : 0)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be lastprivate}}
#pragma omp for simd linear(x) lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-note@+2 {{defined as lastprivate}}
// expected-error@+1 {{lastprivate variable cannot be linear}}
#pragma omp for simd lastprivate(x) linear(x)
for (i = 0; i < 16; ++i)
;
}
void test_aligned() {
int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp for simd aligned(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp for simd aligned(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp for simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp for simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp for simd aligned(x)
for (i = 0; i < 16; ++i)
;
#pragma omp for simd aligned(z)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned(x :)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(x :, )
for (i = 0; i < 16; ++i)
;
#pragma omp for simd aligned(x : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp for simd aligned(x : 2 * 2)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(x : 1, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(x : 1, y, z : 1)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp for simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp for simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as aligned}}
// expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp for simd aligned(x) aligned(z, x)
for (i = 0; i < 16; ++i)
;
// expected-note@+3 {{defined as aligned}}
// expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
// expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp for simd aligned(x, y, z) aligned(y, z)
for (i = 0; i < 16; ++i)
;
}
void test_private() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd private(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd private(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd private(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd private()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd private(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp for simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd lastprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd lastprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd lastprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp for simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd firstprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd firstprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd firstprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd firstprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd firstprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for simd firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp for simd lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for simd
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for simd
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
analyze.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
int main(int argc, char * argv[])
{
FILE * fp1;
int i, j, n, fd, ppn, nnodes;
ssize_t rc;
char host[80], filename[80];
char * allhosts, * ptr;
int cpulist[40];
int iter, maxiter = 80000; // 400 sec at 5 msec per sample
double *** alldata;
double mintime, maxtime, mean, samples;
int rank, minhost, mincpu, maxhost, maxcpu;
double ssq, sigma, relative_variation;
double * compmax, * compmin;
int bin, numbins = 50;
double xhisto, hmin, histo_bin_width, prob;
double avgmin, sumtime, efficiency;
long * histo;
ppn = 40;
j = 0;
for (i=0; i<80; i+=4) {
cpulist[j] = i;
j++;
}
for (i=88; i<168; i+=4) {
cpulist[j] = i;
j++;
}
printf("starting with ppn = %d\n", ppn);
fp1 = fopen("hf", "r");
if (fp1 == NULL) {
printf("can't open hf ... exiting\n");
exit(0);
}
n = 0;
while (EOF != fscanf(fp1, "%s", host)) n++;
nnodes = n;
printf("got nnodes = %d\n", nnodes);
rewind(fp1);
histo = (long *) malloc(numbins*sizeof(long));
allhosts = (char *) malloc(nnodes*sizeof(host));
alldata = (double ***) malloc(nnodes*sizeof(double *));
alldata[0] = (double **) malloc(nnodes*ppn*sizeof(double *));
alldata[0][0] = (double *) malloc(nnodes*ppn*maxiter*sizeof(double));
for (n=0; n<nnodes; n++) alldata[n] = alldata[0] + n*ppn;
for (n=0; n<nnodes; n++) {
for (i=0; i<ppn; i++) {
alldata[n][i] = alldata[0][0] + i*maxiter + n*ppn*maxiter;
}
}
// read in all data
for (n=0; n<nnodes; n++) {
fscanf(fp1, "%s", host);
strcpy(allhosts + n*sizeof(host), host);
printf("reading data for host %d = %s ...\n", n, host);
#pragma omp parallel for private(i,j,filename,fd,rc)
for (i=0; i<ppn; i++) {
j = cpulist[i];
sprintf(filename, "%s.%d", host, j);
fd = open(filename, O_RDONLY);
if (fd < 0) {
printf("missing data file %s ... exiting\n", filename);
exit(0);
}
rc = read(fd, &alldata[n][i][0], maxiter*sizeof(double));
if (rc < 0) {
printf("read failed for file %s ... exiting\n", filename);
exit(0);
}
close(fd);
}
}
// compute global min/max (and the mean) over all ranks
mean = 0.0;
mintime = 1.0e30;
maxtime = 0.0;
minhost = 0;
mincpu = 0;
maxhost = 0;
maxcpu = 0;
for (n=0; n<nnodes; n++) {
for (i=0; i<ppn; i++) {
for (iter=0; iter<maxiter; iter++) {
mean += alldata[n][i][iter];
if (alldata[n][i][iter] > maxtime) {
maxtime = alldata[n][i][iter];
maxhost = n;
maxcpu = cpulist[i];
}
if (alldata[n][i][iter] < mintime) {
mintime = alldata[n][i][iter];
minhost = n;
mincpu = cpulist[i];
}
}
}
}
samples = ((double) nnodes) * ((double) ppn) * ((double) maxiter);
mean = mean / samples;
printf("\n");
printf("global min time = %.3lf msec on host %s cpu %d \n", mintime, allhosts + minhost*sizeof(host), mincpu);
printf("\n");
printf("global max time = %.3lf msec on host %s cpu %d \n", maxtime, allhosts + maxhost*sizeof(host), maxcpu);
printf("\n");
printf("global avg time = %.3lf\n", mean);
printf("\n");
ssq = 0.0;
for (n=0; n<nnodes; n++) {
for (i=0; i<ppn; i++) {
for (iter=0; iter<maxiter; iter++) {
ssq += (alldata[n][i][iter] - mean) * (alldata[n][i][iter] - mean);
}
}
}
sigma = sqrt(ssq/samples);
relative_variation = 100.0 * sigma / mean;
printf("overall relative variation = %.2lf percent\n", relative_variation);
// compute mean and relative variation by host
samples = ((double) ppn) * ((double) maxiter);
// compmax is the max compute time in any given iteration
compmax = (double *) malloc(maxiter*sizeof(double));
// compmin is the min compute time per node
compmin = (double *) malloc(nnodes*sizeof(double));
for (iter=0; iter<maxiter; iter++) compmax[iter] = 0.0;
for (n=0; n<nnodes; n++) compmin[n] = 1.0e30;
printf("\n");
printf("percent variation = 100*sigma/mean for the max computation times per step by node\n");
printf(" host mean(msec) percent variation\n");
for (n=0; n<nnodes; n++) {
// the compmax used here is node-local
mean = 0.0;
for (iter=0; iter<maxiter; iter++) {
compmax[iter] = 0.0;
for (i=0; i<ppn; i++) {
if (alldata[n][i][iter] > compmax[iter]) compmax[iter] = alldata[n][i][iter];
if (alldata[n][i][iter] < compmin[n]) compmin[n] = alldata[n][i][iter];
mean += alldata[n][i][iter];
}
}
mean = mean / samples;
ssq = 0.0;
for (i=0; i<ppn; i++) {
for (iter=0; iter<maxiter; iter++) {
ssq += (alldata[n][i][iter] - compmax[iter]) * (alldata[n][i][iter] - compmax[iter]);
}
}
sigma = sqrt(ssq/samples);
relative_variation = 100.0 * sigma / mean;
printf("%14s %10.2lf %10.2lf\n", allhosts + n*sizeof(host), mean, relative_variation);
}
avgmin = 0.0;
for (n=0; n<nnodes; n++) avgmin += compmin[n];
// avgmin is the min compute time per node, averaged over all nodes
avgmin = avgmin / ((double) nnodes);
// re-define compmax to be the max time in any rank for a given iteration
for (iter=0; iter<maxiter; iter++) {
compmax[iter] = 0.0;
for (n=0; n<nnodes; n++) {
for (i=0; i<ppn; i++) {
if (alldata[n][i][iter] > compmax[iter]) compmax[iter] = alldata[n][i][iter];
}
}
}
// sumtime is the time expected for a parallel job
sumtime = 0.0;
for (iter=0; iter<maxiter; iter++) sumtime += compmax[iter];
efficiency = ((double) maxiter) * avgmin / sumtime;
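// The estimate compares an idealized run, in which every step costs avgmin
// (the per-node minimum sample time averaged over nodes), against a
// synchronized parallel run in which each step costs the slowest rank:
// efficiency = (maxiter * avgmin) / sum over iter of max over ranks of time.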
printf("\n");
printf("estimated overall efficiency = %.3lf\n", efficiency);
// histogram all samples
histo_bin_width = (maxtime - mintime)/((double) (numbins - 1));
hmin = mintime - 0.5*histo_bin_width;
for (bin = 0; bin < numbins; bin++) histo[bin] = 0L;
for (n=0; n<nnodes; n++) {
for (i=0; i<ppn; i++) {
for (iter=0; iter<maxiter; iter++) {
if (histo_bin_width > 0.0) bin = (int) ((alldata[n][i][iter] - hmin)/histo_bin_width);
else bin = 0;
if ((bin >= 0) && (bin < numbins)) histo[bin]++;
}
}
}
printf("\n");
printf("histogram of step times for all ranks\n");
printf(" msec count density\n");
for (bin = 0; bin < numbins; bin++) {
xhisto = mintime + histo_bin_width*((double) bin);
prob = 1.0e-3*((double) histo[bin]) / histo_bin_width;
printf("%10.3lf %10ld %20.4lf\n", xhisto, histo[bin], prob);
}
printf("\n");
printf("summary data by rank:\n");
printf(" host cpu mean(msec) relative variation (percent)\n");
for (n=0; n<nnodes; n++) {
for (i=0; i<ppn; i++) {
mean = 0.0;
for (iter=0; iter<maxiter; iter++) mean += alldata[n][i][iter];
mean = mean / ((double) maxiter);
ssq = 0.0;
for (iter=0; iter<maxiter; iter++) ssq += (alldata[n][i][iter] - mean) * (alldata[n][i][iter] - mean);
sigma = sqrt(ssq/((double) maxiter));
relative_variation = 100.0 * sigma / mean;
printf("%14s %6d %8.2lf %8.2lf\n", allhosts + n*sizeof(host), cpulist[i], mean, relative_variation);
}
}
return 0;
}
libimagequant.c
/*
** © 2009-2018 by Kornel Lesiński.
** © 1989, 1991 by Jef Poskanzer.
** © 1997, 2000, 2002 by Greg Roelofs; based on an idea by Stefan Schneider.
**
** See COPYRIGHT file for license.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <limits.h>
#if !(defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199900L) && !(defined(_MSC_VER) && _MSC_VER >= 1800)
#error "This program requires C99, e.g. -std=c99 switch in GCC or it requires MSVC 18.0 or higher."
#error "Ignore torrent of syntax errors that may follow. It's only because compiler is set to use too old C version."
#endif
#ifdef _OPENMP
#include <omp.h>
#define LIQ_TEMP_ROW_WIDTH(img_width) (((img_width) | 15) + 1) /* keep alignment & leave space between rows to avoid cache line contention */
#else
#define LIQ_TEMP_ROW_WIDTH(img_width) (img_width)
#define omp_get_max_threads() 1
#define omp_get_thread_num() 0
#endif
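/* Editor's note: ((img_width) | 15) + 1 rounds the row width up to the next
   multiple of 16 that is strictly greater than img_width, e.g. 100 -> 112 and
   112 -> 128, so each per-thread temp row gets at least one pixel of padding;
   that padding is the "space between rows" the macro comment refers to. */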
#include "libimagequant.h"
#include "pam.h"
#include "mediancut.h"
#include "nearest.h"
#include "blur.h"
#include "kmeans.h"
#define LIQ_HIGH_MEMORY_LIMIT (1<<26) /* avoid allocating buffers larger than 64MB */
// each structure has a pointer as a unique identifier that allows type checking at run time
static const char liq_attr_magic[] = "liq_attr";
static const char liq_image_magic[] = "liq_image";
static const char liq_result_magic[] = "liq_result";
static const char liq_histogram_magic[] = "liq_histogram";
static const char liq_remapping_result_magic[] = "liq_remapping_result";
static const char liq_freed_magic[] = "free";
#define CHECK_STRUCT_TYPE(attr, kind) liq_crash_if_invalid_handle_pointer_given((const liq_attr*)attr, kind ## _magic)
#define CHECK_USER_POINTER(ptr) liq_crash_if_invalid_pointer_given(ptr)
struct liq_attr {
const char *magic_header;
void* (*malloc)(size_t);
void (*free)(void*);
double target_mse, max_mse, kmeans_iteration_limit;
float min_opaque_val;
unsigned int max_colors, max_histogram_entries;
unsigned int min_posterization_output /* user setting */, min_posterization_input /* speed setting */;
unsigned int kmeans_iterations, feedback_loop_trials;
bool last_index_transparent, use_contrast_maps;
unsigned char use_dither_map;
unsigned char speed;
unsigned char progress_stage1, progress_stage2, progress_stage3;
liq_progress_callback_function *progress_callback;
void *progress_callback_user_info;
liq_log_callback_function *log_callback;
void *log_callback_user_info;
liq_log_flush_callback_function *log_flush_callback;
void *log_flush_callback_user_info;
};
struct liq_image {
const char *magic_header;
void* (*malloc)(size_t);
void (*free)(void*);
f_pixel *f_pixels;
rgba_pixel **rows;
double gamma;
unsigned int width, height;
unsigned char *importance_map, *edges, *dither_map;
rgba_pixel *pixels, *temp_row;
f_pixel *temp_f_row;
liq_image_get_rgba_row_callback *row_callback;
void *row_callback_user_info;
liq_image *background;
float min_opaque_val;
f_pixel fixed_colors[256];
unsigned short fixed_colors_count;
bool free_pixels, free_rows, free_rows_internal;
};
typedef struct liq_remapping_result {
const char *magic_header;
void* (*malloc)(size_t);
void (*free)(void*);
unsigned char *pixels;
colormap *palette;
liq_progress_callback_function *progress_callback;
void *progress_callback_user_info;
liq_palette int_palette;
double gamma, palette_error;
float dither_level;
unsigned char use_dither_map;
unsigned char progress_stage1;
} liq_remapping_result;
struct liq_result {
const char *magic_header;
void* (*malloc)(size_t);
void (*free)(void*);
liq_remapping_result *remapping;
colormap *palette;
liq_progress_callback_function *progress_callback;
void *progress_callback_user_info;
liq_palette int_palette;
float dither_level;
double gamma, palette_error;
int min_posterization_output;
unsigned char use_dither_map;
};
struct liq_histogram {
const char *magic_header;
void* (*malloc)(size_t);
void (*free)(void*);
struct acolorhash_table *acht;
double gamma;
f_pixel fixed_colors[256];
unsigned short fixed_colors_count;
unsigned short ignorebits;
bool had_image_added;
};
static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels) LIQ_NONNULL;
static void contrast_maps(liq_image *image) LIQ_NONNULL;
static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output) LIQ_NONNULL;
static const rgba_pixel *liq_image_get_row_rgba(liq_image *input_image, unsigned int row) LIQ_NONNULL;
static bool liq_image_get_row_f_init(liq_image *img) LIQ_NONNULL;
static const f_pixel *liq_image_get_row_f(liq_image *input_image, unsigned int row) LIQ_NONNULL;
static void liq_remapping_result_destroy(liq_remapping_result *result) LIQ_NONNULL;
static liq_error pngquant_quantize(histogram *hist, const liq_attr *options, const int fixed_colors_count, const f_pixel fixed_colors[], const double gamma, bool fixed_result_colors, liq_result **) LIQ_NONNULL;
static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output) LIQ_NONNULL;
LIQ_NONNULL static void liq_verbose_printf(const liq_attr *context, const char *fmt, ...)
{
if (context->log_callback) {
va_list va;
va_start(va, fmt);
int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0
va_end(va);
LIQ_ARRAY(char, buf, required_space);
va_start(va, fmt);
vsnprintf(buf, required_space, fmt, va);
va_end(va);
context->log_callback(context, buf, context->log_callback_user_info);
}
}
LIQ_NONNULL inline static void verbose_print(const liq_attr *attr, const char *msg)
{
if (attr->log_callback) {
attr->log_callback(attr, msg, attr->log_callback_user_info);
}
}
LIQ_NONNULL static void liq_verbose_printf_flush(liq_attr *attr)
{
if (attr->log_flush_callback) {
attr->log_flush_callback(attr, attr->log_flush_callback_user_info);
}
}
LIQ_NONNULL static bool liq_progress(const liq_attr *attr, const float percent)
{
return attr->progress_callback && !attr->progress_callback(percent, attr->progress_callback_user_info);
}
LIQ_NONNULL static bool liq_remap_progress(const liq_remapping_result *quant, const float percent)
{
return quant->progress_callback && !quant->progress_callback(percent, quant->progress_callback_user_info);
}
#if USE_SSE
inline static bool is_sse_available()
{
#if (defined(__x86_64__) || defined(__amd64) || defined(_WIN64))
return true;
#elif _MSC_VER
int info[4];
__cpuid(info, 1);
/* bool is implemented as a built-in type of size 1 in MSVC */
return info[3] & (1<<26) ? true : false;
#else
int a,b,c,d;
cpuid(1, a, b, c, d);
return d & (1<<25); // edx bit 25 is set when SSE is present
#endif
}
#endif
/* make it clear in backtrace when user-supplied handle points to invalid memory */
NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header);
LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header)
{
if (!user_supplied_pointer) {
return false;
}
if (user_supplied_pointer->magic_header == liq_freed_magic) {
fprintf(stderr, "%s used after being freed", expected_magic_header);
        // this is not normal error handling; this is a programmer error that should crash the program.
        // the program cannot safely continue if memory has been used after it's been freed.
        // abort() is nasty, but a security vulnerability may be worse.
abort();
}
return user_supplied_pointer->magic_header == expected_magic_header;
}
NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer);
LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer)
{
if (!pointer) {
return false;
}
// Force a read from the given (potentially invalid) memory location in order to check early whether this crashes the program or not.
// It doesn't matter what value is read, the code here is just to shut the compiler up about unused read.
char test_access = *((volatile char *)pointer);
return test_access || true;
}
LIQ_NONNULL static void liq_log_error(const liq_attr *attr, const char *msg)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
liq_verbose_printf(attr, " error: %s", msg);
}
static double quality_to_mse(long quality)
{
if (quality == 0) {
return MAX_DIFF;
}
if (quality == 100) {
return 0;
}
    // the curve is fudged to be roughly similar to libjpeg's quality scale,
    // except the lowest 10, which allow a really low number of colors
const double extra_low_quality_fudge = MAX(0,0.016/(0.001+quality) - 0.001);
return extra_low_quality_fudge + 2.5/pow(210.0 + quality, 1.2) * (100.1-quality)/100.0;
}
static unsigned int mse_to_quality(double mse)
{
for(int i=100; i > 0; i--) {
if (mse <= quality_to_mse(i) + 0.000001) { // + epsilon for floating point errors
return i;
}
}
return 0;
}
/** internally MSE is a sum of all channels with pixels 0..1 range,
but other software gives per-RGB-channel MSE for 0..255 range */
static double mse_to_standard_mse(double mse) {
return mse * 65536.0/6.0;
}
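/* Illustrative note (not part of the library): the two conversions above
   round-trip within the epsilon used in mse_to_quality(), e.g.
       mse_to_quality(quality_to_mse(80)) == 80
   liq_set_quality() below stores quality_to_mse(target) and quality_to_mse(minimum)
   as target_mse and max_mse respectively. */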
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_quality(liq_attr* attr, int minimum, int target)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (target < 0 || target > 100 || target < minimum || minimum < 0) return LIQ_VALUE_OUT_OF_RANGE;
attr->target_mse = quality_to_mse(target);
attr->max_mse = quality_to_mse(minimum);
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_min_quality(const liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
return mse_to_quality(attr->max_mse);
}
LIQ_EXPORT LIQ_NONNULL int liq_get_max_quality(const liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
return mse_to_quality(attr->target_mse);
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_max_colors(liq_attr* attr, int colors)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (colors < 2 || colors > 256) return LIQ_VALUE_OUT_OF_RANGE;
attr->max_colors = colors;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_max_colors(const liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
return attr->max_colors;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_posterization(liq_attr *attr, int bits)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (bits < 0 || bits > 4) return LIQ_VALUE_OUT_OF_RANGE;
attr->min_posterization_output = bits;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_min_posterization(const liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
return attr->min_posterization_output;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_speed(liq_attr* attr, int speed)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (speed < 1 || speed > 10) return LIQ_VALUE_OUT_OF_RANGE;
unsigned int iterations = MAX(8-speed, 0);
iterations += iterations * iterations/2;
attr->kmeans_iterations = iterations;
attr->kmeans_iteration_limit = 1.0/(double)(1<<(23-speed));
attr->feedback_loop_trials = MAX(56-9*speed, 0);
attr->max_histogram_entries = (1<<17) + (1<<18)*(10-speed);
attr->min_posterization_input = (speed >= 8) ? 1 : 0;
attr->use_dither_map = (speed <= (omp_get_max_threads() > 1 ? 7 : 5)); // parallelized dither map might speed up floyd remapping
if (attr->use_dither_map && speed < 3) {
attr->use_dither_map = 2; // always
}
attr->use_contrast_maps = (speed <= 7) || attr->use_dither_map;
attr->speed = speed;
attr->progress_stage1 = attr->use_contrast_maps ? 20 : 8;
if (attr->feedback_loop_trials < 2) {
attr->progress_stage1 += 30;
}
attr->progress_stage3 = 50 / (1+speed);
attr->progress_stage2 = 100 - attr->progress_stage1 - attr->progress_stage3;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_speed(const liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
return attr->speed;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_output_gamma(liq_result* res, double gamma)
{
if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER;
if (gamma <= 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE;
if (res->remapping) {
liq_remapping_result_destroy(res->remapping);
res->remapping = NULL;
}
res->gamma = gamma;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_opacity(liq_attr* attr, int min)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (min < 0 || min > 255) return LIQ_VALUE_OUT_OF_RANGE;
attr->min_opaque_val = (double)min/255.0;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_min_opacity(const liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
return MIN(255.f, 256.f * attr->min_opaque_val);
}
LIQ_EXPORT LIQ_NONNULL void liq_set_last_index_transparent(liq_attr* attr, int is_last)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
attr->last_index_transparent = !!is_last;
}
LIQ_EXPORT void liq_attr_set_progress_callback(liq_attr *attr, liq_progress_callback_function *callback, void *user_info)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
attr->progress_callback = callback;
attr->progress_callback_user_info = user_info;
}
LIQ_EXPORT void liq_result_set_progress_callback(liq_result *result, liq_progress_callback_function *callback, void *user_info)
{
if (!CHECK_STRUCT_TYPE(result, liq_result)) return;
result->progress_callback = callback;
result->progress_callback_user_info = user_info;
}
LIQ_EXPORT void liq_set_log_callback(liq_attr *attr, liq_log_callback_function *callback, void* user_info)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
liq_verbose_printf_flush(attr);
attr->log_callback = callback;
attr->log_callback_user_info = user_info;
}
LIQ_EXPORT void liq_set_log_flush_callback(liq_attr *attr, liq_log_flush_callback_function *callback, void* user_info)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
attr->log_flush_callback = callback;
attr->log_flush_callback_user_info = user_info;
}
LIQ_EXPORT liq_attr* liq_attr_create()
{
return liq_attr_create_with_allocator(NULL, NULL);
}
LIQ_EXPORT LIQ_NONNULL void liq_attr_destroy(liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
return;
}
liq_verbose_printf_flush(attr);
attr->magic_header = liq_freed_magic;
attr->free(attr);
}
LIQ_EXPORT LIQ_NONNULL liq_attr* liq_attr_copy(const liq_attr *orig)
{
if (!CHECK_STRUCT_TYPE(orig, liq_attr)) {
return NULL;
}
liq_attr *attr = orig->malloc(sizeof(liq_attr));
if (!attr) return NULL;
*attr = *orig;
return attr;
}
static void *liq_aligned_malloc(size_t size)
{
unsigned char *ptr = malloc(size + 16);
if (!ptr) {
return NULL;
}
uintptr_t offset = 16 - ((uintptr_t)ptr & 15); // also reserves 1 byte for ptr[-1]
ptr += offset;
assert(0 == (((uintptr_t)ptr) & 15));
ptr[-1] = offset ^ 0x59; // store how much pointer was shifted to get the original for free()
return ptr;
}
LIQ_NONNULL static void liq_aligned_free(void *inptr)
{
unsigned char *ptr = inptr;
size_t offset = ptr[-1] ^ 0x59;
assert(offset > 0 && offset <= 16);
free(ptr - offset);
}
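/* Worked example of the alignment bookkeeping above (illustrative): if malloc()
   returns an address ending in 0x...8, offset = 16 - 8 = 8, the pointer handed out
   is original+8 (16-byte aligned), and ptr[-1] stores 8 ^ 0x59 so that
   liq_aligned_free() can recover the original allocation. If malloc() already
   returned a 16-byte aligned block, offset is 16, which uses the full 16 extra
   bytes while still keeping ptr[-1] inside the allocation. */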
LIQ_EXPORT liq_attr* liq_attr_create_with_allocator(void* (*custom_malloc)(size_t), void (*custom_free)(void*))
{
#if USE_SSE
if (!is_sse_available()) {
return NULL;
}
#endif
if (!custom_malloc && !custom_free) {
custom_malloc = liq_aligned_malloc;
custom_free = liq_aligned_free;
} else if (!custom_malloc != !custom_free) {
return NULL; // either specify both or none
}
liq_attr *attr = custom_malloc(sizeof(liq_attr));
if (!attr) return NULL;
*attr = (liq_attr) {
.magic_header = liq_attr_magic,
.malloc = custom_malloc,
.free = custom_free,
.max_colors = 256,
.min_opaque_val = 1, // whether preserve opaque colors for IE (1.0=no, does not affect alpha)
.last_index_transparent = false, // puts transparent color at last index. This is workaround for blu-ray subtitles.
.target_mse = 0,
.max_mse = MAX_DIFF,
};
liq_set_speed(attr, 4);
return attr;
}
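#if 0
/* Minimal usage sketch of the public API defined in this file (illustrative only,
   not compiled into the library; `pixels` is assumed to be a caller-owned RGBA
   bitmap of width*height*4 bytes): */
static void example_quantize(const void *pixels, int width, int height)
{
    liq_attr *attr = liq_attr_create();
    if (!attr) return; // e.g. SSE required but not available
    liq_set_max_colors(attr, 256);
    liq_image *img = liq_image_create_rgba(attr, pixels, width, height, 0);
    liq_result *res = NULL;
    if (img && LIQ_OK == liq_image_quantize(img, attr, &res)) {
        const liq_palette *pal = liq_get_palette(res);
        (void)pal; /* ... remap pixels against pal and write the output ... */
        liq_result_destroy(res);
    }
    if (img) liq_image_destroy(img);
    liq_attr_destroy(attr);
}
#endif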
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_add_fixed_color(liq_image *img, liq_color color)
{
if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
if (img->fixed_colors_count > 255) return LIQ_UNSUPPORTED;
float gamma_lut[256];
to_f_set_gamma(gamma_lut, img->gamma);
img->fixed_colors[img->fixed_colors_count++] = rgba_to_f(gamma_lut, (rgba_pixel){
.r = color.r,
.g = color.g,
.b = color.b,
.a = color.a,
});
return LIQ_OK;
}
LIQ_NONNULL static liq_error liq_histogram_add_fixed_color_f(liq_histogram *hist, f_pixel color)
{
if (hist->fixed_colors_count > 255) return LIQ_UNSUPPORTED;
hist->fixed_colors[hist->fixed_colors_count++] = color;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_fixed_color(liq_histogram *hist, liq_color color, double gamma)
{
if (!CHECK_STRUCT_TYPE(hist, liq_histogram)) return LIQ_INVALID_POINTER;
float gamma_lut[256];
to_f_set_gamma(gamma_lut, gamma ? gamma : 0.45455);
const f_pixel px = rgba_to_f(gamma_lut, (rgba_pixel){
.r = color.r,
.g = color.g,
.b = color.b,
.a = color.a,
});
return liq_histogram_add_fixed_color_f(hist, px);
}
LIQ_NONNULL static bool liq_image_use_low_memory(liq_image *img)
{
img->temp_f_row = img->malloc(sizeof(img->f_pixels[0]) * LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_max_threads());
return img->temp_f_row != NULL;
}
LIQ_NONNULL static bool liq_image_should_use_low_memory(liq_image *img, const bool low_memory_hint)
{
return img->width * img->height > (low_memory_hint ? LIQ_HIGH_MEMORY_LIMIT/8 : LIQ_HIGH_MEMORY_LIMIT) / sizeof(f_pixel); // Watch out for integer overflow
}
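/* Rough numbers for the limit above (illustrative, assuming f_pixel is four
   floats, i.e. 16 bytes): LIQ_HIGH_MEMORY_LIMIT/sizeof(f_pixel) = 64MiB/16 =
   4M pixels, so the converted-pixel cache is skipped above roughly 2048x2048,
   or above 512K pixels (about 724x724) when the low-memory hint is set. */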
static liq_image *liq_image_create_internal(const liq_attr *attr, rgba_pixel* rows[], liq_image_get_rgba_row_callback *row_callback, void *row_callback_user_info, int width, int height, double gamma)
{
if (gamma < 0 || gamma > 1.0) {
liq_log_error(attr, "gamma must be >= 0 and <= 1 (try 1/gamma instead)");
return NULL;
}
if (!rows && !row_callback) {
liq_log_error(attr, "missing row data");
return NULL;
}
liq_image *img = attr->malloc(sizeof(liq_image));
if (!img) return NULL;
*img = (liq_image){
.magic_header = liq_image_magic,
.malloc = attr->malloc,
.free = attr->free,
.width = width, .height = height,
.gamma = gamma ? gamma : 0.45455,
.rows = rows,
.row_callback = row_callback,
.row_callback_user_info = row_callback_user_info,
.min_opaque_val = attr->min_opaque_val,
};
if (!rows || attr->min_opaque_val < 1.f) {
img->temp_row = attr->malloc(sizeof(img->temp_row[0]) * LIQ_TEMP_ROW_WIDTH(width) * omp_get_max_threads());
if (!img->temp_row) return NULL;
}
// if image is huge or converted pixels are not likely to be reused then don't cache converted pixels
if (liq_image_should_use_low_memory(img, !img->temp_row && !attr->use_contrast_maps && !attr->use_dither_map)) {
verbose_print(attr, " conserving memory");
if (!liq_image_use_low_memory(img)) return NULL;
}
if (img->min_opaque_val < 1.f) {
verbose_print(attr, " Working around IE6 bug by making image less transparent...");
}
return img;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_memory_ownership(liq_image *img, int ownership_flags)
{
if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
if (!img->rows || !ownership_flags || (ownership_flags & ~(LIQ_OWN_ROWS|LIQ_OWN_PIXELS))) {
return LIQ_VALUE_OUT_OF_RANGE;
}
if (ownership_flags & LIQ_OWN_ROWS) {
if (img->free_rows_internal) return LIQ_VALUE_OUT_OF_RANGE;
img->free_rows = true;
}
if (ownership_flags & LIQ_OWN_PIXELS) {
img->free_pixels = true;
if (!img->pixels) {
// for simplicity of this API there's no explicit bitmap argument,
// so the row with the lowest address is assumed to be at the start of the bitmap
img->pixels = img->rows[0];
for(unsigned int i=1; i < img->height; i++) {
img->pixels = MIN(img->pixels, img->rows[i]);
}
}
}
return LIQ_OK;
}
LIQ_NONNULL static void liq_image_free_maps(liq_image *input_image);
LIQ_NONNULL static void liq_image_free_importance_map(liq_image *input_image);
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_importance_map(liq_image *img, unsigned char importance_map[], size_t buffer_size, enum liq_ownership ownership) {
if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
if (!CHECK_USER_POINTER(importance_map)) return LIQ_INVALID_POINTER;
const size_t required_size = img->width * img->height;
if (buffer_size < required_size) {
return LIQ_BUFFER_TOO_SMALL;
}
if (ownership == LIQ_COPY_PIXELS) {
unsigned char *tmp = img->malloc(required_size);
if (!tmp) {
return LIQ_OUT_OF_MEMORY;
}
memcpy(tmp, importance_map, required_size);
importance_map = tmp;
} else if (ownership != LIQ_OWN_PIXELS) {
return LIQ_UNSUPPORTED;
}
liq_image_free_importance_map(img);
img->importance_map = importance_map;
return LIQ_OK;
}
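/* Usage note (illustrative): the importance map holds one byte per pixel
   (larger values mean the pixel's color matters more), so buffer_size must be
   at least width*height. With LIQ_COPY_PIXELS the buffer is copied and the
   caller keeps ownership; with LIQ_OWN_PIXELS the library keeps the pointer
   and frees it itself. */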
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_background(liq_image *img, liq_image *background)
{
if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
if (!CHECK_STRUCT_TYPE(background, liq_image)) return LIQ_INVALID_POINTER;
if (background->background) {
return LIQ_UNSUPPORTED;
}
if (img->width != background->width || img->height != background->height) {
return LIQ_BUFFER_TOO_SMALL;
}
if (img->background) {
liq_image_destroy(img->background);
}
img->background = background;
liq_image_free_maps(img); // Force them to be re-analyzed with the background
return LIQ_OK;
}
LIQ_NONNULL static bool check_image_size(const liq_attr *attr, const int width, const int height)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
return false;
}
if (width <= 0 || height <= 0) {
liq_log_error(attr, "width and height must be > 0");
return false;
}
if (width > INT_MAX/sizeof(rgba_pixel)/height || width > INT_MAX/16/sizeof(f_pixel) || height > INT_MAX/sizeof(size_t)) {
liq_log_error(attr, "image too large");
return false;
}
return true;
}
LIQ_EXPORT liq_image *liq_image_create_custom(const liq_attr *attr, liq_image_get_rgba_row_callback *row_callback, void* user_info, int width, int height, double gamma)
{
if (!check_image_size(attr, width, height)) {
return NULL;
}
return liq_image_create_internal(attr, NULL, row_callback, user_info, width, height, gamma);
}
LIQ_EXPORT liq_image *liq_image_create_rgba_rows(const liq_attr *attr, void *const rows[], int width, int height, double gamma)
{
if (!check_image_size(attr, width, height)) {
return NULL;
}
for(int i=0; i < height; i++) {
if (!CHECK_USER_POINTER(rows+i) || !CHECK_USER_POINTER(rows[i])) {
liq_log_error(attr, "invalid row pointers");
return NULL;
}
}
return liq_image_create_internal(attr, (rgba_pixel**)rows, NULL, NULL, width, height, gamma);
}
LIQ_EXPORT LIQ_NONNULL liq_image *liq_image_create_rgba(const liq_attr *attr, const void* bitmap, int width, int height, double gamma)
{
if (!check_image_size(attr, width, height)) {
return NULL;
}
if (!CHECK_USER_POINTER(bitmap)) {
liq_log_error(attr, "invalid bitmap pointer");
return NULL;
}
rgba_pixel *const pixels = (rgba_pixel *const)bitmap;
rgba_pixel **rows = attr->malloc(sizeof(rows[0])*height);
if (!rows) return NULL;
for(int i=0; i < height; i++) {
rows[i] = pixels + width * i;
}
liq_image *image = liq_image_create_internal(attr, rows, NULL, NULL, width, height, gamma);
if (!image) {
attr->free(rows);
return NULL;
}
image->free_rows = true;
image->free_rows_internal = true;
return image;
}
NEVER_INLINE LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info);
LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info)
{
assert(callback);
assert(temp_row);
callback(temp_row, row, width, user_info);
}
LIQ_NONNULL inline static bool liq_image_has_rgba_pixels(const liq_image *img)
{
if (!CHECK_STRUCT_TYPE(img, liq_image)) {
return false;
}
return img->rows || (img->temp_row && img->row_callback);
}
LIQ_NONNULL inline static bool liq_image_can_use_rgba_rows(const liq_image *img)
{
assert(liq_image_has_rgba_pixels(img));
const bool iebug = img->min_opaque_val < 1.f;
return (img->rows && !iebug);
}
LIQ_NONNULL static const rgba_pixel *liq_image_get_row_rgba(liq_image *img, unsigned int row)
{
if (liq_image_can_use_rgba_rows(img)) {
return img->rows[row];
}
assert(img->temp_row);
rgba_pixel *temp_row = img->temp_row + LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_thread_num();
if (img->rows) {
memcpy(temp_row, img->rows[row], img->width * sizeof(temp_row[0]));
} else {
liq_executing_user_callback(img->row_callback, (liq_color*)temp_row, row, img->width, img->row_callback_user_info);
}
if (img->min_opaque_val < 1.f) modify_alpha(img, temp_row);
return temp_row;
}
LIQ_NONNULL static void convert_row_to_f(liq_image *img, f_pixel *row_f_pixels, const unsigned int row, const float gamma_lut[])
{
assert(row_f_pixels);
assert(!USE_SSE || 0 == ((uintptr_t)row_f_pixels & 15));
const rgba_pixel *const row_pixels = liq_image_get_row_rgba(img, row);
for(unsigned int col=0; col < img->width; col++) {
row_f_pixels[col] = rgba_to_f(gamma_lut, row_pixels[col]);
}
}
LIQ_NONNULL static bool liq_image_get_row_f_init(liq_image *img)
{
assert(omp_get_thread_num() == 0);
if (img->f_pixels) {
return true;
}
if (!liq_image_should_use_low_memory(img, false)) {
img->f_pixels = img->malloc(sizeof(img->f_pixels[0]) * img->width * img->height);
}
if (!img->f_pixels) {
return liq_image_use_low_memory(img);
}
if (!liq_image_has_rgba_pixels(img)) {
return false;
}
float gamma_lut[256];
to_f_set_gamma(gamma_lut, img->gamma);
for(unsigned int i=0; i < img->height; i++) {
convert_row_to_f(img, &img->f_pixels[i*img->width], i, gamma_lut);
}
return true;
}
LIQ_NONNULL static const f_pixel *liq_image_get_row_f(liq_image *img, unsigned int row)
{
if (!img->f_pixels) {
assert(img->temp_f_row); // init should have done that
float gamma_lut[256];
to_f_set_gamma(gamma_lut, img->gamma);
f_pixel *row_for_thread = img->temp_f_row + LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_thread_num();
convert_row_to_f(img, row_for_thread, row, gamma_lut);
return row_for_thread;
}
return img->f_pixels + img->width * row;
}
LIQ_EXPORT LIQ_NONNULL int liq_image_get_width(const liq_image *input_image)
{
if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return -1;
return input_image->width;
}
LIQ_EXPORT LIQ_NONNULL int liq_image_get_height(const liq_image *input_image)
{
if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return -1;
return input_image->height;
}
typedef void free_func(void*);
LIQ_NONNULL static free_func *get_default_free_func(liq_image *img)
{
// When default allocator is used then user-supplied pointers must be freed with free()
if (img->free_rows_internal || img->free != liq_aligned_free) {
return img->free;
}
return free;
}
LIQ_NONNULL static void liq_image_free_rgba_source(liq_image *input_image)
{
if (input_image->free_pixels && input_image->pixels) {
get_default_free_func(input_image)(input_image->pixels);
input_image->pixels = NULL;
}
if (input_image->free_rows && input_image->rows) {
get_default_free_func(input_image)(input_image->rows);
input_image->rows = NULL;
}
}
LIQ_NONNULL static void liq_image_free_importance_map(liq_image *input_image) {
if (input_image->importance_map) {
input_image->free(input_image->importance_map);
input_image->importance_map = NULL;
}
}
LIQ_NONNULL static void liq_image_free_maps(liq_image *input_image) {
liq_image_free_importance_map(input_image);
if (input_image->edges) {
input_image->free(input_image->edges);
input_image->edges = NULL;
}
if (input_image->dither_map) {
input_image->free(input_image->dither_map);
input_image->dither_map = NULL;
}
}
LIQ_EXPORT LIQ_NONNULL void liq_image_destroy(liq_image *input_image)
{
if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return;
liq_image_free_rgba_source(input_image);
liq_image_free_maps(input_image);
if (input_image->f_pixels) {
input_image->free(input_image->f_pixels);
}
if (input_image->temp_row) {
input_image->free(input_image->temp_row);
}
if (input_image->temp_f_row) {
input_image->free(input_image->temp_f_row);
}
if (input_image->background) {
liq_image_destroy(input_image->background);
}
input_image->magic_header = liq_freed_magic;
input_image->free(input_image);
}
LIQ_EXPORT liq_histogram* liq_histogram_create(const liq_attr* attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
return NULL;
}
liq_histogram *hist = attr->malloc(sizeof(liq_histogram));
if (!hist) return NULL;
*hist = (liq_histogram) {
.magic_header = liq_histogram_magic,
.malloc = attr->malloc,
.free = attr->free,
.ignorebits = MAX(attr->min_posterization_output, attr->min_posterization_input),
};
return hist;
}
LIQ_EXPORT LIQ_NONNULL void liq_histogram_destroy(liq_histogram *hist)
{
if (!CHECK_STRUCT_TYPE(hist, liq_histogram)) return;
hist->magic_header = liq_freed_magic;
pam_freeacolorhash(hist->acht);
hist->free(hist);
}
LIQ_EXPORT LIQ_NONNULL liq_result *liq_quantize_image(liq_attr *attr, liq_image *img)
{
liq_result *res;
if (LIQ_OK != liq_image_quantize(img, attr, &res)) {
return NULL;
}
return res;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_quantize(liq_image *const img, liq_attr *const attr, liq_result **result_output)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (!liq_image_has_rgba_pixels(img)) {
return LIQ_UNSUPPORTED;
}
liq_histogram *hist = liq_histogram_create(attr);
if (!hist) {
return LIQ_OUT_OF_MEMORY;
}
liq_error err = liq_histogram_add_image(hist, attr, img);
if (LIQ_OK != err) {
return err;
}
err = liq_histogram_quantize_internal(hist, attr, false, result_output);
liq_histogram_destroy(hist);
return err;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_quantize(liq_histogram *input_hist, liq_attr *attr, liq_result **result_output) {
return liq_histogram_quantize_internal(input_hist, attr, true, result_output);
}
LIQ_NONNULL static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output)
{
if (!CHECK_USER_POINTER(result_output)) return LIQ_INVALID_POINTER;
*result_output = NULL;
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER;
if (liq_progress(attr, 0)) return LIQ_ABORTED;
histogram *hist;
liq_error err = finalize_histogram(input_hist, attr, &hist);
if (err != LIQ_OK) {
return err;
}
err = pngquant_quantize(hist, attr, input_hist->fixed_colors_count, input_hist->fixed_colors, input_hist->gamma, fixed_result_colors, result_output);
pam_freeacolorhist(hist);
return err;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_dithering_level(liq_result *res, float dither_level)
{
if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER;
if (res->remapping) {
liq_remapping_result_destroy(res->remapping);
res->remapping = NULL;
}
if (dither_level < 0 || dither_level > 1.0f) return LIQ_VALUE_OUT_OF_RANGE;
res->dither_level = dither_level;
return LIQ_OK;
}
LIQ_NONNULL static liq_remapping_result *liq_remapping_result_create(liq_result *result)
{
if (!CHECK_STRUCT_TYPE(result, liq_result)) {
return NULL;
}
liq_remapping_result *res = result->malloc(sizeof(liq_remapping_result));
if (!res) return NULL;
*res = (liq_remapping_result) {
.magic_header = liq_remapping_result_magic,
.malloc = result->malloc,
.free = result->free,
.dither_level = result->dither_level,
.use_dither_map = result->use_dither_map,
.palette_error = result->palette_error,
.gamma = result->gamma,
.palette = pam_duplicate_colormap(result->palette),
.progress_callback = result->progress_callback,
.progress_callback_user_info = result->progress_callback_user_info,
.progress_stage1 = result->use_dither_map ? 20 : 0,
};
return res;
}
LIQ_EXPORT LIQ_NONNULL double liq_get_output_gamma(const liq_result *result)
{
if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
return result->gamma;
}
LIQ_NONNULL static void liq_remapping_result_destroy(liq_remapping_result *result)
{
if (!CHECK_STRUCT_TYPE(result, liq_remapping_result)) return;
if (result->palette) pam_freecolormap(result->palette);
if (result->pixels) result->free(result->pixels);
result->magic_header = liq_freed_magic;
result->free(result);
}
LIQ_EXPORT LIQ_NONNULL void liq_result_destroy(liq_result *res)
{
if (!CHECK_STRUCT_TYPE(res, liq_result)) return;
memset(&res->int_palette, 0, sizeof(liq_palette));
if (res->remapping) {
memset(&res->remapping->int_palette, 0, sizeof(liq_palette));
liq_remapping_result_destroy(res->remapping);
}
pam_freecolormap(res->palette);
res->magic_header = liq_freed_magic;
res->free(res);
}
LIQ_EXPORT LIQ_NONNULL double liq_get_quantization_error(const liq_result *result) {
if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
if (result->palette_error >= 0) {
return mse_to_standard_mse(result->palette_error);
}
return -1;
}
LIQ_EXPORT LIQ_NONNULL double liq_get_remapping_error(const liq_result *result) {
if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
if (result->remapping && result->remapping->palette_error >= 0) {
return mse_to_standard_mse(result->remapping->palette_error);
}
return -1;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_quantization_quality(const liq_result *result) {
if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
if (result->palette_error >= 0) {
return mse_to_quality(result->palette_error);
}
return -1;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_remapping_quality(const liq_result *result) {
if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
if (result->remapping && result->remapping->palette_error >= 0) {
return mse_to_quality(result->remapping->palette_error);
}
return -1;
}
LIQ_NONNULL static int compare_popularity(const void *ch1, const void *ch2)
{
const float v1 = ((const colormap_item*)ch1)->popularity;
const float v2 = ((const colormap_item*)ch2)->popularity;
return v1 > v2 ? -1 : 1;
}
LIQ_NONNULL static void sort_palette_qsort(colormap *map, int start, int nelem)
{
if (!nelem) return;
qsort(map->palette + start, nelem, sizeof(map->palette[0]), compare_popularity);
}
#define SWAP_PALETTE(map, a,b) { \
const colormap_item tmp = (map)->palette[(a)]; \
(map)->palette[(a)] = (map)->palette[(b)]; \
(map)->palette[(b)] = tmp; }
LIQ_NONNULL static void sort_palette(colormap *map, const liq_attr *options)
{
/*
** Step 3.5 [GRR]: remap the palette colors so that all entries with
** the maximal alpha value (i.e., fully opaque) are at the end and can
** therefore be omitted from the tRNS chunk.
*/
if (options->last_index_transparent) {
for(unsigned int i=0; i < map->colors; i++) {
if (map->palette[i].acolor.a < 1.f/256.f) {
const unsigned int old = i, transparent_dest = map->colors-1;
SWAP_PALETTE(map, transparent_dest, old);
/* colors sorted by popularity make pngs slightly more compressible */
sort_palette_qsort(map, 0, map->colors-1);
return;
}
}
}
unsigned int non_fixed_colors = 0;
for(unsigned int i = 0; i < map->colors; i++) {
if (map->palette[i].fixed) {
break;
}
non_fixed_colors++;
}
/* move transparent colors to the beginning to shrink trns chunk */
unsigned int num_transparent = 0;
for(unsigned int i = 0; i < non_fixed_colors; i++) {
if (map->palette[i].acolor.a < 255.f/256.f) {
// current transparent color is swapped with earlier opaque one
if (i != num_transparent) {
SWAP_PALETTE(map, num_transparent, i);
i--;
}
num_transparent++;
}
}
liq_verbose_printf(options, " eliminated opaque tRNS-chunk entries...%d entr%s transparent", num_transparent, (num_transparent == 1)? "y" : "ies");
/* colors sorted by popularity make pngs slightly more compressible
* opaque and transparent are sorted separately
*/
sort_palette_qsort(map, 0, num_transparent);
sort_palette_qsort(map, num_transparent, non_fixed_colors - num_transparent);
if (non_fixed_colors > 9 && map->colors > 16) {
SWAP_PALETTE(map, 7, 1); // slightly improves compression
SWAP_PALETTE(map, 8, 2);
SWAP_PALETTE(map, 9, 3);
}
}
inline static unsigned int posterize_channel(unsigned int color, unsigned int bits)
{
return (color & ~((1<<bits)-1)) | (color >> (8-bits));
}
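/* e.g. posterize_channel(0xAB, 2) == 0xAA: the lowest 2 bits are replaced with a
   copy of the top 2 bits, so only 64 distinct levels remain per channel while the
   value stays close to the original */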
LIQ_NONNULL static void set_rounded_palette(liq_palette *const dest, colormap *const map, const double gamma, unsigned int posterize)
{
float gamma_lut[256];
to_f_set_gamma(gamma_lut, gamma);
dest->count = map->colors;
for(unsigned int x = 0; x < map->colors; ++x) {
rgba_pixel px = f_to_rgb(gamma, map->palette[x].acolor);
px.r = posterize_channel(px.r, posterize);
px.g = posterize_channel(px.g, posterize);
px.b = posterize_channel(px.b, posterize);
px.a = posterize_channel(px.a, posterize);
map->palette[x].acolor = rgba_to_f(gamma_lut, px); /* saves rounding error introduced by to_rgb, which makes remapping & dithering more accurate */
if (!px.a && !map->palette[x].fixed) {
px.r = 71; px.g = 112; px.b = 76;
}
dest->entries[x] = (liq_color){.r=px.r,.g=px.g,.b=px.b,.a=px.a};
}
}
LIQ_EXPORT LIQ_NONNULL const liq_palette *liq_get_palette(liq_result *result)
{
if (!CHECK_STRUCT_TYPE(result, liq_result)) return NULL;
if (result->remapping && result->remapping->int_palette.count) {
return &result->remapping->int_palette;
}
if (!result->int_palette.count) {
set_rounded_palette(&result->int_palette, result->palette, result->gamma, result->min_posterization_output);
}
return &result->int_palette;
}
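/* Illustrative: a caller typically reads the result as
       const liq_palette *pal = liq_get_palette(res);
       for (unsigned int i = 0; i < pal->count; i++) {
           liq_color c = pal->entries[i]; // c.r, c.g, c.b, c.a in 0..255
       }
   entries are produced by set_rounded_palette() above, with the requested
   posterization already applied. */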
LIQ_NONNULL static float remap_to_palette(liq_image *const input_image, unsigned char *const *const output_pixels, colormap *const map)
{
const int rows = input_image->height;
const unsigned int cols = input_image->width;
double remapping_error=0;
if (!liq_image_get_row_f_init(input_image)) {
return -1;
}
if (input_image->background && !liq_image_get_row_f_init(input_image->background)) {
return -1;
}
const colormap_item *acolormap = map->palette;
struct nearest_map *const n = nearest_init(map);
const int transparent_index = input_image->background ? nearest_search(n, &(f_pixel){0,0,0,0}, 0, NULL) : 0;
const unsigned int max_threads = omp_get_max_threads();
LIQ_ARRAY(kmeans_state, average_color, (KMEANS_CACHE_LINE_GAP+map->colors) * max_threads);
kmeans_init(map, max_threads, average_color);
#if __GNUC__ >= 9
#pragma omp parallel for if (rows*cols > 3000) \
schedule(static) default(none) shared(acolormap,average_color,cols,input_image,map,n,output_pixels,rows,transparent_index) reduction(+:remapping_error)
#else
#pragma omp parallel for if (rows*cols > 3000) \
schedule(static) default(none) shared(acolormap) shared(average_color) reduction(+:remapping_error)
#endif
for(int row = 0; row < rows; ++row) {
const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row);
const f_pixel *const bg_pixels = input_image->background && acolormap[transparent_index].acolor.a < 1.f/256.f ? liq_image_get_row_f(input_image->background, row) : NULL;
unsigned int last_match=0;
for(unsigned int col = 0; col < cols; ++col) {
float diff;
last_match = nearest_search(n, &row_pixels[col], last_match, &diff);
if (bg_pixels && colordifference(bg_pixels[col], acolormap[last_match].acolor) <= diff) {
last_match = transparent_index;
}
output_pixels[row][col] = last_match;
remapping_error += diff;
kmeans_update_color(row_pixels[col], 1.0, map, last_match, omp_get_thread_num(), average_color);
}
}
kmeans_finalize(map, max_threads, average_color);
nearest_free(n);
return remapping_error / (input_image->width * input_image->height);
}
inline static f_pixel get_dithered_pixel(const float dither_level, const float max_dither_error, const f_pixel thiserr, const f_pixel px)
{
/* Use Floyd-Steinberg errors to adjust actual color. */
const float sr = thiserr.r * dither_level,
sg = thiserr.g * dither_level,
sb = thiserr.b * dither_level,
sa = thiserr.a * dither_level;
float ratio = 1.0;
const float max_overflow = 1.1f;
const float max_underflow = -0.1f;
// allowing some overflow prevents undithered bands caused by clamping of all channels
if (px.r + sr > max_overflow) ratio = MIN(ratio, (max_overflow -px.r)/sr);
else { if (px.r + sr < max_underflow) ratio = MIN(ratio, (max_underflow-px.r)/sr); }
if (px.g + sg > max_overflow) ratio = MIN(ratio, (max_overflow -px.g)/sg);
else { if (px.g + sg < max_underflow) ratio = MIN(ratio, (max_underflow-px.g)/sg); }
if (px.b + sb > max_overflow) ratio = MIN(ratio, (max_overflow -px.b)/sb);
else { if (px.b + sb < max_underflow) ratio = MIN(ratio, (max_underflow-px.b)/sb); }
float a = px.a + sa;
if (a > 1.f) { a = 1.f; }
else if (a < 0) { a = 0; }
// If dithering error is crazy high, don't propagate it that much
    // This prevents crazy green pixels popping out of the blue (or red or black! ;)
const float dither_error = sr*sr + sg*sg + sb*sb + sa*sa;
if (dither_error > max_dither_error) {
ratio *= 0.8f;
} else if (dither_error < 2.f/256.f/256.f) {
// don't dither areas that don't have noticeable error — makes file smaller
return px;
}
return (f_pixel) {
.r=px.r + sr * ratio,
.g=px.g + sg * ratio,
.b=px.b + sb * ratio,
.a=a,
};
}
/**
Uses edge/noise map to apply dithering only to flat areas. Dithering on edges creates jagged lines, and noisy areas are "naturally" dithered.
If output_image_is_remapped is true, only pixels noticeably changed by error diffusion will be written to output image.
*/
LIQ_NONNULL static bool remap_to_palette_floyd(liq_image *input_image, unsigned char *const output_pixels[], liq_remapping_result *quant, const float max_dither_error, const bool output_image_is_remapped)
{
const int rows = input_image->height, cols = input_image->width;
const unsigned char *dither_map = quant->use_dither_map ? (input_image->dither_map ? input_image->dither_map : input_image->edges) : NULL;
const colormap *map = quant->palette;
const colormap_item *acolormap = map->palette;
if (!liq_image_get_row_f_init(input_image)) {
return false;
}
if (input_image->background && !liq_image_get_row_f_init(input_image->background)) {
return false;
}
/* Initialize Floyd-Steinberg error vectors. */
const size_t errwidth = cols+2;
f_pixel *restrict thiserr = input_image->malloc(errwidth * sizeof(thiserr[0]) * 2); // +2 saves from checking out of bounds access
if (!thiserr) return false;
f_pixel *restrict nexterr = thiserr + errwidth;
memset(thiserr, 0, errwidth * sizeof(thiserr[0]));
bool ok = true;
struct nearest_map *const n = nearest_init(map);
const int transparent_index = input_image->background ? nearest_search(n, &(f_pixel){0,0,0,0}, 0, NULL) : 0;
// response to this value is non-linear and without it any value < 0.8 would give almost no dithering
float base_dithering_level = quant->dither_level;
base_dithering_level = 1.f - (1.f-base_dithering_level)*(1.f-base_dithering_level);
if (dither_map) {
base_dithering_level *= 1.f/255.f; // convert byte to float
}
base_dithering_level *= 15.f/16.f; // prevent small errors from accumulating
int fs_direction = 1;
unsigned int last_match=0;
for (int row = 0; row < rows; ++row) {
if (liq_remap_progress(quant, quant->progress_stage1 + row * (100.f - quant->progress_stage1) / rows)) {
ok = false;
break;
}
memset(nexterr, 0, errwidth * sizeof(nexterr[0]));
int col = (fs_direction > 0) ? 0 : (cols - 1);
const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row);
const f_pixel *const bg_pixels = input_image->background && acolormap[transparent_index].acolor.a < 1.f/256.f ? liq_image_get_row_f(input_image->background, row) : NULL;
do {
float dither_level = base_dithering_level;
if (dither_map) {
dither_level *= dither_map[row*cols + col];
}
const f_pixel spx = get_dithered_pixel(dither_level, max_dither_error, thiserr[col + 1], row_pixels[col]);
const unsigned int guessed_match = output_image_is_remapped ? output_pixels[row][col] : last_match;
float diff;
last_match = nearest_search(n, &spx, guessed_match, &diff);
f_pixel output_px = acolormap[last_match].acolor;
if (bg_pixels && colordifference(bg_pixels[col], output_px) <= diff) {
output_px = bg_pixels[col];
output_pixels[row][col] = transparent_index;
} else {
output_pixels[row][col] = last_match;
}
f_pixel err = {
.r = (spx.r - output_px.r),
.g = (spx.g - output_px.g),
.b = (spx.b - output_px.b),
.a = (spx.a - output_px.a),
};
// If dithering error is crazy high, don't propagate it that much
            // This prevents crazy green pixels popping out of the blue (or red or black! ;)
if (err.r*err.r + err.g*err.g + err.b*err.b + err.a*err.a > max_dither_error) {
err.r *= 0.75f;
err.g *= 0.75f;
err.b *= 0.75f;
err.a *= 0.75f;
}
/* Propagate Floyd-Steinberg error terms. */
if (fs_direction > 0) {
thiserr[col + 2].a += err.a * (7.f/16.f);
thiserr[col + 2].r += err.r * (7.f/16.f);
thiserr[col + 2].g += err.g * (7.f/16.f);
thiserr[col + 2].b += err.b * (7.f/16.f);
nexterr[col + 2].a = err.a * (1.f/16.f);
nexterr[col + 2].r = err.r * (1.f/16.f);
nexterr[col + 2].g = err.g * (1.f/16.f);
nexterr[col + 2].b = err.b * (1.f/16.f);
nexterr[col + 1].a += err.a * (5.f/16.f);
nexterr[col + 1].r += err.r * (5.f/16.f);
nexterr[col + 1].g += err.g * (5.f/16.f);
nexterr[col + 1].b += err.b * (5.f/16.f);
nexterr[col ].a += err.a * (3.f/16.f);
nexterr[col ].r += err.r * (3.f/16.f);
nexterr[col ].g += err.g * (3.f/16.f);
nexterr[col ].b += err.b * (3.f/16.f);
} else {
thiserr[col ].a += err.a * (7.f/16.f);
thiserr[col ].r += err.r * (7.f/16.f);
thiserr[col ].g += err.g * (7.f/16.f);
thiserr[col ].b += err.b * (7.f/16.f);
nexterr[col ].a = err.a * (1.f/16.f);
nexterr[col ].r = err.r * (1.f/16.f);
nexterr[col ].g = err.g * (1.f/16.f);
nexterr[col ].b = err.b * (1.f/16.f);
nexterr[col + 1].a += err.a * (5.f/16.f);
nexterr[col + 1].r += err.r * (5.f/16.f);
nexterr[col + 1].g += err.g * (5.f/16.f);
nexterr[col + 1].b += err.b * (5.f/16.f);
nexterr[col + 2].a += err.a * (3.f/16.f);
nexterr[col + 2].r += err.r * (3.f/16.f);
nexterr[col + 2].g += err.g * (3.f/16.f);
nexterr[col + 2].b += err.b * (3.f/16.f);
}
// remapping is done in zig-zag
col += fs_direction;
if (fs_direction > 0) {
if (col >= cols) break;
} else {
if (col < 0) break;
}
} while(1);
f_pixel *const temperr = thiserr;
thiserr = nexterr;
nexterr = temperr;
fs_direction = -fs_direction;
}
input_image->free(MIN(thiserr, nexterr)); // MIN because pointers were swapped
nearest_free(n);
return ok;
}
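/* The loop above is serpentine Floyd-Steinberg: each pixel's quantization error is
   spread 7/16 to the next pixel in scan order and 3/16, 5/16, 1/16 to the three
   neighbors on the row below, with the scan direction flipped every row
   (fs_direction) to avoid directional artifacts. */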
/* fixed colors are always included in the palette, so it would be wasteful to duplicate them in palette from histogram */
LIQ_NONNULL static void remove_fixed_colors_from_histogram(histogram *hist, const int fixed_colors_count, const f_pixel fixed_colors[], const float target_mse)
{
const float max_difference = MAX(target_mse/2.f, 2.f/256.f/256.f);
if (fixed_colors_count) {
for(int j=0; j < hist->size; j++) {
for(unsigned int i=0; i < fixed_colors_count; i++) {
if (colordifference(hist->achv[j].acolor, fixed_colors[i]) < max_difference) {
hist->achv[j] = hist->achv[--hist->size]; // remove color from histogram by overwriting with the last entry
j--; break; // continue searching histogram
}
}
}
}
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_colors(liq_histogram *input_hist, const liq_attr *options, const liq_histogram_entry entries[], int num_entries, double gamma)
{
if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER;
if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER;
if (!CHECK_USER_POINTER(entries)) return LIQ_INVALID_POINTER;
if (gamma < 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE;
if (num_entries <= 0 || num_entries > 1<<30) return LIQ_VALUE_OUT_OF_RANGE;
if (input_hist->ignorebits > 0 && input_hist->had_image_added) {
return LIQ_UNSUPPORTED;
}
input_hist->ignorebits = 0;
input_hist->had_image_added = true;
input_hist->gamma = gamma ? gamma : 0.45455;
if (!input_hist->acht) {
input_hist->acht = pam_allocacolorhash(~0, num_entries*num_entries, 0, options->malloc, options->free);
if (!input_hist->acht) {
return LIQ_OUT_OF_MEMORY;
}
}
// Fake image size. It's only for hash size estimates.
if (!input_hist->acht->cols) {
input_hist->acht->cols = num_entries;
}
input_hist->acht->rows += num_entries;
const unsigned int hash_size = input_hist->acht->hash_size;
for(int i=0; i < num_entries; i++) {
const rgba_pixel rgba = {
.r = entries[i].color.r,
.g = entries[i].color.g,
.b = entries[i].color.b,
.a = entries[i].color.a,
};
union rgba_as_int px = {rgba};
unsigned int hash;
if (px.rgba.a) {
hash = px.l % hash_size;
} else {
hash=0; px.l=0;
}
if (!pam_add_to_hash(input_hist->acht, hash, entries[i].count, px, i, num_entries)) {
return LIQ_OUT_OF_MEMORY;
}
}
return LIQ_OK;
}
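#if 0
/* Illustrative sketch (not compiled): colors can be fed to a histogram directly,
   without creating a liq_image, and then quantized with the histogram API. */
static liq_result *example_from_entries(liq_attr *attr, const liq_histogram_entry *entries, int n)
{
    liq_histogram *hist = liq_histogram_create(attr);
    liq_result *res = NULL;
    if (hist && LIQ_OK == liq_histogram_add_colors(hist, attr, entries, n, 0)) {
        liq_histogram_quantize(hist, attr, &res); /* leaves res == NULL on failure */
    }
    if (hist) liq_histogram_destroy(hist);
    return res;
}
#endif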
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_image(liq_histogram *input_hist, const liq_attr *options, liq_image *input_image)
{
if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER;
if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER;
if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER;
const unsigned int cols = input_image->width, rows = input_image->height;
if (!input_image->importance_map && options->use_contrast_maps) {
contrast_maps(input_image);
}
input_hist->gamma = input_image->gamma;
for(int i = 0; i < input_image->fixed_colors_count; i++) {
liq_error res = liq_histogram_add_fixed_color_f(input_hist, input_image->fixed_colors[i]);
if (res != LIQ_OK) {
return res;
}
}
/*
** Step 2: attempt to make a histogram of the colors, unclustered.
** If at first we don't succeed, increase ignorebits to increase color
** coherence and try again.
*/
if (liq_progress(options, options->progress_stage1 * 0.4f)) {
return LIQ_ABORTED;
}
const bool all_rows_at_once = liq_image_can_use_rgba_rows(input_image);
    // The usual solution is to start from scratch when the limit is exceeded, but that's not possible
    // if this is not the first image added
const unsigned int max_histogram_entries = input_hist->had_image_added ? ~0 : options->max_histogram_entries;
do {
if (!input_hist->acht) {
input_hist->acht = pam_allocacolorhash(max_histogram_entries, rows*cols, input_hist->ignorebits, options->malloc, options->free);
}
if (!input_hist->acht) return LIQ_OUT_OF_MEMORY;
// histogram uses noise contrast map for importance. Color accuracy in noisy areas is not very important.
// noise map does not include edges to avoid ruining anti-aliasing
for(unsigned int row=0; row < rows; row++) {
bool added_ok;
if (all_rows_at_once) {
added_ok = pam_computeacolorhash(input_hist->acht, (const rgba_pixel *const *)input_image->rows, cols, rows, input_image->importance_map);
if (added_ok) break;
} else {
const rgba_pixel* rows_p[1] = { liq_image_get_row_rgba(input_image, row) };
added_ok = pam_computeacolorhash(input_hist->acht, rows_p, cols, 1, input_image->importance_map ? &input_image->importance_map[row * cols] : NULL);
}
if (!added_ok) {
input_hist->ignorebits++;
liq_verbose_printf(options, " too many colors! Scaling colors to improve clustering... %d", input_hist->ignorebits);
pam_freeacolorhash(input_hist->acht);
input_hist->acht = NULL;
if (liq_progress(options, options->progress_stage1 * 0.6f)) return LIQ_ABORTED;
break;
}
}
} while(!input_hist->acht);
input_hist->had_image_added = true;
liq_image_free_importance_map(input_image);
if (input_image->free_pixels && input_image->f_pixels) {
        liq_image_free_rgba_source(input_image); // now we can free the RGBA source, since a copy has been made in f_pixels
}
return LIQ_OK;
}
LIQ_NONNULL static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output)
{
if (liq_progress(options, options->progress_stage1 * 0.9f)) {
return LIQ_ABORTED;
}
if (!input_hist->acht) {
return LIQ_BITMAP_NOT_AVAILABLE;
}
histogram *hist = pam_acolorhashtoacolorhist(input_hist->acht, input_hist->gamma, options->malloc, options->free);
pam_freeacolorhash(input_hist->acht);
input_hist->acht = NULL;
if (!hist) {
return LIQ_OUT_OF_MEMORY;
}
liq_verbose_printf(options, " made histogram...%d colors found", hist->size);
remove_fixed_colors_from_histogram(hist, input_hist->fixed_colors_count, input_hist->fixed_colors, options->target_mse);
*hist_output = hist;
return LIQ_OK;
}
LIQ_NONNULL static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels)
{
    /* IE6 makes colors with even the slightest transparency completely transparent,
       so to improve the situation in IE, make colors that are less than ~10% transparent
       completely opaque */
const float min_opaque_val = input_image->min_opaque_val;
const float almost_opaque_val = min_opaque_val * 169.f/256.f;
const unsigned int almost_opaque_val_int = (min_opaque_val * 169.f/256.f)*255.f;
for(unsigned int col = 0; col < input_image->width; col++) {
const rgba_pixel px = row_pixels[col];
        /* ie bug: to avoid a visible step caused by forced opaqueness, linearly raise the opaqueness of almost-opaque colors */
if (px.a >= almost_opaque_val_int) {
float al = px.a / 255.f;
al = almost_opaque_val + (al-almost_opaque_val) * (1.f-almost_opaque_val) / (min_opaque_val-almost_opaque_val);
al *= 256.f;
row_pixels[col].a = al >= 255.f ? 255 : al;
}
}
}
/**
Builds two maps:
importance_map - approximation of areas with high-frequency noise, except straight edges. 1=flat, 0=noisy.
edges - noise map including all edges
*/
LIQ_NONNULL static void contrast_maps(liq_image *image)
{
const unsigned int cols = image->width, rows = image->height;
if (cols < 4 || rows < 4 || (3*cols*rows) > LIQ_HIGH_MEMORY_LIMIT) {
return;
}
unsigned char *restrict noise = image->importance_map ? image->importance_map : image->malloc(cols*rows);
image->importance_map = NULL;
unsigned char *restrict edges = image->edges ? image->edges : image->malloc(cols*rows);
image->edges = NULL;
unsigned char *restrict tmp = image->malloc(cols*rows);
if (!noise || !edges || !tmp || !liq_image_get_row_f_init(image)) {
image->free(noise);
image->free(edges);
image->free(tmp);
return;
}
const f_pixel *curr_row, *prev_row, *next_row;
curr_row = prev_row = next_row = liq_image_get_row_f(image, 0);
for (unsigned int j=0; j < rows; j++) {
prev_row = curr_row;
curr_row = next_row;
next_row = liq_image_get_row_f(image, MIN(rows-1,j+1));
f_pixel prev, curr = curr_row[0], next=curr;
for (unsigned int i=0; i < cols; i++) {
prev=curr;
curr=next;
next = curr_row[MIN(cols-1,i+1)];
// contrast is difference between pixels neighbouring horizontally and vertically
const float a = fabsf(prev.a+next.a - curr.a*2.f),
r = fabsf(prev.r+next.r - curr.r*2.f),
g = fabsf(prev.g+next.g - curr.g*2.f),
b = fabsf(prev.b+next.b - curr.b*2.f);
const f_pixel prevl = prev_row[i];
const f_pixel nextl = next_row[i];
const float a1 = fabsf(prevl.a+nextl.a - curr.a*2.f),
r1 = fabsf(prevl.r+nextl.r - curr.r*2.f),
g1 = fabsf(prevl.g+nextl.g - curr.g*2.f),
b1 = fabsf(prevl.b+nextl.b - curr.b*2.f);
const float horiz = MAX(MAX(a,r),MAX(g,b));
const float vert = MAX(MAX(a1,r1),MAX(g1,b1));
const float edge = MAX(horiz,vert);
float z = edge - fabsf(horiz-vert)*.5f;
z = 1.f - MAX(z,MIN(horiz,vert));
z *= z; // noise is amplified
z *= z;
// 85 is about 1/3rd of weight (not 0, because noisy pixels still need to be included, just not as precisely).
const unsigned int z_int = 85 + (unsigned int)(z * 171.f);
noise[j*cols+i] = MIN(z_int, 255);
const int e_int = 255 - (int)(edge * 256.f);
edges[j*cols+i] = e_int > 0 ? MIN(e_int, 255) : 0;
}
}
// noise areas are shrunk and then expanded to remove thin edges from the map
liq_max3(noise, tmp, cols, rows);
liq_max3(tmp, noise, cols, rows);
liq_blur(noise, tmp, noise, cols, rows, 3);
liq_max3(noise, tmp, cols, rows);
liq_min3(tmp, noise, cols, rows);
liq_min3(noise, tmp, cols, rows);
liq_min3(tmp, noise, cols, rows);
liq_min3(edges, tmp, cols, rows);
liq_max3(tmp, edges, cols, rows);
for(unsigned int i=0; i < cols*rows; i++) edges[i] = MIN(noise[i], edges[i]);
image->free(tmp);
image->importance_map = noise;
image->edges = edges;
}
/**
* Builds map of neighbor pixels mapped to the same palette entry
*
* For efficiency/simplicity it mainly looks for same consecutive pixels horizontally
* and peeks 1 pixel above/below. Full 2d algorithm doesn't improve it significantly.
* Correct flood fill doesn't have visually good properties.
*/
LIQ_NONNULL static void update_dither_map(liq_image *input_image, unsigned char *const *const row_pointers, colormap *map)
{
const unsigned int width = input_image->width;
const unsigned int height = input_image->height;
unsigned char *const edges = input_image->edges;
for(unsigned int row=0; row < height; row++) {
unsigned char lastpixel = row_pointers[row][0];
unsigned int lastcol=0;
for(unsigned int col=1; col < width; col++) {
const unsigned char px = row_pointers[row][col];
if (input_image->background && map->palette[px].acolor.a < 1.f/256.f) {
// Transparency may or may not create an edge. When there's an explicit background set, assume no edge.
continue;
}
if (px != lastpixel || col == width-1) {
int neighbor_count = 10 * (col-lastcol);
unsigned int i=lastcol;
while(i < col) {
if (row > 0) {
unsigned char pixelabove = row_pointers[row-1][i];
if (pixelabove == lastpixel) neighbor_count += 15;
}
if (row < height-1) {
unsigned char pixelbelow = row_pointers[row+1][i];
if (pixelbelow == lastpixel) neighbor_count += 15;
}
i++;
}
while(lastcol <= col) {
int e = edges[row*width + lastcol];
edges[row*width + lastcol++] = (e+128) * (255.f/(255+128)) * (1.f - 20.f / (20 + neighbor_count));
}
lastpixel = px;
}
}
}
input_image->dither_map = input_image->edges;
input_image->edges = NULL;
}
/**
* Palette can be NULL, in which case it creates a new palette from scratch.
*/
static colormap *add_fixed_colors_to_palette(colormap *palette, const int max_colors, const f_pixel fixed_colors[], const int fixed_colors_count, void* (*malloc)(size_t), void (*free)(void*))
{
if (!fixed_colors_count) return palette;
colormap *newpal = pam_colormap(MIN(max_colors, (palette ? palette->colors : 0) + fixed_colors_count), malloc, free);
unsigned int i=0;
if (palette && fixed_colors_count < max_colors) {
unsigned int palette_max = MIN(palette->colors, max_colors - fixed_colors_count);
for(; i < palette_max; i++) {
newpal->palette[i] = palette->palette[i];
}
}
for(int j=0; j < MIN(max_colors, fixed_colors_count); j++) {
newpal->palette[i++] = (colormap_item){
.acolor = fixed_colors[j],
.fixed = true,
};
}
if (palette) pam_freecolormap(palette);
return newpal;
}
LIQ_NONNULL static void adjust_histogram_callback(hist_item *item, float diff)
{
item->adjusted_weight = (item->perceptual_weight+item->adjusted_weight) * (sqrtf(1.f+diff));
}
/**
Repeats mediancut with different histogram weights to find palette with minimum error.
feedback_loop_trials controls how long the search will take. < 0 skips the iteration.
*/
static colormap *find_best_palette(histogram *hist, const liq_attr *options, const double max_mse, const f_pixel fixed_colors[], const unsigned int fixed_colors_count, double *palette_error_p)
{
unsigned int max_colors = options->max_colors;
// if output is posterized it doesn't make sense to aim for perfect colors, so increase target_mse
// at this point actual gamma is not set, so very conservative posterization estimate is used
const double target_mse = MIN(max_mse, MAX(options->target_mse, pow((1<<options->min_posterization_output)/1024.0, 2)));
int feedback_loop_trials = options->feedback_loop_trials;
if (hist->size > 5000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
if (hist->size > 25000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
if (hist->size > 50000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
if (hist->size > 100000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
colormap *acolormap = NULL;
double least_error = MAX_DIFF;
double target_mse_overshoot = feedback_loop_trials>0 ? 1.05 : 1.0;
const float total_trials = (float)(feedback_loop_trials>0?feedback_loop_trials:1);
do {
colormap *newmap;
if (hist->size && fixed_colors_count < max_colors) {
newmap = mediancut(hist, max_colors-fixed_colors_count, target_mse * target_mse_overshoot, MAX(MAX(45.0/65536.0, target_mse), least_error)*1.2,
options->malloc, options->free);
} else {
feedback_loop_trials = 0;
newmap = NULL;
}
newmap = add_fixed_colors_to_palette(newmap, max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free);
if (!newmap) {
return NULL;
}
if (feedback_loop_trials <= 0) {
return newmap;
}
// after palette has been created, total error (MSE) is calculated to keep the best palette
// at the same time K-Means iteration is done to improve the palette
// and histogram weights are adjusted based on remapping error to give more weight to poorly matched colors
const bool first_run_of_target_mse = !acolormap && target_mse > 0;
double total_error = kmeans_do_iteration(hist, newmap, first_run_of_target_mse ? NULL : adjust_histogram_callback);
// goal is to increase quality or to reduce number of colors used if quality is good enough
if (!acolormap || total_error < least_error || (total_error <= target_mse && newmap->colors < max_colors)) {
if (acolormap) pam_freecolormap(acolormap);
acolormap = newmap;
if (total_error < target_mse && total_error > 0) {
// K-Means iteration improves quality above what mediancut aims for
// this compensates for it, making mediancut aim for worse
target_mse_overshoot = MIN(target_mse_overshoot*1.25, target_mse/total_error);
}
least_error = total_error;
// if number of colors could be reduced, try to keep it that way
// but allow extra color as a bit of wiggle room in case quality can be improved too
max_colors = MIN(newmap->colors+1, max_colors);
feedback_loop_trials -= 1; // asymptotic improvement could make it go on forever
} else {
for(unsigned int j=0; j < hist->size; j++) {
hist->achv[j].adjusted_weight = (hist->achv[j].perceptual_weight + hist->achv[j].adjusted_weight)/2.0;
}
target_mse_overshoot = 1.0;
feedback_loop_trials -= 6;
// if error is really bad, it's unlikely to improve, so end sooner
if (total_error > least_error*4) feedback_loop_trials -= 3;
pam_freecolormap(newmap);
}
float fraction_done = 1.f-MAX(0.f, feedback_loop_trials/total_trials);
if (liq_progress(options, options->progress_stage1 + fraction_done * options->progress_stage2)) break;
liq_verbose_printf(options, " selecting colors...%d%%", (int)(100.f * fraction_done));
}
while(feedback_loop_trials > 0);
*palette_error_p = least_error;
return acolormap;
}
static colormap *histogram_to_palette(const histogram *hist, const liq_attr *options) {
if (!hist->size) {
return NULL;
}
colormap *acolormap = pam_colormap(hist->size, options->malloc, options->free);
for(unsigned int i=0; i < hist->size; i++) {
acolormap->palette[i].acolor = hist->achv[i].acolor;
acolormap->palette[i].popularity = hist->achv[i].perceptual_weight;
}
return acolormap;
}
LIQ_NONNULL static liq_error pngquant_quantize(histogram *hist, const liq_attr *options, const int fixed_colors_count, const f_pixel fixed_colors[], const double gamma, bool fixed_result_colors, liq_result **result_output)
{
colormap *acolormap;
double palette_error = -1;
assert((verbose_print(options, "SLOW debug checks enabled. Recompile with NDEBUG for normal operation."),1));
const bool few_input_colors = hist->size+fixed_colors_count <= options->max_colors;
if (liq_progress(options, options->progress_stage1)) return LIQ_ABORTED;
// If image has few colors to begin with (and no quality degradation is required)
// then it's possible to skip quantization entirely
if (few_input_colors && options->target_mse == 0) {
acolormap = add_fixed_colors_to_palette(histogram_to_palette(hist, options), options->max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free);
palette_error = 0;
} else {
const double max_mse = options->max_mse * (few_input_colors ? 0.33 : 1.0); // when degrading image that's already paletted, require much higher improvement, since pal2pal often looks bad and there's little gain
acolormap = find_best_palette(hist, options, max_mse, fixed_colors, fixed_colors_count, &palette_error);
if (!acolormap) {
return LIQ_VALUE_OUT_OF_RANGE;
}
// K-Means iteration approaches local minimum for the palette
double iteration_limit = options->kmeans_iteration_limit;
unsigned int iterations = options->kmeans_iterations;
if (!iterations && palette_error < 0 && max_mse < MAX_DIFF) iterations = 1; // otherwise total error is never calculated and MSE limit won't work
if (iterations) {
// likely_colormap_index (used and set in kmeans_do_iteration) can't point to index outside colormap
if (acolormap->colors < 256) for(unsigned int j=0; j < hist->size; j++) {
if (hist->achv[j].tmp.likely_colormap_index >= acolormap->colors) {
hist->achv[j].tmp.likely_colormap_index = 0; // actual value doesn't matter, as the guess is out of date anyway
}
}
if (hist->size > 5000) {iterations = (iterations*3 + 3)/4;}
if (hist->size > 25000) {iterations = (iterations*3 + 3)/4;}
if (hist->size > 50000) {iterations = (iterations*3 + 3)/4;}
if (hist->size > 100000) {iterations = (iterations*3 + 3)/4; iteration_limit *= 2;}
verbose_print(options, " moving colormap towards local minimum");
double previous_palette_error = MAX_DIFF;
for(unsigned int i=0; i < iterations; i++) {
palette_error = kmeans_do_iteration(hist, acolormap, NULL);
if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + (i * options->progress_stage3 * 0.9f) / iterations)) {
break;
}
if (fabs(previous_palette_error-palette_error) < iteration_limit) {
break;
}
if (palette_error > max_mse*1.5) { // probably hopeless
if (palette_error > max_mse*3.0) break; // definitely hopeless
i++;
}
previous_palette_error = palette_error;
}
}
if (palette_error > max_mse) {
liq_verbose_printf(options, " image degradation MSE=%.3f (Q=%d) exceeded limit of %.3f (%d)",
mse_to_standard_mse(palette_error), mse_to_quality(palette_error),
mse_to_standard_mse(max_mse), mse_to_quality(max_mse));
pam_freecolormap(acolormap);
return LIQ_QUALITY_TOO_LOW;
}
}
if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + options->progress_stage3 * 0.95f)) {
pam_freecolormap(acolormap);
return LIQ_ABORTED;
}
sort_palette(acolormap, options);
// If palette was created from a multi-image histogram,
// then it shouldn't be optimized for one image during remapping
if (fixed_result_colors) {
for(unsigned int i=0; i < acolormap->colors; i++) {
acolormap->palette[i].fixed = true;
}
}
liq_result *result = options->malloc(sizeof(liq_result));
if (!result) return LIQ_OUT_OF_MEMORY;
*result = (liq_result){
.magic_header = liq_result_magic,
.malloc = options->malloc,
.free = options->free,
.palette = acolormap,
.palette_error = palette_error,
.use_dither_map = options->use_dither_map,
.gamma = gamma,
.min_posterization_output = options->min_posterization_output,
};
*result_output = result;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image(liq_result *result, liq_image *input_image, void *buffer, size_t buffer_size)
{
if (!CHECK_STRUCT_TYPE(result, liq_result)) {
return LIQ_INVALID_POINTER;
}
if (!CHECK_STRUCT_TYPE(input_image, liq_image)) {
return LIQ_INVALID_POINTER;
}
if (!CHECK_USER_POINTER(buffer)) {
return LIQ_INVALID_POINTER;
}
const size_t required_size = input_image->width * input_image->height;
if (buffer_size < required_size) {
return LIQ_BUFFER_TOO_SMALL;
}
LIQ_ARRAY(unsigned char *, rows, input_image->height);
unsigned char *buffer_bytes = buffer;
for(unsigned int i=0; i < input_image->height; i++) {
rows[i] = &buffer_bytes[input_image->width * i];
}
return liq_write_remapped_image_rows(result, input_image, rows);
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image_rows(liq_result *quant, liq_image *input_image, unsigned char **row_pointers)
{
if (!CHECK_STRUCT_TYPE(quant, liq_result)) return LIQ_INVALID_POINTER;
if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER;
for(unsigned int i=0; i < input_image->height; i++) {
if (!CHECK_USER_POINTER(row_pointers+i) || !CHECK_USER_POINTER(row_pointers[i])) return LIQ_INVALID_POINTER;
}
if (quant->remapping) {
liq_remapping_result_destroy(quant->remapping);
}
liq_remapping_result *const result = quant->remapping = liq_remapping_result_create(quant);
if (!result) return LIQ_OUT_OF_MEMORY;
if (!input_image->edges && !input_image->dither_map && quant->use_dither_map) {
contrast_maps(input_image);
}
if (liq_remap_progress(result, result->progress_stage1 * 0.25f)) {
return LIQ_ABORTED;
}
/*
** Step 4: map the colors in the image to their closest match in the
** new colormap, and write 'em out.
*/
float remapping_error = result->palette_error;
if (result->dither_level == 0) {
set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);
remapping_error = remap_to_palette(input_image, row_pointers, result->palette);
} else {
const bool is_image_huge = (input_image->width * input_image->height) > 2000 * 2000;
const bool allow_dither_map = result->use_dither_map == 2 || (!is_image_huge && result->use_dither_map);
const bool generate_dither_map = allow_dither_map && (input_image->edges && !input_image->dither_map);
if (generate_dither_map) {
// If dithering (with dither map) is required, this image is used to find areas that require dithering
remapping_error = remap_to_palette(input_image, row_pointers, result->palette);
update_dither_map(input_image, row_pointers, result->palette);
}
if (liq_remap_progress(result, result->progress_stage1 * 0.5f)) {
return LIQ_ABORTED;
}
// remapping above was the last chance to do K-Means iteration, hence the final palette is set after remapping
set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);
if (!remap_to_palette_floyd(input_image, row_pointers, result, MAX(remapping_error*2.4, 16.f/256.f), generate_dither_map)) {
return LIQ_ABORTED;
}
}
// remapping error measured from the dithered image is absurdly high, so the non-dithered value is always used
// palette_error includes some perceptual weighting from the histogram, which correlates more closely with dssim,
// so that should be used when possible.
if (result->palette_error < 0) {
result->palette_error = remapping_error;
}
return LIQ_OK;
}
LIQ_EXPORT int liq_version() {
return LIQ_VERSION;
}
|
mujoco_deriv_struct.c
|
// -*- evil-shift-width: 4 -*-
/* Copyright © 2018, Roboti LLC
This file is licensed under the MuJoCo Resource License (the "License").
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.roboti.us/resourcelicense.txt
*/
#include "mujoco.h"
#include <stdio.h>
#include <errno.h> /* errno, perror */
#include <string.h>
#include <sys/time.h>
#include <math.h>
#include <assert.h>
#include "mujoco_deriv_struct.h"
// global variables: user-defined, with defaults
#define MAXTHREAD 64 // maximum number of threads allowed
#define MAXSTATEN 8 // max allowed state members
// enable compilation with and without OpenMP support
#if defined(_OPENMP)
#include <omp.h>
#else
// omp timer replacement
double omp_get_wtime(void)
{
struct timeval now;
struct timezone tz;
gettimeofday(&now, &tz);
return (double)now.tv_sec + 1e-6 * (double)now.tv_usec;
}
// omp functions used below
void omp_set_dynamic(int x) {}
void omp_set_num_threads(int x) {}
int omp_get_num_procs(void) {return 1;}
#endif
mjtNum* alloc_deriv(const mjModel* m)
{
// allocate space for the six nv-by-nv Jacobian blocks:
// dinv/dpos, dinv/dvel, dinv/dacc, dacc/dpos, dacc/dvel, dacc/dfrc
return (mjtNum*) mju_malloc(6*sizeof(mjtNum)*m->nv*m->nv);
}
void mj_copyStateCtrlData(mjData* d, const mjModel* m, const mjData* dmain) {
d->time = dmain->time;
mju_copy(d->qpos, dmain->qpos, m->nq);
mju_copy(d->qvel, dmain->qvel, m->nv);
mju_copy(d->qacc, dmain->qacc, m->nv);
mju_copy(d->qacc_warmstart, dmain->qacc_warmstart, m->nv);
mju_copy(d->qfrc_applied, dmain->qfrc_applied, m->nv);
mju_copy(d->xfrc_applied, dmain->xfrc_applied, 6*m->nbody);
mju_copy(d->ctrl, dmain->ctrl, m->nu);
}
////////////////////////////////////////////////////////////////////////////////
// mjData Getters
////////////////////////////////////////////////////////////////////////////////
#define MJDATA_GET_PTR(attr) &mjd_get_ ## attr
#define MJDATA_GETTER(attr) \
mjtNum* mjd_get_ ## attr(const mjData* d) \
{ \
return d-> attr; \
}
MJDATA_GETTER(ctrl)
MJDATA_GETTER(qpos)
MJDATA_GETTER(qvel)
MJDATA_GETTER(qacc)
MJDATA_GETTER(qfrc_inverse)
MJDATA_GETTER(qfrc_applied)
void mjDGetters_free(mjDGetters* g)
{
mju_free(g->fs);
mju_free(g);
}
mjDGetter* mjd_enum2getter(const MJDGetter_enum mjdg)
{
switch (mjdg)
{
case MJDGetter_ctrl:
return MJDATA_GET_PTR(ctrl);
case MJDGetter_qpos:
return MJDATA_GET_PTR(qpos);
case MJDGetter_qvel:
return MJDATA_GET_PTR(qvel);
case MJDGetter_qacc:
return MJDATA_GET_PTR(qacc);
case MJDGetter_qfrc_inverse:
return MJDATA_GET_PTR(qfrc_inverse);
case MJDGetter_qfrc_applied:
return MJDATA_GET_PTR(qfrc_applied);
default:
printf("Error: Bad enum %d", mjdg);
exit(EXIT_FAILURE);
break;
}
}
char* mjd_getter2str(mjDGetter* mjdg)
{
if (MJDATA_GET_PTR(ctrl) == mjdg)
return "ctrl";
else if (MJDATA_GET_PTR(qpos) == mjdg)
return "qpos";
else if (MJDATA_GET_PTR(qvel) == mjdg)
return "qvel";
else if (MJDATA_GET_PTR(qacc) == mjdg)
return "qacc";
else if (MJDATA_GET_PTR(qfrc_inverse) == mjdg)
return "qfrc_inverse";
else if (MJDATA_GET_PTR(qfrc_applied) == mjdg)
return "qfrc_applied";
else {
printf("Error: Bad getter %p", (void*) mjdg);
exit(EXIT_FAILURE);
}
}
void mjDGetters_print(const mjDGetters* mjdg)
{
for (size_t i = 0; i < mjdg->len; i++)
printf("%s %s", i ? "," : "", mjd_getter2str(mjdg->fs[i]));
printf("\n");
}
mjDGetters* mjDGetters_new(size_t len, MJDGetter_enum* attr)
{
mjDGetters* g = (mjDGetters*) mju_malloc(sizeof(mjDGetters));
g->len = len;
g->fs = (mjDGetter**) mju_malloc(len * sizeof(mjDGetter*));
for (size_t i = 0; i < len; i++)
g->fs[i] = mjd_enum2getter(attr[i]);
g->free = &mjDGetters_free;
return g;
}
// mj_warmup_t
void mj_warmup_fwd(const mjModel* m, mjData* d, int nwarmup) {
mj_forward(m, d);
// extra solver iterations to improve warmstart (qacc) at center point
for( int rep=1; rep<nwarmup; rep++ )
mj_forwardSkip(m, d, mjSTAGE_VEL, 1);
}
// mj_warmup_t
void mj_warmup_inv(const mjModel* m, mjData* d, int nwarmup) {
mj_inverse(m, d);
}
size_t mjDeriv_deriv_size(const mjModel* m, int xN, int fN)
{
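// one nv-by-nv Jacobian block per (output, input) pair; with the default setup
// (fN=1, xN=3, run once for inverse and once for forward dynamics) the combined
// buffer holds the six blocks listed in main():
// dinv/dpos, dinv/dvel, dinv/dacc, dacc/dpos, dacc/dvel, dacc/dfrc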
int nv = m->nv;
return fN*xN*nv*nv;
}
// An implementation of mj_moveSkip
void mj_warmFowardSkip(const mjModel* m,
mjData* d,
mjtStage stage,
int n,
mjtNum* warmstart)
{
mju_copy(d->qacc_warmstart, warmstart, m->nv);
mj_forwardSkip(m, d, stage, n);
}
// An implementation of mj_moveSkip
void mj_invSkip(const mjModel* m,
mjData* d,
mjtStage stage,
int n,
mjtNum* warmstart)
{
mj_inverseSkip(m, d, stage, n);
}
void perturb_fwd_inv(mjDeriv* deriv,
int xk,
int threadid,
int i,
mj_moveSkip move)
{
mjtNum* warmstart = deriv->warmstart;
mjtStage stage = deriv->stages[xk];
mjtNum eps = deriv->eps;
const mjModel* m = deriv->m;
mjData* d = deriv->d[threadid];
// select the state/control vector to perturb in the thread-local mjData, and keep its unperturbed value from dmain
mjtNum* target = deriv->xs->fs[xk](d);
const mjtNum originali = deriv->xs->fs[xk](deriv->dmain)[i];
// perturb selected target
target[i] += eps;
// evaluate forward or inverse dynamics (forward reuses the center warmstart)
(*move)(m, d, stage, 1, warmstart);
// undo perturbation
target[i] = originali;
}
// perturb_t interface
void perturb_fwd(mjDeriv* deriv,
int xk,
int threadid,
int i)
{
perturb_fwd_inv(deriv, xk, threadid, i, &mj_warmFowardSkip);
}
// perturb_t interface
void perturb_inv(mjDeriv* deriv,
int xk,
int threadid,
int i)
{
perturb_fwd_inv(deriv, xk, threadid, i, &mj_invSkip);
}
void perturb_fwd_inv_pos(mjDeriv* deriv,
int xk,
int threadid,
int i,
mj_moveSkip* move)
{
mjtNum* warmstart = deriv->warmstart;
mjtStage stage = deriv->stages[xk];
mjtNum eps = deriv->eps;
const mjModel* m = deriv->m;
mjData* d = deriv->d[threadid];
const mjData* dmain = deriv->dmain;
// get joint id for this dof
int jid = m->dof_jntid[i];
// get quaternion address and dof position within quaternion (-1: not in quaternion)
int quatadr = -1, dofpos = 0;
if( m->jnt_type[jid]==mjJNT_BALL )
{
quatadr = m->jnt_qposadr[jid];
dofpos = i - m->jnt_dofadr[jid];
}
else if( m->jnt_type[jid]==mjJNT_FREE && i>=m->jnt_dofadr[jid]+3 )
{
quatadr = m->jnt_qposadr[jid] + 3;
dofpos = i - m->jnt_dofadr[jid] - 3;
}
// apply quaternion or simple perturbation
if( quatadr>=0 )
{
mjtNum angvel[3] = {0,0,0};
angvel[dofpos] = eps;
mju_quatIntegrate(d->qpos+quatadr, angvel, 1);
}
else
d->qpos[m->jnt_qposadr[jid] + i - m->jnt_dofadr[jid]] += eps;
// evaluate dynamics, with center warmstart
move(m, d, stage, 1, warmstart);
// undo perturbation
mju_copy(d->qpos, dmain->qpos, m->nq);
}
// perturb_t interface
void perturb_fwd_pos(mjDeriv* deriv,
int xk,
int threadid,
int i)
{
perturb_fwd_inv_pos(deriv, xk, threadid, i, &mj_warmFowardSkip);
}
// perturb_t interface
void perturb_inv_pos(mjDeriv* deriv,
int xk,
int threadid,
int i)
{
perturb_fwd_inv_pos(deriv, xk, threadid, i, &mj_invSkip);
}
perturb_t mjd_enum2perturber(MJDGetter_enum attr, int isforward)
{
if (isforward) {
if (MJDGetter_qpos == attr)
return &perturb_fwd_pos;
else
return &perturb_fwd;
} else {
if (MJDGetter_qpos == attr)
return &perturb_inv_pos;
else
return &perturb_inv;
}
}
perturb_t* mjPerturb_new(size_t len, MJDGetter_enum* attrs, int isforward)
{
perturb_t* perturbers = (perturb_t*) mju_malloc(len * sizeof(perturb_t));
for (int i = 0; i < len; i++)
perturbers[i] = mjd_enum2perturber(attrs[i], isforward);
return perturbers;
}
void mjPerturb_free(perturb_t* perturbers)
{
mju_free(perturbers);
}
mjtStage mjd_enum2stage(MJDGetter_enum attr)
{
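// the returned stage tells mj_forwardSkip/mj_inverseSkip which results from the
// center point can be reused: perturbing qpos invalidates everything
// (mjSTAGE_NONE), perturbing qvel keeps position-dependent terms (mjSTAGE_POS),
// and perturbing forces/accelerations keeps position- and velocity-dependent
// terms (mjSTAGE_VEL)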
switch (attr)
{
case MJDGetter_qpos:
return mjSTAGE_NONE;
case MJDGetter_qvel:
return mjSTAGE_POS;
default:
return mjSTAGE_VEL;
}
}
mjtStage* mjStages_new(size_t len, MJDGetter_enum* attrs)
{
mjtStage* stages = (mjtStage*) mju_malloc(len * sizeof(mjtStage));
for (int i = 0; i < len; i++)
stages[i] = mjd_enum2stage(attrs[i]);
return stages;
}
void mjStages_print(mjtStage* stages, size_t len)
{
for (size_t i = 0; i < len; i++)
printf("%s%d", i ? ", " : "", stages[i]);
printf("\n");
}
void mjStages_free(mjtStage* stages)
{
mju_free(stages);
}
void mjDeriv_compute(mjDeriv* deriv, int threadid)
{
mjData* d = deriv->d[threadid];
const mjModel* m = deriv->m;
const mjData* dmain = deriv->dmain;
mjDGetters* fs = deriv->fs;
mjDGetters* xs = deriv->xs;
int nthread = deriv->nthread;
mjtNum eps = deriv->eps;
int nv = m->nv;
// allocate stack space for result at center
mjMARKSTACK
mjtNum* center = mj_stackAlloc(d, nv);
mjtNum* warmstart = mj_stackAlloc(d, nv);
// prepare static schedule: range of derivative columns to be computed by this thread
int chunk = (m->nv + nthread-1) / nthread;
int istart = threadid * chunk;
int iend = mjMIN(istart + chunk, m->nv);
// copy state and control from dmain to thread-specific d
mj_copyStateCtrlData(d, m, dmain);
// run full computation at center point (usually faster than copying dmain)
(*deriv->warmer)(m, d, deriv->nwarmup);
for (int fk=0; fk < fs->len; fk++) {
// select target vector and original vector for force or acceleration derivative
mjtNum* output = fs->fs[fk](d); // d->qacc
// save output for center point and warmstart (needed in forward only)
mju_copy(center, output, nv);
mju_copy(warmstart, d->qacc_warmstart, nv);
for (int xk=0; xk < xs->len; xk++) {
deriv->warmstart = warmstart;
for( int i=istart; i<iend; i++ ) {
(deriv->perturbers[xk])(deriv, xk, threadid, i);
// store column i of d(output)/d(x_xk); xk is reversed so the sub-blocks within
// each output block are stored in reverse order of xs (with the default setup:
// d/dpos, d/dvel, then d/dfrc or d/dacc, matching the layout used by checkderiv)
for( int j=0; j<nv; j++ ) {
size_t idx = (fk*xs->len + xs->len-xk-1)*nv*nv + j*nv + i;
deriv->deriv[idx] = (output[j] - center[j])/eps;
}
}
}
}
mjFREESTACK
}
void mjDeriv_print(mjDeriv* mjd)
{
printf("mjd->m->nv: %d\n", mjd->m->nv);
printf("mjd->dmain->nefc: %d\n", mjd->dmain->nefc);
printf("mjd->deriv: %p\n", (void*) mjd->deriv);
printf("mjd->fs: "); mjDGetters_print(mjd->fs);
printf("mjd->xs: "); mjDGetters_print(mjd->xs);
printf("mjd->stages:"); mjStages_print(mjd->stages, mjd->xs->len);
printf("mjd->eps: %f\n", mjd->eps);
printf("mjd->nthread: %d\n", mjd->nthread);
printf("mjd->nwarmup: %d\n", mjd->nwarmup);
printf("mjd->warmer: %s\n", mjd->warmer == mj_warmup_fwd ? "fwd" :
(mjd->warmer == mj_warmup_inv ? "inv" : "error"));
}
void mjDeriv_compute_mp(mjDeriv* deriv)
{
int nthread = deriv->nthread;
const mjModel* m = deriv->m;
// set up OpenMP (if not enabled, this does nothing)
omp_set_dynamic(0);
omp_set_num_threads(nthread);
//mjData** d = deriv->d;
mjData* d[MAXTHREAD];
for( int n=0; n<nthread; n++ ) {
mjData* dn = mj_makeData(m);
d[n] = dn;
if ( d[n] == NULL ) {
perror("Unable to allocate memory\n");
exit(EXIT_FAILURE);
}
}
deriv->d = d;
// run worker threads in parallel if OpenMP is enabled
#pragma omp parallel for schedule(static)
for( int n=0; n<nthread; n++ ) {
(*deriv->compute)(deriv, n);
}
for( int n=0; n<nthread; n++ )
mj_deleteData(d[n]);
}
void mjDeriv_free(mjDeriv* mjderiv)
{
mju_free(mjderiv);
}
mjDeriv* mjDeriv_new(const mjModel* m, const mjData* dmain, mjtNum* deriv,
mjDGetters* fs, mjDGetters* xs, perturb_t* perturbers,
mjtStage* stages,
mj_warmup_t* warmer, int nwarmup, double eps, int nthread)
{
mjDeriv* mjd = (mjDeriv*) mju_malloc(sizeof(mjDeriv));
mjd->m = m;
mjd->dmain = dmain;
mjd->deriv = deriv;
mjd->fs = fs;
mjd->xs = xs;
mjd->perturbers = perturbers;
mjd->nwarmup = nwarmup;
mjd->warmer = warmer;
mjd->stages = stages;
mjd->nthread = nthread;
// mjd->d = (mjData**) mju_malloc(nthread * sizeof(mjData*));
mjd->eps = eps;
mjd->compute = &mjDeriv_compute;
mjd->compute_mp = &mjDeriv_compute_mp;
mjd->free = &mjDeriv_free;
return mjd;
}
void mjDeriv_free_default(mjDeriv* mjderiv)
{
// mju_free(mjderiv->d);
mju_free(mjderiv->stages);
mju_free(mjderiv->perturbers);
(*mjderiv->xs->free)(mjderiv->xs);
(*mjderiv->fs->free)(mjderiv->fs);
mju_free(mjderiv);
}
mjDeriv* mjDeriv_new_default(const mjModel* m, const mjData* dmain, mjtNum* deriv,
int isforward, int nwarmup, double eps, int nthread)
{
size_t fN = 1;
size_t xN = 3;
mjDeriv* mjd = (mjDeriv*) mju_malloc(sizeof(mjDeriv));
mjd->m = m;
mjd->dmain = dmain;
mjd->deriv = deriv;
MJDGetter_enum fs_attr[1] = { MJDGetter_qacc };
if (isforward) {
fs_attr[0] = MJDGetter_qacc ;
} else {
fs_attr[0] = MJDGetter_qfrc_inverse;
}
mjDGetters* fs = mjDGetters_new(fN, fs_attr);
mjd->fs = fs;
MJDGetter_enum xs_attr[3] = { MJDGetter_qfrc_applied, MJDGetter_qvel, MJDGetter_qpos };
if (isforward) {
xs_attr[0] = MJDGetter_qfrc_applied;
} else {
xs_attr[0] = MJDGetter_qacc;
}
mjDGetters* xs = mjDGetters_new(xN, xs_attr);
perturb_t* perturbers = (perturb_t*) mju_malloc(xN * sizeof(perturb_t));
if (isforward) {
perturbers[0] = &perturb_fwd;
perturbers[1] = &perturb_fwd;
perturbers[2] = &perturb_fwd_pos;
mjd->nwarmup = nwarmup;
mjd->warmer = &mj_warmup_fwd;
} else {
perturbers[0] = &perturb_inv;
perturbers[1] = &perturb_inv;
perturbers[2] = &perturb_inv_pos;
mjd->nwarmup = 0;
mjd->warmer = &mj_warmup_inv;
}
mjd->xs = xs;
mjd->perturbers = perturbers;
mjd->stages = (mjtStage*) mju_malloc(xN * sizeof(mjtStage));
mjd->stages[0] = mjSTAGE_VEL;
mjd->stages[1] = mjSTAGE_POS;
mjd->stages[2] = mjSTAGE_NONE;
mjd->nthread = nthread;
// mjd->d = (mjData**) mju_malloc(nthread * sizeof(mjData*));
mjd->eps = eps;
mjd->compute = &mjDeriv_compute;
mjd->compute_mp = &mjDeriv_compute_mp;
mjd->free = &mjDeriv_free_default;
return mjd;
}
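// log10 of the relative L1 norm of a residual: log10(||residual||_1 / ||base||_1),
// with both norms clamped from below by mjMINVAL to avoid division by zero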
double relnorm(mjtNum* residual, mjtNum* base, int n)
{
mjtNum L1res = 0, L1base = 0;
for( int i=0; i<n; i++ )
{
L1res += mju_abs(residual[i]);
L1base += mju_abs(base[i]);
}
return (double) mju_log10(mju_max(mjMINVAL,L1res/mju_max(mjMINVAL,L1base)));
}
// names of residuals for accuracy check
const char* accuracy[8] = {
"G2*F2 - I ",
"G2 - G2' ",
"G1 - G1' ",
"F2 - F2' ",
"G1 + G2*F1",
"G0 + G2*F0",
"F1 + F2*G1",
"F0 + F2*G0"
};
// check accuracy of derivatives using known mathematical identities
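// forward dynamics qacc = f(qpos,qvel,qfrc) and inverse dynamics
// qfrc_inverse = g(qpos,qvel,qacc) are inverse maps in their last argument, so
// differentiating g(q,v,f(q,v,u)) = u gives G2*F2 = I, G0 + G2*F0 = 0 and
// G1 + G2*F1 = 0 (and symmetrically F0 + F2*G0 = 0, F1 + F2*G1 = 0); G2 and F2
// are also expected to be (near) symmetric, so every residual below should be
// small and its relnorm() strongly negative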
void checkderiv(const mjModel* m, mjtNum* deriv, mjtNum error[8])
{
mjData* d = mj_makeData(m);
int nv = m->nv;
// allocate space
mjMARKSTACK
mjtNum* mat = mj_stackAlloc(d, nv*nv);
// get pointers to derivative matrices
mjtNum* G0 = deriv; // dinv/dpos
mjtNum* G1 = deriv + nv*nv; // dinv/dvel
mjtNum* G2 = deriv + 2*nv*nv; // dinv/dacc = dqfrc_inverse / dacc
mjtNum* F0 = deriv + 3*nv*nv; // dacc/dpos
mjtNum* F1 = deriv + 4*nv*nv; // dacc/dvel
mjtNum* F2 = deriv + 5*nv*nv; // dacc/dfrc = dacc / dfrc_applied
// G2*F2 - I
mju_mulMatMat(mat, G2, F2, nv, nv, nv);
for( int i=0; i<nv; i++ )
mat[i*(nv+1)] -= 1;
error[0] = relnorm(mat, G2, nv*nv);
// G2 - G2'
mju_transpose(mat, G2, nv, nv);
mju_sub(mat, mat, G2, nv*nv);
error[1] = relnorm(mat, G2, nv*nv);
// G1 - G1'
mju_transpose(mat, G1, nv, nv);
mju_sub(mat, mat, G1, nv*nv);
error[2] = relnorm(mat, G1, nv*nv);
// F2 - F2'
mju_transpose(mat, F2, nv, nv);
mju_sub(mat, mat, F2, nv*nv);
error[3] = relnorm(mat, F2, nv*nv);
// G1 + G2*F1
mju_mulMatMat(mat, G2, F1, nv, nv, nv);
mju_addTo(mat, G1, nv*nv);
error[4] = relnorm(mat, G1, nv*nv);
// G0 + G2*F0
mju_mulMatMat(mat, G2, F0, nv, nv, nv);
mju_addTo(mat, G0, nv*nv);
error[5] = relnorm(mat, G0, nv*nv);
// F1 + F2*G1
mju_mulMatMat(mat, F2, G1, nv, nv, nv);
mju_addTo(mat, F1, nv*nv);
error[6] = relnorm(mat, F1, nv*nv);
// F0 + F2*G0
mju_mulMatMat(mat, F2, G0, nv, nv, nv);
mju_addTo(mat, F0, nv*nv);
error[7] = relnorm(mat, F0, nv*nv);
mjFREESTACK
mj_deleteData(d);
}
int main(int argc, char** argv)
{
// global variables: internal
const int MAXEPOCH = 100; // maximum number of epochs
mjtNum* deriv = 0; // dynamics derivatives (6*nv*nv):
// dinv/dpos, dinv/dvel, dinv/dacc, dacc/dpos, dacc/dvel, dacc/dfrc
int nthread = 0;
int niter = 30; // fixed number of solver iterations for finite-differencing
int nwarmup = 3; // center point repetitions to improve warmstart
int nepoch = 20; // number of timing epochs
int nstep = 500; // number of simulation steps per epoch
double eps = 1e-6; // finite-difference epsilon
// print help if not enough arguments
if( argc<2 )
{
printf("\n Arguments: modelfile [nthread niter nwarmup nepoch nstep eps]\n\n");
return 1;
}
// default nthread = number of logical cores (usually optimal)
nthread = omp_get_num_procs();
// get numeric command-line arguments
if( argc>2 )
sscanf(argv[2], "%d", &nthread);
if( argc>3 )
sscanf(argv[3], "%d", &niter);
if( argc>4 )
sscanf(argv[4], "%d", &nwarmup);
if( argc>5 )
sscanf(argv[5], "%d", &nepoch);
if( argc>6 )
sscanf(argv[6], "%d", &nstep);
if( argc>7 )
sscanf(argv[7], "%lf", &eps);
// check number of threads
if( nthread<1 || nthread>MAXTHREAD )
{
printf("nthread must be between 1 and %d\n", MAXTHREAD);
return 1;
}
// check number of epochs
if( nepoch<1 || nepoch>MAXEPOCH )
{
printf("nepoch must be between 1 and %d\n", MAXEPOCH);
return 1;
}
// activate and load model
mj_activate("mjkey.txt");
mjModel* m = 0;
if( strlen(argv[1])>4 && !strcmp(argv[1]+strlen(argv[1])-4, ".mjb") )
m = mj_loadModel(argv[1], NULL);
else
m = mj_loadXML(argv[1], NULL, NULL, 0);
if( !m )
{
printf("Could not load modelfile '%s'\n", argv[1]);
return 1;
}
// print arguments
#if defined(_OPENMP)
printf("\nnthread : %d (OpenMP)\n", nthread);
#else
printf("\nnthread : %d (serial)\n", nthread);
#endif
printf("niter : %d\n", niter);
printf("nwarmup : %d\n", nwarmup);
printf("nepoch : %d\n", nepoch);
printf("nstep : %d\n", nstep);
printf("eps : %g\n\n", eps);
// make mjData: main, per-thread
mjData* dmain = mj_makeData(m);
int nv = m->nv;
deriv = alloc_deriv(m);
mjDeriv* mjderiv_inv = mjDeriv_new_default(m, dmain, deriv, 0, nwarmup, eps, nthread);
mjDeriv* mjderiv_fwd = mjDeriv_new_default(m, dmain, deriv + 3*nv*nv, 1, nwarmup, eps, nthread);
mjDeriv* mj_derivs[2] = {mjderiv_inv, mjderiv_fwd};
// save solver options
int save_iterations = m->opt.iterations;
mjtNum save_tolerance = m->opt.tolerance;
// allocate statistics
int nefc = 0;
double cputm[MAXEPOCH][2];
mjtNum error[MAXEPOCH][8];
// run epochs, collect statistics
for( int epoch=0; epoch<nepoch; epoch++ )
{
// set solver options for main simulation
m->opt.iterations = save_iterations;
m->opt.tolerance = save_tolerance;
// advance main simulation for nstep
for( int i=0; i<nstep; i++ )
mj_step(m, dmain);
// count number of active constraints
nefc += dmain->nefc;
// set solver options for finite differences
m->opt.iterations = niter;
m->opt.tolerance = 0;
// test forward and inverse
for( int isforward=0; isforward<2; isforward++ ) {
mjDeriv* mjderiv = mj_derivs[isforward];
// start timer for this pass
double starttm = omp_get_wtime();
(*mjderiv->compute_mp)(mjderiv);
// record duration in ms
cputm[epoch][isforward] = 1000*(omp_get_wtime() - starttm);
}
// check derivatives
checkderiv(m, deriv, error[epoch]);
}
// compute statistics
double mcputm[2] = {0,0}, merror[8] = {0,0,0,0,0,0,0,0};
for( int epoch=0; epoch<nepoch; epoch++ )
{
mcputm[0] += cputm[epoch][0];
mcputm[1] += cputm[epoch][1];
for( int ie=0; ie<8; ie++ )
merror[ie] += error[epoch][ie];
}
// print sizes, timing, accuracy
printf("sizes : nv %d, nefc %d\n\n", m->nv, nefc/nepoch);
printf("inverse : %.2f ms\n", mcputm[0]/nepoch);
printf("forward : %.2f ms\n\n", mcputm[1]/nepoch);
printf("accuracy: log10(residual L1 relnorm)\n");
printf("------------------------------------\n");
for( int ie=0; ie<8; ie++ )
printf(" %s : %.2g\n", accuracy[ie], merror[ie]/nepoch);
printf("\n");
// shut down
for (int isforward = 0; isforward < 2; isforward++) {
mjDeriv* mjderiv = mj_derivs[isforward];
(*mjderiv->free)(mjderiv);
}
mju_free(deriv);
mj_deleteData(dmain);
mj_deleteModel(m);
mj_deactivate();
return 0;
}
|
par1.c
|
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define N 100
int main (int argc, char *argv[])
{
double start,end;
int array[N], i, j, count, num, randindex;
int b[5];
omp_set_num_threads(3);
FILE *fptr;
fptr = fopen("A.txt", "w");
// fill the array with random values and write them to A.txt (serial)
for(i=0;i<N;i++){
array[i]= rand() % 100;
fprintf(fptr, "%d ", array[i]);
}
fclose(fptr);
// pick 5 values at random positions to search for
for(i=1;i<6;i++){
randindex= (rand()+i)%N;
b[i-1] = array[randindex];
}
start = omp_get_wtime();
for(i=0;i<5;i++){
count = 0;
num = b[i];
// count occurrences of num in parallel; the reduction avoids a data race on count
// and the separate index j keeps the parallel loop from clobbering the outer i
#pragma omp parallel for reduction(+:count)
for(j=0;j<N;j++)
{
if(array[j]==num)
count++;
}
printf("Occurrence of %d is: %d\n", num, count);
}
end = omp_get_wtime() - start;
printf("Time = %.6g\n",end);
return 0;
}
|
prettyfunc.c
|
// Test the handling of pretty function
// There should be a hidden variable declaration inserted under the closest enclosing scope
// Liao 2013-1-10
int main()
{
int i=100,sum=0;
#pragma omp parallel
{
__PRETTY_FUNCTION__;
}
return 0;
}
|
fox_floats_timer_caching_omp_fileIO_benchmark.c
|
/* fox_floats_timer_caching_omp_fileIO_benchmark.c -- uses Fox's algorithm to multiply two square matrices
*
* Implementation of parallel matrix multiplication:
* LaTeX: $C_{i,j} = \sum_{k} A_{i,k}B_{k,j}$
*
* Input:
* Input Matrix file name: A.dat, B.dat
*
* Output:
* Output Matrix file name: C.dat
* Output Sub-matrices file name: SubMatrices.dat
*
* Notes:
* 1. Assumes the number of processes is a perfect square
* 2. The array member of the matrices is statically allocated
*
* See Chap 7, pp. 113 & ff and pp. 125 & ff in PPMPI
*/
/* Compiler command:
* mpiicc -O3 -qopenmp -qopt-report-phase=vec -qopt-report=3 fox_floats_timer_caching_omp_fileIO_benchmark.c
* -o fox_floats_timer_caching_omp_fileIO_benchmark
*
* Run command:
* mpirun -n 4 ./fox_floats_timer_caching_omp_fileIO_benchmark
*/
/* Header files */
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <mpi.h>
#include <omp.h>
// define problem scale, matrix row/col size
#define PROBLEM_SCALE 1024
// define whether or not to print matrices in the command line
#define PRINT_A 0
#define PRINT_B 0
#define PRINT_C 0
#define PRINT_LOCAL_A 0
#define PRINT_LOCAL_B 0
#define PRINT_LOCAL_C 0
// define float precision, 4 byte single-precision float or 8 byte double-precision float
#define FLOAT double
#define FLOAT_MPI MPI_DOUBLE
// Define the number of OpenMP threads used per MPI process
#define NUM_THREADS 2
// Define threads affinity "scatter" or "compact"
#define AFFINITY "KMP_AFFINITY = compact"
/* Type define structure of process grid */
typedef struct {
int p; /* Total number of processes */
MPI_Comm comm; /* Communicator for entire grid */
MPI_Comm row_comm; /* Communicator for my row */
MPI_Comm col_comm; /* Communicator for my col */
int q; /* Order of grid */
int my_row; /* My row number */
int my_col; /* My column number */
int my_rank; /* My rank in the grid comm */
} GRID_INFO_T;
/* Type define structure of local matrix */
#define MAX 2097152 // Maximum number of elements in the array that store the local matrix (2^21)
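// Note: this must hold an n_bar x n_bar block; e.g. with PROBLEM_SCALE 1024 and
// 4 processes (q = 2) each local matrix holds (1024/2)^2 = 262144 entries, well
// under MAX. Since sqrt(2^21) is about 1448, larger blocks need a bigger MAX.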
typedef struct {
int n_bar;
#define Order(A) ((A)->n_bar) // macro with parameters: order of the local matrix
FLOAT entries[MAX];
#define Entry(A,i,j) (*(((A)->entries) + ((A)->n_bar)*(i) + (j))) // macro with parameters: row-major array dereference
} LOCAL_MATRIX_T;
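// Entry(A,i,j) is plain row-major indexing: ((A)->entries)[Order(A)*i + j].
// local_B is filled with the transpose of its block of B, so the inner product
// in Local_matrix_multiply can read Entry(local_A,i,k) and Entry(local_B,j,k)
// from contiguous memory.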
/* Function Declarations */
LOCAL_MATRIX_T* Local_matrix_allocate(int n_bar);
void Free_local_matrix(LOCAL_MATRIX_T** local_A);
void Read_matrix_A(char* prompt, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid, int n); // Read matrix A from a file
void Read_matrix_B(char* prompt, LOCAL_MATRIX_T* local_B, // for contiguous memory access, local A(i,k)*B(k,j) = A(i,k)*B^{T}(j,k)
GRID_INFO_T* grid, int n); // Read matrix B from a file
void Print_matrix_A(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid, int n); // Print matrix A in the command line
void Print_matrix_B(char* title, LOCAL_MATRIX_T* local_B, // Special print function for local matrix B^{T}(j,k)
GRID_INFO_T* grid, int n); // Print matrix B in the command line
void Print_matrix_C(char* title, LOCAL_MATRIX_T* local_C,
GRID_INFO_T* grid, int n); // Print matrix C in the command line
void Set_to_zero(LOCAL_MATRIX_T* local_A);
void Local_matrix_multiply(LOCAL_MATRIX_T* local_A,
LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C);
void Build_matrix_type(LOCAL_MATRIX_T* local_A);
MPI_Datatype local_matrix_mpi_t;
LOCAL_MATRIX_T* temp_mat; // global LOCAL_MATRIX_T* type pointer
void Print_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid);
void Print_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Special print function for local matrix B^{T}(j,k)
GRID_INFO_T* grid);
void Print_local_matrices_C(char* title, LOCAL_MATRIX_T* local_B,
GRID_INFO_T* grid);
void Write_matrix_C(char* title, LOCAL_MATRIX_T* local_C,
GRID_INFO_T* grid, int n); // Write matrix multiplication to a file
void Write_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid); // Write local matrix A to a file
void Write_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Special print function for local matrix B^{T}(j,k)
GRID_INFO_T* grid); // Write local matrix B to a file
void Write_local_matrices_C(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid); // Write local matrix C to a file
/*********************************************************/
int main(int argc, char* argv[]) {
FILE *fp;
int p;
int my_rank;
GRID_INFO_T grid;
LOCAL_MATRIX_T* local_A;
LOCAL_MATRIX_T* local_B;
LOCAL_MATRIX_T* local_C;
int n;
int n_bar;
double timer_start;
double timer_end;
int content;
int i;
int j;
void Setup_grid(GRID_INFO_T* grid);
void Fox(int n, GRID_INFO_T* grid, LOCAL_MATRIX_T* local_A,
LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C);
// Matrix Generator
fp = fopen("A.dat", "w"); // Generate and print matrix A into a file
for (i = 0; i < PROBLEM_SCALE; i++) {
for (j = 0; j < PROBLEM_SCALE; j++)
if(i == j){
fprintf(fp,"%d ", 1);
}
else {
fprintf(fp,"%d ", 0);
}
fprintf(fp,"\n");
}
fclose(fp);
fp = fopen("B.dat", "w"); // Generate and print matrix B into a file
for (i = 0; i < PROBLEM_SCALE; i++){
for (j = 0; j < PROBLEM_SCALE; j++)
fprintf(fp,"%d ", (i*PROBLEM_SCALE)+j);
fprintf(fp, "\n");
}
fclose(fp);
// SPMD mode starts from here (processes fork from here)
MPI_Init(&argc, &argv); // MPI initializing
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator
// Initial OpenMP Environment
omp_set_num_threads(NUM_THREADS);
kmp_set_defaults(AFFINITY);
Setup_grid(&grid); // Set up Processess grid
if (my_rank == 0) {
fp = fopen("A.dat","r");
n = 0;
while((content = fgetc(fp)) != EOF)
{
//printf("fgetc = %d\n", content);
if(content != 0x20 && content != 0x0A) n++;
}
fclose(fp);
n = (int) sqrt((double) n);
printf("We read the order of the matrices from A.dat is\n %d\n", n);
// while(fgetc(fp) != EOF) n++;
// printf("What's the order of the matrices?\n");
// scanf("%d", &n); // Overall Matrix's Order
}
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD); // MPI broadcast the overall matrix's order
n_bar = n/grid.q; // \bar n is the local matrix's order
local_A = Local_matrix_allocate(n_bar); // Allocate local matrix A
Order(local_A) = n_bar; // Local matrix A's order
Read_matrix_A("Read A from A.dat", local_A, &grid, n); // Read local matrices A from process 0 by using stdin, and send them to each process (Procedure)
if (PRINT_A == 1)
Print_matrix_A("We read A =", local_A, &grid, n);// Print local matrices A from process 0 by using stdout, and send them to each process (Procedure)
local_B = Local_matrix_allocate(n_bar); // Allocate local matrix
Order(local_B) = n_bar; // Local matrix B's order
Read_matrix_B("Read B from B.dat", local_B, &grid, n); // Read local matrix B as it's local transpose from process 0 by using stdin, and send them to each process (Procedure)
if (PRINT_B == 1)
Print_matrix_B("We read B =", local_B, &grid, n);// Print local matrix B as it's local transpose from process 0 by using stdout, and send them to each process (Procedure)
Build_matrix_type(local_A); // Build local_A's MPI matrix data type
temp_mat = Local_matrix_allocate(n_bar); // Allocate temporary matrix of order n $\time$ n
local_C = Local_matrix_allocate(n_bar); // Allocate matrix local_C
Order(local_C) = n_bar; // Set matrix local_C's order
MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier
timer_start = MPI_Wtime(); // Get the MPI wall time
Fox(n, &grid, local_A, local_B, local_C); // FOX parallel matrix multiplication Algorithm implement function
timer_end = MPI_Wtime(); // Get the MPI wall time
MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier
Write_matrix_C("Write C into the C.dat", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result)
if (PRINT_C == 1)
Print_matrix_C("The product is", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result)
Write_local_matrices_A("Write split of local matrix A into local_A.dat",
local_A, &grid); // Write local matrix A into file
if (PRINT_LOCAL_A == 1)
Print_local_matrices_A("Split of local matrix A",
local_A, &grid); // Print matrix A split in processess
Write_local_matrices_B("Write split of local matrix B into local_B.dat",
local_B, &grid); // Write local matrix B into file, special for row-major storage
if (PRINT_LOCAL_B == 1)
Print_local_matrices_B("Split of local matrix B",
local_B, &grid); // Print matrix B split in processess, special for row-major storage
Write_local_matrices_C("Write split of local matrix C into local_C.dat",
local_C, &grid); // Print matrix C split in processess
if (PRINT_LOCAL_C == 1)
Print_local_matrices_C("Split of local matrix C",
local_C, &grid); // Print matrix C split in processess
Free_local_matrix(&local_A); // Free local matrix local_A
Free_local_matrix(&local_B); // Free local matrix local_B
Free_local_matrix(&local_C); // Free local matrix local_C
if(my_rank == 0)
printf("Parallel Fox Matrix Multiplication Elapsed time:\n %30.20E seconds\n", timer_end-timer_start);
MPI_Finalize(); // MPI finalize, processes join and resource recycle
return 0;
} /* main */
/*********************************************************/
void Setup_grid(
GRID_INFO_T* grid /* out */) {
int old_rank;
int dimensions[2];
int wrap_around[2];
int coordinates[2];
int free_coords[2];
/* Set up Global Grid Information */
MPI_Comm_size(MPI_COMM_WORLD, &(grid->p));
MPI_Comm_rank(MPI_COMM_WORLD, &old_rank);
/* We assume p is a perfect square */ // but what if it's not a perfect square
grid->q = (int) sqrt((double) grid->p);
dimensions[0] = dimensions[1] = grid->q;
/* We want a circular shift in second dimension. */
/* Don't care about first */
wrap_around[0] = wrap_around[1] = 1;
MPI_Cart_create(MPI_COMM_WORLD, 2, dimensions,
wrap_around, 1, &(grid->comm));
MPI_Comm_rank(grid->comm, &(grid->my_rank));
MPI_Cart_coords(grid->comm, grid->my_rank, 2,
coordinates);
grid->my_row = coordinates[0];
grid->my_col = coordinates[1];
/* Set up row communicators */
free_coords[0] = 0;
free_coords[1] = 1;
MPI_Cart_sub(grid->comm, free_coords,
&(grid->row_comm));
/* Set up column communicators */
free_coords[0] = 1;
free_coords[1] = 0;
MPI_Cart_sub(grid->comm, free_coords,
&(grid->col_comm));
} /* Setup_grid */
/*********************************************************/
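/* Fox's algorithm, q = sqrt(p) stages:
* at stage s, the process in grid column (my_row + s) mod q broadcasts its
* block of A along its grid row; every process multiplies the received block
* of A by its current block of B and accumulates into local_C; then the blocks
* of B are circularly shifted up one grid row along the column communicator.
* After q stages each local_C holds its block of the product C = A*B.
*/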
void Fox(
int n /* in */,
GRID_INFO_T* grid /* in */,
LOCAL_MATRIX_T* local_A /* in */,
LOCAL_MATRIX_T* local_B /* in */,
LOCAL_MATRIX_T* local_C /* out */) {
LOCAL_MATRIX_T* temp_A; /* Storage for the sub- */
/* matrix of A used during */
/* the current stage */
int stage;
int bcast_root;
int n_bar; /* n/sqrt(p) */
int source;
int dest;
MPI_Status status;
n_bar = n/grid->q;
Set_to_zero(local_C);
/* Calculate addresses for row circular shift of B */
source = (grid->my_row + 1) % grid->q;
dest = (grid->my_row + grid->q - 1) % grid->q;
/* Set aside storage for the broadcast block of A */
temp_A = Local_matrix_allocate(n_bar);
for (stage = 0; stage < grid->q; stage++) {
bcast_root = (grid->my_row + stage) % grid->q;
if (bcast_root == grid->my_col) { // Process P_{ii} broadcasts A_{ii} in the process grid's row communicator
MPI_Bcast(local_A, 1, local_matrix_mpi_t,
bcast_root, grid->row_comm);
Local_matrix_multiply(local_A, local_B,
local_C);
} else { // temp_A is a buffer for process P_{ij} to store A_{ij}
MPI_Bcast(temp_A, 1, local_matrix_mpi_t,
bcast_root, grid->row_comm);
Local_matrix_multiply(temp_A, local_B,
local_C);
}
MPI_Sendrecv_replace(local_B, 1, local_matrix_mpi_t, // MPI send and receive with single buffer
dest, 0, source, 0, grid->col_comm, &status); // Circular shift of process grid B's row, after local multiplication operation
} /* for */
} /* Fox */
/*********************************************************/
LOCAL_MATRIX_T* Local_matrix_allocate(int local_order) {
LOCAL_MATRIX_T* temp;
temp = (LOCAL_MATRIX_T*) malloc(sizeof(LOCAL_MATRIX_T));
return temp;
} /* Local_matrix_allocate */
/*********************************************************/
void Free_local_matrix(
LOCAL_MATRIX_T** local_A_ptr /* in/out */) {
free(*local_A_ptr);
} /* Free_local_matrix */
/*********************************************************/
/* Read and distribute matrix for matrix A:
* foreach global row of the matrix,
* foreach grid column
* read a block of n_bar floats on process 0
* and send them to the appropriate process.
*/
void Read_matrix_A(
char* prompt /* in */,
LOCAL_MATRIX_T* local_A /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
FILE *fp;
int mat_row, mat_col;
int grid_row, grid_col;
int dest;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) { // Process 0 reads the matrix from A.dat and sends blocks to the other processes
fp = fopen("A.dat","r");
temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT));
printf("%s\n", prompt);
fflush(stdout);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_A);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &dest);
if (dest == 0) {
for (mat_col = 0; mat_col < Order(local_A); mat_col++)
fscanf(fp, "%lf",
(local_A->entries)+mat_row*Order(local_A)+mat_col);
/* scanf("%lf",
(local_A->entries)+mat_row*Order(local_A)+mat_col);
*/
} else {
for(mat_col = 0; mat_col < Order(local_A); mat_col++)
fscanf(fp,"%lf", temp + mat_col);
// scanf("%lf", temp + mat_col);
MPI_Send(temp, Order(local_A), FLOAT_MPI, dest, 0,
grid->comm);
}
}
}
free(temp);
fclose(fp);
} else { // Other processes receive their matrix blocks from process 0
for (mat_row = 0; mat_row < Order(local_A); mat_row++)
MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A),
FLOAT_MPI, 0, 0, grid->comm, &status);
}
} /* Read_matrix */
/*********************************************************/
/* Read and distribute matrix for local matrix B's transpose:
* foreach global row of the matrix,
* foreach grid column
* read a block of n_bar floats on process 0
* and send them to the appropriate process.
*/
void Read_matrix_B(
char* prompt /* in */,
LOCAL_MATRIX_T* local_B /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
FILE *fp;
int mat_row, mat_col;
int grid_row, grid_col;
int dest;
int coords[2];
FLOAT *temp;
MPI_Status status;
if (grid->my_rank == 0) { // Process 0 reads the matrix from B.dat and sends blocks to the other processes
fp = fopen("B.dat","r");
temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
printf("%s\n", prompt);
fflush(stdout);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_B);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &dest);
if (dest == 0) { // process 0 (local)
for (mat_col = 0; mat_col < Order(local_B); mat_col++)
fscanf(fp, "%lf",
(local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and columns in local_B, for column-major storage
/* scanf("%lf",
(local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage
*/
/* scanf("%lf",
(local_A->entries)+mat_row*Order(local_A)+mat_col); */
} else {
for(mat_col = 0; mat_col < Order(local_B); mat_col++)
fscanf(fp, "%lf", temp + mat_col);
// scanf("%lf", temp + mat_col);
MPI_Send(temp, Order(local_B), FLOAT_MPI, dest, 0,
grid->comm);
}
}
}
free(temp);
fclose(fp);
} else { // Other processes receive their matrix blocks from process 0
temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); // switch rows and columns in local_B, for column-major storage
for (mat_col = 0; mat_col < Order(local_B); mat_col++) {
MPI_Recv(temp, Order(local_B),
FLOAT_MPI, 0, 0, grid->comm, &status); // switch rows and columns in local_B, for column-major storage
for(mat_row = 0; mat_row < Order(local_B); mat_row++)
Entry(local_B, mat_row, mat_col) = *(temp + mat_row); // switch rows and columns in local_B, for column-major storage
/* MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A),
FLOAT_MPI, 0, 0, grid->comm, &status); */
}
free(temp);
}
} /* Read_matrix_B */
/*********************************************************/
/* Receive and Print Matrix A:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
void Print_matrix_A(
char* title /* in */,
LOCAL_MATRIX_T* local_A /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
int mat_row, mat_col;
int grid_row, grid_col;
int source;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) {
temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT));
printf("%s\n", title);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_A);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &source);
if (source == 0) {
for(mat_col = 0; mat_col < Order(local_A); mat_col++)
printf("%20.15E ", Entry(local_A, mat_row, mat_col));
} else {
MPI_Recv(temp, Order(local_A), FLOAT_MPI, source, 0,
grid->comm, &status);
for(mat_col = 0; mat_col < Order(local_A); mat_col++)
printf("%20.15E ", temp[mat_col]);
}
}
printf("\n");
}
free(temp);
} else {
for (mat_row = 0; mat_row < Order(local_A); mat_row++)
MPI_Send(&Entry(local_A, mat_row, 0), Order(local_A),
FLOAT_MPI, 0, 0, grid->comm);
}
} /* Print_matrix_A */
/*********************************************************/
/* Receive and Print Matrix for local matrix B's transpose:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
void Print_matrix_B(
char* title /* in */,
LOCAL_MATRIX_T* local_B /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
int mat_row, mat_col;
int grid_row, grid_col;
int source;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) {
temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
printf("%s\n", title);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_B);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &source);
if (source == 0) {
for(mat_col = 0; mat_col < Order(local_B); mat_col++)
printf("%20.15E ", Entry(local_B, mat_col, mat_row)); // switch rows and colums in local_B, for column major storage
// printf("%20.15E ", Entry(local_A, mat_row, mat_col));
} else {
MPI_Recv(temp, Order(local_B), FLOAT_MPI, source, 0,
grid->comm, &status);
for(mat_col = 0; mat_col < Order(local_B); mat_col++)
printf("%20.15E ", temp[mat_col]);
}
}
printf("\n");
}
free(temp);
} else {
temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
for (mat_col = 0; mat_col < Order(local_B); mat_col++) {
for(mat_row = 0; mat_row < Order(local_B); mat_row++)
*(temp+mat_row) = Entry(local_B, mat_row, mat_col); // switch rows and columns in local_B, for column-major storage
MPI_Send(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm);
}
free(temp);
}
} /* Print_matrix_B */
/*********************************************************/
/* Receive and Print Matrix C:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
void Print_matrix_C(
char* title /* in */,
LOCAL_MATRIX_T* local_C /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
int mat_row, mat_col;
int grid_row, grid_col;
int source;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) {
temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT));
printf("%s\n", title);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_C);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &source);
if (source == 0) {
for(mat_col = 0; mat_col < Order(local_C); mat_col++)
printf("%20.15E ", Entry(local_C, mat_row, mat_col));
} else {
MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0,
grid->comm, &status);
for(mat_col = 0; mat_col < Order(local_C); mat_col++)
printf("%20.15E ", temp[mat_col]);
}
}
printf("\n");
}
free(temp);
} else {
for (mat_row = 0; mat_row < Order(local_C); mat_row++)
MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C),
FLOAT_MPI, 0, 0, grid->comm);
}
} /* Print_matrix_C */
/*********************************************************/
/* Receive and Write Matrix C into a file:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
void Write_matrix_C(
char* title /* in */,
LOCAL_MATRIX_T* local_C /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
FILE *fp;
int mat_row, mat_col;
int grid_row, grid_col;
int source;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) {
fp = fopen("C.dat", "w+");
temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT));
printf("%s\n", title);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_C);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &source);
if (source == 0) {
for(mat_col = 0; mat_col < Order(local_C); mat_col++)
fprintf(fp, "%20.15E ", Entry(local_C, mat_row, mat_col));
// printf("%20.15E ", Entry(local_A, mat_row, mat_col));
} else {
MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0,
grid->comm, &status);
for(mat_col = 0; mat_col < Order(local_C); mat_col++)
fprintf(fp, "%20.15E ", temp[mat_col]);
// printf("%20.15E ", temp[mat_col]);
}
}
fprintf(fp,"\n");
}
free(temp);
fclose(fp);
} else {
for (mat_row = 0; mat_row < Order(local_C); mat_row++)
MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C),
FLOAT_MPI, 0, 0, grid->comm);
}
} /* Write_matrix_C */
/*********************************************************/
/*
* Set local matrix's element to zero
*/
void Set_to_zero(
LOCAL_MATRIX_T* local_A /* out */) {
int i, j;
for (i = 0; i < Order(local_A); i++)
for (j = 0; j < Order(local_A); j++)
Entry(local_A,i,j) = 0.0E0;
} /* Set_to_zero */
/*********************************************************/
void Build_matrix_type(
LOCAL_MATRIX_T* local_A /* in */) {
MPI_Datatype temp_mpi_t;
int block_lengths[2];
MPI_Aint displacements[2];
MPI_Datatype typelist[2];
MPI_Aint start_address;
MPI_Aint address;
MPI_Type_contiguous(Order(local_A)*Order(local_A),
FLOAT_MPI, &temp_mpi_t); // Creates a contiguous datatype
/*
Synopsis
int MPI_Type_contiguous(int count,
MPI_Datatype oldtype,
MPI_Datatype *newtype)
Input Parameters
count
replication count (nonnegative integer)
oldtype
old datatype (handle)
*/
block_lengths[0] = block_lengths[1] = 1;
typelist[0] = MPI_INT;
typelist[1] = temp_mpi_t;
MPI_Address(local_A, &start_address); // Gets the address of a location in caller's memory
MPI_Address(&(local_A->n_bar), &address);
/*
Synopsis
int MPI_Address(const void *location, MPI_Aint *address)
Input Parameters
location
location in caller memory (choice)
Output Parameters
address
address of location (address integer)
*/
displacements[0] = address - start_address;
MPI_Address(local_A->entries, &address);
displacements[1] = address - start_address;
MPI_Type_struct(2, block_lengths, displacements,
typelist, &local_matrix_mpi_t); // Creates a struct datatype
/*
Synopsis
int MPI_Type_struct(int count,
const int *array_of_blocklengths,
const MPI_Aint *array_of_displacements,
const MPI_Datatype *array_of_types,
MPI_Datatype *newtype)
Input Parameters
count
number of blocks (integer) -- also number of entries in arrays array_of_types , array_of_displacements and array_of_blocklengths
array_of_blocklengths
number of elements in each block (array)
array_of_displacements
byte displacement of each block (array)
array_of_types
type of elements in each block (array of handles to datatype objects)
Output Parameters
newtype
new datatype (handle)
*/
MPI_Type_commit(&local_matrix_mpi_t); // Commits the datatype
/*
Synopsis
int MPI_Type_commit(MPI_Datatype *datatype)
Input Parameters
datatype
datatype (handle)
*/
} /* Build_matrix_type */
/*********************************************************/
/* local matrix multiplication function
* with OpenMP thread acceleration
*/
void Local_matrix_multiply(
LOCAL_MATRIX_T* local_A /* in */,
LOCAL_MATRIX_T* local_B /* in */,
LOCAL_MATRIX_T* local_C /* out */) {
int i, j, k;
// int my_rank;
// MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator
#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) // Threads acceleration upgrade, parallel task split
for (i = 0; i < Order(local_A); i++) {
// printf("Current in the Fox Kernel:\n my process id is %d, my thread id is %d\n",my_rank,omp_get_thread_num());
for (j = 0; j < Order(local_A); j++)
for (k = 0; k < Order(local_B); k++)
Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and columns in local_B, for column-major storage
+ Entry(local_A,i,k)*Entry(local_B,j,k); // contiguous memory access, local matrix multiplication A(i,k)*B^T(j,k)
/* Entry(local_C,i,j) = Entry(local_C,i,j)
+ Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper
*/
}
} /* Local_matrix_multiply */
/*********************************************************/
/* Receive and Print Local Matrix A:
* Process 0 prints its own local matrix local_A
* Other processes send their local matrix local_A to process 0
* And process 0 receives and prints local matrix local_A from the other processes
*/
void Print_local_matrices_A(
char* title /* in */,
LOCAL_MATRIX_T* local_A /* in */,
GRID_INFO_T* grid /* in */) {
int coords[2];
int i, j;
int source;
MPI_Status status;
// print by process No.0 in process mesh
if (grid->my_rank == 0) {
printf("%s\n", title);
printf("Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_A); i++) {
for (j = 0; j < Order(local_A); j++)
printf("%20.15E ", Entry(local_A,i,j));
printf("\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
printf("Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
printf("%20.15E ", Entry(temp_mat,i,j));
printf("\n");
}
}
fflush(stdout);
} else {
MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Print_local_matrices_A */
/*********************************************************/
/* Receive and Print Local Matrix B (stored as its transpose):
 * Process 0 prints its own local matrix local_B,
 * the other processes send their local_B to process 0,
 * and process 0 receives and prints each of them in turn.
 */
void Print_local_matrices_B(
char* title /* in */,
LOCAL_MATRIX_T* local_B /* in */,
GRID_INFO_T* grid /* in */) {
int coords[2];
int i, j;
int source;
MPI_Status status;
// print by process No.0 in process mesh
if (grid->my_rank == 0) {
printf("%s\n", title);
printf("Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_B); i++) {
for (j = 0; j < Order(local_B); j++)
printf("%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage
printf("\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
printf("Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
printf("%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage
printf("\n");
}
}
fflush(stdout);
} else {
MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Print_local_matrices_B */
/*********************************************************/
/* Receive and Print Local Matrix C:
 * Process 0 prints its own local matrix local_C,
 * the other processes send their local_C to process 0,
 * and process 0 receives and prints each of them in turn.
 */
void Print_local_matrices_C(
char* title /* in */,
LOCAL_MATRIX_T* local_C /* in */,
GRID_INFO_T* grid /* in */) {
int coords[2];
int i, j;
int source;
MPI_Status status;
// print by process No.0 in process mesh
if (grid->my_rank == 0) {
printf("%s\n", title);
printf("Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_C); i++) {
for (j = 0; j < Order(local_C); j++)
printf("%20.15E ", Entry(local_C,i,j));
printf("\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
printf("Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
printf("%20.15E ", Entry(temp_mat,i,j));
printf("\n");
}
}
fflush(stdout);
} else {
MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Print_local_matrices_C */
/*********************************************************/
/* Receive and Write Local Matrix A:
 * Process 0 writes its own local matrix local_A to local_A.dat,
 * the other processes send their local_A to process 0,
 * and process 0 receives and writes each of them in turn.
 */
void Write_local_matrices_A(
char* title /* in */,
LOCAL_MATRIX_T* local_A /* in */,
GRID_INFO_T* grid /* in */) {
FILE *fp;
int coords[2];
int i, j;
int source;
MPI_Status status;
// print by process No.0 in process mesh
if (grid->my_rank == 0) {
fp = fopen("local_A.dat","w+");
printf("%s\n", title);
fprintf(fp,"Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_A); i++) {
for (j = 0; j < Order(local_A); j++)
fprintf(fp,"%20.15E ", Entry(local_A,i,j));
fprintf(fp, "\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
fprintf(fp, "%20.15E ", Entry(temp_mat,i,j));
fprintf(fp, "\n");
}
}
fflush(stdout);
fclose(fp);
} else {
MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Write_local_matrices_A */
/*********************************************************/
/* Receive and Write Local Matrix B (stored as its transpose):
 * Process 0 writes its own local matrix local_B to local_B.dat,
 * the other processes send their local_B to process 0,
 * and process 0 receives and writes each of them in turn.
 */
void Write_local_matrices_B(
char* title /* in */,
LOCAL_MATRIX_T* local_B /* in */,
GRID_INFO_T* grid /* in */) {
FILE *fp;
int coords[2];
int i, j;
int source;
MPI_Status status;
// print by process No.0 in process mesh
if (grid->my_rank == 0) {
fp = fopen("local_B.dat","w+");
printf("%s\n", title);
fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_B); i++) {
for (j = 0; j < Order(local_B); j++)
fprintf(fp, "%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage
fprintf(fp, "\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
fprintf(fp, "%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage
fprintf(fp, "\n");
}
}
fflush(stdout);
fclose(fp);
} else {
MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Write_local_matrices_B */
/*********************************************************/
/* Receive and Write Local Matrix C:
 * Process 0 writes its own local matrix local_C to local_C.dat,
 * the other processes send their local_C to process 0,
 * and process 0 receives and writes each of them in turn.
 */
void Write_local_matrices_C(
char* title /* in */,
LOCAL_MATRIX_T* local_C /* in */,
GRID_INFO_T* grid /* in */) {
FILE *fp;
int coords[2];
int i, j;
int source;
MPI_Status status;
// print by process No.0 in process mesh
if (grid->my_rank == 0) {
fp = fopen("local_C.dat","w+");
printf("%s\n", title);
fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_C); i++) {
for (j = 0; j < Order(local_C); j++)
fprintf(fp, "%20.15E ", Entry(local_C,i,j));
fprintf(fp, "\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
fprintf(fp, "%20.15E ", Entry(temp_mat,i,j));
fprintf(fp, "\n");
}
}
fflush(stdout);
fclose(fp);
} else {
MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Write_local_matrices_C */
|
stream.c
|
// Copyright 2009-2021 NTESS. Under the terms
// of Contract DE-NA0003525 with NTESS, the U.S.
// Government retains certain rights in this software.
//
// Copyright (c) 2009-2021, NTESS
// All rights reserved.
//
// Portions are copyright of other developers:
// See the file CONTRIBUTORS.TXT in the top level directory
// the distribution for more information.
//
// This file is part of the SST software package. For license
// information, see the LICENSE file in the top level directory of the
// distribution.
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char* argv[]) {
const int LENGTH = 2000;
printf("Allocating arrays of size %d elements.\n", LENGTH);
double* a = (double*) malloc(sizeof(double) * LENGTH);
double* b = (double*) malloc(sizeof(double) * LENGTH);
double* c = (double*) malloc(sizeof(double) * LENGTH);
printf("Done allocating arrays.\n");
int i;
for(i = 0; i < LENGTH; ++i) {
a[i] = i;
b[i] = LENGTH - i;
c[i] = 0;
}
printf("Perfoming the fast_c compute loop...\n");
#pragma omp parallel for
for(i = 0; i < LENGTH; ++i) {
//printf("issuing a write to: %llu (fast_c)\n", ((unsigned long long int) &fast_c[i]));
c[i] = 2.0 * a[i] + 1.5 * b[i];
}
double sum = 0;
for(i = 0; i < LENGTH; ++i) {
sum += c[i];
}
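    // With LENGTH = 2000, c[i] = 2*i + 1.5*(LENGTH - i) = 0.5*i + 3000, so the
    // expected value is 0.5*(1999*2000/2) + 3000*2000 = 999500 + 6000000
    // = 6999500; a handy sanity check on the parallel compute loop above.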
printf("Sum of arrays is: %f\n", sum);
printf("Freeing arrays...\n");
free(a);
free(b);
free(c);
printf("Done.\n");
}
|
truedepscalar-orig-yes.c
|
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: [email protected], [email protected], [email protected],
[email protected], [email protected])
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
// loop-carried true dependence between "tmp = ..." and "... = tmp".
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char* argv[])
{
int i;
int tmp;
tmp = 10;
int len=100;
int a[100];
#pragma omp parallel for
for (i=0;i<len;i++)
{
a[i] = tmp;
tmp =a[i]+i;
}
printf("a[50]=%d\n", a[50]);
return 0;
}
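/* Reference note (not part of the benchmark): the loop above carries a true
 * dependence on tmp, so under "#pragma omp parallel for" the values stored in
 * a[] become timing-dependent. A race-free sketch that reproduces the
 * sequential result uses the closed form of the recurrence
 *   tmp_0 = 10, tmp_{i+1} = tmp_i + i  ==>  a[i] = 10 + i*(i-1)/2,
 * for example:
 *   #pragma omp parallel for
 *   for (i = 0; i < len; i++)
 *     a[i] = 10 + i*(i-1)/2;   // the sequential run prints a[50] = 1235
 */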
|
GB_binop__hypot_fp32.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__hypot_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__hypot_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__hypot_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__hypot_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__hypot_fp32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__hypot_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__hypot_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__hypot_fp32)
// C=scalar+B GB (_bind1st__hypot_fp32)
// C=scalar+B' GB (_bind1st_tran__hypot_fp32)
// C=A+scalar GB (_bind2nd__hypot_fp32)
// C=A'+scalar GB (_bind2nd_tran__hypot_fp32)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = hypotf (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = hypotf (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_HYPOT || GxB_NO_FP32 || GxB_NO_HYPOT_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__hypot_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__hypot_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__hypot_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__hypot_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__hypot_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__hypot_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__hypot_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__hypot_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__hypot_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = Bx [p] ;
Cx [p] = hypotf (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__hypot_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = Ax [p] ;
Cx [p] = hypotf (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = hypotf (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__hypot_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = hypotf (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__hypot_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
LBLT.h
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
#include <immintrin.h>
int binary_search_right_boundary_kernel_LBLT(const int *row_pointer,
const int key_input,
const int size)
{
int start = 0;
int stop = size - 1;
int median;
int key_median;
while (stop >= start)
{
median = (stop + start) / 2;
key_median = row_pointer[median];
if (key_input >= key_median)
start = median + 1;
else
stop = median - 1;
}
return start;
}
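/* Worked example (illustrative values only): with
 *   row_pointer = {0, 2, 2, 5, 7} and size = 5,
 * binary_search_right_boundary_kernel_LBLT(row_pointer, 2, 5) returns 3, the
 * index of the first entry strictly greater than the key (an upper bound).
 * spmvLBLT below subtracts 1 from this result, which maps nonzero index 2 to
 * row 2, since row_pointer[2] <= 2 < row_pointer[3].
 */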
//int main(int argc, char ** argv)
int spmvLBLT(int new_row,int nthreads,int m,int n,int nnzR,int* coo_new_rowidx,int *coo_new_matrix_ptr,
int *coo_new_matrix_colidx,double* coo_new_matrix_value,int* csrSplitter_yid,int* Yid,
int* Start1,int* End1,int* label,int* Start2,int* End2)
{
int stridennz = ceil((double)nnzR/(double)nthreads);
//#pragma omp parallel for
for (int tid = 0; tid <= nthreads; tid++)
{
// compute partition boundaries by partition of size stride
int boundary_yid = tid * stridennz;
// clamp partition boundaries to [0, nnzR]
boundary_yid = boundary_yid > nnzR ? nnzR : boundary_yid;
// binary search
csrSplitter_yid[tid] = binary_search_right_boundary_kernel_LBLT(coo_new_matrix_ptr, boundary_yid, new_row + 1) - 1;
//printf("csrSplitter_yid[%d] is %d\n", tid, csrSplitter_yid[tid]);
}
int *Apinter = (int *)malloc(nthreads * sizeof(int));
memset(Apinter, 0, nthreads *sizeof(int) );
// number of rows handled by each thread
//#pragma omp parallel for
for (int tid = 0; tid < nthreads; tid++)
{
Apinter[tid] = csrSplitter_yid[tid+1] - csrSplitter_yid[tid];
//printf("A[%d] is %d\n", tid, Apinter[tid]);
}
int *Bpinter = (int *)malloc(nthreads * sizeof(int));
memset(Bpinter, 0, nthreads *sizeof(int) );
// number of nonzeros handled by each thread
//#pragma omp parallel for
for (int tid = 0; tid < nthreads; tid++)
{
int num = 0;
for (int u = csrSplitter_yid[tid]; u < csrSplitter_yid[tid+1]; u++)
{
num += coo_new_matrix_ptr[ u + 1 ] - coo_new_matrix_ptr[u];
}
Bpinter[tid] = num;
//printf("B [%d]is %d\n",tid, Bpinter[tid]);
}
memset (Yid, 0, sizeof(int) * nthreads);
// for each thread
int flag = -2;
//#pragma omp parallel for
for (int tid = 0; tid < nthreads; tid++)
{
//printf("tid = %i, csrSplitter: %i -> %i\n", tid, csrSplitter_yid[tid], csrSplitter_yid[tid+1]);
if (csrSplitter_yid[tid + 1] - csrSplitter_yid[tid] == 0 && tid != 0)
{
Yid[tid] = csrSplitter_yid[tid];
flag = 1;
}
else if (flag == 1)
{
Yid[tid] = csrSplitter_yid[tid];
flag = -2;
}
else
{
Yid[tid] = -1;
}
//printf("Yid[%d] = %d\n",tid,Yid[tid]);
}
// averaging by rows, applied when spreading work over multiple rows
//int sto = nthreads > nnzR ? nthreads : nnzR;
memset (Start1, 0, sizeof(int) * nthreads);
memset (End1, 0, sizeof(int) * nthreads);
memset (label, 0, sizeof(int) * nthreads);
int start1, search1 = 0;
//#pragma omp parallel for
for (int tid = 0;tid < nthreads;tid++)
{
if (Apinter[tid] == 0)
{
if(search1 == 0)
{
start1 = tid;
search1 = 1;
}
}
if(search1 == 1 && Apinter[tid]!= 0)
{
int nntz = floor((double)Apinter[tid] / (double)(tid-start1+1));
if( nntz != 0)
{
for(int i = start1;i <= tid;i++)
{
label[i] = i;
}
}
else if((tid-start1+1) >= Apinter[tid] && Apinter[tid] != 0)
{
for(int i = start1;i <= tid;i++)
{
label[i] = i;
}
}
int mntz = Apinter[tid] - (nntz * (tid-start1));
//start and end
int n = start1;
Start1[n] = csrSplitter_yid[tid];
End1[n] = Start1[n] + nntz;
//printf("start1a[%d] = %d, end1a[%d] = %d\n",n,Start1[n],n, End1[n]);
for (int p = start1 + 1; p <= tid ; p++)
{
if(p == tid)
{
Start1[p] = End1[p - 1];
End1[p] = Start1[p] + mntz;
}
else
{
Start1[p] = End1[p-1];
End1[p] = Start1[p] + nntz;
}
//printf("start1b[%d] = %d, end1b[%d] = %d\n",n,Start1[n],n, End1[n]);
}
search1 = 0;
}
}
// averaging by nonzeros, used when the number of rows is smaller than the number of threads
memset (Start2, 0, sizeof(int) * nthreads);
memset (End2, 0, sizeof(int) * nthreads);
int start2, search2 = 0;
//#pragma omp parallel for
for (int tid = 0;tid < nthreads;tid++)
{
if (Bpinter[tid] == 0)
{
if(search2 == 0)
{
start2 = tid;
search2 = 1;
}
}
if(search2 == 1 && Bpinter[tid]!= 0)
{
int nntz2 = floor((double)Bpinter[tid] / (double)(tid-start2+1));
int mntz2 = Bpinter[tid] - (nntz2 * (tid-start2));
//start and end
int n = start2;
for (int i = start2; i >= 0; i--)
{
Start2[n] += Bpinter[i];
End2[n] = Start2[n] + nntz2;
}
for (n = start2 + 1; n < tid ; n++)
{
Start2[n] = End2[n-1];
End2[n] = Start2[n] + nntz2;
}
if (n == tid)
{
Start2[n] = End2[n - 1];
End2[n] = Start2[n] + mntz2;
}
search2 = 0;
}
}
return 0;
}
|
utils.h
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file utils.h
 * \brief Basic utility functions.
*/
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_
#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>
#include <memory>
#include <vector>
#include <type_traits>
#include <utility>
#include <random>
#include <string>
#include <thread>
#include <algorithm>
#include <functional>
#include "../operator/mxnet_op.h"
namespace mxnet {
namespace common {
/*!
* \brief IndPtr should be non-negative, in non-decreasing order, start with 0
* and end with value equal with size of indices.
*/
struct csr_indptr_check {
template<typename DType, typename IType>
MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr,
const nnvm::dim_t end, const nnvm::dim_t idx_size) {
if (indptr[i+1] < 0 || indptr[i+1] < indptr[i] ||
(i == 0 && indptr[i] != 0) ||
(i == end - 1 && indptr[end] != idx_size))
*out = kCSRIndPtrErr;
}
};
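// Illustrative example (not part of the original header): for a 3-row CSR
// matrix storing 5 values, indptr = {0, 2, 2, 5} passes this check
// (non-negative, non-decreasing, starts at 0, ends at idx_size = 5), while
// indptr = {0, 3, 2, 5} fails the indptr[i+1] < indptr[i] test and sets
// kCSRIndPtrErr.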
/*!
* \brief Indices should be non-negative, less than the number of columns
* and in ascending order per row.
*/
struct csr_idx_check {
template<typename DType, typename IType, typename RType>
MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
const RType* indptr, const nnvm::dim_t ncols) {
for (RType j = indptr[i]; j < indptr[i+1]; j++) {
if (idx[j] >= ncols || idx[j] < 0 ||
(j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) {
*out = kCSRIdxErr;
break;
}
}
}
};
/*!
* \brief Indices of RSPNDArray should be non-negative,
* less than the size of first dimension and in ascending order
*/
struct rsp_idx_check {
template<typename DType, typename IType>
MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
const nnvm::dim_t end, const nnvm::dim_t nrows) {
if ((i < end && idx[i+1] <= idx[i])
|| idx[i] < 0 || idx[i] >= nrows)
*out = kRSPIdxErr;
}
};
template<typename xpu>
void CheckFormatWrapper(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check);
/*!
* \brief Check the validity of CSRNDArray.
* \param rctx Execution context.
* \param input Input NDArray of CSRStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template<typename xpu>
void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check) {
using namespace op::mxnet_op;
CHECK_EQ(input.storage_type(), kCSRStorage)
<< "CheckFormatCSRImpl is for CSRNDArray";
const TShape shape = input.shape();
const TShape idx_shape = input.aux_shape(csr::kIdx);
const TShape indptr_shape = input.aux_shape(csr::kIndPtr);
const TShape storage_shape = input.storage_shape();
if ((shape.ndim() != 2) ||
(idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
(indptr_shape[0] != shape[0] + 1) ||
(idx_shape[0] != storage_shape[0])) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
DType* err = err_cpu.dptr<DType>();
*err = kCSRShapeErr;
});
return;
}
if (full_check) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
NDArray ret_xpu = NDArray(mshadow::Shape1(1),
rctx.get_ctx(), false, err_cpu.type_flag_);
TBlob val_xpu = ret_xpu.data();
Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
input.aux_data(csr::kIndPtr).dptr<RType>(),
indptr_shape[0] - 1, idx_shape[0]);
// no need to check indices if indices are empty
if (idx_shape[0] != 0) {
Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
input.aux_data(csr::kIdx).dptr<IType>(),
input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]);
}
mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
val_xpu.get<xpu, 1, DType>(s), s);
});
});
});
}
}
/*!
* \brief Check the validity of RowSparseNDArray.
* \param rctx Execution context.
* \param input Input NDArray of RowSparseStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template<typename xpu>
void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check) {
using namespace op::mxnet_op;
CHECK_EQ(input.storage_type(), kRowSparseStorage)
<< "CheckFormatRSPImpl is for RSPNDArray";
const TShape idx_shape = input.aux_shape(rowsparse::kIdx);
if (idx_shape[0] != input.storage_shape()[0]) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
DType* err = err_cpu.dptr<DType>();
*err = kRSPShapeErr;
});
return;
}
if (idx_shape[0] == 0) {
return;
}
if (full_check) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
NDArray ret_xpu = NDArray(mshadow::Shape1(1),
rctx.get_ctx(), false, err_cpu.type_flag_);
TBlob val_xpu = ret_xpu.data();
Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0],
val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(),
idx_shape[0] - 1, input.shape()[0]);
mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
val_xpu.get<xpu, 1, DType>(s), s);
});
});
}
}
template<typename xpu>
void CheckFormatImpl(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check) {
int stype = input.storage_type();
if (stype == kCSRStorage) {
CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
} else if (stype == kRowSparseStorage) {
CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
} else if (stype == kDefaultStorage) {
// no-op for default storage
} else {
LOG(FATAL) << "Unknown storage type " << stype;
}
}
/*! \brief Pick rows specified by user input index array from a row sparse ndarray
* and save them in the output sparse ndarray.
*/
template<typename xpu>
void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s,
const NDArray& input_nd,
const TBlob& idx_data,
const OpReqType req,
NDArray* output_nd);
/*! \brief Casts tensor storage type to the new type.
*/
template<typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
const NDArrayStorageType stype) {
if (!vstorage.empty()) {
for (const auto& i : vstorage) {
if (i != stype) return false;
}
return true;
}
return false;
}
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype1`
 * or `stype2`. Sets boolean if both found.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool *has_both) {
if (has_both) {
*has_both = false;
}
if (!vstorage.empty()) {
uint8_t has = 0;
for (const auto i : vstorage) {
if (i == stype1) {
has |= 1;
} else if (i == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as target `stype`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype) {
if (!ndarrays.empty()) {
for (const auto& nd : ndarrays) {
if (nd.storage_type() != stype) {
return false;
}
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as targets `stype1` or `stype2`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool *has_both) {
if (has_both) {
*has_both = false;
}
if (!ndarrays.empty()) {
uint8_t has = 0;
for (const auto& nd : ndarrays) {
const NDArrayStorageType stype = nd.storage_type();
if (stype == stype1) {
has |= 1;
} else if (stype == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief get string representation of dispatch_mode */
inline std::string dispatch_mode_string(const DispatchMode x) {
switch (x) {
case DispatchMode::kFCompute:
return "fcompute";
case DispatchMode::kFComputeEx:
return "fcompute_ex";
case DispatchMode::kFComputeFallback:
return "fcompute_fallback";
case DispatchMode::kVariable:
return "variable";
case DispatchMode::kUndefined:
return "undefined";
}
return "unknown";
}
/*! \brief get string representation of storage_type */
inline std::string stype_string(const int x) {
switch (x) {
case kDefaultStorage:
return "default";
case kCSRStorage:
return "csr";
case kRowSparseStorage:
return "row_sparse";
}
return "unknown";
}
/*! \brief get string representation of device type */
inline std::string dev_type_string(const int dev_type) {
switch (dev_type) {
case Context::kCPU:
return "cpu";
case Context::kGPU:
return "gpu";
case Context::kCPUPinned:
return "cpu_pinned";
case Context::kCPUShared:
return "cpu_shared";
}
return "unknown";
}
/*! \brief get string representation of the operator stypes */
inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
const int dev_mask,
const std::vector<int>& in_attrs,
const std::vector<int>& out_attrs) {
std::ostringstream os;
os << "operator = " << attrs.op->name
<< "\ninput storage types = [";
for (const int attr : in_attrs) {
os << stype_string(attr) << ", ";
}
os << "]\n"
<< "output storage types = [";
for (const int attr : out_attrs) {
os << stype_string(attr) << ", ";
}
os << "]\n"
<< "params = {";
for (auto kv : attrs.dict) {
os << "\"" << kv.first << "\" : " << kv.second << ", ";
}
os << "}\n"
<< "context.dev_mask = " << dev_type_string(dev_mask);
return os.str();
}
/*! \brief get string representation of the operator */
inline std::string operator_string(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
std::string result = "";
std::vector<int> in_stypes;
std::vector<int> out_stypes;
in_stypes.reserve(inputs.size());
out_stypes.reserve(outputs.size());
auto xform = [](const NDArray arr) -> int { return arr.storage_type(); };
std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform);
std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform);
result += operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
return result;
}
/*! \brief log message once. Intended for storage fallback warning messages. */
inline void LogOnce(const std::string& message) {
typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore;
auto log_store = LogStore::Get();
if (log_store->find(message) == log_store->end()) {
LOG(INFO) << message;
log_store->insert(message);
}
}
/*! \brief log storage fallback event
*/
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
const int dev_mask,
const std::vector<int>* in_attrs,
const std::vector<int>* out_attrs) {
static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
if (!log) return;
const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
std::ostringstream os;
const char* warning = "\nThe operator with default storage type will be dispatched "
"for execution. You're seeing this warning message because the operator above is unable "
"to process the given ndarrays with specified storage types, context and parameter. "
"Temporary dense ndarrays are generated in order to execute the operator. "
"You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to "
"0 to suppress this warning.";
os << "\nStorage type fallback detected:\n" << op_str << warning;
LogOnce(os.str());
}
// heuristic to determine the number of threads per GPU
inline int GetNumThreadsPerGPU() {
// This is resource efficient option.
return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}
// heuristic to get number of matching colors.
// this decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
// This is resource efficient option.
int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
return std::min(num_match_color, GetNumThreadsPerGPU());
}
template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
V sum = start;
#pragma omp parallel for reduction(+:sum)
for (int i = 0; i < n; ++i) {
sum += a[i];
}
return sum;
}
/*!
* \brief
* Helper function for ParallelSort.
* DO NOT call this function directly.
* Use the interface ParallelSort instead.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len,
size_t grainsize, const Compare& comp) {
if (len < grainsize) {
std::sort(first, first+len, comp);
} else {
std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len/2, grainsize, comp);
ParallelSortHelper(first+len/2, len - len/2, grainsize, comp);
thr.join();
std::inplace_merge(first, first+len/2, first+len, comp);
}
}
/*!
* \brief
* Sort the elements in the range [first, last) into the ascending order defined by
* the comparator comp.
* If the length of the range [first, last) is greater than a certain threshold,
* the range will be recursively divided into two and assign two threads
* to sort each half range.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
const auto num = std::distance(first, last);
size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024*16));
ParallelSortHelper(first, num, grainsize, comp);
}
/*!
* \brief
* Sort the elements in the range [first, last) into ascending order.
* The elements are compared using the default < operator.
* If the length of the range [first, last) is greater than a certain threshold,
* the range will be recursively divided into two and assign two threads
* to sort each half range.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
ParallelSort(first, last, num_threads,
std::less<typename std::iterator_traits<RandomIt>::value_type>());
}
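// Usage sketch (illustrative only, assuming a std::vector<float> v):
//   ParallelSort(v.begin(), v.end(), 4);                        // ascending via operator<
//   ParallelSort(v.begin(), v.end(), 4, std::greater<float>()); // descending
// Each recursion level spawns one extra std::thread until the subrange is
// shorter than the grainsize, then falls back to std::sort and merges the
// halves with std::inplace_merge.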
/*!
* \brief Random Engine
*/
typedef std::mt19937 RANDOM_ENGINE;
/*!
* \brief Helper functions.
*/
namespace helper {
/*!
* \brief Helper for non-array type `T`.
*/
template <class T>
struct UniqueIf {
/*!
* \brief Type of `T`.
*/
using SingleObject = std::unique_ptr<T>;
};
/*!
* \brief Helper for an array of unknown bound `T`.
*/
template <class T>
struct UniqueIf<T[]> {
/*!
* \brief Type of `T`.
*/
using UnknownBound = std::unique_ptr<T[]>;
};
/*!
* \brief Helper for an array of known bound `T`.
*/
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
/*!
* \brief Type of `T`.
*/
using KnownBound = void;
};
} // namespace helper
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param args List of arguments with which an instance of `T` will be
* constructed.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs a non-array type `T`. The arguments `args` are passed to the
* constructor of `T`. The function does not participate in the overload
* resolution if `T` is an array type.
*/
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param n The size of the array to construct.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs an array of unknown bound `T`. The function does not participate
* in the overload resolution unless `T` is an array of unknown bound.
*/
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
using U = typename std::remove_extent<T>::type;
return std::unique_ptr<T>(new U[n]{});
}
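// Usage sketch (illustrative only):
//   auto obj = MakeUnique<std::string>(3, 'x'); // single object, args forwarded to the ctor
//   auto buf = MakeUnique<float[]>(16);         // value-initialized array of 16 floats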
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param args List of arguments with which an instance of `T` will be
* constructed.
*
 * Constructing an array of known bound is disallowed.
*/
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
const Context& ctx) {
static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
if (ctx.dev_mask() == cpu::kDevMask) {
return fcompute_cpu.get(op, nullptr);
} else if (ctx.dev_mask() == gpu::kDevMask) {
return fcompute_gpu.get(op, nullptr);
} else {
LOG(FATAL) << "Unknown device mask";
return nullptr;
}
}
} // namespace common
} // namespace mxnet
#endif // MXNET_COMMON_UTILS_H_
|
BlockHandlerAVX.h
|
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full licence information.
//
#pragma once
#include "BlockMultiplierPlatform.h"
#include <immintrin.h>
#include <emmintrin.h>
#include <assert.h>
#include <cstdint>
#define FOR_CNTK
#ifdef FOR_CNTK
#include "CommonMatrix.h"
#endif
namespace Microsoft { namespace MSR { namespace CNTK {
class MATH_API BlockHandlerAVX
{
private:
//USE SSE for the blocks of 8, borrowed from BlockHandlerSSE
FORCEINLINE static void kernelsse8x4(__m128i xmmRow0, __m128i xmmRow1, __m128i xmmRow2, __m128i xmmRow3,
short* B, __m128i* return1, __m128i* return2, __m128i* return3, __m128i* return4);
FORCEINLINE static void kernelavx16x4(__m256i xmmRow0B0a, __m256i xmmRow1B0a, __m256i xmmRow2B0a, __m256i xmmRow3B0a,
short* B, __m256i* return1, __m256i* return2, __m256i * return3, __m256i* return4);
FORCEINLINE static void kernelavx32x4(
__m256i xmmRow0B0a, __m256i xmmRow0B0b,
__m256i xmmRow1B0a, __m256i xmmRow1B0b,
__m256i xmmRow2B0a, __m256i xmmRow2B0b,
__m256i xmmRow3B0a, __m256i xmmRow3B0b,
short* B, __m256i* return1, __m256i* return2, __m256i * return3, __m256i* return4);
FORCEINLINE static void kernelavx64x4(
__m256i xmmRow0B0a, __m256i xmmRow0B0b, __m256i xmmRow0B0c, __m256i xmmRow0B0d,
__m256i xmmRow1B0a, __m256i xmmRow1B0b, __m256i xmmRow1B0c, __m256i xmmRow1B0d,
__m256i xmmRow2B0a, __m256i xmmRow2B0b, __m256i xmmRow2B0c, __m256i xmmRow2B0d,
__m256i xmmRow3B0a, __m256i xmmRow3B0b, __m256i xmmRow3B0c, __m256i xmmRow3B0d,
short* B, __m256i* return1, __m256i* return2, __m256i * return3, __m256i* return4);
FORCEINLINE static void kernelavx128x4(
__m256i xmmRow0B0a, __m256i xmmRow0B0b, __m256i xmmRow0B0c, __m256i xmmRow0B0d,
__m256i xmmRow0B0e, __m256i xmmRow0B0f, __m256i xmmRow0B0g, __m256i xmmRow0B0h,
__m256i xmmRow1B0a, __m256i xmmRow1B0b, __m256i xmmRow1B0c, __m256i xmmRow1B0d,
__m256i xmmRow1B0e, __m256i xmmRow1B0f, __m256i xmmRow1B0g, __m256i xmmRow1B0h,
__m256i xmmRow2B0a, __m256i xmmRow2B0b, __m256i xmmRow2B0c, __m256i xmmRow2B0d,
__m256i xmmRow2B0e, __m256i xmmRow2B0f, __m256i xmmRow2B0g, __m256i xmmRow2B0h,
__m256i xmmRow3B0a, __m256i xmmRow3B0b, __m256i xmmRow3B0c, __m256i xmmRow3B0d,
__m256i xmmRow3B0e, __m256i xmmRow3B0f, __m256i xmmRow3B0g, __m256i xmmRow3B0h,
short* B, __m256i* return1, __m256i* return2, __m256i* return3, __m256i* return4);
FORCEINLINE static void kernelsse8x1(__m128i xmmRow0,
short* B, __m128i* return1);
FORCEINLINE static void kernelavx16x1(__m256i xmmRow0B0a,
short* B, __m256i* return1 );
FORCEINLINE static void kernelavx32x1(
__m256i xmmRow0B0a, __m256i xmmRow0B0b,
short* B, __m256i* return1);
FORCEINLINE static void kernelavx64x1(
__m256i xmmRow0B0a, __m256i xmmRow0B0b, __m256i xmmRow0B0c, __m256i xmmRow0B0d,
short* B, __m256i* return1) ;
FORCEINLINE static void kernelavx128x1(
__m256i xmmRow0B0a, __m256i xmmRow0B0b, __m256i xmmRow0B0c, __m256i xmmRow0B0d,
__m256i xmmRow0B0e, __m256i xmmRow0B0f, __m256i xmmRow0B0g, __m256i xmmRow0B0h,
short* B, __m256i* return1);
//TODO: Should these be refactored somewhere else? Any BlockHandler will need access to these functions.
//Separate class with static functions? Maybe move the Block rewriting functions as well as these to a new
//static class.
static int RowToColOffsetRewrittenB(int col, int kOffset, int blockSize, int origCols);
static int RowToColOffsetRewrittenA(int row, int kOffset, int blockSize, int rowsPerBlock, int origCols);
static void DumpM256(__m256i dumpMe);
public:
typedef __m256i VectorT;
typedef int16_t ScalarAT;
typedef int16_t ScalarBT;
typedef int32_t ScalarCT;
FORCEINLINE static void HandleBlock8x4(int currBlock, int startRow, int k, int n, short* newA, short* B,
int blockCnt, __m128i* resultStorage);
FORCEINLINE static void HandleBlock32x4(int currBlock, int startRow, int k, int n, short* newA, short* B,
int blockCnt, __m256i* resultStorage);
FORCEINLINE static void HandleBlock64x4(int currBlock, int startRow, int k, int n, short* newA, short* B,
int blockCnt, __m256i* resultStorage);
FORCEINLINE static void HandleBlock128x4(int currBlock, int startRow, int k, int n, short* newA, short* B,
int blockCnt, __m256i* resultStorage, VectorT* subtractMe);
FORCEINLINE static void HandleBlock8x1(int currBlock, int startRow, int k, int n, short* newA, short* B,
int blockCnt, __m128i* resultStorage);
FORCEINLINE static void HandleBlock16x1(int currBlock, int startRow, int k, int n, short* newA, short* B,
int blockCnt, __m256i* resultStorage);
FORCEINLINE static void HandleBlock64x1(int currBlock, int startRow, int k, int n, short* newA, short* B,
int blockCnt, __m256i* resultStorage);
FORCEINLINE static void HandleBlock128x1(int currBlock, int startRow, int k, int n, short* newA, short* B,
int blockCnt, __m256i* resultStorage, VectorT* subtractMe);
FORCEINLINE static void HandleBlock16x4(int currBlock, int startRow, int k, int n, short* newA, short* B,
int blockCnt, __m256i* resultStorage);
//FORCEINLINE static void HandleBlock128x4(int currBlock, int startRow, int m, int k, int n, short* newA, short* B,
FORCEINLINE static void HandleBlock32x1(int currBlock, int startRow, int k, int n, short* newA, short* B,
int blockCnt, __m256i* resultStorage);
static VectorT* PrepareExtraB(const ScalarBT* /*prepareMe*/, int /*k*/, int /*n*/)
{
return nullptr;
}
static void FreePreparedB(VectorT* freeMe) { freeMe; assert(nullptr == freeMe); }
};
#define LOADAVX2_128x4 \
__m256i r0b0a2 = _mm256_load_si256((__m256i*)currA2); \
__m256i r0b0b2 = _mm256_load_si256((__m256i*)(currA2 + 16)); \
__m256i r0b0c2 = _mm256_load_si256((__m256i*)(currA2 + 32)); \
__m256i r0b0d2 = _mm256_load_si256((__m256i*)(currA2 + 48)); \
__m256i r0b0e2 = _mm256_load_si256((__m256i*)(currA2 + 64)); \
__m256i r0b0f2 = _mm256_load_si256((__m256i*)(currA2 + 80)); \
__m256i r0b0g2 = _mm256_load_si256((__m256i*)(currA2 + 96)); \
__m256i r0b0h2 = _mm256_load_si256((__m256i*)(currA2 + 112));\
\
__m256i r1b0a2 = _mm256_load_si256((__m256i*)(currA2 + 128));\
__m256i r1b0b2 = _mm256_load_si256((__m256i*)(currA2 + 144));\
__m256i r1b0c2 = _mm256_load_si256((__m256i*)(currA2 + 160));\
__m256i r1b0d2 = _mm256_load_si256((__m256i*)(currA2 + 176));\
__m256i r1b0e2 = _mm256_load_si256((__m256i*)(currA2 + 192));\
__m256i r1b0f2 = _mm256_load_si256((__m256i*)(currA2 + 208));\
__m256i r1b0g2 = _mm256_load_si256((__m256i*)(currA2 + 224));\
__m256i r1b0h2 = _mm256_load_si256((__m256i*)(currA2 + 240));\
\
__m256i r2b0a2 = _mm256_load_si256((__m256i*)(currA2 + 256));\
__m256i r2b0b2 = _mm256_load_si256((__m256i*)(currA2 + 272));\
__m256i r2b0c2 = _mm256_load_si256((__m256i*)(currA2 + 288));\
__m256i r2b0d2 = _mm256_load_si256((__m256i*)(currA2 + 304));\
__m256i r2b0e2 = _mm256_load_si256((__m256i*)(currA2 + 320));\
__m256i r2b0f2 = _mm256_load_si256((__m256i*)(currA2 + 336));\
__m256i r2b0g2 = _mm256_load_si256((__m256i*)(currA2 + 352));\
__m256i r2b0h2 = _mm256_load_si256((__m256i*)(currA2 + 368));\
\
__m256i r3b0a2 = _mm256_load_si256((__m256i*)(currA2 + 384));\
__m256i r3b0b2 = _mm256_load_si256((__m256i*)(currA2 + 400));\
__m256i r3b0c2 = _mm256_load_si256((__m256i*)(currA2 + 416));\
__m256i r3b0d2 = _mm256_load_si256((__m256i*)(currA2 + 432));\
__m256i r3b0e2 = _mm256_load_si256((__m256i*)(currA2 + 448));\
__m256i r3b0f2 = _mm256_load_si256((__m256i*)(currA2 + 464));\
__m256i r3b0g2 = _mm256_load_si256((__m256i*)(currA2 + 480));\
__m256i r3b0h2 = _mm256_load_si256((__m256i*)(currA2 + 496));\
#define LOADAVX2_128x1 \
__m256i r0b0a2 = _mm256_load_si256((__m256i*)currA2); \
__m256i r0b0b2 = _mm256_load_si256((__m256i*)(currA2 + 16)); \
__m256i r0b0c2 = _mm256_load_si256((__m256i*)(currA2 + 32)); \
__m256i r0b0d2 = _mm256_load_si256((__m256i*)(currA2 + 48)); \
__m256i r0b0e2 = _mm256_load_si256((__m256i*)(currA2 + 64)); \
__m256i r0b0f2 = _mm256_load_si256((__m256i*)(currA2 + 80)); \
__m256i r0b0g2 = _mm256_load_si256((__m256i*)(currA2 + 96)); \
__m256i r0b0h2 = _mm256_load_si256((__m256i*)(currA2 + 112));
#define LOADAVX_128x1 \
__m256i r0b0a = _mm256_load_si256((__m256i*)currA); \
__m256i r0b0b = _mm256_load_si256((__m256i*)(currA + 16)); \
__m256i r0b0c = _mm256_load_si256((__m256i*)(currA + 32)); \
__m256i r0b0d = _mm256_load_si256((__m256i*)(currA + 48)); \
__m256i r0b0e = _mm256_load_si256((__m256i*)(currA + 64)); \
__m256i r0b0f = _mm256_load_si256((__m256i*)(currA + 80)); \
__m256i r0b0g = _mm256_load_si256((__m256i*)(currA + 96)); \
__m256i r0b0h = _mm256_load_si256((__m256i*)(currA + 112));
#define LOADAVX_128x4 \
__m256i r0b0a = _mm256_load_si256((__m256i*)currA); \
__m256i r0b0b = _mm256_load_si256((__m256i*)(currA + 16)); \
__m256i r0b0c = _mm256_load_si256((__m256i*)(currA + 32)); \
__m256i r0b0d = _mm256_load_si256((__m256i*)(currA + 48)); \
__m256i r0b0e = _mm256_load_si256((__m256i*)(currA + 64)); \
__m256i r0b0f = _mm256_load_si256((__m256i*)(currA + 80)); \
__m256i r0b0g = _mm256_load_si256((__m256i*)(currA + 96)); \
__m256i r0b0h = _mm256_load_si256((__m256i*)(currA + 112));\
\
__m256i r1b0a = _mm256_load_si256((__m256i*)(currA + 128));\
__m256i r1b0b = _mm256_load_si256((__m256i*)(currA + 144));\
__m256i r1b0c = _mm256_load_si256((__m256i*)(currA + 160));\
__m256i r1b0d = _mm256_load_si256((__m256i*)(currA + 176));\
__m256i r1b0e = _mm256_load_si256((__m256i*)(currA + 192));\
__m256i r1b0f = _mm256_load_si256((__m256i*)(currA + 208));\
__m256i r1b0g = _mm256_load_si256((__m256i*)(currA + 224));\
__m256i r1b0h = _mm256_load_si256((__m256i*)(currA + 240));\
\
__m256i r2b0a = _mm256_load_si256((__m256i*)(currA + 256));\
__m256i r2b0b = _mm256_load_si256((__m256i*)(currA + 272));\
__m256i r2b0c = _mm256_load_si256((__m256i*)(currA + 288));\
__m256i r2b0d = _mm256_load_si256((__m256i*)(currA + 304));\
__m256i r2b0e = _mm256_load_si256((__m256i*)(currA + 320));\
__m256i r2b0f = _mm256_load_si256((__m256i*)(currA + 336));\
__m256i r2b0g = _mm256_load_si256((__m256i*)(currA + 352));\
__m256i r2b0h = _mm256_load_si256((__m256i*)(currA + 368));\
\
__m256i r3b0a = _mm256_load_si256((__m256i*)(currA + 384));\
__m256i r3b0b = _mm256_load_si256((__m256i*)(currA + 400));\
__m256i r3b0c = _mm256_load_si256((__m256i*)(currA + 416));\
__m256i r3b0d = _mm256_load_si256((__m256i*)(currA + 432));\
__m256i r3b0e = _mm256_load_si256((__m256i*)(currA + 448));\
__m256i r3b0f = _mm256_load_si256((__m256i*)(currA + 464));\
__m256i r3b0g = _mm256_load_si256((__m256i*)(currA + 480));\
__m256i r3b0h = _mm256_load_si256((__m256i*)(currA + 496));
#define LOADAVX_64x4 \
__m256i r0b0a = _mm256_load_si256((__m256i*)currA); \
__m256i r0b0b = _mm256_load_si256((__m256i*)currA + 1); \
__m256i r0b0c = _mm256_load_si256((__m256i*)currA + 2); \
__m256i r0b0d = _mm256_load_si256((__m256i*)currA + 3); \
\
__m256i r1b0a = _mm256_load_si256((__m256i*)currA + 4);\
__m256i r1b0b = _mm256_load_si256((__m256i*)currA + 5);\
__m256i r1b0c = _mm256_load_si256((__m256i*)currA + 6);\
__m256i r1b0d = _mm256_load_si256((__m256i*)currA + 7);\
\
__m256i r2b0a = _mm256_load_si256((__m256i*)currA + 8);\
__m256i r2b0b = _mm256_load_si256((__m256i*)currA + 9);\
__m256i r2b0c = _mm256_load_si256((__m256i*)currA + 10);\
__m256i r2b0d = _mm256_load_si256((__m256i*)currA + 11);\
\
__m256i r3b0a = _mm256_load_si256((__m256i*)currA + 12);\
__m256i r3b0b = _mm256_load_si256((__m256i*)currA + 13);\
__m256i r3b0c = _mm256_load_si256((__m256i*)currA + 14);\
__m256i r3b0d = _mm256_load_si256((__m256i*)currA + 15);
#define LOADAVX_64x1 \
__m256i r0b0a = _mm256_load_si256((__m256i*)currA); \
__m256i r0b0b = _mm256_load_si256((__m256i*)currA + 1); \
__m256i r0b0c = _mm256_load_si256((__m256i*)currA + 2); \
__m256i r0b0d = _mm256_load_si256((__m256i*)currA + 3);
#define LOADAVX_32x4 \
__m256i r0b0a = _mm256_load_si256((__m256i*)currA); \
__m256i r0b0b = _mm256_load_si256((__m256i*)currA + 1); \
\
__m256i r1b0a = _mm256_load_si256((__m256i*)currA + 2);\
__m256i r1b0b = _mm256_load_si256((__m256i*)currA + 3);\
\
__m256i r2b0a = _mm256_load_si256((__m256i*)currA + 4);\
__m256i r2b0b = _mm256_load_si256((__m256i*)currA + 5);\
\
__m256i r3b0a = _mm256_load_si256((__m256i*)currA + 6);\
__m256i r3b0b = _mm256_load_si256((__m256i*)currA + 7);
#define LOADAVX_32x1 \
__m256i r0b0a = _mm256_load_si256((__m256i*)currA); \
__m256i r0b0b = _mm256_load_si256((__m256i*)currA + 1);
#define LOADAVX_16x4 \
__m256i r0b0a = _mm256_load_si256((__m256i*)currA); \
__m256i r1b0a = _mm256_load_si256((__m256i*)currA + 1);\
__m256i r2b0a = _mm256_load_si256((__m256i*)currA + 2);\
__m256i r3b0a = _mm256_load_si256((__m256i*)currA + 3);
#define LOADAVX_16x1 \
__m256i r0b0a = _mm256_load_si256((__m256i*)currA);
#define LOAD_8x4 \
__m128i r0b0a = _mm_load_si128((__m128i*)currA);\
__m128i r1b0a = _mm_load_si128((__m128i*)currA + 1);\
__m128i r2b0a = _mm_load_si128((__m128i*)currA + 2);\
__m128i r3b0a = _mm_load_si128((__m128i*)currA + 3);
#define LOAD_8x1 \
__m128i r0b0a = _mm_load_si128((__m128i*)currA);
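// The LOADAVX*_<width>x<rows> macros above load one <width>-short block of the
// rewritten A matrix for each of <rows> rows into __m256i registers (__m128i
// for LOAD_8x*), named r<row>b0<part>; pointer offsets are in shorts, so each
// 256-bit load covers 16 values.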
FORCEINLINE void BlockHandlerAVX::HandleBlock8x4(int currBlock, int startRow, int k, int n, short* newA, short* B,
int blockCnt, __m128i* resultStorage)
{
    (void) blockCnt; // unused parameter; the cast silences MSVC warning C4100
int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 8, 4, k);
short* currA = &newA[aOffset];
LOAD_8x4;
for (int c = 0; c < n; ++c)
{
short* currB = &B[RowToColOffsetRewrittenB(c, currBlock, 8, n)];
__m128i accum1 = _mm_set_epi32(0, 0, 0, 0);
__m128i accum2 = _mm_set_epi32(0, 0, 0, 0);
__m128i accum3 = _mm_set_epi32(0, 0, 0, 0);
__m128i accum4 = _mm_set_epi32(0, 0, 0, 0);
kernelsse8x4(r0b0a, r1b0a, r2b0a, r3b0a,
currB, &accum1, &accum2, &accum3, &accum4);
resultStorage[RowColToOffset(0, c, n)] = _mm_add_epi32(resultStorage[RowColToOffset(0, c, n)], accum1);
resultStorage[RowColToOffset(1, c, n)] = _mm_add_epi32(resultStorage[RowColToOffset(1, c, n)], accum2);
resultStorage[RowColToOffset(2, c, n)] = _mm_add_epi32(resultStorage[RowColToOffset(2, c, n)], accum3);
resultStorage[RowColToOffset(3, c, n)] = _mm_add_epi32(resultStorage[RowColToOffset(3, c, n)], accum4);
}
}
FORCEINLINE void BlockHandlerAVX::HandleBlock8x1(int currBlock, int startRow, int k, int n, short* newA, short* B,
int /*blockCnt*/, __m128i* resultStorage)
{
int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 8, 4, k);
short* currA = &newA[aOffset];
LOAD_8x1;
for (int c = 0; c < n; ++c)
{
short* currB = &B[RowToColOffsetRewrittenB(c, currBlock, 8, n)];
__m128i accum1 = _mm_set_epi32(0, 0, 0, 0);
kernelsse8x1(r0b0a,
currB, &accum1);
resultStorage[RowColToOffset(0, c, n)] = _mm_add_epi32(resultStorage[RowColToOffset(0, c, n)], accum1);
}
}
FORCEINLINE void BlockHandlerAVX::HandleBlock16x4(int currBlock, int startRow, int k, int n, short* newA, short* B,
int /*blockCnt*/, __m256i* resultStorage)
{
int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 16, 4, k);
short* currA = &newA[aOffset];
LOADAVX_16x4;
//#pragma omp parallel for
for (int c = 0; c < n; ++c)
{
short* currB = &B[RowToColOffsetRewrittenB(c, currBlock, 16, n)];
        //The gain comes from having all the row values loaded up together
        //so that we can multiply them by each column, saving m_rowsPerBlock
        //column loads.
__m256i accum1 = _mm256_set1_epi16(0);
__m256i accum2 = _mm256_set1_epi16(0);
__m256i accum3 = _mm256_set1_epi16(0);
__m256i accum4 = _mm256_set1_epi16(0);
kernelavx16x4(r0b0a, r1b0a, r2b0a, r3b0a,
currB, &accum1, &accum2, &accum3, &accum4);
resultStorage[RowColToOffset(0, c, n)] = _mm256_add_epi32(resultStorage[RowColToOffset(0, c, n)], accum1);
resultStorage[RowColToOffset(1, c, n)] = _mm256_add_epi32(resultStorage[RowColToOffset(1, c, n)], accum2);
resultStorage[RowColToOffset(2, c, n)] = _mm256_add_epi32(resultStorage[RowColToOffset(2, c, n)], accum3);
resultStorage[RowColToOffset(3, c, n)] = _mm256_add_epi32(resultStorage[RowColToOffset(3, c, n)], accum4);
}
}
FORCEINLINE void BlockHandlerAVX::HandleBlock16x1(int currBlock, int startRow, int k, int n, short* newA, short* B,
int /*blockCnt*/, __m256i* resultStorage)
{
int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 16, 1, k);
short* currA = &newA[aOffset];
LOADAVX_16x1;
//#pragma omp parallel for
for (int c = 0; c < n; ++c)
{
short* currB = &B[RowToColOffsetRewrittenB(c, currBlock, 16, n)];
        //The gain comes from having all the row values loaded up together
        //so that we can multiply them by each column, saving m_rowsPerBlock
        //column loads.
__m256i accum1 = _mm256_set1_epi16(0);
kernelavx16x1(r0b0a, currB, &accum1);
resultStorage[RowColToOffset(0, c, n)] = _mm256_add_epi32(resultStorage[RowColToOffset(0, c, n)], accum1);
}
}
FORCEINLINE void BlockHandlerAVX::HandleBlock32x4(int currBlock, int startRow, int k, int n, short* newA, short* B,
int /*blockCnt*/, __m256i* resultStorage)
{
int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 32, 4, k);
short* currA = &newA[aOffset];
LOADAVX_32x4;
//#pragma omp parallel for
for (int c = 0; c < n; ++c)
{
short* currB = &B[RowToColOffsetRewrittenB(c, currBlock, 32, n)];
        //The gain comes from having all the row values loaded up together
        //so that we can multiply them by each column, saving m_rowsPerBlock
        //column loads.
__m256i accum1 = _mm256_set1_epi16(0);
__m256i accum2 = _mm256_set1_epi16(0);
__m256i accum3 = _mm256_set1_epi16(0);
__m256i accum4 = _mm256_set1_epi16(0);
kernelavx32x4(
r0b0a, r0b0b,
r1b0a, r1b0b,
r2b0a, r2b0b,
r3b0a, r3b0b,
currB, &accum1, &accum2, &accum3, &accum4);
resultStorage[RowColToOffset(0, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(0, c, n)], accum1);
resultStorage[RowColToOffset(1, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(1, c, n)], accum2);
resultStorage[RowColToOffset(2, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(2, c, n)], accum3);
resultStorage[RowColToOffset(3, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(3, c, n)], accum4);
}
}
FORCEINLINE void BlockHandlerAVX::HandleBlock32x1(int currBlock, int startRow, int k, int n, short* newA, short* B,
int /*blockCnt*/, __m256i* resultStorage)
{
int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 32, 1, k);
short* currA = &newA[aOffset];
LOADAVX_32x1;
//#pragma omp parallel for
for (int c = 0; c < n; ++c)
{
short* currB = &B[RowToColOffsetRewrittenB(c, currBlock, 32, n)];
__m256i accum1 = _mm256_set1_epi16(0);
kernelavx32x1(
r0b0a, r0b0b, currB, &accum1);
resultStorage[RowColToOffset(0, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(0, c, n)], accum1);
}
}
FORCEINLINE void BlockHandlerAVX::HandleBlock64x4(int currBlock, int startRow, int k, int n, short* newA, short* B,
int /*blockCnt*/, __m256i* resultStorage)
{
int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 64, 4, k);
short* currA = &newA[aOffset];
LOADAVX_64x4;
//#pragma omp parallel for
for (int c = 0; c < n; ++c)
{
short* currB = &B[RowToColOffsetRewrittenB(c, currBlock, 64, n)];
        //The gain comes from having all the row values loaded up together
        //so that we can multiply them by each column, saving m_rowsPerBlock
        //column loads.
__m256i accum1 = _mm256_set1_epi16(0);
__m256i accum2 = _mm256_set1_epi16(0);
__m256i accum3 = _mm256_set1_epi16(0);
__m256i accum4 = _mm256_set1_epi16(0);
kernelavx64x4(
r0b0a, r0b0b, r0b0c, r0b0d,
r1b0a, r1b0b, r1b0c, r1b0d,
r2b0a, r2b0b, r2b0c, r2b0d,
r3b0a, r3b0b, r3b0c, r3b0d,
currB, &accum1, &accum2, &accum3, &accum4);
resultStorage[RowColToOffset(0, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(0, c, n)], accum1);
resultStorage[RowColToOffset(1, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(1, c, n)], accum2);
resultStorage[RowColToOffset(2, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(2, c, n)], accum3);
resultStorage[RowColToOffset(3, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(3, c, n)], accum4);
}
}
FORCEINLINE void BlockHandlerAVX::HandleBlock64x1(int currBlock, int startRow, int k, int n, short* newA, short* B,
int /*blockCnt*/, __m256i* resultStorage)
{
int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 64, 4, k);
short* currA = &newA[aOffset];
LOADAVX_64x1;
//#pragma omp parallel for
for (int c = 0; c < n; ++c)
{
short* currB = &B[RowToColOffsetRewrittenB(c, currBlock, 64, n)];
        //The gain comes from having all the row values loaded up together
        //so that we can multiply them by each column, saving m_rowsPerBlock
        //column loads.
__m256i accum1 = _mm256_set1_epi16(0);
kernelavx64x1(
r0b0a, r0b0b, r0b0c, r0b0d,
currB, &accum1);
resultStorage[RowColToOffset(0, c, n)] = _mm256_add_epi32(resultStorage[RowColToOffset(0, c, n)], accum1);
}
}
FORCEINLINE void BlockHandlerAVX::HandleBlock128x4(int currBlock, int startRow, int k, int n, short* newA, short* B,
int blockCnt, __m256i* resultStorage, VectorT* /*subtractMe*/)
{
int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 128, 4, k);
int aOffset2 = RowToColOffsetRewrittenA(startRow, currBlock + 1, 128, 4, k);
short* currA = &newA[aOffset];
short* currA2 = &newA[aOffset2];
LOADAVX_128x4;
LOADAVX2_128x4;
//#pragma omp parallel for
for (int c = 0; c < n; ++c)
{
short* currB = &B[RowToColOffsetRewrittenB(c, currBlock, 128, n)];
short* currB2 = &B[RowToColOffsetRewrittenB(c, currBlock + 1, 128, n)];
        //The gain comes from having all the row values loaded up together
        //so that we can multiply them by each column, saving m_rowsPerBlock
        //column loads.
__m256i accum1 = _mm256_set1_epi16(0);
__m256i accum2 = _mm256_set1_epi16(0);
__m256i accum3 = _mm256_set1_epi16(0);
__m256i accum4 = _mm256_set1_epi16(0);
__m256i accum5 = _mm256_set1_epi16(0);
__m256i accum6 = _mm256_set1_epi16(0);
__m256i accum7 = _mm256_set1_epi16(0);
__m256i accum8 = _mm256_set1_epi16(0);
kernelavx128x4(
r0b0a, r0b0b, r0b0c, r0b0d, r0b0e, r0b0f, r0b0g, r0b0h,
r1b0a, r1b0b, r1b0c, r1b0d, r1b0e, r1b0f, r1b0g, r1b0h,
r2b0a, r2b0b, r2b0c, r2b0d, r2b0e, r2b0f, r2b0g, r2b0h,
r3b0a, r3b0b, r3b0c, r3b0d, r3b0e, r3b0f, r3b0g, r3b0h,
currB, &accum1, &accum2, &accum3, &accum4);
if (blockCnt > 1)
{
kernelavx128x4(
r0b0a2, r0b0b2, r0b0c2, r0b0d2, r0b0e2, r0b0f2, r0b0g2, r0b0h2,
r1b0a2, r1b0b2, r1b0c2, r1b0d2, r1b0e2, r1b0f2, r1b0g2, r1b0h2,
r2b0a2, r2b0b2, r2b0c2, r2b0d2, r2b0e2, r2b0f2, r2b0g2, r2b0h2,
r3b0a2, r3b0b2, r3b0c2, r3b0d2, r3b0e2, r3b0f2, r3b0g2, r3b0h2,
currB2, &accum5, &accum6, &accum7, &accum8);
}
resultStorage[RowColToOffset(0, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(0, c, n)], _mm256_add_epi32(accum1, accum5));
resultStorage[RowColToOffset(1, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(1, c, n)], _mm256_add_epi32(accum2, accum6));
resultStorage[RowColToOffset(2, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(2, c, n)], _mm256_add_epi32(accum3, accum7));
resultStorage[RowColToOffset(3, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(3, c, n)], _mm256_add_epi32(accum4, accum8));
}
}
FORCEINLINE void BlockHandlerAVX::HandleBlock128x1(int currBlock, int startRow, int k, int n, short* newA, short* B,
int blockCnt, __m256i* resultStorage, VectorT* /*subtractMe*/)
{
int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 128, 4, k);
int aOffset2 = RowToColOffsetRewrittenA(startRow, currBlock + 1, 128, 4, k);
short* currA = &newA[aOffset];
short* currA2 = &newA[aOffset2];
LOADAVX_128x1;
LOADAVX2_128x1;
//#pragma omp parallel for
for (int c = 0; c < n; ++c)
{
short* currB = &B[RowToColOffsetRewrittenB(c, currBlock, 128, n)];
short* currB2 = &B[RowToColOffsetRewrittenB(c, currBlock + 1, 128, n)];
        //The gain comes from having all the row values loaded up together
        //so that we can multiply them by each column, saving m_rowsPerBlock
        //column loads.
__m256i accum1 = _mm256_set1_epi16(0);
__m256i accum2 = _mm256_set1_epi16(0);
kernelavx128x1(
r0b0a, r0b0b, r0b0c, r0b0d, r0b0e, r0b0f, r0b0g, r0b0h,
currB, &accum1);
if (blockCnt > 1)
{
kernelavx128x1(
r0b0a2, r0b0b2, r0b0c2, r0b0d2, r0b0e2, r0b0f2, r0b0g2, r0b0h2,
                currB2, &accum2);
}
resultStorage[RowColToOffset(0, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(0, c, n)], _mm256_add_epi32(accum1, accum2));
}
}
FORCEINLINE void BlockHandlerAVX::kernelsse8x1(__m128i xmmRow0,
short* B, __m128i* return1)
{
__m128i xmmCol0 = _mm_load_si128((__m128i*)B);
__m128i result1 = _mm_madd_epi16(xmmRow0, xmmCol0);
*return1 = result1;
}
FORCEINLINE void BlockHandlerAVX::kernelsse8x4(__m128i xmmRow0, __m128i xmmRow1, __m128i xmmRow2, __m128i xmmRow3,
short* B, __m128i* return1, __m128i* return2, __m128i* return3, __m128i* return4)
{
__m128i xmmCol0 = _mm_load_si128((__m128i*)B);
__m128i result1 = _mm_madd_epi16(xmmRow0, xmmCol0);
__m128i result2 = _mm_madd_epi16(xmmRow1, xmmCol0);
__m128i result3 = _mm_madd_epi16(xmmRow2, xmmCol0);
__m128i result4 = _mm_madd_epi16(xmmRow3, xmmCol0);
*return1 = result1;
*return2 = result2;
*return3 = result3;
*return4 = result4;
}
FORCEINLINE void BlockHandlerAVX::kernelavx16x1(__m256i xmmRow0B0a,
short* B, __m256i* return1)
{
__m256i xmmCol0B0a = _mm256_load_si256((__m256i*)B);
//Result for row 0
    //Nomenclature:
    //r0b0axc0b0a means "row 0, block 0, part A times column 0, block 0, part A".
    //(Blocks of more than 16 shorts occupy more than one __m256i (ymm) register each.)
__m256i r0b0axc0b0a = _mm256_madd_epi16(xmmRow0B0a, xmmCol0B0a);
*return1 = r0b0axc0b0a;
}
FORCEINLINE void BlockHandlerAVX::kernelavx16x4(__m256i xmmRow0B0a, __m256i xmmRow1B0a, __m256i xmmRow2B0a, __m256i xmmRow3B0a,
short* B, __m256i* return1, __m256i* return2, __m256i * return3, __m256i* return4)
{
__m256i xmmCol0B0a = _mm256_load_si256((__m256i*)B);
//Result for row 0
    //Nomenclature:
    //r0b0axc0b0a means "row 0, block 0, part A times column 0, block 0, part A".
    //(Blocks of more than 16 shorts occupy more than one __m256i (ymm) register each.)
__m256i r0b0axc0b0a = _mm256_madd_epi16(xmmRow0B0a, xmmCol0B0a);
//Result for row 1
__m256i r1b0axc0b0a = _mm256_madd_epi16(xmmRow1B0a, xmmCol0B0a);
//Result for row 2
__m256i r2b0axc0b0a = _mm256_madd_epi16(xmmRow2B0a, xmmCol0B0a);
//Result for row 3
__m256i r3b0axc0b0a = _mm256_madd_epi16(xmmRow3B0a, xmmCol0B0a);
*return1 = r0b0axc0b0a;
*return2 = r1b0axc0b0a;
*return3 = r2b0axc0b0a;
*return4 = r3b0axc0b0a;
}
FORCEINLINE void BlockHandlerAVX::kernelavx32x1(
__m256i xmmRow0B0a, __m256i xmmRow0B0b,
short* B, __m256i* return1)
{
__m256i xmmCol0B0a = _mm256_load_si256((__m256i*)B);
__m256i xmmCol0B0b = _mm256_load_si256((__m256i*)B + 1);
//Result for row 0
    //Nomenclature:
    //r0b0axc0b0a means "row 0, block 0, part A times column 0, block 0, part A".
    //(Blocks of more than 16 shorts occupy more than one __m256i (ymm) register each.)
__m256i r0b0axc0b0a = _mm256_madd_epi16(xmmRow0B0a, xmmCol0B0a);
__m256i r0b0bxc0b0b = _mm256_madd_epi16(xmmRow0B0b, xmmCol0B0b);
__m256i result1a = _mm256_add_epi32(r0b0axc0b0a, r0b0bxc0b0b);
*return1 = result1a;
}
FORCEINLINE void BlockHandlerAVX::kernelavx32x4(
__m256i xmmRow0B0a, __m256i xmmRow0B0b,
__m256i xmmRow1B0a, __m256i xmmRow1B0b,
__m256i xmmRow2B0a, __m256i xmmRow2B0b,
__m256i xmmRow3B0a, __m256i xmmRow3B0b,
short* B, __m256i* return1, __m256i* return2, __m256i * return3, __m256i* return4)
{
__m256i xmmCol0B0a = _mm256_load_si256((__m256i*)B);
__m256i xmmCol0B0b = _mm256_load_si256((__m256i*)B + 1);
//Result for row 0
    //Nomenclature:
    //r0b0axc0b0a means "row 0, block 0, part A times column 0, block 0, part A".
    //(Blocks of more than 16 shorts occupy more than one __m256i (ymm) register each.)
__m256i r0b0axc0b0a = _mm256_madd_epi16(xmmRow0B0a, xmmCol0B0a);
__m256i r0b0bxc0b0b = _mm256_madd_epi16(xmmRow0B0b, xmmCol0B0b);
__m256i result1a = _mm256_add_epi32(r0b0axc0b0a, r0b0bxc0b0b);
//Result for row 1
__m256i r1b0axc0b0a = _mm256_madd_epi16(xmmRow1B0a, xmmCol0B0a);
__m256i r1b0bxc0b0b = _mm256_madd_epi16(xmmRow1B0b, xmmCol0B0b);
__m256i result2a = _mm256_add_epi32(r1b0axc0b0a, r1b0bxc0b0b);
//Result for row 2
__m256i r2b0axc0b0a = _mm256_madd_epi16(xmmRow2B0a, xmmCol0B0a);
__m256i r2b0bxc0b0b = _mm256_madd_epi16(xmmRow2B0b, xmmCol0B0b);
__m256i result3a = _mm256_add_epi32(r2b0axc0b0a, r2b0bxc0b0b);
//Result for row 3
__m256i r3b0axc0b0a = _mm256_madd_epi16(xmmRow3B0a, xmmCol0B0a);
__m256i r3b0bxc0b0b = _mm256_madd_epi16(xmmRow3B0b, xmmCol0B0b);
__m256i result4a = _mm256_add_epi32(r3b0axc0b0a, r3b0bxc0b0b);
*return1 = result1a;
*return2 = result2a;
*return3 = result3a;
*return4 = result4a;
}
FORCEINLINE void BlockHandlerAVX::kernelavx64x1(
__m256i xmmRow0B0a, __m256i xmmRow0B0b, __m256i xmmRow0B0c, __m256i xmmRow0B0d,
short* B, __m256i* return1)
{
__m256i xmmCol0B0a = _mm256_load_si256((__m256i*)B);
__m256i xmmCol0B0b = _mm256_load_si256((__m256i*)B + 1);
__m256i xmmCol0B0c = _mm256_load_si256((__m256i*)B + 2);
__m256i xmmCol0B0d = _mm256_load_si256((__m256i*)B + 3);
__m256i r0b0axc0b0a = _mm256_madd_epi16(xmmRow0B0a, xmmCol0B0a);
__m256i r0b0bxc0b0b = _mm256_madd_epi16(xmmRow0B0b, xmmCol0B0b);
__m256i r0b0cxc0b0c = _mm256_madd_epi16(xmmRow0B0c, xmmCol0B0c);
__m256i r0b0dxc0b0d = _mm256_madd_epi16(xmmRow0B0d, xmmCol0B0d);
__m256i result1a = _mm256_add_epi32(r0b0axc0b0a, r0b0bxc0b0b);
__m256i result1b = _mm256_add_epi32(r0b0cxc0b0c, r0b0dxc0b0d);
__m256i result1ab = _mm256_add_epi32(result1a, result1b);
*return1 = result1ab;
//std::cout << "Returning " << u.i[0] << " + " << u.i[4] << "(" << u.i[0] + u.i[4] << ") for first row" << std::endl;
}
FORCEINLINE void BlockHandlerAVX::kernelavx64x4(
__m256i xmmRow0B0a, __m256i xmmRow0B0b, __m256i xmmRow0B0c, __m256i xmmRow0B0d,
__m256i xmmRow1B0a, __m256i xmmRow1B0b, __m256i xmmRow1B0c, __m256i xmmRow1B0d,
__m256i xmmRow2B0a, __m256i xmmRow2B0b, __m256i xmmRow2B0c, __m256i xmmRow2B0d,
__m256i xmmRow3B0a, __m256i xmmRow3B0b, __m256i xmmRow3B0c, __m256i xmmRow3B0d,
short* B, __m256i* return1, __m256i* return2, __m256i * return3, __m256i* return4)
{
__m256i xmmCol0B0a = _mm256_load_si256((__m256i*)B);
__m256i xmmCol0B0b = _mm256_load_si256((__m256i*)B + 1);
__m256i xmmCol0B0c = _mm256_load_si256((__m256i*)B + 2);
__m256i xmmCol0B0d = _mm256_load_si256((__m256i*)B + 3);
//Result for row 0
    //Nomenclature:
    //r0b0axc0b0a means "row 0, block 0, part A times column 0, block 0, part A".
    //(Blocks of more than 16 shorts occupy more than one __m256i (ymm) register each.)
__m256i r0b0axc0b0a = _mm256_madd_epi16(xmmRow0B0a, xmmCol0B0a);
__m256i r0b0bxc0b0b = _mm256_madd_epi16(xmmRow0B0b, xmmCol0B0b);
__m256i r0b0cxc0b0c = _mm256_madd_epi16(xmmRow0B0c, xmmCol0B0c);
__m256i r0b0dxc0b0d = _mm256_madd_epi16(xmmRow0B0d, xmmCol0B0d);
__m256i result1a = _mm256_add_epi32(r0b0axc0b0a, r0b0bxc0b0b);
__m256i result1b = _mm256_add_epi32(r0b0cxc0b0c, r0b0dxc0b0d);
__m256i result1ab = _mm256_add_epi32(result1a, result1b);
//Result for row 1
__m256i r1b0axc0b0a = _mm256_madd_epi16(xmmRow1B0a, xmmCol0B0a);
__m256i r1b0bxc0b0b = _mm256_madd_epi16(xmmRow1B0b, xmmCol0B0b);
__m256i r1b0cxc0b0c = _mm256_madd_epi16(xmmRow1B0c, xmmCol0B0c);
__m256i r1b0dxc0b0d = _mm256_madd_epi16(xmmRow1B0d, xmmCol0B0d);
__m256i result2a = _mm256_add_epi32(r1b0axc0b0a, r1b0bxc0b0b);
__m256i result2b = _mm256_add_epi32(r1b0cxc0b0c, r1b0dxc0b0d);
__m256i result2ab = _mm256_add_epi32(result2a, result2b);
//Result for row 2
__m256i r2b0axc0b0a = _mm256_madd_epi16(xmmRow2B0a, xmmCol0B0a);
__m256i r2b0bxc0b0b = _mm256_madd_epi16(xmmRow2B0b, xmmCol0B0b);
__m256i r2b0cxc0b0c = _mm256_madd_epi16(xmmRow2B0c, xmmCol0B0c);
__m256i r2b0dxc0b0d = _mm256_madd_epi16(xmmRow2B0d, xmmCol0B0d);
__m256i result3a = _mm256_add_epi32(r2b0axc0b0a, r2b0bxc0b0b);
__m256i result3b = _mm256_add_epi32(r2b0cxc0b0c, r2b0dxc0b0d);
__m256i result3ab = _mm256_add_epi32(result3a, result3b);
//Result for row 3
__m256i r3b0axc0b0a = _mm256_madd_epi16(xmmRow3B0a, xmmCol0B0a);
__m256i r3b0bxc0b0b = _mm256_madd_epi16(xmmRow3B0b, xmmCol0B0b);
__m256i r3b0cxc0b0c = _mm256_madd_epi16(xmmRow3B0c, xmmCol0B0c);
__m256i r3b0dxc0b0d = _mm256_madd_epi16(xmmRow3B0d, xmmCol0B0d);
__m256i result4a = _mm256_add_epi32(r3b0axc0b0a, r3b0bxc0b0b);
__m256i result4b = _mm256_add_epi32(r3b0cxc0b0c, r3b0dxc0b0d);
__m256i result4ab = _mm256_add_epi32(result4a, result4b);
*return1 = result1ab;
*return2 = result2ab;
*return3 = result3ab;
*return4 = result4ab;
}
FORCEINLINE void BlockHandlerAVX::kernelavx128x1(
__m256i xmmRow0B0a, __m256i xmmRow0B0b, __m256i xmmRow0B0c, __m256i xmmRow0B0d,
__m256i xmmRow0B0e, __m256i xmmRow0B0f, __m256i xmmRow0B0g, __m256i xmmRow0B0h,
short* B, __m256i* return1)
{
__m256i xmmCol0B0a = _mm256_load_si256((__m256i*)B);
__m256i xmmCol0B0b = _mm256_load_si256((__m256i*)(B + 16));
__m256i xmmCol0B0c = _mm256_load_si256((__m256i*)(B + 32));
__m256i xmmCol0B0d = _mm256_load_si256((__m256i*)(B + 48));
__m256i xmmCol0B0e = _mm256_load_si256((__m256i*)(B + 64));
__m256i xmmCol0B0f = _mm256_load_si256((__m256i*)(B + 80));
__m256i xmmCol0B0g = _mm256_load_si256((__m256i*)(B + 96));
__m256i xmmCol0B0h = _mm256_load_si256((__m256i*)(B + 112));
//Result for row 0
    //Nomenclature:
    //r0b0axc0b0a means "row 0, block 0, part A times column 0, block 0, part A".
    //(Blocks of more than 16 shorts occupy more than one __m256i (ymm) register each.)
__m256i r0b0axc0b0a = _mm256_madd_epi16(xmmRow0B0a, xmmCol0B0a);
__m256i r0b0bxc0b0b = _mm256_madd_epi16(xmmRow0B0b, xmmCol0B0b);
__m256i r0b0cxc0b0c = _mm256_madd_epi16(xmmRow0B0c, xmmCol0B0c);
__m256i r0b0dxc0b0d = _mm256_madd_epi16(xmmRow0B0d, xmmCol0B0d);
__m256i r0b0exc0b0e = _mm256_madd_epi16(xmmRow0B0e, xmmCol0B0e);
__m256i r0b0fxc0b0f = _mm256_madd_epi16(xmmRow0B0f, xmmCol0B0f);
__m256i r0b0gxc0b0g = _mm256_madd_epi16(xmmRow0B0g, xmmCol0B0g);
__m256i r0b0hxc0b0h = _mm256_madd_epi16(xmmRow0B0h, xmmCol0B0h);
__m256i result1a = _mm256_add_epi32(r0b0axc0b0a, r0b0bxc0b0b);
__m256i result1b = _mm256_add_epi32(r0b0cxc0b0c, r0b0dxc0b0d);
__m256i result1c = _mm256_add_epi32(r0b0exc0b0e, r0b0fxc0b0f);
__m256i result1d = _mm256_add_epi32(r0b0gxc0b0g, r0b0hxc0b0h);
__m256i result1ab = _mm256_add_epi32(result1a, result1b);
__m256i result1cd = _mm256_add_epi32(result1c, result1d);
__m256i result1abcd = _mm256_add_epi32(result1ab, result1cd);
*return1 = result1abcd;
//std::cout << "Returning " << u.i[0] << " + " << u.i[4] << "(" << u.i[0] + u.i[4] << ") for first row" << std::endl;
}
FORCEINLINE void BlockHandlerAVX::kernelavx128x4(
__m256i xmmRow0B0a, __m256i xmmRow0B0b, __m256i xmmRow0B0c, __m256i xmmRow0B0d,
__m256i xmmRow0B0e, __m256i xmmRow0B0f, __m256i xmmRow0B0g, __m256i xmmRow0B0h,
__m256i xmmRow1B0a, __m256i xmmRow1B0b, __m256i xmmRow1B0c, __m256i xmmRow1B0d,
__m256i xmmRow1B0e, __m256i xmmRow1B0f, __m256i xmmRow1B0g, __m256i xmmRow1B0h,
__m256i xmmRow2B0a, __m256i xmmRow2B0b, __m256i xmmRow2B0c, __m256i xmmRow2B0d,
__m256i xmmRow2B0e, __m256i xmmRow2B0f, __m256i xmmRow2B0g, __m256i xmmRow2B0h,
__m256i xmmRow3B0a, __m256i xmmRow3B0b, __m256i xmmRow3B0c, __m256i xmmRow3B0d,
__m256i xmmRow3B0e, __m256i xmmRow3B0f, __m256i xmmRow3B0g, __m256i xmmRow3B0h,
short* B, __m256i* return1, __m256i* return2, __m256i * return3, __m256i* return4)
{
__m256i xmmCol0B0a = _mm256_load_si256((__m256i*)B);
__m256i xmmCol0B0b = _mm256_load_si256((__m256i*)(B + 16));
__m256i xmmCol0B0c = _mm256_load_si256((__m256i*)(B + 32));
__m256i xmmCol0B0d = _mm256_load_si256((__m256i*)(B + 48));
__m256i xmmCol0B0e = _mm256_load_si256((__m256i*)(B + 64));
__m256i xmmCol0B0f = _mm256_load_si256((__m256i*)(B + 80));
__m256i xmmCol0B0g = _mm256_load_si256((__m256i*)(B + 96));
__m256i xmmCol0B0h = _mm256_load_si256((__m256i*)(B + 112));
//Result for row 0
    //Nomenclature:
    //r0b0axc0b0a means "row 0, block 0, part A times column 0, block 0, part A".
    //(Blocks of more than 16 shorts occupy more than one __m256i (ymm) register each.)
__m256i r0b0axc0b0a = _mm256_madd_epi16(xmmRow0B0a, xmmCol0B0a);
__m256i r0b0bxc0b0b = _mm256_madd_epi16(xmmRow0B0b, xmmCol0B0b);
__m256i r0b0cxc0b0c = _mm256_madd_epi16(xmmRow0B0c, xmmCol0B0c);
__m256i r0b0dxc0b0d = _mm256_madd_epi16(xmmRow0B0d, xmmCol0B0d);
__m256i r0b0exc0b0e = _mm256_madd_epi16(xmmRow0B0e, xmmCol0B0e);
__m256i r0b0fxc0b0f = _mm256_madd_epi16(xmmRow0B0f, xmmCol0B0f);
__m256i r0b0gxc0b0g = _mm256_madd_epi16(xmmRow0B0g, xmmCol0B0g);
__m256i r0b0hxc0b0h = _mm256_madd_epi16(xmmRow0B0h, xmmCol0B0h);
__m256i result1a = _mm256_add_epi32(r0b0axc0b0a, r0b0bxc0b0b);
__m256i result1b = _mm256_add_epi32(r0b0cxc0b0c, r0b0dxc0b0d);
__m256i result1c = _mm256_add_epi32(r0b0exc0b0e, r0b0fxc0b0f);
__m256i result1d = _mm256_add_epi32(r0b0gxc0b0g, r0b0hxc0b0h);
__m256i result1ab = _mm256_add_epi32(result1a, result1b);
__m256i result1cd = _mm256_add_epi32(result1c, result1d);
__m256i result1abcd = _mm256_add_epi32(result1ab, result1cd);
//Result for row 1
__m256i r1b0axc0b0a = _mm256_madd_epi16(xmmRow1B0a, xmmCol0B0a);
__m256i r1b0bxc0b0b = _mm256_madd_epi16(xmmRow1B0b, xmmCol0B0b);
__m256i r1b0cxc0b0c = _mm256_madd_epi16(xmmRow1B0c, xmmCol0B0c);
__m256i r1b0dxc0b0d = _mm256_madd_epi16(xmmRow1B0d, xmmCol0B0d);
__m256i r1b0exc0b0e = _mm256_madd_epi16(xmmRow1B0e, xmmCol0B0e);
__m256i r1b0fxc0b0f = _mm256_madd_epi16(xmmRow1B0f, xmmCol0B0f);
__m256i r1b0gxc0b0g = _mm256_madd_epi16(xmmRow1B0g, xmmCol0B0g);
__m256i r1b0hxc0b0h = _mm256_madd_epi16(xmmRow1B0h, xmmCol0B0h);
__m256i result2a = _mm256_add_epi32(r1b0axc0b0a, r1b0bxc0b0b);
__m256i result2b = _mm256_add_epi32(r1b0cxc0b0c, r1b0dxc0b0d);
__m256i result2c = _mm256_add_epi32(r1b0exc0b0e, r1b0fxc0b0f);
__m256i result2d = _mm256_add_epi32(r1b0gxc0b0g, r1b0hxc0b0h);
__m256i result2ab = _mm256_add_epi32(result2a, result2b);
__m256i result2cd = _mm256_add_epi32(result2c, result2d);
__m256i result2abcd = _mm256_add_epi32(result2ab, result2cd);
//Result for row 2
__m256i r2b0axc0b0a = _mm256_madd_epi16(xmmRow2B0a, xmmCol0B0a);
__m256i r2b0bxc0b0b = _mm256_madd_epi16(xmmRow2B0b, xmmCol0B0b);
__m256i r2b0cxc0b0c = _mm256_madd_epi16(xmmRow2B0c, xmmCol0B0c);
__m256i r2b0dxc0b0d = _mm256_madd_epi16(xmmRow2B0d, xmmCol0B0d);
__m256i r2b0exc0b0e = _mm256_madd_epi16(xmmRow2B0e, xmmCol0B0e);
__m256i r2b0fxc0b0f = _mm256_madd_epi16(xmmRow2B0f, xmmCol0B0f);
__m256i r2b0gxc0b0g = _mm256_madd_epi16(xmmRow2B0g, xmmCol0B0g);
__m256i r2b0hxc0b0h = _mm256_madd_epi16(xmmRow2B0h, xmmCol0B0h);
__m256i result3a = _mm256_add_epi32(r2b0axc0b0a, r2b0bxc0b0b);
__m256i result3b = _mm256_add_epi32(r2b0cxc0b0c, r2b0dxc0b0d);
__m256i result3c = _mm256_add_epi32(r2b0exc0b0e, r2b0fxc0b0f);
__m256i result3d = _mm256_add_epi32(r2b0gxc0b0g, r2b0hxc0b0h);
__m256i result3ab = _mm256_add_epi32(result3a, result3b);
__m256i result3cd = _mm256_add_epi32(result3c, result3d);
__m256i result3abcd = _mm256_add_epi32(result3ab, result3cd);
//Result for row 3
__m256i r3b0axc0b0a = _mm256_madd_epi16(xmmRow3B0a, xmmCol0B0a);
__m256i r3b0bxc0b0b = _mm256_madd_epi16(xmmRow3B0b, xmmCol0B0b);
__m256i r3b0cxc0b0c = _mm256_madd_epi16(xmmRow3B0c, xmmCol0B0c);
__m256i r3b0dxc0b0d = _mm256_madd_epi16(xmmRow3B0d, xmmCol0B0d);
__m256i r3b0exc0b0e = _mm256_madd_epi16(xmmRow3B0e, xmmCol0B0e);
__m256i r3b0fxc0b0f = _mm256_madd_epi16(xmmRow3B0f, xmmCol0B0f);
__m256i r3b0gxc0b0g = _mm256_madd_epi16(xmmRow3B0g, xmmCol0B0g);
__m256i r3b0hxc0b0h = _mm256_madd_epi16(xmmRow3B0h, xmmCol0B0h);
__m256i result4a = _mm256_add_epi32(r3b0axc0b0a, r3b0bxc0b0b);
__m256i result4b = _mm256_add_epi32(r3b0cxc0b0c, r3b0dxc0b0d);
__m256i result4c = _mm256_add_epi32(r3b0exc0b0e, r3b0fxc0b0f);
__m256i result4d = _mm256_add_epi32(r3b0gxc0b0g, r3b0hxc0b0h);
__m256i result4ab = _mm256_add_epi32(result4a, result4b);
__m256i result4cd = _mm256_add_epi32(result4c, result4d);
__m256i result4abcd = _mm256_add_epi32(result4ab, result4cd);
//Now we can just add horizontally
*return1 = result1abcd;
*return2 = result2abcd;
*return3 = result3abcd;
*return4 = result4abcd;
}
}}}
|
repair.c
|
#include "../../shared.h"
#include "hale.h"
#include <float.h>
#include <stdio.h>
/*
 * NOTE: The repair phase is a mesh-wide scattering stencil, so the whole
 * stencil for an element needs to be owned by a single thread to prevent
 * data races.
 *
 * One method that could be employed here is to break the problem down and
 * analyse the dependencies at runtime.
 *
 * Steps:
 *
 * 1) determine the quantities needed to repair extrema
 * 2) check the 2-deep stencil of each node/cell to see whether there actually
 *    is a dependency
 * 3) construct an indirection list containing all of the independent work and
 *    one of the dependent elements from each chain
 * 4) perform all of the work on that indirection in parallel
 * 5) construct another list containing the next single item of the work that
 *    was considered dependent
 * 6) perform all of the individual dependent elements' work
 * 7) repeat 5 and 6 until completion (a sketch of this scheduling idea is
 *    given below).
 */
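/*
 * A minimal sketch (not part of the original HALE source) of the round-based
 * scheduling idea outlined in the NOTE above. The names
 * repair_schedule_sketch, depends_on_pending and do_repair are assumptions:
 * the real 2-deep stencil test and the real repair kernels would be plugged
 * in by the caller. It only illustrates steps 2-7 and is not used elsewhere
 * in this file.
 */
static void repair_schedule_sketch(const int nitems,
                                   int (*depends_on_pending)(int item),
                                   void (*do_repair)(int item)) {
  int pending[(nitems)];
  int npending = 0;

  // Steps 2-4: repair every independent item in parallel and defer the items
  // whose stencils overlap outstanding work.
#pragma omp parallel for
  for (int ii = 0; ii < nitems; ++ii) {
    if (!depends_on_pending(ii)) {
      do_repair(ii);
    } else {
#pragma omp critical
      {
        pending[npending++] = ii;
      }
    }
  }

  // Steps 5-7: each further round peels one more item off every dependent
  // chain until the deferred list is drained.
  while (npending > 0) {
    int nkept = 0;
    for (int ii = 0; ii < npending; ++ii) {
      const int item = pending[ii];
      if (!depends_on_pending(item)) {
        do_repair(item);
      } else {
        pending[nkept++] = item; // still blocked; retry next round
      }
    }
    if (nkept == npending) {
      break; // no progress possible; avoid spinning forever
    }
    npending = nkept;
  }
}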
// Repairs the subcell extrema for mass
void repair_subcell_extrema(const int ncells, const int* cells_to_nodes_offsets,
const int* subcells_to_subcells_offsets,
const int* subcells_to_subcells,
double* subcell_volume, double* subcell_mass);
// Repairs the extrema at the nodal velocities
void repair_velocity_extrema(const int nnodes,
const int* nodes_to_nodes_offsets,
const int* nodes_to_nodes, double* velocity_x,
double* velocity_y, double* velocity_z);
// Repairs the extrema of the cell energies
void repair_energy_extrema(const int ncells, const int* cells_to_faces_offsets,
const int* cells_to_faces,
const int* faces_to_cells0,
const int* faces_to_cells1, double* energy);
// Redistributes the mass according to the determined neighbour availability
void redistribute_subcell_mass(double* mass, const int subcell_index,
const int nsubcell_neighbours,
const int* subcells_to_subcells,
const int subcell_to_subcells_off,
const double* dmass_avail_neighbour,
const double dmass_avail,
const double dmass_need, const double g,
const double subcell_vol, const int is_min);
// Performs a conservative repair of the mesh
void mass_repair_phase(UnstructuredMesh* umesh, HaleData* hale_data) {
  // Repairs the extrema of the subcell masses
repair_subcell_extrema(umesh->ncells, umesh->cells_to_nodes_offsets,
hale_data->subcells_to_subcells_offsets,
hale_data->subcells_to_subcells,
hale_data->subcell_volume, hale_data->subcell_mass);
}
// Repairs the nodal velocities
void velocity_repair_phase(UnstructuredMesh* umesh, HaleData* hale_data) {
repair_velocity_extrema(umesh->nnodes, umesh->nodes_to_nodes_offsets,
umesh->nodes_to_nodes, hale_data->velocity_x0,
hale_data->velocity_y0, hale_data->velocity_z0);
}
// Repairs the energy
void energy_repair_phase(UnstructuredMesh* umesh, HaleData* hale_data) {
repair_energy_extrema(umesh->ncells, umesh->cells_to_faces_offsets,
umesh->cells_to_faces, umesh->faces_to_cells0,
umesh->faces_to_cells1, hale_data->energy0);
}
// Repairs the extrema at the nodal velocities
void repair_velocity_extrema(const int nnodes,
const int* nodes_to_nodes_offsets,
const int* nodes_to_nodes, double* velocity_x,
double* velocity_y, double* velocity_z) {
#pragma omp parallel for
for (int nn = 0; nn < nnodes; ++nn) {
const int node_to_nodes_off = nodes_to_nodes_offsets[(nn)];
const int nnodes_by_node =
nodes_to_nodes_offsets[(nn + 1)] - node_to_nodes_off;
double gmax_vx = -DBL_MAX;
double gmin_vx = DBL_MAX;
double gmax_vy = -DBL_MAX;
double gmin_vy = DBL_MAX;
double gmax_vz = -DBL_MAX;
double gmin_vz = DBL_MAX;
double dvx_total_avail_donate = 0.0;
double dvx_total_avail_receive = 0.0;
double dvy_total_avail_donate = 0.0;
double dvy_total_avail_receive = 0.0;
double dvz_total_avail_donate = 0.0;
double dvz_total_avail_receive = 0.0;
double dvx_avail_donate_neighbour[(nnodes_by_node)];
double dvx_avail_receive_neighbour[(nnodes_by_node)];
double dvy_avail_donate_neighbour[(nnodes_by_node)];
double dvy_avail_receive_neighbour[(nnodes_by_node)];
double dvz_avail_donate_neighbour[(nnodes_by_node)];
double dvz_avail_receive_neighbour[(nnodes_by_node)];
// Loop over the nodes attached to this node
for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
if (neighbour_index == -1) {
continue;
}
const int neighbour_to_nodes_off =
nodes_to_nodes_offsets[(neighbour_index)];
const int nnodes_by_neighbour =
nodes_to_nodes_offsets[(neighbour_index + 1)] -
neighbour_to_nodes_off;
vec_t neighbour_v = {velocity_x[(neighbour_index)],
velocity_y[(neighbour_index)],
velocity_z[(neighbour_index)]};
double neighbour_gmax_vx = -DBL_MAX;
double neighbour_gmin_vx = DBL_MAX;
double neighbour_gmax_vy = -DBL_MAX;
double neighbour_gmin_vy = DBL_MAX;
double neighbour_gmax_vz = -DBL_MAX;
double neighbour_gmin_vz = DBL_MAX;
for (int nn3 = 0; nn3 < nnodes_by_neighbour; ++nn3) {
const int neighbour_neighbour_index =
nodes_to_nodes[(neighbour_to_nodes_off + nn3)];
if (neighbour_neighbour_index == -1) {
continue;
}
neighbour_gmax_vx =
max(neighbour_gmax_vx, velocity_x[(neighbour_neighbour_index)]);
neighbour_gmin_vx =
min(neighbour_gmin_vx, velocity_x[(neighbour_neighbour_index)]);
neighbour_gmax_vy =
max(neighbour_gmax_vy, velocity_y[(neighbour_neighbour_index)]);
neighbour_gmin_vy =
min(neighbour_gmin_vy, velocity_y[(neighbour_neighbour_index)]);
neighbour_gmax_vz =
max(neighbour_gmax_vz, velocity_z[(neighbour_neighbour_index)]);
neighbour_gmin_vz =
min(neighbour_gmin_vz, velocity_z[(neighbour_neighbour_index)]);
}
dvx_avail_donate_neighbour[(nn2)] =
max(neighbour_v.x - neighbour_gmin_vx, 0.0);
dvx_avail_receive_neighbour[(nn2)] =
max(neighbour_gmax_vx - neighbour_v.x, 0.0);
dvy_avail_donate_neighbour[(nn2)] =
max(neighbour_v.y - neighbour_gmin_vy, 0.0);
dvy_avail_receive_neighbour[(nn2)] =
max(neighbour_gmax_vy - neighbour_v.y, 0.0);
dvz_avail_donate_neighbour[(nn2)] =
max(neighbour_v.z - neighbour_gmin_vz, 0.0);
dvz_avail_receive_neighbour[(nn2)] =
max(neighbour_gmax_vz - neighbour_v.z, 0.0);
dvx_total_avail_donate += dvx_avail_donate_neighbour[(nn2)];
dvx_total_avail_receive += dvx_avail_receive_neighbour[(nn2)];
dvy_total_avail_donate += dvy_avail_donate_neighbour[(nn2)];
dvy_total_avail_receive += dvy_avail_receive_neighbour[(nn2)];
dvz_total_avail_donate += dvz_avail_donate_neighbour[(nn2)];
dvz_total_avail_receive += dvz_avail_receive_neighbour[(nn2)];
gmax_vx = max(gmax_vx, neighbour_v.x);
gmin_vx = min(gmin_vx, neighbour_v.x);
gmax_vy = max(gmax_vy, neighbour_v.y);
gmin_vy = min(gmin_vy, neighbour_v.y);
gmax_vz = max(gmax_vz, neighbour_v.z);
gmin_vz = min(gmin_vz, neighbour_v.z);
}
vec_t cell_v = {velocity_x[(nn)], velocity_y[(nn)], velocity_z[(nn)]};
const double dvx_need_receive = gmin_vx - cell_v.x;
const double dvx_need_donate = cell_v.x - gmax_vx;
const double dvy_need_receive = gmin_vy - cell_v.y;
const double dvy_need_donate = cell_v.y - gmax_vy;
const double dvz_need_receive = gmin_vz - cell_v.z;
const double dvz_need_donate = cell_v.z - gmax_vz;
if (dvx_need_receive > 0.0) {
velocity_x[(nn)] = gmin_vx;
// Loop over the nodes attached to this node
for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
if (neighbour_index == -1) {
continue;
}
velocity_x[(neighbour_index)] -=
(dvx_avail_donate_neighbour[(nn2)] / dvx_total_avail_donate) *
dvx_need_receive;
}
} else if (dvx_need_donate > 0.0) {
// Loop over the nodes attached to this node
velocity_x[(nn)] = gmax_vx;
for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
if (neighbour_index == -1) {
continue;
}
velocity_x[(neighbour_index)] +=
(dvx_avail_receive_neighbour[(nn2)] / dvx_total_avail_receive) *
dvx_need_donate;
}
}
if (dvy_need_receive > 0.0) {
velocity_y[(nn)] = gmin_vy;
// Loop over the nodes attached to this node
for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
if (neighbour_index == -1) {
continue;
}
velocity_y[(neighbour_index)] -=
(dvy_avail_donate_neighbour[(nn2)] / dvy_total_avail_donate) *
dvy_need_receive;
}
} else if (dvy_need_donate > 0.0) {
// Loop over the nodes attached to this node
velocity_y[(nn)] = gmax_vy;
for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
if (neighbour_index == -1) {
continue;
}
velocity_y[(neighbour_index)] +=
(dvy_avail_receive_neighbour[(nn2)] / dvy_total_avail_receive) *
dvy_need_donate;
}
}
if (dvz_need_receive > 0.0) {
velocity_z[(nn)] = gmin_vz;
// Loop over the nodes attached to this node
for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
if (neighbour_index == -1) {
continue;
}
velocity_z[(neighbour_index)] -=
(dvz_avail_donate_neighbour[(nn2)] / dvz_total_avail_donate) *
dvz_need_receive;
}
} else if (dvz_need_donate > 0.0) {
// Loop over the nodes attached to this node
velocity_z[(nn)] = gmax_vz;
for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
if (neighbour_index == -1) {
continue;
}
velocity_z[(neighbour_index)] +=
(dvz_avail_receive_neighbour[(nn2)] / dvz_total_avail_receive) *
dvz_need_donate;
}
}
if (dvx_total_avail_donate < dvx_need_receive ||
dvx_total_avail_receive < dvx_need_donate ||
dvy_total_avail_donate < dvy_need_receive ||
dvy_total_avail_receive < dvy_need_donate ||
dvz_total_avail_donate < dvz_need_receive ||
dvz_total_avail_receive < dvz_need_donate) {
printf("Repair stage needs additional level.\n");
continue;
}
}
}
// Repairs the extrema of the cell energies
void repair_energy_extrema(const int ncells, const int* cells_to_faces_offsets,
const int* cells_to_faces,
const int* faces_to_cells0,
const int* faces_to_cells1, double* energy) {
#pragma omp parallel for
for (int cc = 0; cc < ncells; ++cc) {
const int cell_to_faces_off = cells_to_faces_offsets[(cc)];
const int nfaces_by_cell =
cells_to_faces_offsets[(cc + 1)] - cell_to_faces_off;
double gmax_ie = -DBL_MAX;
double gmin_ie = DBL_MAX;
double die_total_avail_donate = 0.0;
double die_total_avail_receive = 0.0;
double die_avail_donate_neighbour[(nfaces_by_cell)];
double die_avail_receive_neighbour[(nfaces_by_cell)];
const double cell_ie = energy[(cc)];
    // Loop over the faces attached to this cell
for (int ff = 0; ff < nfaces_by_cell; ++ff) {
const int face_index = cells_to_faces[(cell_to_faces_off + ff)];
const int neighbour_index = (faces_to_cells0[(face_index)] == cc)
? faces_to_cells1[(face_index)]
: faces_to_cells0[(face_index)];
if (neighbour_index == -1) {
continue;
}
const double neighbour_ie = energy[(neighbour_index)];
double neighbour_gmax_ie = -DBL_MAX;
double neighbour_gmin_ie = DBL_MAX;
const int neighbour_to_faces_off =
cells_to_faces_offsets[(neighbour_index)];
const int nfaces_by_neighbour =
cells_to_faces_offsets[(neighbour_index + 1)] -
neighbour_to_faces_off;
for (int ff2 = 0; ff2 < nfaces_by_neighbour; ++ff2) {
const int neighbour_face_index =
cells_to_faces[(neighbour_to_faces_off + ff2)];
const int neighbour_neighbour_index =
(faces_to_cells0[(neighbour_face_index)] == neighbour_index)
? faces_to_cells1[(neighbour_face_index)]
: faces_to_cells0[(neighbour_face_index)];
if (neighbour_neighbour_index == -1) {
continue;
}
neighbour_gmax_ie =
max(neighbour_gmax_ie, energy[(neighbour_neighbour_index)]);
neighbour_gmin_ie =
min(neighbour_gmin_ie, energy[(neighbour_neighbour_index)]);
}
die_avail_donate_neighbour[(ff)] =
max(neighbour_ie - neighbour_gmin_ie, 0.0);
die_avail_receive_neighbour[(ff)] =
max(neighbour_gmax_ie - neighbour_ie, 0.0);
die_total_avail_donate += die_avail_donate_neighbour[(ff)];
die_total_avail_receive += die_avail_receive_neighbour[(ff)];
gmax_ie = max(gmax_ie, neighbour_ie);
gmin_ie = min(gmin_ie, neighbour_ie);
}
const double die_need_receive = gmin_ie - cell_ie;
const double die_need_donate = cell_ie - gmax_ie;
if (die_need_receive > 0.0) {
energy[(cc)] = gmin_ie;
for (int ff = 0; ff < nfaces_by_cell; ++ff) {
const int face_index = cells_to_faces[(cell_to_faces_off + ff)];
const int neighbour_index = (faces_to_cells0[(face_index)] == cc)
? faces_to_cells1[(face_index)]
: faces_to_cells0[(face_index)];
if (neighbour_index == -1) {
continue;
}
energy[(neighbour_index)] -=
(die_avail_donate_neighbour[(ff)] / die_total_avail_donate) *
die_need_receive;
}
} else if (die_need_donate > 0.0) {
      // Loop over the faces attached to this cell
energy[(cc)] = gmax_ie;
for (int ff = 0; ff < nfaces_by_cell; ++ff) {
const int face_index = cells_to_faces[(cell_to_faces_off + ff)];
const int neighbour_index = (faces_to_cells0[(face_index)] == cc)
? faces_to_cells1[(face_index)]
: faces_to_cells0[(face_index)];
if (neighbour_index == -1) {
continue;
}
energy[(neighbour_index)] +=
(die_avail_receive_neighbour[(ff)] / die_total_avail_receive) *
die_need_donate;
}
}
if (die_total_avail_donate < die_need_receive ||
die_total_avail_receive < die_need_donate) {
printf("Repair stage needs additional level.\n");
continue;
}
}
}
// Repairs the subcell extrema for mass
void repair_subcell_extrema(const int ncells, const int* cells_to_nodes_offsets,
const int* subcells_to_subcells_offsets,
const int* subcells_to_subcells,
double* subcell_volume, double* subcell_mass) {
#pragma omp parallel for
for (int cc = 0; cc < ncells; ++cc) {
const int cell_to_nodes_off = cells_to_nodes_offsets[(cc)];
const int nnodes_by_cell =
cells_to_nodes_offsets[(cc + 1)] - cell_to_nodes_off;
// Looping over corner subcells here
for (int nn = 0; nn < nnodes_by_cell; ++nn) {
const int subcell_index = cell_to_nodes_off + nn;
const int subcell_to_subcells_off =
subcells_to_subcells_offsets[(subcell_index)];
const int nsubcell_neighbours =
subcells_to_subcells_offsets[(subcell_index + 1)] -
subcell_to_subcells_off;
const double subcell_vol = subcell_volume[(subcell_index)];
const double subcell_m_density =
subcell_mass[(subcell_index)] / subcell_vol;
double gmax_m = -DBL_MAX;
double gmin_m = DBL_MAX;
double dm_avail_donate = 0.0;
double dm_avail_receive = 0.0;
double dm_avail_donate_neighbour[(nsubcell_neighbours)];
double dm_avail_receive_neighbour[(nsubcell_neighbours)];
// Loop over neighbours
for (int ss = 0; ss < nsubcell_neighbours; ++ss) {
const int neighbour_index =
subcells_to_subcells[(subcell_to_subcells_off + ss)];
// Ignore boundary neighbours
if (neighbour_index == -1) {
continue;
}
const int neighbour_to_subcells_off =
subcells_to_subcells_offsets[(neighbour_index)];
const int nneighbour_neighbours =
subcells_to_subcells_offsets[(neighbour_index + 1)] -
neighbour_to_subcells_off;
const double neighbour_vol = subcell_volume[(neighbour_index)];
const double neighbour_m_density =
subcell_mass[(neighbour_index)] / neighbour_vol;
double neighbour_gmax_m = -DBL_MAX;
double neighbour_gmin_m = DBL_MAX;
// Loop over neighbour's neighbours
for (int ss2 = 0; ss2 < nneighbour_neighbours; ++ss2) {
const int neighbour_neighbour_index =
subcells_to_subcells[(neighbour_to_subcells_off + ss2)];
// Ignore boundary neighbours
if (neighbour_neighbour_index == -1) {
continue;
}
const double neighbour_neighbour_vol =
subcell_volume[(neighbour_neighbour_index)];
const double neighbour_neighbour_m_density =
subcell_mass[(neighbour_neighbour_index)] /
neighbour_neighbour_vol;
// Store the maximum / minimum values for rho in the neighbourhood
neighbour_gmax_m =
max(neighbour_gmax_m, neighbour_neighbour_m_density);
neighbour_gmin_m =
min(neighbour_gmin_m, neighbour_neighbour_m_density);
}
dm_avail_donate_neighbour[(ss)] =
max((neighbour_m_density - neighbour_gmin_m) * subcell_vol, 0.0);
dm_avail_receive_neighbour[(ss)] =
max((neighbour_gmax_m - neighbour_m_density) * subcell_vol, 0.0);
dm_avail_donate += dm_avail_donate_neighbour[(ss)];
dm_avail_receive += dm_avail_receive_neighbour[(ss)];
gmax_m = max(gmax_m, neighbour_m_density);
gmin_m = min(gmin_m, neighbour_m_density);
}
const double dm_need_receive = (gmin_m - subcell_m_density) * subcell_vol;
const double dm_need_donate = (subcell_m_density - gmax_m) * subcell_vol;
if (dm_need_receive > 0.0) {
redistribute_subcell_mass(subcell_mass, subcell_index,
nsubcell_neighbours, subcells_to_subcells,
subcell_to_subcells_off,
dm_avail_donate_neighbour, dm_avail_donate,
dm_need_receive, gmin_m, subcell_vol, 1);
} else if (dm_need_donate > 0.0) {
redistribute_subcell_mass(subcell_mass, subcell_index,
nsubcell_neighbours, subcells_to_subcells,
subcell_to_subcells_off,
dm_avail_receive_neighbour, dm_avail_receive,
dm_need_donate, gmax_m, subcell_vol, 0);
}
if (dm_avail_donate < dm_need_receive ||
dm_avail_receive < dm_need_donate) {
printf("dm_avail_donate %.12e dm_need_receive %.12e dm_avail_receive "
"%.12e dm_need_donate %.12e\n",
dm_avail_donate, dm_need_receive, dm_avail_receive,
dm_need_donate);
printf("Repair stage needs additional level.\n");
continue;
}
}
}
}
// Redistributes the mass according to the determined neighbour availability
void redistribute_subcell_mass(double* mass, const int subcell_index,
const int nsubcell_neighbours,
const int* subcells_to_subcells,
const int subcell_to_subcells_off,
const double* dmass_avail_neighbour,
const double dmass_avail,
const double dmass_need, const double g,
const double subcell_vol, const int is_min) {
mass[(subcell_index)] = g * subcell_vol;
// Loop over neighbours
  for (int ss = 0; ss < nsubcell_neighbours; ++ss) {
    const int neighbour_index =
        subcells_to_subcells[(subcell_to_subcells_off + ss)];
    // Ignore boundary neighbours, consistent with the availability pass
    if (neighbour_index == -1) {
      continue;
    }
    mass[(neighbour_index)] += (is_min ? -1.0 : 1.0) *
                               (dmass_avail_neighbour[(ss)] / dmass_avail) *
                               dmass_need;
  }
}
|
openmp_sample.c
|
/*
* Copyright (C) 2009-2014 Intel Corporation. All Rights Reserved.
*
* The source code contained or described herein and all
* documents related to the source code ("Material") are owned by
* Intel Corporation or its suppliers or licensors. Title to the
* Material remains with Intel Corporation or its suppliers and
* licensors. The Material is protected by worldwide copyright
* laws and treaty provisions. No part of the Material may be
* used, copied, reproduced, modified, published, uploaded,
* posted, transmitted, distributed, or disclosed in any way
* except as expressly provided in the license provided with the
* Materials. No license under any patent, copyright, trade
* secret or other intellectual property right is granted to or
* conferred upon you by disclosure or delivery of the Materials,
* either expressly, by implication, inducement, estoppel or
* otherwise, except as expressly provided in the license
* provided with the Materials.
*
* [DESCRIPTION]
* Each element of the product matrix c[i][j] is
* computed from a unique row and
* column of the factor matrices, a[i][k] and b[k][j].
*
* In the multithreaded implementation, each thread can
* concurrently compute some submatrix of the product without
* needing OpenMP data or control synchronization.
*
* The algorithm uses OpenMP* to parallelize the outer-most loop,
* using the "i" row index.
*
 * In this version the loops are not manually unrolled; the
 * inner-most "j" loop iterates one-by-one over the columns of
 * the product and factor matrices.
*
* [COMPILE]
* Use the following compiler options to compile both multi- and
* single-threaded versions.
*
* Parallel compilation:
* You must set the stacksize to an appropriate size; otherwise,
* the application will generate a segmentation fault.
* Linux* and OS X*: appropriate ulimit commands are shown for
* bash shell.
*
* Windows*: /Qstd=c99 /Qopenmp /F256000000
*
* Linux*: ulimit -s unlimited
* -std=c99 -qopenmp
*
* OS X*: ulimit -s 64000
* -std=c99 -qopenmp
*
* Serial compilation:
*
* Use the same command, but omit the -qopenmp (Linux and OS X)
* or /Qopenmp (Windows) option.
*
*/
#include <stdio.h>
#include <time.h>
#include <float.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifndef __cplusplus
#define bool _Bool
#define true 1
#define false 0
#endif
// Matrix size constants
// Be careful to set your shell's stacksize limit to a high value if you
// wish to increase the SIZE.
#define SIZE 4800 // Must be a multiple of 8.
#define M SIZE/8
#define N SIZE/4
#define P SIZE/2
#define NTIMES 5 // product matrix calculations
int main(void)
{
double a[M][N], b[N][P], c[M][P], walltime;
bool nthr_checked=false;
time_t start;
  int i, j, k, l, nthr=1;
printf("Using time() for wall clock time\n");
printf("Problem size: c(%d,%d) = a(%d,%d) * b(%d,%d)\n",
M, P, M, N, N, P);
printf("Calculating product %d time(s)\n", NTIMES);
  // a is a matrix of all ones
for (i=0; i<M; i++)
for (j=0; j<N; j++)
a[i][j] = 1.0;
// each column of b is the sequence 1,2,...,N
for (i=0; i<N; i++)
for (j=0; j<P; j++)
b[i][j] = i+1.;
start = time(NULL);
for (l=0; l<NTIMES; l++) {
#pragma omp parallel private(i,j,k)
{
#pragma omp single nowait
if (!nthr_checked) {
#ifdef _OPENMP
nthr = omp_get_num_threads();
#endif
printf( "\nWe are using %d thread(s)\n", nthr);
nthr_checked = true;
}
// Initialize product matrix
#pragma omp for nowait
for (i=0; i<M; i++)
for (j=0; j<P; j++)
c[i][j] = 0.0;
// Parallelize by row. The threads don't need to synchronize at
// loop end, so "nowait" can be used.
#pragma omp for nowait
for (i=0; i<M; i++) {
for (k=0; k<N; k++) {
// Each element of the product is just the sum 1+2+...+n
for (j=0; j<P; j++) {
c[i][j] += a[i][k] * b[k][j];
}
}
}
} // #pragma omp parallel private(i,j,k)
} // l=0,...NTIMES-1
walltime = time(NULL) - start;
printf("\nFinished calculations.\n");
printf("Matmul kernel wall clock time = %.2f sec\n", walltime);
printf("Wall clock time/thread = %.2f sec\n", walltime/nthr);
printf("MFlops = %f\n",
(double)(NTIMES)*(double)(N*M*2)*(double)(P)/walltime/1.0e6);
return 0;
}
|
GB_to_hyper.c
|
//------------------------------------------------------------------------------
// GB_to_hyper: convert a matrix to hypersparse
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// On input, the matrix may have shallow A->p content; it is safely removed.
// On output, the matrix is always hypersparse (even if out of memory). If the
// input matrix is non-hypersparse, it is given new A->p and A->h that are not
// shallow. If the input matrix is already hypersparse, nothing is changed
// (and in that case A->p and A->h remain shallow on output if shallow on
// input). The A->x and A->i content is not changed; it remains in whatever
// shallow/non-shallow state that it had on input.
// If an out-of-memory condition occurs, all content of the matrix is cleared.
// The input matrix may be jumbled; this is not an error condition.
#include "GB.h"
GrB_Info GB_to_hyper // convert a matrix to hypersparse
(
GrB_Matrix A, // matrix to convert to hypersparse
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT_OK_OR_JUMBLED (GB_check (A, "A converting to hypersparse", GB0)) ;
int64_t anz = GB_NNZ (A) ;
ASSERT (GB_ZOMBIES_OK (A)) ;
//--------------------------------------------------------------------------
// convert A to hypersparse form
//--------------------------------------------------------------------------
if (!A->is_hyper)
{
//----------------------------------------------------------------------
// determine the number of threads to use
//----------------------------------------------------------------------
int64_t n = A->vdim ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (n, chunk, nthreads_max) ;
int ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ;
ntasks = GB_IMIN (ntasks, n) ;
ntasks = GB_IMAX (ntasks, 1) ;
//----------------------------------------------------------------------
// count the number of non-empty vectors in A in each slice
//----------------------------------------------------------------------
A->is_hyper = true ; // A becomes hypersparse
ASSERT (A->h == NULL) ;
ASSERT (A->nvec == A->plen && A->plen == n) ;
const int64_t *restrict Ap_old = A->p ;
bool Ap_old_shallow = A->p_shallow ;
int64_t Count [ntasks+1] ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (int tid = 0 ; tid < ntasks ; tid++)
{
            int64_t jstart, jend, my_nvec_nonempty = 0 ;
GB_PARTITION (jstart, jend, n, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
if (Ap_old [j] < Ap_old [j+1]) my_nvec_nonempty++ ;
}
Count [tid] = my_nvec_nonempty ;
}
//----------------------------------------------------------------------
// compute cumulative sum of Counts and nvec_nonempty
//----------------------------------------------------------------------
GB_cumsum (Count, ntasks, NULL, 1) ;
int64_t nvec_nonempty = Count [ntasks] ;
A->nvec_nonempty = nvec_nonempty ;
//----------------------------------------------------------------------
// allocate the new A->p and A->h
//----------------------------------------------------------------------
int64_t *restrict Ap_new ;
int64_t *restrict Ah_new ;
GB_MALLOC_MEMORY (Ap_new, nvec_nonempty+1, sizeof (int64_t)) ;
GB_MALLOC_MEMORY (Ah_new, nvec_nonempty, sizeof (int64_t)) ;
if (Ap_new == NULL || Ah_new == NULL)
{
// out of memory
GB_FREE_MEMORY (Ap_new, nvec_nonempty+1, sizeof (int64_t)) ;
GB_FREE_MEMORY (Ah_new, nvec_nonempty, sizeof (int64_t)) ;
GB_PHIX_FREE (A) ;
return (GB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// transplant the new A->p and A->h into the matrix
//----------------------------------------------------------------------
A->plen = nvec_nonempty ;
A->nvec = nvec_nonempty ;
A->p = Ap_new ;
A->h = Ah_new ;
A->p_shallow = false ;
A->h_shallow = false ;
//----------------------------------------------------------------------
// construct the new hyperlist in the new A->p and A->h
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (int tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, k = Count [tid] ;
GB_PARTITION (jstart, jend, n, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
if (Ap_old [j] < Ap_old [j+1])
{
// vector index j is the kth vector in the new Ah
Ap_new [k] = Ap_old [j] ;
Ah_new [k] = j ;
k++ ;
}
}
ASSERT (k == Count [tid+1]) ;
}
Ap_new [nvec_nonempty] = anz ;
A->magic = GB_MAGIC ;
ASSERT (A->nvec_nonempty == GB_nvec_nonempty (A, Context)) ;
//----------------------------------------------------------------------
// free the old A->p unless it's shallow
//----------------------------------------------------------------------
if (!Ap_old_shallow)
{
GB_FREE_MEMORY (Ap_old, n+1, sizeof (int64_t)) ;
}
}
//--------------------------------------------------------------------------
// A is now in hypersparse form
//--------------------------------------------------------------------------
ASSERT (anz == GB_NNZ (A)) ;
ASSERT_OK_OR_JUMBLED (GB_check (A, "A converted to hypersparse", GB0)) ;
ASSERT (A->is_hyper) ;
return (GrB_SUCCESS) ;
}
|
GB_unaryop__abs_uint8_uint32.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint8_uint32
// op(A') function: GB_tran__abs_uint8_uint32
// C type: uint8_t
// A type: uint32_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT8 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_uint8_uint32
(
uint8_t *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_uint8_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
10.norace3.c
|
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
int main() {
int x = 100, y = 200;
#pragma omp parallel num_threads(8)
{
#pragma omp sections firstprivate(x) private(y)
{
{
y = x * 3;
}
#pragma omp section
{
y = 4 * x;
x = y - x;
}
}
}
return 0;
}
// CHECK: Region is Data Race Free.
// END
|
sin.c
|
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
static long N = 100000000;
#define NUM_THREADS 4
int main ()
{
//float start_time, run_time;
float *x = (float *) calloc(N, sizeof(float));
float *y = (float *) calloc(N, sizeof(float));
if (x == NULL || y == NULL) exit(1);
  // fill x with 1.0f; memset would set individual bytes, not float values
  for (long i = 0; i < N; ++i) x[i] = 1.0f;
//start_time = omp_get_wtime();
omp_set_num_threads(NUM_THREADS);
#pragma omp parallel for schedule(static)
for (int i = 0; i < N; ++i) {
float foo = x[i];
y[i] = sinf(cos(sin(cos(tan(log(sin(exp(sin(foo)))))))));
}
//run_time = omp_get_wtime() - start_time;
//printf("\n pi with %ld size is %lf in %lf seconds\n ", N, y[1], run_time);
return 0;
}
// Compile: `gcc -fopenmp sin.c -lm`
|
GB_binop__times_fc64.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_fc64)
// A.*B function (eWiseMult): GB (_AemultB_01__times_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__times_fc64)
// A.*B function (eWiseMult): GB (_AemultB_03__times_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_fc64)
// A*D function (colscale): GB (_AxD__times_fc64)
// D*A function (rowscale): GB (_DxB__times_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__times_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__times_fc64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_fc64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_fc64)
// C=scalar+B GB (_bind1st__times_fc64)
// C=scalar+B' GB (_bind1st_tran__times_fc64)
// C=A+scalar GB (_bind2nd__times_fc64)
// C=A'+scalar GB (_bind2nd_tran__times_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_FC64_mul (aij, bij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_mul (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_FC64 || GxB_NO_TIMES_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__times_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__times_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__times_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__times_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__times_fc64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_FC64_mul (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__times_fc64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_FC64_mul (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_mul (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__times_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_mul (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
zcgesv.c
|
/**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions mixed zc -> ds
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "core_lapack.h"
#include <math.h>
#include <omp.h>
#include <stdbool.h>
/***************************************************************************//**
*
* @ingroup plasma_gesv
*
* Computes the solution to a system of linear equations A * X = B, where A is
* an n-by-n matrix and X and B are n-by-nrhs matrices.
*
* plasma_zcgesv first factorizes the matrix using plasma_cgetrf and uses
* this factorization within an iterative refinement procedure to produce a
* solution with COMPLEX*16 normwise backward error quality (see below). If
* the approach fails the method falls back to a COMPLEX*16 factorization and
* solve.
*
* The iterative refinement is not going to be a winning strategy if
* the ratio COMPLEX performance over COMPLEX*16 performance is too
* small. A reasonable strategy should take the number of right-hand
* sides and the size of the matrix into account. This might be done
* with a call to ILAENV in the future. Up to now, we always try
* iterative refinement.
*
* The iterative refinement process is stopped if iter > itermax or
* for all the RHS we have: Rnorm < sqrt(n)*Xnorm*Anorm*eps, where:
*
* - iter is the number of the current iteration in the iterative refinement
* process
* - Rnorm is the Infinity-norm of the residual
* - Xnorm is the Infinity-norm of the solution
* - Anorm is the Infinity-operator-norm of the matrix A
* - eps is the machine epsilon returned by DLAMCH('Epsilon').
 * The value itermax is fixed to 30.
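 *
 * As an illustrative sketch (the numbers below are hypothetical, not taken
 * from this code): with eps ~ 1.1e-16 in double precision, n = 10000
 * (so sqrt(n) = 100) and Anorm = 1, the refinement stops once every
 * right-hand side j satisfies Rnorm[j] < 1.1e-14 * Xnorm[j].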
*
*******************************************************************************
*
* @param[in] n
* The number of linear equations, i.e., the order of the matrix A.
* n >= 0.
*
* @param[in] nrhs
* The number of right hand sides, i.e., the number of columns of the
* matrix B. nrhs >= 0.
*
* @param[in,out] pA
* The n-by-n matrix A.
* On exit, contains the LU factors of A.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
* @param[out] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
* @param[in] pB
* The n-by-nrhs matrix of right hand side matrix B.
* This matrix remains unchanged.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,n).
*
* @param[out] pX
* If return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] ldx
* The leading dimension of the array X. ldx >= max(1,n).
*
* @param[out] iter
 *          The number of iterations in the iterative refinement process
 *          needed for convergence. If the refinement failed, it is set
 *          to -(1+itermax), where itermax = 30.
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_zcgesv
* @sa plasma_dsgesv
* @sa plasma_zgesv
*
******************************************************************************/
int plasma_zcgesv(int n, int nrhs,
plasma_complex64_t *pA, int lda, int *ipiv,
plasma_complex64_t *pB, int ldb,
plasma_complex64_t *pX, int ldx, int *iter)
{
// Get PLASMA context
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if (n < 0) {
plasma_error("illegal value of n");
return -1;
}
if (nrhs < 0) {
plasma_error("illegal value of nrhs");
return -2;
}
if (lda < imax(1, n)) {
plasma_error("illegal value of lda");
return -4;
}
if (ldb < imax(1, n)) {
plasma_error("illegal value of ldb");
return -7;
}
if (ldx < imax(1, n)) {
plasma_error("illegal value of ldx");
return -9;
}
// quick return
*iter = 0;
if (imin(n, nrhs) == 0)
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_getrf(plasma, PlasmaComplexFloat, n, n);
// Set tiling parameters.
int nb = plasma->nb;
// Create tile matrices.
plasma_desc_t A;
plasma_desc_t B;
plasma_desc_t X;
int retval;
retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
n, n, 0, 0, n, n, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
n, nrhs, 0, 0, n, nrhs, &B);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
return retval;
}
retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
n, nrhs, 0, 0, n, nrhs, &X);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
return retval;
}
// Create additional tile matrices.
plasma_desc_t R, As, Xs;
retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
B.m, B.n, 0, 0, B.m, B.n, &R);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
plasma_desc_destroy(&X);
return retval;
}
retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
A.m, A.n, 0, 0, A.m, A.n, &As);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
plasma_desc_destroy(&X);
plasma_desc_destroy(&R);
return retval;
}
retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
X.m, X.n, 0, 0, X.m, X.n, &Xs);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
plasma_desc_destroy(&X);
plasma_desc_destroy(&R);
plasma_desc_destroy(&As);
return retval;
}
// Allocate tiled workspace for Infinity norm calculations.
size_t lwork = imax((size_t)A.nt*A.n+A.n, (size_t)X.mt*X.n+(size_t)R.mt*R.n);
double *work = (double*)malloc((lwork)*sizeof(double));
double *Rnorm = (double*)malloc(((size_t)R.n)*sizeof(double));
double *Xnorm = (double*)malloc(((size_t)X.n)*sizeof(double));
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// Initialize barrier.
plasma_barrier_init(&plasma->barrier);
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate matrices to tile layout.
plasma_omp_zge2desc(pA, lda, A, &sequence, &request);
plasma_omp_zge2desc(pB, ldb, B, &sequence, &request);
// Call tile async function.
plasma_omp_zcgesv(A, ipiv, B, X, As, Xs, R, work, Rnorm, Xnorm, iter,
&sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_zdesc2ge(X, pX, ldx, &sequence, &request);
}
// implicit synchronization
// Free matrices in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
plasma_desc_destroy(&X);
plasma_desc_destroy(&R);
plasma_desc_destroy(&As);
plasma_desc_destroy(&Xs);
free(work);
free(Rnorm);
free(Xnorm);
// Return status.
int status = sequence.status;
return status;
}
// Checks that the convergence criterion holds for all columns of R and X.
static bool conv(double *Rnorm, double *Xnorm, int n, double cte) {
bool value = true;
for (int i = 0; i < n; i++) {
if (Rnorm[i] > Xnorm[i] * cte) {
value = false;
break;
}
}
return value;
}
/***************************************************************************//**
*
* @ingroup plasma_gesv
*
* Solves a general linear system of equations using iterative refinement
* with the LU factor computed using plasma_cgetrf.
* Non-blocking tile version of plasma_zcgesv().
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[out] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
* @param[in] B
* Descriptor of matrix B.
*
* @param[in,out] X
* Descriptor of matrix X.
*
* @param[out] As
* Descriptor of auxiliary matrix A in single complex precision.
*
* @param[out] Xs
* Descriptor of auxiliary matrix X in single complex precision.
*
* @param[out] R
* Descriptor of auxiliary remainder matrix R.
*
* @param[out] work
 *          Workspace needed to compute the infinity norm of the matrix A.
*
* @param[out] Rnorm
 *          Workspace needed to store the max value in each of the residual vectors.
*
* @param[out] Xnorm
 *          Workspace needed to store the max value in each of the current
 *          solution vectors.
*
* @param[out] iter
 *          The number of iterations in the iterative refinement process
 *          needed for convergence. If the refinement failed, it is set
 *          to -(1+itermax), where itermax = 30.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PLASMA_SUCCESS (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zcgesv
* @sa plasma_omp_dsgesv
* @sa plasma_omp_zgesv
*
******************************************************************************/
void plasma_omp_zcgesv(plasma_desc_t A, int *ipiv,
plasma_desc_t B, plasma_desc_t X,
plasma_desc_t As, plasma_desc_t Xs, plasma_desc_t R,
double *work, double *Rnorm, double *Xnorm, int *iter,
plasma_sequence_t *sequence,
plasma_request_t *request)
{
const int itermax = 30;
const plasma_complex64_t zmone = -1.0;
const plasma_complex64_t zone = 1.0;
*iter = 0;
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_error("invalid A");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(B) != PlasmaSuccess) {
plasma_error("invalid B");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(X) != PlasmaSuccess) {
plasma_error("invalid X");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(As) != PlasmaSuccess) {
plasma_error("invalid As");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(Xs) != PlasmaSuccess) {
plasma_error("invalid Xs");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(R) != PlasmaSuccess) {
plasma_error("invalid R");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (sequence == NULL) {
plasma_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
if (A.n == 0 || B.n == 0)
return;
// workspaces for dzamax
double *workX = work;
double *workR = &work[X.mt*X.n];
// Compute some constants.
double cte;
double eps = LAPACKE_dlamch_work('E');
double Anorm;
plasma_pzlange(PlasmaInfNorm, A, work, &Anorm, sequence, request);
// Convert B from double to single precision, store result in Xs.
plasma_pzlag2c(B, Xs, sequence, request);
// Convert A from double to single precision, store result in As.
plasma_pzlag2c(A, As, sequence, request);
// Compute the LU factorization of As.
//#pragma omp taskwait
plasma_pcgetrf(As, ipiv, sequence, request);
//#pragma omp taskwait
// Solve the system As * Xs = Bs.
plasma_pcgeswp(PlasmaRowwise, Xs, ipiv, 1, sequence, request);
plasma_pctrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
1.0, As, Xs, sequence, request);
plasma_pctrsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
1.0, As, Xs, sequence, request);
// Convert Xs to double precision.
plasma_pclag2z(Xs, X, sequence, request);
// Compute R = B - A * X.
plasma_pzlacpy(PlasmaGeneral, PlasmaNoTrans, B, R, sequence, request);
plasma_pzgemm(PlasmaNoTrans, PlasmaNoTrans,
zmone, A, X, zone, R, sequence, request);
// Check whether the nrhs normwise backward error satisfies the
// stopping criterion. If yes, set iter=0 and return.
plasma_pdzamax(PlasmaColumnwise, X, workX, Xnorm, sequence, request);
plasma_pdzamax(PlasmaColumnwise, R, workR, Rnorm, sequence, request);
#pragma omp taskwait
{
cte = Anorm * eps * sqrt((double)A.n);
if (conv(Rnorm, Xnorm, R.n, cte)) {
*iter = 0;
return;
}
}
// iterative refinement
for (int iiter = 0; iiter < itermax; iiter++) {
// Convert R from double to single precision, store result in Xs.
plasma_pzlag2c(R, Xs, sequence, request);
// Solve the system As * Xs = Rs.
//#pragma omp taskwait
plasma_pcgeswp(PlasmaRowwise, Xs, ipiv, 1, sequence, request);
plasma_pctrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
1.0, As, Xs, sequence, request);
plasma_pctrsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
1.0, As, Xs, sequence, request);
// Convert Xs back to double precision and update the current iterate.
plasma_pclag2z(Xs, R, sequence, request);
plasma_pzgeadd(PlasmaNoTrans, zone, R, zone, X, sequence, request);
// Compute R = B - A * X.
plasma_pzlacpy(PlasmaGeneral, PlasmaNoTrans, B, R, sequence, request);
plasma_pzgemm(PlasmaNoTrans, PlasmaNoTrans, zmone, A, X, zone, R,
sequence, request);
// Check whether nrhs normwise backward error satisfies the
// stopping criterion. If yes, set iter = iiter > 0 and return.
plasma_pdzamax(PlasmaColumnwise, X, workX, Xnorm, sequence, request);
plasma_pdzamax(PlasmaColumnwise, R, workR, Rnorm, sequence, request);
#pragma omp taskwait
{
if (conv(Rnorm, Xnorm, R.n, cte)) {
*iter = iiter+1;
return;
}
}
}
    // If we reach this point, itermax iterations have been performed without
    // satisfying the stopping criterion; set the iter flag accordingly and
    // fall back to the double precision routine.
*iter = -itermax - 1;
//#if !defined(PLASMA_ZCGESV_WORKAROUND)
// Compute LU factorization of A.
//#pragma omp taskwait
plasma_pzgetrf(A, ipiv, sequence, request);
// Solve the system A * X = B.
plasma_pzlacpy(PlasmaGeneral, PlasmaNoTrans, B, X, sequence, request);
//#pragma omp taskwait
plasma_pzgeswp(PlasmaRowwise, X, ipiv, 1, sequence, request);
plasma_pztrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
1.0, A, X, sequence, request);
plasma_pztrsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
1.0, A, X, sequence, request);
//#endif
}
|
GB_binop__pair_int16.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__pair_int16
// A.*B function (eWiseMult): GB_AemultB__pair_int16
// A*D function (colscale): GB_AxD__pair_int16
// D*A function (rowscale): GB_DxB__pair_int16
// C+=B function (dense accum): GB_Cdense_accumB__pair_int16
// C+=b function (dense accum): GB_Cdense_accumb__pair_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pair_int16
// C=scalar+B (none)
// C=scalar+B' (none)
// C=A+scalar (none)
// C=A'+scalar (none)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = 1
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = 1 ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_INT16 || GxB_NO_PAIR_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__pair_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__pair_int16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__pair_int16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__pair_int16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__pair_int16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__pair_int16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__pair_int16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
GB_unop__minv_uint64_uint64.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_uint64_uint64)
// op(A') function: GB (_unop_tran__minv_uint64_uint64)
// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 64)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 64) ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = aij ; \
Cx [pC] = GB_IMINV_UNSIGNED (z, 64) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__minv_uint64_uint64)
(
uint64_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
uint64_t z = aij ;
Cx [p] = GB_IMINV_UNSIGNED (z, 64) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint64_t aij = Ax [p] ;
uint64_t z = aij ;
Cx [p] = GB_IMINV_UNSIGNED (z, 64) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__minv_uint64_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pr67501.c
|
/* PR c/67501 */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */
void
foo (void)
{
int i, j;
#pragma omp for simd copyprivate(j /* { dg-error "before end of line" } */
for (i = 0; i < 16; ++i) /* { dg-error "is not valid for" "" { target *-*-* } .-1 } */
;
}
|
countersparallel.c
|
//counters parallel version HPC Felix Feliu
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <memory.h>
#include <malloc.h>
#include <papi.h>
#include <omp.h>
#define SIZE 1000
int main(int argc, char** argv) {
float matrixa[SIZE][SIZE], matrixb[SIZE][SIZE], mresult[SIZE][SIZE];
int i, j, k;
    int events[6] = { PAPI_TOT_INS, PAPI_TOT_IIS, PAPI_LD_INS, PAPI_FP_OPS, PAPI_FP_INS, PAPI_SR_INS }, ret; // hardware counters used
    long long values[6];
    if (PAPI_num_counters() < 6) {
fprintf(stderr, "No hardware counters here, or PAPI not supported.\n");
exit(1);
}
    if ((ret = PAPI_start_counters(events, 6)) != PAPI_OK) {
fprintf(stderr, "PAPI failed to start counters: %s\n", PAPI_strerror(ret));
exit(1);
}
/* Initialize the Matrix arrays */
for (i = 0; i < SIZE * SIZE; i++) {
mresult[0][i] = 0.0;
matrixa[0][i] = matrixb[0][i] = rand() * (float)1.1;
}
//parallel block start
/* Matrix-Matrix multiply */
    // each iteration of i writes a distinct row of mresult, so no reduction is needed
    #pragma omp parallel for schedule(static,4) private(i,j,k)
for (i = 0; i < SIZE; i++)
for (j = 0; j < SIZE; j++)
for (k = 0; k < SIZE; k++)
mresult[i][j] = mresult[i][j] + matrixa[i][k] * matrixb[k][j];
    if ((ret = PAPI_read_counters(values, 6)) != PAPI_OK) {
fprintf(stderr, "PAPI failed to read counters: %s\n", PAPI_strerror(ret));
exit(1);
}
//printing option
printf("strore instructions = %lld\n", values[5]);
printf("floating point instructions = %lld\n", values[4]);
printf("floating point operations = %lld\n", values[1]);
printf("load instructions = %lld\n", values[2]);
printf("instructions issued = %lld\n", values[1]);
printf("instructions completed = %lld\n", values[0]);
exit(0);
}
|
perftest.c
|
/**
* Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
*
* See file LICENSE for terms.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include "api/libperf.h"
#include "lib/libperf_int.h"
#include <ucs/sys/string.h>
#include <ucs/sys/sys.h>
#include <ucs/debug/log.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <netdb.h>
#include <getopt.h>
#include <string.h>
#include <sys/types.h>
#include <sys/poll.h>
#include <locale.h>
#if HAVE_MPI
# include <mpi.h>
#elif HAVE_RTE
# include<rte.h>
#endif
#define MAX_BATCH_FILES 32
#define TL_RESOURCE_NAME_NONE "<none>"
#define TEST_PARAMS_ARGS "t:n:s:W:O:w:D:i:H:oSCqM:r:T:d:x:A:BUm:"
enum {
TEST_FLAG_PRINT_RESULTS = UCS_BIT(0),
TEST_FLAG_PRINT_TEST = UCS_BIT(1),
TEST_FLAG_SET_AFFINITY = UCS_BIT(8),
TEST_FLAG_NUMERIC_FMT = UCS_BIT(9),
TEST_FLAG_PRINT_FINAL = UCS_BIT(10),
TEST_FLAG_PRINT_CSV = UCS_BIT(11)
};
typedef struct sock_rte_group {
int is_server;
int connfd;
} sock_rte_group_t;
typedef struct test_type {
const char *name;
ucx_perf_api_t api;
ucx_perf_cmd_t command;
ucx_perf_test_type_t test_type;
const char *desc;
} test_type_t;
struct perftest_context {
ucx_perf_params_t params;
const char *server_addr;
int port;
int mpi;
unsigned cpu;
unsigned flags;
unsigned num_batch_files;
char *batch_files[MAX_BATCH_FILES];
char *test_names[MAX_BATCH_FILES];
sock_rte_group_t sock_rte_group;
};
test_type_t tests[] = {
{"am_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG,
"active message latency"},
{"put_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG,
"put latency"},
{"add_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_PINGPONG,
"atomic add latency"},
{"get", UCX_PERF_API_UCT, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI,
"get latency / bandwidth / message rate"},
{"fadd", UCX_PERF_API_UCT, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic fetch-and-add latency / rate"},
{"swap", UCX_PERF_API_UCT, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic swap latency / rate"},
{"cswap", UCX_PERF_API_UCT, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic compare-and-swap latency / rate"},
{"am_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI,
"active message bandwidth / message rate"},
{"put_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI,
"put bandwidth / message rate"},
{"add_mr", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic add message rate"},
{"tag_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_PINGPONG,
"tag match latency"},
{"tag_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_STREAM_UNI,
"tag match bandwidth"},
{"tag_sync_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_PINGPONG,
"tag sync match latency"},
{"tag_sync_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_STREAM_UNI,
"tag sync match bandwidth"},
{"ucp_put_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG,
"put latency"},
{"ucp_put_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI,
"put bandwidth"},
{"ucp_get", UCX_PERF_API_UCP, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI,
"get latency / bandwidth / message rate"},
{"ucp_add", UCX_PERF_API_UCP, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic add bandwidth / message rate"},
{"ucp_fadd", UCX_PERF_API_UCP, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic fetch-and-add latency / bandwidth / rate"},
{"ucp_swap", UCX_PERF_API_UCP, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic swap latency / bandwidth / rate"},
{"ucp_cswap", UCX_PERF_API_UCP, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic compare-and-swap latency / bandwidth / rate"},
{"stream_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_STREAM_UNI,
"stream bandwidth"},
{"stream_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_PINGPONG,
"stream latency"},
{NULL}
};
static int sock_io(int sock, ssize_t (*sock_call)(int, void *, size_t, int),
int poll_events, void *data, size_t size,
void (*progress)(void *arg), void *arg, const char *name)
{
size_t total = 0;
struct pollfd pfd;
int ret;
while (total < size) {
pfd.fd = sock;
pfd.events = poll_events;
pfd.revents = 0;
ret = poll(&pfd, 1, 1); /* poll for 1ms */
if (ret > 0) {
ucs_assert(ret == 1);
ucs_assert(pfd.revents & poll_events);
ret = sock_call(sock, (char*)data + total, size - total, 0);
if (ret < 0) {
ucs_error("%s() failed: %m", name);
return -1;
}
total += ret;
} else if ((ret < 0) && (errno != EINTR)) {
ucs_error("poll(fd=%d) failed: %m", sock);
return -1;
}
/* progress user context */
if (progress != NULL) {
progress(arg);
}
}
return 0;
}
static int safe_send(int sock, void *data, size_t size,
void (*progress)(void *arg), void *arg)
{
return sock_io(sock, (void*)send, POLLOUT, data, size, progress, arg, "send");
}
static int safe_recv(int sock, void *data, size_t size,
void (*progress)(void *arg), void *arg)
{
return sock_io(sock, recv, POLLIN, data, size, progress, arg, "recv");
}
static void print_progress(char **test_names, unsigned num_names,
const ucx_perf_result_t *result, unsigned flags,
int final)
{
static const char *fmt_csv = "%.0f,%.3f,%.3f,%.3f,%.2f,%.2f,%.0f,%.0f\n";
static const char *fmt_numeric = "%'14.0f %9.3f %9.3f %9.3f %10.2f %10.2f %'11.0f %'11.0f\n";
static const char *fmt_plain = "%14.0f %9.3f %9.3f %9.3f %10.2f %10.2f %11.0f %11.0f\n";
unsigned i;
if (!(flags & TEST_FLAG_PRINT_RESULTS) ||
(!final && (flags & TEST_FLAG_PRINT_FINAL)))
{
return;
}
if (flags & TEST_FLAG_PRINT_CSV) {
for (i = 0; i < num_names; ++i) {
printf("%s,", test_names[i]);
}
}
printf((flags & TEST_FLAG_PRINT_CSV) ? fmt_csv :
(flags & TEST_FLAG_NUMERIC_FMT) ? fmt_numeric :
fmt_plain,
(double)result->iters,
result->latency.typical * 1000000.0,
result->latency.moment_average * 1000000.0,
result->latency.total_average * 1000000.0,
result->bandwidth.moment_average / (1024.0 * 1024.0),
result->bandwidth.total_average / (1024.0 * 1024.0),
result->msgrate.moment_average,
result->msgrate.total_average);
fflush(stdout);
}
static void print_header(struct perftest_context *ctx)
{
const char *test_api_str;
const char *test_data_str;
test_type_t *test;
unsigned i;
if (ctx->flags & TEST_FLAG_PRINT_TEST) {
for (test = tests; test->name; ++test) {
if ((test->command == ctx->params.command) && (test->test_type == ctx->params.test_type)) {
break;
}
}
if (test->name != NULL) {
if (test->api == UCX_PERF_API_UCT) {
test_api_str = "transport layer";
switch (ctx->params.uct.data_layout) {
case UCT_PERF_DATA_LAYOUT_SHORT:
test_data_str = "short";
break;
case UCT_PERF_DATA_LAYOUT_BCOPY:
test_data_str = "bcopy";
break;
case UCT_PERF_DATA_LAYOUT_ZCOPY:
test_data_str = "zcopy";
break;
default:
test_data_str = "(undefined)";
break;
}
} else if (test->api == UCX_PERF_API_UCP) {
test_api_str = "protocol layer";
test_data_str = "(automatic)"; /* TODO contig/stride/stream */
} else {
return;
}
printf("+------------------------------------------------------------------------------------------+\n");
printf("| API: %-60s |\n", test_api_str);
printf("| Test: %-60s |\n", test->desc);
printf("| Data layout: %-60s |\n", test_data_str);
printf("| Message size: %-60zu |\n", ucx_perf_get_message_size(&ctx->params));
}
}
if (ctx->flags & TEST_FLAG_PRINT_CSV) {
if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
for (i = 0; i < ctx->num_batch_files; ++i) {
printf("%s,", basename(ctx->batch_files[i]));
}
printf("iterations,typical_lat,avg_lat,overall_lat,avg_bw,overall_bw,avg_mr,overall_mr\n");
}
} else {
if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
printf("+--------------+-----------------------------+---------------------+-----------------------+\n");
printf("| | latency (usec) | bandwidth (MB/s) | message rate (msg/s) |\n");
printf("+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n");
printf("| # iterations | typical | average | overall | average | overall | average | overall |\n");
printf("+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n");
} else if (ctx->flags & TEST_FLAG_PRINT_TEST) {
printf("+------------------------------------------------------------------------------------------+\n");
}
}
}
static void print_test_name(struct perftest_context *ctx)
{
char buf[200];
unsigned i, pos;
if (!(ctx->flags & TEST_FLAG_PRINT_CSV) && (ctx->num_batch_files > 0)) {
strcpy(buf, "+--------------+---------+---------+---------+----------+----------+-----------+-----------+");
pos = 1;
for (i = 0; i < ctx->num_batch_files; ++i) {
if (i != 0) {
buf[pos++] = '/';
}
memcpy(&buf[pos], ctx->test_names[i],
ucs_min(strlen(ctx->test_names[i]), sizeof(buf) - pos - 1));
pos += strlen(ctx->test_names[i]);
}
if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
printf("%s\n", buf);
}
}
}
static void usage(const struct perftest_context *ctx, const char *program)
{
static const char* api_names[] = {
[UCX_PERF_API_UCT] = "UCT",
[UCX_PERF_API_UCP] = "UCP"
};
test_type_t *test;
int UCS_V_UNUSED rank;
#if HAVE_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (ctx->mpi && (rank != 0)) {
return;
}
#endif
#if HAVE_MPI
printf(" Note: test can be also launched as an MPI application\n");
printf("\n");
#elif HAVE_RTE
printf(" Note: this test can be also launched as an libRTE application\n");
printf("\n");
#endif
printf(" Usage: %s [ server-hostname ] [ options ]\n", program);
printf("\n");
printf(" Common options:\n");
printf(" -t <test> test to run:\n");
for (test = tests; test->name; ++test) {
printf(" %13s - %s %s\n", test->name,
api_names[test->api], test->desc);
}
printf("\n");
printf(" -s <size> list of scatter-gather sizes for single message (%zu)\n",
ctx->params.msg_size_list[0]);
printf(" for example: \"-s 16,48,8192,8192,14\"\n");
printf(" -n <iters> number of iterations to run (%ld)\n", ctx->params.max_iter);
printf(" -w <iters> number of warm-up iterations (%zu)\n",
ctx->params.warmup_iter);
printf(" -c <cpu> set affinity to this CPU (off)\n");
printf(" -O <count> maximal number of uncompleted outstanding sends (%u)\n",
ctx->params.max_outstanding);
printf(" -i <offset> distance between consecutive scatter-gather entries (%zu)\n",
ctx->params.iov_stride);
printf(" -T <threads> number of threads in the test (%d), if >1 implies \"-M multi\"\n",
ctx->params.thread_count);
printf(" -B register memory with NONBLOCK flag\n");
printf(" -b <file> read and execute tests from a batch file: every line in the\n");
printf(" file is a test to run, first word is test name, the rest of\n");
printf(" the line is command-line arguments for the test.\n");
printf(" -p <port> TCP port to use for data exchange (%d)\n", ctx->port);
#if HAVE_MPI
printf(" -P <0|1> disable/enable MPI mode (%d)\n", ctx->mpi);
#endif
printf(" -h show this help message\n");
printf("\n");
printf(" Output format:\n");
printf(" -N use numeric formatting (thousands separator)\n");
printf(" -f print only final numbers\n");
printf(" -v print CSV-formatted output\n");
printf("\n");
printf(" UCT only:\n");
printf(" -d <device> device to use for testing\n");
printf(" -x <tl> transport to use for testing\n");
printf(" -D <layout> data layout for sender side:\n");
printf(" short - short messages (default, cannot be used for get)\n");
printf(" bcopy - copy-out (cannot be used for atomics)\n");
printf(" zcopy - zero-copy (cannot be used for atomics)\n");
printf(" iov - scatter-gather list (iovec)\n");
printf(" -W <count> flow control window size, for active messages (%u)\n",
ctx->params.uct.fc_window);
printf(" -H <size> active message header size (%zu)\n",
ctx->params.am_hdr_size);
printf(" -A <mode> asynchronous progress mode (thread_spinlock)\n");
printf(" thread_spinlock - separate progress thread with spin locking\n");
printf(" thread_mutex - separate progress thread with mutex locking\n");
printf(" signal - signal-based timer\n");
printf("\n");
printf(" UCP only:\n");
printf(" -M <thread> thread support level for progress engine (single)\n");
printf(" single - only the master thread can access\n");
printf(" serialized - one thread can access at a time\n");
printf(" multi - multiple threads can access\n");
printf(" -D <layout>[,<layout>]\n");
printf(" data layout for sender and receiver side (contig)\n");
printf(" contig - Continuous datatype\n");
printf(" iov - Scatter-gather list\n");
printf(" -C use wild-card tag for tag tests\n");
printf(" -U force unexpected flow by using tag probe\n");
printf(" -r <mode> receive mode for stream tests (recv)\n");
printf(" recv : Use ucp_stream_recv_nb\n");
printf(" recv_data : Use ucp_stream_recv_data_nb\n");
printf(" -m <mem type> memory type of messages\n");
printf(" host - system memory(default)\n");
if (ucx_perf_mem_type_allocators[UCT_MD_MEM_TYPE_CUDA] != NULL) {
printf(" cuda - NVIDIA GPU memory\n");
}
if (ucx_perf_mem_type_allocators[UCT_MD_MEM_TYPE_CUDA_MANAGED] != NULL) {
printf(" cuda-managed - NVIDIA cuda managed/unified memory\n");
}
printf("\n");
printf(" NOTE: When running UCP tests, transport and device should be specified by\n");
printf(" environment variables: UCX_TLS and UCX_[SELF|SHM|NET]_DEVICES.\n");
printf("\n");
}
static const char *__basename(const char *path)
{
const char *p = strrchr(path, '/');
return (p == NULL) ? path : (p + 1);
}
static ucs_status_t parse_ucp_datatype_params(const char *optarg,
ucp_perf_datatype_t *datatype)
{
const char *iov_type = "iov";
const size_t iov_type_size = strlen("iov");
const char *contig_type = "contig";
const size_t contig_type_size = strlen("contig");
if (0 == strncmp(optarg, iov_type, iov_type_size)) {
*datatype = UCP_PERF_DATATYPE_IOV;
} else if (0 == strncmp(optarg, contig_type, contig_type_size)) {
*datatype = UCP_PERF_DATATYPE_CONTIG;
} else {
return UCS_ERR_INVALID_PARAM;
}
return UCS_OK;
}
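/*
 * Illustrative note (not part of the original source): this helper matches by
 * prefix, so it can be applied to both halves of a "-D <send>[,<recv>]"
 * argument, e.g. the hypothetical "contig,iov", which parse_test_params()
 * below splits at the ',' and feeds to this function twice.
 */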
static ucs_status_t parse_message_sizes_params(const char *optarg,
ucx_perf_params_t *params)
{
char *optarg_ptr, *optarg_ptr2;
size_t token_num, token_it;
const char delim = ',';
optarg_ptr = (char *)optarg;
token_num = 0;
/* count the number of given message sizes */
while ((optarg_ptr = strchr(optarg_ptr, delim)) != NULL) {
++optarg_ptr;
++token_num;
}
++token_num;
params->msg_size_list = realloc(params->msg_size_list,
sizeof(*params->msg_size_list) * token_num);
if (NULL == params->msg_size_list) {
return UCS_ERR_NO_MEMORY;
}
optarg_ptr = (char *)optarg;
errno = 0;
for (token_it = 0; token_it < token_num; ++token_it) {
params->msg_size_list[token_it] = strtoul(optarg_ptr, &optarg_ptr2, 10);
if (((ERANGE == errno) && (ULONG_MAX == params->msg_size_list[token_it])) ||
((errno != 0) && (params->msg_size_list[token_it] == 0)) ||
(optarg_ptr == optarg_ptr2)) {
free(params->msg_size_list);
params->msg_size_list = NULL; /* prevent double free */
ucs_error("Invalid option substring argument at position %lu", token_it);
return UCS_ERR_INVALID_PARAM;
}
optarg_ptr = optarg_ptr2 + 1;
}
params->msg_size_cnt = token_num;
return UCS_OK;
}
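/*
 * Illustrative sketch (not part of the original source): for an argument such
 * as "16,48,8192" the code above first counts the two ',' delimiters, so
 * token_num == 3, then strtoul() fills msg_size_list with {16, 48, 8192} and
 * msg_size_cnt is set to 3. The sizes are made-up example values.
 */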
static void init_test_params(ucx_perf_params_t *params)
{
memset(params, 0, sizeof(*params));
params->api = UCX_PERF_API_LAST;
params->command = UCX_PERF_CMD_LAST;
params->test_type = UCX_PERF_TEST_TYPE_LAST;
params->thread_mode = UCS_THREAD_MODE_SINGLE;
params->thread_count = 1;
params->async_mode = UCS_ASYNC_THREAD_LOCK_TYPE;
params->wait_mode = UCX_PERF_WAIT_MODE_LAST;
params->max_outstanding = 1;
params->warmup_iter = 10000;
params->am_hdr_size = 8;
params->alignment = ucs_get_page_size();
params->max_iter = 1000000l;
params->max_time = 0.0;
params->report_interval = 1.0;
params->flags = UCX_PERF_TEST_FLAG_VERBOSE;
params->uct.fc_window = UCT_PERF_TEST_MAX_FC_WINDOW;
params->uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT;
params->mem_type = UCT_MD_MEM_TYPE_HOST;
params->msg_size_cnt = 1;
params->iov_stride = 0;
params->ucp.send_datatype = UCP_PERF_DATATYPE_CONTIG;
params->ucp.recv_datatype = UCP_PERF_DATATYPE_CONTIG;
strcpy(params->uct.dev_name, TL_RESOURCE_NAME_NONE);
strcpy(params->uct.tl_name, TL_RESOURCE_NAME_NONE);
params->msg_size_list = malloc(sizeof(*params->msg_size_list) *
params->msg_size_cnt);
params->msg_size_list[0] = 8;
}
static ucs_status_t parse_test_params(ucx_perf_params_t *params, char opt, const char *optarg)
{
test_type_t *test;
char *optarg2 = NULL;
switch (opt) {
case 'd':
ucs_snprintf_zero(params->uct.dev_name, sizeof(params->uct.dev_name),
"%s", optarg);
return UCS_OK;
case 'x':
ucs_snprintf_zero(params->uct.tl_name, sizeof(params->uct.tl_name),
"%s", optarg);
return UCS_OK;
case 't':
for (test = tests; test->name; ++test) {
if (!strcmp(optarg, test->name)) {
params->api = test->api;
params->command = test->command;
params->test_type = test->test_type;
break;
}
}
if (test->name == NULL) {
ucs_error("Invalid option argument for -t");
return UCS_ERR_INVALID_PARAM;
}
return UCS_OK;
case 'D':
if (!strcmp(optarg, "short")) {
params->uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT;
} else if (!strcmp(optarg, "bcopy")) {
params->uct.data_layout = UCT_PERF_DATA_LAYOUT_BCOPY;
} else if (!strcmp(optarg, "zcopy")) {
params->uct.data_layout = UCT_PERF_DATA_LAYOUT_ZCOPY;
} else if (UCS_OK == parse_ucp_datatype_params(optarg,
                                                       &params->ucp.send_datatype)) {
optarg2 = strchr(optarg, ',');
if (optarg2) {
if (UCS_OK != parse_ucp_datatype_params(optarg2 + 1,
                                                        &params->ucp.recv_datatype)) {
                    return UCS_ERR_INVALID_PARAM;
}
}
} else {
ucs_error("Invalid option argument for -D");
            return UCS_ERR_INVALID_PARAM;
}
return UCS_OK;
case 'i':
params->iov_stride = atol(optarg);
return UCS_OK;
case 'n':
params->max_iter = atol(optarg);
return UCS_OK;
case 's':
return parse_message_sizes_params(optarg, params);
case 'H':
params->am_hdr_size = atol(optarg);
return UCS_OK;
case 'W':
params->uct.fc_window = atoi(optarg);
return UCS_OK;
case 'O':
params->max_outstanding = atoi(optarg);
return UCS_OK;
case 'w':
params->warmup_iter = atol(optarg);
return UCS_OK;
case 'o':
params->flags |= UCX_PERF_TEST_FLAG_ONE_SIDED;
return UCS_OK;
case 'B':
params->flags |= UCX_PERF_TEST_FLAG_MAP_NONBLOCK;
return UCS_OK;
case 'q':
params->flags &= ~UCX_PERF_TEST_FLAG_VERBOSE;
return UCS_OK;
case 'C':
params->flags |= UCX_PERF_TEST_FLAG_TAG_WILDCARD;
return UCS_OK;
case 'U':
params->flags |= UCX_PERF_TEST_FLAG_TAG_UNEXP_PROBE;
return UCS_OK;
case 'M':
if (!strcmp(optarg, "single")) {
params->thread_mode = UCS_THREAD_MODE_SINGLE;
return UCS_OK;
} else if (!strcmp(optarg, "serialized")) {
params->thread_mode = UCS_THREAD_MODE_SERIALIZED;
return UCS_OK;
} else if (!strcmp(optarg, "multi")) {
params->thread_mode = UCS_THREAD_MODE_MULTI;
return UCS_OK;
} else {
ucs_error("Invalid option argument for -M");
return UCS_ERR_INVALID_PARAM;
}
case 'T':
params->thread_count = atoi(optarg);
params->thread_mode = UCS_THREAD_MODE_MULTI;
return UCS_OK;
case 'A':
if (!strcmp(optarg, "thread") || !strcmp(optarg, "thread_spinlock")) {
params->async_mode = UCS_ASYNC_MODE_THREAD_SPINLOCK;
return UCS_OK;
} else if (!strcmp(optarg, "thread_mutex")) {
params->async_mode = UCS_ASYNC_MODE_THREAD_MUTEX;
return UCS_OK;
} else if (!strcmp(optarg, "signal")) {
params->async_mode = UCS_ASYNC_MODE_SIGNAL;
return UCS_OK;
} else {
ucs_error("Invalid option argument for -A");
return UCS_ERR_INVALID_PARAM;
}
case 'r':
if (!strcmp(optarg, "recv_data")) {
params->flags |= UCX_PERF_TEST_FLAG_STREAM_RECV_DATA;
return UCS_OK;
} else if (!strcmp(optarg, "recv")) {
params->flags &= ~UCX_PERF_TEST_FLAG_STREAM_RECV_DATA;
return UCS_OK;
}
return UCS_ERR_INVALID_PARAM;
case 'm':
if (!strcmp(optarg, "host")) {
params->mem_type = UCT_MD_MEM_TYPE_HOST;
return UCS_OK;
} else if (!strcmp(optarg, "cuda") &&
(ucx_perf_mem_type_allocators[UCT_MD_MEM_TYPE_CUDA] != NULL)) {
params->mem_type = UCT_MD_MEM_TYPE_CUDA;
return UCS_OK;
} else if (!strcmp(optarg, "cuda-managed") &&
(ucx_perf_mem_type_allocators[UCT_MD_MEM_TYPE_CUDA_MANAGED] != NULL)) {
params->mem_type = UCT_MD_MEM_TYPE_CUDA_MANAGED;
return UCS_OK;
}
return UCS_ERR_INVALID_PARAM;
default:
return UCS_ERR_INVALID_PARAM;
}
}
static ucs_status_t read_batch_file(FILE *batch_file, const char *file_name,
int *line_num, ucx_perf_params_t *params,
char** test_name_p)
{
#define MAX_SIZE 256
#define MAX_ARG_SIZE 2048
ucs_status_t status;
char buf[MAX_ARG_SIZE];
int argc;
char *argv[MAX_SIZE + 1];
int c;
char *p;
do {
if (fgets(buf, sizeof(buf) - 1, batch_file) == NULL) {
return UCS_ERR_NO_ELEM;
}
++(*line_num);
argc = 0;
p = strtok(buf, " \t\n\r");
while (p && (argc < MAX_SIZE)) {
argv[argc++] = p;
p = strtok(NULL, " \t\n\r");
}
argv[argc] = NULL;
} while ((argc == 0) || (argv[0][0] == '#'));
optind = 1;
while ((c = getopt (argc, argv, TEST_PARAMS_ARGS)) != -1) {
status = parse_test_params(params, c, optarg);
if (status != UCS_OK) {
ucs_error("in batch file '%s' line %d: -%c %s: %s",
file_name, *line_num, c, optarg, ucs_status_string(status));
return status;
}
}
*test_name_p = strdup(argv[0]);
return UCS_OK;
}
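/*
 * Hypothetical example of a batch file accepted by read_batch_file(); the
 * test names and option values below are illustrative only:
 *
 *     # lines starting with '#' (and blank lines) are skipped
 *     am_lat_8    -t am_lat -s 8
 *     put_bw_1m   -t put_bw -s 1048576 -n 10000
 *
 * The first word of a line becomes *test_name_p and the remaining words are
 * parsed with getopt() using TEST_PARAMS_ARGS.
 */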
static ucs_status_t parse_opts(struct perftest_context *ctx, int mpi_initialized,
int argc, char **argv)
{
ucs_status_t status;
int c;
ucs_trace_func("");
ucx_perf_global_init(); /* initialize memory types */
init_test_params(&ctx->params);
ctx->server_addr = NULL;
ctx->num_batch_files = 0;
ctx->port = 13337;
ctx->flags = 0;
ctx->mpi = mpi_initialized;
optind = 1;
while ((c = getopt (argc, argv, "p:b:Nfvc:P:h" TEST_PARAMS_ARGS)) != -1) {
switch (c) {
case 'p':
ctx->port = atoi(optarg);
break;
case 'b':
if (ctx->num_batch_files < MAX_BATCH_FILES) {
ctx->batch_files[ctx->num_batch_files++] = optarg;
}
break;
case 'N':
ctx->flags |= TEST_FLAG_NUMERIC_FMT;
break;
case 'f':
ctx->flags |= TEST_FLAG_PRINT_FINAL;
break;
case 'v':
ctx->flags |= TEST_FLAG_PRINT_CSV;
break;
case 'c':
ctx->flags |= TEST_FLAG_SET_AFFINITY;
ctx->cpu = atoi(optarg);
break;
case 'P':
#if HAVE_MPI
ctx->mpi = atoi(optarg) && mpi_initialized;
break;
#endif
case 'h':
usage(ctx, __basename(argv[0]));
return UCS_ERR_CANCELED;
default:
status = parse_test_params(&ctx->params, c, optarg);
if (status != UCS_OK) {
usage(ctx, __basename(argv[0]));
return status;
}
break;
}
}
if (optind < argc) {
ctx->server_addr = argv[optind];
}
return UCS_OK;
}
static unsigned sock_rte_group_size(void *rte_group)
{
return 2;
}
static unsigned sock_rte_group_index(void *rte_group)
{
sock_rte_group_t *group = rte_group;
return group->is_server ? 0 : 1;
}
static void sock_rte_barrier(void *rte_group, void (*progress)(void *arg),
void *arg)
{
#pragma omp master
{
sock_rte_group_t *group = rte_group;
const unsigned magic = 0xdeadbeef;
unsigned sync;
sync = magic;
safe_send(group->connfd, &sync, sizeof(unsigned), progress, arg);
sync = 0;
safe_recv(group->connfd, &sync, sizeof(unsigned), progress, arg);
ucs_assert(sync == magic);
}
#pragma omp barrier
}
static void sock_rte_post_vec(void *rte_group, const struct iovec *iovec,
int iovcnt, void **req)
{
sock_rte_group_t *group = rte_group;
size_t size;
int i;
size = 0;
for (i = 0; i < iovcnt; ++i) {
size += iovec[i].iov_len;
}
safe_send(group->connfd, &size, sizeof(size), NULL, NULL);
for (i = 0; i < iovcnt; ++i) {
safe_send(group->connfd, iovec[i].iov_base, iovec[i].iov_len, NULL,
NULL);
}
}
static void sock_rte_recv(void *rte_group, unsigned src, void *buffer,
size_t max, void *req)
{
sock_rte_group_t *group = rte_group;
int group_index;
size_t size;
group_index = sock_rte_group_index(rte_group);
if (src == group_index) {
return;
}
ucs_assert_always(src == (1 - group_index));
safe_recv(group->connfd, &size, sizeof(size), NULL, NULL);
ucs_assert_always(size <= max);
safe_recv(group->connfd, buffer, size, NULL, NULL);
}
static void sock_rte_report(void *rte_group, const ucx_perf_result_t *result,
void *arg, int is_final)
{
struct perftest_context *ctx = arg;
print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags,
is_final);
}
static ucx_perf_rte_t sock_rte = {
.group_size = sock_rte_group_size,
.group_index = sock_rte_group_index,
.barrier = sock_rte_barrier,
.post_vec = sock_rte_post_vec,
.recv = sock_rte_recv,
.exchange_vec = (void*)ucs_empty_function,
.report = sock_rte_report,
};
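/*
 * Note (added for clarity): the socket RTE always describes a fixed group of
 * two processes - the server reports index 0 and the client index 1 - and all
 * exchanges go over the single TCP connection established in setup_sock_rte()
 * below.
 */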
static ucs_status_t setup_sock_rte(struct perftest_context *ctx)
{
struct sockaddr_in inaddr;
struct hostent *he;
ucs_status_t status;
int optval = 1;
int sockfd, connfd;
int ret;
sockfd = socket(AF_INET, SOCK_STREAM, 0);
if (sockfd < 0) {
ucs_error("socket() failed: %m");
status = UCS_ERR_IO_ERROR;
goto err;
}
if (ctx->server_addr == NULL) {
optval = 1;
ret = setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));
if (ret < 0) {
ucs_error("setsockopt(SO_REUSEADDR) failed: %m");
status = UCS_ERR_INVALID_PARAM;
goto err_close_sockfd;
}
inaddr.sin_family = AF_INET;
inaddr.sin_port = htons(ctx->port);
inaddr.sin_addr.s_addr = INADDR_ANY;
memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero));
ret = bind(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr));
if (ret < 0) {
ucs_error("bind() failed: %m");
status = UCS_ERR_INVALID_ADDR;
goto err_close_sockfd;
}
ret = listen(sockfd, 10);
if (ret < 0) {
ucs_error("listen() failed: %m");
status = UCS_ERR_IO_ERROR;
goto err_close_sockfd;
}
printf("Waiting for connection...\n");
/* Accept next connection */
connfd = accept(sockfd, NULL, NULL);
if (connfd < 0) {
ucs_error("accept() failed: %m");
status = UCS_ERR_IO_ERROR;
goto err_close_sockfd;
}
close(sockfd);
safe_recv(connfd, &ctx->params, sizeof(ctx->params), NULL, NULL);
if (ctx->params.msg_size_cnt) {
ctx->params.msg_size_list = malloc(sizeof(*ctx->params.msg_size_list) *
ctx->params.msg_size_cnt);
if (NULL == ctx->params.msg_size_list) {
status = UCS_ERR_NO_MEMORY;
goto err_close_connfd;
}
safe_recv(connfd, ctx->params.msg_size_list,
sizeof(*ctx->params.msg_size_list) * ctx->params.msg_size_cnt,
NULL, NULL);
}
ctx->sock_rte_group.connfd = connfd;
ctx->sock_rte_group.is_server = 1;
} else {
he = gethostbyname(ctx->server_addr);
if (he == NULL || he->h_addr_list == NULL) {
ucs_error("host %s not found: %s", ctx->server_addr,
hstrerror(h_errno));
status = UCS_ERR_INVALID_ADDR;
goto err_close_sockfd;
}
inaddr.sin_family = he->h_addrtype;
inaddr.sin_port = htons(ctx->port);
ucs_assert(he->h_length == sizeof(inaddr.sin_addr));
memcpy(&inaddr.sin_addr, he->h_addr_list[0], he->h_length);
memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero));
ret = connect(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr));
if (ret < 0) {
ucs_error("connect() failed: %m");
status = UCS_ERR_UNREACHABLE;
goto err_close_sockfd;
}
safe_send(sockfd, &ctx->params, sizeof(ctx->params), NULL, NULL);
if (ctx->params.msg_size_cnt) {
safe_send(sockfd, ctx->params.msg_size_list,
sizeof(*ctx->params.msg_size_list) * ctx->params.msg_size_cnt,
NULL, NULL);
}
ctx->sock_rte_group.connfd = sockfd;
ctx->sock_rte_group.is_server = 0;
}
if (ctx->sock_rte_group.is_server) {
ctx->flags |= TEST_FLAG_PRINT_TEST;
} else {
ctx->flags |= TEST_FLAG_PRINT_RESULTS;
}
ctx->params.rte_group = &ctx->sock_rte_group;
ctx->params.rte = &sock_rte;
ctx->params.report_arg = ctx;
return UCS_OK;
err_close_connfd:
close(connfd);
goto err;
err_close_sockfd:
close(sockfd);
err:
return status;
}
static ucs_status_t cleanup_sock_rte(struct perftest_context *ctx)
{
close(ctx->sock_rte_group.connfd);
return UCS_OK;
}
#if HAVE_MPI
static unsigned mpi_rte_group_size(void *rte_group)
{
int size;
MPI_Comm_size(MPI_COMM_WORLD, &size);
return size;
}
static unsigned mpi_rte_group_index(void *rte_group)
{
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
return rank;
}
static void mpi_rte_barrier(void *rte_group, void (*progress)(void *arg),
void *arg)
{
int group_size, my_rank, i;
MPI_Request *reqs;
int nreqs = 0;
int dummy;
int flag;
#pragma omp master
    {
/*
* Naive non-blocking barrier implementation over send/recv, to call user
* progress while waiting for completion.
* Not using MPI_Ibarrier to be compatible with MPI-1.
*/
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &group_size);
/* allocate maximal possible number of requests */
reqs = (MPI_Request*)alloca(sizeof(*reqs) * group_size);
if (my_rank == 0) {
/* root gathers "ping" from all other ranks */
for (i = 1; i < group_size; ++i) {
MPI_Irecv(&dummy, 0, MPI_INT,
i /* source */,
1 /* tag */,
MPI_COMM_WORLD,
&reqs[nreqs++]);
}
} else {
/* every non-root rank sends "ping" and waits for "pong" */
MPI_Send(&dummy, 0, MPI_INT,
0 /* dest */,
1 /* tag */,
MPI_COMM_WORLD);
MPI_Irecv(&dummy, 0, MPI_INT,
0 /* source */,
2 /* tag */,
MPI_COMM_WORLD,
&reqs[nreqs++]);
}
/* Waiting for receive requests */
do {
MPI_Testall(nreqs, reqs, &flag, MPI_STATUSES_IGNORE);
progress(arg);
} while (!flag);
if (my_rank == 0) {
/* root sends "pong" to all ranks */
for (i = 1; i < group_size; ++i) {
MPI_Send(&dummy, 0, MPI_INT,
i /* dest */,
2 /* tag */,
MPI_COMM_WORLD);
}
}
    }
#pragma omp barrier
}
static void mpi_rte_post_vec(void *rte_group, const struct iovec *iovec,
int iovcnt, void **req)
{
int group_size;
int my_rank;
int dest, i;
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &group_size);
for (dest = 0; dest < group_size; ++dest) {
if (dest == my_rank) {
continue;
}
for (i = 0; i < iovcnt; ++i) {
MPI_Send(iovec[i].iov_base, iovec[i].iov_len, MPI_BYTE, dest,
i == (iovcnt - 1), /* Send last iov with tag == 1 */
MPI_COMM_WORLD);
}
}
*req = (void*)(uintptr_t)1;
}
static void mpi_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max,
void *req)
{
MPI_Status status;
size_t offset;
int my_rank;
int count;
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
if (src == my_rank) {
return;
}
offset = 0;
do {
ucs_assert_always(offset < max);
MPI_Recv(buffer + offset, max - offset, MPI_BYTE, src, MPI_ANY_TAG,
MPI_COMM_WORLD, &status);
MPI_Get_count(&status, MPI_BYTE, &count);
offset += count;
} while (status.MPI_TAG != 1);
}
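/*
 * Note (added for clarity): mpi_rte_post_vec() above sends every iov entry
 * with tag 0 except the last one, which carries tag 1; mpi_rte_recv() keeps
 * receiving and appending until it sees that tag-1 message, reassembling the
 * full vector on the receiver side.
 */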
static void mpi_rte_report(void *rte_group, const ucx_perf_result_t *result,
void *arg, int is_final)
{
struct perftest_context *ctx = arg;
print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags,
is_final);
}
static ucx_perf_rte_t mpi_rte = {
.group_size = mpi_rte_group_size,
.group_index = mpi_rte_group_index,
.barrier = mpi_rte_barrier,
.post_vec = mpi_rte_post_vec,
.recv = mpi_rte_recv,
.exchange_vec = (void*)ucs_empty_function,
.report = mpi_rte_report,
};
#elif HAVE_RTE
static unsigned ext_rte_group_size(void *rte_group)
{
rte_group_t group = (rte_group_t)rte_group;
return rte_group_size(group);
}
static unsigned ext_rte_group_index(void *rte_group)
{
rte_group_t group = (rte_group_t)rte_group;
return rte_group_rank(group);
}
static void ext_rte_barrier(void *rte_group, void (*progress)(void *arg),
void *arg)
{
#pragma omp master
{
rte_group_t group = (rte_group_t)rte_group;
int rc;
rc = rte_barrier(group);
if (RTE_SUCCESS != rc) {
ucs_error("Failed to rte_barrier");
}
}
#pragma omp barrier
}
static void ext_rte_post_vec(void *rte_group, const struct iovec* iovec,
int iovcnt, void **req)
{
rte_group_t group = (rte_group_t)rte_group;
rte_srs_session_t session;
rte_iovec_t *r_vec;
int i, rc;
rc = rte_srs_session_create(group, 0, &session);
if (RTE_SUCCESS != rc) {
ucs_error("Failed to rte_srs_session_create");
}
r_vec = calloc(iovcnt, sizeof(rte_iovec_t));
if (r_vec == NULL) {
return;
}
for (i = 0; i < iovcnt; ++i) {
r_vec[i].iov_base = iovec[i].iov_base;
r_vec[i].type = rte_datatype_uint8_t;
r_vec[i].count = iovec[i].iov_len;
}
rc = rte_srs_set_data(session, "KEY_PERF", r_vec, iovcnt);
if (RTE_SUCCESS != rc) {
ucs_error("Failed to rte_srs_set_data");
}
*req = session;
free(r_vec);
}
static void ext_rte_recv(void *rte_group, unsigned src, void *buffer,
size_t max, void *req)
{
rte_group_t group = (rte_group_t)rte_group;
rte_srs_session_t session = (rte_srs_session_t)req;
void *rte_buffer = NULL;
rte_iovec_t r_vec;
uint32_t offset;
int size;
int rc;
rc = rte_srs_get_data(session, rte_group_index_to_ec(group, src),
"KEY_PERF", &rte_buffer, &size);
if (RTE_SUCCESS != rc) {
ucs_error("Failed to rte_srs_get_data");
return;
}
r_vec.iov_base = buffer;
r_vec.type = rte_datatype_uint8_t;
r_vec.count = max;
offset = 0;
rte_unpack(&r_vec, rte_buffer, &offset);
rc = rte_srs_session_destroy(session);
if (RTE_SUCCESS != rc) {
ucs_error("Failed to rte_srs_session_destroy");
}
free(rte_buffer);
}
static void ext_rte_exchange_vec(void *rte_group, void * req)
{
rte_srs_session_t session = (rte_srs_session_t)req;
int rc;
rc = rte_srs_exchange_data(session);
if (RTE_SUCCESS != rc) {
ucs_error("Failed to rte_srs_exchange_data");
}
}
static void ext_rte_report(void *rte_group, const ucx_perf_result_t *result,
void *arg, int is_final)
{
struct perftest_context *ctx = arg;
print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags,
is_final);
}
static ucx_perf_rte_t ext_rte = {
.group_size = ext_rte_group_size,
.group_index = ext_rte_group_index,
.barrier = ext_rte_barrier,
.report = ext_rte_report,
.post_vec = ext_rte_post_vec,
.recv = ext_rte_recv,
.exchange_vec = ext_rte_exchange_vec,
};
#endif
static ucs_status_t setup_mpi_rte(struct perftest_context *ctx)
{
ucs_trace_func("");
#if HAVE_MPI
int size, rank;
MPI_Comm_size(MPI_COMM_WORLD, &size);
if (size != 2) {
ucs_error("This test should run with exactly 2 processes (actual: %d)", size);
return UCS_ERR_INVALID_PARAM;
}
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank == 1) {
ctx->flags |= TEST_FLAG_PRINT_RESULTS;
}
ctx->params.rte_group = NULL;
ctx->params.rte = &mpi_rte;
ctx->params.report_arg = ctx;
#elif HAVE_RTE
rte_group_t group;
rte_init(NULL, NULL, &group);
if (1 == rte_group_rank(group)) {
ctx->flags |= TEST_FLAG_PRINT_RESULTS;
}
ctx->params.rte_group = group;
ctx->params.rte = &ext_rte;
ctx->params.report_arg = ctx;
#endif
return UCS_OK;
}
static ucs_status_t cleanup_mpi_rte(struct perftest_context *ctx)
{
#if HAVE_RTE
rte_finalize();
#endif
return UCS_OK;
}
static ucs_status_t check_system(struct perftest_context *ctx)
{
cpu_set_t cpuset;
unsigned i, count, nr_cpus;
int ret;
ucs_trace_func("");
ret = sysconf(_SC_NPROCESSORS_CONF);
if (ret < 0) {
ucs_error("failed to get local cpu count: %m");
return UCS_ERR_INVALID_PARAM;
}
nr_cpus = ret;
memset(&cpuset, 0, sizeof(cpuset));
if (ctx->flags & TEST_FLAG_SET_AFFINITY) {
if (ctx->cpu >= nr_cpus) {
ucs_error("cpu (%u) ot of range (0..%u)", ctx->cpu, nr_cpus - 1);
return UCS_ERR_INVALID_PARAM;
}
CPU_SET(ctx->cpu, &cpuset);
ret = sched_setaffinity(0, sizeof(cpuset), &cpuset);
if (ret) {
ucs_warn("sched_setaffinity() failed: %m");
return UCS_ERR_INVALID_PARAM;
}
} else {
ret = sched_getaffinity(0, sizeof(cpuset), &cpuset);
if (ret) {
ucs_warn("sched_getaffinity() failed: %m");
return UCS_ERR_INVALID_PARAM;
}
count = 0;
for (i = 0; i < CPU_SETSIZE; ++i) {
if (CPU_ISSET(i, &cpuset)) {
++count;
}
}
if (count > 2) {
ucs_warn("CPU affinity is not set (bound to %u cpus)."
" Performance may be impacted.", count);
}
}
return UCS_OK;
}
static void clone_params(ucx_perf_params_t *dest, const ucx_perf_params_t *src)
{
size_t msg_size_list_size;
*dest = *src;
msg_size_list_size = dest->msg_size_cnt * sizeof(*dest->msg_size_list);
dest->msg_size_list = malloc(msg_size_list_size);
memcpy(dest->msg_size_list, src->msg_size_list, msg_size_list_size);
}
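/*
 * Note (added for clarity): clone_params() deep-copies msg_size_list so that
 * every recursion level in run_test_recurs() below owns, and can safely free,
 * its own copy of the list.
 */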
static ucs_status_t run_test_recurs(struct perftest_context *ctx,
ucx_perf_params_t *parent_params,
unsigned depth)
{
ucx_perf_params_t params;
ucx_perf_result_t result;
ucs_status_t status;
FILE *batch_file;
int line_num;
ucs_trace_func("depth=%u, num_files=%u", depth, ctx->num_batch_files);
if (parent_params->api == UCX_PERF_API_UCP) {
if (strcmp(parent_params->uct.dev_name, TL_RESOURCE_NAME_NONE)) {
ucs_warn("-d '%s' ignored for UCP test; see NOTES section in help message",
parent_params->uct.dev_name);
}
if (strcmp(parent_params->uct.tl_name, TL_RESOURCE_NAME_NONE)) {
ucs_warn("-x '%s' ignored for UCP test; see NOTES section in help message",
parent_params->uct.tl_name);
}
}
if (depth >= ctx->num_batch_files) {
print_test_name(ctx);
return ucx_perf_run(parent_params, &result);
}
batch_file = fopen(ctx->batch_files[depth], "r");
if (batch_file == NULL) {
ucs_error("Failed to open batch file '%s': %m", ctx->batch_files[depth]);
return UCS_ERR_IO_ERROR;
}
    clone_params(&params, parent_params);
line_num = 0;
while ((status = read_batch_file(batch_file, ctx->batch_files[depth],
&line_num, ¶ms,
&ctx->test_names[depth])) == UCS_OK) {
        status = run_test_recurs(ctx, &params, depth + 1);
free(params.msg_size_list);
free(ctx->test_names[depth]);
ctx->test_names[depth] = NULL;
        clone_params(&params, parent_params);
}
free(params.msg_size_list);
fclose(batch_file);
return UCS_OK;
}
static ucs_status_t run_test(struct perftest_context *ctx)
{
ucs_status_t status;
ucs_trace_func("");
setlocale(LC_ALL, "en_US");
print_header(ctx);
status = run_test_recurs(ctx, &ctx->params, 0);
if (status != UCS_OK) {
ucs_error("Failed to run test: %s", ucs_status_string(status));
}
return status;
}
int main(int argc, char **argv)
{
struct perftest_context ctx;
ucs_status_t status;
int mpi_initialized;
int mpi_rte;
int ret;
#if HAVE_MPI
mpi_initialized = !isatty(0) && (MPI_Init(&argc, &argv) == 0);
#else
mpi_initialized = 0;
#endif
/* Parse command line */
status = parse_opts(&ctx, mpi_initialized, argc, argv);
if (status != UCS_OK) {
ret = (status == UCS_ERR_CANCELED) ? 0 : -127;
goto out;
}
#ifdef __COVERITY__
/* coverity[dont_call] */
mpi_rte = rand(); /* Shut up deadcode error */
#endif
if (ctx.mpi) {
mpi_rte = 1;
} else {
#if HAVE_RTE
mpi_rte = 1;
#else
mpi_rte = 0;
#endif
}
status = check_system(&ctx);
if (status != UCS_OK) {
ret = -1;
goto out;
}
/* Create RTE */
status = (mpi_rte) ? setup_mpi_rte(&ctx) : setup_sock_rte(&ctx);
if (status != UCS_OK) {
ret = -1;
goto out;
}
/* Run the test */
status = run_test(&ctx);
if (status != UCS_OK) {
ret = -1;
goto out_cleanup_rte;
}
ret = 0;
out_cleanup_rte:
(mpi_rte) ? cleanup_mpi_rte(&ctx) : cleanup_sock_rte(&ctx);
out:
if (ctx.params.msg_size_list) {
free(ctx.params.msg_size_list);
}
if (mpi_initialized) {
#if HAVE_MPI
MPI_Finalize();
#endif
}
return ret;
}
|
OpenMPClause.h
|
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file defines OpenMP AST classes for clauses.
/// There are clauses for executable directives, clauses for declarative
/// directives and clauses which can be used in both kinds of directives.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H
#define LLVM_CLANG_AST_OPENMPCLAUSE_H
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for clauses.
//
//===----------------------------------------------------------------------===//
/// \brief This is a basic class for representing a single OpenMP clause.
///
class OMPClause {
/// \brief Starting location of the clause (the clause keyword).
SourceLocation StartLoc;
/// \brief Ending location of the clause.
SourceLocation EndLoc;
/// \brief Kind of the clause.
OpenMPClauseKind Kind;
protected:
OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc)
: StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {}
public:
/// \brief Returns the starting location of the clause.
SourceLocation getLocStart() const { return StartLoc; }
/// \brief Returns the ending location of the clause.
SourceLocation getLocEnd() const { return EndLoc; }
/// \brief Sets the starting location of the clause.
void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
/// \brief Sets the ending location of the clause.
void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
/// \brief Returns kind of OpenMP clause (private, shared, reduction, etc.).
OpenMPClauseKind getClauseKind() const { return Kind; }
bool isImplicit() const { return StartLoc.isInvalid(); }
StmtRange children();
ConstStmtRange children() const {
return const_cast<OMPClause *>(this)->children();
}
static bool classof(const OMPClause *) { return true; }
};
/// \brief This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
template <class T> class OMPVarListClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Number of variables in the list.
unsigned NumVars;
protected:
/// \brief Fetches list of variables associated with this clause.
MutableArrayRef<Expr *> getVarRefs() {
return MutableArrayRef<Expr *>(
reinterpret_cast<Expr **>(
reinterpret_cast<char *>(this) +
llvm::RoundUpToAlignment(sizeof(T), llvm::alignOf<Expr *>())),
NumVars);
}
/// \brief Sets the list of variables for this clause.
void setVarRefs(ArrayRef<Expr *> VL) {
assert(VL.size() == NumVars &&
"Number of variables is not the same as the preallocated buffer");
std::copy(
VL.begin(), VL.end(),
reinterpret_cast<Expr **>(
reinterpret_cast<char *>(this) +
llvm::RoundUpToAlignment(sizeof(T), llvm::alignOf<Expr *>())));
}
/// \brief Build a clause with \a N variables
///
/// \param K Kind of the clause.
/// \param StartLoc Starting location of the clause (the clause keyword).
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
///
OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N)
: OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {}
public:
typedef MutableArrayRef<Expr *>::iterator varlist_iterator;
typedef ArrayRef<const Expr *>::iterator varlist_const_iterator;
typedef llvm::iterator_range<varlist_iterator> varlist_range;
typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range;
unsigned varlist_size() const { return NumVars; }
bool varlist_empty() const { return NumVars == 0; }
varlist_range varlists() {
return varlist_range(varlist_begin(), varlist_end());
}
varlist_const_range varlists() const {
return varlist_const_range(varlist_begin(), varlist_end());
}
varlist_iterator varlist_begin() { return getVarRefs().begin(); }
varlist_iterator varlist_end() { return getVarRefs().end(); }
varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); }
varlist_const_iterator varlist_end() const { return getVarRefs().end(); }
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Fetches list of all variables in the clause.
ArrayRef<const Expr *> getVarRefs() const {
return llvm::makeArrayRef(
reinterpret_cast<const Expr *const *>(
reinterpret_cast<const char *>(this) +
llvm::RoundUpToAlignment(sizeof(T), llvm::alignOf<const Expr *>())),
NumVars);
}
};
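// Illustrative note (not part of the original header): getVarRefs() and
// setVarRefs() assume the variable list is tail-allocated directly behind the
// concrete clause object, so the in-memory layout of a clause T with N
// variables is roughly:
//
//   [ T object | padding up to alignof(Expr *) | Expr * x N ]
//
// Concrete clauses are therefore expected to reserve the extra
// N * sizeof(Expr *) bytes when they are allocated by their
// Create()/CreateEmpty() factory methods.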
/// \brief This represents 'if' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel if(a > 5)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'if'
/// clause with condition 'a > 5'.
///
class OMPIfClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Condition of the 'if' clause.
Stmt *Condition;
/// \brief Set condition.
///
void setCondition(Expr *Cond) { Condition = Cond; }
public:
/// \brief Build 'if' clause with condition \a Cond.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param Cond Condition of the clause.
/// \param EndLoc Ending location of the clause.
///
OMPIfClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_if, StartLoc, EndLoc), LParenLoc(LParenLoc),
Condition(Cond) {}
/// \brief Build an empty clause.
///
OMPIfClause()
: OMPClause(OMPC_if, SourceLocation(), SourceLocation()),
LParenLoc(SourceLocation()), Condition(nullptr) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Returns condition.
Expr *getCondition() const { return cast_or_null<Expr>(Condition); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_if;
}
StmtRange children() { return StmtRange(&Condition, &Condition + 1); }
};
/// \brief This represents 'final' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task final(a > 5)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'final'
/// clause with condition 'a > 5'.
///
class OMPFinalClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
  /// \brief Condition of the 'final' clause.
Stmt *Condition;
/// \brief Set condition.
///
void setCondition(Expr *Cond) { Condition = Cond; }
public:
/// \brief Build 'final' clause with condition \a Cond.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param Cond Condition of the clause.
/// \param EndLoc Ending location of the clause.
///
OMPFinalClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_final, StartLoc, EndLoc), LParenLoc(LParenLoc),
Condition(Cond) {}
/// \brief Build an empty clause.
///
OMPFinalClause()
: OMPClause(OMPC_final, SourceLocation(), SourceLocation()),
LParenLoc(SourceLocation()), Condition(nullptr) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Returns condition.
Expr *getCondition() const { return cast_or_null<Expr>(Condition); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_final;
}
StmtRange children() { return StmtRange(&Condition, &Condition + 1); }
};
/// \brief This represents 'num_threads' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel num_threads(6)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'num_threads'
/// clause with number of threads '6'.
///
class OMPNumThreadsClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
  /// \brief Number of threads specified in the 'num_threads' clause.
Stmt *NumThreads;
  /// \brief Set the number of threads.
///
void setNumThreads(Expr *NThreads) { NumThreads = NThreads; }
public:
  /// \brief Build 'num_threads' clause with the number of threads \a NumThreads.
///
/// \param NumThreads Number of threads for the construct.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
///
OMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(OMPC_num_threads, StartLoc, EndLoc), LParenLoc(LParenLoc),
NumThreads(NumThreads) {}
/// \brief Build an empty clause.
///
OMPNumThreadsClause()
: OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()),
LParenLoc(SourceLocation()), NumThreads(nullptr) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Returns number of threads.
Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_num_threads;
}
StmtRange children() { return StmtRange(&NumThreads, &NumThreads + 1); }
};
/// \brief This represents 'safelen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd safelen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'safelen'
/// with single expression '4'.
/// If the safelen clause is used then no two iterations executed
/// concurrently with SIMD instructions can have a greater distance
/// in the logical iteration space than its value. The parameter of
/// the safelen clause must be a constant positive integer expression.
///
class OMPSafelenClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Safe iteration space distance.
Stmt *Safelen;
/// \brief Set safelen.
void setSafelen(Expr *Len) { Safelen = Len; }
public:
/// \brief Build 'safelen' clause.
///
/// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
///
OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc),
Safelen(Len) {}
/// \brief Build an empty clause.
///
explicit OMPSafelenClause()
: OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()),
LParenLoc(SourceLocation()), Safelen(nullptr) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Return safe iteration space distance.
Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_safelen;
}
StmtRange children() { return StmtRange(&Safelen, &Safelen + 1); }
};
/// \brief This represents 'collapse' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd collapse(3)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'collapse'
/// with single expression '3'.
/// The parameter must be a constant positive integer expression, it specifies
/// the number of nested loops that should be collapsed into a single iteration
/// space.
///
class OMPCollapseClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Number of for-loops.
Stmt *NumForLoops;
/// \brief Set the number of associated for-loops.
void setNumForLoops(Expr *Num) { NumForLoops = Num; }
public:
/// \brief Build 'collapse' clause.
///
/// \param Num Expression associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
///
OMPCollapseClause(Expr *Num, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc),
NumForLoops(Num) {}
/// \brief Build an empty clause.
///
explicit OMPCollapseClause()
: OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()),
LParenLoc(SourceLocation()), NumForLoops(nullptr) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Return the number of associated for-loops.
Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_collapse;
}
StmtRange children() { return StmtRange(&NumForLoops, &NumForLoops + 1); }
};
/// \brief This represents 'default' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel default(shared)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'default'
/// clause with kind 'shared'.
///
class OMPDefaultClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief A kind of the 'default' clause.
OpenMPDefaultClauseKind Kind;
/// \brief Start location of the kind in source code.
SourceLocation KindKwLoc;
/// \brief Set kind of the clauses.
///
/// \param K Argument of clause.
///
void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; }
/// \brief Set argument location.
///
/// \param KLoc Argument location.
///
void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
public:
/// \brief Build 'default' clause with argument \a A ('none' or 'shared').
///
/// \param A Argument of the clause ('none' or 'shared').
/// \param ALoc Starting location of the argument.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
///
OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc),
Kind(A), KindKwLoc(ALoc) {}
/// \brief Build an empty clause.
///
OMPDefaultClause()
: OMPClause(OMPC_default, SourceLocation(), SourceLocation()),
LParenLoc(SourceLocation()), Kind(OMPC_DEFAULT_unknown),
KindKwLoc(SourceLocation()) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Returns kind of the clause.
OpenMPDefaultClauseKind getDefaultKind() const { return Kind; }
/// \brief Returns location of clause kind.
SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_default;
}
StmtRange children() { return StmtRange(); }
};
/// \brief This represents 'proc_bind' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel proc_bind(master)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'proc_bind'
/// clause with kind 'master'.
///
class OMPProcBindClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief A kind of the 'proc_bind' clause.
OpenMPProcBindClauseKind Kind;
/// \brief Start location of the kind in source code.
SourceLocation KindKwLoc;
/// \brief Set kind of the clause.
///
/// \param K Kind of clause.
///
void setProcBindKind(OpenMPProcBindClauseKind K) { Kind = K; }
/// \brief Set clause kind location.
///
/// \param KLoc Kind location.
///
void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
public:
/// \brief Build 'proc_bind' clause with argument \a A ('master', 'close' or
/// 'spread').
///
/// \param A Argument of the clause ('master', 'close' or 'spread').
/// \param ALoc Starting location of the argument.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
///
OMPProcBindClause(OpenMPProcBindClauseKind A, SourceLocation ALoc,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc),
Kind(A), KindKwLoc(ALoc) {}
/// \brief Build an empty clause.
///
OMPProcBindClause()
: OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()),
LParenLoc(SourceLocation()), Kind(OMPC_PROC_BIND_unknown),
KindKwLoc(SourceLocation()) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Returns kind of the clause.
OpenMPProcBindClauseKind getProcBindKind() const { return Kind; }
/// \brief Returns location of clause kind.
SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_proc_bind;
}
StmtRange children() { return StmtRange(); }
};
/// \brief This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
///
class OMPScheduleClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief A kind of the 'schedule' clause.
OpenMPScheduleClauseKind Kind;
  /// \brief Start location of the schedule kind in source code.
SourceLocation KindLoc;
/// \brief Location of ',' (if any).
SourceLocation CommaLoc;
/// \brief Chunk size and a reference to pseudo variable for combined
/// directives.
enum { CHUNK_SIZE, HELPER_CHUNK_SIZE, NUM_EXPRS };
Stmt *ChunkSizes[NUM_EXPRS];
/// \brief Set schedule kind.
///
/// \param K Schedule kind.
///
void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; }
/// \brief Sets the location of '('.
///
/// \param Loc Location of '('.
///
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Set schedule kind start location.
///
/// \param KLoc Schedule kind location.
///
void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
/// \brief Set location of ','.
///
/// \param Loc Location of ','.
///
void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }
/// \brief Set chunk size.
///
/// \param E Chunk size.
///
void setChunkSize(Expr *E) { ChunkSizes[CHUNK_SIZE] = E; }
/// \brief Set helper chunk size.
///
/// \param E Helper chunk size.
///
void setHelperChunkSize(Expr *E) { ChunkSizes[HELPER_CHUNK_SIZE] = E; }
public:
/// \brief Build 'schedule' clause with schedule kind \a Kind and chunk size
/// expression \a ChunkSize.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param KLoc Starting location of the argument.
/// \param CommaLoc Location of ','.
/// \param EndLoc Ending location of the clause.
/// \param Kind Schedule kind.
/// \param ChunkSize Chunk size.
/// \param HelperChunkSize Helper chunk size for combined directives.
///
OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation KLoc, SourceLocation CommaLoc,
SourceLocation EndLoc, OpenMPScheduleClauseKind Kind,
Expr *ChunkSize, Expr *HelperChunkSize)
: OMPClause(OMPC_schedule, StartLoc, EndLoc), LParenLoc(LParenLoc),
Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc) {
ChunkSizes[CHUNK_SIZE] = ChunkSize;
ChunkSizes[HELPER_CHUNK_SIZE] = HelperChunkSize;
}
/// \brief Build an empty clause.
///
explicit OMPScheduleClause()
: OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()),
Kind(OMPC_SCHEDULE_unknown) {
ChunkSizes[CHUNK_SIZE] = nullptr;
ChunkSizes[HELPER_CHUNK_SIZE] = nullptr;
}
/// \brief Get kind of the clause.
///
OpenMPScheduleClauseKind getScheduleKind() const { return Kind; }
/// \brief Get location of '('.
///
SourceLocation getLParenLoc() { return LParenLoc; }
/// \brief Get kind location.
///
SourceLocation getScheduleKindLoc() { return KindLoc; }
/// \brief Get location of ','.
///
SourceLocation getCommaLoc() { return CommaLoc; }
/// \brief Get chunk size.
///
Expr *getChunkSize() { return dyn_cast_or_null<Expr>(ChunkSizes[CHUNK_SIZE]); }
/// \brief Get chunk size.
///
Expr *getChunkSize() const {
return dyn_cast_or_null<Expr>(ChunkSizes[CHUNK_SIZE]);
}
/// \brief Get helper chunk size.
///
Expr *getHelperChunkSize() {
return dyn_cast_or_null<Expr>(ChunkSizes[HELPER_CHUNK_SIZE]);
}
/// \brief Get helper chunk size.
///
Expr *getHelperChunkSize() const {
return dyn_cast_or_null<Expr>(ChunkSizes[HELPER_CHUNK_SIZE]);
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_schedule;
}
StmtRange children() {
return StmtRange(&ChunkSizes[CHUNK_SIZE], &ChunkSizes[CHUNK_SIZE] + 1);
}
};
/// \brief This represents 'ordered' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for ordered
/// \endcode
/// In this example directive '#pragma omp for' has 'ordered' clause.
///
class OMPOrderedClause : public OMPClause {
public:
/// \brief Build 'ordered' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
///
OMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_ordered, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
///
OMPOrderedClause()
: OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()) {}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_ordered;
}
StmtRange children() { return StmtRange(); }
};
/// \brief This represents 'nowait' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for nowait
/// \endcode
/// In this example directive '#pragma omp for' has 'nowait' clause.
///
class OMPNowaitClause : public OMPClause {
public:
/// \brief Build 'nowait' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
///
OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_nowait, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
///
OMPNowaitClause()
: OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_nowait;
}
StmtRange children() { return StmtRange(); }
};
/// \brief This represents 'untied' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task untied
/// \endcode
/// In this example directive '#pragma omp task' has 'untied' clause.
///
class OMPUntiedClause : public OMPClause {
public:
/// \brief Build 'untied' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
///
OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_untied, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
///
OMPUntiedClause()
: OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_untied;
}
StmtRange children() { return StmtRange(); }
};
/// \brief This represents 'mergeable' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task mergeable
/// \endcode
/// In this example directive '#pragma omp task' has 'mergeable' clause.
///
class OMPMergeableClause : public OMPClause {
public:
/// \brief Build 'mergeable' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
///
OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_mergeable, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
///
OMPMergeableClause()
: OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_mergeable;
}
StmtRange children() { return StmtRange(); }
};
/// \brief This represents 'read' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic read
/// \endcode
/// In this example directive '#pragma omp atomic' has 'read' clause.
///
class OMPReadClause : public OMPClause {
public:
/// \brief Build 'read' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
///
OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_read, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
///
OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_read;
}
StmtRange children() { return StmtRange(); }
};
/// \brief This represents 'write' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic write
/// \endcode
/// In this example directive '#pragma omp atomic' has 'write' clause.
///
class OMPWriteClause : public OMPClause {
public:
/// \brief Build 'write' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
///
OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_write, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
///
OMPWriteClause()
: OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_write;
}
StmtRange children() { return StmtRange(); }
};
/// \brief This represents 'update' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic update
/// \endcode
/// In this example directive '#pragma omp atomic' has 'update' clause.
///
class OMPUpdateClause : public OMPClause {
public:
/// \brief Build 'update' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
///
OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_update, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
///
OMPUpdateClause()
: OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_update;
}
StmtRange children() { return StmtRange(); }
};
/// \brief This represents 'capture' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has 'capture' clause.
///
class OMPCaptureClause : public OMPClause {
public:
/// \brief Build 'capture' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
///
OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_capture, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
///
OMPCaptureClause()
: OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_capture;
}
StmtRange children() { return StmtRange(); }
};
/// \brief This represents 'seq_cst' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic seq_cst
/// \endcode
/// In this example directive '#pragma omp atomic' has 'seq_cst' clause.
///
class OMPSeqCstClause : public OMPClause {
public:
/// \brief Build 'seq_cst' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
///
OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
///
OMPSeqCstClause()
: OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_seq_cst;
}
StmtRange children() { return StmtRange(); }
};
/// \brief This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
///
class OMPPrivateClause : public OMPVarListClause<OMPPrivateClause> {
friend class OMPClauseReader;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
///
OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc,
EndLoc, N) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
///
explicit OMPPrivateClause(unsigned N)
: OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
/// \brief Sets the list of references to private copies with initializers for
/// new private variables.
/// \param VL List of references.
void setPrivateCopies(ArrayRef<Expr *> VL);
/// \brief Gets the list of references to private copies with initializers for
/// new private variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param PrivateVL List of references to private copies with initializers.
///
static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL,
ArrayRef<Expr *> PrivateVL);
/// \brief Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
///
static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);
typedef MutableArrayRef<Expr *>::iterator private_copies_iterator;
typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator;
typedef llvm::iterator_range<private_copies_iterator> private_copies_range;
typedef llvm::iterator_range<private_copies_const_iterator>
private_copies_const_range;
private_copies_range private_copies() {
return private_copies_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
private_copies_const_range private_copies() const {
return private_copies_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
StmtRange children() {
return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_private;
}
};
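// Illustrative sketch (not part of the Clang sources): the generated private
// copies form a second tail-allocated array of the same length as the
// variable list, so both can be walked in lock step.  Assuming a fully built
// clause, pairing them up looks roughly like this:
//
//   auto Copy = Clause->private_copies().begin();
//   for (auto I = Clause->varlist_begin(), E = Clause->varlist_end();
//        I != E; ++I, ++Copy) {
//     // *I is the reference to the original variable, *Copy the reference
//     // to its generated private copy (with initializer, if any).
//   }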
/// \brief This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
///
class OMPFirstprivateClause : public OMPVarListClause<OMPFirstprivateClause> {
friend class OMPClauseReader;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
///
OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc,
LParenLoc, EndLoc, N) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
///
explicit OMPFirstprivateClause(unsigned N)
: OMPVarListClause<OMPFirstprivateClause>(
OMPC_firstprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
/// \brief Sets the list of references to private copies with initializers for
/// new private variables.
/// \param VL List of references.
void setPrivateCopies(ArrayRef<Expr *> VL);
/// \brief Gets the list of references to private copies with initializers for
/// new private variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// \brief Sets the list of references to initializer variables for new
/// private variables.
/// \param VL List of references.
void setInits(ArrayRef<Expr *> VL);
/// \brief Gets the list of references to initializer variables for new
/// private variables.
MutableArrayRef<Expr *> getInits() {
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
}
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the original variables.
/// \param PrivateVL List of references to private copies with initializers.
/// \param InitVL List of references to auto-generated variables used for
/// initialization of a single array element. Used if the firstprivate variable
/// is of array type.
///
static OMPFirstprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
ArrayRef<Expr *> InitVL);
/// \brief Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
///
static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
typedef MutableArrayRef<Expr *>::iterator private_copies_iterator;
typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator;
typedef llvm::iterator_range<private_copies_iterator> private_copies_range;
typedef llvm::iterator_range<private_copies_const_iterator>
private_copies_const_range;
private_copies_range private_copies() {
return private_copies_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
private_copies_const_range private_copies() const {
return private_copies_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
typedef MutableArrayRef<Expr *>::iterator inits_iterator;
typedef ArrayRef<const Expr *>::iterator inits_const_iterator;
typedef llvm::iterator_range<inits_iterator> inits_range;
typedef llvm::iterator_range<inits_const_iterator> inits_const_range;
inits_range inits() {
return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
return inits_const_range(getInits().begin(), getInits().end());
}
StmtRange children() {
return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_firstprivate;
}
};
/// \brief This represents clause 'lastprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd lastprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause : public OMPVarListClause<OMPLastprivateClause> {
// There are 4 additional tail-allocated arrays at the end of the class:
// 1. Contains list of pseudo variables with the default initialization for
// each non-firstprivate variable. Used in codegen for initialization of
// lastprivate copies.
// 2. List of helper expressions for proper generation of assignment operation
// required for lastprivate clause. This list represents private variables
// (for arrays, single array element).
// 3. List of helper expressions for proper generation of assignment operation
// required for lastprivate clause. This list represents original variables
// (for arrays, single array element).
// 4. List of helper expressions that represents assignment operation:
// \code
// DstExprs = SrcExprs;
// \endcode
// Required for proper codegen of final assignment performed by the
// lastprivate clause.
//
friend class OMPClauseReader;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
///
OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc,
LParenLoc, EndLoc, N) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
///
explicit OMPLastprivateClause(unsigned N)
: OMPVarListClause<OMPLastprivateClause>(
OMPC_lastprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
/// \brief Get the list of helper expressions for initialization of private
/// copies for lastprivate variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent private variables (for arrays, single
/// array element) in the final assignment statement performed by the
/// lastprivate clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// \brief Get the list of helper source expressions.
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
}
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent original variables (for arrays, single
/// array element) in the final assignment statement performed by the
/// lastprivate clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// \brief Get the list of helper destination expressions.
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// \brief Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign the private copy of the variable to the original variable.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// \brief Get the list of helper assignment expressions.
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for lastprivate clause. This list represents
/// private variables (for arrays, single array element).
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for lastprivate clause. This list represents
/// original variables (for arrays, single array element).
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of final assignment performed by the
/// lastprivate clause.
///
static OMPLastprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
/// \brief Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
///
static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
typedef llvm::iterator_range<helper_expr_const_iterator>
helper_expr_const_range;
/// \brief Set list of helper expressions, required for generation of private
/// copies of original lastprivate variables.
void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);
helper_expr_const_range private_copies() const {
return helper_expr_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
helper_expr_range private_copies() {
return helper_expr_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
StmtRange children() {
return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_lastprivate;
}
};
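// Illustrative note (not part of the Clang sources): for a lastprivate clause
// with N variables the tail allocation therefore holds
//
//   Vars[N] PrivateCopies[N] SrcExprs[N] DstExprs[N] AssignmentOps[N]
//
// so codegen can walk all five lists in lock step; AssignmentOps[i] is
// essentially the pre-built expression performing "DstExprs[i] = SrcExprs[i]"
// for the i-th variable.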
/// \brief This represents clause 'shared' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel shared(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'shared'
/// with the variables 'a' and 'b'.
///
class OMPSharedClause : public OMPVarListClause<OMPSharedClause> {
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
///
OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc,
EndLoc, N) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
///
explicit OMPSharedClause(unsigned N)
: OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
///
static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL);
/// \brief Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
///
static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N);
StmtRange children() {
return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_shared;
}
};
/// \brief This represents clause 'reduction' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
///
class OMPReductionClause : public OMPVarListClause<OMPReductionClause> {
friend class OMPClauseReader;
/// \brief Location of ':'.
SourceLocation ColonLoc;
/// \brief Nested name specifier for C++.
NestedNameSpecifierLoc QualifierLoc;
/// \brief Name of custom operator.
DeclarationNameInfo NameInfo;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param ColonLoc Location of ':'.
/// \param N Number of the variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
///
OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo)
: OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc,
LParenLoc, EndLoc, N),
ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
///
explicit OMPReductionClause(unsigned N)
: OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(),
SourceLocation(), SourceLocation(),
N),
ColonLoc(), QualifierLoc(), NameInfo() {}
/// \brief Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
/// \brief Sets the name info for specified reduction identifier.
void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
/// \brief Sets the nested name specifier.
void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent LHS expression in the final
/// reduction expression performed by the reduction clause.
void setLHSExprs(ArrayRef<Expr *> LHSExprs);
/// \brief Get the list of helper LHS expressions.
MutableArrayRef<Expr *> getLHSExprs() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getLHSExprs() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent RHS expression in the final
/// reduction expression performed by the reduction clause.
/// Also, variables in these expressions are used for proper initialization of
/// reduction copies.
void setRHSExprs(ArrayRef<Expr *> RHSExprs);
/// \brief Get the list of helper RHS expressions.
MutableArrayRef<Expr *> getRHSExprs() {
return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getRHSExprs() const {
return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
}
/// \brief Set list of helper reduction expressions, required for proper
/// codegen of the clause. These expressions are binary expressions or
/// operator/custom reduction calls that compute the new value from the
/// source helper expressions and store it in the destination helper
/// expressions.
void setReductionOps(ArrayRef<Expr *> ReductionOps);
/// \brief Get the list of helper reduction expressions.
MutableArrayRef<Expr *> getReductionOps() {
return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getReductionOps() const {
return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
}
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL The variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
/// \param LHSExprs List of helper expressions for proper generation of
/// assignment operation required for reduction clause. This list represents
/// LHSs of the reduction expressions.
/// \param RHSExprs List of helper expressions for proper generation of
/// assignment operation required for reduction clause. This list represents
/// RHSs of the reduction expressions.
/// Also, variables in these expressions are used for proper initialization of
/// reduction copies.
/// \param ReductionOps List of helper expressions that represents reduction
/// expressions:
/// \code
/// LHSExprs binop RHSExprs;
/// operator binop(LHSExpr, RHSExpr);
/// <CustomReduction>(LHSExpr, RHSExpr);
/// \endcode
/// Required for proper codegen of final reduction operation performed by the
/// reduction clause.
///
static OMPReductionClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> LHSExprs,
ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps);
/// \brief Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
///
static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N);
/// \brief Gets location of ':' symbol in clause.
SourceLocation getColonLoc() const { return ColonLoc; }
/// \brief Gets the name info for specified reduction identifier.
const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
/// \brief Gets the nested name specifier.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
typedef llvm::iterator_range<helper_expr_const_iterator>
helper_expr_const_range;
helper_expr_const_range lhs_exprs() const {
return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_range lhs_exprs() {
return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_const_range rhs_exprs() const {
return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_range rhs_exprs() {
return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_const_range reduction_ops() const {
return helper_expr_const_range(getReductionOps().begin(),
getReductionOps().end());
}
helper_expr_range reduction_ops() {
return helper_expr_range(getReductionOps().begin(),
getReductionOps().end());
}
StmtRange children() {
return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_reduction;
}
};
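// Illustrative sketch (not part of the Clang sources): assuming all helper
// lists have the same length as the variable list, the reduction helpers can
// be visited together like this:
//
//   auto LHS = Clause->lhs_exprs().begin();
//   auto RHS = Clause->rhs_exprs().begin();
//   auto Op  = Clause->reduction_ops().begin();
//   for (auto I = Clause->varlist_begin(), E = Clause->varlist_end();
//        I != E; ++I, ++LHS, ++RHS, ++Op)
//     ; // *Op is "*LHS binop *RHS" or a call to the declared reduction.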
/// \brief This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
///
class OMPLinearClause : public OMPVarListClause<OMPLinearClause> {
friend class OMPClauseReader;
/// \brief Location of ':'.
SourceLocation ColonLoc;
/// \brief Sets the linear step for clause.
void setStep(Expr *Step) { *(getFinals().end()) = Step; }
/// \brief Sets the expression to calculate linear step for clause.
void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }
/// \brief Build 'linear' clause with given number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of variables.
///
OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned NumVars)
: OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc,
EndLoc, NumVars),
ColonLoc(ColonLoc) {}
/// \brief Build an empty clause.
///
/// \param NumVars Number of variables.
///
explicit OMPLinearClause(unsigned NumVars)
: OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(),
SourceLocation(), SourceLocation(),
NumVars),
ColonLoc(SourceLocation()) {}
/// \brief Gets the list of initial values for linear variables.
///
/// There are NumVars expressions with initial values allocated after the
/// varlist, they are followed by NumVars update expressions (used to update
/// the linear variable's value on current iteration) and they are followed by
/// NumVars final expressions (used to calculate the linear variable's
/// value after the loop body). After these lists, there are 2 helper
/// expressions - linear step and a helper to calculate it before the
/// loop body (used when the linear step is not constant):
///
/// { Vars[] /* in OMPVarListClause */; Inits[]; Updates[]; Finals[];
/// Step; CalcStep; }
///
MutableArrayRef<Expr *> getInits() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// \brief Gets the list of update expressions for linear variables.
MutableArrayRef<Expr *> getUpdates() {
return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
}
ArrayRef<const Expr *> getUpdates() const {
return llvm::makeArrayRef(getInits().end(), varlist_size());
}
/// \brief Gets the list of final update expressions for linear variables.
MutableArrayRef<Expr *> getFinals() {
return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
}
ArrayRef<const Expr *> getFinals() const {
return llvm::makeArrayRef(getUpdates().end(), varlist_size());
}
/// \brief Sets the list of the initial values for linear variables.
/// \param IL List of expressions.
void setInits(ArrayRef<Expr *> IL);
public:
/// \brief Creates clause with a list of variables \a VL and a linear step
/// \a Step.
///
/// \param C AST Context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param IL List of initial values for the variables.
/// \param Step Linear step.
/// \param CalcStep Calculation of the linear step.
static OMPLinearClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
ArrayRef<Expr *> VL, ArrayRef<Expr *> IL,
Expr *Step, Expr *CalcStep);
/// \brief Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of variables.
///
static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);
/// \brief Sets the location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getColonLoc() const { return ColonLoc; }
/// \brief Returns linear step.
Expr *getStep() { return *(getFinals().end()); }
/// \brief Returns linear step.
const Expr *getStep() const { return *(getFinals().end()); }
/// \brief Returns expression to calculate linear step.
Expr *getCalcStep() { return *(getFinals().end() + 1); }
/// \brief Returns expression to calculate linear step.
const Expr *getCalcStep() const { return *(getFinals().end() + 1); }
/// \brief Sets the list of update expressions for linear variables.
/// \param UL List of expressions.
void setUpdates(ArrayRef<Expr *> UL);
/// \brief Sets the list of final update expressions for linear variables.
/// \param FL List of expressions.
void setFinals(ArrayRef<Expr *> FL);
typedef MutableArrayRef<Expr *>::iterator inits_iterator;
typedef ArrayRef<const Expr *>::iterator inits_const_iterator;
typedef llvm::iterator_range<inits_iterator> inits_range;
typedef llvm::iterator_range<inits_const_iterator> inits_const_range;
inits_range inits() {
return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
return inits_const_range(getInits().begin(), getInits().end());
}
typedef MutableArrayRef<Expr *>::iterator updates_iterator;
typedef ArrayRef<const Expr *>::iterator updates_const_iterator;
typedef llvm::iterator_range<updates_iterator> updates_range;
typedef llvm::iterator_range<updates_const_iterator> updates_const_range;
updates_range updates() {
return updates_range(getUpdates().begin(), getUpdates().end());
}
updates_const_range updates() const {
return updates_const_range(getUpdates().begin(), getUpdates().end());
}
typedef MutableArrayRef<Expr *>::iterator finals_iterator;
typedef ArrayRef<const Expr *>::iterator finals_const_iterator;
typedef llvm::iterator_range<finals_iterator> finals_range;
typedef llvm::iterator_range<finals_const_iterator> finals_const_range;
finals_range finals() {
return finals_range(getFinals().begin(), getFinals().end());
}
finals_const_range finals() const {
return finals_const_range(getFinals().begin(), getFinals().end());
}
StmtRange children() {
return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_linear;
}
};
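// Illustrative note (not part of the Clang sources): for a linear clause with
// N variables the tail allocation is laid out as
//
//   Vars[N] Inits[N] Updates[N] Finals[N] Step CalcStep
//
// which is why getStep() and getCalcStep() read one and two slots past
// getFinals().end(); creation presumably has to reserve room for 4 * N + 2
// trailing expressions.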
/// \brief This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
///
class OMPAlignedClause : public OMPVarListClause<OMPAlignedClause> {
friend class OMPClauseReader;
/// \brief Location of ':'.
SourceLocation ColonLoc;
/// \brief Sets the alignment for clause.
void setAlignment(Expr *A) { *varlist_end() = A; }
/// \brief Build 'aligned' clause with given number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of variables.
///
OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned NumVars)
: OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc,
EndLoc, NumVars),
ColonLoc(ColonLoc) {}
/// \brief Build an empty clause.
///
/// \param NumVars Number of variables.
///
explicit OMPAlignedClause(unsigned NumVars)
: OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(),
SourceLocation(), SourceLocation(),
NumVars),
ColonLoc(SourceLocation()) {}
public:
/// \brief Creates clause with a list of variables \a VL and alignment \a A.
///
/// \param C AST Context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param A Alignment.
static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL,
Expr *A);
/// \brief Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of variables.
///
static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);
/// \brief Sets the location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
/// \brief Returns the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }
/// \brief Returns alignment.
Expr *getAlignment() { return *varlist_end(); }
/// \brief Returns alignment.
const Expr *getAlignment() const { return *varlist_end(); }
StmtRange children() {
return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_aligned;
}
};
/// \brief This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
///
class OMPCopyinClause : public OMPVarListClause<OMPCopyinClause> {
// Class has 3 additional tail allocated arrays:
// 1. List of helper expressions for proper generation of assignment operation
// required for copyin clause. This list represents sources.
// 2. List of helper expressions for proper generation of assignment operation
// required for copyin clause. This list represents destinations.
// 3. List of helper expressions that represents assignment operation:
// \code
// DstExprs = SrcExprs;
// \endcode
// Required for proper codegen of propagation of the master thread's values of
// threadprivate variables to the local instances of those variables in other
// implicit threads.
friend class OMPClauseReader;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
///
OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc,
EndLoc, N) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
///
explicit OMPCopyinClause(unsigned N)
: OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent source expression in the final
/// assignment statement performed by the copyin clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// \brief Get the list of helper source expressions.
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent destination expression in the final
/// assignment statement performed by the copyin clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// \brief Get the list of helper destination expressions.
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// \brief Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign source helper expressions to destination helper expressions
/// correspondingly.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// \brief Get the list of helper assignment expressions.
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for copyin clause. This list represents
/// sources.
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for copyin clause. This list represents
/// destinations.
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of propagation of the master thread's values of
/// threadprivate variables to the local instances of those variables in other
/// implicit threads.
///
static OMPCopyinClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
/// \brief Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
///
static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);
typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
typedef llvm::iterator_range<helper_expr_const_iterator>
helper_expr_const_range;
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
StmtRange children() {
return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_copyin;
}
};
/// \brief This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
///
class OMPCopyprivateClause : public OMPVarListClause<OMPCopyprivateClause> {
friend class OMPClauseReader;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
///
OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc,
LParenLoc, EndLoc, N) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
///
explicit OMPCopyprivateClause(unsigned N)
: OMPVarListClause<OMPCopyprivateClause>(
OMPC_copyprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent source expression in the final
/// assignment statement performed by the copyprivate clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// \brief Get the list of helper source expressions.
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent destination expression in the final
/// assignment statement performed by the copyprivate clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// \brief Get the list of helper destination expressions.
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// \brief Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign source helper expressions to destination helper expressions
/// correspondingly.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// \brief Get the list of helper assignment expressions.
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// sources.
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// destinations.
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of final assignment performed by the
/// copyprivate clause.
///
static OMPCopyprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
/// \brief Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
///
static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
typedef llvm::iterator_range<helper_expr_const_iterator>
helper_expr_const_range;
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
StmtRange children() {
return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_copyprivate;
}
};
/// \brief This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself; it can only appear as part of the
/// 'omp flush' directive. It is introduced to keep the original structure of
/// the \a OMPExecutableDirective class and its derivatives and to reuse the
/// existing infrastructure of clauses with lists of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
///
class OMPFlushClause : public OMPVarListClause<OMPFlushClause> {
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
///
OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc,
EndLoc, N) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
///
explicit OMPFlushClause(unsigned N)
: OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
///
static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> VL);
/// \brief Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
///
static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);
StmtRange children() {
return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_flush;
}
};
/// \brief This represents clause 'depend' in the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'depend' with the
/// variables 'a' and 'b' and dependency type 'in'.
///
class OMPDependClause : public OMPVarListClause<OMPDependClause> {
friend class OMPClauseReader;
/// \brief Dependency type (one of in, out, inout).
OpenMPDependClauseKind DepKind;
/// \brief Dependency type location.
SourceLocation DepLoc;
/// \brief Colon location.
SourceLocation ColonLoc;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
///
OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPDependClause>(OMPC_depend, StartLoc, LParenLoc,
EndLoc, N),
DepKind(OMPC_DEPEND_unknown) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
///
explicit OMPDependClause(unsigned N)
: OMPVarListClause<OMPDependClause>(OMPC_depend, SourceLocation(),
SourceLocation(), SourceLocation(),
N),
DepKind(OMPC_DEPEND_unknown) {}
/// \brief Set dependency kind.
void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }
/// \brief Set dependency type location.
void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }
/// \brief Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param DepKind Dependency type.
/// \param DepLoc Location of the dependency type.
/// \param ColonLoc Colon location.
/// \param VL List of references to the variables.
///
static OMPDependClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL);
/// \brief Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
///
static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N);
/// \brief Get dependency type.
OpenMPDependClauseKind getDependencyKind() const { return DepKind; }
/// \brief Get dependency type location.
SourceLocation getDependencyLoc() const { return DepLoc; }
/// \brief Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }
StmtRange children() {
return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_depend;
}
};
} // end namespace clang
#endif
|
ompcompress.c
|
#ifdef _OPENMP
/* compress 1d contiguous array in parallel */
static void
_t2(compress_omp, Scalar, 1)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint blocks = (nx + 3) / 4;
uint chunks = chunk_count_omp(stream, blocks, threads);
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
int chunk;
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin x within array */
const Scalar* p = data;
uint x = 4 * block;
p += x;
/* compress partial or full block */
if (nx - x < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 1)(&s, p, MIN(nx - x, 4u), 1);
else
_t2(zfp_encode_block, Scalar, 1)(&s, p);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
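/*
 * Illustrative note (not part of zfp): each chunk owns a private bit stream,
 * so the parallel loop body needs no synchronization; compress_finish_par()
 * then concatenates the per-thread streams in chunk order.  Chunk c covers
 * block indices [chunk_offset(blocks, chunks, c), chunk_offset(blocks, chunks, c + 1)).
 * Assuming chunk_offset() computes floor(blocks * c / chunks), blocks = 10 and
 * chunks = 4 would give offsets 0, 2, 5, 7, 10, i.e. chunk sizes 2, 3, 2, 3.
 */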
/* compress 1d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 1)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
int sx = field->sx ? field->sx : 1;
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint blocks = (nx + 3) / 4;
uint chunks = chunk_count_omp(stream, blocks, threads);
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
int chunk;
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin x within array */
const Scalar* p = data;
uint x = 4 * block;
p += sx * (ptrdiff_t)x;
/* compress partial or full block */
if (nx - x < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 1)(&s, p, MIN(nx - x, 4u), sx);
else
_t2(zfp_encode_block_strided, Scalar, 1)(&s, p, sx);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
/* compress 2d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 2)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
uint ny = field->ny;
int sx = field->sx ? field->sx : 1;
int sy = field->sy ? field->sy : (int)nx;
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint bx = (nx + 3) / 4;
uint by = (ny + 3) / 4;
uint blocks = bx * by;
uint chunks = chunk_count_omp(stream, blocks, threads);
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
int chunk;
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin (x, y) within array */
const Scalar* p = data;
uint b = block;
uint x, y;
x = 4 * (b % bx); b /= bx;
y = 4 * b;
p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y;
/* compress partial or full block */
if (nx - x < 4 || ny - y < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 2)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), sx, sy);
else
_t2(zfp_encode_block_strided, Scalar, 2)(&s, p, sx, sy);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
/* compress 3d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 3)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
uint ny = field->ny;
uint nz = field->nz;
int sx = field->sx ? field->sx : 1;
int sy = field->sy ? field->sy : (int)nx;
int sz = field->sz ? field->sz : (int)(nx * ny);
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint bx = (nx + 3) / 4;
uint by = (ny + 3) / 4;
uint bz = (nz + 3) / 4;
uint blocks = bx * by * bz;
uint chunks = chunk_count_omp(stream, blocks, threads);
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
int chunk;
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin (x, y, z) within array */
const Scalar* p = data;
uint b = block;
uint x, y, z;
x = 4 * (b % bx); b /= bx;
y = 4 * (b % by); b /= by;
z = 4 * b;
p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y + sz * (ptrdiff_t)z;
/* compress partial or full block */
if (nx - x < 4 || ny - y < 4 || nz - z < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 3)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), MIN(nz - z, 4u), sx, sy, sz);
else
_t2(zfp_encode_block_strided, Scalar, 3)(&s, p, sx, sy, sz);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
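/*
 * Illustrative note (not part of zfp): blocks are numbered with x varying
 * fastest.  For example, with nx = ny = nz = 8 (so bx = by = bz = 2), block
 * index b = 5 decomposes as
 *   x = 4 * (5 % 2) = 4;  b = 5 / 2 = 2;
 *   y = 4 * (2 % 2) = 0;  b = 2 / 2 = 1;
 *   z = 4 * 1       = 4;
 * i.e. the block whose origin within the array is (4, 0, 4).
 */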
/* compress 4d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 4)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
uint ny = field->ny;
uint nz = field->nz;
uint nw = field->nw;
int sx = field->sx ? field->sx : 1;
int sy = field->sy ? field->sy : (int)nx;
int sz = field->sz ? field->sz : (int)(nx * ny);
int sw = field->sw ? field->sw : (int)(nx * ny * nz);
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint bx = (nx + 3) / 4;
uint by = (ny + 3) / 4;
uint bz = (nz + 3) / 4;
uint bw = (nw + 3) / 4;
uint blocks = bx * by * bz * bw;
uint chunks = chunk_count_omp(stream, blocks, threads);
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
int chunk;
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin (x, y, z, w) within array */
const Scalar* p = data;
uint b = block;
uint x, y, z, w;
x = 4 * (b % bx); b /= bx;
y = 4 * (b % by); b /= by;
z = 4 * (b % bz); b /= bz;
w = 4 * b;
p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y + sz * (ptrdiff_t)z + sw * (ptrdiff_t)w;
/* compress partial or full block */
if (nx - x < 4 || ny - y < 4 || nz - z < 4 || nw - w < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 4)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), MIN(nz - z, 4u), MIN(nw - w, 4u), sx, sy, sz, sw);
else
_t2(zfp_encode_block_strided, Scalar, 4)(&s, p, sx, sy, sz, sw);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
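/*
  illustrative sketch only (not part of zfp): the inverse of the block-index
  decomposition used by the loops above, showing the assumed ordering in which
  blocks are numbered (x varies fastest, then y, then z, then w)
*/
#if 0
static uint
block_index_4d(uint x, uint y, uint z, uint w, uint bx, uint by, uint bz)
{
  /* block origins are multiples of 4; divide by 4 to get block coordinates */
  return (x / 4) + bx * ((y / 4) + by * ((z / 4) + bz * (w / 4)));
}
#endif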
#endif
deprecate.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE %
% D D E P P R R E C A A T E %
% D D EEE PPPPP RRRR EEE C AAAAA T EEE %
% D D E P R R E C A A T E %
% DDDD EEEEE P R R EEEEE CCCC A A T EEEEE %
% %
% %
% MagickWand Deprecated Methods %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "wand/studio.h"
#include "wand/MagickWand.h"
#include "wand/magick-wand-private.h"
#include "wand/wand.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#define PixelViewId "PixelView"
/*
Typedef declarations.
*/
struct _PixelView
{
size_t
id;
char
name[MaxTextExtent];
ExceptionInfo
*exception;
MagickWand
*wand;
CacheView
*view;
RectangleInfo
region;
size_t
number_threads;
PixelWand
***pixel_wands;
MagickBooleanType
debug;
size_t
signature;
};
#if !defined(MAGICKCORE_EXCLUDE_DEPRECATED)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w A l l o c a t e W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAllocateWand() allocates an initial drawing wand which is an opaque
% handle required by the remaining drawing methods.
%
% The format of the DrawAllocateWand method is:
%
% DrawingWand DrawAllocateWand(const DrawInfo *draw_info,Image *image)
%
% A description of each parameter follows:
%
% o draw_info: Initial drawing defaults. Set to NULL to use defaults.
%
% o image: the image to draw on.
%
*/
WandExport DrawingWand *DrawAllocateWand(const DrawInfo *draw_info,Image *image)
{
return(AcquireDrawingWand(draw_info,image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k A v e r a g e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickAverageImages() averages a set of images.
%
% The format of the MagickAverageImages method is:
%
% MagickWand *MagickAverageImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
static MagickWand *CloneMagickWandFromImages(const MagickWand *wand,
Image *images)
{
MagickWand
*clone_wand;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
clone_wand=(MagickWand *) AcquireMagickMemory(sizeof(*clone_wand));
if (clone_wand == (MagickWand *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
images->filename);
(void) memset(clone_wand,0,sizeof(*clone_wand));
clone_wand->id=AcquireWandId();
(void) FormatLocaleString(clone_wand->name,MaxTextExtent,"%s-%.20g",
MagickWandId,(double) clone_wand->id);
clone_wand->exception=AcquireExceptionInfo();
InheritException(clone_wand->exception,wand->exception);
clone_wand->image_info=CloneImageInfo(wand->image_info);
clone_wand->quantize_info=CloneQuantizeInfo(wand->quantize_info);
clone_wand->images=images;
clone_wand->debug=IsEventLogging();
if (clone_wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_wand->name);
clone_wand->signature=WandSignature;
return(clone_wand);
}
WandExport MagickWand *MagickAverageImages(MagickWand *wand)
{
Image
*average_image;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
return((MagickWand *) NULL);
average_image=EvaluateImages(wand->images,MeanEvaluateOperator,
wand->exception);
if (average_image == (Image *) NULL)
return((MagickWand *) NULL);
return(CloneMagickWandFromImages(wand,average_image));
}
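/*
  Illustrative sketch only (not part of this file): averaging a short image
  sequence with the deprecated MagickAverageImages().  The ExampleAverageImages
  name and the file names are hypothetical placeholders.
*/
#if 0
static void ExampleAverageImages(void)
{
  MagickWand
    *average,
    *wand;

  MagickWandGenesis();
  wand=NewMagickWand();
  (void) MagickReadImage(wand,"frame-1.png");
  (void) MagickReadImage(wand,"frame-2.png");   /* appends to the sequence */
  average=MagickAverageImages(wand);
  if (average != (MagickWand *) NULL)
    {
      (void) MagickWriteImage(average,"average.png");
      average=DestroyMagickWand(average);
    }
  wand=DestroyMagickWand(wand);
  MagickWandTerminus();
}
#endif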
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelView() makes a copy of the specified pixel view.
%
% The format of the ClonePixelView method is:
%
% PixelView *ClonePixelView(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport PixelView *ClonePixelView(const PixelView *pixel_view)
{
PixelView
*clone_view;
register ssize_t
i;
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
if (pixel_view->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
clone_view=(PixelView *) AcquireMagickMemory(sizeof(*clone_view));
if (clone_view == (PixelView *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
pixel_view->name);
(void) memset(clone_view,0,sizeof(*clone_view));
clone_view->id=AcquireWandId();
(void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g",
PixelViewId,(double) clone_view->id);
clone_view->exception=AcquireExceptionInfo();
InheritException(clone_view->exception,pixel_view->exception);
clone_view->view=CloneCacheView(pixel_view->view);
clone_view->region=pixel_view->region;
clone_view->number_threads=pixel_view->number_threads;
for (i=0; i < (ssize_t) pixel_view->number_threads; i++)
clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
pixel_view->pixel_wands[i],pixel_view->region.width);
clone_view->debug=pixel_view->debug;
if (clone_view->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
clone_view->signature=WandSignature;
return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelView() deallocates memory associated with a pixel view.
%
% The format of the DestroyPixelView method is:
%
% PixelView *DestroyPixelView(PixelView *pixel_view,
% const size_t number_wands,const size_t number_threads)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
% o number_wand: the number of pixel wands.
%
% o number_threads: number of threads.
%
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
const size_t number_wands,const size_t number_threads)
{
register ssize_t
i;
assert(pixel_wands != (PixelWand ***) NULL);
for (i=0; i < (ssize_t) number_threads; i++)
if (pixel_wands[i] != (PixelWand **) NULL)
pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands);
pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands);
return(pixel_wands);
}
WandExport PixelView *DestroyPixelView(PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
pixel_view->pixel_wands=DestroyPixelsThreadSet(pixel_view->pixel_wands,
pixel_view->region.width,pixel_view->number_threads);
pixel_view->view=DestroyCacheView(pixel_view->view);
pixel_view->exception=DestroyExceptionInfo(pixel_view->exception);
pixel_view->signature=(~WandSignature);
RelinquishWandId(pixel_view->id);
pixel_view=(PixelView *) RelinquishMagickMemory(pixel_view);
return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferPixelViewIterator() iterates over three pixel views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel regions are not confined to the image canvas--that
% is, you can include negative offsets or widths or heights that exceed the
% image dimensions. The destination pixel view, however, is confined to the
% image canvas--that is, no negative offsets or widths or heights that exceed
% the image dimensions are permitted.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferPixelViewIterator method is:
%
% MagickBooleanType DuplexTransferPixelViewIterator(PixelView *source,
% PixelView *duplex,PixelView *destination,
% DuplexTransferPixelViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o duplex: the duplex pixel view.
%
% o destination: the destination pixel view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType DuplexTransferPixelViewIterator(
PixelView *source,PixelView *duplex,PixelView *destination,
DuplexTransferPixelViewMethod transfer,void *context)
{
#define DuplexTransferPixelViewTag "PixelView/DuplexTransfer"
ExceptionInfo
*exception;
Image
*destination_image,
*duplex_image,
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(source != (PixelView *) NULL);
assert(source->signature == WandSignature);
if (transfer == (DuplexTransferPixelViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
duplex_image=duplex->wand->images;
destination_image=destination->wand->images;
if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status)
#endif
for (y=source->region.y; y < (ssize_t) source->region.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register const IndexPacket
*magick_restrict duplex_indexes,
*magick_restrict indexes;
register const PixelPacket
*magick_restrict duplex_pixels,
*magick_restrict pixels;
register IndexPacket
*magick_restrict destination_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict destination_pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
source->region.width,1,source->exception);
if (pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(source->view);
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
if (source_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetIndex(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->region.x,y,
duplex->region.width,1,duplex->exception);
if (duplex_pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
for (x=0; x < (ssize_t) duplex->region.width; x++)
PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
if (duplex_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) duplex->region.width; x++)
PixelSetBlackQuantum(duplex->pixel_wands[id][x],
GetPixelIndex(duplex_indexes+x));
if (duplex_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) duplex->region.width; x++)
PixelSetIndex(duplex->pixel_wands[id][x],
GetPixelIndex(duplex_indexes+x));
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->region.x,y,destination->region.width,1,exception);
if (destination_pixels == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelSetQuantumColor(destination->pixel_wands[id][x],
destination_pixels+x);
if (destination_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelSetBlackQuantum(destination->pixel_wands[id][x],
GetPixelIndex(destination_indexes+x));
if (destination_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelSetIndex(destination->pixel_wands[id][x],
GetPixelIndex(destination_indexes+x));
if (transfer(source,duplex,destination,context) == MagickFalse)
status=MagickFalse;
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelGetQuantumColor(destination->pixel_wands[id][x],
destination_pixels+x);
if (destination_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) destination->region.width; x++)
SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
destination->pixel_wands[id][x]));
sync=SyncCacheViewAuthenticPixels(destination->view,exception);
if (sync == MagickFalse)
{
InheritException(destination->exception,GetCacheViewException(
source->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(source_image,DuplexTransferPixelViewTag,
progress,source->region.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
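/*
  Illustrative sketch only (not part of this file): a user-defined transfer
  callback that blends the source and duplex scanlines into the destination.
  The name BlendPixelViews and the 50/50 blend are hypothetical, and the
  signature is assumed to match DuplexTransferPixelViewMethod as declared in
  wand/deprecate.h.  The sketch assumes all three views share the same width;
  any state shared across scanlines would need the #pragma omp critical guard
  described above.
*/
#if 0
static MagickBooleanType BlendPixelViews(const PixelView *source,
  const PixelView *duplex,PixelView *destination,void *context)
{
  PixelWand
    **destination_pixels,
    **duplex_pixels,
    **source_pixels;

  register ssize_t
    x;

  (void) context;
  source_pixels=GetPixelViewPixels(source);
  duplex_pixels=GetPixelViewPixels(duplex);
  destination_pixels=GetPixelViewPixels(destination);
  for (x=0; x < (ssize_t) GetPixelViewWidth(destination); x++)
  {
    PixelSetRed(destination_pixels[x],0.5*(PixelGetRed(source_pixels[x])+
      PixelGetRed(duplex_pixels[x])));
    PixelSetGreen(destination_pixels[x],0.5*(PixelGetGreen(source_pixels[x])+
      PixelGetGreen(duplex_pixels[x])));
    PixelSetBlue(destination_pixels[x],0.5*(PixelGetBlue(source_pixels[x])+
      PixelGetBlue(duplex_pixels[x])));
  }
  return(MagickTrue);
}
#endif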
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a pixel view.
%
% The format of the GetPixelViewException method is:
%
% char *GetPixelViewException(const PixelView *pixel_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
% o severity: the severity of the error is returned here.
%
*/
WandExport char *GetPixelViewException(const PixelView *pixel_view,
ExceptionType *severity)
{
char
*description;
assert(pixel_view != (const PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
if (pixel_view->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
assert(severity != (ExceptionType *) NULL);
*severity=pixel_view->exception->severity;
description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
sizeof(*description));
if (description == (char *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
pixel_view->name);
*description='\0';
if (pixel_view->exception->reason != (char *) NULL)
(void) CopyMagickString(description,GetLocaleExceptionMessage(
pixel_view->exception->severity,pixel_view->exception->reason),
MaxTextExtent);
if (pixel_view->exception->description != (char *) NULL)
{
(void) ConcatenateMagickString(description," (",MaxTextExtent);
(void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
pixel_view->exception->severity,pixel_view->exception->description),
MaxTextExtent);
(void) ConcatenateMagickString(description,")",MaxTextExtent);
}
return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w H e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewHeight() returns the pixel view height.
%
% The format of the GetPixelViewHeight method is:
%
% size_t GetPixelViewHeight(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport size_t GetPixelViewHeight(const PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->region.height);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewIterator() iterates over the pixel view in parallel and calls
% your get method for each scanline of the view. The pixel region is
% not confined to the image canvas--that is, you can include negative offsets
% or widths or heights that exceed the image dimensions. Any updates to
% the pixels in your callback are ignored.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetPixelViewIterator method is:
%
% MagickBooleanType GetPixelViewIterator(PixelView *source,
% GetPixelViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType GetPixelViewIterator(PixelView *source,
GetPixelViewMethod get,void *context)
{
#define GetPixelViewTag "PixelView/Get"
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(source != (PixelView *) NULL);
assert(source->signature == WandSignature);
if (get == (GetPixelViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status)
#endif
for (y=source->region.y; y < (ssize_t) source->region.height; y++)
{
const int
id = GetOpenMPThreadId();
register const IndexPacket
*indexes;
register const PixelPacket
*pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
source->region.width,1,source->exception);
if (pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(source->view);
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
if (source_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetIndex(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
if (get(source,context) == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(source_image,GetPixelViewTag,progress,
source->region.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewPixels() returns the pixel view pixel_wands.
%
% The format of the GetPixelViewPixels method is:
%
% PixelWand *GetPixelViewPixels(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport PixelWand **GetPixelViewPixels(const PixelView *pixel_view)
{
const int
id = GetOpenMPThreadId();
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->pixel_wands[id]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewWand() returns the magick wand associated with the pixel view.
%
% The format of the GetPixelViewWand method is:
%
% MagickWand *GetPixelViewWand(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport MagickWand *GetPixelViewWand(const PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w W i d t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewWidth() returns the pixel view width.
%
% The format of the GetPixelViewWidth method is:
%
% size_t GetPixelViewWidth(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport size_t GetPixelViewWidth(const PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->region.width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w X %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewX() returns the pixel view x offset.
%
% The format of the GetPixelViewX method is:
%
% ssize_t GetPixelViewX(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewX(const PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->region.x);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w Y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewY() returns the pixel view y offset.
%
% The format of the GetPixelViewY method is:
%
% ssize_t GetPixelViewY(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewY(const PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->region.y);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPixelView() returns MagickTrue if the parameter is verified as a pixel
% view container.
%
% The format of the IsPixelView method is:
%
% MagickBooleanType IsPixelView(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport MagickBooleanType IsPixelView(const PixelView *pixel_view)
{
size_t
length;
if (pixel_view == (const PixelView *) NULL)
return(MagickFalse);
if (pixel_view->signature != WandSignature)
return(MagickFalse);
length=strlen(PixelViewId);
if (LocaleNCompare(pixel_view->name,PixelViewId,length) != 0)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k C l i p P a t h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickClipPathImage() clips along the named paths from the 8BIM profile, if
% present. Later operations take effect inside the path. Id may be a number
% if preceded with #, to work on a numbered path, e.g., "#1" to use the first
% path.
%
% The format of the MagickClipPathImage method is:
%
% MagickBooleanType MagickClipPathImage(MagickWand *wand,
% const char *pathname,const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
*/
WandExport MagickBooleanType MagickClipPathImage(MagickWand *wand,
const char *pathname,const MagickBooleanType inside)
{
return(MagickClipImagePath(wand,pathname,inside));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G e t F i l l A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGetFillAlpha() returns the alpha used when drawing using the fill
% color or fill texture. Fully opaque is 1.0.
%
% The format of the DrawGetFillAlpha method is:
%
% double DrawGetFillAlpha(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport double DrawGetFillAlpha(const DrawingWand *wand)
{
return(DrawGetFillOpacity(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G e t S t r o k e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGetStrokeAlpha() returns the alpha of stroked object outlines.
%
% The format of the DrawGetStrokeAlpha method is:
%
% double DrawGetStrokeAlpha(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
*/
WandExport double DrawGetStrokeAlpha(const DrawingWand *wand)
{
return(DrawGetStrokeOpacity(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P e e k G r a p h i c W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPeekGraphicWand() returns the current drawing wand.
%
% The format of the DrawPeekGraphicWand method is:
%
% DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
{
return(PeekDrawingWand(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P o p G r a p h i c C o n t e x t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPopGraphicContext() destroys the current drawing wand and returns to the
% previously pushed drawing wand. Multiple drawing wands may exist. It is an
% error to attempt to pop more drawing wands than have been pushed, and it is
% proper form to pop all drawing wands which have been pushed.
%
% The format of the DrawPopGraphicContext method is:
%
% void DrawPopGraphicContext(DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport void DrawPopGraphicContext(DrawingWand *wand)
{
(void) PopDrawingWand(wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P u s h G r a p h i c C o n t e x t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPushGraphicContext() clones the current drawing wand to create a new
% drawing wand. The original drawing wand(s) may be returned to by
% invoking PopDrawingWand(). The drawing wands are stored on a drawing wand
% stack. For every Pop there must have already been an equivalent Push.
%
% The format of the DrawPushGraphicContext method is:
%
% void DrawPushGraphicContext(DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport void DrawPushGraphicContext(DrawingWand *wand)
{
(void) PushDrawingWand(wand);
}
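/*
  Illustrative sketch only (not part of this file): a balanced push/pop pair
  around a locally scoped fill color, using the deprecated graphic-context
  wrappers defined above.  The ExampleGraphicContext name and the rectangle
  coordinates are hypothetical.
*/
#if 0
static void ExampleGraphicContext(MagickWand *wand)
{
  DrawingWand
    *draw;

  PixelWand
    *fill;

  draw=NewDrawingWand();
  fill=NewPixelWand();
  DrawPushGraphicContext(draw);
  (void) PixelSetColor(fill,"red");
  DrawSetFillColor(draw,fill);
  DrawRectangle(draw,10.0,10.0,100.0,100.0);
  DrawPopGraphicContext(draw);    /* every push is matched by a pop */
  (void) MagickDrawImage(wand,draw);
  fill=DestroyPixelWand(fill);
  draw=DestroyDrawingWand(draw);
}
#endif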
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w S e t F i l l A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawSetFillAlpha() sets the alpha to use when drawing using the fill
% color or fill texture. Fully opaque is 1.0.
%
% The format of the DrawSetFillAlpha method is:
%
% void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
% o fill_alpha: fill alpha
%
*/
WandExport void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
{
DrawSetFillOpacity(wand,fill_alpha);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w S e t S t r o k e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawSetStrokeAlpha() specifies the alpha of stroked object outlines.
%
% The format of the DrawSetStrokeAlpha method is:
%
% void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
% o stroke_alpha: stroke alpha. The value 1.0 is opaque.
%
*/
WandExport void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
{
DrawSetStrokeOpacity(wand,stroke_alpha);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k C o l o r F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickColorFloodfillImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% The format of the MagickColorFloodfillImage method is:
%
% MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
% const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
% const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o fill: the floodfill color pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
const ssize_t x,const ssize_t y)
{
DrawInfo
*draw_info;
MagickBooleanType
status;
PixelPacket
target;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL);
PixelGetQuantumColor(fill,&draw_info->fill);
(void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
y % wand->images->rows,&target,wand->exception);
if (bordercolor != (PixelWand *) NULL)
PixelGetQuantumColor(bordercolor,&target);
wand->images->fuzz=fuzz;
status=ColorFloodfillImage(wand->images,draw_info,target,x,y,
bordercolor != (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
if (status == MagickFalse)
InheritException(wand->exception,&wand->images->exception);
draw_info=DestroyDrawInfo(draw_info);
return(status);
}
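/*
  Illustrative sketch only (not part of this file): flood-filling from the
  top-left corner with the deprecated MagickColorFloodfillImage().  The color,
  fuzz value, and seed coordinates are arbitrary; passing a NULL bordercolor
  selects the plain flood-fill method, as in the implementation above.
*/
#if 0
static void ExampleColorFloodfill(MagickWand *wand)
{
  PixelWand
    *fill;

  fill=NewPixelWand();
  (void) PixelSetColor(fill,"blue");
  (void) MagickColorFloodfillImage(wand,fill,20.0,(PixelWand *) NULL,0,0);
  fill=DestroyPixelWand(fill);
}
#endif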
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k D e s c r i b e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickDescribeImage() identifies an image by printing its attributes to the
% file. Attributes include the image width, height, size, and others.
%
% The format of the MagickDescribeImage method is:
%
% const char *MagickDescribeImage(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport char *MagickDescribeImage(MagickWand *wand)
{
return(MagickIdentifyImage(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k F l a t t e n I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickFlattenImages() merges a sequence of images. This is useful for
% combining Photoshop layers into a single image.
%
% The format of the MagickFlattenImages method is:
%
% MagickWand *MagickFlattenImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickFlattenImages(MagickWand *wand)
{
Image
*flatten_image;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
return((MagickWand *) NULL);
flatten_image=FlattenImages(wand->images,wand->exception);
if (flatten_image == (Image *) NULL)
return((MagickWand *) NULL);
return(CloneMagickWandFromImages(wand,flatten_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageAttribute() returns a value associated with the specified
% property. Use MagickRelinquishMemory() to free the value when you are
% finished with it.
%
% The format of the MagickGetImageAttribute method is:
%
% char *MagickGetImageAttribute(MagickWand *wand,const char *property)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o property: the property.
%
*/
WandExport char *MagickGetImageAttribute(MagickWand *wand,const char *property)
{
return(MagickGetImageProperty(wand,property));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a g i c k G e t I m a g e I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageIndex() returns the index of the current image.
%
% The format of the MagickGetImageIndex method is:
%
% ssize_t MagickGetImageIndex(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport ssize_t MagickGetImageIndex(MagickWand *wand)
{
return(MagickGetIteratorIndex(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a g i c k G e t I m a g e C h a n n e l E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageChannelExtrema() gets the extrema for one or more image
% channels.
%
% The format of the MagickGetImageChannelExtrema method is:
%
% MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
% const ChannelType channel,size_t *minima,size_t *maxima)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the image channel(s).
%
% o minima: The minimum pixel value for the specified channel(s).
%
% o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
const ChannelType channel,size_t *minima,size_t *maxima)
{
MagickBooleanType
status;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
status=GetImageChannelExtrema(wand->images,channel,minima,maxima,
wand->exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a g i c k G e t I m a g e E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageExtrema() gets the extrema for the image.
%
% The format of the MagickGetImageExtrema method is:
%
% MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
% size_t *minima,size_t *maxima)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o minima: The minimum pixel value for the specified channel(s).
%
% o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
size_t *minima,size_t *maxima)
{
MagickBooleanType
status;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
status=GetImageExtrema(wand->images,minima,maxima,wand->exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e M a t t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageMatte() returns MagickTrue if the image has a matte channel,
% otherwise MagickFalse.
%
% The format of the MagickGetImageMatte method is:
%
% MagickBooleanType MagickGetImageMatte(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickBooleanType MagickGetImageMatte(MagickWand *wand)
{
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
return(wand->images->matte);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImagePixels() extracts pixel data from an image and returns it to
% you. The method returns MagickTrue on success otherwise MagickFalse if an
% error is encountered. The data is returned as char, short int, int, ssize_t,
% float, or double in the order specified by map.
%
% Suppose you want to extract the first scanline of a 640x480 image as
% character data in red-green-blue order:
%
% MagickGetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
% The format of the MagickGetImagePixels method is:
%
% MagickBooleanType MagickGetImagePixels(MagickWand *wand,
% const ssize_t x,const ssize_t y,const size_t columns,
% const size_t rows,const char *map,const StorageType storage,
% void *pixels)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x, y, columns, rows: These values define the perimeter
% of a region of pixels you want to extract.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from
% these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
% LongPixel, QuantumPixel, or ShortPixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickGetImagePixels(MagickWand *wand,
const ssize_t x,const ssize_t y,const size_t columns,
const size_t rows,const char *map,const StorageType storage,
void *pixels)
{
return(MagickExportImagePixels(wand,x,y,columns,rows,map,storage,pixels));
}
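/*
  Illustrative sketch only (not part of this file): extracting the first
  scanline of an image as 8-bit RGB data, as in the example above.  For
  CharPixel storage the buffer must hold columns x rows x strlen(map) bytes;
  the ExampleGetImagePixels name is hypothetical and the sketch assumes the
  image is at least 640 pixels wide.
*/
#if 0
static void ExampleGetImagePixels(MagickWand *wand)
{
  unsigned char
    pixels[640*3];   /* 640 columns x 1 row x 3 components ("RGB") */

  if (MagickGetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels) == MagickFalse)
    (void) fprintf(stderr,"unable to export pixels\n");
}
#endif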
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageSize() returns the image length in bytes.
%
% The format of the MagickGetImageSize method is:
%
% MagickSizeType MagickGetImageSize(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickSizeType MagickGetImageSize(MagickWand *wand)
{
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
return(GetBlobSize(wand->images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMapImage() replaces the colors of an image with the closest color
% from a reference image.
%
% The format of the MagickMapImage method is:
%
% MagickBooleanType MagickMapImage(MagickWand *wand,
% const MagickWand *map_wand,const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o map: the map wand.
%
% o dither: Set this integer value to something other than zero to dither
% the mapped image.
%
*/
WandExport MagickBooleanType MagickMapImage(MagickWand *wand,
const MagickWand *map_wand,const MagickBooleanType dither)
{
MagickBooleanType
status;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if ((wand->images == (Image *) NULL) || (map_wand->images == (Image *) NULL))
ThrowWandException(WandError,"ContainsNoImages",wand->name);
status=MapImage(wand->images,map_wand->images,dither);
if (status == MagickFalse)
InheritException(wand->exception,&wand->images->exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a t t e F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMatteFloodfillImage() changes the transparency value of any pixel that
% matches target and is an immediate neighbor. If the method
% FillToBorderMethod is specified, the transparency value is changed for any
% neighbor pixel that does not match the bordercolor member of image.
%
% The format of the MagickMatteFloodfillImage method is:
%
% MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
% const double alpha,const double fuzz,const PixelWand *bordercolor,
% const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
const double alpha,const double fuzz,const PixelWand *bordercolor,
const ssize_t x,const ssize_t y)
{
DrawInfo
*draw_info;
MagickBooleanType
status;
PixelPacket
target;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL);
(void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
y % wand->images->rows,&target,wand->exception);
if (bordercolor != (PixelWand *) NULL)
PixelGetQuantumColor(bordercolor,&target);
wand->images->fuzz=fuzz;
status=MatteFloodfillImage(wand->images,target,ClampToQuantum(
(MagickRealType) QuantumRange-QuantumRange*alpha),x,y,bordercolor !=
(PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
if (status == MagickFalse)
InheritException(wand->exception,&wand->images->exception);
draw_info=DestroyDrawInfo(draw_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M e d i a n F i l t e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMedianFilterImage() applies a digital filter that improves the quality
% of a noisy image. Each pixel is replaced by the median in a set of
% neighboring pixels as defined by radius.
%
% The format of the MagickMedianFilterImage method is:
%
% MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
const double radius)
{
Image
*median_image;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
median_image=MedianFilterImage(wand->images,radius,wand->exception);
if (median_image == (Image *) NULL)
return(MagickFalse);
ReplaceImageInList(&wand->images,median_image);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M i n i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMinimumImages() returns the minimum intensity of an image sequence.
%
% The format of the MagickMinimumImages method is:
%
% MagickWand *MagickMinimumImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMinimumImages(MagickWand *wand)
{
Image
*minimum_image;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
return((MagickWand *) NULL);
minimum_image=EvaluateImages(wand->images,MinEvaluateOperator,
wand->exception);
if (minimum_image == (Image *) NULL)
return((MagickWand *) NULL);
return(CloneMagickWandFromImages(wand,minimum_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickModeImage() makes each pixel the 'predominant color' of the
% neighborhood of the specified radius.
%
% The format of the MagickModeImage method is:
%
% MagickBooleanType MagickModeImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickModeImage(MagickWand *wand,
const double radius)
{
Image
*mode_image;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
mode_image=ModeImage(wand->images,radius,wand->exception);
if (mode_image == (Image *) NULL)
return(MagickFalse);
ReplaceImageInList(&wand->images,mode_image);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M o s a i c I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMosaicImages() inlays an image sequence to form a single coherent
% picture. It returns a wand with each image in the sequence composited at
% the location defined by the page offset of the image.
%
% The format of the MagickMosaicImages method is:
%
% MagickWand *MagickMosaicImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMosaicImages(MagickWand *wand)
{
Image
*mosaic_image;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
return((MagickWand *) NULL);
mosaic_image=MosaicImages(wand->images,wand->exception);
if (mosaic_image == (Image *) NULL)
return((MagickWand *) NULL);
return(CloneMagickWandFromImages(wand,mosaic_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickOpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% The format of the MagickOpaqueImage method is:
%
% MagickBooleanType MagickOpaqueImage(MagickWand *wand,
% const PixelWand *target,const PixelWand *fill,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o target: Change this target color to the fill color within the image.
%
% o fill: the fill pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickOpaqueImage(MagickWand *wand,
const PixelWand *target,const PixelWand *fill,const double fuzz)
{
return(MagickPaintOpaqueImage(wand,target,fill,fuzz));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintFloodfillImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% The format of the MagickPaintFloodfillImage method is:
%
% MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
% const ChannelType channel,const PixelWand *fill,const double fuzz,
% const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the channel(s).
%
% o fill: the floodfill color pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
const ChannelType channel,const PixelWand *fill,const double fuzz,
const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
{
MagickBooleanType
status;
status=MagickFloodfillPaintImage(wand,channel,fill,fuzz,bordercolor,x,y,
MagickFalse);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintOpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% The format of the MagickPaintOpaqueImage method is:
%
% MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
% const PixelWand *target,const PixelWand *fill,const double fuzz)
% MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
% const ChannelType channel,const PixelWand *target,
% const PixelWand *fill,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the channel(s).
%
% o target: Change this target color to the fill color within the image.
%
% o fill: the fill pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
const PixelWand *target,const PixelWand *fill,const double fuzz)
{
return(MagickPaintOpaqueImageChannel(wand,DefaultChannels,target,fill,fuzz));
}
WandExport MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
const ChannelType channel,const PixelWand *target,const PixelWand *fill,
const double fuzz)
{
MagickBooleanType
status;
status=MagickOpaquePaintImageChannel(wand,channel,target,fill,fuzz,
MagickFalse);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintTransparentImage() changes any pixel that matches color with the
% color defined by fill.
%
% The format of the MagickPaintTransparentImage method is:
%
% MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
% const PixelWand *target,const double alpha,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o target: Change this target color to specified opacity value within
% the image.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
const PixelWand *target,const double alpha,const double fuzz)
{
return(MagickTransparentPaintImage(wand,target,alpha,fuzz,MagickFalse));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R a d i a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickRadialBlurImage() radially blurs an image.
%
% The format of the MagickRadialBlurImage method is:
%
% MagickBooleanType MagickRadialBlurImage(MagickWand *wand,
% const double angle)
% MagickBooleanType MagickRadialBlurImageChannel(MagickWand *wand,
% const ChannelType channel,const double angle)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the image channel(s).
%
% o angle: the angle of the blur in degrees.
%
*/
WandExport MagickBooleanType MagickRadialBlurImage(MagickWand *wand,
const double angle)
{
return(MagickRotationalBlurImage(wand,angle));
}
WandExport MagickBooleanType MagickRadialBlurImageChannel(MagickWand *wand,
const ChannelType channel,const double angle)
{
return(MagickRotationalBlurImageChannel(wand,channel,angle));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e c o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickRecolorImage() applies a color transformation to an image. The method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
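% For example, a 5x5 identity matrix leaves an RGBA image unchanged, so it is
% a convenient starting point to perturb (a minimal sketch; the values are
% illustrative):
%
%   double color_matrix[25] =
%   {
%     1.0, 0.0, 0.0, 0.0, 0.0,
%     0.0, 1.0, 0.0, 0.0, 0.0,
%     0.0, 0.0, 1.0, 0.0, 0.0,
%     0.0, 0.0, 0.0, 1.0, 0.0,
%     0.0, 0.0, 0.0, 0.0, 1.0
%   };
%
%   MagickRecolorImage(wand,5,color_matrix);
%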
% The format of the MagickRecolorImage method is:
%
% MagickBooleanType MagickRecolorImage(MagickWand *wand,
% const size_t order,const double *color_matrix)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o order: the number of columns and rows in the color matrix.
%
% o color_matrix: An array of doubles representing the color matrix.
%
*/
WandExport MagickBooleanType MagickRecolorImage(MagickWand *wand,
const size_t order,const double *color_matrix)
{
Image
*transform_image;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (color_matrix == (const double *) NULL)
return(MagickFalse);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
transform_image=RecolorImage(wand->images,order,color_matrix,
wand->exception);
if (transform_image == (Image *) NULL)
return(MagickFalse);
ReplaceImageInList(&wand->images,transform_image);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e d u c e N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickReduceNoiseImage() smooths the contours of an image while still
% preserving edge information. The algorithm works by replacing each pixel
% with its neighbor closest in value. A neighbor is defined by radius. Use
% a radius of 0 and ReduceNoise() selects a suitable radius for you.
%
% The format of the MagickReduceNoiseImage method is:
%
% MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
const double radius)
{
Image
*noise_image;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
noise_image=ReduceNoiseImage(wand->images,radius,wand->exception);
if (noise_image == (Image *) NULL)
return(MagickFalse);
ReplaceImageInList(&wand->images,noise_image);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a x i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMaximumImages() returns the maximum intensity of an image sequence.
%
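% For example (a minimal sketch; the returned wand is new and the caller is
% responsible for destroying it):
%
%   MagickWand *maximum_wand;
%
%   maximum_wand=MagickMaximumImages(wand);
%   /* use maximum_wand, then release it */
%   maximum_wand=DestroyMagickWand(maximum_wand);
%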
% The format of the MagickMaximumImages method is:
%
% MagickWand *MagickMaximumImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMaximumImages(MagickWand *wand)
{
Image
*maximum_image;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
return((MagickWand *) NULL);
maximum_image=EvaluateImages(wand->images,MaxEvaluateOperator,
wand->exception);
if (maximum_image == (Image *) NULL)
return((MagickWand *) NULL);
return(CloneMagickWandFromImages(wand,maximum_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImageAttribute() associates a property with an image.
%
% The format of the MagickSetImageAttribute method is:
%
% MagickBooleanType MagickSetImageAttribute(MagickWand *wand,
% const char *property,const char *value)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o property: the property.
%
% o value: the value.
%
*/
WandExport MagickBooleanType MagickSetImageAttribute(MagickWand *wand,
const char *property,const char *value)
{
return(SetImageProperty(wand->images,property,value));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImageIndex() sets the current image to the position in the list
% specified by the index parameter.
%
% The format of the MagickSetImageIndex method is:
%
% MagickBooleanType MagickSetImageIndex(MagickWand *wand,
% const ssize_t index)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o index: the scene number.
%
*/
WandExport MagickBooleanType MagickSetImageIndex(MagickWand *wand,
const ssize_t index)
{
return(MagickSetIteratorIndex(wand,index));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e O p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImageOption() associates one or more options with a particular image
% format (e.g., MagickSetImageOption(wand,"jpeg","preserve","yes")).
%
% The format of the MagickSetImageOption method is:
%
% MagickBooleanType MagickSetImageOption(MagickWand *wand,
% const char *format,const char *key,const char *value)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o format: the image format.
%
% o key: The key.
%
% o value: The value.
%
*/
WandExport MagickBooleanType MagickSetImageOption(MagickWand *wand,
const char *format,const char *key,const char *value)
{
char
option[MaxTextExtent];
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
(void) FormatLocaleString(option,MaxTextExtent,"%s:%s=%s",format,key,value);
return(DefineImageOption(wand->image_info,option));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickTransparentImage() changes the opacity of any pixel that matches the
% target color to the specified alpha value.
%
% The format of the MagickTransparentImage method is:
%
% MagickBooleanType MagickTransparentImage(MagickWand *wand,
% const PixelWand *target,const double alpha,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o target: Change this target color to specified opacity value within
% the image.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, with fuzz set to 10, red at
% intensities of 100 and 102 is interpreted as the same color for the
% purposes of the operation.
%
*/
WandExport MagickBooleanType MagickTransparentImage(MagickWand *wand,
const PixelWand *target,const double alpha,const double fuzz)
{
return(MagickPaintTransparentImage(wand,target,alpha,fuzz));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e g i o n O f I n t e r e s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickRegionOfInterestImage() extracts a region of the image and returns it
% as a new wand.
%
% The format of the MagickRegionOfInterestImage method is:
%
% MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
% const size_t width,const size_t height,const ssize_t x,
% const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o width: the region width.
%
% o height: the region height.
%
% o x: the region x offset.
%
% o y: the region y offset.
%
*/
WandExport MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
const size_t width,const size_t height,const ssize_t x,
const ssize_t y)
{
return(MagickGetImageRegion(wand,width,height,x,y));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImagePixels() accepts pixel data and stores it in the image at the
% location you specify. The method returns MagickTrue on success, otherwise
% MagickFalse if an error is encountered. The pixel data can be either char,
% short int, int, ssize_t, float, or double in the order specified by map.
%
% Suppose you want to upload the first scanline of a 640x480 image from
% character data in red-green-blue order:
%
% MagickSetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
% The format of the MagickSetImagePixels method is:
%
% MagickBooleanType MagickSetImagePixels(MagickWand *wand,
% const ssize_t x,const ssize_t y,const size_t columns,
% const size_t rows,const char *map,const StorageType storage,
% const void *pixels)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x, y, columns, rows: These values define the perimeter of a region
% of pixels you want to define.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from
% these types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel,
% or DoublePixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickSetImagePixels(MagickWand *wand,
const ssize_t x,const ssize_t y,const size_t columns,
const size_t rows,const char *map,const StorageType storage,
const void *pixels)
{
return(MagickImportImagePixels(wand,x,y,columns,rows,map,storage,pixels));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k W r i t e I m a g e B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickWriteImageBlob() implements direct to memory image formats. It
% returns the image as a blob and its length. Use MagickSetFormat() to
% set the format of the returned blob (GIF, JPEG, PNG, etc.).
%
% Use MagickRelinquishMemory() to free the blob when you are done with it.
%
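% For example (a minimal sketch; the "PNG" format name is illustrative):
%
%   size_t length;
%   unsigned char *blob;
%
%   MagickSetFormat(wand,"PNG");
%   blob=MagickWriteImageBlob(wand,&length);
%   /* use the first `length' bytes of blob, then free it */
%   blob=(unsigned char *) MagickRelinquishMemory(blob);
%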
% The format of the MagickWriteImageBlob method is:
%
% unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o length: the length of the blob.
%
*/
WandExport unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
{
return(MagickGetImageBlob(wand,length));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewPixelView() returns a pixel view required for all other methods in the
% Pixel View API.
%
% The format of the NewPixelView method is:
%
% PixelView *NewPixelView(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the wand.
%
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
const size_t number_threads)
{
PixelWand
***pixel_wands;
register ssize_t
i;
pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
sizeof(*pixel_wands));
if (pixel_wands == (PixelWand ***) NULL)
return((PixelWand ***) NULL);
(void) memset(pixel_wands,0,number_threads*sizeof(*pixel_wands));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_wands[i]=NewPixelWands(number_wands);
if (pixel_wands[i] == (PixelWand **) NULL)
return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
}
return(pixel_wands);
}
WandExport PixelView *NewPixelView(MagickWand *wand)
{
PixelView
*pixel_view;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == MagickCoreSignature);
pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
if (pixel_view == (PixelView *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
(void) memset(pixel_view,0,sizeof(*pixel_view));
pixel_view->id=AcquireWandId();
(void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
PixelViewId,(double) pixel_view->id);
pixel_view->exception=AcquireExceptionInfo();
pixel_view->wand=wand;
pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
pixel_view->exception);
pixel_view->region.width=wand->images->columns;
pixel_view->region.height=wand->images->rows;
pixel_view->number_threads=GetOpenMPMaximumThreads();
pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
pixel_view->number_threads);
if (pixel_view->pixel_wands == (PixelWand ***) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
pixel_view->debug=IsEventLogging();
pixel_view->signature=WandSignature;
return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w P i x e l V i e w R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewPixelViewRegion() returns a pixel view required for all other methods
% in the Pixel View API.
%
% The format of the NewPixelViewRegion method is:
%
% PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,width,height: These values define the perimeter of a region of
% pixels in the view.
%
*/
WandExport PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
const ssize_t y,const size_t width,const size_t height)
{
PixelView
*pixel_view;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == MagickCoreSignature);
pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
if (pixel_view == (PixelView *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
(void) memset(pixel_view,0,sizeof(*pixel_view));
pixel_view->id=AcquireWandId();
(void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
PixelViewId,(double) pixel_view->id);
pixel_view->exception=AcquireExceptionInfo();
pixel_view->wand=wand;
pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
pixel_view->exception);
pixel_view->region.width=width;
pixel_view->region.height=height;
pixel_view->region.x=x;
pixel_view->region.y=y;
pixel_view->number_threads=GetOpenMPMaximumThreads();
pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
pixel_view->number_threads);
if (pixel_view->pixel_wands == (PixelWand ***) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
pixel_view->debug=IsEventLogging();
pixel_view->signature=WandSignature;
return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i x e l G e t N e x t R o w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PixelGetNextRow() returns the next row as an array of pixel wands from the
% pixel iterator.
%
% The format of the PixelGetNextRow method is:
%
% PixelWand **PixelGetNextRow(PixelIterator *iterator)
%
% A description of each parameter follows:
%
% o iterator: the pixel iterator.
%
*/
WandExport PixelWand **PixelGetNextRow(PixelIterator *iterator)
{
size_t
number_wands;
return(PixelGetNextIteratorRow(iterator,&number_wands));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i x e l I t e r a t o r G e t E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PixelIteratorGetException() returns the severity, reason, and description of
% any error that occurs when using other methods in this API.
%
% The format of the PixelIteratorGetException method is:
%
% char *PixelIteratorGetException(const PixelIterator *iterator,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o iterator: the pixel iterator.
%
% o severity: the severity of the error is returned here.
%
*/
WandExport char *PixelIteratorGetException(const PixelIterator *iterator,
ExceptionType *severity)
{
return(PixelGetIteratorException(iterator,severity));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelViewIterator() iterates over the pixel view in parallel and calls
% your set method for each scanline of the view. The pixel region is
% confined to the image canvas-- that is, no negative offsets, or widths or
% heights that exceed the image dimensions, are permitted. The pixels are
% initially undefined and any settings you make in the callback method are
% automagically synced back to your image.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
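% A set callback might look like the sketch below; the accessors
% GetPixelViewPixels() and GetPixelViewWidth() are assumed to come from this
% same deprecated Pixel View API, and the exact SetPixelViewMethod signature
% is the one declared in wand/deprecate.h:
%
%   static MagickBooleanType SetToGray(PixelView *view,void *context)
%   {
%     PixelWand **pixels = GetPixelViewPixels(view);
%     ssize_t x;
%
%     (void) context;
%     for (x=0; x < (ssize_t) GetPixelViewWidth(view); x++)
%       PixelSetColor(pixels[x],"gray50");
%     return(MagickTrue);
%   }
%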
% The format of the SetPixelViewIterator method is:
%
% MagickBooleanType SetPixelViewIterator(PixelView *destination,
% SetPixelViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the pixel view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType SetPixelViewIterator(PixelView *destination,
SetPixelViewMethod set,void *context)
{
#define SetPixelViewTag "PixelView/Set"
ExceptionInfo
*exception;
Image
*destination_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(destination != (PixelView *) NULL);
assert(destination->signature == WandSignature);
if (set == (SetPixelViewMethod) NULL)
return(MagickFalse);
destination_image=destination->wand->images;
if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status)
#endif
for (y=destination->region.y; y < (ssize_t) destination->region.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(destination->view,destination->region.x,
y,destination->region.width,1,exception);
if (pixels == (PixelPacket *) NULL)
{
InheritException(destination->exception,GetCacheViewException(
destination->view));
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(destination->view);
if (set(destination,context) == MagickFalse)
status=MagickFalse;
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
if (destination_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) destination->region.width; x++)
SetPixelIndex(indexes+x,PixelGetBlackQuantum(
destination->pixel_wands[id][x]));
sync=SyncCacheViewAuthenticPixels(destination->view,exception);
if (sync == MagickFalse)
{
InheritException(destination->exception,GetCacheViewException(
destination->view));
status=MagickFalse;
}
if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(destination_image,SetPixelViewTag,progress,
destination->region.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferPixelViewIterator() iterates over two pixel views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% region is not confined to the image canvas-- that is, you can include
% negative offsets, or widths or heights that exceed the image dimensions.
% The destination pixel view, however, is confined to the image canvas-- that
% is, no negative offsets, or widths or heights that exceed the image
% dimensions, are permitted.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferPixelViewIterator method is:
%
% MagickBooleanType TransferPixelViewIterator(PixelView *source,
% PixelView *destination,TransferPixelViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o destination: the destination pixel view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType TransferPixelViewIterator(PixelView *source,
PixelView *destination,TransferPixelViewMethod transfer,void *context)
{
#define TransferPixelViewTag "PixelView/Transfer"
ExceptionInfo
*exception;
Image
*destination_image,
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(source != (PixelView *) NULL);
assert(source->signature == WandSignature);
if (transfer == (TransferPixelViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
destination_image=destination->wand->images;
if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status)
#endif
for (y=source->region.y; y < (ssize_t) source->region.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict pixels;
register IndexPacket
*magick_restrict destination_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict destination_pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
source->region.width,1,source->exception);
if (pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(source->view);
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
if (source_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetIndex(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->region.x,y,destination->region.width,1,exception);
if (destination_pixels == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x);
if (destination_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelSetBlackQuantum(destination->pixel_wands[id][x],
GetPixelIndex(indexes+x));
if (destination_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelSetIndex(destination->pixel_wands[id][x],
GetPixelIndex(indexes+x));
if (transfer(source,destination,context) == MagickFalse)
status=MagickFalse;
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelGetQuantumColor(destination->pixel_wands[id][x],
destination_pixels+x);
if (destination_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) destination->region.width; x++)
SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
destination->pixel_wands[id][x]));
sync=SyncCacheViewAuthenticPixels(destination->view,exception);
if (sync == MagickFalse)
{
InheritException(destination->exception,GetCacheViewException(
source->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(source_image,TransferPixelViewTag,progress,
source->region.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdatePixelViewIterator() iterates over the pixel view in parallel and calls
% your update method for each scanline of the view. The pixel region is
% confined to the image canvas-- that is, no negative offsets, or widths or
% heights that exceed the image dimensions, are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdatePixelViewIterator method is:
%
% MagickBooleanType UpdatePixelViewIterator(PixelView *source,
% UpdatePixelViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType UpdatePixelViewIterator(PixelView *source,
UpdatePixelViewMethod update,void *context)
{
#define UpdatePixelViewTag "PixelView/Update"
ExceptionInfo
*exception;
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(source != (PixelView *) NULL);
assert(source->signature == WandSignature);
if (update == (UpdatePixelViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status)
#endif
for (y=source->region.y; y < (ssize_t) source->region.height; y++)
{
const int
id = GetOpenMPThreadId();
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(source->view,source->region.x,y,
source->region.width,1,exception);
if (pixels == (PixelPacket *) NULL)
{
InheritException(source->exception,GetCacheViewException(
source->view));
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(source->view);
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
if (update(source,context) == MagickFalse)
status=MagickFalse;
for (x=0; x < (ssize_t) source->region.width; x++)
PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->region.width; x++)
SetPixelIndex(indexes+x,PixelGetBlackQuantum(
source->pixel_wands[id][x]));
if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
{
InheritException(source->exception,GetCacheViewException(source->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(source_image,UpdatePixelViewTag,progress,
source->region.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
#endif
|
displacement_lagrangemultiplier_residual_contact_criteria.h
|
// KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/color_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
* @class DisplacementLagrangeMultiplierResidualContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* This class implements a convergence control based on nodal displacement and
* lagrange multiplier values. The error is evaluated separately for each of them, and
* relative and absolute tolerances for both must be specified.
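* @details Convergence is declared when, for both the displacement and the
* Lagrange multiplier residuals, either the ratio of the current residual
* norm to the initial residual norm is below the relative tolerance, or the
* residual norm divided by the number of corresponding degrees of freedom is
* below the absolute tolerance.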
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
class TDenseSpace >
class DisplacementLagrangeMultiplierResidualContactCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of DisplacementLagrangeMultiplierResidualContactCriteria
KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierResidualContactCriteria );
/// Local Flags
KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT );
KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET );
/// The base class definition (and its subclasses)
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// The sparse space used
typedef TSparseSpace SparseSpaceType;
/// The r_table stream definition TODO: Replace by logger
typedef TableStreamUtility::Pointer TablePrinterPointerType;
/// The index type definition
typedef std::size_t IndexType;
/// The key type definition
typedef std::size_t KeyType;
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor (parameters)
* @param DispRatioTolerance Relative tolerance for displacement residual error
* @param DispAbsTolerance Absolute tolerance for displacement residual error
* @param LMRatioTolerance Relative tolerance for lagrange multiplier residual error
* @param LMAbsTolerance Absolute tolerance for lagrange multiplier residual error
* @param EnsureContact To check if the contact is lost
* @param pTable The pointer to the output r_table
* @param PrintingOutput If the output is going to be printed in a txt file
*/
explicit DisplacementLagrangeMultiplierResidualContactCriteria(
const TDataType DispRatioTolerance,
const TDataType DispAbsTolerance,
const TDataType LMRatioTolerance,
const TDataType LMAbsTolerance,
const bool EnsureContact = false,
const bool PrintingOutput = false
)
: BaseType()
{
// Set local flags
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, EnsureContact);
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, PrintingOutput);
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false);
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
mDispRatioTolerance = DispRatioTolerance;
mDispAbsTolerance = DispAbsTolerance;
mLMRatioTolerance = LMRatioTolerance;
mLMAbsTolerance = LMAbsTolerance;
}
/**
* @brief Default constructor (parameters)
* @param ThisParameters The configuration parameters
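* @details A typical construction from json settings might look like the
* sketch below; SparseSpaceType and LocalSpaceType are illustrative
* placeholders for whatever spaces the enclosing strategy uses:
* @code
* Parameters criteria_settings(R"({
*     "residual_relative_tolerance" : 1.0e-6,
*     "residual_absolute_tolerance" : 1.0e-9
* })");
* DisplacementLagrangeMultiplierResidualContactCriteria<SparseSpaceType,
*     LocalSpaceType> convergence_criteria(criteria_settings);
* @endcode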
*/
explicit DisplacementLagrangeMultiplierResidualContactCriteria( Parameters ThisParameters = Parameters(R"({})"))
: BaseType()
{
// The default parameters
Parameters default_parameters = Parameters(R"(
{
"ensure_contact" : false,
"print_convergence_criterion" : false,
"residual_relative_tolerance" : 1.0e-4,
"residual_absolute_tolerance" : 1.0e-9,
"contact_residual_relative_tolerance" : 1.0e-4,
"contact_residual_absolute_tolerance" : 1.0e-9
})" );
ThisParameters.ValidateAndAssignDefaults(default_parameters);
// The displacement residual
mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble();
mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble();
// The contact residual
mLMRatioTolerance = ThisParameters["contact_residual_relative_tolerance"].GetDouble();
mLMAbsTolerance = ThisParameters["contact_residual_absolute_tolerance"].GetDouble();
// Set local flags
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool());
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false);
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
}
//* Copy constructor.
DisplacementLagrangeMultiplierResidualContactCriteria( DisplacementLagrangeMultiplierResidualContactCriteria const& rOther )
:BaseType(rOther)
,mOptions(rOther.mOptions)
,mDispRatioTolerance(rOther.mDispRatioTolerance)
,mDispAbsTolerance(rOther.mDispAbsTolerance)
,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm)
,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm)
,mLMRatioTolerance(rOther.mLMRatioTolerance)
,mLMAbsTolerance(rOther.mLMAbsTolerance)
,mLMInitialResidualNorm(rOther.mLMInitialResidualNorm)
,mLMCurrentResidualNorm(rOther.mLMCurrentResidualNorm)
{
}
/// Destructor.
~DisplacementLagrangeMultiplierResidualContactCriteria() override = default;
///@}
///@name Operators
///@{
/**
* @brief Compute relative and absolute error.
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
* @return true if convergence is achieved, false otherwise
*/
bool PostCriteria(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something
// Initialize
TDataType disp_residual_solution_norm = 0.0, lm_residual_solution_norm = 0.0;
IndexType disp_dof_num(0),lm_dof_num(0);
// First iterator
const auto it_dof_begin = rDofSet.begin();
// Loop over Dofs
#pragma omp parallel for reduction(+:disp_residual_solution_norm,lm_residual_solution_norm,disp_dof_num,lm_dof_num)
for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
auto it_dof = it_dof_begin + i;
if (it_dof->IsFree()) {
const std::size_t dof_id = it_dof->EquationId();
const TDataType residual_dof_value = rb[dof_id];
const auto curr_var = it_dof->GetVariable();
if ((curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) {
lm_residual_solution_norm += residual_dof_value * residual_dof_value;
lm_dof_num++;
} else {
disp_residual_solution_norm += residual_dof_value * residual_dof_value;
disp_dof_num++;
}
}
}
mDispCurrentResidualNorm = disp_residual_solution_norm;
mLMCurrentResidualNorm = lm_residual_solution_norm;
TDataType residual_disp_ratio = 1.0;
TDataType residual_lm_ratio = 1.0;
// We initialize the solution
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET)) {
mDispInitialResidualNorm = (disp_residual_solution_norm == 0.0) ? 1.0 : disp_residual_solution_norm;
mLMInitialResidualNorm = (lm_residual_solution_norm == 0.0) ? 1.0 : lm_residual_solution_norm;
residual_disp_ratio = 1.0;
residual_lm_ratio = 1.0;
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, true);
}
// We calculate the ratio of the displacements
residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm;
// We calculate the ratio of the LM
residual_lm_ratio = mLMCurrentResidualNorm/mLMInitialResidualNorm;
KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) << "ERROR::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;
// We calculate the absolute norms
const TDataType residual_disp_abs = mDispCurrentResidualNorm/disp_dof_num;
const TDataType residual_lm_abs = mLMCurrentResidualNorm/lm_dof_num;
// The process info of the model part
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// We print the results // TODO: Replace for the new log
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
std::cout.precision(4);
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& Table = p_table->GetTable();
Table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_lm_ratio << mLMRatioTolerance << residual_lm_abs << mLMAbsTolerance;
} else {
std::cout.precision(4);
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) {
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("RESIDUAL CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tLAGRANGE MUL: RATIO = ") << residual_lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << residual_lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl;
} else {
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "RESIDUAL CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tLAGRANGE MUL: RATIO = " << residual_lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << residual_lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl;
}
}
}
r_process_info[CONVERGENCE_RATIO] = (residual_disp_ratio > residual_lm_ratio) ? residual_disp_ratio : residual_lm_ratio;
r_process_info[RESIDUAL_NORM] = (residual_lm_abs > mLMAbsTolerance) ? residual_lm_abs : mLMAbsTolerance;
// We check if converged
const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance);
const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) ? true : (residual_lm_ratio <= mLMRatioTolerance || residual_lm_abs <= mLMAbsTolerance);
if (disp_converged && lm_converged ) {
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& Table = p_table->GetTable();
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
Table << BOLDFONT(FGRN(" Achieved"));
else
Table << "Achieved";
} else {
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
else
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is achieved" << std::endl;
}
}
return true;
} else {
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
r_table << BOLDFONT(FRED(" Not achieved"));
else
r_table << "Not achieved";
} else {
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
else
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is not achieved" << std::endl;
}
}
return false;
}
} else // In this case all the displacements are imposed!
return true;
}
/**
* @brief This function initialize the convergence criteria
* @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
*/
void Initialize( ModelPart& rModelPart) override
{
BaseType::mConvergenceCriteriaIsInitialized = true;
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
r_table.AddColumn("DP RATIO", 10);
r_table.AddColumn("EXP. RAT", 10);
r_table.AddColumn("ABS", 10);
r_table.AddColumn("EXP. ABS", 10);
r_table.AddColumn("LM RATIO", 10);
r_table.AddColumn("EXP. RAT", 10);
r_table.AddColumn("ABS", 10);
r_table.AddColumn("EXP. ABS", 10);
r_table.AddColumn("CONVERGENCE", 15);
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, true);
}
}
/**
* @brief This function initializes the solution step
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
*/
void InitializeSolutionStep(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
}
///@}
///@name Operations
///@{
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
Flags mOptions; /// Local flags
TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement residual
TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement residual
TDataType mDispInitialResidualNorm; /// The reference norm of the displacement residual
TDataType mDispCurrentResidualNorm; /// The current norm of the displacement residual
TDataType mLMRatioTolerance; /// The ratio threshold for the norm of the LM residual
TDataType mLMAbsTolerance; /// The absolute value threshold for the norm of the LM residual
TDataType mLMInitialResidualNorm; /// The reference norm of the LM residual
TDataType mLMCurrentResidualNorm; /// The current norm of the LM residual
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Unaccessible methods
///@{
///@}
}; // Kratos DisplacementLagrangeMultiplierResidualContactCriteria
///@name Local flags creation
///@{
/// Local Flags
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_ENSURE_CONTACT(Kratos::Flags::Create(0, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_PRINTING_OUTPUT(Kratos::Flags::Create(1, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_TABLE_IS_INITIALIZED(Kratos::Flags::Create(2, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(3));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(3, false));
}
#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H */
|
quad.c
|
/* Calculates a definite integral by using a quadrature rule.
* Doesn't use worksharing directives, but shares loop iterations manually. */
/* For the default values of a, b, and n, sequential time on my i3-540 is 0.15 s,
* and parallel time is 0.08 s (Release x86, /O2 /Ot /Oi). */
# include <stdlib.h>
# include <stdio.h>
# include <math.h>
# include <time.h>
# include <omp.h>
#define MAX_NUM_THREADS 8
#define MAX 1024
#define ACCURACY 0.01
typedef struct Results Results;
struct Results {
double val;
double time;
};
// The function whose integral we calculate
static inline double f(const double x) {
register const double pi = 3.141592653589793;
double value;
value = 50.0 / (pi * (2500.0 * x * x + 1.0));
return value;
}
/************************/
/* SEQUENTIAL ALGORITHM */
/************************/
void seqQuad(const unsigned n, const double a, const double b, double *total, double *execTime) {
unsigned i;
double total_q = 0.0;
double wtime;
double x;
wtime = omp_get_wtime();
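/* Sample f at n equally spaced points spanning [a, b] and approximate the
 * integral as the mean sample value times the interval length (b - a). */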
for (i = 0; i < n; i++) {
x = ((double)(n - i - 1)*a + (double)(i)*b) / (double)(n - 1);
total_q = total_q + f(x);
}
wtime = omp_get_wtime() - wtime;
total_q = (b - a) * total_q / (double)n;
*total = total_q;
*execTime = (double)wtime;
}
Results sequential(const unsigned n, const double a, const double b) {
Results results;
seqQuad(n, a, b, &results.val, &results.time);
return results;
}
/**********************/
/* PARALLEL ALGORITHM */
/**********************/
void parQuad(const unsigned n, const double a, const double b, double *total, double *execTime) {
unsigned i;
unsigned start, end, chunk;
unsigned myId, numThreads;
double total_q = 0.0;
double wtime;
double x;
wtime = omp_get_wtime();
#pragma omp parallel default(none) \
private(i, x, myId, start, end, chunk) \
shared(/*n, a, b,*/ numThreads) \
reduction(+:total_q)
{
myId = omp_get_thread_num();
numThreads = omp_get_num_threads();
//chunk = (unsigned)ceil((double)n / (double)numThreads);
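/* Integer ceiling division: each thread takes a contiguous block of
 * ceil(n / numThreads) iterations; `end' is clamped to n below so the last
 * thread does not run past the iteration range. */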
chunk = (n + numThreads - 1) / numThreads;
start = myId*chunk;
end = start + chunk < n ? start + chunk : n;
for (i = start; i < end; i++) {
x = ((double)(n - i - 1)*a + (double)(i)*b) / (double)(n - 1);
total_q = total_q + f(x);
}
} // omp parallel
wtime = omp_get_wtime() - wtime;
total_q = (b - a) * total_q / (double)n;
*total = total_q;
*execTime = (double)wtime;
}
Results parallel(const unsigned n, const double a, const double b) {
Results results;
parQuad(n, a, b, &results.val, &results.time);
return results;
}
void compareAndPrint(const unsigned n, const double a, const double b) {
Results seq, par;
seq = sequential(n, a, b);
par = parallel(n, a, b);
printf(" Sequential estimate quadrature rule = %24.16f\n", seq.val);
printf(" Parallel estimate quadrature rule = %24.16f\n", par.val);
printf("Sequential time quadrature rule = %f s\n", seq.time);
printf("Parallel time quadrature rule = %f s\n", par.time);
if (fabs(seq.val - par.val) < ACCURACY)
printf("\tTest PASSED!\n");
else
printf("\a\tTest FAILED!!!\n");
printf("\n");
}
int main(int argc, char *argv[]) {
unsigned n;
double a;
double b;
const double exact = 0.49936338107645674464;
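/* The antiderivative of f is atan(50*x)/pi, so for the default limits
 * a = 0 and b = 10 the exact integral is atan(500)/pi ~= 0.4993633810765. */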
if (argc != 4) {
n = 10000000;
a = 0.0;
b = 10.0;
}
else {
n = (unsigned)atoi(argv[1]);
a = atof(argv[2]);
b = atof(argv[3]);
}
printf("\n");
printf("QUAD:\n");
printf(" Estimate the integral of f(x) from A to B.\n");
printf(" f(x) = 50 / ( pi * ( 2500 * x * x + 1 ) ).\n");
printf("\n");
printf(" A = %f\n", a);
printf(" B = %f\n", b);
printf(" N = %u\n", n);
//printf(" Exact = %24.16f\n", exact);
printf("\n");
compareAndPrint(n, a, b);
printf(" Normal end of execution.\n");
printf("\n");
getchar();
return 0;
}
|
par_csr_matrix.c
|
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Member functions for hypre_ParCSRMatrix class.
*
*****************************************************************************/
#include "_hypre_parcsr_mv.h"
#include "../seq_mv/HYPRE_seq_mv.h"
#include "../seq_mv/csr_matrix.h"
/* In addition to the publicly accessible interface in HYPRE_mv.h, the
implementation in this file uses accessor macros into the sequential matrix
structure, and so includes the .h that defines that structure. Should those
accessor functions become proper functions at some later date, this will not
be necessary. AJC 4/99 */
#ifdef HYPRE_NO_GLOBAL_PARTITION
HYPRE_Int hypre_FillResponseParToCSRMatrix(void*, HYPRE_Int, HYPRE_Int, void*, MPI_Comm, void**, HYPRE_Int*);
#endif
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixCreate
*--------------------------------------------------------------------------*/
/* If create is called for HYPRE_NO_GLOBAL_PARTITION and row_starts and
col_starts are NOT null, then it is assumed that they are array of length 2
containing the start row of the calling processor followed by the start row
of the next processor - AHB 6/05 */
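/* For example (illustrative values): with HYPRE_NO_GLOBAL_PARTITION defined and
   10 global rows split over 2 processes, process 0 passes row_starts = {0, 5}
   and process 1 passes row_starts = {5, 10}; without that define, every process
   passes the full array {0, 5, 10}. */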
hypre_ParCSRMatrix*
hypre_ParCSRMatrixCreate( MPI_Comm comm,
HYPRE_BigInt global_num_rows,
HYPRE_BigInt global_num_cols,
HYPRE_BigInt *row_starts,
HYPRE_BigInt *col_starts,
HYPRE_Int num_cols_offd,
HYPRE_Int num_nonzeros_diag,
HYPRE_Int num_nonzeros_offd )
{
hypre_ParCSRMatrix *matrix;
HYPRE_Int num_procs, my_id;
HYPRE_Int local_num_rows, local_num_cols;
HYPRE_BigInt first_row_index, first_col_diag;
matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
hypre_MPI_Comm_rank(comm,&my_id);
hypre_MPI_Comm_size(comm,&num_procs);
if (!row_starts)
{
#ifdef HYPRE_NO_GLOBAL_PARTITION
hypre_GenerateLocalPartitioning(global_num_rows, num_procs, my_id,
&row_starts);
#else
hypre_GeneratePartitioning(global_num_rows, num_procs, &row_starts);
#endif
}
if (!col_starts)
{
if (global_num_rows == global_num_cols)
{
col_starts = row_starts;
}
else
{
#ifdef HYPRE_NO_GLOBAL_PARTITION
hypre_GenerateLocalPartitioning(global_num_cols, num_procs, my_id,
&col_starts);
#else
hypre_GeneratePartitioning(global_num_cols, num_procs, &col_starts);
#endif
}
}
#ifdef HYPRE_NO_GLOBAL_PARTITION
/* row_starts[0] is start of local rows. row_starts[1] is start of next
processor's rows */
first_row_index = row_starts[0];
local_num_rows = row_starts[1]-first_row_index ;
first_col_diag = col_starts[0];
local_num_cols = col_starts[1]-first_col_diag;
#else
first_row_index = row_starts[my_id];
local_num_rows = row_starts[my_id+1]-first_row_index;
first_col_diag = col_starts[my_id];
local_num_cols = col_starts[my_id+1]-first_col_diag;
#endif
hypre_ParCSRMatrixComm(matrix) = comm;
hypre_ParCSRMatrixDiag(matrix) =
hypre_CSRMatrixCreate(local_num_rows, local_num_cols, num_nonzeros_diag);
hypre_ParCSRMatrixOffd(matrix) =
hypre_CSRMatrixCreate(local_num_rows, num_cols_offd, num_nonzeros_offd);
hypre_ParCSRMatrixDiagT(matrix) = NULL;
hypre_ParCSRMatrixOffdT(matrix) = NULL; // JSP: transposed matrices are optional
hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows;
hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols;
hypre_ParCSRMatrixFirstRowIndex(matrix) = first_row_index;
hypre_ParCSRMatrixFirstColDiag(matrix) = first_col_diag;
hypre_ParCSRMatrixLastRowIndex(matrix) = first_row_index + local_num_rows - 1;
hypre_ParCSRMatrixLastColDiag(matrix) = first_col_diag + local_num_cols - 1;
hypre_ParCSRMatrixColMapOffd(matrix) = NULL;
hypre_ParCSRMatrixDeviceColMapOffd(matrix) = NULL;
hypre_ParCSRMatrixProcOrdering(matrix) = NULL;
hypre_ParCSRMatrixAssumedPartition(matrix) = NULL;
hypre_ParCSRMatrixOwnsAssumedPartition(matrix) = 1;
/* When NO_GLOBAL_PARTITION is set we could make these null, instead
of leaving the range. If that change is made, then when this create
is called from functions like the matrix-matrix multiply, be careful
not to generate a new partition */
hypre_ParCSRMatrixRowStarts(matrix) = row_starts;
hypre_ParCSRMatrixColStarts(matrix) = col_starts;
hypre_ParCSRMatrixCommPkg(matrix) = NULL;
hypre_ParCSRMatrixCommPkgT(matrix) = NULL;
/* set defaults */
hypre_ParCSRMatrixOwnsData(matrix) = 1;
hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1;
hypre_ParCSRMatrixOwnsColStarts(matrix) = 1;
if (row_starts == col_starts)
{
hypre_ParCSRMatrixOwnsColStarts(matrix) = 0;
}
hypre_ParCSRMatrixRowindices(matrix) = NULL;
hypre_ParCSRMatrixRowvalues(matrix) = NULL;
hypre_ParCSRMatrixGetrowactive(matrix) = 0;
matrix->bdiaginv = NULL;
matrix->bdiaginv_comm_pkg = NULL;
matrix->bdiag_size = -1;
#if defined(HYPRE_USING_CUDA)
hypre_ParCSRMatrixSocDiagJ(matrix) = NULL;
hypre_ParCSRMatrixSocOffdJ(matrix) = NULL;
#endif
return matrix;
}
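/*--------------------------------------------------------------------------
 * Usage sketch (illustrative only, not part of the library source):
 * a minimal create/initialize/destroy cycle. Passing NULL for row_starts
 * and col_starts lets the constructor generate the partitionings itself,
 * as the code above shows; the global size used here is hypothetical.
 *--------------------------------------------------------------------------*/
#if 0
static void example_parcsr_create( MPI_Comm comm )
{
   hypre_ParCSRMatrix *A;
   /* 1000 x 1000 global matrix, no off-diagonal columns and no
      preallocated nonzeros; partitionings are generated internally */
   A = hypre_ParCSRMatrixCreate(comm, 1000, 1000, NULL, NULL, 0, 0, 0);
   hypre_ParCSRMatrixInitialize(A);  /* allocates diag, offd and col_map_offd */
   /* ... fill the diag/offd CSR parts here ... */
   hypre_ParCSRMatrixDestroy(A);     /* also frees the generated partitionings */
}
#endif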
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixDestroy( hypre_ParCSRMatrix *matrix )
{
if (matrix)
{
HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(matrix);
if ( hypre_ParCSRMatrixOwnsData(matrix) )
{
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(matrix));
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(matrix));
if ( hypre_ParCSRMatrixDiagT(matrix) )
{
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiagT(matrix));
}
if ( hypre_ParCSRMatrixOffdT(matrix) )
{
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffdT(matrix));
}
if (hypre_ParCSRMatrixColMapOffd(matrix))
{
hypre_TFree(hypre_ParCSRMatrixColMapOffd(matrix), HYPRE_MEMORY_HOST);
}
if (hypre_ParCSRMatrixDeviceColMapOffd(matrix))
{
hypre_TFree(hypre_ParCSRMatrixDeviceColMapOffd(matrix), HYPRE_MEMORY_DEVICE);
}
if (hypre_ParCSRMatrixCommPkg(matrix))
{
hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkg(matrix));
}
if (hypre_ParCSRMatrixCommPkgT(matrix))
{
hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkgT(matrix));
}
}
if ( hypre_ParCSRMatrixOwnsRowStarts(matrix) )
{
hypre_TFree(hypre_ParCSRMatrixRowStarts(matrix), HYPRE_MEMORY_HOST);
}
if ( hypre_ParCSRMatrixOwnsColStarts(matrix) )
{
hypre_TFree(hypre_ParCSRMatrixColStarts(matrix), HYPRE_MEMORY_HOST);
}
/* RL: this is actually not correct, since the memory_location may have been
changed after allocation; put them in containers (TODO) */
hypre_TFree(hypre_ParCSRMatrixRowindices(matrix), memory_location);
hypre_TFree(hypre_ParCSRMatrixRowvalues(matrix), memory_location);
if ( hypre_ParCSRMatrixAssumedPartition(matrix) && hypre_ParCSRMatrixOwnsAssumedPartition(matrix) )
{
hypre_AssumedPartitionDestroy(hypre_ParCSRMatrixAssumedPartition(matrix));
}
if ( hypre_ParCSRMatrixProcOrdering(matrix) )
{
hypre_TFree(hypre_ParCSRMatrixProcOrdering(matrix), HYPRE_MEMORY_HOST);
}
hypre_TFree(matrix->bdiaginv, HYPRE_MEMORY_HOST);
if (matrix->bdiaginv_comm_pkg)
{
hypre_MatvecCommPkgDestroy(matrix->bdiaginv_comm_pkg);
}
#if defined(HYPRE_USING_CUDA)
hypre_TFree(hypre_ParCSRMatrixSocDiagJ(matrix), HYPRE_MEMORY_DEVICE);
hypre_TFree(hypre_ParCSRMatrixSocOffdJ(matrix), HYPRE_MEMORY_DEVICE);
#endif
hypre_TFree(matrix, HYPRE_MEMORY_HOST);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixInitialize
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixInitialize_v2( hypre_ParCSRMatrix *matrix, HYPRE_MemoryLocation memory_location )
{
if (!matrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixDiag(matrix), 0, memory_location);
hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixOffd(matrix), 0, memory_location);
hypre_ParCSRMatrixColMapOffd(matrix) =
hypre_CTAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)),
HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
HYPRE_Int
hypre_ParCSRMatrixInitialize( hypre_ParCSRMatrix *matrix )
{
return hypre_ParCSRMatrixInitialize_v2(matrix, hypre_ParCSRMatrixMemoryLocation(matrix));
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixClone
* Creates and returns a new copy S of the argument A
* The following variables are not copied because they will be constructed
* later if needed: CommPkg, CommPkgT, rowindices, rowvalues
*--------------------------------------------------------------------------*/
hypre_ParCSRMatrix*
hypre_ParCSRMatrixClone_v2(hypre_ParCSRMatrix *A, HYPRE_Int copy_data, HYPRE_MemoryLocation memory_location)
{
hypre_ParCSRMatrix *S;
S = hypre_ParCSRMatrixCreate( hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixGlobalNumCols(A),
hypre_ParCSRMatrixRowStarts(A),
hypre_ParCSRMatrixColStarts(A),
hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)),
hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)),
hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)) );
/* !!! S does not own Row/Col-Starts */
hypre_ParCSRMatrixSetRowStartsOwner(S, 0);
hypre_ParCSRMatrixSetColStartsOwner(S, 0);
hypre_ParCSRMatrixNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A);
hypre_ParCSRMatrixDNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A);
hypre_ParCSRMatrixInitialize_v2(S, memory_location);
hypre_ParCSRMatrixCopy(A, S, copy_data);
return S;
}
hypre_ParCSRMatrix*
hypre_ParCSRMatrixClone(hypre_ParCSRMatrix *A, HYPRE_Int copy_data)
{
return hypre_ParCSRMatrixClone_v2(A, copy_data, hypre_ParCSRMatrixMemoryLocation(A));
}
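/*--------------------------------------------------------------------------
 * Usage sketch (illustrative only): cloning a matrix, optionally into a
 * different memory space. As noted above, the clone does not own the
 * row/col starts of A, so A must outlive it.
 *--------------------------------------------------------------------------*/
#if 0
static void example_parcsr_clone( hypre_ParCSRMatrix *A )
{
   /* deep copy in the same memory location as A */
   hypre_ParCSRMatrix *B = hypre_ParCSRMatrixClone(A, 1);
   /* structure-only copy placed in host memory */
   hypre_ParCSRMatrix *C = hypre_ParCSRMatrixClone_v2(A, 0, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixDestroy(B);
   hypre_ParCSRMatrixDestroy(C);
}
#endif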
HYPRE_Int
hypre_ParCSRMatrixMigrate(hypre_ParCSRMatrix *A, HYPRE_MemoryLocation memory_location)
{
if (!A)
{
return hypre_error_flag;
}
if ( hypre_GetActualMemLocation(memory_location) !=
hypre_GetActualMemLocation(hypre_ParCSRMatrixMemoryLocation(A)) )
{
hypre_CSRMatrix *A_diag = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixDiag(A), 1, memory_location);
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(A));
hypre_ParCSRMatrixDiag(A) = A_diag;
hypre_CSRMatrix *A_offd = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixOffd(A), 1, memory_location);
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(A));
hypre_ParCSRMatrixOffd(A) = A_offd;
}
else
{
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(A)) = memory_location;
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(A)) = memory_location;
}
return hypre_error_flag;
}
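/*--------------------------------------------------------------------------
 * Usage sketch (illustrative only): migrating an existing matrix to device
 * memory. As the routine above shows, only the memory-location tags are
 * updated when the actual memory space does not change.
 *--------------------------------------------------------------------------*/
#if 0
static void example_parcsr_migrate( hypre_ParCSRMatrix *A )
{
   hypre_ParCSRMatrixMigrate(A, HYPRE_MEMORY_DEVICE);
}
#endif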
HYPRE_Int
hypre_ParCSRMatrixSetNumNonzeros_core( hypre_ParCSRMatrix *matrix, const char* format )
{
MPI_Comm comm;
hypre_CSRMatrix *diag;
hypre_CSRMatrix *offd;
if (!matrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
comm = hypre_ParCSRMatrixComm(matrix);
diag = hypre_ParCSRMatrixDiag(matrix);
offd = hypre_ParCSRMatrixOffd(matrix);
/* TODO in HYPRE_DEBUG ? */
hypre_CSRMatrixCheckSetNumNonzeros(diag);
hypre_CSRMatrixCheckSetNumNonzeros(offd);
if (format[0] == 'I')
{
HYPRE_BigInt total_num_nonzeros;
HYPRE_BigInt local_num_nonzeros;
local_num_nonzeros = (HYPRE_BigInt) ( hypre_CSRMatrixNumNonzeros(diag) +
hypre_CSRMatrixNumNonzeros(offd) );
hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_BIG_INT,
hypre_MPI_SUM, comm);
hypre_ParCSRMatrixNumNonzeros(matrix) = total_num_nonzeros;
}
else if (format[0] == 'D')
{
HYPRE_Real total_num_nonzeros;
HYPRE_Real local_num_nonzeros;
local_num_nonzeros = (HYPRE_Real) ( hypre_CSRMatrixNumNonzeros(diag) +
hypre_CSRMatrixNumNonzeros(offd) );
hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1,
HYPRE_MPI_REAL, hypre_MPI_SUM, comm);
hypre_ParCSRMatrixDNumNonzeros(matrix) = total_num_nonzeros;
}
else
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetNumNonzeros
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixSetNumNonzeros( hypre_ParCSRMatrix *matrix )
{
return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Int");
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetDNumNonzeros
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixSetDNumNonzeros( hypre_ParCSRMatrix *matrix )
{
return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Double");
}
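/*--------------------------------------------------------------------------
 * Usage sketch (illustrative only): computing and reading back the global
 * nonzero counts; both calls are collective over the matrix communicator.
 *--------------------------------------------------------------------------*/
#if 0
static void example_parcsr_num_nonzeros( hypre_ParCSRMatrix *A )
{
   hypre_ParCSRMatrixSetNumNonzeros(A);   /* integer count */
   hypre_ParCSRMatrixSetDNumNonzeros(A);  /* floating-point count */
   HYPRE_BigInt nnz  = hypre_ParCSRMatrixNumNonzeros(A);
   HYPRE_Real   dnnz = hypre_ParCSRMatrixDNumNonzeros(A);
   (void) nnz; (void) dnnz;
}
#endif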
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetDataOwner
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixSetDataOwner( hypre_ParCSRMatrix *matrix,
HYPRE_Int owns_data )
{
if (!matrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParCSRMatrixOwnsData(matrix) = owns_data;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetRowStartsOwner
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixSetRowStartsOwner( hypre_ParCSRMatrix *matrix,
HYPRE_Int owns_row_starts )
{
if (!matrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParCSRMatrixOwnsRowStarts(matrix) = owns_row_starts;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetColStartsOwner
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixSetColStartsOwner( hypre_ParCSRMatrix *matrix,
HYPRE_Int owns_col_starts )
{
if (!matrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParCSRMatrixOwnsColStarts(matrix) = owns_col_starts;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixRead
*--------------------------------------------------------------------------*/
hypre_ParCSRMatrix *
hypre_ParCSRMatrixRead( MPI_Comm comm,
const char *file_name )
{
hypre_ParCSRMatrix *matrix;
hypre_CSRMatrix *diag;
hypre_CSRMatrix *offd;
HYPRE_Int my_id, i, num_procs;
char new_file_d[80], new_file_o[80], new_file_info[80];
HYPRE_BigInt global_num_rows, global_num_cols;
HYPRE_Int num_cols_offd;
HYPRE_Int local_num_rows;
HYPRE_BigInt *row_starts;
HYPRE_BigInt *col_starts;
HYPRE_BigInt *col_map_offd;
FILE *fp;
HYPRE_Int equal = 1;
#ifdef HYPRE_NO_GLOBAL_PARTITION
HYPRE_BigInt row_s, row_e, col_s, col_e;
#endif
hypre_MPI_Comm_rank(comm,&my_id);
hypre_MPI_Comm_size(comm,&num_procs);
#ifdef HYPRE_NO_GLOBAL_PARTITION
row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
#else
row_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
#endif
hypre_sprintf(new_file_d,"%s.D.%d",file_name,my_id);
hypre_sprintf(new_file_o,"%s.O.%d",file_name,my_id);
hypre_sprintf(new_file_info,"%s.INFO.%d",file_name,my_id);
fp = fopen(new_file_info, "r");
hypre_fscanf(fp, "%b", &global_num_rows);
hypre_fscanf(fp, "%b", &global_num_cols);
hypre_fscanf(fp, "%d", &num_cols_offd);
#ifdef HYPRE_NO_GLOBAL_PARTITION
/* the bgl input file should only contain the EXACT range for the local processor */
hypre_fscanf(fp, "%d %d %d %d", &row_s, &row_e, &col_s, &col_e);
row_starts[0] = row_s;
row_starts[1] = row_e;
col_starts[0] = col_s;
col_starts[1] = col_e;
#else
for (i=0; i < num_procs; i++)
{
hypre_fscanf(fp, "%b %b", &row_starts[i], &col_starts[i]);
}
row_starts[num_procs] = global_num_rows;
col_starts[num_procs] = global_num_cols;
#endif
col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_offd; i++)
{
hypre_fscanf(fp, "%b", &col_map_offd[i]);
}
fclose(fp);
#ifdef HYPRE_NO_GLOBAL_PARTITION
for (i=1; i >= 0; i--)
{
if (row_starts[i] != col_starts[i])
{
equal = 0;
break;
}
}
#else
for (i=num_procs; i >= 0; i--)
{
if (row_starts[i] != col_starts[i])
{
equal = 0;
break;
}
}
#endif
if (equal)
{
hypre_TFree(col_starts, HYPRE_MEMORY_HOST);
col_starts = row_starts;
}
diag = hypre_CSRMatrixRead(new_file_d);
local_num_rows = hypre_CSRMatrixNumRows(diag);
if (num_cols_offd)
{
offd = hypre_CSRMatrixRead(new_file_o);
}
else
{
offd = hypre_CSRMatrixCreate(local_num_rows,0,0);
hypre_CSRMatrixInitialize(offd);
}
matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixComm(matrix) = comm;
hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows;
hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols;
#ifdef HYPRE_NO_GLOBAL_PARTITION
hypre_ParCSRMatrixFirstRowIndex(matrix) = row_s;
hypre_ParCSRMatrixFirstColDiag(matrix) = col_s;
hypre_ParCSRMatrixLastRowIndex(matrix) = row_e - 1;
hypre_ParCSRMatrixLastColDiag(matrix) = col_e - 1;
#else
hypre_ParCSRMatrixFirstRowIndex(matrix) = row_starts[my_id];
hypre_ParCSRMatrixFirstColDiag(matrix) = col_starts[my_id];
hypre_ParCSRMatrixLastRowIndex(matrix) = row_starts[my_id+1]-1;
hypre_ParCSRMatrixLastColDiag(matrix) = col_starts[my_id+1]-1;
#endif
hypre_ParCSRMatrixRowStarts(matrix) = row_starts;
hypre_ParCSRMatrixColStarts(matrix) = col_starts;
hypre_ParCSRMatrixCommPkg(matrix) = NULL;
/* set defaults */
hypre_ParCSRMatrixOwnsData(matrix) = 1;
hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1;
hypre_ParCSRMatrixOwnsColStarts(matrix) = 1;
if (row_starts == col_starts)
{
hypre_ParCSRMatrixOwnsColStarts(matrix) = 0;
}
hypre_ParCSRMatrixDiag(matrix) = diag;
hypre_ParCSRMatrixOffd(matrix) = offd;
if (num_cols_offd)
{
hypre_ParCSRMatrixColMapOffd(matrix) = col_map_offd;
}
else
{
hypre_ParCSRMatrixColMapOffd(matrix) = NULL;
}
return matrix;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixPrint
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixPrint( hypre_ParCSRMatrix *matrix,
const char *file_name )
{
MPI_Comm comm;
HYPRE_BigInt global_num_rows;
HYPRE_BigInt global_num_cols;
HYPRE_BigInt *col_map_offd;
#ifndef HYPRE_NO_GLOBAL_PARTITION
HYPRE_BigInt *row_starts;
HYPRE_BigInt *col_starts;
#endif
HYPRE_Int my_id, i, num_procs;
char new_file_d[80], new_file_o[80], new_file_info[80];
FILE *fp;
HYPRE_Int num_cols_offd = 0;
#ifdef HYPRE_NO_GLOBAL_PARTITION
HYPRE_BigInt row_s, row_e, col_s, col_e;
#endif
if (!matrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
comm = hypre_ParCSRMatrixComm(matrix);
global_num_rows = hypre_ParCSRMatrixGlobalNumRows(matrix);
global_num_cols = hypre_ParCSRMatrixGlobalNumCols(matrix);
col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
#ifndef HYPRE_NO_GLOBAL_PARTITION
row_starts = hypre_ParCSRMatrixRowStarts(matrix);
col_starts = hypre_ParCSRMatrixColStarts(matrix);
#endif
if (hypre_ParCSRMatrixOffd(matrix))
num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix));
hypre_MPI_Comm_rank(comm, &my_id);
hypre_MPI_Comm_size(comm, &num_procs);
hypre_sprintf(new_file_d,"%s.D.%d",file_name,my_id);
hypre_sprintf(new_file_o,"%s.O.%d",file_name,my_id);
hypre_sprintf(new_file_info,"%s.INFO.%d",file_name,my_id);
hypre_CSRMatrixPrint(hypre_ParCSRMatrixDiag(matrix),new_file_d);
if (num_cols_offd != 0)
hypre_CSRMatrixPrint(hypre_ParCSRMatrixOffd(matrix),new_file_o);
fp = fopen(new_file_info, "w");
hypre_fprintf(fp, "%b\n", global_num_rows);
hypre_fprintf(fp, "%b\n", global_num_cols);
hypre_fprintf(fp, "%d\n", num_cols_offd);
#ifdef HYPRE_NO_GLOBAL_PARTITION
row_s = hypre_ParCSRMatrixFirstRowIndex(matrix);
row_e = hypre_ParCSRMatrixLastRowIndex(matrix);
col_s = hypre_ParCSRMatrixFirstColDiag(matrix);
col_e = hypre_ParCSRMatrixLastColDiag(matrix);
/* add 1 to the ends because this is a starts partition */
hypre_fprintf(fp, "%b %b %b %b\n", row_s, row_e + 1, col_s, col_e + 1);
#else
for (i=0; i < num_procs; i++)
hypre_fprintf(fp, "%b %b\n", row_starts[i], col_starts[i]);
#endif
for (i=0; i < num_cols_offd; i++)
hypre_fprintf(fp, "%b\n", col_map_offd[i]);
fclose(fp);
return hypre_error_flag;
}
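/*--------------------------------------------------------------------------
 * Usage sketch (illustrative only): a print/read round trip. Each rank
 * writes "<name>.D.<id>", "<name>.O.<id>" and "<name>.INFO.<id>" and reads
 * the same files back; the base file name below is hypothetical.
 *--------------------------------------------------------------------------*/
#if 0
static void example_parcsr_print_read( MPI_Comm comm, hypre_ParCSRMatrix *A )
{
   hypre_ParCSRMatrixPrint(A, "parcsr_example");
   hypre_ParCSRMatrix *B = hypre_ParCSRMatrixRead(comm, "parcsr_example");
   hypre_ParCSRMatrixDestroy(B);
}
#endif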
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixPrintIJ
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixPrintIJ( const hypre_ParCSRMatrix *matrix,
const HYPRE_Int base_i,
const HYPRE_Int base_j,
const char *filename )
{
MPI_Comm comm;
HYPRE_BigInt first_row_index;
HYPRE_BigInt first_col_diag;
hypre_CSRMatrix *diag;
hypre_CSRMatrix *offd;
HYPRE_BigInt *col_map_offd;
HYPRE_Int num_rows;
HYPRE_BigInt *row_starts;
HYPRE_BigInt *col_starts;
HYPRE_Complex *diag_data;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *offd_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_Int myid, num_procs, i, j;
HYPRE_BigInt I, J;
char new_filename[255];
FILE *file;
HYPRE_Int num_nonzeros_offd;
HYPRE_BigInt ilower, iupper, jlower, jupper;
if (!matrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
comm = hypre_ParCSRMatrixComm(matrix);
first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix);
first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix);
diag = hypre_ParCSRMatrixDiag(matrix);
offd = hypre_ParCSRMatrixOffd(matrix);
col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
num_rows = hypre_ParCSRMatrixNumRows(matrix);
row_starts = hypre_ParCSRMatrixRowStarts(matrix);
col_starts = hypre_ParCSRMatrixColStarts(matrix);
hypre_MPI_Comm_rank(comm, &myid);
hypre_MPI_Comm_size(comm, &num_procs);
hypre_sprintf(new_filename,"%s.%05d", filename, myid);
if ((file = fopen(new_filename, "w")) == NULL)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n");
return hypre_error_flag;
}
num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(offd);
diag_data = hypre_CSRMatrixData(diag);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
offd_i = hypre_CSRMatrixI(offd);
if (num_nonzeros_offd)
{
offd_data = hypre_CSRMatrixData(offd);
offd_j = hypre_CSRMatrixJ(offd);
}
#ifdef HYPRE_NO_GLOBAL_PARTITION
ilower = row_starts[0]+(HYPRE_BigInt)base_i;
iupper = row_starts[1]+(HYPRE_BigInt)base_i - 1;
jlower = col_starts[0]+(HYPRE_BigInt)base_j;
jupper = col_starts[1]+(HYPRE_BigInt)base_j - 1;
#else
ilower = row_starts[myid] +(HYPRE_BigInt)base_i;
iupper = row_starts[myid+1]+(HYPRE_BigInt)base_i - 1;
jlower = col_starts[myid] +(HYPRE_BigInt)base_j;
jupper = col_starts[myid+1]+(HYPRE_BigInt)base_j - 1;
#endif
hypre_fprintf(file, "%b %b %b %b\n", ilower, iupper, jlower, jupper);
for (i = 0; i < num_rows; i++)
{
I = first_row_index + (HYPRE_BigInt)(i + base_i);
/* print diag columns */
for (j = diag_i[i]; j < diag_i[i+1]; j++)
{
J = first_col_diag + (HYPRE_BigInt)(diag_j[j] + base_j);
if ( diag_data )
{
#ifdef HYPRE_COMPLEX
hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J,
hypre_creal(diag_data[j]), hypre_cimag(diag_data[j]));
#else
hypre_fprintf(file, "%b %b %.14e\n", I, J, diag_data[j]);
#endif
}
else
hypre_fprintf(file, "%b %b\n", I, J);
}
/* print offd columns */
if ( num_nonzeros_offd )
{
for (j = offd_i[i]; j < offd_i[i+1]; j++)
{
J = col_map_offd[offd_j[j]] + (HYPRE_BigInt)base_j;
if ( offd_data )
{
#ifdef HYPRE_COMPLEX
hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J,
hypre_creal(offd_data[j]), hypre_cimag(offd_data[j]));
#else
hypre_fprintf(file, "%b %b %.14e\n", I, J, offd_data[j]);
#endif
}
else
hypre_fprintf(file, "%b %b\n", I, J );
}
}
}
fclose(file);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixReadIJ
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixReadIJ( MPI_Comm comm,
const char *filename,
HYPRE_Int *base_i_ptr,
HYPRE_Int *base_j_ptr,
hypre_ParCSRMatrix **matrix_ptr)
{
HYPRE_BigInt global_num_rows;
HYPRE_BigInt global_num_cols;
HYPRE_BigInt first_row_index;
HYPRE_BigInt first_col_diag;
HYPRE_BigInt last_col_diag;
hypre_ParCSRMatrix *matrix;
hypre_CSRMatrix *diag;
hypre_CSRMatrix *offd;
HYPRE_BigInt *col_map_offd;
HYPRE_BigInt *row_starts;
HYPRE_BigInt *col_starts;
HYPRE_Int num_rows;
HYPRE_BigInt big_base_i, big_base_j;
HYPRE_Int base_i, base_j;
HYPRE_Complex *diag_data;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *offd_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_BigInt *tmp_j;
HYPRE_BigInt *aux_offd_j;
HYPRE_BigInt I, J;
HYPRE_Int myid, num_procs, i, i2, j;
char new_filename[255];
FILE *file;
HYPRE_Int num_cols_offd, num_nonzeros_diag, num_nonzeros_offd;
HYPRE_Int equal, i_col, num_cols;
HYPRE_Int diag_cnt, offd_cnt, row_cnt;
HYPRE_Complex data;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &myid);
hypre_sprintf(new_filename,"%s.%05d", filename, myid);
if ((file = fopen(new_filename, "r")) == NULL)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open input file %s\n");
return hypre_error_flag;
}
hypre_fscanf(file, "%b %b", &global_num_rows, &global_num_cols);
hypre_fscanf(file, "%d %d %d", &num_rows, &num_cols, &num_cols_offd);
hypre_fscanf(file, "%d %d", &num_nonzeros_diag, &num_nonzeros_offd);
row_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
for (i = 0; i <= num_procs; i++)
hypre_fscanf(file, "%b %b", &row_starts[i], &col_starts[i]);
big_base_i = row_starts[0];
big_base_j = col_starts[0];
base_i = (HYPRE_Int)row_starts[0];
base_j = (HYPRE_Int)col_starts[0];
equal = 1;
for (i = 0; i <= num_procs; i++)
{
row_starts[i] -= big_base_i;
col_starts[i] -= big_base_j;
if (row_starts[i] != col_starts[i]) equal = 0;
}
if (equal)
{
hypre_TFree(col_starts, HYPRE_MEMORY_HOST);
col_starts = row_starts;
}
matrix = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
row_starts, col_starts, num_cols_offd,
num_nonzeros_diag, num_nonzeros_offd);
hypre_ParCSRMatrixInitialize(matrix);
diag = hypre_ParCSRMatrixDiag(matrix);
offd = hypre_ParCSRMatrixOffd(matrix);
diag_data = hypre_CSRMatrixData(diag);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
offd_i = hypre_CSRMatrixI(offd);
if (num_nonzeros_offd)
{
offd_data = hypre_CSRMatrixData(offd);
offd_j = hypre_CSRMatrixJ(offd);
tmp_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST);
}
first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix);
first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix);
last_col_diag = first_col_diag+(HYPRE_BigInt)num_cols-1;
diag_cnt = 0;
offd_cnt = 0;
row_cnt = 0;
for (i = 0; i < num_nonzeros_diag+num_nonzeros_offd; i++)
{
/* read values */
hypre_fscanf(file, "%b %b %le", &I, &J, &data);
i2 = (HYPRE_Int)(I-big_base_i-first_row_index);
J -= big_base_j;
if (i2 > row_cnt)
{
diag_i[i2] = diag_cnt;
offd_i[i2] = offd_cnt;
row_cnt++;
}
if (J < first_col_diag || J > last_col_diag)
{
tmp_j[offd_cnt] = J;
offd_data[offd_cnt++] = data;
}
else
{
diag_j[diag_cnt] = (HYPRE_Int)(J - first_col_diag);
diag_data[diag_cnt++] = data;
}
}
diag_i[num_rows] = diag_cnt;
offd_i[num_rows] = offd_cnt;
fclose(file);
/* generate col_map_offd */
if (num_nonzeros_offd)
{
aux_offd_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_nonzeros_offd; i++)
aux_offd_j[i] = tmp_j[i];
hypre_BigQsort0(aux_offd_j,0,num_nonzeros_offd-1);
col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
col_map_offd[0] = aux_offd_j[0];
offd_cnt = 0;
for (i=1; i < num_nonzeros_offd; i++)
{
if (aux_offd_j[i] > col_map_offd[offd_cnt])
col_map_offd[++offd_cnt] = aux_offd_j[i];
}
for (i=0; i < num_nonzeros_offd; i++)
{
offd_j[i] = hypre_BigBinarySearch(col_map_offd, tmp_j[i], num_cols_offd);
}
hypre_TFree(aux_offd_j, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
}
/* move the diagonal element to the first position in each row */
for (i=0; i < num_rows; i++)
{
i_col = diag_i[i];
for (j=i_col; j < diag_i[i+1]; j++)
{
if (diag_j[j] == i)
{
diag_j[j] = diag_j[i_col];
data = diag_data[j];
diag_data[j] = diag_data[i_col];
diag_data[i_col] = data;
diag_j[i_col] = i;
break;
}
}
}
*base_i_ptr = base_i;
*base_j_ptr = base_j;
*matrix_ptr = matrix;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixGetLocalRange
* returns the row numbers of the rows stored on this processor.
* "End" is actually the row number of the last row on this processor.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixGetLocalRange( hypre_ParCSRMatrix *matrix,
HYPRE_BigInt *row_start,
HYPRE_BigInt *row_end,
HYPRE_BigInt *col_start,
HYPRE_BigInt *col_end )
{
HYPRE_Int my_id;
if (!matrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(matrix), &my_id );
#ifdef HYPRE_NO_GLOBAL_PARTITION
*row_start = hypre_ParCSRMatrixFirstRowIndex(matrix);
*row_end = hypre_ParCSRMatrixLastRowIndex(matrix);
*col_start = hypre_ParCSRMatrixFirstColDiag(matrix);
*col_end = hypre_ParCSRMatrixLastColDiag(matrix);
#else
*row_start = hypre_ParCSRMatrixRowStarts(matrix)[ my_id ];
*row_end = hypre_ParCSRMatrixRowStarts(matrix)[ my_id + 1 ]-1;
*col_start = hypre_ParCSRMatrixColStarts(matrix)[ my_id ];
*col_end = hypre_ParCSRMatrixColStarts(matrix)[ my_id + 1 ]-1;
#endif
return hypre_error_flag;
}
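/*--------------------------------------------------------------------------
 * Usage sketch (illustrative only): querying the locally owned row and
 * column ranges; both ends are inclusive, as noted above.
 *--------------------------------------------------------------------------*/
#if 0
static void example_parcsr_local_range( hypre_ParCSRMatrix *A )
{
   HYPRE_BigInt row_start, row_end, col_start, col_end;
   hypre_ParCSRMatrixGetLocalRange(A, &row_start, &row_end, &col_start, &col_end);
   /* this rank owns rows row_start..row_end and diag columns col_start..col_end */
}
#endif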
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixGetRow
* Returns global column indices and/or values for a given row in the global
* matrix. Global row number is used, but the row must be stored locally or
* an error is returned. This implementation copies from the two matrices that
* store the local data, storing them in the hypre_ParCSRMatrix structure.
* Only a single row can be accessed via this function at any one time; the
* corresponding RestoreRow function must be called to avoid leaking memory,
* and to be able to look at another row.
* Either one of col_ind and values can be left null, and those values will
* not be returned.
* All indices are returned in 0-based indexing, no matter what is used under
* the hood. EXCEPTION: currently this only works if the local CSR matrices
* use 0-based indexing.
* This code, semantics, implementation, etc., are all based on PETSc's hypre_MPI_AIJ
* matrix code, adjusted for our data and software structures.
* AJC 4/99.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixGetRowHost( hypre_ParCSRMatrix *mat,
HYPRE_BigInt row,
HYPRE_Int *size,
HYPRE_BigInt **col_ind,
HYPRE_Complex **values )
{
HYPRE_Int my_id;
HYPRE_BigInt row_start, row_end;
hypre_CSRMatrix *Aa;
hypre_CSRMatrix *Ba;
if (!mat)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat);
Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat);
if (hypre_ParCSRMatrixGetrowactive(mat))
{
return(-1);
}
hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(mat), &my_id );
hypre_ParCSRMatrixGetrowactive(mat) = 1;
#ifdef HYPRE_NO_GLOBAL_PARTITION
row_start = hypre_ParCSRMatrixFirstRowIndex(mat);
row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1;
#else
row_end = hypre_ParCSRMatrixRowStarts(mat)[ my_id + 1 ];
row_start = hypre_ParCSRMatrixRowStarts(mat)[ my_id ];
#endif
if (row < row_start || row >= row_end)
{
return(-1);
}
/* if buffer is not allocated and some information is requested,
allocate buffer */
if (!hypre_ParCSRMatrixRowvalues(mat) && ( col_ind || values ))
{
/*
allocate enough space to hold information from the longest row.
*/
HYPRE_Int max = 1,tmp;
HYPRE_Int i;
HYPRE_Int m = row_end - row_start;
for ( i = 0; i < m; i++ )
{
tmp = hypre_CSRMatrixI(Aa)[i+1] - hypre_CSRMatrixI(Aa)[i] +
hypre_CSRMatrixI(Ba)[i+1] - hypre_CSRMatrixI(Ba)[i];
if (max < tmp)
{
max = tmp;
}
}
hypre_ParCSRMatrixRowvalues(mat) =
(HYPRE_Complex *) hypre_CTAlloc(HYPRE_Complex, max, hypre_ParCSRMatrixMemoryLocation(mat));
hypre_ParCSRMatrixRowindices(mat) =
(HYPRE_BigInt *) hypre_CTAlloc(HYPRE_BigInt, max, hypre_ParCSRMatrixMemoryLocation(mat));
}
/* Copy from dual sequential matrices into buffer */
{
HYPRE_Complex *vworkA, *vworkB, *v_p;
HYPRE_Int i, *cworkA, *cworkB;
HYPRE_BigInt cstart = hypre_ParCSRMatrixFirstColDiag(mat);
HYPRE_Int nztot, nzA, nzB, lrow = (HYPRE_Int)(row-row_start);
HYPRE_BigInt *cmap, *idx_p;
nzA = hypre_CSRMatrixI(Aa)[lrow+1] - hypre_CSRMatrixI(Aa)[lrow];
cworkA = &( hypre_CSRMatrixJ(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] );
vworkA = &( hypre_CSRMatrixData(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] );
nzB = hypre_CSRMatrixI(Ba)[lrow+1] - hypre_CSRMatrixI(Ba)[lrow];
cworkB = &( hypre_CSRMatrixJ(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] );
vworkB = &( hypre_CSRMatrixData(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] );
nztot = nzA + nzB;
cmap = hypre_ParCSRMatrixColMapOffd(mat);
if (values || col_ind)
{
if (nztot)
{
/* Sort by increasing column numbers, assuming A and B already sorted */
HYPRE_Int imark = -1;
if (values)
{
*values = v_p = hypre_ParCSRMatrixRowvalues(mat);
for ( i = 0; i < nzB; i++ )
{
if (cmap[cworkB[i]] < cstart)
{
v_p[i] = vworkB[i];
}
else
{
break;
}
}
imark = i;
for ( i = 0; i < nzA; i++ )
{
v_p[imark+i] = vworkA[i];
}
for ( i = imark; i < nzB; i++ )
{
v_p[nzA+i] = vworkB[i];
}
}
if (col_ind)
{
*col_ind = idx_p = hypre_ParCSRMatrixRowindices(mat);
if (imark > -1)
{
for ( i = 0; i < imark; i++ )
{
idx_p[i] = cmap[cworkB[i]];
}
}
else
{
for ( i = 0; i < nzB; i++ )
{
if (cmap[cworkB[i]] < cstart)
{
idx_p[i] = cmap[cworkB[i]];
}
else
{
break;
}
}
imark = i;
}
for ( i = 0; i < nzA; i++ )
{
idx_p[imark+i] = cstart + cworkA[i];
}
for ( i = imark; i < nzB; i++ )
{
idx_p[nzA+i] = cmap[cworkB[i]];
}
}
}
else
{
if (col_ind)
{
*col_ind = 0;
}
if (values)
{
*values = 0;
}
}
}
*size = nztot;
} /* End of copy */
return hypre_error_flag;
}
HYPRE_Int
hypre_ParCSRMatrixGetRow( hypre_ParCSRMatrix *mat,
HYPRE_BigInt row,
HYPRE_Int *size,
HYPRE_BigInt **col_ind,
HYPRE_Complex **values )
{
#if defined(HYPRE_USING_CUDA)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(mat) );
if (exec == HYPRE_EXEC_DEVICE)
{
return hypre_ParCSRMatrixGetRowDevice(mat, row, size, col_ind, values);
}
else
#endif
{
return hypre_ParCSRMatrixGetRowHost(mat, row, size, col_ind, values);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixRestoreRow
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixRestoreRow( hypre_ParCSRMatrix *matrix,
HYPRE_BigInt row,
HYPRE_Int *size,
HYPRE_BigInt **col_ind,
HYPRE_Complex **values )
{
if (!hypre_ParCSRMatrixGetrowactive(matrix))
{
hypre_error(HYPRE_ERROR_GENERIC);
return hypre_error_flag;
}
hypre_ParCSRMatrixGetrowactive(matrix) = 0;
return hypre_error_flag;
}
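/*--------------------------------------------------------------------------
 * Usage sketch (illustrative only): visiting every locally owned row via
 * the GetRow/RestoreRow pair. RestoreRow must be called before the next
 * GetRow, as explained in the GetRow comment above.
 *--------------------------------------------------------------------------*/
#if 0
static void example_parcsr_get_rows( hypre_ParCSRMatrix *A )
{
   HYPRE_BigInt   row_start, row_end, col_start, col_end, row;
   HYPRE_Int      size;
   HYPRE_BigInt  *cols;
   HYPRE_Complex *vals;
   hypre_ParCSRMatrixGetLocalRange(A, &row_start, &row_end, &col_start, &col_end);
   for (row = row_start; row <= row_end; row++)
   {
      hypre_ParCSRMatrixGetRow(A, row, &size, &cols, &vals);
      /* ... use size, cols[0..size-1] and vals[0..size-1] here ... */
      hypre_ParCSRMatrixRestoreRow(A, row, &size, &cols, &vals);
   }
}
#endif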
/*--------------------------------------------------------------------------
* hypre_CSRMatrixToParCSRMatrix:
*
* Generates a ParCSRMatrix distributed across the processors in comm
* from a CSRMatrix on proc 0 .
*
*--------------------------------------------------------------------------*/
hypre_ParCSRMatrix *
hypre_CSRMatrixToParCSRMatrix( MPI_Comm comm,
hypre_CSRMatrix *A,
HYPRE_BigInt *global_row_starts,
HYPRE_BigInt *global_col_starts )
{
hypre_ParCSRMatrix *parcsr_A;
HYPRE_BigInt *global_data;
HYPRE_BigInt global_size;
HYPRE_BigInt global_num_rows;
HYPRE_BigInt global_num_cols;
HYPRE_Int num_procs, my_id;
HYPRE_Int *num_rows_proc;
HYPRE_Int *num_nonzeros_proc;
HYPRE_BigInt *row_starts = NULL;
HYPRE_BigInt *col_starts = NULL;
hypre_CSRMatrix *local_A;
HYPRE_Complex *A_data;
HYPRE_Int *A_i;
HYPRE_Int *A_j;
hypre_MPI_Request *requests;
hypre_MPI_Status *status, status0;
hypre_MPI_Datatype *csr_matrix_datatypes;
#ifdef HYPRE_NO_GLOBAL_PARTITION
HYPRE_Int free_global_row_starts = 0;
HYPRE_Int free_global_col_starts = 0;
#endif
HYPRE_Int total_size;
HYPRE_BigInt first_col_diag;
HYPRE_BigInt last_col_diag;
HYPRE_Int num_rows;
HYPRE_Int num_nonzeros;
HYPRE_Int i, ind;
hypre_MPI_Comm_rank(comm, &my_id);
hypre_MPI_Comm_size(comm, &num_procs);
total_size = 4;
#ifdef HYPRE_NO_GLOBAL_PARTITION
if (my_id == 0)
{
total_size += 2*(num_procs + 1);
}
#else
total_size += 2*(num_procs + 1);
#endif
global_data = hypre_CTAlloc(HYPRE_BigInt, total_size, HYPRE_MEMORY_HOST);
if (my_id == 0)
{
global_size = 3;
if (global_row_starts)
{
if (global_col_starts)
{
if (global_col_starts != global_row_starts)
{
/* contains code for what to expect,
if 0: global_row_starts = global_col_starts, only global_row_starts given
if 1: only global_row_starts given, global_col_starts = NULL
if 2: both global_row_starts and global_col_starts given
if 3: only global_col_starts given, global_row_starts = NULL */
global_data[3] = 2;
global_size += (HYPRE_BigInt) (2*(num_procs + 1) + 1);
for (i = 0; i < (num_procs + 1); i++)
{
global_data[i+4] = global_row_starts[i];
}
for (i = 0; i < (num_procs + 1); i++)
{
global_data[i+num_procs+5] = global_col_starts[i];
}
}
else
{
global_data[3] = 0;
global_size += (HYPRE_BigInt) ((num_procs + 1) + 1);
for (i = 0; i < (num_procs + 1); i++)
{
global_data[i+4] = global_row_starts[i];
}
}
}
else
{
global_data[3] = 1;
global_size += (HYPRE_BigInt) ((num_procs + 1) + 1);
for (i = 0; i < (num_procs + 1); i++)
{
global_data[i+4] = global_row_starts[i];
}
}
}
else
{
if (global_col_starts)
{
global_data[3] = 3;
global_size += (HYPRE_BigInt) ((num_procs + 1) + 1);
for (i = 0; i < (num_procs + 1); i++)
{
global_data[i+4] = global_col_starts[i];
}
}
}
global_data[0] = (HYPRE_BigInt) hypre_CSRMatrixNumRows(A);
global_data[1] = (HYPRE_BigInt) hypre_CSRMatrixNumCols(A);
global_data[2] = global_size;
A_data = hypre_CSRMatrixData(A);
A_i = hypre_CSRMatrixI(A);
A_j = hypre_CSRMatrixJ(A);
}
hypre_MPI_Bcast(global_data, 3, HYPRE_MPI_BIG_INT, 0, comm);
global_num_rows = global_data[0];
global_num_cols = global_data[1];
global_size = global_data[2];
if (global_size > 3)
{
#ifdef HYPRE_NO_GLOBAL_PARTITION
HYPRE_Int send_start;
if (global_data[3] == 2)
{
row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
send_start = 4;
hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
&row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm);
send_start = 5;
hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
&row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm);
send_start = 4 + (num_procs + 1);
hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
&col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm);
send_start = 5 + (num_procs + 1);
hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
&col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm);
}
else if ((global_data[3] == 0) || (global_data[3] == 1))
{
row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
send_start = 4;
hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
&row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm);
send_start = 5;
hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
&row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm);
if (global_data[3] == 0)
{
col_starts = row_starts;
}
}
else
{
col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
send_start = 4;
hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
&col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm);
send_start = 5;
hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
&col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm);
}
#else
hypre_MPI_Bcast(&global_data[3], (global_size - 3), HYPRE_MPI_BIG_INT, 0, comm);
if (my_id)
{
if (global_data[3] < 3)
{
row_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
for (i = 0; i < num_procs+1; i++)
{
row_starts[i] = global_data[i+4];
}
if (global_data[3] == 0)
{
col_starts = row_starts;
}
else if (global_data[3] == 2)
{
col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
for (i = 0; i < num_procs+1; i++)
{
col_starts[i] = global_data[i+num_procs+5];
}
}
}
else
{
col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
for (i = 0; i< num_procs+1; i++)
{
col_starts[i] = global_data[i+4];
}
}
}
else
{
row_starts = global_row_starts;
col_starts = global_col_starts;
}
#endif
}
hypre_TFree(global_data, HYPRE_MEMORY_HOST);
// Create ParCSR matrix
parcsr_A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
row_starts, col_starts, 0, 0, 0);
// Allocate memory for building ParCSR matrix
num_rows_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
num_nonzeros_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
if (my_id == 0)
{
#ifdef HYPRE_NO_GLOBAL_PARTITION
if (!global_row_starts)
{
hypre_GeneratePartitioning(global_num_rows, num_procs, &global_row_starts);
free_global_row_starts = 1;
}
if (!global_col_starts)
{
hypre_GeneratePartitioning(global_num_rows, num_procs, &global_col_starts);
free_global_col_starts = 1;
}
#else
if (!global_row_starts)
{
global_row_starts = hypre_ParCSRMatrixRowStarts(parcsr_A);
}
if (!global_col_starts)
{
global_col_starts = hypre_ParCSRMatrixColStarts(parcsr_A);
}
#endif
for (i = 0; i < num_procs; i++)
{
num_rows_proc[i] = (HYPRE_Int) (global_row_starts[i+1] - global_row_starts[i]);
num_nonzeros_proc[i] = A_i[(HYPRE_Int)global_row_starts[i+1]] -
A_i[(HYPRE_Int)global_row_starts[i]];
}
//num_nonzeros_proc[num_procs-1] = A_i[(HYPRE_Int)global_num_rows] - A_i[(HYPRE_Int)row_starts[num_procs-1]];
}
hypre_MPI_Scatter(num_rows_proc, 1, HYPRE_MPI_INT, &num_rows, 1, HYPRE_MPI_INT, 0, comm);
hypre_MPI_Scatter(num_nonzeros_proc, 1, HYPRE_MPI_INT, &num_nonzeros, 1, HYPRE_MPI_INT, 0, comm);
/* RL: this is not correct: (HYPRE_Int) global_num_cols */
local_A = hypre_CSRMatrixCreate(num_rows, (HYPRE_Int) global_num_cols, num_nonzeros);
csr_matrix_datatypes = hypre_CTAlloc(hypre_MPI_Datatype, num_procs, HYPRE_MEMORY_HOST);
if (my_id == 0)
{
requests = hypre_CTAlloc(hypre_MPI_Request, num_procs-1, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_procs-1, HYPRE_MEMORY_HOST);
for (i = 1; i < num_procs; i++)
{
ind = A_i[(HYPRE_Int) global_row_starts[i]];
hypre_BuildCSRMatrixMPIDataType(num_nonzeros_proc[i],
num_rows_proc[i],
&A_data[ind],
&A_i[(HYPRE_Int) global_row_starts[i]],
&A_j[ind],
&csr_matrix_datatypes[i]);
hypre_MPI_Isend(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[i], i, 0, comm,
&requests[i-1]);
hypre_MPI_Type_free(&csr_matrix_datatypes[i]);
}
hypre_CSRMatrixData(local_A) = A_data;
hypre_CSRMatrixI(local_A) = A_i;
hypre_CSRMatrixJ(local_A) = A_j;
hypre_CSRMatrixOwnsData(local_A) = 0;
hypre_MPI_Waitall(num_procs-1, requests, status);
hypre_TFree(requests, HYPRE_MEMORY_HOST);
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(num_rows_proc, HYPRE_MEMORY_HOST);
hypre_TFree(num_nonzeros_proc, HYPRE_MEMORY_HOST);
#ifdef HYPRE_NO_GLOBAL_PARTITION
if (free_global_row_starts)
{
hypre_TFree(global_row_starts, HYPRE_MEMORY_HOST);
}
if (free_global_col_starts)
{
hypre_TFree(global_col_starts, HYPRE_MEMORY_HOST);
}
#endif
}
else
{
hypre_CSRMatrixInitialize(local_A);
hypre_BuildCSRMatrixMPIDataType(num_nonzeros,
num_rows,
hypre_CSRMatrixData(local_A),
hypre_CSRMatrixI(local_A),
hypre_CSRMatrixJ(local_A),
&csr_matrix_datatypes[0]);
hypre_MPI_Recv(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[0], 0, 0, comm, &status0);
hypre_MPI_Type_free(csr_matrix_datatypes);
}
first_col_diag = hypre_ParCSRMatrixFirstColDiag(parcsr_A);
last_col_diag = hypre_ParCSRMatrixLastColDiag(parcsr_A);
GenerateDiagAndOffd(local_A, parcsr_A, first_col_diag, last_col_diag);
/* set pointers back to NULL before destroying */
if (my_id == 0)
{
hypre_CSRMatrixData(local_A) = NULL;
hypre_CSRMatrixI(local_A) = NULL;
hypre_CSRMatrixJ(local_A) = NULL;
}
hypre_CSRMatrixDestroy(local_A);
hypre_TFree(csr_matrix_datatypes, HYPRE_MEMORY_HOST);
return parcsr_A;
}
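/*--------------------------------------------------------------------------
 * Usage sketch (illustrative only): distributing a sequential CSR matrix
 * that lives on rank 0 across the communicator. A_serial is a hypothetical,
 * fully assembled hypre_CSRMatrix on rank 0 (it is ignored on other ranks,
 * per the code above); passing NULL partitionings lets the routine
 * generate them.
 *--------------------------------------------------------------------------*/
#if 0
static hypre_ParCSRMatrix *
example_distribute( MPI_Comm comm, hypre_CSRMatrix *A_serial )
{
   return hypre_CSRMatrixToParCSRMatrix(comm, A_serial, NULL, NULL);
}
#endif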
/* RL: XXX this is not a scalable routine, see `marker' therein */
HYPRE_Int
GenerateDiagAndOffd(hypre_CSRMatrix *A,
hypre_ParCSRMatrix *matrix,
HYPRE_BigInt first_col_diag,
HYPRE_BigInt last_col_diag)
{
HYPRE_Int i, j;
HYPRE_Int jo, jd;
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);
HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
HYPRE_Complex *a_data = hypre_CSRMatrixData(A);
HYPRE_Int *a_i = hypre_CSRMatrixI(A);
/*RL: XXX FIXME if A spans global column space, the following a_j should be bigJ */
HYPRE_Int *a_j = hypre_CSRMatrixJ(A);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(matrix);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(matrix);
HYPRE_BigInt *col_map_offd;
HYPRE_Complex *diag_data, *offd_data;
HYPRE_Int *diag_i, *offd_i;
HYPRE_Int *diag_j, *offd_j;
HYPRE_Int *marker;
HYPRE_Int num_cols_diag, num_cols_offd;
HYPRE_Int first_elmt = a_i[0];
HYPRE_Int num_nonzeros = a_i[num_rows]-first_elmt;
HYPRE_Int counter;
num_cols_diag = (HYPRE_Int)(last_col_diag - first_col_diag +1);
num_cols_offd = 0;
HYPRE_MemoryLocation memory_location = hypre_CSRMatrixMemoryLocation(A);
if (num_cols - num_cols_diag)
{
hypre_CSRMatrixInitialize_v2(diag, 0, memory_location);
diag_i = hypre_CSRMatrixI(diag);
hypre_CSRMatrixInitialize_v2(offd, 0, memory_location);
offd_i = hypre_CSRMatrixI(offd);
marker = hypre_CTAlloc(HYPRE_Int, num_cols, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols; i++)
{
marker[i] = 0;
}
jo = 0;
jd = 0;
for (i = 0; i < num_rows; i++)
{
offd_i[i] = jo;
diag_i[i] = jd;
for (j = a_i[i]-first_elmt; j < a_i[i+1]-first_elmt; j++)
{
if (a_j[j] < first_col_diag || a_j[j] > last_col_diag)
{
if (!marker[a_j[j]])
{
marker[a_j[j]] = 1;
num_cols_offd++;
}
jo++;
}
else
{
jd++;
}
}
}
offd_i[num_rows] = jo;
diag_i[num_rows] = jd;
hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
counter = 0;
for (i = 0; i < num_cols; i++)
{
if (marker[i])
{
col_map_offd[counter] = (HYPRE_BigInt) i;
marker[i] = counter;
counter++;
}
}
hypre_CSRMatrixNumNonzeros(diag) = jd;
hypre_CSRMatrixInitialize(diag);
diag_data = hypre_CSRMatrixData(diag);
diag_j = hypre_CSRMatrixJ(diag);
hypre_CSRMatrixNumNonzeros(offd) = jo;
hypre_CSRMatrixNumCols(offd) = num_cols_offd;
hypre_CSRMatrixInitialize(offd);
offd_data = hypre_CSRMatrixData(offd);
offd_j = hypre_CSRMatrixJ(offd);
jo = 0;
jd = 0;
for (i=0; i < num_rows; i++)
{
for (j=a_i[i]-first_elmt; j < a_i[i+1]-first_elmt; j++)
{
if (a_j[j] < (HYPRE_Int)first_col_diag || a_j[j] > (HYPRE_Int)last_col_diag)
{
offd_data[jo] = a_data[j];
offd_j[jo++] = marker[a_j[j]];
}
else
{
diag_data[jd] = a_data[j];
diag_j[jd++] = (HYPRE_Int)(a_j[j]-first_col_diag);
}
}
}
hypre_TFree(marker, HYPRE_MEMORY_HOST);
}
else
{
hypre_CSRMatrixNumNonzeros(diag) = num_nonzeros;
hypre_CSRMatrixInitialize(diag);
diag_data = hypre_CSRMatrixData(diag);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
for (i=0; i < num_nonzeros; i++)
{
diag_data[i] = a_data[i];
diag_j[i] = a_j[i];
}
offd_i = hypre_CTAlloc(HYPRE_Int, num_rows+1, HYPRE_MEMORY_HOST);
for (i=0; i < num_rows+1; i++)
{
diag_i[i] = a_i[i];
offd_i[i] = 0;
}
hypre_CSRMatrixNumCols(offd) = 0;
hypre_CSRMatrixI(offd) = offd_i;
}
return hypre_error_flag;
}
hypre_CSRMatrix *
hypre_MergeDiagAndOffd(hypre_ParCSRMatrix *par_matrix)
{
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
hypre_CSRMatrix *matrix;
HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(par_matrix);
HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(par_matrix);
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(diag);
HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag);
HYPRE_Complex *diag_data = hypre_CSRMatrixData(diag);
HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd);
HYPRE_Complex *offd_data = hypre_CSRMatrixData(offd);
HYPRE_Int *matrix_i;
HYPRE_BigInt *matrix_j;
HYPRE_Complex *matrix_data;
HYPRE_Int num_nonzeros, i, j;
HYPRE_Int count;
HYPRE_Int size, rest, num_threads, ii;
HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(par_matrix);
num_nonzeros = diag_i[num_rows] + offd_i[num_rows];
matrix = hypre_CSRMatrixCreate(num_rows,num_cols,num_nonzeros);
hypre_CSRMatrixMemoryLocation(matrix) = memory_location;
hypre_CSRMatrixBigInitialize(matrix);
matrix_i = hypre_CSRMatrixI(matrix);
matrix_j = hypre_CSRMatrixBigJ(matrix);
matrix_data = hypre_CSRMatrixData(matrix);
num_threads = hypre_NumThreads();
size = num_rows/num_threads;
rest = num_rows - size*num_threads;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii, i, j, count) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < num_threads; ii++)
{
HYPRE_Int ns, ne;
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
count = diag_i[ns]+offd_i[ns];;
for (i = ns; i < ne; i++)
{
matrix_i[i] = count;
for (j=diag_i[i]; j < diag_i[i+1]; j++)
{
matrix_data[count] = diag_data[j];
matrix_j[count++] = (HYPRE_BigInt)diag_j[j]+first_col_diag;
}
for (j=offd_i[i]; j < offd_i[i+1]; j++)
{
matrix_data[count] = offd_data[j];
matrix_j[count++] = col_map_offd[offd_j[j]];
}
}
} /* end parallel region */
matrix_i[num_rows] = num_nonzeros;
return matrix;
}
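/*--------------------------------------------------------------------------
 * Usage sketch (illustrative only): merging diag and offd into a single
 * local CSR with global column indices, then converting the big-int column
 * array back to HYPRE_Int (assumed safe only when the global column
 * indices fit in a HYPRE_Int).
 *--------------------------------------------------------------------------*/
#if 0
static void example_merge( hypre_ParCSRMatrix *A )
{
   hypre_CSRMatrix *local = hypre_MergeDiagAndOffd(A);
   hypre_CSRMatrixBigJtoJ(local);  /* copies big_j into j, as done below */
   hypre_CSRMatrixDestroy(local);
}
#endif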
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixToCSRMatrixAll:
* generates a CSRMatrix from a ParCSRMatrix on all processors that have
* parts of the ParCSRMatrix
* Warning: this only works for a ParCSRMatrix that is smaller than 2^31-1
*--------------------------------------------------------------------------*/
hypre_CSRMatrix *
hypre_ParCSRMatrixToCSRMatrixAll(hypre_ParCSRMatrix *par_matrix)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(par_matrix);
hypre_CSRMatrix *matrix;
hypre_CSRMatrix *local_matrix;
HYPRE_Int num_rows = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumRows(par_matrix);
HYPRE_Int num_cols = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumCols(par_matrix);
#ifndef HYPRE_NO_GLOBAL_PARTITION
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(par_matrix);
#endif
HYPRE_Int *matrix_i;
HYPRE_Int *matrix_j;
HYPRE_Complex *matrix_data;
HYPRE_Int *local_matrix_i;
HYPRE_Int *local_matrix_j;
HYPRE_Complex *local_matrix_data;
HYPRE_Int i, j;
HYPRE_Int local_num_rows;
HYPRE_Int local_num_nonzeros;
HYPRE_Int num_nonzeros;
HYPRE_Int num_data;
HYPRE_Int num_requests;
HYPRE_Int vec_len, offset;
HYPRE_Int start_index;
HYPRE_Int proc_id;
HYPRE_Int num_procs, my_id;
HYPRE_Int num_types;
HYPRE_Int *used_procs;
hypre_MPI_Request *requests;
hypre_MPI_Status *status;
#ifdef HYPRE_NO_GLOBAL_PARTITION
HYPRE_Int *new_vec_starts;
HYPRE_Int num_contacts;
HYPRE_Int contact_proc_list[1];
HYPRE_Int contact_send_buf[1];
HYPRE_Int contact_send_buf_starts[2];
HYPRE_Int max_response_size;
HYPRE_Int *response_recv_buf=NULL;
HYPRE_Int *response_recv_buf_starts = NULL;
hypre_DataExchangeResponse response_obj;
hypre_ProcListElements send_proc_obj;
HYPRE_Int *send_info = NULL;
hypre_MPI_Status status1;
HYPRE_Int count, tag1 = 11112, tag2 = 22223, tag3 = 33334;
HYPRE_Int start;
#endif
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
local_num_rows = (HYPRE_Int)(hypre_ParCSRMatrixLastRowIndex(par_matrix) -
hypre_ParCSRMatrixFirstRowIndex(par_matrix) + 1);
local_matrix = hypre_MergeDiagAndOffd(par_matrix); /* creates matrix */
hypre_CSRMatrixBigJtoJ(local_matrix); /* copies big_j to j */
local_matrix_i = hypre_CSRMatrixI(local_matrix);
local_matrix_j = hypre_CSRMatrixJ(local_matrix);
local_matrix_data = hypre_CSRMatrixData(local_matrix);
/* determine procs that have vector data and store their ids in used_procs */
/* we need to do a data exchange for this. If I own rows then I will contact
processor 0 with the endpoint of my local range */
if (local_num_rows > 0)
{
num_contacts = 1;
contact_proc_list[0] = 0;
contact_send_buf[0] = (HYPRE_Int)hypre_ParCSRMatrixLastRowIndex(par_matrix);
contact_send_buf_starts[0] = 0;
contact_send_buf_starts[1] = 1;
}
else
{
num_contacts = 0;
contact_send_buf_starts[0] = 0;
contact_send_buf_starts[1] = 0;
}
/*build the response object*/
/*send_proc_obj will be for saving info from contacts */
send_proc_obj.length = 0;
send_proc_obj.storage_length = 10;
send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts =
hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts[0] = 0;
send_proc_obj.element_storage_length = 10;
send_proc_obj.elements =
hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);
max_response_size = 0; /* each response is null */
response_obj.fill_response = hypre_FillResponseParToCSRMatrix;
response_obj.data1 = NULL;
response_obj.data2 = &send_proc_obj; /*this is where we keep info from contacts*/
hypre_DataExchangeList(num_contacts,
contact_proc_list, contact_send_buf,
contact_send_buf_starts, sizeof(HYPRE_Int),
sizeof(HYPRE_Int), &response_obj,
max_response_size, 1,
comm, (void**) &response_recv_buf,
&response_recv_buf_starts);
/* now processor 0 should have a list of ranges for processors that have rows -
these are in send_proc_obj - it needs to create the new list of processors
and also an array of vec starts - and send these to those who own rows */
if (my_id)
{
if (local_num_rows)
{
/* look for a message from processor 0 */
hypre_MPI_Probe(0, tag1, comm, &status1);
hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count);
send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1);
/* now unpack */
num_types = send_info[0];
used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST);
for (i=1; i<= num_types; i++)
{
used_procs[i-1] = send_info[i];
}
for (i=num_types+1; i< count; i++)
{
new_vec_starts[i-num_types-1] = send_info[i];
}
}
else /* clean up and exit */
{
hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST);
if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST);
if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST);
if (hypre_CSRMatrixOwnsData(local_matrix))
hypre_CSRMatrixDestroy(local_matrix);
else
hypre_TFree(local_matrix, HYPRE_MEMORY_HOST);
return NULL;
}
}
else /* my_id ==0 */
{
num_types = send_proc_obj.length;
used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST);
new_vec_starts[0] = 0;
for (i=0; i< num_types; i++)
{
used_procs[i] = send_proc_obj.id[i];
new_vec_starts[i+1] = send_proc_obj.elements[i]+1;
}
hypre_qsort0(used_procs, 0, num_types-1);
hypre_qsort0(new_vec_starts, 0, num_types);
/*now we need to put into an array to send */
count = 2*num_types+2;
send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
send_info[0] = num_types;
for (i=1; i<= num_types; i++)
{
send_info[i] = used_procs[i-1];
}
for (i=num_types+1; i< count; i++)
{
send_info[i] = new_vec_starts[i-num_types-1];
}
requests = hypre_CTAlloc(hypre_MPI_Request, num_types, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_types, HYPRE_MEMORY_HOST);
/* don't send to myself - these are sorted so my id would be first*/
start = 0;
if (num_types && used_procs[0] == 0)
{
start = 1;
}
for (i=start; i < num_types; i++)
{
hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1,
comm, &requests[i-start]);
}
hypre_MPI_Waitall(num_types-start, requests, status);
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(requests, HYPRE_MEMORY_HOST);
}
/* clean up */
hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST);
hypre_TFree(send_info, HYPRE_MEMORY_HOST);
if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST);
if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST);
/* now proc 0 can exit if it has no rows */
if (!local_num_rows)
{
if (hypre_CSRMatrixOwnsData(local_matrix))
hypre_CSRMatrixDestroy(local_matrix);
else
hypre_TFree(local_matrix, HYPRE_MEMORY_HOST);
hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(used_procs, HYPRE_MEMORY_HOST);
return NULL;
}
/* everyone left has rows and knows: new_vec_starts, num_types, and used_procs */
/* this matrix should be rather small */
matrix_i = hypre_CTAlloc(HYPRE_Int, num_rows+1, HYPRE_MEMORY_HOST);
num_requests = 4*num_types;
requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);
/* exchange contents of local_matrix_i - here we are sending to ourself also*/
j = 0;
for (i = 0; i < num_types; i++)
{
proc_id = used_procs[i];
vec_len = (HYPRE_Int)(new_vec_starts[i+1] - new_vec_starts[i]);
hypre_MPI_Irecv(&matrix_i[new_vec_starts[i]+1], vec_len, HYPRE_MPI_INT,
proc_id, tag2, comm, &requests[j++]);
}
for (i = 0; i < num_types; i++)
{
proc_id = used_procs[i];
hypre_MPI_Isend(&local_matrix_i[1], local_num_rows, HYPRE_MPI_INT,
proc_id, tag2, comm, &requests[j++]);
}
hypre_MPI_Waitall(j, requests, status);
/* generate matrix_i from received data */
/* global numbering?*/
offset = matrix_i[new_vec_starts[1]];
for (i=1; i < num_types; i++)
{
for (j = new_vec_starts[i]; j < new_vec_starts[i+1]; j++)
matrix_i[j+1] += offset;
offset = matrix_i[new_vec_starts[i+1]];
}
num_nonzeros = matrix_i[num_rows];
matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros);
hypre_CSRMatrixMemoryLocation(matrix) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixI(matrix) = matrix_i;
hypre_CSRMatrixInitialize(matrix);
matrix_j = hypre_CSRMatrixJ(matrix);
matrix_data = hypre_CSRMatrixData(matrix);
/* generate datatypes for further data exchange and exchange remaining
data, i.e. column info and actual data */
j = 0;
for (i = 0; i < num_types; i++)
{
proc_id = used_procs[i];
start_index = matrix_i[(HYPRE_Int)new_vec_starts[i]];
num_data = matrix_i[(HYPRE_Int)new_vec_starts[i+1]] - start_index;
hypre_MPI_Irecv(&matrix_data[start_index], num_data, HYPRE_MPI_COMPLEX,
used_procs[i], tag1, comm, &requests[j++]);
hypre_MPI_Irecv(&matrix_j[start_index], num_data, HYPRE_MPI_INT,
used_procs[i], tag3, comm, &requests[j++]);
}
local_num_nonzeros = local_matrix_i[local_num_rows];
for (i=0; i < num_types; i++)
{
hypre_MPI_Isend(local_matrix_data, local_num_nonzeros, HYPRE_MPI_COMPLEX,
used_procs[i], tag1, comm, &requests[j++]);
hypre_MPI_Isend(local_matrix_j, local_num_nonzeros, HYPRE_MPI_INT,
used_procs[i], tag3, comm, &requests[j++]);
}
hypre_MPI_Waitall(num_requests, requests, status);
hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST);
#else
local_num_rows = (HYPRE_Int)(row_starts[my_id+1] - row_starts[my_id]);
/* if my_id contains no data, return NULL */
if (!local_num_rows)
return NULL;
local_matrix = hypre_MergeDiagAndOffd(par_matrix);
hypre_CSRMatrixBigJtoJ(local_matrix); /* copies big_j to j */
local_matrix_i = hypre_CSRMatrixI(local_matrix);
local_matrix_j = hypre_CSRMatrixJ(local_matrix);
local_matrix_data = hypre_CSRMatrixData(local_matrix);
matrix_i = hypre_CTAlloc(HYPRE_Int, num_rows+1, HYPRE_MEMORY_HOST);
/* determine procs that have vector data and store their ids in used_procs */
num_types = 0;
for (i=0; i < num_procs; i++)
if (row_starts[i+1]-row_starts[i] && i-my_id)
num_types++;
num_requests = 4*num_types;
used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
j = 0;
for (i=0; i < num_procs; i++)
if (row_starts[i+1]-row_starts[i] && i-my_id)
used_procs[j++] = i;
requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);
/* data_type = hypre_CTAlloc(hypre_MPI_Datatype, num_types+1); */
/* exchange contents of local_matrix_i */
j = 0;
for (i = 0; i < num_types; i++)
{
proc_id = used_procs[i];
vec_len = (HYPRE_Int)(row_starts[proc_id+1] - row_starts[proc_id]);
hypre_MPI_Irecv(&matrix_i[(HYPRE_Int)row_starts[proc_id]+1], vec_len, HYPRE_MPI_INT,
proc_id, 0, comm, &requests[j++]);
}
for (i = 0; i < num_types; i++)
{
proc_id = used_procs[i];
hypre_MPI_Isend(&local_matrix_i[1], local_num_rows, HYPRE_MPI_INT,
proc_id, 0, comm, &requests[j++]);
}
vec_len = (HYPRE_Int)(row_starts[my_id+1] - row_starts[my_id]);
for (i=1; i <= vec_len; i++)
matrix_i[(HYPRE_Int)row_starts[my_id]+i] = local_matrix_i[i];
hypre_MPI_Waitall(j, requests, status);
/* generate matrix_i from received data */
offset = matrix_i[(HYPRE_Int)row_starts[1]];
for (i=1; i < num_procs; i++)
{
for (j = (HYPRE_Int)row_starts[i]; j < (HYPRE_Int)row_starts[i+1]; j++)
matrix_i[j+1] += offset;
offset = matrix_i[(HYPRE_Int)row_starts[i+1]];
}
num_nonzeros = matrix_i[num_rows];
matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros);
hypre_CSRMatrixMemoryLocation(matrix) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixI(matrix) = matrix_i;
hypre_CSRMatrixInitialize(matrix);
matrix_j = hypre_CSRMatrixJ(matrix);
matrix_data = hypre_CSRMatrixData(matrix);
/* generate datatypes for further data exchange and exchange remaining
data, i.e. column info and actual data */
j = 0;
for (i = 0; i < num_types; i++)
{
proc_id = used_procs[i];
start_index = matrix_i[(HYPRE_Int)row_starts[proc_id]];
num_data = matrix_i[(HYPRE_Int)row_starts[proc_id+1]] - start_index;
hypre_MPI_Irecv(&matrix_data[start_index], num_data, HYPRE_MPI_COMPLEX,
used_procs[i], 0, comm, &requests[j++]);
hypre_MPI_Irecv(&matrix_j[start_index], num_data, HYPRE_MPI_INT,
used_procs[i], 0, comm, &requests[j++]);
}
local_num_nonzeros = local_matrix_i[local_num_rows];
for (i=0; i < num_types; i++)
{
hypre_MPI_Isend(local_matrix_data, local_num_nonzeros, HYPRE_MPI_COMPLEX,
used_procs[i], 0, comm, &requests[j++]);
hypre_MPI_Isend(local_matrix_j, local_num_nonzeros, HYPRE_MPI_INT,
used_procs[i], 0, comm, &requests[j++]);
}
start_index = matrix_i[(HYPRE_Int)row_starts[my_id]];
for (i=0; i < local_num_nonzeros; i++)
{
matrix_j[start_index+i] = local_matrix_j[i];
matrix_data[start_index+i] = local_matrix_data[i];
}
hypre_MPI_Waitall(num_requests, requests, status);
#endif
if (hypre_CSRMatrixOwnsData(local_matrix))
hypre_CSRMatrixDestroy(local_matrix);
else
hypre_TFree(local_matrix, HYPRE_MEMORY_HOST);
if (num_requests)
{
hypre_TFree(requests, HYPRE_MEMORY_HOST);
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(used_procs, HYPRE_MEMORY_HOST);
}
return matrix;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixCopy,
 * copies A to B,
* if copy_data = 0, only the structure of A is copied to B
* the routine does not check whether the dimensions of A and B are compatible
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixCopy( hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *B,
HYPRE_Int copy_data )
{
hypre_CSRMatrix *A_diag;
hypre_CSRMatrix *A_offd;
HYPRE_BigInt *col_map_offd_A;
hypre_CSRMatrix *B_diag;
hypre_CSRMatrix *B_offd;
HYPRE_BigInt *col_map_offd_B;
HYPRE_Int num_cols_offd_A;
HYPRE_Int num_cols_offd_B;
if (!A)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (!B)
{
      hypre_error_in_arg(2);
return hypre_error_flag;
}
A_diag = hypre_ParCSRMatrixDiag(A);
A_offd = hypre_ParCSRMatrixOffd(A);
B_diag = hypre_ParCSRMatrixDiag(B);
B_offd = hypre_ParCSRMatrixOffd(B);
num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
hypre_assert(num_cols_offd_A == num_cols_offd_B);
col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
hypre_CSRMatrixCopy(A_diag, B_diag, copy_data);
hypre_CSRMatrixCopy(A_offd, B_offd, copy_data);
/* should not happen if B has been initialized */
if (num_cols_offd_B && col_map_offd_B == NULL)
{
col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B;
}
hypre_TMemcpy(col_map_offd_B, col_map_offd_A, HYPRE_BigInt, num_cols_offd_B,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
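
/* Minimal usage sketch (not part of hypre, kept out of the build): assuming A
 * and B are already created ParCSR matrices with matching row/column
 * partitions and B has been initialized, the copy routine above can be
 * called as follows. */
#if 0
static void example_parcsr_copy(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B)
{
   hypre_ParCSRMatrixCopy(A, B, 1); /* copy structure and numerical values */
   hypre_ParCSRMatrixCopy(A, B, 0); /* copy the structure only */
}
#endif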
/*--------------------------------------------------------------------
* hypre_FillResponseParToCSRMatrix
* Fill response function for determining the send processors
* data exchange
*--------------------------------------------------------------------*/
HYPRE_Int
hypre_FillResponseParToCSRMatrix( void *p_recv_contact_buf,
HYPRE_Int contact_size,
HYPRE_Int contact_proc,
void *ro,
MPI_Comm comm,
void **p_send_response_buf,
HYPRE_Int *response_message_size )
{
HYPRE_Int myid;
HYPRE_Int i, index, count, elength;
HYPRE_BigInt *recv_contact_buf = (HYPRE_BigInt * ) p_recv_contact_buf;
hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*)ro;
hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements*)response_obj->data2;
hypre_MPI_Comm_rank(comm, &myid );
/*check to see if we need to allocate more space in send_proc_obj for ids*/
if (send_proc_obj->length == send_proc_obj->storage_length)
{
send_proc_obj->storage_length +=10; /*add space for 10 more processors*/
send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int,
send_proc_obj->storage_length, HYPRE_MEMORY_HOST);
send_proc_obj->vec_starts =
hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int,
send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
}
/*initialize*/
count = send_proc_obj->length;
index = send_proc_obj->vec_starts[count]; /*this is the number of elements*/
/*send proc*/
send_proc_obj->id[count] = contact_proc;
/*do we need more storage for the elements?*/
if (send_proc_obj->element_storage_length < index + contact_size)
{
elength = hypre_max(contact_size, 10);
elength += index;
send_proc_obj->elements = hypre_TReAlloc(send_proc_obj->elements,
HYPRE_BigInt, elength, HYPRE_MEMORY_HOST);
send_proc_obj->element_storage_length = elength;
}
/*populate send_proc_obj*/
for (i=0; i< contact_size; i++)
{
send_proc_obj->elements[index++] = recv_contact_buf[i];
}
send_proc_obj->vec_starts[count+1] = index;
send_proc_obj->length++;
/*output - no message to return (confirmation) */
*response_message_size = 0;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixUnion
* Creates and returns a new matrix whose elements are the union of A and B.
* Data is not copied, only structural information is created.
* A and B must have the same communicator, numbers and distributions of rows
* and columns (they can differ in which row-column pairs are nonzero, thus
* in which columns are in a offd block)
*--------------------------------------------------------------------------*/
hypre_ParCSRMatrix * hypre_ParCSRMatrixUnion( hypre_ParCSRMatrix * A,
hypre_ParCSRMatrix * B )
{
hypre_ParCSRMatrix * C;
HYPRE_BigInt * col_map_offd_C = NULL;
HYPRE_Int num_procs, my_id, p;
MPI_Comm comm = hypre_ParCSRMatrixComm( A );
hypre_MPI_Comm_rank(comm,&my_id);
hypre_MPI_Comm_size(comm,&num_procs);
C = hypre_CTAlloc( hypre_ParCSRMatrix, 1 , HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixComm( C ) = hypre_ParCSRMatrixComm( A );
hypre_ParCSRMatrixGlobalNumRows( C ) = hypre_ParCSRMatrixGlobalNumRows( A );
hypre_ParCSRMatrixGlobalNumCols( C ) = hypre_ParCSRMatrixGlobalNumCols( A );
hypre_ParCSRMatrixFirstRowIndex( C ) = hypre_ParCSRMatrixFirstRowIndex( A );
hypre_assert( hypre_ParCSRMatrixFirstRowIndex( B )
== hypre_ParCSRMatrixFirstRowIndex( A ) );
hypre_ParCSRMatrixRowStarts( C ) = hypre_ParCSRMatrixRowStarts( A );
hypre_ParCSRMatrixOwnsRowStarts( C ) = 0;
hypre_ParCSRMatrixColStarts( C ) = hypre_ParCSRMatrixColStarts( A );
hypre_ParCSRMatrixOwnsColStarts( C ) = 0;
   for ( p=0; p<=num_procs; ++p )
      hypre_assert( hypre_ParCSRMatrixColStarts(A)[p]
                    == hypre_ParCSRMatrixColStarts(B)[p] );
hypre_ParCSRMatrixFirstColDiag( C ) = hypre_ParCSRMatrixFirstColDiag( A );
hypre_ParCSRMatrixLastRowIndex( C ) = hypre_ParCSRMatrixLastRowIndex( A );
hypre_ParCSRMatrixLastColDiag( C ) = hypre_ParCSRMatrixLastColDiag( A );
hypre_ParCSRMatrixDiag( C ) =
hypre_CSRMatrixUnion( hypre_ParCSRMatrixDiag(A), hypre_ParCSRMatrixDiag(B),
0, 0, 0 );
hypre_ParCSRMatrixOffd( C ) =
hypre_CSRMatrixUnion( hypre_ParCSRMatrixOffd(A), hypre_ParCSRMatrixOffd(B),
hypre_ParCSRMatrixColMapOffd(A),
hypre_ParCSRMatrixColMapOffd(B), &col_map_offd_C );
hypre_ParCSRMatrixColMapOffd( C ) = col_map_offd_C;
hypre_ParCSRMatrixCommPkg( C ) = NULL;
hypre_ParCSRMatrixCommPkgT( C ) = NULL;
hypre_ParCSRMatrixOwnsData( C ) = 1;
/* SetNumNonzeros, SetDNumNonzeros are global, need hypre_MPI_Allreduce.
I suspect, but don't know, that other parts of hypre do not assume that
the correct values have been set.
hypre_ParCSRMatrixSetNumNonzeros( C );
hypre_ParCSRMatrixSetDNumNonzeros( C );*/
hypre_ParCSRMatrixNumNonzeros( C ) = 0;
hypre_ParCSRMatrixDNumNonzeros( C ) = 0.0;
hypre_ParCSRMatrixRowindices( C ) = NULL;
hypre_ParCSRMatrixRowvalues( C ) = NULL;
hypre_ParCSRMatrixGetrowactive( C ) = 0;
return C;
}
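
/* Minimal usage sketch (not part of hypre, kept out of the build): given two
 * ParCSR matrices A and B with the same communicator and row/column
 * partitions, the union above can be formed and later freed by the caller;
 * only the structure of C is meaningful, its values are not set. */
#if 0
static hypre_ParCSRMatrix * example_parcsr_union(hypre_ParCSRMatrix *A,
                                                 hypre_ParCSRMatrix *B)
{
   hypre_ParCSRMatrix *C = hypre_ParCSRMatrixUnion(A, B);
   /* ... use the nonzero pattern of C ... */
   return C; /* destroy with hypre_ParCSRMatrixDestroy(C) when done */
}
#endif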
/* drop the entries that are not on the diagonal and smaller than
* its row norm: type 1: 1-norm, 2: 2-norm, -1: infinity norm */
HYPRE_Int
hypre_ParCSRMatrixDropSmallEntries( hypre_ParCSRMatrix *A,
HYPRE_Real tol,
HYPRE_Int type)
{
HYPRE_Int i, j, k, nnz_diag, nnz_offd, A_diag_i_i, A_offd_i_i;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
/* diag part of A */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
/* off-diag part of A */
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int *marker_offd = NULL;
HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int my_id, num_procs;
/* MPI size and rank*/
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
if (tol <= 0.0)
{
return hypre_error_flag;
}
marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
nnz_diag = nnz_offd = A_diag_i_i = A_offd_i_i = 0;
for (i = 0; i < nrow_local; i++)
{
/* compute row norm */
HYPRE_Real row_nrm = 0.0;
for (j = A_diag_i_i; j < A_diag_i[i+1]; j++)
{
HYPRE_Complex v = A_diag_a[j];
if (type == 1)
{
row_nrm += fabs(v);
}
else if (type == 2)
{
row_nrm += v*v;
}
else
{
row_nrm = hypre_max(row_nrm, fabs(v));
}
}
if (num_procs > 1)
{
for (j = A_offd_i_i; j < A_offd_i[i+1]; j++)
{
HYPRE_Complex v = A_offd_a[j];
if (type == 1)
{
row_nrm += fabs(v);
}
else if (type == 2)
{
row_nrm += v*v;
}
else
{
row_nrm = hypre_max(row_nrm, fabs(v));
}
}
}
if (type == 2)
{
row_nrm = sqrt(row_nrm);
}
/* drop small entries based on tol and row norm */
for (j = A_diag_i_i; j < A_diag_i[i+1]; j++)
{
HYPRE_Int col = A_diag_j[j];
HYPRE_Complex val = A_diag_a[j];
if (i == col || fabs(val) >= tol * row_nrm)
{
A_diag_j[nnz_diag] = col;
A_diag_a[nnz_diag] = val;
nnz_diag ++;
}
}
if (num_procs > 1)
{
for (j = A_offd_i_i; j < A_offd_i[i+1]; j++)
{
HYPRE_Int col = A_offd_j[j];
HYPRE_Complex val = A_offd_a[j];
            /* in normal cases a diagonal entry should not
             * appear in A_offd (though it can still happen) */
if (i + first_row == col_map_offd_A[col] || fabs(val) >= tol * row_nrm)
{
if (0 == marker_offd[col])
{
marker_offd[col] = 1;
}
A_offd_j[nnz_offd] = col;
A_offd_a[nnz_offd] = val;
nnz_offd ++;
}
}
}
A_diag_i_i = A_diag_i[i+1];
A_offd_i_i = A_offd_i[i+1];
A_diag_i[i+1] = nnz_diag;
A_offd_i[i+1] = nnz_offd;
}
hypre_CSRMatrixNumNonzeros(A_diag) = nnz_diag;
hypre_CSRMatrixNumNonzeros(A_offd) = nnz_offd;
hypre_ParCSRMatrixSetNumNonzeros(A);
hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A);
for (i = 0, k = 0; i < num_cols_A_offd; i++)
{
if (marker_offd[i])
{
col_map_offd_A[k] = col_map_offd_A[i];
marker_offd[i] = k++;
}
}
/* num_cols_A_offd = k; */
hypre_CSRMatrixNumCols(A_offd) = k;
for (i = 0; i < nnz_offd; i++)
{
A_offd_j[i] = marker_offd[A_offd_j[i]];
}
if ( hypre_ParCSRMatrixCommPkg(A) )
{
hypre_MatvecCommPkgDestroy( hypre_ParCSRMatrixCommPkg(A) );
}
hypre_MatvecCommPkgCreate(A);
hypre_TFree(marker_offd, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
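
/* Minimal usage sketch (not part of hypre, kept out of the build): drop
 * off-diagonal entries a_ij with |a_ij| < tol * ||row i||, here using the
 * 2-norm of each row (type = 2). */
#if 0
static void example_drop_small(hypre_ParCSRMatrix *A)
{
   hypre_ParCSRMatrixDropSmallEntries(A, 1e-3, 2);
}
#endif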
/* Perform dual truncation of ParCSR matrix.
* This code is adapted from original BoomerAMGInterpTruncate()
* A: parCSR matrix to be modified
* tol: relative tolerance or truncation factor for dropping small terms
* max_row_elmts: maximum number of (largest) nonzero elements to keep.
 * rescale: Boolean on whether or not to scale the resulting matrix. Each row is
 * scaled by sum(nonzero values before dropping) / sum(nonzero values after dropping),
 * so that applying the truncated matrix to a constant vector gives the same result
 * as applying the original matrix.
* nrm_type: type of norm used for dropping with tol.
* -- 0 = infinity-norm
* -- 1 = 1-norm
* -- 2 = 2-norm
*/
HYPRE_Int
hypre_ParCSRMatrixTruncate(hypre_ParCSRMatrix *A,
HYPRE_Real tol,
HYPRE_Int max_row_elmts,
HYPRE_Int rescale,
HYPRE_Int nrm_type)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] -= hypre_MPI_Wtime();
#endif
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_j_new;
HYPRE_Real *A_diag_data_new;
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j_new;
HYPRE_Real *A_offd_data_new;
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A_diag);
HYPRE_Int i, j, start_j;
HYPRE_Int ierr = 0;
HYPRE_Int next_open;
HYPRE_Int now_checking;
HYPRE_Int num_lost;
HYPRE_Int num_lost_global=0;
HYPRE_Int next_open_offd;
HYPRE_Int now_checking_offd;
HYPRE_Int num_lost_offd;
HYPRE_Int num_lost_global_offd;
HYPRE_Int A_diag_size;
HYPRE_Int A_offd_size;
HYPRE_Int num_elmts;
HYPRE_Int cnt, cnt_diag, cnt_offd;
HYPRE_Real row_nrm;
HYPRE_Real drop_coeff;
HYPRE_Real row_sum;
HYPRE_Real scale;
HYPRE_MemoryLocation memory_location_diag = hypre_CSRMatrixMemoryLocation(A_diag);
HYPRE_MemoryLocation memory_location_offd = hypre_CSRMatrixMemoryLocation(A_offd);
/* Threading variables. Entry i of num_lost_(offd_)per_thread holds the
* number of dropped entries over thread i's row range. Cum_lost_per_thread
* will temporarily store the cumulative number of dropped entries up to
* each thread. */
HYPRE_Int my_thread_num, num_threads, start, stop;
HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
HYPRE_Int * cum_lost_per_thread;
HYPRE_Int * num_lost_per_thread;
HYPRE_Int * num_lost_offd_per_thread;
/* Initialize threading variables */
max_num_threads[0] = hypre_NumThreads();
cum_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
num_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
num_lost_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
for (i = 0; i < max_num_threads[0]; i++)
{
num_lost_per_thread[i] = 0;
num_lost_offd_per_thread[i] = 0;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,my_thread_num,num_threads,row_nrm, drop_coeff,j,start_j,row_sum,scale,num_lost,now_checking,next_open,num_lost_offd,now_checking_offd,next_open_offd,start,stop,cnt_diag,cnt_offd,num_elmts,cnt)
#endif
{
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
/* Compute each thread's range of rows to truncate and compress. Note,
* that i, j and data are all compressed as entries are dropped, but
* that the compression only occurs locally over each thread's row
* range. A_diag_i is only made globally consistent at the end of this
* routine. During the dropping phases, A_diag_i[stop] will point to
* the start of the next thread's row range. */
/* my row range */
start = (n_fine / num_threads) * my_thread_num;
if (my_thread_num == num_threads-1)
{
stop = n_fine;
}
else
{
stop = (n_fine / num_threads) * (my_thread_num + 1);
}
/*
* Truncate based on truncation tolerance
*/
if (tol > 0)
{
num_lost = 0;
num_lost_offd = 0;
next_open = A_diag_i[start];
now_checking = A_diag_i[start];
         next_open_offd = A_offd_i[start];
         now_checking_offd = A_offd_i[start];
for (i = start; i < stop; i++)
{
row_nrm = 0;
/* compute norm for dropping small terms */
if (nrm_type == 0)
{
/* infty-norm */
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
row_nrm = (row_nrm < fabs(A_diag_data[j])) ?
fabs(A_diag_data[j]) : row_nrm;
}
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
row_nrm = (row_nrm < fabs(A_offd_data[j])) ?
fabs(A_offd_data[j]) : row_nrm;
}
}
if (nrm_type == 1)
{
/* 1-norm */
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
row_nrm += fabs(A_diag_data[j]);
}
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
row_nrm += fabs(A_offd_data[j]);
}
}
if (nrm_type == 2)
{
/* 2-norm */
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
HYPRE_Complex v = A_diag_data[j];
row_nrm += v*v;
}
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
HYPRE_Complex v = A_offd_data[j];
row_nrm += v*v;
}
row_nrm = sqrt(row_nrm);
}
drop_coeff = tol * row_nrm;
start_j = A_diag_i[i];
if (num_lost)
{
A_diag_i[i] -= num_lost;
}
row_sum = 0;
scale = 0;
for (j = start_j; j < A_diag_i[i+1]; j++)
{
row_sum += A_diag_data[now_checking];
if (fabs(A_diag_data[now_checking]) < drop_coeff)
{
num_lost++;
now_checking++;
}
else
{
scale += A_diag_data[now_checking];
A_diag_data[next_open] = A_diag_data[now_checking];
A_diag_j[next_open] = A_diag_j[now_checking];
now_checking++;
next_open++;
}
}
start_j = A_offd_i[i];
if (num_lost_offd)
{
A_offd_i[i] -= num_lost_offd;
}
for (j = start_j; j < A_offd_i[i+1]; j++)
{
row_sum += A_offd_data[now_checking_offd];
if (fabs(A_offd_data[now_checking_offd]) < drop_coeff)
{
num_lost_offd++;
now_checking_offd++;
}
else
{
scale += A_offd_data[now_checking_offd];
A_offd_data[next_open_offd] = A_offd_data[now_checking_offd];
A_offd_j[next_open_offd] = A_offd_j[now_checking_offd];
now_checking_offd++;
next_open_offd++;
}
}
/* scale row of A */
if (rescale && scale != 0.)
{
if (scale != row_sum)
{
scale = row_sum/scale;
for (j = A_diag_i[i]; j < (A_diag_i[i+1]-num_lost); j++)
{
A_diag_data[j] *= scale;
}
for (j = A_offd_i[i]; j < (A_offd_i[i+1]-num_lost_offd); j++)
{
A_offd_data[j] *= scale;
}
}
}
} /* end loop for (i = 0; i < n_fine; i++) */
/* store number of dropped elements and number of threads */
if (my_thread_num == 0)
{
max_num_threads[0] = num_threads;
}
num_lost_per_thread[my_thread_num] = num_lost;
num_lost_offd_per_thread[my_thread_num] = num_lost_offd;
      } /* end if (tol > 0) */
/*
* Truncate based on capping the nnz per row
*
*/
if (max_row_elmts > 0)
{
HYPRE_Int A_mxnum, cnt1, last_index, last_index_offd;
HYPRE_Int *A_aux_j;
HYPRE_Real *A_aux_data;
/* find maximum row length locally over this row range */
A_mxnum = 0;
for (i=start; i<stop; i++)
{
/* Note A_diag_i[stop] is the starting point for the next thread
* in j and data, not the stop point for this thread */
last_index = A_diag_i[i+1];
last_index_offd = A_offd_i[i+1];
if (i == stop-1)
{
last_index -= num_lost_per_thread[my_thread_num];
last_index_offd -= num_lost_offd_per_thread[my_thread_num];
}
cnt1 = last_index-A_diag_i[i] + last_index_offd-A_offd_i[i];
if (cnt1 > A_mxnum)
{
A_mxnum = cnt1;
}
}
/* Some rows exceed max_row_elmts, and require truncation. Essentially,
* each thread truncates and compresses its range of rows locally. */
if (A_mxnum > max_row_elmts)
{
num_lost = 0;
num_lost_offd = 0;
/* two temporary arrays to hold row i for temporary operations */
A_aux_j = hypre_CTAlloc(HYPRE_Int, A_mxnum, HYPRE_MEMORY_HOST);
A_aux_data = hypre_CTAlloc(HYPRE_Real, A_mxnum, HYPRE_MEMORY_HOST);
cnt_diag = A_diag_i[start];
cnt_offd = A_offd_i[start];
for (i = start; i < stop; i++)
{
/* Note A_diag_i[stop] is the starting point for the next thread
* in j and data, not the stop point for this thread */
last_index = A_diag_i[i+1];
last_index_offd = A_offd_i[i+1];
if (i == stop-1)
{
last_index -= num_lost_per_thread[my_thread_num];
last_index_offd -= num_lost_offd_per_thread[my_thread_num];
}
row_sum = 0;
num_elmts = last_index-A_diag_i[i] + last_index_offd-A_offd_i[i];
if (max_row_elmts < num_elmts)
{
/* copy both diagonal and off-diag parts of row i to _aux_ arrays */
cnt = 0;
for (j = A_diag_i[i]; j < last_index; j++)
{
A_aux_j[cnt] = A_diag_j[j];
A_aux_data[cnt++] = A_diag_data[j];
row_sum += A_diag_data[j];
}
num_lost += cnt;
cnt1 = cnt;
for (j = A_offd_i[i]; j < last_index_offd; j++)
{
A_aux_j[cnt] = A_offd_j[j]+num_cols;
A_aux_data[cnt++] = A_offd_data[j];
row_sum += A_offd_data[j];
}
num_lost_offd += cnt-cnt1;
/* sort data */
hypre_qsort2_abs(A_aux_j,A_aux_data,0,cnt-1);
scale = 0;
if (i > start)
{
A_diag_i[i] = cnt_diag;
A_offd_i[i] = cnt_offd;
}
for (j = 0; j < max_row_elmts; j++)
{
scale += A_aux_data[j];
if (A_aux_j[j] < num_cols)
{
A_diag_j[cnt_diag] = A_aux_j[j];
A_diag_data[cnt_diag++] = A_aux_data[j];
}
else
{
A_offd_j[cnt_offd] = A_aux_j[j]-num_cols;
A_offd_data[cnt_offd++] = A_aux_data[j];
}
}
num_lost -= cnt_diag-A_diag_i[i];
num_lost_offd -= cnt_offd-A_offd_i[i];
/* scale row of A */
if (rescale && (scale != 0.))
{
if (scale != row_sum)
{
scale = row_sum/scale;
for (j = A_diag_i[i]; j < cnt_diag; j++)
{
A_diag_data[j] *= scale;
}
for (j = A_offd_i[i]; j < cnt_offd; j++)
{
A_offd_data[j] *= scale;
}
}
}
} /* end if (max_row_elmts < num_elmts) */
else
{
/* nothing dropped from this row, but still have to shift entries back
* by the number dropped so far */
if (A_diag_i[i] != cnt_diag)
{
start_j = A_diag_i[i];
A_diag_i[i] = cnt_diag;
for (j = start_j; j < last_index; j++)
{
A_diag_j[cnt_diag] = A_diag_j[j];
A_diag_data[cnt_diag++] = A_diag_data[j];
}
}
else
{
cnt_diag += last_index-A_diag_i[i];
}
if (A_offd_i[i] != cnt_offd)
{
start_j = A_offd_i[i];
A_offd_i[i] = cnt_offd;
for (j = start_j; j < last_index_offd; j++)
{
A_offd_j[cnt_offd] = A_offd_j[j];
A_offd_data[cnt_offd++] = A_offd_data[j];
}
}
else
{
cnt_offd += last_index_offd-A_offd_i[i];
}
}
} /* end for (i = 0; i < n_fine; i++) */
num_lost_per_thread[my_thread_num] += num_lost;
num_lost_offd_per_thread[my_thread_num] += num_lost_offd;
hypre_TFree(A_aux_j, HYPRE_MEMORY_HOST);
hypre_TFree(A_aux_data, HYPRE_MEMORY_HOST);
} /* end if (A_mxnum > max_row_elmts) */
} /* end if (max_row_elmts > 0) */
/* Sum up num_lost_global */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
num_lost_global = 0;
num_lost_global_offd = 0;
for (i = 0; i < max_num_threads[0]; i++)
{
num_lost_global += num_lost_per_thread[i];
num_lost_global_offd += num_lost_offd_per_thread[i];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/*
* Synchronize and create new diag data structures
*/
if (num_lost_global)
{
         /* Each thread has its own locally compressed CSR matrix from rows start
* to stop. Now, we have to copy each thread's chunk into the new
* process-wide CSR data structures
*
* First, we compute the new process-wide number of nonzeros (i.e.,
* A_diag_size), and compute cum_lost_per_thread[k] so that this
* entry holds the cumulative sum of entries dropped up to and
* including thread k. */
if (my_thread_num == 0)
{
A_diag_size = A_diag_i[n_fine];
for (i = 0; i < max_num_threads[0]; i++)
{
A_diag_size -= num_lost_per_thread[i];
if (i > 0)
{
cum_lost_per_thread[i] = num_lost_per_thread[i] + cum_lost_per_thread[i-1];
}
else
{
cum_lost_per_thread[i] = num_lost_per_thread[i];
}
}
A_diag_j_new = hypre_CTAlloc(HYPRE_Int, A_diag_size, memory_location_diag);
A_diag_data_new = hypre_CTAlloc(HYPRE_Real, A_diag_size, memory_location_diag);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* points to next open spot in new data structures for this thread */
if (my_thread_num == 0)
{
next_open = 0;
}
else
{
/* remember, cum_lost_per_thread[k] stores the num dropped up to and
* including thread k */
next_open = A_diag_i[start] - cum_lost_per_thread[my_thread_num-1];
}
/* copy the j and data arrays over */
for (i = A_diag_i[start]; i < A_diag_i[stop] - num_lost_per_thread[my_thread_num]; i++)
{
A_diag_j_new[next_open] = A_diag_j[i];
A_diag_data_new[next_open] = A_diag_data[i];
next_open += 1;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* update A_diag_i with number of dropped entries by all lower ranked
* threads */
if (my_thread_num > 0)
{
for (i=start; i<stop; i++)
{
A_diag_i[i] -= cum_lost_per_thread[my_thread_num-1];
}
}
if (my_thread_num == 0)
{
/* Set last entry */
A_diag_i[n_fine] = A_diag_size ;
hypre_TFree(A_diag_j, memory_location_diag);
hypre_TFree(A_diag_data, memory_location_diag);
hypre_CSRMatrixJ(A_diag) = A_diag_j_new;
hypre_CSRMatrixData(A_diag) = A_diag_data_new;
hypre_CSRMatrixNumNonzeros(A_diag) = A_diag_size;
}
}
/*
* Synchronize and create new offd data structures
*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (num_lost_global_offd)
{
/* Repeat process for off-diagonal */
if (my_thread_num == 0)
{
A_offd_size = A_offd_i[n_fine];
for (i = 0; i < max_num_threads[0]; i++)
{
A_offd_size -= num_lost_offd_per_thread[i];
if (i > 0)
{
cum_lost_per_thread[i] = num_lost_offd_per_thread[i] + cum_lost_per_thread[i-1];
}
else
{
cum_lost_per_thread[i] = num_lost_offd_per_thread[i];
}
}
A_offd_j_new = hypre_CTAlloc(HYPRE_Int, A_offd_size, memory_location_offd);
A_offd_data_new = hypre_CTAlloc(HYPRE_Real, A_offd_size, memory_location_offd);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* points to next open spot in new data structures for this thread */
if (my_thread_num == 0)
{
next_open = 0;
}
else
{
/* remember, cum_lost_per_thread[k] stores the num dropped up to and
* including thread k */
next_open = A_offd_i[start] - cum_lost_per_thread[my_thread_num-1];
}
/* copy the j and data arrays over */
for (i = A_offd_i[start]; i < A_offd_i[stop] - num_lost_offd_per_thread[my_thread_num]; i++)
{
A_offd_j_new[next_open] = A_offd_j[i];
A_offd_data_new[next_open] = A_offd_data[i];
next_open += 1;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* update A_offd_i with number of dropped entries by all lower ranked
* threads */
if (my_thread_num > 0)
{
for (i=start; i<stop; i++)
{
A_offd_i[i] -= cum_lost_per_thread[my_thread_num-1];
}
}
if (my_thread_num == 0)
{
/* Set last entry */
A_offd_i[n_fine] = A_offd_size ;
hypre_TFree(A_offd_j, memory_location_offd);
hypre_TFree(A_offd_data, memory_location_offd);
hypre_CSRMatrixJ(A_offd) = A_offd_j_new;
hypre_CSRMatrixData(A_offd) = A_offd_data_new;
hypre_CSRMatrixNumNonzeros(A_offd) = A_offd_size;
}
}
} /* end parallel region */
hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST);
hypre_TFree(cum_lost_per_thread, HYPRE_MEMORY_HOST);
hypre_TFree(num_lost_per_thread, HYPRE_MEMORY_HOST);
hypre_TFree(num_lost_offd_per_thread, HYPRE_MEMORY_HOST);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] += hypre_MPI_Wtime();
#endif
return ierr;
}
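
/* Minimal usage sketch (not part of hypre, kept out of the build): keep at
 * most the 5 largest-magnitude entries per row, additionally drop entries
 * below 0.01 times the row's infinity norm (nrm_type = 0), and rescale each
 * surviving row so that its sum is preserved. */
#if 0
static void example_truncate(hypre_ParCSRMatrix *A)
{
   hypre_ParCSRMatrixTruncate(A, 0.01, 5, 1, 0);
}
#endif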
|
CrossValidation.h
|
#ifndef CROSSVAL_INCLUDED__H
#define CROSSVAL_INCLUDED__H
#include"LinearAlgebra.h"
#include"Regression.h"
#include"Model.h"
#include<vector>
#include<algorithm>
#include<cmath>
#include<omp.h>
struct CrossValidation{
std::vector< double > errors;
std::vector< double > parameters;
double selected;
};
template< class T, class KernelFunction, class TrainFunction, class ErrorFunction >
CrossValidation crossValidate( std::vector<T>& x, LinearAlgebra::Vector& y, std::vector<double> parameters, const KernelFunction& f_kernel, const TrainFunction& f_train, const ErrorFunction& f_error, unsigned long kfold ){
unsigned long n = y.size();
//randomly permute x and y
std::vector< unsigned long > indices;
for( unsigned long i = 0 ; i < n ; ++i ){
indices.push_back( i );
}
std::random_shuffle( indices.begin(), indices.end() );
std::vector<T> x_permuted( n );
LinearAlgebra::Vector y_permuted( n );
for( unsigned long i = 0 ; i < n ; ++i ){
x_permuted[i] = x[ indices[i] ];
y_permuted[i] = y[ indices[i] ];
}
std::vector< double > errors( parameters.size(), 0 );
//perform cross validation
#pragma omp parallel for
for( unsigned long j = 0 ; j < kfold ; ++j ){
unsigned long start = j * n / kfold;
unsigned long end = ( j + 1 ) * n / kfold;
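		//example (illustrative note): with n = 10 and kfold = 3 the test ranges are
		//[0,3), [3,6), [6,10), so every sample lands in exactly one test fold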
std::vector<T> x_train;
std::vector<T> x_test;
		for( unsigned long i = 0 ; i < n ; ++i ){
			if( i >= start && i < end ){
				x_test.push_back( x_permuted[i] );
			}else{
				x_train.push_back( x_permuted[i] );
			}
		}
LinearAlgebra::Vector y_train( x_train.size() );
LinearAlgebra::Vector y_test( x_test.size() );
unsigned long train_idx = 0, test_idx = 0;
		for( unsigned long i = 0 ; i < n ; ++i ){
			if( i >= start && i < end ){
				y_test[test_idx++] = y_permuted[i];
			}else{
				y_train[train_idx++] = y_permuted[i];
			}
		}
LinearAlgebra::Matrix k_train = Regression::kernelMatrix( x_train, f_kernel );
LinearAlgebra::Matrix k_test = Regression::kernelMatrix( x_test, x_train, f_kernel );
for( unsigned long i = 0 ; i < parameters.size() ; ++i ){
double param = parameters[i];
const Model<LinearAlgebra::Matrix>& model = f_train( k_train, y_train, param );
LinearAlgebra::Vector y_hat = model.predict( k_test );
double error = f_error( y_test, y_hat ) / (double) kfold;
#pragma omp atomic
errors[i] += error;
}
}
//find best parameter and return it
double best = 0;
double min_error = INFINITY;
for( unsigned long i = 0 ; i < parameters.size() ; ++i ){
if( errors[i] < min_error ){
min_error = errors[i];
best = parameters[i];
}
}
CrossValidation xval;
xval.errors = errors;
xval.parameters = parameters;
xval.selected = best;
return xval;
}
/* TODO
template< class TrainFunction, class ErrorFunction >
double crossValidate( LinearAlgebra::Matrix& x, LinearAlgebra::Vector& y, std::vector<double> parameters, const TrainFunction& f_train, const ErrorFunction& f_error, unsigned long kfold, bool x_is_kernel = false ){
unsigned long n = y.size();
//randomly permute the x matrix and y vector
Eigen::PermutationMatrix<Eigen::Dynamic,Eigen::Dynamic> perm( n );
perm.setIdentity();
std::random_shuffle( perm.indices().data(), perm.indices().data() + perm.indices().size() );
if( x_is_kernel ){
x = perm.transpose() * x * perm;
}else{
x = perm.transpose() * x;
}
y = perm.transpose() * y;
double best = 0;
double best_score = INFINITY;
//perform cross validation
for( unsigned long i = 0 ; i < parameters.size() ; ++i ){
double param = parameters[i];
double error = 0;
for( unsigned long j = 0 ; j < kfold ; ++j ){
unsigned long start = j * n / kfold;
unsigned long length = ( j + 1 ) * n / kfold - start;
unsigned long colstart, collength;
if( x_is_kernel ){
colstart = start;
collength = length;
}else{
colstart = 0;
collength = x.cols();
}
const Model<LinearAlgebra::Matrix>& model = f_train( x.block( start, colstart, length, collength ), y.segment( start, length ), param );
}
}
//unpermute x and y
perm = perm.inverse();
if( x_is_kernel ){
x = perm.transpose() * x * perm;
}else{
x = perm.transpose() * x;
}
y = perm.transpose() * y;
}
*/
#endif
|
owl_ndarray_pool_impl.h
|
/*
* OWL - OCaml Scientific Computing
* Copyright (c) 2016-2022 Liang Wang <[email protected]>
*/
#ifdef OWL_ENABLE_TEMPLATE
CAMLprim value FUN_NATIVE (spatial) (
value vInput_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows,
value vRow_stride, value vCol_stride,
value vPadding, value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = input_cols * input_rows * in_channel;
const int input_ri = input_rows * in_channel;
const int output_cri = output_cols * output_rows * in_channel;
const int output_ri = output_rows * in_channel;
memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
int pr = 0, pc = 0;
if (padding != 1){
pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
}
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_cri;
const int output_idx_base_i = i * output_cri;
for (int j = 0; j < output_cols; ++j) {
const int output_idx_base_j = output_idx_base_i + j * output_ri;
for (int k = 0; k < output_rows; ++k) {
const int output_idx_base = output_idx_base_j + k * in_channel;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
for (int l = 0; l < in_channel; ++l) {
TYPE acc = INITACC;
int c = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + l;
TYPE t = *(input_ptr + input_idx);
ACCFN (acc, t);
c++;
}
}
}
int output_idx = output_idx_base + l;
*(output_ptr + output_idx) = UPDATEFN (acc, c);
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial) (value * argv, int argn) {
return FUN_NATIVE (spatial) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]
);
}
CAMLprim value FUN_NATIVE (spatial_backward) (
value vInput, value vOutput_back, value vInput_back,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows,
value vRow_stride, value vCol_stride,
value vPad_rows, value vPad_cols
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *OUB = Caml_ba_array_val(vOutput_back);
struct caml_ba_array *INB = Caml_ba_array_val(vInput_back);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *output_backward_ptr = (TYPE *) OUB->data;
TYPE *input_backward_ptr = (TYPE *) INB->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int pad_rows = Long_val(vPad_rows);
int pad_cols = Long_val(vPad_cols);
const int ksize = kernel_cols * kernel_rows;
const int output_cri = output_cols * output_rows * in_channel;
const int output_ri = output_rows * in_channel;
const int input_cri = input_cols * input_rows * in_channel;
const int input_ri = input_rows * in_channel;
if (pad_cols < 0) pad_cols = 0;
if (pad_rows < 0) pad_rows = 0;
memset(input_backward_ptr, 0,
batches * input_cols * input_rows * in_channel * sizeof(TYPE));
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_cri;
const int output_idx_base_i = i * output_cri;
for (int j = 0; j < output_cols; ++j) {
const int output_idx_base_j = output_idx_base_i + j * output_ri;
for (int k = 0; k < output_rows; ++k) {
const int output_idx_base = output_idx_base_j + k * in_channel;
const int cstart = j * col_stride - pad_cols;
const int rstart = k * row_stride - pad_rows;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
for (int l = 0; l < in_channel; ++l) {
TYPE m;
int output_idx = output_idx_base + l;
m = *(output_backward_ptr + output_idx);
int idx[ksize];
memset(idx, 0, ksize * sizeof(int));
TYPE acc = INITACC;
int max_idx = 0;
int c = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + l;
idx[c++] = input_idx;
#ifdef OWL_NDARRAY_MAX
TYPE t = *(input_ptr + input_idx);
if (PLT(acc,t)){
acc = t;
max_idx = input_idx;
}
#endif
}
}
}
#ifdef OWL_NDARRAY_AVG
for (int i = 0; i < c; i++) {
*(input_backward_ptr + idx[i]) += UPDATEFN (m, c);
}
#else
*(input_backward_ptr + max_idx) += UPDATEFN (m, c);
#endif
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial_backward) (value * argv, int argn) {
return FUN_NATIVE (spatial_backward) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]
);
}
CAMLprim value FUN_NATIVE (cuboid) (
value vInput, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows, value vOutput_dpts,
value vDpt_stride, value vRow_stride, value vCol_stride,
value vPadding
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
const int output_crdi = output_cols * output_rows * output_dpts * in_channel;
const int output_rdi = output_rows * output_dpts * in_channel;
const int output_di = output_dpts * in_channel;
const int input_crdi = input_cols * input_rows * input_dpts * in_channel;
const int input_rdi = input_rows * input_dpts * in_channel;
const int input_di = input_dpts * in_channel;
memset(output_ptr, 0, batches * output_crdi * sizeof(TYPE));
int pd, pr, pc;
if (padding == 1) {
pc = 0; pr = 0; pd = 0;
} else {
int pad_cols = col_stride * (output_cols - 1) + kernel_cols - input_cols;
int pad_rows = row_stride * (output_rows - 1) + kernel_rows - input_rows;
int pad_dpts = dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts;
pc = pad_cols / 2; if (pc < 0) pc = 0;
pr = pad_rows / 2; if (pr < 0) pr = 0;
pd = pad_dpts / 2; if (pd < 0) pd = 0;
}
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_crdi;
const int output_idx_base_i = i * output_crdi;
for (int j = 0; j < output_cols; ++j) {
const int output_idx_base_j = output_idx_base_i + j * output_rdi;
for (int k = 0; k < output_rows; ++k) {
const int output_idx_base_k = output_idx_base_j + k * output_di;
for (int d = 0; d < output_dpts; ++d) {
const int output_idx_base = output_idx_base_k + d * in_channel;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int dend = dstart + kernel_dpts;
for (int l = 0; l < in_channel; ++l) {
TYPE acc = INITACC;
int counter = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int c = dstart; c < dend; ++c){
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + l;
TYPE t = *(input_ptr + input_idx);
ACCFN (acc, t);
counter++;
}
}
}
}
int output_idx = output_idx_base + l;
*(output_ptr + output_idx) = UPDATEFN (acc, counter);
}
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (cuboid) (value * argv, int argn) {
return FUN_NATIVE (cuboid) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16]
);
}
CAMLprim value FUN_NATIVE (cuboid_backward) (
value vInput, value vOutput_back, value vInput_back,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows, value vOutput_dpts,
value vCol_stride, value vRow_stride, value vDpt_stride,
value vPadding
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *OUB = Caml_ba_array_val(vOutput_back);
struct caml_ba_array *INB = Caml_ba_array_val(vInput_back);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *output_backward_ptr = (TYPE *) OUB->data;
TYPE *input_backward_ptr = (TYPE *) INB->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int col_stride = Long_val(vCol_stride);
int row_stride = Long_val(vRow_stride);
int dpt_stride = Long_val(vDpt_stride);
int padding = Long_val(vPadding);
const int ksize = kernel_cols * kernel_rows * kernel_dpts;
const int output_crdi = output_cols * output_rows * output_dpts * in_channel;
const int output_rdi = output_rows * output_dpts * in_channel;
const int output_di = output_dpts * in_channel;
const int input_crdi = input_cols * input_rows * input_dpts * in_channel;
const int input_rdi = input_rows * input_dpts * in_channel;
const int input_di = input_dpts * in_channel;
int pd, pr, pc;
if (padding == 1) {
pc = 0; pr = 0; pd = 0;
} else {
int pad_cols = col_stride * (output_cols - 1) + kernel_cols - input_cols;
int pad_rows = row_stride * (output_rows - 1) + kernel_rows - input_rows;
int pad_dpts = dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts;
pc = pad_cols / 2; if (pc < 0) pc = 0;
pr = pad_rows / 2; if (pr < 0) pr = 0;
pd = pad_dpts / 2; if (pd < 0) pd = 0;
}
memset(input_backward_ptr, 0, batches * input_crdi * sizeof(TYPE));
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_crdi;
const int output_idx_base_i = i * output_crdi;
for (int j = 0; j < output_cols; ++j) {
const int output_idx_base_j = output_idx_base_i + j * output_rdi;
for (int k = 0; k < output_rows; ++k) {
const int output_idx_base_k = output_idx_base_j + k * output_di;
for (int d = 0; d < output_dpts; ++d) {
const int output_idx_base = output_idx_base_k + d * in_channel;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int dend = dstart + kernel_dpts;
for (int l = 0; l < in_channel; ++l) {
TYPE m;
int output_idx = output_idx_base + l;
m = *(output_backward_ptr + output_idx);
int idx[ksize];
memset(idx, 0, ksize * sizeof(int));
TYPE acc = INITACC;
int max_idx = 0;
int counter = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int c = dstart; c < dend; ++c) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + l;
idx[counter++] = input_idx;
#ifdef OWL_NDARRAY_MAX
TYPE t = *(input_ptr + input_idx);
if (PLT(acc,t)){
acc = t;
max_idx = input_idx;
}
#endif
}
}
}
}
#ifdef OWL_NDARRAY_AVG
for (int i = 0; i < counter; i++) {
*(input_backward_ptr + idx[i]) += UPDATEFN (m, counter);
}
#else
*(input_backward_ptr + max_idx) += UPDATEFN (m, counter);
#endif
}
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (cuboid_backward) (value * argv, int argn) {
return FUN_NATIVE (cuboid_backward) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16], argv[17]
);
}
#ifdef OWL_NDARRAY_MAX
CAMLprim value FUN_NATIVE (spatial_arg) (
value vInput_ptr, value vOutput_ptr, value vArgmax_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows,
value vRow_stride, value vCol_stride,
value vPad_rows, value vPad_cols
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
struct caml_ba_array *AG = Caml_ba_array_val(vArgmax_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *output_ptr = (TYPE *) OU->data;
int64_t *argmax_ptr = (int64_t *) AG->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int pad_rows = Long_val(vPad_rows);
int pad_cols = Long_val(vPad_cols);
  if (pad_rows < 0) pad_rows = 0;
  if (pad_cols < 0) pad_cols = 0;
const int input_cri = input_cols * input_rows * in_channel;
const int input_ri = input_rows * in_channel;
const int output_cri = output_cols * output_rows * in_channel;
const int output_ri = output_rows * in_channel;
memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
memset(argmax_ptr, 0, batches * output_cri * sizeof(int64_t));
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_cri;
const int output_idx_base_i = i * output_cri;
for (int j = 0; j < output_cols; ++j) {
const int output_idx_base_j = output_idx_base_i + j * output_ri;
for (int k = 0; k < output_rows; ++k) {
const int output_idx_base = output_idx_base_j + k * in_channel;
const int cstart = j * col_stride - pad_cols;
const int rstart = k * row_stride - pad_rows;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
for (int l = 0; l < in_channel; ++l) {
TYPE acc = INITACC;
int max_idx = -1;
int c = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + l;
TYPE t = *(input_ptr + input_idx);
if (PLT(acc,t)){
acc = t;
max_idx = input_idx;
}
c++;
}
}
}
int output_idx = output_idx_base + l;
*(output_ptr + output_idx) = acc;
*(argmax_ptr + output_idx) = (int64_t) max_idx;
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial_arg) (value * argv, int argn) {
return FUN_NATIVE (spatial_arg) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]
);
}
#endif /* OWL_NDARRAY_MAX */
#endif /* OWL_ENABLE_TEMPLATE */
|
collatzSieve2toK_FindPatterns_reduceTo1.c
|
/* ******************************************
Find deltaN and count numbers to be tested for a 2^k sieve
  that is to be used to find the maximum number of steps to 1.
For n = A 2^k + B, A>0 must be true when using this sieve.
Compile and run via something like...
clang -O3 collatzSieve2toK_FindPatterns_reduceTo1.c -fopenmp
time ./a.out |tee -a log26.txt &
To remove any checkpoints once the code is fully completed, run...
rm temp*
To configure OpenMP, you can change the argument of num_threads().
Just search this code for "num_threads" (appears twice).
I wouldn't do more threads than physical CPU cores
to prevent bottlenecks due to sharing resources.
k < 81 must be true.
You'll need a 64-bit computer with a not-ancient version of gcc
in order for __uint128_t to work.
  The Collatz conjecture is the following...
  Repeated application of the following f eventually reduces any integer n>1 to 1.
f(n) = (3*n+1)/2 if n%2 = 1
f(n) = n/2 if n%2 = 0
(c) 2021 Bradley Knockel
****************************************** */
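
/* Small illustrative sketch (not used by the sieve below): one application of
   the Collatz map f defined above, for a 64-bit n; assumes 3*n+1 does not overflow. */
static inline unsigned long long collatz_f_example(unsigned long long n) {
  return (n & 1) ? (3*n + 1) / 2 : n / 2;   // f(n) = (3n+1)/2 if n odd, n/2 if n even
}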
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/time.h>
struct timeval tv1, tv2;
/*
To use less RAM, break job into tasks
Will use a bit more than 2^(TASK_SIZE + 4) bytes of RAM
TASK_SIZE <= k
If you set this larger than k, TASK_SIZE = k will be used
*/
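// example (illustrative note): with TASK_SIZE = 26 this is a bit over 2^30 bytes
// (about 1 GiB), since arrayLarge[] alone is 16 * 2^TASK_SIZE bytes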
int TASK_SIZE = 26;
const int k = 23;
/*
If TASK_SIZE < k, deltaN must not be more than 2^TASK_SIZE
This is because hold[] and holdC[] cannot be longer than the arrays.
*/
const __uint128_t deltaN = 1398101;
// how many checkpoints to save out ?
// will save (2^log2saves - 1) times
// log2saves must be less than or equal to k - TASK_SIZE
const int log2saves = 0;
// which checkpoint to load? 0 if none.
// If loading, don't change any parameters of this code except this one!!
// If loading, will load the files named temp#____, where loadCheckpoint is the #.
const uint64_t loadCheckpoint = 0;
/*
2^K is the number of numbers processed by each CPU thread
for the SECOND use of OpenMP
K <= TASK_SIZE
K should ideally be a good amount less than TASK_SIZE
*/
int32_t K = 6;
// Prints __uint128_t numbers since printf("%llu\n", x) doesn't work
// since "long long" is only 64-bit in gcc.
// This function works for any non-negative integer less than 128 bits.
void print128(__uint128_t n) {
char a[40] = { '\0' };
char *p = a + 39;
if (n==0) { *--p = (char)('0'); }
else { for (; n != 0; n /= 10) *--p = (char)('0' + n % 10); }
printf("%s", p);
}
uint32_t pow3(size_t n)
{
uint32_t r = 1;
uint32_t b = 3;
while (n) {
if (n & 1) {
r *= b;
}
b *= b;
n >>= 1;
}
return r;
}
#define LUT_SIZE32 21
uint32_t lut[LUT_SIZE32];
#define min(a,b) (((a)<(b))?(a):(b))
/*
for updating arrayLarge[] and arrayIncreases[]
I used code from...
https://github.com/xbarin02/collatz/blob/master/src/gpuworker/kernel32-precalc.cl
*/
void kSteps(__uint128_t arrayLarge[], uint8_t arrayIncreases[], uint64_t task_id) {
#pragma omp parallel for schedule(guided) num_threads(6)
for (size_t index = 0; index < ((size_t)1 << TASK_SIZE); ++index) {
__uint128_t L0 = index + (task_id << TASK_SIZE);
__uint128_t L = L0;
uint32_t R = k; /* counter */
size_t Salpha = 0; /* sum of alpha */
if (L == 0) goto next;
do {
L++;
do {
size_t alpha;
if ((uint64_t)L == 0) alpha = 64; // __builtin_ctzll(0) is undefined
else alpha = __builtin_ctzll(L);
alpha = min(alpha, (size_t)LUT_SIZE32 - 1);
alpha = min(alpha, (size_t)R);
R -= alpha;
L >>= alpha;
L *= lut[alpha];
Salpha += alpha;
if (R == 0) {
L--;
goto next;
}
} while (!(L & 1));
L--;
do {
size_t beta;
if ((uint64_t)L == 0) beta = 64; // __builtin_ctzll(0) is undefined
else beta = __builtin_ctzll(L);
beta = min(beta, (size_t)R);
R -= beta;
L >>= beta;
if (R == 0) goto next;
} while (!(L & 1));
} while (1);
next:
arrayLarge[index] = L;
arrayIncreases[index] = (uint8_t)Salpha;
}
}
int main(void) {
if (TASK_SIZE > k) TASK_SIZE = k;
if (K > TASK_SIZE) K = TASK_SIZE;
int j;
__uint128_t count, n0start, maxM;
// for doing stuff for finding unique patterns
int32_t nn, mm, oo;
const __uint128_t k2 = (__uint128_t)1 << k; // 2^k
const uint64_t bits = (uint64_t)1 << K; // 2^K
printf(" k = %i\n", k);
printf(" deltaN = ");
print128(deltaN);
printf("\n");
fflush(stdout);
// start timing
gettimeofday(&tv1, NULL);
/* setup */
size_t arrayLargeCount = (size_t)1 << TASK_SIZE;
size_t arrayIncreasesCount = (size_t)1 << TASK_SIZE;
const size_t patternsPerArray = arrayIncreasesCount / bits;
__uint128_t *arrayLarge = malloc(sizeof(__uint128_t) * arrayLargeCount);
uint8_t *arrayIncreases = malloc(sizeof(uint8_t) * arrayIncreasesCount);
if ( arrayLarge == NULL || arrayIncreases == NULL ) {
return -1;
}
uint64_t taskIDmax = ((uint64_t)1 << (k - TASK_SIZE)); // the number of tasks run
uint64_t tasksPerSave = taskIDmax / ((uint64_t)1 << log2saves);
uint64_t taskGroups = taskIDmax / tasksPerSave;
for (size_t i = 0; i < LUT_SIZE32; ++i) lut[i] = pow3(i);
//// Look for unique patterns, find max deltaN, and
//// count numbers that need testing.
fflush(stdout);
n0start = loadCheckpoint * tasksPerSave * arrayIncreasesCount;
count = 0;
maxM = 0; // see if deltaN is ever really reached
double timePrior = 0; // from before loaded checkpoint
// temporarily hold the last deltaN 128-bit integers of old task (also hold the last deltaN 8-bit increases)
__uint128_t *hold = malloc(sizeof(__uint128_t) * deltaN);
uint8_t *holdC = malloc(sizeof(uint8_t) * deltaN);
if (loadCheckpoint) {
FILE *fp;
char path[4096];
sprintf(path, "temp%" PRIu64 "count", loadCheckpoint);
fp = fopen(path, "rb");
fread(&count, sizeof(__uint128_t), 1, fp);
fclose(fp);
sprintf(path, "temp%" PRIu64 "maxM", loadCheckpoint);
fp = fopen(path, "rb");
fread(&maxM, sizeof(__uint128_t), 1, fp);
fclose(fp);
sprintf(path, "temp%" PRIu64 "hold", loadCheckpoint);
fp = fopen(path, "rb");
fread(hold, sizeof(__uint128_t), deltaN, fp);
fclose(fp);
sprintf(path, "temp%" PRIu64 "holdC", loadCheckpoint);
fp = fopen(path, "rb");
fread(holdC, sizeof(uint8_t), deltaN, fp);
fclose(fp);
sprintf(path, "temp%" PRIu64 "timeElapsed", loadCheckpoint);
fp = fopen(path, "rb");
fread(&timePrior, sizeof(double), 1, fp);
fclose(fp);
printf(" Checkpoint %" PRIu64 " loaded\n", loadCheckpoint);
}
// for OpenMP threads to write to
// I probably should use uint64_t instead ??
uint32_t *collectCount = malloc(sizeof(uint32_t) * patternsPerArray);
uint32_t *collectMaxM = malloc(sizeof(uint32_t) * patternsPerArray);
// run task_id in groups and save after each group
for (uint64_t task_id_group = loadCheckpoint; task_id_group < taskGroups; task_id_group++) {
// loop over task_id to repeatedly call kSteps() and analyze results
for (uint64_t task_id = task_id_group * tasksPerSave; task_id < (task_id_group + 1) * tasksPerSave; task_id++) {
/* run kSteps() to update arrayLarge[] and arrayIncreases[] */
kSteps(arrayLarge, arrayIncreases, task_id);
#pragma omp parallel for schedule(guided) num_threads(6)
for (size_t iPattern = 0; iPattern < patternsPerArray; iPattern++) {
uint32_t countTiny = 0;
uint32_t maxMtiny = maxM;
for (size_t index = iPattern*bits; index < (iPattern + 1)*bits; index++) {
int temp = 1;
// the number being tested
__uint128_t n0 = n0start + index;
if (n0 > 0) {
// for seeing if n joins the previous number, nm
__uint128_t n = arrayLarge[index];
__uint128_t nm;
// for seeing if the number of increases, c, equals the previous increases, cm
int c = (int)arrayIncreases[index];
int cm;
__uint128_t lenList = ((deltaN+1) < (n0-1)) ? (deltaN+1) : (n0-1) ; // get min(deltaN+1, n0-1)
for(size_t m=1; m<lenList; m++) {
if ( index >= m ) {
nm = arrayLarge[index - m];
cm = (int)arrayIncreases[index - m];
} else {
if (task_id == 0) break;
size_t iAdjusted = deltaN + index - m;
nm = hold[iAdjusted];
cm = (int)holdC[iAdjusted];
}
if ( nm == n && cm == c ) {
if(m > maxMtiny){
maxMtiny = m;
}
temp = 0;
break;
}
}
}
if (temp) countTiny++;
}
collectCount[iPattern] = countTiny;
collectMaxM[iPattern] = maxMtiny;
}
n0start += arrayIncreasesCount;
for (size_t iPattern = 0; iPattern < patternsPerArray; iPattern++) {
count += collectCount[iPattern];
uint32_t maxMtiny = collectMaxM[iPattern];
if (maxMtiny > maxM) maxM = maxMtiny;
}
// fill hold[] and holdC[] with arrayLarge[] and arrayIncreases[], respectively
if ( task_id < taskIDmax - 1 ) {
for (size_t i=0; i < deltaN; i++)
hold[i] = arrayLarge[arrayLargeCount - deltaN + i];
for (size_t i=0; i < deltaN; i++)
holdC[i] = arrayIncreases[arrayIncreasesCount - deltaN + i];
}
}
// save everything out!
if ( task_id_group < taskGroups - 1 ) {
FILE *fp;
char path[4096];
sprintf(path, "temp%" PRIu64 "count", task_id_group + 1);
fp = fopen(path, "wb");
fwrite(&count, sizeof(__uint128_t), 1, fp);
fclose(fp);
sprintf(path, "temp%" PRIu64 "maxM", task_id_group + 1);
fp = fopen(path, "wb");
fwrite(&maxM, sizeof(__uint128_t), 1, fp);
fclose(fp);
sprintf(path, "temp%" PRIu64 "hold", task_id_group + 1);
fp = fopen(path, "wb");
fwrite(hold, sizeof(__uint128_t), deltaN, fp);
fclose(fp);
sprintf(path, "temp%" PRIu64 "holdC", task_id_group + 1);
fp = fopen(path, "wb");
fwrite(holdC, sizeof(uint8_t), deltaN, fp);
fclose(fp);
gettimeofday(&tv2, NULL);
double timeElapsed = (double)(tv2.tv_usec - tv1.tv_usec) / 1000000.0 + (double)(tv2.tv_sec - tv1.tv_sec);
timeElapsed += timePrior;
sprintf(path, "temp%" PRIu64 "timeElapsed", task_id_group + 1);
fp = fopen(path, "wb");
fwrite(&timeElapsed, sizeof(double), 1, fp);
fclose(fp);
}
}
gettimeofday(&tv2, NULL);
printf(" Elapsed wall time is %e seconds\n",
(double)(tv2.tv_usec - tv1.tv_usec) / 1000000.0 + (double)(tv2.tv_sec - tv1.tv_sec) + timePrior );
print128(count);
printf(" out of ");
print128(k2);
printf(" need testing, so %f\n", (double)count / (double)k2);
if (maxM > 0) {
printf(" max deltaN = ");
print128(maxM);
printf("\n");
}
printf("\n");
free(hold);
free(holdC);
free(collectCount);
free(collectMaxM);
free(arrayLarge);
free(arrayIncreases);
return 0;
}
|
mscash1_fmt_plug.c
|
/* MSCASH patch for john (performance improvement)
*
* Modified for utf-8 support by magnum in 2011, same terms as below
*
* Written by Alain Espinosa <alainesp at gmail.com> in 2007. No copyright
* is claimed, and the software is hereby placed in the public domain.
* In case this attempt to disclaim copyright and place the software in the
* public domain is deemed null and void, then the software is
* Copyright (c) 2007 Alain Espinosa and it is hereby released to the
* general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*
* (This is a heavily cut-down "BSD license".)
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mscash;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mscash);
#else
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "unicode.h"
#include "options.h"
#include "loader.h"
#include "johnswap.h"
#include "mscash_common.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 192
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "mscash"
#define FORMAT_NAME "MS Cache Hash (DCC)"
#define ALGORITHM_NAME "MD4 32/" ARCH_BITS_STR
#define PLAINTEXT_LENGTH 27
#define SALT_SIZE (11*4)
#define OK_NUM_KEYS 64
#define BEST_NUM_KEYS 512
#ifdef _OPENMP
#define MS_NUM_KEYS OK_NUM_KEYS
#else
#define MS_NUM_KEYS BEST_NUM_KEYS
#endif
#define MIN_KEYS_PER_CRYPT OK_NUM_KEYS
#define MAX_KEYS_PER_CRYPT MS_NUM_KEYS
static unsigned int *ms_buffer1x;
static unsigned int *output1x;
static unsigned int *crypt_out;
static unsigned int *last;
static unsigned int *last_i;
static unsigned int *salt_buffer;
static unsigned int new_key;
//Init values
#define INIT_A 0x67452301
#define INIT_B 0xefcdab89
#define INIT_C 0x98badcfe
#define INIT_D 0x10325476
#define SQRT_2 0x5a827999
#define SQRT_3 0x6ed9eba1
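// SQRT_2 and SQRT_3 are the usual MD4 round-2 and round-3 constants,
// floor(2^30 * sqrt(2)) and floor(2^30 * sqrt(3)).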
static void set_key_utf8(char *_key, int index);
static void set_key_encoding(char *_key, int index);
static void * get_salt_utf8(char *_ciphertext);
static void * get_salt_encoding(char *_ciphertext);
struct fmt_main fmt_mscash;
#if !ARCH_LITTLE_ENDIAN
static inline void swap(unsigned int *x, int count)
{
while (count--) {
*x = JOHNSWAP(*x);
x++;
}
}
#endif
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
fmt_mscash.params.max_keys_per_crypt *= omp_t;
#endif
ms_buffer1x = mem_calloc(sizeof(ms_buffer1x[0]), 16*fmt_mscash.params.max_keys_per_crypt);
output1x = mem_calloc(sizeof(output1x[0]) , 4*fmt_mscash.params.max_keys_per_crypt);
crypt_out = mem_calloc(sizeof(crypt_out[0]) , 4*fmt_mscash.params.max_keys_per_crypt);
last = mem_calloc(sizeof(last[0]) , 4*fmt_mscash.params.max_keys_per_crypt);
last_i = mem_calloc(sizeof(last_i[0]) , fmt_mscash.params.max_keys_per_crypt);
new_key=1;
mscash1_adjust_tests(self, options.target_enc, PLAINTEXT_LENGTH,
set_key_utf8, set_key_encoding, get_salt_utf8, get_salt_encoding);
}
static void done(void)
{
MEM_FREE(last_i);
MEM_FREE(last);
MEM_FREE(crypt_out);
MEM_FREE(output1x);
MEM_FREE(ms_buffer1x);
}
static void set_salt(void *salt) {
salt_buffer=salt;
}
static void *get_salt(char *_ciphertext)
{
unsigned char *ciphertext = (unsigned char *)_ciphertext;
// length=11 to save memory
// position 10 = length
// positions 0-9 = 1-19 Unicode characters + EOS marker (0x80)
static unsigned int *out=0;
unsigned int md4_size;
if (!out) out = mem_alloc_tiny(11*sizeof(unsigned int), MEM_ALIGN_WORD);
memset(out,0,11*sizeof(unsigned int));
ciphertext+=2;
for(md4_size = 0 ;; md4_size++)
if(md4_size < 19 && ciphertext[md4_size]!='#')
{
md4_size++;
out[md4_size>>1] = ciphertext[md4_size-1] | ((ciphertext[md4_size]!='#') ? (ciphertext[md4_size]<<16) : 0x800000);
if(ciphertext[md4_size]=='#')
break;
}
else
{
out[md4_size>>1] = 0x80;
break;
}
out[10] = (8 + md4_size) << 4;
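// out[10] holds the MD4 message length in bits for the second MD4 block:
// 8 UTF-16 units (the 16-byte first hash) plus md4_size salt units, times 16.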
// dump_stuff(out, 44);
return out;
}
static void *get_salt_encoding(char *_ciphertext) {
unsigned char *ciphertext = (unsigned char *)_ciphertext;
unsigned char input[19*3+1];
int utf16len, md4_size;
static UTF16 *out=0;
if (!out) out = mem_alloc_tiny(22*sizeof(UTF16), MEM_ALIGN_WORD);
memset(out, 0, 22*sizeof(UTF16));
ciphertext += 2;
for (md4_size=0;md4_size<sizeof(input)-1;md4_size++) {
if (ciphertext[md4_size] == '#')
break;
input[md4_size] = ciphertext[md4_size];
}
input[md4_size] = 0;
utf16len = enc_to_utf16(out, 19, input, md4_size);
if (utf16len < 0)
utf16len = strlen16(out);
#if ARCH_LITTLE_ENDIAN
out[utf16len] = 0x80;
#else
out[utf16len] = 0x8000;
swap((unsigned int*)out, (utf16len>>1)+1);
#endif
((unsigned int*)out)[10] = (8 + utf16len) << 4;
// dump_stuff(out, 44);
return out;
}
static void * get_salt_utf8(char *_ciphertext)
{
unsigned char *ciphertext = (unsigned char *)_ciphertext;
unsigned int md4_size;
UTF16 ciphertext_utf16[21];
int len;
static ARCH_WORD_32 *out=0;
if (!out) out = mem_alloc_tiny(11*sizeof(ARCH_WORD_32), MEM_ALIGN_WORD);
memset(out, 0, 11*sizeof(ARCH_WORD_32));
ciphertext+=2;
len = ((unsigned char*)strchr((char*)ciphertext, '#')) - ciphertext;
utf8_to_utf16(ciphertext_utf16, 20, ciphertext, len+1);
for(md4_size = 0 ;; md4_size++) {
#if !ARCH_LITTLE_ENDIAN
ciphertext_utf16[md4_size] = (ciphertext_utf16[md4_size]>>8)|(ciphertext_utf16[md4_size]<<8);
#endif
if(md4_size < 19 && ciphertext_utf16[md4_size]!=(UTF16)'#') {
md4_size++;
#if !ARCH_LITTLE_ENDIAN
ciphertext_utf16[md4_size] = (ciphertext_utf16[md4_size]>>8)|(ciphertext_utf16[md4_size]<<8);
#endif
out[md4_size>>1] = ciphertext_utf16[md4_size-1] |
((ciphertext_utf16[md4_size]!=(UTF16)'#') ?
(ciphertext_utf16[md4_size]<<16) : 0x800000);
if(ciphertext_utf16[md4_size]==(UTF16)'#')
break;
}
else {
out[md4_size>>1] = 0x80;
break;
}
}
out[10] = (8 + md4_size) << 4;
return out;
}
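// get_binary: convert the hex hash to binary, then undo the final additions
// and the last few round-3 steps of the second MD4. crypt_all() stops its
// computation at the same intermediate point, so cmp_all()/cmp_one() can
// compare these partial states directly.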
static void *get_binary(char *ciphertext)
{
static unsigned int out[BINARY_SIZE/sizeof(unsigned int)];
unsigned int i=0;
unsigned int temp;
unsigned int *salt=fmt_mscash.methods.salt(ciphertext);
for(;ciphertext[0]!='#';ciphertext++);
ciphertext++;
for(; i<4 ;i++)
{
temp = ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+0])]))<<4;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+1])]));
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+2])]))<<12;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+3])]))<<8;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+4])]))<<20;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+5])]))<<16;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+6])]))<<28;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+7])]))<<24;
out[i]=temp;
}
out[0] -= INIT_A;
out[1] -= INIT_B;
out[2] -= INIT_C;
out[3] -= INIT_D;
// Reversed b += (c ^ d ^ a) + salt_buffer[11] + SQRT_3; b = (b << 15) | (b >> 17);
out[1] = (out[1] >> 15) | (out[1] << 17);
out[1] -= SQRT_3 + (out[2] ^ out[3] ^ out[0]);
// Reversed c += (d ^ a ^ b) + salt_buffer[3] + SQRT_3; c = (c << 11) | (c >> 21);
out[2] = (out[2] << 21) | (out[2] >> 11);
out[2]-= SQRT_3 + (out[3] ^ out[0] ^ out[1]) + salt[3];
// Reversed d += (a ^ b ^ c) + salt_buffer[7] + SQRT_3; d = (d << 9 ) | (d >> 23);
out[3] = (out[3] << 23) | (out[3] >> 9);
out[3] -= SQRT_3 + (out[0] ^ out[1] ^ out[2]) + salt[7];
//+ SQRT_3; d = (d << 9 ) | (d >> 23);
out[3]=(out[3] << 23 ) | (out[3] >> 9);
out[3]-=SQRT_3;
return out;
}
static int binary_hash_0(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_0; }
static int binary_hash_1(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_1; }
static int binary_hash_2(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_2; }
static int binary_hash_3(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_3; }
static int binary_hash_4(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_4; }
static int binary_hash_5(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_5; }
static int binary_hash_6(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_6; }
static int get_hash_0(int index) { return output1x[4*index+3] & PH_MASK_0; }
static int get_hash_1(int index) { return output1x[4*index+3] & PH_MASK_1; }
static int get_hash_2(int index) { return output1x[4*index+3] & PH_MASK_2; }
static int get_hash_3(int index) { return output1x[4*index+3] & PH_MASK_3; }
static int get_hash_4(int index) { return output1x[4*index+3] & PH_MASK_4; }
static int get_hash_5(int index) { return output1x[4*index+3] & PH_MASK_5; }
static int get_hash_6(int index) { return output1x[4*index+3] & PH_MASK_6; }
static void nt_hash(int count)
{
int i;
#if MS_NUM_KEYS > 1 && defined(_OPENMP)
#pragma omp parallel for default(none) private(i) shared(count, ms_buffer1x, crypt_out, last)
#endif
for (i = 0; i < count; i++)
{
unsigned int a;
unsigned int b;
unsigned int c;
unsigned int d;
/* Round 1 */
a = 0xFFFFFFFF + ms_buffer1x[16*i+0];a = (a << 3 ) | (a >> 29);
d = INIT_D + (INIT_C ^ (a & 0x77777777)) + ms_buffer1x[16*i+1];d = (d << 7 ) | (d >> 25);
c = INIT_C + (INIT_B ^ (d & (a ^ INIT_B)))+ ms_buffer1x[16*i+2];c = (c << 11) | (c >> 21);
b = INIT_B + (a ^ (c & (d ^ a))) + ms_buffer1x[16*i+3];b = (b << 19) | (b >> 13);
a += (d ^ (b & (c ^ d))) + ms_buffer1x[16*i+4] ;a = (a << 3 ) | (a >> 29);
d += (c ^ (a & (b ^ c))) + ms_buffer1x[16*i+5] ;d = (d << 7 ) | (d >> 25);
c += (b ^ (d & (a ^ b))) + ms_buffer1x[16*i+6] ;c = (c << 11) | (c >> 21);
b += (a ^ (c & (d ^ a))) + ms_buffer1x[16*i+7] ;b = (b << 19) | (b >> 13);
a += (d ^ (b & (c ^ d))) + ms_buffer1x[16*i+8] ;a = (a << 3 ) | (a >> 29);
d += (c ^ (a & (b ^ c))) + ms_buffer1x[16*i+9] ;d = (d << 7 ) | (d >> 25);
c += (b ^ (d & (a ^ b))) + ms_buffer1x[16*i+10] ;c = (c << 11) | (c >> 21);
b += (a ^ (c & (d ^ a))) + ms_buffer1x[16*i+11] ;b = (b << 19) | (b >> 13);
a += (d ^ (b & (c ^ d))) + ms_buffer1x[16*i+12] ;a = (a << 3 ) | (a >> 29);
d += (c ^ (a & (b ^ c))) + ms_buffer1x[16*i+13] ;d = (d << 7 ) | (d >> 25);
c += (b ^ (d & (a ^ b))) + ms_buffer1x[16*i+14] ;c = (c << 11) | (c >> 21);
b += (a ^ (c & (d ^ a)))/*+ms_buffer1x[16*i+15]*/;b = (b << 19) | (b >> 13);
/* Round 2 */
a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+0] + SQRT_2; a = (a << 3 ) | (a >> 29);
d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+4] + SQRT_2; d = (d << 5 ) | (d >> 27);
c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+8] + SQRT_2; c = (c << 9 ) | (c >> 23);
b += ((c & (d | a)) | (d & a)) + ms_buffer1x[16*i+12] + SQRT_2; b = (b << 13) | (b >> 19);
a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+1] + SQRT_2; a = (a << 3 ) | (a >> 29);
d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+5] + SQRT_2; d = (d << 5 ) | (d >> 27);
c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+9] + SQRT_2; c = (c << 9 ) | (c >> 23);
b += ((c & (d | a)) | (d & a)) + ms_buffer1x[16*i+13] + SQRT_2; b = (b << 13) | (b >> 19);
a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+2] + SQRT_2; a = (a << 3 ) | (a >> 29);
d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+6] + SQRT_2; d = (d << 5 ) | (d >> 27);
c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+10] + SQRT_2; c = (c << 9 ) | (c >> 23);
b += ((c & (d | a)) | (d & a)) + ms_buffer1x[16*i+14] + SQRT_2; b = (b << 13) | (b >> 19);
a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+3] + SQRT_2; a = (a << 3 ) | (a >> 29);
d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+7] + SQRT_2; d = (d << 5 ) | (d >> 27);
c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+11] + SQRT_2; c = (c << 9 ) | (c >> 23);
b += ((c & (d | a)) | (d & a))/*+ms_buffer1x[16*i+15]*/+SQRT_2; b = (b << 13) | (b >> 19);
/* Round 3 */
a += (b ^ c ^ d) + ms_buffer1x[16*i+0] + SQRT_3; a = (a << 3 ) | (a >> 29);
d += (a ^ b ^ c) + ms_buffer1x[16*i+8] + SQRT_3; d = (d << 9 ) | (d >> 23);
c += (d ^ a ^ b) + ms_buffer1x[16*i+4] + SQRT_3; c = (c << 11) | (c >> 21);
b += (c ^ d ^ a) + ms_buffer1x[16*i+12] + SQRT_3; b = (b << 15) | (b >> 17);
a += (b ^ c ^ d) + ms_buffer1x[16*i+2] + SQRT_3; a = (a << 3 ) | (a >> 29);
d += (a ^ b ^ c) + ms_buffer1x[16*i+10] + SQRT_3; d = (d << 9 ) | (d >> 23);
c += (d ^ a ^ b) + ms_buffer1x[16*i+6] + SQRT_3; c = (c << 11) | (c >> 21);
b += (c ^ d ^ a) + ms_buffer1x[16*i+14] + SQRT_3; b = (b << 15) | (b >> 17);
a += (b ^ c ^ d) + ms_buffer1x[16*i+1] + SQRT_3; a = (a << 3 ) | (a >> 29);
d += (a ^ b ^ c) + ms_buffer1x[16*i+9] + SQRT_3; d = (d << 9 ) | (d >> 23);
c += (d ^ a ^ b) + ms_buffer1x[16*i+5] + SQRT_3; c = (c << 11) | (c >> 21);
b += (c ^ d ^ a) + ms_buffer1x[16*i+13] + SQRT_3; b = (b << 15) | (b >> 17);
a += (b ^ c ^ d) + ms_buffer1x[16*i+3] + SQRT_3; a = (a << 3 ) | (a >> 29);
d += (a ^ b ^ c) + ms_buffer1x[16*i+11] + SQRT_3; d = (d << 9 ) | (d >> 23);
c += (d ^ a ^ b) + ms_buffer1x[16*i+7] + SQRT_3; c = (c << 11) | (c >> 21);
b += (c ^ d ^ a) /*+ ms_buffer1x[16*i+15] */+ SQRT_3; b = (b << 15) | (b >> 17);
crypt_out[4*i+0] = a + INIT_A;
crypt_out[4*i+1] = b + INIT_B;
crypt_out[4*i+2] = c + INIT_C;
crypt_out[4*i+3] = d + INIT_D;
//Another MD4_crypt for the salt
/* Round 1 */
a= 0xFFFFFFFF +crypt_out[4*i+0]; a=(a<<3 )|(a>>29);
d=INIT_D + ( INIT_C ^ ( a & 0x77777777)) +crypt_out[4*i+1]; d=(d<<7 )|(d>>25);
c=INIT_C + ( INIT_B ^ ( d & ( a ^ INIT_B))) +crypt_out[4*i+2]; c=(c<<11)|(c>>21);
b=INIT_B + ( a ^ ( c & ( d ^ a ))) +crypt_out[4*i+3]; b=(b<<19)|(b>>13);
last[4*i+0]=a;
last[4*i+1]=b;
last[4*i+2]=c;
last[4*i+3]=d;
}
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int i;
if(new_key)
{
new_key=0;
nt_hash(count);
}
#if MS_NUM_KEYS > 1 && defined(_OPENMP)
#pragma omp parallel for default(none) private(i) shared(count, last, crypt_out, salt_buffer, output1x)
#endif
for(i = 0; i < count; i++)
{
unsigned int a;
unsigned int b;
unsigned int c;
unsigned int d;
a = last[4*i+0];
b = last[4*i+1];
c = last[4*i+2];
d = last[4*i+3];
a += (d ^ (b & (c ^ d))) + salt_buffer[0] ;a = (a << 3 ) | (a >> 29);
d += (c ^ (a & (b ^ c))) + salt_buffer[1] ;d = (d << 7 ) | (d >> 25);
c += (b ^ (d & (a ^ b))) + salt_buffer[2] ;c = (c << 11) | (c >> 21);
b += (a ^ (c & (d ^ a))) + salt_buffer[3] ;b = (b << 19) | (b >> 13);
a += (d ^ (b & (c ^ d))) + salt_buffer[4] ;a = (a << 3 ) | (a >> 29);
d += (c ^ (a & (b ^ c))) + salt_buffer[5] ;d = (d << 7 ) | (d >> 25);
c += (b ^ (d & (a ^ b))) + salt_buffer[6] ;c = (c << 11) | (c >> 21);
b += (a ^ (c & (d ^ a))) + salt_buffer[7] ;b = (b << 19) | (b >> 13);
a += (d ^ (b & (c ^ d))) + salt_buffer[8] ;a = (a << 3 ) | (a >> 29);
d += (c ^ (a & (b ^ c))) + salt_buffer[9] ;d = (d << 7 ) | (d >> 25);
c += (b ^ (d & (a ^ b))) + salt_buffer[10] ;c = (c << 11) | (c >> 21);
b += (a ^ (c & (d ^ a)))/*+salt_buffer[11]*/;b = (b << 19) | (b >> 13);
/* Round 2 */
a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+0] + SQRT_2; a = (a << 3 ) | (a >> 29);
d += ((a & (b | c)) | (b & c)) + salt_buffer[0] + SQRT_2; d = (d << 5 ) | (d >> 27);
c += ((d & (a | b)) | (a & b)) + salt_buffer[4] + SQRT_2; c = (c << 9 ) | (c >> 23);
b += ((c & (d | a)) | (d & a)) + salt_buffer[8] + SQRT_2; b = (b << 13) | (b >> 19);
a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+1] + SQRT_2; a = (a << 3 ) | (a >> 29);
d += ((a & (b | c)) | (b & c)) + salt_buffer[1] + SQRT_2; d = (d << 5 ) | (d >> 27);
c += ((d & (a | b)) | (a & b)) + salt_buffer[5] + SQRT_2; c = (c << 9 ) | (c >> 23);
b += ((c & (d | a)) | (d & a)) + salt_buffer[9] + SQRT_2; b = (b << 13) | (b >> 19);
a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+2] + SQRT_2; a = (a << 3 ) | (a >> 29);
d += ((a & (b | c)) | (b & c)) + salt_buffer[2] + SQRT_2; d = (d << 5 ) | (d >> 27);
c += ((d & (a | b)) | (a & b)) + salt_buffer[6] + SQRT_2; c = (c << 9 ) | (c >> 23);
b += ((c & (d | a)) | (d & a)) + salt_buffer[10] + SQRT_2; b = (b << 13) | (b >> 19);
a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+3] + SQRT_2; a = (a << 3 ) | (a >> 29);
d += ((a & (b | c)) | (b & c)) + salt_buffer[3] + SQRT_2; d = (d << 5 ) | (d >> 27);
c += ((d & (a | b)) | (a & b)) + salt_buffer[7] + SQRT_2; c = (c << 9 ) | (c >> 23);
b += ((c & (d | a)) | (d & a))/*+ salt_buffer[11]*/+ SQRT_2; b = (b << 13) | (b >> 19);
/* Round 3 */
a += (b ^ c ^ d) + crypt_out[4*i+0] + SQRT_3; a = (a << 3 ) | (a >> 29);
d += (a ^ b ^ c) + salt_buffer[4] + SQRT_3; d = (d << 9 ) | (d >> 23);
c += (d ^ a ^ b) + salt_buffer[0] + SQRT_3; c = (c << 11) | (c >> 21);
b += (c ^ d ^ a) + salt_buffer[8] + SQRT_3; b = (b << 15) | (b >> 17);
a += (b ^ c ^ d) + crypt_out[4*i+2] + SQRT_3; a = (a << 3 ) | (a >> 29);
d += (a ^ b ^ c) + salt_buffer[6] + SQRT_3; d = (d << 9 ) | (d >> 23);
c += (d ^ a ^ b) + salt_buffer[2] + SQRT_3; c = (c << 11) | (c >> 21);
b += (c ^ d ^ a) + salt_buffer[10] + SQRT_3; b = (b << 15) | (b >> 17);
a += (b ^ c ^ d) + crypt_out[4*i+1] + SQRT_3; a = (a << 3 ) | (a >> 29);
d += (a ^ b ^ c) + salt_buffer[5];
output1x[4*i+0]=a;
output1x[4*i+1]=b;
output1x[4*i+2]=c;
output1x[4*i+3]=d;
}
return count;
}
static int cmp_all(void *binary, int count)
{
unsigned int i=0;
unsigned int d=((unsigned int *)binary)[3];
for(;i<count;i++)
if(d==output1x[i*4+3])
return 1;
return 0;
}
static int cmp_one(void * binary, int index)
{
unsigned int *t=(unsigned int *)binary;
unsigned int a=output1x[4*index+0];
unsigned int b=output1x[4*index+1];
unsigned int c=output1x[4*index+2];
unsigned int d=output1x[4*index+3];
if(d!=t[3])
return 0;
d+=SQRT_3;d = (d << 9 ) | (d >> 23);
c += (d ^ a ^ b) + salt_buffer[1] + SQRT_3; c = (c << 11) | (c >> 21);
if(c!=t[2])
return 0;
b += (c ^ d ^ a) + salt_buffer[9] + SQRT_3; b = (b << 15) | (b >> 17);
if(b!=t[1])
return 0;
a += (b ^ c ^ d) + crypt_out[4*index+3]+ SQRT_3; a = (a << 3 ) | (a >> 29);
return (a==t[0]);
}
static int cmp_exact(char *source, int index)
{
// This check is for the unlikely case of collisions.
// It verifies that the salts are the same.
unsigned int *salt=fmt_mscash.methods.salt(source);
unsigned int i=0;
for(;i<11;i++)
if(salt[i]!=salt_buffer[i])
return 0;
return 1;
}
// This is common code for the SSE/MMX/generic variants of non-UTF8 set_key
static inline void set_key_helper(unsigned int * keybuffer,
unsigned int xBuf,
const unsigned char * key,
unsigned int lenStoreOffset,
unsigned int *last_length)
{
unsigned int i=0;
unsigned int md4_size=0;
for(; key[md4_size] && md4_size < PLAINTEXT_LENGTH; i += xBuf, md4_size++)
{
unsigned int temp;
if ((temp = key[++md4_size]))
{
keybuffer[i] = key[md4_size-1] | (temp << 16);
}
else
{
keybuffer[i] = key[md4_size-1] | 0x800000;
goto key_cleaning;
}
}
keybuffer[i] = 0x80;
key_cleaning:
i += xBuf;
for(;i <= *last_length; i += xBuf)
keybuffer[i] = 0;
*last_length = (md4_size >> 1)+1;
keybuffer[lenStoreOffset] = md4_size << 4;
}
static void set_key(char *_key, int index)
{
set_key_helper(&ms_buffer1x[index << 4], 1, (unsigned char *)_key, 14,
&last_i[index]);
// new password candidate
new_key=1;
}
// UTF-8 conversion right into key buffer
// This is common code for the SSE/MMX/generic variants
static inline void set_key_helper_utf8(unsigned int * keybuffer, unsigned int xBuf,
const UTF8 * source, unsigned int lenStoreOffset, unsigned int *lastlen)
{
unsigned int *target = keybuffer;
UTF32 chl, chh = 0x80;
unsigned int outlen = 0;
while (*source) {
chl = *source;
if (chl >= 0xC0) {
unsigned int extraBytesToRead = opt_trailingBytesUTF8[chl & 0x3f];
switch (extraBytesToRead) {
case 3:
++source;
if (*source) {
chl <<= 6;
chl += *source;
} else {
*lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
return;
}
case 2:
++source;
if (*source) {
chl <<= 6;
chl += *source;
} else {
*lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
return;
}
case 1:
++source;
if (*source) {
chl <<= 6;
chl += *source;
} else {
*lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
return;
}
case 0:
break;
default:
*lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
return;
}
chl -= offsetsFromUTF8[extraBytesToRead];
}
source++;
outlen++;
if (chl > UNI_MAX_BMP) {
if (outlen == PLAINTEXT_LENGTH) {
chh = 0x80;
*target = (chh << 16) | chl;
target += xBuf;
*lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
break;
}
#define halfBase 0x0010000UL
#define halfShift 10
#define halfMask 0x3FFUL
#define UNI_SUR_HIGH_START (UTF32)0xD800
#define UNI_SUR_LOW_START (UTF32)0xDC00
chl -= halfBase;
chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START);
chl = (UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START);
outlen++;
} else if (*source && outlen < PLAINTEXT_LENGTH) {
chh = *source;
if (chh >= 0xC0) {
unsigned int extraBytesToRead =
opt_trailingBytesUTF8[chh & 0x3f];
switch (extraBytesToRead) {
case 3:
++source;
if (*source) {
chh <<= 6;
chh += *source;
} else {
*lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
return;
}
case 2:
++source;
if (*source) {
chh <<= 6;
chh += *source;
} else {
*lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
return;
}
case 1:
++source;
if (*source) {
chh <<= 6;
chh += *source;
} else {
*lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
return;
}
case 0:
break;
default:
*lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
return;
}
chh -= offsetsFromUTF8[extraBytesToRead];
}
source++;
outlen++;
} else {
chh = 0x80;
*target = chh << 16 | chl;
target += xBuf;
break;
}
*target = chh << 16 | chl;
target += xBuf;
}
if (chh != 0x80 || outlen == 0) {
*target = 0x80;
target += xBuf;
}
while(target < &keybuffer[*lastlen]) {
*target = 0;
target += xBuf;
}
*lastlen = ((outlen >> 1) + 1) * xBuf;
keybuffer[lenStoreOffset] = outlen << 4;
}
static void set_key_utf8(char *_key, int index)
{
set_key_helper_utf8(&ms_buffer1x[index << 4], 1, (UTF8 *)_key, 14,
&last_i[index]);
// new password candidate
new_key=1;
}
// This is common code for the SSE/MMX/generic variants of non-UTF8 non-ISO-8859-1 set_key
static inline void set_key_helper_encoding(unsigned int * keybuffer,
unsigned int xBuf,
const unsigned char * key,
unsigned int lenStoreOffset,
unsigned int *last_length)
{
unsigned int i=0;
int md4_size;
md4_size = enc_to_utf16( (UTF16 *)keybuffer, PLAINTEXT_LENGTH, (UTF8 *) key, strlen((char*)key));
if (md4_size < 0)
md4_size = strlen16((UTF16 *)keybuffer);
#if ARCH_LITTLE_ENDIAN
((UTF16*)keybuffer)[md4_size] = 0x80;
#else
((UTF16*)keybuffer)[md4_size] = 0x8000;
#endif
((UTF16*)keybuffer)[md4_size+1] = 0;
#if !ARCH_LITTLE_ENDIAN
((UTF16*)keybuffer)[md4_size+2] = 0;
#endif
i = md4_size>>1;
i += xBuf;
for(;i <= *last_length; i += xBuf)
keybuffer[i] = 0;
#if !ARCH_LITTLE_ENDIAN
swap(keybuffer, (md4_size>>1)+1);
#endif
*last_length = (md4_size >> 1) + 1;
keybuffer[lenStoreOffset] = md4_size << 4;
}
static void set_key_encoding(char *_key, int index)
{
set_key_helper_encoding(&ms_buffer1x[index << 4], 1, (unsigned char *)_key, 14,
&last_i[index]);
// new password candidate
new_key=1;
}
// Get the key back from the key buffer, from UCS-2 LE
static char *get_key(int index)
{
static union {
UTF16 u16[PLAINTEXT_LENGTH + 1];
unsigned int u32[(PLAINTEXT_LENGTH + 1 + 1) / 2];
} key;
unsigned int * keybuffer = &ms_buffer1x[index << 4];
unsigned int md4_size;
unsigned int i=0;
int len = keybuffer[14] >> 4;
for(md4_size = 0; md4_size < len; i++, md4_size += 2)
{
#if ARCH_LITTLE_ENDIAN
key.u16[md4_size] = keybuffer[i];
key.u16[md4_size+1] = keybuffer[i] >> 16;
#else
key.u16[md4_size] = keybuffer[i] >> 16;
key.u16[md4_size+1] = keybuffer[i];
#endif
}
#if !ARCH_LITTLE_ENDIAN
swap(key.u32, md4_size >> 1);
#endif
key.u16[len] = 0x00;
return (char *)utf16_to_enc(key.u16);
}
// Public domain hash function by DJ Bernstein (salt is a username)
static int salt_hash(void *salt)
{
UTF16 *s = salt;
unsigned int hash = 5381;
while (*s != 0x80)
hash = ((hash << 5) + hash) ^ *s++;
return hash & (SALT_HASH_SIZE - 1);
}
struct fmt_main fmt_mscash = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_UNICODE | FMT_UTF8,
{ NULL },
mscash1_common_tests
}, {
init,
done,
fmt_default_reset,
mscash1_common_prepare,
mscash1_common_valid,
mscash1_common_split,
get_binary, // NOTE, not using the 'common' binary function.
get_salt,
{ NULL },
fmt_default_source,
{
binary_hash_0,
binary_hash_1,
binary_hash_2,
binary_hash_3,
binary_hash_4,
binary_hash_5,
binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
3d7pt.c
|
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc < 5) {
fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
return 1;
}
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including an extra sentinel element (-1) to mark the end of the list
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 8;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (disabled: freeing here caused performance degradation)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
GB_binop__lt_uint16.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__lt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__lt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_uint16)
// A*D function (colscale): GB (_AxD__lt_uint16)
// D*A function (rowscale): GB (_DxB__lt_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_uint16)
// C=scalar+B GB (_bind1st__lt_uint16)
// C=scalar+B' GB (_bind1st_tran__lt_uint16)
// C=A+scalar GB (_bind2nd__lt_uint16)
// C=A'+scalar GB (_bind2nd_tran__lt_uint16)
// C type: bool
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_UINT16 || GxB_NO_LT_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__lt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lt_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lt_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lt_uint16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lt_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__lt_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lt_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__lt_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lt_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lt_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lt_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__lt_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__lt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__lnot_fp32_fp32.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__lnot_fp32_fp32
// op(A') function: GB_unop_tran__lnot_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__lnot_fp32_fp32
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = !(z != 0) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__lnot_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
LAGraph_bfs_pushpull.c
|
//------------------------------------------------------------------------------
// LAGraph_bfs_pushpull: push-pull breadth-first search
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2020 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact [email protected] for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
#include "LAGraph_bfs_pushpull.h"
#include "../config.h"
//------------------------------------------------------------------------------
// LAGraph_bfs_pushpull: direction-optimized push/pull breadth first search,
// contributed by Tim Davis, Texas A&M.
// LAGraph_bfs_pushpull computes the BFS of a graph from a single given
// source node. The result is a vector v where v(i)=k if node i was placed
// at level k in the BFS.
// Usage:
// info = LAGraph_bfs_pushpull (&v, &pi, A, AT, source, max_level, vsparse) ;
// GrB_Vector *v: a vector containing the result, created on output.
// v(i) = k is the BFS level of node i in the graph, where a source
// node has v(source)=1. v(i) is implicitly zero if it is unreachable
// from the source node. That is, GrB_Vector_nvals (&nreach,v) is the
// size of the reachable set of the source node, for a single-source
// BFS. v may be returned as sparse, or full. If full, v(i)=0
// indicates that node i was not reached. If sparse, the pattern of v
// indicates the set of nodes reached.
// GrB_Vector *pi: a vector containing the BFS tree, in 1-based indexing.
// pi(source) = source+1 for source node. pi(i) = p+1 if p is the
// parent of i. If pi is sparse, and pi(i) is not present, then node
// i has not been reached. Otherwise, if pi is full, then pi(i)=0
// indicates that node i was not reached.
// GrB_Matrix A: a square matrix of any type. The values of A are not
// accessed. The presence of the entry A(i,j) indicates the edge
// (i,j). That is, an explicit entry A(i,j)=0 is treated as an edge.
// GrB_Matrix AT: an optional matrix of any type. If NULL, the algorithm
// is a conventional push-only BFS. If not NULL, AT must be the
// transpose of A, and a push-pull algorithm is used (NOTE: this
// assumes GraphBLAS stores its matrix in CSR form; see discussion
// below). Results are undefined if AT is not NULL but not identical
// to the transpose of A.
// int64_t source: the source node for the BFS.
// int64_t max_level: An optional limit on the levels searched for the
// single-source BFS. If zero, then no limit is enforced. If > 0,
// then only nodes with v(i) <= max_level will be visited. That is:
// 1: just the source node, 2: the source and its neighbors, 3: the
// source node, its neighbors, and their neighbors, etc.
// bool vsparse: if the result v may remain very sparse, then set this
// parameter to true. If v might have many entries, set it false. If
// you are unsure, then set it to true. This parameter speeds up
// the handling of v. If you guess wrong, there is a slight
// performance penalty. The results are not affected by this
// parameter, just the performance. This parameter is used only for
// the single-source BFS.
// single-source BFS:
// Given a graph A, a source node, find all nodes reachable from the
// source node. v(source)=1, v(i)=2 if edge (source,i) appears in the
// graph, and so on. If node i is not reachable from source, then
// implicitly v(i)=0. v is returned as a sparse vector, and unreachable
// nodes have no entry in v.
// This algorithm can use the push-pull strategy, which requires both A and
// AT=A' to be passed in. If the graph is known to be symmetric, then the same
// matrix A can be passed in for both arguments. Results are undefined if AT
// is not the transpose of A.
// If only A or AT is passed in, then only a single strategy will be used: push
// or pull, but not both. In general, push-only performs well. A pull-only
// strategy is possible but it is exceedingly slow. Assuming A and AT are both
// in CSR format, then (let s = source node):
// LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest)
// LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // push-only (good)
// LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // pull-only (slow!)
// If A and AT are both in CSC format, then:
// LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest)
// LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // push-only (good)
// LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // pull-only (slow!)
// Since the pull-only method is exceedingly slow, SuiteSparse:GraphBLAS
// detects this case and refuses to do it.
// The basic step of this algorithm computes A'*q where q is the 'queue' of
// nodes in the current level. This can be done with GrB_vxm(q,A) = (q'*A)' =
// A'*q, or by GrB_mxv(AT,q) = AT*q = A'*q. Both steps compute the same thing,
// just in a different way. In GraphBLAS, unlike MATLAB, a GrB_Vector is
// simultaneously a row and column vector, so q and q' are interchangeable.
// To implement an efficient BFS using GraphBLAS, an assumption must be made in
// LAGraph about how the matrix is stored, whether by row or by column (or
// perhaps some other opaque data structure). The storage format has a huge
// impact on the relative performance of vxm(q,A) and mxv(AT,q).
// Storing A by row, if A(i,j) is the edge (i,j), means that A(i,:) is easily
// accessible. In terms of the graph A, this means that the out-adjacency
// list of node i can be traversed in time O(out-degree of node i).
// If AT is stored by row, then AT(i,:) is the in-adjacency list of node i,
// and traversing row i of AT can be done in O(in-degree of node i) time.
// The CSR (Compressed Sparse Row) format is the default for
// SuiteSparse:GraphBLAS, but no assumption can be made about any particular
// GraphBLAS library implementation.
// If A and AT are both stored by column instead, then A(i,:) is not easy to
// access. Instead, A(:,i) is the easily-accessible in-adjacency of node i,
// and AT(:,i) is the out-adjacency.
// A push step requires the out-adjacencies of each node, whereas
// a pull step requires the in-adjacencies of each node.
// vxm(q,A) = A'*q, with A stored by row: a push step
// mxv(AT,q) = A'*q, with AT stored by row: a pull step
// vxm(q,A) = A'*q, with A stored by col: a pull step
// mxv(AT,q) = A'*q, with AT stored by col: a push step
// The GraphBLAS data structure is opaque. An implementation may decide to
// store the matrix A in both formats, internally, so that it can easily traverse
// both in- and out-adjacencies of each node (equivalently, A(i,:) and A(:,i)
// can both be easily traversed). This would make a push-pull BFS easy to
// implement using just the opaque GrB_Matrix A, but it doubles the storage.
// Deciding which format to use automatically is not a simple task,
// particularly since the decision must work well throughout GraphBLAS, not
// just for the BFS.
// MATLAB stores its sparse matrices in CSC format (Compressed Sparse Column).
// As a result, the MATLAB expression x=AT*q is a push step, computed using a
// saxpy-based algorithm internally, and x=A'*q is a pull step, computed using
// a dot product.
// SuiteSparse:GraphBLAS can store a matrix in either format, but this requires
// an extension to the GraphBLAS C API (GxB_set (A, GxB_FORMAT, f)), where
// f = GxB_BY_ROW (that is, CSR) or GxB_BY_COL (that is, CSC). The library
// could be augmented in the future with f = GxB_BY_BOTH. It currently does
// not select the format automatically. As a result, if GxB_set is not used,
// all its GrB_Matrix objects are stored by row (CSR).
// SuiteSparse:GraphBLAS allows the user to query (via GxB_get) and set (via
// GxB_set) the format, whether by row or by column. The hypersparsity of
// A is selected automatically, with optional hints from the user application,
// but a selection between hypersparsity vs standard CSR and CSC has no effect
// on the push vs pull decision made here.
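// As a minimal sketch (specific to the SuiteSparse:GraphBLAS GxB_* extensions
// mentioned above; other GraphBLAS libraries do not provide these calls):
/*
GxB_Format_Value fmt ;
GxB_set (A, GxB_FORMAT, GxB_BY_ROW) ; // ask for CSR storage
GxB_get (A, GxB_FORMAT, &fmt) ; // query the format actually in use
if (fmt == GxB_BY_ROW)
{
// A(i,:) is cheap to traverse, so vxm(q,A) acts as a push step
}
*/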
// The push/pull and saxpy/dot connection can be described as follows.
// Assume for these first two examples that MATLAB stores its matrices in CSR
// format, where accessing A(i,:) is fast.
// If A is stored by row, then x = vxm(q,A) = q'*A can be written in MATLAB
// notation as:
/*
function x = vxm (q,A)
% a push step: compute x = q'*A where q is a column vector
x = sparse (1,n)
for i = 1:n
% a saxpy operation, using the ith row of A and the scalar q(i)
x = x + q (i) * A (i,:)
end
*/
// If AT is stored by row, then x = mxv(AT,q) = AT*q = A'*q becomes
// a dot product:
/*
function x = mxv (AT,q)
% a pull step: compute x = AT*q where q is a column vector
for i = 1:n
% a dot-product of the ith row of AT and the column vector q
x (i) = AT (i,:) * q
end
*/
// The above snippets describe how SuiteSparse:GraphBLAS computes vxm(q,A) and
// mxv(AT,q), where A and AT are stored by row (the default). However,
// they would be very slow in MATLAB, since it stores its sparse matrices in
// CSC format. In that case, if A is stored by column and thus accessing
// A(:,j) is efficient, then x = vxm(q,A) = q'*A becomes the dot product
// instead. These two snippets assume the matrices are both in CSC format, and
// thus make more efficient use of MATLAB:
/*
function x = vxm (q,A)
% a pull step: compute x = q'*A where q is a column vector
for j = 1:n
% a dot product of the row vector q' and the jth column of A
x (j) = q' * A (:,j)
end
*/
// If AT is stored by column, then x = mxv(AT,q) is
/*
function x = mxv (AT,q)
% a push step: compute x = AT*q where q is a column vector
for j = 1:n
% a saxpy operation, using the jth column of AT and the scalar q(j)
x = x + AT (:,j) * q (j)
end
*/
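// In GraphBLAS terms, one BFS level computes the same q'*A product under a
// complemented mask. A minimal sketch follows (the semiring and descriptor
// names are illustrative assumptions, not necessarily the calls used below):
/*
// v holds the levels found so far; q is the current frontier
// push step: q<!v> = q'*A, with a boolean OR-AND semiring
GrB_vxm (q, v, NULL, GxB_LOR_LAND_BOOL, q, A, desc_rc) ;
// pull step: q<!v> = AT*q = A'*q, same mask and semiring
GrB_mxv (q, v, NULL, GxB_LOR_LAND_BOOL, AT, q, desc_rc) ;
// desc_rc: a descriptor with the mask complemented and the output replaced
*/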
// In MATLAB, if q is a sparse column vector and A is a sparse matrix, then
// x=A*q does in fact use a saxpy-based method, internally, and x=A'*q uses a
// dot product. You can view the code used internally in MATLAB for its sparse
// matrix multiplication in the SuiteSparse/MATLAB_Tools/SSMULT and SFMULT
// packages, at http://suitesparse.com.
// This raises an interesting puzzle for LAGraph, which is intended to be a
// graph library that can be run on any implementation of GraphBLAS. There are
// no mechanisms in the GraphBLAS C API for LAGraph (or other external packages
// or user applications) to provide hints to GraphBLAS. Likewise, there are no
// query mechanisms where LAGraph can ask GraphBLAS how its matrices might be
// stored (LAGraph asks, "Is A(i,:) fast? Or A(:,j)? Or both?"; the answer
// from GraphBLAS is silence). The GraphBLAS data structure is opaque, and it
// does not answer this query.
// There are two solutions to this puzzle. The most elegant one is for
// GraphBLAS to handle all this internally, and change formats as needed. It
// could choose to store A in both CSR and CSC format, or use an entirely
// different data structure, and it would make the decision between the push or
// pull, at each step of the BFS. This is not a simple task since the API is
// complex. Furthermore, the selection of the data structure for A has
// implications on all other GraphBLAS operations (submatrix assignment and
// extraction, for example).
// However, if A were to be stored in both CSR and CSC format, inside the
// opaque GraphBLAS GrB_Matrix data structure, then LAGraph_bfs_simple would
// become a push-pull BFS.
// The second solution is to allow the user application, or a library such as
// LAGraph, to provide hints to GraphBLAS and to query the GraphBLAS library.
// There are no such features in the GraphBLAS C API.
// SuiteSparse:GraphBLAS takes the second approach: It adds two functions that
// are extensions to the API: GxB_set changes the format (CSR or CSC), and
// GxB_get can query the format. Even with this simplification,
// SuiteSparse:GraphBLAS uses 24 different algorithmic variants inside GrB_mxm
// (per semiring), and selects between them automatically. By default, all of
// its matrices are stored in CSR format (either sparse or hypersparse,
// selected automatically). So if no GxB_* extensions are used, all matrices
// are in CSR format.
// If a GraphBLAS library other than SuiteSparse:GraphBLAS is in use, this
// particular function assumes that its input matrices are in CSR format, or at
// least that A(i,:) and AT(i,:) can be accessed efficiently. With this
// assumption, it is the responsibility of this function to select between
// using a push or a pull, for each step in the BFS.
// The following analysis assumes CSR format, and it assumes that dot-product
// (a pull step) can terminate early via a short-circuit rule with the OR
// monoid, as soon as it encounters a TRUE value. This cuts the time for the
// dot-product. Not all GraphBLAS libraries may use this, but SuiteSparse:
// GraphBLAS does (in version 2.3.0 and later). Early termination cannot be
// done for the saxpy (push step) method.
// The work done by the push method (saxpy) is very predictable. BFS uses a
// complemented mask. There is no simple way to exploit a complemented mask,
// and saxpy has no early termination rule. If the set of nodes in the current
// level is q, the work is nnz(A(q,:)). If d = nnz(A)/n is the average degree,
// this becomes d*nq where nq = length (q):
// pushwork = d*nq
// The work done by the pull (dot product) method is less predictable. It can
// exploit the complemented mask, and so it only computes (n-nvisited) dot
// products, if nvisited is the # of nodes visited so far (in all levels).
// With no early-termination, the dot product will take d * log2 (nq) time,
// assuming that q is large and a binary search is used internally. That is,
// the dot product will scan through the d entries in A(i,:), and do a binary
// search for each entry in q. To account for the higher constant of a binary
// search, log2(nq) is replaced with (3*(1+log2(nq))). With early termination,
// d is too high an estimate. If the nodes are randomly marked, the probability
// of each node being marked is nvisited/n. The expected number of trials until
// success, for a sequence of events with probability p, is 1/p. Thus, the
// expected number of iterations in a dot product before an early termination
// is 1/p = n/(nvisited+1), where the +1 is added to avoid a divide by zero.
// However, it cannot exceed d. Thus, the total work for the dot product
// (pull) method can be estimated as:
// per_dot = min (d, n / (nvisited+1))
// pullwork = (n-nvisited) * per_dot * (3 * (1 + log2 ((double) nq)))
// The above expressions are valid for SuiteSparse:GraphBLAS v2.3.0 and later,
// and may be reasonable for other GraphBLAS implementations. Push or pull
// is selected as the one with the least work.
// TODO: change the formula for v3.2.0
// The push/pull decision requires that both A and AT be passed in, but this
// function can use just one or the other. If only A is passed in and AT is
// NULL, then only vxm(q,A) will be used (a push step if A is CSR, or a pull
// step if A is CSC). If only AT is passed in and A is NULL, then only
// mxv(AT,q) will be used (a pull step if AT is CSR, or a push step if AT is
// CSC).
// In general, while a push-pull strategy is the fastest, a push-only BFS will
// give good performance. In particular, the time to compute AT=A' plus the
// time for the push-pull BFS is typically higher than just a push-only BFS.
// This is why this function does not compute AT=A'. To take advantage of the
// push-pull method, both A and AT must already be available, with the cost to
// construct them amortized across other computations such as this one.
// A pull-only strategy will be *exceedingly* slow.
// The input matrix A must be square. It can be non-binary, but best
// performance will be obtained if it is GrB_BOOL. It can have explicit
// entries equal to zero. These are safely ignored, and are treated as
// non-edges.
// SuiteSparse:GraphBLAS can detect the CSR vs CSC format of its inputs.
// In this case, if both matrices are provided, they must be in the same
// format (both GxB_BY_ROW or both GxB_BY_COL). If the matrices are in CSC
// format, vxm(q,A) is the pull step and mxv(AT,q) is the push step.
// If only one of A or AT is provided, and the result would be a pull-only
// algorithm, an error is returned.
// References:
// Carl Yang, Aydin Buluc, and John D. Owens. 2018. Implementing Push-Pull
// Efficiently in GraphBLAS. In Proceedings of the 47th International
// Conference on Parallel Processing (ICPP 2018). ACM, New York, NY, USA,
// Article 89, 11 pages. DOI: https://doi.org/10.1145/3225058.3225122
// Scott Beamer, Krste Asanovic and David A. Patterson,
// The GAP Benchmark Suite, http://arxiv.org/abs/1508.03619, 2015.
// http://gap.cs.berkeley.edu/
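//
// Example usage (a sketch, with error checking omitted; A and AT are assumed
// to be n-by-n GrB_BOOL adjacency matrices of the same graph, with AT = A'):
//
//      GrB_Vector level = NULL, parent = NULL ;
//      LAGraph_bfs_pushpull (&level, &parent, A, AT, source, 0, false) ;
//      // level(i) = BFS level of node i (the source node is at level 1)
//      // parent(i) = parent of node i in the BFS tree, plus 1
//      GrB_Vector_free (&level) ;
//      GrB_Vector_free (&parent) ;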
#define LAGRAPH_FREE_ALL \
{ \
GrB_Vector_free (&v) ; \
GrB_Vector_free (&t) ; \
GrB_Vector_free (&q) ; \
GrB_Vector_free (&pi) ; \
}
#define LAGRAPH_ERROR(message,info) \
{ \
fprintf (stderr, "LAGraph error: %s\n[%d]\nFile: %s Line: %d\n", \
message, info, __FILE__, __LINE__) ; \
LAGRAPH_FREE_ALL ; \
return (info) ; \
}
#define LAGRAPH_MAX(x,y) (((x) > (y)) ? (x) : (y))
#define LAGRAPH_MIN(x,y) (((x) < (y)) ? (x) : (y))
GrB_Info LAGraph_bfs_pushpull // push-pull BFS, or push-only if AT = NULL
(
GrB_Vector *v_output, // v(i) is the BFS level of node i in the graph
GrB_Vector *pi_output, // pi(i) = p+1 if p is the parent of node i.
// if NULL, the parent is not computed.
GrB_Matrix A, // input graph, treated as if boolean in semiring
GrB_Matrix AT, // transpose of A (optional; push-only if NULL)
int64_t source, // starting node of the BFS
int64_t max_level, // optional limit of # levels to search
bool vsparse // if true, v is expected to be very sparse
) {
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
GrB_Vector q = NULL ; // nodes visited at each level
GrB_Vector v = NULL ; // result vector
GrB_Vector t = NULL ; // temporary vector
GrB_Vector pi = NULL ; // parent vector
if(v_output == NULL || (A == NULL && AT == NULL)) {
// required output argument is missing
LAGRAPH_ERROR("required arguments are NULL", GrB_NULL_POINTER) ;
}
(*v_output) = NULL ;
bool compute_tree = (pi_output != NULL) ;
GrB_Descriptor desc_s = GrB_DESC_S ;
GrB_Descriptor desc_sc = GrB_DESC_SC ;
GrB_Descriptor desc_rc = GrB_DESC_RC ;
GrB_Descriptor desc_r = GrB_DESC_R ;
GrB_Index nrows, ncols, nvalA, ignore, nvals ;
// use A if it is provided; otherwise fall back to AT (A must be square, so
// the dimensions are the same either way)
GrB_Matrix A_or_AT = (A != NULL) ? A : AT ;
GrB_Matrix_nrows(&nrows, A_or_AT) ;
GrB_Matrix_ncols(&ncols, A_or_AT) ;
GrB_Matrix_nvals(&nvalA, A_or_AT) ;
// vxm(q,A) requires A; if only AT is given, use mxv(AT,q) instead
bool use_vxm_with_A = (A != NULL) ;
// push/pull requires both A and AT
bool push_pull = (A != NULL && AT != NULL) ;
if(nrows != ncols) {
// A must be square
LAGRAPH_ERROR("A must be square", GrB_NULL_POINTER) ;
}
//--------------------------------------------------------------------------
// initializations
//--------------------------------------------------------------------------
GrB_Index n = nrows ;
int nthreads = Config_GetOMPThreadCount();
nthreads = LAGRAPH_MIN(n / 4096, nthreads) ;
nthreads = LAGRAPH_MAX(nthreads, 1) ;
// just traverse from the source node
max_level = (max_level <= 0) ? n : LAGRAPH_MIN(n, max_level) ;
// create an empty vector v
GrB_Type int_type = (n > INT32_MAX) ? GrB_INT64 : GrB_INT32 ;
GrB_Vector_new(&v, int_type, n) ;
// make v dense if requested
int64_t vlimit = LAGRAPH_MAX(256, sqrt((double) n)) ;
if(!vsparse) {
// v is expected to have many entries, so convert v to dense.
// If the guess is wrong, v can be made dense later on.
GrB_Vector_assign_UINT64(v, NULL, NULL, 0, GrB_ALL, n, NULL) ;
}
GrB_Semiring first_semiring, second_semiring ;
if(compute_tree) {
// create an integer vector q, and set q(source) to source+1
GrB_Vector_new(&q, int_type, n) ;
GrB_Vector_setElement_UINT64(q, source + 1, source) ;
if(n > INT32_MAX) {
// terminates as soon as it finds any parent; nondeterministic
first_semiring = GxB_ANY_FIRST_INT64 ;
second_semiring = GxB_ANY_SECOND_INT64 ;
} else {
// terminates as soon as it finds any parent; nondeterministic
first_semiring = GxB_ANY_FIRST_INT32 ;
second_semiring = GxB_ANY_SECOND_INT32 ;
}
// create the empty parent vector
GrB_Vector_new(&pi, int_type, n) ;
if(!vsparse) {
// make pi a dense vector of all zeros
GrB_Vector_assign_UINT64(pi, NULL, NULL, 0, GrB_ALL, n, NULL) ;
}
// pi (source) = source+1 denotes a root of the BFS tree
GrB_Vector_setElement_UINT64(pi, source + 1, source) ;
} else {
// create a boolean vector q, and set q(source) to true
GrB_Vector_new(&q, GrB_BOOL, n) ;
GrB_Vector_setElement_BOOL(q, true, source) ;
// terminates as soon as it finds any pair
first_semiring = GxB_ANY_PAIR_BOOL ;
second_semiring = GxB_ANY_PAIR_BOOL ;
}
// average node degree
double d = (n == 0) ? 0 : (((double) nvalA) / (double) n) ;
int64_t nvisited = 0 ; // # nodes visited so far
GrB_Index nq = 1 ; // number of nodes in the current level
//--------------------------------------------------------------------------
// BFS traversal and label the nodes
//--------------------------------------------------------------------------
for(int64_t level = 1 ; ; level++) {
//----------------------------------------------------------------------
// set v to the current level, for all nodes in q
//----------------------------------------------------------------------
// v<q> = level: set v(i) = level for all nodes i in q
GrB_Vector_assign_UINT64(v, q, NULL, level, GrB_ALL, n, desc_s) ;
//----------------------------------------------------------------------
// check if done
//----------------------------------------------------------------------
nvisited += nq ;
if(nq == 0 || nvisited == n || level >= max_level) break ;
//----------------------------------------------------------------------
// check if v should be converted to dense
//----------------------------------------------------------------------
if(vsparse && nvisited > vlimit) {
// Convert v from sparse to dense to speed up the rest of the work.
// If this case is triggered, it would have been faster to pass in
// vsparse = false on input.
// v <!v> = 0
GrB_Vector_assign_UINT64(v, v, NULL, 0, GrB_ALL, n, desc_sc) ;
GrB_Vector_nvals(&ignore, v) ;
if(compute_tree) {
// Convert pi from sparse to dense, to speed up the work.
// pi<!pi> = 0
GrB_Vector_assign_UINT64(pi, pi, NULL, 0, GrB_ALL, n, desc_sc) ;
GrB_Vector_nvals(&ignore, pi) ;
}
vsparse = false ;
}
//----------------------------------------------------------------------
// select push vs pull
//----------------------------------------------------------------------
if(push_pull) {
double pushwork = d * nq ;
double expected = (double) n / (double)(nvisited + 1) ;
double per_dot = LAGRAPH_MIN(d, expected) ;
double binarysearch = (3 * (1 + log2((double) nq))) ;
double pullwork = (n - nvisited) * per_dot * binarysearch ;
use_vxm_with_A = (pushwork < pullwork) ;
}
//----------------------------------------------------------------------
// q = next level of the BFS
//----------------------------------------------------------------------
if(use_vxm_with_A) {
// q'<!v> = q'*A
// this is a push step if A is in CSR format; pull if CSC
GrB_vxm(q, v, NULL, first_semiring, q, A, desc_rc) ;
} else {
// q<!v> = AT*q
// this is a pull step if AT is in CSR format; push if CSC
GrB_mxv(q, v, NULL, second_semiring, AT, q, desc_rc) ;
}
//----------------------------------------------------------------------
// move to next level
//----------------------------------------------------------------------
if(compute_tree) {
//------------------------------------------------------------------
// assign parents
//------------------------------------------------------------------
// q(i) currently contains the parent of node i in tree (off by one
// so it won't have any zero values, for valued mask).
// pi<q> = q
GrB_Vector_assign(pi, q, NULL, q, GrB_ALL, n, desc_s) ;
//------------------------------------------------------------------
// replace q with current node numbers
//------------------------------------------------------------------
// TODO this could be a unaryop
// q(i) = i+1 for all entries in q.
GrB_Index *qi ;
if(n > INT32_MAX) {
int64_t *qx ;
GxB_Vector_export(&q, &int_type, &n, &nq, &qi,
(void **)(&qx), NULL) ;
int nth = LAGRAPH_MIN(nq / (64 * 1024), nthreads) ;
nth = LAGRAPH_MAX(nth, 1) ;
#pragma omp parallel for num_threads(nth) schedule(static)
for(int64_t k = 0 ; k < nq ; k++) {
qx [k] = qi [k] + 1 ;
}
GxB_Vector_import(&q, int_type, n, nq, &qi,
(void **)(&qx), NULL) ;
} else {
int32_t *qx ;
GxB_Vector_export(&q, &int_type, &n, &nq, &qi,
(void **)(&qx), NULL) ;
int nth = LAGRAPH_MIN(nq / (64 * 1024), nthreads) ;
nth = LAGRAPH_MAX(nth, 1) ;
#pragma omp parallel for num_threads(nth) schedule(static)
for(int32_t k = 0 ; k < nq ; k++) {
qx [k] = qi [k] + 1 ;
}
GxB_Vector_import(&q, int_type, n, nq, &qi,
(void **)(&qx), NULL) ;
}
} else {
//------------------------------------------------------------------
// count the nodes in the current level
//------------------------------------------------------------------
GrB_Vector_nvals(&nq, q) ;
}
}
//--------------------------------------------------------------------------
// return the parent vector, if computed
//--------------------------------------------------------------------------
if(compute_tree) {
(*pi_output) = pi ;
pi = NULL ;
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
(*v_output) = v ; // return result
v = NULL ; // set to NULL so LAGRAPH_FREE_ALL doesn't free it
LAGRAPH_FREE_ALL ; // free all workspace (except for result v)
return (GrB_SUCCESS) ;
}
|
GB_unaryop__one_int16_int16.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__one_int16_int16
// op(A') function: GB_tran__one_int16_int16
// C type: int16_t
// A type: int16_t
// cast: ;
// unaryop: cij = 1
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1 ;
// casting
#define GB_CASTING(z, aij) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__one_int16_int16
(
int16_t *Cx, // Cx and Ax may be aliased
int16_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__one_int16_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
test_paje.c
|
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>
#include <math.h>
#include <omp.h>
#include "ParSHUM_solver.h"
#include "ParSHUM_matrix.h"
#include "ParSHUM_dense.h"
#include "ParSHUM_enum.h"
#include "ParSHUM_pivot_list.h"
#include "ParSHUM_auxiliary.h"
int
main(int argc, char **argv)
{
ParSHUM_solver self;
self = ParSHUM_solver_create();
ParSHUM_solver_parse_args(self, argc, argv, 1);
ParSHUM_solver_read_matrix(self);
ParSHUM_solver_init(self);
printf("%d !\n",omp_get_thread_num());
#pragma omp parallel num_threads(self->exe_parms->nb_threads) //proc_bind(spread)
{
ParSHUM_verbose_trace_start_event(self->verbose, 0);
sleep(1);
ParSHUM_verbose_trace_stop_event(self->verbose);
sleep(2);
ParSHUM_verbose_trace_start_event(self->verbose, 1);
sleep(1);
ParSHUM_verbose_trace_stop_event(self->verbose);
}
ParSHUM_solver_finalize(self);
ParSHUM_solver_destroy(self);
return 0;
}
|
dgesv.c
|
/**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgesv.c, normal z -> d, Fri Sep 28 17:38:05 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
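 * Solves a system of linear equations A * X = B with a general n-by-n
 * matrix A, using a tile LU factorization with row pivoting followed by two
 * triangular solves (see plasma_omp_dgesv below). On exit, pB is overwritten
 * by the solution X and ipiv holds the pivot indices.
 *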
******************************************************************************/
int plasma_dgesv(int n, int nrhs,
double *pA, int lda, int *ipiv,
double *pB, int ldb)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
if (n < 0) {
plasma_error("illegal value of n");
return -1;
}
if (nrhs < 0) {
plasma_error("illegal value of nrhs");
return -2;
}
if (lda < imax(1, n)) {
plasma_error("illegal value of lda");
return -4;
}
if (ldb < imax(1, n)) {
plasma_error("illegal value of ldb");
return -7;
}
// quick return
if (imin(n, nrhs) == 0)
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_getrf(plasma, PlasmaRealDouble, n, n);
// Set tiling parameters.
int nb = plasma->nb;
// Initialize barrier.
plasma_barrier_init(&plasma->barrier);
// Create tile matrix.
plasma_desc_t A;
plasma_desc_t B;
int retval;
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
n, n, 0, 0, n, n, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
n, nrhs, 0, 0, n, nrhs, &B);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
plasma_omp_dge2desc(pB, ldb, B, &sequence, &request);
// Call the tile async function.
plasma_omp_dgesv(A, ipiv, B, &sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_ddesc2ge(A, pA, lda, &sequence, &request);
plasma_omp_ddesc2ge(B, pB, ldb, &sequence, &request);
}
// Free matrix A in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
// Return status.
int status = sequence.status;
return status;
}
/***************************************************************************//**
*
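 * Non-blocking, tile-asynchronous variant of plasma_dgesv(): factors A with
 * plasma_pdgetrf, applies the row swaps to B, and performs the two triangular
 * solves. Errors are reported through the sequence and request structures.
 *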
******************************************************************************/
void plasma_omp_dgesv(plasma_desc_t A, int *ipiv,
plasma_desc_t B,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
plasma_error("invalid A");
return;
}
if (plasma_desc_check(B) != PlasmaSuccess) {
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
plasma_error("invalid B");
return;
}
if (sequence == NULL) {
plasma_fatal_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_fatal_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
if (A.n == 0 || B.n == 0)
return;
// Call the parallel functions.
plasma_pdgetrf(A, ipiv, sequence, request);
plasma_pdgeswp(PlasmaRowwise, B, ipiv, 1, sequence, request);
plasma_pdtrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
1.0, A,
B,
sequence, request);
plasma_pdtrsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
1.0, A,
B,
sequence, request);
}
|
GB_binop__atan2_fp32.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__atan2_fp32
// A.*B function (eWiseMult): GB_AemultB__atan2_fp32
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__atan2_fp32
// C+=b function (dense accum): GB_Cdense_accumb__atan2_fp32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__atan2_fp32
// C=scalar+B GB_bind1st__atan2_fp32
// C=scalar+B' GB_bind1st_tran__atan2_fp32
// C=A+scalar GB_bind2nd__atan2_fp32
// C=A'+scalar GB_bind2nd_tran__atan2_fp32
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = atan2f (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = atan2f (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ATAN2 || GxB_NO_FP32 || GxB_NO_ATAN2_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__atan2_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__atan2_fp32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__atan2_fp32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *GB_RESTRICT Cx = (float *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *GB_RESTRICT Cx = (float *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__atan2_fp32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__atan2_fp32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__atan2_fp32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = Bx [p] ;
Cx [p] = atan2f (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__atan2_fp32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = Ax [p] ;
Cx [p] = atan2f (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = atan2f (x, aij) ; \
}
GrB_Info GB_bind1st_tran__atan2_fp32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = atan2f (aij, y) ; \
}
GrB_Info GB_bind2nd_tran__atan2_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
composite.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE %
% C O O MM MM P P O O SS I T E %
% C O O M M M PPPP O O SSS I T EEE %
% C O O M M P O O SS I T E %
% CCCC OOO M M P OOO SSSSS IIIII T EEEEE %
% %
% %
% MagickCore Image Composite Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resample.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p o s i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompositeImage() returns the second image composited onto the first
% at the specified offset, using the specified composite method.
%
% The format of the CompositeImage method is:
%
% MagickBooleanType CompositeImage(Image *image,
% const Image *source_image,const CompositeOperator compose,
% const MagickBooleanType clip_to_self,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the canvas image, modified by the composition
%
% o source_image: the source image.
%
% o compose: This operator affects how the composite is applied to
% the image. The operators and how they are utilized are listed here
% http://www.w3.org/TR/SVG12/#compositing.
%
% o clip_to_self: set to MagickTrue to limit composition to area composed.
%
% o x_offset: the column offset of the composited image.
%
% o y_offset: the row offset of the composited image.
%
% Extra Controls from Image meta-data in 'image' (artifacts)
%
% o "compose:args"
% A string containing extra numerical arguments for specific compose
% methods, generally expressed as a 'geometry' or a comma separated list
% of numbers.
%
% Compose methods needing such arguments include "BlendCompositeOp" and
% "DisplaceCompositeOp".
%
% o exception: return any errors or warnings in this structure.
%
*/
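/*
  Example call (a sketch; 'canvas' and 'overlay' are hypothetical Image
  pointers, with 'overlay' composited over 'canvas' at offset +10+20):

    (void) CompositeImage(canvas,overlay,OverCompositeOp,MagickTrue,10,20,
      exception);
*/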
/*
Composition based on the SVG specification:
A Composition is defined by...
Color Function : f(Sc,Dc) where Sc and Dc are the normalized colors
Blending areas : X = 1 for area of overlap, ie: f(Sc,Dc)
Y = 1 for source preserved
Z = 1 for canvas preserved
Conversion to transparency (then optimized)
Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)
Where...
Sca = Sc*Sa normalized Source color multiplied by Source alpha
Dca = Dc*Da normalized Dest color multiplied by Dest alpha
Dc' = Dca'/Da' the desired color value for this channel.
Da' appears in the following formulas as 'gamma'; it is the resulting alpha value.
Most functions use a blending mode of over (X=1,Y=1,Z=1); this results in
the following optimizations...
gamma = Sa+Da-Sa*Da;
gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma
The above SVG definitions also specify that Mathematical Composition
methods should use an 'Over' blending mode for the Alpha Channel.
It was, however, not applied for the composition modes 'Plus', 'Minus',
and the modulus versions of 'Add' and 'Subtract'.
Mathematical operator changes to be applied from IM v6.7...
1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
'ModulusAdd' and 'ModulusSubtract' for clarity.
2) All mathematical compositions work as per the SVG specification
with regard to blending. This now includes 'ModulusAdd' and
'ModulusSubtract'.
3) When the special channel flag 'sync' (synchronize channel updates)
is turned off (it is enabled by default), mathematical compositions are
only performed on the channels specified, and are applied
independently of each other. In other words the mathematics is
performed as 'pure' mathematical operations, rather than as image
operations.
*/
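/*
  A small worked illustration of the 'over' case above (not from the SVG
  text): with f(Sc,Dc)=Sc and X=Y=Z=1, the general formulas reduce to
     Dca' = Sca + Dca*(1-Sa)
     Da'  = Sa + Da - Sa*Da        (the 'gamma' above)
  so for Sa=0.5 and Da=1.0, gamma = 0.5 + 1.0 - 0.5 = 1.0: compositing over a
  fully opaque canvas leaves it fully opaque.
*/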
static Image *BlendConvolveImage(const Image *image,const char *kernel,
ExceptionInfo *exception)
{
Image
*clone_image,
*convolve_image;
KernelInfo
*kernel_info;
/*
Convolve image with a kernel.
*/
kernel_info=AcquireKernelInfo(kernel,exception);
if (kernel_info == (KernelInfo *) NULL)
return((Image *) NULL);
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
{
kernel_info=DestroyKernelInfo(kernel_info);
return((Image *) NULL);
}
(void) SetImageAlphaChannel(clone_image,OffAlphaChannel,exception);
convolve_image=ConvolveImage(clone_image,kernel_info,exception);
kernel_info=DestroyKernelInfo(kernel_info);
clone_image=DestroyImage(clone_image);
return(convolve_image);
}
static Image *BlendMagnitudeImage(const Image *dx_image,const Image *dy_image,
ExceptionInfo *exception)
{
CacheView
*dx_view,
*dy_view,
*magnitude_view;
Image
*magnitude_image;
MagickBooleanType
status = MagickTrue;
ssize_t
y;
/*
Generate the magnitude between two images.
*/
magnitude_image=CloneImage(dx_image,0,0,MagickTrue,exception);
if (magnitude_image == (Image *) NULL)
return(magnitude_image);
dx_view=AcquireVirtualCacheView(dx_image,exception);
dy_view=AcquireVirtualCacheView(dy_image,exception);
magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(dx_image,magnitude_image,dx_image->rows,1)
#endif
for (y=0; y < (ssize_t) dx_image->rows; y++)
{
const Quantum
*magick_restrict p,
*magick_restrict q;
Quantum
*magick_restrict r;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(dx_view,0,y,dx_image->columns,1,exception);
q=GetCacheViewVirtualPixels(dy_view,0,y,dx_image->columns,1,exception);
r=GetCacheViewAuthenticPixels(magnitude_view,0,y,dx_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
(r == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) dx_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(dx_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(dx_image,i);
PixelTrait traits = GetPixelChannelTraits(dx_image,channel);
PixelTrait dy_traits = GetPixelChannelTraits(dy_image,channel);
if ((traits == UndefinedPixelTrait) ||
(dy_traits == UndefinedPixelTrait) ||
((dy_traits & UpdatePixelTrait) == 0))
continue;
r[i]=ClampToQuantum(hypot((double) p[i],(double)
GetPixelChannel(dy_image,channel,q)));
}
p+=GetPixelChannels(dx_image);
q+=GetPixelChannels(dy_image);
r+=GetPixelChannels(magnitude_image);
}
if (SyncCacheViewAuthenticPixels(magnitude_view,exception) == MagickFalse)
status=MagickFalse;
}
magnitude_view=DestroyCacheView(magnitude_view);
dy_view=DestroyCacheView(dy_view);
dx_view=DestroyCacheView(dx_view);
if (status == MagickFalse)
magnitude_image=DestroyImage(magnitude_image);
return(magnitude_image);
}
static Image *BlendMaxMagnitudeImage(const Image *alpha_image,
const Image *beta_image,const Image *dx_image,const Image *dy_image,
ExceptionInfo *exception)
{
CacheView
*alpha_view,
*beta_view,
*dx_view,
*dy_view,
*magnitude_view;
Image
*magnitude_image;
MagickBooleanType
status = MagickTrue;
ssize_t
y;
/*
Select the larger of two magnitudes.
*/
magnitude_image=CloneImage(alpha_image,0,0,MagickTrue,exception);
if (magnitude_image == (Image *) NULL)
return(magnitude_image);
alpha_view=AcquireVirtualCacheView(alpha_image,exception);
beta_view=AcquireVirtualCacheView(beta_image,exception);
dx_view=AcquireVirtualCacheView(dx_image,exception);
dy_view=AcquireVirtualCacheView(dy_image,exception);
magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(alpha_image,magnitude_image,alpha_image->rows,1)
#endif
for (y=0; y < (ssize_t) alpha_image->rows; y++)
{
const Quantum
*magick_restrict p,
*magick_restrict q,
*magick_restrict r,
*magick_restrict s;
Quantum
*magick_restrict t;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(alpha_view,0,y,alpha_image->columns,1,
exception);
q=GetCacheViewVirtualPixels(beta_view,0,y,alpha_image->columns,1,exception);
r=GetCacheViewVirtualPixels(dx_view,0,y,alpha_image->columns,1,exception);
s=GetCacheViewVirtualPixels(dy_view,0,y,alpha_image->columns,1,exception);
t=GetCacheViewAuthenticPixels(magnitude_view,0,y,alpha_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
(r == (const Quantum *) NULL) || (s == (const Quantum *) NULL) ||
(t == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) alpha_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(alpha_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(alpha_image,i);
PixelTrait traits = GetPixelChannelTraits(alpha_image,channel);
PixelTrait beta_traits = GetPixelChannelTraits(beta_image,channel);
if ((traits == UndefinedPixelTrait) ||
(beta_traits == UndefinedPixelTrait) ||
((beta_traits & UpdatePixelTrait) == 0))
continue;
if (p[i] > GetPixelChannel(beta_image,channel,q))
t[i]=GetPixelChannel(dx_image,channel,r);
else
t[i]=GetPixelChannel(dy_image,channel,s);
}
p+=GetPixelChannels(alpha_image);
q+=GetPixelChannels(beta_image);
r+=GetPixelChannels(dx_image);
s+=GetPixelChannels(dy_image);
t+=GetPixelChannels(magnitude_image);
}
if (SyncCacheViewAuthenticPixels(magnitude_view,exception) == MagickFalse)
status=MagickFalse;
}
magnitude_view=DestroyCacheView(magnitude_view);
dy_view=DestroyCacheView(dy_view);
dx_view=DestroyCacheView(dx_view);
beta_view=DestroyCacheView(beta_view);
alpha_view=DestroyCacheView(alpha_view);
if (status == MagickFalse)
magnitude_image=DestroyImage(magnitude_image);
return(magnitude_image);
}
static Image *BlendSumImage(const Image *alpha_image,const Image *beta_image,
const double attenuate,const double sign,ExceptionInfo *exception)
{
CacheView
*alpha_view,
*beta_view,
*sum_view;
Image
*sum_image;
MagickBooleanType
status = MagickTrue;
ssize_t
y;
/*
Add or subtract and optionally attenuate two images.
*/
sum_image=CloneImage(alpha_image,0,0,MagickTrue,exception);
if (sum_image == (Image *) NULL)
return(sum_image);
alpha_view=AcquireVirtualCacheView(alpha_image,exception);
beta_view=AcquireVirtualCacheView(beta_image,exception);
sum_view=AcquireAuthenticCacheView(sum_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(alpha_image,sum_image,alpha_image->rows,1)
#endif
for (y=0; y < (ssize_t) alpha_image->rows; y++)
{
const Quantum
*magick_restrict p,
*magick_restrict q;
Quantum
*magick_restrict r;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(alpha_view,0,y,alpha_image->columns,1,
exception);
q=GetCacheViewVirtualPixels(beta_view,0,y,alpha_image->columns,1,exception);
r=GetCacheViewAuthenticPixels(sum_view,0,y,alpha_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
(r == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) alpha_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(alpha_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(alpha_image,i);
PixelTrait traits = GetPixelChannelTraits(alpha_image,channel);
PixelTrait beta_traits = GetPixelChannelTraits(beta_image,channel);
if ((traits == UndefinedPixelTrait) ||
(beta_traits == UndefinedPixelTrait) ||
((beta_traits & UpdatePixelTrait) == 0))
continue;
r[i]=ClampToQuantum(attenuate*(p[i]+sign*
GetPixelChannel(beta_image,channel,q)));
}
p+=GetPixelChannels(alpha_image);
q+=GetPixelChannels(beta_image);
r+=GetPixelChannels(sum_image);
}
if (SyncCacheViewAuthenticPixels(sum_view,exception) == MagickFalse)
status=MagickFalse;
}
sum_view=DestroyCacheView(sum_view);
beta_view=DestroyCacheView(beta_view);
alpha_view=DestroyCacheView(alpha_view);
if (status == MagickFalse)
sum_image=DestroyImage(sum_image);
return(sum_image);
}
static Image *BlendDivergentImage(const Image *alpha_image,
const Image *beta_image,ExceptionInfo *exception)
{
#define FreeDivergentResources() \
{ \
if (dy_image != (Image *) NULL) \
dy_image=DestroyImage(dy_image); \
if (dx_image != (Image *) NULL) \
dx_image=DestroyImage(dx_image); \
if (magnitude_beta != (Image *) NULL) \
magnitude_beta=DestroyImage(magnitude_beta); \
if (dy_beta != (Image *) NULL) \
dy_beta=DestroyImage(dy_beta); \
if (dx_beta != (Image *) NULL) \
dx_beta=DestroyImage(dx_beta); \
if (magnitude_alpha != (Image *) NULL) \
magnitude_alpha=DestroyImage(magnitude_alpha); \
if (dy_alpha != (Image *) NULL) \
dy_alpha=DestroyImage(dy_alpha); \
if (dx_alpha != (Image *) NULL) \
dx_alpha=DestroyImage(dx_alpha); \
}
Image
*divergent_image = (Image *) NULL,
*dx_alpha = (Image *) NULL,
*dx_beta = (Image *) NULL,
*dx_divergent = (Image *) NULL,
*dx_image = (Image *) NULL,
*dy_alpha = (Image *) NULL,
*dy_beta = (Image *) NULL,
*dy_divergent = (Image *) NULL,
*dy_image = (Image *) NULL,
*magnitude_alpha = (Image *) NULL,
*magnitude_beta = (Image *) NULL;
/*
Create X and Y gradient images for alpha image and the magnitude.
*/
dx_alpha=BlendConvolveImage(alpha_image,"3x1:-0.5,0.0,0.5",exception);
if (dx_alpha == (Image *) NULL)
{
FreeDivergentResources();
return((Image *) NULL);
}
dy_alpha=BlendConvolveImage(alpha_image,"1x3:-0.5,0.0,0.5",exception);
if (dy_alpha == (Image *) NULL)
{
FreeDivergentResources();
return((Image *) NULL);
}
magnitude_alpha=BlendMagnitudeImage(dx_alpha,dy_alpha,exception);
if (magnitude_alpha == (Image *) NULL)
{
FreeDivergentResources();
return((Image *) NULL);
}
/*
Create X and Y gradient images for beta and the magnitude.
*/
dx_beta=BlendConvolveImage(beta_image,"3x1:-0.5,0.0,0.5",exception);
if (dx_beta == (Image *) NULL)
{
FreeDivergentResources();
return((Image *) NULL);
}
dy_beta=BlendConvolveImage(beta_image,"1x3:-0.5,0.0,0.5",exception);
if (dy_beta == (Image *) NULL)
{
FreeDivergentResources();
return((Image *) NULL);
}
magnitude_beta=BlendMagnitudeImage(dx_beta,dy_beta,exception);
if (magnitude_beta == (Image *) NULL)
{
FreeDivergentResources();
return((Image *) NULL);
}
/*
Select alpha or beta gradient for larger of two magnitudes.
*/
dx_image=BlendMaxMagnitudeImage(magnitude_alpha,magnitude_beta,dx_alpha,
dx_beta,exception);
if (dx_image == (Image *) NULL)
{
FreeDivergentResources();
return((Image *) NULL);
}
dy_image=BlendMaxMagnitudeImage(magnitude_alpha,magnitude_beta,dy_alpha,
dy_beta,exception);
if (dy_image == (Image *) NULL)
{
FreeDivergentResources();
return((Image *) NULL);
}
dx_beta=DestroyImage(dx_beta);
dx_alpha=DestroyImage(dx_alpha);
magnitude_beta=DestroyImage(magnitude_beta);
magnitude_alpha=DestroyImage(magnitude_alpha);
/*
Create divergence of gradients dx and dy and divide by 4 as guide image.
*/
dx_divergent=BlendConvolveImage(dx_image,"3x1:-0.5,0.0,0.5",exception);
if (dx_divergent == (Image *) NULL)
{
FreeDivergentResources();
return((Image *) NULL);
}
dy_divergent=BlendConvolveImage(dy_image,"1x3:-0.5,0.0,0.5",exception);
if (dy_divergent == (Image *) NULL)
{
FreeDivergentResources();
return((Image *) NULL);
}
divergent_image=BlendSumImage(dx_divergent,dy_divergent,0.25,1.0,exception);
dy_divergent=DestroyImage(dy_divergent);
dx_divergent=DestroyImage(dx_divergent);
if (divergent_image == (Image *) NULL)
{
FreeDivergentResources();
return((Image *) NULL);
}
FreeDivergentResources();
return(divergent_image);
}
static MagickBooleanType BlendMaskAlphaChannel(Image *image,
const Image *mask_image,ExceptionInfo *exception)
{
CacheView
*image_view,
*mask_view;
MagickBooleanType
status = MagickTrue;
ssize_t
y;
/*
Threshold the alpha channel.
*/
if (SetImageAlpha(image,OpaqueAlpha,exception) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
mask_view=AcquireVirtualCacheView(mask_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(mask_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
alpha = GetPixelAlpha(mask_image,p);
ssize_t
i = GetPixelChannelOffset(image,AlphaPixelChannel);
if (fabs((double) alpha) >= MagickEpsilon)
q[i]=(Quantum) 0;
p+=GetPixelChannels(mask_image);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
mask_view=DestroyCacheView(mask_view);
image_view=DestroyCacheView(image_view);
return(status);
}
static Image *BlendMeanImage(Image *image,const Image *mask_image,
ExceptionInfo *exception)
{
CacheView
*alpha_view,
*mask_view,
*mean_view;
double
mean[MaxPixelChannels];
Image
*mean_image;
MagickBooleanType
status = MagickTrue;
ssize_t
j,
y;
/*
Compute the mean of the image.
*/
(void) memset(mean,0,MaxPixelChannels*sizeof(*mean));
alpha_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
p=GetCacheViewVirtualPixels(alpha_view,0,y,image->columns,1,
exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
mean[i]+=QuantumScale*p[i];
}
p+=GetPixelChannels(image);
}
}
alpha_view=DestroyCacheView(alpha_view);
if (y < (ssize_t) image->rows)
return((Image *) NULL);
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
mean[j]=(double) QuantumRange*mean[j]/image->columns/
image->rows;
/*
Replace any unmasked pixels with the mean pixel.
*/
mean_image=CloneImage(image,0,0,MagickTrue,exception);
if (mean_image == (Image *) NULL)
return(mean_image);
mask_view=AcquireVirtualCacheView(mask_image,exception);
mean_view=AcquireAuthenticCacheView(mean_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(mask_image,mean_image,mean_image->rows,1)
#endif
for (y=0; y < (ssize_t) mean_image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(mask_view,0,y,mean_image->columns,1,exception);
q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) mean_image->columns; x++)
{
Quantum
alpha = GetPixelAlpha(mask_image,p),
mask = GetPixelReadMask(mask_image,p);
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(mean_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(mean_image,i);
PixelTrait traits = GetPixelChannelTraits(mean_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if (mask <= (QuantumRange/2))
q[i]=(Quantum) 0;
else
if (fabs((double) alpha) >= MagickEpsilon)
q[i]=ClampToQuantum(mean[i]);
}
p+=GetPixelChannels(mask_image);
q+=GetPixelChannels(mean_image);
}
if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse)
status=MagickFalse;
}
mask_view=DestroyCacheView(mask_view);
mean_view=DestroyCacheView(mean_view);
if (status == MagickFalse)
mean_image=DestroyImage(mean_image);
return(mean_image);
}
static MagickBooleanType BlendRMSEResidual(const Image *alpha_image,
const Image *beta_image,double *residual,ExceptionInfo *exception)
{
CacheView
*alpha_view,
*beta_view;
double
area = 0.0;
MagickBooleanType
status = MagickTrue;
size_t
columns = MagickMax(alpha_image->columns,beta_image->columns),
rows = MagickMax(alpha_image->rows,beta_image->rows);
ssize_t
y;
*residual=0.0;
alpha_view=AcquireVirtualCacheView(alpha_image,exception);
beta_view=AcquireVirtualCacheView(beta_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(alpha_image,alpha_image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
const Quantum
*magick_restrict p,
*magick_restrict q;
double
channel_residual;
size_t
local_area = 0;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(alpha_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(beta_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
channel_residual=0.0;
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
ssize_t
i;
if ((GetPixelReadMask(alpha_image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(beta_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(alpha_image);
q+=GetPixelChannels(beta_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(alpha_image,p);
Da=QuantumScale*GetPixelAlpha(beta_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(alpha_image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(alpha_image,i);
PixelTrait traits = GetPixelChannelTraits(alpha_image,channel);
PixelTrait beta_traits = GetPixelChannelTraits(beta_image,channel);
if ((traits == UndefinedPixelTrait) ||
(beta_traits == UndefinedPixelTrait) ||
((beta_traits & UpdatePixelTrait) == 0))
continue;
if (channel == AlphaPixelChannel)
distance=QuantumScale*(p[i]-GetPixelChannel(beta_image,channel,q));
else
distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(beta_image,channel,
q));
channel_residual+=distance*distance;
}
local_area++;
p+=GetPixelChannels(alpha_image);
q+=GetPixelChannels(beta_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_BlendRMSEResidual)
#endif
{
area+=local_area;
*residual+=channel_residual;
}
}
area=PerceptibleReciprocal(area);
beta_view=DestroyCacheView(beta_view);
alpha_view=DestroyCacheView(alpha_view);
*residual=sqrt(*residual*area/(double) GetImageChannels(alpha_image));
return(status);
}
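/*
CompositeHCL() converts an RGB triplet to hue, chroma, and luma: chroma is
max(r,g,b)-min(r,g,b), hue is the hexagonal hue angle scaled to [0,1), and
luma uses the Rec. 601 weights 0.298839, 0.586811, and 0.114350.
*/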
static void CompositeHCL(const MagickRealType red,const MagickRealType green,
const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma,
MagickRealType *luma)
{
MagickRealType
b,
c,
g,
h,
max,
r;
/*
Convert RGB to HCL colorspace.
*/
assert(hue != (MagickRealType *) NULL);
assert(chroma != (MagickRealType *) NULL);
assert(luma != (MagickRealType *) NULL);
r=red;
g=green;
b=blue;
max=MagickMax(r,MagickMax(g,b));
c=max-(MagickRealType) MagickMin(r,MagickMin(g,b));
h=0.0;
if (c == 0)
h=0.0;
else
if (red == max)
h=fmod((g-b)/c+6.0,6.0);
else
if (green == max)
h=((b-r)/c)+2.0;
else
if (blue == max)
h=((r-g)/c)+4.0;
*hue=(h/6.0);
*chroma=QuantumScale*c;
*luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
}
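/*
CompositeOverImage() is the fast path for the Porter-Duff 'over' operator:
alpha' = Sa+Da-Sa*Da and Dca' = Sca+Dca*(1.0-Sa), applied channel by channel;
channels flagged CopyPixelTrait are copied directly from the source.
*/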
static MagickBooleanType CompositeOverImage(Image *image,
const Image *source_image,const MagickBooleanType clip_to_self,
const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"
CacheView
*image_view,
*source_view;
const char
*value;
MagickBooleanType
clamp,
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Composite image.
*/
status=MagickTrue;
progress=0;
clamp=MagickTrue;
value=GetImageArtifact(image,"compose:clamp");
if (value != (const char *) NULL)
clamp=IsStringTrue(value);
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*pixels;
PixelInfo
canvas_pixel,
source_pixel;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
if (clip_to_self != MagickFalse)
{
if (y < y_offset)
continue;
if ((y-y_offset) >= (ssize_t) source_image->rows)
continue;
}
/*
If pixels is NULL, y is outside overlay region.
*/
pixels=(Quantum *) NULL;
p=(Quantum *) NULL;
if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
{
p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
source_image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixels=p;
if (x_offset < 0)
p-=x_offset*(ssize_t) GetPixelChannels(source_image);
}
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&canvas_pixel);
GetPixelInfo(source_image,&source_pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
MagickRealType
alpha,
Da,
Dc,
Dca,
Sa,
Sc,
Sca;
ssize_t
i;
size_t
channels;
if (clip_to_self != MagickFalse)
{
if (x < x_offset)
{
q+=GetPixelChannels(image);
continue;
}
if ((x-x_offset) >= (ssize_t) source_image->columns)
break;
}
if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
((x-x_offset) >= (ssize_t) source_image->columns))
{
Quantum
source[MaxPixelChannels];
/*
Virtual composite:
Sc: source color.
Dc: canvas color.
*/
(void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
exception);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) q[i];
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
}
q+=GetPixelChannels(image);
continue;
}
/*
Authentic composite:
Sa: normalized source alpha.
Da: normalized canvas alpha.
*/
Sa=QuantumScale*GetPixelAlpha(source_image,p);
Da=QuantumScale*GetPixelAlpha(image,q);
alpha=Sa+Da-Sa*Da;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((source_traits == UndefinedPixelTrait) &&
(channel != AlphaPixelChannel))
continue;
if (channel == AlphaPixelChannel)
{
/*
Set alpha channel.
*/
pixel=QuantumRange*alpha;
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
continue;
}
/*
Sc: source color.
Dc: canvas color.
*/
Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
Dc=(MagickRealType) q[i];
if ((traits & CopyPixelTrait) != 0)
{
/*
Copy channel.
*/
q[i]=ClampToQuantum(Sc);
continue;
}
/*
Porter-Duff compositions:
Sca: source normalized color multiplied by alpha.
Dca: normalized canvas color multiplied by alpha.
*/
Sca=QuantumScale*Sa*Sc;
Dca=QuantumScale*Da*Dc;
gamma=PerceptibleReciprocal(alpha);
pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
}
p+=GetPixelChannels(source_image);
channels=GetPixelChannels(source_image);
if (p >= (pixels+channels*source_image->columns))
p=pixels;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
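/*
HCLComposite() is the inverse of CompositeHCL(): it maps hue, chroma, and
luma back to RGB by selecting the hue sextant and adding the luma correction
m = luma-(0.298839*r+0.586811*g+0.114350*b).
*/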
static void HCLComposite(const MagickRealType hue,const MagickRealType chroma,
const MagickRealType luma,MagickRealType *red,MagickRealType *green,
MagickRealType *blue)
{
MagickRealType
b,
c,
g,
h,
m,
r,
x;
/*
Convert HCL to RGB colorspace.
*/
assert(red != (MagickRealType *) NULL);
assert(green != (MagickRealType *) NULL);
assert(blue != (MagickRealType *) NULL);
h=6.0*hue;
c=chroma;
x=c*(1.0-fabs(fmod(h,2.0)-1.0));
r=0.0;
g=0.0;
b=0.0;
if ((0.0 <= h) && (h < 1.0))
{
r=c;
g=x;
}
else
if ((1.0 <= h) && (h < 2.0))
{
r=x;
g=c;
}
else
if ((2.0 <= h) && (h < 3.0))
{
g=c;
b=x;
}
else
if ((3.0 <= h) && (h < 4.0))
{
g=x;
b=c;
}
else
if ((4.0 <= h) && (h < 5.0))
{
r=x;
b=c;
}
else
if ((5.0 <= h) && (h < 6.0))
{
r=c;
b=x;
}
m=luma-(0.298839*r+0.586811*g+0.114350*b);
*red=QuantumRange*(r+m);
*green=QuantumRange*(g+m);
*blue=QuantumRange*(b+m);
}
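/*
SaliencyBlendImage() blends the source into the image by iterative relaxation
(a gradient-domain style blend): each pass convolves the current estimate
with a 4-neighbor averaging kernel, subtracts the divergence image, and
measures the RMSE against the previous iterate, stopping once the residual
drops below residual_threshold or 'iterations' passes have run.  The relaxed
result is then composited over the background at the given offset.
*/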
static MagickBooleanType SaliencyBlendImage(Image *image,
const Image *source_image,const ssize_t x_offset,const ssize_t y_offset,
const double iterations,const double residual_threshold,const size_t tick,
ExceptionInfo *exception)
{
Image
*crop_image,
*divergent_image,
*relax_image,
*residual_image = (Image *) NULL;
KernelInfo
*kernel_info;
MagickBooleanType
status = MagickTrue,
verbose = MagickFalse;
RectangleInfo
crop_info = {
source_image->columns,
source_image->rows,
x_offset,
y_offset
};
ssize_t
i;
/*
Saliency blend composite operator.
*/
crop_image=CropImage(image,&crop_info,exception);
if (crop_image == (Image *) NULL)
return(MagickFalse);
divergent_image=BlendDivergentImage(source_image,crop_image,exception);
if (divergent_image == (Image *) NULL)
{
crop_image=DestroyImage(crop_image);
return(MagickFalse);
}
(void) ResetImagePage(crop_image,"0x0+0+0");
relax_image=BlendMeanImage(crop_image,source_image,exception);
crop_image=DestroyImage(crop_image);
if (relax_image == (Image *) NULL)
{
divergent_image=DestroyImage(divergent_image);
return(MagickFalse);
}
residual_image=CloneImage(relax_image,0,0,MagickTrue,exception);
if (residual_image == (Image *) NULL)
{
divergent_image=DestroyImage(divergent_image);
relax_image=DestroyImage(relax_image);
return(MagickFalse);
}
/*
Convolve the relaxed image to blur the area of interest.
*/
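/*
The 3x3 kernel below weights only the four edge neighbors at 0.25 each (the
center and corners are zero), so each convolution pass is one Jacobi-style
relaxation step.
*/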
kernel_info=AcquireKernelInfo("3x3:0,0.25,0,0.25,0,0.25,0,0.25,0",exception);
if (kernel_info == (KernelInfo *) NULL)
{
divergent_image=DestroyImage(divergent_image);
residual_image=DestroyImage(residual_image);
relax_image=DestroyImage(relax_image);
return(MagickFalse);
}
verbose=IsStringTrue(GetImageArtifact(image,"verbose"));
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr,"saliency blending:\n");
for (i=0; i < (ssize_t) iterations; i++)
{
double
residual = 1.0;
Image
*convolve_image,
*sum_image;
convolve_image=ConvolveImage(relax_image,kernel_info,exception);
if (convolve_image == (Image *) NULL)
break;
relax_image=DestroyImage(relax_image);
relax_image=convolve_image;
sum_image=BlendSumImage(relax_image,divergent_image,1.0,-1.0,exception);
if (sum_image == (Image *) NULL)
break;
relax_image=DestroyImage(relax_image);
relax_image=sum_image;
status=BlendRMSEResidual(relax_image,residual_image,&residual,exception);
if (status == MagickFalse)
break;
if ((verbose != MagickFalse) && ((i % MagickMax(tick,1)) == 0))
(void) FormatLocaleFile(stderr," %g: %g\n",(double) i,(double) residual);
if (residual < residual_threshold)
{
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr," %g: %g\n",(double) i,(double)
residual);
break;
}
residual_image=DestroyImage(residual_image);
residual_image=CloneImage(relax_image,0,0,MagickTrue,exception);
if (residual_image == (Image *) NULL)
break;
}
kernel_info=DestroyKernelInfo(kernel_info);
divergent_image=DestroyImage(divergent_image);
residual_image=DestroyImage(residual_image);
/*
Composite relaxed over the background image.
*/
status=CompositeOverImage(image,relax_image,MagickTrue,x_offset,y_offset,
exception);
relax_image=DestroyImage(relax_image);
return(status);
}
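/*
SeamlessBlendImage() implements a seamless-clone style blend: the difference
between the cropped background and the source is relaxed into a smooth
correction field, re-imposing the alpha-masked boundary values each pass,
until the RMSE residual falls below residual_threshold; the correction is
then added to the source and the result composited over the background.
*/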
static MagickBooleanType SeamlessBlendImage(Image *image,
const Image *source_image,const ssize_t x_offset,const ssize_t y_offset,
const double iterations,const double residual_threshold,const size_t tick,
ExceptionInfo *exception)
{
Image
*crop_image,
*foreground_image,
*mean_image,
*relax_image,
*residual_image,
*sum_image;
KernelInfo
*kernel_info;
MagickBooleanType
status = MagickTrue,
verbose = MagickFalse;
RectangleInfo
crop_info = {
source_image->columns,
source_image->rows,
x_offset,
y_offset
};
ssize_t
i;
/*
Seamless blend composite operator.
*/
crop_image=CropImage(image,&crop_info,exception);
if (crop_image == (Image *) NULL)
return(MagickFalse);
(void) ResetImagePage(crop_image,"0x0+0+0");
sum_image=BlendSumImage(crop_image,source_image,1.0,-1.0,exception);
crop_image=DestroyImage(crop_image);
if (sum_image == (Image *) NULL)
return(MagickFalse);
mean_image=BlendMeanImage(sum_image,source_image,exception);
sum_image=DestroyImage(sum_image);
if (mean_image == (Image *) NULL)
return(MagickFalse);
relax_image=CloneImage(mean_image,0,0,MagickTrue,exception);
if (relax_image == (Image *) NULL)
{
mean_image=DestroyImage(mean_image);
return(MagickFalse);
}
status=BlendMaskAlphaChannel(mean_image,source_image,exception);
if (status == MagickFalse)
{
relax_image=DestroyImage(relax_image);
mean_image=DestroyImage(mean_image);
return(MagickFalse);
}
residual_image=CloneImage(relax_image,0,0,MagickTrue,exception);
if (residual_image == (Image *) NULL)
{
relax_image=DestroyImage(relax_image);
mean_image=DestroyImage(mean_image);
return(MagickFalse);
}
/*
Convolve the relaxed image to blur the area of interest.
*/
kernel_info=AcquireKernelInfo("3x3:0,0.25,0,0.25,0,0.25,0,0.25,0",exception);
if (kernel_info == (KernelInfo *) NULL)
{
residual_image=DestroyImage(residual_image);
relax_image=DestroyImage(relax_image);
mean_image=DestroyImage(mean_image);
return(MagickFalse);
}
verbose=IsStringTrue(GetImageArtifact(image,"verbose"));
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr,"seamless blending:\n");
for (i=0; i < (ssize_t) iterations; i++)
{
double
residual = 1.0;
Image
*convolve_image;
convolve_image=ConvolveImage(relax_image,kernel_info,exception);
if (convolve_image == (Image *) NULL)
break;
relax_image=DestroyImage(relax_image);
relax_image=convolve_image;
status=CompositeOverImage(relax_image,mean_image,MagickTrue,0,0,exception);
if (status == MagickFalse)
break;
status=BlendRMSEResidual(relax_image,residual_image,&residual,exception);
if (status == MagickFalse)
break;
if ((verbose != MagickFalse) && ((i % MagickMax(tick,1)) == 0))
(void) FormatLocaleFile(stderr," %g: %g\n",(double) i,(double) residual);
if (residual < residual_threshold)
{
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr," %g: %g\n",(double) i,(double)
residual);
break;
}
if (residual_image != (Image *) NULL)
residual_image=DestroyImage(residual_image);
residual_image=CloneImage(relax_image,0,0,MagickTrue,exception);
if (residual_image == (Image *) NULL)
break;
}
kernel_info=DestroyKernelInfo(kernel_info);
mean_image=DestroyImage(mean_image);
residual_image=DestroyImage(residual_image);
/*
Composite the foreground image over the background image.
*/
foreground_image=BlendSumImage(source_image,relax_image,1.0,1.0,exception);
relax_image=DestroyImage(relax_image);
if (foreground_image == (Image *) NULL)
return(MagickFalse);
(void) SetImageMask(foreground_image,ReadPixelMask,(const Image *) NULL,
exception);
status=CompositeOverImage(image,foreground_image,MagickTrue,x_offset,y_offset,
exception);
foreground_image=DestroyImage(foreground_image);
return(status);
}
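/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o m p o s i t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CompositeImage() returns the second image composited onto the first at the
%  specified offset, using the specified composite method.
%
%  The format of the CompositeImage method is:
%
%      MagickBooleanType CompositeImage(Image *image,const Image *composite,
%        const CompositeOperator compose,const MagickBooleanType clip_to_self,
%        const ssize_t x_offset,const ssize_t y_offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the canvas image, modified by the composition.
%
%    o composite: the source (composite) image.
%
%    o compose: the composite operator.
%
%    o clip_to_self: restrict the composition to pixels inside the overlaid
%      region.
%
%    o x_offset: the column offset of the composited image.
%
%    o y_offset: the row offset of the composited image.
%
%    o exception: return any errors or warnings in this structure.
%
*/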
MagickExport MagickBooleanType CompositeImage(Image *image,
const Image *composite,const CompositeOperator compose,
const MagickBooleanType clip_to_self,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"
CacheView
*source_view,
*image_view;
const char
*value;
GeometryInfo
geometry_info;
Image
*canvas_image,
*source_image;
MagickBooleanType
clamp,
compose_sync,
status;
MagickOffsetType
progress;
MagickRealType
amount,
canvas_dissolve,
midpoint,
percent_luma,
percent_chroma,
source_dissolve,
threshold;
MagickStatusType
flags;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(composite != (Image *) NULL);
assert(composite->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
source_image=CloneImage(composite,0,0,MagickTrue,exception);
if (source_image == (const Image *) NULL)
return(MagickFalse);
(void) SetImageColorspace(source_image,image->colorspace,exception);
if ((compose == OverCompositeOp) || (compose == SrcOverCompositeOp))
{
status=CompositeOverImage(image,source_image,clip_to_self,x_offset,
y_offset,exception);
source_image=DestroyImage(source_image);
return(status);
}
amount=0.5;
canvas_image=(Image *) NULL;
canvas_dissolve=1.0;
clamp=MagickTrue;
value=GetImageArtifact(image,"compose:clamp");
if (value != (const char *) NULL)
clamp=IsStringTrue(value);
compose_sync=MagickTrue;
value=GetImageArtifact(image,"compose:sync");
if (value != (const char *) NULL)
compose_sync=IsStringTrue(value);
SetGeometryInfo(&geometry_info);
percent_luma=100.0;
percent_chroma=100.0;
source_dissolve=1.0;
threshold=0.05f;
switch (compose)
{
case CopyCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
if ((source_image->alpha_trait == UndefinedPixelTrait) &&
(image->alpha_trait != UndefinedPixelTrait))
(void) SetImageAlphaChannel(source_image,OpaqueAlphaChannel,exception);
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*p;
Quantum
*q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
ssize_t
i;
if (GetPixelReadMask(source_image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(source_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(source_image,i);
PixelTrait source_traits = GetPixelChannelTraits(source_image,
channel);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((source_traits == UndefinedPixelTrait) ||
(traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType)
y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case IntensityCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*p;
Quantum
*q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (GetPixelReadMask(source_image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
SetPixelAlpha(image,clamp != MagickFalse ?
ClampPixel(GetPixelIntensity(source_image,p)) :
ClampToQuantum(GetPixelIntensity(source_image,p)),q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType)
y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case CopyAlphaCompositeOp:
case ChangeMaskCompositeOp:
{
/*
Modify canvas outside the overlaid region and require an alpha
channel to exist, to add transparency.
*/
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case BlurCompositeOp:
{
CacheView
*canvas_view;
double
angle_range,
angle_start,
height,
width;
PixelInfo
pixel;
ResampleFilter
*resample_filter;
SegmentInfo
blur;
/*
Blur Image by resampling dictated by an overlay gradient map:
X = red_channel; Y = green_channel; compose:args =
x_scale[,y_scale[,angle]].
*/
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
/*
Gather the maximum blur sigma values from user.
*/
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (const char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & WidthValue) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"InvalidSetting","'%s' '%s'","compose:args",value);
source_image=DestroyImage(source_image);
canvas_image=DestroyImage(canvas_image);
return(MagickFalse);
}
/*
The user's input sigma now needs to be converted to the EWA ellipse size.
The filter defaults to a sigma of 0.5, so to match the user's input the
ellipse size needs to be doubled.
*/
width=2.0*geometry_info.rho;
height=width;
if ((flags & HeightValue) != 0)
height=2.0*geometry_info.sigma;
/*
Default the unrotated ellipse width and height axis vectors.
*/
blur.x1=width;
blur.x2=0.0;
blur.y1=0.0;
blur.y2=height;
if ((flags & XValue) != 0 )
{
MagickRealType
angle;
/*
Rotate vectors if a rotation angle is given.
*/
angle=DegreesToRadians(geometry_info.xi);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
angle_start=0.0;
angle_range=0.0;
if ((flags & YValue) != 0 )
{
/*
Set an angle range and calculate the angle in the loop.
*/
angle_start=DegreesToRadians(geometry_info.xi);
angle_range=DegreesToRadians(geometry_info.psi)-angle_start;
}
/*
Set up a Gaussian cylindrical filter for EWA blurring.  Because the
minimum ellipse radius of support is 1.0, the EWA algorithm can only
produce a minimum blur of 0.5 for a Gaussian (support=2.0).  This means
that even 'no blur' will still be a little blurry!  The solution (which
also prevents any user expert filter settings from interfering) is to set
our own filter settings here and restore them afterwards.
*/
resample_filter=AcquireResampleFilter(image,exception);
SetResampleFilter(resample_filter,GaussianFilter);
/*
Perform the variable blurring of each pixel in image.
*/
GetPixelInfo(image,&pixel);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
if (fabs(angle_range) > MagickEpsilon)
{
MagickRealType
angle;
angle=angle_start+angle_range*QuantumScale*
GetPixelBlue(source_image,p);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
ScaleResampleFilter(resample_filter,
blur.x1*QuantumScale*GetPixelRed(source_image,p),
blur.y1*QuantumScale*GetPixelGreen(source_image,p),
blur.x2*QuantumScale*GetPixelRed(source_image,p),
blur.y2*QuantumScale*GetPixelGreen(source_image,p) );
(void) ResamplePixelColor(resample_filter,(double) x_offset+x,
(double) y_offset+y,&pixel,exception);
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
resample_filter=DestroyResampleFilter(resample_filter);
source_view=DestroyCacheView(source_view);
canvas_view=DestroyCacheView(canvas_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DisplaceCompositeOp:
case DistortCompositeOp:
{
CacheView
*canvas_view;
MagickRealType
horizontal_scale,
vertical_scale;
PixelInfo
pixel;
PointInfo
center,
offset;
/*
Displace/Distort based on overlay gradient map:
X = red_channel; Y = green_channel;
compose:args = x_scale[,y_scale[,center.x,center.y]]
*/
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
SetGeometryInfo(&geometry_info);
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & (WidthValue | HeightValue)) == 0 )
{
if ((flags & AspectValue) == 0)
{
horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0;
vertical_scale=(MagickRealType) (source_image->rows-1)/2.0;
}
else
{
horizontal_scale=(MagickRealType) (image->columns-1)/2.0;
vertical_scale=(MagickRealType) (image->rows-1)/2.0;
}
}
else
{
horizontal_scale=geometry_info.rho;
vertical_scale=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
if ((flags & AspectValue) == 0)
{
horizontal_scale*=(source_image->columns-1)/200.0;
vertical_scale*=(source_image->rows-1)/200.0;
}
else
{
horizontal_scale*=(image->columns-1)/200.0;
vertical_scale*=(image->rows-1)/200.0;
}
}
if ((flags & HeightValue) == 0)
vertical_scale=horizontal_scale;
}
/*
Determine the fixed center point for an absolute distortion map.
Absolute distort == displace the offset relative to a fixed absolute
point, selected according to the +X+Y user inputs:
default = center of the overlay image
arg flag '!' = locations/percentages relative to the background image
*/
center.x=(MagickRealType) x_offset;
center.y=(MagickRealType) y_offset;
if (compose == DistortCompositeOp)
{
if ((flags & XValue) == 0)
if ((flags & AspectValue) != 0)
center.x=(MagickRealType) ((image->columns-1)/2.0);
else
center.x=(MagickRealType) (x_offset+(source_image->columns-1)/
2.0);
else
if ((flags & AspectValue) != 0)
center.x=geometry_info.xi;
else
center.x=(MagickRealType) (x_offset+geometry_info.xi);
if ((flags & YValue) == 0)
if ((flags & AspectValue) != 0)
center.y=(MagickRealType) ((image->rows-1)/2.0);
else
center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0);
else
if ((flags & AspectValue) != 0)
center.y=geometry_info.psi;
else
center.y=(MagickRealType) (y_offset+geometry_info.psi);
}
/*
Shift the pixel offset point as defined by the provided
displacement/distortion map -- like a lens...
*/
GetPixelInfo(image,&pixel);
image_view=AcquireVirtualCacheView(image,exception);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
/*
Displace the offset.
*/
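/*
offset = center + scale*(channel-midpoint)/midpoint, where midpoint is
(QuantumRange+1)/2; the current x (or y) is added for a relative
displacement map (DisplaceCompositeOp) but not for an absolute distortion
map (DistortCompositeOp).
*/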
offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ?
x : 0);
offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ?
y : 0);
status=InterpolatePixelInfo(image,image_view,
UndefinedInterpolatePixel,(double) offset.x,(double) offset.y,
&pixel,exception);
if (status == MagickFalse)
break;
/*
Mask with the 'invalid pixel mask' in alpha channel.
*/
pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)*
(QuantumScale*GetPixelAlpha(source_image,p));
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
if (x < (ssize_t) source_image->columns)
break;
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
canvas_view=DestroyCacheView(canvas_view);
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DissolveCompositeOp:
{
/*
Geometry arguments to dissolve factors.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0;
if ((source_dissolve-MagickEpsilon) < 0.0)
source_dissolve=0.0;
if ((source_dissolve+MagickEpsilon) > 1.0)
{
canvas_dissolve=2.0-source_dissolve;
source_dissolve=1.0;
}
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
if ((canvas_dissolve-MagickEpsilon) < 0.0)
canvas_dissolve=0.0;
}
break;
}
case BlendCompositeOp:
{
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0-source_dissolve;
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
}
break;
}
case SaliencyBlendCompositeOp:
{
double
residual_threshold = 0.0002,
iterations = 400.0;
size_t
tick = 100;
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
iterations=geometry_info.rho;
if ((flags & SigmaValue) != 0)
residual_threshold=geometry_info.sigma;
if ((flags & XiValue) != 0)
tick=(size_t) geometry_info.xi;
}
status=SaliencyBlendImage(image,composite,x_offset,y_offset,iterations,
residual_threshold,tick,exception);
source_image=DestroyImage(source_image);
return(status);
}
case SeamlessBlendCompositeOp:
{
double
residual_threshold = 0.0002,
iterations = 400.0;
size_t
tick = 100;
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
iterations=geometry_info.rho;
if ((flags & SigmaValue) != 0)
residual_threshold=geometry_info.sigma;
if ((flags & XiValue) != 0)
tick=(size_t) geometry_info.xi;
}
status=SeamlessBlendImage(image,composite,x_offset,y_offset,iterations,
residual_threshold,tick,exception);
source_image=DestroyImage(source_image);
return(status);
}
case MathematicsCompositeOp:
{
/*
Just collect the values from the "compose:args" setting; unused values
are set to zero automagically.  Arguments are normally a comma-separated
list, so this should probably be changed to a general comma-list parser
(with a minimum number of values).
*/
SetGeometryInfo(&geometry_info);
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
if (flags == NoValue)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidGeometry","`%s'",value);
}
break;
}
case ModulateCompositeOp:
{
/*
Determine the luma and chroma scale.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
percent_luma=geometry_info.rho;
if ((flags & SigmaValue) != 0)
percent_chroma=geometry_info.sigma;
}
break;
}
case ThresholdCompositeOp:
{
/*
Determine the amount and threshold.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
amount=geometry_info.rho;
threshold=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
threshold=0.05f;
}
threshold*=QuantumRange;
break;
}
default:
break;
}
/*
Composite image.
*/
status=MagickTrue;
progress=0;
midpoint=((MagickRealType) QuantumRange+1.0)/2;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*pixels;
MagickRealType
blue,
chroma,
green,
hue,
luma,
red;
PixelInfo
canvas_pixel,
source_pixel;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
if (clip_to_self != MagickFalse)
{
if (y < y_offset)
continue;
if ((y-y_offset) >= (ssize_t) source_image->rows)
continue;
}
/*
If pixels is NULL, y is outside overlay region.
*/
pixels=(Quantum *) NULL;
p=(Quantum *) NULL;
if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
{
p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
source_image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixels=p;
if (x_offset < 0)
p-=x_offset*(ssize_t) GetPixelChannels(source_image);
}
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
hue=0.0;
chroma=0.0;
luma=0.0;
GetPixelInfo(image,&canvas_pixel);
GetPixelInfo(source_image,&source_pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
MagickRealType
alpha,
Da,
Dc,
Dca,
DcaDa,
Sa,
SaSca,
Sc,
Sca;
ssize_t
i;
size_t
channels;
if (clip_to_self != MagickFalse)
{
if (x < x_offset)
{
q+=GetPixelChannels(image);
continue;
}
if ((x-x_offset) >= (ssize_t) source_image->columns)
break;
}
if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
((x-x_offset) >= (ssize_t) source_image->columns))
{
Quantum
source[MaxPixelChannels];
/*
Virtual composite:
Sc: source color.
Dc: canvas color.
*/
(void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
exception);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
switch (compose)
{
case AlphaCompositeOp:
case ChangeMaskCompositeOp:
case CopyAlphaCompositeOp:
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case OutCompositeOp:
case SrcInCompositeOp:
case SrcOutCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) q[i];
break;
}
case ClearCompositeOp:
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=0.0;
break;
}
case BlendCompositeOp:
case DissolveCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=canvas_dissolve*GetPixelAlpha(source_image,source);
else
pixel=(MagickRealType) source[channel];
break;
}
default:
{
pixel=(MagickRealType) source[channel];
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
}
q+=GetPixelChannels(image);
continue;
}
/*
Authentic composite:
Sa: normalized source alpha.
Da: normalized canvas alpha.
*/
Sa=QuantumScale*GetPixelAlpha(source_image,p);
Da=QuantumScale*GetPixelAlpha(image,q);
switch (compose)
{
case BumpmapCompositeOp:
case ColorBurnCompositeOp:
case ColorDodgeCompositeOp:
case DarkenCompositeOp:
case DifferenceCompositeOp:
case DivideDstCompositeOp:
case DivideSrcCompositeOp:
case ExclusionCompositeOp:
case FreezeCompositeOp:
case HardLightCompositeOp:
case HardMixCompositeOp:
case InterpolateCompositeOp:
case LightenCompositeOp:
case LinearBurnCompositeOp:
case LinearDodgeCompositeOp:
case LinearLightCompositeOp:
case MathematicsCompositeOp:
case MinusDstCompositeOp:
case MinusSrcCompositeOp:
case MultiplyCompositeOp:
case NegateCompositeOp:
case OverlayCompositeOp:
case PegtopLightCompositeOp:
case PinLightCompositeOp:
case ReflectCompositeOp:
case ScreenCompositeOp:
case SoftBurnCompositeOp:
case SoftDodgeCompositeOp:
case SoftLightCompositeOp:
case StampCompositeOp:
case VividLightCompositeOp:
{
alpha=RoundToUnity(Sa+Da-Sa*Da);
break;
}
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case SrcInCompositeOp:
{
alpha=Sa*Da;
break;
}
case DissolveCompositeOp:
{
alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+
canvas_dissolve*Da;
break;
}
case DstOverCompositeOp:
case OverCompositeOp:
case SrcOverCompositeOp:
{
alpha=Sa+Da-Sa*Da;
break;
}
case DstOutCompositeOp:
{
alpha=Da*(1.0-Sa);
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
alpha=Sa*(1.0-Da);
break;
}
case BlendCompositeOp:
case PlusCompositeOp:
{
alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da);
break;
}
case XorCompositeOp:
{
alpha=Sa+Da-2.0*Sa*Da;
break;
}
case ModulusAddCompositeOp:
{
if ((Sa+Da) <= 1.0)
{
alpha=(Sa+Da);
break;
}
alpha=((Sa+Da)-1.0);
break;
}
case ModulusSubtractCompositeOp:
{
if ((Sa-Da) >= 0.0)
{
alpha=(Sa-Da);
break;
}
alpha=((Sa-Da)+1.0);
break;
}
default:
{
alpha=1.0;
break;
}
}
switch (compose)
{
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case ModulateCompositeOp:
case RMSECompositeOp:
case SaturateCompositeOp:
{
GetPixelInfoPixel(source_image,p,&source_pixel);
GetPixelInfoPixel(image,q,&canvas_pixel);
break;
}
default:
break;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel,
sans;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits = GetPixelChannelTraits(source_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((channel == AlphaPixelChannel) &&
((traits & UpdatePixelTrait) != 0))
{
/*
Set alpha channel.
*/
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case CopyBlackCompositeOp:
case CopyBlueCompositeOp:
case CopyCyanCompositeOp:
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
case CopyRedCompositeOp:
case CopyYellowCompositeOp:
case SrcAtopCompositeOp:
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case BumpmapCompositeOp:
{
pixel=GetPixelIntensity(source_image,p)*Da;
break;
}
case ChangeMaskCompositeOp:
{
if (IsFuzzyEquivalencePixel(source_image,p,image,q) != MagickFalse)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=QuantumRange*Da;
break;
}
case ClearCompositeOp:
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case RMSECompositeOp:
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Da;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Sa;
break;
}
if (Sa < Da)
{
pixel=QuantumRange*Da;
break;
}
pixel=QuantumRange*Sa;
break;
}
case CopyAlphaCompositeOp:
{
if (source_image->alpha_trait == UndefinedPixelTrait)
pixel=GetPixelIntensity(source_image,p);
else
pixel=QuantumRange*Sa;
break;
}
case BlurCompositeOp:
case CopyCompositeOp:
case DisplaceCompositeOp:
case DistortCompositeOp:
case DstAtopCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case DarkenIntensityCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=GetPixelIntensity(source_image,p) <
GetPixelIntensity(image,q) ? Sa : Da;
break;
}
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case DifferenceCompositeOp:
{
pixel=QuantumRange*fabs((double) (Sa-Da));
break;
}
case FreezeCompositeOp:
{
pixel=QuantumRange*(1.0-(1.0-Sa)*(1.0-Sa)*
PerceptibleReciprocal(Da));
if (pixel < 0.0)
pixel=0.0;
break;
}
case InterpolateCompositeOp:
{
pixel=QuantumRange*(0.5-0.25*cos(MagickPI*Sa)-0.25*
cos(MagickPI*Da));
break;
}
case LightenIntensityCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=GetPixelIntensity(source_image,p) >
GetPixelIntensity(image,q) ? Sa : Da;
break;
}
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case ModulateCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case MultiplyCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=QuantumRange*Sa*Da;
break;
}
pixel=QuantumRange*alpha;
break;
}
case NegateCompositeOp:
{
pixel=QuantumRange*((1.0-Sa-Da));
break;
}
case ReflectCompositeOp:
{
pixel=QuantumRange*(Sa*Sa*PerceptibleReciprocal(1.0-Da));
if (pixel > QuantumRange)
pixel=QuantumRange;
break;
}
case StampCompositeOp:
{
pixel=QuantumRange*(Sa+Da*Da-1.0);
break;
}
case StereoCompositeOp:
{
pixel=QuantumRange*(Sa+Da)/2;
break;
}
default:
{
pixel=QuantumRange*alpha;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
continue;
}
if (source_traits == UndefinedPixelTrait)
continue;
/*
Sc: source color.
Dc: canvas color.
*/
Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
Dc=(MagickRealType) q[i];
if ((traits & CopyPixelTrait) != 0)
{
/*
Copy channel.
*/
q[i]=ClampToQuantum(Dc);
continue;
}
/*
Porter-Duff compositions:
Sca: source normalized color multiplied by alpha.
Dca: normalized canvas color multiplied by alpha.
*/
Sca=QuantumScale*Sa*Sc;
Dca=QuantumScale*Da*Dc;
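/*
SaSca and DcaDa are the guarded ratios Sa/Sca and Dca/Da (computed via
PerceptibleReciprocal) that several of the blend formulas below reuse.
*/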
SaSca=Sa*PerceptibleReciprocal(Sca);
DcaDa=Dca*PerceptibleReciprocal(Da);
switch (compose)
{
case DarkenCompositeOp:
case LightenCompositeOp:
case ModulusSubtractCompositeOp:
{
gamma=PerceptibleReciprocal(1.0-alpha);
break;
}
default:
{
gamma=PerceptibleReciprocal(alpha);
break;
}
}
pixel=Dc;
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case SrcAtopCompositeOp:
{
pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa));
break;
}
case BlendCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc);
break;
}
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sca;
break;
}
case BlurCompositeOp:
case DisplaceCompositeOp:
case DistortCompositeOp:
{
pixel=Sc;
break;
}
case BumpmapCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc;
break;
}
case ChangeMaskCompositeOp:
{
pixel=Dc;
break;
}
case ClearCompositeOp:
{
pixel=0.0;
break;
}
case ColorBurnCompositeOp:
{
if ((Sca == 0.0) && (Dca == Da))
{
pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa));
break;
}
if (Sca == 0.0)
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-DcaDa)*
SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorDodgeCompositeOp:
{
if ((Sca*Da+Dca*Sa) >= Sa*Da)
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
else
pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&sans,&sans,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case CopyAlphaCompositeOp:
{
pixel=Dc;
break;
}
case CopyBlackCompositeOp:
{
if (channel == BlackPixelChannel)
pixel=(MagickRealType) GetPixelBlack(source_image,p);
break;
}
case CopyBlueCompositeOp:
case CopyYellowCompositeOp:
{
if (channel == BluePixelChannel)
pixel=(MagickRealType) GetPixelBlue(source_image,p);
break;
}
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
{
if (channel == GreenPixelChannel)
pixel=(MagickRealType) GetPixelGreen(source_image,p);
break;
}
case CopyRedCompositeOp:
case CopyCyanCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case DarkenCompositeOp:
{
/*
Darken is equivalent to a 'Minimum' method
OR a greyscale version of a binary 'And'
OR the 'Intersection' of pixel sets.
*/
if (compose_sync == MagickFalse)
{
pixel=MagickMin(Sc,Dc);
break;
}
if ((Sca*Da) < (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case DarkenIntensityCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=GetPixelIntensity(source_image,p) <
GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case DifferenceCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=fabs((double) Sc-Dc);
break;
}
pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa));
break;
}
case DissolveCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa*
canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc);
break;
}
case DivideDstCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=QuantumRange*(Sc*PerceptibleReciprocal(Dc));
break;
}
if ((fabs((double) Sca) < MagickEpsilon) &&
(fabs((double) Dca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (fabs((double) Dca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case DivideSrcCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=QuantumRange*(Dc*PerceptibleReciprocal(Sc));
break;
}
if ((fabs((double) Dca) < MagickEpsilon) &&
(fabs((double) Sca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
if (fabs((double) Sca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*SaSca+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
case DstAtopCompositeOp:
{
pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da));
break;
}
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Dca;
break;
}
case DstInCompositeOp:
{
pixel=QuantumRange*gamma*(Dca*Sa);
break;
}
case DstOutCompositeOp:
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa));
break;
}
case DstOverCompositeOp:
{
pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da));
break;
}
case ExclusionCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case FreezeCompositeOp:
{
pixel=QuantumRange*gamma*(1.0-(1.0-Sca)*(1.0-Sca)*
PerceptibleReciprocal(Dca));
if (pixel < 0.0)
pixel=0.0;
break;
}
case HardLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-
Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case HardMixCompositeOp:
{
pixel=gamma*(((Sca+Dca) < 1.0) ? 0.0 : QuantumRange);
break;
}
case HueCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&sans,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case InCompositeOp:
case SrcInCompositeOp:
{
pixel=QuantumRange*(Sca*Da);
break;
}
case InterpolateCompositeOp:
{
pixel=QuantumRange*(0.5-0.25*cos(MagickPI*Sca)-0.25*
cos(MagickPI*Dca));
break;
}
case LinearBurnCompositeOp:
{
/*
LinearBurn: as defined by Adobe Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html, is:
f(Sc,Dc) = Sc + Dc - 1
*/
pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da);
break;
}
case LinearDodgeCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc);
break;
}
case LinearLightCompositeOp:
{
/*
LinearLight: as defined by Adobe Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html, is:
f(Sc,Dc) = Dc + 2*Sc - 1
*/
pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca);
break;
}
case LightenCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=MagickMax(Sc,Dc);
break;
}
if ((Sca*Da) > (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case LightenIntensityCompositeOp:
{
/*
Lighten is equivalent to a 'Maximum' method
OR a greyscale version of a binary 'Or'
OR the 'Union' of pixel sets.
*/
if (compose_sync == MagickFalse)
{
pixel=GetPixelIntensity(source_image,p) >
GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case LuminizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&sans,&luma);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case MathematicsCompositeOp:
{
/*
'Mathematics', a free-form, user-controlled mathematical composition,
is defined as...
f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D
where the arguments A,B,C,D are (currently) passed to composite as a
comma-separated 'geometry' string in the "compose:args" image
artifact:
A = a->rho, B = a->sigma, C = a->xi, D = a->psi
Applying the SVG transparency formula (see above), we get...
Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)
Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
Dca*(1.0-Sa)
*/
if (compose_sync == MagickFalse)
{
pixel=geometry_info.rho*Sc*Dc+geometry_info.sigma*Sc+
geometry_info.xi*Dc+geometry_info.psi;
break;
}
pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+
geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+
geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case MinusDstCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=Dc-Sc;
break;
}
pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa);
break;
}
case MinusSrcCompositeOp:
{
/*
Minus source from canvas.
f(Sc,Dc) = Sc - Dc
*/
if (compose_sync == MagickFalse)
{
pixel=Sc-Dc;
break;
}
pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da);
break;
}
case ModulateCompositeOp:
{
ssize_t
offset;
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint);
if (offset == 0)
{
pixel=Dc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
luma+=(0.01*percent_luma*offset)/midpoint;
chroma*=0.01*percent_chroma;
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ModulusAddCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=(Sc+Dc);
break;
}
if ((Sca+Dca) <= 1.0)
{
pixel=QuantumRange*(Sca+Dca);
break;
}
pixel=QuantumRange*((Sca+Dca)-1.0);
break;
}
case ModulusSubtractCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=(Sc-Dc);
break;
}
if ((Sca-Dca) >= 0.0)
{
pixel=QuantumRange*(Sca-Dca);
break;
}
pixel=QuantumRange*((Sca-Dca)+1.0);
break;
}
case MultiplyCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=QuantumScale*Dc*Sc;
break;
}
pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case NegateCompositeOp:
{
pixel=QuantumRange*(1.0-fabs(1.0-Sca-Dca));
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da));
break;
}
case OverCompositeOp:
case SrcOverCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
break;
}
case OverlayCompositeOp:
{
if ((2.0*Dca) < Da)
{
pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0-
Da));
break;
}
pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+
Sca*(1.0-Da));
break;
}
case PegtopLightCompositeOp:
{
/*
PegTop: A Soft-Light alternative: A continuous version of the
Softlight function, producing very similar results.
f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc
http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
*/
if (fabs((double) Da) < MagickEpsilon)
{
pixel=QuantumRange*gamma*Sca;
break;
}
pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0-
Da)+Dca*(1.0-Sa));
break;
}
case PinLightCompositeOp:
{
/*
PinLight: A Photoshop 7 composition method
http://www.simplefilter.de/en/basics/mixmods.html
f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc
*/
if ((Dca*Sa) < (Da*(2.0*Sca-Sa)))
{
pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
break;
}
if ((Dca*Sa) > (2.0*Sca*Da))
{
pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca);
break;
}
case PlusCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=(Dc+Sc);
break;
}
pixel=QuantumRange*(Sca+Dca);
break;
}
case ReflectCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Sca*PerceptibleReciprocal(1.0-Dca));
if (pixel > QuantumRange)
pixel=QuantumRange;
break;
}
case RMSECompositeOp:
{
double
gray;
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
gray=sqrt((
(canvas_pixel.red-source_pixel.red)*
(canvas_pixel.red-source_pixel.red)+
(canvas_pixel.green-source_pixel.green)*
(canvas_pixel.green-source_pixel.green)+
(canvas_pixel.blue-source_pixel.blue)*
(canvas_pixel.blue-source_pixel.blue))/3.0);
switch (channel)
{
case RedPixelChannel: pixel=gray; break;
case GreenPixelChannel: pixel=gray; break;
case BluePixelChannel: pixel=gray; break;
default: pixel=Dc; break;
}
break;
}
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ScreenCompositeOp:
{
/*
Screen: a negated multiply:
f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
*/
if (compose_sync == MagickFalse)
{
pixel=Sc+Dc-Sc*Dc;
break;
}
pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca);
break;
}
case SoftBurnCompositeOp:
{
if ((Sca+Dca) < 1.0)
pixel=QuantumRange*gamma*(0.5*Dca*PerceptibleReciprocal(1.0-Sca));
else
pixel=QuantumRange*gamma*(1.0-0.5*(1.0-Sca)*
PerceptibleReciprocal(Dca));
break;
}
case SoftDodgeCompositeOp:
{
if ((Sca+Dca) < 1.0)
pixel=QuantumRange*gamma*(0.5*Sca*PerceptibleReciprocal(1.0-Dca));
else
pixel=QuantumRange*gamma*(1.0-0.5*(1.0-Dca)*
PerceptibleReciprocal(Sca));
break;
}
case SoftLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-DcaDa))+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
{
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*DcaDa*
(4.0*DcaDa+1.0)*(DcaDa-1.0)+7.0*DcaDa)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow(DcaDa,0.5)-
DcaDa)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case StampCompositeOp:
{
pixel=QuantumRange*(Sca+Dca*Dca-1.0);
break;
}
case StereoCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case ThresholdCompositeOp:
{
MagickRealType
delta;
delta=Sc-Dc;
if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
{
pixel=gamma*Dc;
break;
}
pixel=gamma*(Dc+delta*amount);
break;
}
case VividLightCompositeOp:
{
/*
VividLight: A Photoshop 7 composition method. See
http://www.simplefilter.de/en/basics/mixmods.html.
f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
*/
if ((fabs((double) Sa) < MagickEpsilon) ||
(fabs((double) (Sca-Sa)) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if ((2.0*Sca) <= Sa)
{
pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)*
PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(2.0*
(Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case XorCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
default:
{
pixel=Sc;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
}
p+=GetPixelChannels(source_image);
channels=GetPixelChannels(source_image);
if (p >= (pixels+channels*source_image->columns))
p=pixels;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
  if (canvas_image != (Image *) NULL)
canvas_image=DestroyImage(canvas_image);
else
source_image=DestroyImage(source_image);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T e x t u r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TextureImage() repeatedly tiles the texture image across and down the image
% canvas.
%
% The format of the TextureImage method is:
%
% MagickBooleanType TextureImage(Image *image,const Image *texture,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o texture_image: This image is the texture to layer on the background.
%
*/
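/*
  Hedged usage sketch (not part of this file): read a texture from a
  hypothetical file "texture.png" and tile it across an existing canvas.
*/
#if 0
static MagickBooleanType TileTextureExample(Image *image,
  ExceptionInfo *exception)
{
  Image
    *texture;
  ImageInfo
    *image_info;
  MagickBooleanType
    status;
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,"texture.png",MagickPathExtent);
  texture=ReadImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  if (texture == (Image *) NULL)
    return(MagickFalse);
  status=TextureImage(image,texture,exception);
  texture=DestroyImage(texture);
  return(status);
}
#endif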
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
ExceptionInfo *exception)
{
#define TextureImageTag "Texture/Image"
CacheView
*image_view,
*texture_view;
Image
*texture_image;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (texture == (const Image *) NULL)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
texture_image=CloneImage(texture,0,0,MagickTrue,exception);
if (texture_image == (const Image *) NULL)
return(MagickFalse);
(void) TransformImageColorspace(texture_image,image->colorspace,exception);
(void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
exception);
status=MagickTrue;
if ((image->compose != CopyCompositeOp) &&
((image->compose != OverCompositeOp) ||
(image->alpha_trait != UndefinedPixelTrait) ||
(texture_image->alpha_trait != UndefinedPixelTrait)))
{
/*
Tile texture onto the image background.
*/
for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
{
ssize_t
x;
if (status == MagickFalse)
continue;
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
{
MagickBooleanType
thread_status;
thread_status=CompositeImage(image,texture_image,image->compose,
MagickTrue,x+texture_image->tile_offset.x,y+
texture_image->tile_offset.y,exception);
if (thread_status == MagickFalse)
{
status=thread_status;
break;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
image->rows,image->rows);
texture_image=DestroyImage(texture_image);
return(status);
}
/*
Tile texture onto the image background (optimized).
*/
status=MagickTrue;
texture_view=AcquireVirtualCacheView(texture_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(texture_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*p,
*pixels;
ssize_t
x;
Quantum
*q;
size_t
width;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,
(y+texture_image->tile_offset.y) % texture_image->rows,
texture_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
{
ssize_t
j;
p=pixels;
width=texture_image->columns;
if ((x+(ssize_t) width) > (ssize_t) image->columns)
width=image->columns-x;
for (j=0; j < (ssize_t) width; j++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(texture_image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait texture_traits=GetPixelChannelTraits(texture_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(texture_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(texture_image);
q+=GetPixelChannels(image);
}
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
texture_view=DestroyCacheView(texture_view);
image_view=DestroyCacheView(image_view);
texture_image=DestroyImage(texture_image);
return(status);
}
|
GB_Matrix_extractElement.c
|
//------------------------------------------------------------------------------
// GB_Matrix_extractElement: x = A(row,col)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Extract the value of single scalar, x = A(row,col), typecasting from the
// type of A to the type of x, as needed.
// Returns GrB_SUCCESS if A(row,col) is present, and sets x to its value.
// Returns GrB_NO_VALUE if A(row,col) is not present, and x is unmodified.
// This template constructs GrB_Matrix_extractElement_[TYPE] for each of the
// 13 built-in types, and the _UDT method for all user-defined types.
// FUTURE: tolerate zombies
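// Illustrative caller-side sketch (not part of this template), showing how the
// two return codes are typically distinguished for the FP64 variant:
//
//      double aij = 0 ;
//      GrB_Info info = GrB_Matrix_extractElement_FP64 (&aij, A, i, j) ;
//      if (info == GrB_SUCCESS) { /* A(i,j) present; aij holds its value */ }
//      else if (info == GrB_NO_VALUE) { /* A(i,j) not present; aij unchanged */ }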
GrB_Info GB_EXTRACT_ELEMENT // extract a single entry, x = A(row,col)
(
GB_XTYPE *x, // scalar to extract, not modified if not found
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index row, // row index
GrB_Index col // column index
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GB_RETURN_IF_NULL_OR_FAULTY (A) ;
GB_RETURN_IF_NULL (x) ;
// TODO: do not wait unless jumbled. First try to find the element.
// If found (live or zombie), no need to wait. If not found and pending
// tuples exist, wait and then extractElement again.
// delete any lingering zombies, assemble any pending tuples, and unjumble
if (GB_ANY_PENDING_WORK (A))
{
GrB_Info info ;
GB_WHERE1 (GB_WHERE_STRING) ;
GB_BURBLE_START ("GrB_Matrix_extractElement") ;
GB_OK (GB_wait (A, "A", Context)) ;
GB_BURBLE_END ;
}
ASSERT (!GB_ANY_PENDING_WORK (A)) ;
// look for index i in vector j
int64_t i, j, nrows, ncols ;
if (A->is_csc)
{
i = row ;
j = col ;
nrows = A->vlen ;
ncols = A->vdim ;
}
else
{
i = col ;
j = row ;
nrows = A->vdim ;
ncols = A->vlen ;
}
// check row and column indices
if (row >= nrows || col >= ncols)
{
return (GrB_INVALID_INDEX) ;
}
// GB_XCODE and A must be compatible
GB_Type_code acode = A->type->code ;
if (!GB_code_compatible (GB_XCODE, acode))
{
return (GrB_DOMAIN_MISMATCH) ;
}
if (GB_nnz (A) == 0)
{
// quick return
return (GrB_NO_VALUE) ;
}
//--------------------------------------------------------------------------
// find the entry A(i,j)
//--------------------------------------------------------------------------
int64_t pleft ;
bool found ;
const int64_t *restrict Ap = A->p ;
if (Ap != NULL)
{
// A is sparse or hypersparse
const int64_t *restrict Ai = A->i ;
// extract from vector j of a GrB_Matrix
int64_t k ;
if (A->h != NULL)
{
// A is hypersparse: look for j in hyperlist A->h [0 ... A->nvec-1]
const int64_t *restrict Ah = A->h ;
int64_t pleft = 0 ;
int64_t pright = A->nvec-1 ;
GB_BINARY_SEARCH (j, Ah, pleft, pright, found) ;
if (!found)
{
// vector j is empty
return (GrB_NO_VALUE) ;
}
ASSERT (j == Ah [pleft]) ;
k = pleft ;
}
else
{
// A is sparse: j = k is the kth vector
k = j ;
}
pleft = Ap [k] ;
int64_t pright = Ap [k+1] - 1 ;
// binary search in kth vector for index i
        // Time taken for this step is at most O(log(nnz(A(:,j)))).
GB_BINARY_SEARCH (i, Ai, pleft, pright, found) ;
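        // For example, if vector k holds the row indices Ai [pleft..pright] =
        // [0 3 7], searching for i = 3 sets found to true with pleft at the
        // position of 3, while searching for i = 5 leaves found false.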
}
else
{
// A is bitmap or full
pleft = i + j * A->vlen ;
const int8_t *restrict Ab = A->b ;
if (Ab != NULL)
{
// A is bitmap
found = (Ab [pleft] == 1) ;
}
else
{
// A is full
found = true ;
}
}
//--------------------------------------------------------------------------
// extract the element
//--------------------------------------------------------------------------
if (found)
{
#if !defined ( GB_UDT_EXTRACT )
if (GB_XCODE == acode)
{
// copy A [pleft] into x, no typecasting, for built-in types only.
GB_XTYPE *restrict Ax = ((GB_XTYPE *) (A->x)) ;
(*x) = Ax [A->iso ? 0:pleft] ;
}
else
#endif
{
// typecast the value from A [pleft] into x
size_t asize = A->type->size ;
void *ax = ((GB_void *) A->x) + (A->iso ? 0 : (pleft*asize)) ;
GB_cast_scalar (x, GB_XCODE, ax, acode, asize) ;
}
// TODO: do not flush if extracting to GrB_Scalar
#pragma omp flush
return (GrB_SUCCESS) ;
}
else
{
// Entry not found.
return (GrB_NO_VALUE) ;
}
}
#undef GB_UDT_EXTRACT
#undef GB_EXTRACT_ELEMENT
#undef GB_XTYPE
#undef GB_XCODE
|
distributiongenerator.h
|
/**
 * @file distributiongenerator.h This code provides the basic structure for distribution generators; all other distribution generators should inherit from it.
* @author TPOC: [email protected]
*
* @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT)
* All rights reserved.
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef LBCRYPTO_MATH_DISTRIBUTIONGENERATOR_H_
#define LBCRYPTO_MATH_DISTRIBUTIONGENERATOR_H_
#include <chrono>
#include <memory>
#include <mutex>
#include <random>
#include <thread>
#include "backend.h"
//#define FIXED_SEED // if defined, then uses a fixed seed number for reproducible results during debug. Use only one OMP thread to ensure reproducibility
namespace lbcrypto {
/**
* @brief Abstract class describing generator requirements.
*
* The Distribution Generator defines the methods that must be implemented by a real generator.
 * It also holds the single PRNG, which should be called by all child classes whenever a random number is required.
*
*/
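// Usage note (illustrative): derived generators obtain the shared, per-thread
// engine via PseudoRandomNumberGenerator::GetPRNG() and pass it to a standard
// <random> distribution object to draw values.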
class PseudoRandomNumberGenerator {
public:
static std::mt19937 &GetPRNG () {
// initialization of PRNGs
if (!m_flag) {
#if defined(FIXED_SEED)
//TP: Need reproducibility to debug NTL.
std::cerr << "**FOR DEBUGGING ONLY!!!! Using fixed initializer for PRNG. Use a single thread only!" << std::endl;
std::mt19937 *gen;
gen = new std::mt19937(1);
gen->seed(1);
m_prng.reset(gen);
m_flag = true;
#else
#pragma omp critical
{
m_flag = true;
}
#pragma omp parallel
{
m_prng.reset(new std::mt19937(std::chrono::high_resolution_clock::now().time_since_epoch().count()+std::hash<std::thread::id>{}(std::this_thread::get_id())));
}
#endif
}
return *m_prng;
}
private:
// flag for initializing the PRNGs for each thread
static bool m_flag;
static std::shared_ptr<std::mt19937> m_prng;
#if !defined(FIXED_SEED)
// avoid contention on m_prng
#pragma omp threadprivate(m_prng)
#endif
};
// Base class for Distribution Generator by type
template<typename VecType>
class DistributionGenerator {
public:
DistributionGenerator () {}
virtual ~DistributionGenerator() {}
};
} // namespace lbcrypto
#endif // LBCRYPTO_MATH_DISTRIBUTIONGENERATOR_H_
|
GB_unaryop__abs_uint8_int64.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint8_int64
// op(A') function: GB_tran__abs_uint8_int64
// C type: uint8_t
// A type: int64_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT8 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_uint8_int64
(
uint8_t *Cx, // Cx and Ax may be aliased
int64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_uint8_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pr96424.c
|
/* PR tree-optimization/96424 */
/* { dg-do compile } */
/* { dg-options "-fopenmp -O0 -fexceptions -fnon-call-exceptions -fprofile-use -Wno-missing-profile" } */
void
foo (void)
{
int i, j;
#pragma omp for collapse (2)
for (i = 0; i < 10; ++i)
for (j = 0; j <= i; ++j)
;
}
void
bar (void)
{
int i, j;
#pragma omp for collapse (2)
for (i = 0; i < 10; ++i)
for (j = 0; j < i; ++j)
;
}
|
symv_x_csc_u_lo.c
|
#include "alphasparse/kernel.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "alphasparse/util.h"
#include <memory.h>
static alphasparse_status_t
symv_csc_u_lo_unroll(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSC *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
// m==n
const ALPHA_INT m = A->rows;
const ALPHA_INT n = A->cols;
const ALPHA_INT num_threads = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for (ALPHA_INT i = 0; i < m; ++i)
{
ALPHA_Number tmp1, tmp2;
alpha_mul(tmp1, beta, y[i]);
alpha_mul(tmp2, alpha, x[i]);
alpha_add(y[i], tmp1, tmp2);
}
// each thread has a y_local
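  // To avoid concurrent writes to y, each thread accumulates into its own
  // private copy (y_local[tid]); the copies are summed into y after the
  // main loop and then freed.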
ALPHA_Number **y_local = alpha_memalign(num_threads * sizeof(ALPHA_Number *), DEFAULT_ALIGNMENT);
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for (ALPHA_INT i = 0; i < num_threads; i++)
{
y_local[i] = alpha_memalign(m * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);
memset(y_local[i], '\0', sizeof(ALPHA_Number) * m);
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for (ALPHA_INT i = 0; i < n; ++i)
{
ALPHA_INT tid = alpha_get_thread_id();
ALPHA_INT ais = A->cols_start[i];
ALPHA_INT aie = A->cols_end[i];
ALPHA_INT ail = aie - ais;
ALPHA_INT start = alpha_lower_bound(&A->row_indx[ais], &A->row_indx[aie], i) - A->row_indx;
if (start < aie && A->row_indx[start] == i)
start += 1;
const ALPHA_INT *A_row = &A->row_indx[ais];
const ALPHA_Number *A_val = &A->values[ais];
ALPHA_INT ai = start - ais;
ALPHA_Number alpha_xi, tmp;
alpha_mul(alpha_xi, alpha, x[i]);
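        // Symmetric update: each stored strictly-lower entry A(r,i) contributes
        // alpha*A(r,i)*x[i] to y[r] and, by symmetry, alpha*A(r,i)*x[r] to y[i].
        // The loop below processes four entries per iteration; the remainder
        // loop after it handles the leftover entries.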
for (; ai < ail - 3; ai += 4)
{
ALPHA_Number av0 = A_val[ai];
ALPHA_Number av1 = A_val[ai + 1];
ALPHA_Number av2 = A_val[ai + 2];
ALPHA_Number av3 = A_val[ai + 3];
ALPHA_INT ar0 = A_row[ai];
ALPHA_INT ar1 = A_row[ai + 1];
ALPHA_INT ar2 = A_row[ai + 2];
ALPHA_INT ar3 = A_row[ai + 3];
alpha_madde(y_local[tid][ar0], av0, alpha_xi);
alpha_madde(y_local[tid][ar1], av1, alpha_xi);
alpha_madde(y_local[tid][ar2], av2, alpha_xi);
alpha_madde(y_local[tid][ar3], av3, alpha_xi);
alpha_mul(tmp, alpha, av0);
alpha_madde(y_local[tid][i], tmp, x[ar0]);
alpha_mul(tmp, alpha, av1);
alpha_madde(y_local[tid][i], tmp, x[ar1]);
alpha_mul(tmp, alpha, av2);
alpha_madde(y_local[tid][i], tmp, x[ar2]);
alpha_mul(tmp, alpha, av3);
alpha_madde(y_local[tid][i], tmp, x[ar3]);
}
for (; ai < ail; ai++)
{
ALPHA_Number av = A_val[ai];
ALPHA_INT ar = A_row[ai];
alpha_madde(y_local[tid][ar], av, alpha_xi);
alpha_mul(tmp, alpha, av);
alpha_madde(y_local[tid][i], tmp, x[ar]);
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for (ALPHA_INT col = 0; col < m; col++)
for (ALPHA_INT i = 0; i < num_threads; i++)
{
alpha_add(y[col], y[col], y_local[i][col]);
}
for (ALPHA_INT i = 0; i < num_threads; i++)
{
alpha_free(y_local[i]);
}
alpha_free(y_local);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSC *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
return symv_csc_u_lo_unroll(alpha, A, x, beta, y);
}
|
schur_eliminator_impl.h
|
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: [email protected] (Sameer Agarwal)
//
// TODO(sameeragarwal): row_block_counter can perhaps be replaced by
// Chunk::start ?
#ifndef CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
#define CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
// Eigen has an internal threshold switching between different matrix
// multiplication algorithms. In particular for matrices larger than
// EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD it uses a cache friendly
// matrix matrix product algorithm that has a higher setup cost. For
// matrix sizes close to this threshold, especially when the matrices
// are thin and long, the default choice may not be optimal. This is
// the case for us, as the default choice causes a 30% performance
// regression when we moved from Eigen2 to Eigen3.
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#include <algorithm>
#include <map>
#include "ceres/block_random_access_matrix.h"
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/invert_psd_matrix.h"
#include "ceres/map_util.h"
#include "ceres/schur_eliminator.h"
#include "ceres/scoped_thread_token.h"
#include "ceres/small_blas.h"
#include "ceres/stl_util.h"
#include "ceres/thread_token_provider.h"
#include "Eigen/Dense"
#include "glog/logging.h"
#ifdef CERES_USE_TBB
#include <tbb/parallel_for.h>
#include <tbb/task_arena.h>
#endif
namespace ceres {
namespace internal {
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::~SchurEliminator() {
STLDeleteElements(&rhs_locks_);
}
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::Init(
int num_eliminate_blocks,
bool assume_full_rank_ete,
const CompressedRowBlockStructure* bs) {
CHECK_GT(num_eliminate_blocks, 0)
<< "SchurComplementSolver cannot be initialized with "
<< "num_eliminate_blocks = 0.";
num_eliminate_blocks_ = num_eliminate_blocks;
assume_full_rank_ete_ = assume_full_rank_ete;
const int num_col_blocks = bs->cols.size();
const int num_row_blocks = bs->rows.size();
buffer_size_ = 1;
chunks_.clear();
lhs_row_layout_.clear();
int lhs_num_rows = 0;
// Add a map object for each block in the reduced linear system
// and build the row/column block structure of the reduced linear
// system.
lhs_row_layout_.resize(num_col_blocks - num_eliminate_blocks_);
for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
lhs_row_layout_[i - num_eliminate_blocks_] = lhs_num_rows;
lhs_num_rows += bs->cols[i].size;
}
int r = 0;
// Iterate over the row blocks of A, and detect the chunks. The
// matrix should already have been ordered so that all rows
// containing the same y block are vertically contiguous. Along
// the way also compute the amount of space each chunk will need
// to perform the elimination.
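  // For instance, if several consecutive row blocks all lead with the same
  // e_block and the next row block leads with a different one, the former
  // rows become a single chunk whose start is the first such row and whose
  // size is the count of those rows.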
while (r < num_row_blocks) {
const int chunk_block_id = bs->rows[r].cells.front().block_id;
if (chunk_block_id >= num_eliminate_blocks_) {
break;
}
chunks_.push_back(Chunk());
Chunk& chunk = chunks_.back();
chunk.size = 0;
chunk.start = r;
int buffer_size = 0;
const int e_block_size = bs->cols[chunk_block_id].size;
// Add to the chunk until the first block in the row is
// different than the one in the first row for the chunk.
while (r + chunk.size < num_row_blocks) {
const CompressedRow& row = bs->rows[r + chunk.size];
if (row.cells.front().block_id != chunk_block_id) {
break;
}
// Iterate over the blocks in the row, ignoring the first
// block since it is the one to be eliminated.
for (int c = 1; c < row.cells.size(); ++c) {
const Cell& cell = row.cells[c];
if (InsertIfNotPresent(
&(chunk.buffer_layout), cell.block_id, buffer_size)) {
buffer_size += e_block_size * bs->cols[cell.block_id].size;
}
}
buffer_size_ = std::max(buffer_size, buffer_size_);
++chunk.size;
}
CHECK_GT(chunk.size, 0);
r += chunk.size;
}
const Chunk& chunk = chunks_.back();
uneliminated_row_begins_ = chunk.start + chunk.size;
if (num_threads_ > 1) {
random_shuffle(chunks_.begin(), chunks_.end());
}
buffer_.reset(new double[buffer_size_ * num_threads_]);
// chunk_outer_product_buffer_ only needs to store e_block_size *
// f_block_size, which is always less than buffer_size_, so we just
// allocate buffer_size_ per thread.
chunk_outer_product_buffer_.reset(new double[buffer_size_ * num_threads_]);
STLDeleteElements(&rhs_locks_);
rhs_locks_.resize(num_col_blocks - num_eliminate_blocks_);
for (int i = 0; i < num_col_blocks - num_eliminate_blocks_; ++i) {
rhs_locks_[i] = new Mutex;
}
}
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
Eliminate(const BlockSparseMatrix* A,
const double* b,
const double* D,
BlockRandomAccessMatrix* lhs,
double* rhs) {
if (lhs->num_rows() > 0) {
lhs->SetZero();
VectorRef(rhs, lhs->num_rows()).setZero();
}
const CompressedRowBlockStructure* bs = A->block_structure();
const int num_col_blocks = bs->cols.size();
// Add the diagonal to the schur complement.
if (D != NULL) {
#ifdef CERES_USE_OPENMP
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
#endif // CERES_USE_OPENMP
#ifndef CERES_USE_TBB
for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
#else
tbb::task_arena task_arena(num_threads_);
task_arena.execute([&]{
tbb::parallel_for(num_eliminate_blocks_, num_col_blocks, [&](int i) {
#endif // !CERES_USE_TBB
const int block_id = i - num_eliminate_blocks_;
int r, c, row_stride, col_stride;
CellInfo* cell_info = lhs->GetCell(block_id, block_id,
&r, &c,
&row_stride, &col_stride);
if (cell_info != NULL) {
const int block_size = bs->cols[i].size;
typename EigenTypes<Eigen::Dynamic>::ConstVectorRef
diag(D + bs->cols[i].position, block_size);
CeresMutexLock l(&cell_info->m);
MatrixRef m(cell_info->values, row_stride, col_stride);
m.block(r, c, block_size, block_size).diagonal()
+= diag.array().square().matrix();
}
}
#ifdef CERES_USE_TBB
);
});
#endif // CERES_USE_TBB
}
ThreadTokenProvider thread_token_provider(num_threads_);
#ifdef CERES_USE_OPENMP
// Eliminate y blocks one chunk at a time. For each chunk, compute
// the entries of the normal equations and the gradient vector block
// corresponding to the y block and then apply Gaussian elimination
// to them. The matrix ete stores the normal matrix corresponding to
// the block being eliminated and array buffer_ contains the
// non-zero blocks in the row corresponding to this y block in the
// normal equations. This computation is done in
  // ChunkDiagonalBlockAndGradient. UpdateRhs then applies Gaussian
// elimination to the rhs of the normal equations, updating the rhs
// of the reduced linear system by modifying rhs blocks for all the
// z blocks that share a row block/residual term with the y
// block. EliminateRowOuterProduct does the corresponding operation
// for the lhs of the reduced linear system.
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
#endif // CERES_USE_OPENMP
#ifndef CERES_USE_TBB
for (int i = 0; i < chunks_.size(); ++i) {
#else
tbb::task_arena task_arena(num_threads_);
task_arena.execute([&]{
tbb::parallel_for(0, int(chunks_.size()), [&](int i) {
#endif // !CERES_USE_TBB
const ScopedThreadToken scoped_thread_token(&thread_token_provider);
const int thread_id = scoped_thread_token.token();
double* buffer = buffer_.get() + thread_id * buffer_size_;
const Chunk& chunk = chunks_[i];
const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
const int e_block_size = bs->cols[e_block_id].size;
VectorRef(buffer, buffer_size_).setZero();
typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
ete(e_block_size, e_block_size);
if (D != NULL) {
const typename EigenTypes<kEBlockSize>::ConstVectorRef
diag(D + bs->cols[e_block_id].position, e_block_size);
ete = diag.array().square().matrix().asDiagonal();
} else {
ete.setZero();
}
FixedArray<double, 8> g(e_block_size);
typename EigenTypes<kEBlockSize>::VectorRef gref(g.get(), e_block_size);
gref.setZero();
// We are going to be computing
//
// S += F'F - F'E(E'E)^{-1}E'F
//
// for each Chunk. The computation is broken down into a number of
// function calls as below.
// Compute the outer product of the e_blocks with themselves (ete
// = E'E). Compute the product of the e_blocks with the
    // corresponding f_blocks (buffer = E'F), the gradient of the terms
// in this chunk (g) and add the outer product of the f_blocks to
// Schur complement (S += F'F).
ChunkDiagonalBlockAndGradient(
chunk, A, b, chunk.start, &ete, g.get(), buffer, lhs);
// Normally one wouldn't compute the inverse explicitly, but
// e_block_size will typically be a small number like 3, in
    // which case it's much faster to compute the inverse once and
// use it to multiply other matrices/vectors instead of doing a
// Solve call over and over again.
typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix inverse_ete =
InvertPSDMatrix<kEBlockSize>(assume_full_rank_ete_, ete);
// For the current chunk compute and update the rhs of the reduced
// linear system.
//
// rhs = F'b - F'E(E'E)^(-1) E'b
FixedArray<double, 8> inverse_ete_g(e_block_size);
MatrixVectorMultiply<kEBlockSize, kEBlockSize, 0>(
inverse_ete.data(),
e_block_size,
e_block_size,
g.get(),
inverse_ete_g.get());
UpdateRhs(chunk, A, b, chunk.start, inverse_ete_g.get(), rhs);
// S -= F'E(E'E)^{-1}E'F
ChunkOuterProduct(
thread_id, bs, inverse_ete, buffer, chunk.buffer_layout, lhs);
}
#ifdef CERES_USE_TBB
);
});
#endif // CERES_USE_TBB
// For rows with no e_blocks, the schur complement update reduces to
// S += F'F.
NoEBlockRowsUpdate(A, b, uneliminated_row_begins_, lhs, rhs);
}
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
BackSubstitute(const BlockSparseMatrix* A,
const double* b,
const double* D,
const double* z,
double* y) {
const CompressedRowBlockStructure* bs = A->block_structure();
#ifdef CERES_USE_OPENMP
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
#endif // CERES_USE_OPENMP
#ifndef CERES_USE_TBB
for (int i = 0; i < chunks_.size(); ++i) {
#else
tbb::task_arena task_arena(num_threads_);
task_arena.execute([&]{
tbb::parallel_for(0, int(chunks_.size()), [&](int i) {
#endif // !CERES_USE_TBB
const Chunk& chunk = chunks_[i];
const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
const int e_block_size = bs->cols[e_block_id].size;
double* y_ptr = y + bs->cols[e_block_id].position;
typename EigenTypes<kEBlockSize>::VectorRef y_block(y_ptr, e_block_size);
typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
ete(e_block_size, e_block_size);
if (D != NULL) {
const typename EigenTypes<kEBlockSize>::ConstVectorRef
diag(D + bs->cols[e_block_id].position, e_block_size);
ete = diag.array().square().matrix().asDiagonal();
} else {
ete.setZero();
}
const double* values = A->values();
for (int j = 0; j < chunk.size; ++j) {
const CompressedRow& row = bs->rows[chunk.start + j];
const Cell& e_cell = row.cells.front();
DCHECK_EQ(e_block_id, e_cell.block_id);
FixedArray<double, 8> sj(row.block.size);
typename EigenTypes<kRowBlockSize>::VectorRef(sj.get(), row.block.size) =
typename EigenTypes<kRowBlockSize>::ConstVectorRef
(b + bs->rows[chunk.start + j].block.position, row.block.size);
for (int c = 1; c < row.cells.size(); ++c) {
const int f_block_id = row.cells[c].block_id;
const int f_block_size = bs->cols[f_block_id].size;
const int r_block = f_block_id - num_eliminate_blocks_;
MatrixVectorMultiply<kRowBlockSize, kFBlockSize, -1>(
values + row.cells[c].position, row.block.size, f_block_size,
z + lhs_row_layout_[r_block],
sj.get());
}
MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
values + e_cell.position, row.block.size, e_block_size,
sj.get(),
y_ptr);
MatrixTransposeMatrixMultiply
<kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
values + e_cell.position, row.block.size, e_block_size,
values + e_cell.position, row.block.size, e_block_size,
ete.data(), 0, 0, e_block_size, e_block_size);
}
y_block = InvertPSDMatrix<kEBlockSize>(assume_full_rank_ete_, ete)
* y_block;
}
#ifdef CERES_USE_TBB
);
});
#endif // CERES_USE_TBB
}
// Update the rhs of the reduced linear system. Compute
//
// F'b - F'E(E'E)^(-1) E'b
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
UpdateRhs(const Chunk& chunk,
const BlockSparseMatrix* A,
const double* b,
int row_block_counter,
const double* inverse_ete_g,
double* rhs) {
const CompressedRowBlockStructure* bs = A->block_structure();
const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
const int e_block_size = bs->cols[e_block_id].size;
int b_pos = bs->rows[row_block_counter].block.position;
const double* values = A->values();
for (int j = 0; j < chunk.size; ++j) {
const CompressedRow& row = bs->rows[row_block_counter + j];
const Cell& e_cell = row.cells.front();
typename EigenTypes<kRowBlockSize>::Vector sj =
typename EigenTypes<kRowBlockSize>::ConstVectorRef
(b + b_pos, row.block.size);
MatrixVectorMultiply<kRowBlockSize, kEBlockSize, -1>(
values + e_cell.position, row.block.size, e_block_size,
inverse_ete_g, sj.data());
for (int c = 1; c < row.cells.size(); ++c) {
const int block_id = row.cells[c].block_id;
const int block_size = bs->cols[block_id].size;
const int block = block_id - num_eliminate_blocks_;
CeresMutexLock l(rhs_locks_[block]);
MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
values + row.cells[c].position,
row.block.size, block_size,
sj.data(), rhs + lhs_row_layout_[block]);
}
b_pos += row.block.size;
}
}
// Given a Chunk - set of rows with the same e_block, e.g. in the
// following Chunk with two rows.
//
// E F
// [ y11 0 0 0 | z11 0 0 0 z51]
// [ y12 0 0 0 | z12 z22 0 0 0]
//
// this function computes two matrices. The diagonal block matrix
//
// ete = y11 * y11' + y12 * y12'
//
// and the off-diagonal blocks in the Gauss-Newton Hessian.
//
// buffer = [y11'(z11 + z12), y12' * z22, y11' * z51]
//
// which are zero compressed versions of the block sparse matrices E'E
// and E'F.
//
// and the gradient of the e_block, E'b.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
ChunkDiagonalBlockAndGradient(
const Chunk& chunk,
const BlockSparseMatrix* A,
const double* b,
int row_block_counter,
typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix* ete,
double* g,
double* buffer,
BlockRandomAccessMatrix* lhs) {
const CompressedRowBlockStructure* bs = A->block_structure();
int b_pos = bs->rows[row_block_counter].block.position;
const int e_block_size = ete->rows();
// Iterate over the rows in this chunk, for each row, compute the
// contribution of its F blocks to the Schur complement, the
  // contribution of its E block to the matrix E'E (ete), and the
// corresponding block in the gradient vector.
const double* values = A->values();
for (int j = 0; j < chunk.size; ++j) {
const CompressedRow& row = bs->rows[row_block_counter + j];
if (row.cells.size() > 1) {
EBlockRowOuterProduct(A, row_block_counter + j, lhs);
}
// Extract the e_block, ETE += E_i' E_i
const Cell& e_cell = row.cells.front();
MatrixTransposeMatrixMultiply
<kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
values + e_cell.position, row.block.size, e_block_size,
values + e_cell.position, row.block.size, e_block_size,
ete->data(), 0, 0, e_block_size, e_block_size);
// g += E_i' b_i
MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
values + e_cell.position, row.block.size, e_block_size,
b + b_pos,
g);
// buffer = E'F. This computation is done by iterating over the
// f_blocks for each row in the chunk.
for (int c = 1; c < row.cells.size(); ++c) {
const int f_block_id = row.cells[c].block_id;
const int f_block_size = bs->cols[f_block_id].size;
double* buffer_ptr =
buffer + FindOrDie(chunk.buffer_layout, f_block_id);
MatrixTransposeMatrixMultiply
<kRowBlockSize, kEBlockSize, kRowBlockSize, kFBlockSize, 1>(
values + e_cell.position, row.block.size, e_block_size,
values + row.cells[c].position, row.block.size, f_block_size,
buffer_ptr, 0, 0, e_block_size, f_block_size);
}
b_pos += row.block.size;
}
}
// Compute the outer product F'E(E'E)^{-1}E'F and subtract it from the
// Schur complement matrix, i.e
//
// S -= F'E(E'E)^{-1}E'F.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
ChunkOuterProduct(int thread_id,
const CompressedRowBlockStructure* bs,
const Matrix& inverse_ete,
const double* buffer,
const BufferLayoutType& buffer_layout,
BlockRandomAccessMatrix* lhs) {
// This is the most computationally expensive part of this
// code. Profiling experiments reveal that the bottleneck is not the
// computation of the right-hand matrix product, but memory
// references to the left hand side.
const int e_block_size = inverse_ete.rows();
BufferLayoutType::const_iterator it1 = buffer_layout.begin();
double* b1_transpose_inverse_ete =
chunk_outer_product_buffer_.get() + thread_id * buffer_size_;
// S(i,j) -= bi' * ete^{-1} b_j
for (; it1 != buffer_layout.end(); ++it1) {
const int block1 = it1->first - num_eliminate_blocks_;
const int block1_size = bs->cols[it1->first].size;
MatrixTransposeMatrixMultiply
<kEBlockSize, kFBlockSize, kEBlockSize, kEBlockSize, 0>(
buffer + it1->second, e_block_size, block1_size,
inverse_ete.data(), e_block_size, e_block_size,
b1_transpose_inverse_ete, 0, 0, block1_size, e_block_size);
BufferLayoutType::const_iterator it2 = it1;
for (; it2 != buffer_layout.end(); ++it2) {
const int block2 = it2->first - num_eliminate_blocks_;
int r, c, row_stride, col_stride;
CellInfo* cell_info = lhs->GetCell(block1, block2,
&r, &c,
&row_stride, &col_stride);
if (cell_info != NULL) {
const int block2_size = bs->cols[it2->first].size;
CeresMutexLock l(&cell_info->m);
MatrixMatrixMultiply
<kFBlockSize, kEBlockSize, kEBlockSize, kFBlockSize, -1>(
b1_transpose_inverse_ete, block1_size, e_block_size,
buffer + it2->second, e_block_size, block2_size,
cell_info->values, r, c, row_stride, col_stride);
}
}
}
}
// For rows with no e_blocks, the schur complement update reduces to S
// += F'F. This function iterates over the rows of A with no e_block,
// and calls NoEBlockRowOuterProduct on each row.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
NoEBlockRowsUpdate(const BlockSparseMatrix* A,
const double* b,
int row_block_counter,
BlockRandomAccessMatrix* lhs,
double* rhs) {
const CompressedRowBlockStructure* bs = A->block_structure();
const double* values = A->values();
for (; row_block_counter < bs->rows.size(); ++row_block_counter) {
const CompressedRow& row = bs->rows[row_block_counter];
for (int c = 0; c < row.cells.size(); ++c) {
const int block_id = row.cells[c].block_id;
const int block_size = bs->cols[block_id].size;
const int block = block_id - num_eliminate_blocks_;
MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
values + row.cells[c].position, row.block.size, block_size,
b + row.block.position,
rhs + lhs_row_layout_[block]);
}
NoEBlockRowOuterProduct(A, row_block_counter, lhs);
}
}
// A row r of A, which has no e_blocks gets added to the Schur
// Complement as S += r r'. This function is responsible for computing
// the contribution of a single row r to the Schur complement. It is
// very similar in structure to EBlockRowOuterProduct except for
// one difference. It does not use any of the template
// parameters. This is because the algorithm used for detecting the
// static structure of the matrix A only pays attention to rows with
// e_blocks. This is because rows without e_blocks are rare and
// typically arise from regularization terms in the original
// optimization problem, and have a very different structure than the
// rows with e_blocks. Including them in the static structure
// detection will lead to most template parameters being set to
// dynamic. Since the number of rows without e_blocks is small, the
// lack of templating is not an issue.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
NoEBlockRowOuterProduct(const BlockSparseMatrix* A,
int row_block_index,
BlockRandomAccessMatrix* lhs) {
const CompressedRowBlockStructure* bs = A->block_structure();
const CompressedRow& row = bs->rows[row_block_index];
const double* values = A->values();
for (int i = 0; i < row.cells.size(); ++i) {
const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
DCHECK_GE(block1, 0);
const int block1_size = bs->cols[row.cells[i].block_id].size;
int r, c, row_stride, col_stride;
CellInfo* cell_info = lhs->GetCell(block1, block1,
&r, &c,
&row_stride, &col_stride);
if (cell_info != NULL) {
CeresMutexLock l(&cell_info->m);
// This multiply currently ignores the fact that this is a
// symmetric outer product.
MatrixTransposeMatrixMultiply
<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
values + row.cells[i].position, row.block.size, block1_size,
values + row.cells[i].position, row.block.size, block1_size,
cell_info->values, r, c, row_stride, col_stride);
}
for (int j = i + 1; j < row.cells.size(); ++j) {
const int block2 = row.cells[j].block_id - num_eliminate_blocks_;
DCHECK_GE(block2, 0);
DCHECK_LT(block1, block2);
int r, c, row_stride, col_stride;
CellInfo* cell_info = lhs->GetCell(block1, block2,
&r, &c,
&row_stride, &col_stride);
if (cell_info != NULL) {
const int block2_size = bs->cols[row.cells[j].block_id].size;
CeresMutexLock l(&cell_info->m);
MatrixTransposeMatrixMultiply
<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
values + row.cells[i].position, row.block.size, block1_size,
values + row.cells[j].position, row.block.size, block2_size,
cell_info->values, r, c, row_stride, col_stride);
}
}
}
}
// For a row with an e_block, compute the contribution S += F'F. This
// function has the same structure as NoEBlockRowOuterProduct, except
// that this function uses the template parameters.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
EBlockRowOuterProduct(const BlockSparseMatrix* A,
int row_block_index,
BlockRandomAccessMatrix* lhs) {
const CompressedRowBlockStructure* bs = A->block_structure();
const CompressedRow& row = bs->rows[row_block_index];
const double* values = A->values();
for (int i = 1; i < row.cells.size(); ++i) {
const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
DCHECK_GE(block1, 0);
const int block1_size = bs->cols[row.cells[i].block_id].size;
int r, c, row_stride, col_stride;
CellInfo* cell_info = lhs->GetCell(block1, block1,
&r, &c,
&row_stride, &col_stride);
if (cell_info != NULL) {
CeresMutexLock l(&cell_info->m);
// block += b1.transpose() * b1;
MatrixTransposeMatrixMultiply
<kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
values + row.cells[i].position, row.block.size, block1_size,
values + row.cells[i].position, row.block.size, block1_size,
cell_info->values, r, c, row_stride, col_stride);
}
for (int j = i + 1; j < row.cells.size(); ++j) {
const int block2 = row.cells[j].block_id - num_eliminate_blocks_;
DCHECK_GE(block2, 0);
DCHECK_LT(block1, block2);
const int block2_size = bs->cols[row.cells[j].block_id].size;
int r, c, row_stride, col_stride;
CellInfo* cell_info = lhs->GetCell(block1, block2,
&r, &c,
&row_stride, &col_stride);
if (cell_info != NULL) {
// block += b1.transpose() * b2;
CeresMutexLock l(&cell_info->m);
MatrixTransposeMatrixMultiply
<kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
values + row.cells[i].position, row.block.size, block1_size,
values + row.cells[j].position, row.block.size, block2_size,
cell_info->values, r, c, row_stride, col_stride);
}
}
}
}
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
|
test.c
|
#include "trace.h"
tracepoint fact_entry, fact_exit;
int fact(int n) {
TRACE(fact_entry, "fact(%d)\n", n);
int out = 1;
if (n > 1)
out = n * fact(n-1);
TRACE(fact_exit, "fact(%d) = %d\n", n, out);
return out;
}
int main() {
trace_init();
#pragma omp parallel
{
#pragma omp single nowait
{
while (1)
{
#pragma omp task
{
printf("result 00 is %d\n\n", fact(5));
enable(&fact_entry);
printf("result 01 is %d\n\n", fact(5));
enable(&fact_exit);
printf("result 11 is %d\n\n", fact(5));
disable(&fact_entry);
printf("result 10 is %d\n\n", fact(5));
disable(&fact_exit);
}
}
}
}
return 0;
}
|
omp_parallel_sections_private.c
|
<ompts:test>
<ompts:testdescription>Test which checks the omp parallel sections private directive.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp parallel sections private</ompts:directive>
<ompts:dependences>omp critical</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include "omp_testsuite.h"
int <ompts:testcode:functionname>omp_parallel_sections_private</ompts:testcode:functionname>(FILE * logFile){
int sum=7;
int sum0=0;
int known_sum;
int i;
#pragma omp parallel sections private(<ompts:check>sum0,</ompts:check><ompts:crosscheck></ompts:crosscheck>i)
{
#pragma omp section
{
sum0=0;
for (i=1;i<400;i++)
sum0=sum0+i;
#pragma omp critical
{
sum= sum+sum0;
} /*end of critical */
}
#pragma omp section
{
sum0=0;
for(i=400;i<700;i++)
sum0=sum0+i;
#pragma omp critical
{
sum= sum+sum0;
} /*end of critical */
}
#pragma omp section
{
sum0=0;
for(i=700;i<1000;i++)
sum0=sum0+i;
#pragma omp critical
{
sum= sum+sum0;
} /*end of critical */
}
  } /*end of parallel sections*/
known_sum=(999*1000)/2+7;
return (known_sum==sum);
} /* end of check_section_private*/
</ompts:testcode>
</ompts:test>
|
GB_unaryop__lnot_uint64_uint32.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint64_uint32
// op(A') function: GB_tran__lnot_uint64_uint32
// C type: uint64_t
// A type: uint32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint64_uint32
(
uint64_t *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
declare_variant_messages.c
|
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp -x c -std=c99 -fms-extensions -Wno-pragma-pack %s
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp-simd -x c -std=c99 -fms-extensions -Wno-pragma-pack %s
// expected-error@+1 {{expected an OpenMP directive}}
#pragma omp declare
int foo(void);
#pragma omp declare variant // expected-error {{expected '(' after 'declare variant'}}
#pragma omp declare variant( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo // expected-error {{expected ')'}} expected-error {{expected 'match' clause on 'omp declare variant' directive}} expected-note {{to match this '('}}
#pragma omp declare variant(x) // expected-error {{use of undeclared identifier 'x'}}
#pragma omp declare variant(foo) // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) xxx // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) match // expected-error {{expected '(' after 'match'}}
#pragma omp declare variant(foo) match( // expected-error {{expected context selector in 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) match() // expected-error {{expected context selector in 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) match(xxx) // expected-error {{expected '=' after 'xxx' context selector set name on 'omp declare variant' directive}}
#pragma omp declare variant(foo) match(xxx=) // expected-error {{expected '{' after '='}}
#pragma omp declare variant(foo) match(xxx=yyy) // expected-error {{expected '{' after '='}}
#pragma omp declare variant(foo) match(xxx=yyy}) // expected-error {{expected '{' after '='}}
#pragma omp declare variant(foo) match(xxx={) // expected-error {{expected '}' or ',' after ')'}} expected-error {{expected '}'}} expected-note {{to match this '{'}}
#pragma omp declare variant(foo) match(xxx={})
#pragma omp declare variant(foo) match(xxx={vvv, vvv})
#pragma omp declare variant(foo) match(xxx={vvv} xxx) // expected-error {{expected ','}} expected-error {{expected '=' after 'xxx' context selector set name on 'omp declare variant' directive}} expected-error {{context selector set 'xxx' is used already in the same 'omp declare variant' directive}} expected-note {{previously context selector set 'xxx' used here}}
#pragma omp declare variant(foo) match(xxx={vvv}) xxx // expected-warning {{extra tokens at the end of '#pragma omp declare variant' are ignored}}
#pragma omp declare variant(foo) match(implementation={xxx}) // expected-warning {{unknown context selector in 'implementation' context selector set of 'omp declare variant' directive, ignored}}
#pragma omp declare variant(foo) match(implementation={vendor}) // expected-error {{expected '(' after 'vendor'}} expected-error {{expected vendor identifier in 'vendor' context selector of 'implementation' selector set of 'omp declare variant' directive}} expected-error {{expected ')' or ',' after 'vendor name'}} expected-error {{expected ')'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor(}) // expected-error {{expected vendor identifier in 'vendor' context selector of 'implementation' selector set of 'omp declare variant' directive}} expected-error {{expected ')' or ',' after 'vendor name'}} expected-error {{expected ')'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor()}) // expected-error {{expected vendor identifier in 'vendor' context selector of 'implementation' selector set of 'omp declare variant' directive}}
#pragma omp declare variant(foo) match(implementation={vendor(score ibm)}) // expected-error {{expected '(' after 'score'}} expected-warning {{missing ':' after context selector score clause - ignoring}}
#pragma omp declare variant(foo) match(implementation={vendor(score( ibm)}) // expected-error {{expected ')' or ',' after 'vendor name'}} expected-error {{expected ')'}} expected-error {{use of undeclared identifier 'ibm'}} expected-error {{expected vendor identifier in 'vendor' context selector of 'implementation' selector set of 'omp declare variant' directive}} expected-warning {{missing ':' after context selector score clause - ignoring}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor(score(2 ibm)}) // expected-error {{expected ')' or ',' after 'vendor name'}} expected-error 2 {{expected ')'}} expected-error {{expected vendor identifier in 'vendor' context selector of 'implementation' selector set of 'omp declare variant' directive}} expected-warning {{missing ':' after context selector score clause - ignoring}} expected-note 2 {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor(score(foo()) ibm)}) // expected-warning {{missing ':' after context selector score clause - ignoring}} expected-error {{expression is not an integer constant expression}}
#pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm), vendor(llvm)}) // expected-error {{context trait selector 'vendor' is used already in the same 'implementation' context selector set of 'omp declare variant' directive}} expected-note {{previously context trait selector 'vendor' used here}}
#pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm), kind(cpu)}) // expected-warning {{unknown context selector in 'implementation' context selector set of 'omp declare variant' directive, ignored}}
#pragma omp declare variant(foo) match(device={xxx}) // expected-warning {{unknown context selector in 'device' context selector set of 'omp declare variant' directive, ignored}}
#pragma omp declare variant(foo) match(device={kind}) // expected-error {{expected '(' after 'kind'}} expected-error {{expected 'host', 'nohost', 'cpu', 'gpu', or 'fpga' in 'kind' context selector of 'device' selector set of 'omp declare variant' directive}} expected-error {{expected ')'}} expected-error {{expected ')'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind(}) // expected-error {{expected 'host', 'nohost', 'cpu', 'gpu', or 'fpga' in 'kind' context selector of 'device' selector set of 'omp declare variant' directive}} expected-error 2 {{expected ')'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind()}) // expected-error {{expected 'host', 'nohost', 'cpu', 'gpu', or 'fpga' in 'kind' context selector of 'device' selector set of 'omp declare variant' directive}}
#pragma omp declare variant(foo) match(device={kind(score cpu)}) // expected-error {{expected ')' or ',' after 'score'}} expected-error {{unknown 'score' device kind trait in the 'device' context selector set, expected one of 'host', 'nohost', 'cpu', 'gpu' or 'fpga'}}
#pragma omp declare variant(foo) match(device={kind(score( ibm)}) // expected-error 2 {{expected ')'}} expected-note {{to match this '('}} expected-error {{unknown 'score' device kind trait in the 'device' context selector set, expected one of 'host', 'nohost', 'cpu', 'gpu' or 'fpga'}}
#pragma omp declare variant(foo) match(device={kind(score(2 gpu)}) // expected-error 2 {{expected ')'}} expected-note {{to match this '('}} expected-error {{unknown 'score' device kind trait in the 'device' context selector set, expected one of 'host', 'nohost', 'cpu', 'gpu' or 'fpga'}}
#pragma omp declare variant(foo) match(device={kind(score(foo()) ibm)}) // expected-error {{expected ')' or ',' after 'score'}} expected-error {{expected ')'}} expected-note {{to match this '('}} expected-error {{unknown 'score' device kind trait in the 'device' context selector set, expected one of 'host', 'nohost', 'cpu', 'gpu' or 'fpga'}}
#pragma omp declare variant(foo) match(device={kind(score(5): host), kind(llvm)}) // expected-error {{context trait selector 'kind' is used already in the same 'device' context selector set of 'omp declare variant' directive}} expected-note {{previously context trait selector 'kind' used here}} expected-error {{expected ')' or ',' after 'score'}} expected-note {{to match this '('}} expected-error {{expected ')'}} expected-error {{unknown 'score' device kind trait in the 'device' context selector set, expected one of 'host', 'nohost', 'cpu', 'gpu' or 'fpga'}} expected-error {{unknown 'llvm' device kind trait in the 'device' context selector set, expected one of 'host', 'nohost', 'cpu', 'gpu' or 'fpga'}}
#pragma omp declare variant(foo) match(device={kind(score(5): nohost), vendor(llvm)}) // expected-warning {{unknown context selector in 'device' context selector set of 'omp declare variant' directive, ignored}} expected-error {{expected ')' or ',' after 'score'}} expected-error {{expected ')'}} expected-note {{to match this '('}} expected-error {{unknown 'score' device kind trait in the 'device' context selector set, expected one of 'host', 'nohost', 'cpu', 'gpu' or 'fpga'}}
int bar(void);
// expected-error@+2 {{'#pragma omp declare variant' can only be applied to functions}}
#pragma omp declare variant(foo) match(xxx={})
int a;
// expected-error@+2 {{'#pragma omp declare variant' can only be applied to functions}}
#pragma omp declare variant(foo) match(xxx={})
#pragma omp threadprivate(a)
int var;
#pragma omp threadprivate(var)
// expected-error@+2 {{expected an OpenMP directive}} expected-error@+1 {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant(foo) match(xxx={})
#pragma omp declare
// expected-error@+3 {{function declaration is expected after 'declare variant' directive}}
// expected-error@+1 {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant(foo) match(xxx={})
#pragma omp declare variant(foo) match(xxx={})
#pragma options align=packed
int main();
// expected-error@+3 {{function declaration is expected after 'declare variant' directive}}
// expected-error@+1 {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant(foo) match(xxx={})
#pragma omp declare variant(foo) match(xxx={})
#pragma init_seg(compiler)
int main();
// expected-error@+1 {{single declaration is expected after 'declare variant' directive}}
#pragma omp declare variant(foo) match(xxx={})
int b, c;
int no_proto();
#pragma omp declare variant(no_proto) match(xxx={})
int no_proto_too();
int proto1(int);
// expected-note@+2 {{previous declaration is here}}
#pragma omp declare variant(proto1) match(xxx={})
int diff_proto();
// expected-error@+1 {{conflicting types for 'diff_proto'}}
int diff_proto(double);
#pragma omp declare variant(no_proto) match(xxx={})
int diff_proto1(double);
int after_use_variant(void);
int after_use();
int bar() {
return after_use();
}
// expected-warning@+1 {{'#pragma omp declare variant' cannot be applied for function after first usage; the original function might be used}}
#pragma omp declare variant(after_use_variant) match(xxx={})
int after_use(void);
#pragma omp declare variant(after_use_variant) match(xxx={})
int defined(void) { return 0; }
int defined1(void) { return 0; }
// expected-warning@+1 {{'#pragma omp declare variant' cannot be applied to the function that was defined already; the original function might be used}}
#pragma omp declare variant(after_use_variant) match(xxx={})
int defined1(void);
int diff_cc_variant(void);
// expected-error@+1 {{variant in '#pragma omp declare variant' with type 'int (void)' is incompatible with type 'int (void) __attribute__((vectorcall))'}}
#pragma omp declare variant(diff_cc_variant) match(xxx={})
__vectorcall int diff_cc(void);
int diff_ret_variant(void);
// expected-error@+1 {{variant in '#pragma omp declare variant' with type 'int (void)' is incompatible with type 'void (void)'}}
#pragma omp declare variant(diff_ret_variant) match(xxx={})
void diff_ret(void);
void marked(void);
void not_marked(void);
// expected-note@+1 {{marked as 'declare variant' here}}
#pragma omp declare variant(not_marked) match(implementation={vendor(unknown)}, device={kind(cpu)})
void marked_variant(void);
// expected-warning@+1 {{variant function in '#pragma omp declare variant' is itself marked as '#pragma omp declare variant'}}
#pragma omp declare variant(marked_variant) match(xxx={})
void marked(void);
// expected-error@+1 {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant
// expected-error@+1 {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant
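// For contrast with the malformed directives exercised above, a minimal
// well-formed use of the directive might look like the following sketch
// (illustrative only; 'base' and 'base_avx2' are hypothetical names and are
// not part of the diagnostics tested in this file).
int base_avx2(int x); // specialized variant; must be declared before the directive
#pragma omp declare variant(base_avx2) match(implementation={vendor(llvm)}, device={kind(cpu)})
int base(int x); // base function: calls to base() may be replaced by base_avx2()
                 // when the compilation context matches the selectors above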
|
3d7pt_var.lbpar.c
|
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 8;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 7
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,8);t1++) {
lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(16*t2-Nz-4,8)),t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(8*t1+Ny+13,8)),floord(16*t2+Ny+12,8)),floord(16*t1-16*t2+Nz+Ny+11,8));t3++) {
for (t4=max(max(max(0,ceild(t1-255,256)),ceild(16*t2-Nz-2044,2048)),ceild(8*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(8*t1+Nx+13,2048)),floord(16*t2+Nx+12,2048)),floord(8*t3+Nx+4,2048)),floord(16*t1-16*t2+Nz+Nx+11,2048));t4++) {
for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),8*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),8*t3+6),2048*t4+2046),16*t1-16*t2+Nz+13);t5++) {
for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
lbv=max(2048*t4,t5+1);
ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
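/*
 * Reference (untiled) form of the update computed by the tiled loops above --
 * a readability sketch only, assuming the same A[2][Nz][Ny][Nx] and
 * coef[7][Nz][Ny][Nx] layout allocated in main(); it is not part of the
 * timed benchmark.
 */
static void stencil_7pt_var_naive(int Nt, int Nz, int Ny, int Nx,
                                  double ****A, double ****coef)
{
  int t, i, j, k;
  for (t = 0; t < Nt - 1; t++)
    for (i = 1; i < Nz - 1; i++)
      for (j = 1; j < Ny - 1; j++)
        for (k = 1; k < Nx - 1; k++)
          A[(t + 1) % 2][i][j][k] =
              coef[0][i][j][k] * A[t % 2][i][j][k]
            + coef[1][i][j][k] * A[t % 2][i - 1][j][k]
            + coef[2][i][j][k] * A[t % 2][i][j - 1][k]
            + coef[3][i][j][k] * A[t % 2][i][j][k - 1]
            + coef[4][i][j][k] * A[t % 2][i + 1][j][k]
            + coef[5][i][j][k] * A[t % 2][i][j + 1][k]
            + coef[6][i][j][k] * A[t % 2][i][j][k + 1];
}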
|
3d25pt_var.lbpar.c
|
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 16;
tile_size[3] = 512;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 24 && Multiplication: 13
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=2*Nt-2;t1++) {
lbp=ceild(t1+2,2);
ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1-4,8),ceild(4*t2-Nz-3,16));t3<=min(min(floord(4*Nt+Ny-9,16),floord(2*t1+Ny-3,16)),floord(4*t2+Ny-9,16));t3++) {
for (t4=max(max(ceild(t1-252,256),ceild(4*t2-Nz-499,512)),ceild(16*t3-Ny-499,512));t4<=min(min(min(floord(4*Nt+Nx-9,512),floord(2*t1+Nx-3,512)),floord(4*t2+Nx-9,512)),floord(16*t3+Nx+3,512));t4++) {
for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(512*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) {
lbv=max(512*t4,4*t5+4);
ubv=min(512*t4+511,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
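/*
 * Reference (untiled) form of the axis-symmetric 25-point update computed by
 * the tiled loops above -- a readability sketch only, assuming the same
 * A[2][Nz][Ny][Nx] and coef[13][Nz][Ny][Nx] layout allocated in main(); it is
 * not part of the timed benchmark.  For each radius r = 1..4, coefficients
 * 3r-2, 3r-1 and 3r multiply the symmetric neighbour pairs along z, y and x.
 */
static void stencil_25pt_var_naive(int Nt, int Nz, int Ny, int Nx,
                                   double ****A, double ****coef)
{
  int t, i, j, k, r;
  for (t = 0; t < Nt; t++)
    for (i = 4; i < Nz - 4; i++)
      for (j = 4; j < Ny - 4; j++)
        for (k = 4; k < Nx - 4; k++) {
          double v = coef[0][i][j][k] * A[t % 2][i][j][k];
          for (r = 1; r <= 4; r++) {
            v += coef[3*r-2][i][j][k] * (A[t % 2][i-r][j][k] + A[t % 2][i+r][j][k]);
            v += coef[3*r-1][i][j][k] * (A[t % 2][i][j-r][k] + A[t % 2][i][j+r][k]);
            v += coef[3*r  ][i][j][k] * (A[t % 2][i][j][k-r] + A[t % 2][i][j][k+r]);
          }
          A[(t + 1) % 2][i][j][k] = v;
        }
}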
|
transpose.c
|
/*
Copyright (c) 2013, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/*******************************************************************
NAME: transpose
PURPOSE: This OpenMP program measures the time for the transpose of a
column-major stored matrix into a row-major stored matrix.
USAGE: Program input is three command line arguments that give the
matrix order, the number of times to repeat the operation
(iterations), and the number of threads to use:
transpose <# threads> <# iterations> <matrix order> [tile size]
An optional parameter specifies the tile size used to divide the
individual matrix blocks for improved cache and TLB performance.
The output consists of diagnostics to make sure the
transpose worked and timing statistics.
FUNCTIONS CALLED:
Other than OpenMP or standard C functions, the following
functions are used in this program:
wtime() portable wall-timer interface.
bail_out()
test_results() Verify that the transpose worked
HISTORY: Written by Tim Mattson, April 1999.
Updated by Rob Van der Wijngaart, December 2005.
*******************************************************************/
#include <par-res-kern_general.h>
#include <par-res-kern_omp.h>
/* Constant to shift column index */
#define COL_SHIFT 1000.00
/* Constant to shift row index */
#define ROW_SHIFT 0.001
static double test_results (int , double*);
int main(int argc, char ** argv) {
int order; /* order of the matrix */
int tile_size=32; /* default tile size for tiling of local transpose */
int iterations; /* number of times to do the transpose */
int i, j, it, jt, iter; /* dummies */
double bytes; /* combined size of matrices */
double * RESTRICT A; /* buffer to hold original matrix */
double * RESTRICT B; /* buffer to hold transposed matrix */
double errsq; /* squared error */
double epsilon=1.e-8; /* error tolerance */
double trans_time, /* timing parameters */
avgtime = 0.0,
maxtime = 0.0,
mintime = 366.0*24.0*3600.0; /* set the minimum time to a large
value; one leap year should be enough */
int nthread_input,
nthread;
int num_error=0; /* error flag, set when the requested and
obtained numbers of threads differ */
/*********************************************************************
** read and test input parameters
*********************************************************************/
if (argc != 4 && argc != 5){
printf("Usage: %s <# threads> <# iterations> <matrix order> [tile size]\n",
*argv);
exit(EXIT_FAILURE);
}
/* Take number of threads to request from command line */
nthread_input = atoi(*++argv);
if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) {
printf("ERROR: Invalid number of threads: %d\n", nthread_input);
exit(EXIT_FAILURE);
}
omp_set_num_threads(nthread_input);
iterations = atoi(*++argv);
if (iterations < 1){
printf("ERROR: iterations must be >= 1 : %d \n",iterations);
exit(EXIT_FAILURE);
}
order = atoi(*++argv);
if (order < 0){
printf("ERROR: Matrix Order must be greater than 0 : %d \n", order);
exit(EXIT_FAILURE);
}
if (argc == 5) tile_size = atoi(*++argv);
/* a non-positive tile size means no tiling of the local transpose */
if (tile_size <=0) tile_size = order;
/*********************************************************************
** Allocate space for the input and transpose matrix
*********************************************************************/
A = (double *)malloc(order*order*sizeof(double));
if (A == NULL){
printf(" Error allocating space for input matrix\n");
exit(EXIT_FAILURE);
}
B = (double *)malloc(order*order*sizeof(double));
if (B == NULL){
printf(" Error allocating space for transposed matrix\n");
exit(EXIT_FAILURE);
}
bytes = 2.0 * sizeof(double) * order * order;
#pragma omp parallel private (iter)
{
#pragma omp master
{
nthread = omp_get_num_threads();
printf("OpenMP Matrix transpose: B = A^T\n");
if (nthread != nthread_input) {
num_error = 1;
printf("ERROR: number of requested threads %d does not equal ",
nthread_input);
printf("number of spawned threads %d\n", nthread);
}
else {
printf("Number of threads = %i;\n",nthread_input);
printf("Matrix order = %d\n", order);
if (tile_size < order) printf("Tile size = %d\n", tile_size);
else printf("Untiled\n");
printf("Number of iterations = %d\n", iterations);
}
}
bail_out(num_error);
/* Fill the original matrix, set transpose to known garbage value. */
#pragma omp for private (j)
for (i=0;i<order; i++) {
for (j=0;j<order;j++) {
*(A+i*order+j) = COL_SHIFT * j + ROW_SHIFT * i;
*(B+i*order+j) = -1.0;
}
}
errsq = 0.0;
for (iter = 0; iter<iterations; iter++){
#pragma omp barrier
#pragma omp master
{
trans_time = wtime();
}
/* Transpose the matrix; only use tiling if the tile size is smaller
than the matrix */
if (tile_size < order) {
#pragma omp for private (j, it, jt)
for (i=0; i<order; i+=tile_size) {
for (j=0; j<order; j+=tile_size) {
for (it=i; it<MIN(order,i+tile_size); it++){
for (jt=j; jt<MIN(order,j+tile_size);jt++){
B[it+order*jt] = A[jt+order*it];
}
}
}
}
}
else {
#pragma omp for private (j)
for (i=0;i<order; i++) {
for (j=0;j<order;j++) {
B[i+order*j] = A[j+order*i];
}
}
}
#pragma omp master
{
trans_time = wtime() - trans_time;
#ifdef VERBOSE
printf("\nFinished with transpose, using %lf seconds \n", trans_time);
#endif
if (iter>0 || iterations==1) { /* skip the first iteration */
avgtime = avgtime + trans_time;
mintime = MIN(mintime, trans_time);
maxtime = MAX(maxtime, trans_time);
}
}
errsq += test_results (order, B);
} /* end of iter loop */
} /* end of OpenMP parallel region */
/*********************************************************************
** Analyze and output results.
*********************************************************************/
if (errsq < epsilon) {
printf("Solution validates\n");
avgtime = avgtime/(double)(MAX(iterations-1,1));
printf("Rate (MB/s): %lf, Avg time (s): %lf, Min time (s): %lf",
1.0E-06 * bytes/mintime, avgtime, mintime);
printf(", Max time (s): %lf\n", maxtime);
#ifdef VERBOSE
printf("Squared errors: %f \n", errsq);
#endif
exit(EXIT_SUCCESS);
}
else {
printf("ERROR: Aggregate squared error %lf exceeds threshold %e\n",
errsq, epsilon);
exit(EXIT_FAILURE);
}
} /* end of main */
/* function that computes the error committed during the transposition */
double test_results (int order, double *trans) {
double diff, errsq=0.0;
int i,j;
#pragma omp parallel for private(j,diff) reduction(+:errsq)
for (i=0;i<order; i++) {
for (j=0;j<order;j++) {
diff = *(trans+i*order+j) -
(COL_SHIFT*i + ROW_SHIFT * j);
errsq += diff*diff;
}
}
#ifdef VERBOSE
#pragma omp master
{
printf(" Squared sum of differences: %f\n",errsq);
}
#endif
return errsq;
}
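/* The tiling used in main(), isolated here as a sketch for clarity (it is not
called by the benchmark; MIN comes from the par-res-kern headers included
above).  Within one tile_size x tile_size block the reads of A are contiguous
and the strided writes to B stay inside a small working set that can remain
cache-resident, which is what the optional tile size parameter is for. */
static void transpose_tiled(int order, int tile_size, const double *A, double *B)
{
  int i, j, it, jt;
  for (i = 0; i < order; i += tile_size)
    for (j = 0; j < order; j += tile_size)
      for (it = i; it < MIN(order, i + tile_size); it++)
        for (jt = j; jt < MIN(order, j + tile_size); jt++)
          B[it + order*jt] = A[jt + order*it];
}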
|
conv_dw_kernel_x86.c
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: [email protected]
*/
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "conv_dw_kernel_x86.h"
#if __SSE2__
#include <emmintrin.h>
#endif
#if __AVX__
#include <immintrin.h>
#endif
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
static void relu(float* data, int size, int activation)
{
for (int i = 0; i < size; i++)
{
data[i] = max(data[i], ( float )0);
if (activation > 0)
{
data[i] = min(data[i], ( float )activation);
}
}
}
static void pad(float* input, float* output, int in_h, int in_w, int out_h, int out_w, int top, int left, float v)
{
float* ptr = input;
float* outptr = output;
int y = 0;
// fill top
for (; y < top; y++)
{
int x = 0;
for (; x < out_w; x++)
{
outptr[x] = v;
}
outptr += out_w;
}
// fill center
for (; y < (top + in_h); y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = v;
}
if (in_w < 12)
{
for (; x < (left + in_w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, in_w * sizeof(float));
x += in_w;
}
for (; x < out_w; x++)
{
outptr[x] = v;
}
ptr += in_w;
outptr += out_w;
}
// fill bottom
for (; y < out_h; y++)
{
int x = 0;
for (; x < out_w; x++)
{
outptr[x] = v;
}
outptr += out_w;
}
}
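/* Layout sketch for the packing performed inside convdw3x3s1/convdw3x3s2
below (illustrative only; the real code also handles channel remainders and
packs the kernel and bias the same way): channels are regrouped in blocks of
8 so that one spatial position holds 8 consecutive channel values, letting a
single _mm256_loadu_ps fetch the same pixel of 8 channels at once. */
static void pack_c8_sketch(const float* src, float* dst, int channels, int hw)
{
  /* src: planar [channels][hw]; dst: interleaved [channels/8][hw][8] */
  for (int c = 0; c + 7 < channels; c += 8)
    for (int p = 0; p < hw; p++)
      for (int l = 0; l < 8; l++)
        dst[(c / 8) * hw * 8 + p * 8 + l] = src[(c + l) * hw + p];
}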
#if __AVX__
static void convdw3x3s1(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw,
int outh, int outw, int num_thread)
{
int inwh = inw * inh;
int outwh = outw * outh;
int channel_count = inc >> 3;
int channel_remain = inc - (channel_count << 3);
// pack the image, kernel and bias data into 8-channel interleaved temporaries
float* img_tmp = ( float* )sys_malloc(8 * inwh * (channel_count + 1) * sizeof(float));
float* kernel_tmp = ( float* )sys_malloc(8 * 9 * (channel_count + 1) * sizeof(float));
float* bias_tmp = ( float* )sys_malloc(8 * (channel_count + 1) * sizeof(float));
{
for (int i = 0; i < channel_count; i++)
{
int ii = i * 8;
const float* k0 = img_data + (ii + 0) * inwh;
const float* k1 = img_data + (ii + 1) * inwh;
const float* k2 = img_data + (ii + 2) * inwh;
const float* k3 = img_data + (ii + 3) * inwh;
const float* k4 = img_data + (ii + 4) * inwh;
const float* k5 = img_data + (ii + 5) * inwh;
const float* k6 = img_data + (ii + 6) * inwh;
const float* k7 = img_data + (ii + 7) * inwh;
const float* f0 = kernel_data + (ii + 0) * 9;
const float* f1 = kernel_data + (ii + 1) * 9;
const float* f2 = kernel_data + (ii + 2) * 9;
const float* f3 = kernel_data + (ii + 3) * 9;
const float* f4 = kernel_data + (ii + 4) * 9;
const float* f5 = kernel_data + (ii + 5) * 9;
const float* f6 = kernel_data + (ii + 6) * 9;
const float* f7 = kernel_data + (ii + 7) * 9;
const float* b0 = bias_data + (ii + 0);
const float* b1 = bias_data + (ii + 1);
const float* b2 = bias_data + (ii + 2);
const float* b3 = bias_data + (ii + 3);
const float* b4 = bias_data + (ii + 4);
const float* b5 = bias_data + (ii + 5);
const float* b6 = bias_data + (ii + 6);
const float* b7 = bias_data + (ii + 7);
float* tmp0 = img_tmp + ii * inwh;
float* tmp1 = kernel_tmp + ii * 9;
float* tmp2 = bias_tmp + ii;
for (int j = 0; j < inwh; j++)
{
tmp0[0] = k0[0];
tmp0[1] = k1[0];
tmp0[2] = k2[0];
tmp0[3] = k3[0];
tmp0[4] = k4[0];
tmp0[5] = k5[0];
tmp0[6] = k6[0];
tmp0[7] = k7[0];
tmp0 += 8;
k0++;
k1++;
k2++;
k3++;
k4++;
k5++;
k6++;
k7++;
}
for (int j = 0; j < 9; j++)
{
tmp1[0] = f0[0];
tmp1[1] = f1[0];
tmp1[2] = f2[0];
tmp1[3] = f3[0];
tmp1[4] = f4[0];
tmp1[5] = f5[0];
tmp1[6] = f6[0];
tmp1[7] = f7[0];
tmp1 += 8;
f0++;
f1++;
f2++;
f3++;
f4++;
f5++;
f6++;
f7++;
}
if (bias_data)
{
tmp2[0] = b0[0];
tmp2[1] = b1[0];
tmp2[2] = b2[0];
tmp2[3] = b3[0];
tmp2[4] = b4[0];
tmp2[5] = b5[0];
tmp2[6] = b6[0];
tmp2[7] = b7[0];
}
else
{
tmp2[0] = 0;
tmp2[1] = 0;
tmp2[2] = 0;
tmp2[3] = 0;
tmp2[4] = 0;
tmp2[5] = 0;
tmp2[6] = 0;
tmp2[7] = 0;
}
}
int i = 0;
for (; i + 3 < channel_remain; i += 4)
{
int ii = channel_count * 8 + i;
float* k0 = img_data + (ii + 0) * inwh;
float* k1 = img_data + (ii + 1) * inwh;
float* k2 = img_data + (ii + 2) * inwh;
float* k3 = img_data + (ii + 3) * inwh;
float* f0 = kernel_data + (ii + 0) * 9;
float* f1 = kernel_data + (ii + 1) * 9;
float* f2 = kernel_data + (ii + 2) * 9;
float* f3 = kernel_data + (ii + 3) * 9;
float* b0 = bias_data + (ii + 0);
float* b1 = bias_data + (ii + 1);
float* b2 = bias_data + (ii + 2);
float* b3 = bias_data + (ii + 3);
float* tmp0 = img_tmp + channel_count * 8 * inwh;
float* tmp1 = kernel_tmp + channel_count * 8 * 9;
float* tmp2 = bias_tmp + ii;
for (int j = 0; j < inwh; j++)
{
tmp0[0] = k0[0];
tmp0[1] = k1[0];
tmp0[2] = k2[0];
tmp0[3] = k3[0];
tmp0 += 8;
k0++;
k1++;
k2++;
k3++;
}
for (int j = 0; j < 9; j++)
{
tmp1[0] = f0[0];
tmp1[1] = f1[0];
tmp1[2] = f2[0];
tmp1[3] = f3[0];
tmp1 += 8;
f0++;
f1++;
f2++;
f3++;
}
if (bias_data)
{
tmp2[0] = b0[0];
tmp2[1] = b1[0];
tmp2[2] = b2[0];
tmp2[3] = b3[0];
}
else
{
tmp2[0] = 0;
tmp2[1] = 0;
tmp2[2] = 0;
tmp2[3] = 0;
}
}
for (; i < channel_remain; i++)
{
int ii = channel_count * 8 + i;
float* k0 = img_data + ii * inwh;
float* f0 = kernel_data + ii * 9;
float* b0 = bias_data + ii;
float* tmp0 = img_tmp + channel_count * 8 * inwh;
float* tmp1 = kernel_tmp + channel_count * 8 * 9;
float* tmp2 = bias_tmp + channel_count * 8;
for (int j = 0; j < inwh; j++)
{
tmp0[i] = k0[0];
tmp0 += 8;
k0++;
}
for (int j = 0; j < 9; j++)
{
tmp1[i] = f0[0];
tmp1 += 8;
f0++;
}
if (bias_data)
{
tmp2[i] = b0[0];
}
else
{
tmp2[i] = 0;
}
}
}
float* output_tmp = ( float* )sys_malloc(outwh * (channel_count + 1) * 8 * sizeof(float));
for (int c = 0; c < channel_count + 1; c++)
{
float* ktmp = kernel_tmp + c * 8 * 9;
float* btmp = bias_tmp + c * 8;
for (int i = 0; i < outh; i++)
{
int j = 0;
float* itmp0 = img_tmp + c * 8 * inwh + 8 * i * inw;
float* itmp1 = img_tmp + c * 8 * inwh + 8 * (i + 1) * inw;
float* itmp2 = img_tmp + c * 8 * inwh + 8 * (i + 2) * inw;
float* otmp = output_tmp + c * 8 * outwh + 8 * i * outw;
for (; j + 7 < outw; j += 8)
{
__m256 _sum0 = _mm256_loadu_ps(btmp);
__m256 _sum1 = _mm256_loadu_ps(btmp);
__m256 _sum2 = _mm256_loadu_ps(btmp);
__m256 _sum3 = _mm256_loadu_ps(btmp);
__m256 _sum4 = _mm256_loadu_ps(btmp);
__m256 _sum5 = _mm256_loadu_ps(btmp);
__m256 _sum6 = _mm256_loadu_ps(btmp);
__m256 _sum7 = _mm256_loadu_ps(btmp);
__m256 _va0 = _mm256_loadu_ps(itmp0);
__m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
__m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
__m256 _va3 = _mm256_loadu_ps(itmp0 + 24);
__m256 _va4 = _mm256_loadu_ps(itmp0 + 32);
__m256 _va5 = _mm256_loadu_ps(itmp0 + 40);
__m256 _va6 = _mm256_loadu_ps(itmp0 + 48);
__m256 _va7 = _mm256_loadu_ps(itmp0 + 56);
__m256 _va8 = _mm256_loadu_ps(itmp0 + 64);
__m256 _va9 = _mm256_loadu_ps(itmp0 + 72);
__m256 _vb0 = _mm256_loadu_ps(ktmp);
__m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
__m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2);
_sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3);
_sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2);
_sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3);
_sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4);
_sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3);
_sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5);
_sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4);
_sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5);
_sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4);
_sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6);
_sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7);
_sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5);
_sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6);
_sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7);
_sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6);
_sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7);
_va0 = _mm256_loadu_ps(itmp1);
_va1 = _mm256_loadu_ps(itmp1 + 8);
_va2 = _mm256_loadu_ps(itmp1 + 16);
_va3 = _mm256_loadu_ps(itmp1 + 24);
_va4 = _mm256_loadu_ps(itmp1 + 32);
_va5 = _mm256_loadu_ps(itmp1 + 40);
_va6 = _mm256_loadu_ps(itmp1 + 48);
_va7 = _mm256_loadu_ps(itmp1 + 56);
_va8 = _mm256_loadu_ps(itmp1 + 64);
_va9 = _mm256_loadu_ps(itmp1 + 72);
_vb0 = _mm256_loadu_ps(ktmp + 24);
_vb1 = _mm256_loadu_ps(ktmp + 32);
_vb2 = _mm256_loadu_ps(ktmp + 40);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2);
_sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3);
_sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2);
_sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3);
_sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4);
_sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3);
_sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5);
_sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4);
_sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5);
_sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4);
_sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6);
_sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7);
_sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5);
_sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6);
_sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7);
_sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6);
_sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7);
_va0 = _mm256_loadu_ps(itmp2);
_va1 = _mm256_loadu_ps(itmp2 + 8);
_va2 = _mm256_loadu_ps(itmp2 + 16);
_va3 = _mm256_loadu_ps(itmp2 + 24);
_va4 = _mm256_loadu_ps(itmp2 + 32);
_va5 = _mm256_loadu_ps(itmp2 + 40);
_va6 = _mm256_loadu_ps(itmp2 + 48);
_va7 = _mm256_loadu_ps(itmp2 + 56);
_va8 = _mm256_loadu_ps(itmp2 + 64);
_va9 = _mm256_loadu_ps(itmp2 + 72);
_vb0 = _mm256_loadu_ps(ktmp + 48);
_vb1 = _mm256_loadu_ps(ktmp + 56);
_vb2 = _mm256_loadu_ps(ktmp + 64);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2);
_sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3);
_sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2);
_sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3);
_sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4);
_sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3);
_sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5);
_sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4);
_sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5);
_sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4);
_sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6);
_sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7);
_sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5);
_sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6);
_sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7);
_sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6);
_sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7);
_mm256_storeu_ps(otmp, _sum0);
_mm256_storeu_ps(otmp + 8, _sum1);
_mm256_storeu_ps(otmp + 16, _sum2);
_mm256_storeu_ps(otmp + 24, _sum3);
_mm256_storeu_ps(otmp + 32, _sum4);
_mm256_storeu_ps(otmp + 40, _sum5);
_mm256_storeu_ps(otmp + 48, _sum6);
_mm256_storeu_ps(otmp + 56, _sum7);
itmp0 += 64;
itmp1 += 64;
itmp2 += 64;
otmp += 64;
}
for (; j + 3 < outw; j += 4)
{
__m256 _sum0 = _mm256_loadu_ps(btmp);
__m256 _sum1 = _mm256_loadu_ps(btmp);
__m256 _sum2 = _mm256_loadu_ps(btmp);
__m256 _sum3 = _mm256_loadu_ps(btmp);
__m256 _va0 = _mm256_loadu_ps(itmp0);
__m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
__m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
__m256 _va3 = _mm256_loadu_ps(itmp0 + 24);
__m256 _va4 = _mm256_loadu_ps(itmp0 + 32);
__m256 _va5 = _mm256_loadu_ps(itmp0 + 40);
__m256 _vb0 = _mm256_loadu_ps(ktmp);
__m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
__m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2);
_sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3);
_sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2);
_sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3);
_sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3);
_va0 = _mm256_loadu_ps(itmp1);
_va1 = _mm256_loadu_ps(itmp1 + 8);
_va2 = _mm256_loadu_ps(itmp1 + 16);
_va3 = _mm256_loadu_ps(itmp1 + 24);
_va4 = _mm256_loadu_ps(itmp1 + 32);
_va5 = _mm256_loadu_ps(itmp1 + 40);
_vb0 = _mm256_loadu_ps(ktmp + 24);
_vb1 = _mm256_loadu_ps(ktmp + 32);
_vb2 = _mm256_loadu_ps(ktmp + 40);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2);
_sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3);
_sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2);
_sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3);
_sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3);
_va0 = _mm256_loadu_ps(itmp2);
_va1 = _mm256_loadu_ps(itmp2 + 8);
_va2 = _mm256_loadu_ps(itmp2 + 16);
_va3 = _mm256_loadu_ps(itmp2 + 24);
_va4 = _mm256_loadu_ps(itmp2 + 32);
_va5 = _mm256_loadu_ps(itmp2 + 40);
_vb0 = _mm256_loadu_ps(ktmp + 48);
_vb1 = _mm256_loadu_ps(ktmp + 56);
_vb2 = _mm256_loadu_ps(ktmp + 64);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2);
_sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3);
_sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2);
_sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3);
_sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3);
_mm256_storeu_ps(otmp, _sum0);
_mm256_storeu_ps(otmp + 8, _sum1);
_mm256_storeu_ps(otmp + 16, _sum2);
_mm256_storeu_ps(otmp + 24, _sum3);
itmp0 += 32;
itmp1 += 32;
itmp2 += 32;
otmp += 32;
}
for (; j + 1 < outw; j += 2)
{
__m256 _sum0 = _mm256_loadu_ps(btmp);
__m256 _sum1 = _mm256_loadu_ps(btmp);
__m256 _va0 = _mm256_loadu_ps(itmp0);
__m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
__m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
__m256 _va3 = _mm256_loadu_ps(itmp0 + 24);
__m256 _vb0 = _mm256_loadu_ps(ktmp);
__m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
__m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
_va0 = _mm256_loadu_ps(itmp1);
_va1 = _mm256_loadu_ps(itmp1 + 8);
_va2 = _mm256_loadu_ps(itmp1 + 16);
_va3 = _mm256_loadu_ps(itmp1 + 24);
_vb0 = _mm256_loadu_ps(ktmp + 24);
_vb1 = _mm256_loadu_ps(ktmp + 32);
_vb2 = _mm256_loadu_ps(ktmp + 40);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
_va0 = _mm256_loadu_ps(itmp2);
_va1 = _mm256_loadu_ps(itmp2 + 8);
_va2 = _mm256_loadu_ps(itmp2 + 16);
_va3 = _mm256_loadu_ps(itmp2 + 24);
_vb0 = _mm256_loadu_ps(ktmp + 48);
_vb1 = _mm256_loadu_ps(ktmp + 56);
_vb2 = _mm256_loadu_ps(ktmp + 64);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
_mm256_storeu_ps(otmp, _sum0);
_mm256_storeu_ps(otmp + 8, _sum1);
itmp0 += 16;
itmp1 += 16;
itmp2 += 16;
otmp += 16;
}
for (; j < outw; j++)
{
__m256 _sum0 = _mm256_loadu_ps(btmp);
__m256 _va0 = _mm256_loadu_ps(itmp0);
__m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
__m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
__m256 _vb0 = _mm256_loadu_ps(ktmp);
__m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
__m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_va0 = _mm256_loadu_ps(itmp1);
_va1 = _mm256_loadu_ps(itmp1 + 8);
_va2 = _mm256_loadu_ps(itmp1 + 16);
_vb0 = _mm256_loadu_ps(ktmp + 24);
_vb1 = _mm256_loadu_ps(ktmp + 32);
_vb2 = _mm256_loadu_ps(ktmp + 40);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_va0 = _mm256_loadu_ps(itmp2);
_va1 = _mm256_loadu_ps(itmp2 + 8);
_va2 = _mm256_loadu_ps(itmp2 + 16);
_vb0 = _mm256_loadu_ps(ktmp + 48);
_vb1 = _mm256_loadu_ps(ktmp + 56);
_vb2 = _mm256_loadu_ps(ktmp + 64);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_mm256_storeu_ps(otmp, _sum0);
itmp0 += 8;
itmp1 += 8;
itmp2 += 8;
otmp += 8;
}
}
}
// unpack the interleaved output back to planar (per-channel) layout
{
for (int i = 0; i < channel_count; i++)
{
float* otmp = output_tmp + i * 8 * outwh;
float* tmp0 = output + i * 8 * outwh;
float* tmp1 = output + i * 8 * outwh + 1 * outwh;
float* tmp2 = output + i * 8 * outwh + 2 * outwh;
float* tmp3 = output + i * 8 * outwh + 3 * outwh;
float* tmp4 = output + i * 8 * outwh + 4 * outwh;
float* tmp5 = output + i * 8 * outwh + 5 * outwh;
float* tmp6 = output + i * 8 * outwh + 6 * outwh;
float* tmp7 = output + i * 8 * outwh + 7 * outwh;
for (int i = 0; i < outwh; i++)
{
tmp0[0] = otmp[0];
tmp1[0] = otmp[1];
tmp2[0] = otmp[2];
tmp3[0] = otmp[3];
tmp4[0] = otmp[4];
tmp5[0] = otmp[5];
tmp6[0] = otmp[6];
tmp7[0] = otmp[7];
otmp += 8;
tmp0++;
tmp1++;
tmp2++;
tmp3++;
tmp4++;
tmp5++;
tmp6++;
tmp7++;
}
}
int i = 0;
for (; i + 3 < channel_remain; i += 4)
{
int ii = channel_count * 8 + i;
float* otmp = output_tmp + ii * outwh;
float* tmp0 = output + ii * outwh;
float* tmp1 = output + ii * outwh + 1 * outwh;
float* tmp2 = output + ii * outwh + 2 * outwh;
float* tmp3 = output + ii * outwh + 3 * outwh;
for (int j = 0; j < outwh; j++)
{
tmp0[0] = otmp[0];
tmp1[0] = otmp[1];
tmp2[0] = otmp[2];
tmp3[0] = otmp[3];
otmp += 8;
tmp0++;
tmp1++;
tmp2++;
tmp3++;
}
}
for (; i < channel_remain; i++)
{
int ii = channel_count * 8 + i;
float* otmp = output_tmp + channel_count * 8 * outwh;
float* tmp0 = output + ii * outwh;
for (int j = 0; j < outwh; j++)
{
tmp0[0] = otmp[i];
otmp += 8;
tmp0++;
}
}
}
sys_free(output_tmp);
sys_free(img_tmp);
sys_free(kernel_tmp);
sys_free(bias_tmp);
}
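/* Scalar reference for what convdw3x3s2 below computes (a sketch only,
ignoring the 8-channel packing, remainder handling and activation; it assumes
the input is already padded so that 2*outw + 1 <= inw and 2*outh + 1 <= inh).
The stride-2 mapping out(oy,ox) <- in(2*oy + ky, 2*ox + kx) is why the packed
kernel starts its input row pointers at rows 2*i, 2*i+1, 2*i+2 and advances
them by two packed pixels per output pixel. */
static void convdw3x3s2_ref(float* output, const float* input, const float* kernel,
                            const float* bias, int inc, int inh, int inw,
                            int outh, int outw)
{
  for (int c = 0; c < inc; c++)
    for (int oy = 0; oy < outh; oy++)
      for (int ox = 0; ox < outw; ox++)
      {
        float sum = bias ? bias[c] : 0.f;
        for (int ky = 0; ky < 3; ky++)
          for (int kx = 0; kx < 3; kx++)
            sum += kernel[c * 9 + ky * 3 + kx]
                 * input[c * inh * inw + (2 * oy + ky) * inw + (2 * ox + kx)];
        output[c * outh * outw + oy * outw + ox] = sum;
      }
}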
static void convdw3x3s2(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw,
int outh, int outw, int num_thread)
{
int inwh = inw * inh;
int outwh = outw * outh;
int channel_count = inc >> 3;
int channel_remain = inc - (channel_count << 3);
// pack the image, kernel and bias data into 8-channel interleaved temporaries
float* img_tmp = ( float* )sys_malloc(8 * inwh * (channel_count + 1) * sizeof(float));
float* kernel_tmp = ( float* )sys_malloc(8 * 9 * (channel_count + 1) * sizeof(float));
float* bias_tmp = ( float* )sys_malloc(8 * (channel_count + 1) * sizeof(float));
{
for (int i = 0; i < channel_count; i++)
{
int ii = i * 8;
const float* k0 = img_data + (ii + 0) * inwh;
const float* k1 = img_data + (ii + 1) * inwh;
const float* k2 = img_data + (ii + 2) * inwh;
const float* k3 = img_data + (ii + 3) * inwh;
const float* k4 = img_data + (ii + 4) * inwh;
const float* k5 = img_data + (ii + 5) * inwh;
const float* k6 = img_data + (ii + 6) * inwh;
const float* k7 = img_data + (ii + 7) * inwh;
const float* f0 = kernel_data + (ii + 0) * 9;
const float* f1 = kernel_data + (ii + 1) * 9;
const float* f2 = kernel_data + (ii + 2) * 9;
const float* f3 = kernel_data + (ii + 3) * 9;
const float* f4 = kernel_data + (ii + 4) * 9;
const float* f5 = kernel_data + (ii + 5) * 9;
const float* f6 = kernel_data + (ii + 6) * 9;
const float* f7 = kernel_data + (ii + 7) * 9;
const float* b0 = bias_data + (ii + 0);
const float* b1 = bias_data + (ii + 1);
const float* b2 = bias_data + (ii + 2);
const float* b3 = bias_data + (ii + 3);
const float* b4 = bias_data + (ii + 4);
const float* b5 = bias_data + (ii + 5);
const float* b6 = bias_data + (ii + 6);
const float* b7 = bias_data + (ii + 7);
float* tmp0 = img_tmp + ii * inwh;
float* tmp1 = kernel_tmp + ii * 9;
float* tmp2 = bias_tmp + ii;
for (int j = 0; j < inwh; j++)
{
tmp0[0] = k0[0];
tmp0[1] = k1[0];
tmp0[2] = k2[0];
tmp0[3] = k3[0];
tmp0[4] = k4[0];
tmp0[5] = k5[0];
tmp0[6] = k6[0];
tmp0[7] = k7[0];
tmp0 += 8;
k0++;
k1++;
k2++;
k3++;
k4++;
k5++;
k6++;
k7++;
}
for (int j = 0; j < 9; j++)
{
tmp1[0] = f0[0];
tmp1[1] = f1[0];
tmp1[2] = f2[0];
tmp1[3] = f3[0];
tmp1[4] = f4[0];
tmp1[5] = f5[0];
tmp1[6] = f6[0];
tmp1[7] = f7[0];
tmp1 += 8;
f0++;
f1++;
f2++;
f3++;
f4++;
f5++;
f6++;
f7++;
}
if (bias_data)
{
tmp2[0] = b0[0];
tmp2[1] = b1[0];
tmp2[2] = b2[0];
tmp2[3] = b3[0];
tmp2[4] = b4[0];
tmp2[5] = b5[0];
tmp2[6] = b6[0];
tmp2[7] = b7[0];
}
else
{
tmp2[0] = 0;
tmp2[1] = 0;
tmp2[2] = 0;
tmp2[3] = 0;
tmp2[4] = 0;
tmp2[5] = 0;
tmp2[6] = 0;
tmp2[7] = 0;
}
}
int i = 0;
for (; i + 3 < channel_remain; i += 4)
{
int ii = channel_count * 8 + i;
float* k0 = img_data + (ii + 0) * inwh;
float* k1 = img_data + (ii + 1) * inwh;
float* k2 = img_data + (ii + 2) * inwh;
float* k3 = img_data + (ii + 3) * inwh;
float* f0 = kernel_data + (ii + 0) * 9;
float* f1 = kernel_data + (ii + 1) * 9;
float* f2 = kernel_data + (ii + 2) * 9;
float* f3 = kernel_data + (ii + 3) * 9;
float* b0 = bias_data + (ii + 0);
float* b1 = bias_data + (ii + 1);
float* b2 = bias_data + (ii + 2);
float* b3 = bias_data + (ii + 3);
float* tmp0 = img_tmp + channel_count * 8 * inwh;
float* tmp1 = kernel_tmp + channel_count * 8 * 9;
float* tmp2 = bias_tmp + ii;
for (int j = 0; j < inwh; j++)
{
tmp0[0] = k0[0];
tmp0[1] = k1[0];
tmp0[2] = k2[0];
tmp0[3] = k3[0];
tmp0 += 8;
k0++;
k1++;
k2++;
k3++;
}
for (int j = 0; j < 9; j++)
{
tmp1[0] = f0[0];
tmp1[1] = f1[0];
tmp1[2] = f2[0];
tmp1[3] = f3[0];
tmp1 += 8;
f0++;
f1++;
f2++;
f3++;
}
if (bias_data)
{
tmp2[0] = b0[0];
tmp2[1] = b1[0];
tmp2[2] = b2[0];
tmp2[3] = b3[0];
}
else
{
tmp2[0] = 0;
tmp2[1] = 0;
tmp2[2] = 0;
tmp2[3] = 0;
}
}
for (; i < channel_remain; i++)
{
int ii = channel_count * 8 + i;
float* k0 = img_data + ii * inwh;
float* f0 = kernel_data + ii * 9;
float* b0 = bias_data + ii;
float* tmp0 = img_tmp + channel_count * 8 * inwh;
float* tmp1 = kernel_tmp + channel_count * 8 * 9;
float* tmp2 = bias_tmp + channel_count * 8;
for (int j = 0; j < inwh; j++)
{
tmp0[i] = k0[0];
tmp0 += 8;
k0++;
}
for (int j = 0; j < 9; j++)
{
tmp1[i] = f0[0];
tmp1 += 8;
f0++;
}
if (bias_data)
{
tmp2[i] = b0[0];
}
else
{
tmp2[i] = 0;
}
}
}
float* output_tmp = ( float* )sys_malloc(outwh * (channel_count + 1) * 8 * sizeof(float));
for (int c = 0; c < channel_count + 1; c++)
{
float* ktmp = kernel_tmp + c * 8 * 9;
float* btmp = bias_tmp + c * 8;
for (int i = 0; i < outh; i++)
{
int j = 0;
float* itmp0 = img_tmp + c * 8 * inwh + 8 * i * 2 * inw;
float* itmp1 = img_tmp + c * 8 * inwh + 8 * (i * 2 + 1) * inw;
float* itmp2 = img_tmp + c * 8 * inwh + 8 * (i * 2 + 2) * inw;
float* otmp = output_tmp + c * 8 * outwh + 8 * i * outw;
for (; j + 3 < outw; j += 4)
{
__m256 _sum0 = _mm256_loadu_ps(btmp);
__m256 _sum1 = _mm256_loadu_ps(btmp);
__m256 _sum2 = _mm256_loadu_ps(btmp);
__m256 _sum3 = _mm256_loadu_ps(btmp);
__m256 _va0 = _mm256_loadu_ps(itmp0);
__m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
__m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
__m256 _va3 = _mm256_loadu_ps(itmp0 + 24);
__m256 _va4 = _mm256_loadu_ps(itmp0 + 32);
__m256 _va5 = _mm256_loadu_ps(itmp0 + 40);
__m256 _va6 = _mm256_loadu_ps(itmp0 + 48);
__m256 _va7 = _mm256_loadu_ps(itmp0 + 56);
__m256 _va8 = _mm256_loadu_ps(itmp0 + 64);
__m256 _vb0 = _mm256_loadu_ps(ktmp);
__m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
__m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1);
_sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1);
_sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2);
_sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2);
_sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3);
_sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3);
_sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3);
_va0 = _mm256_loadu_ps(itmp1);
_va1 = _mm256_loadu_ps(itmp1 + 8);
_va2 = _mm256_loadu_ps(itmp1 + 16);
_va3 = _mm256_loadu_ps(itmp1 + 24);
_va4 = _mm256_loadu_ps(itmp1 + 32);
_va5 = _mm256_loadu_ps(itmp1 + 40);
_va6 = _mm256_loadu_ps(itmp1 + 48);
_va7 = _mm256_loadu_ps(itmp1 + 56);
_va8 = _mm256_loadu_ps(itmp1 + 64);
_vb0 = _mm256_loadu_ps(ktmp + 24);
_vb1 = _mm256_loadu_ps(ktmp + 32);
_vb2 = _mm256_loadu_ps(ktmp + 40);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1);
_sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1);
_sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2);
_sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2);
_sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3);
_sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3);
_sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3);
_va0 = _mm256_loadu_ps(itmp2);
_va1 = _mm256_loadu_ps(itmp2 + 8);
_va2 = _mm256_loadu_ps(itmp2 + 16);
_va3 = _mm256_loadu_ps(itmp2 + 24);
_va4 = _mm256_loadu_ps(itmp2 + 32);
_va5 = _mm256_loadu_ps(itmp2 + 40);
_va6 = _mm256_loadu_ps(itmp2 + 48);
_va7 = _mm256_loadu_ps(itmp2 + 56);
_va8 = _mm256_loadu_ps(itmp2 + 64);
_vb0 = _mm256_loadu_ps(ktmp + 48);
_vb1 = _mm256_loadu_ps(ktmp + 56);
_vb2 = _mm256_loadu_ps(ktmp + 64);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1);
_sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1);
_sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2);
_sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2);
_sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3);
_sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3);
_sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3);
_mm256_storeu_ps(otmp, _sum0);
_mm256_storeu_ps(otmp + 8, _sum1);
_mm256_storeu_ps(otmp + 16, _sum2);
_mm256_storeu_ps(otmp + 24, _sum3);
itmp0 += 64;
itmp1 += 64;
itmp2 += 64;
otmp += 32;
}
for (; j + 1 < outw; j += 2)
{
__m256 _sum0 = _mm256_loadu_ps(btmp);
__m256 _sum1 = _mm256_loadu_ps(btmp);
__m256 _va0 = _mm256_loadu_ps(itmp0);
__m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
__m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
__m256 _va3 = _mm256_loadu_ps(itmp0 + 24);
__m256 _va4 = _mm256_loadu_ps(itmp0 + 32);
__m256 _vb0 = _mm256_loadu_ps(ktmp);
__m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
__m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1);
_sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1);
_sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1);
_va0 = _mm256_loadu_ps(itmp1);
_va1 = _mm256_loadu_ps(itmp1 + 8);
_va2 = _mm256_loadu_ps(itmp1 + 16);
_va3 = _mm256_loadu_ps(itmp1 + 24);
_va4 = _mm256_loadu_ps(itmp1 + 32);
_vb0 = _mm256_loadu_ps(ktmp + 24);
_vb1 = _mm256_loadu_ps(ktmp + 32);
_vb2 = _mm256_loadu_ps(ktmp + 40);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1);
_sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1);
_sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1);
_va0 = _mm256_loadu_ps(itmp2);
_va1 = _mm256_loadu_ps(itmp2 + 8);
_va2 = _mm256_loadu_ps(itmp2 + 16);
_va3 = _mm256_loadu_ps(itmp2 + 24);
_va4 = _mm256_loadu_ps(itmp2 + 32);
_vb0 = _mm256_loadu_ps(ktmp + 48);
_vb1 = _mm256_loadu_ps(ktmp + 56);
_vb2 = _mm256_loadu_ps(ktmp + 64);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1);
_sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1);
_sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1);
_mm256_storeu_ps(otmp, _sum0);
_mm256_storeu_ps(otmp + 8, _sum1);
itmp0 += 32;
itmp1 += 32;
itmp2 += 32;
otmp += 16;
}
for (; j < outw; j++)
{
__m256 _sum0 = _mm256_loadu_ps(btmp);
__m256 _va0 = _mm256_loadu_ps(itmp0);
__m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
__m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
__m256 _vb0 = _mm256_loadu_ps(ktmp);
__m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
__m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_va0 = _mm256_loadu_ps(itmp1);
_va1 = _mm256_loadu_ps(itmp1 + 8);
_va2 = _mm256_loadu_ps(itmp1 + 16);
_vb0 = _mm256_loadu_ps(ktmp + 24);
_vb1 = _mm256_loadu_ps(ktmp + 32);
_vb2 = _mm256_loadu_ps(ktmp + 40);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_va0 = _mm256_loadu_ps(itmp2);
_va1 = _mm256_loadu_ps(itmp2 + 8);
_va2 = _mm256_loadu_ps(itmp2 + 16);
_vb0 = _mm256_loadu_ps(ktmp + 48);
_vb1 = _mm256_loadu_ps(ktmp + 56);
_vb2 = _mm256_loadu_ps(ktmp + 64);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_mm256_storeu_ps(otmp, _sum0);
itmp0 += 16;
itmp1 += 16;
itmp2 += 16;
otmp += 8;
}
}
}
// unpack the channel-blocked (8-wide) result back to the planar output layout
{
for (int i = 0; i < channel_count; i++)
{
float* otmp = output_tmp + i * 8 * outwh;
float* tmp0 = output + i * 8 * outwh;
float* tmp1 = output + i * 8 * outwh + 1 * outwh;
float* tmp2 = output + i * 8 * outwh + 2 * outwh;
float* tmp3 = output + i * 8 * outwh + 3 * outwh;
float* tmp4 = output + i * 8 * outwh + 4 * outwh;
float* tmp5 = output + i * 8 * outwh + 5 * outwh;
float* tmp6 = output + i * 8 * outwh + 6 * outwh;
float* tmp7 = output + i * 8 * outwh + 7 * outwh;
for (int j = 0; j < outwh; j++)
{
tmp0[0] = otmp[0];
tmp1[0] = otmp[1];
tmp2[0] = otmp[2];
tmp3[0] = otmp[3];
tmp4[0] = otmp[4];
tmp5[0] = otmp[5];
tmp6[0] = otmp[6];
tmp7[0] = otmp[7];
otmp += 8;
tmp0++;
tmp1++;
tmp2++;
tmp3++;
tmp4++;
tmp5++;
tmp6++;
tmp7++;
}
}
int i = 0;
for (; i + 3 < channel_remain; i += 4)
{
int ii = channel_count * 8 + i;
float* otmp = output_tmp + ii * outwh;
float* tmp0 = output + ii * outwh;
float* tmp1 = output + ii * outwh + 1 * outwh;
float* tmp2 = output + ii * outwh + 2 * outwh;
float* tmp3 = output + ii * outwh + 3 * outwh;
for (int j = 0; j < outwh; j++)
{
tmp0[0] = otmp[0];
tmp1[0] = otmp[1];
tmp2[0] = otmp[2];
tmp3[0] = otmp[3];
otmp += 8;
tmp0++;
tmp1++;
tmp2++;
tmp3++;
}
}
for (; i < channel_remain; i++)
{
int ii = channel_count * 8 + i;
float* otmp = output_tmp + channel_count * 8 * outwh;
float* tmp0 = output + ii * outwh;
for (int j = 0; j < outwh; j++)
{
tmp0[0] = otmp[i];
otmp += 8;
tmp0++;
}
}
}
sys_free(output_tmp);
sys_free(img_tmp);
sys_free(kernel_tmp);
sys_free(bias_tmp);
}
#elif __SSE2__
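// SSE2 path: channels are processed four at a time. The input, kernel and
// bias are first repacked into 4-wide interleaved buffers, the 3x3 depthwise
// convolution is evaluated with one __m128 lane per channel, and the packed
// result is written back to the planar output layout at the end.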
static void convdw3x3s1(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw,
int outh, int outw, int num_thread)
{
int inwh = inw * inh;
int outwh = outw * outh;
int channel_count = inc >> 2;
int channel_remain = inc - (channel_count << 2);
// pack the input, kernel and bias into channel-blocked (4-wide) temporary buffers
float* img_tmp = ( float* )sys_malloc(4 * inwh * (channel_count + 1) * sizeof(float));
float* kernel_tmp = ( float* )sys_malloc(4 * 9 * (channel_count + 1) * sizeof(float));
float* bias_tmp = ( float* )sys_malloc(4 * (channel_count + 1) * sizeof(float));
{
for (int i = 0; i < channel_count; i++)
{
int ii = i * 4;
float* k0 = img_data + (ii + 0) * inwh;
float* k1 = img_data + (ii + 1) * inwh;
float* k2 = img_data + (ii + 2) * inwh;
float* k3 = img_data + (ii + 3) * inwh;
float* f0 = kernel_data + (ii + 0) * 9;
float* f1 = kernel_data + (ii + 1) * 9;
float* f2 = kernel_data + (ii + 2) * 9;
float* f3 = kernel_data + (ii + 3) * 9;
float* b0 = bias_data + (ii + 0);
float* b1 = bias_data + (ii + 1);
float* b2 = bias_data + (ii + 2);
float* b3 = bias_data + (ii + 3);
float* tmp0 = img_tmp + ii * inwh;
float* tmp1 = kernel_tmp + ii * 9;
float* tmp2 = bias_tmp + ii;
for (int j = 0; j < inwh; j++)
{
tmp0[0] = k0[0];
tmp0[1] = k1[0];
tmp0[2] = k2[0];
tmp0[3] = k3[0];
tmp0 += 4;
k0++;
k1++;
k2++;
k3++;
}
for (int j = 0; j < 9; j++)
{
tmp1[0] = f0[0];
tmp1[1] = f1[0];
tmp1[2] = f2[0];
tmp1[3] = f3[0];
tmp1 += 4;
f0++;
f1++;
f2++;
f3++;
}
if (bias_data)
{
tmp2[0] = b0[0];
tmp2[1] = b1[0];
tmp2[2] = b2[0];
tmp2[3] = b3[0];
}
else
{
tmp2[0] = 0;
tmp2[1] = 0;
tmp2[2] = 0;
tmp2[3] = 0;
}
}
for (int i = 0; i < channel_remain; i++)
{
int ii = channel_count * 4 + i;
float* k0 = img_data + ii * inwh;
float* f0 = kernel_data + ii * 9;
float* b0 = bias_data + ii;
float* tmp0 = img_tmp + channel_count * 4 * inwh;
float* tmp1 = kernel_tmp + channel_count * 4 * 9;
float* tmp2 = bias_tmp + channel_count * 4;
for (int j = 0; j < inwh; j++)
{
tmp0[i] = k0[0];
tmp0 += 4;
k0++;
}
for (int j = 0; j < 9; j++)
{
tmp1[i] = f0[0];
tmp1 += 4;
f0++;
}
if (bias_data)
{
tmp2[i] = b0[0];
}
else
{
tmp2[i] = 0;
}
}
}
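// direct 3x3 stride-1 convolution on the packed data: itmp0/1/2 walk three
// consecutive input rows and 8, 4 or 1 output columns are produced per
// iteration, each as a 4-lane vector (one lane per channel).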
float* output_tmp = ( float* )sys_malloc(outwh * 4 * (channel_count + 1) * sizeof(float));
for (int c = 0; c < channel_count + 1; c++)
{
float* ktmp = kernel_tmp + c * 4 * 9;
float* btmp = bias_tmp + c * 4;
for (int i = 0; i < outh; i++)
{
int j = 0;
float* itmp0 = img_tmp + c * 4 * inwh + 4 * i * inw;
float* itmp1 = img_tmp + c * 4 * inwh + 4 * (i + 1) * inw;
float* itmp2 = img_tmp + c * 4 * inwh + 4 * (i + 2) * inw;
float* otmp = output_tmp + c * 4 * outwh + 4 * i * outw;
for (; j + 7 < outw; j += 8)
{
#if __SSE__
__m128 _sum0 = _mm_loadu_ps(btmp);
__m128 _sum1 = _mm_loadu_ps(btmp);
__m128 _sum2 = _mm_loadu_ps(btmp);
__m128 _sum3 = _mm_loadu_ps(btmp);
__m128 _sum4 = _mm_loadu_ps(btmp);
__m128 _sum5 = _mm_loadu_ps(btmp);
__m128 _sum6 = _mm_loadu_ps(btmp);
__m128 _sum7 = _mm_loadu_ps(btmp);
__m128 _va0 = _mm_loadu_ps(itmp0);
__m128 _va1 = _mm_loadu_ps(itmp0 + 4);
__m128 _va2 = _mm_loadu_ps(itmp0 + 8);
__m128 _va3 = _mm_loadu_ps(itmp0 + 12);
__m128 _va4 = _mm_loadu_ps(itmp0 + 16);
__m128 _va5 = _mm_loadu_ps(itmp0 + 20);
__m128 _va6 = _mm_loadu_ps(itmp0 + 24);
__m128 _va7 = _mm_loadu_ps(itmp0 + 28);
__m128 _va8 = _mm_loadu_ps(itmp0 + 32);
__m128 _va9 = _mm_loadu_ps(itmp0 + 36);
__m128 _vb0 = _mm_loadu_ps(ktmp);
__m128 _vb1 = _mm_loadu_ps(ktmp + 4);
__m128 _vb2 = _mm_loadu_ps(ktmp + 8);
_sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
_sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3);
_sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
_sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
_sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4);
_sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
_sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5);
_sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4);
_sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5);
_sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4);
_sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6);
_sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7);
_sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5);
_sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6);
_sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7);
_sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6);
_sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7);
_va0 = _mm_loadu_ps(itmp1);
_va1 = _mm_loadu_ps(itmp1 + 4);
_va2 = _mm_loadu_ps(itmp1 + 8);
_va3 = _mm_loadu_ps(itmp1 + 12);
_va4 = _mm_loadu_ps(itmp1 + 16);
_va5 = _mm_loadu_ps(itmp1 + 20);
_va6 = _mm_loadu_ps(itmp1 + 24);
_va7 = _mm_loadu_ps(itmp1 + 28);
_va8 = _mm_loadu_ps(itmp1 + 32);
_va9 = _mm_loadu_ps(itmp1 + 36);
_vb0 = _mm_loadu_ps(ktmp + 12);
_vb1 = _mm_loadu_ps(ktmp + 16);
_vb2 = _mm_loadu_ps(ktmp + 20);
_sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
_sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3);
_sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
_sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
_sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4);
_sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
_sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5);
_sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4);
_sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5);
_sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4);
_sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6);
_sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7);
_sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5);
_sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6);
_sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7);
_sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6);
_sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7);
_va0 = _mm_loadu_ps(itmp2);
_va1 = _mm_loadu_ps(itmp2 + 4);
_va2 = _mm_loadu_ps(itmp2 + 8);
_va3 = _mm_loadu_ps(itmp2 + 12);
_va4 = _mm_loadu_ps(itmp2 + 16);
_va5 = _mm_loadu_ps(itmp2 + 20);
_va6 = _mm_loadu_ps(itmp2 + 24);
_va7 = _mm_loadu_ps(itmp2 + 28);
_va8 = _mm_loadu_ps(itmp2 + 32);
_va9 = _mm_loadu_ps(itmp2 + 36);
_vb0 = _mm_loadu_ps(ktmp + 24);
_vb1 = _mm_loadu_ps(ktmp + 28);
_vb2 = _mm_loadu_ps(ktmp + 32);
_sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
_sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3);
_sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
_sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
_sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4);
_sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
_sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5);
_sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4);
_sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5);
_sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4);
_sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6);
_sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7);
_sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5);
_sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6);
_sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7);
_sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6);
_sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7);
_mm_storeu_ps(otmp, _sum0);
_mm_storeu_ps(otmp + 4, _sum1);
_mm_storeu_ps(otmp + 8, _sum2);
_mm_storeu_ps(otmp + 12, _sum3);
_mm_storeu_ps(otmp + 16, _sum4);
_mm_storeu_ps(otmp + 20, _sum5);
_mm_storeu_ps(otmp + 24, _sum6);
_mm_storeu_ps(otmp + 28, _sum7);
#else
float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
float sum4[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
float sum5[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
float sum6[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
float sum7[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
for (int k = 0; k < 4; k++)
{
sum0[k] += itmp0[k] * ktmp[k];
sum0[k] += itmp1[k] * ktmp[k + 12];
sum0[k] += itmp2[k] * ktmp[k + 24];
sum0[k] += itmp0[k + 4] * ktmp[k + 4];
sum0[k] += itmp1[k + 4] * ktmp[k + 16];
sum0[k] += itmp2[k + 4] * ktmp[k + 28];
sum0[k] += itmp0[k + 8] * ktmp[k + 8];
sum0[k] += itmp1[k + 8] * ktmp[k + 20];
sum0[k] += itmp2[k + 8] * ktmp[k + 32];
sum1[k] += itmp0[k + 4] * ktmp[k];
sum1[k] += itmp1[k + 4] * ktmp[k + 12];
sum1[k] += itmp2[k + 4] * ktmp[k + 24];
sum1[k] += itmp0[k + 8] * ktmp[k + 4];
sum1[k] += itmp1[k + 8] * ktmp[k + 16];
sum1[k] += itmp2[k + 8] * ktmp[k + 28];
sum1[k] += itmp0[k + 12] * ktmp[k + 8];
sum1[k] += itmp1[k + 12] * ktmp[k + 20];
sum1[k] += itmp2[k + 12] * ktmp[k + 32];
sum2[k] += itmp0[k + 8] * ktmp[k];
sum2[k] += itmp1[k + 8] * ktmp[k + 12];
sum2[k] += itmp2[k + 8] * ktmp[k + 24];
sum2[k] += itmp0[k + 12] * ktmp[k + 4];
sum2[k] += itmp1[k + 12] * ktmp[k + 16];
sum2[k] += itmp2[k + 12] * ktmp[k + 28];
sum2[k] += itmp0[k + 16] * ktmp[k + 8];
sum2[k] += itmp1[k + 16] * ktmp[k + 20];
sum2[k] += itmp2[k + 16] * ktmp[k + 32];
sum3[k] += itmp0[k + 12] * ktmp[k];
sum3[k] += itmp1[k + 12] * ktmp[k + 12];
sum3[k] += itmp2[k + 12] * ktmp[k + 24];
sum3[k] += itmp0[k + 16] * ktmp[k + 4];
sum3[k] += itmp1[k + 16] * ktmp[k + 16];
sum3[k] += itmp2[k + 16] * ktmp[k + 28];
sum3[k] += itmp0[k + 20] * ktmp[k + 8];
sum3[k] += itmp1[k + 20] * ktmp[k + 20];
sum3[k] += itmp2[k + 20] * ktmp[k + 32];
sum4[k] += itmp0[k + 16] * ktmp[k];
sum4[k] += itmp1[k + 16] * ktmp[k + 12];
sum4[k] += itmp2[k + 16] * ktmp[k + 24];
sum4[k] += itmp0[k + 20] * ktmp[k + 4];
sum4[k] += itmp1[k + 20] * ktmp[k + 16];
sum4[k] += itmp2[k + 20] * ktmp[k + 28];
sum4[k] += itmp0[k + 24] * ktmp[k + 8];
sum4[k] += itmp1[k + 24] * ktmp[k + 20];
sum4[k] += itmp2[k + 24] * ktmp[k + 32];
sum5[k] += itmp0[k + 20] * ktmp[k];
sum5[k] += itmp1[k + 20] * ktmp[k + 12];
sum5[k] += itmp2[k + 20] * ktmp[k + 24];
sum5[k] += itmp0[k + 24] * ktmp[k + 4];
sum5[k] += itmp1[k + 24] * ktmp[k + 16];
sum5[k] += itmp2[k + 24] * ktmp[k + 28];
sum5[k] += itmp0[k + 28] * ktmp[k + 8];
sum5[k] += itmp1[k + 28] * ktmp[k + 20];
sum5[k] += itmp2[k + 28] * ktmp[k + 32];
sum6[k] += itmp0[k + 24] * ktmp[k];
sum6[k] += itmp1[k + 24] * ktmp[k + 12];
sum6[k] += itmp2[k + 24] * ktmp[k + 24];
sum6[k] += itmp0[k + 28] * ktmp[k + 4];
sum6[k] += itmp1[k + 28] * ktmp[k + 16];
sum6[k] += itmp2[k + 28] * ktmp[k + 28];
sum6[k] += itmp0[k + 32] * ktmp[k + 8];
sum6[k] += itmp1[k + 32] * ktmp[k + 20];
sum6[k] += itmp2[k + 32] * ktmp[k + 32];
sum7[k] += itmp0[k + 28] * ktmp[k];
sum7[k] += itmp1[k + 28] * ktmp[k + 12];
sum7[k] += itmp2[k + 28] * ktmp[k + 24];
sum7[k] += itmp0[k + 32] * ktmp[k + 4];
sum7[k] += itmp1[k + 32] * ktmp[k + 16];
sum7[k] += itmp2[k + 32] * ktmp[k + 28];
sum7[k] += itmp0[k + 36] * ktmp[k + 8];
sum7[k] += itmp1[k + 36] * ktmp[k + 20];
sum7[k] += itmp2[k + 36] * ktmp[k + 32];
}
for (int k = 0; k < 4; k++)
{
otmp[k] = sum0[k];
otmp[k + 4] = sum1[k];
otmp[k + 8] = sum2[k];
otmp[k + 12] = sum3[k];
otmp[k + 16] = sum4[k];
otmp[k + 20] = sum5[k];
otmp[k + 24] = sum6[k];
otmp[k + 28] = sum7[k];
}
#endif
itmp0 += 32;
itmp1 += 32;
itmp2 += 32;
otmp += 32;
}
for (; j + 3 < outw; j += 4)
{
#if __SSE__
__m128 _sum0 = _mm_loadu_ps(btmp);
__m128 _sum1 = _mm_loadu_ps(btmp);
__m128 _sum2 = _mm_loadu_ps(btmp);
__m128 _sum3 = _mm_loadu_ps(btmp);
__m128 _va0 = _mm_loadu_ps(itmp0);
__m128 _va1 = _mm_loadu_ps(itmp0 + 4);
__m128 _va2 = _mm_loadu_ps(itmp0 + 8);
__m128 _va3 = _mm_loadu_ps(itmp0 + 12);
__m128 _va4 = _mm_loadu_ps(itmp0 + 16);
__m128 _va5 = _mm_loadu_ps(itmp0 + 20);
__m128 _vb0 = _mm_loadu_ps(ktmp);
__m128 _vb1 = _mm_loadu_ps(ktmp + 4);
__m128 _vb2 = _mm_loadu_ps(ktmp + 8);
_sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
_sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3);
_sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
_sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
_sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
_va0 = _mm_loadu_ps(itmp1);
_va1 = _mm_loadu_ps(itmp1 + 4);
_va2 = _mm_loadu_ps(itmp1 + 8);
_va3 = _mm_loadu_ps(itmp1 + 12);
_va4 = _mm_loadu_ps(itmp1 + 16);
_va5 = _mm_loadu_ps(itmp1 + 20);
_vb0 = _mm_loadu_ps(ktmp + 12);
_vb1 = _mm_loadu_ps(ktmp + 16);
_vb2 = _mm_loadu_ps(ktmp + 20);
_sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
_sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3);
_sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
_sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
_sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
_va0 = _mm_loadu_ps(itmp2);
_va1 = _mm_loadu_ps(itmp2 + 4);
_va2 = _mm_loadu_ps(itmp2 + 8);
_va3 = _mm_loadu_ps(itmp2 + 12);
_va4 = _mm_loadu_ps(itmp2 + 16);
_va5 = _mm_loadu_ps(itmp2 + 20);
_vb0 = _mm_loadu_ps(ktmp + 24);
_vb1 = _mm_loadu_ps(ktmp + 28);
_vb2 = _mm_loadu_ps(ktmp + 32);
_sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
_sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
_sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3);
_sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
_sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
_sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
_mm_storeu_ps(otmp, _sum0);
_mm_storeu_ps(otmp + 4, _sum1);
_mm_storeu_ps(otmp + 8, _sum2);
_mm_storeu_ps(otmp + 12, _sum3);
#else
float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
for (int k = 0; k < 4; k++)
{
sum0[k] += itmp0[k] * ktmp[k];
sum0[k] += itmp1[k] * ktmp[k + 12];
sum0[k] += itmp2[k] * ktmp[k + 24];
sum0[k] += itmp0[k + 4] * ktmp[k + 4];
sum0[k] += itmp1[k + 4] * ktmp[k + 16];
sum0[k] += itmp2[k + 4] * ktmp[k + 28];
sum0[k] += itmp0[k + 8] * ktmp[k + 8];
sum0[k] += itmp1[k + 8] * ktmp[k + 20];
sum0[k] += itmp2[k + 8] * ktmp[k + 32];
sum1[k] += itmp0[k + 4] * ktmp[k];
sum1[k] += itmp1[k + 4] * ktmp[k + 12];
sum1[k] += itmp2[k + 4] * ktmp[k + 24];
sum1[k] += itmp0[k + 8] * ktmp[k + 4];
sum1[k] += itmp1[k + 8] * ktmp[k + 16];
sum1[k] += itmp2[k + 8] * ktmp[k + 28];
sum1[k] += itmp0[k + 12] * ktmp[k + 8];
sum1[k] += itmp1[k + 12] * ktmp[k + 20];
sum1[k] += itmp2[k + 12] * ktmp[k + 32];
sum2[k] += itmp0[k + 8] * ktmp[k];
sum2[k] += itmp1[k + 8] * ktmp[k + 12];
sum2[k] += itmp2[k + 8] * ktmp[k + 24];
sum2[k] += itmp0[k + 12] * ktmp[k + 4];
sum2[k] += itmp1[k + 12] * ktmp[k + 16];
sum2[k] += itmp2[k + 12] * ktmp[k + 28];
sum2[k] += itmp0[k + 16] * ktmp[k + 8];
sum2[k] += itmp1[k + 16] * ktmp[k + 20];
sum2[k] += itmp2[k + 16] * ktmp[k + 32];
sum3[k] += itmp0[k + 12] * ktmp[k];
sum3[k] += itmp1[k + 12] * ktmp[k + 12];
sum3[k] += itmp2[k + 12] * ktmp[k + 24];
sum3[k] += itmp0[k + 16] * ktmp[k + 4];
sum3[k] += itmp1[k + 16] * ktmp[k + 16];
sum3[k] += itmp2[k + 16] * ktmp[k + 28];
sum3[k] += itmp0[k + 20] * ktmp[k + 8];
sum3[k] += itmp1[k + 20] * ktmp[k + 20];
sum3[k] += itmp2[k + 20] * ktmp[k + 32];
}
for (int k = 0; k < 4; k++)
{
otmp[k] = sum0[k];
otmp[k + 4] = sum1[k];
otmp[k + 8] = sum2[k];
otmp[k + 12] = sum3[k];
}
#endif
itmp0 += 16;
itmp1 += 16;
itmp2 += 16;
otmp += 16;
}
for (; j < outw; j++)
{
#if __SSE__
__m128 _sum0 = _mm_loadu_ps(btmp);
__m128 _va0 = _mm_loadu_ps(itmp0);
__m128 _va1 = _mm_loadu_ps(itmp0 + 4);
__m128 _va2 = _mm_loadu_ps(itmp0 + 8);
__m128 _vb0 = _mm_loadu_ps(ktmp);
__m128 _vb1 = _mm_loadu_ps(ktmp + 4);
__m128 _vb2 = _mm_loadu_ps(ktmp + 8);
_sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
_sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
_sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
_va0 = _mm_loadu_ps(itmp1);
_va1 = _mm_loadu_ps(itmp1 + 4);
_va2 = _mm_loadu_ps(itmp1 + 8);
_vb0 = _mm_loadu_ps(ktmp + 12);
_vb1 = _mm_loadu_ps(ktmp + 16);
_vb2 = _mm_loadu_ps(ktmp + 20);
_sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
_sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
_sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
_va0 = _mm_loadu_ps(itmp2);
_va1 = _mm_loadu_ps(itmp2 + 4);
_va2 = _mm_loadu_ps(itmp2 + 8);
_vb0 = _mm_loadu_ps(ktmp + 24);
_vb1 = _mm_loadu_ps(ktmp + 28);
_vb2 = _mm_loadu_ps(ktmp + 32);
_sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
_sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
_sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
_mm_storeu_ps(otmp, _sum0);
#else
float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
for (int k = 0; k < 4; k++)
{
sum0[k] += itmp0[k] * ktmp[k];
sum0[k] += itmp1[k] * ktmp[k + 12];
sum0[k] += itmp2[k] * ktmp[k + 24];
sum0[k] += itmp0[k + 4] * ktmp[k + 4];
sum0[k] += itmp1[k + 4] * ktmp[k + 16];
sum0[k] += itmp2[k + 4] * ktmp[k + 28];
sum0[k] += itmp0[k + 8] * ktmp[k + 8];
sum0[k] += itmp1[k + 8] * ktmp[k + 20];
sum0[k] += itmp2[k + 8] * ktmp[k + 32];
}
for (int k = 0; k < 4; k++)
{
otmp[k] = sum0[k];
}
#endif
itmp0 += 4;
itmp1 += 4;
itmp2 += 4;
otmp += 4;
}
}
}
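// unpack the channel-blocked result back to the planar output layout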
{
for (int i = 0; i < channel_count; i++)
{
float* otmp = output_tmp + i * 4 * outwh;
float* tmp0 = output + i * 4 * outwh;
float* tmp1 = output + i * 4 * outwh + 1 * outwh;
float* tmp2 = output + i * 4 * outwh + 2 * outwh;
float* tmp3 = output + i * 4 * outwh + 3 * outwh;
for (int j = 0; j < outwh; j++)
{
tmp0[0] = otmp[0];
tmp1[0] = otmp[1];
tmp2[0] = otmp[2];
tmp3[0] = otmp[3];
otmp += 4;
tmp0++;
tmp1++;
tmp2++;
tmp3++;
}
}
for (int i = 0; i < channel_remain; i++)
{
int ii = channel_count * 4 + i;
float* otmp = output_tmp + channel_count * 4 * outwh;
float* tmp0 = output + ii * outwh;
for (int j = 0; j < outwh; j++)
{
tmp0[0] = otmp[i];
otmp += 4;
tmp0++;
}
}
}
sys_free(output_tmp);
sys_free(img_tmp);
sys_free(kernel_tmp);
sys_free(bias_tmp);
}
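// stride-2 variant of the channel-blocked kernel above: each output row
// reads packed input rows 2*i, 2*i+1 and 2*i+2, and each output column
// consumes two packed input columns (8 floats per channel block).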
static void convdw3x3s2(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw,
int outh, int outw, int num_thread)
{
int inwh = inw * inh;
int outwh = outw * outh;
int channel_count = inc >> 2;
int channel_remain = inc - (channel_count << 2);
// pack the input, kernel and bias into channel-blocked (4-wide) temporary buffers
float* img_tmp = ( float* )sys_malloc(4 * inwh * (channel_count + 1) * sizeof(float));
float* kernel_tmp = ( float* )sys_malloc(4 * 9 * (channel_count + 1) * sizeof(float));
float* bias_tmp = ( float* )sys_malloc(4 * (channel_count + 1) * sizeof(float));
{
for (int i = 0; i < channel_count; i++)
{
int ii = i * 4;
float* k0 = img_data + (ii + 0) * inwh;
float* k1 = img_data + (ii + 1) * inwh;
float* k2 = img_data + (ii + 2) * inwh;
float* k3 = img_data + (ii + 3) * inwh;
float* f0 = kernel_data + (ii + 0) * 9;
float* f1 = kernel_data + (ii + 1) * 9;
float* f2 = kernel_data + (ii + 2) * 9;
float* f3 = kernel_data + (ii + 3) * 9;
float* b0 = bias_data + (ii + 0);
float* b1 = bias_data + (ii + 1);
float* b2 = bias_data + (ii + 2);
float* b3 = bias_data + (ii + 3);
float* tmp0 = img_tmp + ii * inwh;
float* tmp1 = kernel_tmp + ii * 9;
float* tmp2 = bias_tmp + ii;
for (int j = 0; j < inwh; j++)
{
tmp0[0] = k0[0];
tmp0[1] = k1[0];
tmp0[2] = k2[0];
tmp0[3] = k3[0];
tmp0 += 4;
k0++;
k1++;
k2++;
k3++;
}
for (int j = 0; j < 9; j++)
{
tmp1[0] = f0[0];
tmp1[1] = f1[0];
tmp1[2] = f2[0];
tmp1[3] = f3[0];
tmp1 += 4;
f0++;
f1++;
f2++;
f3++;
}
if (bias_data)
{
tmp2[0] = b0[0];
tmp2[1] = b1[0];
tmp2[2] = b2[0];
tmp2[3] = b3[0];
}
else
{
tmp2[0] = 0;
tmp2[1] = 0;
tmp2[2] = 0;
tmp2[3] = 0;
}
}
for (int i = 0; i < channel_remain; i++)
{
int ii = channel_count * 4 + i;
float* k0 = img_data + ii * inwh;
float* f0 = kernel_data + ii * 9;
float* b0 = bias_data + ii;
float* tmp0 = img_tmp + channel_count * 4 * inwh;
float* tmp1 = kernel_tmp + channel_count * 4 * 9;
float* tmp2 = bias_tmp + channel_count * 4;
for (int j = 0; j < inwh; j++)
{
tmp0[i] = k0[0];
tmp0 += 4;
k0++;
}
for (int j = 0; j < 9; j++)
{
tmp1[i] = f0[0];
tmp1 += 4;
f0++;
}
if (bias_data)
{
tmp2[i] = b0[0];
}
else
{
tmp2[i] = 0;
}
}
}
float* output_tmp = ( float* )sys_malloc(outwh * 4 * (channel_count + 1) * sizeof(float));
for (int c = 0; c < channel_count + 1; c++)
{
float* ktmp = kernel_tmp + c * 4 * 9;
float* btmp = bias_tmp + c * 4;
for (int i = 0; i < outh; i++)
{
int j = 0;
float* itmp0 = img_tmp + c * 4 * inwh + 4 * i * 2 * inw;
float* itmp1 = img_tmp + c * 4 * inwh + 4 * (i * 2 + 1) * inw;
float* itmp2 = img_tmp + c * 4 * inwh + 4 * (i * 2 + 2) * inw;
float* otmp = output_tmp + c * 4 * outwh + 4 * i * outw;
for (; j + 3 < outw; j += 4)
{
#if __SSE__
__m128 _sum0 = _mm_loadu_ps(btmp);
__m128 _sum1 = _mm_loadu_ps(btmp);
__m128 _sum2 = _mm_loadu_ps(btmp);
__m128 _sum3 = _mm_loadu_ps(btmp);
__m128 _va0 = _mm_loadu_ps(itmp0);
__m128 _va1 = _mm_loadu_ps(itmp0 + 4);
__m128 _va2 = _mm_loadu_ps(itmp0 + 8);
__m128 _va3 = _mm_loadu_ps(itmp0 + 12);
__m128 _va4 = _mm_loadu_ps(itmp0 + 16);
__m128 _va5 = _mm_loadu_ps(itmp0 + 20);
__m128 _va6 = _mm_loadu_ps(itmp0 + 24);
__m128 _va7 = _mm_loadu_ps(itmp0 + 28);
__m128 _va8 = _mm_loadu_ps(itmp0 + 32);
__m128 _vb0 = _mm_loadu_ps(ktmp);
__m128 _vb1 = _mm_loadu_ps(ktmp + 4);
__m128 _vb2 = _mm_loadu_ps(ktmp + 8);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2));
_va0 = _mm_loadu_ps(itmp1);
_va1 = _mm_loadu_ps(itmp1 + 4);
_va2 = _mm_loadu_ps(itmp1 + 8);
_va3 = _mm_loadu_ps(itmp1 + 12);
_va4 = _mm_loadu_ps(itmp1 + 16);
_va5 = _mm_loadu_ps(itmp1 + 20);
_va6 = _mm_loadu_ps(itmp1 + 24);
_va7 = _mm_loadu_ps(itmp1 + 28);
_va8 = _mm_loadu_ps(itmp1 + 32);
_vb0 = _mm_loadu_ps(ktmp + 12);
_vb1 = _mm_loadu_ps(ktmp + 16);
_vb2 = _mm_loadu_ps(ktmp + 20);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2));
_va0 = _mm_loadu_ps(itmp2);
_va1 = _mm_loadu_ps(itmp2 + 4);
_va2 = _mm_loadu_ps(itmp2 + 8);
_va3 = _mm_loadu_ps(itmp2 + 12);
_va4 = _mm_loadu_ps(itmp2 + 16);
_va5 = _mm_loadu_ps(itmp2 + 20);
_va6 = _mm_loadu_ps(itmp2 + 24);
_va7 = _mm_loadu_ps(itmp2 + 28);
_va8 = _mm_loadu_ps(itmp2 + 32);
_vb0 = _mm_loadu_ps(ktmp + 24);
_vb1 = _mm_loadu_ps(ktmp + 28);
_vb2 = _mm_loadu_ps(ktmp + 32);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2));
_mm_storeu_ps(otmp, _sum0);
_mm_storeu_ps(otmp + 4, _sum1);
_mm_storeu_ps(otmp + 8, _sum2);
_mm_storeu_ps(otmp + 12, _sum3);
#else
float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
for (int k = 0; k < 4; k++)
{
sum0[k] += itmp0[k] * ktmp[k];
sum0[k] += itmp1[k] * ktmp[k + 12];
sum0[k] += itmp2[k] * ktmp[k + 24];
sum0[k] += itmp0[k + 4] * ktmp[k + 4];
sum0[k] += itmp1[k + 4] * ktmp[k + 16];
sum0[k] += itmp2[k + 4] * ktmp[k + 28];
sum0[k] += itmp0[k + 8] * ktmp[k + 8];
sum0[k] += itmp1[k + 8] * ktmp[k + 20];
sum0[k] += itmp2[k + 8] * ktmp[k + 32];
sum1[k] += itmp0[k + 8] * ktmp[k];
sum1[k] += itmp1[k + 8] * ktmp[k + 12];
sum1[k] += itmp2[k + 8] * ktmp[k + 24];
sum1[k] += itmp0[k + 12] * ktmp[k + 4];
sum1[k] += itmp1[k + 12] * ktmp[k + 16];
sum1[k] += itmp2[k + 12] * ktmp[k + 28];
sum1[k] += itmp0[k + 16] * ktmp[k + 8];
sum1[k] += itmp1[k + 16] * ktmp[k + 20];
sum1[k] += itmp2[k + 16] * ktmp[k + 32];
sum2[k] += itmp0[k + 16] * ktmp[k];
sum2[k] += itmp1[k + 16] * ktmp[k + 12];
sum2[k] += itmp2[k + 16] * ktmp[k + 24];
sum2[k] += itmp0[k + 20] * ktmp[k + 4];
sum2[k] += itmp1[k + 20] * ktmp[k + 16];
sum2[k] += itmp2[k + 20] * ktmp[k + 28];
sum2[k] += itmp0[k + 24] * ktmp[k + 8];
sum2[k] += itmp1[k + 24] * ktmp[k + 20];
sum2[k] += itmp2[k + 24] * ktmp[k + 32];
sum3[k] += itmp0[k + 24] * ktmp[k];
sum3[k] += itmp1[k + 24] * ktmp[k + 12];
sum3[k] += itmp2[k + 24] * ktmp[k + 24];
sum3[k] += itmp0[k + 28] * ktmp[k + 4];
sum3[k] += itmp1[k + 28] * ktmp[k + 16];
sum3[k] += itmp2[k + 28] * ktmp[k + 28];
sum3[k] += itmp0[k + 32] * ktmp[k + 8];
sum3[k] += itmp1[k + 32] * ktmp[k + 20];
sum3[k] += itmp2[k + 32] * ktmp[k + 32];
}
for (int k = 0; k < 4; k++)
{
otmp[k] = sum0[k];
otmp[k + 4] = sum1[k];
otmp[k + 8] = sum2[k];
otmp[k + 12] = sum3[k];
}
#endif
itmp0 += 32;
itmp1 += 32;
itmp2 += 32;
otmp += 16;
}
for (; j < outw; j++)
{
#if __SSE__
__m128 _sum0 = _mm_loadu_ps(btmp);
__m128 _va0 = _mm_loadu_ps(itmp0);
__m128 _va1 = _mm_loadu_ps(itmp0 + 4);
__m128 _va2 = _mm_loadu_ps(itmp0 + 8);
__m128 _vb0 = _mm_loadu_ps(ktmp);
__m128 _vb1 = _mm_loadu_ps(ktmp + 4);
__m128 _vb2 = _mm_loadu_ps(ktmp + 8);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2));
_va0 = _mm_loadu_ps(itmp1);
_va1 = _mm_loadu_ps(itmp1 + 4);
_va2 = _mm_loadu_ps(itmp1 + 8);
_vb0 = _mm_loadu_ps(ktmp + 12);
_vb1 = _mm_loadu_ps(ktmp + 16);
_vb2 = _mm_loadu_ps(ktmp + 20);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2));
_va0 = _mm_loadu_ps(itmp2);
_va1 = _mm_loadu_ps(itmp2 + 4);
_va2 = _mm_loadu_ps(itmp2 + 8);
_vb0 = _mm_loadu_ps(ktmp + 24);
_vb1 = _mm_loadu_ps(ktmp + 28);
_vb2 = _mm_loadu_ps(ktmp + 32);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2));
_mm_storeu_ps(otmp, _sum0);
#else
float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
for (int k = 0; k < 4; k++)
{
sum0[k] += itmp0[k] * ktmp[k];
sum0[k] += itmp1[k] * ktmp[k + 12];
sum0[k] += itmp2[k] * ktmp[k + 24];
sum0[k] += itmp0[k + 4] * ktmp[k + 4];
sum0[k] += itmp1[k + 4] * ktmp[k + 16];
sum0[k] += itmp2[k + 4] * ktmp[k + 28];
sum0[k] += itmp0[k + 8] * ktmp[k + 8];
sum0[k] += itmp1[k + 8] * ktmp[k + 20];
sum0[k] += itmp2[k + 8] * ktmp[k + 32];
}
for (int k = 0; k < 4; k++)
{
otmp[k] = sum0[k];
}
#endif
itmp0 += 8;
itmp1 += 8;
itmp2 += 8;
otmp += 4;
}
}
}
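// unpack the channel-blocked result back to the planar output layout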
{
for (int i = 0; i < channel_count; i++)
{
float* otmp = output_tmp + i * 4 * outwh;
float* tmp0 = output + i * 4 * outwh;
float* tmp1 = output + i * 4 * outwh + 1 * outwh;
float* tmp2 = output + i * 4 * outwh + 2 * outwh;
float* tmp3 = output + i * 4 * outwh + 3 * outwh;
for (int j = 0; j < outwh; j++)
{
tmp0[0] = otmp[0];
tmp1[0] = otmp[1];
tmp2[0] = otmp[2];
tmp3[0] = otmp[3];
otmp += 4;
tmp0++;
tmp1++;
tmp2++;
tmp3++;
}
}
for (int i = 0; i < channel_remain; i++)
{
int ii = channel_count * 4 + i;
float* otmp = output_tmp + channel_count * 4 * outwh;
float* tmp0 = output + ii * outwh;
for (int j = 0; j < outwh; j++)
{
tmp0[0] = otmp[i];
otmp += 4;
tmp0++;
}
}
}
sys_free(output_tmp);
sys_free(img_tmp);
sys_free(kernel_tmp);
sys_free(bias_tmp);
}
#else
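// portable scalar fallback (no SSE/AVX): one channel (group) per OpenMP
// thread, computing the plain 3x3 depthwise sum
//     out[y][x] = bias + sum over ky,kx in 0..2 of in[y + ky][x + kx] * k[ky][kx]
// directly on the planar layout; the stride-1 kernel below produces two
// output rows per pass where possible.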
static void convdw3x3s1(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w,
int out_h, int out_w, int num_thread)
{
int w = in_w;
int h = in_h;
int c_step_in = w * h;
int outw = out_w;
int outh = out_h;
int c_step_out = outw * outh;
const int group = channel;
const float* kernel = _kernel;
#pragma omp parallel for num_threads(num_thread)
for (int g = 0; g < group; g++)
{
float* out = output + g * c_step_out;
float* outptr = out;
float* outptr2 = outptr + outw;
const float bias0 = _bias ? _bias[g] : 0.f;
const float* kernel0 = kernel + g * 9;
const float* img0 = input + g * c_step_in;
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* r3 = img0 + w * 3;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
int i = 0;
for (; i + 1 < outh; i += 2)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
float sum2 = bias0;
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
*outptr = sum;
*outptr2 = sum2;
r0++;
r1++;
r2++;
r3++;
outptr++;
outptr2++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr += outw;
outptr2 += outw;
}
for (; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr = sum;
r0++;
r1++;
r2++;
outptr++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
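// scalar stride-2 variant: the input pointers advance by 2 per output
// column, and tailstep (= 2*w - 2*outw) skips the unused right border plus
// one whole input row at the end of each output row.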
static void convdw3x3s2(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w,
int out_h, int out_w, int num_thread)
{
int w = in_w;
int h = in_h;
int c_step_in = w * h;
int outw = out_w;
int outh = out_h;
int c_step_out = outw * outh;
const int group = channel;
const int tailstep = w - 2 * outw + w;
const float* kernel = _kernel;
#pragma omp parallel for num_threads(num_thread)
for (int g = 0; g < group; g++)
{
float* out = output + g * c_step_out;
float* outptr = out;
const float* kernel0 = kernel + g * 9;
const float bias0 = _bias ? _bias[g] : 0.f;
const float* img0 = input + g * c_step_in;
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
int i = 0;
for (; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr = sum;
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
#endif
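// entry point for the 3x3 depthwise convolution: zero-pad the input if
// needed, dispatch to the stride-1 or stride-2 kernel, then apply the
// optional relu activation. Note that the batch loop below reuses the same
// input/output pointers on every iteration, so this path appears to assume
// a batch size of 1.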
int conv_dw_run(struct ir_tensor* input_tensor, struct ir_tensor* weight_tensor, struct ir_tensor* bias_tensor,
struct ir_tensor* output_tensor, struct conv_priv_info* conv_info, struct conv_param* param, int num_thread, int cpu_affinity)
{
float* input = ( float* )input_tensor->data;
float* output = ( float* )output_tensor->data;
float* kernel = ( float* )weight_tensor->data;
float* biases = NULL;
if (bias_tensor)
biases = ( float* )bias_tensor->data;
int batch_number = input_tensor->dims[0];
int inc = input_tensor->dims[1];
int inh = input_tensor->dims[2];
int inw = input_tensor->dims[3];
int in_chw = inc * inh * inw;
int outc = output_tensor->dims[1];
int outh = output_tensor->dims[2];
int outw = output_tensor->dims[3];
int out_hw = outh * outw;
int out_chw = out_hw * outc;
int ksize_h = param->kernel_h;
int ksize_w = param->kernel_w;
int pad_w = param->pad_w0;
int pad_h = param->pad_h0;
int stride_w = param->stride_w;
int stride_h = param->stride_h;
int dilation_w = param->dilation_w;
int dilation_h = param->dilation_h;
int group = param->group;
int activation = param->activation;
/* padding */
int inh_tmp = inh + pad_h + pad_h;
int inw_tmp = inw + pad_w + pad_w;
float* input_tmp = NULL;
if (inh_tmp == inh && inw_tmp == inw)
input_tmp = input;
else
{
input_tmp = ( float* )sys_malloc(inh_tmp * inw_tmp * group * sizeof(float));
for (int g = 0; g < group; g++)
{
float* pad_in = input + g * inh * inw;
float* pad_out = input_tmp + g * inh_tmp * inw_tmp;
pad(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0.f);
}
}
/* process */
for (int i = 0; i < batch_number; i++)
{
if (stride_h == 1)
convdw3x3s1(output, input_tmp, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread);
else
convdw3x3s2(output, input_tmp, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread);
}
/* relu */
if (activation >= 0)
relu(output, batch_number * out_chw, activation);
if (!(inh_tmp == inh && inw_tmp == inw))
sys_free(input_tmp);
return 0;
}
|
GB_binop__band_uint32.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__band_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__band_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__band_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__band_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__band_uint32)
// A*D function (colscale): GB (_AxD__band_uint32)
// D*A function (rowscale): GB (_DxB__band_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__band_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__band_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__band_uint32)
// C=scalar+B GB (_bind1st__band_uint32)
// C=scalar+B' GB (_bind1st_tran__band_uint32)
// C=A+scalar GB (_bind2nd__band_uint32)
// C=A'+scalar GB (_bind2nd_tran__band_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = (aij) & (bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) & (y) ;
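// for example, GB_BINOP (GB_CX (p), aij, bij, i, j) expands to:
//      Cx [p] = (aij) & (bij) ;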
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BAND || GxB_NO_UINT32 || GxB_NO_BAND_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__band_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__band_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__band_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__band_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__band_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__band_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__band_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__band_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__band_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__band_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__band_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x) & (bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__band_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij) & (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) & (aij) ; \
}
GrB_Info GB (_bind1st_tran__band_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) & (y) ; \
}
GrB_Info GB (_bind2nd_tran__band_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
6754.c
|
/*
* Compile using the command:
* `cc 27Stencil.c -o oa -fopenmp -lm`
*/
#include <math.h>
#include <omp.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENACC
#include <openacc.h>
#endif
#define DEFAULT_DATASIZE 1048576 /* Default datasize. */
#define DEFAULT_REPS 10 /* Default repetitions. */
#define CONF95 1.96
#define ITERATIONS 10
#define FAC (1./26)
#define TOLERANCE 1.0e-15
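/* FAC = 1/26: each updated point is the mean of its 26 nearest neighbours
 * in the surrounding 3x3x3 cube (the centre point itself is excluded). */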
extern int reps; /* Repetitions. */
extern double *times; /* Array to store results in. */
extern int flag; /* Flag to set CPU or GPU invocation. */
extern unsigned int datasize; /* Datasize passed to benchmark functions. */
unsigned int datasize = -1; /* Datasize for tests in bytes. */
int reps = -1; /* Repetitions. */
double *times; /* Array of doubles storing the benchmark times in microseconds. */
double testtime; /* The average test time in microseconds for reps runs. */
double testsd; /* The standard deviation in the test time in microseconds for reps runs. */
int flag = 0; /* 0 indicates CPU. */
/*
* Function prototypes for common functions.
*/
void init(int argc, char **argv);
void finalisetest(char *);
void finalise(void);
void benchmark(char *, double (*test)(void));
void print_results(char *, double, double);
/* Forward Declarations of utility functions*/
double max_diff(double *, double *, int);
void wul();
void usage(char *argv[]) {
printf("Usage: %s \n"
"\t--reps <repetitions> (default %d)\n"
"\t--datasize <datasize> (default %d bytes)\n",
argv[0],
DEFAULT_REPS, DEFAULT_DATASIZE);
}
/*
* This function parses the parameters from the command line.
*/
void parse_args(int argc, char *argv[]) {
int arg;
for (arg = 1; arg < argc; arg++) {
if (strcmp(argv[arg], "--reps") == 0) {
reps = atoi(argv[++arg]);
if (reps == 0) {
printf("Invalid integer:--reps: %s\n", argv[arg]);
usage(argv);
exit(EXIT_FAILURE);
}
} else if (strcmp(argv[arg], "--datasize") == 0) {
datasize = atoi(argv[++arg]);
if (datasize == 0) {
printf("Invalid integer:--datasize: %s\n", argv[arg]);
usage(argv);
exit(EXIT_FAILURE);
}
} else if (strcmp(argv[arg], "-h") == 0) {
usage(argv);
exit(EXIT_SUCCESS);
} else {
printf("Invalid parameters: %s\n", argv[arg]);
usage(argv);
exit(EXIT_FAILURE);
}
}
}
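/*
 * This function computes the mean and standard deviation of the benchmark
 * times, skipping entries that are 0 (failed repetitions).
 */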
void stats(double *mtp, double *sdp) {
double meantime, totaltime, sumsq, mintime, maxtime, sd;
int i, good_reps;
mintime = 1.0e10;
maxtime = 0.;
totaltime = 0.;
good_reps = 0;
for (i = 0; i < reps; i++) {
/* Skip entries where times is 0; this indicates an error occurred */
if (times[i] != 0){
mintime = (mintime < times[i]) ? mintime : times[i];
maxtime = (maxtime > times[i]) ? maxtime : times[i];
totaltime += times[i];
good_reps++;
}
}
meantime = totaltime / good_reps;
sumsq = 0;
for (i = 0; i < reps; i++) {
if (times[i] != 0){
sumsq += (times[i] - meantime) * (times[i] - meantime);
}
}
sd = sqrt(sumsq / good_reps);
*mtp = meantime;
*sdp = sd;
}
/*
* This function prints the results of the tests.
* If you use a compiler which sets a different preprocessor flag
* you may wish to add it here.
*/
void print_results(char *name, double testtime, double testsd) {
char compiler[20];
/* Set default compiler identifier. */
sprintf(compiler, "COMPILER");
/* Set compiler identifier based on known preprocessor flags. */
#ifdef __PGI
sprintf(compiler, "PGI");
#endif
#ifdef __HMPP
sprintf(compiler, "CAPS");
#endif
//printf("%s %s %d %f %f\n", compiler, name, datasize, testtime*1e6, CONF95*testsd*1e6);
printf("%f\n", testtime*1e6);
}
/*
* This function initialises the storage for the test results and sets the defaults.
*/
void init(int argc, char **argv)
{
parse_args(argc, argv);
if (reps == -1) {
reps = DEFAULT_REPS;
}
if (datasize == (unsigned int)-1) {
datasize = DEFAULT_DATASIZE;
}
times = (double *)malloc((reps) * sizeof(double));
/*
#ifdef __PGI
acc_init(acc_device_nvidia);
// printf("PGI INIT\n");
#endif
#ifdef __HMPP
int a[5] = {1,2,3,4,5};
#pragma acc data copyin(a[0:5])
{}
#endif
#ifdef _CRAYC
int a[5] = {1,2,3,4,5};
#pragma acc data copyin(a[0:5])
{}
#endif
*/
}
void finalise(void) {
free(times);
}
/*
* This function runs the benchmark specified.
*/
void benchmark(char *name, double (*test)(void))
{
int i = 0;
double tmp = 0;
for (i=0; i<reps; i++) {
tmp = test();
if (tmp == -10000){
printf("Memory allocation failure in %s\n", name);
times[i] = 0;
}
else if (tmp == -11000){
printf("CPU/GPU mismatch in %s\n", name);
times[i] = 0;
}
else{
times[i] = tmp;
}
}
stats(&testtime, &testsd);
//printf("in benchmark\n");
print_results(name, testtime, testsd);
//printf("printed result\n");
}
double stencil()
{
extern unsigned int datasize;
int sz = cbrt((datasize/sizeof(double))/2);
int i, j, k, iter;
int n = sz-2;
double fac = FAC;
double t1, t2;
double md;
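/*
* Layout note (added for clarity): a0 and a1 are flat sz*sz*sz arrays indexed
* as [i*sz*sz + j*sz + k], with a one-cell halo on every face, so the interior
* is n = sz-2 cells per dimension. Each update sums the 26 neighbours of a
* cell (the 3x3x3 cube minus its centre) and scales the sum by fac = 1/26.
*/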
//printf("size = %d\n", sz);
/* Work buffers, with halos */
double *a0 = (double*)malloc(sizeof(double)*sz*sz*sz);
double *device_result = (double*)malloc(sizeof(double)*sz*sz*sz);
double *a1 = (double*)malloc(sizeof(double)*sz*sz*sz);
double *host_result = (double*)malloc(sizeof(double)*sz*sz*sz);
double *a0_init = (double*)malloc(sizeof(double)*sz*sz*sz);
if(a0==NULL||device_result==NULL||a1==NULL||host_result==NULL||a0_init==NULL){
/* Something went wrong in the memory allocation here, fail gracefully */
return(-10000);
}
/* initialize input array a0 */
/* zero all of array (including halos) */
//printf("size = %d\n", sz);
for (i = 0; i < sz; i++) {
for (j = 0; j < sz; j++) {
for (k = 0; k < sz; k++) {
a0[i*sz*sz+j*sz+k] = 0.0;
//printf("%d\t", (i*sz*sz+j*sz+k));
}
}
}
//printf("\n");
//int size_of_a0 = sizeof(a0) / sizeof(*a0);
//printf("size of a0 = %d\n", size_of_a0);
/* use random numbers to fill interior */
for (i = 1; i < n+1; i++) {
for (j = 1; j < n+1; j++) {
for (k = 1; k < n+1; k++) {
a0[i*sz*sz+j*sz+k] = (double) rand()/ (double)(1.0 + RAND_MAX);
}
}
}
/* memcpy(&a0_init[0], &a0[0], sizeof(double)*sz*sz*sz); */
/* save initial input array for later GPU run */
for (i = 0; i < sz; i++) {
for (j = 0; j < sz; j++) {
for (k = 0; k < sz; k++) {
a0_init[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
}
}
}
//printf("Host computation\n");
/* run main computation on host */
for (iter = 0; iter < ITERATIONS; iter++) {
for (i = 1; i < n+1; i++) {
for (j = 1; j < n+1; j++) {
for (k = 1; k < n+1; k++) {
a1[i*sz*sz+j*sz+k] = (
a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
) * fac;
}
}
}
for (i = 1; i < n+1; i++) {
for (j = 1; j < n+1; j++) {
for (k = 1; k < n+1; k++) {
a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
}
}
}
} /* end iteration loop */
/* save result */
/* memcpy(&host_result[0], &a0[0], sizeof(double)*sz*sz*sz); */
for (i = 0; i < sz; i++) {
for (j = 0; j < sz; j++) {
for (k = 0; k < sz; k++) {
host_result[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
// printf("%lf\t", a0[i*sz*sz+j*sz+k]);
}
}
}
//int size = sizeof(host_result)/sizeof(host_result[0]);
//for(i = 0; i < size; i++) {
// printf("%lf\t", host_result[i]);
//}
//printf("\n");
/* copy initial array back to a0 */
/* memcpy(&a0[0], &a0_init[0], sizeof(double)*sz*sz*sz); */
for (i = 0; i < sz; i++) {
for (j = 0; j < sz; j++) {
for (k = 0; k < sz; k++) {
a0[i*sz*sz+j*sz+k] = a0_init[i*sz*sz+j*sz+k];
}
}
}
//printf("Starting acc pragma code\n");
t1 = omp_get_wtime();
#pragma acc data copy(a0[0:sz*sz*sz]), create(a1[0:sz*sz*sz], i,j,k,iter), copyin(sz,fac,n)
{
for (iter = 0; iter < ITERATIONS; iter++) {
#pragma omp parallel for
for (i = 1; i < n+1; i++) {
for (j = 1; j < n+1; j++) {
for (k = 1; k < n+1; k++) {
a1[i*sz*sz+j*sz+k] = (
a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
) * fac;
}
}
}
#pragma acc parallel loop
for (i = 1; i < n+1; i++) {
#pragma acc loop
for (j = 1; j < n+1; j++) {
#pragma acc loop
for (k = 1; k < n+1; k++) {
a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
}
}
}
} /* end iteration loop */
} /* end data region */
#pragma acc wait
t2 = omp_get_wtime();
memcpy(&device_result[0], &a0[0], sizeof(double)*sz*sz*sz);
md = max_diff(&host_result[0],&device_result[0], sz);
/* Free malloc'd memory to prevent leaks */
free(a0);
free(a0_init);
free(a1);
free(host_result);
free(device_result);
//printf("md: %lf \t tolerance: %lf", md, TOLERANCE);
if (md < TOLERANCE ){
//printf ("GPU matches host to within tolerance of %1.1e\n\n", TOLERANCE);
return(t2 - t1);
}
else{
// printf ("WARNING: GPU does not match to within tolerance of %1.1e\nIt is %lf\n", TOLERANCE, md);
return(-11000);
}
}
/* Utility Functions */
double max_diff(double *array1,double *array2, int sz)
{
double tmpdiff, diff;
int i,j,k;
int n = sz-2;
diff=0.0;
for (i = 1; i < n+1; i++) {
for (j = 1; j < n+1; j++) {
for (k = 1; k < n+1; k++) {
tmpdiff = fabs(array1[i*sz*sz+j*sz+k] - array2[i*sz*sz+j*sz+k]);
//printf("diff: %lf", tmpdiff);
if (tmpdiff > diff) diff = tmpdiff;
}
}
}
return diff;
}
/*
* This function ensures the device is awake.
* It is more portable than acc_init().
*/
void wul(){
int data = 8192;
double *arr_a = (double *)malloc(sizeof(double) * data);
double *arr_b = (double *)malloc(sizeof(double) * data);
int i = 0;
if (arr_a==NULL||arr_b==NULL) {
printf("Unable to allocate memory in wul.\n");
return;
}
for (i=0;i<data;i++){
arr_a[i] = (double) (rand()/(1.0+RAND_MAX));
}
#pragma acc data copy(arr_b[0:data]), copyin(arr_a[0:data])
{
#pragma acc parallel loop
for (i=0;i<data;i++){
arr_b[i] = arr_a[i] * 2;
}
}
if (arr_a[0] < 0){
printf("Error in WUL\n");
/*
* This branch should never be taken, since the random values lie in [0,1).
* It exists only to stop clever optimizers from removing the loop above.
*/
}
free(arr_a);
free(arr_b);
}
int main(int argc, char **argv) {
char testName[32];
//printf("compiler name datasize testtime*1e6 CONF95*testsd*1e6\n");
/* Initialise storage for test results & parse input arguments. */
init(argc, argv);
/* Ensure device is awake. */
wul();
sprintf(testName, "27S");
benchmark(testName, &stencil);
/* Print results & free results storage */
finalise();
return EXIT_SUCCESS;
}
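/*
* Illustrative build/run sketch (not part of the original sources; the source
* file name and compiler flags below are assumptions for illustration only):
*
*   pgcc -acc -mp -fast 27stencil.c -o 27stencil -lm
*   ./27stencil --reps 10 --datasize 16777216
*
* With print_results as defined above, the program emits a single number:
* the mean device time in microseconds for the 27-point stencil benchmark.
*/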
|
GB_unaryop__ainv_uint16_uint8.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint16_uint8
// op(A') function: GB_tran__ainv_uint16_uint8
// C type: uint16_t
// A type: uint8_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
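// Taken together, GB_CAST_OP(pC,pA) expands to: read aij = Ax[pA] as a uint8_t,
// cast it to uint16_t, negate it (which for an unsigned type wraps modulo 2^16),
// and store the result in Cx[pC].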
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT16 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_uint16_uint8
(
uint16_t *Cx, // Cx and Ax may be aliased
uint8_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_uint16_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
xgboost_regrank.h
|
#ifndef XGBOOST_REGRANK_H
#define XGBOOST_REGRANK_H
/*!
* \file xgboost_regrank.h
* \brief class for gradient boosted regression and ranking
* \author Kailong Chen: [email protected], Tianqi Chen: [email protected]
*/
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "xgboost_regrank_data.h"
#include "xgboost_regrank_eval.h"
#include "xgboost_regrank_obj.h"
#include "../utils/xgboost_omp.h"
#include "../booster/xgboost_gbmbase.h"
#include "../utils/xgboost_utils.h"
#include "../utils/xgboost_stream.h"
namespace xgboost{
namespace regrank{
/*! \brief class for gradient boosted regression and ranking */
class RegRankBoostLearner{
public:
/*! \brief constructor */
RegRankBoostLearner(void){
silent = 0;
obj_ = NULL;
name_obj_ = "reg:linear";
}
/*!
* \brief a regression booster associated with training and evaluation data
* \param mats array of pointers to matrix whose prediction result need to be cached
*/
RegRankBoostLearner(const std::vector<DMatrix *>& mats){
silent = 0;
obj_ = NULL;
name_obj_ = "reg:linear";
this->SetCacheData(mats);
}
/*!
* \brief add internal cache space for mat; this speeds up prediction for those matrices,
* so cache predictions for the training and evaluation data
* warning: if the model is loaded from a file produced by a previous training run,
* SetCacheData must be called with exactly the SAME
* data matrices to continue training, otherwise an error will occur
* \param mats array of pointers to matrices whose prediction results need to be cached
*/
inline void SetCacheData(const std::vector<DMatrix *>& mats){
// estimate feature bound
int num_feature = 0;
// assign buffer index
unsigned buffer_size = 0;
utils::Assert( cache_.size() == 0, "can only call cache data once" );
for( size_t i = 0; i < mats.size(); ++i ){
bool duplicate = false;
for( size_t j = 0; j < i; ++ j ){
if( mats[i] == mats[j] ) duplicate = true;
}
if( duplicate ) continue;
// set mats[i]'s cache learner pointer to this
mats[i]->cache_learner_ptr_ = this;
cache_.push_back( CacheEntry( mats[i], buffer_size, mats[i]->Size() ) );
buffer_size += static_cast<unsigned>(mats[i]->Size());
num_feature = std::max(num_feature, (int)(mats[i]->data.NumCol()));
}
char str_temp[25];
if (num_feature > mparam.num_feature){
mparam.num_feature = num_feature;
sprintf(str_temp, "%d", num_feature);
base_gbm.SetParam("bst:num_feature", str_temp);
}
sprintf(str_temp, "%u", buffer_size);
base_gbm.SetParam("num_pbuffer", str_temp);
if (!silent){
printf("buffer_size=%u\n", buffer_size);
}
}
/*!
* \brief set parameters from outside
* \param name name of the parameter
* \param val value of the parameter
*/
inline void SetParam(const char *name, const char *val){
if (!strcmp(name, "silent")) silent = atoi(val);
if (!strcmp(name, "eval_metric")) evaluator_.AddEval(val);
if (!strcmp(name, "objective") ) name_obj_ = val;
if (!strcmp(name, "num_class") ) base_gbm.SetParam("num_booster_group", val );
mparam.SetParam(name, val);
base_gbm.SetParam(name, val);
cfg_.push_back( std::make_pair( std::string(name), std::string(val) ) );
}
/*!
* \brief initialize the solver; called once before training starts
* this function is reserved for the solver to allocate necessary space and do other preparation
*/
inline void InitTrainer(void){
if( mparam.num_class != 0 ){
if( name_obj_ != "multi:softmax" ){
name_obj_ = "multi:softmax";
printf("auto select objective=softmax to support multi-class classification\n" );
}
}
base_gbm.InitTrainer();
obj_ = CreateObjFunction( name_obj_.c_str() );
for( size_t i = 0; i < cfg_.size(); ++ i ){
obj_->SetParam( cfg_[i].first.c_str(), cfg_[i].second.c_str() );
}
evaluator_.AddEval( obj_->DefaultEvalMetric() );
}
/*!
* \brief initialize the current data storage for the model; call this function when the model is used for the first time
*/
inline void InitModel(void){
base_gbm.InitModel();
mparam.AdjustBase(name_obj_.c_str());
}
/*!
* \brief load model from file
* \param fname file name
*/
inline void LoadModel(const char *fname){
utils::FileStream fi(utils::FopenCheck(fname, "rb"));
this->LoadModel(fi);
fi.Close();
}
/*!
* \brief load model from stream
* \param fi input stream
*/
inline void LoadModel(utils::IStream &fi){
base_gbm.LoadModel(fi);
utils::Assert(fi.Read(&mparam, sizeof(ModelParam)) != 0);
}
/*!
* \brief DumpModel
* \param fo text file
* \param fmap feature map that may help give interpretations of feature
* \param with_stats whether print statistics as well
*/
inline void DumpModel(FILE *fo, const utils::FeatMap& fmap, bool with_stats){
base_gbm.DumpModel(fo, fmap, with_stats);
}
/*!
* \brief Dump path of all trees
* \param fo text file
* \param data input data
*/
inline void DumpPath(FILE *fo, const DMatrix &data){
base_gbm.DumpPath(fo, data.data);
}
/*!
* \brief save model to stream
* \param fo output stream
*/
inline void SaveModel(utils::IStream &fo) const{
base_gbm.SaveModel(fo);
fo.Write(&mparam, sizeof(ModelParam));
}
/*!
* \brief save model into file
* \param fname file name
*/
inline void SaveModel(const char *fname) const{
utils::FileStream fo(utils::FopenCheck(fname, "wb"));
this->SaveModel(fo);
fo.Close();
}
/*!
* \brief update the model for one iteration
*/
inline void UpdateOneIter(const DMatrix &train){
this->PredictRaw(preds_, train);
obj_->GetGradient(preds_, train.info, base_gbm.NumBoosters(), grad_, hess_);
if( grad_.size() == train.Size() ){
base_gbm.DoBoost(grad_, hess_, train.data, train.info.root_index);
}else{
int ngroup = base_gbm.NumBoosterGroup();
utils::Assert( grad_.size() == train.Size() * (size_t)ngroup, "BUG: UpdateOneIter: mclass" );
std::vector<float> tgrad( train.Size() ), thess( train.Size() );
for( int g = 0; g < ngroup; ++ g ){
memcpy( &tgrad[0], &grad_[g*tgrad.size()], sizeof(float)*tgrad.size() );
memcpy( &thess[0], &hess_[g*tgrad.size()], sizeof(float)*tgrad.size() );
base_gbm.DoBoost(tgrad, thess, train.data, train.info.root_index, g );
}
}
}
/*!
* \brief evaluate the model for specific iteration
* \param iter iteration number
* \param evals datasets to evaluate
* \param evname name of each dataset
* \param fo file to output log
*/
inline void EvalOneIter(int iter,
const std::vector<const DMatrix*> &evals,
const std::vector<std::string> &evname,
FILE *fo=stderr ){
fprintf(fo, "[%d]", iter);
for (size_t i = 0; i < evals.size(); ++i){
this->PredictRaw(preds_, *evals[i]);
obj_->PredTransform(preds_);
evaluator_.Eval(fo, evname[i].c_str(), preds_, evals[i]->info);
}
fprintf(fo, "\n");
fflush(fo);
}
/*!
* \brief get prediction
* \param preds storage for the prediction
* \param data input data
* \param bst_group booster group we are in
*/
inline void Predict(std::vector<float> &preds, const DMatrix &data, int bst_group = -1){
this->PredictRaw( preds, data, bst_group );
obj_->PredTransform( preds );
}
public:
/*!
* \brief interactive update
* \param action action type
* \param train training data
*/
inline void UpdateInteract(std::string action, const DMatrix& train){
for(size_t i = 0; i < cache_.size(); ++i){
this->InteractPredict(preds_, *cache_[i].mat_);
}
if (action == "remove"){
base_gbm.DelteBooster(); return;
}
obj_->GetGradient(preds_, train.info, base_gbm.NumBoosters(), grad_, hess_);
std::vector<unsigned> root_index;
base_gbm.DoBoost(grad_, hess_, train.data, root_index);
for(size_t i = 0; i < cache_.size(); ++i){
this->InteractRePredict(*cache_[i].mat_);
}
}
private:
/*! \brief get the transformed predictions, given data */
inline void InteractPredict(std::vector<float> &preds, const DMatrix &data){
int buffer_offset = this->FindBufferOffset(data);
utils::Assert( buffer_offset >=0, "interact mode must cache training data" );
preds.resize(data.Size());
const unsigned ndata = static_cast<unsigned>(data.Size());
#pragma omp parallel for schedule( static )
for (unsigned j = 0; j < ndata; ++j){
preds[j] = mparam.base_score + base_gbm.InteractPredict(data.data, j, buffer_offset + j);
}
obj_->PredTransform( preds );
}
/*! \brief repredict trial */
inline void InteractRePredict(const DMatrix &data){
int buffer_offset = this->FindBufferOffset(data);
utils::Assert( buffer_offset >=0, "interact mode must cache training data" );
const unsigned ndata = static_cast<unsigned>(data.Size());
#pragma omp parallel for schedule( static )
for (unsigned j = 0; j < ndata; ++j){
base_gbm.InteractRePredict(data.data, j, buffer_offset + j);
}
}
/*! \brief get un-transformed prediction*/
inline void PredictRaw(std::vector<float> &preds, const DMatrix &data, int bst_group = -1 ){
int buffer_offset = this->FindBufferOffset(data);
if( bst_group < 0 ){
int ngroup = base_gbm.NumBoosterGroup();
preds.resize( data.Size() * ngroup );
for( int g = 0; g < ngroup; ++ g ){
this->PredictBuffer(&preds[ data.Size() * g ], data, buffer_offset, g );
}
}else{
preds.resize( data.Size() );
this->PredictBuffer(&preds[0], data, buffer_offset, bst_group );
}
}
/*! \brief get the un-transformed predictions, given data */
inline void PredictBuffer(float *preds, const DMatrix &data, int buffer_offset, int bst_group ){
const unsigned ndata = static_cast<unsigned>(data.Size());
if( buffer_offset >= 0 ){
#pragma omp parallel for schedule( static )
for (unsigned j = 0; j < ndata; ++j){
preds[j] = mparam.base_score + base_gbm.Predict(data.data, j, buffer_offset + j, data.info.GetRoot(j), bst_group );
}
}else{
#pragma omp parallel for schedule( static )
for (unsigned j = 0; j < ndata; ++j){
preds[j] = mparam.base_score + base_gbm.Predict(data.data, j, -1, data.info.GetRoot(j), bst_group );
}
}
}
private:
/*! \brief training parameter for regression */
struct ModelParam{
/* \brief global bias */
float base_score;
/* \brief type of loss function */
int loss_type;
/* \brief number of features */
int num_feature;
/* \brief number of class, if it is multi-class classification */
int num_class;
/*! \brief reserved field */
int reserved[15];
/*! \brief constructor */
ModelParam(void){
base_score = 0.5f;
loss_type = -1;
num_feature = 0;
num_class = 0;
memset(reserved, 0, sizeof(reserved));
}
/*!
* \brief set parameters from outside
* \param name name of the parameter
* \param val value of the parameter
*/
inline void SetParam(const char *name, const char *val){
if (!strcmp("base_score", name)) base_score = (float)atof(val);
if (!strcmp("num_class", name)) num_class = atoi(val);
if (!strcmp("loss_type", name)) loss_type = atoi(val);
if (!strcmp("bst:num_feature", name)) num_feature = atoi(val);
}
/*!
* \brief adjust base_score based on loss type and objective function
*/
inline void AdjustBase(const char *obj){
// some tweaks for loss type
if( loss_type == -1 ){
loss_type = 1;
if( !strcmp("reg:linear", obj ) ) loss_type = 0;
}
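// For logistic-type losses (loss_type 1, 2 or 3), base_score is supplied on
// the probability scale (0,1) and is converted below to the raw margin via
// the logit: margin = -log(1/p - 1) = log(p / (1 - p)).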
if (loss_type == 1 || loss_type == 2|| loss_type == 3){
utils::Assert(base_score > 0.0f && base_score < 1.0f, "sigmoid range constraint");
base_score = -logf(1.0f / base_score - 1.0f);
}
}
};
private:
struct CacheEntry{
const DMatrix *mat_;
int buffer_offset_;
size_t num_row_;
CacheEntry(const DMatrix *mat, int buffer_offset, size_t num_row)
:mat_(mat), buffer_offset_(buffer_offset), num_row_(num_row){}
};
/*! \brief the entries indicate that we have an internal prediction cache */
std::vector<CacheEntry> cache_;
private:
// find the internal buffer offset for a given matrix; return -1 if it does not exist
inline int FindBufferOffset(const DMatrix &mat){
for(size_t i = 0; i < cache_.size(); ++i){
if( cache_[i].mat_ == &mat && mat.cache_learner_ptr_ == this ) {
if( cache_[i].num_row_ == mat.Size() ){
return cache_[i].buffer_offset_;
}else{
fprintf( stderr, "warning: number of rows in input matrix changed as remembered in cachelist, ignore cached results\n" );
fflush( stderr );
}
}
}
return -1;
}
protected:
int silent;
EvalSet evaluator_;
booster::GBMBase base_gbm;
ModelParam mparam;
// objective function
IObjFunction *obj_;
// name of objective function
std::string name_obj_;
std::vector< std::pair<std::string, std::string> > cfg_;
protected:
std::vector<float> grad_, hess_, preds_;
};
}
};
#endif
|
ten_tusscher_2004_epi_S3_18.c
|
//Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S3_18.h"
GET_CELL_MODEL_DATA(init_cell_model_data) {
assert(cell_model);
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.4602741330493,0.00132015399693432,0.777046147920554,0.776889105046541,0.000177476660481185,0.483966421957761,0.00296299845502102,0.999998312552388,1.96859929499616e-08,1.91848968272809e-05,0.999770796899963,1.00689245224577,0.999991575225794,4.99140392840598e-05,0.555311049406003,10.3337101551850,139.222559522954};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
assert(sv);
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
/// real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
//#ifdef EPI
real Gto=0.294;
//#endif
// #ifdef ENDO
// real Gto=0.073;
//#endif
//#ifdef MCELL
// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={14.3939461883443,0.000178290076738646,0.000130760434446578,0.000491809587701692,0.246201963463598,0.140711180074250,0.176907559979616,4.75131058988226,0.0123758427568759,1.60482972836173,1098.92211996714,0.000445170678968932,0.364157364539899,0.0144672086648825,0.00152731655832290,4.39966220420297e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
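// Note (added for clarity): the gating variables are advanced with the Rush-Larsen
// scheme, i.e. the exact exponential update of dy/dt = (y_inf - y)/tau over one step:
//   y_{n+1} = y_inf - (y_inf - y_n) * exp(-dt / tau_y)
// The membrane voltage itself is advanced with an explicit (forward) Euler step below.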
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
GB_binop__ge_int32.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__ge_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__ge_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_int32)
// A*D function (colscale): GB (_AxD__ge_int32)
// D*A function (rowscale): GB (_DxB__ge_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_int32)
// C=scalar+B GB (_bind1st__ge_int32)
// C=scalar+B' GB (_bind1st_tran__ge_int32)
// C=A+scalar GB (_bind2nd__ge_int32)
// C=A'+scalar GB (_bind2nd_tran__ge_int32)
// C type: bool
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_INT32 || GxB_NO_GE_INT32)
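// When GB_DISABLE evaluates to true, every kernel in this file returns
// GrB_NO_VALUE and GraphBLAS falls back to the generic kernels for this
// operator/type combination.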
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__ge_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ge_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ge_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ge_int32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ge_int32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ge_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__ge_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ge_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__ge_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ge_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ge_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ge_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__ge_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__ge_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
nodal_two_step_v_p_strategy_for_FSI.h
|
//
// Project Name: KratosPFEMFluidDynamicsApplication $
// Last modified by: $Author: AFranci $
// Date: $Date: June 2018 $
// Revision: $Revision: 0.0 $
//
//
#ifndef KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_FOR_FSI_H
#define KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_FOR_FSI_H
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/cfd_variables.h"
#include "utilities/openmp_utils.h"
#include "processes/process.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/strategies/implicit_solving_strategy.h"
#include "custom_utilities/mesher_utilities.hpp"
#include "custom_utilities/boundary_normals_calculation_utilities.hpp"
#include "geometries/geometry.h"
#include "utilities/geometry_utilities.h"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
#include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_for_FSI.h"
#include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_continuity_for_FSI.h"
#include "custom_strategies/builders_and_solvers/nodal_residualbased_block_builder_and_solver.h"
#include "custom_utilities/solver_settings.h"
#include "custom_strategies/strategies/gauss_seidel_linear_strategy.h"
#include "pfem_fluid_dynamics_application_variables.h"
#include "nodal_two_step_v_p_strategy.h"
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <fstream>
namespace Kratos
{
///@addtogroup PFEMFluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
template <class TSparseSpace,
class TDenseSpace,
class TLinearSolver>
class NodalTwoStepVPStrategyForFSI : public NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(NodalTwoStepVPStrategyForFSI);
/// Counted pointer of NodalTwoStepVPStrategy
//typedef boost::shared_ptr< NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer;
typedef NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TDataType TDataType;
/// Node type (default is: Node<3>)
typedef Node<3> NodeType;
/// Geometry type (used with the given NodeType)
typedef Geometry<NodeType> GeometryType;
typedef std::size_t SizeType;
//typedef typename BaseType::DofSetType DofSetType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename ImplicitSolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType;
typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mVelocityTolerance;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mPressureTolerance;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mMaxPressureIter;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mDomainSize;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mReformDofSet;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mpMomentumStrategy;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mpPressureStrategy;
typedef GeometryType::ShapeFunctionsGradientsType ShapeFunctionDerivativesArrayType;
typedef GlobalPointersVector<Node<3>> NodeWeakPtrVectorType;
///@}
///@name Life Cycle
///@{
NodalTwoStepVPStrategyForFSI(ModelPart &rModelPart,
SolverSettingsType &rSolverConfig) : BaseType(rModelPart)
{
NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::InitializeStrategy(rSolverConfig);
}
NodalTwoStepVPStrategyForFSI(ModelPart &rModelPart,
/*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/
typename TLinearSolver::Pointer pVelocityLinearSolver,
typename TLinearSolver::Pointer pPressureLinearSolver,
bool ReformDofSet = true,
double VelTol = 0.0001,
double PresTol = 0.0001,
int MaxPressureIterations = 1, // Only for predictor-corrector
unsigned int TimeOrder = 2,
unsigned int DomainSize = 2) : BaseType(rModelPart,
pVelocityLinearSolver,
pPressureLinearSolver,
ReformDofSet,
VelTol,
PresTol,
MaxPressureIterations,
TimeOrder,
DomainSize)
{
KRATOS_TRY;
BaseType::SetEchoLevel(1);
// Check that input parameters are reasonable and sufficient.
this->Check();
bool CalculateNormDxFlag = true;
bool ReformDofAtEachIteration = false; // DofSet modification is managed by the fractional step strategy; auxiliary strategies should not modify the DofSet directly.
// Additional Typedefs
typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
typedef ImplicitSolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
//initializing fractional velocity solution step
typedef Scheme<TSparseSpace, TDenseSpace> SchemeType;
typename SchemeType::Pointer pScheme;
typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>());
pScheme.swap(Temp);
//CONSTRUCTION OF VELOCITY
BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverForFSI<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver));
this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel());
vel_build->SetCalculateReactionsFlag(false);
BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver));
this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel());
pressure_build->SetCalculateReactionsFlag(false);
KRATOS_CATCH("");
}
/// Destructor.
virtual ~NodalTwoStepVPStrategyForFSI() {}
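// Overview (added for clarity): SolveSolutionStep below performs a segregated
// velocity-pressure iteration. In each non-linear iteration the momentum
// equation is solved first, the topology and nodal quantities (volumes,
// strains, stresses) are updated, and then the continuity (pressure) equation
// is solved; the loop ends when both sub-problems have converged or the
// iteration limit is reached.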
bool SolveSolutionStep() override
{
// Initialize BDF2 coefficients
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
double currentTime = rCurrentProcessInfo[TIME];
double timeInterval = rCurrentProcessInfo[DELTA_TIME];
bool timeIntervalChanged = rCurrentProcessInfo[TIME_INTERVAL_CHANGED];
bool converged = false;
unsigned int maxNonLinearIterations = mMaxPressureIter;
std::cout << "\n Solve with nodally_integrated_two_step_vp strategy at t=" << currentTime << "s" << std::endl;
if (timeIntervalChanged == true && currentTime > 10 * timeInterval)
{
maxNonLinearIterations *= 2;
}
if (currentTime < 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl;
maxNonLinearIterations *= 3;
}
if (currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl;
maxNonLinearIterations *= 2;
}
bool momentumConverged = true;
bool continuityConverged = false;
bool fixedTimeStep = false;
double pressureNorm = 0;
double velocityNorm = 0;
// bool momentumAlreadyConverged=false;
// bool continuityAlreadyConverged=false;
/* boost::timer solve_step_time; */
// std::cout<<" InitializeSolutionStep().... "<<std::endl;
InitializeSolutionStep(); // it fills SOLID_NODAL_SFD_NEIGHBOURS_ORDER for solids and NODAL_SFD_NEIGHBOURS_ORDER for fluids and inner solids
for (unsigned int it = 0; it < maxNonLinearIterations; ++it)
{
if (BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "----- > iteration: " << it << std::endl;
if (it == 0)
{
ComputeNodalVolumeAndAssignFlagToElementType(); // it assigns NODAL_VOLUME to fluid nodes and SOLID_NODAL_VOLUME to solid nodes. Interface nodes have both
this->InitializeNonLinearIterations(); // it fills SOLID_NODAL_SFD_NEIGHBOURS for solids and NODAL_SFD_NEIGHBOURS for fluids
}
// std::cout<<" CalcNodalStrainsAndStresses .... "<<std::endl;
CalcNodalStrainsAndStresses(); // it computes stresses and strains for fluid and solid nodes
// std::cout<<" CalcNodalStrainsAndStresses DONE "<<std::endl;
momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep, velocityNorm);
UpdateTopology(rModelPart, BaseType::GetEchoLevel());
// std::cout<<" ComputeNodalVolume .... "<<std::endl;
ComputeNodalVolume();
// std::cout<<" ComputeNodalVolume DONE "<<std::endl;
this->InitializeNonLinearIterations();
// std::cout<<" InitializeNonLinearIterations DONE "<<std::endl;
CalcNodalStrains();
// std::cout<<" CalcNodalStrains DONE "<<std::endl;
if (fixedTimeStep == false)
{
continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations, pressureNorm);
}
// if((momentumConverged==true || it==maxNonLinearIterations-1) && momentumAlreadyConverged==false){
// std::ofstream myfile;
// myfile.open ("momentumConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
// momentumAlreadyConverged=true;
// }
// if((continuityConverged==true || it==maxNonLinearIterations-1) && continuityAlreadyConverged==false){
// std::ofstream myfile;
// myfile.open ("continuityConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
// continuityAlreadyConverged=true;
// }
if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 1))
{
//this->ComputeErrorL2NormCaseImposedG();
//this->ComputeErrorL2NormCasePoiseuille();
this->CalculateAccelerations();
// std::ofstream myfile;
// myfile.open ("maxConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
}
bool hybridMethod = false;
if (hybridMethod == true)
{
if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 0))
{
this->UpdateElementalStressStrain();
}
}
if ((continuityConverged && momentumConverged) && it > 1)
{
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
converged = true;
std::cout << "nodal V-P strategy converged in " << it + 1 << " iterations." << std::endl;
break;
}
if (fixedTimeStep == true)
{
break;
}
}
if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "Convergence tolerance not reached." << std::endl;
if (mReformDofSet)
this->Clear();
/* std::cout << "solve_step_time : " << solve_step_time.elapsed() << std::endl; */
return converged;
}
void UpdateElementalStressStrain()
{
ModelPart &rModelPart = BaseType::GetModelPart();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
#pragma omp parallel
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
{
/* itElem-> InitializeElementStrainStressState(); */
itElem->InitializeSolutionStep(rCurrentProcessInfo);
}
}
}
void Initialize() override
{
std::cout << " \n Initialize in nodal_two_step_v_p_strategy_FSI" << std::endl;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
unsigned int sizeStrains = 3 * (dimension - 1);
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
unsigned int neighbourNodes = neighb_nodes.size();
unsigned int sizeSDFNeigh = neighbourNodes * dimension;
if (itNode->SolutionStepsDataHas(NODAL_CAUCHY_STRESS))
{
Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS);
if (rNodalStress.size() != sizeStrains)
{
rNodalStress.resize(sizeStrains, false);
}
noalias(rNodalStress) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have NODAL_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_DEVIATORIC_CAUCHY_STRESS))
{
Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS);
if (rNodalStress.size() != sizeStrains)
{
rNodalStress.resize(sizeStrains, false);
}
noalias(rNodalStress) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have NODAL_DEVIATORIC_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_VOLUME))
{
itNode->FastGetSolutionStepValue(NODAL_VOLUME) = 0;
}
else
{
std::cout << "THIS node does not have NODAL_VOLUME... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_MEAN_MESH_SIZE))
{
itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0;
}
else
{
std::cout << "THIS node does not have NODAL_MEAN_MESH_SIZE... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_FREESURFACE_AREA))
{
itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0;
}
else
{
std::cout << "THIS node does not have NODAL_FREESURFACE_AREA... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS))
{
Vector &rNodalSFDneighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
if (rNodalSFDneighbours.size() != sizeSDFNeigh)
{
rNodalSFDneighbours.resize(sizeSDFNeigh, false);
}
noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
}
else
{
std::cout << "THIS node does not have NODAL_SFD_NEIGHBOURS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_SPATIAL_DEF_RATE))
{
Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
if (rSpatialDefRate.size() != sizeStrains)
{
rSpatialDefRate.resize(sizeStrains, false);
}
noalias(rSpatialDefRate) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have NODAL_SPATIAL_DEF_RATE... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD))
{
Matrix &rFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
if (rFgrad.size1() != dimension)
{
rFgrad.resize(dimension, dimension, false);
}
noalias(rFgrad) = ZeroMatrix(dimension, dimension);
}
else
{
std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD_VEL))
{
Matrix &rFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
if (rFgradVel.size1() != dimension)
{
rFgradVel.resize(dimension, dimension, false);
}
noalias(rFgradVel) = ZeroMatrix(dimension, dimension);
}
else
{
std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD_VEL... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_CAUCHY_STRESS))
{
Vector &rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
if (rSolidNodalStress.size() != sizeStrains)
{
rSolidNodalStress.resize(sizeStrains, false);
}
noalias(rSolidNodalStress) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS))
{
Vector &rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS);
if (rSolidNodalStress.size() != sizeStrains)
{
rSolidNodalStress.resize(sizeStrains, false);
}
noalias(rSolidNodalStress) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUME))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME) = 0;
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_VOLUME... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_MEAN_MESH_SIZE))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE) = 0;
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_MEAN_MESH_SIZE... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_FREESURFACE_AREA))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA) = 0;
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_FREESURFACE_AREA... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS))
{
Vector &rSolidNodalSFDneighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
if (rSolidNodalSFDneighbours.size() != sizeSDFNeigh)
{
rSolidNodalSFDneighbours.resize(sizeSDFNeigh, false);
}
noalias(rSolidNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_SFD_NEIGHBOURS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_SPATIAL_DEF_RATE))
{
Vector &rSolidSpatialDefRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
if (rSolidSpatialDefRate.size() != sizeStrains)
{
rSolidSpatialDefRate.resize(sizeStrains, false);
}
noalias(rSolidSpatialDefRate) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_SPATIAL_DEF_RATE... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD))
{
Matrix &rSolidFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
if (rSolidFgrad.size1() != dimension)
{
rSolidFgrad.resize(dimension, dimension, false);
}
noalias(rSolidFgrad) = ZeroMatrix(dimension, dimension);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_DEFORMATION_GRAD... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD_VEL))
{
Matrix &rSolidFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
if (rSolidFgradVel.size1() != dimension)
{
rSolidFgradVel.resize(dimension, dimension, false);
}
noalias(rSolidFgradVel) = ZeroMatrix(dimension, dimension);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_DEFORMATION_GRAD_VEL... " << itNode->X() << " " << itNode->Y() << std::endl;
}
AssignMaterialToEachNode(itNode);
}
// }
}
void AssignMaterialToEachNode(ModelPart::NodeIterator itNode)
{
ModelPart &rModelPart = BaseType::GetModelPart();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
double deviatoricCoeff = 0;
double volumetricCoeff = 0;
if (itNode->Is(SOLID))
{
const double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
const double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO);
//deviatoricCoeff=deltaT*secondLame
deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5;
//volumetricCoeff=bulk*deltaT=deltaT*(firstLame+2*secondLame/3)
volumetricCoeff = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio)) + 2.0 * deviatoricCoeff / 3.0;
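// Reference relations for an isotropic material (what the two lines above encode):
// second Lame (shear) modulus mu = E / (2*(1+nu)), first Lame lambda = E*nu / ((1+nu)*(1-2*nu)),
// bulk modulus K = lambda + 2*mu/3. Both coefficients are pre-multiplied by the time step,
// i.e. deviatoricCoeff = dt*mu and volumetricCoeff = dt*K, consistent with the rate-form stress update used below.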
}
else if (itNode->Is(FLUID) || itNode->Is(RIGID))
{
deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY);
volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
}
if ((itNode->Is(SOLID) && itNode->Is(RIGID)))
{
itNode->FastGetSolutionStepValue(INTERFACE_NODE) = true;
}
else
{
itNode->FastGetSolutionStepValue(INTERFACE_NODE) = false;
}
const double currFirstLame = volumetricCoeff - 2.0 * deviatoricCoeff / 3.0;
//currFirstLame=deltaT*firstLame
itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT) = currFirstLame;
itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT) = deviatoricCoeff;
}
void ComputeNodalVolume()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ElementsArrayType &pElements = rModelPart.Elements();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition);
// #pragma omp parallel
// {
int k = OpenMPUtils::ThisThread();
typename ElementsArrayType::iterator ElemBegin = pElements.begin() + element_partition[k];
typename ElementsArrayType::iterator ElemEnd = pElements.begin() + element_partition[k + 1];
for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized
{
Element::GeometryType &geometry = itElem->GetGeometry();
double elementalVolume = 0;
if (dimension == 2)
{
elementalVolume = geometry.Area() / 3.0;
}
else if (dimension == 3)
{
elementalVolume = geometry.Volume() * 0.25;
}
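// The elemental measure is lumped equally to the element nodes, assuming linear simplex elements:
// one third of the area per node for a 3-node triangle in 2D, one quarter of the volume per node for a 4-node tetrahedron in 3D.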
// index = 0;
unsigned int numNodes = geometry.size();
for (unsigned int i = 0; i < numNodes; i++)
{
double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME);
nodalVolume += elementalVolume;
if (itElem->Is(SOLID))
{
double &solidVolume = geometry(i)->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
solidVolume += elementalVolume;
nodalVolume += -elementalVolume;
// if(geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE)==true){
// //I have to subtract the solid volume from the nodal volume of the interface fluid nodes because I added it before
// nodalVolume += -elementalVolume;
// }
}
}
}
// }
}
void ComputeNodalVolumeAndAssignFlagToElementType()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ElementsArrayType &pElements = rModelPart.Elements();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition);
// #pragma omp parallel
// {
int k = OpenMPUtils::ThisThread();
typename ElementsArrayType::iterator ElemBegin = pElements.begin() + element_partition[k];
typename ElementsArrayType::iterator ElemEnd = pElements.begin() + element_partition[k + 1];
for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized
{
Element::GeometryType &geometry = itElem->GetGeometry();
double elementalVolume = 0;
if (dimension == 2)
{
elementalVolume = geometry.Area() / 3.0;
}
else if (dimension == 3)
{
elementalVolume = geometry.Volume() * 0.25;
}
// index = 0;
unsigned int numNodes = geometry.size();
unsigned int fluidNodes = 0;
unsigned int solidNodes = 0;
unsigned int interfaceNodes = 0;
for (unsigned int i = 0; i < numNodes; i++)
{
if ((geometry(i)->Is(FLUID) && geometry(i)->IsNot(SOLID)) || (geometry(i)->Is(FLUID) && geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE) == true))
{
fluidNodes += 1;
}
if (geometry(i)->Is(SOLID))
{
solidNodes += 1;
}
if (geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
interfaceNodes += 1;
}
}
if (solidNodes == numNodes)
{
itElem->Set(SOLID);
// std::cout<<"THIS SOLID ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
}
if (interfaceNodes == numNodes)
{
itElem->Set(SOLID);
// std::cout<<"THIS INTERFACE ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
}
if (fluidNodes == numNodes)
{
itElem->Set(FLUID);
// std::cout<<"THIS FLUID ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
}
if (solidNodes == numNodes && fluidNodes == numNodes)
{
itElem->Reset(FLUID);
// std::cout<<"THIS ELEMENT WAS BOTH FLUID AND SOLID "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
}
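// Element flagging summary: an element is set SOLID if all its nodes are solid or all are interface nodes,
// FLUID if all its nodes count as fluid (non-solid or interface); if it qualified as both, the FLUID flag
// is reset so the element is treated as SOLID only.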
for (unsigned int i = 0; i < numNodes; i++)
{
double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME);
nodalVolume += elementalVolume;
if (itElem->Is(SOLID))
{
double &solidVolume = geometry(i)->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
solidVolume += elementalVolume;
nodalVolume += -elementalVolume;
// if(geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE)==true){
// //I have to subtract the solid volume from the nodal volume of the interface fluid nodes because I added it before
// nodalVolume += -elementalVolume;
// }
// if(interfaceNodes==numNodes && solidDensity==0){
// std::cout<<"This interface element has not a correct density....I am assigning it the fluid density----- TODO: IMPROVE IT, TAKE FROM NEIGHBOURS"<<std::endl;
// double density=geometry(i)->FastGetSolutionStepValue(DENSITY);
// geometry(i)->FastGetSolutionStepValue(SOLID_DENSITY)=density;
// }
}
}
}
// }
}
void InitializeSolutionStep() override
{
FillNodalSFDVector();
}
void FillNodalSFDVector()
{
// std::cout << "FillNodalSFDVector(); ... " << std::endl;
ModelPart &rModelPart = BaseType::GetModelPart();
// #pragma omp parallel
// {
// ModelPart::NodeIterator NodesBegin;
// ModelPart::NodeIterator NodesEnd;
// OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
// for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
// {
for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++)
{
this->InitializeNodalVariablesForRemeshedDomain(itNode);
InitializeNodalVariablesForSolidRemeshedDomain(itNode);
if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == false)
{
this->SetNeighboursOrderToNode(itNode); // it assigns neighbours to inner nodes, filling NODAL_SFD_NEIGHBOURS_ORDER
if (itNode->Is(SOLID))
{
SetNeighboursOrderToSolidNode(itNode); // it assigns neighbours to solid inner nodes, filling SOLID_NODAL_SFD_NEIGHBOURS_ORDER
}
}
else
{
SetNeighboursOrderToInterfaceNode(itNode); // it assigns neighbours to interface nodes, filling SOLID_NODAL_SFD_NEIGHBOURS_ORDER for solids and NODAL_SFD_NEIGHBOURS_ORDER for fluids
}
}
// }
// std::cout << "FillNodalSFDVector(); DONE " << std::endl;
}
void SetNeighboursOrderToSolidNode(ModelPart::NodeIterator itNode)
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
unsigned int neighbourNodes = neighb_nodes.size() + 1; // +1 because the node itself must also be considered as a neighbour node
Vector &rNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
if (rNodeOrderedNeighbours.size() != neighbourNodes)
rNodeOrderedNeighbours.resize(neighbourNodes, false);
noalias(rNodeOrderedNeighbours) = ZeroVector(neighbourNodes);
rNodeOrderedNeighbours[0] = itNode->Id();
if (neighbourNodes > 1)
{
for (unsigned int k = 0; k < neighbourNodes - 1; k++)
{
rNodeOrderedNeighbours[k + 1] = neighb_nodes[k].Id();
}
}
}
void SetNeighboursOrderToInterfaceNode(ModelPart::NodeIterator itNode)
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
unsigned int neighbourNodes = neighb_nodes.size() + 1;
unsigned int fluidCounter = 1;
unsigned int solidCounter = 1;
if (neighbourNodes > 1)
{
for (unsigned int k = 0; k < neighbourNodes - 1; k++)
{
if (neighb_nodes[k].IsNot(SOLID) || neighb_nodes[k].FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
fluidCounter += 1;
}
if (neighb_nodes[k].Is(SOLID))
{
solidCounter += 1;
}
}
}
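// The two counters start from 1 because the node itself occupies position 0 of each ordered-neighbour list;
// note that an interface neighbour that is also SOLID is counted (and later stored) in both the fluid and the solid list.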
Vector &rFluidNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
Vector &rSolidNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
if (rFluidNodeOrderedNeighbours.size() != fluidCounter)
rFluidNodeOrderedNeighbours.resize(fluidCounter, false);
if (rSolidNodeOrderedNeighbours.size() != solidCounter)
rSolidNodeOrderedNeighbours.resize(solidCounter, false);
noalias(rFluidNodeOrderedNeighbours) = ZeroVector(fluidCounter);
noalias(rSolidNodeOrderedNeighbours) = ZeroVector(solidCounter);
rFluidNodeOrderedNeighbours[0] = itNode->Id();
rSolidNodeOrderedNeighbours[0] = itNode->Id();
fluidCounter = 0;
solidCounter = 0;
if (neighbourNodes > 1)
{
for (unsigned int k = 0; k < neighbourNodes - 1; k++)
{
if (neighb_nodes[k].IsNot(SOLID) || neighb_nodes[k].FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
fluidCounter += 1;
rFluidNodeOrderedNeighbours[fluidCounter] = neighb_nodes[k].Id();
}
if (neighb_nodes[k].Is(SOLID))
{
solidCounter += 1;
rSolidNodeOrderedNeighbours[solidCounter] = neighb_nodes[k].Id();
}
}
}
// fluidCounter+=1;
// solidCounter+=1;
// ModelPart& rModelPart = BaseType::GetModelPart();
// const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
// const unsigned int sizeFluidSDFNeigh=fluidCounter*dimension;
// const unsigned int sizeSolidSDFNeigh=solidCounter*dimension;
// Vector& rFluidNodalSFDneighbours=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
// Vector& rSolidNodalSFDneighbours=itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
// if(rFluidNodalSFDneighbours.size() != sizeFluidSDFNeigh)
// rFluidNodalSFDneighbours.resize(sizeFluidSDFNeigh,false);
// if(rSolidNodalSFDneighbours.size() != sizeSolidSDFNeigh)
// rSolidNodalSFDneighbours.resize(sizeSolidSDFNeigh,false);
// noalias(rFluidNodalSFDneighbours)=ZeroVector(sizeFluidSDFNeigh);
// noalias(rSolidNodalSFDneighbours)=ZeroVector(sizeSolidSDFNeigh);
// rFluidNodalSFDneighbours.resize(sizeFluidSDFNeigh,true);
// rSolidNodalSFDneighbours.resize(sizeSolidSDFNeigh,true);
// std::cout<<"rFluidNodeOrderedNeighbours "<<rFluidNodeOrderedNeighbours<<std::endl;
// std::cout<<"rSolidNodeOrderedNeighbours "<<rSolidNodeOrderedNeighbours<<std::endl;
// std::cout<<"rFluidNodalSFDneighbours "<<rFluidNodalSFDneighbours<<std::endl;
// std::cout<<"rSolidNodalSFDneighbours "<<rSolidNodalSFDneighbours<<std::endl;
}
void InitializeNodalVariablesForSolidRemeshedDomain(ModelPart::NodeIterator itNode)
{
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
unsigned int sizeStrains = 3 * (dimension - 1);
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
unsigned int neighbourNodes = neighb_nodes.size() + 1;
unsigned int sizeSDFNeigh = neighbourNodes * dimension;
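// sizeSDFNeigh = (number of neighbours + 1) * dimension: the SFD vector stores 'dimension' entries
// for the node itself and for each of its neighbours.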
if (itNode->SolutionStepsDataHas(SOLID_NODAL_CAUCHY_STRESS))
{
Vector &rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
if (rSolidNodalStress.size() != sizeStrains)
rSolidNodalStress.resize(sizeStrains, false);
noalias(rSolidNodalStress) = ZeroVector(sizeStrains);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS))
{
Vector &rSolidNodalDevStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS);
if (rSolidNodalDevStress.size() != sizeStrains)
rSolidNodalDevStress.resize(sizeStrains, false);
noalias(rSolidNodalDevStress) = ZeroVector(sizeStrains);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS))
{
Vector &rSolidNodalSFDneighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
if (rSolidNodalSFDneighbours.size() != sizeSDFNeigh)
rSolidNodalSFDneighbours.resize(sizeSDFNeigh, false);
noalias(rSolidNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS_ORDER))
{
Vector &rSolidNodalSFDneighboursOrder = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
if (rSolidNodalSFDneighboursOrder.size() != neighbourNodes)
rSolidNodalSFDneighboursOrder.resize(neighbourNodes, false);
noalias(rSolidNodalSFDneighboursOrder) = ZeroVector(neighbourNodes);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_SPATIAL_DEF_RATE))
{
Vector &rSolidSpatialDefRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
if (rSolidSpatialDefRate.size() != sizeStrains)
rSolidSpatialDefRate.resize(sizeStrains, false);
noalias(rSolidSpatialDefRate) = ZeroVector(sizeStrains);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD))
{
Matrix &rSolidFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
if (rSolidFgrad.size1() != dimension)
rSolidFgrad.resize(dimension, dimension, false);
noalias(rSolidFgrad) = ZeroMatrix(dimension, dimension);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD_VEL))
{
Matrix &rSolidFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
if (rSolidFgradVel.size1() != dimension)
rSolidFgradVel.resize(dimension, dimension, false);
noalias(rSolidFgradVel) = ZeroMatrix(dimension, dimension);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUME))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME) = 0;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_MEAN_MESH_SIZE))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE) = 0;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_FREESURFACE_AREA))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA) = 0;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUMETRIC_DEF_RATE))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = 0;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_EQUIVALENT_STRAIN_RATE))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = 0;
}
}
void CalcNodalStrainsAndStresses()
{
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
const double solidNodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
double theta = 0.5;
if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
if (nodalVolume > 0)
{
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
Matrix &interfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
Matrix &interfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
if (interfaceFgrad.size1() != dimension)
interfaceFgrad.resize(dimension, dimension, false);
if (interfaceFgradVel.size1() != dimension)
interfaceFgradVel.resize(dimension, dimension, false);
noalias(interfaceFgrad) = ZeroMatrix(dimension, dimension);
noalias(interfaceFgradVel) = ZeroMatrix(dimension, dimension);
//The stresses and strains must be computed twice: once for the solid part and once for the fluid part
// Matrix interfaceFgrad=ZeroMatrix(dimension,dimension);
// Matrix interfaceFgradVel=ZeroMatrix(dimension,dimension);
//the following function is more expensive than the general one because it performs an extra loop over the neighbour nodes; this is why it is also called here for the fluid interface nodes.
ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, nodalSFDneighboursId, rNodalSFDneigh, theta, interfaceFgrad, interfaceFgradVel);
// itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD)=interfaceFgrad;
// itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL)=interfaceFgradVel;
CalcNodalStrainsAndStressesForInterfaceFluidNode(itNode);
}
if (solidNodalVolume > 0)
{
Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
Vector rSolidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
Matrix &solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix &solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
if (solidInterfaceFgrad.size1() != dimension)
solidInterfaceFgrad.resize(dimension, dimension, false);
if (solidInterfaceFgradVel.size1() != dimension)
solidInterfaceFgradVel.resize(dimension, dimension, false);
noalias(solidInterfaceFgrad) = ZeroMatrix(dimension, dimension);
noalias(solidInterfaceFgradVel) = ZeroMatrix(dimension, dimension);
theta = 1.0;
// Matrix solidInterfaceFgrad=ZeroMatrix(dimension,dimension);
// Matrix solidInterfaceFgradVel=ZeroMatrix(dimension,dimension);
ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, solidNodalSFDneighboursId, rSolidNodalSFDneigh, theta, solidInterfaceFgrad, solidInterfaceFgradVel);
// itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD)=solidInterfaceFgrad;
// itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL)=solidInterfaceFgradVel;
CalcNodalStrainsAndStressesForInterfaceSolidNode(itNode);
}
}
else
{
if (itNode->Is(SOLID) && solidNodalVolume > 0)
{
theta = 1.0;
ComputeAndStoreNodalDeformationGradientForSolidNode(itNode, theta);
CalcNodalStrainsAndStressesForSolidNode(itNode);
}
else if (nodalVolume > 0)
{
theta = 0.5;
this->ComputeAndStoreNodalDeformationGradient(itNode, theta);
this->CalcNodalStrainsAndStressesForNode(itNode);
}
}
if (nodalVolume == 0 && solidNodalVolume == 0)
{ // neither fluid nor solid nodal volume: re-initialize the nodal variables
theta = 0.5;
this->InitializeNodalVariablesForRemeshedDomain(itNode);
InitializeNodalVariablesForSolidRemeshedDomain(itNode);
}
// }
// if(itNode->Is(SOLID) && itNode->FastGetSolutionStepValue(INTERFACE_NODE)==false){
// CopyValuesToSolidNonInterfaceNodes(itNode);
// }
}
// }
/* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
}
void CopyValuesToSolidNonInterfaceNodes(ModelPart::NodeIterator itNode)
{
Vector &solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
Vector &solidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
Matrix &solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix &solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
Vector &solidSpatialDefRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
double &volumetricDefRate = itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE);
Vector &solidCauchyStress = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
Vector &solidDeviatoricCauchyStress = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS);
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
unsigned int sizeNodalSFDneighboursId = nodalSFDneighboursId.size();
solidNodalSFDneighboursId.resize(sizeNodalSFDneighboursId, false);
Vector nodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
unsigned int sizeNodalSFDneigh = nodalSFDneigh.size();
solidNodalSFDneigh.resize(sizeNodalSFDneigh, false);
solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
solidNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
solidInterfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
solidSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
volumetricDefRate = itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE);
solidCauchyStress = itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS);
solidDeviatoricCauchyStress = itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS);
}
void CalcNodalStrainsAndStressesForInterfaceFluidNode(ModelPart::NodeIterator itNode)
{
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
double deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY);
const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
const double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
const double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
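// Papanastasiou-regularized Bingham viscosity (as encoded above): mu_eff = mu + (tau_y/gamma_dot) * (1 - exp(-m*gamma_dot)),
// with the limit mu_eff -> m*tau_y for gamma_dot -> 0, where m is ADAPTIVE_EXPONENT and tau_y is YIELD_SHEAR.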
const double currFirstLame = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
Matrix Fgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
Matrix FgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
double detFgrad = 1.0;
Matrix InvFgrad = ZeroMatrix(dimension, dimension);
Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
if (dimension == 2)
{
MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
}
else if (dimension == 3)
{
MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
}
//it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
SpatialVelocityGrad = prod(FgradVel, InvFgrad);
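// The symmetric part of L, d = 0.5*(L + L^T), is the rate-of-deformation tensor; its components are stored
// below in Voigt-like order (2D: xx, yy, xy; 3D: xx, yy, zz, xy, xz, yz).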
if (dimension == 2)
{
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]));
const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
const double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
const double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
const double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
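// Nodal Cauchy stress (2D): sigma = currFirstLame*tr(d)*I + 2*mu_eff*d, with its deviatoric part
// sigma_dev = 2*mu_eff*(d - tr(d)/3 * I); here currFirstLame = dt*BULK_MODULUS acts as the volumetric coefficient.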
// if(itNode->Is(SOLID))
// {
// nodalSigmaTot_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[0];
// nodalSigmaTot_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[1];
// nodalSigmaTot_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[2];
// nodalSigmaDev_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[0];
// nodalSigmaDev_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[1];
// nodalSigmaDev_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[2];
// }
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_xy;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_xy;
}
else if (dimension == 3)
{
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]);
const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
const double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
const double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
const double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5];
// if(itNode->Is(SOLID))
// {
// nodalSigmaTot_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[0];
// nodalSigmaTot_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[1];
// nodalSigmaTot_zz+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[2];
// nodalSigmaTot_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[3];
// nodalSigmaTot_xz+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[4];
// nodalSigmaTot_yz+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[5];
// nodalSigmaDev_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[0];
// nodalSigmaDev_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[1];
// nodalSigmaDev_zz+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[2];
// nodalSigmaDev_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[3];
// nodalSigmaDev_xz+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[4];
// nodalSigmaDev_yz+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[5];
// }
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_zz;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[3] = nodalSigmaTot_xy;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[4] = nodalSigmaTot_xz;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[5] = nodalSigmaTot_yz;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_zz;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[3] = nodalSigmaDev_xy;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[4] = nodalSigmaDev_xz;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[5] = nodalSigmaDev_yz;
}
}
void CalcNodalStrainsAndStressesForInterfaceSolidNode(ModelPart::NodeIterator itNode)
{
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
const double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
const double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO);
const double currFirstLame = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio));
double deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5;
Matrix Fgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
double detFgrad = 1.0;
Matrix InvFgrad = ZeroMatrix(dimension, dimension);
Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
if (dimension == 2)
{
MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
}
else if (dimension == 3)
{
MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
}
//it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
SpatialVelocityGrad = prod(FgradVel, InvFgrad);
if (dimension == 2)
{
auto &r_strain_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
r_strain_tensor2D[0] = SpatialVelocityGrad(0, 0);
r_strain_tensor2D[1] = SpatialVelocityGrad(1, 1);
r_strain_tensor2D[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
const double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
const double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
const double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
if (itNode->Is(SOLID))
{
nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
}
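// Hypoelastic incremental update: the coefficients are scaled by the time step, so the terms above are stress
// increments; the previous-step solid stress (history index 1) is added to obtain the updated total and deviatoric stresses.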
auto &r_stress_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0);
r_stress_tensor2D[0] = nodalSigmaTot_xx;
r_stress_tensor2D[1] = nodalSigmaTot_yy;
r_stress_tensor2D[2] = nodalSigmaTot_xy;
auto &r_dev_stress_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
r_dev_stress_tensor2D[0] = nodalSigmaDev_xx;
r_dev_stress_tensor2D[1] = nodalSigmaDev_yy;
r_dev_stress_tensor2D[2] = nodalSigmaDev_xy;
}
else if (dimension == 3)
{
auto &r_strain_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
r_strain_tensor3D[0] = SpatialVelocityGrad(0, 0);
r_strain_tensor3D[1] = SpatialVelocityGrad(1, 1);
r_strain_tensor3D[2] = SpatialVelocityGrad(2, 2);
r_strain_tensor3D[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
r_strain_tensor3D[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
r_strain_tensor3D[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]);
const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
const double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
const double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
const double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
if (itNode->Is(SOLID))
{
nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
nodalSigmaTot_zz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[3];
nodalSigmaTot_xz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[4];
nodalSigmaTot_yz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[5];
nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
nodalSigmaDev_zz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[3];
nodalSigmaDev_xz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[4];
nodalSigmaDev_yz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[5];
}
auto &r_stress_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0);
r_stress_tensor3D[0] = nodalSigmaTot_xx;
r_stress_tensor3D[1] = nodalSigmaTot_yy;
r_stress_tensor3D[2] = nodalSigmaTot_zz;
r_stress_tensor3D[3] = nodalSigmaTot_xy;
r_stress_tensor3D[4] = nodalSigmaTot_xz;
r_stress_tensor3D[5] = nodalSigmaTot_yz;
auto &r_dev_stress_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
r_dev_stress_tensor3D[0] = nodalSigmaDev_xx;
r_dev_stress_tensor3D[1] = nodalSigmaDev_yy;
r_dev_stress_tensor3D[2] = nodalSigmaDev_zz;
r_dev_stress_tensor3D[3] = nodalSigmaDev_xy;
r_dev_stress_tensor3D[4] = nodalSigmaDev_xz;
r_dev_stress_tensor3D[5] = nodalSigmaDev_yz;
}
}
void CalcNodalStrainsAndStressesForSolidNode(ModelPart::NodeIterator itNode)
{
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
const double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
const double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO);
const double currFirstLame = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio));
double deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5;
Matrix Fgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
double detFgrad = 1.0;
Matrix InvFgrad = ZeroMatrix(dimension, dimension);
Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
if (dimension == 2)
{
MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
}
else if (dimension == 3)
{
MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
}
// if(itNode->Is(SOLID)){
// std::cout<<"solid node"<<std::endl;
// }
// if(itNode->Is(FLUID)){
// std::cout<<"FLUID node"<<std::endl;
// }
// if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){
// std::cout<<"currFirstLame "<<currFirstLame<<" deviatoricCoeff "<<deviatoricCoeff<<std::endl;
// }else{
// std::cout<<"NOT INTERFACE currFirstLame "<<currFirstLame<<" deviatoricCoeff "<<deviatoricCoeff<<std::endl;
// }
//it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
SpatialVelocityGrad = prod(FgradVel, InvFgrad);
if (dimension == 2)
{
auto &r_strain_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
r_strain_tensor2D[0] = SpatialVelocityGrad(0, 0);
r_strain_tensor2D[1] = SpatialVelocityGrad(1, 1);
r_strain_tensor2D[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
const double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
const double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
const double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
if (itNode->Is(SOLID))
{
nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
}
auto &r_stress_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0);
r_stress_tensor2D[0] = nodalSigmaTot_xx;
r_stress_tensor2D[1] = nodalSigmaTot_yy;
r_stress_tensor2D[2] = nodalSigmaTot_xy;
auto &r_dev_stress_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
r_dev_stress_tensor2D[0] = nodalSigmaDev_xx;
r_dev_stress_tensor2D[1] = nodalSigmaDev_yy;
r_dev_stress_tensor2D[2] = nodalSigmaDev_xy;
}
else if (dimension == 3)
{
auto &r_strain_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
r_strain_tensor3D[0] = SpatialVelocityGrad(0, 0);
r_strain_tensor3D[1] = SpatialVelocityGrad(1, 1);
r_strain_tensor3D[2] = SpatialVelocityGrad(2, 2);
r_strain_tensor3D[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
r_strain_tensor3D[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
r_strain_tensor3D[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]);
const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
const double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
const double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for very small gamma_dot, the Papanastasiou viscosity tends to the limit mu = m * tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
const double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
if (itNode->Is(SOLID))
{
nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
nodalSigmaTot_zz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[3];
nodalSigmaTot_xz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[4];
nodalSigmaTot_yz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[5];
nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
nodalSigmaDev_zz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[3];
nodalSigmaDev_xz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[4];
nodalSigmaDev_yz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[5];
}
auto &r_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0);
r_tensor3D[0] = nodalSigmaTot_xx;
r_tensor3D[1] = nodalSigmaTot_yy;
r_tensor3D[2] = nodalSigmaTot_zz;
r_tensor3D[3] = nodalSigmaTot_xy;
r_tensor3D[4] = nodalSigmaTot_xz;
r_tensor3D[5] = nodalSigmaTot_yz;
auto &r_dev_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
r_dev_tensor3D[0] = nodalSigmaDev_xx;
r_dev_tensor3D[1] = nodalSigmaDev_yy;
r_dev_tensor3D[2] = nodalSigmaDev_zz;
r_dev_tensor3D[3] = nodalSigmaDev_xy;
r_dev_tensor3D[4] = nodalSigmaDev_xz;
r_dev_tensor3D[5] = nodalSigmaDev_yz;
}
}
void CalcNodalStrainsForSolidNode(ModelPart::NodeIterator itNode)
{
/* std::cout << "Calc Nodal Strains " << std::endl; */
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
// Matrix Fgrad=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
// Matrix FgradVel=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
// double detFgrad=1.0;
// Matrix InvFgrad=ZeroMatrix(dimension,dimension);
// Matrix SpatialVelocityGrad=ZeroMatrix(dimension,dimension);
double detFgrad = 1.0;
Matrix nodalFgrad = ZeroMatrix(dimension, dimension);
Matrix FgradVel = ZeroMatrix(dimension, dimension);
Matrix InvFgrad = ZeroMatrix(dimension, dimension);
Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
nodalFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
//Inverse
if (dimension == 2)
{
MathUtils<double>::InvertMatrix2(nodalFgrad, InvFgrad, detFgrad);
}
else if (dimension == 3)
{
MathUtils<double>::InvertMatrix3(nodalFgrad, InvFgrad, detFgrad);
}
// compute the spatial velocity gradient tensor: L_ij = dF_ik * invF_kj
SpatialVelocityGrad = prod(FgradVel, InvFgrad);
if (dimension == 2)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
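// note: this equals sqrt(2 * d:d); the factor 4 on the Voigt shear component accounts for d_xy appearing twice in the tensor contraction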
const double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
const double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
const double DefVol = DefX + DefY;
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
}
else if (dimension == 3)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]);
const double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
const double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
const double DefZ = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
const double DefVol = DefX + DefY + DefZ;
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
}
}
void CalcNodalStrainsForInterfaceSolidNode(ModelPart::NodeIterator itNode)
{
/* std::cout << "Calc Nodal Strains " << std::endl; */
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
Matrix Fgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
double detFgrad = 1.0;
Matrix InvFgrad = ZeroMatrix(dimension, dimension);
Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
//Inverse
if (dimension == 2)
{
MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
}
else if (dimension == 3)
{
MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
}
// compute the spatial velocity gradient tensor: L_ij = dF_ik * invF_kj
SpatialVelocityGrad = prod(FgradVel, InvFgrad);
if (dimension == 2)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
const double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
const double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
const double DefVol = DefX + DefY;
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
}
else if (dimension == 3)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]);
const double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
const double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
const double DefZ = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
const double DefVol = DefX + DefY + DefZ;
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
}
/* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
}
void CalcNodalStrains()
{
/* std::cout << "Calc Nodal Strains " << std::endl; */
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
const double solidNodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
double theta = 1.0;
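// theta blends the current and previous step velocities (v = theta * v^{n+1} + (1 - theta) * v^{n}) inside the deformation gradient rate;
// theta = 1 uses only the end-of-step velocity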
if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
if (nodalVolume > 0)
{
// the strains must be computed twice: once for the fluid and once for the solid
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
Matrix &interfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
Matrix &interfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
if (interfaceFgrad.size1() != dimension)
interfaceFgrad.resize(dimension, dimension, false);
if (interfaceFgradVel.size1() != dimension)
interfaceFgradVel.resize(dimension, dimension, false);
noalias(interfaceFgrad) = ZeroMatrix(dimension, dimension);
noalias(interfaceFgradVel) = ZeroMatrix(dimension, dimension);
// Matrix interfaceFgrad = ZeroMatrix(dimension,dimension);
// Matrix interfaceFgradVel = ZeroMatrix(dimension,dimension);
// the following function is more expensive than the general one because it performs an extra loop over the neighbour nodes; this is why it is also used here for fluid interface nodes
ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, nodalSFDneighboursId, rNodalSFDneigh, theta, interfaceFgrad, interfaceFgradVel);
// itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD)=interfaceFgrad;
// itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL)=interfaceFgradVel;
this->CalcNodalStrainsForNode(itNode);
}
if (solidNodalVolume > 0)
{
Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
Vector rSolidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
Matrix &solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix &solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
if (solidInterfaceFgrad.size1() != dimension)
solidInterfaceFgrad.resize(dimension, dimension, false);
if (solidInterfaceFgradVel.size1() != dimension)
solidInterfaceFgradVel.resize(dimension, dimension, false);
noalias(solidInterfaceFgrad) = ZeroMatrix(dimension, dimension);
noalias(solidInterfaceFgradVel) = ZeroMatrix(dimension, dimension);
// Matrix solidInterfaceFgrad = ZeroMatrix(dimension,dimension);
// Matrix solidInterfaceFgradVel = ZeroMatrix(dimension,dimension);
ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, solidNodalSFDneighboursId, rSolidNodalSFDneigh, theta, solidInterfaceFgrad, solidInterfaceFgradVel);
// itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD)=solidInterfaceFgrad;
// itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL)=solidInterfaceFgradVel;
CalcNodalStrainsForInterfaceSolidNode(itNode);
}
}
else
{
if (itNode->Is(SOLID) && solidNodalVolume > 0)
{
ComputeAndStoreNodalDeformationGradientForSolidNode(itNode, theta);
CalcNodalStrainsForSolidNode(itNode);
}
else if (nodalVolume > 0)
{
this->ComputeAndStoreNodalDeformationGradient(itNode, theta);
this->CalcNodalStrainsForNode(itNode);
}
}
if (nodalVolume == 0 && solidNodalVolume == 0)
{ // if nodalVolume==0
this->InitializeNodalVariablesForRemeshedDomain(itNode);
InitializeNodalVariablesForSolidRemeshedDomain(itNode);
}
// if(itNode->Is(SOLID) && itNode->FastGetSolutionStepValue(INTERFACE_NODE)==false){
// CopyValuesToSolidNonInterfaceNodes(itNode);
// }
}
// }
/* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
}
void ComputeAndStoreNodalDeformationGradientForSolidNode(ModelPart::NodeIterator itNode, double theta)
{
KRATOS_TRY;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
/* unsigned int idThisNode=nodalSFDneighboursId[0]; */
const unsigned int neighSize = nodalSFDneighboursId.size();
Matrix Fgrad = ZeroMatrix(dimension, dimension);
Matrix FgradVel = ZeroMatrix(dimension, dimension);
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
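// assemble the nodal deformation gradient and its rate from the stored shape-function derivatives:
// F_iJ = sum_a dN_a/dX_J * x_i^a and Fdot_iJ = sum_a dN_a/dX_J * v_i^a, starting from the node itself and then its neighbours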
if (dimension == 2)
{
double dNdXi = rNodalSFDneigh[0];
double dNdYi = rNodalSFDneigh[1];
Fgrad(0, 0) += dNdXi * itNode->X();
Fgrad(0, 1) += dNdYi * itNode->X();
Fgrad(1, 0) += dNdXi * itNode->Y();
Fgrad(1, 1) += dNdYi * itNode->Y();
double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
unsigned int firstRow = 2;
if (neighSize > 0)
{
for (unsigned int i = 0; i < neighSize - 1; i++) // neighb_nodes has one entry fewer than nodalSFDneighboursId because the latter also stores the ID of the current node at the beginning
{
dNdXi = rNodalSFDneigh[firstRow];
dNdYi = rNodalSFDneigh[firstRow + 1];
unsigned int neigh_nodes_id = neighb_nodes[i].Id();
unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1];
if (neigh_nodes_id != other_neigh_nodes_id)
{
std::cout << "node (x,y)=(" << itNode->X() << "," << itNode->Y() << ") with neigh_nodes_id " << neigh_nodes_id << " different than other_neigh_nodes_id " << other_neigh_nodes_id << std::endl;
}
Fgrad(0, 0) += dNdXi * neighb_nodes[i].X();
Fgrad(0, 1) += dNdYi * neighb_nodes[i].X();
Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y();
Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y();
VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
firstRow += 2;
}
}
}
else
{
double dNdXi = rNodalSFDneigh[0];
double dNdYi = rNodalSFDneigh[1];
double dNdZi = rNodalSFDneigh[2];
double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
Fgrad(0, 0) += dNdXi * itNode->X();
Fgrad(0, 1) += dNdYi * itNode->X();
Fgrad(0, 2) += dNdZi * itNode->X();
Fgrad(1, 0) += dNdXi * itNode->Y();
Fgrad(1, 1) += dNdYi * itNode->Y();
Fgrad(1, 2) += dNdZi * itNode->Y();
Fgrad(2, 0) += dNdXi * itNode->Z();
Fgrad(2, 1) += dNdYi * itNode->Z();
Fgrad(2, 2) += dNdZi * itNode->Z();
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(0, 2) += dNdZi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
FgradVel(1, 2) += dNdZi * VelocityY;
FgradVel(2, 0) += dNdXi * VelocityZ;
FgradVel(2, 1) += dNdYi * VelocityZ;
FgradVel(2, 2) += dNdZi * VelocityZ;
unsigned int firstRow = 3;
if (neighSize > 0)
{
for (unsigned int i = 0; i < neighSize - 1; i++)
{
dNdXi = rNodalSFDneigh[firstRow];
dNdYi = rNodalSFDneigh[firstRow + 1];
dNdZi = rNodalSFDneigh[firstRow + 2];
VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
VelocityZ = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
Fgrad(0, 0) += dNdXi * neighb_nodes[i].X();
Fgrad(0, 1) += dNdYi * neighb_nodes[i].X();
Fgrad(0, 2) += dNdZi * neighb_nodes[i].X();
Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y();
Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y();
Fgrad(1, 2) += dNdZi * neighb_nodes[i].Y();
Fgrad(2, 0) += dNdXi * neighb_nodes[i].Z();
Fgrad(2, 1) += dNdYi * neighb_nodes[i].Z();
Fgrad(2, 2) += dNdZi * neighb_nodes[i].Z();
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(0, 2) += dNdZi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
FgradVel(1, 2) += dNdZi * VelocityY;
FgradVel(2, 0) += dNdXi * VelocityZ;
FgradVel(2, 1) += dNdYi * VelocityZ;
FgradVel(2, 2) += dNdZi * VelocityZ;
firstRow += 3;
}
}
}
itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD) = Fgrad;
itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL) = FgradVel;
KRATOS_CATCH("");
}
void ComputeAndStoreNodalDeformationGradientForInterfaceNode(ModelPart::NodeIterator itNode, Vector nodalSFDneighboursId, Vector rNodalSFDneigh, double theta, Matrix &Fgrad, Matrix &FgradVel)
{
KRATOS_TRY;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
/* unsigned int idThisNode=nodalSFDneighboursId[0]; */
const unsigned int neighSize = nodalSFDneighboursId.size();
noalias(Fgrad) = ZeroMatrix(dimension, dimension);
noalias(FgradVel) = ZeroMatrix(dimension, dimension);
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
const unsigned int neighNodesSize = neighb_nodes.size();
if (dimension == 2)
{
double dNdXi = rNodalSFDneigh[0];
double dNdYi = rNodalSFDneigh[1];
Fgrad(0, 0) += dNdXi * itNode->X();
Fgrad(0, 1) += dNdYi * itNode->X();
Fgrad(1, 0) += dNdXi * itNode->Y();
Fgrad(1, 1) += dNdYi * itNode->Y();
double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
unsigned int firstRow = 2;
if (neighSize > 0)
{
for (unsigned int i = 0; i < neighSize - 1; i++) // neighb_nodes has one entry fewer than nodalSFDneighboursId because the latter also stores the ID of the current node at the beginning
{
unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1];
for (unsigned int k = 0; k < neighNodesSize; k++)
{
unsigned int neigh_nodes_id = neighb_nodes[k].Id();
if (neigh_nodes_id == other_neigh_nodes_id)
{
dNdXi = rNodalSFDneigh[firstRow];
dNdYi = rNodalSFDneigh[firstRow + 1];
Fgrad(0, 0) += dNdXi * neighb_nodes[k].X();
Fgrad(0, 1) += dNdYi * neighb_nodes[k].X();
Fgrad(1, 0) += dNdXi * neighb_nodes[k].Y();
Fgrad(1, 1) += dNdYi * neighb_nodes[k].Y();
VelocityX = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
VelocityY = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
firstRow += 2;
break;
}
}
}
}
}
else
{
double dNdXi = rNodalSFDneigh[0];
double dNdYi = rNodalSFDneigh[1];
double dNdZi = rNodalSFDneigh[2];
double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
Fgrad(0, 0) += dNdXi * itNode->X();
Fgrad(0, 1) += dNdYi * itNode->X();
Fgrad(0, 2) += dNdZi * itNode->X();
Fgrad(1, 0) += dNdXi * itNode->Y();
Fgrad(1, 1) += dNdYi * itNode->Y();
Fgrad(1, 2) += dNdZi * itNode->Y();
Fgrad(2, 0) += dNdXi * itNode->Z();
Fgrad(2, 1) += dNdYi * itNode->Z();
Fgrad(2, 2) += dNdZi * itNode->Z();
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(0, 2) += dNdZi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
FgradVel(1, 2) += dNdZi * VelocityY;
FgradVel(2, 0) += dNdXi * VelocityZ;
FgradVel(2, 1) += dNdYi * VelocityZ;
FgradVel(2, 2) += dNdZi * VelocityZ;
unsigned int firstRow = 3;
if (neighSize > 0)
{
for (unsigned int i = 0; i < neighSize - 1; i++)
{
unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1];
for (unsigned int k = 0; k < neighNodesSize; k++)
{
unsigned int neigh_nodes_id = neighb_nodes[k].Id();
if (neigh_nodes_id == other_neigh_nodes_id)
{
dNdXi = rNodalSFDneigh[firstRow];
dNdYi = rNodalSFDneigh[firstRow + 1];
dNdZi = rNodalSFDneigh[firstRow + 2];
VelocityX = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
VelocityY = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
VelocityZ = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
Fgrad(0, 0) += dNdXi * neighb_nodes[k].X();
Fgrad(0, 1) += dNdYi * neighb_nodes[k].X();
Fgrad(0, 2) += dNdZi * neighb_nodes[k].X();
Fgrad(1, 0) += dNdXi * neighb_nodes[k].Y();
Fgrad(1, 1) += dNdYi * neighb_nodes[k].Y();
Fgrad(1, 2) += dNdZi * neighb_nodes[k].Y();
Fgrad(2, 0) += dNdXi * neighb_nodes[k].Z();
Fgrad(2, 1) += dNdYi * neighb_nodes[k].Z();
Fgrad(2, 2) += dNdZi * neighb_nodes[k].Z();
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(0, 2) += dNdZi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
FgradVel(1, 2) += dNdZi * VelocityY;
FgradVel(2, 0) += dNdXi * VelocityZ;
FgradVel(2, 1) += dNdYi * VelocityZ;
FgradVel(2, 2) += dNdZi * VelocityZ;
firstRow += 3;
break;
}
}
}
}
}
// itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD)=Fgrad;
// itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL)=FgradVel;
KRATOS_CATCH("");
}
void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel)
{
KRATOS_TRY;
// std::cout<<" UpdateTopology ..."<<std::endl;
/* this->CalculateDisplacements(); */
CalculateDisplacementsAndResetNodalVariables();
BaseType::MoveMesh();
BoundaryNormalsCalculationUtilities BoundaryComputation;
BoundaryComputation.CalculateWeightedBoundaryNormals(rModelPart, echoLevel);
// std::cout<<" UpdateTopology DONE"<<std::endl;
KRATOS_CATCH("");
}
void CalculateDisplacementsAndResetNodalVariables()
{
ModelPart &rModelPart = BaseType::GetModelPart();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
unsigned int sizeStrains = 3 * (dimension - 1);
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator i = NodesBegin; i != NodesEnd; ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);
CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];
CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];
if (dimension == 3)
{
CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
}
///// reset Nodal variables //////
Vector &rNodalSFDneighbours = i->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
unsigned int sizeSDFNeigh = rNodalSFDneighbours.size();
// unsigned int neighbourNodes=i->GetValue(NEIGHBOUR_NODES).size()+1;
// unsigned int sizeSDFNeigh=neighbourNodes*dimension;
i->FastGetSolutionStepValue(NODAL_VOLUME) = 0;
i->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0;
i->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0;
i->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0;
i->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0;
noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
Vector &rSpatialDefRate = i->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
noalias(rSpatialDefRate) = ZeroVector(sizeStrains);
Matrix &rFgrad = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
noalias(rFgrad) = ZeroMatrix(dimension, dimension);
Matrix &rFgradVel = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
noalias(rFgradVel) = ZeroMatrix(dimension, dimension);
// if(i->FastGetSolutionStepValue(INTERFACE_NODE)==true){
Vector &rSolidNodalSFDneighbours = i->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
unsigned int solidSizeSDFNeigh = rSolidNodalSFDneighbours.size();
// unsigned int solidSizeSDFNeigh=solidNeighbourNodes*dimension;
i->FastGetSolutionStepValue(SOLID_NODAL_VOLUME) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = 0;
noalias(rSolidNodalSFDneighbours) = ZeroVector(solidSizeSDFNeigh);
Vector &rSolidSpatialDefRate = i->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
noalias(rSolidSpatialDefRate) = ZeroVector(sizeStrains);
Matrix &rSolidFgrad = i->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
noalias(rSolidFgrad) = ZeroMatrix(dimension, dimension);
Matrix &rSolidFgradVel = i->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
noalias(rSolidFgradVel) = ZeroMatrix(dimension, dimension);
// }
}
// }
}
/// Turn back information as a string.
std::string Info() const override
{
std::stringstream buffer;
buffer << "NodalTwoStepVPStrategyForFSI";
return buffer.str();
}
/// Print information about this object.
void PrintInfo(std::ostream &rOStream) const override
{
rOStream << "NodalTwoStepVPStrategyForFSI";
}
// /// Print object's data.
// void PrintData(std::ostream& rOStream) const override
// {
// }
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected Life Cycle
///@{
///@}
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
NodalTwoStepVPStrategyForFSI &operator=(NodalTwoStepVPStrategyForFSI const &rOther) {}
/// Copy constructor.
NodalTwoStepVPStrategyForFSI(NodalTwoStepVPStrategyForFSI const &rOther) {}
///@}
}; /// Class NodalTwoStepVPStrategyForFSI
///@}
///@name Type Definitions
///@{
///@}
///@} // addtogroup
} // namespace Kratos.
#endif // KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H
|
swapit.h
|
#ifndef _SWAPIT_H_
#define _SWAPIT_H_
#include <sys/types.h> // u_int32_t, etc
#include <unistd.h> // read()
#include <stdexcept> // std::runtime_error
#include <cstring> // memcpy()
#include <vector>
#include <cassert>
namespace p_bin {
// NO NUMTHREADS!!!
inline void swap16(void* buf, ssize_t numWords){
const u_int16_t lobyte=(u_int16_t)0x00ff;
const u_int16_t hibyte=(u_int16_t)0xff00;
const u_int16_t bitsperbyte=(u_int16_t)8;
register ssize_t i;
register u_int16_t* src = reinterpret_cast<u_int16_t*>(buf);
#pragma omp parallel for
for(i = 0 ; i < numWords ; ++i ){
src[i] = (u_int16_t)
((u_int16_t)((src[i] & lobyte)<<bitsperbyte) |
(u_int16_t)((src[i] & hibyte)>>bitsperbyte));
}
}
// NO NUMTHREADS!!!
inline void swap32(void* buf, ssize_t numWords){
register ssize_t i;
register u_int32_t* src = reinterpret_cast<u_int32_t*>(buf);
#pragma omp parallel for
for(i = 0 ; i < numWords ; ++i ){
src[i] = ((src[i] & 0x000000ff) << 24) |
((src[i] & 0x0000ff00) << 8) |
((src[i] & 0x00ff0000) >> 8) |
((src[i] & 0xff000000) >> 24);
}
}
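// example: swap32 reverses the byte order of each 32-bit word, e.g. 0x12345678 becomes 0x78563412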
inline void swap(std::vector<u_int32_t>& buf){
swap32(&buf[0],buf.size());
}
inline void swap(std::vector<int32_t>& buf){
swap32((u_int32_t*)&buf[0],buf.size());
}
inline void swap(std::vector<float>& buf){
swap32((u_int32_t*)&buf[0],buf.size());
}
// NO NUMTHREADS!!!
inline void swap64(void* buf, ssize_t numWords){
register ssize_t i;
register u_int64_t* src = reinterpret_cast<u_int64_t*>(buf);
#pragma omp parallel for
for(i = 0 ; i < numWords ; ++i ){
src[i] = ((u_int64_t)((u_int64_t)src[i] & 0x00000000000000ffULL)<<56ULL | \
(u_int64_t)((u_int64_t)src[i] & 0x000000000000ff00ULL)<<40ULL | \
(u_int64_t)((u_int64_t)src[i] & 0x0000000000ff0000ULL)<<24ULL | \
(u_int64_t)((u_int64_t)src[i] & 0x00000000ff000000ULL)<< 8ULL | \
(u_int64_t)((u_int64_t)src[i] & 0x000000ff00000000ULL)>> 8ULL | \
(u_int64_t)((u_int64_t)src[i] & 0x0000ff0000000000ULL)>>24ULL | \
(u_int64_t)((u_int64_t)src[i] & 0x00ff000000000000ULL)>>40ULL | \
(u_int64_t)((u_int64_t)src[i] & 0xff00000000000000ULL)>>56ULL );
}
}
inline void swap(std::vector<u_int64_t>& buf){
swap64((u_int64_t*)&buf[0],buf.size());
}
inline void swap(std::vector<int64_t>& buf){
swap64((u_int64_t*)&buf[0],buf.size());
}
inline void swap(std::vector<double>& buf){
swap64((u_int64_t*)&buf[0],buf.size());
}
inline double swap(double val){
u_int64_t bits;
memcpy(&bits,&val,sizeof(double));
p_bin::swap64(&bits,1);
memcpy(&val,&bits,sizeof(double));
return val;
}
inline float swap(float val){
u_int32_t bits;
memcpy(&bits,&val,sizeof(float));
p_bin::swap32(&bits,1);
memcpy(&val,&bits,sizeof(float));
return val;
}
inline u_int64_t swap(u_int64_t val){
u_int64_t bits;
memcpy(&bits,&val,sizeof(u_int64_t));
p_bin::swap64(&bits,1);
memcpy(&val,&bits,sizeof(u_int64_t));
return val;
}
inline u_int64_t swap(int64_t val){
u_int64_t bits;
memcpy(&bits,&val,sizeof(u_int64_t));
p_bin::swap64(&bits,1);
memcpy(&val,&bits,sizeof(u_int64_t));
return val;
}
inline u_int32_t swap(u_int32_t val){
u_int32_t bits;
memcpy(&bits,&val,sizeof(u_int32_t));
p_bin::swap32(&bits,1);
memcpy(&val,&bits,sizeof(u_int32_t));
return val;
}
inline u_int32_t swap(int32_t val){
u_int32_t bits;
memcpy(&bits,&val,sizeof(u_int32_t));
p_bin::swap32(&bits,1);
memcpy(&val,&bits,sizeof(u_int32_t));
return val;
}
inline u_int16_t swap(u_int16_t val){
u_int16_t bits;
memcpy(&bits,&val,sizeof(u_int16_t));
p_bin::swap16(&bits,1);
memcpy(&val,&bits,sizeof(u_int16_t));
return val;
}
inline u_int16_t swap(int16_t val){
u_int16_t bits;
memcpy(&bits,&val,sizeof(u_int16_t));
p_bin::swap16(&bits,1);
memcpy(&val,&bits,sizeof(u_int16_t));
return val;
}
inline u_int32_t getbareword(int fin, bool swap=false)
{
u_int32_t word;
ssize_t res=read(fin,&word,sizeof(u_int32_t));
if( res<=0 ) throw std::runtime_error("getbareword() failed");
if( swap ) p_bin::swap32(&word, 1);
return word;
}
inline u_int32_t getrecord(int fin, bool swap=false)
{
return getbareword(fin,swap);
}
inline u_int32_t getword(int fin, bool swap=false)
{
u_int32_t recbgn=getrecord(fin,swap);
size_t words=recbgn/sizeof(u_int32_t);
if( words != 1 ) throw std::runtime_error("getword(): record length is not exactly one word");
u_int32_t res = getbareword(fin,swap);
u_int32_t recend=getrecord(fin,swap);
if( recbgn != recend ) throw std::runtime_error("getword(): rec bgn/end mismatch");
return res;
}
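// getrecord()/getword() read data framed by a leading and trailing byte-count word,
// which appears to follow the layout of Fortran unformatted sequential records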
template <typename T> inline void getbare(int fin, T* buf,
size_t bytes, bool swap=false)
{
const size_t MAXBYTE=2147483648L;
size_t bytesRead=0,bytesLeft=bytes;
unsigned char* dst=reinterpret_cast<unsigned char*>(buf);
while( bytesLeft ){
size_t req = bytesLeft>MAXBYTE ? MAXBYTE : bytesLeft;
ssize_t res = read(fin,&dst[bytesRead],req);
if( res <= 0) throw std::runtime_error("getbare() failed");
bytesLeft -= res;
bytesRead += res;
}
if( swap ) {
size_t N=bytes/sizeof(T);
for(size_t i=0;i<N;++i)
buf[i]=p_bin::swap(buf[i]);
}
}
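// illustrative use (assuming 'fd' is an already-open file descriptor and 'n' the element count):
//   std::vector<double> v(n);
//   p_bin::getbare(fd, &v[0], n * sizeof(double), /*swap=*/true);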
};
#endif
|
GB_binop__land_int64.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__land_int64
// A.*B function (eWiseMult): GB_AemultB__land_int64
// A*D function (colscale): GB_AxD__land_int64
// D*A function (rowscale): GB_DxB__land_int64
// C+=B function (dense accum): GB_Cdense_accumB__land_int64
// C+=b function (dense accum): GB_Cdense_accumb__land_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__land_int64
// C=scalar+B GB_bind1st__land_int64
// C=scalar+B' GB_bind1st_tran__land_int64
// C=A+scalar GB_bind2nd__land_int64
// C=A'+scalar GB_bind2nd_tran__land_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) && (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_INT64 || GxB_NO_LAND_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__land_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__land_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__land_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__land_int64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__land_int64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__land_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__land_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__land_int64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = Bx [p] ;
Cx [p] = ((x != 0) && (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__land_int64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = Ax [p] ;
Cx [p] = ((aij != 0) && (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
GrB_Info GB_bind1st_tran__land_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
GrB_Info GB_bind2nd_tran__land_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__round_fc32_fc32.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__round_fc32_fc32)
// op(A') function: GB (_unop_tran__round_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_croundf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_croundf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_croundf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ROUND || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__round_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_croundf (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_croundf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__round_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
own_warp.c
|
/*******************************************************************************
* Copyright (c) 2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
////////////////////////////////////////////////////////////////////////////////////////
//
// scikit-ipp's own functions for image warp transformations, that uses
// Intel(R) Integrated Performance Primitives (Intel(R) IPP).
//
////////////////////////////////////////////////////////////////////////////////////////
#include "own_warp.h"
#define EXIT_FUNC exitLine: /* Label for Exit */
#define check_sts(st) if((st) != ippStsNoErr) goto exitLine
////////////////////////////////////////////////////////////////////////////////////////
//
// own_Warp
//
// own_Warp uses Intel(R) IPP functions for implementing image warp
// transformations
//
// TODO: complete the description.
//
////////////////////////////////////////////////////////////////////////////////////////
IppStatus
own_Warp(
IppDataType ippDataType,
void * pSrc,
void * pDst,
int img_width,
int img_height,
int dst_width,
int dst_height,
int numChannels,
double * coeffs,
IppiInterpolationType interpolation,
IppiWarpDirection direction,
IppiBorderType ippBorderType,
double ippBorderValue)
{
IppStatus status = ippStsNoErr;
IppiWarpSpec* pSpec = NULL; // Pointer to the specification
// structure
Ipp8u* pInitBuf = NULL;
// ``scikit-image`` uses the Catmull-Rom spline (B = 0.0, C = 0.5)
// Don P. Mitchell, Arun N. Netravali. Reconstruction Filters in Computer Graphics.
// Computer Graphics, Volume 22, Number 4, AT&T Bell Laboratories, Murray Hill,
// New Jersey, August 1988.
Ipp64f valueB = 0.0;
Ipp64f valueC = 0.5;
Ipp8u * pBuffer = NULL;
Ipp64f pBorderValue[4];
IppiSize srcSize = { img_width, img_height }; // Size of source image
IppiSize dstSize = { dst_width, dst_height }; // size of destination images
int srcStep, dstStep; // Steps, in bytes, through the
// source/destination images
int specSize = 0, initSize = 0, bufSize = 0; // Work buffer size
IppiPoint dstOffset = { 0, 0 };
#ifdef USE_OPENMP
IppStatus * pStatus = NULL;
int numThreads, slice, tail;
int bufSize1, bufSize2;
IppiSize dstTileSize, dstLastTileSize;
int max_num_threads;
#ifdef MAX_NUM_THREADS
max_num_threads = MAX_NUM_THREADS;
#else
max_num_threads = omp_get_max_threads();
if(dstSize.height / max_num_threads < 2)
{
max_num_threads = 1;
}
#endif
#endif
// checking supported dtypes
if (!(ippDataType==ipp8u ||
ippDataType==ipp16u ||
ippDataType==ipp16s ||
ippDataType==ipp32f))
{
status = ippStsDataTypeErr;
check_sts(status);
}
int sizeof_src;
status = get_sizeof(ippDataType, &sizeof_src);
check_sts(status);
srcStep = numChannels * img_width * sizeof_src;
    dstStep = numChannels * dst_width * sizeof_src;
pBorderValue[0] = (Ipp64f)ippBorderValue;
pBorderValue[1] = (Ipp64f)ippBorderValue;
pBorderValue[2] = (Ipp64f)ippBorderValue;
pBorderValue[3] = (Ipp64f)ippBorderValue;
// Spec and init buffer sizes
status = ippiWarpAffineGetSize(srcSize, dstSize, ippDataType,
(double(*)[3])coeffs, interpolation, direction,
ippBorderType, &specSize, &initSize);
check_sts(status);
pInitBuf = ippsMalloc_8u(initSize);
if (pInitBuf == NULL)
{
status = ippStsNoMemErr;
check_sts(status);
}
pSpec = (IppiWarpSpec*)ippsMalloc_8u(specSize);
if (pSpec == NULL)
{
status = ippStsMemAllocErr;
check_sts(status);
}
// Filter initialization
switch (interpolation)
{
case ippCubic:
{
status = ippiWarpAffineCubicInit(srcSize, dstSize, ippDataType,
(double(*)[3])coeffs, direction, numChannels,
valueB, valueC, ippBorderType, pBorderValue,
0, pSpec, pInitBuf);
break;
}
case ippNearest:
{
status = ippiWarpAffineNearestInit(srcSize, dstSize, ippDataType,
(double(*)[3])coeffs, direction,
numChannels, ippBorderType,
pBorderValue, 0, pSpec);
break;
}
case ippLinear:
{
status = ippiWarpAffineLinearInit(srcSize, dstSize, ippDataType,
(double(*)[3])coeffs, direction,
numChannels, ippBorderType, pBorderValue,
0, pSpec);
break;
}
default:
{
status = ippStsErr;
}
}
check_sts(status);
#ifdef USE_OPENMP
if (max_num_threads != 1)
{
// General transform function
// Parallelized only by Y-direction here
#pragma omp parallel num_threads(max_num_threads)
{
#pragma omp master
{
numThreads = omp_get_num_threads();
pStatus = (IppStatus*)ippsMalloc_8u(sizeof(IppStatus) * numThreads);
if (pStatus == NULL)
{
status = ippStsMemAllocErr;
}
if(status == ippStsNoErr)
{
                for (int i = 0; i < numThreads; ++i) pStatus[i] = ippStsNoErr;
// ippSetNumThreads(numThreads);
slice = dstSize.height / numThreads;
tail = dstSize.height % numThreads;
dstTileSize.width = dstSize.width;
dstTileSize.height = slice;
dstLastTileSize.width = dstSize.width;
dstLastTileSize.height = slice + tail;
status = ippiWarpGetBufferSize(pSpec, dstTileSize, &bufSize1);
if (status == ippStsNoErr) status = ippiWarpGetBufferSize(pSpec, dstLastTileSize, &bufSize2);
if (status == ippStsNoErr)
{
pBuffer = ippsMalloc_8u(bufSize1 * (numThreads - 1) + bufSize2);
if (pBuffer == NULL) status = ippStsMemAllocErr;
}
}
}
#pragma omp barrier
{
if (pBuffer)
{
// ippSetNumThreads(1);
Ipp32u i;
void * pDstT = NULL;
Ipp8u * pOneBuf = NULL;
i = omp_get_thread_num();
IppiPoint dstOffset = { 0, 0 };
IppiSize srcSizeT = srcSize;
IppiSize dstSizeT = dstTileSize;
dstSizeT.height = slice;
dstOffset.y += i * slice;
if (i == numThreads - 1) dstSizeT = dstLastTileSize;
pDstT = (void*)((Ipp8u*)pDst + dstOffset.y * (Ipp32s)dstStep);
if(status == ippStsNoErr)
{
pOneBuf = pBuffer + i * bufSize1;
pStatus[i] = _ippiWarpAffine_interpolation(ippDataType, interpolation,
numChannels, pSrc, srcStep, pDstT, dstStep, dstOffset, dstSizeT,
pSpec, pOneBuf);
}
}
}
}
// checking status for pBuffer allocation
// and ippDataType checking in switch case
// for getting pDstT
check_sts(status);
// Checking status for tiles
for (Ipp32u i = 0; i < numThreads; ++i)
{
status = pStatus[i];
check_sts(status);
}
}
else
{
#endif
status = ippiWarpGetBufferSize(pSpec, dstSize, &bufSize);
check_sts(status);
pBuffer = ippsMalloc_8u(bufSize);
if (pBuffer == NULL)
{
status = ippStsMemAllocErr;
check_sts(status);
}
status = _ippiWarpAffine_interpolation(ippDataType, interpolation, numChannels,
pSrc, srcStep, pDst, dstStep, dstOffset, dstSize, pSpec, pBuffer);
#ifdef USE_OPENMP
}
#endif
EXIT_FUNC
ippsFree(pInitBuf);
ippsFree(pSpec);
ippsFree(pBuffer);
#ifdef USE_OPENMP
ippFree(pStatus);
#endif
return status;
}
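////////////////////////////////////////////////////////////////////////////////////////
//
// Illustrative usage sketch (an assumption, not part of scikit-ipp): how
// own_Warp might be called for a single-channel 8-bit affine warp. The buffer
// pointers, image sizes and coefficient values below are hypothetical.
//
////////////////////////////////////////////////////////////////////////////////////////
#if 0
static IppStatus example_own_Warp_call(Ipp8u * pSrc, Ipp8u * pDst)
{
    // 2x3 affine matrix stored row-major as 6 doubles (identity transform here)
    double coeffs[6] = { 1.0, 0.0, 0.0,
                         0.0, 1.0, 0.0 };
    return own_Warp(ipp8u, pSrc, pDst,
                    640, 480,          // source width / height
                    640, 480,          // destination width / height
                    1,                 // numChannels
                    coeffs,
                    ippLinear,         // interpolation
                    ippWarpForward,    // direction
                    ippBorderConst,    // border handling
                    0.0);              // border value
}
#endif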
|
tinyexr.h
|
/*
Copyright (c) 2014 - 2019, Syoyo Fujita and many contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
#ifndef TINYEXR_H_
#define TINYEXR_H_
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // guess stdint.h is available(C99)
#ifdef __cplusplus
extern "C" {
#endif
// Use embedded miniz or not to decode ZIP format pixel. Linking with zlib
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
typedef struct _EXRVersion {
int version; // this must be 2
int tiled; // tile format image
int long_name; // long name attribute
int non_image; // deep image(EXR 2.0)
int multipart; // multi-part(EXR 2.0)
} EXRVersion;
typedef struct _EXRAttribute {
char name[256]; // name and type are up to 255 chars long.
char type[256];
unsigned char *value; // uint8_t*
int size;
int pad0;
} EXRAttribute;
typedef struct _EXRChannelInfo {
char name[256]; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} EXRChannelInfo;
typedef struct _EXRTile {
int offset_x;
int offset_y;
int level_x;
int level_y;
int width; // actual width in a tile.
  int height; // actual height in a tile.
unsigned char **images; // image[channels][pixels]
} EXRTile;
typedef struct _EXRHeader {
float pixel_aspect_ratio;
int line_order;
int data_window[4];
int display_window[4];
float screen_window_center[2];
float screen_window_width;
int chunk_count;
// Properties for tiled format(`tiledesc`).
int tiled;
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
int long_name;
int non_image;
int multipart;
unsigned int header_len;
  // Custom attributes (excludes required attributes, e.g. `channels`,
  // `compression`, etc.)
int num_custom_attributes;
EXRAttribute *custom_attributes; // array of EXRAttribute. size =
// `num_custom_attributes`.
EXRChannelInfo *channels; // [num_channels]
int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
// each channel. This is overwritten with `requested_pixel_types` when
// loading.
int num_channels;
int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*)
int *requested_pixel_types; // Filled initially by
                              // ParseEXRHeaderFrom(Memory|File), then users
// can edit it(only valid for HALF pixel type
// channel)
} EXRHeader;
typedef struct _EXRMultiPartHeader {
int num_headers;
EXRHeader *headers;
} EXRMultiPartHeader;
typedef struct _EXRImage {
EXRTile *tiles; // Tiled pixel data. The application must reconstruct image
// from tiles manually. NULL if scanline format.
unsigned char **images; // image[channels][pixels]. NULL if tiled format.
int width;
int height;
int num_channels;
// Properties for tile format.
int num_tiles;
} EXRImage;
typedef struct _EXRMultiPartImage {
int num_images;
EXRImage *images;
} EXRMultiPartImage;
typedef struct _DeepImage {
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
int num_channels;
int width;
int height;
int pad0;
} DeepImage;
// @deprecated { to be removed. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
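// Illustrative usage sketch (not part of the TinyEXR API surface); "input.exr"
// is a hypothetical file name:
//
//   float *rgba = NULL;
//   int w, h;
//   const char *err = NULL;
//   if (LoadEXR(&rgba, &w, &h, "input.exr", &err) != TINYEXR_SUCCESS) {
//     if (err) { fprintf(stderr, "%s\n", err); FreeEXRErrorMessage(err); }
//   } else {
//     // ... use rgba (w * h * 4 floats, RGBA interleaved) ...
//     free(rgba);
//   }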
// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// Checks whether the given file is an EXR file (by inspecting the header only).
// @return TINYEXR_SUCCESS for an EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others
extern int IsEXR(const char *filename);
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Use ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename, const char **err);
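// Illustrative usage sketch (not part of the TinyEXR API surface); `rgba`, `w`
// and `h` are assumed to describe an interleaved float RGBA buffer:
//
//   const char *err = NULL;
//   int ret = SaveEXR(rgba, w, h, 4 /* RGBA */, 1 /* save as fp16 */,
//                     "out.exr", &err);
//   if (ret != TINYEXR_SUCCESS && err) {
//     fprintf(stderr, "%s\n", err);
//     FreeEXRErrorMessage(err);
//   }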
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);
// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);
// Frees error message
extern void FreeEXRErrorMessage(const char *msg);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from memory and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from memory and initialize `EXRHeader*`
// array
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// Application must setup `ParseEXRHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
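// Illustrative workflow sketch (not part of the TinyEXR API surface);
// "input.exr" is a hypothetical file name:
//
//   EXRVersion version;
//   EXRHeader header;
//   EXRImage image;
//   const char *err = NULL;
//   InitEXRHeader(&header);
//   InitEXRImage(&image);
//   if (ParseEXRVersionFromFile(&version, "input.exr") == TINYEXR_SUCCESS &&
//       ParseEXRHeaderFromFile(&header, &version, "input.exr", &err) ==
//           TINYEXR_SUCCESS &&
//       LoadEXRImageFromFile(&image, &header, "input.exr", &err) ==
//           TINYEXR_SUCCESS) {
//     // header.num_channels planes of image.width x image.height pixels are
//     // available in image.images[c] (per-channel, non-interleaved)
//   }
//   if (err) FreeEXRErrorMessage(err);
//   FreeEXRImage(&image);
//   FreeEXRHeader(&header);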
// Loads single-part OpenEXR image from memory.
// Application must setup `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// Application must setup `ParseEXRMultipartHeaderFromFile` before calling this
// function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from memory.
// Application must setup `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to memory.
// Image is compressed using EXRImage.compression value.
// Returns the number of bytes on success.
// Returns zero and may set an error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEIFNED
#define TINYEXR_IMPLEMENTATION_DEIFNED
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
//#include <iostream> // debug
#include <limits>
#include <string>
#include <vector>
#if __cplusplus > 199711L
// C++11
#include <cstdint>
#endif // __cplusplus > 199711L
#ifdef _OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#include "zfp.h"
#endif
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
#if TINYEXR_USE_MINIZ
namespace miniz {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"
#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif
#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#if __has_warning("-Wtautological-constant-compare")
#pragma clang diagnostic ignored "-Wtautological-constant-compare"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
reading/writing/appending, PNG writing
See "unlicense" statement at the end of this file.
Rich Geldreich <[email protected]>, last updated Oct. 13, 2013
Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
http://www.ietf.org/rfc/rfc1951.txt
Most API's defined in miniz.c are optional. For example, to disable the
archive related functions just define
MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO
(see the list below for more macros).
* Change History
10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
release with Zip64 support (almost there!):
- Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
(thanks [email protected]) which could cause locate files to not find
files. This bug
would only have occurred in earlier versions if you explicitly used this
flag, OR if you used mz_zip_extract_archive_file_to_heap() or
mz_zip_add_mem_to_archive_file_in_place()
(which used this flag). If you can't switch to v1.15 but want to fix
this bug, just remove the uses of this flag from both helper funcs (and of
course don't use the flag).
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
pUser_read_buf is not NULL and compressed size is > uncompressed size
- Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
compressed data from directory entries, to account for weird zipfiles which
contain zero-size compressed data on dir entries.
Hopefully this fix won't cause any issues on weird zip archives,
because it assumes the low 16-bits of zip external attributes are DOS
attributes (which I believe they always are in practice).
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
internal attributes, just the filename and external attributes
- mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
- Added cmake support for Linux builds which builds all the examples,
tested with clang v3.3 and gcc v4.6.
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
- Merged MZ_FORCEINLINE fix from hdeanclark
- Fix <time.h> include before config #ifdef, thanks emil.brink
- Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
(super useful for OpenGL apps), and explicit control over the compression
level (so you can
set it to 1 for real-time compression).
- Merged in some compiler fixes from paulharris's github repo.
- Retested this build under Windows (VS 2010, including static analysis),
tcc 0.9.26, gcc v4.6 and clang v3.3.
- Added example6.c, which dumps an image of the mandelbrot set to a PNG
file.
- Modified example2 to help test the
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
possible src file fclose() leak if alignment bytes+local header file write
failed
- In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader():
Was pushing the wrong central dir header offset, appears harmless in this
release, but it became a problem in the zip64 branch
5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE,
#include <time.h> (thanks fermtect).
5/19/12 v1.13 - From [email protected] and [email protected] - Fix
mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
- Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
re-ran a randomized regression test on ~500k files.
- Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
(static analysis) option and fixed all warnings (except for the silly
"Use of the comma-operator in a tested expression.." analysis warning,
which I purposely use to work around a MSVC compiler warning).
- Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
tested Linux executables. The codeblocks workspace is compatible with
Linux+Win32/x64.
- Added miniz_tester solution/project, which is a useful little app
derived from LZHAM's tester app that I use as part of the regression test.
- Ran miniz.c and tinfl.c through another series of regression testing on
~500,000 files and archives.
- Modified example5.c so it purposely disables a bunch of high-level
functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
MINIZ_NO_STDIO bug report.)
- Fix ftell() usage in examples so they exit with an error on files which
are too large (a limitation of the examples, not miniz itself).
4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple
minor level_and_flags issues in the archive API's.
level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce
Dawson <[email protected]> for the feedback/bug report.
5/28/11 v1.11 - Added statement from unlicense.org
5/27/11 v1.10 - Substantial compressor optimizations:
- Level 1 is now ~4x faster than before. The L1 compressor's throughput
now varies between 70-110MB/sec. on a
- Core i7 (actual throughput varies depending on the type of data, and x64
vs. x86).
- Improved baseline L2-L9 compression perf. Also, greatly improved
compression perf. issues on some file types.
- Refactored the compression code for better readability and
maintainability.
- Added level 10 compression level (L10 has slightly better ratio than
level 9, but could have a potentially large
drop in throughput on some files).
5/15/11 v1.09 - Initial stable release.
* Low-level Deflate/Inflate implementation notes:
Compression: Use the "tdefl" API's. The compressor supports raw, static,
and dynamic blocks, lazy or
greedy parsing, match length filtering, RLE-only, and Huffman-only streams.
It performs and compresses
approximately as well as zlib.
Decompression: Use the "tinfl" API's. The entire decompressor is
implemented as a single function
coroutine: see tinfl_decompress(). It supports decompression into a 32KB
(or larger power of 2) wrapping buffer, or into a memory
block large enough to hold the entire file.
The low-level tdefl/tinfl API's do not make any use of dynamic memory
allocation.
* zlib-style API notes:
miniz.c implements a fairly large subset of zlib. There's enough
functionality present for it to be a drop-in
zlib replacement in many apps:
The z_stream struct, optional memory allocation callbacks
deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
inflateInit/inflateInit2/inflate/inflateEnd
compress, compress2, compressBound, uncompress
CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
routines.
Supports raw deflate streams or standard zlib streams with adler-32
checking.
Limitations:
The callback API's are not implemented yet. No support for gzip headers or
zlib static dictionaries.
I've tried to closely emulate zlib's various flavors of stream flushing
and return status codes, but
there are no guarantees that miniz.c pulls this off perfectly.
* PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
originally written by
Alex Evans. Supports 1-4 bytes/pixel images.
* ZIP archive API notes:
The ZIP archive API's were designed with simplicity and efficiency in
mind, with just enough abstraction to
get the job done with minimal fuss. There are simple API's to retrieve file
information, read files from
existing archives, create new archives, append new files to existing
archives, or clone archive data from
one archive to another. It supports archives located in memory or the heap,
on disk (using stdio.h),
or you can specify custom file read/write callbacks.
- Archive reading: Just call this function to read a single file from a
disk archive:
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const
char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
For more complex cases, use the "mz_zip_reader" functions. Upon opening an
archive, the entire central
directory is located and read as-is into memory, and subsequent file access
only occurs when reading individual files.
- Archive file scanning: The simple way is to use this function to scan a
loaded archive for a specific file:
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
The locate operation can optionally check file comments too, which (as one
example) can be used to identify
multiple versions of the same file in an archive. This function uses a
simple linear search through the central
directory, so it's not very fast.
Alternately, you can iterate through all the files in an archive (using
mz_zip_reader_get_num_files()) and
retrieve detailed info on each file by calling mz_zip_reader_file_stat().
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
immediately writes compressed file data
to disk and builds an exact image of the central directory in memory. The
central directory image is written
all at once at the end of the archive file when the archive is finalized.
The archive writer can optionally align each file's local header and file
data to any power of 2 alignment,
which can be useful when the archive will be read from optical media. Also,
the writer supports placing
arbitrary data blobs at the very beginning of ZIP archives. Archives
written using either feature are still
readable by any ZIP tool.
- Archive appending: The simple way to add a single file to an archive is
to call this function:
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename,
const char *pArchive_name,
const void *pBuf, size_t buf_size, const void *pComment, mz_uint16
comment_size, mz_uint level_and_flags);
The archive will be created if it doesn't already exist, otherwise it'll be
appended to.
Note the appending is done in-place and is not an atomic operation, so if
something goes wrong
during the operation it's possible the archive could be left without a
central directory (although the local
file headers and file data will be fine, so the archive will be
recoverable).
For more complex archive modification scenarios:
1. The safest way is to use a mz_zip_reader to read the existing archive,
cloning only those bits you want to
preserve into a new archive using the
mz_zip_writer_add_from_zip_reader() function (which copies the
compressed file data as-is). When you're done, delete the old archive and
rename the newly written archive, and
you're done. This is safe but requires a bunch of temporary disk space or
heap memory.
2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
mz_zip_writer_init_from_reader(),
append new files as needed, then finalize the archive which will write an
updated central directory to the
original archive. (This is basically what
mz_zip_add_mem_to_archive_file_in_place() does.) There's a
possibility that the archive's central directory could be lost with this
method if anything goes wrong, though.
- ZIP archive support limitations:
No zip64 or spanning support. Extraction functions can only handle
unencrypted, stored or deflated files.
Requires streams capable of seeking.
* This is a header file library, like stb_image.c. To get only a header file,
either cut and paste the
below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then
include miniz.c from it.
* Important: For best perf. be sure to customize the below macros for your
target platform:
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_LITTLE_ENDIAN 1
#define MINIZ_HAS_64BIT_REGISTERS 1
* On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before
including miniz.c to ensure miniz
uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be
able to process large files
(i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/
#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED
//#include <stdlib.h>
// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.
// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO
// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
#define MINIZ_NO_TIME
// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS
// Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing related ZIP
// archive API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS
// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC
#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h'" when compiling with
// tcc on Linux
#define MINIZ_NO_TIME
#endif
#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
0 // disable to suppress compiler warnings
#endif
#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \
defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API Definitions.
// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;
// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_MALLOC macro) to release a block allocated from
// the heap.
void mz_free(void *p);
#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
// Compression strategies.
enum {
MZ_DEFAULT_STRATEGY = 0,
MZ_FILTERED = 1,
MZ_HUFFMAN_ONLY = 2,
MZ_RLE = 3,
MZ_FIXED = 4
};
// Method
#define MZ_DEFLATED 8
#ifndef MINIZ_NO_ZLIB_APIS
// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
size_t size);
#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0
// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
MZ_NO_FLUSH = 0,
MZ_PARTIAL_FLUSH = 1,
MZ_SYNC_FLUSH = 2,
MZ_FULL_FLUSH = 3,
MZ_FINISH = 4,
MZ_BLOCK = 5
};
// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
MZ_OK = 0,
MZ_STREAM_END = 1,
MZ_NEED_DICT = 2,
MZ_ERRNO = -1,
MZ_STREAM_ERROR = -2,
MZ_DATA_ERROR = -3,
MZ_MEM_ERROR = -4,
MZ_BUF_ERROR = -5,
MZ_VERSION_ERROR = -6,
MZ_PARAM_ERROR = -10000
};
// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
MZ_NO_COMPRESSION = 0,
MZ_BEST_SPEED = 1,
MZ_BEST_COMPRESSION = 9,
MZ_UBER_COMPRESSION = 10,
MZ_DEFAULT_LEVEL = 6,
MZ_DEFAULT_COMPRESSION = -1
};
// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15
struct mz_internal_state;
// Compression/decompression stream struct.
typedef struct mz_stream_s {
const unsigned char *next_in; // pointer to next byte to read
unsigned int avail_in; // number of bytes available at next_in
mz_ulong total_in; // total number of bytes consumed so far
unsigned char *next_out; // pointer to next byte to write
unsigned int avail_out; // number of bytes that can be written to next_out
mz_ulong total_out; // total number of bytes produced so far
char *msg; // error msg (unused)
struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
mz_alloc_func
zalloc; // optional heap allocation function (defaults to malloc)
mz_free_func zfree; // optional heap free function (defaults to free)
void *opaque; // heap alloc function user pointer
int data_type; // data_type (unused)
mz_ulong adler; // adler32 of the source or uncompressed data
mz_ulong reserved; // not used
} mz_stream;
typedef mz_stream *mz_streamp;
// Returns the version string of miniz.c.
const char *mz_version(void);
// mz_deflateInit() initializes a compressor with default options:
// Parameters:
// pStream must point to an initialized mz_stream struct.
// level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
// level 1 enables a specially optimized compression function that's been
// optimized purely for performance, not ratio.
// (This special func. is currently only enabled when
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if the input parameters are bogus.
// MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);
// mz_deflateInit2() is like mz_deflateInit(), except with more control:
// Additional parameters:
// method must be MZ_DEFLATED
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
// header or footer)
// mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy);
// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);
// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
// MZ_FINISH.
// Return values:
// MZ_OK on success (when flushing, or if more input is needed but not
// available, and/or there's more output to be written but the output buffer
// is full).
// MZ_STREAM_END if all input has been consumed and all output bytes have been
// written. Don't call mz_deflate() on the stream anymore.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input and/or
// output buffers are empty. (Fill up the input buffer or free up some output
// space and try again.)
int mz_deflate(mz_streamp pStream, int flush);
// mz_deflateEnd() deinitializes a compressor:
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);
// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level);
// mz_compressBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);
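// Illustrative sketch (an assumption, not part of miniz itself): single-call
// compression of a memory buffer; `src` and `src_len` are assumed inputs.
//
//   mz_ulong bound = mz_compressBound(src_len);
//   unsigned char *dst = (unsigned char *)malloc(bound);
//   mz_ulong dst_len = bound;
//   int status = mz_compress(dst, &dst_len, src, src_len);
//   // on MZ_OK, dst holds dst_len bytes of zlib-wrapped deflate data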
// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);
// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);
// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
// On the first call, if flush is MZ_FINISH it's assumed the input and output
// buffers are both sized large enough to decompress the entire stream in a
// single call (this is slightly faster).
// MZ_FINISH implies that there are no more source bytes available beside
// what's already in the input buffer, and that the output buffer is large
// enough to hold the rest of the decompressed data.
// Return values:
// MZ_OK on success. Either more input is needed but not available, and/or
// there's more output to be written but the output buffer is full.
// MZ_STREAM_END if all needed input has been consumed and all output bytes
// have been written. For zlib streams, the adler-32 of the decompressed data
// has also been verified.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_DATA_ERROR if the deflate stream is invalid.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input buffer is
// empty but the inflater needs more input to continue, or if the output
// buffer is not large enough. Call mz_inflate() again
// with more input data, or with more room in the output buffer (except when
// using single call decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);
// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);
// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
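// Illustrative sketch (an assumption, not part of miniz itself): single-call
// decompression; the original uncompressed size `orig_len` is assumed to be
// known by the caller.
//
//   unsigned char *out = (unsigned char *)malloc(orig_len);
//   mz_ulong out_len = orig_len;
//   int status = mz_uncompress(out, &out_len, dst, dst_len);
//   // on MZ_OK, out holds out_len decompressed bytes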
// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Types and macros
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;
#define MZ_FALSE (0)
#define MZ_TRUE (1)
// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif
// ------------------- ZIP archive reading/writing
#ifndef MINIZ_NO_ARCHIVE_APIS
enum {
MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};
typedef struct {
mz_uint32 m_file_index;
mz_uint32 m_central_dir_ofs;
mz_uint16 m_version_made_by;
mz_uint16 m_version_needed;
mz_uint16 m_bit_flag;
mz_uint16 m_method;
#ifndef MINIZ_NO_TIME
time_t m_time;
#endif
mz_uint32 m_crc32;
mz_uint64 m_comp_size;
mz_uint64 m_uncomp_size;
mz_uint16 m_internal_attr;
mz_uint32 m_external_attr;
mz_uint64 m_local_header_ofs;
mz_uint32 m_comment_size;
char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;
typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n);
struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;
typedef enum {
MZ_ZIP_MODE_INVALID = 0,
MZ_ZIP_MODE_READING = 1,
MZ_ZIP_MODE_WRITING = 2,
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;
typedef struct mz_zip_archive_tag {
mz_uint64 m_archive_size;
mz_uint64 m_central_directory_file_ofs;
mz_uint m_total_files;
mz_zip_mode m_zip_mode;
mz_uint m_file_offset_alignment;
mz_alloc_func m_pAlloc;
mz_free_func m_pFree;
mz_realloc_func m_pRealloc;
void *m_pAlloc_opaque;
mz_file_read_func m_pRead;
mz_file_write_func m_pWrite;
void *m_pIO_opaque;
mz_zip_internal_state *m_pState;
} mz_zip_archive;
typedef enum {
MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;
// ZIP archive reading
// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags);
#endif
// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);
// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat);
// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index);
// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size);
// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags);
// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags);
// Extracts an archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags);
#endif
// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
// ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning);
#endif
// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename);
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash and an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32);
#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size, mz_uint level_and_flags);
#endif
// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index);
// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize);
// Ends archive writing, freeing all allocations, and closing the output file if
// mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
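// Illustrative usage sketch (not part of the miniz API itself): create an
// archive on disk, add one member from memory, finalize, then end the writer.
// The names "out.zip" and "hello.txt" are hypothetical placeholders.
#if 0
static mz_bool example_write_archive(void) {
  mz_zip_archive zip;
  const char *pText = "hello, zip";
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_writer_init_file(&zip, "out.zip", 0)) return MZ_FALSE;
  if (!mz_zip_writer_add_mem(&zip, "hello.txt", pText, strlen(pText),
                             (mz_uint)MZ_DEFAULT_COMPRESSION)) {
    mz_zip_writer_end(&zip);
    return MZ_FALSE;
  }
  // The archive only becomes readable once it has been finalized.
  mz_zip_writer_finalize_archive(&zip);
  return mz_zip_writer_end(&zip);
}
#endif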
// Misc. high-level helper functions:
// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags);
// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
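// Illustrative usage sketch (not part of the miniz API itself): append a blob
// to (or create) an archive in place, then read it back. The filenames
// "log.zip" and "events.txt" are hypothetical placeholders.
#if 0
static void example_in_place_append(void) {
  const char *pMsg = "event 42";
  size_t size = 0;
  void *pData;
  mz_zip_add_mem_to_archive_file_in_place("log.zip", "events.txt", pMsg,
                                          strlen(pMsg), NULL, 0,
                                          MZ_BEST_COMPRESSION);
  pData =
      mz_zip_extract_archive_file_to_heap("log.zip", "events.txt", &size, 0);
  // pData is NULL on failure; mz_free(NULL) is harmless.
  mz_free(pData);
}
#endif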
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
// ------------------- Low-level Decompression API Definitions
// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer is
// at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
TINFL_FLAG_HAS_MORE_INPUT = 2,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
TINFL_FLAG_COMPUTE_ADLER32 = 8
};
// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
// to decompress.
// On return:
// Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size, which will typically
// be larger than src_buf_len.
// The caller must call mz_free() on the returned block when it's no longer
// needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
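// Illustrative usage sketch (not part of the miniz API itself): inflate a
// complete raw deflate stream held in memory. pDeflate and deflate_len stand
// in for caller-supplied data; pass TINFL_FLAG_PARSE_ZLIB_HEADER instead of 0
// for zlib-framed input.
#if 0
static void *example_inflate_to_heap(const void *pDeflate, size_t deflate_len,
                                     size_t *pOut_len) {
  return tinfl_decompress_mem_to_heap(pDeflate, deflate_len, pOut_len, 0);
}
#endif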
// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block
// in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes
// written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called to
// flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
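// Illustrative usage sketch (not part of the miniz API itself): stream the
// decompressed output straight to a FILE* through the callback interface.
// Assumes <stdio.h>; pDeflate/deflate_len are caller-supplied placeholders.
#if 0
static int example_put_buf(const void *pBuf, int len, void *pUser) {
  return fwrite(pBuf, 1, (size_t)len, (FILE *)pUser) == (size_t)len;
}
static int example_inflate_to_file(const void *pDeflate, size_t deflate_len,
                                   FILE *pOut_file) {
  size_t in_size = deflate_len;
  return tinfl_decompress_mem_to_callback(pDeflate, &in_size, example_put_buf,
                                          pOut_file, 0);
}
#endif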
struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;
// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768
// Return status.
typedef enum {
TINFL_STATUS_BAD_PARAM = -3,
TINFL_STATUS_ADLER32_MISMATCH = -2,
TINFL_STATUS_FAILED = -1,
TINFL_STATUS_DONE = 0,
TINFL_STATUS_NEEDS_MORE_INPUT = 1,
TINFL_STATUS_HAS_MORE_OUTPUT = 2
} tinfl_status;
// Initializes the decompressor to its initial state.
#define tinfl_init(r) \
do { \
(r)->m_state = 0; \
} \
MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32
// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build any
// desired higher level decompression API. In the limit case, it can be called
// once per byte of input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags);
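// Illustrative streaming sketch (not part of the miniz API itself): decode a
// fully-buffered raw deflate stream through a TINFL_LZ_DICT_SIZE circular
// window, handing each produced chunk to a hypothetical consume() callback.
// This mirrors what the higher-level helpers do internally.
#if 0
static tinfl_status example_stream_inflate(const mz_uint8 *pIn, size_t in_len,
                                           void (*consume)(const mz_uint8 *,
                                                           size_t)) {
  tinfl_decompressor inflator;
  mz_uint8 dict[TINFL_LZ_DICT_SIZE];
  size_t in_ofs = 0, dict_ofs = 0;
  tinfl_status status;
  tinfl_init(&inflator);
  do {
    size_t in_size = in_len - in_ofs;
    size_t out_size = TINFL_LZ_DICT_SIZE - dict_ofs;
    // All of the input is already present, so TINFL_FLAG_HAS_MORE_INPUT is
    // not set.
    status = tinfl_decompress(&inflator, pIn + in_ofs, &in_size, dict,
                              dict + dict_ofs, &out_size, 0);
    in_ofs += in_size;
    consume(dict + dict_ofs, out_size);
    dict_ofs = (dict_ofs + out_size) & (TINFL_LZ_DICT_SIZE - 1);
  } while (status == TINFL_STATUS_HAS_MORE_OUTPUT);
  return status; // TINFL_STATUS_DONE on success.
}
#endif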
// Internal/private bits follow.
enum {
TINFL_MAX_HUFF_TABLES = 3,
TINFL_MAX_HUFF_SYMBOLS_0 = 288,
TINFL_MAX_HUFF_SYMBOLS_1 = 32,
TINFL_MAX_HUFF_SYMBOLS_2 = 19,
TINFL_FAST_LOOKUP_BITS = 10,
TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};
typedef struct {
mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;
#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
struct tinfl_decompressor_tag {
mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
m_check_adler32, m_dist, m_counter, m_num_extra,
m_table_sizes[TINFL_MAX_HUFF_TABLES];
tinfl_bit_buf_t m_bit_buf;
size_t m_dist_from_out_buf_start;
tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
mz_uint8 m_raw_header[4],
m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
// ------------------- Low-level Compression API Definitions
// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0
// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum {
TDEFL_HUFFMAN_ONLY = 0,
TDEFL_DEFAULT_MAX_PROBES = 128,
TDEFL_MAX_PROBES_MASK = 0xFFF
};
// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
TDEFL_WRITE_ZLIB_HEADER = 0x01000,
TDEFL_COMPUTE_ADLER32 = 0x02000,
TDEFL_GREEDY_PARSING_FLAG = 0x04000,
TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
TDEFL_RLE_MATCHES = 0x10000,
TDEFL_FILTER_MATCHES = 0x20000,
TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};
// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
// flags: The max match finder probes (default is 128) logically OR'd with the
// above flags. More probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
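// Illustrative usage sketch (not part of the miniz API itself): compress a
// memory block to a heap-allocated zlib stream using the default probe count.
// pSrc/src_len are caller-supplied placeholders.
#if 0
static void *example_deflate_to_heap(const void *pSrc, size_t src_len,
                                     size_t *pComp_len) {
  return tdefl_compress_mem_to_heap(
      pSrc, src_len, pComp_len,
      TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
}
#endif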
// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10]; use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc., or MZ_DEFAULT_LEVEL as a reasonable default.
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out);
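// Illustrative usage sketch (not part of the miniz API itself): encode a tiny
// 2x2 RGB image (3 channels, tightly packed) to an in-memory PNG. The pixel
// values are placeholders.
#if 0
static void example_write_png(void) {
  static const mz_uint8 pixels[2 * 2 * 3] = {255, 0, 0,   0,   255, 0,
                                             0,   0, 255, 255, 255, 255};
  size_t png_len = 0;
  void *pPng =
      tdefl_write_image_to_png_file_in_memory(pixels, 2, 2, 3, &png_len);
  // ... write the png_len bytes at pPng to disk ...
  mz_free(pPng);
}
#endif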
// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called with TDEFL_OUT_BUF_SIZE bytes at
// a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
void *pUser);
// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
enum {
TDEFL_MAX_HUFF_TABLES = 3,
TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
TDEFL_LZ_DICT_SIZE = 32768,
TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
TDEFL_MIN_MATCH_LEN = 3,
TDEFL_MAX_MATCH_LEN = 258
};
// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 12,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 15,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif
// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
typedef enum {
TDEFL_STATUS_BAD_PARAM = -2,
TDEFL_STATUS_PUT_BUF_FAILED = -1,
TDEFL_STATUS_OKAY = 0,
TDEFL_STATUS_DONE = 1
} tdefl_status;
// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
TDEFL_NO_FLUSH = 0,
TDEFL_SYNC_FLUSH = 2,
TDEFL_FULL_FLUSH = 3,
TDEFL_FINISH = 4
} tdefl_flush;
// tdefl's compression state structure.
typedef struct {
tdefl_put_buf_func_ptr m_pPut_buf_func;
void *m_pPut_buf_user;
mz_uint m_flags, m_max_probes[2];
int m_greedy_parsing;
mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
m_bit_buffer;
mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
m_wants_to_finish;
tdefl_status m_prev_return_status;
const void *m_pIn_buf;
void *m_pOut_buf;
size_t *m_pIn_buf_size, *m_pOut_buf_size;
tdefl_flush m_flush;
const mz_uint8 *m_pSrc;
size_t m_src_buf_left, m_out_buf_ofs;
mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, output data will be supplied to the specified
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pPut_buf_func is NULL, the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush);
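// Illustrative usage sketch (not part of the miniz API itself): one-shot
// compression with a caller-managed compressor state and no output callback.
// Assumes <stdlib.h>; tdefl_compressor is large (several hundred KB), so it
// is heap allocated here. On entry *pDst_len holds the output capacity; on
// success it holds the compressed size.
#if 0
static mz_bool example_tdefl_one_shot(const void *pSrc, size_t src_len,
                                      void *pDst, size_t *pDst_len) {
  tdefl_compressor *pComp =
      (tdefl_compressor *)malloc(sizeof(tdefl_compressor));
  size_t in_size = src_len;
  mz_bool ok = MZ_FALSE;
  if (!pComp) return MZ_FALSE;
  if ((tdefl_init(pComp, NULL, NULL,
                  TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES) ==
       TDEFL_STATUS_OKAY) &&
      (tdefl_compress(pComp, pSrc, &in_size, pDst, pDst_len, TDEFL_FINISH) ==
       TDEFL_STATUS_DONE))
    ok = MZ_TRUE;
  free(pComp);
  return ok;
}
#endif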
// tdefl_compress_buffer() is only usable when the tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush);
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
// tdefl_create_comp_flags_from_zip_params() can't be used if
// MINIZ_NO_ZLIB_APIS is defined, because it uses some of the zlib-style macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy);
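// Illustrative sketch (not part of the miniz API itself): map zlib-style
// parameters (level 9, zlib framing, default strategy) onto tdefl flags.
#if 0
mz_uint example_comp_flags =
    TDEFL_COMPUTE_ADLER32 |
    tdefl_create_comp_flags_from_zip_params(9, MZ_DEFAULT_WINDOW_BITS,
                                            MZ_DEFAULT_STRATEGY);
#endif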
#endif // #ifndef MINIZ_NO_ZLIB_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_INCLUDED
// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
#ifndef MINIZ_HEADER_FILE_ONLY
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
//#include <assert.h>
//#include <string.h>
#define MZ_ASSERT(x) assert(x)
#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif
#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif
#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API's
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
size_t block_len = buf_len % 5552;
if (!ptr) return MZ_ADLER32_INIT;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
return (s2 << 16) + s1;
}
// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
static const mz_uint32 s_crc32[16] = {
0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4,
0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
mz_uint32 crcu32 = (mz_uint32)crc;
if (!ptr) return MZ_CRC32_INIT;
crcu32 = ~crcu32;
while (buf_len--) {
mz_uint8 b = *ptr++;
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
}
return ~crcu32;
}
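// Illustrative sketch (not part of the miniz API itself): both checksums are
// incremental: seed with the *_INIT constant and fold in successive chunks.
// chunk1/chunk2 and their lengths are placeholders.
#if 0
mz_ulong crc = MZ_CRC32_INIT, adler = MZ_ADLER32_INIT;
crc = mz_crc32(crc, chunk1, chunk1_len);
crc = mz_crc32(crc, chunk2, chunk2_len);
adler = mz_adler32(adler, chunk1, chunk1_len);
adler = mz_adler32(adler, chunk2, chunk2_len);
#endif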
void mz_free(void *p) { MZ_FREE(p); }
#ifndef MINIZ_NO_ZLIB_APIS
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
(void)opaque, (void)items, (void)size;
return MZ_MALLOC(items * size);
}
static void def_free_func(void *opaque, void *address) {
(void)opaque, (void)address;
MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
// (void)opaque, (void)address, (void)items, (void)size;
// return MZ_REALLOC(address, items * size);
//}
const char *mz_version(void) { return MZ_VERSION; }
int mz_deflateInit(mz_streamp pStream, int level) {
return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
MZ_DEFAULT_STRATEGY);
}
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy) {
tdefl_compressor *pComp;
mz_uint comp_flags =
TDEFL_COMPUTE_ADLER32 |
tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
if (!pStream) return MZ_STREAM_ERROR;
if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS)))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = MZ_ADLER32_INIT;
pStream->msg = NULL;
pStream->reserved = 0;
pStream->total_in = 0;
pStream->total_out = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
sizeof(tdefl_compressor));
if (!pComp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pComp;
if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
mz_deflateEnd(pStream);
return MZ_PARAM_ERROR;
}
return MZ_OK;
}
int mz_deflateReset(mz_streamp pStream) {
if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
(!pStream->zfree))
return MZ_STREAM_ERROR;
pStream->total_in = pStream->total_out = 0;
tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL,
((tdefl_compressor *)pStream->state)->m_flags);
return MZ_OK;
}
int mz_deflate(mz_streamp pStream, int flush) {
size_t in_bytes, out_bytes;
mz_ulong orig_total_in, orig_total_out;
int mz_status = MZ_OK;
if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
(!pStream->next_out))
return MZ_STREAM_ERROR;
if (!pStream->avail_out) return MZ_BUF_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
TDEFL_STATUS_DONE)
return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
orig_total_in = pStream->total_in;
orig_total_out = pStream->total_out;
for (;;) {
tdefl_status defl_status;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
pStream->next_in, &in_bytes, pStream->next_out,
&out_bytes, (tdefl_flush)flush);
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (defl_status < 0) {
mz_status = MZ_STREAM_ERROR;
break;
} else if (defl_status == TDEFL_STATUS_DONE) {
mz_status = MZ_STREAM_END;
break;
} else if (!pStream->avail_out)
break;
else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
if ((flush) || (pStream->total_in != orig_total_in) ||
(pStream->total_out != orig_total_out))
break;
return MZ_BUF_ERROR; // Can't make forward progress without some input.
}
}
return mz_status;
}
int mz_deflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
(void)pStream;
// This is really over conservative. (And lame, but it's actually pretty
// tricky to compute a true upper bound given the way tdefl's blocking works.)
return MZ_MAX(128 + (source_len * 110) / 100,
128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
}
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level) {
int status;
mz_stream stream;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_deflateInit(&stream, level);
if (status != MZ_OK) return status;
status = mz_deflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_deflateEnd(&stream);
return (status == MZ_OK) ? MZ_BUF_ERROR : status;
}
*pDest_len = stream.total_out;
return mz_deflateEnd(&stream);
}
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
return mz_compress2(pDest, pDest_len, pSource, source_len,
MZ_DEFAULT_COMPRESSION);
}
mz_ulong mz_compressBound(mz_ulong source_len) {
return mz_deflateBound(NULL, source_len);
}
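// Illustrative round-trip sketch (not part of the miniz API itself) using the
// zlib-style one-shot helpers; assumes <stdlib.h>. pSrc/src_len are
// caller-supplied placeholders.
#if 0
static mz_bool example_round_trip(const unsigned char *pSrc,
                                  mz_ulong src_len) {
  mz_ulong comp_len = mz_compressBound(src_len), out_len = src_len;
  unsigned char *pComp = (unsigned char *)malloc(comp_len);
  unsigned char *pOut = (unsigned char *)malloc(src_len);
  mz_bool ok =
      pComp && pOut &&
      (mz_compress2(pComp, &comp_len, pSrc, src_len, MZ_BEST_COMPRESSION) ==
       MZ_OK) &&
      (mz_uncompress(pOut, &out_len, pComp, comp_len) == MZ_OK) &&
      (out_len == src_len);
  free(pComp);
  free(pOut);
  return ok;
}
#endif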
typedef struct {
tinfl_decompressor m_decomp;
mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
int m_window_bits;
mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
tinfl_status m_last_status;
} inflate_state;
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
inflate_state *pDecomp;
if (!pStream) return MZ_STREAM_ERROR;
if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = 0;
pStream->msg = NULL;
pStream->total_in = 0;
pStream->total_out = 0;
pStream->reserved = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
sizeof(inflate_state));
if (!pDecomp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pDecomp;
tinfl_init(&pDecomp->m_decomp);
pDecomp->m_dict_ofs = 0;
pDecomp->m_dict_avail = 0;
pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
pDecomp->m_first_call = 1;
pDecomp->m_has_flushed = 0;
pDecomp->m_window_bits = window_bits;
return MZ_OK;
}
int mz_inflateInit(mz_streamp pStream) {
return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
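// Illustrative sketch (not part of the miniz API itself): window_bits only
// selects the framing here. MZ_DEFAULT_WINDOW_BITS expects a zlib header and
// trailing adler-32, while -MZ_DEFAULT_WINDOW_BITS inflates a raw deflate
// stream.
#if 0
mz_stream example_strm;
memset(&example_strm, 0, sizeof(example_strm));
mz_inflateInit2(&example_strm, -MZ_DEFAULT_WINDOW_BITS); // raw deflate
#endif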
int mz_inflate(mz_streamp pStream, int flush) {
inflate_state *pState;
mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
size_t in_bytes, out_bytes, orig_avail_in;
tinfl_status status;
if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
return MZ_STREAM_ERROR;
pState = (inflate_state *)pStream->state;
if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
orig_avail_in = pStream->avail_in;
first_call = pState->m_first_call;
pState->m_first_call = 0;
if (pState->m_last_status < 0) return MZ_DATA_ERROR;
if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
pState->m_has_flushed |= (flush == MZ_FINISH);
if ((flush == MZ_FINISH) && (first_call)) {
// MZ_FINISH on the first call implies that the input and output buffers are
// large enough to hold the entire compressed/decompressed file.
decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
pStream->next_out, pStream->next_out, &out_bytes,
decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (status < 0)
return MZ_DATA_ERROR;
else if (status != TINFL_STATUS_DONE) {
pState->m_last_status = TINFL_STATUS_FAILED;
return MZ_BUF_ERROR;
}
return MZ_STREAM_END;
}
  // If flush != MZ_FINISH then we must assume there's more input.
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
if (pState->m_dict_avail) {
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
return ((pState->m_last_status == TINFL_STATUS_DONE) &&
(!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
for (;;) {
in_bytes = pStream->avail_in;
out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
status = tinfl_decompress(
&pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pState->m_dict_avail = (mz_uint)out_bytes;
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
if (status < 0)
return MZ_DATA_ERROR; // Stream is corrupted (there could be some
// uncompressed data left in the output dictionary -
// oh well).
else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
// without supplying more input or by setting flush
// to MZ_FINISH.
else if (flush == MZ_FINISH) {
      // The output buffer MUST be large enough to hold the remaining
      // uncompressed data when flush==MZ_FINISH.
if (status == TINFL_STATUS_DONE)
return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
// status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
// at least 1 more byte on the way. If there's no more room left in the
// output buffer then something is wrong.
else if (!pStream->avail_out)
return MZ_BUF_ERROR;
} else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
(!pStream->avail_out) || (pState->m_dict_avail))
break;
}
return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
int mz_inflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
mz_stream stream;
int status;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_inflateInit(&stream);
if (status != MZ_OK) return status;
status = mz_inflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_inflateEnd(&stream);
return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR
: status;
}
*pDest_len = stream.total_out;
return mz_inflateEnd(&stream);
}
const char *mz_error(int err) {
static struct {
int m_err;
const char *m_pDesc;
} s_error_descs[] = {{MZ_OK, ""},
{MZ_STREAM_END, "stream end"},
{MZ_NEED_DICT, "need dictionary"},
{MZ_ERRNO, "file error"},
{MZ_STREAM_ERROR, "stream error"},
{MZ_DATA_ERROR, "data error"},
{MZ_MEM_ERROR, "out of memory"},
{MZ_BUF_ERROR, "buf error"},
{MZ_VERSION_ERROR, "version error"},
{MZ_PARAM_ERROR, "parameter error"}};
mz_uint i;
for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc;
return NULL;
}
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Low-level Decompression (completely independent from all
// compression API's)
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
#define TINFL_CR_BEGIN \
switch (r->m_state) { \
case 0:
#define TINFL_CR_RETURN(state_index, result) \
do { \
status = result; \
r->m_state = state_index; \
goto common_exit; \
case state_index:; \
} \
MZ_MACRO_END
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
do { \
for (;;) { \
TINFL_CR_RETURN(state_index, result); \
} \
} \
MZ_MACRO_END
#define TINFL_CR_FINISH }
// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never reads ahead more than it needs to. Currently
// TINFL_GET_BYTE() pads the end of the stream with 0's in this scenario.
#define TINFL_GET_BYTE(state_index, c) \
do { \
if (pIn_buf_cur >= pIn_buf_end) { \
for (;;) { \
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
if (pIn_buf_cur < pIn_buf_end) { \
c = *pIn_buf_cur++; \
break; \
} \
} else { \
c = 0; \
break; \
} \
} \
} else \
c = *pIn_buf_cur++; \
} \
MZ_MACRO_END
#define TINFL_NEED_BITS(state_index, n) \
do { \
mz_uint c; \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < (mz_uint)(n))
#define TINFL_SKIP_BITS(state_index, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
#define TINFL_GET_BITS(state_index, b, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
b = bit_buf & ((1 << (n)) - 1); \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream that are needed to decode
// the next Huffman code (and absolutely no more). It works by trying to fully
// decode a Huffman code using whatever bits are currently present in the bit
// buffer. If this fails, it reads another byte, and tries again until it
// succeeds or until the bit buffer contains >=15 bits (deflate's max. Huffman
// code size).
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
do { \
temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
if (temp >= 0) { \
code_len = temp >> 9; \
if ((code_len) && (num_bits >= code_len)) break; \
} else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while ((temp < 0) && (num_bits >= (code_len + 1))); \
if (temp >= 0) break; \
} \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < 15);
// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the
// decompressor to never read beyond the final byte of the deflate stream. (In
// other words, when this macro wants to read another byte from the input, it
// REALLY needs another byte in order to fully decode the next Huffman code.)
// Handling this properly is particularly important on raw deflate (non-zlib)
// streams, which aren't followed by a byte-aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
do { \
int temp; \
mz_uint code_len, c; \
if (num_bits < 15) { \
if ((pIn_buf_end - pIn_buf_cur) < 2) { \
TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
} else { \
bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \
(((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
pIn_buf_cur += 2; \
num_bits += 16; \
} \
} \
if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
0) \
code_len = temp >> 9, temp &= 511; \
else { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while (temp < 0); \
} \
sym = temp; \
bit_buf >>= code_len; \
num_bits -= code_len; \
} \
MZ_MACRO_END
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags) {
static const int s_length_base[31] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
4, 4, 5, 5, 5, 5, 0, 0, 0};
static const int s_dist_base[32] = {
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0};
static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
static const mz_uint8 s_length_dezigzag[19] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static const int s_min_table_sizes[3] = {257, 1, 4};
tinfl_status status = TINFL_STATUS_FAILED;
mz_uint32 num_bits, dist, counter, num_extra;
tinfl_bit_buf_t bit_buf;
const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end =
pIn_buf_next + *pIn_buf_size;
mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end =
pOut_buf_next + *pOut_buf_size;
size_t out_buf_size_mask =
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
? (size_t)-1
: ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
dist_from_out_buf_start;
// Ensure the output buffer's size is a power of 2, unless the output buffer
// is large enough to hold the entire output file (in which case it doesn't
// matter).
if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
(pOut_buf_next < pOut_buf_start)) {
*pIn_buf_size = *pOut_buf_size = 0;
return TINFL_STATUS_BAD_PARAM;
}
num_bits = r->m_num_bits;
bit_buf = r->m_bit_buf;
dist = r->m_dist;
counter = r->m_counter;
num_extra = r->m_num_extra;
dist_from_out_buf_start = r->m_dist_from_out_buf_start;
TINFL_CR_BEGIN
bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
r->m_z_adler32 = r->m_check_adler32 = 1;
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_GET_BYTE(1, r->m_zhdr0);
TINFL_GET_BYTE(2, r->m_zhdr1);
counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
(r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
((out_buf_size_mask + 1) <
(size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
if (counter) {
TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
}
}
do {
TINFL_GET_BITS(3, r->m_final, 3);
r->m_type = r->m_final >> 1;
if (r->m_type == 0) {
TINFL_SKIP_BITS(5, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
if (num_bits)
TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
else
TINFL_GET_BYTE(7, r->m_raw_header[counter]);
}
if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
(mz_uint)(0xFFFF ^
(r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
}
while ((counter) && (num_bits)) {
TINFL_GET_BITS(51, dist, 8);
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)dist;
counter--;
}
while (counter) {
size_t n;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
}
while (pIn_buf_cur >= pIn_buf_end) {
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
} else {
TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
}
}
n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
(size_t)(pIn_buf_end - pIn_buf_cur)),
counter);
TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
pIn_buf_cur += n;
pOut_buf_cur += n;
counter -= (mz_uint)n;
}
} else if (r->m_type == 3) {
TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
} else {
if (r->m_type == 1) {
mz_uint8 *p = r->m_tables[0].m_code_size;
mz_uint i;
r->m_table_sizes[0] = 288;
r->m_table_sizes[1] = 32;
TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
} else {
for (counter = 0; counter < 3; counter++) {
TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
r->m_table_sizes[counter] += s_min_table_sizes[counter];
}
MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
mz_uint s;
TINFL_GET_BITS(14, s, 3);
r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
}
r->m_table_sizes[2] = 19;
}
for (; (int)r->m_type >= 0; r->m_type--) {
int tree_next, tree_cur;
tinfl_huff_table *pTable;
mz_uint i, j, used_syms, total, sym_index, next_code[17],
total_syms[16];
pTable = &r->m_tables[r->m_type];
MZ_CLEAR_OBJ(total_syms);
MZ_CLEAR_OBJ(pTable->m_look_up);
MZ_CLEAR_OBJ(pTable->m_tree);
for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
total_syms[pTable->m_code_size[i]]++;
used_syms = 0, total = 0;
next_code[0] = next_code[1] = 0;
for (i = 1; i <= 15; ++i) {
used_syms += total_syms[i];
next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
}
if ((65536 != total) && (used_syms > 1)) {
TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
}
for (tree_next = -1, sym_index = 0;
sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
mz_uint rev_code = 0, l, cur_code,
code_size = pTable->m_code_size[sym_index];
if (!code_size) continue;
cur_code = next_code[code_size]++;
for (l = code_size; l > 0; l--, cur_code >>= 1)
rev_code = (rev_code << 1) | (cur_code & 1);
if (code_size <= TINFL_FAST_LOOKUP_BITS) {
mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
pTable->m_look_up[rev_code] = k;
rev_code += (1 << code_size);
}
continue;
}
if (0 ==
(tree_cur = pTable->m_look_up[rev_code &
(TINFL_FAST_LOOKUP_SIZE - 1)])) {
pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
(mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
}
rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
tree_cur -= ((rev_code >>= 1) & 1);
if (!pTable->m_tree[-tree_cur - 1]) {
pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
} else
tree_cur = pTable->m_tree[-tree_cur - 1];
}
tree_cur -= ((rev_code >>= 1) & 1);
pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
}
if (r->m_type == 2) {
for (counter = 0;
counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
mz_uint s;
TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
if (dist < 16) {
r->m_len_codes[counter++] = (mz_uint8)dist;
continue;
}
if ((dist == 16) && (!counter)) {
TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
}
num_extra = "\02\03\07"[dist - 16];
TINFL_GET_BITS(18, s, num_extra);
s += "\03\03\013"[dist - 16];
TINFL_MEMSET(r->m_len_codes + counter,
(dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
counter += s;
}
if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
}
TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
r->m_table_sizes[0]);
TINFL_MEMCPY(r->m_tables[1].m_code_size,
r->m_len_codes + r->m_table_sizes[0],
r->m_table_sizes[1]);
}
}
for (;;) {
mz_uint8 *pSrc;
for (;;) {
if (((pIn_buf_end - pIn_buf_cur) < 4) ||
((pOut_buf_end - pOut_buf_cur) < 2)) {
TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
if (counter >= 256) break;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)counter;
} else {
int sym2;
mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
if (num_bits < 30) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 4;
num_bits += 32;
}
#else
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
counter = sym2;
bit_buf >>= code_len;
num_bits -= code_len;
if (counter & 256) break;
#if !TINFL_USE_64BIT_BITBUF
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
bit_buf >>= code_len;
num_bits -= code_len;
pOut_buf_cur[0] = (mz_uint8)counter;
if (sym2 & 256) {
pOut_buf_cur++;
counter = sym2;
break;
}
pOut_buf_cur[1] = (mz_uint8)sym2;
pOut_buf_cur += 2;
}
}
if ((counter &= 511) == 256) break;
num_extra = s_length_extra[counter - 257];
counter = s_length_base[counter - 257];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(25, extra_bits, num_extra);
counter += extra_bits;
}
TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
num_extra = s_dist_extra[dist];
dist = s_dist_base[dist];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(27, extra_bits, num_extra);
dist += extra_bits;
}
dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
if ((dist > dist_from_out_buf_start) &&
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
}
pSrc = pOut_buf_start +
((dist_from_out_buf_start - dist) & out_buf_size_mask);
if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
while (counter--) {
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ =
pOut_buf_start[(dist_from_out_buf_start++ - dist) &
out_buf_size_mask];
}
continue;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
else if ((counter >= 9) && (counter <= dist)) {
const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
do {
((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
pOut_buf_cur += 8;
} while ((pSrc += 8) < pSrc_end);
if ((counter &= 7) < 3) {
if (counter) {
pOut_buf_cur[0] = pSrc[0];
if (counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
continue;
}
}
#endif
do {
pOut_buf_cur[0] = pSrc[0];
pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur[2] = pSrc[2];
pOut_buf_cur += 3;
pSrc += 3;
} while ((int)(counter -= 3) > 2);
if ((int)counter > 0) {
pOut_buf_cur[0] = pSrc[0];
if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
}
}
} while (!(r->m_final & 1));
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_SKIP_BITS(32, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
mz_uint s;
if (num_bits)
TINFL_GET_BITS(41, s, 8);
else
TINFL_GET_BYTE(42, s);
r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
}
}
TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
TINFL_CR_FINISH
common_exit:
r->m_num_bits = num_bits;
r->m_bit_buf = bit_buf;
r->m_dist = dist;
r->m_counter = counter;
r->m_num_extra = num_extra;
r->m_dist_from_out_buf_start = dist_from_out_buf_start;
*pIn_buf_size = pIn_buf_cur - pIn_buf_next;
*pOut_buf_size = pOut_buf_cur - pOut_buf_next;
if ((decomp_flags &
(TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
(status >= 0)) {
const mz_uint8 *ptr = pOut_buf_next;
size_t buf_len = *pOut_buf_size;
mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
s2 = r->m_check_adler32 >> 16;
size_t block_len = buf_len % 5552;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
r->m_check_adler32 = (s2 << 16) + s1;
if ((status == TINFL_STATUS_DONE) &&
(decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
(r->m_check_adler32 != r->m_z_adler32))
status = TINFL_STATUS_ADLER32_MISMATCH;
}
return status;
}
// Higher level helper functions.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tinfl_decompressor decomp;
void *pBuf = NULL, *pNew_buf;
size_t src_buf_ofs = 0, out_buf_capacity = 0;
*pOut_len = 0;
tinfl_init(&decomp);
for (;;) {
size_t src_buf_size = src_buf_len - src_buf_ofs,
dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
tinfl_status status = tinfl_decompress(
&decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
(mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
&dst_buf_size,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
src_buf_ofs += src_buf_size;
*pOut_len += dst_buf_size;
if (status == TINFL_STATUS_DONE) break;
new_out_buf_capacity = out_buf_capacity * 2;
if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
if (!pNew_buf) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
pBuf = pNew_buf;
out_buf_capacity = new_out_buf_capacity;
}
return pBuf;
}
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tinfl_decompressor decomp;
tinfl_status status;
tinfl_init(&decomp);
status =
tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
(mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
: out_buf_len;
}
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
int result = 0;
tinfl_decompressor decomp;
mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
size_t in_buf_ofs = 0, dict_ofs = 0;
if (!pDict) return TINFL_STATUS_FAILED;
tinfl_init(&decomp);
for (;;) {
size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
tinfl_status status =
tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs,
&in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
(flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
in_buf_ofs += in_buf_size;
if ((dst_buf_size) &&
(!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
break;
if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
result = (status == TINFL_STATUS_DONE);
break;
}
dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
}
MZ_FREE(pDict);
*pIn_buf_size = in_buf_ofs;
return result;
}
// ------------------- Low-level Compression (independent from all decompression
// API's)
// Purposely making these tables static for faster init and thread safety.
static const mz_uint16 s_tdefl_len_sym[256] = {
257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268,
268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272,
272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274,
274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276,
276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279,
279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280,
280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
285};
static const mz_uint8 s_tdefl_len_extra[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8,
8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24,
24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};
// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
typedef struct {
mz_uint16 m_key, m_sym_index;
} tdefl_sym_freq;
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
tdefl_sym_freq *pSyms0,
tdefl_sym_freq *pSyms1) {
mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2];
tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;
MZ_CLEAR_OBJ(hist);
for (i = 0; i < num_syms; i++) {
mz_uint freq = pSyms0[i].m_key;
hist[freq & 0xFF]++;
hist[256 + ((freq >> 8) & 0xFF)]++;
}
while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
total_passes--;
for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) {
const mz_uint32 *pHist = &hist[pass << 8];
mz_uint offsets[256], cur_ofs = 0;
for (i = 0; i < 256; i++) {
offsets[i] = cur_ofs;
cur_ofs += pHist[i];
}
for (i = 0; i < num_syms; i++)
pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] =
pCur_syms[i];
{
tdefl_sym_freq *t = pCur_syms;
pCur_syms = pNew_syms;
pNew_syms = t;
}
}
return pCur_syms;
}
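// Given syms sorted by frequency (ascending m_key), computes the optimal
// (minimum-redundancy) Huffman code length of each symbol in place: on return
// A[i].m_key holds the code length of the i-th sorted symbol.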
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// [email protected], Jyrki Katajainen, [email protected], November 1996.
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
int root, leaf, next, avbl, used, dpth;
if (n == 0)
return;
else if (n == 1) {
A[0].m_key = 1;
return;
}
A[0].m_key += A[1].m_key;
root = 0;
leaf = 2;
for (next = 1; next < n - 1; next++) {
if (leaf >= n || A[root].m_key < A[leaf].m_key) {
A[next].m_key = A[root].m_key;
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = A[leaf++].m_key;
if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
}
A[n - 2].m_key = 0;
for (next = n - 3; next >= 0; next--)
A[next].m_key = A[A[next].m_key].m_key + 1;
avbl = 1;
used = dpth = 0;
root = n - 2;
next = n - 1;
while (avbl > 0) {
while (root >= 0 && (int)A[root].m_key == dpth) {
used++;
root--;
}
while (avbl > used) {
A[next--].m_key = (mz_uint16)(dpth);
avbl--;
}
avbl = 2 * used;
dpth++;
used = 0;
}
}
// Limits canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
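// Shortens any code longer than max_code_size and rebalances the per-length
// counts until the Kraft sum (total of num_codes[i] << (max_code_size - i))
// equals 1 << max_code_size, so the lengths still describe a valid prefix code.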
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
int code_list_len,
int max_code_size) {
int i;
mz_uint32 total = 0;
if (code_list_len <= 1) return;
for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
pNum_codes[max_code_size] += pNum_codes[i];
for (i = max_code_size; i > 0; i--)
total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
while (total != (1UL << max_code_size)) {
pNum_codes[max_code_size]--;
for (i = max_code_size - 1; i > 0; i--)
if (pNum_codes[i]) {
pNum_codes[i]--;
pNum_codes[i + 1] += 2;
break;
}
total--;
}
}
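// Builds the canonical Huffman code table 'table_num': either from preset code
// sizes (static_table != 0) or from the gathered symbol frequencies, then
// assigns bit-reversed canonical codes (DEFLATE emits codes LSB first).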
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
int table_len, int code_size_limit,
int static_table) {
int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
MZ_CLEAR_OBJ(num_codes);
if (static_table) {
for (i = 0; i < table_len; i++)
num_codes[d->m_huff_code_sizes[table_num][i]]++;
} else {
tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
*pSyms;
int num_used_syms = 0;
const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
for (i = 0; i < table_len; i++)
if (pSym_count[i]) {
syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
}
pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
code_size_limit);
MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
for (l = num_codes[i]; l > 0; l--)
d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
}
next_code[1] = 0;
for (j = 0, i = 2; i <= code_size_limit; i++)
next_code[i] = j = ((j + num_codes[i - 1]) << 1);
for (i = 0; i < table_len; i++) {
mz_uint rev_code = 0, code, code_size;
if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
code = next_code[code_size]++;
for (l = code_size; l > 0; l--, code >>= 1)
rev_code = (rev_code << 1) | (code & 1);
d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
}
}
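// Appends the low 'l' bits of 'b' to the output bit buffer and flushes whole
// bytes to the output buffer as they accumulate.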
#define TDEFL_PUT_BITS(b, l) \
do { \
mz_uint bits = b; \
mz_uint len = l; \
MZ_ASSERT(bits <= ((1U << len) - 1U)); \
d->m_bit_buffer |= (bits << d->m_bits_in); \
d->m_bits_in += len; \
while (d->m_bits_in >= 8) { \
if (d->m_pOutput_buf < d->m_pOutput_buf_end) \
*d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
d->m_bit_buffer >>= 8; \
d->m_bits_in -= 8; \
} \
} \
MZ_MACRO_END
#define TDEFL_RLE_PREV_CODE_SIZE() \
{ \
if (rle_repeat_count) { \
if (rle_repeat_count < 3) { \
d->m_huff_count[2][prev_code_size] = (mz_uint16)( \
d->m_huff_count[2][prev_code_size] + rle_repeat_count); \
while (rle_repeat_count--) \
packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
} else { \
d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 16; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_repeat_count - 3); \
} \
rle_repeat_count = 0; \
} \
}
#define TDEFL_RLE_ZERO_CODE_SIZE() \
{ \
if (rle_z_count) { \
if (rle_z_count < 3) { \
d->m_huff_count[2][0] = \
(mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \
while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
} else if (rle_z_count <= 10) { \
d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 17; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 3); \
} else { \
d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 18; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 11); \
} \
rle_z_count = 0; \
} \
}
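// Order in which the code-length code lengths are transmitted, per RFC 1951
// section 3.2.7.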
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
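// Writes a dynamic-Huffman block header: optimizes the literal/length and
// distance tables, RLE-packs their code lengths (symbols 16/17/18 encode
// runs), and emits the HLIT, HDIST, and HCLEN fields followed by the packed
// code sizes.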
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
int num_lit_codes, num_dist_codes, num_bit_lengths;
mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
rle_repeat_count, packed_code_sizes_index;
mz_uint8
code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
prev_code_size = 0xFF;
d->m_huff_count[0][256] = 1;
tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
num_dist_codes);
total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
num_packed_code_sizes = 0;
rle_z_count = 0;
rle_repeat_count = 0;
memset(&d->m_huff_count[2][0], 0,
sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
for (i = 0; i < total_code_sizes_to_pack; i++) {
mz_uint8 code_size = code_sizes_to_pack[i];
if (!code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
if (++rle_z_count == 138) {
TDEFL_RLE_ZERO_CODE_SIZE();
}
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
if (code_size != prev_code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
d->m_huff_count[2][code_size] =
(mz_uint16)(d->m_huff_count[2][code_size] + 1);
packed_code_sizes[num_packed_code_sizes++] = code_size;
} else if (++rle_repeat_count == 6) {
TDEFL_RLE_PREV_CODE_SIZE();
}
}
prev_code_size = code_size;
}
if (rle_repeat_count) {
TDEFL_RLE_PREV_CODE_SIZE();
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
}
tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
TDEFL_PUT_BITS(2, 2);
TDEFL_PUT_BITS(num_lit_codes - 257, 5);
TDEFL_PUT_BITS(num_dist_codes - 1, 5);
for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
if (d->m_huff_code_sizes
[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
break;
num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
for (i = 0; (int)i < num_bit_lengths; i++)
TDEFL_PUT_BITS(
d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
for (packed_code_sizes_index = 0;
packed_code_sizes_index < num_packed_code_sizes;) {
mz_uint code = packed_code_sizes[packed_code_sizes_index++];
MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
if (code >= 16)
TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
"\02\03\07"[code - 16]);
}
}
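// Writes a static (fixed-Huffman) block header using the code lengths fixed by
// RFC 1951: literals 0-143 get 8 bits, 144-255 get 9, length codes 256-279 get
// 7, 280-287 get 8, and all 32 distance codes get 5 bits.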
static void tdefl_start_static_block(tdefl_compressor *d) {
mz_uint i;
mz_uint8 *p = &d->m_huff_code_sizes[0][0];
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
memset(d->m_huff_code_sizes[1], 5, 32);
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
TDEFL_PUT_BITS(1, 2);
}
static const mz_uint mz_bitmasks[17] = {
0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
mz_uint8 *pOutput_buf = d->m_pOutput_buf;
mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
mz_uint64 bit_buffer = d->m_bit_buffer;
mz_uint bits_in = d->m_bits_in;
#define TDEFL_PUT_BITS_FAST(b, l) \
{ \
bit_buffer |= (((mz_uint64)(b)) << bits_in); \
bits_in += (l); \
}
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint s0, s1, n0, n1, sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
// This sequence coaxes MSVC into using cmov's vs. jmp's.
s0 = s_tdefl_small_dist_sym[match_dist & 511];
n0 = s_tdefl_small_dist_extra[match_dist & 511];
s1 = s_tdefl_large_dist_sym[match_dist >> 8];
n1 = s_tdefl_large_dist_extra[match_dist >> 8];
sym = (match_dist < 512) ? s0 : s1;
num_extra_bits = (match_dist < 512) ? n0 : n1;
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
}
}
}
if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
*(mz_uint64 *)pOutput_buf = bit_buffer;
pOutput_buf += (bits_in >> 3);
bit_buffer >>= (bits_in & ~7);
bits_in &= 7;
}
#undef TDEFL_PUT_BITS_FAST
d->m_pOutput_buf = pOutput_buf;
d->m_bits_in = 0;
d->m_bit_buffer = 0;
while (bits_in) {
mz_uint32 n = MZ_MIN(bits_in, 16);
TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
bit_buffer >>= n;
bits_in -= n;
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
if (match_dist < 512) {
sym = s_tdefl_small_dist_sym[match_dist];
num_extra_bits = s_tdefl_small_dist_extra[match_dist];
} else {
sym = s_tdefl_large_dist_sym[match_dist >> 8];
num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
}
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
}
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
// MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
if (static_block)
tdefl_start_static_block(d);
else
tdefl_start_dynamic_block(d);
return tdefl_compress_lz_codes(d);
}
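// Flushes the buffered LZ codes as one DEFLATE block: writes the zlib header on
// the first block, tries a compressed block, falls back to a stored (raw) block
// if that would expand the data, pads/syncs on flush, appends the Adler-32 on
// TDEFL_FINISH, then resets the LZ buffers and copies output to the caller's
// buffer or put-buf callback. Returns the number of output bytes still
// pending, or a negative status on failure.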
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
mz_uint saved_bit_buf, saved_bits_in;
mz_uint8 *pSaved_output_buf;
mz_bool comp_block_succeeded = MZ_FALSE;
int n, use_raw_block =
((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
(d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
mz_uint8 *pOutput_buf_start =
((d->m_pPut_buf_func == NULL) &&
((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
: d->m_output_buf;
d->m_pOutput_buf = pOutput_buf_start;
d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
MZ_ASSERT(!d->m_output_flush_remaining);
d->m_output_flush_ofs = 0;
d->m_output_flush_remaining = 0;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
TDEFL_PUT_BITS(0x78, 8);
TDEFL_PUT_BITS(0x01, 8);
}
TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
pSaved_output_buf = d->m_pOutput_buf;
saved_bit_buf = d->m_bit_buffer;
saved_bits_in = d->m_bits_in;
if (!use_raw_block)
comp_block_succeeded =
tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
(d->m_total_lz_bytes < 48));
// If the block gets expanded, forget the current contents of the output
// buffer and send a raw block instead.
if (((use_raw_block) ||
((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
d->m_total_lz_bytes))) &&
((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
mz_uint i;
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
TDEFL_PUT_BITS(0, 2);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
}
for (i = 0; i < d->m_total_lz_bytes; ++i) {
TDEFL_PUT_BITS(
d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
8);
}
}
// Check for the extremely unlikely (if not impossible) case of the compressed
// block not fitting into the output buffer when using dynamic codes.
else if (!comp_block_succeeded) {
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
tdefl_compress_block(d, MZ_TRUE);
}
if (flush) {
if (flush == TDEFL_FINISH) {
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
mz_uint i, a = d->m_adler32;
for (i = 0; i < 4; i++) {
TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
a <<= 8;
}
}
} else {
mz_uint i, z = 0;
TDEFL_PUT_BITS(0, 3);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, z ^= 0xFFFF) {
TDEFL_PUT_BITS(z & 0xFFFF, 16);
}
}
}
MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
d->m_total_lz_bytes = 0;
d->m_block_index++;
if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
if (d->m_pPut_buf_func) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
} else if (pOutput_buf_start == d->m_output_buf) {
int bytes_to_copy = (int)MZ_MIN(
(size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
bytes_to_copy);
d->m_out_buf_ofs += bytes_to_copy;
if ((n -= bytes_to_copy) != 0) {
d->m_output_flush_ofs = bytes_to_copy;
d->m_output_flush_remaining = n;
}
} else {
d->m_out_buf_ofs += n;
}
}
return d->m_output_flush_remaining;
}
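// Hash-chain match finder: starting from the chain for the current position,
// probes earlier dictionary positions (up to m_max_probes) looking for a match
// longer than *pMatch_len within max_dist. The unaligned variant compares the
// dictionary 16 bits at a time.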
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
s01 = TDEFL_READ_UNALIGNED_WORD(s);
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
q = (const mz_uint16 *)(d->m_dict + probe_pos);
if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
p = s;
probe_len = 32;
do {
} while (
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
if (!probe_len) {
*pMatch_dist = dist;
*pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
break;
} else if ((probe_len = ((mz_uint)(p - s) * 2) +
(mz_uint)(*(const mz_uint8 *)p ==
*(const mz_uint8 *)q)) > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
max_match_len)
break;
c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
}
}
}
#else
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint8 *s = d->m_dict + pos, *p, *q;
mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if ((d->m_dict[probe_pos + match_len] == c0) && \
(d->m_dict[probe_pos + match_len - 1] == c1)) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
p = s;
q = d->m_dict + probe_pos;
for (probe_len = 0; probe_len < max_match_len; probe_len++)
if (*p++ != *q++) break;
if (probe_len > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
c0 = d->m_dict[pos + match_len];
c1 = d->m_dict[pos + match_len - 1];
}
}
}
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
// Faster, minimally featured LZRW1-style match+parse loop with better
// register utilization. Intended for applications where raw throughput is
// valued more highly than ratio.
mz_uint lookahead_pos = d->m_lookahead_pos,
lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
total_lz_bytes = d->m_total_lz_bytes,
num_flags_left = d->m_num_flags_left;
mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
mz_uint dst_pos =
(lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
d->m_src_buf_left -= num_bytes_to_process;
lookahead_size += num_bytes_to_process;
while (num_bytes_to_process) {
mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
d->m_pSrc += n;
dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
num_bytes_to_process -= n;
}
dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
break;
while (lookahead_size >= 4) {
mz_uint cur_match_dist, cur_match_len = 1;
mz_uint8 *pCur_dict = d->m_dict + cur_pos;
mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
mz_uint hash =
(first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
TDEFL_LEVEL1_HASH_SIZE_MASK;
mz_uint probe_pos = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)lookahead_pos;
if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
dict_size) &&
((*(const mz_uint32 *)(d->m_dict +
(probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
0xFFFFFF) == first_trigram)) {
const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
mz_uint32 probe_len = 32;
do {
} while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
(mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
if (!probe_len)
cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U))) {
cur_match_len = 1;
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
} else {
mz_uint32 s0, s1;
cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 1) &&
(cur_match_dist <= TDEFL_LZ_DICT_SIZE));
cur_match_dist--;
pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
*(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
pLZ_code_buf += 3;
*pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
TDEFL_MIN_MATCH_LEN]]++;
}
} else {
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
}
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
total_lz_bytes += cur_match_len;
lookahead_pos += cur_match_len;
dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
MZ_ASSERT(lookahead_size >= cur_match_len);
lookahead_size -= cur_match_len;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
while (lookahead_size) {
mz_uint8 lit = d->m_dict[cur_pos];
total_lz_bytes++;
*pLZ_code_buf++ = lit;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
lookahead_pos++;
dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
lookahead_size--;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
}
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
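// tdefl_record_literal/tdefl_record_match append one LZ code to the code
// buffer: a flag bit (0 = literal, 1 = match), then either the literal byte or
// a 3-byte match record (length - 3, 16-bit distance - 1), and bump the
// corresponding Huffman frequency counters.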
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
mz_uint8 lit) {
d->m_total_lz_bytes++;
*d->m_pLZ_code_buf++ = lit;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
}
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
mz_uint match_len,
mz_uint match_dist) {
mz_uint32 s0, s1;
MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
(match_dist <= TDEFL_LZ_DICT_SIZE));
d->m_total_lz_bytes += match_len;
d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
match_dist -= 1;
d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
d->m_pLZ_code_buf += 3;
*d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
s0 = s_tdefl_small_dist_sym[match_dist & 511];
s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
if (match_len >= TDEFL_MIN_MATCH_LEN)
d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
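// Normal (non-fast) compression path: feeds input into the dictionary and hash
// chains, finds a match at each position, and does one-step lazy parsing by
// holding back a literal when the next position might yield a longer match.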
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
const mz_uint8 *pSrc = d->m_pSrc;
size_t src_buf_left = d->m_src_buf_left;
tdefl_flush flush = d->m_flush;
while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
// Update dictionary and hash chains. Keeps the lookahead size equal to
// TDEFL_MAX_MATCH_LEN.
if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK,
ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
src_buf_left -= num_bytes_to_process;
d->m_lookahead_size += num_bytes_to_process;
while (pSrc != pSrc_end) {
mz_uint8 c = *pSrc++;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
ins_pos++;
}
} else {
while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
mz_uint8 c = *pSrc++;
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK;
src_buf_left--;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< (TDEFL_LZ_HASH_SHIFT * 2)) ^
(d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
c) &
(TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
}
}
}
d->m_dict_size =
MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;
// Simple lazy/greedy parsing state machine.
len_to_move = 1;
cur_match_dist = 0;
cur_match_len =
d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
cur_match_len = 0;
while (cur_match_len < d->m_lookahead_size) {
if (d->m_dict[cur_pos + cur_match_len] != c) break;
cur_match_len++;
}
if (cur_match_len < TDEFL_MIN_MATCH_LEN)
cur_match_len = 0;
else
cur_match_dist = 1;
}
} else {
tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
d->m_lookahead_size, &cur_match_dist, &cur_match_len);
}
if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U)) ||
(cur_pos == cur_match_dist) ||
((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
cur_match_dist = cur_match_len = 0;
}
if (d->m_saved_match_len) {
if (cur_match_len > d->m_saved_match_len) {
tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
if (cur_match_len >= 128) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
d->m_saved_match_len = 0;
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[cur_pos];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
} else {
tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
len_to_move = d->m_saved_match_len - 1;
d->m_saved_match_len = 0;
}
} else if (!cur_match_dist)
tdefl_record_literal(d,
d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
(cur_match_len >= 128)) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
// Move the lookahead forward by len_to_move bytes.
d->m_lookahead_pos += len_to_move;
MZ_ASSERT(d->m_lookahead_size >= len_to_move);
d->m_lookahead_size -= len_to_move;
d->m_dict_size =
MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
// Check if it's time to flush the current LZ codes to the internal output
// buffer.
if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
((d->m_total_lz_bytes > 31 * 1024) &&
(((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
d->m_total_lz_bytes) ||
(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
int n;
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
}
}
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
return MZ_TRUE;
}
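// Copies any bytes still pending in the internal output buffer to the caller's
// output buffer and reports how much input was consumed and output produced.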
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
if (d->m_pIn_buf_size) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
}
if (d->m_pOut_buf_size) {
size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs,
d->m_output_flush_remaining);
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
d->m_output_buf + d->m_output_flush_ofs, n);
d->m_output_flush_ofs += (mz_uint)n;
d->m_output_flush_remaining -= (mz_uint)n;
d->m_out_buf_ofs += n;
*d->m_pOut_buf_size = d->m_out_buf_ofs;
}
return (d->m_finished && !d->m_output_flush_remaining) ? TDEFL_STATUS_DONE
: TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush) {
if (!d) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return TDEFL_STATUS_BAD_PARAM;
}
d->m_pIn_buf = pIn_buf;
d->m_pIn_buf_size = pIn_buf_size;
d->m_pOut_buf = pOut_buf;
d->m_pOut_buf_size = pOut_buf_size;
d->m_pSrc = (const mz_uint8 *)(pIn_buf);
d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
d->m_out_buf_ofs = 0;
d->m_flush = flush;
if (((d->m_pPut_buf_func != NULL) ==
((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
(d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
(d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
(pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
(pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
}
d->m_wants_to_finish |= (flush == TDEFL_FINISH);
if ((d->m_output_flush_remaining) || (d->m_finished))
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
TDEFL_RLE_MATCHES)) == 0)) {
if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
} else
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
{
if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
}
if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
(pIn_buf))
d->m_adler32 =
(mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
d->m_pSrc - (const mz_uint8 *)pIn_buf);
if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
(!d->m_output_flush_remaining)) {
if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
d->m_finished = (flush == TDEFL_FINISH);
if (flush == TDEFL_FULL_FLUSH) {
MZ_CLEAR_OBJ(d->m_hash);
MZ_CLEAR_OBJ(d->m_next);
d->m_dict_size = 0;
}
}
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush) {
MZ_ASSERT(d->m_pPut_buf_func);
return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
d->m_pPut_buf_func = pPut_buf_func;
d->m_pPut_buf_user = pPut_buf_user;
d->m_flags = (mz_uint)(flags);
d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_pOutput_buf = d->m_output_buf;
d->m_pOutput_buf_end = d->m_output_buf;
d->m_prev_return_status = TDEFL_STATUS_OKAY;
d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
d->m_adler32 = 1;
d->m_pIn_buf = NULL;
d->m_pOut_buf = NULL;
d->m_pIn_buf_size = NULL;
d->m_pOut_buf_size = NULL;
d->m_flush = TDEFL_NO_FLUSH;
d->m_pSrc = NULL;
d->m_src_buf_left = 0;
d->m_out_buf_ofs = 0;
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
return TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
return d->m_prev_return_status;
}
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
tdefl_compressor *pComp;
mz_bool succeeded;
if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
if (!pComp) return MZ_FALSE;
succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
TDEFL_STATUS_OKAY);
succeeded =
succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
TDEFL_STATUS_DONE);
MZ_FREE(pComp);
return succeeded;
}
typedef struct {
size_t m_size, m_capacity;
mz_uint8 *m_pBuf;
mz_bool m_expandable;
} tdefl_output_buffer;
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
void *pUser) {
tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
size_t new_size = p->m_size + len;
if (new_size > p->m_capacity) {
size_t new_capacity = p->m_capacity;
mz_uint8 *pNew_buf;
if (!p->m_expandable) return MZ_FALSE;
do {
new_capacity = MZ_MAX(128U, new_capacity << 1U);
} while (new_size > new_capacity);
pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
if (!pNew_buf) return MZ_FALSE;
p->m_pBuf = pNew_buf;
p->m_capacity = new_capacity;
}
memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
p->m_size = new_size;
return MZ_TRUE;
}
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_len)
return MZ_FALSE;
else
*pOut_len = 0;
out_buf.m_expandable = MZ_TRUE;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return NULL;
*pOut_len = out_buf.m_size;
return out_buf.m_pBuf;
}
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_buf) return 0;
out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
out_buf.m_capacity = out_buf_len;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return 0;
return out_buf.m_size;
}
#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32,
128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput falls off a cliff on
// some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy) {
mz_uint comp_flags =
s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
if (!level)
comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
else if (strategy == MZ_FILTERED)
comp_flags |= TDEFL_FILTER_MATCHES;
else if (strategy == MZ_HUFFMAN_ONLY)
comp_flags &= ~TDEFL_MAX_PROBES_MASK;
else if (strategy == MZ_FIXED)
comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
else if (strategy == MZ_RLE)
comp_flags |= TDEFL_RLE_MATCHES;
return comp_flags;
}
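// Usage sketch (illustrative only, not part of miniz): compress a caller's
// buffer to a heap-allocated zlib stream with the helpers above, assuming the
// zlib-compatible MZ_DEFAULT_* constants are available. The function and
// buffer names are hypothetical; the returned block must be released with
// MZ_FREE().
#if 0
static void example_compress_to_heap(const void *pSrc, size_t src_len) {
  size_t out_len = 0;
  mz_uint comp_flags = tdefl_create_comp_flags_from_zip_params(
      MZ_DEFAULT_LEVEL, MZ_DEFAULT_WINDOW_BITS, MZ_DEFAULT_STRATEGY);
  void *pComp_data =
      tdefl_compress_mem_to_heap(pSrc, src_len, &out_len, (int)comp_flags);
  if (pComp_data) {
    // ... use out_len bytes at pComp_data ...
    MZ_FREE(pComp_data);
  }
}
#endif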
#endif // MINIZ_NO_ZLIB_APIS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files
// generated by this function pass pngcheck.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip) {
// Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was
// defined.
static const mz_uint s_tdefl_png_num_probes[11] = {
0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500};
tdefl_compressor *pComp =
(tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
tdefl_output_buffer out_buf;
int i, bpl = w * num_chans, y, z;
mz_uint32 c;
*pLen_out = 0;
if (!pComp) return NULL;
MZ_CLEAR_OBJ(out_buf);
out_buf.m_expandable = MZ_TRUE;
out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) {
MZ_FREE(pComp);
return NULL;
}
// write dummy header
for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf);
// compress image data
tdefl_init(
pComp, tdefl_output_buffer_putter, &out_buf,
s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
for (y = 0; y < h; ++y) {
tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
tdefl_compress_buffer(pComp,
(mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl,
bpl, TDEFL_NO_FLUSH);
}
if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
TDEFL_STATUS_DONE) {
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
// write real header
*pLen_out = out_buf.m_size - 41;
{
static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
mz_uint8 pnghdr[41] = {0x89,
0x50,
0x4e,
0x47,
0x0d,
0x0a,
0x1a,
0x0a,
0x00,
0x00,
0x00,
0x0d,
0x49,
0x48,
0x44,
0x52,
0,
0,
(mz_uint8)(w >> 8),
(mz_uint8)w,
0,
0,
(mz_uint8)(h >> 8),
(mz_uint8)h,
8,
chans[num_chans],
0,
0,
0,
0,
0,
0,
0,
(mz_uint8)(*pLen_out >> 24),
(mz_uint8)(*pLen_out >> 16),
(mz_uint8)(*pLen_out >> 8),
(mz_uint8)*pLen_out,
0x49,
0x44,
0x41,
0x54};
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
for (i = 0; i < 4; ++i, c <<= 8)
((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
memcpy(out_buf.m_pBuf, pnghdr, 41);
}
// write footer (IDAT CRC-32, followed by IEND chunk)
if (!tdefl_output_buffer_putter(
"\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
*pLen_out = 0;
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
*pLen_out + 4);
for (i = 0; i < 4; ++i, c <<= 8)
(out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
// compute final size of file, grab compressed data buffer and return
*pLen_out += 57;
MZ_FREE(pComp);
return out_buf.m_pBuf;
}
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out) {
  // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we
  // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib APIs
  // were #defined out).
return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
pLen_out, 6, MZ_FALSE);
}
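// Usage sketch (illustrative only, not part of miniz): encode an in-memory RGB
// image as a PNG byte stream with the helper above. The pixel-buffer name and
// the "write to disk" step are hypothetical; the returned buffer was allocated
// via MZ_MALLOC/MZ_REALLOC and must be released with MZ_FREE().
#if 0
static void example_write_png(const mz_uint8 *pPixels, int w, int h) {
  size_t png_size = 0;
  void *pPng = tdefl_write_image_to_png_file_in_memory(pPixels, w, h,
                                                       3 /* RGB */, &png_size);
  if (pPng) {
    // ... write png_size bytes at pPng to a file or socket ...
    MZ_FREE(pPng);
  }
}
#endif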
// ------------------- .ZIP archive reading
#ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
#if defined(_MSC_VER) || defined(__MINGW64__)
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
FILE *pFile = NULL;
fopen_s(&pFile, pFilename, pMode);
return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
FILE *pFile = NULL;
if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
// ZIP archive identifiers and record sizes
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
// Central directory header record offsets
MZ_ZIP_CDH_SIG_OFS = 0,
MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
MZ_ZIP_CDH_METHOD_OFS = 10,
MZ_ZIP_CDH_FILE_TIME_OFS = 12,
MZ_ZIP_CDH_FILE_DATE_OFS = 14,
MZ_ZIP_CDH_CRC32_OFS = 16,
MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
MZ_ZIP_CDH_DISK_START_OFS = 34,
MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
// Local directory header offsets
MZ_ZIP_LDH_SIG_OFS = 0,
MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
MZ_ZIP_LDH_METHOD_OFS = 8,
MZ_ZIP_LDH_FILE_TIME_OFS = 10,
MZ_ZIP_LDH_FILE_DATE_OFS = 12,
MZ_ZIP_LDH_CRC32_OFS = 14,
MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
// End of central directory offsets
MZ_ZIP_ECDH_SIG_OFS = 0,
MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};
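// Simple growable array used for the in-memory central directory and its
// offset/sort indices.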
typedef struct {
void *m_p;
size_t m_size, m_capacity;
mz_uint m_element_size;
} mz_zip_array;
struct mz_zip_internal_state_tag {
mz_zip_array m_central_dir;
mz_zip_array m_central_dir_offsets;
mz_zip_array m_sorted_central_dir_offsets;
MZ_FILE *m_pFile;
void *m_pMem;
size_t m_mem_size;
size_t m_mem_capacity;
};
#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
(array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
((element_type *)((array_ptr)->m_p))[index]
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
mz_zip_array *pArray) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
memset(pArray, 0, sizeof(mz_zip_array));
}
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t min_new_capacity,
mz_uint growing) {
void *pNew_p;
size_t new_capacity = min_new_capacity;
MZ_ASSERT(pArray->m_element_size);
if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE;
if (growing) {
new_capacity = MZ_MAX(1, pArray->m_capacity);
while (new_capacity < min_new_capacity) new_capacity *= 2;
}
if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
pArray->m_element_size, new_capacity)))
return MZ_FALSE;
pArray->m_p = pNew_p;
pArray->m_capacity = new_capacity;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_capacity,
mz_uint growing) {
if (new_capacity > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
return MZ_FALSE;
}
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_size,
mz_uint growing) {
if (new_size > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
return MZ_FALSE;
}
pArray->m_size = new_size;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t n) {
return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
mz_zip_array *pArray,
const void *pElements,
size_t n) {
size_t orig_size = pArray->m_size;
if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
return MZ_FALSE;
memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size,
pElements, n * pArray->m_element_size);
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
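// MS-DOS timestamps pack the date as (year-1980, month, day) and the time as
// (hour, minute, second/2) bit-fields; these helpers convert to/from time_t.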
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
struct tm tm;
memset(&tm, 0, sizeof(tm));
tm.tm_isdst = -1;
tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
tm.tm_mon = ((dos_date >> 5) & 15) - 1;
tm.tm_mday = dos_date & 31;
tm.tm_hour = (dos_time >> 11) & 31;
tm.tm_min = (dos_time >> 5) & 63;
tm.tm_sec = (dos_time << 1) & 62;
return mktime(&tm);
}
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
struct tm tm_struct;
struct tm *tm = &tm_struct;
errno_t err = localtime_s(tm, &time);
if (err) {
*pDOS_date = 0;
*pDOS_time = 0;
return;
}
#else
struct tm *tm = localtime(&time);
#endif
*pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
((tm->tm_sec) >> 1));
*pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif
#ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
(void)pFilename;
*pDOS_date = *pDOS_time = 0;
#else
struct MZ_FILE_STAT_STRUCT file_stat;
// On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
// bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
time_t modified_time) {
struct utimbuf t;
t.actime = access_time;
t.modtime = modified_time;
return !utime(pFilename, &t);
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
mz_uint32 flags) {
(void)flags;
if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_READING;
pZip->m_archive_size = 0;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
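// Case-insensitive, length-aware comparison of the filenames of two central
// directory entries (by index); used by the heap sort below.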
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets,
mz_uint l_index, mz_uint r_index) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (l_len < r_len) : (l < r);
}
#define MZ_SWAP_UINT32(a, b) \
do { \
mz_uint32 t = a; \
a = b; \
b = t; \
} \
MZ_MACRO_END
// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
mz_zip_archive *pZip) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
int start = (size - 2) >> 1, end;
while (start >= 0) {
int child, root = start;
for (;;) {
if ((child = (root << 1) + 1) >= size) break;
child +=
(((child + 1) < size) &&
(mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1])));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
start--;
}
end = size - 1;
while (end > 0) {
int child, root = 0;
MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
for (;;) {
if ((child = (root << 1) + 1) >= end) break;
child +=
(((child + 1) < end) &&
mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1]));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
end--;
}
}
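// Note: after this sort, m_sorted_central_dir_offsets holds indices into
// m_central_dir_offsets ordered by lowercased filename;
// mz_zip_reader_locate_file_binary_search() below relies on that ordering to
// do O(log n) name lookups instead of a linear scan of the central directory.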
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
mz_uint32 flags) {
mz_uint cdir_size, num_this_disk, cdir_disk_index;
mz_uint64 cdir_ofs;
mz_int64 cur_file_ofs;
const mz_uint8 *p;
mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
mz_bool sort_central_dir =
((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
// Basic sanity check - reject archives which are too small to even hold an
// end of central directory record.
if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
// Find the end of central directory record by scanning the file from the end
// towards the beginning.
cur_file_ofs =
MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
for (;;) {
int i,
n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
return MZ_FALSE;
for (i = n - 4; i >= 0; --i)
if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break;
if (i >= 0) {
cur_file_ofs += i;
break;
}
if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
(0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
return MZ_FALSE;
cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
}
// Read and verify the end of central directory record.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
((pZip->m_total_files =
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
return MZ_FALSE;
num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
if (((num_this_disk | cdir_disk_index) != 0) &&
((num_this_disk != 1) || (cdir_disk_index != 1)))
return MZ_FALSE;
if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE;
pZip->m_central_directory_file_ofs = cdir_ofs;
if (pZip->m_total_files) {
mz_uint i, n;
// Read the entire central directory into a heap block, and allocate another
// heap block to hold the unsorted central dir file record offsets, and
// another to hold the sorted indices.
if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
MZ_FALSE)) ||
(!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
pZip->m_total_files, MZ_FALSE)))
return MZ_FALSE;
if (sort_central_dir) {
if (!mz_zip_array_resize(pZip,
&pZip->m_pState->m_sorted_central_dir_offsets,
pZip->m_total_files, MZ_FALSE))
return MZ_FALSE;
}
if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
pZip->m_pState->m_central_dir.m_p,
cdir_size) != cdir_size)
return MZ_FALSE;
// Now create an index into the central directory file records, do some
// basic sanity checking on each record, and check for zip64 entries (which
// are not yet supported).
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
mz_uint total_header_size, comp_size, decomp_size, disk_index;
if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
(MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
return MZ_FALSE;
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
i) =
(mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
if (sort_central_dir)
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
mz_uint32, i) = i;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
(decomp_size != comp_size)) ||
(decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
(comp_size == 0xFFFFFFFF))
return MZ_FALSE;
disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
n)
return MZ_FALSE;
n -= total_header_size;
p += total_header_size;
}
}
if (sort_central_dir)
mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
return MZ_TRUE;
}
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags) {
if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE;
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
size_t s = (file_ofs >= pZip->m_archive_size)
? 0
: (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
return s;
}
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags) {
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
pZip->m_pRead = mz_zip_mem_read_func;
pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
pZip->m_pState->m_pMem = (void *)pMem;
#endif
pZip->m_pState->m_mem_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags) {
mz_uint64 file_size;
MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
if (!pFile) return MZ_FALSE;
if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
file_size = MZ_FTELL64(pFile);
if (!mz_zip_reader_init_internal(pZip, flags)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
pZip->m_pRead = mz_zip_file_read_func;
pZip->m_pIO_opaque = pZip;
pZip->m_pState->m_pFile = pFile;
pZip->m_archive_size = file_size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
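// Informal usage sketch (not part of miniz; "archive.zip" is a placeholder
// and error handling is reduced to early returns): open an archive from disk
// and enumerate its entries.
//
//   mz_zip_archive zip;
//   memset(&zip, 0, sizeof(zip));
//   if (!mz_zip_reader_init_file(&zip, "archive.zip", 0)) return;
//   for (mz_uint i = 0; i < mz_zip_reader_get_num_files(&zip); i++) {
//     mz_zip_archive_file_stat st;
//     if (mz_zip_reader_file_stat(&zip, i, &st))
//       printf("%s: %u -> %u bytes\n", st.m_filename,
//              (mz_uint)st.m_comp_size, (mz_uint)st.m_uncomp_size);
//   }
//   mz_zip_reader_end(&zip);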
#endif // #ifndef MINIZ_NO_STDIO
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
return pZip ? pZip->m_total_files : 0;
}
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
mz_zip_archive *pZip, mz_uint file_index) {
if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return NULL;
return &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
}
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint m_bit_flag;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
return (m_bit_flag & 1);
}
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint filename_len, external_attr;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
// First see if the filename ends with a '/' character.
filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_len) {
if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
return MZ_TRUE;
}
// Bugfix: This code was also checking if the internal attribute was non-zero,
// which wasn't correct.
// Most/all zip writers (hopefully) set DOS file/directory attributes in the
// low 16-bits, so check for the DOS directory flag and ignore the source OS
// ID in the created by field.
// FIXME: Remove this check? Is it necessary - we already check the filename.
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
if ((external_attr & 0x10) != 0) return MZ_TRUE;
return MZ_FALSE;
}
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if ((!p) || (!pStat)) return MZ_FALSE;
// Unpack the central directory record.
pStat->m_file_index = file_index;
pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
pStat->m_time =
mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
// Copy as much of the filename and comment as possible.
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pStat->m_filename[n] = '\0';
n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
pStat->m_comment_size = n;
memcpy(pStat->m_comment,
p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
n);
pStat->m_comment[n] = '\0';
return MZ_TRUE;
}
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) {
if (filename_buf_size) pFilename[0] = '\0';
return 0;
}
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_buf_size) {
n = MZ_MIN(n, filename_buf_size - 1);
memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pFilename[n] = '\0';
}
return n + 1;
}
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
const char *pB,
mz_uint len,
mz_uint flags) {
mz_uint i;
if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len);
for (i = 0; i < len; ++i)
if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE;
return MZ_TRUE;
}
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
mz_uint r_len) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
const mz_uint filename_len = (mz_uint)strlen(pFilename);
int l = 0, h = size - 1;
while (l <= h) {
int m = (l + h) >> 1, file_index = pIndices[m],
comp =
mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets,
file_index, pFilename, filename_len);
if (!comp)
return file_index;
else if (comp < 0)
l = m + 1;
else
h = m - 1;
}
return -1;
}
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags) {
mz_uint file_index;
size_t name_len, comment_len;
if ((!pZip) || (!pZip->m_pState) || (!pName) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return -1;
if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
(!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
return mz_zip_reader_locate_file_binary_search(pZip, pName);
name_len = strlen(pName);
if (name_len > 0xFFFF) return -1;
comment_len = pComment ? strlen(pComment) : 0;
if (comment_len > 0xFFFF) return -1;
for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
const char *pFilename =
(const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
if (filename_len < name_len) continue;
if (comment_len) {
mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
file_comment_len =
MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
const char *pFile_comment = pFilename + filename_len + file_extra_len;
if ((file_comment_len != comment_len) ||
(!mz_zip_reader_string_equal(pComment, pFile_comment,
file_comment_len, flags)))
continue;
}
if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
int ofs = filename_len - 1;
do {
if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
(pFilename[ofs] == ':'))
break;
} while (--ofs >= 0);
ofs++;
pFilename += ofs;
filename_len -= ofs;
}
if ((filename_len == name_len) &&
(mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
return file_index;
}
return -1;
}
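// Informal sketch (the entry name is a placeholder): look up an entry by
// name. The lookup is case-insensitive and path-sensitive by default; pass
// MZ_ZIP_FLAG_CASE_SENSITIVE and/or MZ_ZIP_FLAG_IGNORE_PATH to change that,
// noting that either flag (or a comment filter) forces the linear scan above
// rather than the binary search over the sorted index.
//
//   int idx = mz_zip_reader_locate_file(&zip, "dir/readme.txt", NULL, 0);
//   if (idx < 0) { /* not found */ }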
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size) {
int status = TINFL_STATUS_DONE;
mz_uint64 needed_size, cur_file_ofs, comp_remaining,
out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
mz_zip_archive_file_stat file_stat;
void *pRead_buf;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
tinfl_decompressor inflator;
if ((buf_size) && (!pBuf)) return MZ_FALSE;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn on how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Ensure supplied output buffer is large enough.
needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
: file_stat.m_uncomp_size;
if (buf_size < needed_size) return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
(size_t)needed_size) != needed_size)
return MZ_FALSE;
return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
}
// Decompress the file either directly from memory or from a file input
// buffer.
tinfl_init(&inflator);
if (pZip->m_pState->m_pMem) {
// Read directly from the archive in memory.
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else if (pUser_read_buf) {
// Use a user provided read buffer.
if (!user_read_buf_size) return MZ_FALSE;
pRead_buf = (mz_uint8 *)pUser_read_buf;
read_buf_size = user_read_buf_size;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
} else {
// Temporarily allocate a read buffer.
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(read_buf_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
return MZ_FALSE;
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
do {
size_t in_buf_size,
out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
(comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
out_buf_ofs += out_buf_size;
} while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
if (status == TINFL_STATUS_DONE) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, pUser_read_buf,
user_read_buf_size);
}
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags) {
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, NULL, 0);
}
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags) {
return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
buf_size, flags, NULL, 0);
}
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags) {
mz_uint64 comp_size, uncomp_size, alloc_size;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
void *pBuf;
if (pSize) *pSize = 0;
if (!p) return NULL;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
return NULL;
if (NULL ==
(pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
return NULL;
if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
flags)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return NULL;
}
if (pSize) *pSize = (size_t)alloc_size;
return pBuf;
}
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) {
if (pSize) *pSize = 0;
return MZ_FALSE;
}
return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
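// Informal sketch (the entry name is a placeholder): extract an entry into a
// heap buffer and release it through the archive's own free callback, since
// the buffer is allocated via m_pAlloc.
//
//   size_t size = 0;
//   void *pData =
//       mz_zip_reader_extract_file_to_heap(&zip, "dir/readme.txt", &size, 0);
//   if (pData) {
//     /* ... use size bytes at pData ... */
//     zip.m_pFree(zip.m_pAlloc_opaque, pData);
//   }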
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int status = TINFL_STATUS_DONE;
mz_uint file_crc32 = MZ_CRC32_INIT;
mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
out_buf_ofs = 0, cur_file_ofs;
mz_zip_archive_file_stat file_stat;
void *pRead_buf = NULL;
void *pWrite_buf = NULL;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn on how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
// Decompress the file either directly from memory or from a file input
// buffer.
if (pZip->m_pState->m_pMem) {
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else {
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pState->m_pMem) {
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#endif
return MZ_FALSE;
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
status = TINFL_STATUS_FAILED;
else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
(size_t)file_stat.m_comp_size);
cur_file_ofs += file_stat.m_comp_size;
out_buf_ofs += file_stat.m_comp_size;
comp_remaining = 0;
} else {
while (comp_remaining) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 = (mz_uint32)mz_crc32(
file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
out_buf_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
}
}
} else {
tinfl_decompressor inflator;
tinfl_init(&inflator);
if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
TINFL_LZ_DICT_SIZE)))
status = TINFL_STATUS_FAILED;
else {
do {
mz_uint8 *pWrite_buf_cur =
(mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
size_t in_buf_size,
out_buf_size =
TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
if (out_buf_size) {
if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
out_buf_size) {
status = TINFL_STATUS_FAILED;
break;
}
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
status = TINFL_STATUS_FAILED;
break;
}
}
} while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
(status == TINFL_STATUS_HAS_MORE_OUTPUT));
}
}
if ((status == TINFL_STATUS_DONE) &&
(!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(file_crc32 != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque,
flags);
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
const void *pBuf, size_t n) {
(void)ofs;
return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename,
mz_uint flags) {
mz_bool status;
mz_zip_archive_file_stat file_stat;
MZ_FILE *pFile;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
pFile = MZ_FOPEN(pDst_filename, "wb");
if (!pFile) return MZ_FALSE;
status = mz_zip_reader_extract_to_callback(
pZip, file_index, mz_zip_file_write_callback, pFile, flags);
if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
if (status)
mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
return status;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
if (pZip->m_pState) {
mz_zip_internal_state *pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
}
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags) {
int file_index =
mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
}
#endif
// ------------------- .ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
}
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
p[2] = (mz_uint8)(v >> 16);
p[3] = (mz_uint8)(v >> 24);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (pZip->m_file_offset_alignment) {
// Ensure user specified file offset alignment is a power of 2.
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
return MZ_FALSE;
}
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_archive_size = existing_size;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
#ifdef _MSC_VER
if ((!n) ||
((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
if ((!n) ||
((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
return 0;
if (new_size > pState->m_mem_capacity) {
void *pNew_block;
size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
while (new_capacity < new_size) new_capacity *= 2;
if (NULL == (pNew_block = pZip->m_pRealloc(
pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
return 0;
pState->m_pMem = pNew_block;
pState->m_mem_capacity = new_capacity;
}
memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
pState->m_mem_size = (size_t)new_size;
return n;
}
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size) {
pZip->m_pWrite = mz_zip_heap_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
size_to_reserve_at_beginning))) {
if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_mem_capacity = initial_allocation_size;
}
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning) {
MZ_FILE *pFile;
pZip->m_pWrite = mz_zip_file_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_pFile = pFile;
if (size_to_reserve_at_beginning) {
mz_uint64 cur_ofs = 0;
char buf[4096];
MZ_CLEAR_OBJ(buf);
do {
size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
cur_ofs += n;
size_to_reserve_at_beginning -= n;
} while (size_to_reserve_at_beginning);
}
return MZ_TRUE;
}
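// Informal sketch ("out.zip" and the entry contents are placeholders; error
// handling trimmed): create a new archive on disk, add one in-memory entry,
// then finalize and close. mz_zip_writer_add_mem() and
// mz_zip_writer_finalize_archive() are defined further below.
//
//   mz_zip_archive zip;
//   memset(&zip, 0, sizeof(zip));
//   if (!mz_zip_writer_init_file(&zip, "out.zip", 0)) return;
//   mz_zip_writer_add_mem(&zip, "hello.txt", "hi\n", 3, MZ_DEFAULT_LEVEL);
//   mz_zip_writer_finalize_archive(&zip);
//   mz_zip_writer_end(&zip);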
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
// No sense in trying to write to an archive that's already at the supported
// max size.
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
pState = pZip->m_pState;
if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
(void)pFilename;
return MZ_FALSE;
#else
// Archive is being read from stdio - try to reopen as writable.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
if (!pFilename) return MZ_FALSE;
pZip->m_pWrite = mz_zip_file_write_func;
if (NULL ==
(pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
// The mz_zip_archive is now in a bogus state because pState->m_pFile is
// NULL, so just close it.
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
#endif // #ifdef MINIZ_NO_STDIO
} else if (pState->m_pMem) {
// Archive lives in a memory block. Assume it's from the heap and can be
// resized via the realloc callback.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
pState->m_mem_capacity = pState->m_mem_size;
pZip->m_pWrite = mz_zip_heap_write_func;
}
// Archive is being read via a user provided read function - make sure the
// user has specified a write function too.
else if (!pZip->m_pWrite)
return MZ_FALSE;
// Start writing new files at the archive's current central directory
// location.
pZip->m_archive_size = pZip->m_central_directory_file_ofs;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_central_directory_file_ofs = 0;
return MZ_TRUE;
}
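// Informal sketch ("existing.zip" and the new entry are placeholders; error
// handling trimmed): append to an archive in place by opening it for reading,
// converting the handle to write mode, adding data, and re-finalizing.
//
//   mz_zip_archive zip;
//   memset(&zip, 0, sizeof(zip));
//   if (!mz_zip_reader_init_file(&zip, "existing.zip", 0)) return;
//   if (!mz_zip_writer_init_from_reader(&zip, "existing.zip")) return;
//   mz_zip_writer_add_mem(&zip, "notes/new.txt", "hello", 5, MZ_DEFAULT_LEVEL);
//   mz_zip_writer_finalize_archive(&zip);
//   mz_zip_writer_end(&zip);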
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags) {
return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
level_and_flags, 0, 0);
}
typedef struct {
mz_zip_archive *m_pZip;
mz_uint64 m_cur_archive_file_ofs;
mz_uint64 m_comp_size;
} mz_zip_writer_add_state;
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
void *pUser) {
mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
pState->m_cur_archive_file_ofs, pBuf,
len) != len)
return MZ_FALSE;
pState->m_cur_archive_file_ofs += len;
pState->m_comp_size += len;
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_local_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_central_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_add_to_central_dir(
mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
const void *pExtra, mz_uint16 extra_size, const void *pComment,
mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
mz_uint32 ext_attributes) {
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
size_t orig_central_dir_size = pState->m_central_dir.m_size;
mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
// No zip64 support yet
if ((local_header_ofs > 0xFFFFFFFF) ||
(((mz_uint64)pState->m_central_dir.m_size +
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
comment_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_central_dir_header(
pZip, central_dir_header, filename_size, extra_size, comment_size,
uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
dos_date, local_header_ofs, ext_attributes))
return MZ_FALSE;
if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
filename_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
extra_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
comment_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
&central_dir_ofs, 1))) {
// Try to push the central directory array back into its original state.
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
return MZ_TRUE;
}
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
// Basic ZIP archive filename validity checks: Valid filenames cannot start
// with a forward slash, cannot contain a drive letter, and cannot use
// DOS-style backward slashes.
if (*pArchive_name == '/') return MZ_FALSE;
while (*pArchive_name) {
if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE;
pArchive_name++;
}
return MZ_TRUE;
}
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
mz_zip_archive *pZip) {
mz_uint32 n;
if (!pZip->m_file_offset_alignment) return 0;
n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
return (pZip->m_file_offset_alignment - n) &
(pZip->m_file_offset_alignment - 1);
}
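// Worked example of the padding math above: with m_file_offset_alignment ==
// 0x200 and m_archive_size == 0x2F3, n == 0xF3, so (0x200 - 0xF3) & 0x1FF ==
// 0x10D bytes of zeros are written and the next local header lands at 0x400.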
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
mz_uint64 cur_file_ofs, mz_uint32 n) {
char buf[4096];
memset(buf, 0, MZ_MIN(sizeof(buf), n));
while (n) {
mz_uint32 s = MZ_MIN(sizeof(buf), n);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
return MZ_FALSE;
cur_file_ofs += s;
n -= s;
}
return MZ_TRUE;
}
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32) {
mz_uint16 method = 0, dos_time = 0, dos_date = 0;
mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
tdefl_compressor *pComp = NULL;
mz_bool store_data_uncompressed;
mz_zip_internal_state *pState;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
store_data_uncompressed =
((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
(!pArchive_name) || ((comment_size) && (!pComment)) ||
(pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
pState = pZip->m_pState;
if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
return MZ_FALSE;
// No zip64 support yet
if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
{
time_t cur_time;
time(&cur_time);
mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
}
#endif // #ifndef MINIZ_NO_TIME
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
// Set DOS Subdirectory attribute bit.
ext_attributes |= 0x10;
// Subdirectories cannot contain data.
if ((buf_size) || (uncomp_size)) return MZ_FALSE;
}
// Try to do any allocations before writing to the archive, so if an
// allocation fails the file remains unmodified. (A good idea if we're doing
// an in-place modification.)
if ((!mz_zip_array_ensure_room(
pZip, &pState->m_central_dir,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
(!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
return MZ_FALSE;
if ((!store_data_uncompressed) && (buf_size)) {
if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
return MZ_FALSE;
}
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
uncomp_crc32 =
(mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
uncomp_size = buf_size;
if (uncomp_size <= 3) {
level = 0;
store_data_uncompressed = MZ_TRUE;
}
}
if (store_data_uncompressed) {
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
buf_size) != buf_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += buf_size;
comp_size = buf_size;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED;
} else if (buf_size) {
mz_zip_writer_add_state state;
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) ||
(tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
TDEFL_STATUS_DONE)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pComp = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
MZ_FILE *pSrc_file = NULL;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
return MZ_FALSE;
pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
if (!pSrc_file) return MZ_FALSE;
MZ_FSEEK64(pSrc_file, 0, SEEK_END);
uncomp_size = MZ_FTELL64(pSrc_file);
MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
if (uncomp_size > 0xFFFFFFFF) {
// No zip64 support yet
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (uncomp_size <= 3) level = 0;
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (uncomp_size) {
mz_uint64 uncomp_remaining = uncomp_size;
void *pRead_buf =
pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
if (!pRead_buf) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (!level) {
while (uncomp_remaining) {
mz_uint n =
(mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
(pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf,
n) != n)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
uncomp_crc32 =
(mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
uncomp_remaining -= n;
cur_archive_file_ofs += n;
}
comp_size = uncomp_size;
} else {
mz_bool result = MZ_FALSE;
mz_zip_writer_add_state state;
tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
if (!pComp) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
for (;;) {
size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining,
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
tdefl_status status;
if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
break;
uncomp_crc32 = (mz_uint32)mz_crc32(
uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
uncomp_remaining -= in_buf_size;
status = tdefl_compress_buffer(
pComp, pRead_buf, in_buf_size,
uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH);
if (status == TDEFL_STATUS_DONE) {
result = MZ_TRUE;
break;
} else if (status != TDEFL_STATUS_OKAY)
break;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
if (!result) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
}
MZ_FCLOSE(pSrc_file);
pSrc_file = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index) {
mz_uint n, bit_flags, num_alignment_padding_bytes;
mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
size_t orig_central_dir_size;
mz_zip_internal_state *pState;
void *pBuf;
const mz_uint8 *pSrc_central_header;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
if (NULL ==
(pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
return MZ_FALSE;
pState = pZip->m_pState;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
0xFFFFFFFF))
return MZ_FALSE;
cur_src_file_ofs =
MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
cur_dst_file_ofs = pZip->m_archive_size;
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
num_alignment_padding_bytes))
return MZ_FALSE;
cur_dst_file_ofs += num_alignment_padding_bytes;
local_dir_header_ofs = cur_dst_file_ofs;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
comp_bytes_remaining =
n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
if (NULL == (pBuf = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1,
(size_t)MZ_MAX(sizeof(mz_uint32) * 4,
MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
comp_bytes_remaining)))))
return MZ_FALSE;
while (comp_bytes_remaining) {
n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_dst_file_ofs += n;
comp_bytes_remaining -= n;
}
bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
if (bit_flags & 8) {
// Copy data descriptor
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
cur_dst_file_ofs += n;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
// no zip64 support yet
if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE;
orig_central_dir_size = pState->m_central_dir.m_size;
memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
local_dir_header_ofs);
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
return MZ_FALSE;
n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
if (!mz_zip_array_push_back(
pZip, &pState->m_central_dir,
pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE;
n = (mz_uint32)orig_central_dir_size;
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
pZip->m_total_files++;
pZip->m_archive_size = cur_dst_file_ofs;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_uint64 central_dir_ofs, central_dir_size;
mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
pState = pZip->m_pState;
// no zip64 support yet
if ((pZip->m_total_files > 0xFFFF) ||
((pZip->m_archive_size + pState->m_central_dir.m_size +
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
central_dir_ofs = 0;
central_dir_size = 0;
if (pZip->m_total_files) {
// Write central directory
central_dir_ofs = pZip->m_archive_size;
central_dir_size = pState->m_central_dir.m_size;
pZip->m_central_directory_file_ofs = central_dir_ofs;
if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
pState->m_central_dir.m_p,
(size_t)central_dir_size) != central_dir_size)
return MZ_FALSE;
pZip->m_archive_size += central_dir_size;
}
// Write end of central directory record
MZ_CLEAR_OBJ(hdr);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
pZip->m_total_files);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
sizeof(hdr)) != sizeof(hdr))
return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE;
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_archive_size += sizeof(hdr);
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize) {
if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE;
if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE;
if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE;
*pBuf = pZip->m_pState->m_pMem;
*pSize = pZip->m_pState->m_mem_size;
pZip->m_pState->m_pMem = NULL;
pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0;
return MZ_TRUE;
}
mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_bool status = MZ_TRUE;
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)))
return MZ_FALSE;
pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
pState->m_pMem = NULL;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return status;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_bool status, created_new_archive = MZ_FALSE;
mz_zip_archive zip_archive;
struct MZ_FILE_STAT_STRUCT file_stat;
MZ_CLEAR_OBJ(zip_archive);
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
((comment_size) && (!pComment)) ||
((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
// Create a new archive.
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
return MZ_FALSE;
created_new_archive = MZ_TRUE;
} else {
// Append to an existing archive.
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return MZ_FALSE;
if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
mz_zip_reader_end(&zip_archive);
return MZ_FALSE;
}
}
status =
mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
pComment, comment_size, level_and_flags, 0, 0);
// Always finalize, even if adding failed for some reason, so we have a valid
// central directory. (This may not always succeed, but we can try.)
if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE;
if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE;
if ((!status) && (created_new_archive)) {
// It's a new archive and something went wrong, so just delete it.
int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
(void)ignoredStatus;
}
return status;
}
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint flags) {
int file_index;
mz_zip_archive zip_archive;
void *p = NULL;
if (pSize) *pSize = 0;
if ((!pZip_filename) || (!pArchive_name)) return NULL;
MZ_CLEAR_OBJ(zip_archive);
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return NULL;
if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL,
flags)) >= 0)
p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
mz_zip_reader_end(&zip_archive);
return p;
}
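// A minimal usage sketch of the two stdio convenience helpers above
// (illustrative only, never compiled). The archive and file names are
// placeholders; the heap buffer is assumed to be released with miniz's
// mz_free() helper.
#if 0
static void example_in_place_archive(void) {
  const char payload[] = "Hello, world.";
  // Creates archive.zip if it does not exist, otherwise appends to it.
  if (!mz_zip_add_mem_to_archive_file_in_place(
          "archive.zip", "hello.txt", payload, sizeof(payload) - 1, NULL, 0,
          MZ_DEFAULT_LEVEL))
    return;
  size_t size = 0;
  void *p = mz_zip_extract_archive_file_to_heap("archive.zip", "hello.txt",
                                                &size, 0);
  if (p) {
    // ... use the `size` extracted bytes at `p` ...
    mz_free(p);
  }
}
#endif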
#endif // #ifndef MINIZ_NO_STDIO
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_FILE_ONLY
/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
*/
// ---------------------- end of miniz ----------------------------------------
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef _MSC_VER
#pragma warning(pop)
#endif
} // namespace miniz
#else
// Reuse MINIZ_LITTLE_ENDIAN macro
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#endif // TINYEXR_USE_MINIZ
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
static void SetErrorMessage(const std::string &msg, const char **err) {
if (err) {
#ifdef _WIN32
(*err) = _strdup(msg.c_str());
#else
(*err) = strdup(msg.c_str());
#endif
}
}
static const int kEXRVersionSize = 8;
static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
}
static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned short tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[1];
dst[1] = src[0];
#endif
}
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
static void cpy4(int *dst_val, const int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(float *dst_val, const float *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
static void swap4(unsigned int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned int tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
#if 0
static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
dst[4] = src[4];
dst[5] = src[5];
dst[6] = src[6];
dst[7] = src[7];
}
#endif
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
tinyexr::tinyexr_uint64 tmp = (*val);
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[7];
dst[1] = src[6];
dst[2] = src[5];
dst[3] = src[4];
dst[4] = src[3];
dst[5] = src[2];
dst[6] = src[1];
dst[7] = src[0];
#endif
}
// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
union FP32 {
unsigned int u;
float f;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 23;
unsigned int Exponent : 8;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 8;
unsigned int Mantissa : 23;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
union FP16 {
unsigned short u;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 10;
unsigned int Exponent : 5;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 5;
unsigned int Mantissa : 10;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
static FP32 half_to_float(FP16 h) {
static const FP32 magic = {113 << 23};
static const unsigned int shifted_exp = 0x7c00
<< 13; // exponent mask after shift
FP32 o;
o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits
unsigned int exp_ = shifted_exp & o.u; // just the exponent
o.u += (127 - 15) << 23; // exponent adjust
// handle exponent special cases
if (exp_ == shifted_exp) // Inf/NaN?
o.u += (128 - 16) << 23; // extra exp adjust
else if (exp_ == 0) // Zero/Denormal?
{
o.u += 1 << 23; // extra exp adjust
o.f -= magic.f; // renormalize
}
o.u |= (h.u & 0x8000U) << 16U; // sign bit
return o;
}
static FP16 float_to_half_full(FP32 f) {
FP16 o = {0};
// Based on ISPC reference code (with minor modifications)
if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow)
o.s.Exponent = 0;
else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set)
{
o.s.Exponent = 31;
o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf
} else // Normalized number
{
// Exponent unbias the single, then bias the halfp
int newexp = f.s.Exponent - 127 + 15;
if (newexp >= 31) // Overflow, return signed infinity
o.s.Exponent = 31;
else if (newexp <= 0) // Underflow
{
if ((14 - newexp) <= 24) // Mantissa might be non-zero
{
unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit
o.s.Mantissa = mant >> (14 - newexp);
if ((mant >> (13 - newexp)) & 1) // Check for rounding
o.u++; // Round, might overflow into exp bit, but this is OK
}
} else {
o.s.Exponent = static_cast<unsigned int>(newexp);
o.s.Mantissa = f.s.Mantissa >> 13;
if (f.s.Mantissa & 0x1000) // Check for rounding
o.u++; // Round, might overflow to inf, this is OK
}
}
o.s.Sign = f.s.Sign;
return o;
}
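// A minimal round-trip sketch (illustrative only, never compiled):
// 1.5f has the bit pattern 0x3FC00000 (sign 0, exponent 127, mantissa
// 0x400000); rebiasing gives half exponent 15 and mantissa 0x200, i.e.
// the half bits 0x3E00, and half_to_float() restores exactly 1.5f.
#if 0
static void example_half_round_trip(void) {
  FP32 f;
  f.f = 1.5f;
  FP16 h = float_to_half_full(f);  // h.u == 0x3E00
  FP32 g = half_to_float(h);       // g.f == 1.5f
  (void)g;
}
#endif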
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
// Read until NULL ('\0').
const char *p = ptr;
const char *q = ptr;
while ((size_t(q - ptr) < len) && (*q) != 0) {
q++;
}
if (size_t(q - ptr) >= len) {
(*s) = std::string();
return NULL;
}
(*s) = std::string(p, q);
return q + 1; // skip '\0'
}
static bool ReadAttribute(std::string *name, std::string *type,
std::vector<unsigned char> *data, size_t *marker_size,
const char *marker, size_t size) {
size_t name_len = strnlen(marker, size);
if (name_len == size) {
// String does not have a terminating character.
return false;
}
*name = std::string(marker, name_len);
marker += name_len + 1;
size -= name_len + 1;
size_t type_len = strnlen(marker, size);
if (type_len == size) {
return false;
}
*type = std::string(marker, type_len);
marker += type_len + 1;
size -= type_len + 1;
if (size < sizeof(uint32_t)) {
return false;
}
uint32_t data_len;
memcpy(&data_len, marker, sizeof(uint32_t));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len == 0) {
if ((*type).compare("string") == 0) {
// Accept empty string attribute.
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
data->resize(1);
(*data)[0] = '\0';
return true;
} else {
return false;
}
}
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
if (size < data_len) {
return false;
}
data->resize(static_cast<size_t>(data_len));
memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
return true;
}
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
const char *name, const char *type,
const unsigned char *data, int len) {
out->insert(out->end(), name, name + strlen(name) + 1);
out->insert(out->end(), type, type + strlen(type) + 1);
int outLen = len;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen));
out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
out->insert(out->end(), data, data + len);
}
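// Layout example (illustrative): WriteAttributeToMemory(out, "comments",
// "string", data, 5) appends
//   "comments\0" "string\0" 05 00 00 00 <5 data bytes>
// i.e. a NUL-terminated name, a NUL-terminated type, a little-endian
// 32-bit length, then the raw attribute data.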
typedef struct {
std::string name; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} ChannelInfo;
typedef struct {
std::vector<tinyexr::ChannelInfo> channels;
std::vector<EXRAttribute> attributes;
int data_window[4];
int line_order;
int display_window[4];
float screen_window_center[2];
float screen_window_width;
float pixel_aspect_ratio;
int chunk_count;
// Tiled format
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
unsigned int header_len;
int compression_type;
void clear() {
channels.clear();
attributes.clear();
data_window[0] = 0;
data_window[1] = 0;
data_window[2] = 0;
data_window[3] = 0;
line_order = 0;
display_window[0] = 0;
display_window[1] = 0;
display_window[2] = 0;
display_window[3] = 0;
screen_window_center[0] = 0.0f;
screen_window_center[1] = 0.0f;
screen_window_width = 0.0f;
pixel_aspect_ratio = 0.0f;
chunk_count = 0;
// Tiled format
tile_size_x = 0;
tile_size_y = 0;
tile_level_mode = 0;
tile_rounding_mode = 0;
header_len = 0;
compression_type = 0;
}
} HeaderInfo;
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
const std::vector<unsigned char> &data) {
const char *p = reinterpret_cast<const char *>(&data.at(0));
for (;;) {
if ((*p) == 0) {
break;
}
ChannelInfo info;
tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
(p - reinterpret_cast<const char *>(data.data()));
if (data_len < 0) {
return false;
}
p = ReadString(&info.name, p, size_t(data_len));
if ((p == NULL) && (info.name.empty())) {
// Buffer overrun. Issue #51.
return false;
}
const unsigned char *data_end =
reinterpret_cast<const unsigned char *>(p) + 16;
if (data_end >= (data.data() + data.size())) {
return false;
}
memcpy(&info.pixel_type, p, sizeof(int));
p += 4;
info.p_linear = static_cast<unsigned char>(p[0]); // uchar
p += 1 + 3; // reserved: uchar[3]
memcpy(&info.x_sampling, p, sizeof(int)); // int
p += 4;
memcpy(&info.y_sampling, p, sizeof(int)); // int
p += 4;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling));
channels.push_back(info);
}
return true;
}
static void WriteChannelInfo(std::vector<unsigned char> &data,
const std::vector<ChannelInfo> &channels) {
size_t sz = 0;
// Calculate total size.
for (size_t c = 0; c < channels.size(); c++) {
sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0
sz += 16; // 4 * int
}
data.resize(sz + 1);
unsigned char *p = &data.at(0);
for (size_t c = 0; c < channels.size(); c++) {
memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
p += strlen(channels[c].name.c_str());
(*p) = '\0';
p++;
int pixel_type = channels[c].pixel_type;
int x_sampling = channels[c].x_sampling;
int y_sampling = channels[c].y_sampling;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling));
memcpy(p, &pixel_type, sizeof(int));
p += sizeof(int);
(*p) = channels[c].p_linear;
p += 4;
memcpy(p, &x_sampling, sizeof(int));
p += sizeof(int);
memcpy(p, &y_sampling, sizeof(int));
p += sizeof(int);
}
(*p) = '\0';
}
static void CompressZip(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
// Apply the EXR-specific preprocessing (taken from OpenEXR's
// ImfZipCompressor.cpp).
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
#if TINYEXR_USE_MINIZ
//
// Compress the data using miniz
//
miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
int ret = miniz::mz_compress(
dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
src_size);
assert(ret == miniz::MZ_OK);
(void)ret;
compressedSize = outSize;
#else
uLong outSize = compressBound(static_cast<uLong>(src_size));
int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
src_size);
assert(ret == Z_OK);
compressedSize = outSize;
#endif
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static bool DecompressZip(unsigned char *dst,
unsigned long *uncompressed_size /* inout */,
const unsigned char *src, unsigned long src_size) {
if ((*uncompressed_size) == src_size) {
// Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
return true;
}
std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
int ret =
miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (miniz::MZ_OK != ret) {
return false;
}
#else
int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (Z_OK != ret) {
return false;
}
#endif
//
// Apply the EXR-specific postprocessing (taken from OpenEXR's
// ImfZipCompressor.cpp).
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(*uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + (*uncompressed_size);
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
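// Worked example of the reorder + predictor pair (illustrative): the source
// bytes {a, b, c, d, e} are first interleaved into {a, c, e, b, d} (first
// half from even offsets, second half from odd offsets). For reordered
// bytes {10, 12, 11}, CompressZip() encodes the deltas {10, 130, 127}
// (130 = 12 - 10 + 384 mod 256, 127 = 11 - 12 + 384 mod 256), and the
// decoder above restores {10, 12, 11}
// (12 = 10 + 130 - 128, 11 = 12 + 127 - 128, both mod 256).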
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
static int rleCompress(int inLength, const char in[], signed char out[]) {
const char *inEnd = in + inLength;
const char *runStart = in;
const char *runEnd = in + 1;
signed char *outWrite = out;
while (runStart < inEnd) {
while (runEnd < inEnd && *runStart == *runEnd &&
runEnd - runStart - 1 < MAX_RUN_LENGTH) {
++runEnd;
}
if (runEnd - runStart >= MIN_RUN_LENGTH) {
//
// Compressable run
//
*outWrite++ = static_cast<char>(runEnd - runStart) - 1;
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
runStart = runEnd;
} else {
//
// Uncompressable run
//
while (runEnd < inEnd &&
((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
(runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
runEnd - runStart < MAX_RUN_LENGTH) {
++runEnd;
}
*outWrite++ = static_cast<char>(runStart - runEnd);
while (runStart < runEnd) {
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
}
}
++runEnd;
}
return static_cast<int>(outWrite - out);
}
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
static int rleUncompress(int inLength, int maxLength, const signed char in[],
char out[]) {
char *outStart = out;
while (inLength > 0) {
if (*in < 0) {
int count = -(static_cast<int>(*in++));
inLength -= count + 1;
// Fixes #116: Add bounds check to in buffer.
if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;
memcpy(out, in, count);
out += count;
in += count;
} else {
int count = *in++;
inLength -= 2;
if (0 > (maxLength -= count + 1)) return 0;
memset(out, *reinterpret_cast<const char *>(in), count + 1);
out += count + 1;
in++;
}
}
return static_cast<int>(out - outStart);
}
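// A minimal round-trip sketch (illustrative only, never compiled). The
// packed buffer is sized at 3/2 of the input, the worst case noted in
// CompressRle() below.
#if 0
static void example_rle_round_trip(void) {
  const char src[8] = {1, 1, 1, 1, 2, 3, 3, 3};
  signed char packed[(8 * 3) / 2];
  char unpacked[8];
  int packedLen = rleCompress(8, src, packed);
  int n = rleUncompress(packedLen, 8, packed, unpacked);
  // On success n == 8 and unpacked matches src; n == 0 signals overflow.
  (void)n;
}
#endif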
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
static void CompressRle(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
// Apply the EXR-specific preprocessing (taken from OpenEXR's
// ImfRleCompressor.cpp).
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// outSize will be at most (src_size * 3) / 2.
int outSize = rleCompress(static_cast<int>(src_size),
reinterpret_cast<const char *>(&tmpBuf.at(0)),
reinterpret_cast<signed char *>(dst));
assert(outSize > 0);
compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static bool DecompressRle(unsigned char *dst,
const unsigned long uncompressed_size,
const unsigned char *src, unsigned long src_size) {
if (uncompressed_size == src_size) {
// Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
return true;
}
// Workaround for issue #112.
// TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
if (src_size <= 2) {
return false;
}
std::vector<unsigned char> tmpBuf(uncompressed_size);
int ret = rleUncompress(static_cast<int>(src_size),
static_cast<int>(uncompressed_size),
reinterpret_cast<const signed char *>(src),
reinterpret_cast<char *>(&tmpBuf.at(0)));
if (ret != static_cast<int>(uncompressed_size)) {
return false;
}
//
// Apply the EXR-specific postprocessing (taken from OpenEXR's
// ImfRleCompressor.cpp).
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + uncompressed_size;
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
// (3-clause BSD license)
//
struct PIZChannelData {
unsigned short *start;
unsigned short *end;
int nx;
int ny;
int ys;
int size;
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
short as = static_cast<short>(a);
short bs = static_cast<short>(b);
short ms = (as + bs) >> 1;
short ds = as - bs;
l = static_cast<unsigned short>(ms);
h = static_cast<unsigned short>(ds);
}
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
short ls = static_cast<short>(l);
short hs = static_cast<short>(h);
int hi = hs;
int ai = ls + (hi & 1) + (hi >> 1);
short as = static_cast<short>(ai);
short bs = static_cast<short>(ai - hi);
a = static_cast<unsigned short>(as);
b = static_cast<unsigned short>(bs);
}
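// Worked example (illustrative): wenc14(a = 300, b = 100) stores the
// average l = (300 + 100) / 2 = 200 and the difference h = 300 - 100 = 200;
// wdec14(200, 200) recovers a = 200 + 0 + 100 = 300 and b = 300 - 200 = 100.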
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
int ao = (a + A_OFFSET) & MOD_MASK;
int m = ((ao + b) >> 1);
int d = ao - b;
if (d < 0) m = (m + M_OFFSET) & MOD_MASK;
d &= MOD_MASK;
l = static_cast<unsigned short>(m);
h = static_cast<unsigned short>(d);
}
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
int m = l;
int d = h;
int bb = (m - (d >> 1)) & MOD_MASK;
int aa = (d + bb - A_OFFSET) & MOD_MASK;
b = static_cast<unsigned short>(bb);
a = static_cast<unsigned short>(aa);
}
//
// 2D Wavelet encoding:
//
static void wav2Encode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1; // == 1 << level
int p2 = 2; // == 1 << (level+1)
//
// Hierarchical loop on smaller dimension n
//
while (p2 <= n) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet encoding
//
if (w14) {
wenc14(*px, *p01, i00, i01);
wenc14(*p10, *p11, i10, i11);
wenc14(i00, i10, *px, *p10);
wenc14(i01, i11, *p01, *p11);
} else {
wenc16(*px, *p01, i00, i01);
wenc16(*p10, *p11, i10, i11);
wenc16(i00, i10, *px, *p10);
wenc16(i01, i11, *p01, *p11);
}
}
//
// Encode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wenc14(*px, *p10, i00, *p10);
else
wenc16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Encode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wenc14(*px, *p01, i00, *p01);
else
wenc16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p = p2;
p2 <<= 1;
}
}
//
// 2D Wavelet decoding:
//
static void wav2Decode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1;
int p2;
//
// Search max level
//
while (p <= n) p <<= 1;
p >>= 1;
p2 = p;
p >>= 1;
//
// Hierarchical loop on smaller dimension n
//
while (p >= 1) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet decoding
//
if (w14) {
wdec14(*px, *p10, i00, i10);
wdec14(*p01, *p11, i01, i11);
wdec14(i00, i01, *px, *p01);
wdec14(i10, i11, *p10, *p11);
} else {
wdec16(*px, *p10, i00, i10);
wdec16(*p01, *p11, i01, i11);
wdec16(i00, i01, *px, *p01);
wdec16(i10, i11, *p10, *p11);
}
}
//
// Decode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wdec14(*px, *p10, i00, *p10);
else
wdec16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Decode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wdec14(*px, *p01, i00, *p01);
else
wdec16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p2 = p;
p >>= 1;
}
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16; // literal (value) bit length
const int HUF_DECBITS = 14; // decoding bit size (>= 8)
const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;
struct HufDec { // short code long code
//-------------------------------
int len : 8; // code length 0
int lit : 24; // lit p size
int *p; // 0 lits
};
inline long long hufLength(long long code) { return code & 63; }
inline long long hufCode(long long code) { return code >> 6; }
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
char *&out) {
c <<= nBits;
lc += nBits;
c |= bits;
while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8)));
}
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
while (lc < nBits) {
c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++));
lc += 8;
}
lc -= nBits;
return (c >> lc) & ((1 << nBits) - 1);
}
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
long long n[59];
//
// For each i from 0 through 58, count the
// number of different codes of length i, and
// store the count in n[i].
//
for (int i = 0; i <= 58; ++i) n[i] = 0;
for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;
//
// For each i from 58 through 1, compute the
// numerically lowest code with length i, and
// store that code in n[i].
//
long long c = 0;
for (int i = 58; i > 0; --i) {
long long nc = ((c + n[i]) >> 1);
n[i] = c;
c = nc;
}
//
// hcode[i] contains the length, l, of the
// code for symbol i. Assign the next available
// code of length l to the symbol and store both
// l and the code in hcode[i].
//
for (int i = 0; i < HUF_ENCSIZE; ++i) {
int l = static_cast<int>(hcode[i]);
if (l > 0) hcode[i] = l | (n[l]++ << 6);
}
}
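// Worked example (illustrative): for three used symbols with code lengths
// {s0: 1, s1: 2, s2: 2} (in increasing symbol order), the loops above
// assign the canonical codes s0 -> 1 (1 bit), s1 -> 00, s2 -> 01 (2 bits).
// Padding the 1-bit code with zeroes to the right gives 10 > 01, matching
// the rule that shorter codes compare numerically higher than longer ones.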
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
struct FHeapCompare {
bool operator()(long long *a, long long *b) { return *a > *b; }
};
static void hufBuildEncTable(
long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
int *im, // o: min frq index
int *iM) // o: max frq index
{
//
// This function assumes that when it is called, array frq
// indicates the frequency of all possible symbols in the data
// that are to be Huffman-encoded. (frq[i] contains the number
// of occurrences of symbol i in the data.)
//
// The loop below does three things:
//
// 1) Finds the minimum and maximum indices that point
// to non-zero entries in frq:
//
// frq[im] != 0, and frq[i] == 0 for all i < im
// frq[iM] != 0, and frq[i] == 0 for all i > iM
//
// 2) Fills array fHeap with pointers to all non-zero
// entries in frq.
//
// 3) Initializes array hlink such that hlink[i] == i
// for all array entries.
//
std::vector<int> hlink(HUF_ENCSIZE);
std::vector<long long *> fHeap(HUF_ENCSIZE);
*im = 0;
while (!frq[*im]) (*im)++;
int nf = 0;
for (int i = *im; i < HUF_ENCSIZE; i++) {
hlink[i] = i;
if (frq[i]) {
fHeap[nf] = &frq[i];
nf++;
*iM = i;
}
}
//
// Add a pseudo-symbol, with a frequency count of 1, to frq;
// adjust the fHeap and hlink array accordingly. Function
// hufEncode() uses the pseudo-symbol for run-length encoding.
//
(*iM)++;
frq[*iM] = 1;
fHeap[nf] = &frq[*iM];
nf++;
//
// Build an array, scode, such that scode[i] contains the number
// of bits assigned to symbol i. Conceptually this is done by
// constructing a tree whose leaves are the symbols with non-zero
// frequency:
//
// Make a heap that contains all symbols with a non-zero frequency,
// with the least frequent symbol on top.
//
// Repeat until only one symbol is left on the heap:
//
// Take the two least frequent symbols off the top of the heap.
// Create a new node that has first two nodes as children, and
// whose frequency is the sum of the frequencies of the first
// two nodes. Put the new node back into the heap.
//
// The last node left on the heap is the root of the tree. For each
// leaf node, the distance between the root and the leaf is the length
// of the code for the corresponding symbol.
//
// The loop below doesn't actually build the tree; instead we compute
// the distances of the leaves from the root on the fly. When a new
// node is added to the heap, then that node's descendants are linked
// into a single linear list that starts at the new node, and the code
// lengths of the descendants (that is, their distance from the root
// of the tree) are incremented by one.
//
std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
std::vector<long long> scode(HUF_ENCSIZE);
memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);
while (nf > 1) {
//
// Find the indices, mm and m, of the two smallest non-zero frq
// values in fHeap, add the smallest frq to the second-smallest
// frq, and remove the smallest frq value from fHeap.
//
int mm = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
--nf;
int m = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
frq[m] += frq[mm];
std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
//
// The entries in scode are linked into lists with the
// entries in hlink serving as "next" pointers and with
// the end of a list marked by hlink[j] == j.
//
// Traverse the lists that start at scode[m] and scode[mm].
// For each element visited, increment the length of the
// corresponding code by one bit. (If we visit scode[j]
// during the traversal, then the code for symbol j becomes
// one bit longer.)
//
// Merge the lists that start at scode[m] and scode[mm]
// into a single list that starts at scode[m].
//
//
// Add a bit to all codes in the first list.
//
for (int j = m;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) {
//
// Merge the two lists.
//
hlink[j] = mm;
break;
}
}
//
// Add a bit to all codes in the second list
//
for (int j = mm;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) break;
}
}
//
// Build a canonical Huffman code table, replacing the code
// lengths in scode with (code, code length) pairs. Copy the
// code table from scode into frq.
//
hufCanonicalCodeTable(scode.data());
memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
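// Examples (illustrative): a single zero length is written as the 6-bit
// value 0; a run of 3 zero lengths is written as the single 6-bit value 60;
// a run of 10 is written as LONG_ZEROCODE_RUN (63) followed by 10 - 6 = 4
// in 8 bits, since SHORTEST_LONG_RUN is 6.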
static void hufPackEncTable(
const long long *hcode, // i : encoding table [HUF_ENCSIZE]
int im, // i : min hcode index
int iM, // i : max hcode index
char **pcode) // o: ptr to packed table (updated)
{
char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
int l = hufLength(hcode[im]);
if (l == 0) {
int zerun = 1;
while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
if (hufLength(hcode[im + 1]) > 0) break;
im++;
zerun++;
}
if (zerun >= 2) {
if (zerun >= SHORTEST_LONG_RUN) {
outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
} else {
outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
}
continue;
}
}
outputBits(6, l, c, lc, p);
}
if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));
*pcode = p;
}
//
// Unpack an encoding table packed by hufPackEncTable():
//
static bool hufUnpackEncTable(
const char **pcode, // io: ptr to packed table (updated)
int ni, // i : input size (in bytes)
int im, // i : min hcode index
int iM, // i : max hcode index
long long *hcode) // o: encoding table [HUF_ENCSIZE]
{
memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
const char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
if (p - *pcode >= ni) {
return false;
}
long long l = hcode[im] = getBits(6, c, lc, p); // code length
if (l == (long long)LONG_ZEROCODE_RUN) {
if (p - *pcode > ni) {
return false;
}
int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
} else if (l >= (long long)SHORT_ZEROCODE_RUN) {
int zerun = l - SHORT_ZEROCODE_RUN + 2;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
}
}
*pcode = const_cast<char *>(p);
hufCanonicalCodeTable(hcode);
return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
for (int i = 0; i < HUF_DECSIZE; i++) {
hdecod[i].len = 0;
hdecod[i].lit = 0;
hdecod[i].p = NULL;
}
// memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
// infrequent;
// - decoding tables are used by hufDecode();
//
static bool hufBuildDecTable(const long long *hcode, // i : encoding table
int im, // i : min index in hcode
int iM, // i : max index in hcode
HufDec *hdecod) // o: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
//
// Init hashtable & loop on all codes.
// Assumes that hufClearDecTable(hdecod) has already been called.
//
for (; im <= iM; im++) {
long long c = hufCode(hcode[im]);
int l = hufLength(hcode[im]);
if (c >> l) {
//
// Error: c is supposed to be an l-bit code,
// but c contains a value that is greater
// than the largest l-bit number.
//
// invalidTableEntry();
return false;
}
if (l > HUF_DECBITS) {
//
// Long code: add a secondary entry
//
HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
if (pl->len) {
//
// Error: a short code has already
// been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->lit++;
if (pl->p) {
int *p = pl->p;
pl->p = new int[pl->lit];
for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];
delete[] p;
} else {
pl->p = new int[1];
}
pl->p[pl->lit - 1] = im;
} else if (l) {
//
// Short code: init all primary entries
//
HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
if (pl->len || pl->p) {
//
// Error: a short code or a long code has
// already been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->len = l;
pl->lit = im;
}
}
}
return true;
}
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
{
for (int i = 0; i < HUF_DECSIZE; i++) {
if (hdecod[i].p) {
delete[] hdecod[i].p;
hdecod[i].p = 0;
}
}
}
//
// ENCODING
//
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
outputBits(hufLength(code), hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
long long &c, int &lc, char *&out) {
//
// Output a run of runCount + 1 instances of the symbol sCode.
// If the run form is shorter, emit sCode once, followed by the runCode
// symbol and runCount expressed as an 8-bit number; otherwise emit the
// symbols explicitly.
//
if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
outputCode(sCode, c, lc, out);
outputCode(runCode, c, lc, out);
outputBits(8, runCount, c, lc, out);
} else {
while (runCount-- >= 0) outputCode(sCode, c, lc, out);
}
}
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
static int hufEncode // return: output size (in bits)
(const long long *hcode, // i : encoding table
const unsigned short *in, // i : uncompressed input buffer
const int ni, // i : input buffer size (in unsigned shorts)
int rlc, // i : rl code
char *out) // o: compressed output buffer
{
char *outStart = out;
long long c = 0; // bits not yet written to out
int lc = 0; // number of valid bits in c (LSB)
int s = in[0];
int cs = 0;
//
// Loop on input values
//
for (int i = 1; i < ni; i++) {
//
// Count same values or send code
//
if (s == in[i] && cs < 255) {
cs++;
} else {
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
cs = 0;
}
s = in[i];
}
//
// Send remaining code
//
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
if (lc) *out = (c << (8 - lc)) & 0xff;
return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
#define getChar(c, lc, in) \
{ \
c = (c << 8) | *(unsigned char *)(in++); \
lc += 8; \
}
#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
{ \
if (po == rlc) { \
if (lc < 8) getChar(c, lc, in); \
\
lc -= 8; \
\
unsigned char cs = (c >> lc); \
\
if (out + cs > oe) return false; \
\
/* TinyEXR issue 78 */ \
unsigned short s = out[-1]; \
\
while (cs-- > 0) *out++ = s; \
} else if (out < oe) { \
*out++ = po; \
} else { \
return false; \
} \
}
#else
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
const char *in_end, unsigned short *&out,
const unsigned short *ob, const unsigned short *oe) {
(void)ob;
if (po == rlc) {
if (lc < 8) {
/* TinyEXR issue 78 */
if ((in + 1) >= in_end) {
return false;
}
getChar(c, lc, in);
}
lc -= 8;
unsigned char cs = (c >> lc);
if (out + cs > oe) return false;
// Bounds check for safety
// Issue 100.
if ((out - 1) < ob) return false;
unsigned short s = out[-1];
while (cs-- > 0) *out++ = s;
} else if (out < oe) {
*out++ = po;
} else {
return false;
}
return true;
}
#endif
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
static bool hufDecode(const long long *hcode, // i : encoding table
const HufDec *hdecod, // i : decoding table
const char *in, // i : compressed input buffer
int ni, // i : input size (in bits)
int rlc, // i : run-length code
int no, // i : expected output size (in unsigned shorts)
unsigned short *out) // o: uncompressed output buffer
{
long long c = 0;
int lc = 0;
unsigned short *outb = out; // begin
unsigned short *oe = out + no; // end
const char *ie = in + (ni + 7) / 8; // input byte size
//
// Loop on input bytes
//
while (in < ie) {
getChar(c, lc, in);
//
// Access decoding table
//
while (lc >= HUF_DECBITS) {
const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
if (pl.len) {
//
// Get short code
//
lc -= pl.len;
// std::cout << "lit = " << pl.lit << std::endl;
// std::cout << "rlc = " << rlc << std::endl;
// std::cout << "c = " << c << std::endl;
// std::cout << "lc = " << lc << std::endl;
// std::cout << "in = " << in << std::endl;
// std::cout << "out = " << out << std::endl;
// std::cout << "oe = " << oe << std::endl;
if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
} else {
if (!pl.p) {
return false;
}
// invalidCode(); // wrong code
//
// Search long code
//
int j;
for (j = 0; j < pl.lit; j++) {
int l = hufLength(hcode[pl.p[j]]);
while (lc < l && in < ie) // get more bits
getChar(c, lc, in);
if (lc >= l) {
if (hufCode(hcode[pl.p[j]]) ==
((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
//
// Found : get long code
//
lc -= l;
if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
break;
}
}
}
if (j == pl.lit) {
return false;
// invalidCode(); // Not found
}
}
}
}
//
// Get remaining (short) codes
//
int i = (8 - ni) & 7;
c >>= i;
lc -= i;
while (lc > 0) {
const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
if (pl.len) {
lc -= pl.len;
if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
} else {
return false;
// invalidCode(); // wrong (long) code
}
}
if (out - outb != no) {
return false;
}
// notEnoughData ();
return true;
}
static void countFrequencies(std::vector<long long> &freq,
const unsigned short data[/*n*/], int n) {
for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0;
for (int i = 0; i < n; ++i) ++freq[data[i]];
}
static void writeUInt(char buf[4], unsigned int i) {
unsigned char *b = (unsigned char *)buf;
b[0] = i;
b[1] = i >> 8;
b[2] = i >> 16;
b[3] = i >> 24;
}
static unsigned int readUInt(const char buf[4]) {
const unsigned char *b = (const unsigned char *)buf;
return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) |
((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000);
}
//
// EXTERNAL INTERFACE
//
static int hufCompress(const unsigned short raw[], int nRaw,
char compressed[]) {
if (nRaw == 0) return 0;
std::vector<long long> freq(HUF_ENCSIZE);
countFrequencies(freq, raw, nRaw);
int im = 0;
int iM = 0;
hufBuildEncTable(freq.data(), &im, &iM);
char *tableStart = compressed + 20;
char *tableEnd = tableStart;
hufPackEncTable(freq.data(), im, iM, &tableEnd);
int tableLength = tableEnd - tableStart;
char *dataStart = tableEnd;
int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
int data_length = (nBits + 7) / 8;
writeUInt(compressed, im);
writeUInt(compressed + 4, iM);
writeUInt(compressed + 8, tableLength);
writeUInt(compressed + 12, nBits);
writeUInt(compressed + 16, 0); // room for future extensions
return dataStart + data_length - compressed;
}
static bool hufUncompress(const char compressed[], int nCompressed,
std::vector<unsigned short> *raw) {
if (nCompressed == 0) {
// Empty input: succeed only when no output is expected.
return raw->size() == 0;
}
int im = readUInt(compressed);
int iM = readUInt(compressed + 4);
// int tableLength = readUInt (compressed + 8);
int nBits = readUInt(compressed + 12);
if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
const char *ptr = compressed + 20;
//
// Fast decoder needs at least 2x64-bits of compressed data, and
// needs to be runnable on this platform. Otherwise, fall back
// to the original decoder
//
// if (FastHufDecoder::enabled() && nBits > 128)
//{
// FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
// fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
//}
// else
{
std::vector<long long> freq(HUF_ENCSIZE);
std::vector<HufDec> hdec(HUF_DECSIZE);
hufClearDecTable(&hdec.at(0));
if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
&freq.at(0))) {
return false;
}
{
if (nBits > 8 * (nCompressed - (ptr - compressed))) {
return false;
}
if (!hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0))) {
hufFreeDecTable(&hdec.at(0));
return false;
}
if (!hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM,
static_cast<int>(raw->size()), raw->data())) {
hufFreeDecTable(&hdec.at(0));
return false;
}
}
// catch (...)
//{
// hufFreeDecTable (hdec);
// throw;
//}
hufFreeDecTable(&hdec.at(0));
}
return true;
}
//
// Functions to compress the range of values in the pixel data
//
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
unsigned char bitmap[BITMAP_SIZE],
unsigned short &minNonZero,
unsigned short &maxNonZero) {
for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;
for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));
bitmap[0] &= ~1; // zero is not explicitly stored in
// the bitmap; we assume that the
// data always contain zeroes
minNonZero = BITMAP_SIZE - 1;
maxNonZero = 0;
for (int i = 0; i < BITMAP_SIZE; ++i) {
if (bitmap[i]) {
if (minNonZero > i) minNonZero = i;
if (maxNonZero < i) maxNonZero = i;
}
}
}
static unsigned short forwardLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
lut[i] = k++;
else
lut[i] = 0;
}
return k - 1; // maximum value stored in lut[],
} // i.e. number of ones in bitmap minus 1
static unsigned short reverseLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i;
}
int n = k - 1;
while (k < USHORT_RANGE) lut[k++] = 0;
return n; // maximum k where lut[k] is non-zero,
} // i.e. number of ones in bitmap minus 1
static void applyLut(const unsigned short lut[USHORT_RANGE],
unsigned short data[/*nData*/], int nData) {
for (int i = 0; i < nData; ++i) data[i] = lut[data[i]];
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
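// CompressPiz() implements PIZ compression for one block of scanlines:
//   1. de-interleave the input into a channel-planar unsigned short buffer,
//   2. build a bitmap of the used 16-bit values and remap all samples
//      through the forward LUT (bitmapFromData / forwardLutFromBitmap),
//   3. apply the 2D wavelet transform to each channel (wav2Encode),
//   4. Huffman-encode the result (hufCompress).
// The output starts with minNonZero, maxNonZero and the used bitmap range,
// followed by a 4-byte Huffman length header and the coded data. If the
// result ends up larger than the input, the input is stored uncompressed
// instead (Issue 40).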
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
const unsigned char *inPtr, size_t inSize,
const std::vector<ChannelInfo> &channelInfo,
int data_width, int num_lines) {
std::vector<unsigned char> bitmap(BITMAP_SIZE);
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
// Assume `inSize` is multiple of 2 or 4.
std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));
std::vector<PIZChannelData> channelData(channelInfo.size());
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t c = 0; c < channelData.size(); c++) {
PIZChannelData &cd = channelData[c];
cd.start = tmpBufferEnd;
cd.end = cd.start;
cd.nx = data_width;
cd.ny = num_lines;
// cd.ys = c.channel().ySampling;
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
cd.size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += cd.nx * cd.ny * cd.size;
}
const unsigned char *ptr = inPtr;
for (int y = 0; y < num_lines; ++y) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(cd.end, ptr, n * sizeof(unsigned short));
ptr += n * sizeof(unsigned short);
cd.end += n;
}
}
bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
bitmap.data(), minNonZero, maxNonZero);
std::vector<unsigned short> lut(USHORT_RANGE);
unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
//
// Store range compression info in _outBuffer
//
char *buf = reinterpret_cast<char *>(outPtr);
memcpy(buf, &minNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
memcpy(buf, &maxNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
if (minNonZero <= maxNonZero) {
memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
maxNonZero - minNonZero + 1);
buf += maxNonZero - minNonZero + 1;
}
//
// Apply wavelet encoding
//
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Apply Huffman encoding; append the result to _outBuffer
//
// Write a 4-byte length header followed by the Huffman-coded data. The
// header is initialized to zero here and patched with the actual `length`
// below.
char *lengthPtr = buf;
int zero = 0;
memcpy(buf, &zero, sizeof(int));
buf += sizeof(int);
int length =
hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
memcpy(lengthPtr, &length, sizeof(int));
(*outSize) = static_cast<unsigned int>(
(reinterpret_cast<unsigned char *>(buf) - outPtr) +
static_cast<unsigned int>(length));
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if ((*outSize) >= inSize) {
(*outSize) = static_cast<unsigned int>(inSize);
memcpy(outPtr, inPtr, inSize);
}
return true;
}
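// DecompressPiz() reverses the steps above: read the bitmap range, rebuild
// the reverse LUT, Huffman-decode, wavelet-decode each channel, map the
// samples back to their original values and re-interleave the scanlines.
// inLen == tmpBufSize signals that the block was stored uncompressed
// (Issue 40) and is copied through unchanged.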
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
size_t tmpBufSize, size_t inLen, int num_channels,
const EXRChannelInfo *channels, int data_width,
int num_lines) {
if (inLen == tmpBufSize) {
// Data is not compressed(Issue 40).
memcpy(outPtr, inPtr, inLen);
return true;
}
std::vector<unsigned char> bitmap(BITMAP_SIZE);
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
memset(bitmap.data(), 0, BITMAP_SIZE);
const unsigned char *ptr = inPtr;
// minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
// maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2));
ptr += 4;
if (maxNonZero >= BITMAP_SIZE) {
return false;
}
if (minNonZero <= maxNonZero) {
memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
maxNonZero - minNonZero + 1);
ptr += maxNonZero - minNonZero + 1;
}
std::vector<unsigned short> lut(USHORT_RANGE);
memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());
//
// Huffman decoding
//
int length;
// length = *(reinterpret_cast<const int *>(ptr));
tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
ptr += sizeof(int);
if (size_t((ptr - inPtr) + length) > inLen) {
return false;
}
std::vector<unsigned short> tmpBuffer(tmpBufSize);
if (!hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer)) {
return false;
}
//
// Wavelet decoding
//
std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
const EXRChannelInfo &chan = channels[i];
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
channelData[i].start = tmpBufferEnd;
channelData[i].end = channelData[i].start;
channelData[i].nx = data_width;
channelData[i].ny = num_lines;
// channelData[i].ys = 1;
channelData[i].size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
}
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Expand the pixel data to their original range
//
applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize));
for (int y = 0; y < num_lines; y++) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
outPtr += n * sizeof(unsigned short);
cd.end += n;
}
}
return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
struct ZFPCompressionParam {
double rate;
int precision;
double tolerance;
int type; // TINYEXR_ZFP_COMPRESSIONTYPE_*
ZFPCompressionParam() {
type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE;
rate = 2.0;
precision = 0;
tolerance = 0.0f;
}
};
bool FindZFPCompressionParam(ZFPCompressionParam *param,
const EXRAttribute *attributes,
int num_attributes) {
bool foundType = false;
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) &&
(attributes[i].size == 1)) {
param->type = static_cast<int>(attributes[i].value[0]);
foundType = true;
}
}
if (!foundType) {
return false;
}
if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
(attributes[i].size == 8)) {
param->rate = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
(attributes[i].size == 4)) {
param->precision = *(reinterpret_cast<int *>(attributes[i].value));
return true;
}
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
(attributes[i].size == 8)) {
param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
} else {
assert(0);
}
return false;
}
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
int num_channels, const unsigned char *src,
unsigned long src_size,
const ZFPCompressionParam ¶m) {
size_t uncompressed_size = static_cast<size_t>(dst_width) *
static_cast<size_t>(dst_num_lines) *
static_cast<size_t>(num_channels);
if (uncompressed_size == src_size) {
// Data is not compressed (Issue 40); just copy it through.
memcpy(dst, src, src_size);
return true;
}
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((dst_width % 4) == 0);
assert((dst_num_lines % 4) == 0);
if ((dst_width & 3U) || (dst_num_lines & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, dst_width, dst_num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
/* write random access */ 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = dst_width * dst_num_lines;
for (int c = 0; c < num_channels; c++) {
// decompress 4x4 pixel block.
for (int y = 0; y < dst_num_lines; y += 4) {
for (int x = 0; x < dst_width; x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * dst_width + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize,
const float *inPtr, int width, int num_lines, int num_channels,
const ZFPCompressionParam ¶m) {
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((width % 4) == 0);
assert((num_lines % 4) == 0);
if ((width & 3U) || (num_lines & 3U)) {
return false;
}
// create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
zfp_type_float, width, num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
outBuf->resize(buf_size);
bitstream *stream = stream_open(&outBuf->at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_field_free(field);
size_t image_size = width * num_lines;
for (int c = 0; c < num_channels; c++) {
// compress 4x4 pixel block.
for (int y = 0; y < num_lines; y += 4) {
for (int x = 0; x < width; x += 4) {
float fblock[16];
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
fblock[j * 4 + i] =
inPtr[c * image_size + ((y + j) * width + (x + i))];
}
}
zfp_encode_block_float_2(zfp, fblock);
}
}
}
zfp_stream_flush(zfp);
(*outSize) = zfp_stream_compressed_size(zfp);
zfp_stream_close(zfp);
return true;
}
#endif
//
// -----------------------------------------------------------------
//
// TODO(syoyo): Refactor function arguments.
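// DecodePixelData() decodes one compressed block (a group of scanlines, or a
// tile when called from DecodeTiledPixelData) into the per-channel output
// images. The decompressor is selected by compression_type (PIZ, ZIPS/ZIP,
// RLE, ZFP or NONE). line_order == 0 writes rows top-down, otherwise rows
// are written bottom-up, and HALF samples are converted to FLOAT when
// requested_pixel_types[c] asks for it.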
static bool DecodePixelData(/* out */ unsigned char **out_images,
const int *requested_pixel_types,
const unsigned char *data_ptr, size_t data_len,
int compression_type, int line_order, int width,
int height, int x_stride, int y, int line_no,
int num_lines, size_t pixel_data_size,
size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ
#if TINYEXR_USE_PIZ
if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
// Invalid input #90
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(
static_cast<size_t>(width * num_lines) * pixel_data_size));
size_t tmpBufLen = outBuf.size();
bool ret = tinyexr::DecompressPiz(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
data_len, static_cast<int>(num_channels), channels, width, num_lines);
if (!ret) {
return false;
}
// For PIZ_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
FP16 hf;
// hf.u = line_ptr[u];
// use `cpy` to avoid unaligned memory access when compiler's
// optimization is on.
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
v * pixel_data_size * static_cast<size_t>(x_stride) +
channel_offset_list[c] * static_cast<size_t>(x_stride)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
}
}
#else
assert(0 && "PIZ is not enabled in this build");
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For ZIP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
if (dstLen == 0) {
return false;
}
if (!tinyexr::DecompressRle(reinterpret_cast<unsigned char *>(&outBuf.at(0)),
dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For RLE_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
if (!FindZFPCompressionParam(&zfp_compression_param, attributes,
num_attributes)) {
assert(0);
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = outBuf.size();
assert(dstLen > 0);
if (!tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width,
num_lines, num_channels, data_ptr,
static_cast<unsigned long>(data_len),
zfp_compression_param)) {
return false;
}
// For ZFP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
#else
(void)attributes;
(void)num_attributes;
(void)num_channels;
assert(0);
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
for (size_t c = 0; c < num_channels; c++) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
const unsigned short *line_ptr =
reinterpret_cast<const unsigned short *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *outLine =
reinterpret_cast<unsigned short *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
outLine[u] = hf.u;
}
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
// The address may not be aligned; use byte-wise copy for safety. #76
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
tinyexr::FP32 f32 = half_to_float(hf);
outLine[u] = f32.f;
}
} else {
assert(0);
return false;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
const float *line_ptr = reinterpret_cast<const float *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
unsigned int *outLine =
reinterpret_cast<unsigned int *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
(data_ptr + data_len)) {
// Corrupted data?
return false;
}
unsigned int val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
}
}
}
}
return true;
}
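// DecodeTiledPixelData() clamps the tile extent at the right/bottom image
// border (edge tiles may be smaller than tile_size_x/y) and then decodes the
// tile through DecodePixelData() with a row stride of tile_size_x.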
static void DecodeTiledPixelData(
unsigned char **out_images, int *width, int *height,
const int *requested_pixel_types, const unsigned char *data_ptr,
size_t data_len, int compression_type, int line_order, int data_width,
int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
int tile_size_y, size_t pixel_data_size, size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
assert(tile_offset_x * tile_size_x < data_width);
assert(tile_offset_y * tile_size_y < data_height);
// Compute actual image size in a tile.
if ((tile_offset_x + 1) * tile_size_x >= data_width) {
(*width) = data_width - (tile_offset_x * tile_size_x);
} else {
(*width) = tile_size_x;
}
if ((tile_offset_y + 1) * tile_size_y >= data_height) {
(*height) = data_height - (tile_offset_y * tile_size_y);
} else {
(*height) = tile_size_y;
}
// Image size = tile size.
DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
compression_type, line_order, (*width), tile_size_y,
/* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
(*height), pixel_data_size, num_attributes, attributes,
num_channels, channels, channel_offset_list);
}
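// ComputeChannelLayout() computes the cumulative byte offset of each channel
// within one pixel (channel_offset_list) and the total number of bytes per
// pixel summed over all channels (pixel_data_size): 2 bytes for HALF,
// 4 bytes for FLOAT and UINT.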
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
int *pixel_data_size, size_t *channel_offset,
int num_channels,
const EXRChannelInfo *channels) {
channel_offset_list->resize(static_cast<size_t>(num_channels));
(*pixel_data_size) = 0;
(*channel_offset) = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
(*channel_offset_list)[c] = (*channel_offset);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
(*pixel_data_size) += sizeof(unsigned short);
(*channel_offset) += sizeof(unsigned short);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
(*pixel_data_size) += sizeof(float);
(*channel_offset) += sizeof(float);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
(*pixel_data_size) += sizeof(unsigned int);
(*channel_offset) += sizeof(unsigned int);
} else {
// Unknown pixel type; cannot compute the layout.
return false;
}
}
return true;
}
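// AllocateImage() mallocs one buffer per channel, each holding
// data_width * data_height samples. HALF channels are stored either as
// unsigned short or as float depending on requested_pixel_types[c];
// FLOAT and UINT channels keep their native 4-byte representation.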
static unsigned char **AllocateImage(int num_channels,
const EXRChannelInfo *channels,
const int *requested_pixel_types,
int data_width, int data_height) {
unsigned char **images =
reinterpret_cast<unsigned char **>(static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(num_channels))));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
size_t data_len =
static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
// pixel_data_size += sizeof(unsigned short);
// channel_offset += sizeof(unsigned short);
// Alloc internal image for half type.
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
images[c] =
reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
malloc(sizeof(unsigned short) * data_len)));
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else {
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
// pixel_data_size += sizeof(float);
// channel_offset += sizeof(float);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
// pixel_data_size += sizeof(unsigned int);
// channel_offset += sizeof(unsigned int);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
} else {
assert(0);
}
}
return images;
}
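// ParseEXRHeader() walks the attribute list that follows the version field,
// fills `info`, and verifies that all attributes required by the OpenEXR
// spec are present. For multipart files, an empty header (a single '\0')
// terminates the header list and is reported through *empty_header.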
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
const EXRVersion *version, std::string *err,
const unsigned char *buf, size_t size) {
const char *marker = reinterpret_cast<const char *>(&buf[0]);
if (empty_header) {
(*empty_header) = false;
}
if (version->multipart) {
if (size > 0 && marker[0] == '\0') {
// End of header list.
if (empty_header) {
(*empty_header) = true;
}
return TINYEXR_SUCCESS;
}
}
// According to the spec, the header of every OpenEXR file must contain at
// least the following attributes:
//
// channels chlist
// compression compression
// dataWindow box2i
// displayWindow box2i
// lineOrder lineOrder
// pixelAspectRatio float
// screenWindowCenter v2f
// screenWindowWidth float
bool has_channels = false;
bool has_compression = false;
bool has_data_window = false;
bool has_display_window = false;
bool has_line_order = false;
bool has_pixel_aspect_ratio = false;
bool has_screen_window_center = false;
bool has_screen_window_width = false;
info->data_window[0] = 0;
info->data_window[1] = 0;
info->data_window[2] = 0;
info->data_window[3] = 0;
info->line_order = 0; // @fixme
info->display_window[0] = 0;
info->display_window[1] = 0;
info->display_window[2] = 0;
info->display_window[3] = 0;
info->screen_window_center[0] = 0.0f;
info->screen_window_center[1] = 0.0f;
info->screen_window_width = -1.0f;
info->pixel_aspect_ratio = -1.0f;
info->tile_size_x = -1;
info->tile_size_y = -1;
info->tile_level_mode = -1;
info->tile_rounding_mode = -1;
info->attributes.clear();
// Read attributes
size_t orig_size = size;
for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
if (0 == size) {
if (err) {
(*err) += "Insufficient data size for attributes.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
if (err) {
(*err) += "Failed to read attribute.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (version->tiled && attr_name.compare("tiles") == 0) {
unsigned int x_size, y_size;
unsigned char tile_mode;
assert(data.size() == 9);
memcpy(&x_size, &data.at(0), sizeof(int));
memcpy(&y_size, &data.at(4), sizeof(int));
tile_mode = data[8];
tinyexr::swap4(&x_size);
tinyexr::swap4(&y_size);
info->tile_size_x = static_cast<int>(x_size);
info->tile_size_y = static_cast<int>(y_size);
// mode = levelMode + roundingMode * 16
info->tile_level_mode = tile_mode & 0x3;
info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
} else if (attr_name.compare("compression") == 0) {
bool ok = false;
if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
ok = true;
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
ok = true;
#else
if (err) {
(*err) = "PIZ compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
ok = true;
#else
if (err) {
(*err) = "ZFP compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (!ok) {
if (err) {
(*err) = "Unknown compression type.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
info->compression_type = static_cast<int>(data[0]);
has_compression = true;
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!ReadChannelInfo(info->channels, data)) {
if (err) {
(*err) += "Failed to parse channel info.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
if (info->channels.size() < 1) {
if (err) {
(*err) += "# of channels is zero.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
has_channels = true;
} else if (attr_name.compare("dataWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->data_window[0], &data.at(0), sizeof(int));
memcpy(&info->data_window[1], &data.at(4), sizeof(int));
memcpy(&info->data_window[2], &data.at(8), sizeof(int));
memcpy(&info->data_window[3], &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3]));
has_data_window = true;
}
} else if (attr_name.compare("displayWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->display_window[0], &data.at(0), sizeof(int));
memcpy(&info->display_window[1], &data.at(4), sizeof(int));
memcpy(&info->display_window[2], &data.at(8), sizeof(int));
memcpy(&info->display_window[3], &data.at(12), sizeof(int));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[1]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[2]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[3]));
has_display_window = true;
}
} else if (attr_name.compare("lineOrder") == 0) {
if (data.size() >= 1) {
info->line_order = static_cast<int>(data[0]);
has_line_order = true;
}
} else if (attr_name.compare("pixelAspectRatio") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio));
has_pixel_aspect_ratio = true;
}
} else if (attr_name.compare("screenWindowCenter") == 0) {
if (data.size() >= 8) {
memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_center[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_center[1]));
has_screen_window_center = true;
}
} else if (attr_name.compare("screenWindowWidth") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_width));
has_screen_window_width = true;
}
} else if (attr_name.compare("chunkCount") == 0) {
if (data.size() >= sizeof(int)) {
memcpy(&info->chunk_count, &data.at(0), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count));
}
} else {
// Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
EXRAttribute attrib;
#ifdef _MSC_VER
strncpy_s(attrib.name, attr_name.c_str(), 255);
strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
strncpy(attrib.name, attr_name.c_str(), 255);
strncpy(attrib.type, attr_type.c_str(), 255);
#endif
attrib.name[255] = '\0';
attrib.type[255] = '\0';
attrib.size = static_cast<int>(data.size());
attrib.value = static_cast<unsigned char *>(malloc(data.size()));
memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
data.size());
info->attributes.push_back(attrib);
}
}
}
// Check if required attributes exist
{
std::stringstream ss_err;
if (!has_compression) {
ss_err << "\"compression\" attribute not found in the header."
<< std::endl;
}
if (!has_channels) {
ss_err << "\"channels\" attribute not found in the header." << std::endl;
}
if (!has_line_order) {
ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
}
if (!has_display_window) {
ss_err << "\"displayWindow\" attribute not found in the header."
<< std::endl;
}
if (!has_data_window) {
ss_err << "\"dataWindow\" attribute not found in the header or invalid."
<< std::endl;
}
if (!has_pixel_aspect_ratio) {
ss_err << "\"pixelAspectRatio\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_width) {
ss_err << "\"screenWindowWidth\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_center) {
ss_err << "\"screenWindowCenter\" attribute not found in the header."
<< std::endl;
}
if (!(ss_err.str().empty())) {
if (err) {
(*err) += ss_err.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
}
info->header_len = static_cast<unsigned int>(orig_size - size);
return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
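// The channel, pixel_type and requested_pixel_type arrays are allocated with
// malloc() here and are intended to be released through FreeEXRHeader()
// (called below in LoadEXR()).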
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
exr_header->screen_window_center[0] = info.screen_window_center[0];
exr_header->screen_window_center[1] = info.screen_window_center[1];
exr_header->screen_window_width = info.screen_window_width;
exr_header->chunk_count = info.chunk_count;
exr_header->display_window[0] = info.display_window[0];
exr_header->display_window[1] = info.display_window[1];
exr_header->display_window[2] = info.display_window[2];
exr_header->display_window[3] = info.display_window[3];
exr_header->data_window[0] = info.data_window[0];
exr_header->data_window[1] = info.data_window[1];
exr_header->data_window[2] = info.data_window[2];
exr_header->data_window[3] = info.data_window[3];
exr_header->line_order = info.line_order;
exr_header->compression_type = info.compression_type;
exr_header->tile_size_x = info.tile_size_x;
exr_header->tile_size_y = info.tile_size_y;
exr_header->tile_level_mode = info.tile_level_mode;
exr_header->tile_rounding_mode = info.tile_rounding_mode;
exr_header->num_channels = static_cast<int>(info.channels.size());
exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
// manually add '\0' for safety.
exr_header->channels[c].name[255] = '\0';
exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
exr_header->channels[c].p_linear = info.channels[c].p_linear;
exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
}
exr_header->pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->pixel_types[c] = info.channels[c].pixel_type;
}
// Initially fill with values of `pixel_types`
exr_header->requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
}
exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());
if (exr_header->num_custom_attributes > 0) {
// TODO(syoyo): Report warning when # of attributes exceeds
// `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
}
exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));
for (size_t i = 0; i < info.attributes.size(); i++) {
memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
256);
memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
256);
exr_header->custom_attributes[i].size = info.attributes[i].size;
// Just copy the pointer.
exr_header->custom_attributes[i].value = info.attributes[i].value;
}
} else {
exr_header->custom_attributes = NULL;
}
exr_header->header_len = info.header_len;
}
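// DecodeChunk() decodes every block listed in `offsets` into exr_image.
// A scanline block covers num_scanline_blocks rows: 1 for NONE/RLE/ZIPS,
// 16 for ZIP and ZFP, 32 for PIZ (see below). Tiled images are decoded
// tile by tile instead, with one EXRTile allocated per offset entry.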
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
const std::vector<tinyexr::tinyexr_uint64> &offsets,
const unsigned char *head, const size_t size,
std::string *err) {
int num_channels = exr_header->num_channels;
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1;
if ((data_width < 0) || (data_height < 0)) {
if (err) {
std::stringstream ss;
ss << "Invalid data width or data height: " << data_width << ", "
<< data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Do not allow too large data_width and data_height. header invalid?
{
const int threshold = 1024 * 8192; // heuristics
if ((data_width > threshold) || (data_height > threshold)) {
if (err) {
std::stringstream ss;
ss << "data_width or data_height too large. data_width: " << data_width
<< ", "
<< "data_height = " << data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
}
size_t num_blocks = offsets.size();
std::vector<size_t> channel_offset_list;
int pixel_data_size = 0;
size_t channel_offset = 0;
if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
&channel_offset, num_channels,
exr_header->channels)) {
if (err) {
(*err) += "Failed to compute channel layout.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety.
if (exr_header->tiled) {
// value check
if (exr_header->tile_size_x < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
if (exr_header->tile_size_y < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
size_t num_tiles = offsets.size(); // = # of blocks
exr_image->tiles = static_cast<EXRTile *>(
calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));
for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
// Allocate memory for each tile.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
exr_header->tile_size_x, exr_header->tile_size_y);
// 16 byte: tile coordinates
// 4 byte : data size
// ~ : data(uncompressed or compressed)
if (offsets[tile_idx] + sizeof(int) * 5 > size) {
if (err) {
(*err) += "Insufficient data size.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
size_t data_size = size_t(size - (offsets[tile_idx] + sizeof(int) * 5));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]);
int tile_coordinates[4];
memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3]));
// @todo{ LoD }
if (tile_coordinates[2] != 0) {
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
if (tile_coordinates[3] != 0) {
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
int data_len;
memcpy(&data_len, data_ptr + 16,
sizeof(int)); // 16 = sizeof(tile_coordinates)
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len < 4 || size_t(data_len) > data_size) {
if (err) {
(*err) += "Insufficient data length.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Move to data addr: 20 = 16 + 4;
data_ptr += 20;
tinyexr::DecodeTiledPixelData(
exr_image->tiles[tile_idx].images,
&(exr_image->tiles[tile_idx].width),
&(exr_image->tiles[tile_idx].height),
exr_header->requested_pixel_types, data_ptr,
static_cast<size_t>(data_len), exr_header->compression_type,
exr_header->line_order, data_width, data_height, tile_coordinates[0],
tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y,
static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels), exr_header->channels,
channel_offset_list);
exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
exr_image->num_tiles = static_cast<int>(num_tiles);
}
} else { // scanline format
// Don't allow too large an image (256GB * pixel_data_size bytes or more).
// Workaround for #104.
size_t total_data_len =
size_t(data_width) * size_t(data_height) * size_t(num_channels);
const bool total_data_len_overflown = sizeof(void*) == 8 ? (total_data_len >= 0x4000000000) : false;
if ((total_data_len == 0) || total_data_len_overflown ) {
if (err) {
std::stringstream ss;
ss << "Image data size is zero or too large: width = " << data_width
<< ", height = " << data_height << ", channels = " << num_channels
<< std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
exr_image->images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
data_width, data_height);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int y = 0; y < static_cast<int>(num_blocks); y++) {
size_t y_idx = static_cast<size_t>(y);
if (offsets[y_idx] + sizeof(int) * 2 > size) {
invalid_data = true;
} else {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed or compressed)
size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
int line_no;
memcpy(&line_no, data_ptr, sizeof(int));
int data_len;
memcpy(&data_len, data_ptr + 4, sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (size_t(data_len) > data_size) {
invalid_data = true;
} else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
// Scanline number is implausibly large; assume the data is invalid.
// 2 << 20 = 2097152 is a heuristic limit.
invalid_data = true;
} else if (data_len == 0) {
// TODO(syoyo): May be ok to raise the threshold for example `data_len
// < 4`
invalid_data = true;
} else {
// line_no may be negative.
int end_line_no = (std::min)(line_no + num_scanline_blocks,
(exr_header->data_window[3] + 1));
int num_lines = end_line_no - line_no;
if (num_lines <= 0) {
invalid_data = true;
} else {
// Move to data addr: 8 = 4 + 4;
data_ptr += 8;
// Adjust line_no with data_window.bmin.y
// overflow check
tinyexr_int64 lno = static_cast<tinyexr_int64>(line_no) - static_cast<tinyexr_int64>(exr_header->data_window[1]);
if (lno > std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else if (lno < -std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else {
line_no -= exr_header->data_window[1];
}
if (line_no < 0) {
invalid_data = true;
} else {
if (!tinyexr::DecodePixelData(
exr_image->images, exr_header->requested_pixel_types,
data_ptr, static_cast<size_t>(data_len),
exr_header->compression_type, exr_header->line_order,
data_width, data_height, data_width, y, line_no,
num_lines, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list)) {
invalid_data = true;
}
}
}
}
}
} // omp parallel
}
if (invalid_data) {
if (err) {
(*err) += "Invalid data found when decoding pixels.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Overwrite `pixel_type` with `requested_pixel_type`.
{
for (int c = 0; c < exr_header->num_channels; c++) {
exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
}
}
{
exr_image->num_channels = num_channels;
exr_image->width = data_width;
exr_image->height = data_height;
}
return TINYEXR_SUCCESS;
}
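// ReconstructLineOffsets() rebuilds the scanline offset table by walking the
// chunk headers (4-byte scanline number + 4-byte data size) sequentially
// from `marker`. It is used when the stored offset table contains zero
// entries; see OpenEXR's ImfScanLineInputFile.cpp::readLineOffsets() for the
// original recovery logic.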
static bool ReconstructLineOffsets(
std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
const unsigned char *head, const unsigned char *marker, const size_t size) {
assert(head < marker);
assert(offsets->size() == n);
for (size_t i = 0; i < n; i++) {
size_t offset = static_cast<size_t>(marker - head);
// Offset should not exceed whole EXR file/data size.
if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
return false;
}
int y;
unsigned int data_len;
memcpy(&y, marker, sizeof(int));
memcpy(&data_len, marker + 4, sizeof(unsigned int));
if (data_len >= size) {
return false;
}
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
(*offsets)[i] = offset;
marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len)
}
return true;
}
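// DecodeEXRImage() determines the number of blocks (from the chunkCount
// attribute, the tile grid, or the scanline block size), reads the offset
// table that follows the header, attempts to reconstruct it if it looks
// incomplete, and finally decodes all blocks via DecodeChunk().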
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *head,
const unsigned char *marker, const size_t size,
const char **err) {
if (exr_image == NULL || exr_header == NULL || head == NULL ||
marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
int data_width = exr_header->data_window[2] - exr_header->data_window[0];
if (data_width >= std::numeric_limits<int>::max()) {
// Issue 63
tinyexr::SetErrorMessage("Invalid data width value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
data_width++;
int data_height = exr_header->data_window[3] - exr_header->data_window[1];
if (data_height >= std::numeric_limits<int>::max()) {
tinyexr::SetErrorMessage("Invalid data height value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
data_height++;
if ((data_width < 0) || (data_height < 0)) {
tinyexr::SetErrorMessage("data width or data height is negative.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
// Do not allow too large data_width and data_height. header invalid?
{
const int threshold = 1024 * 8192; // heuristics
if (data_width > threshold) {
tinyexr::SetErrorMessage("data width too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
if (data_height > threshold) {
tinyexr::SetErrorMessage("data height too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
// Read offset tables.
size_t num_blocks = 0;
if (exr_header->chunk_count > 0) {
// Use `chunkCount` attribute.
num_blocks = static_cast<size_t>(exr_header->chunk_count);
} else if (exr_header->tiled) {
// @todo { LoD }
size_t num_x_tiles = static_cast<size_t>(data_width) /
static_cast<size_t>(exr_header->tile_size_x);
if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) <
static_cast<size_t>(data_width)) {
num_x_tiles++;
}
size_t num_y_tiles = static_cast<size_t>(data_height) /
static_cast<size_t>(exr_header->tile_size_y);
if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) <
static_cast<size_t>(data_height)) {
num_y_tiles++;
}
num_blocks = num_x_tiles * num_y_tiles;
} else {
num_blocks = static_cast<size_t>(data_height) /
static_cast<size_t>(num_scanline_blocks);
if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
static_cast<size_t>(data_height)) {
num_blocks++;
}
}
std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks);
for (size_t y = 0; y < num_blocks; y++) {
tinyexr::tinyexr_uint64 offset;
// Issue #81
if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
offsets[y] = offset;
}
  // If the stored line offsets are invalid, try to reconstruct them.
// See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
for (size_t y = 0; y < num_blocks; y++) {
if (offsets[y] <= 0) {
// TODO(syoyo) Report as warning?
// if (err) {
// stringstream ss;
// ss << "Incomplete lineOffsets." << std::endl;
// (*err) += ss.str();
//}
bool ret =
ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
if (ret) {
// OK
break;
} else {
tinyexr::SetErrorMessage(
"Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
{
std::string e;
int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
// release memory(if exists)
if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
if (exr_image->images[c]) {
free(exr_image->images[c]);
exr_image->images[c] = NULL;
}
}
free(exr_image->images);
exr_image->images = NULL;
}
}
return ret;
}
}
} // namespace tinyexr
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
const char **err) {
if (out_rgba == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
InitEXRImage(&exr_image);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage("Invalid EXR header.", err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
{
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
{
int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
for (int c = 0; c < exr_header.num_channels; c++) {
if (strcmp(exr_header.channels[c].name, "R") == 0) {
idxR = c;
} else if (strcmp(exr_header.channels[c].name, "G") == 0) {
idxG = c;
} else if (strcmp(exr_header.channels[c].name, "B") == 0) {
idxB = c;
} else if (strcmp(exr_header.channels[c].name, "A") == 0) {
idxA = c;
}
}
if (exr_header.num_channels == 1) {
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[0][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// Assume RGB(A)
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
// @todo { free exr_image }
FreeEXRHeader(&exr_header);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
// @todo { free exr_image }
FreeEXRHeader(&exr_header);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
// @todo { free exr_image }
FreeEXRHeader(&exr_header);
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
(*out_rgba)[4 * idx + 3] = 1.0;
}
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int IsEXR(const char *filename) {
EXRVersion exr_version;
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return TINYEXR_ERROR_INVALID_HEADER;
}
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_header == NULL) {
tinyexr::SetErrorMessage(
"Invalid argument. `memory` or `exr_header` argument is null in "
"ParseEXRHeaderFromMemory()",
err);
// Invalid argument
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
if (err && !err_str.empty()) {
tinyexr::SetErrorMessage(err_str, err);
}
}
ConvertHeader(exr_header, info);
  // transfer `tiled` from version.
exr_header->tiled = version->tiled;
return ret;
}
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err) {
if (out_rgba == NULL || memory == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage("Failed to parse EXR version", err);
return ret;
}
ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRHeader(&exr_header);
    return ret;
  }
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
InitEXRImage(&exr_image);
ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRHeader(&exr_header);
    return ret;
  }
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
for (int c = 0; c < exr_header.num_channels; c++) {
if (strcmp(exr_header.channels[c].name, "R") == 0) {
idxR = c;
} else if (strcmp(exr_header.channels[c].name, "G") == 0) {
idxG = c;
} else if (strcmp(exr_header.channels[c].name, "B") == 0) {
idxB = c;
} else if (strcmp(exr_header.channels[c].name, "A") == 0) {
idxA = c;
}
}
// TODO(syoyo): Refactor removing same code as used in LoadEXR().
if (exr_header.num_channels == 1) {
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[0][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// TODO(syoyo): Support non RGBA image.
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++)
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
(*out_rgba)[4 * idx + 3] = 1.0;
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
  if (filesize < 16) {
    fclose(fp);
    tinyexr::SetErrorMessage("File size too short " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
(void)ret;
}
return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
err);
}
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *memory, const size_t size,
const char **err) {
if (exr_image == NULL || memory == NULL ||
(size < tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->header_len == 0) {
tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
const unsigned char *head = memory;
const unsigned char *marker = reinterpret_cast<const unsigned char *>(
memory + exr_header->header_len +
8); // +8 for magic number + version header.
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
err);
}
size_t SaveEXRImageToMemory(const EXRImage *exr_image,
const EXRHeader *exr_header,
unsigned char **memory_out, const char **err) {
if (exr_image == NULL || memory_out == NULL ||
exr_header->compression_type < 0) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err);
return 0;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
err);
return 0;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
err);
return 0;
}
#endif
#if TINYEXR_USE_ZFP
for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) {
if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) {
tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
err);
return 0;
}
}
#endif
std::vector<unsigned char> memory;
// Header
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
memory.insert(memory.end(), header, header + 4);
}
// Version, scanline.
{
char marker[] = {2, 0, 0, 0};
/* @todo
if (exr_header->tiled) {
marker[1] |= 0x2;
}
if (exr_header->long_name) {
marker[1] |= 0x4;
}
if (exr_header->non_image) {
marker[1] |= 0x8;
}
if (exr_header->multipart) {
marker[1] |= 0x10;
}
*/
memory.insert(memory.end(), marker, marker + 4);
}
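  // num_scanlines = scanlines written per chunk for the chosen compression:
  // 1 (NONE/RLE/ZIPS), 16 (ZIP/ZFP) or 32 (PIZ).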
int num_scanlines = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanlines = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanlines = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanlines = 16;
}
// Write attributes.
std::vector<tinyexr::ChannelInfo> channels;
{
std::vector<unsigned char> data;
for (int c = 0; c < exr_header->num_channels; c++) {
tinyexr::ChannelInfo info;
info.p_linear = 0;
info.pixel_type = exr_header->requested_pixel_types[c];
info.x_sampling = 1;
info.y_sampling = 1;
info.name = std::string(exr_header->channels[c].name);
channels.push_back(info);
}
tinyexr::WriteChannelInfo(data, channels);
tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
static_cast<int>(data.size()));
}
{
int comp = exr_header->compression_type;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp));
tinyexr::WriteAttributeToMemory(
&memory, "compression", "compression",
reinterpret_cast<const unsigned char *>(&comp), 1);
}
{
int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1};
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3]));
tinyexr::WriteAttributeToMemory(
&memory, "dataWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
tinyexr::WriteAttributeToMemory(
&memory, "displayWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
}
{
unsigned char line_order = 0; // @fixme { read line_order from EXRHeader }
tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
&line_order, 1);
}
{
float aspectRatio = 1.0f;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio));
tinyexr::WriteAttributeToMemory(
&memory, "pixelAspectRatio", "float",
reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float));
}
{
float center[2] = {0.0f, 0.0f};
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[0]));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1]));
tinyexr::WriteAttributeToMemory(
&memory, "screenWindowCenter", "v2f",
reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float));
}
{
float w = static_cast<float>(exr_image->width);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
reinterpret_cast<const unsigned char *>(&w),
sizeof(float));
}
// Custom attributes
if (exr_header->num_custom_attributes > 0) {
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
tinyexr::WriteAttributeToMemory(
&memory, exr_header->custom_attributes[i].name,
exr_header->custom_attributes[i].type,
reinterpret_cast<const unsigned char *>(
exr_header->custom_attributes[i].value),
exr_header->custom_attributes[i].size);
}
}
{ // end of header
unsigned char e = 0;
memory.push_back(e);
}
int num_blocks = exr_image->height / num_scanlines;
if (num_blocks * num_scanlines < exr_image->height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks));
size_t headerSize = memory.size();
tinyexr::tinyexr_uint64 offset =
headerSize +
static_cast<size_t>(num_blocks) *
sizeof(
tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable)
std::vector<std::vector<unsigned char> > data_list(
static_cast<size_t>(num_blocks));
std::vector<size_t> channel_offset_list(
static_cast<size_t>(exr_header->num_channels));
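  // pixel_data_size = bytes per pixel summed over all channels;
  // channel_offset_list[c] = cumulative byte offset of channel c (channel data
  // is stored channel by channel, planar, within each scanline block).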
int pixel_data_size = 0;
size_t channel_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
channel_offset_list[c] = channel_offset;
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
pixel_data_size += sizeof(unsigned short);
channel_offset += sizeof(unsigned short);
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
pixel_data_size += sizeof(float);
channel_offset += sizeof(float);
} else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
pixel_data_size += sizeof(unsigned int);
channel_offset += sizeof(unsigned int);
} else {
assert(0);
}
}
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
// Use ZFP compression parameter from custom attributes(if such a parameter
// exists)
{
bool ret = tinyexr::FindZFPCompressionParam(
&zfp_compression_param, exr_header->custom_attributes,
exr_header->num_custom_attributes);
if (!ret) {
// Use predefined compression parameter.
zfp_compression_param.type = 0;
zfp_compression_param.rate = 2;
}
}
#endif
// Use signed int since some OpenMP compilers don't allow an unsigned loop
// variable in `parallel for`
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_blocks; i++) {
size_t ii = static_cast<size_t>(i);
int start_y = num_scanlines * i;
int endY = (std::min)(num_scanlines * (i + 1), exr_image->height);
int h = endY - start_y;
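    // This chunk covers scanlines [start_y, endY); its h rows are serialized
    // into `buf` for all channels before optional compression.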
std::vector<unsigned char> buf(
static_cast<size_t>(exr_image->width * h * pixel_data_size));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP16 h16;
h16.u = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP32 f32 = half_to_float(h16);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f));
// line_ptr[x] = f32.f;
tinyexr::cpy4(line_ptr + x, &(f32.f));
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
unsigned short val = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap2(&val);
// line_ptr[x] = val;
tinyexr::cpy2(line_ptr + x, &val);
}
}
} else {
assert(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP32 f32;
f32.f = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP16 h16;
h16 = float_to_half_full(f32);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));
// line_ptr[x] = h16.u;
tinyexr::cpy2(line_ptr + x, &(h16.u));
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
float val = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
} else {
assert(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] * static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
unsigned int val = reinterpret_cast<unsigned int **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(&val);
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
}
}
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(buf.size());
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), buf.begin(),
buf.begin() + data_len);
} else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound(
static_cast<unsigned long>(buf.size())));
#else
std::vector<unsigned char> block(
compressBound(static_cast<uLong>(buf.size())));
#endif
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressZip(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// (buf.size() * 3) / 2 would be enough.
std::vector<unsigned char> block((buf.size() * 3) / 2);
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressRle(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
unsigned int bufLen =
8192 + static_cast<unsigned int>(
2 * static_cast<unsigned int>(
buf.size())); // @fixme { compute good bound. }
std::vector<unsigned char> block(bufLen);
unsigned int outSize = static_cast<unsigned int>(block.size());
CompressPiz(&block.at(0), &outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
buf.size(), channels, exr_image->width, h);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
assert(0);
#endif
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
std::vector<unsigned char> block;
unsigned int outSize;
tinyexr::CompressZfp(
&block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
exr_image->width, h, exr_header->num_channels, zfp_compression_param);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
assert(0);
#endif
} else {
assert(0);
}
} // omp parallel
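  // Fill the chunk offset table. Each entry is the absolute byte offset of a
  // chunk from the start of the file; the header and the offset table itself
  // precede the chunk data.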
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
offsets[i] = offset;
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
offset += data_list[i].size();
}
size_t totalSize = static_cast<size_t>(offset);
{
memory.insert(
memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)),
reinterpret_cast<unsigned char *>(&offsets.at(0)) +
sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks));
}
if (memory.size() == 0) {
tinyexr::SetErrorMessage("Output memory size is zero", err);
return 0;
}
(*memory_out) = static_cast<unsigned char *>(malloc(totalSize));
memcpy((*memory_out), &memory.at(0), memory.size());
unsigned char *memory_ptr = *memory_out + memory.size();
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
memcpy(memory_ptr, &data_list[i].at(0), data_list[i].size());
memory_ptr += data_list[i].size();
}
return totalSize; // OK
}
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL || filename == NULL ||
exr_header->compression_type < 0) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
err);
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
err);
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
#endif
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "wb");
#else
FILE *fp = fopen(filename, "wb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot write a file", err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
unsigned char *mem = NULL;
size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
  if (mem_size == 0) {
    fclose(fp);
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }
size_t written_size = 0;
if ((mem_size > 0) && mem) {
written_size = fwrite(mem, 1, mem_size, fp);
}
free(mem);
fclose(fp);
if (written_size != mem_size) {
tinyexr::SetErrorMessage("Cannot write a file", err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
return TINYEXR_SUCCESS;
}
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
if (deep_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _MSC_VER
FILE *fp = NULL;
errno_t errcode = fopen_s(&fp, filename, "rb");
if ((0 != errcode) || (!fp)) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
FILE *fp = fopen(filename, "rb");
if (!fp) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#endif
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize == 0) {
fclose(fp);
tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
(void)ret;
}
fclose(fp);
const char *head = &buf[0];
const char *marker = &buf[0];
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
tinyexr::SetErrorMessage("Invalid magic number", err);
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
// Version, scanline.
{
    // ver 2.0, scanline, deep (non-image) bit set in the second byte (0x8)
    // must be [2, 8, 0, 0]
    if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
tinyexr::SetErrorMessage("Unsupported version or scanline", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
marker += 4;
}
int dx = -1;
int dy = -1;
int dw = -1;
int dh = -1;
int num_scanline_blocks = 1; // 16 for ZIP compression.
int compression_type = -1;
int num_channels = -1;
std::vector<tinyexr::ChannelInfo> channels;
// Read attributes
size_t size = filesize - tinyexr::kEXRVersionSize;
for (;;) {
if (0 == size) {
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
marker++;
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
std::stringstream ss;
ss << "Failed to parse attribute\n";
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (attr_name.compare("compression") == 0) {
compression_type = data[0];
if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
std::stringstream ss;
ss << "Unsupported compression type : " << compression_type;
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
}
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!tinyexr::ReadChannelInfo(channels, data)) {
tinyexr::SetErrorMessage("Failed to parse channel info", err);
return TINYEXR_ERROR_INVALID_DATA;
}
num_channels = static_cast<int>(channels.size());
if (num_channels < 1) {
tinyexr::SetErrorMessage("Invalid channels format", err);
return TINYEXR_ERROR_INVALID_DATA;
}
} else if (attr_name.compare("dataWindow") == 0) {
memcpy(&dx, &data.at(0), sizeof(int));
memcpy(&dy, &data.at(4), sizeof(int));
memcpy(&dw, &data.at(8), sizeof(int));
memcpy(&dh, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh));
} else if (attr_name.compare("displayWindow") == 0) {
int x;
int y;
int w;
int h;
memcpy(&x, &data.at(0), sizeof(int));
memcpy(&y, &data.at(4), sizeof(int));
memcpy(&w, &data.at(8), sizeof(int));
memcpy(&h, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&h));
}
}
assert(dx >= 0);
assert(dy >= 0);
assert(dw >= 0);
assert(dh >= 0);
assert(num_channels >= 1);
int data_width = dw - dx + 1;
int data_height = dh - dy + 1;
std::vector<float> image(
static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA
// Read offset tables.
int num_blocks = data_height / num_scanline_blocks;
if (num_blocks * num_scanline_blocks < data_height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
tinyexr::tinyexr_int64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
marker += sizeof(tinyexr::tinyexr_int64); // = 8
offsets[y] = offset;
}
#if TINYEXR_USE_PIZ
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
// OK
} else {
tinyexr::SetErrorMessage("Unsupported compression format", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
deep_image->image = static_cast<float ***>(
malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
for (int c = 0; c < num_channels; c++) {
deep_image->image[c] = static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
}
}
deep_image->offset_table = static_cast<int **>(
malloc(sizeof(int *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
deep_image->offset_table[y] = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(data_width)));
}
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y]);
// int: y coordinate
// int64: packed size of pixel offset table
// int64: packed size of sample data
// int64: unpacked size of sample data
// compressed pixel offset table
// compressed sample data
int line_no;
tinyexr::tinyexr_int64 packedOffsetTableSize;
tinyexr::tinyexr_int64 packedSampleDataSize;
tinyexr::tinyexr_int64 unpackedSampleDataSize;
memcpy(&line_no, data_ptr, sizeof(int));
memcpy(&packedOffsetTableSize, data_ptr + 4,
sizeof(tinyexr::tinyexr_int64));
memcpy(&packedSampleDataSize, data_ptr + 12,
sizeof(tinyexr::tinyexr_int64));
memcpy(&unpackedSampleDataSize, data_ptr + 20,
sizeof(tinyexr::tinyexr_int64));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));
std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
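    // In deep scanline EXR, the pixel offset table stores a cumulative sample
    // count per pixel, so the last entry times sampleSize equals the unpacked
    // sample data size (checked by the assert further below).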
// decode pixel offset table.
{
unsigned long dstLen =
static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
&dstLen, data_ptr + 28,
static_cast<unsigned long>(packedOffsetTableSize))) {
        return TINYEXR_ERROR_INVALID_DATA;
}
assert(dstLen == pixelOffsetTable.size() * sizeof(int));
for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
deep_image->offset_table[y][i] = pixelOffsetTable[i];
}
}
std::vector<unsigned char> sample_data(
static_cast<size_t>(unpackedSampleDataSize));
// decode sample data.
{
unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
if (dstLen) {
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
data_ptr + 28 + packedOffsetTableSize,
static_cast<unsigned long>(packedSampleDataSize))) {
          return TINYEXR_ERROR_INVALID_DATA;
}
assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
}
}
// decode sample
int sampleSize = -1;
std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
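    // Byte offset of each channel within one deep sample: UINT and FLOAT take
    // 4 bytes, HALF takes 2; the running sum gives the total sample size.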
{
int channel_offset = 0;
for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
channel_offset_list[i] = channel_offset;
if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT
channel_offset += 4;
} else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half
channel_offset += 2;
} else if (channels[i].pixel_type ==
TINYEXR_PIXELTYPE_FLOAT) { // float
channel_offset += 4;
} else {
assert(0);
}
}
sampleSize = channel_offset;
}
assert(sampleSize >= 2);
assert(static_cast<size_t>(
pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
sampleSize) == sample_data.size());
int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;
//
// Alloc memory
//
//
// pixel data is stored as image[channels][pixel_samples]
//
{
tinyexr::tinyexr_uint64 data_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
deep_image->image[c][y] = static_cast<float *>(
malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
if (channels[c].pixel_type == 0) { // UINT
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
unsigned int ui;
unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
&sample_data.at(size_t(data_offset) + x * sizeof(int)));
tinyexr::cpy4(&ui, src_ptr);
deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme
}
data_offset +=
sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
} else if (channels[c].pixel_type == 1) { // half
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
tinyexr::FP16 f16;
const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
&sample_data.at(size_t(data_offset) + x * sizeof(short)));
tinyexr::cpy2(&(f16.u), src_ptr);
tinyexr::FP32 f32 = half_to_float(f16);
deep_image->image[c][y][x] = f32.f;
}
data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
} else { // float
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
float f;
const float *src_ptr = reinterpret_cast<float *>(
&sample_data.at(size_t(data_offset) + x * sizeof(float)));
tinyexr::cpy4(&f, src_ptr);
deep_image->image[c][y][x] = f;
}
data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
}
}
}
} // y
deep_image->width = data_width;
deep_image->height = data_height;
deep_image->channel_names = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
}
deep_image->num_channels = num_channels;
return TINYEXR_SUCCESS;
}
void InitEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return;
}
exr_image->width = 0;
exr_image->height = 0;
exr_image->num_channels = 0;
exr_image->images = NULL;
exr_image->tiles = NULL;
exr_image->num_tiles = 0;
}
void FreeEXRErrorMessage(const char *msg) {
if (msg) {
free(reinterpret_cast<void *>(const_cast<char *>(msg)));
}
return;
}
void InitEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return;
}
memset(exr_header, 0, sizeof(EXRHeader));
}
int FreeEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->channels) {
free(exr_header->channels);
}
if (exr_header->pixel_types) {
free(exr_header->pixel_types);
}
if (exr_header->requested_pixel_types) {
free(exr_header->requested_pixel_types);
}
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
if (exr_header->custom_attributes[i].value) {
free(exr_header->custom_attributes[i].value);
}
}
if (exr_header->custom_attributes) {
free(exr_header->custom_attributes);
}
return TINYEXR_SUCCESS;
}
int FreeEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->images && exr_image->images[i]) {
free(exr_image->images[i]);
}
}
if (exr_image->images) {
free(exr_image->images);
}
if (exr_image->tiles) {
for (int tid = 0; tid < exr_image->num_tiles; tid++) {
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) {
free(exr_image->tiles[tid].images[i]);
}
}
if (exr_image->tiles[tid].images) {
free(exr_image->tiles[tid].images);
}
}
free(exr_image->tiles);
}
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_header == NULL || exr_version == NULL || filename == NULL) {
tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
err);
}
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
int *num_headers,
const EXRVersion *exr_version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
exr_version == NULL) {
// Invalid argument
tinyexr::SetErrorMessage(
"Invalid argument for ParseEXRMultipartHeaderFromMemory", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
tinyexr::SetErrorMessage("Data size too short", err);
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
std::vector<tinyexr::HeaderInfo> infos;
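  // Part headers are stored back to back; an empty header (a single '\0' byte)
  // terminates the header list of a multipart file.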
for (;;) {
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
bool empty_header = false;
int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage(err_str, err);
return ret;
}
if (empty_header) {
marker += 1; // skip '\0'
break;
}
// `chunkCount` must exist in the header.
if (info.chunk_count == 0) {
tinyexr::SetErrorMessage(
"`chunkCount' attribute is not found in the header.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
infos.push_back(info);
// move to next header.
marker += info.header_len;
size -= info.header_len;
}
// allocate memory for EXRHeader and create array of EXRHeader pointers.
(*exr_headers) =
static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
for (size_t i = 0; i < infos.size(); i++) {
EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));
ConvertHeader(exr_header, infos[i]);
    // transfer `tiled` from version.
exr_header->tiled = exr_version->tiled;
(*exr_headers)[i] = exr_header;
}
(*num_headers) = static_cast<int>(infos.size());
return TINYEXR_SUCCESS;
}
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
filename == NULL) {
tinyexr::SetErrorMessage(
"Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRMultipartHeaderFromMemory(
exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
size_t size) {
if (version == NULL || memory == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory;
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
version->tiled = false;
version->long_name = false;
version->non_image = false;
version->multipart = false;
// Parse version header.
{
// must be 2
if (marker[0] != 2) {
return TINYEXR_ERROR_INVALID_EXR_VERSION;
}
version->version = 2;
if (marker[1] & 0x2) { // 9th bit
version->tiled = true;
}
if (marker[1] & 0x4) { // 10th bit
version->long_name = true;
}
if (marker[1] & 0x8) { // 11th bit
version->non_image = true; // (deep image)
}
if (marker[1] & 0x10) { // 12th bit
version->multipart = true;
}
}
return TINYEXR_SUCCESS;
}
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
if (filename == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t file_size;
// Compute size
fseek(fp, 0, SEEK_END);
file_size = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
  if (file_size < tinyexr::kEXRVersionSize) {
    fclose(fp);
    return TINYEXR_ERROR_INVALID_FILE;
  }
unsigned char buf[tinyexr::kEXRVersionSize];
size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
fclose(fp);
if (ret != tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage(
"Invalid argument for LoadEXRMultipartImageFromMemory()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
// compute total header size.
size_t total_header_size = 0;
for (unsigned int i = 0; i < num_parts; i++) {
if (exr_headers[i]->header_len == 0) {
tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
total_header_size += exr_headers[i]->header_len;
}
const char *marker = reinterpret_cast<const char *>(
memory + total_header_size + 4 +
4); // +8 for magic number and version header.
marker += 1; // Skip empty header.
// NOTE 1:
// In multipart image, There is 'part number' before chunk data.
// 4 byte : part number
// 4+ : chunk
//
// NOTE 2:
// EXR spec says 'part number' is 'unsigned long' but actually this is
// 'unsigned int(4 bytes)' in OpenEXR implementation...
// http://www.openexr.com/openexrfilelayout.pdf
// Load chunk offset table.
std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list;
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
std::vector<tinyexr::tinyexr_uint64> offset_table(
static_cast<size_t>(exr_headers[i]->chunk_count));
for (size_t c = 0; c < offset_table.size(); c++) {
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, 8);
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
offset_table[c] = offset + 4; // +4 to skip 'part number'
marker += 8;
}
chunk_offset_table_list.push_back(offset_table);
}
// Decode image.
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
std::vector<tinyexr::tinyexr_uint64> &offset_table =
chunk_offset_table_list[i];
    // First check that 'part number' is identical to 'i'
for (size_t c = 0; c < offset_table.size(); c++) {
const unsigned char *part_number_addr =
memory + offset_table[c] - 4; // -4 to move to 'part number' field.
unsigned int part_no;
memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4
tinyexr::swap4(&part_no);
if (part_no != i) {
tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
std::string e;
int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table,
memory, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
return ret;
}
}
return TINYEXR_SUCCESS;
}
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts, const char *filename,
const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0) {
tinyexr::SetErrorMessage(
"Invalid argument for LoadEXRMultipartImageFromFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
(void)ret;
}
return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
&buf.at(0), filesize, err);
}
int SaveEXR(const float *data, int width, int height, int components,
const int save_as_fp16, const char *outfilename, const char **err) {
if ((components == 1) || components == 3 || components == 4) {
// OK
} else {
std::stringstream ss;
ss << "Unsupported component value : " << components << std::endl;
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
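  // components: 1 = single channel (written as 'A'), 3 = RGB, 4 = RGBA;
  // channels are written in (A)BGR order further below.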
EXRHeader header;
InitEXRHeader(&header);
if ((width < 16) && (height < 16)) {
// No compression for small image.
header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
} else {
header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
}
EXRImage image;
InitEXRImage(&image);
image.num_channels = components;
std::vector<float> images[4];
if (components == 1) {
images[0].resize(static_cast<size_t>(width * height));
memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
} else {
images[0].resize(static_cast<size_t>(width * height));
images[1].resize(static_cast<size_t>(width * height));
images[2].resize(static_cast<size_t>(width * height));
images[3].resize(static_cast<size_t>(width * height));
// Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
images[0][i] = data[static_cast<size_t>(components) * i + 0];
images[1][i] = data[static_cast<size_t>(components) * i + 1];
images[2][i] = data[static_cast<size_t>(components) * i + 2];
if (components == 4) {
images[3][i] = data[static_cast<size_t>(components) * i + 3];
}
}
}
float *image_ptr[4] = {0, 0, 0, 0};
if (components == 4) {
image_ptr[0] = &(images[3].at(0)); // A
image_ptr[1] = &(images[2].at(0)); // B
image_ptr[2] = &(images[1].at(0)); // G
image_ptr[3] = &(images[0].at(0)); // R
} else if (components == 3) {
image_ptr[0] = &(images[2].at(0)); // B
image_ptr[1] = &(images[1].at(0)); // G
image_ptr[2] = &(images[0].at(0)); // R
} else if (components == 1) {
image_ptr[0] = &(images[0].at(0)); // A
}
image.images = reinterpret_cast<unsigned char **>(image_ptr);
image.width = width;
image.height = height;
header.num_channels = components;
header.channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
// Must be (A)BGR order, since most of EXR viewers expect this channel order.
if (components == 4) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
strncpy_s(header.channels[1].name, "B", 255);
strncpy_s(header.channels[2].name, "G", 255);
strncpy_s(header.channels[3].name, "R", 255);
#else
strncpy(header.channels[0].name, "A", 255);
strncpy(header.channels[1].name, "B", 255);
strncpy(header.channels[2].name, "G", 255);
strncpy(header.channels[3].name, "R", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
header.channels[1].name[strlen("B")] = '\0';
header.channels[2].name[strlen("G")] = '\0';
header.channels[3].name[strlen("R")] = '\0';
} else if (components == 3) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "B", 255);
strncpy_s(header.channels[1].name, "G", 255);
strncpy_s(header.channels[2].name, "R", 255);
#else
strncpy(header.channels[0].name, "B", 255);
strncpy(header.channels[1].name, "G", 255);
strncpy(header.channels[2].name, "R", 255);
#endif
header.channels[0].name[strlen("B")] = '\0';
header.channels[1].name[strlen("G")] = '\0';
header.channels[2].name[strlen("R")] = '\0';
} else {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
#else
strncpy(header.channels[0].name, "A", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
}
header.pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
header.requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
for (int i = 0; i < header.num_channels; i++) {
header.pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image
if (save_as_fp16 > 0) {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format
} else {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e.
// no precision reduction)
}
}
  int ret = SaveEXRImageToFile(&image, &header, outfilename, err);
  // Free the header allocations whether or not the save succeeded, so the
  // error path does not leak them.
  free(header.channels);
  free(header.pixel_types);
  free(header.requested_pixel_types);
  return ret;
}
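// Usage sketch (illustrative only, not part of the original library): saving a
// tiny 2x2 RGB image with SaveEXR. The call signature is taken from the
// function above; FreeEXRErrorMessage is assumed to be available in this
// tinyexr revision for releasing messages set via SetErrorMessage.
#if 0
static int SaveSmallEXRExample(void) {
  const float rgb[2 * 2 * 3] = {
      1.f, 0.f, 0.f,  0.f, 1.f, 0.f,
      0.f, 0.f, 1.f,  1.f, 1.f, 1.f};
  const char *err = NULL;
  int ret = SaveEXR(rgb, /* width */ 2, /* height */ 2, /* components */ 3,
                    /* save_as_fp16 */ 0, "out.exr", &err);
  if (ret != TINYEXR_SUCCESS && err) {
    fprintf(stderr, "SaveEXR failed: %s\n", err);
    FreeEXRErrorMessage(err);  // assumed helper; see note above
  }
  return ret;
}
#endif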
#ifdef __clang__
// zero-as-null-pointer-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEIFNED
#endif // TINYEXR_IMPLEMENTATION
|
nvector_openmp.c
|
/*
* -----------------------------------------------------------------
* $Revision: 4272 $
* $Date: 2014-12-02 11:19:41 -0800 (Tue, 02 Dec 2014) $
* -----------------------------------------------------------------
* Programmer(s): David J. Gardner and Carol S. Woodward @ LLNL
* -----------------------------------------------------------------
* Acknowledgements: This NVECTOR module is based on the NVECTOR
* Serial module by Scott D. Cohen, Alan C.
* Hindmarsh, Radu Serban, and Aaron Collier
* @ LLNL
* -----------------------------------------------------------------
* LLNS Copyright Start
* Copyright (c) 2014, Lawrence Livermore National Security
* This work was performed under the auspices of the U.S. Department
* of Energy by Lawrence Livermore National Laboratory in part under
* Contract W-7405-Eng-48 and in part under Contract DE-AC52-07NA27344.
* Produced at the Lawrence Livermore National Laboratory.
* All rights reserved.
* For details, see the LICENSE file.
* LLNS Copyright End
* -----------------------------------------------------------------
* This is the implementation file for an OpenMP implementation
* of the NVECTOR module.
* -----------------------------------------------------------------
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <nvector/nvector_openmp.h>
#include <sundials/sundials_math.h>
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define ONEPT5 RCONST(1.5)
/* Private function prototypes */
/* z=x */
static void VCopy_OpenMP(N_Vector x, N_Vector z);
/* z=x+y */
static void VSum_OpenMP(N_Vector x, N_Vector y, N_Vector z);
/* z=x-y */
static void VDiff_OpenMP(N_Vector x, N_Vector y, N_Vector z);
/* z=-x */
static void VNeg_OpenMP(N_Vector x, N_Vector z);
/* z=c(x+y) */
static void VScaleSum_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z);
/* z=c(x-y) */
static void VScaleDiff_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z);
/* z=ax+y */
static void VLin1_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z);
/* z=ax-y */
static void VLin2_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z);
/* y <- ax+y */
static void Vaxpy_OpenMP(realtype a, N_Vector x, N_Vector y);
/* x <- ax */
static void VScaleBy_OpenMP(realtype a, N_Vector x);
/*
* -----------------------------------------------------------------
* exported functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Function to create a new empty vector
*/
N_Vector N_VNewEmpty_OpenMP(long int length, int num_threads)
{
N_Vector v;
N_Vector_Ops ops;
N_VectorContent_OpenMP content;
/* Create vector */
v = NULL;
v = (N_Vector) malloc(sizeof *v);
if (v == NULL) return(NULL);
/* Create vector operation structure */
ops = NULL;
ops = (N_Vector_Ops) malloc(sizeof(struct _generic_N_Vector_Ops));
if (ops == NULL) { free(v); return(NULL); }
ops->nvclone = N_VClone_OpenMP;
ops->nvcloneempty = N_VCloneEmpty_OpenMP;
ops->nvdestroy = N_VDestroy_OpenMP;
ops->nvspace = N_VSpace_OpenMP;
ops->nvgetarraypointer = N_VGetArrayPointer_OpenMP;
ops->nvsetarraypointer = N_VSetArrayPointer_OpenMP;
ops->nvlinearsum = N_VLinearSum_OpenMP;
ops->nvconst = N_VConst_OpenMP;
ops->nvprod = N_VProd_OpenMP;
ops->nvdiv = N_VDiv_OpenMP;
ops->nvscale = N_VScale_OpenMP;
ops->nvabs = N_VAbs_OpenMP;
ops->nvinv = N_VInv_OpenMP;
ops->nvaddconst = N_VAddConst_OpenMP;
ops->nvdotprod = N_VDotProd_OpenMP;
ops->nvmaxnorm = N_VMaxNorm_OpenMP;
ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMP;
ops->nvwrmsnorm = N_VWrmsNorm_OpenMP;
ops->nvmin = N_VMin_OpenMP;
ops->nvwl2norm = N_VWL2Norm_OpenMP;
ops->nvl1norm = N_VL1Norm_OpenMP;
ops->nvcompare = N_VCompare_OpenMP;
ops->nvinvtest = N_VInvTest_OpenMP;
ops->nvconstrmask = N_VConstrMask_OpenMP;
ops->nvminquotient = N_VMinQuotient_OpenMP;
/* Create content */
content = NULL;
content = (N_VectorContent_OpenMP) malloc(sizeof(struct _N_VectorContent_OpenMP));
if (content == NULL) { free(ops); free(v); return(NULL); }
content->length = length;
content->num_threads = num_threads;
content->own_data = FALSE;
content->data = NULL;
/* Attach content and ops */
v->content = content;
v->ops = ops;
return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a new vector
*/
N_Vector N_VNew_OpenMP(long int length, int num_threads)
{
N_Vector v;
realtype *data;
v = NULL;
v = N_VNewEmpty_OpenMP(length, num_threads);
if (v == NULL) return(NULL);
/* Create data */
if (length > 0) {
/* Allocate memory */
data = NULL;
data = (realtype *) malloc(length * sizeof(realtype));
if(data == NULL) { N_VDestroy_OpenMP(v); return(NULL); }
/* Attach data */
NV_OWN_DATA_OMP(v) = TRUE;
NV_DATA_OMP(v) = data;
}
return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a vector with user data component
*/
N_Vector N_VMake_OpenMP(long int length, realtype *v_data, int num_threads)
{
N_Vector v;
v = NULL;
v = N_VNewEmpty_OpenMP(length, num_threads);
if (v == NULL) return(NULL);
if (length > 0) {
/* Attach data */
NV_OWN_DATA_OMP(v) = FALSE;
NV_DATA_OMP(v) = v_data;
}
return(v);
}
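/* Usage sketch (illustrative only, not part of the original module): create two
 * OpenMP vectors, fill them, take a dot product, and clean up. Assumes a
 * double-precision SUNDIALS build so that 1000 * 1.0 * 2.0 is represented
 * exactly.
 */
#if 0
static void nvector_openmp_example(void)
{
  long int n = 1000;
  int nthreads = 4;
  N_Vector x = N_VNew_OpenMP(n, nthreads);
  N_Vector y = N_VNew_OpenMP(n, nthreads);
  realtype dot;
  N_VConst_OpenMP(RCONST(1.0), x);        /* x[i] = 1.0 */
  N_VConst_OpenMP(RCONST(2.0), y);        /* y[i] = 2.0 */
  dot = N_VDotProd_OpenMP(x, y);          /* dot == 2000.0 */
  (void) dot;
  N_VDestroy_OpenMP(x);
  N_VDestroy_OpenMP(y);
}
#endif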
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors.
*/
N_Vector *N_VCloneVectorArray_OpenMP(int count, N_Vector w)
{
N_Vector *vs;
int j;
if (count <= 0) return(NULL);
vs = NULL;
vs = (N_Vector *) malloc(count * sizeof(N_Vector));
if(vs == NULL) return(NULL);
for (j = 0; j < count; j++) {
vs[j] = NULL;
vs[j] = N_VClone_OpenMP(w);
if (vs[j] == NULL) {
      N_VDestroyVectorArray_OpenMP(vs, j);  /* destroy the j vectors already created */
return(NULL);
}
}
return(vs);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors with NULL data array.
*/
N_Vector *N_VCloneVectorArrayEmpty_OpenMP(int count, N_Vector w)
{
N_Vector *vs;
int j;
if (count <= 0) return(NULL);
vs = NULL;
vs = (N_Vector *) malloc(count * sizeof(N_Vector));
if(vs == NULL) return(NULL);
for (j = 0; j < count; j++) {
vs[j] = NULL;
vs[j] = N_VCloneEmpty_OpenMP(w);
if (vs[j] == NULL) {
      N_VDestroyVectorArray_OpenMP(vs, j);  /* destroy the j vectors already created */
return(NULL);
}
}
return(vs);
}
/* ----------------------------------------------------------------------------
* Function to free an array created with N_VCloneVectorArray_OpenMP
*/
void N_VDestroyVectorArray_OpenMP(N_Vector *vs, int count)
{
int j;
for (j = 0; j < count; j++) N_VDestroy_OpenMP(vs[j]);
free(vs); vs = NULL;
return;
}
/* ----------------------------------------------------------------------------
* Function to print a vector
*/
void N_VPrint_OpenMP(N_Vector x)
{
long int i, N;
realtype *xd;
xd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
for (i = 0; i < N; i++) {
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("%11.8Lg\n", xd[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("%11.8g\n", xd[i]);
#else
printf("%11.8g\n", xd[i]);
#endif
}
printf("\n");
return;
}
/*
* -----------------------------------------------------------------
* implementation of vector operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Create new vector from existing vector without attaching data
*/
N_Vector N_VCloneEmpty_OpenMP(N_Vector w)
{
N_Vector v;
N_Vector_Ops ops;
N_VectorContent_OpenMP content;
if (w == NULL) return(NULL);
/* Create vector */
v = NULL;
v = (N_Vector) malloc(sizeof *v);
if (v == NULL) return(NULL);
/* Create vector operation structure */
ops = NULL;
ops = (N_Vector_Ops) malloc(sizeof(struct _generic_N_Vector_Ops));
if (ops == NULL) { free(v); return(NULL); }
ops->nvclone = w->ops->nvclone;
ops->nvcloneempty = w->ops->nvcloneempty;
ops->nvdestroy = w->ops->nvdestroy;
ops->nvspace = w->ops->nvspace;
ops->nvgetarraypointer = w->ops->nvgetarraypointer;
ops->nvsetarraypointer = w->ops->nvsetarraypointer;
ops->nvlinearsum = w->ops->nvlinearsum;
ops->nvconst = w->ops->nvconst;
ops->nvprod = w->ops->nvprod;
ops->nvdiv = w->ops->nvdiv;
ops->nvscale = w->ops->nvscale;
ops->nvabs = w->ops->nvabs;
ops->nvinv = w->ops->nvinv;
ops->nvaddconst = w->ops->nvaddconst;
ops->nvdotprod = w->ops->nvdotprod;
ops->nvmaxnorm = w->ops->nvmaxnorm;
ops->nvwrmsnormmask = w->ops->nvwrmsnormmask;
ops->nvwrmsnorm = w->ops->nvwrmsnorm;
ops->nvmin = w->ops->nvmin;
ops->nvwl2norm = w->ops->nvwl2norm;
ops->nvl1norm = w->ops->nvl1norm;
ops->nvcompare = w->ops->nvcompare;
ops->nvinvtest = w->ops->nvinvtest;
ops->nvconstrmask = w->ops->nvconstrmask;
ops->nvminquotient = w->ops->nvminquotient;
/* Create content */
content = NULL;
content = (N_VectorContent_OpenMP) malloc(sizeof(struct _N_VectorContent_OpenMP));
if (content == NULL) { free(ops); free(v); return(NULL); }
content->length = NV_LENGTH_OMP(w);
content->num_threads = NV_NUM_THREADS_OMP(w);
content->own_data = FALSE;
content->data = NULL;
/* Attach content and ops */
v->content = content;
v->ops = ops;
return(v);
}
/* ----------------------------------------------------------------------------
* Create new vector from existing vector and attach data
*/
N_Vector N_VClone_OpenMP(N_Vector w)
{
N_Vector v;
realtype *data;
long int length;
v = NULL;
v = N_VCloneEmpty_OpenMP(w);
if (v == NULL) return(NULL);
length = NV_LENGTH_OMP(w);
/* Create data */
if (length > 0) {
/* Allocate memory */
data = NULL;
data = (realtype *) malloc(length * sizeof(realtype));
if(data == NULL) { N_VDestroy_OpenMP(v); return(NULL); }
/* Attach data */
NV_OWN_DATA_OMP(v) = TRUE;
NV_DATA_OMP(v) = data;
}
return(v);
}
/* ----------------------------------------------------------------------------
* Destroy vector and free vector memory
*/
void N_VDestroy_OpenMP(N_Vector v)
{
if (NV_OWN_DATA_OMP(v) == TRUE) {
free(NV_DATA_OMP(v));
NV_DATA_OMP(v) = NULL;
}
free(v->content); v->content = NULL;
free(v->ops); v->ops = NULL;
free(v); v = NULL;
return;
}
/* ----------------------------------------------------------------------------
* Get storage requirement for N_Vector
*/
void N_VSpace_OpenMP(N_Vector v, long int *lrw, long int *liw)
{
*lrw = NV_LENGTH_OMP(v);
*liw = 1;
return;
}
/* ----------------------------------------------------------------------------
* Get vector data pointer
*/
realtype *N_VGetArrayPointer_OpenMP(N_Vector v)
{
return((realtype *) NV_DATA_OMP(v));
}
/* ----------------------------------------------------------------------------
* Set vector data pointer
*/
void N_VSetArrayPointer_OpenMP(realtype *v_data, N_Vector v)
{
if (NV_LENGTH_OMP(v) > 0) NV_DATA_OMP(v) = v_data;
return;
}
/* ----------------------------------------------------------------------------
* Compute linear combination z[i] = a*x[i]+b*y[i]
*/
void N_VLinearSum_OpenMP(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z)
{
long int i, N;
realtype c, *xd, *yd, *zd;
N_Vector v1, v2;
booleantype test;
xd = yd = zd = NULL;
if ((b == ONE) && (z == y)) { /* BLAS usage: axpy y <- ax+y */
Vaxpy_OpenMP(a,x,y);
return;
}
if ((a == ONE) && (z == x)) { /* BLAS usage: axpy x <- by+x */
Vaxpy_OpenMP(b,y,x);
return;
}
/* Case: a == b == 1.0 */
if ((a == ONE) && (b == ONE)) {
VSum_OpenMP(x, y, z);
return;
}
/* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */
if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
v1 = test ? y : x;
v2 = test ? x : y;
VDiff_OpenMP(v2, v1, z);
return;
}
/* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */
/* if a or b is 0.0, then user should have called N_VScale */
if ((test = (a == ONE)) || (b == ONE)) {
c = test ? b : a;
v1 = test ? y : x;
v2 = test ? x : y;
VLin1_OpenMP(c, v1, v2, z);
return;
}
/* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */
if ((test = (a == -ONE)) || (b == -ONE)) {
c = test ? b : a;
v1 = test ? y : x;
v2 = test ? x : y;
VLin2_OpenMP(c, v1, v2, z);
return;
}
/* Case: a == b */
/* catches case both a and b are 0.0 - user should have called N_VConst */
if (a == b) {
VScaleSum_OpenMP(a, x, y, z);
return;
}
/* Case: a == -b */
if (a == -b) {
VScaleDiff_OpenMP(a, x, y, z);
return;
}
/* Do all cases not handled above:
(1) a == other, b == 0.0 - user should have called N_VScale
(2) a == 0.0, b == other - user should have called N_VScale
(3) a,b == other, a !=b, a != -b */
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,a,b,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = (a*xd[i])+(b*yd[i]);
return;
}
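/* Dispatch examples (a reading aid derived from the cases above, not part of
 * the original source):
 *   N_VLinearSum_OpenMP( 2.0, x,  1.0, y, y)  ->  Vaxpy_OpenMP(2.0, x, y)
 *   N_VLinearSum_OpenMP( 1.0, x,  1.0, y, z)  ->  VSum_OpenMP(x, y, z)
 *   N_VLinearSum_OpenMP( 1.0, x, -1.0, y, z)  ->  VDiff_OpenMP(x, y, z)
 *   N_VLinearSum_OpenMP( 3.0, x,  3.0, y, z)  ->  VScaleSum_OpenMP(3.0, x, y, z)
 * Coefficient pairs that match no special case fall through to the general
 * loop z[i] = a*x[i] + b*y[i].
 */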
/* ----------------------------------------------------------------------------
* Assigns constant value to all vector elements, z[i] = c
*/
void N_VConst_OpenMP(realtype c, N_Vector z)
{
long int i, N;
realtype *zd;
zd = NULL;
N = NV_LENGTH_OMP(z);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,c,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(z))
for (i = 0; i < N; i++) zd[i] = c;
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise product z[i] = x[i]*y[i]
*/
void N_VProd_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
long int i, N;
realtype *xd, *yd, *zd;
xd = yd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = xd[i]*yd[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise division z[i] = x[i]/y[i]
*/
void N_VDiv_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
long int i, N;
realtype *xd, *yd, *zd;
xd = yd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = xd[i]/yd[i];
return;
}
/* ----------------------------------------------------------------------------
 * Compute scalar multiplication z[i] = c*x[i]
*/
void N_VScale_OpenMP(realtype c, N_Vector x, N_Vector z)
{
long int i, N;
realtype *xd, *zd;
xd = zd = NULL;
if (z == x) { /* BLAS usage: scale x <- cx */
VScaleBy_OpenMP(c, x);
return;
}
if (c == ONE) {
VCopy_OpenMP(x, z);
} else if (c == -ONE) {
VNeg_OpenMP(x, z);
} else {
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,c,xd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = c*xd[i];
}
return;
}
/* ----------------------------------------------------------------------------
* Compute absolute value of vector components z[i] = SUNRabs(x[i])
*/
void N_VAbs_OpenMP(N_Vector x, N_Vector z)
{
long int i, N;
realtype *xd, *zd;
xd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
zd = NV_DATA_OMP(z);
#pragma omp parallel for schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = SUNRabs(xd[i]);
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = 1 / x[i]
*/
void N_VInv_OpenMP(N_Vector x, N_Vector z)
{
long int i, N;
realtype *xd, *zd;
xd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = ONE/xd[i];
return;
}
/* ----------------------------------------------------------------------------
 * Compute componentwise addition of a scalar to a vector z[i] = x[i] + b
*/
void N_VAddConst_OpenMP(N_Vector x, realtype b, N_Vector z)
{
long int i, N;
realtype *xd, *zd;
xd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,b,xd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = xd[i]+b;
return;
}
/* ----------------------------------------------------------------------------
* Computes the dot product of two vectors, a = sum(x[i]*y[i])
*/
realtype N_VDotProd_OpenMP(N_Vector x, N_Vector y)
{
long int i, N;
realtype sum, *xd, *yd;
sum = ZERO;
xd = yd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
#pragma omp parallel for default(none) private(i) shared(N,xd,yd) \
reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++) {
sum += xd[i]*yd[i];
}
return(sum);
}
/* ----------------------------------------------------------------------------
* Computes max norm of a vector
*/
realtype N_VMaxNorm_OpenMP(N_Vector x)
{
long int i, N;
realtype tmax, max, *xd;
max = ZERO;
xd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
#pragma omp parallel default(none) private(i,tmax) shared(N,max,xd) \
num_threads(NV_NUM_THREADS_OMP(x))
{
tmax = ZERO;
#pragma omp for schedule(static)
for (i = 0; i < N; i++) {
if (SUNRabs(xd[i]) > tmax) tmax = SUNRabs(xd[i]);
}
#pragma omp critical
{
if (tmax > max)
max = tmax;
}
}
return(max);
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a vector
*/
realtype N_VWrmsNorm_OpenMP(N_Vector x, N_Vector w)
{
long int i, N;
realtype sum, *xd, *wd;
sum = ZERO;
xd = wd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
wd = NV_DATA_OMP(w);
#pragma omp parallel for default(none) private(i) shared(N,xd,wd) \
reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++) {
sum += SUNSQR(xd[i]*wd[i]);
}
return(SUNRsqrt(sum/N));
}
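/* In formula form (a reading aid, not part of the original source):
 *   N_VWrmsNorm_OpenMP(x, w) = sqrt( (1/N) * sum_{i=0..N-1} (x[i]*w[i])^2 )
 * and the masked variant below restricts the sum to indices with id[i] > 0.
 */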
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a masked vector
*/
realtype N_VWrmsNormMask_OpenMP(N_Vector x, N_Vector w, N_Vector id)
{
long int i, N;
realtype sum, *xd, *wd, *idd;
sum = ZERO;
xd = wd = idd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
wd = NV_DATA_OMP(w);
idd = NV_DATA_OMP(id);
#pragma omp parallel for default(none) private(i) shared(N,xd,wd,idd) \
reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++) {
if (idd[i] > ZERO) {
sum += SUNSQR(xd[i]*wd[i]);
}
}
return(SUNRsqrt(sum / N));
}
/* ----------------------------------------------------------------------------
 * Finds the minimum component of a vector
*/
realtype N_VMin_OpenMP(N_Vector x)
{
long int i, N;
realtype min, *xd;
realtype tmin;
xd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
min = xd[0];
#pragma omp parallel default(none) private(i,tmin) shared(N,min,xd) \
num_threads(NV_NUM_THREADS_OMP(x))
{
tmin = xd[0];
#pragma omp for schedule(static)
for (i = 1; i < N; i++) {
if (xd[i] < tmin) tmin = xd[i];
}
if (tmin < min) {
#pragma omp critical
{
if (tmin < min) min = tmin;
}
}
}
return(min);
}
/* ----------------------------------------------------------------------------
* Computes weighted L2 norm of a vector
*/
realtype N_VWL2Norm_OpenMP(N_Vector x, N_Vector w)
{
long int i, N;
realtype sum, *xd, *wd;
sum = ZERO;
xd = wd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
wd = NV_DATA_OMP(w);
#pragma omp parallel for default(none) private(i) shared(N,xd,wd) \
reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++) {
sum += SUNSQR(xd[i]*wd[i]);
}
return(SUNRsqrt(sum));
}
/* ----------------------------------------------------------------------------
* Computes L1 norm of a vector
*/
realtype N_VL1Norm_OpenMP(N_Vector x)
{
long int i, N;
realtype sum, *xd;
sum = ZERO;
xd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
#pragma omp parallel for default(none) private(i) shared(N,xd) \
reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i<N; i++)
sum += SUNRabs(xd[i]);
return(sum);
}
/* ----------------------------------------------------------------------------
 * Compare vector component values to a scalar
*/
void N_VCompare_OpenMP(realtype c, N_Vector x, N_Vector z)
{
long int i, N;
realtype *xd, *zd;
xd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,c,xd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++) {
zd[i] = (SUNRabs(xd[i]) >= c) ? ONE : ZERO;
}
return;
}
/* ----------------------------------------------------------------------------
 * Compute componentwise inverse z[i] = ONE/x[i] and check if x[i] == ZERO
*/
booleantype N_VInvTest_OpenMP(N_Vector x, N_Vector z)
{
long int i, N;
realtype *xd, *zd, val;
xd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
zd = NV_DATA_OMP(z);
val = ZERO;
#pragma omp parallel for default(none) private(i) shared(N,val,xd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++) {
if (xd[i] == ZERO)
val = ONE;
else
zd[i] = ONE/xd[i];
}
if (val > ZERO)
return (FALSE);
else
return (TRUE);
}
/* ----------------------------------------------------------------------------
* Compute constraint mask of a vector
*/
booleantype N_VConstrMask_OpenMP(N_Vector c, N_Vector x, N_Vector m)
{
long int i, N;
realtype temp;
realtype *cd, *xd, *md;
cd = xd = md = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
cd = NV_DATA_OMP(c);
md = NV_DATA_OMP(m);
temp = ONE;
#pragma omp parallel for default(none) private(i) shared(N,xd,cd,md,temp) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++) {
md[i] = ZERO;
if (cd[i] == ZERO) continue;
if (cd[i] > ONEPT5 || cd[i] < -ONEPT5) {
if ( xd[i]*cd[i] <= ZERO) { temp = ZERO; md[i] = ONE; }
continue;
}
if ( cd[i] > HALF || cd[i] < -HALF) {
if (xd[i]*cd[i] < ZERO ) { temp = ZERO; md[i] = ONE; }
}
}
if (temp == ONE) return (TRUE);
else return(FALSE);
}
/* ----------------------------------------------------------------------------
* Compute minimum componentwise quotient
*/
realtype N_VMinQuotient_OpenMP(N_Vector num, N_Vector denom)
{
long int i, N;
realtype *nd, *dd, min, tmin, val;
nd = dd = NULL;
N = NV_LENGTH_OMP(num);
nd = NV_DATA_OMP(num);
dd = NV_DATA_OMP(denom);
min = BIG_REAL;
#pragma omp parallel default(none) private(i,tmin,val) shared(N,min,nd,dd) \
num_threads(NV_NUM_THREADS_OMP(num))
{
tmin = BIG_REAL;
#pragma omp for schedule(static)
for (i = 0; i < N; i++) {
if (dd[i] != ZERO) {
val = nd[i]/dd[i];
if (val < tmin) tmin = val;
}
}
if (tmin < min) {
#pragma omp critical
{
if (tmin < min) min = tmin;
}
}
}
return(min);
}
/*
* -----------------------------------------------------------------
* private functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Copy vector components into a second vector
*/
static void VCopy_OpenMP(N_Vector x, N_Vector z)
{
long int i, N;
realtype *xd, *zd;
xd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = xd[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute vector sum
*/
static void VSum_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
long int i, N;
realtype *xd, *yd, *zd;
xd = yd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = xd[i]+yd[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute vector difference
*/
static void VDiff_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
long int i, N;
realtype *xd, *yd, *zd;
xd = yd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = xd[i]-yd[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute the negative of a vector
*/
static void VNeg_OpenMP(N_Vector x, N_Vector z)
{
long int i, N;
realtype *xd, *zd;
xd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = -xd[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute scaled vector sum
*/
static void VScaleSum_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
long int i, N;
realtype *xd, *yd, *zd;
xd = yd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,c,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = c*(xd[i]+yd[i]);
return;
}
/* ----------------------------------------------------------------------------
* Compute scaled vector difference
*/
static void VScaleDiff_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
long int i, N;
realtype *xd, *yd, *zd;
xd = yd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,c,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = c*(xd[i]-yd[i]);
return;
}
/* ----------------------------------------------------------------------------
* Compute vector sum z[i] = a*x[i]+y[i]
*/
static void VLin1_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
long int i, N;
realtype *xd, *yd, *zd;
xd = yd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,a,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = (a*xd[i])+yd[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute vector difference z[i] = a*x[i]-y[i]
*/
static void VLin2_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
long int i, N;
realtype *xd, *yd, *zd;
xd = yd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,a,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = (a*xd[i])-yd[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute special cases of linear sum
*/
static void Vaxpy_OpenMP(realtype a, N_Vector x, N_Vector y)
{
long int i, N;
realtype *xd, *yd;
xd = yd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
if (a == ONE) {
#pragma omp parallel for default(none) private(i) shared(N,xd,yd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
yd[i] += xd[i];
return;
}
if (a == -ONE) {
#pragma omp parallel for default(none) private(i) shared(N,xd,yd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
yd[i] -= xd[i];
return;
}
#pragma omp parallel for default(none) private(i) shared(N,a,xd,yd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
yd[i] += a*xd[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute scaled vector x[i] = a*x[i]
*/
static void VScaleBy_OpenMP(realtype a, N_Vector x)
{
long int i, N;
realtype *xd;
xd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
#pragma omp parallel for default(none) private(i) shared(N,a,xd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
xd[i] *= a;
return;
}
|
69c979fee_ac_so4.c
|
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include <stdio.h>
#include "omp.h"
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
struct dataobj
{
void *restrict data;
int *size;
int *npsize;
int *dsize;
int *hsize;
int *hofs;
int *oofs;
};
struct profiler
{
double section0;
double section1;
};
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw);
int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine, struct profiler *timers)
{
int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
/* Flush denormal numbers to zero in hardware */
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
int xb_size = block_sizes[0];
int yb_size = block_sizes[1];
int x0_blk0_size = block_sizes[2];
int y0_blk0_size = block_sizes[3];
printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);
//for (int time = time_m, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3); time <= time_M; time += 1, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3))
//{
int sf = 2;
int t_blk_size = 2 * sf * (time_M - time_m);
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
{
for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1)
{
for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1)
{
for (int time = t_blk, t0 = (time + 2) % (3), t1 = (time) % (3), t2 = (time + 1) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3), t1 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3))
{
int tw = ((time / sf) % (time_M - time_m + 1));
bf0(damp_vec, dt, u_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, source_id_vec, source_mask_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, time, tw);
//bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
//bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, nthreads);
//bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
}
for (int time = time_m, t2 = (time + 1) % (3); time <= time_M; time += 1, t2 = (time + 1) % (3))
{
struct timeval start_section1, end_section1;
gettimeofday(&start_section1, NULL);
/* Begin section1 */
/* End section1 */
gettimeofday(&end_section1, NULL);
timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000;
}
return 0;
}
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw)
{
float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
if (x0_blk0_size == 0 || y0_blk0_size == 0)
{
return;
}
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(2) schedule(dynamic, 1)
for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
{
for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++)
{
for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++)
{
#pragma omp simd aligned(damp, u, vp : 32)
for (int z = z_m; z <= z_M; z += 1)
{
float r8 = 1.0/dt;
float r7 = 1.0/(dt*dt);
float r6 = 1.0/(vp[x - time + 4][y - time + 4][z + 4]*vp[x - time + 4][y - time + 4][z + 4]);
u[t2][x - time + 4][y - time + 4][z + 4] = (r6*(-r7*(u[t0][x - time + 4][y - time + 4][z + 4] - 2.0F*u[t1][x - time + 4][y - time + 4][z + 4])) + r8*(damp[x - time + 1][y - time + 1][z + 1]*u[t1][x - time + 4][y - time + 4][z + 4]) - 3.70370379e-4F*(u[t1][x - time + 2][y - time + 4][z + 4] + u[t1][x - time + 4][y - time + 2][z + 4] + u[t1][x - time + 4][y - time + 4][z + 2] + u[t1][x - time + 4][y - time + 4][z + 6] + u[t1][x - time + 4][y - time + 6][z + 4] + u[t1][x - time + 6][y - time + 4][z + 4]) + 5.92592607e-3F*(u[t1][x - time + 3][y - time + 4][z + 4] + u[t1][x - time + 4][y - time + 3][z + 4] + u[t1][x - time + 4][y - time + 4][z + 3] + u[t1][x - time + 4][y - time + 4][z + 5] + u[t1][x - time + 4][y - time + 5][z + 4] + u[t1][x - time + 5][y - time + 4][z + 4]) - 3.33333341e-2F*u[t1][x - time + 4][y - time + 4][z + 4])/(r6*r7 + r8*damp[x - time + 1][y - time + 1][z + 1]);
}
#pragma omp simd aligned(damp, u, vp : 32)
for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
{
int zind = sp_source_mask[x - time][y - time][sp_zi];
float r0 = save_src_u[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
u[t2][x - time + 4][y - time + 4][zind + 4] += r0;
}
}
}
}
}
}
}
|
bp.c
|
#include<stdio.h>
#include "gdal.h"
#include<omp.h>
#define NODATA 28768
void usage()
{
printf( "-----------------------------------------\n");
printf( "--Modis Processing chain--Serial code----\n");
printf( "-----------------------------------------\n");
printf( "./ndvi inNDVI inET inPET\n");
printf( "\toutNDVI\n");
printf( "\tDOY Tsw\n");
printf( "-----------------------------------------\n");
printf( "inNDVI\t\tModis MOD13Q1 NDVI 250m\n");
printf( "inET\t\tModis MOD16A2 ET 500m\n");
printf( "inPET\t\tModis MOD16A2 PET 500m\n");
printf( "outNDVI\tQA corrected NDVI output [-]\n");
printf( "DOY\tDay of Year\n");
printf( "Tsw\tTransmissivity single-way [-]\n");
return;
}
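/* Example invocation (hypothetical file names; DOY 185, single-way
 * transmissivity 0.70):
 *   ./ndvi MOD13Q1_NDVI_250m.tif MOD16A2_ET_500m.tif MOD16A2_PET_500m.tif biomass_out.tif 185 0.70
 */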
double biomass(double fpar, double solar_day, double evap_fr, double light_use_ef);
double solar_day(double lat, double doy, double tsw);
int main( int argc, char *argv[] )
{
  if( argc < 7 ) {
usage();
return 1;
}
char *inB3 = argv[1]; //NDVI_QA 250m
char *inB4 = argv[2]; // ET 500m
char *inB5 = argv[3]; // PET 500m
char *bpF = argv[4];
int doy = atoi(argv[5]); // DOY for solar day
float tsw = atof(argv[6]); // TSW for solar day
GDALAllRegister();
GDALDatasetH hD3 = GDALOpen(inB3,GA_ReadOnly);//NDVI 250m
GDALDatasetH hD4 = GDALOpen(inB4,GA_ReadOnly);//ET 500m
GDALDatasetH hD5 = GDALOpen(inB5,GA_ReadOnly);//PET 500m
if(hD3==NULL||hD4==NULL||hD5==NULL){
printf("One or more input files ");
printf("could not be loaded\n");
exit(1);
}
//LOAD 250 m bands and create output
GDALDriverH hDr3 = GDALGetDatasetDriver(hD3);
char **options = NULL;
options = CSLSetNameValue( options, "TILED", "YES" );
options = CSLSetNameValue( options, "COMPRESS", "DEFLATE" );
options = CSLSetNameValue( options, "PREDICTOR", "2" );
GDALDatasetH hDOut = GDALCreateCopy(hDr3,bpF,hD3,FALSE,options,NULL,NULL);
GDALRasterBandH hBOut = GDALGetRasterBand(hDOut,1);
GDALRasterBandH hB3 = GDALGetRasterBand(hD3,1);//NDVI 250m
GDALRasterBandH hB4 = GDALGetRasterBand(hD4,1);//ET 500m
GDALRasterBandH hB5 = GDALGetRasterBand(hD5,1);//PET 500m
double geomx[6]={0.0};
if(GDALGetGeoTransform(hD3,geomx)==CE_None){
/* Do Nothing */
//printf( "Origin (ULx,ULy) = (%.6f,%.6f)\n", geomx[0], geomx[3] );
//printf( "Pixel Size = (%.6f,%.6f)\n", geomx[1], geomx[5] );
//printf( "Rot0 = (%.6f,%.6f)\n", geomx[2], geomx[4] );
} else {
printf("ERROR: Projection acquisition problem from Band1\n");
exit(1);
}
int nX = GDALGetRasterBandXSize(hB3);//From NDVI@250m
int nY = GDALGetRasterBandYSize(hB3);//From NDVI@250m
int16_t *l3 = (int16_t *) malloc(sizeof(int16_t)*nX);
int16_t *l4 = (int16_t *) malloc(sizeof(int16_t)*nX/2);
int16_t *l5 = (int16_t *) malloc(sizeof(int16_t)*nX/2);
unsigned int *lOut = (unsigned int *) malloc(sizeof(unsigned int) *nX);
int row,col;
int minimum=100000.0;
int maximum = 0.0;
for(row=0;row<nY;row++){
GDALRasterIO(hB3,GF_Read,0,row,nX,1,l3,nX,1,GDT_Int16,0,0);
GDALRasterIO(hB4,GF_Read,0,row/2,nX/2,1,l4,nX/2,1,GDT_Int16,0,0);
GDALRasterIO(hB5,GF_Read,0,row/2,nX/2,1,l5,nX/2,1,GDT_Int16,0,0);
  /* min/max reductions (OpenMP 3.1+) remove the data race on minimum/maximum */
  #pragma omp parallel for default(none) \
  private (col) \
  shared (row,geomx,doy,tsw,nX,nY,l3,l4,l5,lOut) \
  reduction(min:minimum) reduction(max:maximum)
for(col=0;col<nX;col++){
if(l4[col/2]<32000||l5[col/2]<32000){
double lat=geomx[3]+geomx[4]*col+geomx[5]*row;
double solar = solar_day(lat, doy, tsw );
if(solar<0.0) solar=0.0;
double evapfr = l4[col/2]/(1.0*l5[col/2]);
if(evapfr<0.0) evapfr=0.0;
double fpar=0.0;
fpar = 1.257*(l3[col]/10000.0)-0.161;
if(fpar<0.0) fpar=0.0;
lOut[col]=(unsigned int) 10000.0*biomass(fpar,solar,evapfr,1.0);
if(lOut[col]<minimum)minimum=lOut[col];
if(lOut[col]>maximum)maximum=lOut[col];
}else{
lOut[col] = NODATA;
}
}
#pragma omp barrier
GDALRasterIO(hBOut,GF_Write,0,row,nX,1,lOut,nX,1,GDT_UInt32,0,0);
}
printf("min/max: %f %f [kg/ha/day]\n",minimum/10000.0,maximum/10000.0);
GDALSetRasterNoDataValue(hBOut,NODATA);
GDALSetRasterUnitType(hBOut,"Biomass [kg/ha/day]");
GDALSetDescription(hBOut,"250m 8 Days mean Biomass");
GDALSetMetadataItem(hDOut,"*","",NULL);
  if(l3 != NULL) free(l3);
  if(l4 != NULL) free(l4);
  if(l5 != NULL) free(l5);
  if(lOut != NULL) free(lOut);
GDALClose(hD3);
GDALClose(hD4);
GDALClose(hD5);
GDALClose(hDOut);
return(EXIT_SUCCESS);
}
|
valid.mob2.src.h
|
#pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_64_56_56_64_3_3.h"
#include "gen_ukr_A4B2gemm_1_64_56_56_64_3_3.h"
void testrun(float* A ,float*B, float*C, float*oriB ){
int tid = omp_get_thread_num();
int Nx = 56;
int Ny = 56;
int Nh = 3;
long long Astrides[6] = {0,2,4,6,8,10};
int b1 = 0;
for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
}
}
#pragma omp barrier// begin push button generated block
for(int xy5=0;xy5<3136+0;xy5+=3136)
{
for(int f5=0;f5<64+0;f5+=64)
{
for(int c5=0;c5<64+0;c5+=64)
{
for(int c4=c5;c4<min(64, 64+c5);c4+=32)
{
for(int f4=f5;f4<min(64, 64+f5);f4+=Tf2)
{
for(int xy4=xy5;xy4<min(3136, 3136+xy5);xy4+=3136)
{
for(int c3=c4;c3<min(64, 32+c4);c3+=Tc1)
{
for(int f3=f4;f3<min(64, Tf2+f4);f3+=Tf2)
{
for(int xy3=xy4;xy3<min(3136, 3136+xy4);xy3+=Txy3)
{
for(int xy2=xy3;xy2<min(3136, Txy3+xy3);xy2+=6)
{
for(int f2=f3;f2<min(64, Tf2+f3);f2+=16)
{
for(int c2=c3;c2<min(64, Tc1+c3);c2+=Tc1)
{
for(int c1=c2;c1<min(64, Tc1+c2);c1+=Tc1)
{
for(int xy1=xy2;xy1<min(3136, 6+xy2);xy1+=6)
{
for(int f1=f2;f1<min(64, 16+f2);f1+=16)
{
int ctile=min(Tc1, 64-c1);
int x1=xy1/56;
int y1=xy1%56/1;
int c1_1=c1/1;
int c1_2=c1%1/1;
int kf1_1=f1/16;
int kf1_2=f1%16/1;
int of1_1=f1/1;
int of1_2=f1%1/1;
int offsetA=0+b1*831744+c1_1*12996+2*x1*114+2*y1*1+c1_2*1;
int offsetB=0+kf1_1*9216+c1*144+0*48+0*16+kf1_2*1;
int offsetC=0+b1*200704+of1_1*3136+x1*56+y1*1+of1_2*1;
if(56-y1>=6){
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
else if(56*56-xy1>=6){
for(int sti=56-y1;sti<6;sti+=1)
{
Astrides[sti]+=116;
}
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
for(int sti=56-y1;sti<6;sti+=1)
{
Astrides[sti]-=116;
}
}
else{
cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
// end push button generated block
}
|
mxnet_op.h
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include "./operator_tune.h"
#include "../engine/openmp.h"
#ifdef __CUDACC__
#include "../common/cuda_utils.h"
#endif // __CUDACC__
namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif
template<typename xpu>
int get_num_threads(const int N);
#ifdef __CUDACC__
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
inline cudaDeviceProp cuda_get_device_prop() {
int device;
CUDA_CALL(cudaGetDevice(&device));
cudaDeviceProp deviceProp;
CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
return deviceProp;
}
/*!
* \brief Get the number of blocks for cuda kernel given N
*/
inline int cuda_get_num_blocks(const int N) {
using namespace mshadow::cuda;
return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
}
template<>
inline int get_num_threads<gpu>(const int N) {
using namespace mshadow::cuda;
return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif // __CUDACC__
template<>
inline int get_num_threads<cpu>(const int N) {
return omp_get_max_threads();
}
/*! \brief operator request type switch */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
break; \
case kWriteInplace: \
case kWriteTo: \
{ \
const OpReqType ReqType = kWriteTo; \
{__VA_ARGS__} \
} \
break; \
case kAddTo: \
{ \
const OpReqType ReqType = kAddTo; \
{__VA_ARGS__} \
} \
break; \
default: \
break; \
}
#define MXNET_NDIM_SWITCH(NDim, ndim, ...) \
if (NDim == 0) { \
} else if (NDim == 1) { \
const int ndim = 1; \
{__VA_ARGS__} \
} else if (NDim == 2) { \
const int ndim = 2; \
{__VA_ARGS__} \
} else if (NDim == 3) { \
const int ndim = 3; \
{__VA_ARGS__} \
} else if (NDim == 4) { \
const int ndim = 4; \
{__VA_ARGS__} \
} else if (NDim == 5) { \
const int ndim = 5; \
{__VA_ARGS__} \
} else { \
LOG(FATAL) << "ndim=" << NDim << "too large "; \
}
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
/*!
* \brief assign the val to out according
* to request in Kernel::Launch
* \param out the data to be assigned
* \param req the assignment request
* \param val the value to be assigned to out
* \tparam OType output type
* \tparam VType value type
*/
#define KERNEL_ASSIGN(out, req, val) \
{ \
switch (req) { \
case kNullOp: \
break; \
case kWriteTo: \
case kWriteInplace: \
(out) = (val); \
break; \
case kAddTo: \
(out) += (val); \
break; \
default: \
break; \
} \
}
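/*!
 * Illustrative expansion (not part of the original header): with req == kAddTo,
 * KERNEL_ASSIGN(out[i], req, val) behaves like out[i] += val; with kWriteTo or
 * kWriteInplace it behaves like out[i] = val; and with kNullOp it is a no-op.
 */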
/* \brief Compute flattened index given coordinates and shape. */
template<int ndim>
MSHADOW_XINLINE int ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
int ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
}
return ret;
}
/* Compute coordinates from flattened index given shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const int idx, const Shape<ndim>& shape) {
Shape<ndim> ret;
#pragma unroll
for (int i = ndim-1, j = idx; i >=0; --i) {
int tmp = j / shape[i];
ret[i] = j - tmp*shape[i];
j = tmp;
}
return ret;
}
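/* Worked example (illustrative): for shape = (2,3,4) the row-major strides are
 * (12,4,1), so ravel((1,2,3), (2,3,4)) == 1*12 + 2*4 + 3 == 23 and
 * unravel(23, (2,3,4)) == (1,2,3); the two functions are inverses for
 * in-range coordinates.
 */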
/* Compute dot product of two vectors */
template<int ndim>
MSHADOW_XINLINE int dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
int ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
ret += coord[i] * stride[i];
}
return ret;
}
/* Combining unravel and dot */
template<int ndim>
MSHADOW_XINLINE int unravel_dot(const int idx, const Shape<ndim>& shape,
const Shape<ndim>& stride) {
int ret = 0;
#pragma unroll
for (int i = ndim-1, j = idx; i >=0; --i) {
int tmp = j / shape[i];
ret += (j - tmp*shape[i])*stride[i];
j = tmp;
}
return ret;
}
/* Calculate stride of each dim from shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
Shape<ndim> stride;
index_t cumprod = 1;
#pragma unroll
for (int i = ndim - 1; i >= 0; --i) {
stride[i] = (shape[i] > 1) ? cumprod : 0;
cumprod *= shape[i];
}
return stride;
}
/* Increment coordinates and modify index */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
index_t* idx, const Shape<ndim>& stride) {
++(*coord)[ndim-1];
*idx += stride[ndim-1];
#pragma unroll
for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
(*coord)[i] -= shape[i];
++(*coord)[i-1];
*idx = *idx + stride[i-1] - shape[i] * stride[i];
}
}
/* Increment coordinates and modify index */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
index_t* idx1, const Shape<ndim>& stride1,
index_t* idx2, const Shape<ndim>& stride2) {
++(*coord)[ndim-1];
*idx1 += stride1[ndim-1];
*idx2 += stride2[ndim-1];
#pragma unroll
for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
(*coord)[i] -= shape[i];
++(*coord)[i-1];
*idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
*idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
}
}
/*!
* \brief Simple copy data from one blob to another
* \param to Destination blob
* \param from Source blob
*/
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
CHECK_EQ(from.Size(), to.Size());
CHECK_EQ(from.dev_mask(), to.dev_mask());
MSHADOW_TYPE_SWITCH(to.type_flag_, DType, {
if (to.type_flag_ == from.type_flag_) {
mshadow::Copy(to.FlatTo1D<xpu, DType>(), from.FlatTo1D<xpu, DType>(), s);
} else {
MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, {
to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
})
}
})
}
/*! \brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
/* \brief Backward calc with grad
* \param a - output grad
* \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
* \return input grad
*/
template<typename DType, typename ...Args>
MSHADOW_XINLINE static DType Map(DType a, Args... args) {
return DType(a * GRAD_OP::Map(args...));
}
};
/*! \brief Select assignment operation based upon the req value
* Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
*/
template<typename OP, int req>
struct op_with_req {
typedef OP Operation;
/*! \brief input is one tensor */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
}
/*! \brief inputs are two tensors */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs, const DType *rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief input is tensor and a scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
}
/*! \brief No inputs (ie fill to constant value) */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out) {
KERNEL_ASSIGN(out[i], req, OP::Map());
}
/*! \brief input is single scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(value));
}
/*! \brief inputs are two tensors and a scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out,
const DType *input_1, const DType *input_2, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
}
/*! \brief inputs are three tensors (ie backward grad with binary grad function) */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out,
const DType *input_1,
const DType *input_2,
const DType *input_3) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
}
};
template<typename OP, typename xpu>
struct Kernel;
/*!
* \brief CPU Kernel launcher
* \tparam OP Operator to launch
*/
template<typename OP>
struct Kernel<OP, cpu> {
/*!
* \brief Launch a generic CPU kernel.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
 * \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param dest Destination pointer (used to infer DType)
 * \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename ...Args>
inline static void Launch(mshadow::Stream<cpu> *, const int N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2) {
for (int i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
OP::Map(i, args...);
}
}
#else
for (int i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
}
/*!
* \brief Launch CPU kernel which has OMP tuning data available.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam PRIMITIVE_OP The primitive operation to use for tuning
* \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename PRIMITIVE_OP, typename DType, typename ...Args>
static void LaunchTuned(mshadow::Stream<cpu> *, const int N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
static_cast<size_t>(N), static_cast<size_t>(omp_threads))) {
for (int i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
OP::Map(i, args...);
}
}
#else
for (int i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
}
/*!
* \brief Launch custom-tuned kernel where each thread is set to
* operate on a contiguous partition
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
*/
template<typename ...Args>
inline static void LaunchEx(mshadow::Stream<cpu> *s, const int N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2) {
OP::Map(0, N, args...);
} else {
const int length = (N + omp_threads - 1) / omp_threads;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i += length) {
OP::Map(i, i + length > N ? N - i : length, args...);
}
}
#else
OP::Map(0, N, args...);
#endif
}
};
#ifdef __CUDACC__
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, args...);
}
}
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, 1, args...);
}
}
template<typename OP>
struct Kernel<OP, gpu> {
/*! \brief Launch GPU kernel */
template<typename ...Args>
inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
using namespace mshadow::cuda;
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
N, args...);
}
template<typename ...Args>
inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
using namespace mshadow::cuda;
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel_ex<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
N, args...);
}
};
#endif // __CUDACC__
/*!
* \brief Wrap Kernel<OP, xpu>::Launch* with some special-case helpers
*/
template<typename OP, typename xpu>
struct KernelWrapper {
/*!
   * \brief Launch an 'mshadow_op-type' op (i.e. DType (*)( ... ) { return <operation>; })
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param s Stream object pointer (unused)
* \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename DType, typename ...Args>
MSHADOW_CINLINE static void LaunchMShadowOpEx(mshadow::Stream<xpu> *s,
const int N,
DType *dest,
Args... args) {
mxnet::op::mxnet_op::Kernel<OP, xpu>::template LaunchTuned<
typename OP::Operation, DType>(s, N, dest, args...);
}
/*!
   * \brief Launch an 'mxnet_op-type' op (i.e. void (*)(int N, DType *out, ... ))
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param s Stream object pointer (unused)
* \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename DType, typename ...Args>
MSHADOW_CINLINE static void LaunchMXNetOpEx(mshadow::Stream<xpu> *s,
const int N,
DType *dest,
Args... args) {
mxnet::op::mxnet_op::Kernel<OP, xpu>::template LaunchTuned<OP, DType>(s, N, dest, args...);
}
};
/*!
* \brief Set to immediate scalar value kernel
* \tparam val Scalar immediate
*/
template<int val>
struct set_to_int {
  // mxnet_op version (when used directly with Kernel<>::Launch())
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out) {
out[i] = DType(val);
}
// mshadow_op version (when used with op_with_req<>)
MSHADOW_XINLINE static int Map() {
return val;
}
};
/*!
* \brief Special-case kernel shortcut for setting to zero and one
*/
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
_MXNET_TUNABLE_MXNET_OP_FWD(set_zero); // _ prefix denotes "already in mxnet_op namespace"
_MXNET_TUNABLE_MXNET_OP_FWD(set_one);
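// Usage sketch (added comment): filling a buffer of n elements with zeros via
// the tunable kernel path, assuming `s` is the current stream and `out` points
// to n elements:
//
//   Kernel<set_zero, cpu>::Launch(s, n, out);   // out[i] = DType(0) for all i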
} // namespace mxnet_op
/*!
* \brief Tuning specializations for the simple ops in <mshadow/base.h>
* Basically, this overrides mxnet::op::mxnet_op::Kernel<OP, cpu>::Launch() and
* redirects to mxnet::op::mxnet_op::KernelWrapper<OP, cpu>::Launch????OpEx(),
* which eventually leads back to mxnet::op::mxnet_op::Kernel<OP, cpu>::LaunchTuned()
*/
MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD(mshadow::op::identity)
MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD(mshadow::op::plus)
MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD(mshadow::op::minus)
MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD(mshadow::op::mul)
MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD(mshadow::op::div)
MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD(mshadow::op::right)
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
|
progress.c
|
/*
* Copyright (c) 2009, 2010, 2011, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
* ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
*/
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <stdint.h>
#include <omp.h>
#define GANG_SCHEDULING
#define MEASURE_BARRIER
#define PERIOD 2500000000UL
#define ITERATIONS 10
#define STACK_SIZE (64 * 1024)
static inline uint64_t rdtsc(void)
{
uint64_t eax, edx;
__asm volatile ("rdtsc" : "=a" (eax), "=d" (edx));
return (edx << 32) | eax;
}
int main(int argc, char *argv[])
{
int nthreads;
if(argc == 2) {
nthreads = atoi(argv[1]);
backend_span_domain(14, STACK_SIZE);
bomp_custom_init(NULL);
omp_set_num_threads(nthreads);
} else {
assert(!"Specify number of threads");
}
volatile uint64_t workcnt[32] = { 0 };
uint64_t last = rdtsc();
#ifndef CPU_BOUND
volatile uint64_t exittime[ITERATIONS] = { 0 };
#endif
for(int iter = 0;; iter = (iter + 1) % ITERATIONS) {
#ifdef CPU_BOUND
volatile bool exitnow = false;
#else
#ifdef MEASURE_BARRIER
# define MAXTHREADS 16
# define WORKMAX 5000000
static uint64_t starta[MAXTHREADS][WORKMAX];
#endif
#endif
#ifdef GANG_SCHEDULING
#pragma omp parallel
{
bomp_synchronize();
}
#endif
// Do some work
#pragma omp parallel
for(uint64_t i = 0;; i++) {
#ifndef CPU_BOUND
# ifdef MEASURE_BARRIER
uint64_t lasta = rdtsc();
# endif
# pragma omp barrier
# ifdef MEASURE_BARRIER
if(i < WORKMAX) {
starta[omp_get_thread_num()][i] = rdtsc() - lasta;
}
# endif
#endif
workcnt[omp_get_thread_num()]++;
#pragma omp master
if(rdtsc() >= last + PERIOD) {
printf("%lu: threads %d (%s), progress ", rdtsc(), nthreads, "static");
for(int n = 0; n < 32; n++) {
printf("%lu ", workcnt[n]);
}
printf("\n");
last += PERIOD;
#ifndef CPU_BOUND
if(exittime[iter] == 0) {
exittime[iter] = i + 3;
exittime[(iter + ITERATIONS - 2) % ITERATIONS] = 0;
}
}
if(exittime[iter] != 0 && exittime[iter] == i) {
break;
}
#else
exitnow = true;
}
if(exitnow) {
break;
}
#endif
}
#ifndef CPU_BOUND
static uint64_t hgram[15] = { 0 };
printf("exittime = %lu\n", exittime[iter]);
assert(exittime[iter] <= WORKMAX);
uint64_t endtime = exittime[iter] < WORKMAX ? exittime[iter] : WORKMAX;
for(int i = 0; i < endtime; i++) {
for(int n = 0; n < nthreads; n++) {
uint64_t val = starta[n][i];
for(int j = 0; j < 15; j++) {
val /= 10;
if(val == 0) {
hgram[j]++;
break;
}
}
}
}
uint64_t val = 1;
for(int i = 0; i < 15; i++) {
val *= 10;
printf("%lu\t%lu\n", val, hgram[i]);
}
#endif
}
}
|
z_solve.c
|
//-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB SP code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// [email protected] //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: [email protected] //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header.h"
//---------------------------------------------------------------------
// this function performs the solution of the approximate factorization
// step in the z-direction for all five matrix components
// simultaneously. The Thomas algorithm is employed to solve the
// systems for the z-lines. Boundary conditions are non-periodic
//---------------------------------------------------------------------
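//---------------------------------------------------------------------
// Added note (not in the original NPB source): the forward elimination
// below normalizes each row k by its diagonal, fac1 = 1/lhs[k][i][2],
// then removes the sub-diagonal couplings of the next two rows using
// lhs[k+1][i][1] and lhs[k+2][i][0]; only the last two rows of each
// z-line need special treatment before the back-substitution sweep.
//---------------------------------------------------------------------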
void z_solve()
{
int i, j, k, k1, k2, m;
double ru1, fac1, fac2;
//---------------------------------------------------------------------
// Prepare for z-solve, array redistribution
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(i,j,k,k1,k2,m, \
ru1,fac1,fac2)
for (j = 1; j <= ny2; j++) {
lhsinitj(nz2+1, nx2);
//---------------------------------------------------------------------
// Computes the left hand side for the three z-factors
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// first fill the lhs for the u-eigenvalue
//---------------------------------------------------------------------
for (i = 1; i <= nx2; i++) {
for (k = 0; k <= nz2+1; k++) {
ru1 = c3c4*rho_i[k][j][i];
cv[k] = ws[k][j][i];
rhos[k] = max(max(dz4+con43*ru1, dz5+c1c5*ru1), max(dzmax+ru1, dz1));
}
for (k = 1; k <= nz2; k++) {
lhs[k][i][0] = 0.0;
lhs[k][i][1] = -dttz2 * cv[k-1] - dttz1 * rhos[k-1];
lhs[k][i][2] = 1.0 + c2dttz1 * rhos[k];
lhs[k][i][3] = dttz2 * cv[k+1] - dttz1 * rhos[k+1];
lhs[k][i][4] = 0.0;
}
}
//---------------------------------------------------------------------
// add fourth order dissipation
//---------------------------------------------------------------------
for (i = 1; i <= nx2; i++) {
k = 1;
lhs[k][i][2] = lhs[k][i][2] + comz5;
lhs[k][i][3] = lhs[k][i][3] - comz4;
lhs[k][i][4] = lhs[k][i][4] + comz1;
k = 2;
lhs[k][i][1] = lhs[k][i][1] - comz4;
lhs[k][i][2] = lhs[k][i][2] + comz6;
lhs[k][i][3] = lhs[k][i][3] - comz4;
lhs[k][i][4] = lhs[k][i][4] + comz1;
}
for (k = 3; k <= nz2-2; k++) {
for (i = 1; i <= nx2; i++) {
lhs[k][i][0] = lhs[k][i][0] + comz1;
lhs[k][i][1] = lhs[k][i][1] - comz4;
lhs[k][i][2] = lhs[k][i][2] + comz6;
lhs[k][i][3] = lhs[k][i][3] - comz4;
lhs[k][i][4] = lhs[k][i][4] + comz1;
}
}
for (i = 1; i <= nx2; i++) {
k = nz2-1;
lhs[k][i][0] = lhs[k][i][0] + comz1;
lhs[k][i][1] = lhs[k][i][1] - comz4;
lhs[k][i][2] = lhs[k][i][2] + comz6;
lhs[k][i][3] = lhs[k][i][3] - comz4;
k = nz2;
lhs[k][i][0] = lhs[k][i][0] + comz1;
lhs[k][i][1] = lhs[k][i][1] - comz4;
lhs[k][i][2] = lhs[k][i][2] + comz5;
}
//---------------------------------------------------------------------
// subsequently, fill the other factors (u+c), (u-c)
//---------------------------------------------------------------------
for (k = 1; k <= nz2; k++) {
for (i = 1; i <= nx2; i++) {
lhsp[k][i][0] = lhs[k][i][0];
lhsp[k][i][1] = lhs[k][i][1] - dttz2 * speed[k-1][j][i];
lhsp[k][i][2] = lhs[k][i][2];
lhsp[k][i][3] = lhs[k][i][3] + dttz2 * speed[k+1][j][i];
lhsp[k][i][4] = lhs[k][i][4];
lhsm[k][i][0] = lhs[k][i][0];
lhsm[k][i][1] = lhs[k][i][1] + dttz2 * speed[k-1][j][i];
lhsm[k][i][2] = lhs[k][i][2];
lhsm[k][i][3] = lhs[k][i][3] - dttz2 * speed[k+1][j][i];
lhsm[k][i][4] = lhs[k][i][4];
}
}
//---------------------------------------------------------------------
// FORWARD ELIMINATION
//---------------------------------------------------------------------
for (k = 0; k <= grid_points[2]-3; k++) {
k1 = k + 1;
k2 = k + 2;
for (i = 1; i <= nx2; i++) {
fac1 = 1.0/lhs[k][i][2];
lhs[k][i][3] = fac1*lhs[k][i][3];
lhs[k][i][4] = fac1*lhs[k][i][4];
for (m = 0; m < 3; m++) {
rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
}
lhs[k1][i][2] = lhs[k1][i][2] - lhs[k1][i][1]*lhs[k][i][3];
lhs[k1][i][3] = lhs[k1][i][3] - lhs[k1][i][1]*lhs[k][i][4];
for (m = 0; m < 3; m++) {
rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhs[k1][i][1]*rhs[k][j][i][m];
}
lhs[k2][i][1] = lhs[k2][i][1] - lhs[k2][i][0]*lhs[k][i][3];
lhs[k2][i][2] = lhs[k2][i][2] - lhs[k2][i][0]*lhs[k][i][4];
for (m = 0; m < 3; m++) {
rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhs[k2][i][0]*rhs[k][j][i][m];
}
}
}
//---------------------------------------------------------------------
// The last two rows in this grid block are a bit different,
    // since they do not have two more rows available for the
// elimination of off-diagonal entries
//---------------------------------------------------------------------
k = grid_points[2]-2;
k1 = grid_points[2]-1;
for (i = 1; i <= nx2; i++) {
fac1 = 1.0/lhs[k][i][2];
lhs[k][i][3] = fac1*lhs[k][i][3];
lhs[k][i][4] = fac1*lhs[k][i][4];
for (m = 0; m < 3; m++) {
rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
}
lhs[k1][i][2] = lhs[k1][i][2] - lhs[k1][i][1]*lhs[k][i][3];
lhs[k1][i][3] = lhs[k1][i][3] - lhs[k1][i][1]*lhs[k][i][4];
for (m = 0; m < 3; m++) {
rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhs[k1][i][1]*rhs[k][j][i][m];
}
//---------------------------------------------------------------------
// scale the last row immediately
//---------------------------------------------------------------------
fac2 = 1.0/lhs[k1][i][2];
for (m = 0; m < 3; m++) {
rhs[k1][j][i][m] = fac2*rhs[k1][j][i][m];
}
}
//---------------------------------------------------------------------
    // do the u+c and the u-c factors
//---------------------------------------------------------------------
for (k = 0; k <= grid_points[2]-3; k++) {
k1 = k + 1;
k2 = k + 2;
for (i = 1; i <= nx2; i++) {
m = 3;
fac1 = 1.0/lhsp[k][i][2];
lhsp[k][i][3] = fac1*lhsp[k][i][3];
lhsp[k][i][4] = fac1*lhsp[k][i][4];
rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
lhsp[k1][i][2] = lhsp[k1][i][2] - lhsp[k1][i][1]*lhsp[k][i][3];
lhsp[k1][i][3] = lhsp[k1][i][3] - lhsp[k1][i][1]*lhsp[k][i][4];
rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsp[k1][i][1]*rhs[k][j][i][m];
lhsp[k2][i][1] = lhsp[k2][i][1] - lhsp[k2][i][0]*lhsp[k][i][3];
lhsp[k2][i][2] = lhsp[k2][i][2] - lhsp[k2][i][0]*lhsp[k][i][4];
rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhsp[k2][i][0]*rhs[k][j][i][m];
m = 4;
fac1 = 1.0/lhsm[k][i][2];
lhsm[k][i][3] = fac1*lhsm[k][i][3];
lhsm[k][i][4] = fac1*lhsm[k][i][4];
rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
lhsm[k1][i][2] = lhsm[k1][i][2] - lhsm[k1][i][1]*lhsm[k][i][3];
lhsm[k1][i][3] = lhsm[k1][i][3] - lhsm[k1][i][1]*lhsm[k][i][4];
rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsm[k1][i][1]*rhs[k][j][i][m];
lhsm[k2][i][1] = lhsm[k2][i][1] - lhsm[k2][i][0]*lhsm[k][i][3];
lhsm[k2][i][2] = lhsm[k2][i][2] - lhsm[k2][i][0]*lhsm[k][i][4];
rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhsm[k2][i][0]*rhs[k][j][i][m];
}
}
//---------------------------------------------------------------------
// And again the last two rows separately
//---------------------------------------------------------------------
k = grid_points[2]-2;
k1 = grid_points[2]-1;
for (i = 1; i <= nx2; i++) {
m = 3;
fac1 = 1.0/lhsp[k][i][2];
lhsp[k][i][3] = fac1*lhsp[k][i][3];
lhsp[k][i][4] = fac1*lhsp[k][i][4];
rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
lhsp[k1][i][2] = lhsp[k1][i][2] - lhsp[k1][i][1]*lhsp[k][i][3];
lhsp[k1][i][3] = lhsp[k1][i][3] - lhsp[k1][i][1]*lhsp[k][i][4];
rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsp[k1][i][1]*rhs[k][j][i][m];
m = 4;
fac1 = 1.0/lhsm[k][i][2];
lhsm[k][i][3] = fac1*lhsm[k][i][3];
lhsm[k][i][4] = fac1*lhsm[k][i][4];
rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
lhsm[k1][i][2] = lhsm[k1][i][2] - lhsm[k1][i][1]*lhsm[k][i][3];
lhsm[k1][i][3] = lhsm[k1][i][3] - lhsm[k1][i][1]*lhsm[k][i][4];
rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsm[k1][i][1]*rhs[k][j][i][m];
//---------------------------------------------------------------------
// Scale the last row immediately (some of this is overkill
// if this is the last cell)
//---------------------------------------------------------------------
rhs[k1][j][i][3] = rhs[k1][j][i][3]/lhsp[k1][i][2];
rhs[k1][j][i][4] = rhs[k1][j][i][4]/lhsm[k1][i][2];
}
//---------------------------------------------------------------------
// BACKSUBSTITUTION
//---------------------------------------------------------------------
k = grid_points[2]-2;
k1 = grid_points[2]-1;
for (i = 1; i <= nx2; i++) {
for (m = 0; m < 3; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[k][i][3]*rhs[k1][j][i][m];
}
rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[k][i][3]*rhs[k1][j][i][3];
rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[k][i][3]*rhs[k1][j][i][4];
}
//---------------------------------------------------------------------
// Whether or not this is the last processor, we always have
// to complete the back-substitution
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// The first three factors
//---------------------------------------------------------------------
for (k = grid_points[2]-3; k >= 0; k--) {
k1 = k + 1;
k2 = k + 2;
for (i = 1; i <= nx2; i++) {
for (m = 0; m < 3; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] -
lhs[k][i][3]*rhs[k1][j][i][m] -
lhs[k][i][4]*rhs[k2][j][i][m];
}
//-------------------------------------------------------------------
// And the remaining two
//-------------------------------------------------------------------
rhs[k][j][i][3] = rhs[k][j][i][3] -
lhsp[k][i][3]*rhs[k1][j][i][3] -
lhsp[k][i][4]*rhs[k2][j][i][3];
rhs[k][j][i][4] = rhs[k][j][i][4] -
lhsm[k][i][3]*rhs[k1][j][i][4] -
lhsm[k][i][4]*rhs[k2][j][i][4];
}
}
}
tzetar();
}
|
scan.c
|
#include<math.h>
#include<string.h>
#define N 16
int main(){
int a[N], simd_scan[N], scan_a, scan_b;
for(int i = 0; i < N; i++){
a[i] = i;
simd_scan[i] = 0;
}
scan_a = 0;
scan_b = 10;
#pragma omp simd reduction(inscan, +:scan_a)
for(int i = 0; i < N; i++){
simd_scan[i] = scan_a;
#pragma omp scan exclusive(scan_a)
scan_a += a[i];
scan_b -= a[i];
}
return 0;
}
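/* Added note: with a[i] = i the exclusive scan yields
   simd_scan[i] = 0 + 1 + ... + (i-1) = i*(i-1)/2, i.e.
   {0, 0, 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 66, 78, 91, 105},
   while scan_a finishes at 120 and scan_b at 10 - 120 = -110. */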
|
lca_comms.h
|
/*
//@HEADER
// *****************************************************************************
//
// XtraPuLP: Xtreme-Scale Graph Partitioning using Label Propagation
// Copyright (2016) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact George M. Slota ([email protected])
// Siva Rajamanickam ([email protected])
// Kamesh Madduri ([email protected])
//
// *****************************************************************************
//@HEADER
*/
#ifndef _LCA_COMMS_H_
#define _LCA_COMMS_H_
#include <mpi.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#include "comms.h"
#include "bicc_dist.h"
#include "util.h"
extern int procid, nprocs;
extern bool verbose, debug, verify;
#define MAX_SEND_SIZE 2147483648
#define LCA_THREAD_QUEUE_SIZE 6144
struct lca_thread_data_t {
int32_t tid;
uint64_t* thread_queue;
uint64_t* thread_finish;
uint64_t thread_queue_size;
uint64_t thread_finish_size;
};
struct lca_queue_data_t {
uint64_t* queue;
uint64_t* queue_next;
uint64_t* finish;
uint64_t queue_size;
uint64_t next_size;
uint64_t finish_size;
uint64_t queue_length;
};
inline void init_queue_lca(dist_graph_t* g, lca_queue_data_t* lcaq){
if (debug) { printf("Task %d init_queue_lca() start\n", procid);}
lcaq->queue_length = g->m_local*10;//g->n_local + g->n_ghost;
lcaq->queue = (uint64_t*)malloc(lcaq->queue_length*sizeof(uint64_t));
lcaq->queue_next = (uint64_t*)malloc(lcaq->queue_length*sizeof(uint64_t));
  lcaq->finish = (uint64_t*)malloc(10*sizeof(uint64_t));
if (lcaq->queue == NULL || lcaq->queue_next == NULL || lcaq->finish == NULL)
throw_err("init_queue_lca(), unable to allocate resources\n",procid);
lcaq->queue_size = 0;
lcaq->next_size = 0;
lcaq->finish_size = 0;
if(debug){printf("Task %d init_queue_lca() success\n", procid); }
}
inline void clear_queue_lca(lca_queue_data_t* lcaq){
if(debug){ printf("Task %d clear_queue_lca() start\n",procid); }
free(lcaq->queue);
free(lcaq->queue_next);
free(lcaq->finish);
if(debug) {printf("Task %d clear_queue_lca() success\n", procid); }
}
inline void init_thread_lca(lca_thread_data_t* lcat) {
if (debug) { printf("Task %d init_thread_queue() start\n", procid);}
lcat->tid = omp_get_thread_num();
lcat->thread_queue = (uint64_t*)malloc(LCA_THREAD_QUEUE_SIZE*sizeof(uint64_t));
lcat->thread_finish = (uint64_t*)malloc(LCA_THREAD_QUEUE_SIZE*sizeof(uint64_t));
if (lcat->thread_queue == NULL || lcat->thread_finish == NULL)
throw_err("init_thread_lca(), unable to allocate resources\n", procid, lcat->tid);
lcat->thread_queue_size = 0;
lcat->thread_finish_size = 0;
if (debug) {printf("Task %d init_thread_queue() success\n", procid); }
}
inline void clear_thread_lca(lca_thread_data_t* lcat){
free(lcat->thread_queue);
free(lcat->thread_finish);
}
inline void init_sendbuf_lca(mpi_data_t* comm){
comm->sdispls_temp[0] = 0;
comm->total_send = comm->sendcounts_temp[0];
for (int32_t i = 1; i < nprocs; ++i){
comm->sdispls_temp[i] = comm->sdispls_temp[i-1] + comm->sendcounts_temp[i-1];
comm->total_send += comm->sendcounts_temp[i];
}
if (debug) printf("Task %d total_send %lu\n", procid, comm->total_send);
comm->sendbuf_vert = (uint64_t*)malloc(comm->total_send*sizeof(uint64_t));
if (comm->sendbuf_vert == NULL)
throw_err("init_sendbuf_lca(), unable to allocate resources\n", procid);
}
inline void clear_recvbuf_lca(mpi_data_t* comm){
free(comm->recvbuf_vert);
for (int32_t i = 0; i < nprocs; ++i)
comm->sendcounts[i] = 0;
for (int32_t i = 0; i < nprocs; ++i)
comm->sendcounts_temp[i] = 0;
}
inline void add_to_lca(lca_thread_data_t* lcat, lca_queue_data_t* lcaq,
uint64_t vert1, uint64_t pred1, uint64_t level1,
uint64_t vert2, uint64_t pred2, uint64_t level2);
inline void empty_lca_queue(lca_thread_data_t* lcat, lca_queue_data_t* lcaq);
inline void add_to_lca_bridge(
lca_thread_data_t* lcat, lca_queue_data_t* lcaq, uint64_t vert);
inline void empty_lca_queue_bridge(
lca_thread_data_t* lcat, lca_queue_data_t* lcaq);
// inline void add_to_finish(lca_thread_data_t* lcat, lca_queue_data_t* lcaq,
// uint64_t vert1, uint64_t pred1, uint64_t level1);
// inline void empty_finish_queue(lca_thread_data_t* lcat, lca_queue_data_t* lcaq);
inline void update_lca_send(
thread_comm_t* tc, mpi_data_t* comm,
lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank);
inline void empty_lca_send(
thread_comm_t* tc, mpi_data_t* comm, lca_queue_data_t* lcaq);
inline void update_lca_send_bridge(
thread_comm_t* tc, mpi_data_t* comm,
lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank);
inline void empty_lca_send_bridge(
thread_comm_t* tc, mpi_data_t* comm, lca_queue_data_t* lcaq);
// inline void update_lca_finish(dist_graph_t* g,
// thread_comm_t* tc, mpi_data_t* comm,
// lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank);
//(dist_graph_t* g, thread_comm_t* tc, mpi_data_t* comm,
// lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank);
// inline void empty_lca_finish(dist_graph_t* g,
// thread_comm_t* tc, mpi_data_t* comm, lca_queue_data_t* lcaq);
inline void exchange_lca(dist_graph_t* g, mpi_data_t* comm);
inline void add_to_lca(lca_thread_data_t* lcat, lca_queue_data_t* lcaq,
uint64_t vert1, uint64_t pred1, uint64_t level1,
uint64_t vert2, uint64_t pred2, uint64_t level2)
{
lcat->thread_queue[lcat->thread_queue_size++] = vert1;
lcat->thread_queue[lcat->thread_queue_size++] = pred1;
lcat->thread_queue[lcat->thread_queue_size++] = level1;
lcat->thread_queue[lcat->thread_queue_size++] = vert2;
lcat->thread_queue[lcat->thread_queue_size++] = pred2;
lcat->thread_queue[lcat->thread_queue_size++] = level2;
if (lcat->thread_queue_size+6 >= LCA_THREAD_QUEUE_SIZE)
empty_lca_queue(lcat, lcaq);
}
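// Added comment: empty_lca_queue() (and its *_bridge variant below) drains a
// thread-private buffer into the shared queue_next array. The `omp atomic
// capture` fetch-and-adds thread_queue_size to next_size; subtracting
// thread_queue_size back from the captured value gives the start offset, so
// every thread copies into its own disjoint slice without taking a lock.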
inline void empty_lca_queue(lca_thread_data_t* lcat, lca_queue_data_t* lcaq)
{
uint64_t start_offset;
#pragma omp atomic capture
start_offset = lcaq->next_size += lcat->thread_queue_size;
start_offset -= lcat->thread_queue_size;
for (uint64_t i = 0; i < lcat->thread_queue_size; ++i)
lcaq->queue_next[start_offset + i] = lcat->thread_queue[i];
lcat->thread_queue_size = 0;
}
inline void add_to_lca_bridge(
lca_thread_data_t* lcat, lca_queue_data_t* lcaq, uint64_t vert)
{
lcat->thread_queue[lcat->thread_queue_size++] = vert;
if (lcat->thread_queue_size+1 >= LCA_THREAD_QUEUE_SIZE)
empty_lca_queue_bridge(lcat, lcaq);
}
inline void empty_lca_queue_bridge(
lca_thread_data_t* lcat, lca_queue_data_t* lcaq)
{
uint64_t start_offset;
#pragma omp atomic capture
start_offset = lcaq->next_size += lcat->thread_queue_size;
start_offset -= lcat->thread_queue_size;
for (uint64_t i = 0; i < lcat->thread_queue_size; ++i)
lcaq->queue_next[start_offset + i] = lcat->thread_queue[i];
lcat->thread_queue_size = 0;
}
// inline void add_to_finish(lca_thread_data_t* lcat, lca_queue_data_t* lcaq,
// uint64_t vert1, uint64_t pred1, uint64_t level1)
// {
// lcat->thread_finish[lcat->thread_finish_size++] = vert1;
// lcat->thread_finish[lcat->thread_finish_size++] = pred1;
// lcat->thread_finish[lcat->thread_finish_size++] = level1;
// if (lcat->thread_finish_size+3 >= LCA_THREAD_QUEUE_SIZE)
// empty_finish_queue(lcat, lcaq);
// }
// inline void empty_finish_queue(lca_thread_data_t* lcat, lca_queue_data_t* lcaq)
// {
// uint64_t start_offset;
// #pragma omp atomic capture
// start_offset = lcaq->finish_size += lcat->thread_finish_size;
// start_offset -= lcat->thread_finish_size;
// for (uint64_t i = 0; i < lcat->thread_finish_size; ++i)
// lcaq->finish[start_offset + i] = lcat->thread_finish[i];
// lcat->thread_finish_size = 0;
// }
inline void update_lca_send(
thread_comm_t* tc, mpi_data_t* comm,
lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank)
{
tc->sendbuf_rank_thread[tc->thread_queue_size/6] = send_rank;
tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index];
tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+1];
tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+2];
tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+3];
tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+4];
tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+5];
//++tc->thread_queue_size;
//++tc->sendcounts_thread[send_rank];
if (tc->thread_queue_size+6 >= LCA_THREAD_QUEUE_SIZE)
empty_lca_send(tc, comm, lcaq);
}
inline void empty_lca_send(
thread_comm_t* tc, mpi_data_t* comm, lca_queue_data_t* lcaq)
{
for (int32_t i = 0; i < nprocs; ++i)
{
#pragma omp atomic capture
tc->thread_starts[i] = comm->sdispls_temp[i] += tc->sendcounts_thread[i];
tc->thread_starts[i] -= tc->sendcounts_thread[i];
}
for (uint64_t i = 0; i < tc->thread_queue_size; i+=6)
{
int32_t cur_rank = tc->sendbuf_rank_thread[i/6];
comm->sendbuf_vert[tc->thread_starts[cur_rank]] =
tc->sendbuf_vert_thread[i];
comm->sendbuf_vert[tc->thread_starts[cur_rank]+1] =
tc->sendbuf_vert_thread[i+1];
comm->sendbuf_vert[tc->thread_starts[cur_rank]+2] =
tc->sendbuf_vert_thread[i+2];
comm->sendbuf_vert[tc->thread_starts[cur_rank]+3] =
tc->sendbuf_vert_thread[i+3];
comm->sendbuf_vert[tc->thread_starts[cur_rank]+4] =
tc->sendbuf_vert_thread[i+4];
comm->sendbuf_vert[tc->thread_starts[cur_rank]+5] =
tc->sendbuf_vert_thread[i+5];
tc->thread_starts[cur_rank] += 6;
}
for (int32_t i = 0; i < nprocs; ++i)
{
tc->thread_starts[i] = 0;
tc->sendcounts_thread[i] = 0;
}
tc->thread_queue_size = 0;
}
inline void update_lca_send_bridge(
thread_comm_t* tc, mpi_data_t* comm,
lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank)
{
tc->sendbuf_rank_thread[tc->thread_queue_size] = send_rank;
tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index];
if (tc->thread_queue_size+1 >= LCA_THREAD_QUEUE_SIZE)
empty_lca_send_bridge(tc, comm, lcaq);
}
inline void empty_lca_send_bridge(
thread_comm_t* tc, mpi_data_t* comm, lca_queue_data_t* lcaq)
{
for (int32_t i = 0; i < nprocs; ++i)
{
#pragma omp atomic capture
tc->thread_starts[i] = comm->sdispls_temp[i] += tc->sendcounts_thread[i];
tc->thread_starts[i] -= tc->sendcounts_thread[i];
}
for (uint64_t i = 0; i < tc->thread_queue_size; ++i)
{
int32_t cur_rank = tc->sendbuf_rank_thread[i];
comm->sendbuf_vert[tc->thread_starts[cur_rank]] =
tc->sendbuf_vert_thread[i];
tc->thread_starts[cur_rank] += 1;
}
for (int32_t i = 0; i < nprocs; ++i)
{
tc->thread_starts[i] = 0;
tc->sendcounts_thread[i] = 0;
}
tc->thread_queue_size = 0;
}
// inline void update_lca_finish(dist_graph_t* g,
// thread_comm_t* tc, mpi_data_t* comm,
// lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank)
// {
// // for (int32_t i = 0; i < nprocs; ++i)
// // tc->v_to_rank[i] = false;
// // uint64_t out_degree = out_degree(g, vert_index);
// // uint64_t* outs = out_vertices(g, vert_index);
// // for (uint64_t j = 0; j < out_degree; ++j)
// // {
// // uint64_t out_index = outs[j];
// // if (out_index >= g->n_local)
// // {
// // int32_t out_rank = g->ghost_tasks[out_index - g->n_local];
// // if (!tc->v_to_rank[out_rank])
// // {
// // tc->v_to_rank[out_rank] = true;
// // add_vid_data_to_send(tc, comm,
// // g->local_unmap[vert_index], data, out_rank);
// // }
// // }
// // }
// //tc->sendbuf_rank_thread[tc->thread_queue_size/3] = send_rank;
// tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->finish[index];
// tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->finish[index+1];
// tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->finish[index+2];
// //++tc->thread_queue_size;
// //++tc->sendcounts_thread[send_rank];
// if (tc->thread_queue_size+6 >= LCA_THREAD_QUEUE_SIZE)
// empty_lca_finish(g, tc, comm, lcaq);
// }
// inline void add_data_to_finish(thread_comm_t* tc, mpi_data_t* comm,
// lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank)
// {
// tc->sendbuf_rank_thread[tc->thread_queue_size/3] = send_rank;
// tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index];
// tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+1];
// tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+2];
// ++tc->thread_queue_size;
// ++tc->sendcounts_thread[send_rank];
// if (tc->thread_queue_size+3 >= LCA_THREAD_QUEUE_SIZE)
// empty_lca_finish(tc, comm, lcaq);
// }
// inline void empty_lca_finish(dist_graph_t* g,
// thread_comm_t* tc, mpi_data_t* comm, lca_queue_data_t* lcaq)
// {
// for (int32_t i = 0; i < nprocs; ++i)
// {
// #pragma omp atomic capture
// tc->thread_starts[i] = comm->sdispls_temp[i] += tc->sendcounts_thread[i];
// tc->thread_starts[i] -= tc->sendcounts_thread[i];
// }
// for (uint64_t i = 0; i < tc->thread_queue_size; i+=3)
// {
// int32_t cur_rank = get_rank(g, tc->sendbuf_vert_thread[i]);
// comm->sendbuf_vert[tc->thread_starts[cur_rank]] =
// tc->sendbuf_vert_thread[i];
// comm->sendbuf_vert[tc->thread_starts[cur_rank]+1] =
// tc->sendbuf_vert_thread[i+1];
// comm->sendbuf_vert[tc->thread_starts[cur_rank]+2] =
// tc->sendbuf_vert_thread[i+2];
// tc->thread_starts[cur_rank] += 3;
// }
// for (int32_t i = 0; i < nprocs; ++i)
// {
// tc->thread_starts[i] = 0;
// tc->sendcounts_thread[i] = 0;
// }
// tc->thread_queue_size = 0;
// }
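// Added comment: exchange_lca() below performs the all-to-all in
// num_comms = (global queue size) / MAX_SEND_SIZE + 1 rounds; in round c each
// rank sends the slice [cnt*c/num_comms, cnt*(c+1)/num_comms) of its
// per-destination data (the last round takes the remainder), keeping the
// int32_t counts and displacements handed to MPI_Alltoallv within range.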
inline void exchange_lca(dist_graph_t* g, mpi_data_t* comm)
{
for (int32_t i = 0; i < nprocs; ++i)
comm->recvcounts_temp[i] = 0;
for (int32_t i = 0; i < nprocs; ++i)
comm->sdispls_temp[i] -= comm->sendcounts_temp[i];
MPI_Alltoall(comm->sendcounts_temp, 1, MPI_UINT64_T,
comm->recvcounts_temp, 1, MPI_UINT64_T, MPI_COMM_WORLD);
comm->total_recv = 0;
for (int i = 0; i < nprocs; ++i)
comm->total_recv += comm->recvcounts_temp[i];
if (debug) printf("Task %d total_recv %lu\n", procid, comm->total_recv);
comm->recvbuf_vert = (uint64_t*)malloc(comm->total_recv*sizeof(uint64_t));
if (comm->recvbuf_vert == NULL)
throw_err("exchange_lca() unable to allocate recv buffers", procid);
uint64_t task_queue_size = comm->total_send;
uint64_t current_global_size = 0;
MPI_Allreduce(&task_queue_size, ¤t_global_size, 1,
MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
uint64_t num_comms = current_global_size / (uint64_t)MAX_SEND_SIZE + 1;
uint64_t sum_recv = 0;
uint64_t sum_send = 0;
for (uint64_t c = 0; c < num_comms; ++c)
{
for (int32_t i = 0; i < nprocs; ++i)
{
uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms;
uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms;
if (c == (num_comms-1))
send_end = comm->sendcounts_temp[i];
comm->sendcounts[i] = (int32_t)(send_end - send_begin);
assert(comm->sendcounts[i] >= 0);
}
MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T,
comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD);
comm->sdispls[0] = 0;
comm->sdispls_cpy[0] = 0;
comm->rdispls[0] = 0;
for (int32_t i = 1; i < nprocs; ++i)
{
comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1];
comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1];
comm->sdispls_cpy[i] = comm->sdispls[i];
}
int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1];
int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1];
uint64_t* buf_v = (uint64_t*)malloc((uint64_t)(cur_send)*sizeof(uint64_t));
if (buf_v == NULL)
throw_err("exchange_verts(), unable to allocate comm buffers", procid);
for (int32_t i = 0; i < nprocs; ++i)
{
uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms;
uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms;
if (c == (num_comms-1))
send_end = comm->sendcounts_temp[i];
for (uint64_t j = send_begin; j < send_end; ++j)
{
uint64_t data = comm->sendbuf_vert[comm->sdispls_temp[i]+j];
buf_v[comm->sdispls_cpy[i]++] = data;
}
}
MPI_Alltoallv(buf_v, comm->sendcounts,
comm->sdispls, MPI_UINT64_T,
comm->recvbuf_vert+sum_recv, comm->recvcounts,
comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD);
free(buf_v);
sum_recv += cur_recv;
sum_send += cur_send;
}
free(comm->sendbuf_vert);
assert(sum_recv == comm->total_recv);
assert(sum_send == comm->total_send);
}
#endif
|
unit_cell_symmetry.h
|
// Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file unit_cell_symmetry.h
*
* \brief Contains definition and implementation of sirius::Unit_cell_symmetry class.
*/
#ifndef __UNIT_CELL_SYMMETRY_H__
#define __UNIT_CELL_SYMMETRY_H__
extern "C" {
#include <spglib.h>
}
#include "geometry3d.hpp"
#include "constants.h"
#include "utils.h"
#include "gvec.hpp"
namespace sirius {
/// Descriptor of the space group symmetry operation.
struct space_group_symmetry_descriptor
{
/// Rotational part of symmetry operation (fractional coordinates).
matrix3d<int> R;
matrix3d<int> invR;
/// Fractional translation.
vector3d<double> t;
/// Proper (+1) or improper (-1) rotation.
int proper;
/// Proper rotation matrix in Cartesian coordinates.
matrix3d<double> rotation;
/// Three Euler angles that generate the proper rotation matrix.
vector3d<double> euler_angles;
};
/// Descriptor of the magnetic group symmetry operation.
struct magnetic_group_symmetry_descriptor
{
/// Element of space group symmetry.
space_group_symmetry_descriptor spg_op;
/// Index of the space group symmetry operation.
    /** This index is used to search for the transformation of atoms under the current space group operation
* in the precomputed symmetry table. */
int isym;
/// Proper rotation matrix in Cartesian coordinates.
matrix3d<double> spin_rotation;
};
class Unit_cell_symmetry
{
private:
/// Matrix of lattice vectors.
    /** Spglib requires this matrix to have a positive determinant. */
matrix3d<double> lattice_vectors_;
matrix3d<double> inverse_lattice_vectors_;
int num_atoms_;
mdarray<double, 2> positions_;
std::vector<int> types_;
double tolerance_;
/// Crystal structure descriptor returned by spglib.
SpglibDataset* spg_dataset_;
/// Symmetry table for atoms.
/** For each atom ia and symmetry isym sym_table_(ia, isym) stores index of atom ja to which original atom
* transforms under symmetry operation. */
mdarray<int, 2> sym_table_;
/// List of all space group symmetry operations.
std::vector<space_group_symmetry_descriptor> space_group_symmetry_;
/// List of all magnetic group symmetry operations.
std::vector<magnetic_group_symmetry_descriptor> magnetic_group_symmetry_;
/// Compute Euler angles corresponding to the proper rotation part of the given symmetry.
vector3d<double> euler_angles(matrix3d<double> const& rot__) const;
/// Generate rotation matrix from three Euler angles
/** Euler angles \f$ \alpha, \beta, \gamma \f$ define the general rotation as three consecutive rotations:
* - about \f$ \hat e_z \f$ through the angle \f$ \gamma \f$ (\f$ 0 \le \gamma < 2\pi \f$)
* - about \f$ \hat e_y \f$ through the angle \f$ \beta \f$ (\f$ 0 \le \beta \le \pi \f$)
     * - about \f$ \hat e_z \f$ through the angle \f$ \alpha \f$ (\f$ 0 \le \alpha < 2\pi \f$)
*
* The total rotation matrix is defined as a product of three rotation matrices:
* \f[
* R(\alpha, \beta, \gamma) =
* \left( \begin{array}{ccc} \cos(\alpha) & -\sin(\alpha) & 0 \\
* \sin(\alpha) & \cos(\alpha) & 0 \\
* 0 & 0 & 1 \end{array} \right)
* \left( \begin{array}{ccc} \cos(\beta) & 0 & \sin(\beta) \\
* 0 & 1 & 0 \\
* -\sin(\beta) & 0 & \cos(\beta) \end{array} \right)
* \left( \begin{array}{ccc} \cos(\gamma) & -\sin(\gamma) & 0 \\
* \sin(\gamma) & \cos(\gamma) & 0 \\
* 0 & 0 & 1 \end{array} \right) =
* \left( \begin{array}{ccc} \cos(\alpha) \cos(\beta) \cos(\gamma) - \sin(\alpha) \sin(\gamma) &
* -\sin(\alpha) \cos(\gamma) - \cos(\alpha) \cos(\beta) \sin(\gamma) &
* \cos(\alpha) \sin(\beta) \\
* \sin(\alpha) \cos(\beta) \cos(\gamma) + \cos(\alpha) \sin(\gamma) &
* \cos(\alpha) \cos(\gamma) - \sin(\alpha) \cos(\beta) \sin(\gamma) &
* \sin(\alpha) \sin(\beta) \\
* -\sin(\beta) \cos(\gamma) &
* \sin(\beta) \sin(\gamma) &
* \cos(\beta) \end{array} \right)
* \f]
*/
matrix3d<double> rot_mtrx_cart(vector3d<double> euler_angles__) const;
/// Get axis and angle from rotation matrix.
static std::pair<vector3d<double>, double> axis_angle(matrix3d<double> R__)
{
vector3d<double> u;
/* make proper rotation */
R__ = R__ * R__.det();
u[0] = R__(2, 1) - R__(1, 2);
u[1] = R__(0, 2) - R__(2, 0);
u[2] = R__(1, 0) - R__(0, 1);
double sint = u.length() / 2.0;
double cost = (R__(0, 0) + R__(1, 1) + R__(2, 2) - 1) / 2.0;
double theta = Utils::phi_by_sin_cos(sint, cost);
/* rotation angle is zero */
if (std::abs(theta) < 1e-12) {
u = vector3d<double>({0, 0, 1});
} else if (std::abs(theta - pi) < 1e-12) { /* rotation angle is Pi */
/* rotation matrix for Pi angle has this form
[-1+2ux^2 | 2 ux uy | 2 ux uz]
[2 ux uy | -1+2uy^2 | 2 uy uz]
[2 ux uz | 2 uy uz | -1+2uz^2] */
if (R__(0, 0) >= R__(1, 1) && R__(0, 0) >= R__(2, 2)) { /* x-component is largest */
u[0] = std::sqrt(std::abs(R__(0, 0) + 1) / 2);
u[1] = (R__(0, 1) + R__(1, 0)) / 4 / u[0];
u[2] = (R__(0, 2) + R__(2, 0)) / 4 / u[0];
} else if (R__(1, 1) >= R__(0, 0) && R__(1, 1) >= R__(2, 2)) { /* y-component is largest */
u[1] = std::sqrt(std::abs(R__(1, 1) + 1) / 2);
u[0] = (R__(1, 0) + R__(0, 1)) / 4 / u[1];
u[2] = (R__(1, 2) + R__(2, 1)) / 4 / u[1];
} else {
u[2] = std::sqrt(std::abs(R__(2, 2) + 1) / 2);
u[0] = (R__(2, 0) + R__(0, 2)) / 4 / u[2];
u[1] = (R__(2, 1) + R__(1, 2)) / 4 / u[2];
}
} else {
u = u * (1.0 / u.length());
}
return std::pair<vector3d<double>, double>(u, theta);
}
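    /// Generate the SU(2) spin-rotation matrix from the axis-angle representation.
    /** (Documentation added.) The matrix assembled below is
     *  \f[
     *      \exp\Big(-i \frac{\theta}{2}\, {\bf u} \cdot {\boldsymbol \sigma}\Big) =
     *      \cos\frac{\theta}{2}\, I - i \sin\frac{\theta}{2}\, ({\bf u} \cdot {\boldsymbol \sigma})
     *  \f]
     *  where \f$ {\boldsymbol \sigma} \f$ are the Pauli matrices and \f$ ({\bf u},\theta) \f$
     *  is the rotation axis and angle returned by axis_angle().
     */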
static mdarray<double_complex, 2> spinor_rotation_matrix(vector3d<double> u__, double theta__)
{
mdarray<double_complex, 2> rotm(2, 2);
auto cost = std::cos(theta__ / 2);
auto sint = std::sin(theta__ / 2);
rotm(0, 0) = double_complex(cost, -u__[2] * sint);
rotm(1, 1) = double_complex(cost, u__[2] * sint);
rotm(0, 1) = double_complex(-u__[1] * sint, -u__[0] * sint);
rotm(1, 0) = double_complex( u__[1] * sint, -u__[0] * sint);
        return rotm;
}
public:
Unit_cell_symmetry(matrix3d<double>& lattice_vectors__,
int num_atoms__,
mdarray<double, 2>& positions__,
mdarray<double, 2>& spins__,
std::vector<int>& types__,
double tolerance__);
~Unit_cell_symmetry()
{
spg_free_dataset(spg_dataset_);
}
inline int atom_symmetry_class(int ia__)
{
return spg_dataset_->equivalent_atoms[ia__];
}
inline int spacegroup_number()
{
return spg_dataset_->spacegroup_number;
}
inline std::string international_symbol()
{
return spg_dataset_->international_symbol;
}
inline std::string hall_symbol()
{
return spg_dataset_->hall_symbol;
}
matrix3d<double> transformation_matrix() const
{
return matrix3d<double>(spg_dataset_->transformation_matrix);
}
vector3d<double> origin_shift() const
{
return vector3d<double>(spg_dataset_->origin_shift[0],
spg_dataset_->origin_shift[1],
spg_dataset_->origin_shift[2]);
}
inline int num_spg_sym() const
{
return static_cast<int>(space_group_symmetry_.size());
}
inline space_group_symmetry_descriptor const& space_group_symmetry(int isym__) const
{
assert(isym__ >= 0 && isym__ < num_spg_sym());
return space_group_symmetry_[isym__];
}
inline int num_mag_sym() const
{
return static_cast<int>(magnetic_group_symmetry_.size());
}
inline magnetic_group_symmetry_descriptor const& magnetic_group_symmetry(int isym__) const
{
assert(isym__ >= 0 && isym__ < num_mag_sym());
return magnetic_group_symmetry_[isym__];
}
inline int sym_table(int ia__, int isym__) const
{
return sym_table_(ia__, isym__);
}
void check_gvec_symmetry(Gvec const& gvec__, Communicator const& comm__) const;
/// Symmetrize scalar function.
/** The following operation is performed:
* \f[
* f({\bf x}) = \frac{1}{N_{sym}} \sum_{{\bf \hat P}} f({\bf \hat P x})
* \f]
* For the function expanded in plane-waves we have:
* \f[
* f({\bf x}) = \frac{1}{N_{sym}} \sum_{{\bf \hat P}} \sum_{\bf G} e^{i{\bf G \hat P x}} f({\bf G})
* = \frac{1}{N_{sym}} \sum_{{\bf \hat P}} \sum_{\bf G} e^{i{\bf G (Rx + t)}} f({\bf G})
* = \frac{1}{N_{sym}} \sum_{{\bf \hat P}} \sum_{\bf G} e^{i{\bf G t}} e^{i{\bf G Rx}} f({\bf G})
* \f]
* Now we do a mapping \f$ {\bf GR} \rightarrow \tilde {\bf G} \f$ and find expansion coefficients of the
* symmetry transformed function:
* \f[
* f(\tilde{\bf G}) = e^{i{\bf G t}} f({\bf G})
* \f]
*/
void symmetrize_function(double_complex* f_pw__,
remap_gvec_to_shells const& remap_gvec__,
mdarray<double_complex, 3> const& sym_phase_factors__) const;
//void symmetrize_function(double_complex* f_pw__,
// Gvec const& gvec__,
// Communicator const& comm__) const;
void symmetrize_vector_function(double_complex* fz_pw__,
remap_gvec_to_shells const& remap_gvec__,
mdarray<double_complex, 3> const& sym_phase_factors__) const;
void symmetrize_vector_function(double_complex* fx_pw__,
double_complex* fy_pw__,
double_complex* fz_pw__,
remap_gvec_to_shells const& remap_gvec__,
mdarray<double_complex, 3> const& sym_phase_factors__) const;
//void symmetrize_function(double_complex* f_pw__,
// Gvec const& gvec__,
// Communicator const& comm__) const;
//void symmetrize_vector_function(double_complex* fz_pw__,
// Gvec const& gvec__,
// Communicator const& comm__) const;
//void symmetrize_vector_function(double_complex* fx_pw__,
// double_complex* fy_pw__,
// double_complex* fz_pw__,
// Gvec const& gvec__,
// Communicator const& comm__) const;
void symmetrize_function(mdarray<double, 3>& frlm__,
Communicator const& comm__) const;
void symmetrize_vector_function(mdarray<double, 3>& fz_rlm__,
Communicator const& comm__) const;
void symmetrize_vector_function(mdarray<double, 3>& fx_rlm__,
mdarray<double, 3>& fy_rlm__,
mdarray<double, 3>& fz_rlm__,
Communicator const& comm__) const;
int get_irreducible_reciprocal_mesh(vector3d<int> k_mesh__,
vector3d<int> is_shift__,
mdarray<double, 2>& kp__,
std::vector<double>& wk__) const;
matrix3d<double> const& lattice_vectors() const
{
return lattice_vectors_;
}
matrix3d<double> const& inverse_lattice_vectors() const
{
return inverse_lattice_vectors_;
}
};
inline Unit_cell_symmetry::Unit_cell_symmetry(matrix3d<double>& lattice_vectors__,
int num_atoms__,
mdarray<double, 2>& positions__,
mdarray<double, 2>& spins__,
std::vector<int>& types__,
double tolerance__)
: lattice_vectors_(lattice_vectors__)
, num_atoms_(num_atoms__)
, types_(types__)
, tolerance_(tolerance__)
{
PROFILE("sirius::Unit_cell_symmetry::Unit_cell_symmetry");
if (lattice_vectors__.det() < 0) {
std::stringstream s;
s << "spglib requires positive determinant for a matrix of lattice vectors";
TERMINATE(s);
}
double lattice[3][3];
for (int i: {0, 1, 2}) {
for (int j: {0, 1, 2}) {
lattice[i][j] = lattice_vectors_(i, j);
}
}
positions_ = mdarray<double, 2>(3, num_atoms_);
for (int ia = 0; ia < num_atoms_; ia++) {
for (int x: {0, 1, 2}) {
positions_(x, ia) = positions__(x, ia);
}
}
sddk::timer t1("sirius::Unit_cell_symmetry::Unit_cell_symmetry|spg");
spg_dataset_ = spg_get_dataset(lattice, (double(*)[3])&positions_(0, 0), &types_[0], num_atoms_, tolerance_);
if (spg_dataset_ == NULL) {
TERMINATE("spg_get_dataset() returned NULL");
}
if (spg_dataset_->spacegroup_number == 0) {
TERMINATE("spg_get_dataset() returned 0 for the space group");
}
if (spg_dataset_->n_atoms != num_atoms__) {
std::stringstream s;
s << "spg_get_dataset() returned wrong number of atoms (" << spg_dataset_->n_atoms << ")" << std::endl
<< "expected number of atoms is " << num_atoms__;
TERMINATE(s);
}
t1.stop();
inverse_lattice_vectors_ = inverse(lattice_vectors_);
sddk::timer t2("sirius::Unit_cell_symmetry::Unit_cell_symmetry|sym1");
for (int isym = 0; isym < spg_dataset_->n_operations; isym++) {
space_group_symmetry_descriptor sym_op;
sym_op.R = matrix3d<int>(spg_dataset_->rotations[isym]);
sym_op.t = vector3d<double>(spg_dataset_->translations[isym][0],
spg_dataset_->translations[isym][1],
spg_dataset_->translations[isym][2]);
int p = sym_op.R.det();
if (!(p == 1 || p == -1)) {
TERMINATE("wrong rotation matrix");
}
sym_op.proper = p;
sym_op.rotation = lattice_vectors_ * matrix3d<double>(sym_op.R * p) * inverse_lattice_vectors_;
sym_op.euler_angles = euler_angles(sym_op.rotation);
for (int i = 0; i < spg_dataset_->n_operations; i++) {
auto m = matrix3d<int>(spg_dataset_->rotations[isym]) * matrix3d<int>(spg_dataset_->rotations[i]);
if (m(0, 0) == 1 && m(1, 1) == 1 && m(2, 2) == 1 &&
m(0, 1) == 0 && m(1, 0) == 0 &&
m(0, 2) == 0 && m(2, 0) == 0 &&
m(1, 2) == 0 && m(2, 1) == 0)
{
sym_op.invR = matrix3d<int>(spg_dataset_->rotations[i]);
break;
}
}
space_group_symmetry_.push_back(sym_op);
}
t2.stop();
sddk::timer t3("sirius::Unit_cell_symmetry::Unit_cell_symmetry|sym2");
sym_table_ = mdarray<int, 2>(num_atoms_, num_spg_sym());
/* loop over spatial symmetries */
#pragma omp parallel for schedule(static)
for (int isym = 0; isym < num_spg_sym(); isym++) {
for (int ia = 0; ia < num_atoms_; ia++) {
auto R = space_group_symmetry(isym).R;
auto t = space_group_symmetry(isym).t;
/* spatial transform */
vector3d<double> pos(positions__(0, ia), positions__(1, ia), positions__(2, ia));
auto v = reduce_coordinates(R * pos + t);
int ja = -1;
/* check for equivalent atom */
for (int k = 0; k < num_atoms_; k++) {
vector3d<double> pos1(positions__(0, k), positions__(1, k), positions__(2, k));
if ((v.first - pos1).length() < tolerance_) {
ja = k;
break;
}
}
if (ja == -1) {
TERMINATE("equivalent atom was not found");
}
sym_table_(ia, isym) = ja;
}
}
t3.stop();
sddk::timer t4("sirius::Unit_cell_symmetry::Unit_cell_symmetry|sym3");
/* loop over spatial symmetries */
for (int isym = 0; isym < num_spg_sym(); isym++) {
/* loop over spin symmetries */
for (int jsym = 0; jsym < num_spg_sym(); jsym++) {
/* take proper part of rotation matrix */
auto Rspin = space_group_symmetry(jsym).rotation;
int n{0};
            /* check if all atoms transform under spatial and spin symmetries */
for (int ia = 0; ia < num_atoms_; ia++) {
int ja = sym_table_(ia, isym);
/* now check that vector field transforms from atom ia to atom ja */
/* vector field of atom is expected to be in Cartesian coordinates */
auto vd = Rspin * vector3d<double>(spins__(0, ia), spins__(1, ia), spins__(2, ia)) -
vector3d<double>(spins__(0, ja), spins__(1, ja), spins__(2, ja));
if (vd.length() < 1e-10) {
n++;
}
}
            /* if all atoms transform under spin rotation, add it to a list */
if (n == num_atoms_) {
magnetic_group_symmetry_descriptor mag_op;
mag_op.spg_op = space_group_symmetry(isym);
mag_op.isym = isym;
mag_op.spin_rotation = Rspin;
magnetic_group_symmetry_.push_back(mag_op);
break;
}
}
}
t4.stop();
}
inline matrix3d<double> Unit_cell_symmetry::rot_mtrx_cart(vector3d<double> euler_angles__) const
{
    double alpha = euler_angles__[0];
    double beta = euler_angles__[1];
    double gamma = euler_angles__[2];
matrix3d<double> rm;
rm(0, 0) = std::cos(alpha) * std::cos(beta) * std::cos(gamma) - std::sin(alpha) * std::sin(gamma);
rm(0, 1) = -std::cos(gamma) * std::sin(alpha) - std::cos(alpha) * std::cos(beta) * std::sin(gamma);
rm(0, 2) = std::cos(alpha) * std::sin(beta);
rm(1, 0) = std::cos(beta) * std::cos(gamma) * std::sin(alpha) + std::cos(alpha) * std::sin(gamma);
rm(1, 1) = std::cos(alpha) * std::cos(gamma) - std::cos(beta) * std::sin(alpha) * std::sin(gamma);
rm(1, 2) = std::sin(alpha) * std::sin(beta);
rm(2, 0) = -std::cos(gamma) * std::sin(beta);
rm(2, 1) = std::sin(beta) * std::sin(gamma);
rm(2, 2) = std::cos(beta);
return rm;
}
inline vector3d<double> Unit_cell_symmetry::euler_angles(matrix3d<double> const& rot__) const
{
vector3d<double> angles(0, 0, 0);
if (std::abs(rot__.det() - 1) > 1e-10)
{
std::stringstream s;
s << "determinant of rotation matrix is " << rot__.det();
TERMINATE(s);
}
if (std::abs(rot__(2, 2) - 1.0) < 1e-10) // cos(beta) == 1, beta = 0
{
angles[0] = Utils::phi_by_sin_cos(rot__(1, 0), rot__(0, 0));
}
else if (std::abs(rot__(2, 2) + 1.0) < 1e-10) // cos(beta) == -1, beta = Pi
{
angles[0] = Utils::phi_by_sin_cos(-rot__(0, 1), rot__(1, 1));
angles[1] = pi;
}
else
{
double beta = std::acos(rot__(2, 2));
angles[0] = Utils::phi_by_sin_cos(rot__(1, 2) / std::sin(beta), rot__(0, 2) / std::sin(beta));
angles[1] = beta;
angles[2] = Utils::phi_by_sin_cos(rot__(2, 1) / std::sin(beta), -rot__(2, 0) / std::sin(beta));
}
auto rm1 = rot_mtrx_cart(angles);
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
if (std::abs(rot__(i, j) - rm1(i, j)) > 1e-8) {
std::stringstream s;
s << "matrices don't match" << std::endl
<< "initial symmetry matrix: " << std::endl
<< rot__(0, 0) << " " << rot__(0, 1) << " " << rot__(0, 2) << std::endl
<< rot__(1, 0) << " " << rot__(1, 1) << " " << rot__(1, 2) << std::endl
<< rot__(2, 0) << " " << rot__(2, 1) << " " << rot__(2, 2) << std::endl
<< "euler angles : " << angles[0] / pi << " " << angles[1] / pi << " " << angles[2] / pi << std::endl
<< "computed symmetry matrix : " << std::endl
<< rm1(0, 0) << " " << rm1(0, 1) << " " << rm1(0, 2) << std::endl
<< rm1(1, 0) << " " << rm1(1, 1) << " " << rm1(1, 2) << std::endl
<< rm1(2, 0) << " " << rm1(2, 1) << " " << rm1(2, 2) << std::endl;
TERMINATE(s);
}
}
}
return angles;
}
inline int Unit_cell_symmetry::get_irreducible_reciprocal_mesh(vector3d<int> k_mesh__,
vector3d<int> is_shift__,
mdarray<double, 2>& kp__,
std::vector<double>& wk__) const
{
int nktot = k_mesh__[0] * k_mesh__[1] * k_mesh__[2];
mdarray<int, 2> grid_address(3, nktot);
std::vector<int> ikmap(nktot);
double lattice[3][3];
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 3; j++) lattice[i][j] = lattice_vectors_(i, j);
}
int nknr = spg_get_ir_reciprocal_mesh((int(*)[3])&grid_address(0, 0),
&ikmap[0],
&k_mesh__[0],
&is_shift__[0],
1,
lattice,
(double(*)[3])&positions_(0, 0),
&types_[0],
num_atoms_,
tolerance_);
std::map<int, int> wknr;
for (int ik = 0; ik < nktot; ik++)
{
if (wknr.count(ikmap[ik]) == 0) wknr[ikmap[ik]] = 0;
wknr[ikmap[ik]] += 1;
}
wk__ = std::vector<double>(nknr);
kp__ = mdarray<double, 2>(3, nknr);
int n = 0;
for (auto it = wknr.begin(); it != wknr.end(); it++) {
wk__[n] = double(it->second) / nktot;
for (int x = 0; x < 3; x++) {
kp__(x, n) = double(grid_address(x, it->first) + is_shift__[x] / 2.0) / k_mesh__[x];
}
n++;
}
return nknr;
}
inline void Unit_cell_symmetry::check_gvec_symmetry(Gvec const& gvec__, Communicator const& comm__) const
{
PROFILE("sirius::Unit_cell_symmetry::check_gvec_symmetry");
int gvec_count = gvec__.gvec_count(comm__.rank());
int gvec_offset = gvec__.gvec_offset(comm__.rank());
#pragma omp parallel for
for (int isym = 0; isym < num_mag_sym(); isym++) {
auto sm = magnetic_group_symmetry(isym).spg_op.R;
for (int igloc = 0; igloc < gvec_count; igloc++) {
int ig = gvec_offset + igloc;
auto gv = gvec__.gvec(ig);
/* apply symmetry operation to the G-vector */
auto gv_rot = transpose(sm) * gv;
//== /* check limits */
//== for (int x: {0, 1, 2}) {
//== auto limits = gvec__.fft_box().limits(x);
//== /* check boundaries */
//== if (gv_rot[x] < limits.first || gv_rot[x] > limits.second) {
//== std::stringstream s;
//== s << "rotated G-vector is outside of grid limits" << std::endl
//== << "original G-vector: " << gv << ", length: " << gvec__.cart(ig).length() << std::endl
//== << "rotation matrix: " << std::endl
//== << sm(0, 0) << " " << sm(0, 1) << " " << sm(0, 2) << std::endl
//== << sm(1, 0) << " " << sm(1, 1) << " " << sm(1, 2) << std::endl
//== << sm(2, 0) << " " << sm(2, 1) << " " << sm(2, 2) << std::endl
//== << "rotated G-vector: " << gv_rot << std::endl
//== << "limits: "
//== << gvec__.fft_box().limits(0).first << " " << gvec__.fft_box().limits(0).second << " "
//== << gvec__.fft_box().limits(1).first << " " << gvec__.fft_box().limits(1).second << " "
//== << gvec__.fft_box().limits(2).first << " " << gvec__.fft_box().limits(2).second;
//== TERMINATE(s);
//== }
//== }
int ig_rot = gvec__.index_by_gvec(gv_rot);
/* special case where -G is equal to G */
if (gvec__.reduced() && ig_rot < 0) {
gv_rot = gv_rot * (-1);
ig_rot = gvec__.index_by_gvec(gv_rot);
}
if (ig_rot < 0 || ig_rot >= gvec__.num_gvec()) {
std::stringstream s;
s << "rotated G-vector index is wrong" << std::endl
<< "original G-vector: " << gv << std::endl
<< "rotation matrix: " << std::endl
<< sm(0, 0) << " " << sm(0, 1) << " " << sm(0, 2) << std::endl
<< sm(1, 0) << " " << sm(1, 1) << " " << sm(1, 2) << std::endl
<< sm(2, 0) << " " << sm(2, 1) << " " << sm(2, 2) << std::endl
<< "rotated G-vector: " << gv_rot << std::endl
<< "rotated G-vector index: " << ig_rot << std::endl
<< "number of G-vectors: " << gvec__.num_gvec();
TERMINATE(s);
}
}
}
}
inline void Unit_cell_symmetry::symmetrize_function(double_complex* f_pw__,
remap_gvec_to_shells const& remap_gvec__,
mdarray<double_complex, 3> const& sym_phase_factors__) const
{
PROFILE("sirius::Unit_cell_symmetry::symmetrize_function_pw");
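/* Symmetrize a scalar plane-wave expansion: for every local G-vector the contribution
 * f(G) * exp(i 2 pi G * t) is accumulated at the index of the rotated vector G' = R^T G
 * (or at -G', complex conjugated, when only the reduced half of the G-set is stored), and
 * the sum is finally divided by the number of symmetry operations. Whole G-shells are
 * assigned to threads (igsh % nt == tid), so a rotated G-vector always stays with the same
 * thread and no atomic updates are needed. */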
auto v = remap_gvec__.remap_forward(f_pw__);
std::vector<double_complex> sym_f_pw(v.size(), 0);
sddk::timer t1("sirius::Unit_cell_symmetry::symmetrize_function_pw|local");
#pragma omp parallel
{
int nt = omp_get_max_threads();
int tid = omp_get_thread_num();
for (int igloc = 0; igloc < remap_gvec__.a2a_recv.size(); igloc++) {
vector3d<int> G(&remap_gvec__.gvec_remapped_(0, igloc));
int igsh = remap_gvec__.gvec_shell_remapped(igloc);
if (igsh % nt == tid) {
for (int i = 0; i < num_mag_sym(); i++) {
/* full space-group symmetry operation is {R|t} */
auto R = magnetic_group_symmetry(i).spg_op.R;
auto z = v[igloc] * sym_phase_factors__(0, G[0], i) *
sym_phase_factors__(1, G[1], i) *
sym_phase_factors__(2, G[2], i);
/* apply symmetry operation to the G-vector;
* remember that we move R from acting on x to acting on G: G(Rx) = (GR)x;
* GR is a vector-matrix multiplication [G][.....]
* [..R..]
* [.....]
* which can also be written as matrix^{T}-vector operation
*/
auto gv_rot = transpose(R) * G;
/* index of a rotated G-vector */
int ig_rot = remap_gvec__.index_by_gvec(gv_rot);
if (ig_rot == -1) {
gv_rot = gv_rot * (-1);
ig_rot = remap_gvec__.index_by_gvec(gv_rot);
assert(ig_rot >=0 && ig_rot < (int)v.size());
sym_f_pw[ig_rot] += std::conj(z);
} else {
assert(ig_rot >=0 && ig_rot < (int)v.size());
sym_f_pw[ig_rot] += z;
}
}
}
}
}
t1.stop();
double nrm = 1 / double(num_mag_sym());
#pragma omp parallel for schedule(static)
for (int ig = 0; ig < remap_gvec__.a2a_recv.size(); ig++) {
sym_f_pw[ig] *= nrm;
}
remap_gvec__.remap_backward(sym_f_pw, f_pw__);
}
//inline void Unit_cell_symmetry::symmetrize_function(double_complex* f_pw__,
// remap_gvec_to_shells const& remap_gvec__,
// mdarray<double_complex, 3> const& sym_phase_factors__) const
//{
// PROFILE("sirius::Unit_cell_symmetry::symmetrize_function_pw");
//
// auto v = remap_gvec__.remap_forward(f_pw__);
//
// std::vector<double_complex> sym_f_pw(v.size(), 0);
// std::vector<bool> is_done(v.size(), false);
//
// double norm = 1 / double(num_mag_sym());
//
// auto phase_factor = [&](int isym, vector3d<int> G)
// {
// return sym_phase_factors__(0, G[0], isym) *
// sym_phase_factors__(1, G[1], isym) *
// sym_phase_factors__(2, G[2], isym);
// };
//
// sddk::timer t1("sirius::Unit_cell_symmetry::symmetrize_function_pw|local");
// #pragma omp parallel
// {
// int nt = omp_get_max_threads();
// int tid = omp_get_thread_num();
//
// for (int igloc = 0; igloc < remap_gvec__.a2a_recv.size(); igloc++) {
// vector3d<int> G(&remap_gvec__.gvec_remapped_(0, igloc));
//
// int igsh = remap_gvec__.gvec_shell_remapped(igloc);
//
// /* each thread is working on full shell of G-vectors */
// if (igsh % nt == tid && !is_done[igloc]) {
// double_complex zsym(0, 0);
//
// for (int i = 0; i < num_mag_sym(); i++) {
// /* full space-group symmetry operation is {R|t} */
// auto R = magnetic_group_symmetry(i).spg_op.R;
//
// /* phase factor exp^{i * 2 * pi * {\vec G} * {\vec \tau})} */
// double_complex phase = phase_factor(i, G);
//
// /* apply symmetry operation to the G-vector;
// * remember that we move R from acting on x to acting on G: G(Rx) = (GR)x;
// * GR is a vector-matrix multiplication [G][.....]
// * [..R..]
// * [.....]
// * which can also be written as matrix^{T}-vector operation
// */
// auto gv_rot = transpose(R) * G;
//
// /* index of a rotated G-vector */
// int ig_rot = remap_gvec__.index_by_gvec(gv_rot);
//
// if (ig_rot == -1) {
// gv_rot = gv_rot * (-1);
// ig_rot = remap_gvec__.index_by_gvec(gv_rot);
// assert(ig_rot >= 0 && ig_rot < (int)v.size());
// zsym += std::conj(v[ig_rot]) * phase;
//
// } else {
// assert(ig_rot >= 0 && ig_rot < (int)v.size());
//
// zsym += v[ig_rot] * std::conj(phase);
// }
// } /* loop over symmetries */
//
// zsym *= norm;
//
// for (int i = 0; i < num_mag_sym(); i++) {
// /* full space-group symmetry operation is {R|t} */
// auto R = magnetic_group_symmetry(i).spg_op.R;
//
// /* phase factor exp^{i * 2 * pi * {\vec G} * {\vec \tau})} */
// double_complex phase = phase_factor(i, G);
//
// /* apply symmetry operation to the G-vector;
// * remember that we move R from acting on x to acting on G: G(Rx) = (GR)x;
// * GR is a vector-matrix multiplication [G][.....]
// * [..R..]
// * [.....]
// * which can also be written as matrix^{T}-vector operation
// */
// auto gv_rot = transpose(R) * G;
//
// /* index of a rotated G-vector */
// int ig_rot = remap_gvec__.index_by_gvec(gv_rot);
//
// if (ig_rot == -1) {
// gv_rot = gv_rot * (-1);
// ig_rot = remap_gvec__.index_by_gvec(gv_rot);
// assert(ig_rot >= 0 && ig_rot < (int)v.size());
// sym_f_pw[ig_rot] = std::conj(zsym * phase);
//
// } else {
// assert(ig_rot >= 0 && ig_rot < (int)v.size());
//
// sym_f_pw[ig_rot] = zsym * phase;
// }
// is_done[ig_rot] = true;
// } /* loop over symmetries */
// }
// } /* loop over igloc */
// }
// t1.stop();
//
// remap_gvec__.remap_backward(sym_f_pw, f_pw__);
//}
//inline void Unit_cell_symmetry::symmetrize_function(double_complex* f_pw__,
// Gvec const& gvec__,
// Communicator const& comm__) const
//{
// PROFILE("sirius::Unit_cell_symmetry::symmetrize_function_pw");
//
// int gvec_count = gvec__.gvec_count(comm__.rank());
// int gvec_offset = gvec__.gvec_offset(comm__.rank());
//
// mdarray<double_complex, 1> sym_f_pw(gvec__.num_gvec());
// sym_f_pw.zero();
//
// double* ptr = (double*)&sym_f_pw(0);
//
// sddk::timer t1("sirius::Unit_cell_symmetry::symmetrize_function_pw|local");
// #pragma omp parallel for
// for (int i = 0; i < num_mag_sym(); i++) {
// /* full space-group symmetry operation is {R|t} */
// auto R = magnetic_group_symmetry(i).spg_op.R;
// auto t = magnetic_group_symmetry(i).spg_op.t;
//
// for (int igloc = 0; igloc < gvec_count; igloc++) {
// int ig = gvec_offset + igloc;
//
// double_complex z = f_pw__[ig] * std::exp(double_complex(0, twopi * (gvec__.gvec(ig) * t)));
//
// /* apply symmetry operation to the G-vector;
// * remember that we move R from acting on x to acting on G: G(Rx) = (GR)x;
// * GR is a vector-matrix multiplication [G][.....]
// * [..R..]
// * [.....]
// * which can also be written as matrix^{T}-vector operation
// */
// auto gv_rot = transpose(R) * gvec__.gvec(ig);
//
// /* index of a rotated G-vector */
// int ig_rot = gvec__.index_by_gvec(gv_rot);
//
// if (gvec__.reduced() && ig_rot == -1) {
// gv_rot = gv_rot * (-1);
// int ig_rot = gvec__.index_by_gvec(gv_rot);
//
// #pragma omp atomic update
// ptr[2 * ig_rot] += z.real();
//
// #pragma omp atomic update
// ptr[2 * ig_rot + 1] -= z.imag();
// } else {
// assert(ig_rot >= 0 && ig_rot < gvec__.num_gvec());
//
// #pragma omp atomic update
// ptr[2 * ig_rot] += z.real();
//
// #pragma omp atomic update
// ptr[2 * ig_rot + 1] += z.imag();
// }
// }
// }
// t1.stop();
//
// sddk::timer t2("sirius::Unit_cell_symmetry::symmetrize_function_pw|mpi");
// comm__.allreduce(&sym_f_pw(0), gvec__.num_gvec());
// t2.stop();
//
// double nrm = 1 / double(num_mag_sym());
// #pragma omp parallel for
// for (int ig = 0; ig < gvec__.num_gvec(); ig++) {
// f_pw__[ig] = sym_f_pw(ig) * nrm;
// }
//}
//inline void Unit_cell_symmetry::symmetrize_vector_function(double_complex* fz_pw__,
// Gvec const& gvec__,
// Communicator const& comm__) const
//{
// PROFILE("sirius::Unit_cell_symmetry::symmetrize_vector_function_pw");
//
// int gvec_count = gvec__.gvec_count(comm__.rank());
// int gvec_offset = gvec__.gvec_offset(comm__.rank());
//
// mdarray<double_complex, 1> sym_f_pw(gvec__.num_gvec());
// sym_f_pw.zero();
//
// double* ptr = (double*)&sym_f_pw(0);
//
// #pragma omp parallel for
// for (int i = 0; i < num_mag_sym(); i++)
// {
// /* full space-group symmetry operation is {R|t} */
// auto R = magnetic_group_symmetry(i).spg_op.R;
// auto t = magnetic_group_symmetry(i).spg_op.t;
// auto S = magnetic_group_symmetry(i).spin_rotation;
//
// for (int igloc = 0; igloc < gvec_count; igloc++) {
// int ig = gvec_offset + igloc;
//
// auto gv_rot = transpose(R) * gvec__.gvec(ig);
//
// /* index of a rotated G-vector */
// int ig_rot = gvec__.index_by_gvec(gv_rot);
//
// double_complex z = fz_pw__[ig] * std::exp(double_complex(0, twopi * (gvec__.gvec(ig) * t))) * S(2, 2);
//
// if (gvec__.reduced() && ig_rot == -1) {
// gv_rot = gv_rot * (-1);
// int ig_rot = gvec__.index_by_gvec(gv_rot);
//
// #pragma omp atomic update
// ptr[2 * ig_rot] += z.real();
//
// #pragma omp atomic update
// ptr[2 * ig_rot + 1] -= z.imag();
// } else {
// assert(ig_rot >= 0 && ig_rot < gvec__.num_gvec());
//
// #pragma omp atomic update
// ptr[2 * ig_rot] += z.real();
//
// #pragma omp atomic update
// ptr[2 * ig_rot + 1] += z.imag();
// }
// }
// }
// comm__.allreduce(&sym_f_pw(0), gvec__.num_gvec());
//
// for (int ig = 0; ig < gvec__.num_gvec(); ig++) {
// fz_pw__[ig] = sym_f_pw(ig) / double(num_mag_sym());
// }
//}
inline void Unit_cell_symmetry::symmetrize_vector_function(double_complex* fz_pw__,
remap_gvec_to_shells const& remap_gvec__,
mdarray<double_complex, 3> const& sym_phase_factors__) const
{
PROFILE("sirius::Unit_cell_symmetry::symmetrize_vector_function_pw");
auto v = remap_gvec__.remap_forward(fz_pw__);
std::vector<double_complex> sym_f_pw(v.size(), 0);
#pragma omp parallel
{
int nt = omp_get_max_threads();
int tid = omp_get_thread_num();
for (int igloc = 0; igloc < remap_gvec__.a2a_recv.size(); igloc++) {
vector3d<int> G(&remap_gvec__.gvec_remapped_(0, igloc));
int igsh = remap_gvec__.gvec_shell_remapped(igloc);
if (igsh % nt == tid) {
for (int i = 0; i < num_mag_sym(); i++) {
/* full space-group symmetry operation is {R|t} */
auto R = magnetic_group_symmetry(i).spg_op.R;
auto S = magnetic_group_symmetry(i).spin_rotation;
auto z = v[igloc] * sym_phase_factors__(0, G[0], i) *
sym_phase_factors__(1, G[1], i) *
sym_phase_factors__(2, G[2], i) * S(2, 2);
auto gv_rot = transpose(R) * G;
/* index of a rotated G-vector */
int ig_rot = remap_gvec__.index_by_gvec(gv_rot);
if (ig_rot == -1) {
gv_rot = gv_rot * (-1);
ig_rot = remap_gvec__.index_by_gvec(gv_rot);
assert(ig_rot >=0 && ig_rot < (int)v.size());
sym_f_pw[ig_rot] += std::conj(z);
} else {
assert(ig_rot >=0 && ig_rot < (int)v.size());
sym_f_pw[ig_rot] += z;
}
}
}
}
}
double nrm = 1 / double(num_mag_sym());
#pragma omp parallel for schedule(static)
for (int ig = 0; ig < remap_gvec__.a2a_recv.size(); ig++) {
sym_f_pw[ig] *= nrm;
}
remap_gvec__.remap_backward(sym_f_pw, fz_pw__);
}
//inline void Unit_cell_symmetry::symmetrize_vector_function(double_complex* fx_pw__,
// double_complex* fy_pw__,
// double_complex* fz_pw__,
// Gvec const& gvec__,
// Communicator const& comm__) const
//{
// PROFILE("sirius::Unit_cell_symmetry::symmetrize_vector_function_pw");
//
// int gvec_count = gvec__.gvec_count(comm__.rank());
// int gvec_offset = gvec__.gvec_offset(comm__.rank());
// mdarray<double_complex, 1> sym_fx_pw(gvec__.num_gvec());
// mdarray<double_complex, 1> sym_fy_pw(gvec__.num_gvec());
// mdarray<double_complex, 1> sym_fz_pw(gvec__.num_gvec());
// sym_fx_pw.zero();
// sym_fy_pw.zero();
// sym_fz_pw.zero();
//
// double* ptr_x = (double*)&sym_fx_pw(0);
// double* ptr_y = (double*)&sym_fy_pw(0);
// double* ptr_z = (double*)&sym_fz_pw(0);
//
// std::vector<double_complex*> v_pw_in({fx_pw__, fy_pw__, fz_pw__});
//
// #pragma omp parallel for
// for (int i = 0; i < num_mag_sym(); i++) {
// /* full space-group symmetry operation is {R|t} */
// auto R = magnetic_group_symmetry(i).spg_op.R;
// auto t = magnetic_group_symmetry(i).spg_op.t;
// auto S = magnetic_group_symmetry(i).spin_rotation;
//
// for (int igloc = 0; igloc < gvec_count; igloc++) {
// int ig = gvec_offset + igloc;
//
// auto gv_rot = transpose(R) * gvec__.gvec(ig);
//
// /* index of a rotated G-vector */
// int ig_rot = gvec__.index_by_gvec(gv_rot);
//
//
// double_complex phase = std::exp(double_complex(0, twopi * (gvec__.gvec(ig) * t)));
// vector3d<double_complex> vz;
// for (int j: {0, 1, 2}) {
// for (int k: {0, 1, 2}) {
// vz[j] += phase * S(j, k) * v_pw_in[k][ig];
// }
// }
// if (gvec__.reduced() && ig_rot == -1) {
// gv_rot = gv_rot * (-1);
// int ig_rot = gvec__.index_by_gvec(gv_rot);
//
// #pragma omp atomic update
// ptr_x[2 * ig_rot] += vz[0].real();
//
// #pragma omp atomic update
// ptr_y[2 * ig_rot] += vz[1].real();
//
// #pragma omp atomic update
// ptr_z[2 * ig_rot] += vz[2].real();
//
// #pragma omp atomic update
// ptr_x[2 * ig_rot + 1] -= vz[0].imag();
//
// #pragma omp atomic update
// ptr_y[2 * ig_rot + 1] -= vz[1].imag();
//
// #pragma omp atomic update
// ptr_z[2 * ig_rot + 1] -= vz[2].imag();
// } else {
// assert(ig_rot >= 0 && ig_rot < gvec__.num_gvec());
//
// #pragma omp atomic update
// ptr_x[2 * ig_rot] += vz[0].real();
//
// #pragma omp atomic update
// ptr_y[2 * ig_rot] += vz[1].real();
//
// #pragma omp atomic update
// ptr_z[2 * ig_rot] += vz[2].real();
//
// #pragma omp atomic update
// ptr_x[2 * ig_rot + 1] += vz[0].imag();
//
// #pragma omp atomic update
// ptr_y[2 * ig_rot + 1] += vz[1].imag();
//
// #pragma omp atomic update
// ptr_z[2 * ig_rot + 1] += vz[2].imag();
// }
// }
// }
// comm__.allreduce(&sym_fx_pw(0), gvec__.num_gvec());
// comm__.allreduce(&sym_fy_pw(0), gvec__.num_gvec());
// comm__.allreduce(&sym_fz_pw(0), gvec__.num_gvec());
//
// for (int ig = 0; ig < gvec__.num_gvec(); ig++) {
// fx_pw__[ig] = sym_fx_pw(ig) / double(num_mag_sym());
// fy_pw__[ig] = sym_fy_pw(ig) / double(num_mag_sym());
// fz_pw__[ig] = sym_fz_pw(ig) / double(num_mag_sym());
// }
//}
inline void Unit_cell_symmetry::symmetrize_vector_function(double_complex* fx_pw__,
double_complex* fy_pw__,
double_complex* fz_pw__,
remap_gvec_to_shells const& remap_gvec__,
mdarray<double_complex, 3> const& sym_phase_factors__) const
{
PROFILE("sirius::Unit_cell_symmetry::symmetrize_vector_function_pw");
auto vx = remap_gvec__.remap_forward(fx_pw__);
auto vy = remap_gvec__.remap_forward(fy_pw__);
auto vz = remap_gvec__.remap_forward(fz_pw__);
std::vector<double_complex> sym_fx_pw(vx.size(), 0);
std::vector<double_complex> sym_fy_pw(vx.size(), 0);
std::vector<double_complex> sym_fz_pw(vx.size(), 0);
#pragma omp parallel
{
int nt = omp_get_max_threads();
int tid = omp_get_thread_num();
for (int igloc = 0; igloc < remap_gvec__.a2a_recv.size(); igloc++) {
vector3d<int> G(&remap_gvec__.gvec_remapped_(0, igloc));
int igsh = remap_gvec__.gvec_shell_remapped(igloc);
if (igsh % nt == tid) {
for (int i = 0; i < num_mag_sym(); i++) {
/* full space-group symmetry operation is {R|t} */
auto R = magnetic_group_symmetry(i).spg_op.R;
auto S = magnetic_group_symmetry(i).spin_rotation;
auto phase = sym_phase_factors__(0, G[0], i) *
sym_phase_factors__(1, G[1], i) *
sym_phase_factors__(2, G[2], i);
vector3d<double_complex> v_rot;
for (int j: {0, 1, 2}) {
v_rot[j] = phase * (S(j, 0) * vx[igloc] + S(j, 1) * vy[igloc] + S(j, 2) * vz[igloc]);
}
auto gv_rot = transpose(R) * G;
/* index of a rotated G-vector */
int ig_rot = remap_gvec__.index_by_gvec(gv_rot);
if (ig_rot == -1) {
gv_rot = gv_rot * (-1);
ig_rot = remap_gvec__.index_by_gvec(gv_rot);
assert(ig_rot >=0 && ig_rot < (int)vx.size());
sym_fx_pw[ig_rot] += std::conj(v_rot[0]);
sym_fy_pw[ig_rot] += std::conj(v_rot[1]);
sym_fz_pw[ig_rot] += std::conj(v_rot[2]);
} else {
assert(ig_rot >=0 && ig_rot < (int)vx.size());
sym_fx_pw[ig_rot] += v_rot[0];
sym_fy_pw[ig_rot] += v_rot[1];
sym_fz_pw[ig_rot] += v_rot[2];
}
}
}
}
}
double nrm = 1 / double(num_mag_sym());
#pragma omp parallel for schedule(static)
for (int ig = 0; ig < remap_gvec__.a2a_recv.size(); ig++) {
sym_fx_pw[ig] *= nrm;
sym_fy_pw[ig] *= nrm;
sym_fz_pw[ig] *= nrm;
}
remap_gvec__.remap_backward(sym_fx_pw, fx_pw__);
remap_gvec__.remap_backward(sym_fy_pw, fy_pw__);
remap_gvec__.remap_backward(sym_fz_pw, fz_pw__);
}
inline void Unit_cell_symmetry::symmetrize_function(mdarray<double, 3>& frlm__,
Communicator const& comm__) const
{
PROFILE("sirius::Unit_cell_symmetry::symmetrize_function_mt");
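/* Symmetrize a real function given as a real-spherical-harmonic expansion inside the
 * muffin-tin spheres: for each symmetry operation the (lm, r) block of atom ia is rotated
 * with the SHT rotation matrix (one dgemm per atom) and accumulated with weight
 * 1/num_mag_sym() into the symmetry-equivalent atom ja = sym_table_(ia, isym); the
 * atom-distributed result is then gathered onto all ranks. */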
int lmmax = (int)frlm__.size(0);
int nrmax = (int)frlm__.size(1);
if (num_atoms_ != (int)frlm__.size(2)) TERMINATE("wrong number of atoms");
splindex<block> spl_atoms(num_atoms_, comm__.size(), comm__.rank());
int lmax = Utils::lmax_by_lmmax(lmmax);
mdarray<double, 2> rotm(lmmax, lmmax);
mdarray<double, 3> fsym(lmmax, nrmax, spl_atoms.local_size());
fsym.zero();
double alpha = 1.0 / double(num_mag_sym());
for (int i = 0; i < num_mag_sym(); i++) {
/* full space-group symmetry operation is {R|t} */
int pr = magnetic_group_symmetry(i).spg_op.proper;
auto eang = magnetic_group_symmetry(i).spg_op.euler_angles;
int isym = magnetic_group_symmetry(i).isym;
SHT::rotation_matrix(lmax, eang, pr, rotm);
for (int ia = 0; ia < num_atoms_; ia++) {
int ja = sym_table_(ia, isym);
auto location = spl_atoms.location(ja);
if (location.rank == comm__.rank()) {
linalg<CPU>::gemm(0, 0, lmmax, nrmax, lmmax, alpha, rotm.at<CPU>(), rotm.ld(),
frlm__.at<CPU>(0, 0, ia), frlm__.ld(), 1.0,
fsym.at<CPU>(0, 0, location.local_index), fsym.ld());
}
}
}
double* sbuf = spl_atoms.local_size() ? fsym.at<CPU>() : nullptr;
comm__.allgather(sbuf, frlm__.at<CPU>(),
lmmax * nrmax * spl_atoms.global_offset(),
lmmax * nrmax * spl_atoms.local_size());
}
inline void Unit_cell_symmetry::symmetrize_vector_function(mdarray<double, 3>& vz_rlm__,
Communicator const& comm__) const
{
PROFILE("sirius::Unit_cell_symmetry::symmetrize_vector_function_mt");
int lmmax = (int)vz_rlm__.size(0);
int nrmax = (int)vz_rlm__.size(1);
splindex<block> spl_atoms(num_atoms_, comm__.size(), comm__.rank());
if (num_atoms_ != (int)vz_rlm__.size(2)) {
TERMINATE("wrong number of atoms");
}
int lmax = Utils::lmax_by_lmmax(lmmax);
mdarray<double, 2> rotm(lmmax, lmmax);
mdarray<double, 3> fsym(lmmax, nrmax, spl_atoms.local_size());
fsym.zero();
double alpha = 1.0 / double(num_mag_sym());
for (int i = 0; i < num_mag_sym(); i++) {
/* full space-group symmetry operation is {R|t} */
int pr = magnetic_group_symmetry(i).spg_op.proper;
auto eang = magnetic_group_symmetry(i).spg_op.euler_angles;
int isym = magnetic_group_symmetry(i).isym;
auto S = magnetic_group_symmetry(i).spin_rotation;
SHT::rotation_matrix(lmax, eang, pr, rotm);
for (int ia = 0; ia < num_atoms_; ia++) {
int ja = sym_table_(ia, isym);
auto location = spl_atoms.location(ja);
if (location.rank == comm__.rank()) {
linalg<CPU>::gemm(0, 0, lmmax, nrmax, lmmax, alpha * S(2, 2), rotm.at<CPU>(), rotm.ld(),
vz_rlm__.at<CPU>(0, 0, ia), vz_rlm__.ld(), 1.0,
fsym.at<CPU>(0, 0, location.local_index), fsym.ld());
}
}
}
double* sbuf = spl_atoms.local_size() ? fsym.at<CPU>() : nullptr;
comm__.allgather(sbuf, vz_rlm__.at<CPU>(),
lmmax * nrmax * spl_atoms.global_offset(),
lmmax * nrmax * spl_atoms.local_size());
}
inline void Unit_cell_symmetry::symmetrize_vector_function(mdarray<double, 3>& vx_rlm__,
mdarray<double, 3>& vy_rlm__,
mdarray<double, 3>& vz_rlm__,
Communicator const& comm__) const
{
PROFILE("sirius::Unit_cell_symmetry::symmetrize_vector_function_mt");
int lmmax = (int)vx_rlm__.size(0);
int nrmax = (int)vx_rlm__.size(1);
splindex<block> spl_atoms(num_atoms_, comm__.size(), comm__.rank());
int lmax = Utils::lmax_by_lmmax(lmmax);
mdarray<double, 2> rotm(lmmax, lmmax);
mdarray<double, 4> v_sym(lmmax, nrmax, spl_atoms.local_size(), 3);
v_sym.zero();
mdarray<double, 3> vtmp(lmmax, nrmax, 3);
double alpha = 1.0 / double(num_mag_sym());
std::vector<mdarray<double, 3>*> vrlm({&vx_rlm__, &vy_rlm__, &vz_rlm__});
for (int i = 0; i < num_mag_sym(); i++) {
/* full space-group symmetry operation is {R|t} */
int pr = magnetic_group_symmetry(i).spg_op.proper;
auto eang = magnetic_group_symmetry(i).spg_op.euler_angles;
int isym = magnetic_group_symmetry(i).isym;
auto S = magnetic_group_symmetry(i).spin_rotation;
SHT::rotation_matrix(lmax, eang, pr, rotm);
for (int ia = 0; ia < num_atoms_; ia++) {
int ja = sym_table_(ia, isym);
auto location = spl_atoms.location(ja);
if (location.rank == comm__.rank()) {
for (int k: {0, 1, 2}) {
linalg<CPU>::gemm(0, 0, lmmax, nrmax, lmmax, alpha, rotm.at<CPU>(), rotm.ld(),
vrlm[k]->at<CPU>(0, 0, ia), vrlm[k]->ld(), 0.0,
vtmp.at<CPU>(0, 0, k), vtmp.ld());
}
#pragma omp parallel
for (int k: {0, 1, 2}) {
for (int j: {0, 1, 2}) {
#pragma omp for
for (int ir = 0; ir < nrmax; ir++) {
for (int lm = 0; lm < lmmax; lm++) {
v_sym(lm, ir, location.local_index, k) += S(k, j) * vtmp(lm, ir, j);
}
}
}
}
}
}
}
for (int k: {0, 1, 2}) {
double* sbuf = spl_atoms.local_size() ? v_sym.at<CPU>(0, 0, 0, k) : nullptr;
comm__.allgather(sbuf, vrlm[k]->at<CPU>(),
lmmax * nrmax * spl_atoms.global_offset(),
lmmax * nrmax * spl_atoms.local_size());
}
}
} // namespace
/** \page sym Symmetry
* \section section1 Definition of symmetry operation
*
 * SIRIUS uses Spglib to find the spatial symmetry operations. Spglib defines a symmetry operation in fractional
* coordinates:
* \f[
* {\bf x'} = \{ {\bf R} | {\bf t} \} {\bf x} \equiv {\bf R}{\bf x} + {\bf t}
* \f]
 * where \b R is the proper or improper rotation matrix with elements equal to -1, 0, 1 and a determinant of 1
 * (pure rotation) or -1 (rotoreflection), and \b t is the fractional translation associated with the symmetry
* operation. The inverse of the symmetry operation is:
* \f[
* {\bf x} = \{ {\bf R} | {\bf t} \}^{-1} {\bf x'} = {\bf R}^{-1} ({\bf x'} - {\bf t}) =
* {\bf R}^{-1} {\bf x'} - {\bf R}^{-1} {\bf t}
* \f]
*
* We will always use an \a active transformation (transformation of vectors or functions) and never a passive
 * transformation (transformation of the coordinate system). However, one should remember the definition of the function
* transformation:
* \f[
* \hat {\bf P} f({\bf r}) \equiv f(\hat {\bf P}^{-1} {\bf r})
* \f]
*
* It is straightforward to get the rotation matrix in Cartesian coordinates. We know how the vector in Cartesian
* coordinates is obtained from the vector in fractional coordinates:
* \f[
* {\bf v} = {\bf L} {\bf x}
* \f]
 * where \b L is the 3x3 matrix whose columns are the three lattice vectors. The backward transformation is simply
* \f[
* {\bf x} = {\bf L}^{-1} {\bf v}
* \f]
 * Now we write the rotation operation in fractional coordinates and substitute the backward transformation from
 * Cartesian coordinates:
* \f[
* {\bf x'} = {\bf R}{\bf x} \rightarrow {\bf L}^{-1} {\bf v'} = {\bf R} {\bf L}^{-1} {\bf v}
* \f]
* from which we derive the rotation operation in Cartesian coordinates:
* \f[
* {\bf v'} = {\bf L} {\bf R} {\bf L}^{-1} {\bf v}
* \f]
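 *
 * As a small sketch (not part of the SIRIUS API; it assumes matrix3d provides operator* and a
 * free function inverse()), the Cartesian rotation matrix can be assembled directly from the
 * fractional rotation matrix \b R and the lattice matrix \b L:
 * \code
 * // R_cart = L * R * L^{-1}, so that v' = R_cart * v for Cartesian vectors v
 * matrix3d<double> rotation_matrix_cart(matrix3d<double> const& L, matrix3d<double> const& R)
 * {
 *     return L * R * inverse(L);
 * }
 * \endcode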
*/
#endif // __UNIT_CELL_SYMMETRY_H__
|
3d25pt.lbpar.c
|
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
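/*
 * Reference form of the update (a sketch for readability only; the loop nest actually executed
 * below is the tiled, skewed version generated by CLooG): at every time step each interior
 * point 4 <= z,y,x <= N-5 is updated as
 *
 *   A[t+1][z][y][x] = 2*A[t][z][y][x] - A[t-1][z][y][x]
 *                   + roc2[z][y][x] * ( coef0 *  A[t][z][y][x]
 *                                     + coef1 * (sum of the 6 axis neighbors at distance 1)
 *                                     + coef2 * (sum of the 6 axis neighbors at distance 2)
 *                                     + coef3 * (sum of the 6 axis neighbors at distance 3)
 *                                     + coef4 * (sum of the 6 axis neighbors at distance 4) );
 *
 * The two time levels are kept in A[t % 2] and A[(t + 1) % 2].
 */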
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 4) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
Nt = atoi(argv[4]);
} else {
printf("Usage: %s <Nx> <Ny> <Nz> <Nt>\n", argv[0]);
return 1;
}
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 24;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = A[0][i][j][k]; /* define both time levels so the first update reads initialized data */
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=Nt-1;t1++) {
lbp=ceild(t1+1,2);
ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1-4,6),ceild(8*t2-Nz-11,24));t3<=min(floord(4*Nt+Ny-9,24),floord(4*t1+Ny-1,24));t3++) {
for (t4=max(max(ceild(t1-30,32),ceild(8*t2-Nz-115,128)),ceild(24*t3-Ny-115,128));t4<=min(min(floord(4*Nt+Nx-9,128),floord(4*t1+Nx-1,128)),floord(24*t3+Nx+11,128));t4++) {
for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),6*t3+4),32*t4+30);t5++) {
for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
lbv=max(128*t4,4*t5+4);
ubv=min(128*t4+127,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
task_types.c
|
// RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>
#include <math.h>
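/*
 * The numeric task_type values in the CHECK lines below are OR-combinations of the
 * ompt_task_flag_t bits defined by the OMPT interface (values as listed in the OpenMP 5.0
 * specification; decoded here for readability):
 *   ompt_task_initial    = 0x00000001
 *   ompt_task_implicit   = 0x00000002
 *   ompt_task_explicit   = 0x00000004
 *   ompt_task_undeferred = 0x08000000 (134217728)
 *   ompt_task_untied     = 0x10000000 (268435456)
 *   ompt_task_final      = 0x20000000 (536870912)
 * e.g. ompt_task_explicit|ompt_task_undeferred = 0x08000004 = 134217732.
 */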
int main() {
//initialize the OpenMP runtime
omp_get_num_threads();
// initial task
print_ids(0);
int x = 0;
// implicit task
#pragma omp parallel num_threads(1)
{
print_ids(0);
x++;
}
#pragma omp parallel num_threads(2)
{
// explicit task
#pragma omp single
#pragma omp task
{
print_ids(0);
x++;
}
// explicit task with undeferred
#pragma omp single
#pragma omp task if (0)
{
print_ids(0);
x++;
}
// explicit task with untied
#pragma omp single
#pragma omp task untied
{
// Output of thread_id is needed to know on which thread the task is executed
printf("%" PRIu64 ": explicit_untied\n", ompt_get_thread_data()->value);
print_ids(0);
print_frame(1);
x++;
#pragma omp taskyield
printf("%" PRIu64 ": explicit_untied(2)\n",
ompt_get_thread_data()->value);
print_ids(0);
print_frame(1);
x++;
#pragma omp taskwait
printf("%" PRIu64 ": explicit_untied(3)\n",
ompt_get_thread_data()->value);
print_ids(0);
print_frame(1);
x++;
}
// explicit task with final
#pragma omp single
#pragma omp task final(1)
{
print_ids(0);
x++;
// nested explicit task with final and undeferred
#pragma omp task
{
print_ids(0);
x++;
}
}
// Mergeable task test deactivated for now
// explicit task with mergeable
/*
#pragma omp task mergeable if((int)sin(0))
{
print_ids(0);
x++;
}
*/
// TODO: merged task
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_create: parent_task_id=0
// CHECK-SAME: parent_task_frame.exit=[[NULL]]
// CHECK-SAME: parent_task_frame.reenter=[[NULL]]
// CHECK-SAME: new_task_id=[[INITIAL_TASK_ID:[0-9]+]], codeptr_ra=[[NULL]]
// CHECK-SAME: task_type=ompt_task_initial=1, has_dependences=no
// CHECK-NOT: 0: parallel_data initially not null
// initial task
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id={{[0-9]+}}
// CHECK-SAME: task_id=[[INITIAL_TASK_ID]], exit_frame=[[NULL]]
// CHECK-SAME: reenter_frame=[[NULL]]
// CHECK-SAME: task_type=ompt_task_initial=1, thread_num=0
// implicit task
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id={{[0-9]+}}
// CHECK-SAME: task_id={{[0-9]+}}, exit_frame={{0x[0-f]+}}
// CHECK-SAME: reenter_frame=[[NULL]]
// CHECK-SAME: task_type=ompt_task_implicit|ompt_task_undeferred=134217730
// CHECK-SAME: thread_num=0
// explicit task
// CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}
// CHECK-SAME: parent_task_frame.exit={{0x[0-f]+}}
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}
// CHECK-SAME: new_task_id=[[EXPLICIT_TASK_ID:[0-9]+]]
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK-SAME: task_type=ompt_task_explicit=4
// CHECK-SAME: has_dependences=no
// CHECK: [[THREAD_ID_1:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: second_task_id=[[EXPLICIT_TASK_ID]]
// CHECK: [[THREAD_ID_1]]: task level 0: parallel_id=[[PARALLEL_ID:[0-9]+]]
// CHECK-SAME: task_id=[[EXPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}
// CHECK-SAME: reenter_frame=[[NULL]], task_type=ompt_task_explicit=4
// CHECK-SAME: thread_num={{[01]}}
// explicit task with undeferred
// CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}
// CHECK-SAME: parent_task_frame.exit={{0x[0-f]+}}
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}
// CHECK-SAME: new_task_id=[[EXPLICIT_UNDEFERRED_TASK_ID:[0-9]+]]
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_undeferred=134217732
// CHECK-SAME: has_dependences=no
// CHECK: [[THREAD_ID_2:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: second_task_id=[[EXPLICIT_UNDEFERRED_TASK_ID]]
// CHECK: [[THREAD_ID_2]]: task level 0: parallel_id=[[PARALLEL_ID]]
// CHECK-SAME: task_id=[[EXPLICIT_UNDEFERRED_TASK_ID]]
// CHECK-SAME: exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_undeferred=134217732
// CHECK-SAME: thread_num={{[01]}}
// explicit task with untied
// CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}
// CHECK-SAME: parent_task_frame.exit={{0x[0-f]+}}
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}
// CHECK-SAME: new_task_id=[[EXPLICIT_UNTIED_TASK_ID:[0-9]+]]
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_untied=268435460
// CHECK-SAME: has_dependences=no
// Here the thread_id cannot be taken from a schedule event as there
// may be multiple of those
// CHECK: [[THREAD_ID_3:[0-9]+]]: explicit_untied
// CHECK: [[THREAD_ID_3]]: task level 0: parallel_id=[[PARALLEL_ID]]
// CHECK-SAME: task_id=[[EXPLICIT_UNTIED_TASK_ID]]
// CHECK-SAME: exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_untied=268435460
// CHECK-SAME: thread_num={{[01]}}
// after taskyield
// CHECK: [[THREAD_ID_3_2:[0-9]+]]: explicit_untied(2)
// CHECK: [[THREAD_ID_3_2]]: task level 0: parallel_id=[[PARALLEL_ID]]
// CHECK-SAME: task_id=[[EXPLICIT_UNTIED_TASK_ID]]
// CHECK-SAME: exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_untied=268435460
// CHECK-SAME: thread_num={{[01]}}
// after taskwait
// CHECK: [[THREAD_ID_3_3:[0-9]+]]: explicit_untied(3)
// CHECK: [[THREAD_ID_3_3]]: task level 0: parallel_id=[[PARALLEL_ID]]
// CHECK-SAME: task_id=[[EXPLICIT_UNTIED_TASK_ID]]
// CHECK-SAME: exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_untied=268435460
// CHECK-SAME: thread_num={{[01]}}
// explicit task with final
// CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}
// CHECK-SAME: parent_task_frame.exit={{0x[0-f]+}}
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}
// CHECK-SAME: new_task_id=[[EXPLICIT_FINAL_TASK_ID:[0-9]+]]
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_final=536870916
// CHECK-SAME: has_dependences=no
// CHECK: [[THREAD_ID_4:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: second_task_id=[[EXPLICIT_FINAL_TASK_ID]]
// CHECK: [[THREAD_ID_4]]: task level 0: parallel_id=[[PARALLEL_ID]]
// CHECK-SAME: task_id=[[EXPLICIT_FINAL_TASK_ID]]
// CHECK-SAME: exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_final=536870916
// CHECK-SAME: thread_num={{[01]}}
// nested explicit task with final and undeferred
// CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}
// CHECK-SAME: parent_task_frame.exit={{0x[0-f]+}}
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}
// CHECK-SAME: new_task_id=[[NESTED_FINAL_UNDEFERRED_TASK_ID:[0-9]+]]
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_undeferred
// CHECK-SAME:|ompt_task_final=671088644
// CHECK-SAME: has_dependences=no
// CHECK: [[THREAD_ID_5:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: second_task_id=[[NESTED_FINAL_UNDEFERRED_TASK_ID]]
// CHECK: [[THREAD_ID_5]]: task level 0: parallel_id=[[PARALLEL_ID]]
// CHECK-SAME: task_id=[[NESTED_FINAL_UNDEFERRED_TASK_ID]]
// CHECK-SAME: exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_undeferred
// CHECK-SAME:|ompt_task_final=671088644
// CHECK-SAME: thread_num={{[01]}}
return 0;
}
|
hmacSHA1_fmt_plug.c
|
/*
* This software is Copyright (c) 2012, 2013 magnum, and it is hereby released
* to the general public under the following terms: Redistribution and use in
* source and binary forms, with or without modification, are permitted.
*
* Originally based on hmac-md5 by Bartavelle
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_hmacSHA1;
#elif FMT_REGISTERS_H
john_register_one(&fmt_hmacSHA1);
#else
#include <string.h>
#include "arch.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // tuned for i7 using SSE2 and w/o HT
#endif
#endif
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "sha.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#include "memdbg.h"
#define FORMAT_LABEL "HMAC-SHA1"
#define FORMAT_NAME ""
#ifdef SIMD_COEF_32
#define SHA1_N (SIMD_PARA_SHA1 * SIMD_COEF_32)
#endif
#define ALGORITHM_NAME "password is key, SHA1 " SHA1_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define PAD_SIZE 64
#define BINARY_SIZE 20
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_LENGTH PAD_SIZE
#define SALT_ALIGN MEM_ALIGN_NONE
#define CIPHERTEXT_LENGTH (2 * SALT_LENGTH + 2 * BINARY_SIZE)
#define HEXCHARS "0123456789abcdef"
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SHA1_N
#define MAX_KEYS_PER_CRYPT SHA1_N
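/*
 * GETPOS(i, index) maps byte i of candidate `index` into the interleaved SIMD input buffer:
 * keys are grouped SIMD_COEF_32 at a time into blocks of SHA_BUF_SIZ 32-bit words per lane,
 * consecutive 32-bit words of one key are SIMD_COEF_32 words apart, and (3 - (i & 3)) stores
 * the bytes of each word in big-endian order as expected by SHA-1.
 */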
#define GETPOS(i, index) ((index & (SIMD_COEF_32 - 1)) * 4 + ((i) & (0xffffffff - 3)) * SIMD_COEF_32 + (3 - ((i) & 3)) + (unsigned int)index/SIMD_COEF_32 * SHA_BUF_SIZ * 4 * SIMD_COEF_32)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests tests[] = {
{"The quick brown fox jumps over the lazy dog#de7c9b85b8b78aa6bc8a7a36f70a90701c9db4d9", "key"},
{"#fbdb1d1b18aa6c08324b7d64b71fb76370690e1d", ""},
{"Beppe#Grillo#DEBBDB4D549ABE59FAB67D0FB76B76FDBC4431F1", "Io credo nella reincarnazione e sono di Genova; per cui ho fatto testamento e mi sono lasciato tutto a me."},
{"7oTwG04WUjJ0BTDFFIkTJlgl#293b75c1f28def530c17fc8ae389008179bf4091", "late*night"}, // from the test suite
{"D2hIU7fdd78WARm5dt95k6MD#e741a6100ccfd1205a8ffe1321b61fc5aa06f6db", "123"},
{"6Fv5kYoxuEuroTkagbf3ZRsV#370edad3540b1ad3e96b03ccf3956645306074b7", "123456789"},
{"3uqqtMBC7vzh9tdVMPJ9bAwE#65ed35cf94e2180d6a797e5ad5e4175891427572", "passWOrd"},
{NULL}
};
#ifdef SIMD_COEF_32
#define cur_salt hmacsha1_cur_salt
static unsigned char *crypt_key;
static unsigned char *ipad, *prep_ipad;
static unsigned char *opad, *prep_opad;
JTR_ALIGN(MEM_ALIGN_SIMD) unsigned char cur_salt[SHA_BUF_SIZ * 4 * SHA1_N];
static int bufsize;
#else
static unsigned char cur_salt[SALT_LENGTH];
static uint32_t (*crypt_key)[BINARY_SIZE / sizeof(uint32_t)];
static unsigned char (*ipad)[PAD_SIZE];
static unsigned char (*opad)[PAD_SIZE];
static SHA_CTX *ipad_ctx;
static SHA_CTX *opad_ctx;
#endif
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
static int new_keys;
#define SALT_SIZE sizeof(cur_salt)
#ifdef SIMD_COEF_32
static void clear_keys(void)
{
memset(ipad, 0x36, bufsize);
memset(opad, 0x5C, bufsize);
}
#endif
static void init(struct fmt_main *self)
{
#ifdef SIMD_COEF_32
unsigned int i;
#endif
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
bufsize = sizeof(*opad) * self->params.max_keys_per_crypt * SHA_BUF_SIZ * 4;
crypt_key = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
ipad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
opad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
prep_ipad = mem_calloc_align(self->params.max_keys_per_crypt *
BINARY_SIZE,
sizeof(*prep_ipad), MEM_ALIGN_SIMD);
prep_opad = mem_calloc_align(self->params.max_keys_per_crypt *
BINARY_SIZE,
sizeof(*prep_opad), MEM_ALIGN_SIMD);
for (i = 0; i < self->params.max_keys_per_crypt; ++i) {
crypt_key[GETPOS(BINARY_SIZE, i)] = 0x80;
((unsigned int*)crypt_key)[15 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + i/SIMD_COEF_32 * SHA_BUF_SIZ * SIMD_COEF_32] = (BINARY_SIZE + 64) << 3;
}
clear_keys();
#else
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_key));
ipad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*ipad));
opad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad));
ipad_ctx = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*ipad_ctx));
opad_ctx = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*opad_ctx));
#endif
saved_plain = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_plain));
}
static void done(void)
{
MEM_FREE(saved_plain);
#ifdef SIMD_COEF_32
MEM_FREE(prep_opad);
MEM_FREE(prep_ipad);
#else
MEM_FREE(opad_ctx);
MEM_FREE(ipad_ctx);
#endif
MEM_FREE(opad);
MEM_FREE(ipad);
MEM_FREE(crypt_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
int pos, i;
char *p;
p = strrchr(ciphertext, '#'); // allow # in salt
if (!p || p > &ciphertext[strlen(ciphertext) - 1]) return 0;
i = (int)(p - ciphertext);
#if SIMD_COEF_32
if (i > 55) return 0;
#else
if (i > SALT_LENGTH) return 0;
#endif
pos = i + 1;
if (strlen(ciphertext+pos) != BINARY_SIZE * 2) return 0;
for (i = pos; i < BINARY_SIZE*2+pos; i++)
{
if (!((('0' <= ciphertext[i])&&(ciphertext[i] <= '9')) ||
(('a' <= ciphertext[i])&&(ciphertext[i] <= 'f'))
|| (('A' <= ciphertext[i])&&(ciphertext[i] <= 'F'))))
return 0;
}
return 1;
}
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[CIPHERTEXT_LENGTH + 1];
if (strstr(ciphertext, "$SOURCE_HASH$"))
return ciphertext;
strnzcpy(out, ciphertext, CIPHERTEXT_LENGTH + 1);
strlwr(strrchr(out, '#'));
return out;
}
static void set_salt(void *salt)
{
memcpy(&cur_salt, salt, SALT_SIZE);
}
static void set_key(char *key, int index)
{
int len;
#ifdef SIMD_COEF_32
uint32_t *ipadp = (uint32_t*)&ipad[GETPOS(3, index)];
uint32_t *opadp = (uint32_t*)&opad[GETPOS(3, index)];
const uint32_t *keyp = (uint32_t*)key;
unsigned int temp;
len = strlen(key);
memcpy(saved_plain[index], key, len);
saved_plain[index][len] = 0;
if (len > PAD_SIZE) {
unsigned char k0[BINARY_SIZE];
SHA_CTX ctx;
int i;
SHA1_Init(&ctx);
SHA1_Update(&ctx, key, len);
SHA1_Final(k0, &ctx);
keyp = (unsigned int*)k0;
for (i = 0; i < BINARY_SIZE / 4; i++, ipadp += SIMD_COEF_32, opadp += SIMD_COEF_32)
{
temp = JOHNSWAP(*keyp++);
*ipadp ^= temp;
*opadp ^= temp;
}
}
else
while(((temp = JOHNSWAP(*keyp++)) & 0xff000000)) {
if (!(temp & 0x00ff0000) || !(temp & 0x0000ff00))
{
((unsigned short*)ipadp)[1] ^=
(unsigned short)(temp >> 16);
((unsigned short*)opadp)[1] ^=
(unsigned short)(temp >> 16);
break;
}
*ipadp ^= temp;
*opadp ^= temp;
if (!(temp & 0x000000ff))
break;
ipadp += SIMD_COEF_32;
opadp += SIMD_COEF_32;
}
#else
int i;
len = strlen(key);
memcpy(saved_plain[index], key, len);
saved_plain[index][len] = 0;
memset(ipad[index], 0x36, PAD_SIZE);
memset(opad[index], 0x5C, PAD_SIZE);
if (len > PAD_SIZE) {
SHA_CTX ctx;
unsigned char k0[BINARY_SIZE];
SHA1_Init(&ctx);
SHA1_Update(&ctx, key, len);
SHA1_Final(k0, &ctx);
len = BINARY_SIZE;
for (i = 0; i < len; i++)
{
ipad[index][i] ^= k0[i];
opad[index][i] ^= k0[i];
}
}
else
for (i = 0; i < len; i++)
{
ipad[index][i] ^= key[i];
opad[index][i] ^= key[i];
}
#endif
new_keys = 1;
}
static char *get_key(int index)
{
return saved_plain[index];
}
static int cmp_all(void *binary, int count)
{
#ifdef SIMD_COEF_32
unsigned int x, y = 0;
for (; y < (unsigned int)(count + SIMD_COEF_32 - 1) / SIMD_COEF_32; y++)
for (x = 0; x < SIMD_COEF_32; x++)
{
// NOTE crypt_key is in input format (4 * SHA_BUF_SIZ * SIMD_COEF_32)
if (((uint32_t*)binary)[0] == ((uint32_t*)crypt_key)[x + y * SIMD_COEF_32 * SHA_BUF_SIZ])
return 1;
}
return 0;
#else
int index = 0;
#if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1)
for (index = 0; index < count; index++)
#endif
if (((uint32_t*)binary)[0] == crypt_key[index][0])
return 1;
return 0;
#endif
}
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
int i;
for (i = 0; i < (BINARY_SIZE/4); i++)
// NOTE crypt_key is in input format (4 * SHA_BUF_SIZ * SIMD_COEF_32)
if (((uint32_t*)binary)[i] != ((uint32_t*)crypt_key)[i * SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32 * SHA_BUF_SIZ * SIMD_COEF_32])
return 0;
return 1;
#else
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}
static int cmp_exact(char *source, int index)
{
return (1);
}
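/*
 * HMAC-SHA1(key, msg) = SHA1((key ^ opad) || SHA1((key ^ ipad) || msg)), where ipad/opad are
 * the 0x36/0x5c bytes repeated to the 64-byte block size (keys longer than a block are first
 * replaced by SHA1(key)). Since the first compression of each inner/outer hash depends only
 * on the key, the intermediate SHA-1 states are computed once per key (gated by new_keys)
 * and reused for every salt.
 */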
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#if _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
{
#ifdef SIMD_COEF_32
if (new_keys) {
SIMDSHA1body(&ipad[index * SHA_BUF_SIZ * 4],
(unsigned int*)&prep_ipad[index * BINARY_SIZE],
NULL, SSEi_MIXED_IN);
SIMDSHA1body(&opad[index * SHA_BUF_SIZ * 4],
(unsigned int*)&prep_opad[index * BINARY_SIZE],
NULL, SSEi_MIXED_IN);
}
SIMDSHA1body(cur_salt,
(unsigned int*)&crypt_key[index * SHA_BUF_SIZ * 4],
(unsigned int*)&prep_ipad[index * BINARY_SIZE],
SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);
SIMDSHA1body(&crypt_key[index * SHA_BUF_SIZ * 4],
(unsigned int*)&crypt_key[index * SHA_BUF_SIZ * 4],
(unsigned int*)&prep_opad[index * BINARY_SIZE],
SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);
#else
SHA_CTX ctx;
if (new_keys) {
SHA1_Init(&ipad_ctx[index]);
SHA1_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);
SHA1_Init(&opad_ctx[index]);
SHA1_Update(&opad_ctx[index], opad[index], PAD_SIZE);
}
memcpy(&ctx, &ipad_ctx[index], sizeof(ctx));
SHA1_Update(&ctx, cur_salt, strlen((char*)cur_salt));
SHA1_Final((unsigned char*) crypt_key[index], &ctx);
memcpy(&ctx, &opad_ctx[index], sizeof(ctx));
SHA1_Update(&ctx, crypt_key[index], BINARY_SIZE);
SHA1_Final((unsigned char*) crypt_key[index], &ctx);
#endif
}
new_keys = 0;
return count;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
uint32_t dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
// allow # in salt
p = strrchr(ciphertext, '#') + 1;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] = (atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
#ifdef SIMD_COEF_32
alter_endianity(out, BINARY_SIZE);
#endif
return (void*)out;
}
static void *get_salt(char *ciphertext)
{
static unsigned char salt[SALT_LENGTH];
#ifdef SIMD_COEF_32
unsigned int i = 0;
unsigned int j;
unsigned total_len = 0;
#endif
memset(salt, 0, sizeof(salt));
// allow # in salt
memcpy(salt, ciphertext, strrchr(ciphertext, '#') - ciphertext);
#ifdef SIMD_COEF_32
while(((unsigned char*)salt)[total_len])
{
for (i = 0; i < SHA1_N; ++i)
cur_salt[GETPOS(total_len, i)] = ((unsigned char*)salt)[total_len];
++total_len;
}
for (i = 0; i < SHA1_N; ++i)
cur_salt[GETPOS(total_len, i)] = 0x80;
for (j = total_len + 1; j < SALT_LENGTH; ++j)
for (i = 0; i < SHA1_N; ++i)
cur_salt[GETPOS(j, i)] = 0;
for (i = 0; i < SHA1_N; ++i)
((unsigned int*)cur_salt)[15 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + i/SIMD_COEF_32 * SHA_BUF_SIZ * SIMD_COEF_32] = (total_len + 64) << 3;
return cur_salt;
#else
return salt;
#endif
}
struct fmt_main fmt_hmacSHA1 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD | FMT_SPLIT_UNIFIES_CASE | FMT_HUGE_INPUT,
{ NULL },
{ NULL },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
#ifdef SIMD_COEF_32
clear_keys,
#else
fmt_default_clear_keys,
#endif
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
SPHCalcHydroForceFunctor.h
|
/**
* @file SPHCalcHydroForceFunctor.h
* @author seckler
* @date 22.01.18
*/
#pragma once
#include "autopas/sph/SPHKernels.h"
#include "autopas/sph/SPHParticle.h"
namespace autopas {
namespace sph {
/**
* Class that defines the hydrodynamic force functor.
* It is used to calculate the force based on the given SPH kernels.
* @tparam Particle
* @tparam ParticleCell
*/
template <class Particle, class ParticleCell>
class SPHCalcHydroForceFunctor : public Functor<SPHParticle, FullParticleCell<SPHParticle>, SPHParticle::SoAArraysType,
SPHCalcHydroForceFunctor<Particle, ParticleCell>> {
public:
/// soa arrays type
using SoAArraysType = typename Particle::SoAArraysType;
SPHCalcHydroForceFunctor()
// the actual cutoff used is dynamic. 0 is used to pass the sanity check.
: autopas::Functor<Particle, ParticleCell, SoAArraysType, SPHCalcHydroForceFunctor>(0.){};
bool isRelevantForTuning() override { return true; }
bool allowsNewton3() override { return true; }
bool allowsNonNewton3() override { return true; }
bool isAppropriateClusterSize(unsigned int clusterSize, DataLayoutOption::Value dataLayout) const override {
return dataLayout == DataLayoutOption::aos;  // This functor only supports clusters via AoS.
}
/**
* Calculates the contribution of the interaction of particle i and j to the
* hydrodynamic force.
 * It is not symmetric, because the smoothing lengths of the two particles can
* be different.
* @param i first particle of the interaction
* @param j second particle of the interaction
 * @param newton3 defines whether or not to use the Newton 3 optimization
*/
void AoSFunctor(SPHParticle &i, SPHParticle &j, bool newton3 = true) override {
const std::array<double, 3> dr = utils::ArrayMath::sub(i.getR(), j.getR());
// const PS::F64vec dr = ep_i[i].pos - ep_j[j].pos;
double cutoff = i.getSmoothingLength() * autopas::sph::SPHKernels::getKernelSupportRadius();
if (autopas::utils::ArrayMath::dot(dr, dr) >= cutoff * cutoff) {
return;
}
const std::array<double, 3> dv = utils::ArrayMath::sub(i.getV(), j.getV());
// const PS::F64vec dv = ep_i[i].vel - ep_j[j].vel;
double dvdr = utils::ArrayMath::dot(dv, dr);
const double w_ij = (dvdr < 0) ? dvdr / utils::ArrayMath::L2Norm(dr) : 0;
// const PS::F64 w_ij = (dv * dr < 0) ? dv * dr / sqrt(dr * dr) : 0;
const double v_sig = i.getSoundSpeed() + j.getSoundSpeed() - 3.0 * w_ij;
// const PS::F64 v_sig = ep_i[i].snds + ep_j[j].snds - 3.0 * w_ij;
i.checkAndSetVSigMax(v_sig);
if (newton3) {
j.checkAndSetVSigMax(v_sig); // Newton 3
// v_sig_max = std::max(v_sig_max, v_sig);
}
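// Signal-velocity based artificial viscosity: proportional to the approach speed w_ij and
// the pairwise signal velocity v_sig, normalized by the mean density of the pair; it
// vanishes for receding particles because w_ij is zero in that case.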
const double AV = -0.5 * v_sig * w_ij / (0.5 * (i.getDensity() + j.getDensity()));
// const PS::F64 AV = - 0.5 * v_sig * w_ij / (0.5 * (ep_i[i].dens +
// ep_j[j].dens));
const std::array<double, 3> gradW_ij =
utils::ArrayMath::mulScalar(utils::ArrayMath::add(SPHKernels::gradW(dr, i.getSmoothingLength()),
SPHKernels::gradW(dr, j.getSmoothingLength())),
0.5);
// const PS::F64vec gradW_ij = 0.5 * (gradW(dr, ep_i[i].smth) + gradW(dr,
// ep_j[j].smth));
double scale =
i.getPressure() / (i.getDensity() * i.getDensity()) + j.getPressure() / (j.getDensity() * j.getDensity()) + AV;
i.subAcceleration(utils::ArrayMath::mulScalar(gradW_ij, scale * j.getMass()));
// hydro[i].acc -= ep_j[j].mass * (ep_i[i].pres / (ep_i[i].dens *
// ep_i[i].dens) + ep_j[j].pres / (ep_j[j].dens * ep_j[j].dens) + AV) *
// gradW_ij;
if (newton3) {
j.addAcceleration(utils::ArrayMath::mulScalar(gradW_ij, scale * i.getMass()));
// Newton3, gradW_ij = -gradW_ji
}
double scale2i = j.getMass() * (i.getPressure() / (i.getDensity() * i.getDensity()) + 0.5 * AV);
i.addEngDot(utils::ArrayMath::dot(gradW_ij, dv) * scale2i);
// hydro[i].eng_dot += ep_j[j].mass * (ep_i[i].pres / (ep_i[i].dens *
// ep_i[i].dens) + 0.5 * AV) * dv * gradW_ij;
if (newton3) {
double scale2j = i.getMass() * (j.getPressure() / (j.getDensity() * j.getDensity()) + 0.5 * AV);
j.addEngDot(utils::ArrayMath::dot(gradW_ij, dv) * scale2j);
// Newton 3
}
}
/**
* @copydoc Functor::SoAFunctor(SoAView<SoAArraysType>, bool)
* This functor ignores the newton3 value, as we do not expect any benefit from disabling newton3.
*/
void SoAFunctor(SoAView<SoAArraysType> soa, bool newton3) override {
if (soa.getNumParticles() == 0) return;
double *const __restrict__ massptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::mass>();
double *const __restrict__ densityptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::density>();
double *const __restrict__ smthptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::smth>();
double *const __restrict__ soundSpeedptr =
soa.template begin<autopas::sph::SPHParticle::AttributeNames::soundSpeed>();
double *const __restrict__ pressureptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::pressure>();
double *const __restrict__ vsigmaxptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::vsigmax>();
double *const __restrict__ engDotptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::engDot>();
double *const __restrict__ xptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::posX>();
double *const __restrict__ yptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::posY>();
double *const __restrict__ zptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::posZ>();
double *const __restrict__ velXptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::velX>();
double *const __restrict__ velYptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::velY>();
double *const __restrict__ velZptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::velZ>();
double *const __restrict__ accXptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::accX>();
double *const __restrict__ accYptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::accY>();
double *const __restrict__ accZptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::accZ>();
for (unsigned int i = 0; i < soa.getNumParticles(); ++i) {
double localvsigmax = 0.;
double localengdotsum = 0.;
double localAccX = 0.;
double localAccY = 0.;
double localAccZ = 0.;
// icpc vectorizes this.
// g++ only with -ffast-math or -funsafe-math-optimizations
//#pragma omp simd reduction(+ : localengdotsum, localAccX, localAccY, localAccZ), reduction(max : localvsigmax)
for (unsigned int j = i + 1; j < soa.getNumParticles(); ++j) {
const double drx = xptr[i] - xptr[j];
const double dry = yptr[i] - yptr[j];
const double drz = zptr[i] - zptr[j];
const double drx2 = drx * drx;
const double dry2 = dry * dry;
const double drz2 = drz * drz;
const double dr2 = drx2 + dry2 + drz2;
double cutoff = smthptr[i] * autopas::sph::SPHKernels::getKernelSupportRadius();
if (dr2 >= cutoff * cutoff) continue;
const double dvX = velXptr[i] - velXptr[j];
const double dvY = velYptr[i] - velYptr[j];
const double dvZ = velZptr[i] - velZptr[j];
// const PS::F64vec dv = ep_i[i].vel - ep_j[j].vel;
double dvdr = dvX * drx + dvY * dry + dvZ * drz;
const double w_ij = (dvdr < 0) ? dvdr / sqrt(dr2) : 0;
// const PS::F64 w_ij = (dv * dr < 0) ? dv * dr / sqrt(dr * dr) : 0;
const double v_sig = soundSpeedptr[i] + soundSpeedptr[j] - 3.0 * w_ij;
// const PS::F64 v_sig = ep_i[i].snds + ep_j[j].snds - 3.0 * w_ij;
localvsigmax = std::max(localvsigmax, v_sig);
// vsigmaxptr[j] = std::max(vsigmaxptr[j], v_sig); // Newton 3
vsigmaxptr[j] = vsigmaxptr[j] > v_sig ? vsigmaxptr[j] : v_sig; // Newton 3
// v_sig_max = std::max(v_sig_max, v_sig);
const double AV = -0.5 * v_sig * w_ij / (0.5 * (densityptr[i] + densityptr[j]));
// const PS::F64 AV = - 0.5 * v_sig * w_ij / (0.5 * (ep_i[i].dens +
// ep_j[j].dens));
const std::array<double, 3> gradW_ij =
utils::ArrayMath::mulScalar(utils::ArrayMath::add(SPHKernels::gradW({drx, dry, drz}, smthptr[i]),
SPHKernels::gradW({drx, dry, drz}, smthptr[j])),
0.5);
// const PS::F64vec gradW_ij = 0.5 * (gradW(dr, ep_i[i].smth) + gradW(dr,
// ep_j[j].smth));
double scale =
pressureptr[i] / (densityptr[i] * densityptr[i]) + pressureptr[j] / (densityptr[j] * densityptr[j]) + AV;
const double massscale = scale * massptr[j];
localAccX -= gradW_ij[0] * massscale;
localAccY -= gradW_ij[1] * massscale;
localAccZ -= gradW_ij[2] * massscale;
// hydro[i].acc -= ep_j[j].mass * (ep_i[i].pres / (ep_i[i].dens *
// ep_i[i].dens) + ep_j[j].pres / (ep_j[j].dens * ep_j[j].dens) + AV) *
// gradW_ij;
const double massscale2 = scale * massptr[i];
accXptr[j] += gradW_ij[0] * massscale2;
accYptr[j] += gradW_ij[1] * massscale2;
accZptr[j] += gradW_ij[2] * massscale2;
// Newton3, gradW_ij = -gradW_ji
double scale2i = massptr[j] * (pressureptr[i] / (densityptr[i] * densityptr[i]) + 0.5 * AV);
localengdotsum += (gradW_ij[0] * dvX + gradW_ij[1] * dvY + gradW_ij[2] * dvZ) * scale2i;
// hydro[i].eng_dot += ep_j[j].mass * (ep_i[i].pres / (ep_i[i].dens *
// ep_i[i].dens) + 0.5 * AV) * dv * gradW_ij;
double scale2j = massptr[i] * (pressureptr[j] / (densityptr[j] * densityptr[j]) + 0.5 * AV);
engDotptr[j] += (gradW_ij[0] * dvX + gradW_ij[1] * dvY + gradW_ij[2] * dvZ) * scale2j;
// Newton 3
}
engDotptr[i] += localengdotsum;
accXptr[i] += localAccX;
accYptr[i] += localAccY;
accZptr[i] += localAccZ;
vsigmaxptr[i] = std::max(localvsigmax, vsigmaxptr[i]);
}
}
/**
* @copydoc Functor::SoAFunctor(SoAView<SoAArraysType>, SoAView<SoAArraysType>, bool)
*/
void SoAFunctor(SoAView<SoAArraysType> soa1, SoAView<SoAArraysType> soa2, bool newton3) override {
if (soa1.getNumParticles() == 0 || soa2.getNumParticles() == 0) return;
double *const __restrict__ massptr1 = soa1.template begin<autopas::sph::SPHParticle::AttributeNames::mass>();
double *const __restrict__ densityptr1 = soa1.template begin<autopas::sph::SPHParticle::AttributeNames::density>();
double *const __restrict__ smthptr1 = soa1.template begin<autopas::sph::SPHParticle::AttributeNames::smth>();
double *const __restrict__ soundSpeedptr1 =
soa1.template begin<autopas::sph::SPHParticle::AttributeNames::soundSpeed>();
double *const __restrict__ pressureptr1 =
soa1.template begin<autopas::sph::SPHParticle::AttributeNames::pressure>();
double *const __restrict__ vsigmaxptr1 = soa1.template begin<autopas::sph::SPHParticle::AttributeNames::vsigmax>();
double *const __restrict__ engDotptr1 = soa1.template begin<autopas::sph::SPHParticle::AttributeNames::engDot>();
double *const __restrict__ xptr1 = soa1.template begin<autopas::sph::SPHParticle::AttributeNames::posX>();
double *const __restrict__ yptr1 = soa1.template begin<autopas::sph::SPHParticle::AttributeNames::posY>();
double *const __restrict__ zptr1 = soa1.template begin<autopas::sph::SPHParticle::AttributeNames::posZ>();
double *const __restrict__ velXptr1 = soa1.template begin<autopas::sph::SPHParticle::AttributeNames::velX>();
double *const __restrict__ velYptr1 = soa1.template begin<autopas::sph::SPHParticle::AttributeNames::velY>();
double *const __restrict__ velZptr1 = soa1.template begin<autopas::sph::SPHParticle::AttributeNames::velZ>();
double *const __restrict__ accXptr1 = soa1.template begin<autopas::sph::SPHParticle::AttributeNames::accX>();
double *const __restrict__ accYptr1 = soa1.template begin<autopas::sph::SPHParticle::AttributeNames::accY>();
double *const __restrict__ accZptr1 = soa1.template begin<autopas::sph::SPHParticle::AttributeNames::accZ>();
double *const __restrict__ massptr2 = soa2.template begin<autopas::sph::SPHParticle::AttributeNames::mass>();
double *const __restrict__ densityptr2 = soa2.template begin<autopas::sph::SPHParticle::AttributeNames::density>();
double *const __restrict__ smthptr2 = soa2.template begin<autopas::sph::SPHParticle::AttributeNames::smth>();
double *const __restrict__ soundSpeedptr2 =
soa2.template begin<autopas::sph::SPHParticle::AttributeNames::soundSpeed>();
double *const __restrict__ pressureptr2 =
soa2.template begin<autopas::sph::SPHParticle::AttributeNames::pressure>();
double *const __restrict__ vsigmaxptr2 = soa2.template begin<autopas::sph::SPHParticle::AttributeNames::vsigmax>();
double *const __restrict__ engDotptr2 = soa2.template begin<autopas::sph::SPHParticle::AttributeNames::engDot>();
double *const __restrict__ xptr2 = soa2.template begin<autopas::sph::SPHParticle::AttributeNames::posX>();
double *const __restrict__ yptr2 = soa2.template begin<autopas::sph::SPHParticle::AttributeNames::posY>();
double *const __restrict__ zptr2 = soa2.template begin<autopas::sph::SPHParticle::AttributeNames::posZ>();
double *const __restrict__ velXptr2 = soa2.template begin<autopas::sph::SPHParticle::AttributeNames::velX>();
double *const __restrict__ velYptr2 = soa2.template begin<autopas::sph::SPHParticle::AttributeNames::velY>();
double *const __restrict__ velZptr2 = soa2.template begin<autopas::sph::SPHParticle::AttributeNames::velZ>();
double *const __restrict__ accXptr2 = soa2.template begin<autopas::sph::SPHParticle::AttributeNames::accX>();
double *const __restrict__ accYptr2 = soa2.template begin<autopas::sph::SPHParticle::AttributeNames::accY>();
double *const __restrict__ accZptr2 = soa2.template begin<autopas::sph::SPHParticle::AttributeNames::accZ>();
for (unsigned int i = 0; i < soa1.getNumParticles(); ++i) {
double localvsigmax = 0.;
double localengdotsum = 0.;
double localAccX = 0.;
double localAccY = 0.;
double localAccZ = 0.;
// icpc vectorizes this.
// g++ only with -ffast-math or -funsafe-math-optimizations
//#pragma omp simd reduction(+ : localengdotsum, localAccX, localAccY, localAccZ), reduction(max : localvsigmax)
for (unsigned int j = 0; j < soa2.getNumParticles(); ++j) {
const double drx = xptr1[i] - xptr2[j];
const double dry = yptr1[i] - yptr2[j];
const double drz = zptr1[i] - zptr2[j];
const double drx2 = drx * drx;
const double dry2 = dry * dry;
const double drz2 = drz * drz;
const double dr2 = drx2 + dry2 + drz2;
double cutoff = smthptr1[i] * autopas::sph::SPHKernels::getKernelSupportRadius();
if (dr2 >= cutoff * cutoff) continue;
const double dvX = velXptr1[i] - velXptr2[j];
const double dvY = velYptr1[i] - velYptr2[j];
const double dvZ = velZptr1[i] - velZptr2[j];
// const PS::F64vec dv = ep_i[i].vel - ep_j[j].vel;
double dvdr = dvX * drx + dvY * dry + dvZ * drz;
const double w_ij = (dvdr < 0) ? dvdr / sqrt(dr2) : 0;
// const PS::F64 w_ij = (dv * dr < 0) ? dv * dr / sqrt(dr * dr) : 0;
const double v_sig = soundSpeedptr1[i] + soundSpeedptr2[j] - 3.0 * w_ij;
// const PS::F64 v_sig = ep_i[i].snds + ep_j[j].snds - 3.0 * w_ij;
localvsigmax = std::max(localvsigmax, v_sig);
if (newton3) {
// vsigmaxptr2[j] = std::max(vsigmaxptr2[j], v_sig); // Newton 3
vsigmaxptr2[j] = vsigmaxptr2[j] > v_sig ? vsigmaxptr2[j] : v_sig; // Newton 3
// v_sig_max = std::max(v_sig_max, v_sig);
}
const double AV = -0.5 * v_sig * w_ij / (0.5 * (densityptr1[i] + densityptr2[j]));
// const PS::F64 AV = - 0.5 * v_sig * w_ij / (0.5 * (ep_i[i].dens +
// ep_j[j].dens));
const std::array<double, 3> gradW_ij =
utils::ArrayMath::mulScalar(utils::ArrayMath::add(SPHKernels::gradW({drx, dry, drz}, smthptr1[i]),
SPHKernels::gradW({drx, dry, drz}, smthptr2[j])),
0.5);
// const PS::F64vec gradW_ij = 0.5 * (gradW(dr, ep_i[i].smth) + gradW(dr,
// ep_j[j].smth));
double scale = pressureptr1[i] / (densityptr1[i] * densityptr1[i]) +
pressureptr2[j] / (densityptr2[j] * densityptr2[j]) + AV;
const double massscale = scale * massptr2[j];
localAccX -= gradW_ij[0] * massscale;
localAccY -= gradW_ij[1] * massscale;
localAccZ -= gradW_ij[2] * massscale;
// hydro[i].acc -= ep_j[j].mass * (ep_i[i].pres / (ep_i[i].dens *
// ep_i[i].dens) + ep_j[j].pres / (ep_j[j].dens * ep_j[j].dens) + AV) *
// gradW_ij;
if (newton3) {
const double massscale = scale * massptr1[i];
accXptr2[j] += gradW_ij[0] * massscale;
accYptr2[j] += gradW_ij[1] * massscale;
accZptr2[j] += gradW_ij[2] * massscale;
// Newton3, gradW_ij = -gradW_ji
}
double scale2i = massptr2[j] * (pressureptr1[i] / (densityptr1[i] * densityptr1[i]) + 0.5 * AV);
localengdotsum += (gradW_ij[0] * dvX + gradW_ij[1] * dvY + gradW_ij[2] * dvZ) * scale2i;
// hydro[i].eng_dot += ep_j[j].mass * (ep_i[i].pres / (ep_i[i].dens *
// ep_i[i].dens) + 0.5 * AV) * dv * gradW_ij;
if (newton3) {
double scale2j = massptr1[i] * (pressureptr2[j] / (densityptr2[j] * densityptr2[j]) + 0.5 * AV);
engDotptr2[j] += (gradW_ij[0] * dvX + gradW_ij[1] * dvY + gradW_ij[2] * dvZ) * scale2j;
// Newton 3
}
}
engDotptr1[i] += localengdotsum;
accXptr1[i] += localAccX;
accYptr1[i] += localAccY;
accZptr1[i] += localAccZ;
vsigmaxptr1[i] = std::max(localvsigmax, vsigmaxptr1[i]);
}
}
// clang-format off
/**
* @copydoc Functor::SoAFunctor(SoAView<SoAArraysType>, const std::vector<std::vector<size_t, autopas::AlignedAllocator<size_t>>> &, size_t, size_t, bool)
*/
// clang-format on
void SoAFunctor(SoAView<SoAArraysType> soa,
const std::vector<std::vector<size_t, autopas::AlignedAllocator<size_t>>> &neighborList, size_t iFrom,
size_t iTo, bool newton3) override {
if (soa.getNumParticles() == 0) return;
double *const __restrict__ massptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::mass>();
double *const __restrict__ densityptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::density>();
double *const __restrict__ smthptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::smth>();
double *const __restrict__ soundSpeedptr =
soa.template begin<autopas::sph::SPHParticle::AttributeNames::soundSpeed>();
double *const __restrict__ pressureptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::pressure>();
double *const __restrict__ vsigmaxptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::vsigmax>();
double *const __restrict__ engDotptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::engDot>();
double *const __restrict__ xptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::posX>();
double *const __restrict__ yptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::posY>();
double *const __restrict__ zptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::posZ>();
double *const __restrict__ velXptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::velX>();
double *const __restrict__ velYptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::velY>();
double *const __restrict__ velZptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::velZ>();
double *const __restrict__ accXptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::accX>();
double *const __restrict__ accYptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::accY>();
double *const __restrict__ accZptr = soa.template begin<autopas::sph::SPHParticle::AttributeNames::accZ>();
for (unsigned int i = iFrom; i < iTo; ++i) {
double localvsigmax = 0.;
double localengdotsum = 0.;
double localAccX = 0.;
double localAccY = 0.;
double localAccZ = 0.;
auto &currentList = neighborList[i];
size_t listSize = currentList.size();
// icpc vectorizes this.
// g++ only with -ffast-math or -funsafe-math-optimizations
//#pragma omp simd reduction(+ : localengdotsum, localAccX, localAccY, localAccZ), reduction(max : localvsigmax)
for (unsigned int j = 0; j < listSize; ++j) {
const double drx = xptr[i] - xptr[currentList[j]];
const double dry = yptr[i] - yptr[currentList[j]];
const double drz = zptr[i] - zptr[currentList[j]];
const double drx2 = drx * drx;
const double dry2 = dry * dry;
const double drz2 = drz * drz;
const double dr2 = drx2 + dry2 + drz2;
double cutoff = smthptr[i] * autopas::sph::SPHKernels::getKernelSupportRadius();
if (dr2 >= cutoff * cutoff) continue;
const double dvX = velXptr[i] - velXptr[currentList[j]];
const double dvY = velYptr[i] - velYptr[currentList[j]];
const double dvZ = velZptr[i] - velZptr[currentList[j]];
// const PS::F64vec dv = ep_i[i].vel - ep_j[currentList[j]].vel;
double dvdr = dvX * drx + dvY * dry + dvZ * drz;
const double w_ij = (dvdr < 0) ? dvdr / sqrt(dr2) : 0;
// const PS::F64 w_ij = (dv * dr < 0) ? dv * dr / sqrt(dr * dr) : 0;
const double v_sig = soundSpeedptr[i] + soundSpeedptr[currentList[j]] - 3.0 * w_ij;
// const PS::F64 v_sig = ep_i[i].snds + ep_j[currentList[j]].snds - 3.0 * w_ij;
localvsigmax = std::max(localvsigmax, v_sig);
if (newton3) {
// vsigmaxptr[currentList[j]] = std::max(vsigmaxptr[currentList[j]], v_sig); // Newton 3
vsigmaxptr[currentList[j]] =
vsigmaxptr[currentList[j]] > v_sig ? vsigmaxptr[currentList[j]] : v_sig; // Newton 3
// v_sig_max = std::max(v_sig_max, v_sig);
}
const double AV = -0.5 * v_sig * w_ij / (0.5 * (densityptr[i] + densityptr[currentList[j]]));
// const PS::F64 AV = - 0.5 * v_sig * w_ij / (0.5 * (ep_i[i].dens +
// ep_j[currentList[j]].dens));
const std::array<double, 3> gradW_ij = utils::ArrayMath::mulScalar(
utils::ArrayMath::add(SPHKernels::gradW({drx, dry, drz}, smthptr[i]),
SPHKernels::gradW({drx, dry, drz}, smthptr[currentList[j]])),
0.5);
// const PS::F64vec gradW_ij = 0.5 * (gradW(dr, ep_i[i].smth) + gradW(dr,
// ep_j[currentList[j]].smth));
double scale = pressureptr[i] / (densityptr[i] * densityptr[i]) +
pressureptr[currentList[j]] / (densityptr[currentList[j]] * densityptr[currentList[j]]) + AV;
const double massscale = scale * massptr[currentList[j]];
localAccX -= gradW_ij[0] * massscale;
localAccY -= gradW_ij[1] * massscale;
localAccZ -= gradW_ij[2] * massscale;
// hydro[i].acc -= ep_j[currentList[j]].mass * (ep_i[i].pres / (ep_i[i].dens *
// ep_i[i].dens) + ep_j[currentList[j]].pres / (ep_j[currentList[j]].dens * ep_j[currentList[j]].dens) + AV) *
// gradW_ij;
if (newton3) {
const double massscale = scale * massptr[i];
accXptr[currentList[j]] += gradW_ij[0] * massscale;
accYptr[currentList[j]] += gradW_ij[1] * massscale;
accZptr[currentList[j]] += gradW_ij[2] * massscale;
// Newton3, gradW_ij = -gradW_ji
}
double scale2i = massptr[currentList[j]] * (pressureptr[i] / (densityptr[i] * densityptr[i]) + 0.5 * AV);
localengdotsum += (gradW_ij[0] * dvX + gradW_ij[1] * dvY + gradW_ij[2] * dvZ) * scale2i;
// hydro[i].eng_dot += ep_j[currentList[j]].mass * (ep_i[i].pres / (ep_i[i].dens *
// ep_i[i].dens) + 0.5 * AV) * dv * gradW_ij;
if (newton3) {
double scale2j =
massptr[i] *
(pressureptr[currentList[j]] / (densityptr[currentList[j]] * densityptr[currentList[j]]) + 0.5 * AV);
engDotptr[currentList[j]] += (gradW_ij[0] * dvX + gradW_ij[1] * dvY + gradW_ij[2] * dvZ) * scale2j;
// Newton 3
}
}
engDotptr[i] += localengdotsum;
accXptr[i] += localAccX;
accYptr[i] += localAccY;
accZptr[i] += localAccZ;
vsigmaxptr[i] = std::max(localvsigmax, vsigmaxptr[i]);
}
}
/**
* @copydoc Functor::getNeededAttr()
*/
constexpr static const std::array<typename SPHParticle::AttributeNames, 16> getNeededAttr() {
///@todo distinguish between N3 and notN3
return std::array<typename SPHParticle::AttributeNames, 16>{
SPHParticle::AttributeNames::mass, SPHParticle::AttributeNames::density,
SPHParticle::AttributeNames::smth, SPHParticle::AttributeNames::soundSpeed,
SPHParticle::AttributeNames::pressure, SPHParticle::AttributeNames::vsigmax,
SPHParticle::AttributeNames::engDot, SPHParticle::AttributeNames::posX,
SPHParticle::AttributeNames::posY, SPHParticle::AttributeNames::posZ,
SPHParticle::AttributeNames::velX, SPHParticle::AttributeNames::velY,
SPHParticle::AttributeNames::velZ, SPHParticle::AttributeNames::accX,
SPHParticle::AttributeNames::accY, SPHParticle::AttributeNames::accZ};
}
/**
* @copydoc Functor::getNeededAttr(std::false_type)
*/
constexpr static const std::array<typename SPHParticle::AttributeNames, 11> getNeededAttr(std::false_type) {
///@todo distinguish between N3 and notN3
return std::array<typename SPHParticle::AttributeNames, 11>{
SPHParticle::AttributeNames::mass, SPHParticle::AttributeNames::density,
SPHParticle::AttributeNames::smth, SPHParticle::AttributeNames::soundSpeed,
SPHParticle::AttributeNames::pressure, SPHParticle::AttributeNames::posX,
SPHParticle::AttributeNames::posY, SPHParticle::AttributeNames::posZ,
SPHParticle::AttributeNames::velX, SPHParticle::AttributeNames::velY,
SPHParticle::AttributeNames::velZ};
}
/**
* @copydoc Functor::getComputedAttr()
*/
constexpr static const std::array<typename sph::SPHParticle::AttributeNames, 5> getComputedAttr() {
return std::array<typename SPHParticle::AttributeNames, 5>{
SPHParticle::AttributeNames::vsigmax, SPHParticle::AttributeNames::engDot, SPHParticle::AttributeNames::accX,
SPHParticle::AttributeNames::accY, SPHParticle::AttributeNames::accZ};
}
/**
* Get the number of floating point operations used in one full kernel call
* @return the number of floating point operations
*/
static unsigned long getNumFlopsPerKernelCall() {
///@todo return correct flopcount
return 1ul;
}
};
} // namespace sph
} // namespace autopas
|
GB_unop__identity_uint32_fc64.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint32_fc64)
// op(A') function: GB (_unop_tran__identity_uint32_fc64)
// C type: uint32_t
// A type: GxB_FC64_t
// cast: uint32_t cij = GB_cast_to_uint32_t (creal (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = GB_cast_to_uint32_t (creal (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = GB_cast_to_uint32_t (creal (aij)) ; \
Cx [pC] = z ; \
}
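// For example, GB_CAST_OP (p, p) effectively performs:
// Cx [p] = GB_cast_to_uint32_t (creal (Ax [p])) ;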
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint32_fc64)
(
uint32_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
uint32_t z = GB_cast_to_uint32_t (creal (aij)) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
uint32_t z = GB_cast_to_uint32_t (creal (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint32_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__bor_int64.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bor_int64
// A.*B function (eWiseMult): GB_AemultB__bor_int64
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bor_int64
// C+=b function (dense accum): GB_Cdense_accumb__bor_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bor_int64
// C=scalar+B GB_bind1st__bor_int64
// C=scalar+B' GB_bind1st_tran__bor_int64
// C=A+scalar GB_bind2nd__bor_int64
// C=A'+scalar GB_bind2nd_tran__bor_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij) | (bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x) | (y) ;
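// For example, GB_BINOP (GB_CX (p), aij, bij) expands to: Cx [p] = (aij) | (bij) ;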
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_INT64 || GxB_NO_BOR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__bor_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__bor_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__bor_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__bor_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__bor_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__bor_int64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t bij = Bx [p] ;
Cx [p] = (x) | (bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__bor_int64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
Cx [p] = (aij) | (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (x) | (aij) ; \
}
GrB_Info GB_bind1st_tran__bor_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (aij) | (y) ; \
}
GrB_Info GB_bind2nd_tran__bor_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
join.c
|
/* Copyright 2013-2015. The Regents of the University of California.
* Copyright 2015. Martin Uecker.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2013, 2015 Martin Uecker <[email protected]>
* 2015 Jonathan Tamir <[email protected]>
*/
#include <stdbool.h>
#include <complex.h>
#include <string.h>
#include "num/multind.h"
#include "num/init.h"
#include "misc/mmio.h"
#include "misc/debug.h"
#include "misc/misc.h"
#include "misc/opts.h"
#ifndef DIMS
#define DIMS 16
#endif
#ifndef CFL_SIZE
#define CFL_SIZE sizeof(complex float)
#endif
static const char usage_str[] = "dimension <input1> ... <inputn> <output>";
static const char help_str[] =
"Join input files along {dimensions}. All other dimensions must have the same size.\n"
"\t Example 1: join 0 slice_001 slice_002 slice_003 full_data\n"
"\t Example 2: join 0 `seq -f \"slice_%%03g\" 0 255` full_data\n";
int main_join(int argc, char* argv[])
{
bool append = false;
const struct opt_s opts[] = {
OPT_SET('a', &append, "append - only works for cfl files!"),
};
cmdline(&argc, argv, 3, 1000, usage_str, help_str, ARRAY_SIZE(opts), opts);
num_init();
int N = DIMS;
int dim = atoi(argv[1]);
assert(dim < N);
int count = argc - 3;
if (append) {
count += 1;
// FIXME: check for cfl file
}
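// In append mode the current contents of the output file are treated as the
// first input (loaded below via argv[argc - 1]) and are not copied again; the
// remaining inputs are appended after it along the given dimension.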
long in_dims[count][N];
long offsets[count];
complex float* idata[count];
long sum = 0;
// figure out size of output
for (int l = 0, i = 0; i < count; i++) {
const char* name = NULL;
if (append && (i == 0)) {
name = argv[argc - 1];
} else {
name = argv[2 + l++];
}
debug_printf(DP_DEBUG1, "loading %s\n", name);
idata[i] = load_cfl(name, N, in_dims[i]);
offsets[i] = sum;
sum += in_dims[i][dim];
for (int j = 0; j < N; j++)
assert((dim == j) || (in_dims[0][j] == in_dims[i][j]));
}
long out_dims[N];
for (int i = 0; i < N; i++)
out_dims[i] = in_dims[0][i];
out_dims[dim] = sum;
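// e.g. joining inputs of extent 4, 6, and 2 along dim yields out_dims[dim] = 12,
// with the blocks copied at offsets 0, 4, and 10 respectively.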
complex float* out_data = create_cfl(argv[argc - 1], N, out_dims);
long ostr[N];
md_calc_strides(N, ostr, out_dims, CFL_SIZE);
#pragma omp parallel for
for (int i = 0; i < count; i++) {
if (!(append && (0 == i))) {
long pos[N];
md_singleton_strides(N, pos);
pos[dim] = offsets[i];
long istr[N];
md_calc_strides(N, istr, in_dims[i], CFL_SIZE);
md_copy_block(N, pos, out_dims, out_data, in_dims[i], idata[i], CFL_SIZE);
}
unmap_cfl(N, in_dims[i], idata[i]);
debug_printf(DP_DEBUG1, "done copying file %d\n", i);
}
unmap_cfl(N, out_dims, out_data);
return 0;
}
|
threshold.c
|
/* Copyright 2014. The Regents of the University of California.
* Copyright 2015-2017. Martin Uecker.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2013-2017 Martin Uecker <[email protected]>
* 2015-2016 Jon Tamir <[email protected]>
* 2015 Frank Ong <[email protected]>
*/
#include <stdbool.h>
#include <complex.h>
#include "num/flpmath.h"
#include "num/multind.h"
#include "num/init.h"
#include "num/ops_p.h"
#include "iter/prox.h"
#include "iter/thresh.h"
#include "misc/mmio.h"
#include "misc/misc.h"
#include "misc/debug.h"
#include "misc/opts.h"
#include "lowrank/lrthresh.h"
#include "linops/waveop.h"
#include "dfwavelet/prox_dfwavelet.h"
// FIXME: lowrank interface should not be coupled to mri.h -- it should take D as an input
#ifndef DIMS
#define DIMS 16
#endif
// FIXME: consider moving this to a more accessible location?
static void wthresh(unsigned int D, const long dims[D], float lambda, unsigned int flags, complex float* out, const complex float* in)
{
long minsize[D];
md_singleton_dims(D, minsize);
long coarse_scale[3] = MD_INIT_ARRAY(3, 16);
md_copy_dims(3, minsize, coarse_scale);
unsigned int wflags = 7; // FIXME
for (unsigned int i = 0; i < 3; i++)
if (dims[i] < minsize[i])
wflags = MD_CLEAR(wflags, i);
long strs[D];
md_calc_strides(D, strs, dims, CFL_SIZE);
const struct linop_s* w = linop_wavelet_create(D, wflags, dims, strs, minsize, false);
const struct operator_p_s* p = prox_unithresh_create(D, w, lambda, flags);
operator_p_apply(p, 1., D, dims, out, D, dims, in);
operator_p_free(p);
}
static void lrthresh(unsigned int D, const long dims[D], int llrblk, float lambda, unsigned int flags, complex float* out, const complex float* in)
{
long blkdims[MAX_LEV][D];
int levels = llr_blkdims(blkdims, ~flags, dims, llrblk);
UNUSED(levels);
const struct operator_p_s* p = lrthresh_create(dims, false, ~flags, (const long (*)[])blkdims, lambda, false, false, false);
operator_p_apply(p, 1., D, dims, out, D, dims, in);
operator_p_free(p);
}
static void dfthresh(unsigned int D, const long dims[D], float lambda, complex float* out, const complex float* in)
{
long minsize[3];
md_singleton_dims(3, minsize);
long coarse_scale[3] = MD_INIT_ARRAY(3, 16);
md_min_dims(3, ~0u, minsize, dims, coarse_scale);
complex float res[3];
res[0] = 1.;
res[1] = 1.;
res[2] = 1.;
assert(3 == dims[TE_DIM]);
const struct operator_p_s* p = prox_dfwavelet_create(dims, minsize, res, TE_DIM, lambda, false);
operator_p_apply(p, 1., D, dims, out, D, dims, in);
operator_p_free(p);
}
static void hard_thresh(unsigned int D, const long dims[D], float lambda, complex float* out, const complex float* in)
{
long size = md_calc_size(DIMS, dims);
#pragma omp parallel for
for (long i = 0; i < size; i++)
out[i] = (cabsf(in[i]) > lambda) ? in[i] : 0.;
}
static void binary_thresh(unsigned int D, const long dims[D], float lambda, complex float* out, const complex float* in)
{
long size = md_calc_size(DIMS, dims);
#pragma omp parallel for
for (long i = 0; i < size; i++)
out[i] = (cabsf(in[i]) > lambda) ? 1. : 0.;
}
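/*
 * Thresholding rules applied below, with lambda taken from the command line:
 *   hard:   out = in if |in| > lambda, else 0
 *   binary: out = 1  if |in| > lambda, else 0
 *   soft (default, md_zsoftthresh): out = in * max(|in| - lambda, 0) / |in|
 * The soft-thresholding rule is the usual proximal operator of the l1-norm
 * (applied jointly over the dimensions selected by -j); stated here for
 * orientation only.
 */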
static const char usage_str[] = "lambda <input> <output>";
static const char help_str[] = "Perform (soft) thresholding with parameter lambda.";
int main_threshold(int argc, char* argv[argc])
{
unsigned int flags = 0;
enum th_type { NONE, WAV, LLR, DFW, MPDFW, HARD, BINARY } th_type = NONE;
int llrblk = 8;
const struct opt_s opts[] = {
OPT_SELECT('H', enum th_type, &th_type, HARD, "hard thresholding"),
OPT_SELECT('W', enum th_type, &th_type, WAV, "daubechies wavelet soft-thresholding"),
OPT_SELECT('L', enum th_type, &th_type, LLR, "locally low rank soft-thresholding"),
OPT_SELECT('D', enum th_type, &th_type, DFW, "divergence-free wavelet soft-thresholding"),
OPT_SELECT('B', enum th_type, &th_type, BINARY, "thresholding with binary output"),
OPT_UINT('j', &flags, "bitmask", "joint soft-thresholding"),
OPT_INT('b', &llrblk, "blocksize", "locally low rank block size"),
};
cmdline(&argc, argv, 3, 3, usage_str, help_str, ARRAY_SIZE(opts), opts);
num_init();
const int N = DIMS;
long dims[N];
complex float* idata = load_cfl(argv[2], N, dims);
complex float* odata = create_cfl(argv[3], N, dims);
float lambda = atof(argv[1]);
switch (th_type) {
case WAV:
wthresh(N, dims, lambda, flags, odata, idata);
break;
case LLR:
lrthresh(N, dims, llrblk, lambda, flags, odata, idata);
break;
case DFW:
dfthresh(N, dims, lambda, odata, idata);
break;
case HARD:
hard_thresh(N, dims, lambda, odata, idata);
break;
case BINARY:
binary_thresh(N, dims, lambda, odata, idata);
break;
default:
md_zsoftthresh(N, dims, lambda, flags, odata, idata);
}
unmap_cfl(N, dims, idata);
unmap_cfl(N, dims, odata);
return 0;
}
|
GB_unaryop__identity_uint8_int16.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint8_int16
// op(A') function: GB_tran__identity_uint8_int16
// C type: uint8_t
// A type: int16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_uint8_int16
(
uint8_t *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_uint8_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
wf3cte.c
|
/* WFC3 -- CTE loss correction for UVIS
M. Sosey Aug-2014: Adapted for the pipeline from Jay Anderson's CTE correction code for WFC3 UVIS,
raw2raz_wfc3uv.F. An edited file was delivered December 2014, and both are different from the
Fortran code currently served on the WFC3 website.
M. Sosey Aug-2016: Adapted to be used with subarrays as well as full-frame arrays,
as long as the subarray contains physical overscan pixels; this does not include the science team
subarrays, which can span quads.
M.D. De La Pena Dec-2019: This routine has been significantly upgraded by Jay Anderson (JA) and delivered
in November 2019. As JA is the important resource for this algorithm, I have only cleaned up the comments
in his original delivered version, fixed up brace placement, and created defines for some of the
hard-coded values. Minimal changes were done explicitly to keep the code in a form familiar to JA for
possible future modifications.
M.D. De La Pena Mar-2020: Further changes to accommodate subarrays - only evaluate valid (non-zero) pixels.
Updates received from Jay Anderson. Removed deprecated routines: find_dadj, rsz2rsc, inverse_cte_blur, and raz2rsz.
Small bug found in original subarray code during testing.
M.D. De La Pena Apr-2021: Fix to address a problem detected when processing a Tungsten flat with a high
background. Uninitialized values were used for further computation causing an eventual exception.
*/
# include <time.h>
# include <string.h>
# include <math.h>
# include <stdlib.h>
# include <stdio.h>
# include <float.h>
# ifdef _OPENMP
# include <omp.h>
# endif
# include "hstcal.h"
# include "hstio.h"
# include "wf3.h"
# include "wf3info.h"
# include "hstcalerr.h"
# include "wf3corr.h"
# include "cte.h"
# include "trlbuf.h"
/*
These are defined in wf3.h.
NAMPS 4
RAZ_COLS 8412
RAZ_ROWS 2070
*/
# define NITMAX 299 /* Maximum number of iterations */
# define P_OVRSCN 25 /* Physical overscan */
# define V_OVRSCNX2 60 /* Virtual overscan x 2 */
# define XAMP_SCI_DIM 2048 /* X dimension of each AMP of science pixels */
# define YAMP_SCI_DIM 2051 /* Y dimension of each AMP of science pixels */
# define WsMAX 999 /* Maximum number of traps */
/* Used in find_raz2rnoival */
# define SPREAD_FOR_HISTO 4.5
# define LOW_CLIP 3.75
# define HIGH_CLIP 9.75
# define NUM_BINS 1001
int sub_ctecor_v2c(float *,
float *,
int,
double *,
double *,
float *,
float *,
float,
float,
int,
int,
float *);
float find_raz2rnoival(float *,
float *,
float *);
int WF3cte (char *input, char *output, CCD_Switch *cte_sw,
RefFileInfo *refnames, int printtime, int verbose, int onecpu) {
/*
input: filename
output: filename
cte_sw: the calibration flags
refnames: the names of the calibration reference files
onecpu: use parallel processing?
The following are new primary header keywords which will be added to the data
so that they can be updated by the code. They are also specified in the PCTETAB
reference file.
These are taken from the PCTETAB
CTE_NAME - name of cte algorithm
CTE_VER - version number of cte algorithm
CTEDATE0 - date of wfc3/uvis installation in HST, in MJD
CTEDATE1 - reference date of CTE model pinning, in MJD
PCTETLEN - max length of CTE trail
PCTESMIT - number of iterations used in CTE forward modeling
PCTESHFT - number of iterations used in the parallel transfer
PCTENSMD - readnoise mitigation algorithm
PCTETRSH - over-subtraction threshold
PCTEFRAC - cte scaling frac calculated from expstart
PCTERNOI - the readnoise clipping level to use ***NOTE: This value is no longer used
from the PCTETAB. If the PCTERNOI keyword value in the raw science image
header is non-zero, it will be used for the CTE computations. Otherwise,
the value is computed on-the-fly based upon the raw image data. (March 2020)
#These are taken from getreffiles.c
DRKCFILE is a new dark reference file used only in the CTE branch *_DRC.fits
BIACFILE is a new super-bias reference file used only in the CTE branch *_BIC.fits
PCTETAB is a new reference file FITS table which will contain the software parameter switches for the CTE correction *_CTE.fit
This is the main workhorse function for removing the CTE from WFC3 UVIS images
Unfortunately this happens before anything else in wfc3, so there's a lot of reading files
at the beginning in order to populate needed information. The rest of the pipeline works
on one chip at a time and the structures are all defined to support that. None of these
structures are defined until the code enters the single chip loops. This differs from the
CTE correction in ACS which occurs later in the process after basic structures are defined.
*/
extern int status;
WF3Info wf3; /*structure with calibration switches and reference files for passing*/
Hdr phdr; /*primary header for input image, all output information saved here*/
Hdr scihdr; /*science header in case of subarray image to detect chip*/
IODescPtr ip = NULL;
CTEParams cte_pars; /*STRUCTURE HOLDING THE MODEL PARAMETERS*/
SingleGroup cd; /*SCI 1, chip 2*/
SingleGroup ab; /*SCI 2, chip 1*/
SingleGroup subcd; /*subarray chip*/
SingleGroup subab; /*subarray chip*/
SingleGroup raz; /* THE LARGE FORMAT COMBINATION OF CDAB*/
SingleGroup rsz; /* LARGE FORMAT READNOISE CORRECTED IMAGE */
SingleGroup rsc; /* CTE CORRECTED*/
SingleGroup rzc; /* FINAL CTE CORRECTED IMAGE */
SingleGroup chg; /* THE CHANGE DUE TO CTE */
SingleGroup raw; /* THE RAW IMAGE IN RAZ FORMAT */
int i,j; /*loop vars*/
int max_threads=1;
clock_t begin;
double time_spent;
float hardset=0.0;
/* These are used to find subarrays with physical overscan */
int sci_bin[2]; /* bin size of science image */
int sci_corner[2]; /* science image corner location */
int ref_bin[2];
int ref_corner[2];
int rsize = 1; /* reference pixel size */
int start=0; /*where the subarray starts*/
int finish=0; /*where the subarray ends*/
/* init header vars */
initHdr(&phdr);
initHdr(&scihdr);
float readNoise = 0.0;
int ret;
/*check if this is a subarray image.
This is necessary because the CTE routine will start with the raw images
from scratch and read them in so that both chips can be used. CTE is
outside of the normal processing where one chip goes through the pipeline
at a time, both chips are used at the same time for the correction.
For the case of subarrays, a fake second chip needs to be created.
The subarray is also placed inside the confines of a full size image
and a mask is created to ignore pixels not associated with the original
data during the cte correction. This is necessary because the pixel location
itself is used as part of the correction. A secondary option would be to set
the looping arrays to variable sizes and make sure all array references were
consistent with the current data being processed. I decided on masking which
might allow for other considerations in future updates.
Only subarrays which were taken with physical overscan pixels are currently valid
This distinction can be made with the CRDS ruleset for PCTECORR but it
should also be checked here in case users update the header themselves for
local runs. In order to check for overscan pixels I'm using the array start
location instead of the APERTURE keyword information (there are known user
apertures which do not have overscan pixels, but this gets around string
comparisons and any name changes or aperture additions in the future)
*/
begin = (double)clock();
/*CONTAIN PARALLEL PROCESSING TO A SINGLE THREAD AS USER OPTION*/
# ifdef _OPENMP
trlmessage("Using parallel processing provided by OpenMP inside CTE routine");
if (onecpu){
omp_set_dynamic(0);
max_threads=1;
sprintf(MsgText,"onecpu == TRUE, Using only %i threads/cpu", max_threads);
} else {
omp_set_dynamic(0);
max_threads = omp_get_num_procs(); /*be nice, use 1 less than avail?*/
sprintf(MsgText,"Setting max threads to %i of %i cpus",max_threads, omp_get_num_procs());
}
omp_set_num_threads(max_threads);
trlmessage(MsgText);
# endif
/* COPY COMMAND-LINE ARGUMENTS INTO WF3. */
WF3Init (&wf3); /*sets default information*/
strcpy (wf3.input, input);
strcpy (wf3.output, output);
PrBegin ("WFC3CTE");
if (wf3.printtime)
TimeStamp("WFC3CTE Started: ",wf3.rootname);
/* CHECK WHETHER THE OUTPUT FILE ALREADY EXISTS. */
if (FileExists (wf3.output)){
WhichError(status);
return (ERROR_RETURN);
}
wf3.pctecorr = cte_sw->pctecorr;
wf3.darkcorr = cte_sw->darkcorr;
wf3.biascorr = cte_sw->biascorr;
wf3.blevcorr = cte_sw->blevcorr;
wf3.printtime = printtime;
wf3.verbose = verbose;
wf3.refnames = refnames;
PrFileName ("input", wf3.input);
PrFileName ("output", wf3.output);
if (wf3.biascorr == COMPLETE){
trlmessage("BIASCORR complete for input image, CTE can't be performed");
return(ERROR_RETURN);
}
if (wf3.darkcorr == COMPLETE){
trlmessage("DARKCORR complete for input image, CTE can't be performed");
return(ERROR_RETURN);
}
if (wf3.blevcorr == COMPLETE){
trlmessage("BLEVCORR complete for input image, CTE can't be performed");
return(ERROR_RETURN);
}
/* DETERMINE THE NAMES OF THE TRAILER FILES BASED ON THE INPUT
AND OUTPUT FILE NAMES, THEN INITIALIZE THE TRAILER FILE BUFFER
WITH THOSE NAMES.
*/
if (initCTETrl (input, output))
return (status);
/* OPEN INPUT IMAGE IN ORDER TO READ ITS PRIMARY HEADER. */
if (LoadHdr (wf3.input, &phdr) ){
WhichError(status);
return (ERROR_RETURN);
}
/* GET KEYWORD VALUES FROM PRIMARY HEADER. */
if (GetKeys (&wf3, &phdr)) {
freeHdr (&phdr);
return (status);
}
if (GetCTEFlags (&wf3, &phdr)) {
freeHdr(&phdr);
return (status);
}
/*SET UP THE ARRAYS WHICH WILL BE PASSED AROUND*/
initSingleGroup(&raz);
allocSingleGroup(&raz, RAZ_COLS, RAZ_ROWS, True);
initSingleGroup(&rsz);
allocSingleGroup(&rsz, RAZ_COLS, RAZ_ROWS, True);
initSingleGroup(&rsc);
allocSingleGroup(&rsc, RAZ_COLS, RAZ_ROWS, True);
initSingleGroup(&rzc);
allocSingleGroup(&rzc, RAZ_COLS, RAZ_ROWS, True);
initSingleGroup(&raw);
allocSingleGroup(&raw, RAZ_COLS, RAZ_ROWS, True);
initSingleGroup(&chg);
allocSingleGroup(&chg, RAZ_COLS, RAZ_ROWS, True);
/*hardset the science arrays*/
for (i=0;i<RAZ_COLS;i++){
for(j=0;j<RAZ_ROWS;j++){
Pix(raw.sci.data,i,j)=hardset;
Pix(raz.sci.data,i,j)=hardset;
Pix(rsz.sci.data,i,j)=hardset;
Pix(rsc.sci.data,i,j)=hardset;
Pix(rzc.sci.data,i,j)=hardset;
Pix(chg.sci.data,i,j)=hardset;
}
}
/*READ IN THE CTE PARAMETER TABLE*/
initCTEParams(&cte_pars);
if (GetCTEPars (wf3.pctetab.name, &cte_pars))
return (status);
if (verbose){
PrRefInfo ("pctetab", wf3.pctetab.name, wf3.pctetab.pedigree,
wf3.pctetab.descrip, wf3.pctetab.descrip2);
}
/* Full frame and subarrays always have group 1
If it's a subarray, the group can be from either chip
and will still be labeled group 1 because it's the FIRST
and only group, so look at the ccdchip instead.
amps ab are in chip1, sci,2
amps cd are in chip2, sci,1
*/
if (wf3.subarray) {
/* OPEN INPUT IMAGE IN ORDER TO READ ITS SCIENCE HEADER. */
ip = openInputImage (wf3.input, "SCI", 1);
if (hstio_err()) {
sprintf (MsgText, "Image: \"%s\" is not present", wf3.input);
trlerror (MsgText);
return (status = OPEN_FAILED);
}
getHeader (ip, &scihdr);
if (ip != NULL)
closeImage (ip);
/* Get CCD-specific parameters. */
if (GetKeyInt (&scihdr, "CCDCHIP", USE_DEFAULT, 1, &wf3.chip)){
freeHdr(&scihdr);
return (status);
}
freeHdr(&scihdr);
if (wf3.chip == 2){ /*sci1,cd*/
start=0;
finish=0;
/*get CD subarray from first extension*/
initSingleGroup (&subcd);
getSingleGroup (wf3.input, 1, &subcd);
if (hstio_err()){
freeSingleGroup(&subcd);
return (status = OPEN_FAILED);
}
/*create an empty full size chip for pasting*/
initSingleGroup(&cd);
allocSingleGroup(&cd,RAZ_COLS/2,RAZ_ROWS, True);
cd.group_num=1;
CreateEmptyChip(&wf3, &cd);
if (GetCorner(&subcd.sci.hdr, rsize, sci_bin, sci_corner))
return (status);
if (GetCorner(&cd.sci.hdr, rsize, ref_bin, ref_corner))
return (status);
start = sci_corner[0] - ref_corner[0];
finish = start + subcd.sci.data.nx;
if ( start >= P_OVRSCN && finish + V_OVRSCNX2 <= (RAZ_COLS/2) - P_OVRSCN){
sprintf(MsgText,"Subarray not taken with physical overscan (%i %i)\nCan't perform CTE correction\n",start,finish);
trlmessage(MsgText);
return(ERROR_RETURN);
}
/*SAVE THE PCTETABLE INFORMATION TO THE HEADER OF THE SCIENCE IMAGE
AFTER CHECKING TO SEE IF THE USER HAS SPECIFIED ANY CHANGES TO THE
CTE CODE VARIABLES.
*/
if (CompareCTEParams(&subcd, &cte_pars))
return (status);
/*Put the subarray data into full frame*/
Sub2Full(&wf3, &subcd, &cd, 0, 1, 1);
/* now create an empty chip 1*/
initSingleGroup(&ab);
allocSingleGroup(&ab,RAZ_COLS/2,RAZ_ROWS, True);
ab.group_num=2;
CreateEmptyChip(&wf3, &ab);
/* SAVE A COPY OF THE RAW IMAGE BEFORE BIAS FOR LATER */
makeRAZ(&cd,&ab,&raw);
/* Subtract the BIAC file from the subarray before continuing
The bias routine will take care of cutting out the correct
image location for the subarray.*/
if (doCteBias(&wf3,&subcd)){
freeSingleGroup(&subcd);
return(status);
}
/*reset the array after bias subtraction*/
Sub2Full(&wf3, &subcd, &cd, 0, 1, 1);
} else { /*chip is 1, ab, sci2*/
start=0;
finish=0;
initSingleGroup(&subab);
getSingleGroup(wf3.input, 1, &subab);
if (hstio_err()){
freeSingleGroup(&subab);
return (status = OPEN_FAILED);
}
/*make an empty fullsize chip for pasting*/
initSingleGroup(&ab);
allocSingleGroup(&ab,RAZ_COLS/2,RAZ_ROWS, True);
ab.group_num=2;
CreateEmptyChip(&wf3, &ab);
if ( GetCorner(&subab.sci.hdr, rsize, sci_bin, sci_corner))
return (status);
if ( GetCorner(&ab.sci.hdr, rsize, ref_bin, ref_corner))
return (status);
start = sci_corner[0] - ref_corner[0];
finish = start + subab.sci.data.nx;
if ( start >= P_OVRSCN && finish + V_OVRSCNX2 <= (RAZ_COLS/2) - P_OVRSCN){
sprintf(MsgText,"Subarray not taken with physical overscan (%i %i)\nCan't perform CTE correction\n",start,finish);
trlmessage(MsgText);
return(ERROR_RETURN);
}
/*add subarray to full frame image*/
Sub2Full(&wf3, &subab, &ab, 0, 1, 1);
/*SAVE THE PCTETABLE INFORMATION TO THE HEADER OF THE SCIENCE IMAGE
AFTER CHECKING TO SEE IF THE USER HAS SPECIFIED ANY CHANGES TO THE
CTE CODE VARIABLES.
*/
if (CompareCTEParams(&subab, &cte_pars))
return (status);
/* now create an empty chip 2*/
initSingleGroup(&cd);
allocSingleGroup(&cd,RAZ_COLS/2,RAZ_ROWS, True);
cd.group_num=1;
CreateEmptyChip(&wf3, &cd);
/* SAVE A COPY OF THE RAW IMAGE FOR LATER */
makeRAZ(&cd,&ab,&raw);
/* Subtract the BIAC file from the subarray before continuing*/
subab.group_num=2;
if (doCteBias(&wf3,&subab)){
freeSingleGroup(&subab);
return(status);
}
/*reset the array after bias subtraction*/
Sub2Full(&wf3, &subab, &ab, 0, 1, 1);
}
} else {
/* Full frame image, just read in the groups
and init the mask to use all pixels
*/
initSingleGroup (&cd);
getSingleGroup (wf3.input, 1, &cd);
if (hstio_err()){
return (status = OPEN_FAILED);
}
initSingleGroup (&ab);
getSingleGroup (wf3.input, 2, &ab);
if (hstio_err()){
return (status = OPEN_FAILED);
}
/*setup the mask*/
for(i=0; i< ab.dq.data.nx; i++){
for(j=0; j< ab.dq.data.ny; j++){
PPix(&ab.dq.data, i, j) = 1;
PPix(&cd.dq.data, i, j) = 1;
}
}
/* SAVE A COPY OF THE RAW IMAGE FOR LATER */
makeRAZ(&cd,&ab,&raw);
/***SUBTRACT THE CTE BIAS FROM BOTH CHIPS IN PLACE***/
if (doCteBias(&wf3,&cd)){
freeSingleGroup(&cd);
return(status);
}
if (doCteBias(&wf3,&ab)){
freeSingleGroup(&ab);
return(status);
}
/*SAVE THE PCTETABLE INFORMATION TO THE HEADER OF THE SCIENCE IMAGE
AFTER CHECKING TO SEE IF THE USER HAS SPECIFIED ANY CHANGES TO THE
CTE CODE VARIABLES.
*/
if (CompareCTEParams(&cd, &cte_pars))
return (status);
}
/*CONVERT TO RAZ, SUBTRACT BIAS AND CORRECT FOR GAIN*/
if (raw2raz(&wf3, &cd, &ab, &raz))
return (status);
SingleGroup fff;
initSingleGroup(&fff);
allocSingleGroup(&fff, RAZ_COLS, RAZ_ROWS, True);
double cte_ff;
cte_ff= (wf3.expstart - cte_pars.cte_date0)/
(cte_pars.cte_date1 - cte_pars.cte_date0);
printf("CTE_FF: %8.3f \n",cte_ff);
cte_pars.scale_frac=cte_ff;
for(i=0;i<RAZ_COLS;i++) {
for(j=0;j<RAZ_ROWS;j++) {
Pix(fff.sci.data,i,j) = cte_ff * (j+1)/((double)XAMP_SCI_DIM);
}
}
/*
* If the PCTERNOI value from the primary header of the science image is non-zero, it is
* used in the CTE algorithm. Otherwise the read noise must be computed via find_raz2rnoival.
* FLOAT_RNOIVAL and FLOAT_BKGDVAL are designed to be for diagnostic purposes only.
*/
float FLOAT_RNOIVAL = 0.;
float FLOAT_BKGDVAL = 0.;
readNoise = wf3.pcternoi;
sprintf(MsgText, "PCTERNOI: %8.4f (source: primary header of science image)\n\n", readNoise);
trlmessage(MsgText);
/* Comparison should be OK - read from FITS header and no computation */
if (readNoise == 0.0) {
readNoise = find_raz2rnoival(raz.sci.data.data, &FLOAT_RNOIVAL, &FLOAT_BKGDVAL);
sprintf(MsgText, "RNOIVAL: %8.4f BKGDVAL: %8.4f\n", FLOAT_RNOIVAL, FLOAT_BKGDVAL);
trlmessage(MsgText);
sprintf(MsgText, "PCTERNOI: %8.4f (source: computed on-the-fly from science image)", readNoise);
trlmessage(MsgText);
sprintf(MsgText, "This computed value supersedes any value obtained from the primary\nheader of the science image.\n\n");
trlmessage(MsgText);
}
/* The PCTERNOI value actually used is written to the PCTERNOI keyword in
* the output image header when it is updated below for a final time.
*/
/* Invoke the updated CTE correction which does the read noise
mitigation in each of the three forward-model iterations.
*/
trlmessage("CTE: jumping into the routine...");
ret = sub_ctecor_v2c(raz.sci.data.data,
fff.sci.data.data,
WsMAX,
cte_pars.qlevq_data,
cte_pars.dpdew_data,
cte_pars.rprof->data.data,
cte_pars.cprof->data.data,
readNoise,
cte_pars.thresh,
cte_pars.n_forward,
cte_pars.n_par,
rzc.sci.data.data);
trlmessage("CTE: returning from the routine...");
for (i=0;i<RAZ_COLS;i++){
for (j=0;j<RAZ_ROWS;j++){
Pix(chg.sci.data,i,j) = (Pix(rzc.sci.data,i,j) - Pix(raz.sci.data,i,j))/wf3.ccdgain;
Pix(rzc.sci.data,i,j) = Pix(raw.sci.data,i,j) + Pix(chg.sci.data,i,j);
}
}
freeSingleGroup(&fff);
/*BACK TO NORMAL FORMATTING*/
/*Copies rzc data to cd->sci.data and ab->sci.data */
undoRAZ(&cd,&ab,&rzc);
/* COPY BACK THE SCIENCE SUBARRAYS AND
SAVE THE NEW RAW FILE WITH UPDATED SCIENCE
ARRAYS AND PRIMARY HEADER TO RAC
*/
if (wf3.subarray) {
if (wf3.chip == 2) {
/*** SAVE USEFUL HEADER INFORMATION ***/
if (cteHistory (&wf3, subcd.globalhdr))
return (status);
/*UPDATE THE OUTPUT HEADER ONE FINAL TIME*/
PutKeyDbl(subcd.globalhdr, "PCTEFRAC", cte_pars.scale_frac,"CTE scaling fraction based on expstart");
trlmessage("PCTEFRAC saved to header");
PutKeyFlt(subcd.globalhdr, "PCTERNOI", readNoise,"read noise amp clip limit");
trlmessage("PCTERNOI saved to header");
Full2Sub(&wf3, &subcd, &cd, 0, 1, 1);
putSingleGroup(output, 1, &subcd,0);
freeSingleGroup(&subcd);
} else {
/*** SAVE USEFUL HEADER INFORMATION ***/
if (cteHistory (&wf3, subab.globalhdr))
return (status);
/*UPDATE THE OUTPUT HEADER ONE FINAL TIME*/
PutKeyDbl(subab.globalhdr, "PCTEFRAC", cte_pars.scale_frac,"CTE scaling fraction based on expstart");
trlmessage("PCTEFRAC saved to header");
PutKeyFlt(subab.globalhdr, "PCTERNOI", readNoise,"read noise amp clip limit");
trlmessage("PCTERNOI saved to header");
Full2Sub(&wf3, &subab, &ab, 0, 1, 1);
putSingleGroup(output, 1, &subab,0);
freeSingleGroup(&subab);
}
} else { /*FUll FRAME*/
/*** SAVE USEFUL HEADER INFORMATION ***/
if (cteHistory (&wf3, cd.globalhdr))
return (status);
/*UPDATE THE OUTPUT HEADER ONE FINAL TIME*/
PutKeyDbl(cd.globalhdr, "PCTEFRAC", cte_pars.scale_frac,"CTE scaling fraction based on expstart");
trlmessage("PCTEFRAC saved to header");
PutKeyFlt(cd.globalhdr, "PCTERNOI", readNoise,"read noise amp clip limit");
trlmessage("PCTERNOI saved to header");
putSingleGroup(output,cd.group_num, &cd,0);
putSingleGroup(output,ab.group_num, &ab,0);
}
/** CLEAN UP ON AISLE 3 **/
freeSingleGroup(&rzc);
freeSingleGroup(&rsc);
freeSingleGroup(&chg);
freeSingleGroup(&raz);
freeSingleGroup(&rsz);
freeSingleGroup(&raw);
freeSingleGroup(&cd);
freeSingleGroup(&ab);
time_spent = ((double) clock()- begin +0.0) / CLOCKS_PER_SEC;
if (verbose){
sprintf(MsgText,"CTE run time: %.2f(s) with %i procs/threads\n",time_spent/max_threads,max_threads);
trlmessage(MsgText);
}
PrSwitch("pctecorr", COMPLETE);
if(wf3.printtime)
TimeStamp("PCTECORR Finished",wf3.rootname);
return (status);
}
/********************* SUPPORTING SUBROUTINES *****************************/
int raw2raz(WF3Info *wf3, SingleGroup *cd, SingleGroup *ab, SingleGroup *raz){
/*
convert a raw file to raz file: CDAB longwise amps, save data array
for comparison with what Jay has during testing
-->do an additional bias correction using the residual bias level measured for each amplifier from the
steadiest pixels in the horizontal overscan and subtracted from the pixels for that amplifier.
---> convert into electrons at the end
---> add supplemental bias info to the header
allocate contiguous 2d array on the heap
with pointers and return the pointer to the head of the array
The Following macros are used to represent 2-d indexing.
Two dimensional arrays are stored in FITS order.
ny
^
N | a05 a15 a25 a35
A | a04 a14 a24 a34
X | a03 a13 a23 a33
I | a02 a12 a22 a32
S | a01 a11 a21 a31
2 | a00 a10 a20 a30
---------------------------> nx
NAXIS1
NAXIS1 is 4 and NAXIS2 is 6
PIX(a,1,4) accesses a14
In the raz image, each quadrant has been rotated such that the readout amp is located at the lower left.
The reoriented four quadrants are then arranged into a single 8412x2070 image (science pixels plus overscan),
with amps C, D, A, and B, in that order. In the raz image, pixels are all parallel-shifted down,
then serial-shifted to the left.
*/
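/* Illustrative note (an assumption, not in the original source): the HSTIO
   Pix/PPix macros address the flat data buffer in FITS order, roughly
       Pix(a,i,j)  ->  data[ j * NAXIS1 + i ]
   so in the 4x6 example above PIX(a,1,4) reads flat element 4*4 + 1 = 17. */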
extern int status; /* variable for return status */
int i,j,k; /*loop counters*/
int subcol = (RAZ_COLS/NAMPS); /* for looping over quads */
float bias_post[NAMPS];
float bsig_post[NAMPS];
float bias_pre[NAMPS];
float bsig_pre[NAMPS];
float gain;
/*INIT THE ARRAYS*/
for(i=0;i<NAMPS;i++){
bias_post[i]=0.;
bsig_post[i]=0.;
bias_pre[i]=0.;
bsig_pre[i]=0.;
}
gain=wf3->ccdgain;
/*REFORMAT TO RAZ*/
makeRAZ(cd,ab,raz);
/*SUBTRACT THE EXTRA BIAS CALCULATED, AND MULTIPLY BY THE GAIN
Note that for user subarray the image is in only 1 quad, and only
has prescan bias pixels so the regions are different for full and subarrays
*/
if (wf3->subarray){
findPreScanBias(raz, bias_pre, bsig_pre);
for (k=0;k<NAMPS;k++){
for (i=0; i<subcol;i++){
for (j=0;j<RAZ_ROWS; j++){
if(Pix(raz->dq.data,i+k*subcol,j)){
Pix(raz->sci.data,i+k*subcol,j) -= bias_pre[k];
Pix(raz->sci.data,i+k*subcol,j) *= gain;
}
}
}
}
} else {
findPostScanBias(raz, bias_post, bsig_post);
for (k=0;k<NAMPS;k++){
for (i=0; i<subcol;i++){
for (j=0;j<RAZ_ROWS; j++){
Pix(raz->sci.data,i+k*subcol,j) -= bias_post[k];
Pix(raz->sci.data,i+k*subcol,j) *= gain;
}
}
}
}
return(status);
}
/*calculate the post scan and bias after the biac file has been subtracted
add some history information to the header
Jay gave no explanation why plist is limited to 55377 for full arrays, his
subarray limitation was just 1/4 of this value. The value 55377 is the number of
post-scan pixels in the physical pixel vertical extent (27 x 2051 = 55377).
Value 2051 is the vertical number of science pixels in an amp, and
27 is the 30 post-scan columns with two pixels stripped from the left boundary
and one stripped from the right boundary.
the serial virtual overscan pixels are also called the trailing-edge pixels
these only exist in full frame images
*/
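/* For reference, the arithmetic behind arrsize: 30 post-scan columns minus the
   2 stripped on the left and the 1 stripped on the right leaves 27 columns, and
   27 * 2051 science rows = 55377 pixels per amp. */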
int findPostScanBias(SingleGroup *raz, float *mean, float *sigma){
extern int status;
int arrsize = 55377;
int i,j,k; /*Looping variables */
float plist[arrsize]; /*bias pixels to measure*/
float *plistSub;
float min=0.0;
float max=0.0;
float rmean=0.0;
float rsigma=0.0;
float sigreg =7.5; /*sigma clip*/
int subcol = RAZ_COLS/4;
int npix=0; /*track array size for resistant mean*/
/*init plist for full size
We'll allocate heap memory for smaller arrays
*/
for (i=0;i<arrsize;i++){
plist[i]=0.;
}
for (k=0;k<NAMPS;k++){ /*for each quadrant cdab = 0123*/
npix=0; /*reset for each quad*/
rmean=0.;
rsigma=0.;
for (i=RAZ_ROWS+5;i<= subcol-1; i++){ /*quad area for post scan bias pixels*/
for (j=0; j<YAMP_SCI_DIM; j++){
if (npix < arrsize){
if ( Pix(raz->dq.data,i+k*subcol,j)) {
plist[npix] = Pix(raz->sci.data,i+k*subcol,j);
npix+=1;
}
}
}
}
if (npix > 0 ){
plistSub = (float *) calloc(npix, sizeof(float));
if (plistSub == NULL){
trlerror("out of memory for resistmean entrance in findPostScanBias.");
free(plistSub);
return (ERROR_RETURN);
}
for(i=0; i<npix; i++){
plistSub[i]=plist[i];
}
resistmean(plistSub, npix, sigreg, &rmean, &rsigma, &min, &max);
free(plistSub);
}
mean[k]= rmean;
sigma[k] = rsigma;
}
return status;
}
/*CALCULATE THE PRE SCAN AND BIAS AFTER THE BIAC FILE HAS BEEN SUBTRACTED
The serial physical overscan pixels are also known as the serial prescan,
they are the only pixels available for subarrays. For full frame arrays
the prescan is not used as part of the correction, instead the virtual
overscan pixels are used and modeled in findPostScanBias.
*/
int findPreScanBias(SingleGroup *raz, float *mean, float *sigma){
/** this calls resistmean, which does a better job clipping outlying pixels
than a standard single-pass stddev clip */
extern int status;
int arrsize = 55377;
int i,j,k; /*Looping variables */
float plist[arrsize]; /*bias pixels to measure*/
float *plistSub; /*heap allocation for variable size plist array*/
float min=0.0;
float max=0.0;
float rmean;
float rsigma;
float sigreg =7.5; /*sigma clip*/
int subcol = RAZ_COLS/4;
int npix=0; /*track array size for resistant mean*/
/*init plist*/
for (i=0;i<arrsize;i++){
plist[i]=0.;
}
for (k=0;k<NAMPS;k++){ /*for each quadrant, CDAB ordered*/
npix=0;
rmean=0.;
rsigma=0.;
for (i=5;i<P_OVRSCN; i++){
for (j=0; j<YAMP_SCI_DIM; j++){ /*all rows*/
if (npix < arrsize ){
if (Pix(raz->dq.data,i+(k*subcol),j)){
plist[npix] = Pix(raz->sci.data,i+k*subcol,j);
npix+=1;
}
}
}
}
if (0 < npix ){
plistSub = (float *) calloc(npix, sizeof(float));
if (plistSub == NULL){
trlerror("out of memory for resistmean entrance in findPreScanBias.");
free(plistSub);
return (ERROR_RETURN);
}
for(i=0; i<npix; i++){
plistSub[i]=plist[i];
}
resistmean(plistSub, npix, sigreg, &rmean, &rsigma, &min, &max);
free(plistSub);
}
mean[k]= rmean;
sigma[k] = rsigma;
if(npix>0)
printf("npix=%i\nmean[%i]=%f\nsigma[%i] = %f\n",npix,k+1,rmean,k+1,rsigma);
}
return status;
}
/*This is the workhorse subroutine; it simulates the readout
of one column pixi() and outputs this to pixo() using a single
iteration. It can be called successively to do the transfer
in steps.
JDIM == RAZ_ROWS
WDIM == TRAPS; Ws is the input number of traps, < WsMAX
NITs == cte_pars->n_par
These are already in the parameter structure CTEParams
int Ws the number of traps < WsMAX
float q_w[TRAPS]; the run of charge with level == qlevq_data
float dpde_w[TRAPS]; the run of charge loss with level == dpdew_data
float rprof_wt[TRAPS][100]; the emission probability as fn of downhill pixel == rprof fits image
float cprof_wt[TRAPS][100]; the cumulative probability cprof_t( 1) = 1. - rprof_t(1) == cprof fits image
W = wcol_data = trap id
q_w[TRAP] = qlev_q from QPROF traps as function of packet size = cte->qlevq_data[TRAP]
pixi (curr), pixo (read) , pixf(cteff) are passed and are 1d arrays which have values for a particular column
the ttrap reference to the image array has to be -1 for C
*/
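/* Hypothetical calling pattern, for illustration only (the driver actually used
   in this file is sub_ctecor_v2c, which calls sim_colreadout_l_uvis_w instead):

       double curr[RAZ_ROWS], read[RAZ_ROWS], cteff[RAZ_ROWS];
       for (i = 0; i < RAZ_COLS; i++) {
           ... copy column i of the RAZ image into curr[] and its scaling into cteff[] ...
           for (n = 0; n < cte->n_par; n++)
               sim_colreadout_l(curr, read, cteff, cte);    one readout iteration
       }
*/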
int sim_colreadout_l(double *pixi, double *pixo, double *pixf, CTEParams *cte){
extern int status;
int j;
int ttrap;
int w;
double ftrap;
double pix_1;
double padd_2;
double padd_3;
double prem_3;
double pmax;
double fcarry;
padd_3=0.0;
prem_3=0.0;
padd_2=0.0;
fcarry=0.0;
pix_1=0.0;
w=0;
j=0;
ftrap=0.0;
ttrap=0;
FloatHdrData *rprof;
FloatHdrData *cprof;
/*from the reference table*/
rprof = cte->rprof;
cprof = cte->cprof;
/*FIGURE OUT WHICH TRAPS WE DON'T NEED TO WORRY ABOUT IN THIS COLUMN
PMAX SHOULD ALWAYS BE POSITIVE HERE */
pmax=10.;
for(j=0; j<RAZ_ROWS; j++){
pixo[j] = pixi[j];
if (pixo[j] > pmax)
pmax=pixo[j];
}
/*GO THROUGH THE TRAPS ONE AT A TIME, FROM HIGHEST TO LOWEST Q,
AND SEE WHEN THEY GET FILLED AND EMPTIED, ADJUST THE PIXELS ACCORDINGLY*/
for (w = cte->cte_traps-1; w>=0; w--){
if ( cte->qlevq_data[w] <= pmax ) {
ftrap = 0.0e0;
ttrap = cte->cte_len; /*for referencing the image at 0*/
fcarry = 0.0e0;
/*GO UP THE COLUMN PIXEL BY PIXEL*/
for(j=0; j<RAZ_ROWS;j++){
pix_1 = pixo[j];
if ( (ttrap < cte->cte_len) || ( pix_1 >= cte->qlevq_data[w] - 1. ) ){
if (pixo[j] >= 0 ){
pix_1 = pixo[j] + fcarry; /*shuffle charge in*/
fcarry = pix_1 - floor(pix_1); /*carry the charge remainder*/
pix_1 = floor(pix_1); /*reset pixel*/
}
/*HAPPENS AFTER FIRST PASS*/
/*SHUFFLE CHARGE IN*/
if ( j> 0 ) {
if (pixf[j] < pixf[j-1])
ftrap *= (pixf[j] / pixf[j-1]);
}
/*RELEASE THE CHARGE*/
padd_2=0.0;
if (ttrap <cte->cte_len){
ttrap += 1;
padd_2 = Pix(rprof->data,w,ttrap-1) *ftrap;
}
padd_3 = 0.0;
prem_3 = 0.0;
if ( pix_1 >= cte->qlevq_data[w]){
prem_3 = cte->dpdew_data[w] / cte->n_par * pixf[j]; /*dpdew is 1 in file */
if (ttrap < cte->cte_len)
padd_3 = Pix(cprof->data,w,ttrap-1)*ftrap;
ttrap=0;
ftrap=prem_3;
}
pixo[j] += padd_2 + padd_3 - prem_3;
} /*replaces trap continue*/
}/*end for j*/
}/* end if qlevq > pmax, replaces continue*/
}/*end for w*/
return(status);
}
int initCTETrl (char *input, char *output) {
extern int status;
char trl_in[CHAR_LINE_LENGTH+1]; /* trailer filename for input */
char trl_out[CHAR_LINE_LENGTH+1]; /* output trailer filename */
int exist;
int MkName (char *, char *, char *, char *, char *, int);
int TrlExists (char *);
/* Initialize internal variables */
trl_in[0] = '\0';
trl_out[0] = '\0';
exist = EXISTS_UNKNOWN;
/* Input and output suffixes. */
char *isuffix[] = {"_raw"};
char *osuffix[] = {"_rac_tmp"};
char *trlsuffix[] = {""};
int nsuffix = 1;
/* Start by stripping off suffix from input/output filenames */
if (MkOutName (input, isuffix, trlsuffix, nsuffix, trl_in, CHAR_LINE_LENGTH)) {
WhichError (status);
sprintf (MsgText, "Couldn't determine trailer filename for %s",
input);
trlmessage (MsgText);
}
if (MkOutName (output, osuffix, trlsuffix, nsuffix, trl_out, CHAR_LINE_LENGTH)) {
WhichError (status);
sprintf (MsgText, "Couldn't create trailer filename for %s",
output);
trlmessage (MsgText);
}
/* NOW, CONVERT TRAILER FILENAME EXTENSIONS FROM '.FITS' TO '.TRL' */
if (MkNewExtn (trl_in, TRL_EXTN) ) {
sprintf (MsgText, "Error with input trailer filename %s", trl_in);
trlerror (MsgText);
WhichError (status);
}
if (MkNewExtn (trl_out, TRL_EXTN) ) {
sprintf (MsgText, "Error with output trailer filename %s", trl_out);
trlerror (MsgText);
WhichError (status);
}
/* If we are working with a RAW file, then see if a TRL file
needs to be overwritten after the generic conversion comments. */
if (strstr(input, isuffix[0]) != NULL) {
/* Test whether the output file already exists */
exist = TrlExists(trl_out);
if (exist == EXISTS_YES) {
/* The output file exists, so we want to add to them
** the new trailer comments. */
SetTrlOverwriteMode (NO);
}
}
/* Sets up temp trailer file for output and copies input
** trailer file into it. */
InitTrlFile (trl_in, trl_out);
return(status);
}
/*
#2 int sim_colreadout_l_uvis_w --- CTE correction for one column
#3 int sub_ctecor_v2c --- reverse CTE correction for image
*/
/* ------------------------------------------- */
/* */
/* Readnoise correction for a single column. */
/* */
/* ------------------------------------------- */
int rm_rnZ_colj(double *pixj_chg,
double *pixj_rnz,
double *pixj_rsz,
double RNMIT) {
int NIT;
int j;
double dd;
double dtot;
int ntot;
double ftot;
double rtot;
double RNN;
double RNU;
double sqrt(double); /* prototype; provided by <math.h> when included */
int pixj_ffu[RAZ_ROWS];
for(j=0;j<RAZ_ROWS;j++) {
pixj_rsz[j] = pixj_chg[j];
pixj_rnz[j] = 0.0;
pixj_ffu[j] = 0.0;
}
/*
* Find the upper and lower limits where there are valid
* pixels - this is done to accommodate subarrays
*/
int j1 = XAMP_SCI_DIM-1;
int j2 = 2;
/* There are no "greater than zero" pixels below j1 */
for (j=2; j<=XAMP_SCI_DIM-1; j++) {
if (j1==XAMP_SCI_DIM-1 && pixj_chg[j] > 0)
j1 = j;
}
/* There are no "greater than zero" pixels above j2*/
for (j=XAMP_SCI_DIM-1; j>=2; j--) {
if (j2==2 && pixj_chg[j] > 0)
j2 = j;
}
/*
* For each iteration, allow a bit more noise. This way we can
* stop when just enough is allowed to go through the column, from the 2nd
* pixel up to the 2nd pixel from the top.
*/
for(NIT=1;NIT<NITMAX;NIT++) {
RNN = RNMIT*(1.00+5.0*NIT/(float)NITMAX);
/* Bounds of this loop include only the non-zero portion of the data */
for(j=j1; j<=j2; j++) {
/* Compare each pixel to the average of its up/down neighbors */
dd = pixj_rsz[j]-(pixj_rsz[j+1]+pixj_rsz[j-1])/2.0;
pixj_ffu[j] = 0.0;
/* If the pixel is within the readnoise window... */
if (fabs(dd) < RNN) {
/* Cap the adjustments we are willing to make at any time to 20% */
if (dd > RNMIT/5.0) dd = RNMIT/5.0;
if (dd < -RNMIT/5.0) dd = -RNMIT/5.0;
/* Take half of the maximum adjustment from the current pixel... */
pixj_rnz[j ] = pixj_rnz[j ] + dd*0.50;
/* ...and give half of the value to the pixel below and above */
pixj_rnz[j-1] = pixj_rnz[j-1] - dd*0.25;
pixj_rnz[j+1] = pixj_rnz[j+1] - dd*0.25;
/* Flag this pixel to be in the readnoise range so we can track
* the total noise in the nominal pixels
*/
pixj_ffu[j ] = 1.0;
}
}
/* Bounds of this loop include only the non-zero portion of the data */
for(j=j1; j<=j2; j++) {
pixj_rsz[j] = pixj_chg[j] - pixj_rnz[j];
}
dtot = 0.;
ntot = 0.;
ftot = 0.;
rtot = 0.;
/* Bounds of this loop include only the non-zero portion of the data */
for(j=j1; j<=j2; j++) {
ftot = ftot + pixj_ffu[j];
dtot = dtot + pixj_rnz[j]*pixj_rnz[j];
dd = pixj_rsz[j]-(pixj_rsz[j+1]+pixj_rsz[j-1])/2.0;
rtot = rtot + dd*dd;
ntot = ntot + 1;
}
RNU = sqrt(dtot/ftot);
if (RNU > RNMIT) return(0);
}
return(0);
}
/* ---------------------------------------------------------------*/
/* */
/* CTE correction for a single column. */
/* */
/* TDIM is the length of the trails that are being considered. */
/* */
/* ---------------------------------------------------------------*/
#define _TDIM_ 60
int sim_colreadout_l_uvis_w(double *pixi, // input column array (JDIM)
double *pixo, // output column array (JDIM)
double *pixf, // scaling of model for each pixel (JDIM)
int J1, // bottom and top pixel in column
int J2, // bottom and top pixel in column
int JDIM, // number of pixels in column
double *q_w, // the charge level for trap#W (_WDIM_)
double *dpde_w, // the amt of charge this trap grabs (_WDIM_)
int NITs, // the num of iterations (dpde-->dpde/NITs)
float *rprof_wt, // the trap emission for t=T (_WDIM_,100)
float *cprof_wt, // the amount left in trap after t=T emission (_WDIM_,100)
int Ws) {
int j; // pixel location up the column
double ftrap; // total number of electrons in the trap
int ttrap; // shifts since the trap was last filled
int w; // trap counter
double pmax; // max pixel value in the column - tells us the highest relevant trap number
int Wf; // highest relevant trap number
double prel_1; // amount in incidental release from trap
double prel_2; // amount in flush release of trap
double pgrb_3; // amount grabbed by filling trap
float rprof_t[_TDIM_];
float cprof_t[_TDIM_];
/* Bounds checking */
if (Ws>WsMAX) {
printf("Ws error\n");
return(1);
}
/* Figure out which traps we do not need
to worry about in this column
*/
pmax = 10;
for(j=0;j<JDIM;j++) {
pixo[j] = pixi[j];
if (pixo[j] > pmax) pmax = pixo[j];
}
/* Figure out the highest trap number we need to consider */
Wf = 1;
for (w=0;w<Ws;w++) {
if (pmax >=q_w[w]) Wf = w;
}
/* Go thru the traps one at a time (from highest to lowest q)
and see when they get filled and emptied; adjust the
pixel values accordingly
*/
for (w=Wf;w>=0;w--) { // loop backwards
for(ttrap=0;ttrap<_TDIM_;ttrap++) {
rprof_t[ttrap] = rprof_wt[w+ttrap*WsMAX];
cprof_t[ttrap] = cprof_wt[w+ttrap*WsMAX];
}
/* Initialize the flux in the trap to zero */
ftrap = 0.0;
/* Initialize the time-since-flush to the max */
ttrap = _TDIM_ + 1;
/* Go up the column, pixel-by-pixel */
for (j=J1;j<J2;j++) {
/* If we have an inversion of the density (i.e., a readout-cosmic issue),
then we do not want to flush too much
*/
if (j>J1) {
if (pixf[j] < pixf[j-1]) {
ftrap = pixf[j]/
pixf[j-1]*ftrap;
}
}
/* Set up accounting of pixel value changes */
prel_1 = 0.; // charge to be released
prel_2 = 0.; // charge to be flushed out
pgrb_3 = 0.; // charge to be trapped
/* Filled/refilled trap#W */
if (pixo[j] >= q_w[w]) {
/* Need to flush before filling? */
if (ttrap < (_TDIM_)) {
/* Increment time since filled */
ttrap = ttrap + 1;
/* Flush out amount for this shift, and ...*/
prel_1 = rprof_t[ttrap-1]*ftrap;
/* ...flush out the rest */
prel_2 = cprof_t[ttrap-1]*ftrap;
}
/* Amount to hold in the trap */
ftrap = dpde_w[w]/NITs*pixf[j];
/* Subtract that amount held from the pixel and reset
the time-since-filled counter
*/
pgrb_3 = ftrap;
ttrap = 0;
}
/* trap#W not filled at this time */
else {
/* Check if the trap contains charge, and if so, then
release the appropriate number of electrons
*/
if (ttrap < (_TDIM_)) {
ttrap = ttrap + 1;
prel_1 = rprof_t[ttrap-1]*ftrap;
}
}
/* Make adjustments to the output pixel: add the trail emission,
flush the trap, and fill the trap
*/
pixo[j] = pixo[j] + prel_1 + prel_2 - pgrb_3;
}
}
return(0);
}
/* --------------------------------- */
/* */
/* CTE correction for one column. */
/* */
/* --------------------------------- */
int sub_ctecor_v2c(float *pixz_raz,
float *pixz_fff,
int Ws,
double *q_w,
double *dpde_w,
float *rprof_wt,
float *cprof_wt,
float PCTERNOI,
float FIX_ROCR,
int PCTENFOR,
int PCTENPAR,
float *pixz_rzc)
{
extern int status;
int i;
int j;
int jj;
int jmax;
int NITFOR, NITFORs;
int NITPAR, NITPARs;
double RNOI;
int ret;
double *pixj_fff;
double *pixj_raz;
double *pixj_mod;
double *pixj_rnz;
double *pixj_rsz;
double *pixj_org;
double *pixj_obs;
double *pixj_chg;
int NCRX;
int DONE;
int NDONE = 0;
RNOI = PCTERNOI;
NITFORs = PCTENFOR;
NITPARs = PCTENPAR;
printf(" \n");
printf(" INSIDE sub_ctecor_v2.f... \n");
printf(" ---> PCTERNOI: %8.4f \n",PCTERNOI);
printf(" ---> FIX_ROCR: %8.4f \n",FIX_ROCR);
printf(" ---> NITFORs: %5d \n",NITFORs);
printf(" ---> NITPARs: %5d \n",NITPARs);
printf(" \n");
#pragma omp parallel \
shared(pixz_raz,pixz_fff,pixz_rzc, \
NITPARs,NITFORs, \
q_w,dpde_w, \
rprof_wt,cprof_wt,Ws,NDONE) \
private(i,j,ret,NCRX, DONE, NITFOR,NITPAR, \
pixj_fff, pixj_raz, pixj_mod, pixj_rnz, \
pixj_rsz, pixj_org, pixj_obs, pixj_chg)
#pragma omp for
for(i=0;i<RAZ_COLS;i++) {
pixj_fff = malloc(RAZ_ROWS*sizeof(double));
pixj_raz = malloc(RAZ_ROWS*sizeof(double));
pixj_mod = malloc(RAZ_ROWS*sizeof(double));
pixj_rnz = malloc(RAZ_ROWS*sizeof(double));
pixj_rsz = malloc(RAZ_ROWS*sizeof(double));
pixj_org = malloc(RAZ_ROWS*sizeof(double));
pixj_obs = malloc(RAZ_ROWS*sizeof(double));
pixj_chg = malloc(RAZ_ROWS*sizeof(double));
for(j=0;j<RAZ_ROWS;j++) {
pixj_raz[j] = pixz_raz[i+j*RAZ_COLS];
pixj_fff[j] = pixz_fff[i+j*RAZ_COLS];
}
NCRX = 0;
DONE = 0;
while(!DONE) {
NCRX = NCRX + 1;
DONE = 1;
for (j=0;j<RAZ_ROWS;j++) {
pixj_mod[j] = pixj_raz[j];
pixj_chg[j] = 0.0;
}
for(NITFOR=0;NITFOR<NITFORs;NITFOR++) {
ret = rm_rnZ_colj(pixj_mod,pixj_rnz,pixj_rsz,RNOI);
for(j=0;j<RAZ_ROWS;j++) {
pixj_org[j] = pixj_rsz[j];
}
for(NITPAR=1;NITPAR<=NITPARs;NITPAR++) {
ret = sim_colreadout_l_uvis_w(pixj_org,
pixj_obs,
pixj_fff,
1,RAZ_ROWS,RAZ_ROWS,
q_w,dpde_w,NITPARs,
rprof_wt,cprof_wt,Ws);
for (j=0;j<RAZ_ROWS;j++) {
pixj_org[j] = pixj_obs[j];
}
}
for(j=0;j<RAZ_ROWS;j++) {
pixj_chg[j] = pixj_obs[j] - pixj_rsz[j];
pixj_mod[j] = pixj_raz[j] - pixj_chg[j];
}
}
if (FIX_ROCR<0) {
for(j=15;j<=2060;j++) {
if (pixj_mod[j] < FIX_ROCR &&
pixj_mod[j]-pixj_raz[j] < FIX_ROCR &&
pixj_mod[j] < pixj_mod[j+1] &&
pixj_mod[j] < pixj_mod[j-1]) {
jmax = j;
for(jj=j-2;jj<j;jj++) {
if (pixj_mod[jj ]-pixj_raz[jj ] >
pixj_mod[jmax]-pixj_raz[jmax]) {
jmax = jj;
}
}
if (pixj_mod[jmax]-pixj_raz[jmax] > -2.5*FIX_ROCR) {
pixj_fff[jmax] = pixj_fff[jmax]*0.90;
DONE = NCRX >= 10;
}
}
}
}
}
for(j=0;j<RAZ_ROWS;j++) {
pixz_rzc[i+j*RAZ_COLS] = pixj_mod[j];
pixz_fff[i+j*RAZ_COLS] = pixj_fff[j];
}
free(pixj_fff);
free(pixj_raz);
free(pixj_mod);
free(pixj_rnz);
free(pixj_rsz);
free(pixj_org);
free(pixj_obs);
free(pixj_chg);
/* This variable exists for debugging purposes. */
NDONE++;
/*if (NDONE==(NDONE/100)*100) { printf(" i = %5d %5d %5d \n",i,NCRX,NDONE); }*/
}
return(status);
}
/*
* This routine dynamically determines the noise in the input image.
*/
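/* Summary of the method implemented below: for every valid pixel, take the
   residual against the mean of its 8 neighbours; histogram both the residuals
   and the raw pixel values; find the narrowest interval that holds 75% of each
   histogram; convert the residual-interval width into a read-noise estimate
   (clipped to [LOW_CLIP, HIGH_CLIP]) and take the mean pixel value within its
   interval as the background. */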
float find_raz2rnoival(float *raz_cdab, float *FLOAT_RNOIVAL, float *FLOAT_BKGDVAL) {
/* Return value */
float RNOIVALo;
int i, j, ik;
float b, d;
int ih, iih;
long dhist[NUM_BINS], dcum[NUM_BINS], vtot;
long vhist[NUM_BINS], vcum[NUM_BINS], dtot;
int ivmin, id1, id2;
int idmin, iv1, iv2;
long long vsum;
long long nsum;
int j1, j2;
float RNOIVAL;
float RNOIVALu;
float BKGDVAL;
float BKGDVALu;
FLOAT_RNOIVAL[0] = 3.33;
FLOAT_BKGDVAL[0] = raz_cdab[0];
iv1 = 1;
iv2 = 999;
id1 = 1;
id2 = 999;
/*
* Distill the image variation information and background into quick histograms
*/
for (ih=1; ih<=NUM_BINS; ih++) {
dhist[ih-1] = 0;
vhist[ih-1] = 0;
}
for (i=2; i<=RAZ_COLS; i++) {
/*
* Find the upper and lower limits where there are valid pixels
* to accommodate subarrays
*/
j1 = XAMP_SCI_DIM-1;
j2 = 2;
/* There are no "greater than zero" pixels below j1 */
for (j=2; j<=XAMP_SCI_DIM-1; j++) {
if (j1==XAMP_SCI_DIM-1 && raz_cdab[i+(j-1)*RAZ_COLS-1] > 0)
j1 = j;
}
/* There are no "greater than zero" pixels above j2 */
for (j=XAMP_SCI_DIM-1; j>=2; j--) {
if (j2==2 && raz_cdab[i+(j-1)*RAZ_COLS-1] > 0)
j2 = j;
}
/*
* Process the valid pixels. "ik" is the chip horizontal locator.
* For each pixel in the recorded part of the detector, find the
* average of the surrounding 8 pixels
*/
for (j=j1; j<=j2; j++) {
ik = i - (i-1)/2103*2103;
if (ik > 25 && ik < XAMP_SCI_DIM+24) {
b = (raz_cdab[i-1+(j+1-1)*RAZ_COLS-1]
+raz_cdab[i +(j+1-1)*RAZ_COLS-1]
+raz_cdab[i+1+(j+1-1)*RAZ_COLS-1]
+raz_cdab[i-1+(j -1)*RAZ_COLS-1]
+raz_cdab[i+1+(j -1)*RAZ_COLS-1]
+raz_cdab[i-1+(j-1-1)*RAZ_COLS-1]
+raz_cdab[i +(j-1-1)*RAZ_COLS-1]
+raz_cdab[i+1+(j-1-1)*RAZ_COLS-1])/8.00;
/* Local residual as proxy for noise */
d = raz_cdab[i+(j-1)*RAZ_COLS-1] - b;
/* Locate within the histogram bin; 4.5 is to spread the values out.
* The value of 4.5 is 3 times the gain, where the gain is 1.5
*/
ih = 501 + d*(SPREAD_FOR_HISTO) + 0.5;
if (ih < 1)
ih = 1;
if (ih > NUM_BINS)
ih = NUM_BINS;
/* Increment the histogram bin */
dhist[ih-1] = dhist[ih-1] + 1;
/* Locate the pixel value within the histogram bin */
ih = 501 + raz_cdab[i+(j-1)*RAZ_COLS-1]*(SPREAD_FOR_HISTO) + 0.5;
if (ih < 1)
ih = 1;
if (ih > NUM_BINS)
ih = NUM_BINS;
/* Increment the histogram bin */
vhist[ih-1] = vhist[ih-1] + 1;
}
}
}
/* Compute the cumulative distribution for the noise and for the background */
dtot = 0;
vtot = 0;
for (ih=1; ih<=NUM_BINS; ih++) {
if (ih > 1 && ih < NUM_BINS) {
dtot = dtot + dhist[ih-1];
vtot = vtot + vhist[ih-1];
}
dcum[ih-1] = dtot;
vcum[ih-1] = vtot;
}
idmin = 999;
ivmin = 999;
/*
* Find the closest 75% of the points and use them to determine the noise
* and the background
*/
for (ih=1; ih<=NUM_BINS-1; ih++) {
for (iih=ih+1; iih<=NUM_BINS-1; iih++) {
if (dcum[iih-1]-dcum[ih-1] > 0.75*dtot && iih-ih < idmin) {
id1 = ih;
id2 = iih;
idmin = iih-ih;
}
if (vcum[iih-1]-vcum[ih-1] > 0.75*vtot && iih-ih < ivmin) {
iv1 = ih;
iv2 = iih;
ivmin = iih-ih;
}
}
}
nsum = 0;
vsum = 0;
for (ih=iv1; ih<=iv2; ih++) {
nsum = nsum + vhist[ih-1];
vsum = vsum + vhist[ih-1]*(ih-501);
}
if (vsum==0 || nsum==0) {
RNOIVALu = 9.75 ;
BKGDVALu = 999.9 ;
*FLOAT_RNOIVAL = RNOIVALu;
*FLOAT_BKGDVAL = BKGDVALu;
return(RNOIVALu);
}
/* For debugging purposes only
printf(" \n");
printf(" vsum: %12lld \n",vsum);
printf(" nsum: %12lld \n",nsum);
printf(" \n");
printf(" dbar: %12.2f \n",idmin/2.30*(SPREAD_FOR_HISTO));
printf(" vbar: %12.2f %12lld %12lld \n",vsum/nsum*(SPREAD_FOR_HISTO),vsum,nsum);
printf(" \n");
*/
/* RNOIVAL = (int)(idmin/2.30/(SPREAD_FOR_HISTO)/sqrt(1+1/8.0)*4+0.5)/4.00; quarter-rounded form, superseded below */
RNOIVAL = idmin/2.30/(SPREAD_FOR_HISTO)/sqrt(1+1/8.0);
RNOIVALu = RNOIVAL;
if (RNOIVALu > HIGH_CLIP)
RNOIVALu = HIGH_CLIP;
BKGDVAL = 1.*vsum/nsum/(SPREAD_FOR_HISTO);
BKGDVALu = BKGDVAL;
if (BKGDVALu > 999.9)
BKGDVALu = 999.9;
/* Values which can be used for diagnostic analysis */
*FLOAT_RNOIVAL = RNOIVALu;
*FLOAT_BKGDVAL = BKGDVALu;
/* LOW_CLIP and HIGH_CLIP are imposed limits on the computed noise value */
RNOIVALo = RNOIVALu;
if (RNOIVALo < LOW_CLIP)
RNOIVALo = LOW_CLIP;
if (RNOIVALo > HIGH_CLIP)
RNOIVALo = HIGH_CLIP;
return(RNOIVALo);
}
|
GB_binop__hypot_fp32.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__hypot_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__hypot_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__hypot_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__hypot_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__hypot_fp32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__hypot_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__hypot_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__hypot_fp32)
// C=scalar+B GB (_bind1st__hypot_fp32)
// C=scalar+B' GB (_bind1st_tran__hypot_fp32)
// C=A+scalar GB (_bind2nd__hypot_fp32)
// C=A'+scalar GB (_bind2nd_tran__hypot_fp32)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = hypotf (aij, bij)
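// Illustrative user-level sketch (not part of this generated kernel): these
// kernels are reached when an eWise operation uses the GxB_HYPOT_FP32 binary
// operator on GrB_FP32 matrices, e.g.
//      GrB_Matrix_eWiseAdd_BinaryOp (C, NULL, NULL, GxB_HYPOT_FP32, A, B, NULL) ;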
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = hypotf (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_HYPOT || GxB_NO_FP32 || GxB_NO_HYPOT_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__hypot_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__hypot_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__hypot_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__hypot_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__hypot_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__hypot_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__hypot_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__hypot_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__hypot_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = Bx [p] ;
Cx [p] = hypotf (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__hypot_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = Ax [p] ;
Cx [p] = hypotf (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = hypotf (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__hypot_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = hypotf (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__hypot_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pure_convection_edgebased.h
|
/*
==============================================================================
KratosPFEMApplication
A library based on:
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi
[email protected]
[email protected]
- CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
//
// Project Name: Kratos
// Last Modified by: $Author: rrossi $
// Date: $Date: 2009-01-13 15:39:56 $
// Revision: $Revision: 1.3 $
//
//
#if !defined(KRATOS_PURE_CONVECTION_EDGEBASED_SOLVER_H_INCLUDED)
#define KRATOS_PURE_CONVECTION_EDGEBASED_SOLVER_H_INCLUDED
#define SPLIT_OSS
// #define SYMM_PRESS
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// #include <omp.h>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
//#include "geometries/geometry.h"
#include "utilities/geometry_utilities.h"
#include "incompressible_fluid_application.h"
namespace Kratos
{
template<unsigned int TDim, class MatrixContainer, class TSparseSpace, class TLinearSolver>
class PureConvectionEdgeBased
{
public:
//name for the self defined structure
typedef EdgesStructureType<TDim> CSR_Tuple;
typedef std::vector<CSR_Tuple> EdgesVectorType;
//name for row start and column index vectors
typedef std::vector<unsigned int> IndicesVectorType;
//defining matrix type for test calculations
typedef std::vector< array_1d<double, TDim> > CalcVectorType;
//defining type for local storage of nodal values
typedef std::vector<double> ValuesVectorType;
//defining types for matrix operations
typedef typename TSparseSpace::MatrixType TSystemMatrixType;
typedef typename TSparseSpace::VectorType TSystemVectorType;
//constructor and destructor
PureConvectionEdgeBased(MatrixContainer& mr_matrix_container,
ModelPart& mr_model_part
)
: mr_matrix_container(mr_matrix_container),mr_model_part(mr_model_part)
{};
~PureConvectionEdgeBased(){};
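//Illustrative driving sequence (a sketch only; type names are hypothetical and
//the matrix container is assumed to be already built from the same model part):
//    PureConvectionEdgeBased<2, MatrixContainerType, SparseSpaceType, LinearSolverType>
//        convection(matrix_container, model_part);
//    convection.Initialize();
//    convection.ComputeTimeStep(0.5); //CFL number
//    convection.Solve();              //convect DISTANCE over one Runge-Kutta step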
//***********************************
//function to initialize fluid solver
void Initialize(
)
{
KRATOS_TRY
//get number of nodes
unsigned int n_nodes = mr_model_part.Nodes().size();
//unsigned int n_edges = mr_matrix_container.GetNumberEdges();
//size data vectors
mWork.resize(n_nodes);
mPi.resize(n_nodes);
mUn.resize(n_nodes);
mUn1.resize(n_nodes);
mphi_n.resize(n_nodes);
mphi_n1.resize(n_nodes);
mA.resize(n_nodes);
mHmin.resize(n_nodes);
mTau.resize(n_nodes);
//read variables from Kratos
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mUn1, mr_model_part.Nodes());
mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mUn, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(DISTANCE, mphi_n1, mr_model_part.Nodes());
mr_matrix_container.FillOldScalarFromDatabase(DISTANCE, mphi_n, mr_model_part.Nodes());
//set flag for first time step
mFirstStep = true;
ValuesVectorType& aaa = mr_matrix_container.GetHmin();
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
mHmin[i_node] = aaa[i_node];
}
KRATOS_CATCH("")
}
//***************************************
//function to set adequate time step size
void ComputeTimeStep(double CFLNumber)
{
KRATOS_TRY
//local variable for time step size
double delta_t = 1e10;
//getting value of current velocity
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mUn1, mr_model_part.Nodes());
//loop over all nodes
unsigned int n_nodes = mUn1.size();
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
//use CFL condition to compute time step size
double delta_t_i = CFLNumber * 1.0 / (norm_2(mUn1[i_node])/mHmin[i_node] ) ;
//choose the overall minimum of delta_t_i
if (delta_t_i < delta_t)
delta_t = delta_t_i;
}
//perform MPI synchronization of the dt (minimum should be kept)
//write time step size to Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
CurrentProcessInfo[DELTA_TIME] = delta_t;
KRATOS_CATCH("")
}
//**********************************************************************************
//function to solve fluid equations - fractional step 1: compute fractional momentum
Vector Solve()
{
KRATOS_TRY
//PREREQUISITES
//variables for node based data handling
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//storage of nodal values in local variables
ValuesVectorType rhs;
rhs.resize(n_nodes);
//read variables from Kratos
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mUn1, mr_model_part.Nodes());
mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mUn, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(DISTANCE, mphi_n1, mr_model_part.Nodes());
mr_matrix_container.FillOldScalarFromDatabase(DISTANCE, mphi_n, mr_model_part.Nodes());
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
//compute advective velocity - area average of the current velocity
double coefficient = 1;
CalculateAdvectiveVelocity(mUn,mUn1,mA, coefficient);
//compute intrinsic time
double time_inv = 1.0/delta_t;
#pragma omp parallel for firstprivate(time_inv)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& h_i = mHmin[i_node];
array_1d<double, TDim>& a_i = mA[i_node];
double vel_norm = norm_2(a_i);
mTau[i_node] = 1.0 / (2.0 * vel_norm/h_i + 0.01*time_inv );
}
mr_matrix_container.AssignVectorToVector(mphi_n, mWork); //mWork = mphi_n
//first step of Runge Kutta
// mr_matrix_container.AssignVectorToVector(mphi_n,mphi_n1); //mphi_n1 = mphi_n
mr_matrix_container.SetToZero(rhs);
CalculateRHS( mphi_n1,mA,rhs);
mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/6.0 , mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value(mphi_n1, mphi_n, 0.5*delta_t , mr_matrix_container.GetInvertedMass(), rhs);
//second step
mr_matrix_container.SetToZero(rhs);
CalculateRHS(mphi_n1,mA,rhs);
mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/3.0 , mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value(mphi_n1, mphi_n, 0.5*delta_t , mr_matrix_container.GetInvertedMass(),rhs);
//third step
CalculateAdvectiveVelocity(mUn, mUn1,mA, coefficient);
mr_matrix_container.SetToZero(rhs);
CalculateRHS( mphi_n1,mA,rhs);
mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/3.0 , mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value(mphi_n1, mphi_n, delta_t , mr_matrix_container.GetInvertedMass(), rhs);
//fourth step
CalculateAdvectiveVelocity(mUn, mUn1,mA, coefficient);
mr_matrix_container.SetToZero(rhs);
CalculateRHS( mphi_n1,mA,rhs );
mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/6.0 , mr_matrix_container.GetInvertedMass(), rhs);
//compute right-hand side
mr_matrix_container.AssignVectorToVector(mWork,mphi_n1);
mr_matrix_container.WriteScalarToDatabase(DISTANCE, mphi_n1, mr_model_part.Nodes());
// //compute ratio for iteration
Vector stop_criteria(TDim);
noalias(stop_criteria) = ZeroVector(TDim);
// stop_criteria[0] = 0.0;
// stop_criteria[1] = 0.0;
return stop_criteria;
KRATOS_CATCH("")
}
//*********************************************************************
//function to calculate right-hand side of fractional momentum equation
void CalculateRHS(
const ValuesVectorType& mphi,
const CalcVectorType& convective_velocity,
ValuesVectorType& rhs)
{
KRATOS_TRY
int n_nodes = mphi.size();
//calculating the convective projection
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& pi_i = mPi[i_node];
const double& phi_i = mphi[i_node];
//set to zero the projection
pi_i = 0;
const array_1d<double, TDim>& a_i = convective_velocity[i_node];
//loop to all the edges surrounding node I
for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const array_1d<double, TDim>& a_j = convective_velocity[j_neighbour];
const double& phi_j = mphi[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Add_ConvectiveContribution(pi_i,a_i,phi_i,a_j,phi_j);
}
//apply inverted mass matrix
const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
pi_i *= m_inv;
// KRATOS_WATCH(pi_i);
}
//perform MPI synchronization
//calculating the RHS
double stab_low;
double stab_high;
#pragma omp parallel for private(stab_low,stab_high)
for ( int i_node = 0; i_node < n_nodes; i_node++)
{
double& rhs_i = rhs[i_node];
const double& phi_i = mphi[i_node];
const array_1d<double, TDim>& a_i = convective_velocity[i_node];
const double& pi_i = mPi[i_node];
//double& h_i = mHmin[i_node];
//initializing with the external forces (e.g. gravity)
rhs_i = 0.0;
//loop to all the edges surrounding node I
for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
//double& rhs_j = rhs[j_neighbour];
const double& phi_j = mphi[j_neighbour];
const array_1d<double, TDim>& a_j = convective_velocity[j_neighbour];
const double& pi_j = mPi[j_neighbour];
//double& h_j = mHmin[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
//convection operator
edge_ij.Sub_ConvectiveContribution(rhs_i,a_i,phi_i,a_j,phi_j);
//calculate stabilization part
edge_ij.CalculateConvectionStabilization_LOW( stab_low,a_i,phi_i,a_j,phi_j);
double edge_tau = mTau[i_node];
edge_ij.CalculateConvectionStabilization_HIGH( stab_high,a_i,pi_i,a_j,pi_j);
edge_ij.Sub_StabContribution( rhs_i, edge_tau, 1.0, stab_low, stab_high);
}
// KRATOS_WATCH(rhs_i);
}
KRATOS_CATCH("")
}
void CalculateAdvectiveVelocity(
const CalcVectorType& mUn,
const CalcVectorType& mUn1,
CalcVectorType& mA,
double coefficient)
{
int n_nodes = mUn1.size();
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
//reference for advective velocity of node i
array_1d<double, TDim>& a_i = mA[i_node];
const array_1d<double, TDim>& Un_i = mUn[i_node];
const array_1d<double, TDim>& Un1_i = mUn1[i_node];
for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
a_i[k_comp] = coefficient * Un1_i[k_comp] + (1.0 - coefficient)* Un_i[k_comp];
}
}
//*******************************
//function to free dynamic memory
void Clear()
{
KRATOS_TRY
mWork.clear();
mPi.clear();
mUn.clear();
mUn1.clear();
mA.clear();
mphi_n.clear();
mphi_n1.clear();
mHmin.clear();
mTau.clear();
KRATOS_CATCH("")
}
private:
MatrixContainer& mr_matrix_container;
ModelPart& mr_model_part;
bool msmooth_convective_velocity;
bool minclude_shock_capturing;
//nodal values
//velocity vector U at time steps n and n+1
CalcVectorType mUn1,mUn;
//pressure vector p at time steps n and n+1
ValuesVectorType mWork, mPi;
ValuesVectorType mphi_n, mphi_n1; //variable to be convected
//advective velocity vector
CalcVectorType mA;
//minimum length of the edges surrounding each nodal point
ValuesVectorType mHmin;
//flag for first time step
bool mFirstStep;
//intrinsic time step size
ValuesVectorType mTau;
};
} //namespace Kratos
#endif //KRATOS_PURE_CONVECTION_EDGEBASED_SOLVER_H_INCLUDED defined
|