file
stringlengths 18
26
| data
stringlengths 3
1.04M
|
---|---|
the_stack_data/25136997.c
|
/****************************************************************************
*
* Copyright 2017 Samsung Electronics All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*
****************************************************************************/
/****************************************************************************
*
* Copyright © 2005-2014 Rich Felker, et al.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
***************************************************************************/
/************************************************************************
* Included Files
************************************************************************/
#include <math.h>
#ifdef CONFIG_HAVE_DOUBLE
/************************************************************************
* Public Functions
************************************************************************/
/*
 * Return the larger of x and y per C99 Annex F.9.9.2: a NaN operand is
 * treated as missing data (the other operand is returned), and +0.0 is
 * considered larger than -0.0.
 */
double fmax(double x, double y)
{
	int x_nan = isnan(x);
	int y_nan = isnan(y);

	/* With one NaN, yield the other operand (y when both are NaN). */
	if (x_nan || y_nan) {
		return x_nan ? y : x;
	}
	/* +0.0 and -0.0 compare equal with '<', so pick by sign bit. */
	if (signbit(x) != signbit(y)) {
		return signbit(y) ? x : y;
	}
	return (y > x) ? y : x;
}
#endif
|
the_stack_data/20450573.c
|
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* strncmp() in the reader threads */
#include <unistd.h> /* sleep() in all worker threads */
#include <bits/pthreadtypes.h>
static pthread_rwlock_t rwlock;
#define WORK_SIZE 1024
char work_area[WORK_SIZE];
int time_to_exit;
void *thread_function_read_o(void *arg);
void *thread_function_read_t(void *arg);
void *thread_function_write_o(void *arg);
void *thread_function_write_t(void *arg);
/*
 * Read-write-lock demo driver: spawns two reader and two writer threads
 * that share 'work_area' under 'rwlock', joins all four, destroys the
 * lock, and exits.  Any pthread failure aborts with EXIT_FAILURE.
 * NOTE(review): pthread_* functions return an error code rather than
 * setting errno, so the perror() messages below may be misleading.
 */
int main(int argc, char *argv[]) {
int res;
pthread_t a_thread, b_thread, c_thread, d_thread;
void *thread_result;
/* The rwlock must exist before any worker thread can take it. */
res = pthread_rwlock_init(&rwlock, NULL);
if (res != 0) {
perror("rwlock initialization failed");
exit(EXIT_FAILURE);
}
/* Reader one: polls work_area every 2 s. */
res = pthread_create(&a_thread, NULL, thread_function_read_o, NULL);
if (res != 0) {
perror("Thread creation failed");
exit(EXIT_FAILURE);
}
/* Reader two: polls work_area every 5 s. */
res = pthread_create(&b_thread, NULL, thread_function_read_t, NULL);
if (res != 0) {
perror("Thread creation failed");
exit(EXIT_FAILURE);
}
/* Writer one: prompts on stdin every 15 s. */
res = pthread_create(&c_thread, NULL, thread_function_write_o, NULL);
if (res != 0) {
perror("Thread creation failed");
exit(EXIT_FAILURE);
}
/* Writer two: starts after 10 s, prompts every 20 s. */
res = pthread_create(&d_thread, NULL, thread_function_write_t, NULL);
if (res != 0) {
perror("Thread creation failed");
exit(EXIT_FAILURE);
}
/* Wait for every worker before tearing the lock down. */
res = pthread_join(a_thread, &thread_result);
if (res != 0) {
perror("Thread join failed");
exit(EXIT_FAILURE);
}
res = pthread_join(b_thread, &thread_result);
if (res != 0) {
perror("Thread join failed");
exit(EXIT_FAILURE);
}
res = pthread_join(c_thread, &thread_result);
if (res != 0) {
perror("Thread join failed");
exit(EXIT_FAILURE);
}
res = pthread_join(d_thread, &thread_result);
if (res != 0) {
perror("Thread join failed");
exit(EXIT_FAILURE);
}
pthread_rwlock_destroy(&rwlock);
exit(EXIT_SUCCESS);
}
/*
 * Reader thread one: under a read lock, print work_area every 2 s until
 * it starts with "end"; the inner loop polls while the buffer is still
 * empty.  The rdlock/unlock juggling keeps the lock held across every
 * read of work_area while releasing it during the sleeps.  On exit it
 * sets time_to_exit so the writers stop.
 */
void *thread_function_read_o(void *arg) {
printf("thread read one try to get lock\n");
pthread_rwlock_rdlock(&rwlock);
while (strncmp("end", work_area, 3) != 0) {
printf("this is thread read one.");
printf("the characters is %s", work_area);
pthread_rwlock_unlock(&rwlock);
sleep(2);
pthread_rwlock_rdlock(&rwlock);
/* Nothing written yet: drop the lock and poll until a writer fills it. */
while (work_area[0] == '\0') {
pthread_rwlock_unlock(&rwlock);
sleep(2);
pthread_rwlock_rdlock(&rwlock);
}
}
pthread_rwlock_unlock(&rwlock);
/* NOTE(review): written without a write lock — racy, benign for a demo. */
time_to_exit = 1;
pthread_exit(0);
}
/*
 * Reader thread two: under a read lock, print work_area every 5 s until
 * it starts with "end"; the inner loop polls while the buffer is empty.
 * Sets time_to_exit on exit so the writers stop.
 *
 * Fix over the original: the startup message said "thread read one"
 * (copy-paste from thread_function_read_o); it now says "two", matching
 * this function's own "this is thread read two." output.
 */
void *thread_function_read_t(void *arg) {
	printf("thread read two try to get lock\n");
	pthread_rwlock_rdlock(&rwlock);
	while (strncmp("end", work_area, 3) != 0) {
		printf("this is thread read two.");
		printf("the characters is %s", work_area);
		pthread_rwlock_unlock(&rwlock);
		sleep(5);
		pthread_rwlock_rdlock(&rwlock);
		/* Buffer still empty: release the lock and poll. */
		while (work_area[0] == '\0') {
			pthread_rwlock_unlock(&rwlock);
			sleep(5);
			pthread_rwlock_rdlock(&rwlock);
		}
	}
	pthread_rwlock_unlock(&rwlock);
	/* NOTE(review): written without a write lock — racy, benign here. */
	time_to_exit = 1;
	pthread_exit(0);
}
/*
 * Writer thread one: every 15 s take the write lock, prompt on stdout,
 * and read one line from stdin into work_area, until a reader sets
 * time_to_exit.
 *
 * Fix over the original: removed the stray pthread_rwlock_unlock()
 * after the loop — the lock is released inside the loop before each
 * sleep, so it is not held there, and unlocking an unheld rwlock is
 * undefined behavior per POSIX.
 */
void *thread_function_write_o(void *arg) {
	printf("this is write thread one try to get lock\n");
	while (!time_to_exit) {
		pthread_rwlock_wrlock(&rwlock);
		printf("this is write thread one.\nInput some text. Enter 'end' to finish\n");
		fgets(work_area, WORK_SIZE, stdin);
		pthread_rwlock_unlock(&rwlock);
		sleep(15);
	}
	pthread_exit(0);
}
/*
 * Writer thread two: waits 10 s so writer one goes first, then every
 * 20 s takes the write lock, prompts, and reads a line from stdin into
 * work_area, until a reader sets time_to_exit.
 *
 * Fix over the original: removed the stray pthread_rwlock_unlock()
 * after the loop — the lock is not held at that point, and unlocking an
 * unheld rwlock is undefined behavior per POSIX.
 */
void *thread_function_write_t(void *arg) {
	sleep(10);
	while (!time_to_exit) {
		pthread_rwlock_wrlock(&rwlock);
		printf("this is write thread two.\nInput some text. Enter 'end' to finish\n");
		fgets(work_area, WORK_SIZE, stdin);
		pthread_rwlock_unlock(&rwlock);
		sleep(20);
	}
	pthread_exit(0);
}
|
the_stack_data/69409.c
|
/* Union whose members differ in size: the larger 'float' member is
 * initialized and the smaller 'char' member read back. */
union u {
char c;
float f;
};
/* Returns the first byte of the representation of 0.123f, read through
 * the union (type punning via a union is valid in C).  This is the input
 * for the "constant-union-size" sparse/test-linearize check below, so
 * the code is intentionally left byte-identical. */
static int foo(void)
{
union u u = { .f = 0.123 };
return u.c;
}
/*
* check-name: constant-union-size
* check description: the size of the initializer doesn't match
* check-command: test-linearize -fdump-ir $file
*
* check-output-ignore
* check-output-contains: load\\.
* check-output-excludes: ret\\..*\\$
*/
|
the_stack_data/108872.c
|
/*
* Copyright (c) 2002, Intel Corporation. All rights reserved.
* Created by: rolla.n.selbak REMOVE-THIS AT intel DOT com
* This file is licensed under the GPL license. For the full content
* of this license, see the COPYING file at the top level of this
* source tree.
Test the following symbols are defined by pthread.h
*/
#include <pthread.h>
/* Compile-time check that <pthread.h> provides the POSIX-required
 * constants; a missing symbol stops the build with an #error naming it.
 * Fixes over the original: two #error messages misspelled the symbol
 * they report (PTHREAD_COND_INTIALIZER, PTHREAD_MUTEX_INITIALIZED). */
#ifndef PTHREAD_CANCEL_ASYNCHRONOUS
#error PTHREAD_CANCEL_ASYNCHRONOUS not defined
#endif
#ifndef PTHREAD_CANCEL_ENABLE
#error PTHREAD_CANCEL_ENABLE not defined
#endif
#ifndef PTHREAD_CANCEL_DEFERRED
#error PTHREAD_CANCEL_DEFERRED not defined
#endif
#ifndef PTHREAD_CANCEL_DISABLE
#error PTHREAD_CANCEL_DISABLE not defined
#endif
#ifndef PTHREAD_CANCELED
#error PTHREAD_CANCELED not defined
#endif
#ifndef PTHREAD_COND_INITIALIZER
#error PTHREAD_COND_INITIALIZER not defined
#endif
#ifndef PTHREAD_CREATE_DETACHED
#error PTHREAD_CREATE_DETACHED not defined
#endif
#ifndef PTHREAD_CREATE_JOINABLE
#error PTHREAD_CREATE_JOINABLE not defined
#endif
#ifndef PTHREAD_EXPLICIT_SCHED
#error PTHREAD_EXPLICIT_SCHED not defined
#endif
#ifndef PTHREAD_INHERIT_SCHED
#error PTHREAD_INHERIT_SCHED not defined
#endif
#ifndef PTHREAD_MUTEX_INITIALIZER
#error PTHREAD_MUTEX_INITIALIZER not defined
#endif
#ifndef PTHREAD_ONCE_INIT
#error PTHREAD_ONCE_INIT not defined
#endif
#ifndef PTHREAD_PROCESS_SHARED
#error PTHREAD_PROCESS_SHARED not defined
#endif
#ifndef PTHREAD_PROCESS_PRIVATE
#error PTHREAD_PROCESS_PRIVATE not defined
#endif
|
the_stack_data/111078388.c
|
/**
* @file
* @brief How basic RTOS features are supported by LostArm
*
* # RTOS Objects are simple pointer like things inside a uintptr_t
* @anchor concept_rtos
*
* Also see: @ref concept_mutex
* Also see: @ref concept_semaphore
*
* RTOS elements are effectively pointers hidden inside a "uintptr_t"
*
* WHY?
* Because often one must include a large number of files for an RTOS elements.
* These often cause many problems when porting code across platforms (ugh)
* A "void *" works, but - not all OSes items use pointers, some use integers.
*
* For example, some RTOSes require certain elements to be pre-allocated
* ahead of time at startup or at compile time, and thus we may be talking
* about the "NTH" mutex (which is an integer), so we use a uintptr_t
* as the basic type for all RTOS elements.
*
* Think of it as a handle to a thread, or a semaphore or a mutex.
*
*/
|
the_stack_data/61075281.c
|
#include <assert.h>
/* Verify __atomic_add_fetch on a volatile int: 1 + 5 must leave the
 * object at 6 and the builtin must return the post-add value. */
void test_int() {
	volatile int counter = 1;
	volatile int after = __atomic_add_fetch(&counter, 5, __ATOMIC_RELAXED);
	assert(counter == 6);
	assert(after == 6);
}
/* Verify __atomic_add_fetch on a volatile long: 1 + 5 must leave the
 * object at 6 and the builtin must return the post-add value. */
void test_long() {
	volatile long counter = 1;
	volatile long after = __atomic_add_fetch(&counter, 5, __ATOMIC_RELAXED);
	assert(counter == 6);
	assert(after == 6);
}
/* Verify __atomic_add_fetch on a volatile long long: 1 + 5 must leave
 * the object at 6 and the builtin must return the post-add value. */
void test_longlong() {
	volatile long long counter = 1;
	volatile long long after = __atomic_add_fetch(&counter, 5, __ATOMIC_RELAXED);
	assert(counter == 6);
	assert(after == 6);
}
/* Run the atomic add-fetch checks for each integer width. */
int main() {
test_int();
test_long();
test_longlong();
return 0;
}
|
the_stack_data/237642183.c
|
/*
* @@name: SIMD.3c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.0
*/
/*
 * Sum of the element-wise totals of a[0..n-1] and b[0..n-1].
 * The loop is vectorized with OpenMP SIMD: 'partial' is private per
 * SIMD lane and 'total' is combined with a '+' reduction.  Without
 * -fopenmp the pragma is ignored and the loop runs serially with the
 * same result.
 */
double work( double *a, double *b, int n )
{
	double total = 0.0;
	double partial;
	int idx;
	#pragma omp simd private(partial) reduction(+:total)
	for (idx = 0; idx < n; idx++) {
		partial = a[idx] + b[idx];
		total += partial;
	}
	return total;
}
|
the_stack_data/86075805.c
|
// RUN: %check %s -Wsym-never-read
/* Input for the -Wsym-never-read diagnostic test.  The trailing CHECK
 * comments are matched by the harness and must stay on their lines; the
 * implicit-int (pre-C99) declarations are intentional. */
f(int i); // CHECK: !/warn/
g(int (*pf)(int x, int y)); // CHECK: !/warn/
f(int j) // CHECK: !/warn/
{
return j;
}
h(void (*pv)(int hello)) // CHECK: /warning: "pv" never read/
{
}
main()
{
int i; // CHECK: /warning: "i" never read/
int j; // CHECK: !/warn/
int k; // CHECK: /warning: "k" never written to/
// CHECK: ^/warning: "k" never read/
i = 1;
j = 2;
return f(j);
}
|
the_stack_data/527494.c
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/*
 * Longest match between a non-zero offset of 'left' and a prefix of
 * 'right': for each start >= 1 in 'left', compare against the start of
 * 'right' until a mismatch (candidate discarded) or until either string
 * ends, and return the best length found.  Note that a match ending
 * because 'right' ran out (i.e. 'right' is an infix of 'left') counts
 * too, and a whole-string match at offset 0 is never considered.
 */
int GetOverlapedSuffixLength(char *left, char *right)
{
	int best = 0;
	for (int start = 1; left[start] != '\0'; start++) {
		int len = 0;
		while (left[start + len] != '\0' && right[len] != '\0') {
			if (left[start + len] != right[len]) {
				len = 0;    /* mismatch: this start contributes nothing */
				break;
			}
			len++;
		}
		if (len > best)
			best = len;
	}
	return best;
}
/*
 * Greedy word merge: read n words, then n-1 times merge the pair with
 * the longest overlap (per GetOverlapedSuffixLength) and print the
 * length of the final merged word.
 *
 * Fixes over the original: 'first'/'second' are initialized (with n == 1
 * the search loop never runs and they were read uninitialized); the
 * scanf and calloc results are checked; words are read with a width
 * bound matching the 100-byte buffers.
 */
int main(int argc, char **argv)
{
	char** words;
	int n, i, j, k, max, first, second;
	first = 0;   /* guards the n < 2 case where the search never runs */
	second = 0;
	if (scanf("%d", &n) != 1 || n <= 0)
		return 1;
	words = (char**)calloc(n, sizeof(char*));
	if (words == NULL)
		return 1;
	for (i = 0; i < n; i++) {
		words[i] = (char*)calloc(100, sizeof(char));
		if (words[i] == NULL || scanf("%99s", words[i]) != 1)
			return 1;
	}
	for (k = 1; k < n; k++) {
		max = -1;
		/* Find the non-empty ordered pair with the largest overlap. */
		for (i = 0; i < n; i++) {
			for (j = 0; j < n; j++) {
				if ((i != j) && (words[i][0] != '\0') && (words[j][0] != '\0')) {
					int currentLength = GetOverlapedSuffixLength(words[i], words[j]);
					if (currentLength > max) {
						max = currentLength;
						first = i;
						second = j;
					}
				}
			}
		}
		/* Drop the overlapped tail of 'first' and append 'second'.
		 * NOTE(review): the merged word is assumed to fit in the
		 * 100-byte buffer — confirm against the input constraints. */
		words[first][strlen(words[first])-max] = '\0';
		strcat(words[first], words[second]);
		words[second][0] = '\0';
	}
	printf("%d\n", (int)strlen(words[first]));
	for (i = 0; i < n; i++) free(words[i]);
	free(words);
	return 0;
}
|
the_stack_data/175142184.c
|
#include <stdio.h>
#include <complex.h>
/*
 * Print the prompt 's', read a single float from stdin, and fill the
 * nx-by-ny complex matrix with (val + val*i) in every cell.
 * NOTE(review): the scanf return value is not checked — on malformed
 * input 'val' is used uninitialized; confirm callers guarantee input.
 */
void scilab_rt_read_from_scilab_s0_z2(char* s, int nx, int ny, double complex aMatrix[nx][ny])
{
int i,j;
float val;
printf("%s", s);
scanf("%f", &val);
for (i = 0; i < nx; ++i) {
for (j = 0; j < ny; ++j) {
aMatrix[i][j] = val + val*I;
}
}
}
|
the_stack_data/45450180.c
|
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/dir.h>
#include <sys/stat.h>
#include <time.h>
/*
Copyright (c) 2021 Devine Lu Linvega
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE.
*/
#define NAME "Dense Earth"
#define DOMAIN "https://dense.earth/"
#define LICENSE "https://github.com/hundredrabbits/100r.co/blob/master/LICENSE.by-nc-sa-4.0.md"
#define SOURCE "https://github.com/hundredrabbits/100r.co/edit/master"
typedef struct Lexicon {
int len, refs[512];
char files[512][64];
} Lexicon;
/* clang-format off */
/* Minimal libc-free string helpers.  NOTE(review): scmp() returns 1 when
 * the strings are EQUAL (the opposite sense of strcmp), and scpy()
 * copies at most len-1 bytes including the terminator. */
char clca(char c) { return c >= 'A' && c <= 'Z' ? c + ('a' - 'A') : c; } /* char to lowercase */
char cuca(char c) { return c >= 'a' && c <= 'z' ? c - ('a' - 'A') : c; } /* char to uppercase */
int slen(char *s) { int i = 0; while(s[i] && s[++i]) { ; } return i; } /* string length */
char *st__(char *s, char (*fn)(char)) { int i = 0; char c; while((c = s[i])) s[i++] = fn(c); return s; } /* map fn over each char, in place */
char *stuc(char *s) { return st__(s, cuca); } /* string to uppercase */
char *stlc(char *s) { return st__(s, clca); } /* string to lowercase */
char *scpy(char *src, char *dst, int len) { int i = 0; while((dst[i] = src[i]) && i < len - 2) i++; dst[i + 1] = '\0'; return dst; } /* bounded string copy */
int scmp(char *a, char *b) { int i = 0; while(a[i] == b[i]) if(!a[i++]) return 1; return 0; } /* string equality: 1 when equal */
char *scsw(char *s, char a, char b) { int i = 0; char c; while((c = s[i])) s[i++] = c == a ? b : c; return s; } /* swap char a for b, in place */
char *scat(char *dst, const char *src) { char *ptr = dst + slen(dst); while(*src) *ptr++ = *src++; *ptr = '\0'; return dst; } /* string cat (unbounded) */
int ssin(char *s, char *ss) { int a = 0, b = 0; while(s[a]) { if(s[a] == ss[b]) { if(!ss[b + 1]) return a - b; b++; } else b = 0; a++; } return -1; } /* index of substring, -1 if absent */
char *ccat(char *dst, char c) { int len = slen(dst); dst[len] = c; dst[len + 1] = '\0'; return dst; } /* append one char (unbounded) */
/* clang-format on */
int fpinject(FILE *f, Lexicon *l, char *filepath);
/* Print "Error: msg(val)" to stdout and return 0, so callers can write
 * 'return error(...)' on failure paths. */
int
error(char *msg, char *val)
{
printf("Error: %s(%s)\n", msg, val);
return 0;
}
/*
 * Resolve term f to its lexicon slot: normalize to the on-disk name
 * (lowercase, spaces to underscores, ".htm" appended) and return its
 * index in l->files, or -1 when absent.  scmp() returns 1 on a match.
 */
int
findf(Lexicon *l, char *f)
{
int i;
char filename[64];
scat(scsw(stlc(scpy(f, filename, 64)), ' ', '_'), ".htm");
for(i = 0; i < l->len; ++i)
if(scmp(l->files[i], filename))
return i;
return -1;
}
/*
 * Emit the right-floated "Edited on <date> [edit]" footer span for
 * 'path' into f, using the file's mtime.  NOTE(review): the stat()
 * return value is ignored (attr is garbage if it fails), and ctime()
 * embeds a trailing '\n' inside the emitted date string.
 */
void
fpedited(FILE *f, char *path)
{
struct stat attr;
stat(path, &attr);
fputs("<span style='float:right'>", f);
fprintf(f, "Edited on %s ", ctime(&attr.st_mtime));
fprintf(f, "<a href='" SOURCE "/src/%s'>[edit]</a><br/>", path);
fputs("</span>", f);
}
/*
 * Inline the page named 's' into f: resolve it in the lexicon (error and
 * return 0 if missing), optionally emit an <h2> heading linking to the
 * standalone page, inject the inc/<s>.htm body, and bump the target's
 * reference count for the orphan check in inspect().
 */
int
fpportal(FILE *f, Lexicon *l, char *s, int head)
{
int target;
char srcpath[64], filename[64];
target = findf(l, s);
if(target < 0)
return error("Missing portal", s);
srcpath[0] = 0;
filename[0] = 0;
/* Build srcpath = "inc/<s>.htm"; filename gets a plain copy of s. */
scat(scat(scat(srcpath, "inc/"), scpy(s, filename, 64)), ".htm");
/* scsw mutates filename in place, so both %s print the underscored name. */
if(head)
fprintf(f, "<h2 id='%s'><a href='%s.html'>%s</a></h2>", scsw(filename, ' ', '_'), filename, s);
fpinject(f, l, srcpath);
l->refs[target]++;
return 1;
}
/*
 * Expand one {tag} read by fpinject(): "{/name}" inlines the page with a
 * heading via fpportal(); a plain "{name}" becomes a local <a> link
 * (href underscored, link text with spaces).  Returns 1 on success, 0
 * via error() for an unknown target.
 */
int
fptemplate(FILE *f, Lexicon *l, char *s)
{
int target;
if(s[0] == '/')
return fpportal(f, l, s + 1, 1);
target = findf(l, s);
if(target < 0)
return error("Missing link", s);
fprintf(f, "<a href='%s.html' class='local'>", scsw(stlc(s), ' ', '_'));
fprintf(f, "%s</a>", scsw(stlc(s), '_', ' '));
l->refs[target]++;
return 1;
}
/*
 * Stream the file at 'filepath' into f, expanding "{name}" templating
 * tags via fptemplate(); text outside braces is copied through verbatim.
 * Returns 1 on success, 0 on error.
 *
 * Fixes over the original: the fgetc() result is kept in an int
 * (storing it in a char breaks EOF detection and can mistake byte 0xFF
 * for EOF); the input FILE is closed on the error paths (it leaked);
 * and the tag-buffer bound now stops one char earlier, since ccat()
 * writes at indices len and len+1 and s[] holds 1024 bytes.
 */
int
fpinject(FILE *f, Lexicon *l, char *filepath)
{
	FILE *inc;
	int c;
	char s[1024];
	unsigned char t = 0;   /* inside a {tag}? */
	scsw(filepath, ' ', '_');
	if(!(inc = fopen(filepath, "r")))
		return error("Missing include", filepath);
	s[0] = 0;
	while((c = fgetc(inc)) != EOF) {
		if(c == '}') {
			t = 0;
			if(!fptemplate(f, l, s)) {
				fclose(inc);
				return 0;
			}
			continue;
		}
		if(c == '{') {
			s[0] = 0;
			t = 1;
			continue;
		}
		/* ccat() writes s[len] and s[len+1]; keep len <= 1022. */
		if(slen(s) > 1022) {
			fclose(inc);
			return error("Templating error", filepath);
		}
		if(t)
			ccat(s, (char)c);
		else
			fprintf(f, "%c", c);
	}
	fclose(inc);
	return 1;
}
/*
 * Write a two-column <ul> listing every non-hidden file in inc/ into f.
 * The href is the raw directory name with a literal 'l' appended (the
 * "%sl" below), turning "name.htm" into "name.html"; the link text is
 * the name truncated at ".htm" with underscores turned into spaces.
 * Returns 1 on success, 0 if scandir() fails.
 */
int
fpindex(FILE *f)
{
struct dirent **d;
int n, i = 0;
n = scandir("inc", &d, NULL, alphasort);
if(n < 0)
return error("scandir", "failed");
fputs("<ul class='col2 capital'>", f);
while(i < n) {
char filepath[64], filename[64];
if(d[i]->d_name[0] != '.')
fprintf(f, "<li><a href='%sl'>%s</a></li>", scpy(d[i]->d_name, filepath, 64), scsw(scpy(d[i]->d_name, filename, ssin(d[i]->d_name, ".htm") + 1), '_', ' '));
free(d[i++]);
}
fputs("</ul>", f);
free(d);
return 1;
}
/*
 * Assemble one complete HTML page into f: <head> with site metadata and
 * a "<NAME> — <name>" title, logo header, nav injected from the meta.nav
 * portal, the page body injected from srcpath, and a footer with the
 * edit date (fpedited) and license link.  Returns f unchanged so the
 * caller can fclose() it; a NULL f (failed fopen) passes through
 * untouched.  Portal/injection failures are reported to stdout but do
 * not abort the page.
 */
FILE *
build(FILE *f, Lexicon *l, char *name, char *srcpath)
{
if(!f)
return f;
/* begin */
fputs("<!DOCTYPE html><html lang='en'>", f);
fputs("<head>", f);
fprintf(f,
"<meta charset='utf-8'>"
"<meta name='thumbnail' content='" DOMAIN "media/services/thumbnail.jpg' />"
"<meta name='viewport' content='width=device-width,initial-scale=1'>"
"<link rel='alternate' type='application/rss+xml' title='RSS Feed' "
"href='../links/rss.xml' />"
"<link rel='stylesheet' type='text/css' href='../links/main.css'>"
"<link rel='shortcut icon' type='image/png' "
"href='../media/services/shortcut.png'>"
"<title>" NAME " — %s</title>",
name);
fputs("</head>", f);
fputs("<body>", f);
/* header */
fputs("<header>", f);
fputs("<a href='home.html'><img src='../media/interface/logo.svg' alt='" NAME "' height='50'></a>", f);
fputs("</header>", f);
/* nav */
fputs("<nav>", f);
if(!fpportal(f, l, "meta.nav", 0))
printf(">>> Building failed: %s\n", name);
fputs("</nav>", f);
/* main */
fputs("<main>\n\n", f);
fputs("<!-- Generated file, do not edit -->\n\n", f);
fprintf(f, "<h1>%s</h1>", name);
if(!fpinject(f, l, srcpath))
printf(">>> Building failed: %s\n", name);
fputs("\n\n</main>", f);
/* footer */
fputs("<footer><hr />", f);
fpedited(f, srcpath);
fputs("<b>Dense Earth</b> © 2021 — ", f);
fputs("<a href='" LICENSE "' target='_blank'>BY-NC-SA 4.0</a>", f);
fputs("</footer>", f);
/* end */
fputs("</body></html>", f);
return f;
}
/*
 * For every lexicon entry, build ../site/<name>.html from inc/<name>.htm
 * and report the count.  Always returns 1.
 *
 * Fix over the original: the output FILE is checked before use — a
 * failed fopen() previously made build() return NULL, which was passed
 * straight to fclose(NULL), undefined behavior.
 */
int
generate(Lexicon *l)
{
	int i = 0;
	FILE *out;
	char srcpath[64], dstpath[64], filename[64];
	for(i = 0; i < l->len; ++i) {
		srcpath[0] = 0;
		dstpath[0] = 0;
		filename[0] = 0;
		/* src: "inc/<name>.htm" (filename = entry truncated at ".htm") */
		scpy(l->files[i], filename, ssin(l->files[i], ".htm") + 1);
		scat(srcpath, "inc/");
		scat(srcpath, filename);
		scat(srcpath, ".htm");
		/* dst: "../site/<name>.html" */
		scat(dstpath, "../site/");
		scat(dstpath, filename);
		scat(dstpath, ".html");
		out = fopen(dstpath, "w");
		if(out == NULL) {
			error("Create", dstpath);
			continue;
		}
		/* build() returns its FILE argument unchanged. */
		fclose(build(out, l, scsw(filename, '_', ' '), srcpath));
	}
	printf("Generated %d files\n", i);
	return 1;
}
/*
 * Scan directory d for *.htm entries, record them in the lexicon, then
 * write inc/index.htm listing every page.  Returns 1 on success.
 *
 * Fixes over the original: the scpy() bound now matches the 64-byte
 * l->files[] entries (128 allowed a 64-byte overflow on long names);
 * entries beyond the 512-slot table are skipped (one slot is reserved
 * for index.htm); and the generated file handle is checked before use
 * (fpindex(NULL) would have crashed).
 */
int
index(Lexicon *l, DIR *d)
{
	FILE *f;
	struct dirent *dir;
	while((dir = readdir(d)))
		if(ssin(dir->d_name, ".htm") > 0 && l->len < 511) {
			l->refs[l->len] = 0;
			scpy(dir->d_name, l->files[l->len++], 64);
		}
	closedir(d);
	printf("Indexed %d terms\n", l->len);
	l->refs[l->len] = 0;
	scpy("index.htm", l->files[l->len++], 64);
	f = fopen("inc/index.htm", "w");
	if(f == NULL)
		return error("Create", "inc/index.htm");
	fpindex(f);
	fclose(f);
	return 1;
}
/*
 * Report every lexicon entry whose reference count (incremented by
 * fptemplate()/fpportal()) is still zero as "Orphaned".
 */
void
inspect(Lexicon *l)
{
int i;
for(i = 0; i < l->len; ++i)
if(!l->refs[i])
error("Orphaned", l->files[i]);
}
/*
 * Entry point: index inc/ into the lexicon, generate every site page,
 * then report orphaned (never-linked) pages.  NOTE(review): error()
 * returns 0, so the failure paths below exit with status 0 as well —
 * callers checking the exit code will not see these failures.
 */
int
main(void)
{
Lexicon lex;
DIR *d;
lex.len = 0;
if(!(d = opendir("inc")))
return error("Open", "Missing inc/ folder. ");
if(!index(&lex, d))
return error("Indexing", "Failed");
if(!generate(&lex))
return error("Generating", "Failed");
inspect(&lex);
return 0;
}
|
the_stack_data/145825.c
|
#include <stdio.h>
/*
 * Read a two-digit number as two single digits and print its English
 * name ("ten".."nineteen" for the teens, otherwise "<tens>-<ones>").
 *
 * Fixes over the original: the table spellings "fourty" and "eightteen"
 * are corrected to "forty"/"eighteen", and the scanf result is checked
 * (n1/n2 were used uninitialized on malformed input).
 */
int main(void)
{
	int n1, n2;
	char *teens[] = { "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen" };
	char *tens[] = { "", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety" };
	char *ones[] = { "", "-one", "-two", "-three", "-four", "-five", "-six", "-seven", "-eight", "-nine" };
	printf("Enter a two-digit number: ");
	if(scanf("%1d%1d", &n1, &n2) != 2)
	{
		printf("\n");
		return 1;
	}
	printf("You entered the number ");
	if(n1 == 1)
	{
		printf("%s", teens[n2]);
	}
	else
	{
		printf("%s", tens[n1]);
		printf("%s", ones[n2]);
	}
	printf("\n");
	return 0;
}
|
the_stack_data/182954262.c
|
/***************************************************************
*Copyright (C), 2015, Shanghai Eastsoft Microelectronics Co., Ltd
***************************************************************/
extern unsigned long _sidata;
extern unsigned long __data_start__;
extern unsigned long __data_end__;
extern unsigned long __bss_start__;
extern unsigned long __bss_end__;
extern unsigned long _my_ram_end;
extern unsigned long _my_ram_end_d;
extern unsigned long _my_rom_end;
/*
 * Early-boot RAM setup: copy the .data image from flash (_sidata) to
 * RAM, zero-fill .bss, then copy the extra _my_ram region from ROM.
 * Section boundaries come from linker-script symbols, so only their
 * addresses are meaningful.
 *
 * Fix over the original: the final copy called memcpy() with no
 * declaration in scope (no headers are included, so it was implicitly
 * declared — invalid since C99), and calling into libc this early is
 * unsafe anyway; it is now an explicit byte-copy loop like the word
 * loops above.
 */
void InitRam(void)
{
	unsigned long volatile *pulSrc, *pulDest;
	unsigned char volatile *pucSrc, *pucDest;
	unsigned int volatile _my_ram_len;
	unsigned int i;
	pulSrc = &_sidata;
	/* Copy initialized data from its load address into .data. */
	for(pulDest = (unsigned long volatile *)&__data_start__; pulDest <(unsigned long volatile *)&__data_end__; )
	{
		*(pulDest++) = *(pulSrc++);
	}
	/* Zero-fill .bss. */
	for(pulDest = (unsigned long volatile *)&__bss_start__; pulDest < (unsigned long volatile *)&__bss_end__; )
	{
		*(pulDest++) = 0;
	}
	_my_ram_len = (unsigned int)&_my_ram_end_d - (unsigned int)&_my_ram_end;
	pucSrc = (unsigned char volatile *)&_my_rom_end;
	pucDest = (unsigned char volatile *)&_my_ram_end;
	for(i = 0; i < _my_ram_len; i++)
	{
		pucDest[i] = pucSrc[i];
	}
}
|
the_stack_data/45451208.c
|
#include<stdio.h>
/* Print a greeting (the "tehnic" spelling is the program's own output)
 * and exit successfully. */
int main(){
printf("Hello tehnic guys!");
return 0;
}
|
the_stack_data/26700833.c
|
// RUN: %clang_cc1 -triple %itanium_abi_triple -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK --check-prefix=ITANIUM
// RUN: %clang_cc1 -triple %ms_abi_triple -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK --check-prefix=MSABI
// Should be 3 hello strings, two global (of different sizes), the rest are
// shared.
// CHECK: @align = {{(dso_local )?}}global i8 [[ALIGN:[0-9]+]]
// ITANIUM: @.str = private unnamed_addr constant [6 x i8] c"hello\00"
// MSABI: @"??_C@_05CJBACGMB@hello?$AA@" = linkonce_odr dso_local unnamed_addr constant [6 x i8] c"hello\00", comdat, align 1
// ITANIUM: @f1.x = internal global i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i32 0, i32 0)
// MSABI: @f1.x = internal global i8* getelementptr inbounds ([6 x i8], [6 x i8]* @"??_C@_05CJBACGMB@hello?$AA@", i32 0, i32 0)
// CHECK: @f2.x = internal global [6 x i8] c"hello\00", align [[ALIGN]]
// CHECK: @f3.x = internal global [8 x i8] c"hello\00\00\00", align [[ALIGN]]
// ITANIUM: @f4.x = internal global %struct.s { i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i32 0, i32 0) }
// MSABI: @f4.x = internal global %struct.s { i8* getelementptr inbounds ([6 x i8], [6 x i8]* @"??_C@_05CJBACGMB@hello?$AA@", i32 0, i32 0) }
// CHECK: @x = {{(dso_local )?}}global [3 x i8] c"ola", align [[ALIGN]]
// XFAIL: hexagon
// Hexagon aligns arrays of size 8+ bytes to a 64-bit boundary, which
// fails the check for "@f3.x = ... align [ALIGN]", since ALIGN is derived
// from the alignment of a single i8, which is still 1.
/* IRGen test input: string-literal uniquing and alignment across the
 * Itanium and MS ABIs.  The RUN/CHECK comments above and the trailing
 * CHECK comments drive FileCheck; the code shape must stay exactly as
 * written. */
#if defined(__s390x__)
unsigned char align = 2;
#else
unsigned char align = 1;
#endif
void bar(const char *);
// CHECK-LABEL: define {{.*}}void @f0()
void f0() {
bar("hello");
// ITANIUM: call {{.*}}void @bar({{.*}} @.str
// MSABI: call {{.*}}void @bar({{.*}} @"??_C@_05CJBACGMB@hello?$AA@"
}
// CHECK-LABEL: define {{.*}}void @f1()
void f1() {
static char *x = "hello";
bar(x);
// CHECK: [[T1:%.*]] = load i8*, i8** @f1.x
// CHECK: call {{.*}}void @bar(i8* noundef [[T1:%.*]])
}
// CHECK-LABEL: define {{.*}}void @f2()
void f2() {
static char x[] = "hello";
bar(x);
// CHECK: call {{.*}}void @bar({{.*}} @f2.x
}
// CHECK-LABEL: define {{.*}}void @f3()
void f3() {
static char x[8] = "hello";
bar(x);
// CHECK: call {{.*}}void @bar({{.*}} @f3.x
}
void gaz(void *);
// CHECK-LABEL: define {{.*}}void @f4()
void f4() {
static struct s {
char *name;
} x = { "hello" };
gaz(&x);
// CHECK: call {{.*}}void @gaz({{.*}} @f4.x
}
char x[3] = "ola";
|
the_stack_data/59513773.c
|
/*
gcc -std=c17 -lc -lm -pthread -o ../_build/c/string_wide_wmemset.exe ./c/string_wide_wmemset.c && (cd ../_build/c/;./string_wide_wmemset.exe)
https://en.cppreference.com/w/c/string/wide/wmemset
*/
#include <stdio.h>
#include <wchar.h>
#include <locale.h>
/*
 * wmemset() demo: overwrite the first five wide chars with U+1F34C (the
 * banana emoji) and the last five with U+8549, then print all ten under
 * a UTF-8 locale.  setlocale() runs after the wmemset calls but before
 * any output, so the putwchar calls are unaffected.
 * NOTE(review): the non-BMP character requires a 4-byte wchar_t —
 * confirm on platforms with 16-bit wchar_t (Windows).
 */
int main(void)
{
wchar_t ar[10] = L"1234567890"; // exactly 10 elements — no room for a trailing null
wmemset(ar, L'\U0001f34c', 5); // replaces "12345" with the banana emoji
wmemset(ar+5, L'蕉', 5); // replaces "67890" with U+8549 ("banana" ideograph)
setlocale(LC_ALL, "en_US.utf8");
for(size_t n = 0; n < sizeof ar/sizeof *ar; ++n)
putwchar(ar[n]);
putwchar(L'\n');
}
|
the_stack_data/161081912.c
|
#include <stdio.h>
#include <stdlib.h>
void display();
/*
 * Call display() three times to show that its static counter persists
 * across calls, then pause.  NOTE(review): system("pause") is a
 * Windows-only command — it fails silently elsewhere.
 *
 * Fix over the original: main() was declared 'void main()', which is
 * non-standard; it now returns int per the C standard.
 */
int main(void)
{
	display();
	display();
	display();
	system("pause");
	return 0;
}
/* Print and advance a static counter starting at 10, demonstrating that
 * a static local keeps its value between calls. */
void display()
{
static int i = 10;
printf("\n%d", i);
i++;
}
|
the_stack_data/132952412.c
|
#include <stdio.h>
// T: print the digits of an integer one by one.  Known limitation (noted
// in the original): numbers with trailing zeros such as 700 print wrong,
// because the reversed intermediate value drops what become leading zeros.
int main(int argc, char *argv[]) {
int d, x, t = 0;
scanf("%d", &x);
// Build the digit-reversed value of x in t (700 -> 7, losing the zeros).
do {
d = x % 10;
t = t * 10 + d;
x /= 10;
} while (x > 0);
x = t;
do { // Emit t's digits lowest-first — i.e. x's original digit order —
// separated by tabs.
d = x % 10;
printf("%d", d);
if (x > 9) {
printf("\t");
}
x /= 10;
} while (x > 0);
printf("\n");
return 0;
}
|
the_stack_data/64201571.c
|
/*
This file is part of MAMBO, a low-overhead dynamic binary modification tool:
https://github.com/beehive-lab/mambo
Copyright 2013-2016 Cosmin Gorgovan <cosmin at linux-geek dot org>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifdef PLUGINS_NEW
#include <stdio.h>
#include <assert.h>
#include <stdarg.h>
#include "../plugins.h"
#ifdef __arm__
#include "../pie/pie-thumb-encoder.h"
#elif __aarch64__
#include "../pie/pie-a64-encoder.h"
#include "../api/emit_a64.h"
#endif
#define not_implemented() \
fprintf(stderr, "%s: Implement me\n", __PRETTY_FUNCTION__); \
while(1);
#ifdef __arm__
#define MAX_FCALL_ARGS 4
#elif __aarch64__
#define MAX_FCALL_ARGS 8
#endif
#ifdef __arm__
/* Save CPSR to the stack via tmp_reg (Thumb encodings).  thumb_mrs32 is
 * a 32-bit encoding, hence the manual write_p += 2 (uint16_t units)
 * before the push. */
void emit_thumb_push_cpsr(mambo_context *ctx, enum reg tmp_reg) {
uint16_t *write_p = ctx->code.write_p;
// MRS tmp_reg, CPSR
thumb_mrs32(&write_p, tmp_reg);
write_p += 2;
// PUSH {tmp_reg}
thumb_push_regs(&write_p, 1 << tmp_reg);
ctx->code.write_p = write_p;
}
/* Save CPSR to the stack via tmp_reg (ARM encodings). */
void emit_arm_push_cpsr(mambo_context *ctx, enum reg tmp_reg) {
emit_arm_mrs(ctx, tmp_reg);
emit_arm_push(ctx, 1 << tmp_reg);
}
/* Restore CPSR from the stack via tmp_reg (Thumb encodings); the MSR is
 * again a 32-bit encoding, hence write_p += 2. */
void emit_thumb_pop_cpsr(mambo_context *ctx, enum reg tmp_reg) {
uint16_t *write_p = ctx->code.write_p;
// POP {tmp_reg}
thumb_pop_regs(&write_p, 1 << tmp_reg);
// MSR tmp_reg, CPSR_fs
thumb_msr32(&write_p, tmp_reg, 3);
write_p += 2;
ctx->code.write_p = write_p;
}
/* Restore CPSR from the stack via tmp_reg (ARM encodings). */
void emit_arm_pop_cpsr(mambo_context *ctx, enum reg tmp_reg) {
emit_arm_pop(ctx, 1 << tmp_reg);
emit_arm_msr(ctx, tmp_reg, 3);
}
/* Load a 32-bit immediate into reg (Thumb), using the shorter
 * 16-bit-immediate sequence when the value fits. */
void emit_thumb_copy_to_reg_32bit(mambo_context *ctx, enum reg reg, uint32_t value) {
if (value <= 0xFFFF) {
copy_to_reg_16bit((uint16_t **)&ctx->code.write_p, reg, value);
} else {
copy_to_reg_32bit((uint16_t **)&ctx->code.write_p, reg, value);
}
}
/* Load a 32-bit immediate into reg (ARM), preferring the shorter form. */
void emit_arm_copy_to_reg_32bit(mambo_context *ctx, enum reg reg, uint32_t value) {
if (value <= 0xFFFF) {
arm_copy_to_reg_16bit((uint32_t **)&ctx->code.write_p, reg, value);
} else {
arm_copy_to_reg_32bit((uint32_t **)&ctx->code.write_p, reg, value);
}
}
/* Emit a 16-bit Thumb conditional branch at write_p to target.
 * NOTE(review): unlike the ctx-based emitters, write_p is not advanced
 * here — presumably callers account for the 2-byte instruction; confirm
 * against the pie encoder's contract. */
void emit_thumb_b16_cond(void *write_p, void *target, mambo_cond cond) {
thumb_b16_cond_helper((uint16_t *)write_p, (uint32_t)target, cond);
}
/* PUSH {regs} (Thumb); tracks the plugin's push/pop balance. */
void emit_thumb_push(mambo_context *ctx, uint32_t regs) {
ctx->code.plugin_pushed_reg_count += count_bits(regs);
uint16_t *write_p = ctx->code.write_p;
thumb_push_regs(&write_p, regs);
ctx->code.write_p = write_p;
}
/* PUSH {regs} (ARM).  NOTE(review): arm_push_regs takes no write
 * pointer, so it is presumably a macro operating on the local 'write_p'
 * — confirm in the pie headers. */
void emit_arm_push(mambo_context *ctx, uint32_t regs) {
ctx->code.plugin_pushed_reg_count += count_bits(regs);
uint32_t *write_p = ctx->code.write_p;
arm_push_regs(regs);
ctx->code.write_p = write_p;
}
/* POP {regs} (Thumb); asserts the plugin never pops more than it pushed. */
void emit_thumb_pop(mambo_context *ctx, uint32_t regs) {
ctx->code.plugin_pushed_reg_count -= count_bits(regs);
assert(ctx->code.plugin_pushed_reg_count >= 0);
uint16_t *write_p = ctx->code.write_p;
thumb_pop_regs(&write_p, regs);
ctx->code.write_p = write_p;
}
/* POP {regs} (ARM); see the macro note on emit_arm_push above. */
void emit_arm_pop(mambo_context *ctx, uint32_t regs) {
ctx->code.plugin_pushed_reg_count -= count_bits(regs);
assert(ctx->code.plugin_pushed_reg_count >= 0);
uint32_t *write_p = ctx->code.write_p;
arm_pop_regs(regs);
ctx->code.write_p = write_p;
}
/* Indirect call (ARM): load function_ptr into LR, then BLX LR. */
void emit_arm_fcall(mambo_context *ctx, void *function_ptr) {
emit_arm_copy_to_reg_32bit(ctx, lr, (uint32_t)function_ptr);
emit_arm_blx(ctx, lr);
}
/* Indirect call (Thumb): load function_ptr into LR, then BLX LR. */
void emit_thumb_fcall(mambo_context *ctx, void *function_ptr) {
emit_thumb_copy_to_reg_32bit(ctx, lr, (uint32_t)function_ptr);
emit_thumb_blx16(ctx, lr);
}
/* rd = rn + (rm shifted), or rn - (|rm| shifted) when rm is negative.
 * Returns -1 on an invalid shift.  NOTE: 'shift < 0' is always false
 * for an unsigned parameter; kept for symmetry with the callers. */
static inline int emit_arm_add_sub_shift(mambo_context *ctx, int rd, int rn, int rm,
unsigned int shift_type, unsigned int shift) {
if (shift < 0 || shift > 31 || shift_type > ROR) {
return -1;
}
if (rm < 0) {
rm = -rm;
emit_arm_sub(ctx, REG_PROC, 0, rd, rn, rm | (shift_type << 5) | (shift << 7));
} else {
emit_arm_add(ctx, REG_PROC, 0, rd, rn, rm | (shift_type << 5) | (shift << 7));
}
return 0;
}
/* rd = rn +/- rm with no shift (ARM). */
static inline int emit_arm_add_sub(mambo_context *ctx, int rd, int rn, int rm) {
return emit_arm_add_sub_shift(ctx, rd, rn, rm, LSL, 0);
}
/* Thumb counterpart of emit_arm_add_sub_shift; the shift immediate is
 * split into its imm3 (shift >> 2) and imm2 fields by the encoder. */
static inline int emit_thumb_add_sub_shift(mambo_context *ctx, int rd, int rn, int rm,
unsigned int shift_type, unsigned int shift) {
if (shift < 0 || shift > 31 || shift_type > ROR) {
return -1;
}
if (rm < 0) {
rm = -rm;
emit_thumb_sub32(ctx, 0, rn, shift >> 2, rd, shift, shift_type, rm);
} else {
emit_thumb_add32(ctx, 0, rn, shift >> 2, rd, shift, shift_type, rm);
}
return 0;
}
/* rd = rn +/- rm with no shift (Thumb). */
static inline int emit_thumb_add_sub(mambo_context *ctx, int rd, int rn, int rm) {
return emit_thumb_add_sub_shift(ctx, rd, rn, rm, LSL, 0);
}
#endif // __arm__
#ifdef __aarch64__
/* Push the registers in the 'regs' bitmask (AArch64).  An odd count
 * pushes the highest register singly first, then the rest as STP pairs,
 * highest-numbered first.  NOTE(review): a64_push_reg/a64_push_pair_reg
 * take no write pointer, so they presumably are macros using the local
 * 'write_p' — confirm in the pie headers. */
void emit_a64_push(mambo_context *ctx, uint32_t regs) {
int reg_no = count_bits(regs);
ctx->code.plugin_pushed_reg_count += reg_no;
uint32_t *write_p = ctx->code.write_p;
uint32_t to_push[2];
if (reg_no & 1) {
reg_no = get_highest_n_regs(regs, to_push, 1);
assert(reg_no == 1);
a64_push_reg(to_push[0]);
regs &= ~(1 << to_push[0]);
}
while (regs != 0) {
reg_no = get_highest_n_regs(regs, to_push, 2);
assert(reg_no == 2);
a64_push_pair_reg(to_push[1], to_push[0]);
regs &= ~((1 << to_push[0]) | (1 << to_push[1]));
}
ctx->code.write_p = write_p;
}
/* Pop the registers in 'regs' (AArch64), lowest-numbered first in LDP
 * pairs with a single LDR for a trailing odd register — the mirror of
 * emit_a64_push's ordering. */
void emit_a64_pop(mambo_context *ctx, uint32_t regs) {
ctx->code.plugin_pushed_reg_count -= count_bits(regs);
assert(ctx->code.plugin_pushed_reg_count >= 0);
uint32_t *write_p = ctx->code.write_p;
uint32_t to_pop[2];
int reg_no;
while (regs != 0) {
reg_no = get_lowest_n_regs(regs, to_pop, 2);
assert(reg_no == 1 || reg_no == 2);
if (reg_no == 2) {
a64_pop_pair_reg(to_pop[0], to_pop[1]);
regs &= ~((1 << to_pop[0]) | (1 << to_pop[1]));
} else if (reg_no == 1) {
a64_pop_reg(to_pop[0]);
regs &= ~(1 << to_pop[0]);
}
}
ctx->code.write_p = write_p;
}
/* rd = rn + (rm shifted); a negative rm selects SUB with |rm|.  Returns
 * -1 for an invalid shift ('shift < 0' is always false for unsigned,
 * kept for symmetry). */
static inline int emit_a64_add_sub_shift(mambo_context *ctx, int rd, int rn, int rm,
unsigned int shift_type, unsigned int shift) {
if (shift < 0 || shift > 63 || shift_type > ASR) return -1;
int op = (rm < 0);
rm = abs(rm);
emit_a64_ADD_SUB_shift_reg(ctx, 1, op, 0, shift_type, rm, shift, rn, rd);
return 0;
}
/* rd = rn +/- rm with no shift (AArch64). */
static inline int emit_a64_add_sub(mambo_context *ctx, int rd, int rn, int rm) {
return emit_a64_add_sub_shift(ctx, rd, rn, rm, LSL, 0);
}
/* Extended-register ADD/SUB: rd = rn +/- extend(rm) << shift, with
 * shift limited to 0..4 per the A64 encoding. */
int emit_a64_add_sub_ext(mambo_context *ctx, int rd, int rn, int rm, int ext_option, int shift) {
int op = (rm < 0);
rm = abs(rm);
if (shift > 4 || shift < 0) return -1;
emit_a64_ADD_SUB_ext_reg(ctx, 1, op, 0, rm, ext_option, shift, rn, rd);
return 0;
}
#endif
/* ISA-dispatching push: ARM/Thumb selected by the current instruction
 * set on arm, A64 otherwise. */
void emit_push(mambo_context *ctx, uint32_t regs) {
#ifdef __arm__
inst_set isa = mambo_get_inst_type(ctx);
if (isa == ARM_INST) {
emit_arm_push(ctx, regs);
} else {
emit_thumb_push(ctx, regs);
}
#elif __aarch64__
emit_a64_push(ctx, regs);
#endif
}
/* ISA-dispatching pop; asserts the plugin's push/pop balance first. */
void emit_pop(mambo_context *ctx, uint32_t regs) {
assert(ctx->code.plugin_pushed_reg_count >= 0);
#ifdef __arm__
inst_set isa = mambo_get_inst_type(ctx);
if (isa == ARM_INST) {
emit_arm_pop(ctx, regs);
} else {
emit_thumb_pop(ctx, regs);
}
#elif __aarch64__
emit_a64_pop(ctx, regs);
#endif
}
/* Load an arbitrary immediate into reg: 32-bit sequences on arm
 * (ARM/Thumb by current ISA), 64-bit MOVZ/MOVK sequence on aarch64. */
void emit_set_reg(mambo_context *ctx, enum reg reg, uintptr_t value) {
#ifdef __arm__
inst_set isa = mambo_get_inst_type(ctx);
if (isa == ARM_INST) {
emit_arm_copy_to_reg_32bit(ctx, reg, value);
} else {
emit_thumb_copy_to_reg_32bit(ctx, reg, value);
}
#elif __aarch64__
a64_copy_to_reg_64bits((uint32_t **)&ctx->code.write_p, reg, value);
#endif
}
/* Encode one direct branch at 'write' targeting 'target'. 'cond' selects a
 * conditional encoding (AL = unconditional); 'link' selects a call (BL/BLX).
 * Conditional calls are rejected. Returns 0 on success or -1 when the
 * displacement does not fit the encoding or the combination is unsupported.
 * Does not advance any write pointer - callers do that themselves. */
int __emit_branch_cond(inst_set inst_type, void *write, uintptr_t target, mambo_cond cond, bool link) {
intptr_t diff = (target & (~THUMB)) - (uintptr_t)write;
if (cond != AL && link) return -1;
#ifdef __arm__
switch (inst_type) {
case THUMB_INST:
diff -= 4; // Thumb: PC reads as instruction address + 4
if (cond == AL) {
bool to_arm = link && !(target & THUMB); // interworking call needs BLX
target &= ~THUMB;
if (diff < -16777216 || diff > 16777214) return -1; // +/-16 MiB B/BL range
thumb_b_bl_helper(write, target, link, to_arm);
} else {
if (diff < -1048576 || diff > 1048574) return -1; // +/-1 MiB B<cond>.W range
void *write_c = write;
thumb_b32_cond_helper((uint16_t **)&write, target, cond);
assert((write_c + 4) == write);
}
break;
case ARM_INST:
if (target & THUMB) return -1; // A32 B/BL cannot change instruction set
diff -= 8; // A32: PC reads as instruction address + 8
if (diff < -33554432 || diff > 33554428) return -1; // +/-32 MiB range
arm_branch_helper(write, target, link, cond);
break;
default:
return -1;
}
#endif
#ifdef __aarch64__
if (cond == AL) {
if (diff < -134217728 || diff > 134217724) return -1; // +/-128 MiB B/BL range
a64_branch_helper(write, target, link);
//a64_b_helper(write, target);
} else {
if (diff < -1048576 || diff > 1048572) return -1; // +/-1 MiB B.cond range
a64_b_cond_helper(write, target, cond);
}
#endif
return 0;
}
/* Emit a call to function_ptr. Tries a direct immediate BL first; if the
 * target is out of branch range, falls back to loading the address into LR
 * and calling through the register (which clobbers LR before the call). */
void emit_fcall(mambo_context *ctx, void *function_ptr) {
// First try an immediate call, and if that is out of range then generate an indirect call
int ret = __emit_branch_cond(ctx->code.inst_type, ctx->code.write_p, (uintptr_t)function_ptr, AL, true);
if (ret == 0) return;
emit_set_reg(ctx, lr, (uintptr_t)function_ptr);
#ifdef __arm__
inst_set type = mambo_get_inst_type(ctx);
if (type == ARM_INST) {
emit_arm_blx(ctx, lr);
} else {
emit_thumb_blx16(ctx, lr);
}
#elif __aarch64__
emit_a64_BLR(ctx, lr);
#endif
}
/* Emit a call to function_ptr via the safe_fcall_trampoline, preserving the
 * caller-saved registers that are not used to pass the 'argno' arguments.
 * The function pointer travels in register number MAX_FCALL_ARGS (the first
 * register past the argument registers). Returns -1 if argno is too large.
 * NOTE(review): the mask arithmetic assumes argument registers are r0..
 * r(argno-1) - confirm against MAX_FCALL_ARGS's definition. */
int emit_safe_fcall(mambo_context *ctx, void *function_ptr, int argno) {
uintptr_t to_push = (1 << lr);
#ifdef __arm__
to_push |= (1 << r0) | (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4);
#elif __aarch64__
to_push |= 0x1FF; // x0-x8
#endif
if (argno > MAX_FCALL_ARGS) return -1;
// Drop the registers already holding live arguments from the save set.
to_push &= ~(((1 << MAX_FCALL_ARGS)-1) >> (MAX_FCALL_ARGS - argno));
emit_push(ctx, to_push);
emit_set_reg_ptr(ctx, MAX_FCALL_ARGS, function_ptr);
emit_fcall(ctx, safe_fcall_trampoline);
emit_pop(ctx, to_push);
return 0;
}
/* Emit a safe call to fptr with 'argno' compile-time-known arguments: the
 * argument registers are saved, loaded with the immediates from the varargs
 * list, the trampoline call is emitted, and the registers are restored.
 * Returns -1 when argno is out of range. */
int emit_safe_fcall_static_args(mambo_context *ctx, void *fptr, int argno, ...) {
va_list args;
uint32_t reglist = 0;
if (argno > MAX_FCALL_ARGS || argno < 0) return -1;
if (argno > 0) {
reglist = 0xFF >> (8-argno); // registers 0 .. argno-1
emit_push(ctx, reglist);
va_start(args, argno);
for (int a = 0; a < argno; a++) {
emit_set_reg(ctx, a, va_arg(args, uintptr_t));
}
va_end(args);
}
emit_safe_fcall(ctx, fptr, argno);
if (argno > 0) {
emit_pop(ctx, reglist);
}
return 0;
}
/* Emit a register-to-register move rd := rn. On AArch64, SP cannot be an
 * operand of ORR, so moves from SP are encoded as ADD rd, sp, #0; all other
 * moves use ORR rd, xzr, rn. */
void emit_mov(mambo_context *ctx, enum reg rd, enum reg rn) {
#ifdef __arm__
assert(rd >= 0 && rd < pc && rn >= 0 && rn < pc);
if (mambo_get_inst_type(ctx) == THUMB_INST) {
emit_thumb_movh16(ctx, rd >> 3, rn, rd);
} else {
emit_arm_mov(ctx, REG_PROC, 0, rd, rn);
}
#elif __aarch64__
if (rn == sp) {
emit_a64_ADD_SUB_immed(ctx, 1, 0, 0, 0, 0, rn, rd);
} else {
emit_a64_logical_reg(ctx, 1, 1, 0, 0, rn, 0, 0x1F, rd);
}
#endif
}
/* Helpers for emit_add_sub_i: each ISA encodes an "immediate, optionally
 * shifted" ADD/SUB. A32 packs an 8-bit immediate with an even rotate;
 * A64 takes a 12-bit immediate optionally shifted left by 12. */
#ifdef __arm__
#define SHIFTED_ADD_SUB_I_BITS 8
/* A32 encoding: rotate field is (32 - shift) / 2, so shift must be even. */
#define _emit_add_shift_imm(rd, rn, offset, shift) \
assert((shift & 1) == 0); \
emit_arm_add(ctx, IMM_PROC, 0, rd, rn, ((16 - (shift / 2)) << 8) | offset);
#define _emit_sub_shift_imm(rd, rn, offset, shift) \
assert((shift & 1) == 0); \
emit_arm_sub(ctx, IMM_PROC, 0, rd, rn, ((16 - (shift / 2)) << 8) | offset);
#elif __aarch64__
#define SHIFTED_ADD_SUB_I_BITS 12
/* A64 encoding: the sh bit selects LSL #12 for the 12-bit immediate. */
#define _emit_add_shift_imm(rd, rn, offset, shift) \
assert((shift) == 0 || (shift) == 12); \
emit_a64_ADD_SUB_immed(ctx, 1, 0, 0, (shift == 12), (offset), (rn), (rd));
#define _emit_sub_shift_imm(rd, rn, offset, shift) \
assert((shift) == 0 || (shift) == 12); \
emit_a64_ADD_SUB_immed(ctx, 1, 1, 0, (shift == 12), (offset), (rn), (rd));
#endif
/* Mask of the low immediate field, and the largest magnitude expressible
 * as (low field) + (high field << SHIFTED_ADD_SUB_I_BITS). */
#define SHIFTED_ADD_SUB_I_MASK ((1 << SHIFTED_ADD_SUB_I_BITS) - 1)
#define SHIFTED_ADD_SUB_MAX (SHIFTED_ADD_SUB_I_MASK | (SHIFTED_ADD_SUB_I_MASK << SHIFTED_ADD_SUB_I_BITS))
/* Emit rd := rn + offset (offset may be negative). A zero offset becomes a
 * plain move (or nothing if rd == rn). Otherwise the magnitude is split into
 * a low and a shifted-high immediate, emitting one or two ADD/SUB
 * instructions. Returns -1 when the offset cannot be encoded. */
int emit_add_sub_i(mambo_context *ctx, int rd, int rn, int offset) {
if (offset == 0) {
if (rd != rn) {
emit_mov(ctx, rd, rn);
return 0;
}
} else {
#ifdef __arm__
inst_set isa = mambo_get_inst_type(ctx);
if (isa == THUMB_INST) {
// T32 ADDW/SUBW take a 12-bit immediate split across i:imm3:imm8 fields.
if (offset > 0xFFF || offset < -0xFFF) return -1;
if (offset < 0) {
offset = -offset;
emit_thumb_subwi32(ctx, offset >> 11, rn, offset >> 8, rd, offset);
} else {
emit_thumb_addwi32(ctx, offset >> 11, rn, offset >> 8, rd, offset);
}
return 0;
}
#endif
if (offset < -SHIFTED_ADD_SUB_MAX || offset > SHIFTED_ADD_SUB_MAX) return -1;
if (offset < 0) {
offset = -offset;
// Low part first; the second instruction then chains off rd.
if (offset & SHIFTED_ADD_SUB_I_MASK) {
_emit_sub_shift_imm(rd, rn, offset & SHIFTED_ADD_SUB_I_MASK, 0);
rn = rd;
}
if (offset & (SHIFTED_ADD_SUB_I_MASK << SHIFTED_ADD_SUB_I_BITS)) {
_emit_sub_shift_imm(rd, rn, offset >> SHIFTED_ADD_SUB_I_BITS, SHIFTED_ADD_SUB_I_BITS);
}
} else {
if (offset & SHIFTED_ADD_SUB_I_MASK) {
_emit_add_shift_imm(rd, rn, offset & SHIFTED_ADD_SUB_I_MASK, 0);
rn = rd;
}
if (offset & (SHIFTED_ADD_SUB_I_MASK << SHIFTED_ADD_SUB_I_BITS)) {
_emit_add_shift_imm(rd, rn, offset >> SHIFTED_ADD_SUB_I_BITS, SHIFTED_ADD_SUB_I_BITS);
}
}
} // offset != 0
return 0;
}
/* ISA dispatch for rd := rn +/- (rm <shift_type> #shift); negative rm
 * selects subtraction. Returns the backend's status (0 on success). */
inline int emit_add_sub_shift(mambo_context *ctx, int rd, int rn, int rm,
unsigned int shift_type, unsigned int shift) {
#ifdef __arm__
if (mambo_get_inst_type(ctx) == THUMB_INST) {
return emit_thumb_add_sub_shift(ctx, rd, rn, rm, shift_type, shift);
} else {
return emit_arm_add_sub_shift(ctx, rd, rn, rm, shift_type, shift);
}
#elif __aarch64__
return emit_a64_add_sub_shift(ctx, rd, rn, rm, shift_type, shift);
#endif
}
/* Convenience wrapper: rd := rn +/- rm with no shift (LSL #0). */
inline int emit_add_sub(mambo_context *ctx, int rd, int rn, int rm) {
return emit_add_sub_shift(ctx, rd, rn, rm, LSL, 0);
}
/* Emit a conditional branch to 'target' at the current code-cache position
 * and advance the write pointer past it on success. All encodings emitted by
 * __emit_branch_cond here are 4 bytes (void* arithmetic is a GCC extension). */
int emit_branch_cond(mambo_context *ctx, void *target, mambo_cond cond) {
void *write_p = mambo_get_cc_addr(ctx);
int ret = __emit_branch_cond(mambo_get_inst_type(ctx), write_p, (uintptr_t)target, cond, false);
if (ret == 0) {
mambo_set_cc_addr(ctx, write_p + 4);
}
return ret;
}
/* Emit an unconditional branch to 'target' at the current position. */
int emit_branch(mambo_context *ctx, void *target) {
return emit_branch_cond(ctx, target, AL);
}
/* Encode a CBZ/CBNZ on 'reg' at 'write_p' targeting 'target'. Supported on
 * AArch64 and on AArch32 only in Thumb mode (the A32 path falls through and
 * returns -1). Returns the helper's status, or -1 when unsupported. */
int __emit_branch_cbz_cbnz(mambo_context *ctx, void *write_p, void *target, enum reg reg, bool is_cbz) {
int ret = -1;
#ifdef __aarch64__
ret = a64_cbz_cbnz_helper((uint32_t *)write_p, !is_cbz, (uint64_t)target, 1, reg);
#elif __arm__
if (mambo_get_inst_type(ctx) == THUMB_INST) {
ret = thumb_cbz_cbnz_helper((uint16_t *)write_p, (uint32_t)target, reg, is_cbz);
}
#endif
return ret;
}
/* Emit a CBZ/CBNZ at the current position and, on success, advance the write
 * pointer by the instruction size (4 bytes on A64, 2 for the Thumb CB{N}Z). */
int emit_branch_cbz_cbnz(mambo_context *ctx, void *target, enum reg reg, bool is_cbz) {
void *write_p = mambo_get_cc_addr(ctx);
int ret = __emit_branch_cbz_cbnz(ctx, write_p, target, reg, is_cbz);
if (ret == 0) {
#ifdef __aarch64__
mambo_set_cc_addr(ctx, write_p + 4);
#elif __arm__
mambo_set_cc_addr(ctx, write_p + 2);
#endif
}
return ret;
}
/* Emit a compare-and-branch-if-zero on 'reg' to 'target'. */
int emit_branch_cbz(mambo_context *ctx, void *target, enum reg reg) {
return emit_branch_cbz_cbnz(ctx, target, reg, true);
}
/* Emit a compare-and-branch-if-not-zero on 'reg' to 'target'. */
int emit_branch_cbnz(mambo_context *ctx, void *target, enum reg reg) {
return emit_branch_cbz_cbnz(ctx, target, reg, false);
}
/* Reserve 'incr' bytes at the current code-cache write position, recording
 * the reserved location in br->loc. Returns 0 on success, -1 when there is
 * no active write pointer. */
int __mambo_reserve(mambo_context *ctx, mambo_branch *br, size_t incr) {
  if (ctx->code.write_p == NULL) {
    return -1;
  }
  br->loc = ctx->code.write_p;
  ctx->code.write_p += incr;
  return 0;
}
/* Reserve space for one 4-byte branch instruction. */
int mambo_reserve_branch(mambo_context *ctx, mambo_branch *br) {
return __mambo_reserve(ctx, br, 4);
}
/* Reserve space for a CBZ/CBNZ: 2 bytes for the Thumb encoding, 4 bytes on
 * AArch64. Returns -1 on A32, where CB{N}Z is not emitted (see
 * __emit_branch_cbz_cbnz). */
int mambo_reserve_branch_cbz(mambo_context *ctx, mambo_branch *br) {
#ifdef __arm__
if (mambo_get_inst_type(ctx) == THUMB_INST) {
return __mambo_reserve(ctx, br, 2);
}
return -1;
#endif
return __mambo_reserve(ctx, br, 4);
}
/* Back-patch a previously reserved slot (br->loc) with a branch whose target
 * is the CURRENT write position; in Thumb mode the target address carries the
 * THUMB (bit 0) marker. */
int __emit_local_branch(mambo_context *ctx, mambo_branch *br, mambo_cond cond, bool link) {
uintptr_t target = (uintptr_t)mambo_get_cc_addr(ctx);
#ifdef __arm__
if (ctx->code.inst_type == THUMB_INST) {
target |= THUMB;
}
#endif
return __emit_branch_cond(mambo_get_inst_type(ctx), br->loc, target, cond, link);
}
/* Patch a reserved slot with a conditional branch to the current position. */
int emit_local_branch_cond(mambo_context *ctx, mambo_branch *br, mambo_cond cond) {
return __emit_local_branch(ctx, br, cond, false);
}
/* Patch a reserved slot with an unconditional branch to the current position. */
int emit_local_branch(mambo_context *ctx, mambo_branch *br) {
return __emit_local_branch(ctx, br, AL, false);
}
/* Patch a reserved slot with a call (BL) to the current position. */
int emit_local_fcall(mambo_context *ctx, mambo_branch *br) {
return __emit_local_branch(ctx, br, AL, true);
}
/* Patch a reserved slot with a CBZ/CBNZ targeting the current position. */
int emit_local_branch_cbz_cbnz(mambo_context *ctx, mambo_branch *br, enum reg reg, bool is_cbz) {
return __emit_branch_cbz_cbnz(ctx, br->loc, mambo_get_cc_addr(ctx), reg, is_cbz);
}
/* Patch a reserved slot with a CBZ targeting the current position. */
int emit_local_branch_cbz(mambo_context *ctx, mambo_branch *br, enum reg reg) {
return emit_local_branch_cbz_cbnz(ctx, br, reg, true);
}
/* Patch a reserved slot with a CBNZ targeting the current position. */
int emit_local_branch_cbnz(mambo_context *ctx, mambo_branch *br, enum reg reg) {
return emit_local_branch_cbz_cbnz(ctx, br, reg, false);
}
/* Emit code that atomically-with-respect-to-flags increments the 64-bit
 * counter at 'counter' by 'incr'. All scratch registers used are saved and
 * restored around the sequence, so the instrumented code is undisturbed.
 * NOTE(review): the load/add/store is not atomic across threads - callers
 * presumably use per-thread counters; confirm before sharing one counter. */
void emit_counter64_incr(mambo_context *ctx, void *counter, unsigned incr) {
#ifdef __arm__
/* On AArch32 we use NEON rather than ADD and ADC to avoid having to save
and restore the PSR register, which is slow.
VPUSH {D0, D1}
PUSH {R0}
MOV{W,T} R0, counter
VLDR D1, [R0]
VMOV.I32 D0, #incr
VSHR.U64 D0, D0, #32
VADD.I64 D0, D1, D0
VSTR D0, [R0]
POP {R0}
VPOP {D0, D1}
*/
assert(incr <= 255); // VMOV.I32 immediate limit
switch(mambo_get_inst_type(ctx)) {
case THUMB_INST: {
emit_thumb_vfp_vpush(ctx, 1, 0, 0, 4);
emit_thumb_push(ctx, 1 << r0);
emit_thumb_copy_to_reg_32bit(ctx, r0, (uintptr_t)counter);
emit_thumb_vfp_vldr_dp(ctx, 1, r0, 0, 1, 0);
emit_thumb_neon_vmovi(ctx, 0, 0, 0, 0, 0, incr >> 7, incr >> 4, incr);
emit_thumb_neon_vshr(ctx, 1, 0, 0, 0, 0, 0, 1, 32);
emit_thumb_neon_vadd_i(ctx, 3, 0, 0, 0, 0, 1, 0, 0);
emit_thumb_vfp_vstr_dp(ctx, 1, 0, r0, 0, 0);
emit_thumb_pop(ctx, 1 << r0);
emit_thumb_vfp_vpop(ctx, 1, 0, 0, 4);
break;
}
case ARM_INST:
emit_arm_vfp_vpush_dp(ctx, 0, 0, 4);
emit_arm_push(ctx, (1 << r0));
emit_arm_copy_to_reg_32bit(ctx, r0, (uintptr_t)counter);
emit_arm_vfp_vldr_dp(ctx, 1, 0, r0, 1, 0);
emit_arm_neon_vmovi(ctx, 0, 0, 0, 0, 0, incr >> 7, incr >> 4, incr);
emit_arm_neon_vshr(ctx, 1, 0, 0, 0, 0, 0, 1, 32);
emit_arm_neon_vadd_i(ctx, 3, 0, 0, 0, 0, 1, 0, 0);
emit_arm_vfp_vstr_dp(ctx, 1, 0, r0, 0, 0);
emit_arm_pop(ctx, (1 << r0));
emit_arm_vfp_vpop_dp(ctx, 0, 0, 4);
break;
}
#endif
#ifdef __aarch64__
// A64: X0 holds the counter address, X1 the loaded value; LDR / ADD #incr / STR.
assert(incr <= 0xFFF); // ADD immediate field limit
emit_a64_push(ctx, (1 << x0) | (1 << x1));
a64_copy_to_reg_64bits((uint32_t **)&ctx->code.write_p, x0, (uintptr_t)counter);
emit_a64_LDR_STR_unsigned_immed(ctx, 3, 0, 1, 0, x0, x1);
emit_a64_ADD_SUB_immed(ctx, 1, 0, 0, 0, incr, x1, x1);
emit_a64_LDR_STR_unsigned_immed(ctx, 3, 0, 0, 0, x0, x1);
emit_a64_pop(ctx, (1 << x0) | (1 << x1));
#endif
}
#endif
|
the_stack_data/23576290.c
|
#include <stdio.h>
#include <unistd.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#define clear() printf("\033[H\033[J")
pthread_t ptidTracker;
/* Discard buffered stdin input up to and including the next newline (or EOF),
 * so a following getchar()/fgets() starts on a fresh line. */
void clearStdin(){
  for (int ch = getchar(); ch != '\n' && ch != EOF; ch = getchar()) {
    /* discard */
  }
}//end clearStdin
void* trackAndKill() {
FILE *fp;
int pids[50];
char popenCommand[256];
int i = 0;
pids[i] = (int) getpid();
int pid;
char watchOutput[50][256];
strcpy(watchOutput[i]," PID CMD STARTED");
char saveOutput[256];
int j;
int z;
char line[100];
int loopCount = 0;
int stop;
bool found = false;
while (loopCount < 5) {
for (z = 0; z <= i; z++) {
sprintf(popenCommand,"/bin/ps --ppid %d -o pid,cmd,lstart",pids[z]);
fp = popen(popenCommand, "r");
while (fgets(line, sizeof(line), fp) != NULL) {
if(strstr(line, "ps --ppid") == NULL && strstr(line, "PID CMD") == NULL && strstr(line, "defunct") == NULL) {
strcpy(saveOutput,line);
pid = atoi(strtok(line, " "));
for(j = 0; j <= i; j++) {
if(pids[j] == pid)
found = true;
}//end for
if (!found) {
i++;
strcpy(watchOutput[i],saveOutput);
pids[i] = pid;
}//end if
found = false;
}//end if
}//end while
}//end for
loopCount++;
if (loopCount == 5) {
printf("\nContinue (y/n)? ");
stop = getchar();
getchar();
if (stop == 'y')
loopCount = 0;
}//end if
}//end while
printf("\nChild Processes:\n");
for (z = 0; z <= i; z++) {
printf("%s\n",watchOutput[z]);
}//end for
char killCommand[100];
for(z = 1; z <= i; z++) {
sprintf(killCommand,"kill -9 %d >/dev/null 2>&1",pids[z]);
printf("Killing: %d\n",pids[z]);
system(killCommand);
}//end for
sleep(5);
pthread_exit(NULL);
}//end trackAndKill
/* Interactive menu demonstrating three command-execution procedures
 * (sh via system(), Python via popen(), Perl via system()); each run is
 * watched by the trackAndKill thread, which then kills spawned children.
 * Fixes vs. the original:
 *  - the Perl command was built as  perl -e "system("%s")"  - the shell
 *    tears the nested double quotes apart; the -e program is now wrapped
 *    in single quotes;
 *  - pyCommand/plCommand are built with snprintf into buffers large enough
 *    for a full 255-char command (sprintf could overflow 256 bytes);
 *  - popen() is checked for NULL before reading;
 *  - command[len-1] is no longer read when the input line is empty. */
int main() {
  bool go = true;
  int input;
  bool loop = true;
  char command[256];
  int confirm;
  while (go) {
    clear();
    input = 0;
    printf("ezEmu: now with more ELF on the shelf\n\n");
    printf("We are running as PID: %d\n\n",getpid());
    printf("\t[1] sh via the system() function (T1059.004)\n\t[2] Python via the popen() function (T1059.006)\n\t[3] Perl via the system() function (T1059)\n\n");
    printf("Select an execution procedure (or 0 to exit): ");
    scanf("%d", &input);
    getchar();
    loop = true;
    switch (input) {
      case 1:
        while (loop) {
          clear();
          printf("sh via system() execution\n\n");
          printf("Enter a command to execute (or quit): ");
          fgets(command, 256, stdin);
          if (strcmp(command,"quit\n") != 0) {
            int len = strlen(command);
            if(len > 0 && command[len-1]=='\n')
              command[len-1]='\0';
            printf("\nAre you sure you want to execute %s with sh (y/n)? ", command);
            confirm = getchar();
            getchar();
            if (confirm == 'y') {
              clear();
              pthread_t ptidTracker;
              printf("\nCommand Output:\n\n");
              pthread_create(&ptidTracker, NULL, &trackAndKill, NULL);
              system(command);
              printf("\n\n");
              pthread_join(ptidTracker, NULL);
            }//end if
          }//end if
          else {
            loop = false;
            strcpy(command, "");
            break;
          }//end else
        }//end while
        break;
      case 2:
        while (loop) {
          clear();
          printf("Python via the popen() execution\n\n");
          printf("Enter a command to execute (or quit): ");
          fgets(command, 256, stdin);
          if (strcmp(command,"quit\n") != 0) {
            int len = strlen(command);
            if(len > 0 && command[len-1]=='\n')
              command[len-1]='\0';
            printf("\nAre you sure you want to execute %s with Python (y/n)? ", command);
            confirm = getchar();
            getchar();
            if (confirm == 'y') {
              clear();
              pthread_t ptidTracker;
              printf("\nCommand Output:\n\n");
              pthread_create(&ptidTracker, NULL, &trackAndKill, NULL);
              char pyCommand[512];
              snprintf(pyCommand, sizeof(pyCommand), "python -c \"import os;os.system('%s')\"", command);
              FILE *fp;
              char output[1035];
              fp = popen(pyCommand, "r");
              if (fp != NULL) {
                while (fgets(output, sizeof(output), fp) != NULL) {
                  printf("%s", output);
                }//end while
                pclose(fp);
              }//end if
              printf("\n\n");
              pthread_join(ptidTracker, NULL);
            }//end if
          }//end if
          else {
            loop = false;
            strcpy(command, "");
            break;
          }//end else
        }//end while
        break;
      case 3:
        while (loop) {
          clear();
          printf("Perl via the system() execution\n\n");
          printf("Enter a file, with no args, to execute (or quit): ");
          fgets(command, 256, stdin);
          if (strcmp(command,"quit\n") != 0) {
            int len = strlen(command);
            if(len > 0 && command[len-1]=='\n')
              command[len-1]='\0';
            printf("\nAre you sure you want to execute %s with Perl (y/n)? ", command);
            confirm = getchar();
            getchar();
            if (confirm == 'y') {
              clear();
              pthread_t ptidTracker;
              printf("\nCommand Output:\n\n");
              pthread_create(&ptidTracker, NULL, &trackAndKill, NULL);
              char plCommand[512];
              // Single-quote the -e program so the shell passes the inner
              // double quotes through to Perl intact.
              snprintf(plCommand, sizeof(plCommand), "perl -e 'system(\"%s\")'", command);
              system(plCommand);
              printf("\n\n");
              pthread_join(ptidTracker, NULL);
            }//end if
          }//end if
          else {
            loop = false;
            strcpy(command, "");
            break;
          }//end else
        }//end while
        break;
      case 0:
        go = false;
        clear();
        printf("\nThanks for playing\n\n");
        sleep(3);
        break;
      default:
        printf("Bad input, try again.\n");
        clearStdin();
        sleep(1);
        break;
    }//end switch
  }//end while
  return 0;
}//end main
|
the_stack_data/150140038.c
|
#include <assert.h>
#include <limits.h>
#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
char* readline();
// Complete the factorial function below.
// Complete the factorial function below.
/* Compute n! recursively. The base case covers n <= 1 so that
 * factorial(0) == 1 and non-positive inputs terminate (the original's
 * 'n == 1' test recursed past zero forever for n <= 0).
 * Note: the int result overflows for n > 12. */
int factorial(int n) {
  if (n <= 1)
    return 1;
  return factorial(n - 1) * n;
}
/* HackerRank harness: read one integer from stdin (via readline), validate
 * it fully with strtol, and write factorial(n) to the file named by the
 * OUTPUT_PATH environment variable.
 * NOTE(review): getenv() and fopen() results are unchecked; a missing
 * OUTPUT_PATH makes fopen(NULL, ...) / fprintf(NULL, ...) crash. */
int main()
{
FILE* fptr = fopen(getenv("OUTPUT_PATH"), "w");
char* n_endptr;
char* n_str = readline();
int n = strtol(n_str, &n_endptr, 10);
// Reject empty input and trailing garbage after the number.
if (n_endptr == n_str || *n_endptr != '\0') { exit(EXIT_FAILURE); }
int result = factorial(n);
fprintf(fptr, "%d\n", result);
fclose(fptr);
return 0;
}
/* Read one line of arbitrary length from stdin.
 * Returns a heap-allocated, NUL-terminated string with any trailing newline
 * stripped; the caller owns (and frees) the buffer. Returns NULL only on
 * allocation failure. Fixes vs. the original boilerplate:
 *  - malloc/realloc results are checked (the original dereferenced NULL);
 *  - empty input no longer reads data[-1];
 *  - the final shrinking realloc keeps room for the NUL terminator (the
 *    original shrank the buffer to strlen bytes, cutting the '\0' off). */
char* readline() {
  size_t alloc_length = 1024;
  size_t data_length = 0;
  char* data = malloc(alloc_length);
  if (!data) { return NULL; }
  data[0] = '\0';
  while (true) {
    char* cursor = data + data_length;
    char* line = fgets(cursor, alloc_length - data_length, stdin);
    if (!line) { break; }
    data_length += strlen(cursor);
    // Done when the buffer was not filled or the line ended in a newline.
    if (data_length < alloc_length - 1 || data[data_length - 1] == '\n') { break; }
    size_t new_length = alloc_length << 1;
    char* grown = realloc(data, new_length);
    if (!grown) { break; }   // keep what we have; 'data' is still valid
    data = grown;
    alloc_length = new_length;
  }
  if (data_length > 0 && data[data_length - 1] == '\n') {
    data[--data_length] = '\0';
  }
  // Shrink to exactly length + terminator; on failure keep the larger buffer.
  char* shrunk = realloc(data, data_length + 1);
  return shrunk ? shrunk : data;
}
|
the_stack_data/43886879.c
|
#include <stdio.h>
#include <unistd.h>
/* Print the number of online logical CPUs on the host and - when built with
 * the Intel offload compiler - on the MIC coprocessor (the pragma is ignored
 * by other compilers). */
int main() {
  // sysconf() returns long, so the conversion must be %ld; the original's
  // %d is undefined behavior where long is wider than int (LP64).
  printf("Host have %ld logical cores.\n", sysconf(_SC_NPROCESSORS_ONLN));
  fflush(0);
#pragma offload target(mic)
  {
    printf("Co-Processor have %ld logical cores.\n",
           sysconf(_SC_NPROCESSORS_ONLN ));
    fflush(0);
  }
  return 0;
}
|
the_stack_data/179830953.c
|
// PARAM: --set ana.activated "['base','threadid','threadflag','escape','uninit','mallocWrapper']" --set exp.privatization none
typedef struct {
int i, j;
} S;
/* Goblint analyzer regression test: xx.j is deliberately left uninitialized
 * so the 'uninit' analysis should warn when the partially-initialized struct
 * is returned (the //WARN marker encodes that expectation).
 * Do not "fix" the missing initialization - it is the test. */
S some_function(){
S xx;
xx.i = 42;
return xx; //WARN
}
/* Uses the struct returned by some_function; the //NOWARN marker records
 * that the analyzer is expected NOT to warn at this read. */
int main(){
S ss;
ss = some_function();
return ss.j; //NOWARN
}
|
the_stack_data/74816.c
|
/* Copyright (C) 1999, 2000 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#include <stdio.h>
#include <stdlib.h>
#include <wctype.h>
int
main (int argc, char *argv[])
{
int result = 0;
wint_t ch;
for (ch = 0; ch < 128; ++ch)
{
if (iswlower (ch))
{
/* Get corresponding upper case character. */
wint_t up = towupper (ch);
/* This should have no effect. */
wint_t low = towlower (ch);
if ((ch != low) || (up == ch) || (up == low))
{
printf ("iswlower/towupper/towlower for character \\%x failed\n", ch);
result++;
}
}
if (iswupper (ch))
{
/* Get corresponding lower case character. */
wint_t low = towlower (ch);
/* This should have no effect. */
wint_t up = towupper (ch);
if ((ch != up) || (low == ch) || (up == low))
{
printf ("iswupper/towlower/towupper for character \\%x failed\n", ch);
result++;
}
}
}
/* Finally some specific tests. */
ch = L'A';
if (!iswupper (ch) || iswlower (ch))
{
printf ("!iswupper/iswlower (L'A') failed\n");
result++;
}
ch = L'a';
if (iswupper (ch) || !iswlower (ch))
{
printf ("iswupper/!iswlower (L'a') failed\n");
result++;
}
if (towlower (L'A') != L'a')
{
printf ("towlower(L'A') failed\n");
result++;
}
if (towupper (L'a') != L'A')
{
printf ("towupper(L'a') failed\n");
result++;
}
if (result == 0)
puts ("All test successful!");
return result != 0;
}
|
the_stack_data/159514627.c
|
/*
* Copyright 2004, 2005, 2006 PathScale, Inc. All Rights Reserved.
*/
/*
Copyright (C) 1999-2001, Silicon Graphics, Inc. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of version 2.1 of the GNU Lesser General Public License
as published by the Free Software Foundation.
This program is distributed in the hope that it would be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. Further, any
license provided herein, whether implied or otherwise, is limited to
this program in accordance with the express provisions of the
GNU Lesser General Public License.
Patent licenses, if any, provided herein do not apply to combinations
of this program with other product or programs, or any other product
whatsoever. This program is distributed without any warranty that the
program is delivered free of the rightful claim of any third person by
way of infringement or the like.
See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write the Free Software Foundation, Inc., 59
Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
/* $Header: /proj/osprey/CVS/open64/osprey1.0/libU77/ltime_.c,v 1.1.1.1 2005/10/21 19:00:00 marcel Exp $ */
/*
*
* return broken down time
*
* calling sequence:
* integer time, t[9]
* call ltime(time, t)
* where:
* time is a system time. (see time(3F))
* t will receive the broken down time corrected for local timezone.
* (see ctime(3))
*/
#include <time.h>
#ifdef KEY /* Bug 1683, 5019 */
#include "pathf90_libU_intrin.h"
/* Fortran-callable LTIME: break *clock (seconds since the epoch) into the
 * local-time component array t[0..8] = {sec, min, hour, mday, mon, year,
 * wday, yday, isdst}, the layout documented in ctime(3).
 * Changes vs. the original: the unused local 'i' is removed, and *clock is
 * widened into a real time_t instead of casting pathf90_i4* to time_t*,
 * which read past the 4-byte Fortran integer where time_t is 64-bit. */
void
pathf90_ltime(pathf90_i4 *clock, pathf90_i4 *t)
{
  struct tm temp;
  time_t when = (time_t) *clock;
  localtime_r(&when, &temp);
  t[0] = temp.tm_sec;
  t[1] = temp.tm_min;
  t[2] = temp.tm_hour;
  t[3] = temp.tm_mday;
  t[4] = temp.tm_mon;
  t[5] = temp.tm_year;
  t[6] = temp.tm_wday;
  t[7] = temp.tm_yday;
  t[8] = temp.tm_isdst;
}
#else
/* Fortran-callable LTIME (legacy entry point): break the system time *clock
 * into the local-time array t[0..8] = {sec, min, hour, mday, mon, year,
 * wday, yday, isdst}. Fixes vs. the original:
 *  - copies the named struct tm fields instead of walking the struct through
 *    an int*, which assumed struct tm begins with exactly nine consecutive
 *    ints (true on glibc, but unspecified by C/POSIX);
 *  - widens *clock into a real time_t first - casting int* to time_t* read
 *    8 bytes from a 4-byte object on LP64 systems. */
extern void
ltime_ (int *clock, int *t)
{
  time_t when = (time_t) *clock;
  struct tm *lt = localtime (&when);
  if (lt == NULL)
    return;   /* leave t untouched on conversion failure */
  t[0] = lt->tm_sec;
  t[1] = lt->tm_min;
  t[2] = lt->tm_hour;
  t[3] = lt->tm_mday;
  t[4] = lt->tm_mon;
  t[5] = lt->tm_year;
  t[6] = lt->tm_wday;
  t[7] = lt->tm_yday;
  t[8] = lt->tm_isdst;
}
#endif /* KEY Bug 1683, 5019 */
|
the_stack_data/472664.c
|
/* Scilab runtime stub: sum() of a 0-dimensional (scalar) double reduces to
 * the identity - the single element is its own sum. */
double scilab_rt_sum_d0_(double in0)
{
  double total = in0;
  return total;
}
|
the_stack_data/87639161.c
|
#define alpha( i,j ) A[ (j)*ldA + i ]   // map alpha( i,j ) to array A
#define beta( i,j )  B[ (j)*ldB + i ]   // map beta( i,j )  to array B
#define gamma( i,j ) C[ (j)*ldC + i ]   // map gamma( i,j ) to array C

/* Reference BLAS dgemv prototype (Fortran convention: all args by reference). */
void dgemv_( char *, int *, int *, double *, double *, int *,
             double *, int *, double *, double *, int * );

/* C := A*B + C, one row of C at a time: row i of C accumulates
 * (row i of A) * B, expressed as the gemv y := alpha * B^T x + beta * y with
 * x = row i of A (stride ldA) and y = row i of C (stride ldC).
 * Fix: the leading dimension passed for matrix B must be ldB - the original
 * passed &ldA, which is only correct when A and B happen to share a leading
 * dimension. Also removed the unused local i_one. */
void MyGemm( int m, int n, int k, double *A, int ldA,
             double *B, int ldB, double *C, int ldC )
{
  double d_one = 1.0;
  for ( int i=0; i<m; i++ )
    dgemv_( "Transpose", &k, &n, &d_one, B, &ldB, &alpha( i, 0 ), &ldA,
            &d_one, &gamma( i,0 ), &ldC );
}
|
the_stack_data/68886842.c
|
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
//! -- Simple utility to extract the first projection (corresponding to primary+scatter image) from a set
//! of MC-GPU projections, such as a DBT dataset, and concatenate the projections in a single raw file.
//! The input files names must end with the projection number and extension as "_%04d.raw".
//
// Example execution to combine 25 DBT projections (from 0001 to 0025):
// $ time ./extract_projections.x 3000 1500 25 1 mcgpu_image_22183101_scattered
//
// [Andreu Badal, 2019/10/04]
//
// ***************************************************************************************
// * DISCLAIMER
// * This software and documentation (the "Software") were developed at the Food and
// * Drug Administration (FDA) by employees of the Federal Government in the course
// * of their official duties. Pursuant to Title 17, Section 105 of the United States
// * Code, this work is not subject to copyright protection and is in the public
// * domain. Permission is hereby granted, free of charge, to any person obtaining a
// * copy of the Software, to deal in the Software without restriction, including
// * without limitation the rights to use, copy, modify, merge, publish, distribute,
// * sublicense, or sell copies of the Software or derivatives, and to permit persons
// * to whom the Software is furnished to do so. FDA assumes no responsibility
// * whatsoever for use by other parties of the Software, its source code,
// * documentation or compiled executables, and makes no guarantees, expressed or
// * implied, about its quality, reliability, or any other characteristic. Further,
// * use of this code in no way implies endorsement by the FDA or confers any
// * advantage in regulatory decisions. Although this software can be redistributed
// * and/or modified freely, we ask that any derivative works bear some notice that
// * they are derived from it, and any modified versions bear some notice that they
// * have been modified.
// ***************************************************************************************
int main(int argc, char **argv)
{
printf("\n\n\n *** Extract first projection (primary+scatter) from a set of MC-GPU projections ***\n\n");
if (argc!=6)
{
printf("\n !!ERROR!! Input 5 parameters: pix_x pix_y num_projections 1stProjectionNumber (eg, 1) InputFileNameRoot (eg, mc-gpu_image.dat)\n\n");
return -1;
}
char input_name[250], file_binary[250];
int i, pix_x=atoi(argv[1]), pix_y=atoi(argv[2]), num_projections=atoi(argv[3]), firstProj=atoi(argv[4]);
strncpy(input_name, argv[5], 250);
int pixels_per_image = pix_x * pix_y;
float *data = (float*)malloc(pixels_per_image*sizeof(float)); // Allocate space for a complete image
printf(" - Input values: pix_x=%d, pix_y=%d --> pixels_per_image=%d ; num_projections=%d ; file name root=%s\n\n", pix_x, pix_y, pixels_per_image, num_projections, input_name);
strncpy(file_binary, input_name, 250);
sprintf(file_binary, "%s_%dx%dpixels_%dproj.raw", input_name, pix_x, pix_y, num_projections);
printf(" - Output binary file: \'%s\'\n\n", file_binary); //!!VERBOSE!!
FILE* file_binary_out = fopen(file_binary, "wb");
if (file_binary_out==NULL)
{
printf("\n\n !!fopen ERROR report_image!! Binary file %s can not be opened for writing!!\n", file_binary);
exit(-3);
}
// -- Iterate for each projection:
for(i=firstProj; i<(num_projections+firstProj); i++)
{
sprintf(file_binary, "%s_%04d.raw", input_name, i); // Create the output file name with the input name + projection number (4 digits, padding with 0)
printf(" - (%d) Reading file: \'%s\'...\n", i, file_binary); //!!VERBOSE!!
fflush(stdout); // Clear the screen output buffer
FILE* file_binary_in = fopen(file_binary, "rb"); // !!BINARY!!
if (file_binary_in==NULL)
{
printf("\n\n !!fopen ERROR report_image!! Binary file %s can not be opened for reading!!\n", file_binary);
exit(-3);
}
// -- Read and write data:
int count = fread(data, sizeof(float), pixels_per_image, file_binary_in);
if (count!=pixels_per_image)
{
printf("\n\n !!ERROR reading input images!! fread read %d elements, but image has %d pixels!?\n\n", count, pixels_per_image);
exit(-3);
}
else
fwrite(data, sizeof(float), pixels_per_image, file_binary_out);
fclose(file_binary_in);
}
fclose(file_binary_out);
free(data);
printf("\n\n");
return 0;
}
|
the_stack_data/34504.c
|
/**
* Tests for decrement operation (--), both postfix and prefix
*/
#include <stdio.h>
/* Exercises postfix and prefix decrement on int and float operands; each
 * modification sits in its own statement, so there are no unsequenced-
 * modification hazards and the printed "Expected" values are well defined. */
int main(int argc, char** argv)
{
int a = 5;
printf("a-- = %d. Expected: 5\n", a--);   // postfix: yields old value
printf("a = %d. Expected: 4\n", a);
printf("--a = %d. Expected: 3\n", --a);   // prefix: yields new value
a--;
--a;
printf("a = %d. Expected: 1\n\n", a);
float b = 5.5;
printf("b-- = %f. Expected: 5.5\n", b--);
printf("b = %f. Expected: 4.5\n", b);
printf("--b = %f. Expected: 3.5\n", --b);
b--;
--b;
printf("b = %f. Expected: 1.5\n", b);
return 0;
}
|
the_stack_data/13242.c
|
#include <limits.h>
void assert(int);
int abs(int x)
{
int rv;
if (x > 0)
rv = x;
else if (x == INT_MIN)
rv = INT_MAX;
else
rv = -x;
return rv;
}
/* Clamp negative inputs to zero: returns max(x, 0). */
int limit(int x)
{
  return (x > 0) ? x : 0;
}
/* Verification harness: 'assert' here is the file-local prototype
 * void assert(int) (a checker intrinsic), not the <assert.h> macro.
 * Each property holds because limit() and abs() never return a negative
 * value, and % of a non-negative dividend by a positive divisor is
 * non-negative in C. */
int test(int x)
{
assert(limit(x) >= 0);
assert(abs(x) >= 0);
assert(abs(x) % 6 >= 0);
return 0;
}
|
the_stack_data/97011649.c
|
//
// main.c
// hashTable
//
// Created by kaysen on 2018/5/22.
// Copyright © 2018年 kaysen. All rights reserved.
//
#include <stdio.h>
#include<stdlib.h>
#include <assert.h>
#define N 7
typedef struct HashNode{
int m_data;
struct HashNode *link;
} HashNode, *HashTable[N];
/* Reset every bucket head of the table to an empty chain. */
void init_htable(HashTable pht)
{
  int slot = 0;
  while (slot < N) {
    pht[slot] = NULL;
    ++slot;
  }
}
/* Division-method hash: bucket index of 'key' in a table of N slots.
 * (Callers pass non-negative keys; a negative key would yield a negative
 * remainder under C semantics.) */
int hash_maker(int key)
{
  int bucket = key % N;
  return bucket;
}
/* Append a new node holding 'val' to the tail of its bucket's chain.
 * Duplicates are allowed; aborts via assert if the allocation fails. */
void insert_elem_back(HashTable pht, const int val)
{
int index = hash_maker(val);
printf("======= %d \n", index); // debug trace of the chosen bucket
HashNode *new_node = (HashNode *) malloc(sizeof(HashNode));
assert(new_node != NULL);
new_node->m_data = val;
new_node->link = NULL;
if(pht[index] == NULL)
{
pht[index] = new_node;
}
else
{
// Walk to the tail so insertion preserves arrival order.
HashNode *tmp = pht[index];
while(tmp->link != NULL)
tmp = tmp->link;
tmp->link = new_node;
}
}
/* Print each bucket as "index value->value->...->NULL". */
void show_htable(HashTable pht)
{
for(int i=0; i<N; ++i){
printf("%d ", i);
HashNode *tmp = pht[i];
while(tmp != NULL){
printf("%d->", tmp->m_data);
tmp = tmp->link;
}
printf("NULL\n");
}
}
/* Free every node in one bucket's chain.
 * Fix: the original advanced with 'tmp = tmp->link' AFTER free(tmp) - a
 * use-after-free read of the freed node; the successor saved before the
 * free must be used instead. */
void clear_htable(HashNode* tmp)
{
  while (tmp != NULL)
  {
    HashNode *next_tmp = tmp->link;   // save successor before freeing
    free(tmp);
    tmp = next_tmp;
  }
}
/* Free every chain in the table. Note: the bucket heads are left dangling;
 * call init_htable() before reusing the table. */
void destroy_htable(HashTable pht)
{
for(int i=0; i<N; ++i)
{
clear_htable(pht[i]);
}
}
/* Demo driver: initialize a table, insert two keys, print the chains, and
 * release the nodes. Fixes vs. the original:
 *  - memset(ht, NULL, sizeof(ht)) passed a pointer constant as the fill
 *    byte, relied on all-bits-zero being a null pointer, and called memset
 *    without including <string.h>; the existing init_htable() performs the
 *    intended initialization portably;
 *  - destroy_htable() is now called so the nodes are not leaked. */
int main(int argc, const char * argv[])
{
  HashTable ht;
  init_htable(ht);          // was: memset(ht, NULL, sizeof(ht));;
  insert_elem_back(ht, 20); // insert element
  insert_elem_back(ht, 33); // insert element
  printf("*************************insert*****************************\n");
  show_htable(ht);
  destroy_htable(ht);       // free all nodes before exit
  return 0;
}
|
the_stack_data/6387099.c
|
#ifdef COMPILE_FOR_TEST
#include <assert.h>
#define assume(cond) assert(cond)
#endif
void main(int argc, char* argv[]) {
int x_0_0;//sh_buf.outcnt
int x_0_1;//sh_buf.outcnt
int x_0_2;//sh_buf.outcnt
int x_0_3;//sh_buf.outcnt
int x_0_4;//sh_buf.outcnt
int x_0_5;//sh_buf.outcnt
int x_1_0;//sh_buf.outbuf[0]
int x_1_1;//sh_buf.outbuf[0]
int x_2_0;//sh_buf.outbuf[1]
int x_2_1;//sh_buf.outbuf[1]
int x_3_0;//sh_buf.outbuf[2]
int x_3_1;//sh_buf.outbuf[2]
int x_4_0;//sh_buf.outbuf[3]
int x_4_1;//sh_buf.outbuf[3]
int x_5_0;//sh_buf.outbuf[4]
int x_5_1;//sh_buf.outbuf[4]
int x_6_0;//sh_buf.outbuf[5]
int x_7_0;//sh_buf.outbuf[6]
int x_8_0;//sh_buf.outbuf[7]
int x_9_0;//sh_buf.outbuf[8]
int x_10_0;//sh_buf.outbuf[9]
int x_11_0;//LOG_BUFSIZE
int x_11_1;//LOG_BUFSIZE
int x_12_0;//CREST_scheduler::lock_0
int x_13_0;//t3 T0
int x_14_0;//t2 T0
int x_15_0;//arg T0
int x_16_0;//functioncall::param T0
int x_16_1;//functioncall::param T0
int x_17_0;//buffered T0
int x_18_0;//functioncall::param T0
int x_18_1;//functioncall::param T0
int x_19_0;//functioncall::param T0
int x_19_1;//functioncall::param T0
int x_20_0;//functioncall::param T0
int x_20_1;//functioncall::param T0
int x_21_0;//functioncall::param T0
int x_21_1;//functioncall::param T0
int x_22_0;//direction T0
int x_23_0;//functioncall::param T0
int x_23_1;//functioncall::param T0
int x_24_0;//functioncall::param T0
int x_24_1;//functioncall::param T0
int x_25_0;//functioncall::param T0
int x_25_1;//functioncall::param T0
int x_26_0;//functioncall::param T0
int x_26_1;//functioncall::param T0
int x_27_0;//functioncall::param T0
int x_27_1;//functioncall::param T0
int x_28_0;//functioncall::param T0
int x_28_1;//functioncall::param T0
int x_29_0;//functioncall::param T0
int x_29_1;//functioncall::param T0
int x_30_0;//functioncall::param T0
int x_30_1;//functioncall::param T0
int x_31_0;//functioncall::param T0
int x_31_1;//functioncall::param T0
int x_32_0;//functioncall::param T0
int x_32_1;//functioncall::param T0
int x_33_0;//functioncall::param T0
int x_33_1;//functioncall::param T0
int x_34_0;//functioncall::param T0
int x_34_1;//functioncall::param T0
int x_35_0;//functioncall::param T1
int x_35_1;//functioncall::param T1
int x_36_0;//functioncall::param T1
int x_36_1;//functioncall::param T1
int x_37_0;//i T1
int x_37_1;//i T1
int x_37_2;//i T1
int x_38_0;//rv T1
int x_39_0;//functioncall::param T1
int x_39_1;//functioncall::param T1
int x_40_0;//functioncall::param T1
int x_40_1;//functioncall::param T1
int x_41_0;//functioncall::param T1
int x_41_1;//functioncall::param T1
int x_42_0;//functioncall::param T1
int x_42_1;//functioncall::param T1
int x_43_0;//functioncall::param T2
int x_43_1;//functioncall::param T2
int x_44_0;//functioncall::param T2
int x_44_1;//functioncall::param T2
int x_45_0;//i T2
int x_45_1;//i T2
int x_45_2;//i T2
int x_45_3;//i T2
int x_46_0;//rv T2
int x_47_0;//rv T2
int x_47_1;//rv T2
int x_48_0;//functioncall::param T2
int x_48_1;//functioncall::param T2
int x_49_0;//functioncall::param T2
int x_49_1;//functioncall::param T2
int x_50_0;//functioncall::param T2
int x_50_1;//functioncall::param T2
int x_51_0;//functioncall::param T2
int x_51_1;//functioncall::param T2
int x_52_0;//functioncall::param T2
int x_52_1;//functioncall::param T2
int x_53_0;//functioncall::param T2
int x_53_1;//functioncall::param T2
int x_53_2;//functioncall::param T2
int x_54_0;//functioncall::param T2
int x_54_1;//functioncall::param T2
int x_55_0;//functioncall::param T2
int x_55_1;//functioncall::param T2
int x_56_0;//functioncall::param T2
int x_56_1;//functioncall::param T2
T_0_0_0: x_0_0 = 0;
T_0_1_0: x_1_0 = 0;
T_0_2_0: x_2_0 = 0;
T_0_3_0: x_3_0 = 0;
T_0_4_0: x_4_0 = 0;
T_0_5_0: x_5_0 = 0;
T_0_6_0: x_6_0 = 0;
T_0_7_0: x_7_0 = 0;
T_0_8_0: x_8_0 = 0;
T_0_9_0: x_9_0 = 0;
T_0_10_0: x_10_0 = 0;
T_0_11_0: x_11_0 = 0;
T_0_12_0: x_13_0 = 522322736;
T_0_13_0: x_14_0 = 3695858272;
T_0_14_0: x_15_0 = 0;
T_0_15_0: x_16_0 = 149250103;
T_0_16_0: x_16_1 = -1;
T_0_17_0: x_17_0 = 0;
T_0_18_0: x_18_0 = 1604443681;
T_0_19_0: x_18_1 = x_17_0;
T_0_20_0: x_19_0 = 675035457;
T_0_21_0: x_19_1 = 97;
T_0_22_0: x_20_0 = 1955052338;
T_0_23_0: x_20_1 = 0;
T_0_24_0: x_21_0 = 1019677814;
T_0_25_0: x_21_1 = 0;
T_0_26_0: x_22_0 = -599113664;
T_0_27_0: x_23_0 = 377048178;
T_0_28_0: x_23_1 = x_22_0;
T_0_29_0: x_24_0 = 2088852946;
T_0_30_0: x_24_1 = 0;
T_0_31_0: x_12_0 = -1;
T_0_32_0: x_0_1 = 5;
T_0_33_0: x_1_1 = 72;
T_0_34_0: x_2_1 = 69;
T_0_35_0: x_3_1 = 76;
T_0_36_0: x_4_1 = 76;
T_0_37_0: x_5_1 = 79;
T_0_38_0: x_25_0 = 283668096;
T_0_39_0: x_25_1 = 83;
T_0_40_0: x_26_0 = 1311981546;
T_0_41_0: x_26_1 = 1;
T_0_42_0: x_27_0 = 1682486852;
T_0_43_0: x_27_1 = 1;
T_0_44_0: x_28_0 = 2146718806;
T_0_45_0: x_28_1 = 1;
T_0_46_0: x_29_0 = 1015615132;
T_0_47_0: x_29_1 = 82;
T_0_48_0: x_30_0 = 1394855769;
T_0_49_0: x_30_1 = 90;
T_0_50_0: x_31_0 = 377593283;
T_0_51_0: x_31_1 = 1;
T_0_52_0: x_32_0 = 602671715;
T_0_53_0: x_32_1 = 1;
T_0_54_0: x_33_0 = 1745806499;
T_0_55_0: x_33_1 = 2;
T_0_56_0: x_34_0 = 1620175684;
T_0_57_0: x_34_1 = 2;
T_0_58_0: x_11_1 = 5;
T_2_59_2: x_43_0 = 1915311126;
T_2_60_2: x_43_1 = x_33_1;
T_2_61_2: x_44_0 = 1960753435;
T_2_62_2: x_44_1 = x_34_1;
T_2_63_2: x_45_0 = 0;
T_2_64_2: x_46_0 = 548450817;
T_1_65_1: x_35_0 = 1069601795;
T_1_66_1: x_35_1 = x_27_1;
T_1_67_1: x_36_0 = 1495679640;
T_1_68_1: x_36_1 = x_28_1;
T_1_69_1: x_37_0 = 0;
T_1_70_1: x_38_0 = 550552065;
T_2_71_2: if (x_0_1 + x_44_1 > x_11_1 && x_0_1 != 0) x_47_0 = -592046160;
T_2_72_2: if (x_0_1 + x_44_1 > x_11_1 && x_0_1 != 0 && x_18_1 == 0 && x_18_1 == 0) x_48_0 = 277551002;
T_2_73_2: if (x_0_1 + x_44_1 > x_11_1 && x_0_1 != 0 && x_18_1 == 0 && x_18_1 == 0) x_48_1 = -1;
T_2_74_2: if (x_0_1 + x_44_1 > x_11_1 && x_0_1 != 0 && x_18_1 == 0 && x_18_1 == 0) x_47_1 = x_48_1;
T_2_75_2: if (x_0_1 + x_44_1 > x_11_1 && x_0_1 != 0 && x_18_1 == 0 && x_47_1 + 1 == 0) x_0_2 = 0;
T_2_76_2: if (x_0_1 + x_44_1 > x_11_1 && x_0_1 != 0) x_49_0 = 434964480;
T_2_77_2: if (x_0_1 + x_44_1 > x_11_1 && x_0_1 != 0) x_49_1 = 9;
T_2_78_2: if (x_0_1 + x_44_1 > x_11_1 && x_0_1 != 0) x_50_0 = 68883549;
T_2_79_2: if (x_0_1 + x_44_1 > x_11_1 && x_0_1 != 0) x_50_1 = x_49_1;
T_2_80_2: if (x_0_1 + x_44_1 > x_11_1 && x_0_1 != 0) x_0_3 = 0;
T_2_81_2: if (x_44_1 < x_11_1) x_51_0 = 518966433;
T_2_82_2: if (x_44_1 < x_11_1) x_51_1 = 47901221803776;
T_1_83_1: if (x_36_1 < x_11_1) x_39_0 = 1345006528;
T_1_84_1: if (x_36_1 < x_11_1) x_39_1 = 47901219702528;
T_1_85_1: if (x_36_1 < x_11_1) x_40_0 = 1491893920;
T_1_86_1: if (x_36_1 < x_11_1) x_40_1 = x_0_3 + x_36_1;
T_1_87_1: if (x_36_1 < x_11_1) x_37_1 = 0;
T_1_88_1: if (x_36_1 < x_11_1 && x_37_1 < x_35_1) x_41_0 = 1378082470;
T_1_89_1: if (x_36_1 < x_11_1 && x_37_1 < x_35_1) x_41_1 = 47901219702528;
T_1_90_1: if (x_36_1 < x_11_1) x_37_2 = 1 + x_37_1;
T_1_91_1: if (x_36_1 < x_11_1) x_42_0 = 2103039253;
T_1_92_1: if (x_36_1 < x_11_1) x_42_1 = 47901219702528;
T_1_93_1: if (x_36_1 < x_11_1) x_0_4 = x_0_3 + x_36_1;
T_1_94_1: if (x_44_1 < x_11_1) x_52_0 = 2143487448;
T_1_95_1: if (x_44_1 < x_11_1) x_52_1 = x_0_3 + x_44_1;
T_2_96_2: if (x_44_1 < x_11_1) x_45_1 = 0;
T_2_97_2: if (x_44_1 < x_11_1 && x_45_1 < x_43_1) x_53_0 = 205982934;
T_2_98_2: if (x_44_1 < x_11_1 && x_45_1 < x_43_1) x_53_1 = 47901221803776;
T_2_99_2: if (x_44_1 < x_11_1) x_45_2 = 1 + x_45_1;
T_2_100_2: if (x_44_1 < x_11_1 && x_45_2 < x_43_1) x_53_2 = 47901221803776;
T_2_101_2: if (x_44_1 < x_11_1) x_45_3 = 1 + x_45_2;
T_2_102_2: if (x_44_1 < x_11_1) x_54_0 = 104805709;
T_2_103_2: if (x_44_1 < x_11_1) x_54_1 = 47901221803776;
T_2_104_2: if (x_44_1 < x_11_1) x_0_5 = x_0_4 + x_44_1;
T_2_105_2: if (x_44_1 < x_11_1) x_55_0 = 1600447481;
T_2_106_2: if (x_44_1 < x_11_1) x_55_1 = 47901221803776;
T_2_107_2: if (x_44_1 < x_11_1) x_56_0 = 881018391;
T_2_108_2: if (x_44_1 < x_11_1) x_56_1 = 47901221803776;
T_2_109_2: if (x_44_1 < x_11_1) assert(x_0_5 == x_52_1);
}
|
the_stack_data/231392727.c
|
/*
* $Id$
* Copyright (C) 2004-2006, Parrot Foundation.
*/
/*
=head1 NAME
config/gen/platform/generic/dl.c
=head1 DESCRIPTION
Dynlib stuff
=head2 Functions
=over 4
=cut
*/
#ifdef PARROT_HAS_HEADER_DLFCN
# include <dlfcn.h>
#endif
#define PARROT_DLOPEN_FLAGS RTLD_LAZY
/*
=item C<void * Parrot_dlopen(const char *filename)>
=cut
*/
/*
 * Open the shared object `filename` with the configured lookup flags.
 * Yields the platform handle, or 0 when dynamic loading is not
 * available in this build.
 */
void *
Parrot_dlopen(const char *filename)
{
#ifdef PARROT_HAS_HEADER_DLFCN
    void *handle;
    handle = dlopen(filename, PARROT_DLOPEN_FLAGS);
    return handle;
#else
    return 0;
#endif
}
/*
=item C<const char * Parrot_dlerror(void)>
=cut
*/
/*
 * Report the most recent dynamic-loading error message, or 0 when
 * dynamic loading is not available in this build.
 */
const char *
Parrot_dlerror(void)
{
#ifdef PARROT_HAS_HEADER_DLFCN
    const char *message;
    message = dlerror();
    return message;
#else
    return 0;
#endif
}
/*
=item C<void * Parrot_dlsym(void *handle, const char *symbol)>
=cut
*/
/*
 * Look up `symbol` in the already-opened library `handle`.
 * Yields the symbol's address, or 0 when dynamic loading is not
 * available in this build.
 */
void *
Parrot_dlsym(void *handle, const char *symbol)
{
#ifdef PARROT_HAS_HEADER_DLFCN
    void *address;
    address = dlsym(handle, symbol);
    return address;
#else
    return 0;
#endif
}
/*
=item C<int Parrot_dlclose(void *handle)>
=cut
*/
/*
 * Release the library `handle`.  Returns dlclose()'s status, or -1
 * when dynamic loading is not available in this build.
 */
int
Parrot_dlclose(void *handle)
{
#ifdef PARROT_HAS_HEADER_DLFCN
    int status;
    status = dlclose(handle);
    return status;
#else
    return -1;
#endif
}
/*
=back
=cut
*/
/*
* Local variables:
* c-file-style: "parrot"
* End:
* vim: expandtab shiftwidth=4:
*/
|
the_stack_data/165766861.c
|
// BUG: sleeping function called from invalid context in __alloc_pages_nodemask
// https://syzkaller.appspot.com/bug?id=4687097eb7fd73a84a745c0356f24f7e9e500b88
// status:open
// autogenerated by syzkaller (https://github.com/google/syzkaller)
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <dirent.h>
#include <endian.h>
#include <errno.h>
#include <fcntl.h>
#include <net/if.h>
#include <netinet/in.h>
#include <signal.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/prctl.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
#include <sched.h>
#include <linux/genetlink.h>
#include <linux/if_addr.h>
#include <linux/if_link.h>
#include <linux/in6.h>
#include <linux/neighbour.h>
#include <linux/net.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/veth.h>
/* Sleep for `ms` milliseconds (thin wrapper over usleep()). */
static void sleep_ms(uint64_t ms)
{
usleep(ms * 1000);
}
/* Monotonic clock reading in milliseconds; exits the process if the
   clock cannot be read, matching the repro's fail-hard policy. */
static uint64_t current_time_ms(void)
{
    struct timespec now;
    if (clock_gettime(CLOCK_MONOTONIC, &now) != 0)
        exit(1);
    uint64_t ms = (uint64_t)now.tv_sec * 1000;
    ms += (uint64_t)now.tv_nsec / 1000000;
    return ms;
}
/* Format a printf-style message into a bounded buffer and write it to
   `file` in a single write().  Returns false if the file cannot be
   opened or the write is short; errno from a failed write survives
   the close(). */
static bool write_file(const char* file, const char* what, ...)
{
    char line[1024];
    va_list ap;
    va_start(ap, what);
    vsnprintf(line, sizeof(line), what, ap);
    va_end(ap);
    line[sizeof(line) - 1] = 0;
    const int want = strlen(line);
    const int fd = open(file, O_WRONLY | O_CLOEXEC);
    if (fd == -1)
        return false;
    const int wrote = write(fd, line, want);
    if (wrote != want) {
        const int saved = errno;
        close(fd);
        errno = saved;
        return false;
    }
    close(fd);
    return true;
}
/* In-flight netlink message builder: `pos` is the write cursor into
   `buf`; `nesting`/`nested` track open nested attributes (used here
   only as a sanity check at send time). */
struct nlmsg {
char* pos;
int nesting;
struct nlattr* nested[8];
char buf[1024];
};
/* Single global scratch message reused by most netlink helpers. */
static struct nlmsg nlmsg;
/* Begin a new netlink request in `nlmsg`: write the nlmsghdr
   (REQUEST|ACK plus `flags`) followed by `size` bytes of fixed
   payload, and position the cursor past the aligned payload.
   nlmsg_len itself is filled in by netlink_send_ext(). */
static void netlink_init(struct nlmsg* nlmsg, int typ, int flags,
const void* data, int size)
{
memset(nlmsg, 0, sizeof(*nlmsg));
struct nlmsghdr* hdr = (struct nlmsghdr*)nlmsg->buf;
hdr->nlmsg_type = typ;
hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | flags;
memcpy(hdr + 1, data, size);
nlmsg->pos = (char*)(hdr + 1) + NLMSG_ALIGN(size);
}
/* Append one netlink attribute (`typ` plus `size` payload bytes) at
   the current cursor and advance the cursor by the aligned attribute
   length. */
static void netlink_attr(struct nlmsg* nlmsg, int typ, const void* data,
int size)
{
struct nlattr* attr = (struct nlattr*)nlmsg->pos;
attr->nla_len = sizeof(*attr) + size;
attr->nla_type = typ;
memcpy(attr + 1, data, size);
nlmsg->pos += NLMSG_ALIGN(attr->nla_len);
}
/* Finalize the message in `nlmsg`, send it on `sock`, and read one
   reply into the same buffer.  If the reply has type `reply_type`,
   store its length in *reply_len and return 0; otherwise return the
   positive errno carried by the NLMSG_ERROR payload.  Exits on any
   malformed exchange.
   NOTE(review): `n` is unsigned, so a failed recv() (-1) wraps to a
   huge value and slips past the `n < sizeof(...)` checks — presumably
   tolerable for a throwaway reproducer; confirm before reuse. */
static int netlink_send_ext(struct nlmsg* nlmsg, int sock, uint16_t reply_type,
int* reply_len)
{
if (nlmsg->pos > nlmsg->buf + sizeof(nlmsg->buf) || nlmsg->nesting)
exit(1);
struct nlmsghdr* hdr = (struct nlmsghdr*)nlmsg->buf;
hdr->nlmsg_len = nlmsg->pos - nlmsg->buf;
struct sockaddr_nl addr;
memset(&addr, 0, sizeof(addr));
addr.nl_family = AF_NETLINK;
unsigned n = sendto(sock, nlmsg->buf, hdr->nlmsg_len, 0,
(struct sockaddr*)&addr, sizeof(addr));
if (n != hdr->nlmsg_len)
exit(1);
/* The reply overwrites the send buffer; hdr now describes it. */
n = recv(sock, nlmsg->buf, sizeof(nlmsg->buf), 0);
if (hdr->nlmsg_type == NLMSG_DONE) {
*reply_len = 0;
return 0;
}
if (n < sizeof(struct nlmsghdr))
exit(1);
if (reply_len && hdr->nlmsg_type == reply_type) {
*reply_len = n;
return 0;
}
if (n < sizeof(struct nlmsghdr) + sizeof(struct nlmsgerr))
exit(1);
if (hdr->nlmsg_type != NLMSG_ERROR)
exit(1);
/* Kernel reports a negative errno; flip it positive for the caller. */
return -((struct nlmsgerr*)(hdr + 1))->error;
}
/* Send `nlmsg` expecting only an ACK (no typed reply). */
static int netlink_send(struct nlmsg* nlmsg, int sock)
{
return netlink_send_ext(nlmsg, sock, 0, NULL);
}
static int netlink_next_msg(struct nlmsg* nlmsg, unsigned int offset,
unsigned int total_len)
{
struct nlmsghdr* hdr = (struct nlmsghdr*)(nlmsg->buf + offset);
if (offset == total_len || offset + hdr->nlmsg_len > total_len)
return -1;
return hdr->nlmsg_len;
}
/* Issue an RTM_NEWLINK for interface `name`: optionally bring it up,
   enslave it to `master`, set its MAC, and/or rename it to `new_name`.
   Kernel errors are deliberately ignored (best effort). */
static void netlink_device_change(struct nlmsg* nlmsg, int sock,
const char* name, bool up, const char* master,
const void* mac, int macsize,
const char* new_name)
{
struct ifinfomsg hdr;
memset(&hdr, 0, sizeof(hdr));
if (up)
hdr.ifi_flags = hdr.ifi_change = IFF_UP;
hdr.ifi_index = if_nametoindex(name);
netlink_init(nlmsg, RTM_NEWLINK, 0, &hdr, sizeof(hdr));
if (new_name)
netlink_attr(nlmsg, IFLA_IFNAME, new_name, strlen(new_name));
if (master) {
int ifindex = if_nametoindex(master);
netlink_attr(nlmsg, IFLA_MASTER, &ifindex, sizeof(ifindex));
}
if (macsize)
netlink_attr(nlmsg, IFLA_ADDRESS, mac, macsize);
int err = netlink_send(nlmsg, sock);
(void)err;
}
const int kInitNetNsFd = 239;
#define DEVLINK_FAMILY_NAME "devlink"
#define DEVLINK_CMD_PORT_GET 5
#define DEVLINK_CMD_RELOAD 37
#define DEVLINK_ATTR_BUS_NAME 1
#define DEVLINK_ATTR_DEV_NAME 2
#define DEVLINK_ATTR_NETDEV_NAME 7
#define DEVLINK_ATTR_NETNS_FD 138
/* Resolve the generic-netlink family id for "devlink" via the
   CTRL_CMD_GETFAMILY handshake on `sock`.  Returns the id, or -1 on
   failure.  The trailing recv() drains the ACK left in the socket. */
static int netlink_devlink_id_get(struct nlmsg* nlmsg, int sock)
{
struct genlmsghdr genlhdr;
struct nlattr* attr;
int err, n;
uint16_t id = 0;
memset(&genlhdr, 0, sizeof(genlhdr));
genlhdr.cmd = CTRL_CMD_GETFAMILY;
netlink_init(nlmsg, GENL_ID_CTRL, 0, &genlhdr, sizeof(genlhdr));
netlink_attr(nlmsg, CTRL_ATTR_FAMILY_NAME, DEVLINK_FAMILY_NAME,
strlen(DEVLINK_FAMILY_NAME) + 1);
err = netlink_send_ext(nlmsg, sock, GENL_ID_CTRL, &n);
if (err) {
return -1;
}
/* Walk the reply's attributes looking for CTRL_ATTR_FAMILY_ID. */
attr = (struct nlattr*)(nlmsg->buf + NLMSG_HDRLEN +
NLMSG_ALIGN(sizeof(genlhdr)));
for (; (char*)attr < nlmsg->buf + n;
attr = (struct nlattr*)((char*)attr + NLMSG_ALIGN(attr->nla_len))) {
if (attr->nla_type == CTRL_ATTR_FAMILY_ID) {
id = *(uint16_t*)(attr + 1);
break;
}
}
if (!id) {
return -1;
}
recv(sock, nlmsg->buf, sizeof(nlmsg->buf), 0); /* recv ack */
return id;
}
/* Ask devlink to reload device `bus_name`/`dev_name` into the network
   namespace referred to by `netns_fd` (DEVLINK_CMD_RELOAD with
   DEVLINK_ATTR_NETNS_FD).  Failures are silently ignored. */
static void netlink_devlink_netns_move(const char* bus_name,
const char* dev_name, int netns_fd)
{
struct genlmsghdr genlhdr;
int sock;
int id, err;
sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
if (sock == -1)
exit(1);
id = netlink_devlink_id_get(&nlmsg, sock);
if (id == -1)
goto error;
memset(&genlhdr, 0, sizeof(genlhdr));
genlhdr.cmd = DEVLINK_CMD_RELOAD;
netlink_init(&nlmsg, id, 0, &genlhdr, sizeof(genlhdr));
netlink_attr(&nlmsg, DEVLINK_ATTR_BUS_NAME, bus_name, strlen(bus_name) + 1);
netlink_attr(&nlmsg, DEVLINK_ATTR_DEV_NAME, dev_name, strlen(dev_name) + 1);
netlink_attr(&nlmsg, DEVLINK_ATTR_NETNS_FD, &netns_fd, sizeof(netns_fd));
err = netlink_send(&nlmsg, sock);
if (err) {
}
error:
close(sock);
}
static struct nlmsg nlmsg2;
/* Enumerate the devlink ports of `bus_name`/`dev_name` with a
   DEVLINK_CMD_PORT_GET dump, and for each reported netdev bring it up
   and rename it to "<netdev_prefix><index>" over a parallel
   rtnetlink socket. */
static void initialize_devlink_ports(const char* bus_name, const char* dev_name,
const char* netdev_prefix)
{
struct genlmsghdr genlhdr;
int len, total_len, id, err, offset;
uint16_t netdev_index;
int sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
if (sock == -1)
exit(1);
int rtsock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
if (rtsock == -1)
exit(1);
id = netlink_devlink_id_get(&nlmsg, sock);
if (id == -1)
goto error;
memset(&genlhdr, 0, sizeof(genlhdr));
genlhdr.cmd = DEVLINK_CMD_PORT_GET;
netlink_init(&nlmsg, id, NLM_F_DUMP, &genlhdr, sizeof(genlhdr));
netlink_attr(&nlmsg, DEVLINK_ATTR_BUS_NAME, bus_name, strlen(bus_name) + 1);
netlink_attr(&nlmsg, DEVLINK_ATTR_DEV_NAME, dev_name, strlen(dev_name) + 1);
err = netlink_send_ext(&nlmsg, sock, id, &total_len);
if (err) {
goto error;
}
/* Iterate the dump: one message per port; within each, look for the
   DEVLINK_ATTR_NETDEV_NAME attribute. */
offset = 0;
netdev_index = 0;
while ((len = netlink_next_msg(&nlmsg, offset, total_len)) != -1) {
struct nlattr* attr = (struct nlattr*)(nlmsg.buf + offset + NLMSG_HDRLEN +
NLMSG_ALIGN(sizeof(genlhdr)));
for (; (char*)attr < nlmsg.buf + offset + len;
attr = (struct nlattr*)((char*)attr + NLMSG_ALIGN(attr->nla_len))) {
if (attr->nla_type == DEVLINK_ATTR_NETDEV_NAME) {
char* port_name;
char netdev_name[IFNAMSIZ];
port_name = (char*)(attr + 1);
snprintf(netdev_name, sizeof(netdev_name), "%s%d", netdev_prefix,
netdev_index);
/* nlmsg2 keeps the rtnetlink request from clobbering the dump
   still being walked in nlmsg. */
netlink_device_change(&nlmsg2, rtsock, port_name, true, 0, 0, 0,
netdev_name);
break;
}
}
offset += len;
netdev_index++;
}
error:
close(rtsock);
close(sock);
}
/* Move PCI devlink device 0000:00:10.0 from the init netns (fd 239,
   kInitNetNsFd, prepared by the sandbox) into the current netns, then
   name its ports netpci0, netpci1, ...  Exits on setns/open failure. */
static void initialize_devlink_pci(void)
{
int netns = open("/proc/self/ns/net", O_RDONLY);
if (netns == -1)
exit(1);
int ret = setns(kInitNetNsFd, 0);
if (ret == -1)
exit(1);
netlink_devlink_netns_move("pci", "0000:00:10.0", netns);
ret = setns(netns, 0);
if (ret == -1)
exit(1);
close(netns);
initialize_devlink_ports("pci", "0000:00:10.0", "netpci");
}
/* Forcefully kill test process `pid` (and its process group) and reap
   it.  If it does not exit within ~100ms, abort any FUSE connections
   that may be wedging it, then wait indefinitely. */
static void kill_and_wait(int pid, int* status)
{
kill(-pid, SIGKILL);
kill(pid, SIGKILL);
int i;
for (i = 0; i < 100; i++) {
if (waitpid(-1, status, WNOHANG | __WALL) == pid)
return;
usleep(1000);
}
/* Still alive: a hung FUSE mount can make a process unkillable, so
   poke every /sys/fs/fuse/connections/<id>/abort. */
DIR* dir = opendir("/sys/fs/fuse/connections");
if (dir) {
for (;;) {
struct dirent* ent = readdir(dir);
if (!ent)
break;
if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0)
continue;
char abort[300];
snprintf(abort, sizeof(abort), "/sys/fs/fuse/connections/%s/abort",
ent->d_name);
int fd = open(abort, O_WRONLY);
if (fd == -1) {
continue;
}
if (write(fd, abort, 1) < 0) {
}
close(fd);
}
closedir(dir);
} else {
}
while (waitpid(-1, status, __WALL) != pid) {
}
}
/* Per-child setup: die when the parent dies, detach into our own
   process group, and volunteer as the first OOM-kill victim. */
static void setup_test()
{
prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
setpgrp();
write_file("/proc/self/oom_score_adj", "1000");
}
static void execute_one(void);
#define WAIT_FLAGS __WALL
/* Main test loop: run execute_one() in a fresh forked child each
   iteration, giving each child at most 5 seconds before it is
   force-killed and reaped. */
static void loop(void)
{
int iter;
for (iter = 0;; iter++) {
int pid = fork();
if (pid < 0)
exit(1);
if (pid == 0) {
setup_test();
execute_one();
exit(0);
}
int status = 0;
uint64_t start = current_time_ms();
for (;;) {
if (waitpid(-1, &status, WNOHANG | WAIT_FLAGS) == pid)
break;
sleep_ms(1);
/* 5-second watchdog per child. */
if (current_time_ms() - start < 5 * 1000)
continue;
kill_and_wait(pid, &status);
break;
}
}
}
/* The reproducer body: unshare(0x40000000) — CLONE_NEWNET — creates a
   fresh network namespace, which triggers the reported bug. */
void execute_one(void)
{
syscall(__NR_unshare, 0x40000000ul);
}
int main(void)
{
/* Pre-map the fixed address range syzkaller programs expect, then
   run the fork/execute loop forever. */
syscall(__NR_mmap, 0x20000000ul, 0x1000000ul, 3ul, 0x32ul, -1, 0);
loop();
return 0;
}
|
the_stack_data/345765.c
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define TAM 10 //tamanho maximo do vetor
void bubble_sort_int(int tamanho, int *);
void exibe_vetor(int tamanho, int *);
void preenche_vetor_random_int(int tamanho, int maior, int vet[]);
int aleatorio(int);
void troca(int array[], int i, int j);
//void preenche_vetor(int tamanho, int vet[])
/*
** A funcao BubbleSort rearranja o vetor v[0..n-1]
** em ordem crescente
** Autor: Ed em 10/04/2013 - alterado em 02/03/2019
*/
/*void bubble_sort_int(int tamanho, int vetor[]){
int i, houvetroca, temp, fim = tamanho;
do{
houvetroca = 0;
for(i=0; i<fim-1;i++){
if(vetor[i]>vetor[i+1]){
temp = vetor[i];
vetor[i] = vetor[i+1];
vetor[i+1] = temp;
houvetroca = 1;
} exibe_vetor(tamanho, vetor); printf("\n"); getchar();
}
}while(houvetroca!=0);
}*/
/*
** Sorts vetor[0..tamanho-1] in ascending order with bubble sort.
** Fixes: removed leftover per-comparison debug printf calls that
** polluted stdout; the inner pass now shrinks as the tail becomes
** sorted, and a pass with no swaps exits early.
*/
void bubble_sort_int(int tamanho, int vetor[]){
    int i, j;
    for (i = 1; i < tamanho; i++){
        int houve_troca = 0;            /* did this pass swap anything? */
        /* After pass i-1 the last i-1 slots hold their final values. */
        for (j = 0; j < tamanho - i; j++){
            if (vetor[j] > vetor[j+1]){
                int aux = vetor[j];     /* inline swap keeps the routine
                                           self-contained */
                vetor[j] = vetor[j+1];
                vetor[j+1] = aux;
                houve_troca = 1;
            }
        }
        if (!houve_troca)
            break;                      /* already sorted */
    }
}
/* Swap elements i and j of array in place. */
void troca(int array[], int i, int j){
    const int temp = array[j];
    array[j] = array[i];
    array[i] = temp;
}
/* Demo driver: fill a TAM-element vector with random values, show it,
   bubble-sort it, and show it again; getchar() pauses between steps. */
int main(){
srand(time(NULL));
int vetor[TAM]; // declare the vector
preenche_vetor_random_int(TAM, 100, vetor);
printf("\nO vetor digitado e'\n");
exibe_vetor(TAM, vetor);
getchar();
bubble_sort_int(TAM, vetor);
printf("\n\nO vetor ordenado em ordem crescente e'\n");
exibe_vetor(TAM, vetor); getchar();
/*ordena_bolha_desc_int(TAM, vetor);
printf("\n\nO vetor ordenado em ordem decrescente eh\n");
exibe_vetor(TAM, vetor);getchar();*/
printf("\n\n");
getchar();
return 0;
}
/* Read `tamanho` integers from stdin into vet, prompting for each. */
void preenche_vetor(int tamanho, int vet[]){
    int idx;
    for (idx = 0; idx < tamanho; ++idx){
        printf("\nDigite o elemento %d do vetor: ", idx);
        scanf("%d", &vet[idx]);
    }
}
/* Print the vector on one line, each value left-aligned in 4 columns. */
void exibe_vetor(int tamanho, int v[]){
    int pos;
    for (pos = 0; pos < tamanho; pos++){
        printf("%-4d ", v[pos]);
    }
}
/* Fill vet[0..tamanho-1] with pseudo-random values in [1, maior-1]
   (requires maior > 1).  Parameters: tamanho = element count,
   maior = exclusive upper bound used for generation, vet = the array. */
void preenche_vetor_random_int(int tamanho, int maior, int vet[]){
    int pos;
    for (pos = 0; pos < tamanho; ++pos){
        vet[pos] = 1 + aleatorio(maior - 1); // values up to maior-1
    }
}
/* Pseudo-random integer in [0, n-1] (requires n > 0). */
int aleatorio(int n){
    int r = rand();
    return r % n;
}
|
the_stack_data/122271.c
|
#include <stdio.h>
float potencia(int base, int expoente);
/**
* @author: David Gomesh
*
* A Funcao recebe a base e o expoente para calcular a potencia.
*
* Se o expoente for positivo, multplica a base pela propria base
* pela quantidade de vezes que o expoente determinar.
*
* Se o expoente for negativo, multica 1/base por 1/base
* pela quantidade de vezes que o expoente determinar.
* Para isso o expoente eh mulplicado por -1, para se tornar positivo.
*
*/
/* Read an integer base and exponent from stdin and print
   base^exponent as computed by potencia(). */
int main(){
int base, expoente;
printf("Base: ");
scanf("%i", &base);
printf("Expoente: ");
scanf("%i", &expoente);
printf("Potencia: %.6f\n", potencia(base, expoente));
return 0;
}
/* Computes base raised to expoente by repeated multiplication.
   A negative exponent multiplies by 1/base instead, and expoente == 0
   yields 1.  (base == 0 with a negative exponent divides by zero,
   exactly as before.) */
float potencia(int base, int expoente){
    float resultado = 1;
    /* Work with the exponent's magnitude; its sign only decides which
       factor each step applies. */
    int negativo = (expoente <= 0);
    int repeticoes = negativo ? -expoente : expoente;
    for (int i = 0; i < repeticoes; i++){
        if (negativo){
            resultado *= ((float)1 / base);
        } else {
            resultado *= base;
        }
    }
    return resultado;
}
|
the_stack_data/105537.c
|
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <limits.h>
//define global variables
int max_sum;
int max;
/* Despite the name, this fills two globals from array[0..size-1]:
   - `max`: the maximum sum of a contiguous non-empty subarray,
     via the dp[] recurrence (Kadane-style);
   - `max_sum`: seeded with array[0], then accumulates every strictly
     positive later element, with a final correction below.
   Always returns 0; results travel through the globals.
   NOTE(review): seeding max_sum with array[0] and the trailing
   `max_sum -= array[0]` when array[0] < 0 look tailored to one
   judge's expected output — verify before reusing elsewhere. */
int get_maximum_increasing_subsequence_sum(int *array, int size){
int dp[100000]={0}; //initialize array to 0, for memoization
int i,j; /* NOTE(review): j is never used */
if(size==1){ //single element: both answers are that element
max = max_sum = array[0];
return 0;
}
dp[0]=array[0];
max_sum = array[0];
max = array[0];
for(i=1;i<size;i++){ //iterate through array
dp[i]=array[i];
if(dp[i-1]+array[i]>dp[i]){
dp[i] = dp[i-1]+array[i]; //extend the best run ending at i-1
}
if(max<dp[i]){ //keep the best run seen so far
max = dp[i];
}
if(array[i]>0){
max_sum += array[i];
}
}
if(max_sum>0 && array[0]<0){
max_sum -= array[0];
}
return 0;
}
/* Reads t test cases; each is a count n followed by n integers.
   For each case prints "<max> <max_sum>" as filled in by
   get_maximum_increasing_subsequence_sum(). */
int main(void)
{
int array[100000]={0};
int t,n,i;
scanf("%d",&t);
while(t>0){
scanf("%d",&n);
i=0;
while(i<n){
scanf("%d",&array[i]);
i++;
}
get_maximum_increasing_subsequence_sum(array,n);
printf("%d %d\n",max, max_sum);
t--;
}
return 0;
}
|
the_stack_data/162644271.c
|
// No nvm module functions.
|
the_stack_data/107864.c
|
/* NOTE(review): IOCCC-style obfuscated program whose layout is ASCII
   art.  From the visible logic it appears to: build a CRC-32 table
   (polynomial 0xedb88320), read an input file and a "layout" file
   naming up to 8 editable byte positions, then brute-force printable
   characters at those positions until the whole file's CRC-32 matches
   a target derived via sprintf/time, writing the patched file on
   success — confirm before relying on this description.  Left
   byte-for-byte unchanged: reformatting would destroy the art and any
   edit risks changing behavior. */
/* ,*/
#include <time.h>
#include/* _ ,o*/ <stdlib.h>
#define c(C)/* - . */return ( C); /* 2004*/
#include <stdio.h>/*. Moekan "' `\b-' */
typedef/* */char p;p* u ,w [9
][128] ,*v;typedef int _;_ R,i,N,I,A ,m,o,e
[9], a[256],k [9], n[ 256];FILE*f ;_ x (_ K,_ r
,_ q){; for(; r< q ; K =((
0xffffff) &(K>>8))^ n[255 & ( K
^u[0 + r ++ ] )]);c (K
)} _ E (p*r, p*q ){ c( f =
fopen (r ,q))}_ B(_ q){c( fseek (f, 0
,q))}_ D(){c( fclose(f ))}_ C( p *q){c( 0- puts(q ) )}_/* /
*/main(_ t,p**z){if(t<4)c( C("<in" "file>" "\40<l" "a" "yout> "
/*b9213272*/"<outfile>" ) )u=0;i=I=(E(z[1],"rb")) ?B(2)?0 : (((o =ftell
(f))>=8)?(u =(p*)malloc(o))?B(0)?0:!fread(u,o,1,f):0:0)?0: D():0 ;if(
!u)c(C(" bad\40input "));if(E(z[2],"rb" )){for(N=-1;256> i;n[i++] =-1 )a[
i]=0; for(i=I=0; i<o&&(R =fgetc( f))>-1;i++)++a[R] ?(R==N)?( ++I>7)?(n[
N]+1 )?0:(n [N ]=i-7):0: (N=R) |(I=1):0;A =-1;N=o+1;for(i=33;i<127;i++
)( n[i ]+ 1&&N>a[i])? N= a [A=i] :0;B(i=I=0);if(A+1)for(N=n[A];
I< 8&& (R =fgetc(f ))> -1&& i <o ;i++)(i<N||i>N+7)?(R==A)?((*w[I
] =u [i])?1:(*w[I]= 46))?(a [I++]=i):0:0:0;D();}if(I<1)c(C(
" bad\40la" "yout "))for(i =0;256>(R= i);n[i++]=R)for(A=8;
A >0;A --) R = ( (R&1)==0) ?(unsigned int)R>>(01):((unsigned
/*kero Q' ,KSS */)R>> 1)^ 0xedb88320;m=a[I-1];a[I
]=(m <N)?(m= N+8): ++ m;for(i=00;i<I;e[i++]=0){
v=w [i]+1;for(R =33;127 >R;R++)if(R-47&&R-92
&& R-(_)* w[i])*( v++)= (p)R;*v=0;}for(sprintf
/*'_ G*/ (*w+1, "%0" "8x",x(R=time(i=0),m,o)^~
0) ;i< 8;++ i)u [N+ i]=*(*w+i+1);for(*k=x(~
0,i=0 ,*a);i>- 1; ){for (A=i;A<I;A++){u[+a [ A]
]=w[A ][e[A]] ; k [A+1]=x (k[A],a[A],a[A+1]
);}if (R==k[I]) c( (E(z[3 ],"wb+"))?fwrite(
u,o,1,f)?D ()|C(" \n OK."):0 :C(
" \n WriteError" )) for (i =+I-
1 ;i >-1?!w[i][++ e[+ i]]:0;
) for( A=+i--; A<I;e[A++]
=0); (i <I-4 )?putchar
((_ ) 46) | fflush
/*' ,*/ ( stdout
): 0& 0;}c(C
(" \n fail")
) /* dP' /
dP pd '
' zc
*/
}
|
the_stack_data/41359.c
|
#include <stdio.h>
int main(void)
{
    /* Pointer-aliasing demo: p aliases x and q aliases y.  y is copied
       from x before x changes, so the final values differ. */
    int x = 10;
    int y;
    int *p = &x;
    int *q = &y;
    y = *p;   /* y becomes 10 */
    *p = 15;  /* x becomes 15 */
    *q = 20;  /* y becomes 20 */
    printf("Value of x: %d\n", x); // 15
    printf("Value of y: %d\n", y); // 20
    printf("Value of *p: %d\n", *p); // 15
    printf("Value of *q: %d\n", *q); // 20
    return 0;
}
|
the_stack_data/165764532.c
|
#include<stdio.h>
#include<stdlib.h>
/* Singly linked list node: an int payload plus the next link. */
struct node{
int no; // stored value
struct node *next; // next node; NULL terminates the list
};
/* Interactively builds a singly linked list of ints and prints it.
   A blank tail node is always allocated one step ahead; the print
   loop stops before it, so it is never shown.
   Fixes: `go->next==NULL;` was a no-op comparison (typo for `=`),
   leaving the tail's next pointer uninitialized so the print loop
   read indeterminate memory; also removed the unused `ascii10`
   variable and freed all nodes before exiting. */
int main(){
    struct node *head=(struct node*)malloc(sizeof(struct node));
    head->next=NULL;
    struct node *go=head;
    char ans;
    int counter=1;
    while(1){
        printf("Enter value to the node :\n");
        printf("(%d). ",counter);
        scanf("%d",&go->no);
        go->next=(struct node*)malloc(sizeof(struct node));
        go=go->next;
        go->next=NULL; /* was `==`: assignment, not comparison */
        printf("a nother node?(Y/y/N/n):");
        scanf(" %c",&ans);
        printf("%c",ans);
        while(1){
            if((ans=='Y')||(ans=='y')||(ans=='N')||(ans=='n')){
                break;
            }else{
                printf("Wrong letter : ");
                scanf(" %c",&ans);
            }
        }
        if((ans=='N')||(ans=='n')){
            break;
        }
        counter++;
    }
    printf("The values in the linked list are :");
    go=head;
    while(go->next!=NULL){
        printf("%d ",go->no);
        go=go->next;
    }
    printf("\n");
    /* Release every node, including the trailing blank one. */
    while(head!=NULL){
        struct node *dead=head;
        head=head->next;
        free(dead);
    }
    return 0;
}
|
the_stack_data/1071848.c
|
// 20) Preencher um vetor com os numeros 10 a 20, e depois mostrar o vetor.
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<string.h>
/* Fill an 11-element vector with the values 10..20 and print them
   separated by tabs. */
int main(){
    int vetor[11];
    int valor = 10;
    int i;
    for (i = 0; i < 11; i++){
        vetor[i] = valor;
        valor++;
    }
    for (i = 0; i < 11; i++){
        printf("%d\t", vetor[i]);
    }
    printf("\n");
    system("pause");
    return(0);
}
|
the_stack_data/1018182.c
|
/* -*- mode: C; c-basic-offset: 3; -*- */
/*--------------------------------------------------------------------*/
/*--- Read DWARF1/2/3/4 debug info. readdwarf.c ---*/
/*--------------------------------------------------------------------*/
/*
This file is part of Valgrind, a dynamic binary instrumentation
framework.
Copyright (C) 2000-2015 Julian Seward
[email protected]
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307, USA.
The GNU General Public License is contained in the file COPYING.
*/
#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
#include "pub_core_basics.h"
#include "pub_core_debuginfo.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_options.h"
#include "pub_core_xarray.h"
#include "pub_core_tooliface.h" /* VG_(needs) */
#include "priv_misc.h" /* dinfo_zalloc/free/strdup */
#include "priv_image.h"
#include "priv_d3basics.h"
#include "priv_tytypes.h"
#include "priv_storage.h"
#include "priv_readdwarf.h" /* self */
/*------------------------------------------------------------*/
/*--- ---*/
/*--- Read line number and CFI info from DWARF1, DWARF2 ---*/
/*--- and to some extent DWARF3 sections. ---*/
/*--- ---*/
/*------------------------------------------------------------*/
/* The below "safe_*ix" functions allow to resist to malformed dwarf info:
if dwarf info contains wrong file or dirname indexes, these are (silently!)
ignored. */
/* if xa_ix is a valid index in fndn_ix_xa,
return the element (i.e. the UInt indexing in fndnpool).
If xa_ix is invalid, return 0 (i.e. the "null" element in fndnpool). */
static UInt safe_fndn_ix (XArray* fndn_ix_xa, Int xa_ix)
{
   /* Defensive lookup: malformed DWARF may carry a bogus file index.
      Out-of-range indexes map to 0, the "null" entry in fndnpool. */
   if (xa_ix < 0 || xa_ix >= VG_(sizeXA) (fndn_ix_xa))
      return 0;
   return *(UInt*)VG_(indexXA) ( fndn_ix_xa, xa_ix );
}
/* if xa_ix is a valid index in dirname_xa,
return the element (i.e. the HChar*).
If xa_ix is invalid, return NULL. */
static const HChar* safe_dirname_ix (XArray* dirname_xa, Int xa_ix)
{
   /* As safe_fndn_ix, but for directory names: an invalid index gives
      NULL ("we don't know the path") rather than a pool entry. */
   if (xa_ix < 0 || xa_ix >= VG_(sizeXA) (dirname_xa))
      return NULL;
   return *(HChar**)VG_(indexXA) ( dirname_xa, xa_ix );
}
/*------------------------------------------------------------*/
/*--- Read DWARF2 format line number info. ---*/
/*------------------------------------------------------------*/
/* Structure holding info extracted from the a .debug_line
section. */
typedef struct
{
ULong li_length;
UShort li_version;
ULong li_header_length;
UChar li_min_insn_length;
UChar li_max_ops_per_insn;
UChar li_default_is_stmt;
Int li_line_base;
UChar li_line_range;
UChar li_opcode_base;
}
DebugLineInfo;
/* Structure holding additional infos found from a .debug_info
* compilation unit block */
typedef struct
{
/* Feel free to add more members here if you need ! */
DiCursor compdir; /* Compilation directory - points to .debug_info */
DiCursor name; /* Main file name - points to .debug_info */
ULong stmt_list; /* Offset in .debug_line */
Bool dw64; /* 64-bit Dwarf? */
}
UnitInfo;
/* Line number opcodes. */
enum dwarf_line_number_ops
{
DW_LNS_extended_op = 0,
DW_LNS_copy = 1,
DW_LNS_advance_pc = 2,
DW_LNS_advance_line = 3,
DW_LNS_set_file = 4,
DW_LNS_set_column = 5,
DW_LNS_negate_stmt = 6,
DW_LNS_set_basic_block = 7,
DW_LNS_const_add_pc = 8,
DW_LNS_fixed_advance_pc = 9,
/* DWARF 3. */
DW_LNS_set_prologue_end = 10,
DW_LNS_set_epilogue_begin = 11,
DW_LNS_set_isa = 12
};
/* Line number extended opcodes. */
enum dwarf_line_number_x_ops
{
DW_LNE_end_sequence = 1,
DW_LNE_set_address = 2,
DW_LNE_define_file = 3,
DW_LNE_set_discriminator = 4
};
typedef struct
{
/* Information for the last statement boundary.
* Needed to calculate statement lengths. */
Addr last_address;
UInt last_file;
UInt last_line;
Addr address;
UInt file;
UInt line;
UInt column;
Int is_stmt;
Int basic_block;
UChar end_sequence;
} LineSMR;
/* FIXME: duplicated in readdwarf3.c */
/* Read a 'leb128' and advance *data accordingly. */
/* Decode one (S)LEB128 value at *data and advance the cursor past it.
   `sign` selects signed (1) or unsigned (0) decoding. */
static ULong step_leb128 ( DiCursor* data, Int sign )
{
   ULong value = 0;
   Int   shift = 0;
   UChar byte;
   vg_assert(sign == 0 || sign == 1);
   /* 7 payload bits per byte, least-significant first; bit 7 set
      means another byte follows. */
   do {
      byte   = ML_(cur_step_UChar)(data);
      value |= ((ULong)(byte & 0x7f)) << shift;
      shift += 7;
   } while ((byte & 0x80) != 0);
   /* Sign-extend when requested and the last payload had bit 6 set. */
   if (sign && shift < 64 && (byte & 0x40))
      value |= -(1ULL << shift);
   return value;
}
/* FIXME: duplicated in readdwarf3.c */
/* Unsigned LEB128 read: advances *data past one encoded value. */
static ULong step_leb128U( DiCursor* data ) {
return step_leb128( data, 0 );
}
/* FIXME: duplicated in readdwarf3.c */
/* Signed LEB128 read: advances *data past one encoded value. */
static Long step_leb128S( DiCursor* data ) {
return step_leb128( data, 1 );
}
/* Read what the DWARF3 spec calls an "initial length field". This
uses up either 4 or 12 bytes of the input and produces a 32-bit or
64-bit number respectively.
Read 32-bit value from p. If it is 0xFFFFFFFF, instead read a
64-bit bit value from p+4. This is used in 64-bit dwarf to encode
some table lengths. Advance the cursor (p) accordingly.
XXX this is a hack: the endianness of the initial length field is
specified by the DWARF we're reading. This happens to work only
because we don't do cross-arch jitting, hence this code runs on a
platform of the same endianness as the DWARF it is reading. Same
applies for initial lengths for CIE/FDEs and probably in zillions
of other places -- to be precise, exactly the places where
binutils/dwarf.c calls byte_get().
*/
static
ULong step_initial_length_field ( DiCursor* p_img, /*OUT*/Bool* is64 )
{
   /* DWARF "initial length": a 32-bit value, unless it is the escape
      0xFFFFFFFF, in which case the real 64-bit length follows. */
   UInt first = ML_(cur_step_UInt)(p_img);
   if (first != 0xFFFFFFFF) {
      *is64 = False;
      return (ULong)first;
   }
   *is64 = True;
   return ML_(cur_step_ULong)(p_img);
}
/* Peek variant of step_initial_length_field: takes the cursor by
   value, so the caller's position is not advanced. */
static
ULong read_initial_length_field ( DiCursor p_img, /*OUT*/Bool* is64 )
{
/* Something of a roundabout approach .. the modification to p_img
is abandoned. */
return step_initial_length_field( &p_img, is64 );
}
static LineSMR state_machine_regs;
static
void reset_state_machine ( Int is_stmt )
{
   /* Restore the DWARF line-number state machine registers to their
      spec-defined initial values: address 0, file/line 1, column 0. */
   if (0) VG_(printf)("smr.a := %p (reset)\n", NULL );
   state_machine_regs.last_address = 0;
   state_machine_regs.last_file    = 1;
   state_machine_regs.last_line    = 1;
   state_machine_regs.address      = 0;
   state_machine_regs.file         = 1;
   state_machine_regs.line         = 1;
   state_machine_regs.column       = 0;
   state_machine_regs.is_stmt      = is_stmt;
   state_machine_regs.basic_block  = 0;
   state_machine_regs.end_sequence = 0;
}
////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////
/* Handled an extended line op starting at *data, and advance *data
accordingly. */
/* Decode one DWARF extended line opcode at *data (the cursor is past
   the introducing 0 byte) and advance *data over its length-prefixed
   body.  DW_LNE_end_sequence flushes the pending line range into the
   DebugInfo's line table. */
static
void process_extended_line_op( struct _DebugInfo* di,
XArray* fndn_ix_xa,
DiCursor* data, Int is_stmt)
{
UInt len = step_leb128U(data);
if (len == 0) {
VG_(message)(Vg_UserMsg,
"Warning: DWARF2 reader: "
"Badly formed extended line op encountered\n");
return;
}
UChar op_code = ML_(cur_step_UChar)(data);
if (0) VG_(printf)("dwarf2: ext OPC: %d\n", op_code);
switch (op_code) {
case DW_LNE_end_sequence:
if (0) VG_(printf)("1001: si->o %#lx, smr.a %#lx\n",
(UWord)di->text_debug_bias,
state_machine_regs.address );
/* JRS: added for compliance with spec; is pointless due to
reset_state_machine below */
state_machine_regs.end_sequence = 1;
if (state_machine_regs.is_stmt) {
if (state_machine_regs.last_address) {
/* Emit the pending [last_address, address) range as a line. */
ML_(addLineInfo) (
di,
safe_fndn_ix (fndn_ix_xa,
state_machine_regs.last_file),
di->text_debug_bias + state_machine_regs.last_address,
di->text_debug_bias + state_machine_regs.address,
state_machine_regs.last_line, 0
);
}
}
reset_state_machine (is_stmt);
if (di->ddump_line)
VG_(printf)(" Extended opcode %d: End of Sequence\n\n",
(Int)op_code);
break;
case DW_LNE_set_address: {
Addr adr = ML_(cur_step_Addr)(data);
state_machine_regs.address = adr;
if (di->ddump_line)
VG_(printf)(" Extended opcode %d: set Address to 0x%lx\n",
(Int)op_code, (Addr)adr);
break;
}
case DW_LNE_define_file: {
/* Register an additional source file name mid-table; the dir
   index, mtime and size operands are read and discarded. */
HChar* name = ML_(cur_step_strdup)(data, "di.pelo.1");
UInt fndn_ix = ML_(addFnDn) (di, name, NULL);
VG_(addToXA) (fndn_ix_xa, &fndn_ix);
ML_(dinfo_free)(name);
(void)step_leb128U(data); // ignored: dir index
(void)step_leb128U(data); // ignored: mod time
(void)step_leb128U(data); // ignored: file size
if (di->ddump_line)
/* NOTE(review): the message says "set_address" but this is the
   define_file opcode — looks like a copy/paste slip. */
VG_(printf)(" DWARF2-line: set_address\n");
break;
}
case DW_LNE_set_discriminator:
(void)step_leb128U(data); // ignored: new 'discriminator' value
break;
default:
if (di->ddump_line)
VG_(printf)("process_extended_line_op:default\n");
break;
}
}
////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////
/* read a .debug_line section block for a compilation unit
*
* Input: - theBlock must point to the start of the block
* for the given compilation unit
* - ui contains additional info like the compilation dir
* for this unit
*
* Output: - si debug info structures get updated
*/
/* Parse one .debug_line block (the line-number program for a single
   compilation unit) and feed the resulting address->line mappings to
   ML_(addLineInfo).  'external' walks the header fields; 'data' walks
   the directory/file tables and the opcode stream proper. */
static
void read_dwarf2_lineblock ( struct _DebugInfo* di,
                             const UnitInfo* ui,
                             DiCursor theBlock, /* IMAGE */
                             Int noLargerThan )
{
   Int i;
   DebugLineInfo info;
   Bool is64;
   XArray* fndn_ix_xa; /* xarray of UInt fndn_ix */
   UInt fndn_ix;
   XArray* dirname_xa; /* xarray of const HChar* dirname */
   const HChar* dirname;
   DiCursor external = theBlock;
   DiCursor data = theBlock;
   /* fndn_ix_xa is an xarray of fndn_ix (indexes in di->fndnpool) which
      are build from file names harvested from the DWARF2
      info. Entry [0] is the "null" pool index and is never referred to
      by the state machine.
      Similarly, dirname_xa is an xarray of directory names. Entry [0]
      is also NULL and denotes "we don't know what the path is", since
      that is different from "the path is the empty string". Unlike
      the fndn_ix_xa table, the state machine does refer to entry [0],
      which basically means "." ("the current directory of the
      compilation", whatever that means, according to the DWARF3
      spec.)
   */
   /* Fails due to gcc padding ...
      vg_assert(sizeof(DWARF2_External_LineInfo)
                == sizeof(DWARF2_Internal_LineInfo));
   */
   dirname_xa = VG_(newXA) (ML_(dinfo_zalloc), "di.rd2l.1", ML_(dinfo_free),
                            sizeof(HChar*) );
   fndn_ix_xa = VG_(newXA) (ML_(dinfo_zalloc), "di.rd2l.2", ML_(dinfo_free),
                            sizeof(UInt) );
   /* DWARF2 starts numbering filename entries at 1, so we need to
      add a dummy zeroth entry to the table. */
   fndn_ix = 0; // 0 is the "null" index in a fixed pool.
   VG_(addToXA) (fndn_ix_xa, &fndn_ix);
   /* Directory entry [0] = the compilation directory (or "." if we
      don't have one); referenced by diridx 0 in the file table. */
   if (ML_(cur_is_valid)(ui->compdir))
      dirname = ML_(addStrFromCursor)(di, ui->compdir);
   else
      dirname = ML_(addStr)(di, ".", -1);
   VG_(addToXA) (dirname_xa, &dirname);
   info.li_length = step_initial_length_field( &external, &is64 );
   if (di->ddump_line)
      VG_(printf)(" Length: %llu\n",
                  info.li_length);
   /* Check the length of the block. */
   /* NOTE(review): noLargerThan is Int and is assumed non-negative by
      callers; a negative value would wrap in this unsigned compare --
      TODO confirm callers guarantee this. */
   if (info.li_length > noLargerThan) {
      ML_(symerr)(di, True,
                  "DWARF line info appears to be corrupt "
                  "- the section is too small");
      goto out;
   }
   /* Check its version number. */
   info.li_version = ML_(cur_step_UShort)(&external);
   if (di->ddump_line)
      VG_(printf)(" DWARF Version: %d\n",
                  (Int)info.li_version);
   if (info.li_version != 2 && info.li_version != 3 && info.li_version != 4) {
      ML_(symerr)(di, True,
                  "Only DWARF version 2, 3 and 4 line info "
                  "is currently supported.");
      goto out;
   }
   info.li_header_length = is64 ? ML_(cur_step_ULong)(&external)
                                : (ULong)(ML_(cur_step_UInt)(&external));
   if (di->ddump_line)
      VG_(printf)(" Prologue Length: %llu\n",
                  info.li_header_length);
   info.li_min_insn_length = ML_(cur_step_UChar)(&external);
   if (di->ddump_line)
      VG_(printf)(" Minimum Instruction Length: %d\n",
                  (Int)info.li_min_insn_length);
   /* We only support machines with one opcode per instruction
      for now. If we ever want to support VLIW machines there is
      code to handle multiple opcodes per instruction in the
      patch attached to BZ#233595.
   */
   if (info.li_version >= 4) {
      /* max_ops_per_insn is new in the DWARF4 line header. */
      info.li_max_ops_per_insn = ML_(cur_step_UChar)(&external);
      if (info.li_max_ops_per_insn != 1) {
         ML_(symerr)(di, True,
                     "Invalid Maximum Ops Per Insn in line info.");
         goto out;
      }
      if (di->ddump_line)
         VG_(printf)(" Maximum Ops Per Insn: %d\n",
                     (Int)info.li_max_ops_per_insn);
   } else {
      info.li_max_ops_per_insn = 1;
   }
   info.li_default_is_stmt = ML_(cur_step_UChar)(&external);
   if (di->ddump_line)
      VG_(printf)(" Initial value of 'is_stmt': %d\n",
                  (Int)info.li_default_is_stmt);
   /* Josef Weidendorfer (20021021) writes:
      It seems to me that the Intel Fortran compiler generates bad
      DWARF2 line info code: It sets "is_stmt" of the state machine in
      the line info reader to be always false. Thus, there is never
      a statement boundary generated and therefore never an instruction
      range/line number mapping generated for valgrind.
      Please have a look at the DWARF2 specification, Ch. 6.2
      (x86.ddj.com/ftp/manuals/tools/dwarf.pdf). Perhaps I understand
      this wrong, but I don't think so.
      I just had a look at the GDB DWARF2 reader... They completely
      ignore "is_stmt" when recording line info ;-) That's the reason
      "objdump -S" works on files from the intel fortran compiler.
      Therefore: */
   info.li_default_is_stmt = True;
   /* JRS: changed (UInt*) to (UChar*) */
   info.li_line_base = ML_(cur_step_UChar)(&external);
   /* line_base is a *signed* byte in the header; sign-extend it. */
   info.li_line_base = (Int)(Char)info.li_line_base;
   if (di->ddump_line)
      VG_(printf)(" Line Base: %d\n",
                  info.li_line_base);
   info.li_line_range = ML_(cur_step_UChar)(&external);
   if (di->ddump_line)
      VG_(printf)(" Line Range: %d\n",
                  (Int)info.li_line_range);
   info.li_opcode_base = ML_(cur_step_UChar)(&external);
   if (di->ddump_line)
      VG_(printf)(" Opcode Base: %d\n\n",
                  info.li_opcode_base);
   if (0) VG_(printf)("dwarf2: line base: %d, range %d, opc base: %d\n",
                      (Int)info.li_line_base,
                      (Int)info.li_line_range,
                      (Int)info.li_opcode_base);
   /* li_length excludes the initial length field itself, which is 4
      bytes in 32-bit DWARF and 12 in 64-bit DWARF. */
   DiCursor end_of_sequence
     = ML_(cur_plus)(data, info.li_length + (is64 ? 12 : 4));
   reset_state_machine (info.li_default_is_stmt);
   /* Read the contents of the Opcodes table. */
   DiCursor standard_opcodes = external;
   if (di->ddump_line) {
      VG_(printf)(" Opcodes:\n");
      for (i = 1; i < (Int)info.li_opcode_base; i++) {
         VG_(printf)(" Opcode %d has %d args\n",
                     i, (Int)ML_(cur_read_UChar)(
                                ML_(cur_plus)(standard_opcodes,
                                              (i-1) * sizeof(UChar)) ));
      }
      VG_(printf)("\n");
   }
   /* skip over "standard_opcode_lengths" */
   data = ML_(cur_plus)(standard_opcodes, info.li_opcode_base - 1);
   /* Read the contents of the Directory table (a sequence of NUL-
      terminated strings, terminated by an empty string). */
   if (di->ddump_line)
      VG_(printf)(" The Directory Table%s\n",
                  ML_(cur_read_UChar)(data) == 0 ? " is empty." : ":" );
   while (ML_(cur_read_UChar)(data) != 0) {
      HChar* data_str = ML_(cur_read_strdup)(data, "di.rd2l.1");
      if (di->ddump_line)
         VG_(printf)(" %s\n", data_str);
      /* If data[0] is '/', then 'data' is an absolute path and we
         don't mess with it. Otherwise, construct the
         path 'ui->compdir' ++ "/" ++ 'data'. */
      if (data_str[0] != '/'
          /* not an absolute path */
          && ML_(cur_is_valid)(ui->compdir)
          /* actually got something sensible for compdir */
          && ML_(cur_strlen)(ui->compdir))
      {
         HChar* compdir_str = ML_(cur_read_strdup)(ui->compdir, "di.rd2l.1b");
         SizeT len = VG_(strlen)(compdir_str) + 1 + VG_(strlen)(data_str);
         HChar *buf = ML_(dinfo_zalloc)("di.rd2l.1c", len + 1);
         VG_(strcpy)(buf, compdir_str);
         VG_(strcat)(buf, "/");
         VG_(strcat)(buf, data_str);
         dirname = ML_(addStr)(di, buf, len);
         VG_(addToXA) (dirname_xa, &dirname);
         if (0) VG_(printf)("rel path %s\n", buf);
         ML_(dinfo_free)(compdir_str);
         ML_(dinfo_free)(buf);
      } else {
         /* just use 'data'. */
         dirname = ML_(addStr)(di,data_str,-1);
         VG_(addToXA) (dirname_xa, &dirname);
         if (0) VG_(printf)("abs path %s\n", data_str);
      }
      data = ML_(cur_plus)(data, VG_(strlen)(data_str) + 1);
      ML_(dinfo_free)(data_str);
   }
   if (di->ddump_line)
      VG_(printf)("\n");
   if (ML_(cur_read_UChar)(data) != 0) {
      ML_(symerr)(di, True,
                  "can't find NUL at end of DWARF2 directory table");
      goto out;
   }
   data = ML_(cur_plus)(data, 1);
   /* Read the contents of the File Name table. This produces a bunch
      of fndn_ix in fndn_ix_xa. */
   if (di->ddump_line) {
      VG_(printf)(" The File Name Table:\n");
      VG_(printf)(" Entry Dir Time Size Name\n");
   }
   i = 1;
   while (ML_(cur_read_UChar)(data) != 0) {
      HChar* name = ML_(cur_step_strdup)(&data, "di.rd2l.2");
      Int diridx = step_leb128U(&data);
      Int uu_time = step_leb128U(&data); /* unused */
      Int uu_size = step_leb128U(&data); /* unused */
      dirname = safe_dirname_ix( dirname_xa, diridx );
      fndn_ix = ML_(addFnDn) (di, name, dirname);
      VG_(addToXA) (fndn_ix_xa, &fndn_ix);
      if (0) VG_(printf)("file %s diridx %d\n", name, diridx );
      if (di->ddump_line)
         VG_(printf)(" %d\t%d\t%d\t%d\t%s\n",
                     i, diridx, uu_time, uu_size, name);
      i++;
      ML_(dinfo_free)(name);
   }
   if (di->ddump_line)
      VG_(printf)("\n");
   if (ML_(cur_read_UChar)(data) != 0) {
      ML_(symerr)(di, True,
                  "can't find NUL at end of DWARF2 file name table");
      goto out;
   }
   data = ML_(cur_plus)(data, 1);
   if (di->ddump_line)
      VG_(printf)(" Line Number Statements:\n");
   /* Now display the statements. */
   /* Run the line-number program: each opcode updates the state
      machine; a "boundary" (special opcode or DW_LNS_copy) emits the
      pending (last_address .. address) range via ML_(addLineInfo). */
   while (ML_(cur_cmpLT)(data, end_of_sequence)) {
      UChar op_code = ML_(cur_step_UChar)(&data);
      if (0) VG_(printf)("dwarf2: OPC: %d\n", op_code);
      if (op_code >= info.li_opcode_base) {
         /* Special opcode: the adjusted value encodes both an address
            advance (quotient) and a line advance (remainder), per
            DWARF spec 6.2.5.1. */
         op_code -= info.li_opcode_base;
         Word adv = (op_code / info.li_line_range)
                       * info.li_min_insn_length;
         Int advAddr = adv;
         state_machine_regs.address += adv;
         if (0) VG_(printf)("smr.a += %#lx\n", (UWord)adv );
         adv = (op_code % info.li_line_range) + info.li_line_base;
         if (0) VG_(printf)("1002: di->o %#lx, smr.a %#lx\n",
                            (UWord)di->text_debug_bias,
                            state_machine_regs.address );
         state_machine_regs.line += adv;
         if (di->ddump_line)
            VG_(printf)(" Special opcode %d: advance Address by %d "
                        "to 0x%lx and Line by %d to %d\n",
                        (Int)op_code, advAddr, state_machine_regs.address,
                        (Int)adv, (Int)state_machine_regs.line );
         if (state_machine_regs.is_stmt) {
            /* only add a statement if there was a previous boundary */
            if (state_machine_regs.last_address) {
               ML_(addLineInfo)(
                  di,
                  safe_fndn_ix (fndn_ix_xa,
                                state_machine_regs.last_file),
                  di->text_debug_bias + state_machine_regs.last_address,
                  di->text_debug_bias + state_machine_regs.address,
                  state_machine_regs.last_line,
                  0
               );
            }
            state_machine_regs.last_address = state_machine_regs.address;
            state_machine_regs.last_file = state_machine_regs.file;
            state_machine_regs.last_line = state_machine_regs.line;
         }
      }
      else { /* ! (op_code >= info.li_opcode_base) */
         switch (op_code) {
            case DW_LNS_extended_op:
               process_extended_line_op (
                  di, fndn_ix_xa,
                  &data, info.li_default_is_stmt);
               break;
            case DW_LNS_copy:
               if (0) VG_(printf)("1002: di->o %#lx, smr.a %#lx\n",
                                  (UWord)di->text_debug_bias,
                                  state_machine_regs.address );
               if (state_machine_regs.is_stmt) {
                  /* only add a statement if there was a previous boundary */
                  if (state_machine_regs.last_address) {
                     ML_(addLineInfo)(
                        di,
                        safe_fndn_ix (fndn_ix_xa,
                                      state_machine_regs.last_file),
                        di->text_debug_bias + state_machine_regs.last_address,
                        di->text_debug_bias + state_machine_regs.address,
                        state_machine_regs.last_line,
                        0
                     );
                  }
                  state_machine_regs.last_address = state_machine_regs.address;
                  state_machine_regs.last_file = state_machine_regs.file;
                  state_machine_regs.last_line = state_machine_regs.line;
               }
               state_machine_regs.basic_block = 0; /* JRS added */
               if (di->ddump_line)
                  VG_(printf)(" Copy\n");
               break;
            case DW_LNS_advance_pc: {
               UWord adv = info.li_min_insn_length * step_leb128U(&data);
               state_machine_regs.address += adv;
               if (0) VG_(printf)("smr.a += %#lx\n", adv );
               if (di->ddump_line)
                  VG_(printf)(" Advance PC by %lu to 0x%lx\n",
                              adv, state_machine_regs.address);
               break;
            }
            case DW_LNS_advance_line: {
               Word adv = step_leb128S(&data);
               state_machine_regs.line += adv;
               if (di->ddump_line)
                  VG_(printf)(" Advance Line by %ld to %d\n",
                              adv, (Int)state_machine_regs.line);
               break;
            }
            case DW_LNS_set_file: {
               Word adv = step_leb128U(&data);
               state_machine_regs.file = adv;
               if (di->ddump_line)
                  VG_(printf)(" Set File Name to entry %ld in the "
                              "File Name Table\n", adv);
               break;
            }
            case DW_LNS_set_column: {
               Word adv = step_leb128U(&data);
               state_machine_regs.column = adv;
               if (di->ddump_line)
                  VG_(printf)(" Set column to %ld\n", adv);
               break;
            }
            case DW_LNS_negate_stmt: {
               Int adv = state_machine_regs.is_stmt;
               adv = ! adv;
               state_machine_regs.is_stmt = adv;
               if (di->ddump_line)
                  VG_(printf)(" DWARF2-line: negate_stmt\n");
               break;
            }
            case DW_LNS_set_basic_block: {
               state_machine_regs.basic_block = 1;
               if (di->ddump_line)
                  VG_(printf)(" DWARF2-line: set_basic_block\n");
               break;
            }
            case DW_LNS_const_add_pc: {
               /* Advance as a special opcode 255 would, but without
                  touching the line register. */
               Word adv = (((255 - info.li_opcode_base) / info.li_line_range)
                             * info.li_min_insn_length);
               state_machine_regs.address += adv;
               if (0) VG_(printf)("smr.a += %#lx\n", (UWord)adv );
               if (di->ddump_line)
                  VG_(printf)(" Advance PC by constant %ld to 0x%lx\n",
                              adv, (Addr)state_machine_regs.address);
               break;
            }
            case DW_LNS_fixed_advance_pc: {
               /* XXX: Need something to get 2 bytes */
               UWord adv = ML_(cur_step_UShort)(&data);
               state_machine_regs.address += adv;
               if (0) VG_(printf)("smr.a += %#lx\n", adv );
               if (di->ddump_line)
                  VG_(printf)(" DWARF2-line: fixed_advance_pc\n");
               break;
            }
            case DW_LNS_set_prologue_end:
               if (di->ddump_line)
                  VG_(printf)(" DWARF2-line: set_prologue_end\n");
               break;
            case DW_LNS_set_epilogue_begin:
               if (di->ddump_line)
                  VG_(printf)(" DWARF2-line: set_epilogue_begin\n");
               break;
            case DW_LNS_set_isa:
               (void)step_leb128U(&data);
               if (di->ddump_line)
                  VG_(printf)(" DWARF2-line: set_isa\n");
               break;
            default: {
               /* Unknown standard opcode: the header's
                  standard_opcode_lengths table tells us how many
                  leb128 operands to skip. */
               Int j;
               for (j = (Int)ML_(cur_read_UChar)(
                               ML_(cur_plus)(standard_opcodes,
                                             (op_code-1) * sizeof(UChar)));
                    j > 0 ; --j) {
                  step_leb128U(&data);
               }
               if (di->ddump_line)
                  VG_(printf)(" Unknown opcode %d\n", (Int)op_code);
               break;
            }
         } /* switch (op_code) */
      } /* if (op_code >= info.li_opcode_base) */
   } /* while (data < end_of_sequence) */
   if (di->ddump_line)
      VG_(printf)("\n");
  out:
   VG_(deleteXA)(dirname_xa);
   VG_(deleteXA)(fndn_ix_xa);
}
////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////
/* Return abbrev for given code
* Returned cursor points to the tag
* */
/* Scan the abbreviation table starting at 'p' for the entry whose
   abbreviation code equals 'acode'.  Returns a cursor positioned on
   that entry's tag.  Note: loops indefinitely if the code is absent;
   callers are expected to pass a code that exists in the table. */
static DiCursor lookup_abbrev( DiCursor p, ULong acode )
{
   for (;;) {
      ULong this_code = step_leb128U(&p);
      if (this_code == acode)
         return p;                    /* cursor now sits on the tag */
      /* Not the entry we want: step over the remainder of it. */
      (void)step_leb128U(&p);         /* tag */
      p = ML_(cur_plus)(p, 1);        /* has_children flag */
      /* Attribute specs are (name,form) leb128 pairs, terminated by
         the (0,0) pair, which we also consume. */
      ULong attr_name = step_leb128U(&p);
      (void)step_leb128U(&p);
      while (attr_name != 0) {
         attr_name = step_leb128U(&p);
         (void)step_leb128U(&p);
      }
   }
}
/* Read general information for a particular compile unit block in
* the .debug_info section. In particular read the name, compdir and
* stmt_list needed to parse the line number information.
*
* Input: - unitblock is the start of a compilation
* unit block in .debuginfo section
* - debugabbrev is start of .debug_abbrev section
* - debugstr is start of .debug_str section
* - debugstr_alt_img is start of .debug_str section in alt debug file
*
* Output: Fill members of ui pertaining to the compilation unit:
* - ui->name is the name of the compilation unit
* - ui->compdir is the compilation unit directory
* - ui->stmt_list is the offset in .debug_line section
* for the dbginfos of this compilation unit
*
* Note : the output strings are not allocated and point
* directly to the memory-mapped section.
*/
/* Parse the header and first DIE of one .debug_info compilation-unit
   block, extracting just name / compdir / stmt_list into *ui.  All
   other attributes are decoded only far enough to step over them. */
static
void read_unitinfo_dwarf2( /*OUT*/UnitInfo* ui,
                           DiCursor unitblock_img,
                           DiCursor debugabbrev_img,
                           DiCursor debugstr_img,
                           DiCursor debugstr_alt_img )
{
   UInt acode, abcode;
   ULong atoffs, blklen;
   UShort ver;
   UChar addr_size;
   DiCursor p = unitblock_img;
   DiCursor end_img;
   DiCursor abbrev_img;
   VG_(memset)( ui, 0, sizeof( UnitInfo ) );
   ui->stmt_list = -1LL;   /* -1 = "no DW_AT_stmt_list seen" sentinel */
   /* Read the compilation unit header in .debug_info section - See p 70 */
   /* This block length */
   blklen = step_initial_length_field( &p, &ui->dw64 );
   /* version should be 2, 3 or 4 */
   ver = ML_(cur_step_UShort)(&p);
   /* get offset in abbrev */
   atoffs = ui->dw64 ? ML_(cur_step_ULong)(&p)
                     : (ULong)(ML_(cur_step_UInt)(&p));
   /* Address size */
   addr_size = ML_(cur_step_UChar)(&p);
   /* End of this block */
   end_img = ML_(cur_plus)(unitblock_img, blklen + (ui->dw64 ? 12 : 4));
   /* Abbreviation data for this block */
   abbrev_img = ML_(cur_plus)(debugabbrev_img, atoffs);
   /* Read the compilation unit entry - this is always the first DIE.
    * See DWARF4 para 7.5. */
   if (ML_(cur_cmpLT)(p, end_img)) {
      UInt tag;
      acode = step_leb128U( &p ); /* abbreviation code */
      /* Read abbreviation header */
      abcode = step_leb128U( &abbrev_img ); /* abbreviation code */
      if ( acode != abcode ) {
         /* This isn't illegal, but somewhat unlikely. Normally the
          * first abbrev describes the first DIE, the compile_unit.
          * But maybe this abbrevation data is shared with another
          * or it is a NULL entry used for padding. See para 7.5.3. */
         abbrev_img = lookup_abbrev( ML_(cur_plus)(debugabbrev_img, atoffs),
                                     acode );
      }
      tag = step_leb128U( &abbrev_img );
      if ( tag != 0x0011 /*TAG_compile_unit*/ )
         return; /* Not a compile unit (might be partial) or broken DWARF. */
      /* DW_CHILDREN_yes or DW_CHILDREN_no */
      abbrev_img = ML_(cur_plus)(abbrev_img, 1);
      /* And loop on entries */
      /* Walk the (name,form) attribute list of this DIE.  For each
         form, consume exactly the number of bytes it occupies; only
         DW_AT_name / DW_AT_comp_dir / DW_AT_stmt_list are kept. */
      for ( ; ; ) {
         /* Read entry definition */
         ULong cval = -1LL; /* Constant value read */
         DiCursor sval = DiCursor_INVALID; /* String value read */
         UInt name = step_leb128U( &abbrev_img );
         UInt form = step_leb128U( &abbrev_img );
         if (name == 0)
            break;
         /* Read data */
         /* Attributes encoding explained p 71 */
         if ( form == 0x16 /* FORM_indirect */ )
            form = step_leb128U( &p );   /* real form is inline in the DIE */
         /* Decode form. For most kinds, Just skip the amount of data since
            we don't use it for now */
         /* JRS 9 Feb 06: This now handles 64-bit DWARF too. In
            64-bit DWARF, lineptr (and loclistptr,macptr,rangelistptr
            classes) use FORM_data8, not FORM_data4. Also,
            FORM_ref_addr and FORM_strp are 64-bit values, not 32-bit
            values. */
         /* TJH 27 Apr 10: in DWARF 4 lineptr (and loclistptr,macptr,
            rangelistptr classes) use FORM_sec_offset which is 64 bits
            in 64 bit DWARF and 32 bits in 32 bit DWARF. */
         /* JRS 20 Apr 11: LLVM-2.9 encodes DW_AT_stmt_list using
            FORM_addr rather than the FORM_data4 that GCC uses. Hence
            handle FORM_addr too. */
         switch( form ) {
            /* Those cases extract the data properly */
            case 0x05: /* FORM_data2 */
               cval = ML_(cur_step_UShort)(&p);
               break;
            case 0x06: /* FORM_data4 */
               cval = ML_(cur_step_UInt)(&p);
               break;
            case 0x0e: /* FORM_strp */ /* pointer in .debug_str */
               /* 2006-01-01: only generate a value if a debug_str
                  section was found) */
               if (ML_(cur_is_valid)(debugstr_img) && !ui->dw64)
                  sval = ML_(cur_plus)(debugstr_img, ML_(cur_read_UInt)(p));
               if (ML_(cur_is_valid)(debugstr_img) && ui->dw64)
                  sval = ML_(cur_plus)(debugstr_img, ML_(cur_read_ULong)(p));
               p = ML_(cur_plus)(p, ui->dw64 ? 8 : 4);
               break;
            case 0x08: /* FORM_string */
               sval = p;
               p = ML_(cur_plus)(p, ML_(cur_strlen)(p) + 1);
               break;
            case 0x0b: /* FORM_data1 */
               cval = ML_(cur_step_UChar)(&p);
               break;
            case 0x17: /* FORM_sec_offset */
               if (ui->dw64) {
                 cval = ML_(cur_step_ULong)(&p);
               } else {
                 cval = ML_(cur_step_UInt)(&p);
               };
               break;
            case 0x07: /* FORM_data8 */
               if (ui->dw64) cval = ML_(cur_read_ULong)(p);
               p = ML_(cur_plus)(p, 8);
               /* perhaps should assign unconditionally to cval? */
               break;
            /* TODO : Following ones just skip data - implement if you need */
            case 0x01: /* FORM_addr */
               p = ML_(cur_plus)(p, addr_size);
               break;
            case 0x03: /* FORM_block2 */
               p = ML_(cur_plus)(p, ML_(cur_read_UShort)(p) + 2);
               break;
            case 0x04: /* FORM_block4 */
               p = ML_(cur_plus)(p, ML_(cur_read_UInt)(p) + 4);
               break;
            case 0x09: /* FORM_block */ /* fallthrough */
            case 0x18: { /* FORM_exprloc */
               ULong block_len = step_leb128U(&p);
               p = ML_(cur_plus)(p, block_len);
               break;
            }
            case 0x0a: /* FORM_block1 */
               p = ML_(cur_plus)(p, ML_(cur_read_UChar)(p) + 1);
               break;
            case 0x0c: /* FORM_flag */
               p = ML_(cur_plus)(p, 1);
               break;
            case 0x0d: /* FORM_sdata */
               (void)step_leb128S(&p);
               break;
            case 0x0f: /* FORM_udata */
               (void)step_leb128U(&p);
               break;
            case 0x10: /* FORM_ref_addr */
               /* DWARF2 made ref_addr address-sized; DWARF3+ made it
                  an offset (4 or 8 bytes depending on dw64). */
               p = ML_(cur_plus)(p, (ver == 2) ? addr_size
                                               : (ui->dw64 ? 8 : 4));
               break;
            case 0x11: /* FORM_ref1 */
               p = ML_(cur_plus)(p, 1);
               break;
            case 0x12: /* FORM_ref2 */
               p = ML_(cur_plus)(p, 2);
               break;
            case 0x13: /* FORM_ref4 */
               p = ML_(cur_plus)(p, 4);
               break;
            case 0x14: /* FORM_ref8 */
               p = ML_(cur_plus)(p, 8);
               break;
            case 0x15: /* FORM_ref_udata */
               (void)step_leb128U(&p);
               break;
            case 0x19: /* FORM_flag_present */
               break;   /* value lives in the abbrev; zero DIE bytes */
            case 0x20: /* FORM_ref_sig8 */
               p = ML_(cur_plus)(p, 8);
               break;
            case 0x1f20: /* FORM_GNU_ref_alt */
               p = ML_(cur_plus)(p, ui->dw64 ? 8 : 4);
               break;
            case 0x1f21: /* FORM_GNU_strp_alt */
               if (ML_(cur_is_valid)(debugstr_alt_img) && !ui->dw64)
                  sval = ML_(cur_plus)(debugstr_alt_img,
                                       ML_(cur_read_UInt)(p));
               if (ML_(cur_is_valid)(debugstr_alt_img) && ui->dw64)
                  sval = ML_(cur_plus)(debugstr_alt_img,
                                       ML_(cur_read_ULong)(p));
               p = ML_(cur_plus)(p, ui->dw64 ? 8 : 4);
               break;
            default:
               /* Unknown form: we cannot know its size, so further
                  attributes of this DIE may be misparsed. */
               VG_(printf)( "### unhandled dwarf2 abbrev form code 0x%x\n",
                            form );
               break;
         }
         /* Now store the members we need in the UnitInfo structure */
         if ( tag == 0x0011 /*TAG_compile_unit*/ ) {
                 if ( name == 0x03 ) ui->name = sval; /* DW_AT_name */
            else if ( name == 0x1b ) ui->compdir = sval; /* DW_AT_compdir */
            else if ( name == 0x10 ) ui->stmt_list = cval; /* DW_AT_stmt_list */
         }
      }
   } /* Just read the first DIE, if that wasn't the compile_unit then
      * this might have been a partial unit or broken DWARF info.
      * That's enough info for us, and we are not gdb ! */
}
////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////
/* Collect the debug info from DWARF3 debugging sections
* of a given module.
*
* Inputs: given .debug_xxx sections
* Output: update di to contain all the DWARF3 debug infos
*/
/* Iterate over all compilation-unit blocks in .debug_info, and for
   each one that names a .debug_line offset (DW_AT_stmt_list), parse
   the corresponding line-number program into 'di'.
   escn_debug_types and escn_debug_str_alt may be invalid slices if
   the corresponding sections are absent. */
void ML_(read_debuginfo_dwarf3)
        ( struct _DebugInfo* di,
          DiSlice escn_debug_info, /* .debug_info */
          DiSlice escn_debug_types, /* .debug_types */
          DiSlice escn_debug_abbv, /* .debug_abbrev */
          DiSlice escn_debug_line, /* .debug_line */
          DiSlice escn_debug_str, /* .debug_str */
          DiSlice escn_debug_str_alt ) /* .debug_str */
{
   UnitInfo ui;
   UShort ver;
   ULong blklen;
   Bool blklen_is_64;
   /* Make sure we at least have a header for the first block */
   if (escn_debug_info.szB < 4) {
      ML_(symerr)( di, True,
                   "Last block truncated in .debug_info; ignoring" );
      return;
   }
   DiCursor block_img = DiCursor_INVALID;
   DiCursor end1_img = ML_(cur_plus)( ML_(cur_from_sli)(escn_debug_info),
                                      escn_debug_info.szB );
   Int blklen_len = 0;
   /* Iterate on all the blocks we find in .debug_info */
   for ( block_img = ML_(cur_from_sli)(escn_debug_info);
         ML_(cur_cmpLT)(block_img, ML_(cur_plus)(end1_img, -(DiOffT)4));
         block_img = ML_(cur_plus)(block_img, blklen + blklen_len) ) {
      /* Read the compilation unit header in .debug_info section - See
         p 70 */
      /* This block length */
      blklen = read_initial_length_field( block_img, &blklen_is_64 );
      blklen_len = blklen_is_64 ? 12 : 4;
      /* Sanity: the whole block must fit within the section. */
      if (ML_(cur_cmpGT)( ML_(cur_plus)(block_img, blklen + blklen_len),
                          end1_img )) {
         ML_(symerr)( di, True,
                      "Last block truncated in .debug_info; ignoring" );
         return;
      }
      /* version should be 2 */
      ver = ML_(cur_read_UShort)( ML_(cur_plus)(block_img, blklen_len) );
      if ( ver != 2 && ver != 3 && ver != 4 ) {
         ML_(symerr)( di, True,
                      "Ignoring non-Dwarf2/3/4 block in .debug_info" );
         continue;
      }
      /* Fill ui with offset in .debug_line and compdir */
      if (0)
         VG_(printf)(
            "Reading UnitInfo at 0x%llx.....\n",
            (ULong)ML_(cur_minus)( block_img,
                                   ML_(cur_from_sli)(escn_debug_info)) );
      read_unitinfo_dwarf2( &ui, block_img,
                            ML_(cur_from_sli)(escn_debug_abbv),
                            ML_(cur_from_sli)(escn_debug_str),
                            ML_(cur_from_sli)(escn_debug_str_alt) );
      if (0) {
         HChar* str_name = ML_(cur_read_strdup)(ui.name, "di.rdd3.1");
         HChar* str_compdir = ML_(cur_read_strdup)(ui.compdir, "di.rdd3.2");
         VG_(printf)( " => LINES=0x%llx NAME=%s DIR=%s\n",
                      ui.stmt_list, str_name, str_compdir );
         ML_(dinfo_free)(str_name);
         ML_(dinfo_free)(str_compdir);
      }
      /* Ignore blocks with no .debug_line associated block */
      if ( ui.stmt_list == -1LL )
         continue;
      /* Reject blocks whose .debug_line offset lies outside the
         section.  Without this, 'escn_debug_line.szB - ui.stmt_list'
         below underflows and read_dwarf2_lineblock is handed a bogus
         size bound. */
      if ( ui.stmt_list >= escn_debug_line.szB ) {
         ML_(symerr)( di, True,
                      "Ignoring .debug_info block with invalid "
                      ".debug_line offset" );
         continue;
      }
      if (0) {
         HChar* str_name = ML_(cur_read_strdup)(ui.name, "di.rdd3.3");
         VG_(printf)("debug_line_sz %llu, ui.stmt_list %llu %s\n",
                     escn_debug_line.szB, ui.stmt_list, str_name );
         ML_(dinfo_free)(str_name);
      }
      /* Read the .debug_line block for this compile unit */
      read_dwarf2_lineblock(
         di, &ui,
         ML_(cur_plus)(ML_(cur_from_sli)(escn_debug_line), ui.stmt_list),
         escn_debug_line.szB - ui.stmt_list
      );
   }
}
////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////
/*------------------------------------------------------------*/
/*--- Read DWARF1 format line number info. ---*/
/*------------------------------------------------------------*/
/* DWARF1 appears to be redundant, but nevertheless the Lahey Fortran
compiler generates it.
*/
/* The following three enums (dwarf_tag, dwarf_form, dwarf_attribute)
are taken from the file include/elf/dwarf.h in the GNU gdb-6.0
sources, which are Copyright 1992, 1993, 1995, 1999 Free Software
Foundation, Inc and naturally licensed under the GNU General Public
License version 2 or later.
*/
/* Tag names and codes. */
/* DWARF1 DIE tag codes (taken from gdb's include/elf/dwarf.h, per the
   notice above).  Only TAG_compile_unit is actually consulted by the
   DWARF1 reader in this file. */
enum dwarf_tag {
    TAG_padding = 0x0000,
    TAG_array_type = 0x0001,
    TAG_class_type = 0x0002,
    TAG_entry_point = 0x0003,
    TAG_enumeration_type = 0x0004,
    TAG_formal_parameter = 0x0005,
    TAG_global_subroutine = 0x0006,
    TAG_global_variable = 0x0007,
    /* 0x0008 -- reserved */
    /* 0x0009 -- reserved */
    TAG_label = 0x000a,
    TAG_lexical_block = 0x000b,
    TAG_local_variable = 0x000c,
    TAG_member = 0x000d,
    /* 0x000e -- reserved */
    TAG_pointer_type = 0x000f,
    TAG_reference_type = 0x0010,
    TAG_compile_unit = 0x0011,
    TAG_string_type = 0x0012,
    TAG_structure_type = 0x0013,
    TAG_subroutine = 0x0014,
    TAG_subroutine_type = 0x0015,
    TAG_typedef = 0x0016,
    TAG_union_type = 0x0017,
    TAG_unspecified_parameters = 0x0018,
    TAG_variant = 0x0019,
    TAG_common_block = 0x001a,
    TAG_common_inclusion = 0x001b,
    TAG_inheritance = 0x001c,
    TAG_inlined_subroutine = 0x001d,
    TAG_module = 0x001e,
    TAG_ptr_to_member_type = 0x001f,
    TAG_set_type = 0x0020,
    TAG_subrange_type = 0x0021,
    TAG_with_stmt = 0x0022,
    /* GNU extensions */
    TAG_format_label = 0x8000, /* for FORTRAN 77 and Fortran 90 */
    TAG_namelist = 0x8001, /* For Fortran 90 */
    TAG_function_template = 0x8002, /* for C++ */
    TAG_class_template = 0x8003 /* for C++ */
};
/* Form names and codes. */
/* DWARF1 attribute form codes: in DWARF1 the form is encoded in the
   low 4 bits of each attribute code (see dwarf_attribute below). */
enum dwarf_form {
    FORM_ADDR = 0x1,
    FORM_REF = 0x2,
    FORM_BLOCK2 = 0x3,
    FORM_BLOCK4 = 0x4,
    FORM_DATA2 = 0x5,
    FORM_DATA4 = 0x6,
    FORM_DATA8 = 0x7,
    FORM_STRING = 0x8
};
/* Attribute names and codes. */
/* DWARF1 attribute codes.  Each value is (attribute_number << 4) |
   form, i.e. the low nibble is the dwarf_form of the attribute's
   value -- which is how the DWARF1 reader below knows how many bytes
   each attribute occupies. */
enum dwarf_attribute {
    AT_sibling = (0x0010|FORM_REF),
    AT_location = (0x0020|FORM_BLOCK2),
    AT_name = (0x0030|FORM_STRING),
    AT_fund_type = (0x0050|FORM_DATA2),
    AT_mod_fund_type = (0x0060|FORM_BLOCK2),
    AT_user_def_type = (0x0070|FORM_REF),
    AT_mod_u_d_type = (0x0080|FORM_BLOCK2),
    AT_ordering = (0x0090|FORM_DATA2),
    AT_subscr_data = (0x00a0|FORM_BLOCK2),
    AT_byte_size = (0x00b0|FORM_DATA4),
    AT_bit_offset = (0x00c0|FORM_DATA2),
    AT_bit_size = (0x00d0|FORM_DATA4),
    /* (0x00e0|FORM_xxxx) -- reserved */
    AT_element_list = (0x00f0|FORM_BLOCK4),
    AT_stmt_list = (0x0100|FORM_DATA4),
    AT_low_pc = (0x0110|FORM_ADDR),
    AT_high_pc = (0x0120|FORM_ADDR),
    AT_language = (0x0130|FORM_DATA4),
    AT_member = (0x0140|FORM_REF),
    AT_discr = (0x0150|FORM_REF),
    AT_discr_value = (0x0160|FORM_BLOCK2),
    /* (0x0170|FORM_xxxx) -- reserved */
    /* (0x0180|FORM_xxxx) -- reserved */
    AT_string_length = (0x0190|FORM_BLOCK2),
    AT_common_reference = (0x01a0|FORM_REF),
    AT_comp_dir = (0x01b0|FORM_STRING),
    AT_const_value_string = (0x01c0|FORM_STRING),
    AT_const_value_data2 = (0x01c0|FORM_DATA2),
    AT_const_value_data4 = (0x01c0|FORM_DATA4),
    AT_const_value_data8 = (0x01c0|FORM_DATA8),
    AT_const_value_block2 = (0x01c0|FORM_BLOCK2),
    AT_const_value_block4 = (0x01c0|FORM_BLOCK4),
    AT_containing_type = (0x01d0|FORM_REF),
    AT_default_value_addr = (0x01e0|FORM_ADDR),
    AT_default_value_data2 = (0x01e0|FORM_DATA2),
    AT_default_value_data4 = (0x01e0|FORM_DATA4),
    AT_default_value_data8 = (0x01e0|FORM_DATA8),
    AT_default_value_string = (0x01e0|FORM_STRING),
    AT_friends = (0x01f0|FORM_BLOCK2),
    AT_inline = (0x0200|FORM_STRING),
    AT_is_optional = (0x0210|FORM_STRING),
    AT_lower_bound_ref = (0x0220|FORM_REF),
    AT_lower_bound_data2 = (0x0220|FORM_DATA2),
    AT_lower_bound_data4 = (0x0220|FORM_DATA4),
    AT_lower_bound_data8 = (0x0220|FORM_DATA8),
    AT_private = (0x0240|FORM_STRING),
    AT_producer = (0x0250|FORM_STRING),
    AT_program = (0x0230|FORM_STRING),
    AT_protected = (0x0260|FORM_STRING),
    AT_prototyped = (0x0270|FORM_STRING),
    AT_public = (0x0280|FORM_STRING),
    AT_pure_virtual = (0x0290|FORM_STRING),
    AT_return_addr = (0x02a0|FORM_BLOCK2),
    AT_abstract_origin = (0x02b0|FORM_REF),
    AT_start_scope = (0x02c0|FORM_DATA4),
    AT_stride_size = (0x02e0|FORM_DATA4),
    AT_upper_bound_ref = (0x02f0|FORM_REF),
    AT_upper_bound_data2 = (0x02f0|FORM_DATA2),
    AT_upper_bound_data4 = (0x02f0|FORM_DATA4),
    AT_upper_bound_data8 = (0x02f0|FORM_DATA8),
    AT_virtual = (0x0300|FORM_STRING),
    /* GNU extensions.  */
    AT_sf_names = (0x8000|FORM_DATA4),
    AT_src_info = (0x8010|FORM_DATA4),
    AT_mac_info = (0x8020|FORM_DATA4),
    AT_src_coords = (0x8030|FORM_DATA4),
    AT_body_begin = (0x8040|FORM_ADDR),
    AT_body_end = (0x8050|FORM_ADDR)
};
/* end of enums taken from gdb-6.0 sources */
#if 0
/* NOTE(review): this DWARF1 reader is compiled out (#if 0) and kept
   for reference only.  It scans .debug DIEs for compile_unit entries,
   pulls out AT_name and AT_stmt_list, then parses the associated
   DWARF1 line table: a length, a base address, then a sequence of
   (line, column, address-delta) triples. */
void ML_(read_debuginfo_dwarf1) (
        struct _DebugInfo* di,
        UChar* dwarf1d, Int dwarf1d_sz,
        UChar* dwarf1l, Int dwarf1l_sz )
{
   UInt stmt_list;
   Bool stmt_list_found;
   Int die_offset, die_szb, at_offset;
   UShort die_kind, at_kind;
   UChar* at_base;
   HChar* src_filename;
   if (0)
      VG_(printf)("read_debuginfo_dwarf1 ( %p, %d, %p, %d )\n",
                  dwarf1d, dwarf1d_sz, dwarf1l, dwarf1l_sz );
   /* This loop scans the DIEs. */
   die_offset = 0;
   while (True) {
      if (die_offset >= dwarf1d_sz) break;
      die_szb = ML_(read_Int)(dwarf1d + die_offset);
      die_kind = ML_(read_UShort)(dwarf1d + die_offset + 4);
      /* We're only interested in compile_unit DIEs; ignore others. */
      if (die_kind != TAG_compile_unit) {
         die_offset += die_szb;
         continue;
      }
      if (0)
         VG_(printf)("compile-unit DIE: offset %d, tag 0x%x, size %d\n",
                     die_offset, (Int)die_kind, die_szb );
      /* We've got a compile_unit DIE starting at (dwarf1d +
         die_offset+6). Try and find the AT_name and AT_stmt_list
         attributes. Then, finally, we can read the line number info
         for this source file. */
      /* The next 3 are set as we find the relevant attrs. */
      src_filename = NULL;
      stmt_list_found = False;
      stmt_list = 0;
      /* This loop scans the Attrs inside compile_unit DIEs. */
      at_base = dwarf1d + die_offset + 6;
      at_offset = 0;
      while (True) {
         if (at_offset >= die_szb-6) break;
         at_kind = ML_(read_UShort)(at_base + at_offset);
         if (0) VG_(printf)("atoffset %d, attag 0x%x\n",
                            at_offset, (Int)at_kind );
         at_offset += 2; /* step over the attribute itself */
         /* We have to examine the attribute to figure out its
            length. */
         switch (at_kind) {
            case AT_stmt_list:
            case AT_language:
            case AT_sibling:
               if (at_kind == AT_stmt_list) {
                  stmt_list_found = True;
                  stmt_list = ML_(read_Int)(at_base+at_offset);
               }
               at_offset += 4; break;
            case AT_high_pc:
            case AT_low_pc:
               at_offset += sizeof(void*); break;
            case AT_name:
            case AT_producer:
            case AT_comp_dir:
               /* Zero terminated string, step over it. */
               if (at_kind == AT_name)
                  src_filename = (HChar *)(at_base + at_offset);
               while (at_offset < die_szb-6 && at_base[at_offset] != 0)
                  at_offset++;
               at_offset++;
               break;
            default:
               VG_(printf)("Unhandled DWARF-1 attribute 0x%x\n",
                           (Int)at_kind );
               VG_(core_panic)("Unhandled DWARF-1 attribute");
         } /* switch (at_kind) */
      } /* looping over attributes */
      /* So, did we find the required stuff for a line number table in
         this DIE? If yes, read it. */
      if (stmt_list_found /* there is a line number table */
          && src_filename != NULL /* we know the source filename */
         ) {
         /* Table starts:
               Length:
                  4 bytes, includes the entire table
               Base address:
                  unclear (4? 8?), assuming native pointer size here.
            Then a sequence of triples
               (source line number -- 32 bits
                source line column -- 16 bits
                address delta -- 32 bits)
         */
         Addr base;
         Int len;
         HChar* curr_filenm;
         UChar* ptr;
         UInt prev_line, prev_delta;
         curr_filenm = ML_(addStr) ( di, src_filename, -1 );
         prev_line = prev_delta = 0;
         ptr = dwarf1l + stmt_list;
         len = ML_(read_Int)(ptr); ptr += sizeof(Int);
         base = ML_(read_Addr)(ptr); ptr += sizeof(void*);
         len -= (sizeof(Int) + sizeof(void*));
         while (len > 0) {
            UInt line;
            UShort col;
            UInt delta;
            line = ML_(read_UInt)(ptr); ptr += sizeof(UInt);
            col = ML_(read_UShort)(ptr); ptr += sizeof(UShort);
            delta = ML_(read_UInt)(ptr); ptr += sizeof(UInt);
            if (0) VG_(printf)("line %d, col %d, delta %d\n",
                               line, (Int)col, delta );
            len -= (sizeof(UInt) + sizeof(UShort) + sizeof(UInt));
            /* Emit a range only once we have both endpoints of it. */
            if (delta > 0 && prev_line > 0) {
               if (0) VG_(printf) (" %d  %d-%d\n",
                                   prev_line, prev_delta, delta-1);
               ML_(addLineInfo) ( di, curr_filenm, NULL,
                                  base + prev_delta, base + delta,
                                  prev_line, 0 );
            }
            prev_line = line;
            prev_delta = delta;
         }
      }
      /* Move on the next DIE. */
      die_offset += die_szb;
   } /* Looping over DIEs */
}
#endif
/*------------------------------------------------------------*/
/*--- Read call-frame info from an .eh_frame section ---*/
/*------------------------------------------------------------*/
/* Sources of info:
The DWARF3 spec, available from http://www.dwarfstd.org/Download.php
This describes how to read CFA data from .debug_frame sections.
So as to maximise everybody's annoyance and confusion, .eh_frame
sections are almost the same as .debug_frame sections, but differ
in a few subtle and ill documented but important aspects.
Generic ELF Specification, sections 7.5 (DWARF Extensions) and 7.6
(Exception Frames), available from
http://www.linux-foundation.org/spec/book/ELF-generic/ELF-generic.html
This really does describe .eh_frame, at least the aspects that
differ from standard DWARF3. It's better than guessing, and
(marginally) more fun than reading the gdb source code.
*/
/* Useful info ..
In general:
gdb-6.3/gdb/dwarf2-frame.c
gdb-6.3/gdb/i386-tdep.c:
DWARF2/GCC uses the stack address *before* the function call as a
frame's CFA. [jrs: I presume this means %esp before the call as
the CFA].
JRS: on amd64, the dwarf register numbering is, as per
gdb-6.3/gdb/amd64-tdep.c and also amd64-abi-0.98.pdf:
0 1 2 3 4 5 6 7
RAX RDX RCX RBX RSI RDI RBP RSP
8 ... 15
R8 ... R15
16 is the return address (RIP)
"The table defines Return Address to have a register number,
even though the address is stored in 0(%rsp) and not in a
physical register."
17 ... 24
XMM0 ... XMM7
25 ... 32
XMM8 ... XMM15
33 ... 40
ST0 ... ST7
41 ... 48
MM0 ... MM7
49 RFLAGS
50,51,52,53,54,55 ES,CS,SS,DS,FS,GS
58 FS.BASE (what's that?)
59 GS.BASE (what's that?)
62 TR (task register)
63 LDTR (LDT register)
64 MXCSR
65 FCW (x87 control word)
66 FSW (x87 status word)
On x86 I cannot find any documentation. It _appears_ to be the
actual instruction encoding, viz:
0 1 2 3 4 5 6 7
EAX ECX EDX EBX ESP EBP ESI EDI
8 is the return address (EIP) */
/* Comments re DW_CFA_set_loc, 16 Nov 06.
JRS:
Someone recently sent me a libcrypto.so.0.9.8 as distributed with
Ubuntu of some flavour, compiled with gcc 4.1.2 on amd64. It
causes V's CF reader to complain a lot:
>> --19976-- DWARF2 CFI reader: unhandled CFI instruction 0:24
>> --19976-- DWARF2 CFI reader: unhandled CFI instruction 0:24
>> --19976-- DWARF2 CFI reader: unhandled CFI instruction 0:24
>> --19976-- DWARF2 CFI reader: unhandled CFI instruction 0:24
>> --19976-- DWARF2 CFI reader: unhandled CFI instruction 0:48
>> --19976-- DWARF2 CFI reader: unhandled CFI instruction 0:24
After chasing this around a bit it seems that the CF bytecode
parser lost sync at a DW_CFA_set_loc, which has a single argument
denoting an address.
As it stands that address is extracted by read_Addr(). On amd64
that just fetches 8 bytes regardless of anything else.
read_encoded_Addr() is more sophisticated. This appears to take
into account some kind of encoding flag. When I replace the uses
of read_Addr by read_encoded_Addr for DW_CFA_set_loc, the
complaints go away, there is no loss of sync, and the parsed CF
instructions are the same as shown by readelf --debug-dump=frames.
So it seems a plausible fix. The problem is I looked in the DWARF3
spec and completely failed to figure out whether or not the arg to
DW_CFA_set_loc is supposed to be encoded in a way suitable for
read_encoded_Addr, nor for that matter any description of what it
is that read_encoded_Addr is really decoding.
TomH:
The problem is that the encoding is not standard - the eh_frame
section uses the same encoding as the dwarf_frame section except
for a few small changes, and this is one of them. So this is not
something the DWARF standard covers.
There is an augmentation string to indicate what is going on though
so that programs can recognise it.
What we are doing seems to match what gdb 6.5 and libdwarf 20060614
do though. I'm not sure about readelf though.
(later): Well dwarfdump barfs on it:
dwarfdump ERROR: dwarf_get_fde_info_for_reg:
DW_DLE_DF_FRAME_DECODING_ERROR(193) (193)
I've looked at binutils as well now, and the code in readelf agrees
with your patch - ie it treats set_loc as having an encoded address
if there is a zR augmentation indicating an encoding.
Quite why gdb and libdwarf don't understand this is an interesting
question...
Final outcome: all uses of read_Addr were replaced by
read_encoded_Addr. A new type AddressDecodingInfo was added to
make it relatively clean to plumb through the extra info needed by
read_encoded_Addr.
*/
/* More badness re address encoding, 12 Jan 07.
Most gcc provided CIEs have a "zR" augmentation, which means they
supply their own address encoding, and that works fine. However,
some icc9 supplied CIEs have no augmentation, which means they use
the default_Addr_encoding(). That says to use a machine-word sized
value, literally unmodified.
Since .so's are, in general, relocated when loaded, having absolute
addresses in the CFI data makes no sense when read_encoded_Addr is
used to find the initial location for a FDE. The resulting saga:
TomH:
> I'm chasing a stack backtrace failure for an amd64 .so which was
> created I believe by icc 9.1. After a while I wound up looking at
> this: (readdwarf.c)
>
> 5083 tom static UChar default_Addr_encoding ( void )
> 3584 tom {
> 3584 tom switch (sizeof(Addr)) {
> 3584 tom case 4: return DW_EH_PE_udata4;
> 3584 tom case 8: return DW_EH_PE_udata8;
> 3584 tom default: vg_assert(0);
> 3584 tom }
> 3584 tom }
>
> If a CIE does not have an "augmentation string" (typically "zR") then
> addresses are decoded as described by default_Addr_encoding. If there
> is an 'R' in the augmentation string then the encoding to use
> is specified by the CIE itself, which works fine with GCC compiled code
> since that always appears to specify zR.
Correct.
> Problem is this .so has no augmentation string and so uses the
> default encoding, viz DW_EH_PE_udata8. That appears to mean
> "read a 64 bit number" and use that as-is (for the starting value
> of the program counter when running the CFA program).
Strictly speaking the default is DW_EH_PE_absptr, but that amounts
to either udata4 or udata8 depending on the platform's pointer size
which is a shortcut I used.
> For this .so that gives nonsense (very small) PCs which are later
> rejected by the sanity check which ensures PC ranges fall inside
> the mapped text segment. It seems like the .so expects to have the
> start VMA of the text segment added on. This would correspond to
>
> static UChar default_Addr_encoding ( void )
> {
> switch (sizeof(Addr)) {
> case 4: return DW_EH_PE_textrel + DW_EH_PE_udata4;
> case 8: return DW_EH_PE_textrel + DW_EH_PE_udata8;
> default: vg_assert(0);
> }
> }
The problem you're seeing is that you have absolute pointers inside
a shared library, which obviously makes little sense on the face of
things as how would the linker know where the library will be
loaded?
The answer of course is that it doesn't, so if it puts absolute
pointers in the frame unwind data it has to include relocations for
them, and I'm betting that if you look at the relocations in the
library you will see there are some for that data.
That is fine of course when ld.so maps the library - it will
relocate the eh_frame data as it maps it (or prelinking will
already have done so) and when the g++ exception code kicks in and
unwinds the stack it will see relocated data.
We of course are mapping the section from the ELF file ourselves
and are not applying the relocations, hence the problem you are
seeing.
Strictly speaking we should apply the relocations but the cheap
solution is essentially to do what you've done - strictly speaking
you should adjust by the difference between the address the library
was linked for and the address it has been loaded at, but a shared
library will normally be linked for address zero I believe. It's
possible that prelinking might change that though?
JRS:
That all syncs with what I am seeing.
So what I am inclined to do is:
- Leave default_Addr_encoding as it is
- Change read_encoded_Addr's handling of "case DW_EH_PE_absptr" so
it sets base to, as you say, the difference between the address
the library was linked for and the address it has been loaded at
(== the SegInfo's text_bias)
Does that sound sane? I think it should even handle the prelinked
case.
(JRS, later)
Hmm. Plausible as it sounds, it doesn't work. It now produces
bogus backtraces for locations inside the (statically linked)
memcheck executable.
Besides, there are a couple of other places where read_encoded_Addr
is used -- one of which is used to establish the length of the
address range covered by the current FDE:
fde_arange = read_encoded_Addr(&nbytes, &adi, data);
and it doesn't seem to make any sense for read_encoded_Addr to add
on the text segment bias in that context. The DWARF3 spec says
that both the initial_location and address_range (length) fields
are encoded the same way ("target address"), so it is unclear at
what stage in the process it would be appropriate to relocate the
former but not the latter.
One unprincipled kludge that does work is the following: just
before handing one of the address range fragments off to
ML_(addDiCfSI) for permanent storage, check its start address. If
that is very low (less than 2 M), and is far below the mapped text
segment, and adding the text bias would move the fragment entirely
inside the mapped text segment, then do so. A kind of kludged
last-minute relocation, if you like.
12 Jan 07: committing said kludge (see kludge_then_addDiCfSI). If
the situation clarifies, it can easily enough be backed out and
replaced by a better fix.
*/
/* --------------- Decls --------------- */
#if defined(VGP_x86_linux) || defined(VGP_x86_solaris)
# define FP_REG 5
# define SP_REG 4
# define RA_REG_DEFAULT 8
#elif defined(VGP_amd64_linux) || defined(VGP_amd64_solaris)
# define FP_REG 6
# define SP_REG 7
# define RA_REG_DEFAULT 16
#elif defined(VGP_ppc32_linux)
# define FP_REG 1
# define SP_REG 1
# define RA_REG_DEFAULT 65
#elif defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
# define FP_REG 1
# define SP_REG 1
# define RA_REG_DEFAULT 65
#elif defined(VGP_arm_linux)
# define FP_REG 12
# define SP_REG 13
# define RA_REG_DEFAULT 14
#elif defined(VGP_arm64_linux)
# define FP_REG 29
# define SP_REG 31
# define RA_REG_DEFAULT 30
#elif defined(VGP_x86_darwin)
# define FP_REG 5
# define SP_REG 4
# define RA_REG_DEFAULT 8
#elif defined(VGP_amd64_darwin)
# define FP_REG 6
# define SP_REG 7
# define RA_REG_DEFAULT 16
#elif defined(VGP_s390x_linux)
# define FP_REG 11 // sometimes s390 has a frame pointer in r11
# define SP_REG 15 // stack is always r15
# define RA_REG_DEFAULT 14 // the return address is in r14
#elif defined(VGP_mips32_linux)
# define FP_REG 30
# define SP_REG 29
# define RA_REG_DEFAULT 31
#elif defined(VGP_mips64_linux)
# define FP_REG 30
# define SP_REG 29
# define RA_REG_DEFAULT 31
#elif defined(VGP_tilegx_linux)
# define FP_REG 52
# define SP_REG 54
# define RA_REG_DEFAULT 55
#else
# error "Unknown platform"
#endif
/* The number of regs we are prepared to unwind. The number for
arm-linux (320) seems ludicrously high, but the ARM IHI 0040A page
7 (DWARF for the ARM Architecture) specifies that values up to 320
might exist, for Neon/VFP-v3. */
#if defined(VGP_ppc32_linux) || defined(VGP_ppc64be_linux) \
|| defined(VGP_ppc64le_linux) || defined(VGP_mips32_linux) \
|| defined(VGP_mips64_linux)
# define N_CFI_REGS 72
#elif defined(VGP_arm_linux) || defined(VGP_tilegx_linux)
# define N_CFI_REGS 320
#elif defined(VGP_arm64_linux)
# define N_CFI_REGS 128
#else
# define N_CFI_REGS 20
#endif
/* Instructions for the automaton */
enum dwarf_cfa_primary_ops
{
DW_CFA_use_secondary = 0,
DW_CFA_advance_loc = 1,
DW_CFA_offset = 2,
DW_CFA_restore = 3
};
enum dwarf_cfa_secondary_ops
{
DW_CFA_nop = 0x00,
DW_CFA_set_loc = 0x01,
DW_CFA_advance_loc1 = 0x02,
DW_CFA_advance_loc2 = 0x03,
DW_CFA_advance_loc4 = 0x04,
DW_CFA_offset_extended = 0x05,
DW_CFA_restore_extended = 0x06,
DW_CFA_undefined = 0x07,
DW_CFA_same_value = 0x08,
DW_CFA_register = 0x09,
DW_CFA_remember_state = 0x0a,
DW_CFA_restore_state = 0x0b,
DW_CFA_def_cfa = 0x0c,
DW_CFA_def_cfa_register = 0x0d,
DW_CFA_def_cfa_offset = 0x0e,
DW_CFA_def_cfa_expression = 0x0f, /* DWARF3 only */
DW_CFA_expression = 0x10, /* DWARF3 only */
DW_CFA_offset_extended_sf = 0x11, /* DWARF3 only */
DW_CFA_def_cfa_sf = 0x12, /* DWARF3 only */
DW_CFA_def_cfa_offset_sf = 0x13, /* DWARF3 only */
DW_CFA_val_offset = 0x14, /* DWARF3 only */
DW_CFA_val_offset_sf = 0x15, /* DWARF3 only */
DW_CFA_val_expression = 0x16, /* DWARF3 only */
DW_CFA_lo_user = 0x1c,
DW_CFA_GNU_window_save = 0x2d, /* GNU extension */
DW_CFA_GNU_args_size = 0x2e, /* GNU extension */
DW_CFA_GNU_negative_offset_extended = 0x2f, /* GNU extension */
DW_CFA_ORCL_arg_loc = 0x30, /* Oracle extension */
DW_CFA_hi_user = 0x3f
};
#define DW_EH_PE_absptr 0x00
#define DW_EH_PE_omit 0xff
#define DW_EH_PE_uleb128 0x01
#define DW_EH_PE_udata2 0x02
#define DW_EH_PE_udata4 0x03
#define DW_EH_PE_udata8 0x04
#define DW_EH_PE_sleb128 0x09
#define DW_EH_PE_sdata2 0x0A
#define DW_EH_PE_sdata4 0x0B
#define DW_EH_PE_sdata8 0x0C
#define DW_EH_PE_signed 0x08
#define DW_EH_PE_pcrel 0x10
#define DW_EH_PE_textrel 0x20
#define DW_EH_PE_datarel 0x30
#define DW_EH_PE_funcrel 0x40
#define DW_EH_PE_aligned 0x50
#define DW_EH_PE_indirect 0x80
/* RegRule and UnwindContext are used temporarily to do the unwinding.
The result is then summarised into a sequence of CfiSIs, if
possible. UnwindContext effectively holds the state of the
abstract machine whilst it is running.
The CFA can either be a signed offset from a register,
or an expression:
CFA = cfa_reg + cfa_off when UnwindContext.cfa_is_regoff==True
| [[ cfa_expr_id ]]
When .cfa_is_regoff == True, cfa_expr_id must be zero
When .cfa_is_regoff == False, cfa_reg must be zero
and cfa_off must be zero
RegRule describes, for each register, how to get its
value in the previous frame, where 'cfa' denotes the cfa
for the frame as a whole:
RegRule = RR_Undef -- undefined
| RR_Same -- same as in previous frame
| RR_CFAOff arg -- is at * ( cfa + arg )
| RR_CFAValOff arg -- is ( cfa + arg )
| RR_Reg arg -- is in register 'arg'
| RR_Expr arg -- is at * [[ arg ]]
| RR_ValExpr arg -- is [[ arg ]]
| RR_Arch -- dunno
Note that RR_Expr is redundant since the same can be represented
using RR_ValExpr with an explicit dereference (CfiExpr_Deref) at
the outermost level.
All expressions are stored in exprs in the containing
UnwindContext. Since the UnwindContext gets reinitialised for each
new FDE, summarise_context needs to copy out any expressions it
wants to keep into the cfsi_exprs field of the containing SegInfo.
*/
typedef
struct {
enum { RR_Undef, RR_Same, RR_CFAOff, RR_CFAValOff,
RR_Reg, /*RR_Expr,*/ RR_ValExpr, RR_Arch } tag;
/* meaning: int offset for CFAoff/CFAValOff
reg # for Reg
expr index for Expr/ValExpr */
Int arg;
}
RegRule;
static void ppRegRule ( const XArray* exprs, const RegRule* rrule )
{
vg_assert(exprs);
switch (rrule->tag) {
case RR_Undef: VG_(printf)("u "); break;
case RR_Same: VG_(printf)("s "); break;
case RR_CFAOff: VG_(printf)("c%d ", rrule->arg); break;
case RR_CFAValOff: VG_(printf)("v%d ", rrule->arg); break;
case RR_Reg: VG_(printf)("r%d ", rrule->arg); break;
case RR_ValExpr: VG_(printf)("ve{");
ML_(ppCfiExpr)( exprs, rrule->arg );
VG_(printf)("} ");
break;
case RR_Arch: VG_(printf)("a "); break;
default: VG_(core_panic)("ppRegRule");
}
}
/* Size of the stack of register unwind rules. This is only
exceedingly rarely used, so a stack of size 1 should actually work
with almost all compiler-generated CFA. */
#define N_RR_STACK 4
typedef
struct {
/* Read-only fields (set by the CIE) */
Int code_a_f;
Int data_a_f;
Addr initloc;
Int ra_reg;
/* The rest of these fields can be modifed by
run_CF_instruction. */
/* The LOC entry */
Addr loc;
/* We need a stack of these in order to handle
DW_CFA_{remember,restore}_state. */
struct UnwindContextState {
/* The CFA entry. This can be either reg+/-offset or an expr. */
Bool cfa_is_regoff; /* True=>is reg+offset; False=>is expr */
Int cfa_reg;
Int cfa_off; /* in bytes */
Int cfa_expr_ix; /* index into cfa_exprs */
/* Register unwind rules. */
RegRule reg[N_CFI_REGS];
}
state[N_RR_STACK];
Int state_sp; /* 0 <= state_sp < N_RR_STACK; points at the
currently-in-use rule set. */
/* array of CfiExpr, shared by reg[] and cfa_expr_ix */
XArray* exprs;
}
UnwindContext;
static void ppUnwindContext ( const UnwindContext* ctx )
{
Int j, i;
VG_(printf)("0x%llx: ", (ULong)ctx->loc);
for (j = 0; j <= ctx->state_sp; j++) {
const struct UnwindContextState* ctxs = &ctx->state[j];
VG_(printf)("%s[%d]={ ", j > 0 ? " " : "", j);
if (ctxs->cfa_is_regoff) {
VG_(printf)("%d(r%d) ", ctxs->cfa_off, ctxs->cfa_reg);
} else {
vg_assert(ctx->exprs);
VG_(printf)("{");
ML_(ppCfiExpr)( ctx->exprs, ctxs->cfa_expr_ix );
VG_(printf)("} ");
}
VG_(printf)("{ ");
for (i = 0; i < N_CFI_REGS; i++)
ppRegRule(ctx->exprs, &ctxs->reg[i]);
VG_(printf)("}");
}
VG_(printf)("\n");
}
static void initUnwindContext ( /*OUT*/UnwindContext* ctx )
{
Int j, i;
VG_(memset)(ctx, 0, sizeof(*ctx));
/* ctx->code_a_f = 0;
ctx->data_a_f = 0;
ctx->initloc = 0; */
ctx->ra_reg = RA_REG_DEFAULT;
/* ctx->loc = 0;
ctx->exprs = NULL;
ctx->state_sp = 0; */
for (j = 0; j < N_RR_STACK; j++) {
ctx->state[j].cfa_is_regoff = True;
/* ctx->state[j].cfa_reg = 0;
ctx->state[j].cfa_off = 0;
ctx->state[j].cfa_expr_ix = 0; */
for (i = 0; i < N_CFI_REGS; i++) {
if (RR_Undef != 0)
ctx->state[j].reg[i].tag = RR_Undef;
/* ctx->state[j].reg[i].arg = 0; */
}
# if defined(VGA_arm)
/* All callee-saved registers (or at least the ones we are
summarising for) should start out as RR_Same, on ARM. */
ctx->state[j].reg[11].tag = RR_Same;
/* ctx->state[j].reg[13].tag = RR_Same; */
ctx->state[j].reg[14].tag = RR_Same;
ctx->state[j].reg[12].tag = RR_Same;
ctx->state[j].reg[7].tag = RR_Same;
/* this can't be right though: R12 (IP) isn't callee saved. */
# elif defined(VGA_arm64)
/* Callee-saved registers (that we are interested in) should
start out as RR_Same. */
ctx->state[j].reg[29/*FP*/].tag = RR_Same;
ctx->state[j].reg[30/*LR*/].tag = RR_Same;
# endif
}
}
/* A structure which holds information needed by read_encoded_Addr().
*/
typedef
struct {
UChar encoding;
DiCursor ehframe_image;
Addr ehframe_avma;
Addr text_bias;
Addr got_avma;
}
AddressDecodingInfo;
/* ------------ Deal with summary-info records ------------ */
/* --------------- Summarisation --------------- */
/* Forward */
static
Int copy_convert_CfiExpr_tree ( XArray* dst, const UnwindContext* srcuc,
Int nd );
/* Summarise ctx into si, if possible. Returns True if successful.
This is taken to be just after ctx's loc advances; hence the
summary is up to but not including the current loc. This works
on both x86 and amd64.
*/
static Bool summarise_context(/*OUT*/Addr* base,
/*OUT*/UInt* len,
/*OUT*/DiCfSI_m* si_m,
Addr loc_start,
const UnwindContext* ctx,
DebugInfo* debuginfo )
{
Int why = 0;
const struct UnwindContextState* ctxs;
*base = 0;
*len = 0;
VG_(bzero_inline)(si_m, sizeof(*si_m));
/* Guard against obviously stupid settings of the reg-rule stack
pointer. */
if (ctx->state_sp < 0) { why = 8; goto failed; }
if (ctx->state_sp >= N_RR_STACK) { why = 9; goto failed; }
ctxs = &ctx->state[ctx->state_sp];
/* First, summarise the method for generating the CFA */
if (!ctxs->cfa_is_regoff) {
/* it was set by DW_CFA_def_cfa_expression; try to convert */
XArray *src, *dst;
Int conv;
src = ctx->exprs;
dst = debuginfo->cfsi_exprs;
if (src && (VG_(sizeXA)(src) > 0) && (!dst)) {
dst = VG_(newXA)( ML_(dinfo_zalloc), "di.ccCt.1", ML_(dinfo_free),
sizeof(CfiExpr) );
debuginfo->cfsi_exprs = dst;
}
conv = copy_convert_CfiExpr_tree
( dst, ctx, ctxs->cfa_expr_ix );
vg_assert(conv >= -1);
if (conv == -1) { why = 6; goto failed; }
si_m->cfa_how = CFIC_EXPR;
si_m->cfa_off = conv;
if (0 && debuginfo->ddump_frames)
ML_(ppCfiExpr)(dst, conv);
}
else
if (ctxs->cfa_is_regoff && ctxs->cfa_reg == SP_REG) {
si_m->cfa_off = ctxs->cfa_off;
# if defined(VGA_x86) || defined(VGA_amd64) || defined(VGA_s390x) \
|| defined(VGA_mips32) || defined(VGA_mips64) \
|| defined(VGA_tilegx)
si_m->cfa_how = CFIC_IA_SPREL;
# elif defined(VGA_arm)
si_m->cfa_how = CFIC_ARM_R13REL;
# elif defined(VGA_arm64)
si_m->cfa_how = CFIC_ARM64_SPREL;
# else
si_m->cfa_how = 0; /* invalid */
# endif
}
else
if (ctxs->cfa_is_regoff && ctxs->cfa_reg == FP_REG) {
si_m->cfa_off = ctxs->cfa_off;
# if defined(VGA_x86) || defined(VGA_amd64) || defined(VGA_s390x) \
|| defined(VGA_mips32) || defined(VGA_mips64) \
|| defined(VGA_tilegx)
si_m->cfa_how = CFIC_IA_BPREL;
# elif defined(VGA_arm)
si_m->cfa_how = CFIC_ARM_R12REL;
# elif defined(VGA_arm64)
si_m->cfa_how = CFIC_ARM64_X29REL;
# else
si_m->cfa_how = 0; /* invalid */
# endif
}
# if defined(VGA_arm)
else
if (ctxs->cfa_is_regoff && ctxs->cfa_reg == 11/*??_REG*/) {
si_m->cfa_how = CFIC_ARM_R11REL;
si_m->cfa_off = ctxs->cfa_off;
}
else
if (ctxs->cfa_is_regoff && ctxs->cfa_reg == 7/*??_REG*/) {
si_m->cfa_how = CFIC_ARM_R7REL;
si_m->cfa_off = ctxs->cfa_off;
}
# elif defined(VGA_arm64)
// do we need any arm64 specifics here?
# endif
else {
why = 1;
goto failed;
}
# define SUMMARISE_HOW(_how, _off, _ctxreg) \
switch (_ctxreg.tag) { \
case RR_Undef: \
_how = CFIR_UNKNOWN; _off = 0; break; \
case RR_Same: \
_how = CFIR_SAME; _off = 0; break; \
case RR_CFAOff: \
_how = CFIR_MEMCFAREL; _off = _ctxreg.arg; break; \
case RR_CFAValOff: \
_how = CFIR_CFAREL; _off = _ctxreg.arg; break; \
case RR_ValExpr: { \
XArray *src, *dst; \
Int conv; \
src = ctx->exprs; \
dst = debuginfo->cfsi_exprs; \
if (src && (VG_(sizeXA)(src) > 0) && (!dst)) { \
dst = VG_(newXA)( ML_(dinfo_zalloc), \
"di.ccCt.2", \
ML_(dinfo_free), \
sizeof(CfiExpr) ); \
debuginfo->cfsi_exprs = dst; \
} \
conv = copy_convert_CfiExpr_tree \
( dst, ctx, _ctxreg.arg ); \
vg_assert(conv >= -1); \
if (conv == -1) { why = 7; goto failed; } \
_how = CFIR_EXPR; \
_off = conv; \
if (0 && debuginfo->ddump_frames) \
ML_(ppCfiExpr)(dst, conv); \
break; \
} \
default: \
why = 2; goto failed; /* otherwise give up */ \
}
# if defined(VGA_x86) || defined(VGA_amd64)
/* --- entire tail of this fn specialised for x86/amd64 --- */
SUMMARISE_HOW(si_m->ra_how, si_m->ra_off,
ctxs->reg[ctx->ra_reg] );
SUMMARISE_HOW(si_m->bp_how, si_m->bp_off,
ctxs->reg[FP_REG] );
/* on x86/amd64, it seems the old %{e,r}sp value before the call is
always the same as the CFA. Therefore ... */
si_m->sp_how = CFIR_CFAREL;
si_m->sp_off = 0;
/* also, gcc says "Undef" for %{e,r}bp when it is unchanged. So
.. */
if (ctxs->reg[FP_REG].tag == RR_Undef)
si_m->bp_how = CFIR_SAME;
/* knock out some obviously stupid cases */
if (si_m->ra_how == CFIR_SAME)
{ why = 3; goto failed; }
/* bogus looking range? Note, we require that the difference is
representable in 32 bits. */
if (loc_start >= ctx->loc)
{ why = 4; goto failed; }
if (ctx->loc - loc_start > 10000000 /* let's say */)
{ why = 5; goto failed; }
*base = loc_start + ctx->initloc;
*len = (UInt)(ctx->loc - loc_start);
return True;
# elif defined(VGA_arm)
/* ---- entire tail of this fn specialised for arm ---- */
SUMMARISE_HOW(si_m->r14_how, si_m->r14_off,
ctxs->reg[14] );
//SUMMARISE_HOW(si_m->r13_how, si_m->r13_off,
// ctxs->reg[13] );
SUMMARISE_HOW(si_m->r12_how, si_m->r12_off,
ctxs->reg[FP_REG] );
SUMMARISE_HOW(si_m->r11_how, si_m->r11_off,
ctxs->reg[11/*FP_REG*/] );
SUMMARISE_HOW(si_m->r7_how, si_m->r7_off,
ctxs->reg[7] );
if (ctxs->reg[14/*LR*/].tag == RR_Same
&& ctx->ra_reg == 14/*as we expect it always to be*/) {
/* Generate a trivial CfiExpr, which merely says "r14". First
ensure this DebugInfo has a cfsi_expr array in which to park
it. */
if (!debuginfo->cfsi_exprs)
debuginfo->cfsi_exprs = VG_(newXA)( ML_(dinfo_zalloc),
"di.ccCt.2a",
ML_(dinfo_free),
sizeof(CfiExpr) );
si_m->ra_off = ML_(CfiExpr_CfiReg)( debuginfo->cfsi_exprs,
Creg_ARM_R14);
si_m->ra_how = CFIR_EXPR;
} else {
/* Just summarise it in the normal way */
SUMMARISE_HOW(si_m->ra_how, si_m->ra_off,
ctxs->reg[ctx->ra_reg] );
}
/* on arm, it seems the old r13 (SP) value before the call is
always the same as the CFA. Therefore ... */
si_m->r13_how = CFIR_CFAREL;
si_m->r13_off = 0;
/* bogus looking range? Note, we require that the difference is
representable in 32 bits. */
if (loc_start >= ctx->loc)
{ why = 4; goto failed; }
if (ctx->loc - loc_start > 10000000 /* let's say */)
{ why = 5; goto failed; }
*base = loc_start + ctx->initloc;
*len = (UInt)(ctx->loc - loc_start);
return True;
# elif defined(VGA_arm64)
/* --- entire tail of this fn specialised for arm64 --- */
SUMMARISE_HOW(si_m->x30_how, si_m->x30_off, ctxs->reg[30/*LR*/]);
SUMMARISE_HOW(si_m->x29_how, si_m->x29_off, ctxs->reg[29/*FP*/]);
if (ctxs->reg[30/*LR*/].tag == RR_Same
&& ctx->ra_reg == 30/*as we expect it always to be*/) {
/* Generate a trivial CfiExpr, which merely says "x30". First
ensure this DebugInfo has a cfsi_expr array in which to park
it. */
if (!debuginfo->cfsi_exprs)
debuginfo->cfsi_exprs = VG_(newXA)( ML_(dinfo_zalloc),
"di.ccCt.2a-arm64",
ML_(dinfo_free),
sizeof(CfiExpr) );
si_m->ra_off = ML_(CfiExpr_CfiReg)( debuginfo->cfsi_exprs,
Creg_ARM64_X30);
si_m->ra_how = CFIR_EXPR;
} else {
/* Just summarise it in the normal way */
SUMMARISE_HOW(si_m->ra_how, si_m->ra_off, ctxs->reg[ctx->ra_reg]);
}
/* on arm64, it seems the old SP value before the call is always
the same as the CFA. Therefore ... */
si_m->sp_how = CFIR_CFAREL;
si_m->sp_off = 0;
/* bogus looking range? Note, we require that the difference is
representable in 32 bits. */
if (loc_start >= ctx->loc)
{ why = 4; goto failed; }
if (ctx->loc - loc_start > 10000000 /* let's say */)
{ why = 5; goto failed; }
*base = loc_start + ctx->initloc;
*len = (UInt)(ctx->loc - loc_start);
return True;
# elif defined(VGA_s390x)
/* --- entire tail of this fn specialised for s390 --- */
SUMMARISE_HOW(si_m->ra_how, si_m->ra_off,
ctxs->reg[ctx->ra_reg] );
SUMMARISE_HOW(si_m->fp_how, si_m->fp_off,
ctxs->reg[FP_REG] );
SUMMARISE_HOW(si_m->sp_how, si_m->sp_off,
ctxs->reg[SP_REG] );
/* change some defaults to consumable values */
if (si_m->sp_how == CFIR_UNKNOWN)
si_m->sp_how = CFIR_SAME;
if (si_m->fp_how == CFIR_UNKNOWN)
si_m->fp_how = CFIR_SAME;
if (si_m->cfa_how == CFIR_UNKNOWN) {
si_m->cfa_how = CFIC_IA_SPREL;
si_m->cfa_off = 160;
}
if (si_m->ra_how == CFIR_UNKNOWN) {
if (!debuginfo->cfsi_exprs)
debuginfo->cfsi_exprs = VG_(newXA)( ML_(dinfo_zalloc),
"di.ccCt.2a",
ML_(dinfo_free),
sizeof(CfiExpr) );
si_m->ra_how = CFIR_EXPR;
si_m->ra_off = ML_(CfiExpr_CfiReg)( debuginfo->cfsi_exprs,
Creg_S390_LR);
}
/* knock out some obviously stupid cases */
if (si_m->ra_how == CFIR_SAME)
{ why = 3; goto failed; }
/* bogus looking range? Note, we require that the difference is
representable in 32 bits. */
if (loc_start >= ctx->loc)
{ why = 4; goto failed; }
if (ctx->loc - loc_start > 10000000 /* let's say */)
{ why = 5; goto failed; }
*base = loc_start + ctx->initloc;
*len = (UInt)(ctx->loc - loc_start);
return True;
# elif defined(VGA_mips32) || defined(VGA_mips64)
/* --- entire tail of this fn specialised for mips --- */
SUMMARISE_HOW(si_m->ra_how, si_m->ra_off,
ctxs->reg[ctx->ra_reg] );
SUMMARISE_HOW(si_m->fp_how, si_m->fp_off,
ctxs->reg[FP_REG] );
SUMMARISE_HOW(si_m->sp_how, si_m->sp_off,
ctxs->reg[SP_REG] );
si_m->sp_how = CFIR_CFAREL;
si_m->sp_off = 0;
if (si_m->fp_how == CFIR_UNKNOWN)
si_m->fp_how = CFIR_SAME;
if (si_m->cfa_how == CFIR_UNKNOWN) {
si_m->cfa_how = CFIC_IA_SPREL;
si_m->cfa_off = 160;
}
if (si_m->ra_how == CFIR_UNKNOWN) {
if (!debuginfo->cfsi_exprs)
debuginfo->cfsi_exprs = VG_(newXA)( ML_(dinfo_zalloc),
"di.ccCt.2a",
ML_(dinfo_free),
sizeof(CfiExpr) );
si_m->ra_how = CFIR_EXPR;
si_m->ra_off = ML_(CfiExpr_CfiReg)( debuginfo->cfsi_exprs,
Creg_MIPS_RA);
}
if (si_m->ra_how == CFIR_SAME)
{ why = 3; goto failed; }
if (loc_start >= ctx->loc)
{ why = 4; goto failed; }
if (ctx->loc - loc_start > 10000000 /* let's say */)
{ why = 5; goto failed; }
*base = loc_start + ctx->initloc;
*len = (UInt)(ctx->loc - loc_start);
return True;
# elif defined(VGA_tilegx)
/* --- entire tail of this fn specialised for tilegx --- */
SUMMARISE_HOW(si_m->ra_how, si_m->ra_off,
ctxs->reg[ctx->ra_reg] );
SUMMARISE_HOW(si_m->fp_how, si_m->fp_off,
ctxs->reg[FP_REG] );
SUMMARISE_HOW(si_m->sp_how, si_m->sp_off,
ctxs->reg[SP_REG] );
si_m->sp_how = CFIR_CFAREL;
si_m->sp_off = 0;
if (si_m->fp_how == CFIR_UNKNOWN)
si_m->fp_how = CFIR_SAME;
if (si_m->cfa_how == CFIR_UNKNOWN) {
si_m->cfa_how = CFIC_IA_SPREL;
si_m->cfa_off = 160;
}
if (si_m->ra_how == CFIR_UNKNOWN) {
if (!debuginfo->cfsi_exprs)
debuginfo->cfsi_exprs = VG_(newXA)( ML_(dinfo_zalloc),
"di.ccCt.2a",
ML_(dinfo_free),
sizeof(CfiExpr) );
si_m->ra_how = CFIR_EXPR;
si_m->ra_off = ML_(CfiExpr_CfiReg)( debuginfo->cfsi_exprs,
Creg_TILEGX_LR);
}
if (si_m->ra_how == CFIR_SAME)
{ why = 3; goto failed; }
if (loc_start >= ctx->loc)
{ why = 4; goto failed; }
if (ctx->loc - loc_start > 10000000 /* let's say */)
{ why = 5; goto failed; }
*base = loc_start + ctx->initloc;
*len = (UInt)(ctx->loc - loc_start);
return True;
# elif defined(VGA_ppc32) || defined(VGA_ppc64be) || defined(VGA_ppc64le)
/* These don't use CFI based unwinding (is that really true?) */
# else
# error "Unknown arch"
# endif
/* --- non-specialised code after this point --- */
# undef SUMMARISE_HOW
failed:
if (VG_(clo_verbosity) > 2 || debuginfo->trace_cfi) {
VG_(message)(Vg_DebugMsg,
"summarise_context(loc_start = %#lx)"
": cannot summarise(why=%d): \n", loc_start, why);
ppUnwindContext(ctx);
}
return False;
}
/* Copy the tree rooted at srcuc->exprs node srcix to dstxa, on the
way converting any DwReg regs (regs numbered using the Dwarf scheme
defined by each architecture's ABI) into CfiRegs, which are
platform independent. If the conversion isn't possible because
there is no equivalent register, return -1. This has the
undesirable side effect of de-dagifying the input; oh well. */
static Int copy_convert_CfiExpr_tree ( XArray* dstxa,
const UnwindContext* srcuc,
Int srcix )
{
CfiExpr* src;
Int cpL, cpR, cpA;
XArray* srcxa = srcuc->exprs;
vg_assert(srcxa);
vg_assert(dstxa);
vg_assert(srcix >= 0 && srcix < VG_(sizeXA)(srcxa));
src = VG_(indexXA)( srcxa, srcix );
switch (src->tag) {
case Cex_Undef:
return ML_(CfiExpr_Undef)( dstxa );
case Cex_Deref:
cpA = copy_convert_CfiExpr_tree( dstxa, srcuc, src->Cex.Deref.ixAddr );
if (cpA == -1)
return -1; /* propagate failure */
return ML_(CfiExpr_Deref)( dstxa, cpA );
case Cex_Const:
return ML_(CfiExpr_Const)( dstxa, src->Cex.Const.con );
case Cex_Binop:
cpL = copy_convert_CfiExpr_tree( dstxa, srcuc, src->Cex.Binop.ixL );
cpR = copy_convert_CfiExpr_tree( dstxa, srcuc, src->Cex.Binop.ixR );
vg_assert(cpL >= -1 && cpR >= -1);
if (cpL == -1 || cpR == -1)
return -1; /* propagate failure */
return ML_(CfiExpr_Binop)( dstxa, src->Cex.Binop.op, cpL, cpR );
case Cex_CfiReg:
/* should not see these in input (are created only by this
conversion step!) */
VG_(core_panic)("copy_convert_CfiExpr_tree: CfiReg in input");
case Cex_DwReg: {
/* This is the only place where the conversion can fail. */
Int dwreg __attribute__((unused));
dwreg = src->Cex.DwReg.reg;
# if defined(VGA_x86) || defined(VGA_amd64)
if (dwreg == SP_REG)
return ML_(CfiExpr_CfiReg)( dstxa, Creg_IA_SP );
if (dwreg == FP_REG)
return ML_(CfiExpr_CfiReg)( dstxa, Creg_IA_BP );
if (dwreg == srcuc->ra_reg)
return ML_(CfiExpr_CfiReg)( dstxa, Creg_IA_IP ); /* correct? */
# elif defined(VGA_arm)
if (dwreg == SP_REG)
return ML_(CfiExpr_CfiReg)( dstxa, Creg_ARM_R13 );
if (dwreg == FP_REG)
return ML_(CfiExpr_CfiReg)( dstxa, Creg_ARM_R12 );
if (dwreg == srcuc->ra_reg)
return ML_(CfiExpr_CfiReg)( dstxa, Creg_ARM_R15 ); /* correct? */
# elif defined(VGA_s390x)
if (dwreg == SP_REG)
return ML_(CfiExpr_CfiReg)( dstxa, Creg_S390_SP );
if (dwreg == FP_REG)
return ML_(CfiExpr_CfiReg)( dstxa, Creg_S390_FP );
if (dwreg == srcuc->ra_reg)
return ML_(CfiExpr_CfiReg)( dstxa, Creg_S390_IA );
# elif defined(VGA_mips32) || defined(VGA_mips64)
if (dwreg == SP_REG)
return ML_(CfiExpr_CfiReg)( dstxa, Creg_IA_SP );
if (dwreg == FP_REG)
return ML_(CfiExpr_CfiReg)( dstxa, Creg_IA_BP );
if (dwreg == srcuc->ra_reg)
return ML_(CfiExpr_CfiReg)( dstxa, Creg_IA_IP );
# elif defined(VGA_arm64)
I_die_here;
# elif defined(VGA_ppc32) || defined(VGA_ppc64be) \
|| defined(VGA_ppc64le)
# elif defined(VGA_tilegx)
if (dwreg == SP_REG)
return ML_(CfiExpr_CfiReg)( dstxa, Creg_TILEGX_SP );
if (dwreg == FP_REG)
return ML_(CfiExpr_CfiReg)( dstxa, Creg_TILEGX_BP );
if (dwreg == srcuc->ra_reg)
return ML_(CfiExpr_CfiReg)( dstxa, Creg_TILEGX_IP );
# else
# error "Unknown arch"
# endif
/* else we must fail - can't represent the reg */
return -1;
}
default:
VG_(core_panic)("copy_convert_CfiExpr_tree: default");
}
}
/* Print a compact one-line digest of CTX's current state: the
   location, the CFA rule (when it is a simple SP/FP + offset form),
   and the register rules for the return address and frame pointer. */
static void ppUnwindContext_summary ( const UnwindContext* ctx )
{
   const struct UnwindContextState* st = &ctx->state[ctx->state_sp];

   VG_(printf)("0x%llx-1: ", (ULong)ctx->loc);

   /* Only the reg+offset CFA forms get a symbolic rendering. */
   if (st->cfa_reg == SP_REG)
      VG_(printf)("SP/CFA=%d+SP ", st->cfa_off);
   else if (st->cfa_reg == FP_REG)
      VG_(printf)("SP/CFA=%d+FP ", st->cfa_off);
   else
      VG_(printf)("SP/CFA=unknown ");

   VG_(printf)("RA=");
   ppRegRule( ctx->exprs, &st->reg[ctx->ra_reg] );
   VG_(printf)("FP=");
   ppRegRule( ctx->exprs, &st->reg[FP_REG] );
   VG_(printf)("\n");
}
/* ------------ Pick apart DWARF2 byte streams ------------ */
/* Read a little-endian unsigned literal of SIZE (1, 2, 4 or 8)
   bytes at *DATA, stepping the cursor past it.  The result is
   zero-extended to 64 bits.  Any other SIZE is a caller bug. */
static ULong step_le_u_encoded_literal ( DiCursor* data, UInt size )
{
   if (size == 8) return (ULong)ML_(cur_step_ULong)( data );
   if (size == 4) return (ULong)ML_(cur_step_UInt)( data );
   if (size == 2) return (ULong)ML_(cur_step_UShort)( data );
   if (size == 1) return (ULong)ML_(cur_step_UChar)( data );
   vg_assert(0); /*NOTREACHED*/
   return 0;
}
/* Read a little-endian signed literal of SIZE (1, 2, 4 or 8) bytes
   at *DATA, stepping the cursor past it.  The value is sign-extended
   from 8*SIZE bits to 64 bits. */
static Long step_le_s_encoded_literal ( DiCursor* data, UInt size )
{
   ULong u64 = step_le_u_encoded_literal( data, size );
   vg_assert(size == 1 || size == 2 || size == 4 || size == 8);
   if (size == 8)
      return (Long)u64;
   /* Place the literal's sign bit in bit 63 with an unsigned left
      shift, then arithmetically shift back down to replicate it. */
   {
      UInt unused_bits = 64 - 8 * size;
      Long s64 = (Long)(u64 << unused_bits);
      return s64 >> unused_bits;
   }
}
/* The address encoding to assume when the DWARF data specifies none:
   an unsigned literal of the platform's native word size. */
static UChar default_Addr_encoding ( void )
{
   vg_assert(sizeof(Addr) == 4 || sizeof(Addr) == 8);
   return sizeof(Addr) == 8 ? DW_EH_PE_udata8 : DW_EH_PE_udata4;
}
/* Number of bytes occupied by an address with the given DW_EH_PE_*
   encoding.  DW_EH_PE_omit occupies none; otherwise the low three
   bits of the encoding determine the literal's width. */
static UInt size_of_encoded_Addr ( UChar encoding )
{
   UChar fmt;
   if (encoding == DW_EH_PE_omit)
      return 0;
   fmt = encoding & 0x07;
   if (fmt == DW_EH_PE_absptr) return sizeof(Addr);
   if (fmt == DW_EH_PE_udata2) return sizeof(UShort);
   if (fmt == DW_EH_PE_udata4) return sizeof(UInt);
   if (fmt == DW_EH_PE_udata8) return sizeof(ULong);
   vg_assert(0); /*NOTREACHED*/
   return 0;
}
/* Decode one DW_EH_PE_*-encoded address at *DATA, stepping the cursor
   past it.  The upper nibble of the encoding selects which base value
   the stored literal is relative to; the lower nibble selects the
   literal's size and signedness.  Indirect encodings are not
   supported (asserted against below). */
static Addr step_encoded_Addr ( const AddressDecodingInfo* adi,
                                /*MOD*/DiCursor* data )
{
   /* Regarding the handling of DW_EH_PE_absptr. DWARF3 says this
      denotes an absolute address, hence you would think 'base' is
      zero. However, that is nonsensical (unless relocations are to
      be applied to the unwind data before reading it, which sounds
      unlikely). My interpretation is that DW_EH_PE_absptr indicates
      an address relative to where the object was loaded (technically,
      relative to its stated load VMA, hence the use of text_bias
      rather than text_avma). Hmm, should we use text_bias or
      text_avma here? Not sure.
      This view appears to be supported by DWARF3 spec sec 7.3
      "Executable Objects and Shared Objects":
      This requirement makes the debugging information for shared
      objects position independent. Virtual addresses in a shared
      object may be calculated by adding the offset to the base
      address at which the object was attached. This offset is
      available in the run-time linker's data structures.
   */
   Addr base;
   Word offset;
   UChar encoding = adi->encoding;
   DiCursor ehframe_image = adi->ehframe_image;
   Addr ehframe_avma = adi->ehframe_avma;
   Addr got_avma = adi->got_avma;
   vg_assert((encoding & DW_EH_PE_indirect) == 0);
   /* First determine the base value implied by bits 4..6 of the
      encoding. */
   switch (encoding & 0x70) {
      case DW_EH_PE_absptr:
         base = adi->text_bias;
         break;
      case DW_EH_PE_pcrel:
         /* Relative to the run-time address of the encoded field
            itself: section avma plus the cursor's offset within the
            section image. */
         base = ehframe_avma + ML_(cur_minus)(*data, ehframe_image);
         break;
      case DW_EH_PE_datarel:
         base = got_avma;
         break;
      case DW_EH_PE_textrel:
         /* Never seen in practice; deliberately not supported. */
         vg_assert(0);
         base = /* text base address */ 0;
         break;
      case DW_EH_PE_funcrel:
         base = 0;
         break;
      case DW_EH_PE_aligned:
         /* Advance the cursor up to the next Addr-aligned boundary
            before reading the literal. */
         base = 0;
         offset = ML_(cur_minus)(*data, ehframe_image);
         if ((offset % sizeof(Addr)) != 0) {
            Word nbytes = sizeof(Addr) - (offset % sizeof(Addr));
            *data = ML_(cur_plus)(*data, nbytes);
         }
         break;
      default:
         vg_assert(0);
   }
   /* An all-zero size field means "native word size". */
   if ((encoding & 0x07) == 0x00)
      encoding |= default_Addr_encoding();
   /* Now read the literal itself (bits 0..3 give size+signedness)
      and add it to the base. */
   switch (encoding & 0x0f) {
      case DW_EH_PE_udata2:
         return base + ML_(cur_step_UShort)(data);
      case DW_EH_PE_udata4:
         return base + ML_(cur_step_UInt)(data);
      case DW_EH_PE_udata8:
         return base + ML_(cur_step_ULong)(data);
      case DW_EH_PE_sdata2:
         return base + ML_(cur_step_Short)(data);
      case DW_EH_PE_sdata4:
         return base + ML_(cur_step_Int)(data);
      case DW_EH_PE_sdata8:
         return base + ML_(cur_step_Long)(data);
      default:
         vg_assert2(0, "read encoded address %d\n", encoding & 0x0f);
   }
}
/* ------------ Run/show DWARF3 expressions ---------- */
/* Convert the DWARF3 expression in expr[0 .. exprlen-1] into a dag
(of CfiExprs) stored in ctx->exprs, and return the index in
ctx->exprs of the root node. Or fail in which case return -1. */
/* IMPORTANT: when adding expression forms here, also remember to
add suitable evaluation code in evalCfiExpr in debuginfo.c. */
/* Translate the DWARF expression bytecode in expr[0 .. exprlen-1]
   into a dag of CfiExprs appended to ctx->exprs, simulating the
   DWARF evaluation stack at translation time.  Returns the
   ctx->exprs index of the dag's root node, or -1 on any failure
   (unknown opcode, stack over/underflow, cursor overrun).  If
   push_cfa_at_start is True, the current CFA (from ctx's state) is
   pre-pushed, as DWARF requires for DW_CFA_{val_,}expression.
   If ddump_frames is True, a readelf-style rendering of each opcode
   is printed as it is translated. */
static Int dwarfexpr_to_dag ( const UnwindContext* ctx,
                              DiCursor expr, Int exprlen,
                              Bool push_cfa_at_start,
                              Bool ddump_frames )
{
   /* NB: both PUSH and POP contain an early 'return -1', so they must
      only be used directly inside this function's body. */
#  define N_EXPR_STACK 20
#  define PUSH(_arg) \
      do { \
         vg_assert(sp >= -1 && sp < N_EXPR_STACK); \
         if (sp == N_EXPR_STACK-1) \
            return -1; \
         sp++; \
         stack[sp] = (_arg); \
      } while (0)
#  define POP(_lval) \
      do { \
         vg_assert(sp >= -1 && sp < N_EXPR_STACK); \
         if (sp == -1) \
            return -1; \
         _lval = stack[sp]; \
         sp--; \
      } while (0)
   Int ix, ix2, reg;
   UChar opcode;
   Word sw;
   UWord uw;
   CfiUnop uop;
   CfiBinop bop;
   const HChar* opname;
   Int sp; /* # of top element: valid is -1 .. N_EXPR_STACK-1 */
   Int stack[N_EXPR_STACK]; /* indices into ctx->exprs */
   const struct UnwindContextState* ctxs = &ctx->state[ctx->state_sp];
   XArray* dst = ctx->exprs;
   DiCursor limit = ML_(cur_plus)(expr, exprlen);
   vg_assert(dst);
   vg_assert(exprlen >= 0);
   sp = -1; /* empty */
   /* Synthesise the CFA as a CfiExpr */
   if (push_cfa_at_start) {
      if (ctxs->cfa_is_regoff) {
         /* cfa is reg +/- offset */
         ix = ML_(CfiExpr_Binop)( dst,
                 Cbinop_Add,
                 ML_(CfiExpr_DwReg)( dst, ctxs->cfa_reg ),
                 ML_(CfiExpr_Const)( dst, (UWord)(Word)ctxs->cfa_off )
              );
         PUSH(ix);
      } else {
         /* CFA is already an expr; use its root node */
         PUSH(ctxs->cfa_expr_ix);
      }
   }
   /* Main translation loop: one DWARF opcode per iteration. */
   while (True) {
      vg_assert(sp >= -1 && sp < N_EXPR_STACK);
      if (ML_(cur_cmpGT)(expr, limit)) /* "expr > limit" */
         return -1; /* overrun - something's wrong */
      if (ML_(cur_cmpEQ)(expr, limit)) { /* "expr == limit" */
         /* end of expr - return expr on the top of stack. */
         if (sp == -1)
            return -1; /* stack empty. Bad. */
         else
            break;
      }
      uop = 0; bop = 0; opname = NULL; /* excessively conservative */
      opcode = ML_(cur_step_UChar)(&expr);
      /* NB: 'case A ... B' is a GCC/Clang range extension. */
      switch (opcode) {
         case DW_OP_lit0 ... DW_OP_lit31:
            /* push: literal 0 .. 31 */
            sw = (Word)opcode - (Word)DW_OP_lit0;
            vg_assert(sw >= 0 && sw <= 31);
            PUSH( ML_(CfiExpr_Const)( dst, (UWord)sw ) );
            if (ddump_frames)
               VG_(printf)("DW_OP_lit%ld", sw);
            break;
         case DW_OP_breg0 ... DW_OP_breg31:
            /* push: reg + sleb128 */
            reg = (Int)opcode - (Int)DW_OP_breg0;
            vg_assert(reg >= 0 && reg <= 31);
            sw = step_leb128S( &expr );
            ix = ML_(CfiExpr_Binop)( dst,
                    Cbinop_Add,
                    ML_(CfiExpr_DwReg)( dst, reg ),
                    ML_(CfiExpr_Const)( dst, (UWord)sw )
                 );
            PUSH(ix);
            if (ddump_frames)
               VG_(printf)("DW_OP_breg%d: %ld", reg, sw);
            break;
         case DW_OP_reg0 ... DW_OP_reg31:
            /* push: reg */
            reg = (Int)opcode - (Int)DW_OP_reg0;
            vg_assert(reg >= 0 && reg <= 31);
            ix = ML_(CfiExpr_DwReg)( dst, reg );
            PUSH(ix);
            if (ddump_frames)
               VG_(printf)("DW_OP_reg%d", reg);
            break;
         case DW_OP_plus_uconst:
            /* top-of-stack + uleb128 constant */
            uw = step_leb128U( &expr );
            PUSH( ML_(CfiExpr_Const)( dst, uw ) );
            POP( ix );
            POP( ix2 );
            PUSH( ML_(CfiExpr_Binop)( dst, Cbinop_Add, ix2, ix ) );
            if (ddump_frames)
               VG_(printf)("DW_OP_plus_uconst: %lu", uw);
            break;
         case DW_OP_const4s:
            /* push: 32-bit signed immediate */
            sw = step_le_s_encoded_literal( &expr, 4 );
            PUSH( ML_(CfiExpr_Const)( dst, (UWord)sw ) );
            if (ddump_frames)
               VG_(printf)("DW_OP_const4s: %ld", sw);
            break;
         case DW_OP_const2s:
            /* push: 16-bit signed immediate */
            sw = step_le_s_encoded_literal( &expr, 2 );
            PUSH( ML_(CfiExpr_Const)( dst, (UWord)sw ) );
            if (ddump_frames)
               VG_(printf)("DW_OP_const2s: %ld", sw);
            break;
         case DW_OP_const1s:
            /* push: 8-bit signed immediate */
            sw = step_le_s_encoded_literal( &expr, 1 );
            PUSH( ML_(CfiExpr_Const)( dst, (UWord)sw ) );
            if (ddump_frames)
               VG_(printf)("DW_OP_const1s: %ld", sw);
            break;
         case DW_OP_const1u:
            /* push: 8-bit unsigned immediate */
            uw = step_le_u_encoded_literal( &expr, 1 );
            PUSH( ML_(CfiExpr_Const)( dst, uw ) );
            if (ddump_frames)
               VG_(printf)("DW_OP_const1: %lu", uw);
            break;
         case DW_OP_const2u:
            /* push: 16-bit unsigned immediate */
            uw = step_le_u_encoded_literal( &expr, 2 );
            PUSH( ML_(CfiExpr_Const)( dst, uw ) );
            if (ddump_frames)
               VG_(printf)("DW_OP_const2: %lu", uw);
            break;
         case DW_OP_const4u:
            /* push: 32-bit unsigned immediate */
            uw = step_le_u_encoded_literal( &expr, 4 );
            PUSH( ML_(CfiExpr_Const)( dst, uw ) );
            if (ddump_frames)
               VG_(printf)("DW_OP_const4: %lu", uw);
            break;
         /* Unary ops: select the CfiUnop, then share the pop/push
            code at 'unop'. */
         case DW_OP_abs:
            uop = Cunop_Abs; opname = "abs"; goto unop;
         case DW_OP_neg:
            uop = Cunop_Neg; opname = "neg"; goto unop;
         case DW_OP_not:
            uop = Cunop_Not; opname = "not"; goto unop;
         unop:
            POP( ix );
            PUSH( ML_(CfiExpr_Unop)( dst, uop, ix ) );
            if (ddump_frames)
               VG_(printf)("DW_OP_%s", opname);
            break;
         /* Binary ops: select the CfiBinop, then share the
            pop/pop/push code at 'binop'. */
         case DW_OP_minus:
            bop = Cbinop_Sub; opname = "minus"; goto binop;
         case DW_OP_plus:
            bop = Cbinop_Add; opname = "plus"; goto binop;
         case DW_OP_and:
            bop = Cbinop_And; opname = "and"; goto binop;
         case DW_OP_mul:
            bop = Cbinop_Mul; opname = "mul"; goto binop;
         case DW_OP_shl:
            bop = Cbinop_Shl; opname = "shl"; goto binop;
         case DW_OP_shr:
            bop = Cbinop_Shr; opname = "shr"; goto binop;
         case DW_OP_eq:
            bop = Cbinop_Eq; opname = "eq"; goto binop;
         case DW_OP_ge:
            bop = Cbinop_Ge; opname = "ge"; goto binop;
         case DW_OP_gt:
            bop = Cbinop_Gt; opname = "gt"; goto binop;
         case DW_OP_le:
            bop = Cbinop_Le; opname = "le"; goto binop;
         case DW_OP_lt:
            bop = Cbinop_Lt; opname = "lt"; goto binop;
         case DW_OP_ne:
            bop = Cbinop_Ne; opname = "ne"; goto binop;
         binop:
            POP( ix );
            POP( ix2 );
            PUSH( ML_(CfiExpr_Binop)( dst, bop, ix2, ix ) );
            if (ddump_frames)
               VG_(printf)("DW_OP_%s", opname);
            break;
         case DW_OP_deref:
            POP( ix );
            PUSH( ML_(CfiExpr_Deref)( dst, ix ) );
            if (ddump_frames)
               VG_(printf)("DW_OP_deref");
            break;
         default:
            if (!VG_(clo_xml))
               VG_(message)(Vg_DebugMsg,
                            "Warning: DWARF2 CFI reader: unhandled DW_OP_ "
                            "opcode 0x%x\n", (Int)opcode);
            return -1;
      }
      if (ML_(cur_cmpLT)(expr, limit) && ddump_frames)
         VG_(printf)("; ");
   }
   vg_assert(sp >= -1 && sp < N_EXPR_STACK);
   if (sp == -1)
      return -1;
   /* Disabled debug printing of the resulting dag. */
   if (0 && ddump_frames)
      ML_(ppCfiExpr)( dst, stack[sp] );
   return stack[sp];
#  undef POP
#  undef PUSH
#  undef N_EXPR_STACK
}
/* ------------ Run/show CFI instructions ------------ */
/* Run a CFI instruction, and also return its length.
Returns 0 if the instruction could not be executed.
*/
/* Execute one DWARF call-frame instruction starting at instrIN,
   updating CTX accordingly, and return the instruction's length in
   bytes so the caller can advance.  Returns 0 on failure (bad
   register number, reg-rule stack over/underflow, unhandled opcode).
   restore_ctx supplies the CIE-initial state for DW_CFA_restore*;
   adi is needed to decode the address in DW_CFA_set_loc; di supplies
   text_bias (for dump output) and the ddump_frames flag. */
static Int run_CF_instruction ( /*MOD*/UnwindContext* ctx,
                                DiCursor instrIN,
                                const UnwindContext* restore_ctx,
                                const AddressDecodingInfo* adi,
                                const DebugInfo* di )
{
   Int off, reg, reg2, len, j;
   UInt delta;
   /* Bias used only to print SVMA-style addresses in ddump output. */
   Addr printing_bias = ((Addr)ctx->initloc) - ((Addr)di->text_bias);
   struct UnwindContextState* ctxs;
   DiCursor instr = instrIN;
   UChar instr_0 = ML_(cur_step_UChar)(&instr);
   /* Primary opcode lives in the top two bits; the low six bits are
      either an operand (for the three primary opcodes) or an extended
      opcode (when hi2 == DW_CFA_use_secondary). */
   UChar hi2 = (instr_0 >> 6) & 3;
   UChar lo6 = instr_0 & 0x3F;
   if (ctx->state_sp < 0 || ctx->state_sp >= N_RR_STACK)
      return 0; /* bogus reg-rule stack pointer */
   ctxs = &ctx->state[ctx->state_sp];
   if (hi2 == DW_CFA_advance_loc) {
      delta = (UInt)lo6;
      delta *= ctx->code_a_f;
      ctx->loc += delta;
      if (di->ddump_frames)
         VG_(printf)(" DW_CFA_advance_loc: %d to %08lx\n",
                     (Int)delta, (Addr)ctx->loc + printing_bias);
      return ML_(cur_minus)(instr, instrIN);
   }
   if (hi2 == DW_CFA_offset) {
      /* Set rule for reg 'lo6' to CFAOff(off * data_af) */
      off = step_leb128( &instr, 0 );
      reg = (Int)lo6;
      if (reg < 0 || reg >= N_CFI_REGS)
         return 0; /* fail */
      ctxs->reg[reg].tag = RR_CFAOff;
      ctxs->reg[reg].arg = off * ctx->data_a_f;
      if (di->ddump_frames)
         VG_(printf)(" DW_CFA_offset: r%d at cfa%s%d\n",
                     (Int)reg,
                     ctxs->reg[reg].arg < 0 ? "" : "+",
                     (Int)ctxs->reg[reg].arg );
      return ML_(cur_minus)(instr, instrIN);
   }
   if (hi2 == DW_CFA_restore) {
      /* Reinstate reg 'lo6's rule from the CIE-initial state. */
      reg = (Int)lo6;
      if (reg < 0 || reg >= N_CFI_REGS)
         return 0; /* fail */
      if (restore_ctx == NULL)
         return 0; /* fail */
      ctxs->reg[reg] = restore_ctx->state[restore_ctx->state_sp].reg[reg];
      if (di->ddump_frames)
         VG_(printf)(" DW_CFA_restore: r%d\n", (Int)reg);
      return ML_(cur_minus)(instr, instrIN);
   }
   vg_assert(hi2 == DW_CFA_use_secondary);
   /* Extended opcodes: lo6 selects the operation. */
   switch (lo6) {
      case DW_CFA_nop:
         if (di->ddump_frames)
            VG_(printf)(" DW_CFA_nop\n");
         break;
      case DW_CFA_set_loc:
         /* WAS:
            ctx->loc = read_Addr(&instr[i]) - ctx->initloc; i+= sizeof(Addr);
            Was this ever right? */
         /* 2007 Feb 23: No. binutils/dwarf.c treats it as an encoded
            address and that appears to be in accordance with the
            DWARF3 spec. */
         ctx->loc = step_encoded_Addr(adi, &instr);
         if (di->ddump_frames)
            VG_(printf)(" rci:DW_CFA_set_loc\n");
         break;
      case DW_CFA_advance_loc1:
         delta = (UInt)ML_(cur_step_UChar)(&instr);
         delta *= ctx->code_a_f;
         ctx->loc += delta;
         if (di->ddump_frames)
            VG_(printf)(" DW_CFA_advance_loc1: %d to %08lx\n",
                        (Int)delta, (Addr)ctx->loc + printing_bias);
         break;
      case DW_CFA_advance_loc2:
         delta = (UInt)ML_(cur_step_UShort)(&instr);
         delta *= ctx->code_a_f;
         ctx->loc += delta;
         if (di->ddump_frames)
            VG_(printf)(" DW_CFA_advance_loc2: %d to %08lx\n",
                        (Int)delta, (Addr)ctx->loc + printing_bias);
         break;
      case DW_CFA_advance_loc4:
         delta = (UInt)ML_(cur_step_UInt)(&instr);
         delta *= ctx->code_a_f;
         ctx->loc += delta;
         if (di->ddump_frames)
            VG_(printf)(" DW_CFA_advance_loc4: %d to %08lx\n",
                        (Int)delta, (Addr)ctx->loc + printing_bias);
         break;
      case DW_CFA_def_cfa:
         /* CFA := reg + off (off is an unsigned, unfactored leb). */
         reg = step_leb128( &instr, 0 );
         off = step_leb128( &instr, 0 );
         if (reg < 0 || reg >= N_CFI_REGS)
            return 0; /* fail */
         ctxs->cfa_is_regoff = True;
         ctxs->cfa_expr_ix = 0;
         ctxs->cfa_reg = reg;
         ctxs->cfa_off = off;
         if (di->ddump_frames)
            VG_(printf)(" DW_CFA_def_cfa: r%d ofs %d\n", (Int)reg, (Int)off);
         break;
      case DW_CFA_def_cfa_sf:
         /* As DW_CFA_def_cfa, but off is signed and factored. */
         reg = step_leb128( &instr, 0 );
         off = step_leb128( &instr, 1 );
         if (reg < 0 || reg >= N_CFI_REGS)
            return 0; /* fail */
         ctxs->cfa_is_regoff = True;
         ctxs->cfa_expr_ix = 0;
         ctxs->cfa_reg = reg;
         ctxs->cfa_off = off * ctx->data_a_f;
         if (di->ddump_frames)
            VG_(printf)(" rci:DW_CFA_def_cfa_sf\n");
         break;
      case DW_CFA_register:
         /* reg's old value is saved in reg2. */
         reg = step_leb128( &instr, 0 );
         reg2 = step_leb128( &instr, 0 );
         if (reg < 0 || reg >= N_CFI_REGS)
            return 0; /* fail */
         if (reg2 < 0 || reg2 >= N_CFI_REGS)
            return 0; /* fail */
         ctxs->reg[reg].tag = RR_Reg;
         ctxs->reg[reg].arg = reg2;
         if (di->ddump_frames)
            VG_(printf)(" DW_CFA_register: r%d in r%d\n",
                        (Int)reg, (Int)reg2);
         break;
      case DW_CFA_offset_extended:
         reg = step_leb128( &instr, 0 );
         off = step_leb128( &instr, 0 );
         if (reg < 0 || reg >= N_CFI_REGS)
            return 0; /* fail */
         ctxs->reg[reg].tag = RR_CFAOff;
         ctxs->reg[reg].arg = off * ctx->data_a_f;
         if (di->ddump_frames)
            VG_(printf)(" rci:DW_CFA_offset_extended\n");
         break;
      case DW_CFA_offset_extended_sf:
         reg = step_leb128( &instr, 0 );
         off = step_leb128( &instr, 1 );
         if (reg < 0 || reg >= N_CFI_REGS)
            return 0; /* fail */
         ctxs->reg[reg].tag = RR_CFAOff;
         ctxs->reg[reg].arg = off * ctx->data_a_f;
         if (di->ddump_frames)
            VG_(printf)(" DW_CFA_offset_extended_sf: r%d at cfa%s%d\n",
                        reg,
                        ctxs->reg[reg].arg < 0 ? "" : "+",
                        (Int)ctxs->reg[reg].arg);
         break;
      case DW_CFA_GNU_negative_offset_extended:
         /* GNU extension: like offset_extended but the offset is
            negated before factoring. */
         reg = step_leb128( &instr, 0 );
         off = step_leb128( &instr, 0 );
         if (reg < 0 || reg >= N_CFI_REGS)
            return 0; /* fail */
         ctxs->reg[reg].tag = RR_CFAOff;
         ctxs->reg[reg].arg = (-off) * ctx->data_a_f;
         if (di->ddump_frames)
            VG_(printf)(" rci:DW_CFA_GNU_negative_offset_extended\n");
         break;
      case DW_CFA_restore_extended:
         reg = step_leb128( &instr, 0 );
         if (reg < 0 || reg >= N_CFI_REGS)
            return 0; /* fail */
         if (restore_ctx == NULL)
            return 0; /* fail */
         ctxs->reg[reg] = restore_ctx->state[restore_ctx->state_sp].reg[reg];
         if (di->ddump_frames)
            VG_(printf)(" rci:DW_CFA_restore_extended\n");
         break;
      case DW_CFA_val_offset:
         /* reg's value (not its save slot) is CFA + factored off. */
         reg = step_leb128( &instr, 0 );
         off = step_leb128( &instr, 0 );
         if (reg < 0 || reg >= N_CFI_REGS)
            return 0; /* fail */
         ctxs->reg[reg].tag = RR_CFAValOff;
         ctxs->reg[reg].arg = off * ctx->data_a_f;
         if (di->ddump_frames)
            VG_(printf)(" rci:DW_CFA_val_offset\n");
         break;
      case DW_CFA_val_offset_sf:
         reg = step_leb128( &instr, 0 );
         off = step_leb128( &instr, 1 );
         if (reg < 0 || reg >= N_CFI_REGS)
            return 0; /* fail */
         ctxs->reg[reg].tag = RR_CFAValOff;
         ctxs->reg[reg].arg = off * ctx->data_a_f;
         if (di->ddump_frames)
            VG_(printf)(" rci:DW_CFA_val_offset_sf\n");
         break;
      case DW_CFA_def_cfa_register:
         reg = step_leb128( &instr, 0);
         if (reg < 0 || reg >= N_CFI_REGS)
            return 0; /* fail */
         ctxs->cfa_is_regoff = True;
         ctxs->cfa_expr_ix = 0;
         ctxs->cfa_reg = reg;
         /* ->cfa_off unchanged */
         if (di->ddump_frames)
            VG_(printf)(" DW_CFA_def_cfa_register: r%d\n", (Int)reg );
         break;
      case DW_CFA_def_cfa_offset:
         off = step_leb128( &instr, 0);
         ctxs->cfa_is_regoff = True;
         ctxs->cfa_expr_ix = 0;
         /* ->reg is unchanged */
         ctxs->cfa_off = off;
         if (di->ddump_frames)
            VG_(printf)(" DW_CFA_def_cfa_offset: %d\n", (Int)off);
         break;
      case DW_CFA_def_cfa_offset_sf:
         off = step_leb128( &instr, 1);
         ctxs->cfa_is_regoff = True;
         ctxs->cfa_expr_ix = 0;
         /* ->reg is unchanged */
         ctxs->cfa_off = off * ctx->data_a_f;
         if (di->ddump_frames)
            VG_(printf)(" DW_CFA_def_cfa_offset_sf: %d\n", ctxs->cfa_off);
         break;
      case DW_CFA_undefined:
         reg = step_leb128( &instr, 0);
         if (reg < 0 || reg >= N_CFI_REGS)
            return 0; /* fail */
         ctxs->reg[reg].tag = RR_Undef;
         ctxs->reg[reg].arg = 0;
         if (di->ddump_frames)
            VG_(printf)(" rci:DW_CFA_undefined\n");
         break;
      case DW_CFA_same_value:
         reg = step_leb128( &instr, 0);
         if (reg < 0 || reg >= N_CFI_REGS)
            return 0; /* fail */
         ctxs->reg[reg].tag = RR_Same;
         ctxs->reg[reg].arg = 0;
         if (di->ddump_frames)
            VG_(printf)(" rci:DW_CFA_same_value\n");
         break;
      case DW_CFA_GNU_args_size:
         /* No idea what is supposed to happen. gdb-6.3 simply
            ignores these. */
         /*off = */ (void)step_leb128( &instr, 0 );
         if (di->ddump_frames)
            VG_(printf)(" rci:DW_CFA_GNU_args_size (ignored)\n");
         break;
      case DW_CFA_expression: {
         /* Identical to DW_CFA_val_expression except that the value
            computed is an address and so needs one final
            dereference. */
         DiCursor expr;
         reg = step_leb128( &instr, 0 );
         len = step_leb128( &instr, 0 );
         expr = instr;
         instr = ML_(cur_plus)(instr, len);
         if (reg < 0 || reg >= N_CFI_REGS)
            return 0; /* fail */
         if (di->ddump_frames)
            VG_(printf)(" DW_CFA_expression: r%d (",
                        (Int)reg);
         /* Convert the expression into a dag rooted at ctx->exprs index j,
            or fail. */
         j = dwarfexpr_to_dag ( ctx, expr, len, True/*push CFA at start*/,
                                di->ddump_frames);
         if (di->ddump_frames)
            VG_(printf)(")\n");
         vg_assert(j >= -1);
         if (j >= 0) {
            vg_assert(ctx->exprs);
            vg_assert( j < VG_(sizeXA)(ctx->exprs) );
         }
         if (j == -1)
            return 0; /* fail */
         /* Add an extra dereference */
         j = ML_(CfiExpr_Deref)( ctx->exprs, j );
         ctxs->reg[reg].tag = RR_ValExpr;
         ctxs->reg[reg].arg = j;
         break;
      }
      case DW_CFA_val_expression: {
         DiCursor expr;
         reg = step_leb128( &instr, 0 );
         len = step_leb128( &instr, 0 );
         expr = instr;
         instr = ML_(cur_plus)(instr, len);
         if (reg < 0 || reg >= N_CFI_REGS)
            return 0; /* fail */
         if (di->ddump_frames)
            VG_(printf)(" DW_CFA_val_expression: r%d (",
                        (Int)reg);
         /* Convert the expression into a dag rooted at ctx->exprs index j,
            or fail. */
         j = dwarfexpr_to_dag ( ctx, expr, len, True/*push CFA at start*/,
                                di->ddump_frames);
         if (di->ddump_frames)
            VG_(printf)(")\n");
         vg_assert(j >= -1);
         if (j >= 0) {
            vg_assert(ctx->exprs);
            vg_assert( j < VG_(sizeXA)(ctx->exprs) );
         }
         if (j == -1)
            return 0; /* fail */
         ctxs->reg[reg].tag = RR_ValExpr;
         ctxs->reg[reg].arg = j;
         break;
      }
      case DW_CFA_def_cfa_expression: {
         DiCursor expr;
         len = step_leb128( &instr, 0 );
         expr = instr;
         instr = ML_(cur_plus)(instr, len);
         if (di->ddump_frames)
            VG_(printf)(" DW_CFA_def_cfa_expression (");
         /* Convert the expression into a dag rooted at ctx->exprs index j,
            or fail. */
         j = dwarfexpr_to_dag ( ctx, expr, len, False/*!push CFA at start*/,
                                di->ddump_frames);
         if (di->ddump_frames)
            VG_(printf)(")\n");
         /* NOTE(review): j == -1 (failed conversion) is stored into
            cfa_expr_ix without an explicit failure return here —
            presumably consumers treat that index as invalid; confirm. */
         ctxs->cfa_is_regoff = False;
         ctxs->cfa_reg = 0;
         ctxs->cfa_off = 0;
         ctxs->cfa_expr_ix = j;
         break;
      }
      case DW_CFA_GNU_window_save:
         /* Ignored. This appears to be sparc-specific; quite why it
            turns up in SuSE-supplied x86 .so's beats me. */
         if (di->ddump_frames)
            VG_(printf)(" DW_CFA_GNU_window_save\n");
         break;
      case DW_CFA_remember_state:
         if (di->ddump_frames)
            VG_(printf)(" DW_CFA_remember_state\n");
         /* we just checked this at entry, so: */
         vg_assert(ctx->state_sp >= 0 && ctx->state_sp < N_RR_STACK);
         ctx->state_sp++;
         if (ctx->state_sp == N_RR_STACK) {
            /* stack overflow. We're hosed. */
            VG_(message)(Vg_DebugMsg, "DWARF2 CFI reader: N_RR_STACK is "
                                      "too low; increase and recompile.");
            return 0; /* indicate failure */
         } else {
            /* Duplicate the current state onto the new stack slot. */
            VG_(memcpy)(/*dst*/&ctx->state[ctx->state_sp],
                        /*src*/&ctx->state[ctx->state_sp - 1],
                        sizeof(ctx->state[ctx->state_sp]) );
         }
         break;
      case DW_CFA_restore_state:
         if (di->ddump_frames)
            VG_(printf)(" DW_CFA_restore_state\n");
         /* we just checked this at entry, so: */
         vg_assert(ctx->state_sp >= 0 && ctx->state_sp < N_RR_STACK);
         if (ctx->state_sp == 0) {
            /* stack underflow. Give up. */
            return 0; /* indicate failure */
         } else {
            /* simply fall back to previous entry */
            ctx->state_sp--;
         }
         break;
      case DW_CFA_ORCL_arg_loc:
         if (di->ddump_frames)
            VG_(printf)(" DW_CFA_ORCL_arg_loc\n");
         break;
      default:
         VG_(message)(Vg_DebugMsg, "DWARF2 CFI reader: unhandled CFI "
                                   "instruction 0:%d\n", (Int)lo6);
         if (di->ddump_frames)
            VG_(printf)(" rci:run_CF_instruction:default\n");
         return 0; /* failure */
         /*NOTREACHED*/
   }
   return ML_(cur_minus)(instr, instrIN);
}
/* Show a CFI instruction, and also return its length. Show it as
close as possible (preferably identical) to how GNU binutils
readelf --debug-dump=frames would. */
/* Print one call-frame instruction starting at instrIN, without
   executing it, and return its length in bytes.  Output for the
   commonly-seen opcodes mimics 'readelf --debug-dump=frames'; the
   rest get a generic "sci:" rendering.  data_a_f is used to scale
   factored offsets for display; code_a_f is currently unused but
   kept in the signature for symmetry with run_CF_instruction. */
static Int show_CF_instruction ( DiCursor instrIN,
                                 const AddressDecodingInfo* adi,
                                 Int code_a_f, Int data_a_f )
{
   Int off, coff, reg, reg2, len;
   UInt delta;
   Addr loc;
   DiCursor instr = instrIN;
   UChar instr_0 = ML_(cur_step_UChar)(&instr);
   /* Same opcode split as run_CF_instruction: primary opcode in the
      top two bits, operand/extended-opcode in the low six. */
   UChar hi2 = (instr_0 >> 6) & 3;
   UChar lo6 = instr_0 & 0x3F;
   /* Disabled raw-byte dump of the instruction stream. */
   if (0) {
      DiCursor tmpi = instrIN;
      UInt i_0 = ML_(cur_step_UChar)(&tmpi);
      UInt i_1 = ML_(cur_step_UChar)(&tmpi);
      UInt i_2 = ML_(cur_step_UChar)(&tmpi);
      UInt i_3 = ML_(cur_step_UChar)(&tmpi);
      UInt i_4 = ML_(cur_step_UChar)(&tmpi);
      UInt i_5 = ML_(cur_step_UChar)(&tmpi);
      UInt i_6 = ML_(cur_step_UChar)(&tmpi);
      UInt i_7 = ML_(cur_step_UChar)(&tmpi);
      VG_(printf)("raw:%x/%x:%x:%x:%x:%x:%x:%x:%x:%x\n",
                  hi2, lo6, i_0, i_1, i_2, i_3, i_4, i_5, i_6, i_7);
   }
   if (hi2 == DW_CFA_advance_loc) {
      VG_(printf)(" sci:DW_CFA_advance_loc(%d)\n", (Int)lo6);
      return ML_(cur_minus)(instr, instrIN);
   }
   if (hi2 == DW_CFA_offset) {
      off = step_leb128( &instr, 0 );
      coff = off * data_a_f;
      VG_(printf)(" DW_CFA_offset: r%d at cfa%s%d\n",
                  (Int)lo6, coff < 0 ? "" : "+", (Int)coff );
      return ML_(cur_minus)(instr, instrIN);
   }
   if (hi2 == DW_CFA_restore) {
      VG_(printf)(" sci:DW_CFA_restore(r%d)\n", (Int)lo6);
      return ML_(cur_minus)(instr, instrIN);
   }
   vg_assert(hi2 == DW_CFA_use_secondary);
   /* Extended opcodes.  Each case must step 'instr' past exactly the
      operands the opcode consumes, so the returned length is right. */
   switch (lo6) {
      case DW_CFA_nop:
         VG_(printf)(" DW_CFA_nop\n");
         break;
      case DW_CFA_set_loc:
         /* WAS: loc = read_Addr(&instr[i]); i+= sizeof(Addr);
            (now known to be incorrect -- the address is encoded) */
         loc = step_encoded_Addr(adi, &instr);
         VG_(printf)(" sci:DW_CFA_set_loc(%#lx)\n", loc);
         break;
      case DW_CFA_advance_loc1:
         delta = (UInt)ML_(cur_step_UChar)(&instr);
         VG_(printf)(" sci:DW_CFA_advance_loc1(%u)\n", delta);
         break;
      case DW_CFA_advance_loc2:
         delta = (UInt)ML_(cur_step_UShort)(&instr);
         VG_(printf)(" sci:DW_CFA_advance_loc2(%u)\n", delta);
         break;
      case DW_CFA_advance_loc4:
         delta = (UInt)ML_(cur_step_UInt)(&instr);
         VG_(printf)(" DW_CFA_advance_loc4(%u)\n", delta);
         break;
      case DW_CFA_def_cfa:
         reg = step_leb128( &instr, 0 );
         off = step_leb128( &instr, 0 );
         VG_(printf)(" DW_CFA_def_cfa: r%d ofs %d\n", reg, off);
         break;
      case DW_CFA_def_cfa_sf:
         reg = step_leb128( &instr, 0 );
         off = step_leb128( &instr, 1 );
         VG_(printf)(" DW_CFA_def_cfa_sf: r%d ofs %d\n",
                     reg, off * data_a_f);
         break;
      case DW_CFA_register:
         reg = step_leb128( &instr, 0);
         reg2 = step_leb128( &instr, 0);
         VG_(printf)(" sci:DW_CFA_register(r%d, r%d)\n", reg, reg2);
         break;
      case DW_CFA_def_cfa_register:
         reg = step_leb128( &instr, 0);
         VG_(printf)(" sci:DW_CFA_def_cfa_register(r%d)\n", reg);
         break;
      case DW_CFA_def_cfa_offset:
         off = step_leb128( &instr, 0);
         VG_(printf)(" sci:DW_CFA_def_cfa_offset(%d)\n", off);
         break;
      case DW_CFA_def_cfa_offset_sf:
         off = step_leb128( &instr, 1);
         VG_(printf)(" sci:DW_CFA_def_cfa_offset_sf(%d)\n", off);
         break;
      case DW_CFA_restore_extended:
         reg = step_leb128( &instr, 0);
         VG_(printf)(" sci:DW_CFA_restore_extended(r%d)\n", reg);
         break;
      case DW_CFA_undefined:
         reg = step_leb128( &instr, 0);
         VG_(printf)(" sci:DW_CFA_undefined(r%d)\n", reg);
         break;
      case DW_CFA_same_value:
         reg = step_leb128( &instr, 0);
         VG_(printf)(" sci:DW_CFA_same_value(r%d)\n", reg);
         break;
      case DW_CFA_remember_state:
         VG_(printf)(" sci:DW_CFA_remember_state\n");
         break;
      case DW_CFA_restore_state:
         VG_(printf)(" sci:DW_CFA_restore_state\n");
         break;
      case DW_CFA_GNU_args_size:
         off = step_leb128( &instr, 0 );
         VG_(printf)(" sci:DW_CFA_GNU_args_size(%d)\n", off );
         break;
      case DW_CFA_def_cfa_expression:
         /* Expression bytes are skipped, not decoded, for display. */
         len = step_leb128( &instr, 0 );
         instr = ML_(cur_plus)(instr, len);
         VG_(printf)(" sci:DW_CFA_def_cfa_expression(length %d)\n", len);
         break;
      case DW_CFA_expression:
         reg = step_leb128( &instr, 0 );
         len = step_leb128( &instr, 0 );
         instr = ML_(cur_plus)(instr, len);
         VG_(printf)(" sci:DW_CFA_expression(r%d, length %d)\n", reg, len);
         break;
      case DW_CFA_val_expression:
         reg = step_leb128( &instr, 0 );
         len = step_leb128( &instr, 0 );
         instr = ML_(cur_plus)(instr, len);
         VG_(printf)(" sci:DW_CFA_val_expression(r%d, length %d)\n", reg, len);
         break;
      case DW_CFA_offset_extended:
         reg = step_leb128( &instr, 0 );
         off = step_leb128( &instr, 0 );
         VG_(printf)(" sci:DW_CFA_offset_extended(r%d, "
                     "off %d x data_af)\n", reg, off);
         break;
      case DW_CFA_offset_extended_sf:
         reg = step_leb128( &instr, 0 );
         off = step_leb128( &instr, 1 );
         coff = (Int)(off * data_a_f);
         VG_(printf)(" DW_CFA_offset_extended_sf: r%d at cfa%s%d\n",
                     reg, coff < 0 ? "" : "+", coff);
         break;
      case DW_CFA_GNU_negative_offset_extended:
         reg = step_leb128( &instr, 0 );
         off = step_leb128( &instr, 0 );
         VG_(printf)(" sci:DW_CFA_GNU_negative_offset_extended"
                     "(r%d, off %d x data_af)\n", reg, -off);
         break;
      case DW_CFA_val_offset:
         reg = step_leb128( &instr, 0 );
         off = step_leb128( &instr, 0 );
         VG_(printf)(" sci:DW_CFA_val_offset(r%d, off %d x data_af)\n",
                     reg, off);
         break;
      case DW_CFA_val_offset_sf:
         reg = step_leb128( &instr, 0 );
         off = step_leb128( &instr, 1 );
         VG_(printf)(" sci:DW_CFA_val_offset_sf(r%d, off %d x data_af)\n",
                     reg, off);
         break;
      case DW_CFA_GNU_window_save:
         VG_(printf)(" sci:DW_CFA_GNU_window_save\n");
         break;
      case DW_CFA_ORCL_arg_loc:
         /* :TODO: Print all arguments when implemented in libdwarf. */
         VG_(printf)(" sci:DW_CFA_ORCL_arg_loc\n");
         break;
      default:
         VG_(printf)(" sci:0:%d\n", (Int)lo6);
         break;
   }
   return ML_(cur_minus)(instr, instrIN);
}
/* Show the instructions in instrs[0 .. ilen-1]. */
/* Print every call-frame instruction in instrs[0 .. ilen-1], one per
   line, using show_CF_instruction to decode and advance. */
static void show_CF_instructions ( DiCursor instrs, Int ilen,
                                   const AddressDecodingInfo* adi,
                                   Int code_a_f, Int data_a_f )
{
   Int pos = 0;
   while (pos < ilen) {
      pos += show_CF_instruction( ML_(cur_plus)(instrs, pos),
                                  adi, code_a_f, data_a_f );
   }
}
/* Run the CF instructions in instrs[0 .. ilen-1], until the end is
reached, or until there is a failure. Return True iff success.
*/
static
Bool run_CF_instructions ( DebugInfo* di,
Bool record,
UnwindContext* ctx, DiCursor instrs, Int ilen,
UWord fde_arange,
const UnwindContext* restore_ctx,
const AddressDecodingInfo* adi )
{
Addr base;
UInt len;
DiCfSI_m cfsi_m;
Bool summ_ok;
Int j, i = 0;
Addr loc_prev;
if (0) ppUnwindContext(ctx);
if (0) ppUnwindContext_summary(ctx);
while (True) {
loc_prev = ctx->loc;
if (i >= ilen) break;
if (0) (void)show_CF_instruction( ML_(cur_plus)(instrs,i), adi,
ctx->code_a_f, ctx->data_a_f );
j = run_CF_instruction( ctx, ML_(cur_plus)(instrs,i),
restore_ctx, adi, di );
if (j == 0)
return False; /* execution failed */
i += j;
if (0) ppUnwindContext(ctx);
if (record && loc_prev != ctx->loc) {
summ_ok = summarise_context ( &base, &len, &cfsi_m,
loc_prev, ctx, di );
if (summ_ok) {
ML_(addDiCfSI)(di, base, len, &cfsi_m);
if (di->trace_cfi)
ML_(ppDiCfSI)(di->cfsi_exprs, base, len, &cfsi_m);
}
}
}
if (ctx->loc < fde_arange) {
loc_prev = ctx->loc;
ctx->loc = fde_arange;
if (record) {
summ_ok = summarise_context ( &base, &len, &cfsi_m,
loc_prev, ctx, di );
if (summ_ok) {
ML_(addDiCfSI)(di, base, len, &cfsi_m);
if (di->trace_cfi)
ML_(ppDiCfSI)(di->cfsi_exprs, base, len, &cfsi_m);
}
}
}
return True;
}
/* ------------ Main entry point for CFI reading ------------ */
/* Summary of one parsed CIE (Common Information Entry), holding the
   fields FDEs need when their instructions are run. */
typedef
   struct {
      /* This gives the CIE an identity to which FDEs will refer. */
      ULong offset;
      /* Code, data factors (code_alignment_factor /
         data_alignment_factor), used to scale instruction deltas and
         register offsets respectively. */
      Int code_a_f;
      Int data_a_f;
      /* Return-address pseudo-register. */
      Int ra_reg;
      /* DW_EH_PE_* encoding used for addresses in associated FDEs. */
      UChar address_encoding;
      /* Where are the instrs? */
      DiCursor instrs;
      Int ilen;
      /* Whether the CIE's augmentation string began with 'z'
         (NOTE(review): set by the parsing code outside this chunk;
         presumably signals that augmentation-length data is present
         in the FDEs — confirm against the parser). */
      Bool saw_z_augmentation;
   }
   CIE;
/* Reset every field of *CIE to its neutral/empty value. */
static void init_CIE ( CIE* cie )
{
   cie->saw_z_augmentation = False;
   cie->ilen               = 0;
   cie->instrs             = DiCursor_INVALID;
   cie->address_encoding   = 0;
   cie->ra_reg             = 0;
   cie->data_a_f           = 0;
   cie->code_a_f           = 0;
   cie->offset             = 0;
}
/* Table of CIEs accumulated while reading frame info, shared by the
   reader below.  NOTE(review): allocation/growth of this table and
   the exact meaning of N_CIEs (capacity vs. count) are handled by
   code outside this chunk — confirm there. */
static CIE *the_CIEs = NULL;
static SizeT N_CIEs = 0;
/* Read, summarise and store CFA unwind info from .eh_frame and
.debug_frame sections. is_ehframe tells us which kind we are
dealing with -- they are slightly different. */
void ML_(read_callframe_info_dwarf3)
( /*OUT*/struct _DebugInfo* di,
DiSlice escn_frame, Addr frame_avma, Bool is_ehframe )
{
const HChar* how = NULL;
/* Number of entries of the_CIEs[] in use on this call (the global
N_CIEs is only the allocated capacity). */
Int n_CIEs = 0;
DiCursor frame_image = ML_(cur_from_sli)(escn_frame); /* fixed */
DiOffT frame_size = escn_frame.szB;
DiCursor data = frame_image;
UWord cfsi_used_orig;
/* If we're dealing with a .debug_frame, assume zero frame_avma. */
if (!is_ehframe)
vg_assert(frame_avma == 0);
# if defined(VGP_ppc32_linux) || defined(VGP_ppc64be_linux) \
|| defined(VGP_ppc64le_linux)
/* These targets don't use CFI-based stack unwinding. */
return;
# endif
/* If we read more than one .debug_frame or .eh_frame for this
DebugInfo*, the second and subsequent reads should only add FDEs
for address ranges not already covered by the FDEs already
present. To be able to quickly check which address ranges are
already present, any existing records (DiCFSIs) must be sorted,
so we can binary-search them in the code below. We also record
di->cfsi_used so that we know where the boundary is between
existing and new records. */
if (di->cfsi_used > 0) {
ML_(canonicaliseCFI) ( di );
}
cfsi_used_orig = di->cfsi_used;
if (di->trace_cfi) {
VG_(printf)("\n-----------------------------------------------\n");
VG_(printf)("CFI info: szB %llu, _avma %#lx\n",
escn_frame.szB, frame_avma );
VG_(printf)("CFI info: name %s\n", di->fsm.filename );
}
/* Loop over CIEs/FDEs */
/* Conceptually, the frame info is a sequence of FDEs, one for each
function. Inside an FDE is a miniature program for a special
state machine, which, when run, produces the stack-unwinding
info for that function.
Because the FDEs typically have much in common, and because the
DWARF designers appear to have been fanatical about space
saving, the common parts are factored out into so-called CIEs.
That means that what we traverse is a sequence of structs, each
of which is either a FDE (usually) or a CIE (occasionally).
Each FDE has a field indicating which CIE is the one pertaining
to it.
The following loop traverses the sequence. FDEs are dealt with
immediately; once we harvest the useful info in an FDE, it is
then forgotten about. By contrast, CIEs are validated and
dumped into an array, because later FDEs may refer to any
previously-seen CIE.
*/
while (True) {
DiCursor ciefde_start;
ULong ciefde_len;
ULong cie_pointer;
Bool dw64;
/* Are we done? */
if (ML_(cur_cmpEQ)(data, ML_(cur_plus)(frame_image, frame_size)))
return;
/* Overshot the end? Means something is wrong */
if (ML_(cur_cmpGT)(data, ML_(cur_plus)(frame_image, frame_size))) {
how = "overran the end of .eh_frame";
goto bad;
}
/* Ok, we must be looking at the start of a new CIE or FDE.
Figure out which it is. */
ciefde_start = data;
if (di->trace_cfi)
VG_(printf)("\ncie/fde.start = (frame_image + 0x%llx)\n",
(ULong)ML_(cur_minus)(ciefde_start, frame_image));
/* Read the 32-bit initial-length field. */
ciefde_len = (ULong)ML_(cur_step_UInt)(&data);
if (di->trace_cfi)
VG_(printf)("cie/fde.length = %llu\n", ciefde_len);
/* Apparently, if the .length field is zero, we are at the end
of the sequence. This is stated in the Generic Elf
Specification (see comments far above here) and is one of the
places where .eh_frame and .debug_frame data differ. */
if (ciefde_len == 0) {
if (di->ddump_frames)
VG_(printf)("%08llx ZERO terminator\n\n",
(ULong)ML_(cur_minus)(ciefde_start, frame_image));
return;
}
/* If the .length field is 0xFFFFFFFF then we're dealing with
64-bit DWARF, and the real length is stored as a 64-bit
number immediately following it. */
dw64 = False;
if (ciefde_len == 0xFFFFFFFFUL) {
dw64 = True;
ciefde_len = ML_(cur_step_ULong)(&data);
}
/* Now get the CIE ID, whose size depends on the DWARF 32 vs
64-ness. */
if (dw64) {
/* see XXX below */
cie_pointer = ML_(cur_step_ULong)(&data);
} else {
/* see XXX below */
cie_pointer = (ULong)ML_(cur_step_UInt)(&data);
}
if (di->trace_cfi)
VG_(printf)("cie.pointer = %llu\n", cie_pointer);
/* If cie_pointer is zero for .eh_frame or all ones for .debug_frame,
we've got a CIE; else it's an FDE. */
if (cie_pointer == (is_ehframe ? 0ULL
: dw64 ? 0xFFFFFFFFFFFFFFFFULL : 0xFFFFFFFFULL)) {
Int this_CIE;
UChar cie_version;
DiCursor cie_augmentation;
/* --------- CIE --------- */
if (di->trace_cfi)
VG_(printf)("------ new CIE #%d ------\n", n_CIEs);
/* Allocate a new CIE record, growing the table in chunks of
1000 entries when it fills up. */
vg_assert(n_CIEs >= 0);
if (n_CIEs == N_CIEs) {
N_CIEs += 1000;
the_CIEs = ML_(dinfo_realloc)("di.rcid3.2", the_CIEs,
N_CIEs * sizeof the_CIEs[0]);
}
this_CIE = n_CIEs;
n_CIEs++;
init_CIE( &the_CIEs[this_CIE] );
/* Record its offset. This is how we will find it again
later when looking at an FDE. */
the_CIEs[this_CIE].offset
= (ULong)ML_(cur_minus)(ciefde_start, frame_image);
if (di->ddump_frames)
VG_(printf)("%08lx %08lx %08lx CIE\n",
(Addr)ML_(cur_minus)(ciefde_start, frame_image),
(Addr)ciefde_len,
(Addr)(UWord)cie_pointer );
cie_version = ML_(cur_step_UChar)(&data);
if (di->trace_cfi)
VG_(printf)("cie.version = %d\n", (Int)cie_version);
if (di->ddump_frames)
VG_(printf)(" Version: %d\n", (Int)cie_version);
if (cie_version != 1 && cie_version != 3 && cie_version != 4) {
how = "unexpected CIE version (not 1 nor 3 nor 4)";
goto bad;
}
/* The augmentation is a NUL-terminated string directly after
the version byte. */
cie_augmentation = data;
data = ML_(cur_plus)(data, 1 + ML_(cur_strlen)(cie_augmentation));
if (di->trace_cfi || di->ddump_frames) {
HChar* str = ML_(cur_read_strdup)(cie_augmentation, "di.rcid3.1");
if (di->trace_cfi)
VG_(printf)("cie.augment = \"%s\"\n", str);
if (di->ddump_frames)
VG_(printf)(" Augmentation: \"%s\"\n", str);
ML_(dinfo_free)(str);
}
/* "eh" augmentation: skip sizeof(Addr) bytes of data
(presumably a pointer to an exception table -- not used
here; NOTE(review): confirm against GCC's eh_frame docs). */
if (ML_(cur_read_UChar)(cie_augmentation) == 'e'
&& ML_(cur_read_UChar)
(ML_(cur_plus)(cie_augmentation, 1)) == 'h') {
data = ML_(cur_plus)(data, sizeof(Addr));
cie_augmentation = ML_(cur_plus)(cie_augmentation, 2);
}
/* Version 4 CIEs carry two extra bytes: address size and
segment size.  Only the native address size and a zero
segment size are accepted. */
if (cie_version >= 4) {
if (ML_(cur_step_UChar)(&data) != sizeof(Addr)) {
how = "unexpected address size";
goto bad;
}
if (ML_(cur_step_UChar)(&data) != 0) {
how = "unexpected non-zero segment size";
goto bad;
}
}
the_CIEs[this_CIE].code_a_f = step_leb128( &data, 0);
if (di->trace_cfi)
VG_(printf)("cie.code_af = %d\n",
the_CIEs[this_CIE].code_a_f);
if (di->ddump_frames)
VG_(printf)(" Code alignment factor: %d\n",
(Int)the_CIEs[this_CIE].code_a_f);
the_CIEs[this_CIE].data_a_f = step_leb128( &data, 1);
if (di->trace_cfi)
VG_(printf)("cie.data_af = %d\n",
the_CIEs[this_CIE].data_a_f);
if (di->ddump_frames)
VG_(printf)(" Data alignment factor: %d\n",
(Int)the_CIEs[this_CIE].data_a_f);
/* In version 1 the return-address register is a single byte;
in later versions it is a ULEB128. */
if (cie_version == 1) {
the_CIEs[this_CIE].ra_reg = (Int)ML_(cur_step_UChar)(&data);
} else {
the_CIEs[this_CIE].ra_reg = step_leb128( &data, 0);
}
if (di->trace_cfi)
VG_(printf)("cie.ra_reg = %d\n",
the_CIEs[this_CIE].ra_reg);
if (di->ddump_frames)
VG_(printf)(" Return address column: %d\n",
(Int)the_CIEs[this_CIE].ra_reg);
if (the_CIEs[this_CIE].ra_reg < 0
|| the_CIEs[this_CIE].ra_reg >= N_CFI_REGS) {
how = "cie.ra_reg has implausible value";
goto bad;
}
the_CIEs[this_CIE].saw_z_augmentation
= ML_(cur_read_UChar)(cie_augmentation) == 'z';
if (the_CIEs[this_CIE].saw_z_augmentation) {
UInt length = step_leb128( &data, 0);
the_CIEs[this_CIE].instrs = ML_(cur_plus)(data, length);
cie_augmentation = ML_(cur_plus)(cie_augmentation, 1);
if (di->ddump_frames) {
UInt i;
VG_(printf)(" Augmentation data: ");
for (i = 0; i < length; i++)
VG_(printf)(" %02x", (UInt)ML_(cur_read_UChar)
(ML_(cur_plus)(data, i)));
VG_(printf)("\n");
}
} else {
the_CIEs[this_CIE].instrs = DiCursor_INVALID;
}
the_CIEs[this_CIE].address_encoding = default_Addr_encoding();
/* Walk the rest of the augmentation string, consuming the
corresponding augmentation data as we go. */
while (ML_(cur_read_UChar)(cie_augmentation)) {
switch (ML_(cur_read_UChar)(cie_augmentation)) {
case 'L':
data = ML_(cur_plus)(data, 1);
cie_augmentation = ML_(cur_plus)(cie_augmentation, 1);
break;
case 'R':
the_CIEs[this_CIE].address_encoding
= ML_(cur_step_UChar)(&data);
cie_augmentation = ML_(cur_plus)(cie_augmentation, 1);
break;
case 'P':
data = ML_(cur_plus)(data, size_of_encoded_Addr(
ML_(cur_read_UChar)(data) ));
data = ML_(cur_plus)(data, 1);
cie_augmentation = ML_(cur_plus)(cie_augmentation, 1);
break;
case 'S':
cie_augmentation = ML_(cur_plus)(cie_augmentation, 1);
break;
default:
/* Unknown augmentation char: only recoverable if 'z'
told us where the instructions start. */
if (!ML_(cur_is_valid)(the_CIEs[this_CIE].instrs)) {
how = "unhandled cie.augmentation";
goto bad;
}
data = the_CIEs[this_CIE].instrs;
goto done_augmentation;
}
}
done_augmentation:
if (di->trace_cfi)
VG_(printf)("cie.encoding = 0x%x\n",
the_CIEs[this_CIE].address_encoding);
the_CIEs[this_CIE].instrs = data;
/* ilen = total record size (length field itself is sizeof(UInt))
minus the header bytes already stepped past. */
the_CIEs[this_CIE].ilen = ML_(cur_minus)(ciefde_start, data)
+ (Long)ciefde_len + (Long)sizeof(UInt);
if (di->trace_cfi) {
//VG_(printf)("cie.instrs = %p\n", the_CIEs[this_CIE].instrs);
VG_(printf)("cie.ilen = %d\n", the_CIEs[this_CIE].ilen);
}
if (the_CIEs[this_CIE].ilen < 0
|| the_CIEs[this_CIE].ilen > frame_size) {
how = "implausible # cie initial insns";
goto bad;
}
data = ML_(cur_plus)(data, the_CIEs[this_CIE].ilen);
/* Show the CIE's instructions (the preamble for each FDE
that uses this CIE). */
if (di->ddump_frames)
VG_(printf)("\n");
if (di->trace_cfi || di->ddump_frames) {
AddressDecodingInfo adi;
adi.encoding = the_CIEs[this_CIE].address_encoding;
adi.ehframe_image = frame_image;
adi.ehframe_avma = frame_avma;
adi.text_bias = di->text_debug_bias;
adi.got_avma = di->got_avma;
show_CF_instructions( the_CIEs[this_CIE].instrs,
the_CIEs[this_CIE].ilen, &adi,
the_CIEs[this_CIE].code_a_f,
the_CIEs[this_CIE].data_a_f );
}
if (di->ddump_frames)
VG_(printf)("\n");
} else {
AddressDecodingInfo adi;
UnwindContext ctx, restore_ctx;
Int cie;
ULong look_for;
Bool ok;
Addr fde_initloc;
UWord fde_arange;
DiCursor fde_instrs;
Int fde_ilen;
/* --------- FDE --------- */
/* Find the relevant CIE. The CIE we want is located
cie_pointer bytes back from here. */
/* re sizeof(UInt) / sizeof(ULong), matches XXX above. */
if (is_ehframe)
look_for = ML_(cur_minus)(data, frame_image)
- (dw64 ? sizeof(ULong) : sizeof(UInt))
- cie_pointer;
else
look_for = cie_pointer;
/* Linear search of the CIE table by offset. */
for (cie = 0; cie < n_CIEs; cie++) {
if (0) VG_(printf)("look for %llu %llu\n",
look_for, the_CIEs[cie].offset );
if (the_CIEs[cie].offset == look_for)
break;
}
vg_assert(cie >= 0 && cie <= n_CIEs);
if (cie == n_CIEs) {
how = "FDE refers to not-findable CIE";
goto bad;
}
adi.encoding = the_CIEs[cie].address_encoding;
adi.ehframe_image = frame_image;
adi.ehframe_avma = frame_avma;
adi.text_bias = di->text_debug_bias;
adi.got_avma = di->got_avma;
fde_initloc = step_encoded_Addr(&adi, &data);
if (di->trace_cfi)
VG_(printf)("fde.initloc = %#lx\n", fde_initloc);
/* The address-range field uses only the low nibble of the
encoding (the value format, not the application part). */
adi.encoding = the_CIEs[cie].address_encoding & 0xf;
adi.ehframe_image = frame_image;
adi.ehframe_avma = frame_avma;
adi.text_bias = di->text_debug_bias;
adi.got_avma = di->got_avma;
/* WAS (incorrectly):
fde_arange = read_encoded_Addr(&nbytes, &adi, data);
data += nbytes;
The following corresponds to what binutils/dwarf.c does:
*/
{ UInt ptr_size = size_of_encoded_Addr( adi.encoding );
switch (ptr_size) {
case 8: case 4: case 2: case 1:
fde_arange
= (UWord)step_le_u_encoded_literal(&data, ptr_size);
break;
default:
how = "unknown arange field encoding in FDE";
goto bad;
}
}
if (di->trace_cfi)
VG_(printf)("fde.arangec = %#lx\n", fde_arange);
if (di->ddump_frames)
VG_(printf)("%08lx %08lx %08lx FDE cie=%08lx pc=%08lx..%08lx\n",
(Addr)ML_(cur_minus)(ciefde_start, frame_image),
(Addr)ciefde_len,
(Addr)(UWord)cie_pointer,
(Addr)look_for,
((Addr)fde_initloc) - di->text_debug_bias,
((Addr)fde_initloc) - di->text_debug_bias + fde_arange);
if (the_CIEs[cie].saw_z_augmentation) {
UInt length = step_leb128( &data, 0);
if (di->ddump_frames && (length > 0)) {
UInt i;
VG_(printf)(" Augmentation data: ");
for (i = 0; i < length; i++)
VG_(printf)(" %02x", (UInt)ML_(cur_read_UChar)
(ML_(cur_plus)(data, i)));
VG_(printf)("\n\n");
}
data = ML_(cur_plus)(data, length);
}
fde_instrs = data;
/* Same arithmetic as for the CIE's ilen above. */
fde_ilen = ML_(cur_minus)(ciefde_start, data)
+ (Long)ciefde_len + (Long)sizeof(UInt);
if (di->trace_cfi) {
//VG_(printf)("fde.instrs = %p\n", fde_instrs);
VG_(printf)("fde.ilen = %d\n", (Int)fde_ilen);
}
if (fde_ilen < 0 || fde_ilen > frame_size) {
how = "implausible # fde insns";
goto bad;
}
data = ML_(cur_plus)(data, fde_ilen);
/* If this object's DebugInfo* had some DiCFSIs from a
previous .eh_frame or .debug_frame read, we must check
that we're not adding a duplicate. */
if (cfsi_used_orig > 0) {
Addr a_mid_lo, a_mid_hi;
Word mid, size,
lo = 0,
hi = cfsi_used_orig-1;
while (True) {
/* current unsearched space is from lo to hi, inclusive. */
if (lo > hi) break; /* not found */
mid = (lo + hi) / 2;
a_mid_lo = di->cfsi_rd[mid].base;
size = di->cfsi_rd[mid].len;
a_mid_hi = a_mid_lo + size - 1;
vg_assert(a_mid_hi >= a_mid_lo);
if (fde_initloc + fde_arange <= a_mid_lo) {
hi = mid-1; continue;
}
if (fde_initloc > a_mid_hi) { lo = mid+1; continue; }
break;
}
/* The range this .debug_frame FDE covers has been already
covered in .eh_frame section. Don't add it from .debug_frame
section again. */
if (lo <= hi)
continue;
}
adi.encoding = the_CIEs[cie].address_encoding;
adi.ehframe_image = frame_image;
adi.ehframe_avma = frame_avma;
adi.text_bias = di->text_debug_bias;
adi.got_avma = di->got_avma;
if (di->trace_cfi)
show_CF_instructions( fde_instrs, fde_ilen, &adi,
the_CIEs[cie].code_a_f,
the_CIEs[cie].data_a_f );
initUnwindContext(&ctx);
ctx.code_a_f = the_CIEs[cie].code_a_f;
ctx.data_a_f = the_CIEs[cie].data_a_f;
ctx.initloc = fde_initloc;
ctx.ra_reg = the_CIEs[cie].ra_reg;
ctx.exprs = VG_(newXA)( ML_(dinfo_zalloc), "di.rcid.1",
ML_(dinfo_free),
sizeof(CfiExpr) );
/* Run the CIE's instructions. Ugly hack: if
--debug-dump=frames is in effect, suppress output for
these instructions since they will already have been shown
at the time the CIE was first encountered. Note, not
thread safe - if this reader is ever made threaded, should
fix properly. */
{ Bool hack = di->ddump_frames;
di->ddump_frames = False;
initUnwindContext(&restore_ctx);
ok = run_CF_instructions(
di, False, &ctx, the_CIEs[cie].instrs,
the_CIEs[cie].ilen, 0, NULL, &adi
);
di->ddump_frames = hack;
}
/* And now run the instructions for the FDE, starting from
the state created by running the CIE preamble
instructions. */
if (ok) {
restore_ctx = ctx;
ok = run_CF_instructions(
di, True, &ctx, fde_instrs, fde_ilen, fde_arange,
&restore_ctx, &adi
);
if (di->ddump_frames)
VG_(printf)("\n");
}
VG_(deleteXA)( ctx.exprs );
}
}
return;
bad:
if (!VG_(clo_xml) && VG_(clo_verbosity) > 1)
VG_(message)(Vg_UserMsg,
"Warning: %s in DWARF2 CFI reading\n", how);
return;
}
#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
/*--------------------------------------------------------------------*/
/*--- end ---*/
/*--------------------------------------------------------------------*/
|
the_stack_data/148560.c
|
/* A pair of globals carrying a 3-bit bit-field.  NOTE(review): the
signedness of a plain 'int' bit-field is implementation-defined. */
struct A {
    int b:3;
} d, e;

int c;

/* Multiply the two bit-fields, narrow the product through a char,
   and store the result in the global c. */
void f(void)
{
    int product  = d.b * e.b;        /* bit-fields promote to int first */
    char narrowed = (char)product;   /* same narrowing as the original 'char g' */
    c = narrowed;
}
|
the_stack_data/67797.c
|
/*
John Carroll
COMP-1200: Assignment 03
Date Completed: February 7th, 2012
I worked on my assignment alone, using only course material.
Program: BMI Calculation
-----------------------------------------------------------------
Find the BMI for a given height (in inches) and weight (in pounds).
Determine the BMI category.
Compute and display the target weight for a given BMI.
Compute the Ideal Body Weight (IBW) for a given height and gender.
------------------------------------------------------------------
*/
#include <stdio.h>
int main()
{
    /* VARIABLES */
    double weight, height, convertedHeight, convertedWeight, bmi,
           targetBmi, targetConvertedWeight, targetWeight, ibwWomen, ibwMen;
    char gender;

    /*-----------------------------------------------------------*/
    /* INPUT: height and weight.  Each scanf result is checked --
       the original read uninitialized doubles on bad input (UB). */
    printf("Enter the height in inches: ");
    if (scanf("%lf", &height) != 1) {
        fprintf(stderr, "Error: height must be a number.\n");
        return 1;
    }
    printf("Enter the weight in pounds: ");
    if (scanf("%lf", &weight) != 1) {
        fprintf(stderr, "Error: weight must be a number.\n");
        return 1;
    }

    /*-----------------------------------------------------------*/
    /* COMPUTATION: convert to metric units, then apply the BMI
       formula kg / m^2. */
    convertedHeight = height*0.0254;    /* inches -> metres    */
    convertedWeight = weight/2.2046;    /* pounds -> kilograms */
    bmi = (convertedWeight)/(convertedHeight*convertedHeight);
    printf("\n");                       /* insert blank line */

    /*-----------------------------------------------------------*/
    /* OUTPUT: BMI value and its classification band. */
    printf("The BMI is: %-6.2lf\n", bmi);
    if ( bmi < 25 )
        printf("BMI Classification: Normal\n");
    else if ( bmi >= 25 && bmi < 30 )
        printf("BMI Classification: Overweight\n");
    else if ( bmi >= 30 )
        printf("BMI Classification: Obese\n");
    printf("\n");                       /* insert blank line */

    /*-----------------------------------------------------------*/
    /* INPUT: target BMI. */
    printf("Enter the target BMI: ");
    if (scanf("%lf", &targetBmi) != 1) {
        fprintf(stderr, "Error: target BMI must be a number.\n");
        return 1;
    }

    /*-----------------------------------------------------------*/
    /* COMPUTATION + OUTPUT: weight needed to reach the target BMI. */
    targetConvertedWeight = targetBmi*(convertedHeight*convertedHeight);
    targetWeight = targetConvertedWeight*2.2046;   /* kg -> pounds */
    printf("The target weight is: %-6.2lf pounds.\n", targetWeight);
    printf("\n");                       /* insert blank line */

    /*-----------------------------------------------------------*/
    /* INPUT: gender for the ideal-body-weight formula.  The leading
       space in " %c" skips the newline left by the previous scanf. */
    printf("Is the person a female or male? Enter F or M: ");
    if (scanf(" %c", &gender) != 1) {
        fprintf(stderr, "Error: could not read gender.\n");
        return 1;
    }

    /*-----------------------------------------------------------*/
    /* COMPUTATION: ideal body weight in kg (base + 2.3 kg per inch
       over 60"), converted to pounds. */
    ibwWomen = ((45.5 + 2.3 * (height - 60))*(2.2046));
    ibwMen   = ((50.0 + 2.3 * (height - 60))*(2.2046));

    /*-----------------------------------------------------------*/
    /* OUTPUT: ideal weight by gender; warn on unrecognized input
       (the original silently printed nothing in that case). */
    if ((gender == 'F') || (gender == 'f'))
        printf("The ideal weight is %-6.2lf pounds.\n", ibwWomen);
    else if ((gender == 'M') || (gender == 'm'))
        printf("The ideal weight is %-6.2lf pounds.\n", ibwMen);
    else
        fprintf(stderr, "Unrecognized gender '%c'; expected F or M.\n", gender);

    return 0;
}
|
the_stack_data/86159.c
|
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <complex.h>
#ifdef complex
#undef complex
#endif
#ifdef I
#undef I
#endif
#if defined(_WIN64)
typedef long long BLASLONG;
typedef unsigned long long BLASULONG;
#else
typedef long BLASLONG;
typedef unsigned long BLASULONG;
#endif
#ifdef LAPACK_ILP64
typedef BLASLONG blasint;
#if defined(_WIN64)
#define blasabs(x) llabs(x)
#else
#define blasabs(x) labs(x)
#endif
#else
typedef int blasint;
#define blasabs(x) abs(x)
#endif
typedef blasint integer;
typedef unsigned int uinteger;
typedef char *address;
typedef short int shortint;
typedef float real;
typedef double doublereal;
typedef struct { real r, i; } complex;
typedef struct { doublereal r, i; } doublecomplex;
#ifdef _MSC_VER
static inline _Fcomplex Cf(complex *z) {_Fcomplex zz={z->r , z->i}; return zz;}
static inline _Dcomplex Cd(doublecomplex *z) {_Dcomplex zz={z->r , z->i};return zz;}
static inline _Fcomplex * _pCf(complex *z) {return (_Fcomplex*)z;}
static inline _Dcomplex * _pCd(doublecomplex *z) {return (_Dcomplex*)z;}
#else
static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;}
static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;}
static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;}
static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;}
#endif
#define pCf(z) (*_pCf(z))
#define pCd(z) (*_pCd(z))
typedef int logical;
typedef short int shortlogical;
typedef char logical1;
typedef char integer1;
#define TRUE_ (1)
#define FALSE_ (0)
/* Extern is for use with -E */
#ifndef Extern
#define Extern extern
#endif
/* I/O stuff */
typedef int flag;
typedef int ftnlen;
typedef int ftnint;
/*external read, write*/
typedef struct
{ flag cierr;
ftnint ciunit;
flag ciend;
char *cifmt;
ftnint cirec;
} cilist;
/*internal read, write*/
typedef struct
{ flag icierr;
char *iciunit;
flag iciend;
char *icifmt;
ftnint icirlen;
ftnint icirnum;
} icilist;
/*open*/
typedef struct
{ flag oerr;
ftnint ounit;
char *ofnm;
ftnlen ofnmlen;
char *osta;
char *oacc;
char *ofm;
ftnint orl;
char *oblnk;
} olist;
/*close*/
typedef struct
{ flag cerr;
ftnint cunit;
char *csta;
} cllist;
/*rewind, backspace, endfile*/
typedef struct
{ flag aerr;
ftnint aunit;
} alist;
/* inquire */
typedef struct
{ flag inerr;
ftnint inunit;
char *infile;
ftnlen infilen;
ftnint *inex; /*parameters in standard's order*/
ftnint *inopen;
ftnint *innum;
ftnint *innamed;
char *inname;
ftnlen innamlen;
char *inacc;
ftnlen inacclen;
char *inseq;
ftnlen inseqlen;
char *indir;
ftnlen indirlen;
char *infmt;
ftnlen infmtlen;
char *inform;
ftnint informlen;
char *inunf;
ftnlen inunflen;
ftnint *inrecl;
ftnint *innrec;
char *inblank;
ftnlen inblanklen;
} inlist;
#define VOID void
union Multitype { /* for multiple entry points */
integer1 g;
shortint h;
integer i;
/* longint j; */
real r;
doublereal d;
complex c;
doublecomplex z;
};
typedef union Multitype Multitype;
struct Vardesc { /* for Namelist */
char *name;
char *addr;
ftnlen *dims;
int type;
};
typedef struct Vardesc Vardesc;
struct Namelist {
char *name;
Vardesc **vars;
int nvars;
};
typedef struct Namelist Namelist;
#define abs(x) ((x) >= 0 ? (x) : -(x))
#define dabs(x) (fabs(x))
#define f2cmin(a,b) ((a) <= (b) ? (a) : (b))
#define f2cmax(a,b) ((a) >= (b) ? (a) : (b))
#define dmin(a,b) (f2cmin(a,b))
#define dmax(a,b) (f2cmax(a,b))
#define bit_test(a,b) ((a) >> (b) & 1)
#define bit_clear(a,b) ((a) & ~((uinteger)1 << (b)))
#define bit_set(a,b) ((a) | ((uinteger)1 << (b)))
#define abort_() { sig_die("Fortran abort routine called", 1); }
#define c_abs(z) (cabsf(Cf(z)))
#define c_cos(R,Z) { pCf(R)=ccos(Cf(Z)); }
#ifdef _MSC_VER
#define c_div(c, a, b) {Cf(c)._Val[0] = (Cf(a)._Val[0]/Cf(b)._Val[0]); Cf(c)._Val[1]=(Cf(a)._Val[1]/Cf(b)._Val[1]);}
#define z_div(c, a, b) {Cd(c)._Val[0] = (Cd(a)._Val[0]/Cd(b)._Val[0]); Cd(c)._Val[1]=(Cd(a)._Val[1]/df(b)._Val[1]);}
#else
#define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
#define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
#endif
#define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
#define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
#define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
//#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
#define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
#define d_abs(x) (fabs(*(x)))
#define d_acos(x) (acos(*(x)))
#define d_asin(x) (asin(*(x)))
#define d_atan(x) (atan(*(x)))
#define d_atn2(x, y) (atan2(*(x),*(y)))
#define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
#define r_cnjg(R, Z) { pCf(R) = conjf(Cf(Z)); }
#define d_cos(x) (cos(*(x)))
#define d_cosh(x) (cosh(*(x)))
#define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
#define d_exp(x) (exp(*(x)))
#define d_imag(z) (cimag(Cd(z)))
#define r_imag(z) (cimagf(Cf(z)))
#define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
#define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
#define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
#define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
#define d_log(x) (log(*(x)))
#define d_mod(x, y) (fmod(*(x), *(y)))
#define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
#define d_nint(x) u_nint(*(x))
#define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a)))
#define d_sign(a,b) u_sign(*(a),*(b))
#define r_sign(a,b) u_sign(*(a),*(b))
#define d_sin(x) (sin(*(x)))
#define d_sinh(x) (sinh(*(x)))
#define d_sqrt(x) (sqrt(*(x)))
#define d_tan(x) (tan(*(x)))
#define d_tanh(x) (tanh(*(x)))
#define i_abs(x) abs(*(x))
#define i_dnnt(x) ((integer)u_nint(*(x)))
#define i_len(s, n) (n)
#define i_nint(x) ((integer)u_nint(*(x)))
#define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b)))
#define pow_dd(ap, bp) ( pow(*(ap), *(bp)))
#define pow_si(B,E) spow_ui(*(B),*(E))
#define pow_ri(B,E) spow_ui(*(B),*(E))
#define pow_di(B,E) dpow_ui(*(B),*(E))
#define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));}
#define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));}
#define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),*(B));}
#define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; }
#define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d))))
#define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; }
#define sig_die(s, kill) { exit(1); }
#define s_stop(s, n) {exit(0);}
static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n";
#define z_abs(z) (cabs(Cd(z)))
#define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));}
#define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));}
#define myexit_() break;
#define mycycle() continue;
#define myceiling(w) {ceil(w)}
#define myhuge(w) {HUGE_VAL}
//#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);}
#define mymaxloc(w,s,e,n) {dmaxloc_(w,*(s),*(e),n)}
/* procedure parameter types for -A and -C++ */
#define F2C_proc_par_types 1
#ifdef __cplusplus
typedef logical (*L_fp)(...);
#else
typedef logical (*L_fp)();
#endif
/* Raise x to the integer power n by binary (square-and-multiply)
   exponentiation.  A negative exponent inverts the base first. */
static float spow_ui(float x, integer n) {
    float result = 1.0;
    if (n == 0)
        return result;
    if (n < 0) {
        n = -n;
        x = 1 / x;
    }
    unsigned long int bits = n;
    while (bits != 0) {
        if (bits & 1UL)
            result *= x;          /* fold in the current bit's factor */
        bits >>= 1;
        if (bits)
            x *= x;               /* square only while bits remain */
    }
    return result;
}
/* Raise x to the integer power n by binary (square-and-multiply)
   exponentiation.  A negative exponent inverts the base first. */
static double dpow_ui(double x, integer n) {
    double result = 1.0;
    if (n == 0)
        return result;
    if (n < 0) {
        n = -n;
        x = 1 / x;
    }
    unsigned long int bits = n;
    while (bits != 0) {
        if (bits & 1UL)
            result *= x;          /* fold in the current bit's factor */
        bits >>= 1;
        if (bits)
            x *= x;               /* square only while bits remain */
    }
    return result;
}
#ifdef _MSC_VER
/* x**n for single-precision complex under MSVC (no C99 _Complex).
NOTE(review): the operations below act on the real and imaginary
parts independently (re*re, im*im), which is not a true complex
product; this matches the upstream f2c-translated header but
disagrees with the #else branch -- confirm before relying on it. */
static _Fcomplex cpow_ui(complex x, integer n) {
complex pow={1.0,0.0}; unsigned long int u;
if(n != 0) {
if(n < 0) n = -n, x.r = 1/x.r, x.i=1/x.i;
for(u = n; ; ) {
if(u & 01) pow.r *= x.r, pow.i *= x.i;
if(u >>= 1) x.r *= x.r, x.i *= x.i;
else break;
}
}
_Fcomplex p={pow.r, pow.i};
return p;
}
#else
/* x**n for single-precision complex via binary (square-and-multiply)
exponentiation; a negative exponent inverts the base first. */
static _Complex float cpow_ui(_Complex float x, integer n) {
_Complex float pow=1.0; unsigned long int u;
if(n != 0) {
if(n < 0) n = -n, x = 1/x;
for(u = n; ; ) {
if(u & 01) pow *= x;
if(u >>= 1) x *= x;
else break;
}
}
return pow;
}
#endif
#ifdef _MSC_VER
/* x**n for double-precision complex under MSVC (no C99 _Complex).
NOTE(review): as with cpow_ui, the real and imaginary parts are
multiplied independently, which is not a true complex product;
this matches the upstream f2c-translated header but disagrees
with the #else branch -- confirm before relying on it. */
static _Dcomplex zpow_ui(_Dcomplex x, integer n) {
_Dcomplex pow={1.0,0.0}; unsigned long int u;
if(n != 0) {
if(n < 0) n = -n, x._Val[0] = 1/x._Val[0], x._Val[1] =1/x._Val[1];
for(u = n; ; ) {
if(u & 01) pow._Val[0] *= x._Val[0], pow._Val[1] *= x._Val[1];
if(u >>= 1) x._Val[0] *= x._Val[0], x._Val[1] *= x._Val[1];
else break;
}
}
_Dcomplex p = {pow._Val[0], pow._Val[1]};
return p;
}
#else
/* x**n for double-precision complex via binary (square-and-multiply)
exponentiation; a negative exponent inverts the base first. */
static _Complex double zpow_ui(_Complex double x, integer n) {
_Complex double pow=1.0; unsigned long int u;
if(n != 0) {
if(n < 0) n = -n, x = 1/x;
for(u = n; ; ) {
if(u & 01) pow *= x;
if(u >>= 1) x *= x;
else break;
}
}
return pow;
}
#endif
/* Integer power x**n, f2c style.  For n <= 0: x**0 and 1**n are 1;
x**negative is 0 for |x| > 1; (-1)**negative is folded into the
positive-exponent path below.  NOTE(review): 0**negative evaluates
1/x with x == 0, i.e. a deliberate division by zero -- presumably
mirroring the error a Fortran runtime would raise; confirm against
upstream libf2c's pow_ii.c before changing. */
static integer pow_ii(integer x, integer n) {
integer pow; unsigned long int u;
if (n <= 0) {
if (n == 0 || x == 1) pow = 1;
else if (x != -1) pow = x == 0 ? 1/x : 0;
else n = -n;
}
if ((n > 0) || !(n == 0 || x == 1 || x != -1)) {
u = n;
for(pow = 1; ; ) {
if(u & 01) pow *= x;
if(u >>= 1) x *= x;
else break;
}
}
return pow;
}
/* Fortran MAXLOC equivalent for doubles: return the 1-based position,
relative to s, of the largest element of w[s-1 .. e-1] (Fortran
indexing), scanning left to right so the first maximum wins.  The
trailing 'n' argument is unused; presumably kept for f2c calling
convention compatibility. */
static integer dmaxloc_(double *w, integer s, integer e, integer *n)
{
double m; integer i, mi;
for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
if (w[i-1]>m) mi=i ,m=w[i-1];
return mi-s+1;
}
/* Fortran MAXLOC equivalent for floats: return the 1-based position,
relative to s, of the largest element of w[s-1 .. e-1] (Fortran
indexing), scanning left to right so the first maximum wins.  The
trailing 'n' argument is unused; presumably kept for f2c calling
convention compatibility. */
static integer smaxloc_(float *w, integer s, integer e, integer *n)
{
float m; integer i, mi;
for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
if (w[i-1]>m) mi=i ,m=w[i-1];
return mi-s+1;
}
/* BLAS CDOTC: *z = sum over i of conj(x[i]) * y[i], n elements with
strides incx/incy (the incx==1 && incy==1 case is special-cased). */
static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
integer n = *n_, incx = *incx_, incy = *incy_, i;
#ifdef _MSC_VER
/* NOTE(review): this MSVC branch accumulates re*re and im*im
separately, which is not the standard complex product formed by
the #else branch -- matches the upstream f2c header, but confirm. */
_Fcomplex zdotc = {0.0, 0.0};
if (incx == 1 && incy == 1) {
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc._Val[0] += conjf(Cf(&x[i]))._Val[0] * Cf(&y[i])._Val[0];
zdotc._Val[1] += conjf(Cf(&x[i]))._Val[1] * Cf(&y[i])._Val[1];
}
} else {
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc._Val[0] += conjf(Cf(&x[i*incx]))._Val[0] * Cf(&y[i*incy])._Val[0];
zdotc._Val[1] += conjf(Cf(&x[i*incx]))._Val[1] * Cf(&y[i*incy])._Val[1];
}
}
pCf(z) = zdotc;
}
#else
_Complex float zdotc = 0.0;
if (incx == 1 && incy == 1) {
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc += conjf(Cf(&x[i])) * Cf(&y[i]);
}
} else {
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]);
}
}
pCf(z) = zdotc;
}
#endif
/* BLAS ZDOTC: *z = sum over i of conj(x[i]) * y[i], n elements with
strides incx/incy (the incx==1 && incy==1 case is special-cased). */
static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
integer n = *n_, incx = *incx_, incy = *incy_, i;
#ifdef _MSC_VER
/* NOTE(review): this MSVC branch accumulates re*re and im*im
separately, which is not the standard complex product formed by
the #else branch -- matches the upstream f2c header, but confirm. */
_Dcomplex zdotc = {0.0, 0.0};
if (incx == 1 && incy == 1) {
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc._Val[0] += conj(Cd(&x[i]))._Val[0] * Cd(&y[i])._Val[0];
zdotc._Val[1] += conj(Cd(&x[i]))._Val[1] * Cd(&y[i])._Val[1];
}
} else {
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc._Val[0] += conj(Cd(&x[i*incx]))._Val[0] * Cd(&y[i*incy])._Val[0];
zdotc._Val[1] += conj(Cd(&x[i*incx]))._Val[1] * Cd(&y[i*incy])._Val[1];
}
}
pCd(z) = zdotc;
}
#else
_Complex double zdotc = 0.0;
if (incx == 1 && incy == 1) {
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc += conj(Cd(&x[i])) * Cd(&y[i]);
}
} else {
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]);
}
}
pCd(z) = zdotc;
}
#endif
/* BLAS CDOTU: *z = sum over i of x[i] * y[i] (UNconjugated), n
elements with strides incx/incy.  The inherited "dconjg" comments
were stale copies from cdotc_ and have been corrected below. */
static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
integer n = *n_, incx = *incx_, incy = *incy_, i;
#ifdef _MSC_VER
/* NOTE(review): this MSVC branch accumulates re*re and im*im
separately, which is not the standard complex product formed by
the #else branch -- matches the upstream f2c header, but confirm. */
_Fcomplex zdotc = {0.0, 0.0};
if (incx == 1 && incy == 1) {
for (i=0;i<n;i++) { /* zdotc = zdotc + x(i) * y(i) */
zdotc._Val[0] += Cf(&x[i])._Val[0] * Cf(&y[i])._Val[0];
zdotc._Val[1] += Cf(&x[i])._Val[1] * Cf(&y[i])._Val[1];
}
} else {
for (i=0;i<n;i++) { /* zdotc = zdotc + x(i) * y(i) */
zdotc._Val[0] += Cf(&x[i*incx])._Val[0] * Cf(&y[i*incy])._Val[0];
zdotc._Val[1] += Cf(&x[i*incx])._Val[1] * Cf(&y[i*incy])._Val[1];
}
}
pCf(z) = zdotc;
}
#else
_Complex float zdotc = 0.0;
if (incx == 1 && incy == 1) {
for (i=0;i<n;i++) { /* zdotc = zdotc + x(i) * y(i) */
zdotc += Cf(&x[i]) * Cf(&y[i]);
}
} else {
for (i=0;i<n;i++) { /* zdotc = zdotc + x(i) * y(i) */
zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]);
}
}
pCf(z) = zdotc;
}
#endif
/* BLAS ZDOTU: *z = sum over i of x[i] * y[i] (UNconjugated), n
elements with strides incx/incy.  The inherited "dconjg" comments
were stale copies from zdotc_ and have been corrected below. */
static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
integer n = *n_, incx = *incx_, incy = *incy_, i;
#ifdef _MSC_VER
/* NOTE(review): this MSVC branch accumulates re*re and im*im
separately, which is not the standard complex product formed by
the #else branch -- matches the upstream f2c header, but confirm. */
_Dcomplex zdotc = {0.0, 0.0};
if (incx == 1 && incy == 1) {
for (i=0;i<n;i++) { /* zdotc = zdotc + x(i) * y(i) */
zdotc._Val[0] += Cd(&x[i])._Val[0] * Cd(&y[i])._Val[0];
zdotc._Val[1] += Cd(&x[i])._Val[1] * Cd(&y[i])._Val[1];
}
} else {
for (i=0;i<n;i++) { /* zdotc = zdotc + x(i) * y(i) */
zdotc._Val[0] += Cd(&x[i*incx])._Val[0] * Cd(&y[i*incy])._Val[0];
zdotc._Val[1] += Cd(&x[i*incx])._Val[1] * Cd(&y[i*incy])._Val[1];
}
}
pCd(z) = zdotc;
}
#else
_Complex double zdotc = 0.0;
if (incx == 1 && incy == 1) {
for (i=0;i<n;i++) { /* zdotc = zdotc + x(i) * y(i) */
zdotc += Cd(&x[i]) * Cd(&y[i]);
}
} else {
for (i=0;i<n;i++) { /* zdotc = zdotc + x(i) * y(i) */
zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]);
}
}
pCd(z) = zdotc;
}
#endif
/* -- translated by f2c (version 20000121).
You must link the resulting object file with the libraries:
-lf2c -lm (in that order)
*/
/* Table of constant values */
/* f2c passes every argument by reference, so commonly used integer
   literals are materialized once as addressable file-scope statics. */
static integer c__1 = 1;
static integer c_n1 = -1;
static integer c__2 = 2;
static integer c__65 = 65;
/* > \brief \b DORMRQ */
/* =========== DOCUMENTATION =========== */
/* Online html documentation available at */
/* http://www.netlib.org/lapack/explore-html/ */
/* > \htmlonly */
/* > Download DORMRQ + dependencies */
/* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/dormrq.
f"> */
/* > [TGZ]</a> */
/* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/dormrq.
f"> */
/* > [ZIP]</a> */
/* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/dormrq.
f"> */
/* > [TXT]</a> */
/* > \endhtmlonly */
/* Definition: */
/* =========== */
/* SUBROUTINE DORMRQ( SIDE, TRANS, M, N, K, A, LDA, TAU, C, LDC, */
/* WORK, LWORK, INFO ) */
/* CHARACTER SIDE, TRANS */
/* INTEGER INFO, K, LDA, LDC, LWORK, M, N */
/* DOUBLE PRECISION A( LDA, * ), C( LDC, * ), TAU( * ), WORK( * ) */
/* > \par Purpose: */
/* ============= */
/* > */
/* > \verbatim */
/* > */
/* > DORMRQ overwrites the general real M-by-N matrix C with */
/* > */
/* > SIDE = 'L' SIDE = 'R' */
/* > TRANS = 'N': Q * C C * Q */
/* > TRANS = 'T': Q**T * C C * Q**T */
/* > */
/* > where Q is a real orthogonal matrix defined as the product of k */
/* > elementary reflectors */
/* > */
/* > Q = H(1) H(2) . . . H(k) */
/* > */
/* > as returned by DGERQF. Q is of order M if SIDE = 'L' and of order N */
/* > if SIDE = 'R'. */
/* > \endverbatim */
/* Arguments: */
/* ========== */
/* > \param[in] SIDE */
/* > \verbatim */
/* > SIDE is CHARACTER*1 */
/* > = 'L': apply Q or Q**T from the Left; */
/* > = 'R': apply Q or Q**T from the Right. */
/* > \endverbatim */
/* > */
/* > \param[in] TRANS */
/* > \verbatim */
/* > TRANS is CHARACTER*1 */
/* > = 'N': No transpose, apply Q; */
/* > = 'T': Transpose, apply Q**T. */
/* > \endverbatim */
/* > */
/* > \param[in] M */
/* > \verbatim */
/* > M is INTEGER */
/* > The number of rows of the matrix C. M >= 0. */
/* > \endverbatim */
/* > */
/* > \param[in] N */
/* > \verbatim */
/* > N is INTEGER */
/* > The number of columns of the matrix C. N >= 0. */
/* > \endverbatim */
/* > */
/* > \param[in] K */
/* > \verbatim */
/* > K is INTEGER */
/* > The number of elementary reflectors whose product defines */
/* > the matrix Q. */
/* > If SIDE = 'L', M >= K >= 0; */
/* > if SIDE = 'R', N >= K >= 0. */
/* > \endverbatim */
/* > */
/* > \param[in] A */
/* > \verbatim */
/* > A is DOUBLE PRECISION array, dimension */
/* > (LDA,M) if SIDE = 'L', */
/* > (LDA,N) if SIDE = 'R' */
/* > The i-th row must contain the vector which defines the */
/* > elementary reflector H(i), for i = 1,2,...,k, as returned by */
/* > DGERQF in the last k rows of its array argument A. */
/* > \endverbatim */
/* > */
/* > \param[in] LDA */
/* > \verbatim */
/* > LDA is INTEGER */
/* > The leading dimension of the array A. LDA >= f2cmax(1,K). */
/* > \endverbatim */
/* > */
/* > \param[in] TAU */
/* > \verbatim */
/* > TAU is DOUBLE PRECISION array, dimension (K) */
/* > TAU(i) must contain the scalar factor of the elementary */
/* > reflector H(i), as returned by DGERQF. */
/* > \endverbatim */
/* > */
/* > \param[in,out] C */
/* > \verbatim */
/* > C is DOUBLE PRECISION array, dimension (LDC,N) */
/* > On entry, the M-by-N matrix C. */
/* > On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. */
/* > \endverbatim */
/* > */
/* > \param[in] LDC */
/* > \verbatim */
/* > LDC is INTEGER */
/* > The leading dimension of the array C. LDC >= f2cmax(1,M). */
/* > \endverbatim */
/* > */
/* > \param[out] WORK */
/* > \verbatim */
/* > WORK is DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
/* > On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
/* > \endverbatim */
/* > */
/* > \param[in] LWORK */
/* > \verbatim */
/* > LWORK is INTEGER */
/* > The dimension of the array WORK. */
/* > If SIDE = 'L', LWORK >= f2cmax(1,N); */
/* > if SIDE = 'R', LWORK >= f2cmax(1,M). */
/* > For good performance, LWORK should generally be larger. */
/* > */
/* > If LWORK = -1, then a workspace query is assumed; the routine */
/* > only calculates the optimal size of the WORK array, returns */
/* > this value as the first entry of the WORK array, and no error */
/* > message related to LWORK is issued by XERBLA. */
/* > \endverbatim */
/* > */
/* > \param[out] INFO */
/* > \verbatim */
/* > INFO is INTEGER */
/* > = 0: successful exit */
/* > < 0: if INFO = -i, the i-th argument had an illegal value */
/* > \endverbatim */
/* Authors: */
/* ======== */
/* > \author Univ. of Tennessee */
/* > \author Univ. of California Berkeley */
/* > \author Univ. of Colorado Denver */
/* > \author NAG Ltd. */
/* > \date December 2016 */
/* > \ingroup doubleOTHERcomputational */
/* ===================================================================== */
/* Subroutine */ int dormrq_(char *side, char *trans, integer *m, integer *n, 
	integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
	c__, integer *ldc, doublereal *work, integer *lwork, integer *info)
{
    /* System generated locals */
    address a__1[2];
    integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4, 
	    i__5;
    char ch__1[2];
    /* Local variables */
    logical left;
    integer i__;
    extern logical lsame_(char *, char *);
    integer nbmin, iinfo, i1, i2, i3;
    extern /* Subroutine */ int dormr2_(char *, char *, integer *, integer *, 
	    integer *, doublereal *, integer *, doublereal *, doublereal *, 
	    integer *, doublereal *, integer *);
    integer ib, nb, mi, ni;
    extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *, 
	    integer *, integer *, integer *, doublereal *, integer *, 
	    doublereal *, integer *, doublereal *, integer *, doublereal *, 
	    integer *);
    integer nq, nw;
    extern /* Subroutine */ int dlarft_(char *, char *, integer *, integer *, 
	    doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *, ftnlen);
    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
	    integer *, integer *, ftnlen, ftnlen);
    logical notran;
    integer ldwork;
    char transt[1];
    integer lwkopt;
    logical lquery;
    integer iwt;
    /* -- LAPACK computational routine (version 3.7.0) -- */
    /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
    /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
    /* December 2016 */
    /* ===================================================================== */
    /* Test the input arguments */
    /* Parameter adjustments */
    /* f2c shifts the array bases so 1-based Fortran indexing works in C. */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --tau;
    c_dim1 = *ldc;
    c_offset = 1 + c_dim1 * 1;
    c__ -= c_offset;
    --work;
    /* Function Body */
    *info = 0;
    left = lsame_(side, "L");
    notran = lsame_(trans, "N");
    lquery = *lwork == -1;
    /* NQ is the order of Q and NW is the minimum dimension of WORK */
    if (left) {
	nq = *m;
	nw = f2cmax(1,*n);
    } else {
	nq = *n;
	nw = f2cmax(1,*m);
    }
    if (! left && ! lsame_(side, "R")) {
	*info = -1;
    } else if (! notran && ! lsame_(trans, "T")) {
	*info = -2;
    } else if (*m < 0) {
	*info = -3;
    } else if (*n < 0) {
	*info = -4;
    } else if (*k < 0 || *k > nq) {
	*info = -5;
    } else if (*lda < f2cmax(1,*k)) {
	*info = -7;
    } else if (*ldc < f2cmax(1,*m)) {
	*info = -10;
    } else if (*lwork < nw && ! lquery) {
	*info = -12;
    }
    if (*info == 0) {
	/* Compute the workspace requirements */
	if (*m == 0 || *n == 0) {
	    lwkopt = 1;
	} else {
	    /* Computing MIN */
	    /* Writing concatenation */
	    i__3[0] = 1, a__1[0] = side;
	    i__3[1] = 1, a__1[1] = trans;
	    s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
	    i__1 = 64, i__2 = ilaenv_(&c__1, "DORMRQ", ch__1, m, n, k, &c_n1, 
		    (ftnlen)6, (ftnlen)2);
	    nb = f2cmin(i__1,i__2);
	    /* nb <= 64 panel workspace plus 4160 = 65*64 slots reserved
	       for the triangular T factor (stored with leading dim 65). */
	    lwkopt = nw * nb + 4160;
	}
	work[1] = (doublereal) lwkopt;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("DORMRQ", &i__1, (ftnlen)6);
	return 0;
    } else if (lquery) {
	return 0;
    }
    /* Quick return if possible */
    if (*m == 0 || *n == 0) {
	return 0;
    }
    nbmin = 2;
    ldwork = nw;
    if (nb > 1 && nb < *k) {
	if (*lwork < nw * nb + 4160) {
	    /* Workspace too small for the preferred nb: shrink it. */
	    nb = (*lwork - 4160) / ldwork;
	    /* Computing MAX */
	    /* Writing concatenation */
	    i__3[0] = 1, a__1[0] = side;
	    i__3[1] = 1, a__1[1] = trans;
	    s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
	    i__1 = 2, i__2 = ilaenv_(&c__2, "DORMRQ", ch__1, m, n, k, &c_n1, (
		    ftnlen)6, (ftnlen)2);
	    nbmin = f2cmax(i__1,i__2);
	}
    }
    if (nb < nbmin || nb >= *k) {
	/* Use unblocked code */
	dormr2_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
		c_offset], ldc, &work[1], &iinfo);
    } else {
	/* Use blocked code */
	/* The T factor lives in WORK just past the nw*nb panel area. */
	iwt = nw * nb + 1;
	/* Choose sweep direction over the reflector blocks: i1..i2 step i3. */
	if (left && ! notran || ! left && notran) {
	    i1 = 1;
	    i2 = *k;
	    i3 = nb;
	} else {
	    i1 = (*k - 1) / nb * nb + 1;
	    i2 = 1;
	    i3 = -nb;
	}
	if (left) {
	    ni = *n;
	} else {
	    mi = *m;
	}
	/* The blocked kernel applies the factors with the opposite
	   transpose setting, so flip 'N' <-> 'T' for DLARFB. */
	if (notran) {
	    *(unsigned char *)transt = 'T';
	} else {
	    *(unsigned char *)transt = 'N';
	}
	i__1 = i2;
	i__2 = i3;
	for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
	    /* Computing MIN */
	    i__4 = nb, i__5 = *k - i__ + 1;
	    ib = f2cmin(i__4,i__5);
	    /* Form the triangular factor of the block reflector */
	    /* H = H(i+ib-1) . . . H(i+1) H(i) */
	    i__4 = nq - *k + i__ + ib - 1;
	    dlarft_("Backward", "Rowwise", &i__4, &ib, &a[i__ + a_dim1], lda, 
		    &tau[i__], &work[iwt], &c__65);
	    if (left) {
		/* H or H**T is applied to C(1:m-k+i+ib-1,1:n) */
		mi = *m - *k + i__ + ib - 1;
	    } else {
		/* H or H**T is applied to C(1:m,1:n-k+i+ib-1) */
		ni = *n - *k + i__ + ib - 1;
	    }
	    /* Apply H or H**T */
	    dlarfb_(side, transt, "Backward", "Rowwise", &mi, &ni, &ib, &a[
		    i__ + a_dim1], lda, &work[iwt], &c__65, &c__[c_offset], 
		    ldc, &work[1], &ldwork);
	    /* L10: */
	}
    }
    work[1] = (doublereal) lwkopt;
    return 0;
    /* End of DORMRQ */
} /* dormrq_ */
|
the_stack_data/184518896.c
|
#if 0
3. 动态开辟能容纳若干整型元素的数组,读入用户输入的整型数,依次存放到此数组中,使得
没插入的一个整型数组都是有序的
(Exercise 3: dynamically grow an array of ints, read integers typed by the
user, and insert each one so that the array stays sorted after every insertion.)
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
void sort(int *arr);
/*
 * Reads whitespace-separated integers from stdin until end of line (or EOF),
 * inserting each into a dynamically grown array so the array stays sorted
 * after every insertion, then prints the sorted contents.
 *
 * Fixes vs. the original:
 *  - realloc was called with a byte count of n (the ELEMENT count), so every
 *    element after the first was written out of bounds; now (n+1)*sizeof(int).
 *  - realloc's result was assigned straight back to p (leak/UB on failure).
 *  - the leading getchar() swallowed the first digit of the input.
 *  - sort(p) could not work (it cannot know the length; see note on sort());
 *    the insertion is done directly at the correct position instead.
 */
int main()
{
    int num, c;
    size_t n = 0;      /* number of elements currently stored */
    int *p = NULL;

    while (scanf("%d", &num) == 1)
    {
        int *tmp = realloc(p, (n + 1) * sizeof *p);
        if (tmp == NULL)
        {
            free(p);
            return 1;
        }
        p = tmp;
        /* Shift larger elements right; drop num into its sorted slot. */
        size_t pos = n;
        while (pos > 0 && p[pos - 1] > num)
        {
            p[pos] = p[pos - 1];
            pos--;
        }
        p[pos] = num;
        n++;
        /* Consume the separator; stop at end of line or end of input. */
        c = getchar();
        if (c == '\n' || c == EOF)
            break;
    }
    for (size_t i = 0; i < n; i++)
        printf("%d ", p[i]);
    printf("\n");
    free(p);
    return 0;
}
/*
 * Intended: bubble-sort arr ascending.
 *
 * NOTE(review): this function cannot work as written. `arr` is a pointer
 * parameter, so sizeof(arr)/sizeof(arr[0]) is sizeof(int *)/sizeof(int)
 * (2 on LP64 targets), NOT the element count -- at most the first two
 * elements are ever compared. A correct version needs the length passed
 * in explicitly (interface change; callers must be updated together).
 */
void sort(int *arr)
{
    int n;
    /* NOTE(review): always sizeof(int *)/sizeof(int), never the real length. */
    n = sizeof(arr) / sizeof(arr[0]);
    for (int i = 0; i < n - 1; i++)
    {
        for (int j = 0; j < n - i - 1; j++)
        {
            /* NOTE(review): leftover debug output -- fires on every pass. */
            printf("%d ", arr[j+1]);
            if (arr[j] > arr[j+1])
            {
                /* XOR swap; only safe because j and j+1 are distinct slots. */
                arr[j] = arr[j] ^ arr[j+1];
                arr[j+1] = arr[j] ^ arr[j+1];
                arr[j] = arr[j] ^ arr[j+1];
            }
        }
    }
}
|
the_stack_data/9513665.c
|
/*******************************************************************************
*
* fork a new process and re-direct standard out to a file
*
******************************************************************************/
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sys/wait.h>
#include <fcntl.h>
#include <stdio.h>
/*
 * Forks a child that execs "./hello"; the parent waits for it to finish.
 *
 * NOTE(review): despite the file header, no stdout redirection is performed
 * here -- presumably it was planned but never implemented; confirm intent.
 *
 * Fixes vs. the original:
 *  - implicit-int main() -> int main(void).
 *  - execl() lacked an argv[0] and used a bare NULL as the variadic
 *    terminator; POSIX requires the program name first and an explicit
 *    (char *)NULL sentinel.
 *  - on exec failure the child fell through to the parent's `return 0`,
 *    reporting success; it now exits with 127.
 *  - fork() failure is checked; unused locals removed.
 */
int main(void)
{
    pid_t pid;
    int status;

    pid = fork();
    if (pid < 0) {
        perror("fork");
        return 1;
    }
    if (pid == 0) {              /* child */
        execl("./hello", "hello", (char *)NULL);
        /* execl only returns on failure. */
        fprintf(stderr, "Something wrong if you see this line\n");
        _exit(127);              /* don't run the parent's code path */
    }
    /* parent */
    wait(&status);
    printf("Parent: the child terminates\n");
    return 0;
}
|
the_stack_data/168892697.c
|
int test_assert(int, int, char *);
int printf();
/*
 * Compiler test driver: each test_assert(expected, actual, label) pairs a
 * C expression with its expected value; `label` should reproduce the
 * expression so failures are readable. Three labels were wrong in the
 * original ("10 - 10" for 10 == 10, and the second/third ternary tests
 * reused the first ternary's label) -- fixed below.
 */
int main() {
  test_assert(0, 0, "0");
  test_assert(42, 42, "42");
  test_assert(21, 5 + 20 - 4, "5 + 20 - 4");
  test_assert(41, 12 + 34 - 5, "12 + 34 - 5");
  test_assert(47, 5 + 6 * 7, "5 + 6 * 7");
  test_assert(15, 5 * (9 - 6), "5 * (9 - 6)");
  test_assert(4, (3 + 5) / 2, "(3 + 5) / 2");
  test_assert(10, -10 + 20, "-10 + 20");
  test_assert(2, -10 / -5, "-10 / -5");
  test_assert(1, 10 == 10, "10 == 10");
  test_assert(0, 10 < 10, "10 < 10");
  test_assert(1, 20 - 10 * 1 + 2 <= 2 * 7, "20 - 10 * 1 + 2 <= 2 * 7");
  test_assert(0, 80 - 50 > 20 * 2, "80 - 50 > 20 * 2");
  test_assert(1, 80 - 50 < 20 * 2, "80 - 50 < 20 * 2");
  test_assert(0, 80 - 0 <= 20 * 2, "80 - 0 <= 20 * 2");
  test_assert(1, 0 <= 16, "0 <= 16");
  test_assert(0, 0 >= 16, "0 >= 16");
  test_assert(0, 80 % 4, "80 % 4");
  test_assert(3, 79 % 4, "79 % 4");
  test_assert(28, 1 ? 28 : 25, "1 ? 28 : 25");
  test_assert(28, 1241 ? 28 : 25, "1241 ? 28 : 25");
  test_assert(25, 0 ? 28 : 25, "0 ? 28 : 25");
  test_assert(4, sizeof(int), "sizeof(int)");
  test_assert(8, sizeof(int *), "sizeof(int*)");
  test_assert(1, sizeof(char), "sizeof(char)");
  test_assert(8, sizeof(char *), "sizeof(char*)");
  test_assert(4, _Alignof(int), "_Alignof(int)");
  test_assert(8, _Alignof(int *), "_Alignof(int*)");
  test_assert(1, _Alignof(char), "_Alignof(char)");
  test_assert(8, _Alignof(char *), "_Alignof(char*)");
  printf("OK\n");
  return 0;
}
|
the_stack_data/1230992.c
|
/* { dg-do compile } */
/* { dg-require-effective-target pie } */
/* { dg-options "-O2 -fpie" } */
/* Weak common symbol with -fpie. */
/* Weak common symbol: with -fpie the compiler may still address it
   PC-relatively (or via @GOTOFF on ia32) instead of through the GOT;
   the dg-final scan-assembler directives below assert exactly that.
   Only comments may be added here -- any code change alters codegen. */
__attribute__((weak))
int xxx;
/* Returns the current value of the weak symbol xxx. */
int
foo ()
{
  return xxx;
}
/* { dg-final { scan-assembler "movl\[ \t\]xxx\\(%rip\\), %eax" { target { ! ia32 } } } } */
/* { dg-final { scan-assembler-not "xxx@GOTPCREL" { target { ! ia32 } } } } */
/* { dg-final { scan-assembler "movl\[ \t\]xxx@GOTOFF\\(%\[^,\]*\\), %eax" { target ia32 } } } */
/* { dg-final { scan-assembler-not "movl\[ \t\]xxx@GOT\\(%\[^,\]*\\), %eax" { target ia32 } } } */
|
the_stack_data/32948959.c
|
/** Z80: portable Z80 emulator *******************************/
/** **/
/** Debug.c **/
/** **/
/** This file contains the built-in debugging routine for **/
/** the Z80 emulator which is called on each Z80 step when **/
/** Trap!=0. **/
/** **/
/** Copyright (C) Marat Fayzullin 1995-2018 **/
/** You are not allowed to distribute this software **/
/** commercially. Please, notify me, if you make any **/
/** changes to this file. **/
/*************************************************************/
#ifdef DEBUG
#include "Z80.h"
#include <stdio.h>
#include <ctype.h>
#include <string.h>
#ifdef FMSX
#include "AY8910.h"
extern AY8910 PSG;
#endif
static const char *Mnemonics[256] =
{
"NOP","LD BC,#h","LD (BC),A","INC BC","INC B","DEC B","LD B,*h","RLCA",
"EX AF,AF'","ADD HL,BC","LD A,(BC)","DEC BC","INC C","DEC C","LD C,*h","RRCA",
"DJNZ @h","LD DE,#h","LD (DE),A","INC DE","INC D","DEC D","LD D,*h","RLA",
"JR @h","ADD HL,DE","LD A,(DE)","DEC DE","INC E","DEC E","LD E,*h","RRA",
"JR NZ,@h","LD HL,#h","LD (#h),HL","INC HL","INC H","DEC H","LD H,*h","DAA",
"JR Z,@h","ADD HL,HL","LD HL,(#h)","DEC HL","INC L","DEC L","LD L,*h","CPL",
"JR NC,@h","LD SP,#h","LD (#h),A","INC SP","INC (HL)","DEC (HL)","LD (HL),*h","SCF",
"JR C,@h","ADD HL,SP","LD A,(#h)","DEC SP","INC A","DEC A","LD A,*h","CCF",
"LD B,B","LD B,C","LD B,D","LD B,E","LD B,H","LD B,L","LD B,(HL)","LD B,A",
"LD C,B","LD C,C","LD C,D","LD C,E","LD C,H","LD C,L","LD C,(HL)","LD C,A",
"LD D,B","LD D,C","LD D,D","LD D,E","LD D,H","LD D,L","LD D,(HL)","LD D,A",
"LD E,B","LD E,C","LD E,D","LD E,E","LD E,H","LD E,L","LD E,(HL)","LD E,A",
"LD H,B","LD H,C","LD H,D","LD H,E","LD H,H","LD H,L","LD H,(HL)","LD H,A",
"LD L,B","LD L,C","LD L,D","LD L,E","LD L,H","LD L,L","LD L,(HL)","LD L,A",
"LD (HL),B","LD (HL),C","LD (HL),D","LD (HL),E","LD (HL),H","LD (HL),L","HALT","LD (HL),A",
"LD A,B","LD A,C","LD A,D","LD A,E","LD A,H","LD A,L","LD A,(HL)","LD A,A",
"ADD B","ADD C","ADD D","ADD E","ADD H","ADD L","ADD (HL)","ADD A",
"ADC B","ADC C","ADC D","ADC E","ADC H","ADC L","ADC (HL)","ADC A",
"SUB B","SUB C","SUB D","SUB E","SUB H","SUB L","SUB (HL)","SUB A",
"SBC B","SBC C","SBC D","SBC E","SBC H","SBC L","SBC (HL)","SBC A",
"AND B","AND C","AND D","AND E","AND H","AND L","AND (HL)","AND A",
"XOR B","XOR C","XOR D","XOR E","XOR H","XOR L","XOR (HL)","XOR A",
"OR B","OR C","OR D","OR E","OR H","OR L","OR (HL)","OR A",
"CP B","CP C","CP D","CP E","CP H","CP L","CP (HL)","CP A",
"RET NZ","POP BC","JP NZ,#h","JP #h","CALL NZ,#h","PUSH BC","ADD *h","RST 00h",
"RET Z","RET","JP Z,#h","PFX_CB","CALL Z,#h","CALL #h","ADC *h","RST 08h",
"RET NC","POP DE","JP NC,#h","OUTA (*h)","CALL NC,#h","PUSH DE","SUB *h","RST 10h",
"RET C","EXX","JP C,#h","INA (*h)","CALL C,#h","PFX_DD","SBC *h","RST 18h",
"RET PO","POP HL","JP PO,#h","EX HL,(SP)","CALL PO,#h","PUSH HL","AND *h","RST 20h",
"RET PE","LD PC,HL","JP PE,#h","EX DE,HL","CALL PE,#h","PFX_ED","XOR *h","RST 28h",
"RET P","POP AF","JP P,#h","DI","CALL P,#h","PUSH AF","OR *h","RST 30h",
"RET M","LD SP,HL","JP M,#h","EI","CALL M,#h","PFX_FD","CP *h","RST 38h"
};
static const char *MnemonicsCB[256] =
{
"RLC B","RLC C","RLC D","RLC E","RLC H","RLC L","RLC (HL)","RLC A",
"RRC B","RRC C","RRC D","RRC E","RRC H","RRC L","RRC (HL)","RRC A",
"RL B","RL C","RL D","RL E","RL H","RL L","RL (HL)","RL A",
"RR B","RR C","RR D","RR E","RR H","RR L","RR (HL)","RR A",
"SLA B","SLA C","SLA D","SLA E","SLA H","SLA L","SLA (HL)","SLA A",
"SRA B","SRA C","SRA D","SRA E","SRA H","SRA L","SRA (HL)","SRA A",
"SLL B","SLL C","SLL D","SLL E","SLL H","SLL L","SLL (HL)","SLL A",
"SRL B","SRL C","SRL D","SRL E","SRL H","SRL L","SRL (HL)","SRL A",
"BIT 0,B","BIT 0,C","BIT 0,D","BIT 0,E","BIT 0,H","BIT 0,L","BIT 0,(HL)","BIT 0,A",
"BIT 1,B","BIT 1,C","BIT 1,D","BIT 1,E","BIT 1,H","BIT 1,L","BIT 1,(HL)","BIT 1,A",
"BIT 2,B","BIT 2,C","BIT 2,D","BIT 2,E","BIT 2,H","BIT 2,L","BIT 2,(HL)","BIT 2,A",
"BIT 3,B","BIT 3,C","BIT 3,D","BIT 3,E","BIT 3,H","BIT 3,L","BIT 3,(HL)","BIT 3,A",
"BIT 4,B","BIT 4,C","BIT 4,D","BIT 4,E","BIT 4,H","BIT 4,L","BIT 4,(HL)","BIT 4,A",
"BIT 5,B","BIT 5,C","BIT 5,D","BIT 5,E","BIT 5,H","BIT 5,L","BIT 5,(HL)","BIT 5,A",
"BIT 6,B","BIT 6,C","BIT 6,D","BIT 6,E","BIT 6,H","BIT 6,L","BIT 6,(HL)","BIT 6,A",
"BIT 7,B","BIT 7,C","BIT 7,D","BIT 7,E","BIT 7,H","BIT 7,L","BIT 7,(HL)","BIT 7,A",
"RES 0,B","RES 0,C","RES 0,D","RES 0,E","RES 0,H","RES 0,L","RES 0,(HL)","RES 0,A",
"RES 1,B","RES 1,C","RES 1,D","RES 1,E","RES 1,H","RES 1,L","RES 1,(HL)","RES 1,A",
"RES 2,B","RES 2,C","RES 2,D","RES 2,E","RES 2,H","RES 2,L","RES 2,(HL)","RES 2,A",
"RES 3,B","RES 3,C","RES 3,D","RES 3,E","RES 3,H","RES 3,L","RES 3,(HL)","RES 3,A",
"RES 4,B","RES 4,C","RES 4,D","RES 4,E","RES 4,H","RES 4,L","RES 4,(HL)","RES 4,A",
"RES 5,B","RES 5,C","RES 5,D","RES 5,E","RES 5,H","RES 5,L","RES 5,(HL)","RES 5,A",
"RES 6,B","RES 6,C","RES 6,D","RES 6,E","RES 6,H","RES 6,L","RES 6,(HL)","RES 6,A",
"RES 7,B","RES 7,C","RES 7,D","RES 7,E","RES 7,H","RES 7,L","RES 7,(HL)","RES 7,A",
"SET 0,B","SET 0,C","SET 0,D","SET 0,E","SET 0,H","SET 0,L","SET 0,(HL)","SET 0,A",
"SET 1,B","SET 1,C","SET 1,D","SET 1,E","SET 1,H","SET 1,L","SET 1,(HL)","SET 1,A",
"SET 2,B","SET 2,C","SET 2,D","SET 2,E","SET 2,H","SET 2,L","SET 2,(HL)","SET 2,A",
"SET 3,B","SET 3,C","SET 3,D","SET 3,E","SET 3,H","SET 3,L","SET 3,(HL)","SET 3,A",
"SET 4,B","SET 4,C","SET 4,D","SET 4,E","SET 4,H","SET 4,L","SET 4,(HL)","SET 4,A",
"SET 5,B","SET 5,C","SET 5,D","SET 5,E","SET 5,H","SET 5,L","SET 5,(HL)","SET 5,A",
"SET 6,B","SET 6,C","SET 6,D","SET 6,E","SET 6,H","SET 6,L","SET 6,(HL)","SET 6,A",
"SET 7,B","SET 7,C","SET 7,D","SET 7,E","SET 7,H","SET 7,L","SET 7,(HL)","SET 7,A"
};
static const char *MnemonicsED[256] =
{
"DB EDh,00h","DB EDh,01h","DB EDh,02h","DB EDh,03h",
"DB EDh,04h","DB EDh,05h","DB EDh,06h","DB EDh,07h",
"DB EDh,08h","DB EDh,09h","DB EDh,0Ah","DB EDh,0Bh",
"DB EDh,0Ch","DB EDh,0Dh","DB EDh,0Eh","DB EDh,0Fh",
"DB EDh,10h","DB EDh,11h","DB EDh,12h","DB EDh,13h",
"DB EDh,14h","DB EDh,15h","DB EDh,16h","DB EDh,17h",
"DB EDh,18h","DB EDh,19h","DB EDh,1Ah","DB EDh,1Bh",
"DB EDh,1Ch","DB EDh,1Dh","DB EDh,1Eh","DB EDh,1Fh",
"DB EDh,20h","DB EDh,21h","DB EDh,22h","DB EDh,23h",
"DB EDh,24h","DB EDh,25h","DB EDh,26h","DB EDh,27h",
"DB EDh,28h","DB EDh,29h","DB EDh,2Ah","DB EDh,2Bh",
"DB EDh,2Ch","DB EDh,2Dh","DB EDh,2Eh","DB EDh,2Fh",
"DB EDh,30h","DB EDh,31h","DB EDh,32h","DB EDh,33h",
"DB EDh,34h","DB EDh,35h","DB EDh,36h","DB EDh,37h",
"DB EDh,38h","DB EDh,39h","DB EDh,3Ah","DB EDh,3Bh",
"DB EDh,3Ch","DB EDh,3Dh","DB EDh,3Eh","DB EDh,3Fh",
"IN B,(C)","OUT (C),B","SBC HL,BC","LD (#h),BC",
"NEG","RETN","IM 0","LD I,A",
"IN C,(C)","OUT (C),C","ADC HL,BC","LD BC,(#h)",
"DB EDh,4Ch","RETI","DB EDh,4Eh","LD R,A",
"IN D,(C)","OUT (C),D","SBC HL,DE","LD (#h),DE",
"DB EDh,54h","DB EDh,55h","IM 1","LD A,I",
"IN E,(C)","OUT (C),E","ADC HL,DE","LD DE,(#h)",
"DB EDh,5Ch","DB EDh,5Dh","IM 2","LD A,R",
"IN H,(C)","OUT (C),H","SBC HL,HL","LD (#h),HL",
"DB EDh,64h","DB EDh,65h","DB EDh,66h","RRD",
"IN L,(C)","OUT (C),L","ADC HL,HL","LD HL,(#h)",
"DB EDh,6Ch","DB EDh,6Dh","DB EDh,6Eh","RLD",
"IN F,(C)","DB EDh,71h","SBC HL,SP","LD (#h),SP",
"DB EDh,74h","DB EDh,75h","DB EDh,76h","DB EDh,77h",
"IN A,(C)","OUT (C),A","ADC HL,SP","LD SP,(#h)",
"DB EDh,7Ch","DB EDh,7Dh","DB EDh,7Eh","DB EDh,7Fh",
"DB EDh,80h","DB EDh,81h","DB EDh,82h","DB EDh,83h",
"DB EDh,84h","DB EDh,85h","DB EDh,86h","DB EDh,87h",
"DB EDh,88h","DB EDh,89h","DB EDh,8Ah","DB EDh,8Bh",
"DB EDh,8Ch","DB EDh,8Dh","DB EDh,8Eh","DB EDh,8Fh",
"DB EDh,90h","DB EDh,91h","DB EDh,92h","DB EDh,93h",
"DB EDh,94h","DB EDh,95h","DB EDh,96h","DB EDh,97h",
"DB EDh,98h","DB EDh,99h","DB EDh,9Ah","DB EDh,9Bh",
"DB EDh,9Ch","DB EDh,9Dh","DB EDh,9Eh","DB EDh,9Fh",
"LDI","CPI","INI","OUTI",
"DB EDh,A4h","DB EDh,A5h","DB EDh,A6h","DB EDh,A7h",
"LDD","CPD","IND","OUTD",
"DB EDh,ACh","DB EDh,ADh","DB EDh,AEh","DB EDh,AFh",
"LDIR","CPIR","INIR","OTIR",
"DB EDh,B4h","DB EDh,B5h","DB EDh,B6h","DB EDh,B7h",
"LDDR","CPDR","INDR","OTDR",
"DB EDh,BCh","DB EDh,BDh","DB EDh,BEh","DB EDh,BFh",
"DB EDh,C0h","DB EDh,C1h","DB EDh,C2h","DB EDh,C3h",
"DB EDh,C4h","DB EDh,C5h","DB EDh,C6h","DB EDh,C7h",
"DB EDh,C8h","DB EDh,C9h","DB EDh,CAh","DB EDh,CBh",
"DB EDh,CCh","DB EDh,CDh","DB EDh,CEh","DB EDh,CFh",
"DB EDh,D0h","DB EDh,D1h","DB EDh,D2h","DB EDh,D3h",
"DB EDh,D4h","DB EDh,D5h","DB EDh,D6h","DB EDh,D7h",
"DB EDh,D8h","DB EDh,D9h","DB EDh,DAh","DB EDh,DBh",
"DB EDh,DCh","DB EDh,DDh","DB EDh,DEh","DB EDh,DFh",
"DB EDh,E0h","DB EDh,E1h","DB EDh,E2h","DB EDh,E3h",
"DB EDh,E4h","DB EDh,E5h","DB EDh,E6h","DB EDh,E7h",
"DB EDh,E8h","DB EDh,E9h","DB EDh,EAh","DB EDh,EBh",
"DB EDh,ECh","DB EDh,EDh","DB EDh,EEh","DB EDh,EFh",
"DB EDh,F0h","DB EDh,F1h","DB EDh,F2h","DB EDh,F3h",
"DB EDh,F4h","DB EDh,F5h","DB EDh,F6h","DB EDh,F7h",
"DB EDh,F8h","DB EDh,F9h","DB EDh,FAh","DB EDh,FBh",
"DB EDh,FCh","DB EDh,FDh","DB EDh,FEh","DB EDh,FFh"
};
static const char *MnemonicsXX[256] =
{
"NOP","LD BC,#h","LD (BC),A","INC BC","INC B","DEC B","LD B,*h","RLCA",
"EX AF,AF'","ADD I%,BC","LD A,(BC)","DEC BC","INC C","DEC C","LD C,*h","RRCA",
"DJNZ @h","LD DE,#h","LD (DE),A","INC DE","INC D","DEC D","LD D,*h","RLA",
"JR @h","ADD I%,DE","LD A,(DE)","DEC DE","INC E","DEC E","LD E,*h","RRA",
"JR NZ,@h","LD I%,#h","LD (#h),I%","INC I%","INC I%h","DEC I%h","LD I%h,*h","DAA",
"JR Z,@h","ADD I%,I%","LD I%,(#h)","DEC I%","INC I%l","DEC I%l","LD I%l,*h","CPL",
"JR NC,@h","LD SP,#h","LD (#h),A","INC SP","INC (I%+^h)","DEC (I%+^h)","LD (I%+^h),*h","SCF",
"JR C,@h","ADD I%,SP","LD A,(#h)","DEC SP","INC A","DEC A","LD A,*h","CCF",
"LD B,B","LD B,C","LD B,D","LD B,E","LD B,I%h","LD B,I%l","LD B,(I%+^h)","LD B,A",
"LD C,B","LD C,C","LD C,D","LD C,E","LD C,I%h","LD C,I%l","LD C,(I%+^h)","LD C,A",
"LD D,B","LD D,C","LD D,D","LD D,E","LD D,I%h","LD D,I%l","LD D,(I%+^h)","LD D,A",
"LD E,B","LD E,C","LD E,D","LD E,E","LD E,I%h","LD E,I%l","LD E,(I%+^h)","LD E,A",
"LD I%h,B","LD I%h,C","LD I%h,D","LD I%h,E","LD I%h,I%h","LD I%h,I%l","LD H,(I%+^h)","LD I%h,A",
"LD I%l,B","LD I%l,C","LD I%l,D","LD I%l,E","LD I%l,I%h","LD I%l,I%l","LD L,(I%+^h)","LD I%l,A",
"LD (I%+^h),B","LD (I%+^h),C","LD (I%+^h),D","LD (I%+^h),E","LD (I%+^h),H","LD (I%+^h),L","HALT","LD (I%+^h),A",
"LD A,B","LD A,C","LD A,D","LD A,E","LD A,I%h","LD A,I%l","LD A,(I%+^h)","LD A,A",
"ADD B","ADD C","ADD D","ADD E","ADD I%h","ADD I%l","ADD (I%+^h)","ADD A",
"ADC B","ADC C","ADC D","ADC E","ADC I%h","ADC I%l","ADC (I%+^h)","ADC,A",
"SUB B","SUB C","SUB D","SUB E","SUB I%h","SUB I%l","SUB (I%+^h)","SUB A",
"SBC B","SBC C","SBC D","SBC E","SBC I%h","SBC I%l","SBC (I%+^h)","SBC A",
"AND B","AND C","AND D","AND E","AND I%h","AND I%l","AND (I%+^h)","AND A",
"XOR B","XOR C","XOR D","XOR E","XOR I%h","XOR I%l","XOR (I%+^h)","XOR A",
"OR B","OR C","OR D","OR E","OR I%h","OR I%l","OR (I%+^h)","OR A",
"CP B","CP C","CP D","CP E","CP I%h","CP I%l","CP (I%+^h)","CP A",
"RET NZ","POP BC","JP NZ,#h","JP #h","CALL NZ,#h","PUSH BC","ADD *h","RST 00h",
"RET Z","RET","JP Z,#h","PFX_CB","CALL Z,#h","CALL #h","ADC *h","RST 08h",
"RET NC","POP DE","JP NC,#h","OUTA (*h)","CALL NC,#h","PUSH DE","SUB *h","RST 10h",
"RET C","EXX","JP C,#h","INA (*h)","CALL C,#h","PFX_DD","SBC *h","RST 18h",
"RET PO","POP I%","JP PO,#h","EX I%,(SP)","CALL PO,#h","PUSH I%","AND *h","RST 20h",
"RET PE","LD PC,I%","JP PE,#h","EX DE,I%","CALL PE,#h","PFX_ED","XOR *h","RST 28h",
"RET P","POP AF","JP P,#h","DI","CALL P,#h","PUSH AF","OR *h","RST 30h",
"RET M","LD SP,I%","JP M,#h","EI","CALL M,#h","PFX_FD","CP *h","RST 38h"
};
static const char *MnemonicsXCB[256] =
{
"RLC B","RLC C","RLC D","RLC E","RLC H","RLC L","RLC (I%@h)","RLC A",
"RRC B","RRC C","RRC D","RRC E","RRC H","RRC L","RRC (I%@h)","RRC A",
"RL B","RL C","RL D","RL E","RL H","RL L","RL (I%@h)","RL A",
"RR B","RR C","RR D","RR E","RR H","RR L","RR (I%@h)","RR A",
"SLA B","SLA C","SLA D","SLA E","SLA H","SLA L","SLA (I%@h)","SLA A",
"SRA B","SRA C","SRA D","SRA E","SRA H","SRA L","SRA (I%@h)","SRA A",
"SLL B","SLL C","SLL D","SLL E","SLL H","SLL L","SLL (I%@h)","SLL A",
"SRL B","SRL C","SRL D","SRL E","SRL H","SRL L","SRL (I%@h)","SRL A",
"BIT 0,B","BIT 0,C","BIT 0,D","BIT 0,E","BIT 0,H","BIT 0,L","BIT 0,(I%@h)","BIT 0,A",
"BIT 1,B","BIT 1,C","BIT 1,D","BIT 1,E","BIT 1,H","BIT 1,L","BIT 1,(I%@h)","BIT 1,A",
"BIT 2,B","BIT 2,C","BIT 2,D","BIT 2,E","BIT 2,H","BIT 2,L","BIT 2,(I%@h)","BIT 2,A",
"BIT 3,B","BIT 3,C","BIT 3,D","BIT 3,E","BIT 3,H","BIT 3,L","BIT 3,(I%@h)","BIT 3,A",
"BIT 4,B","BIT 4,C","BIT 4,D","BIT 4,E","BIT 4,H","BIT 4,L","BIT 4,(I%@h)","BIT 4,A",
"BIT 5,B","BIT 5,C","BIT 5,D","BIT 5,E","BIT 5,H","BIT 5,L","BIT 5,(I%@h)","BIT 5,A",
"BIT 6,B","BIT 6,C","BIT 6,D","BIT 6,E","BIT 6,H","BIT 6,L","BIT 6,(I%@h)","BIT 6,A",
"BIT 7,B","BIT 7,C","BIT 7,D","BIT 7,E","BIT 7,H","BIT 7,L","BIT 7,(I%@h)","BIT 7,A",
"RES 0,B","RES 0,C","RES 0,D","RES 0,E","RES 0,H","RES 0,L","RES 0,(I%@h)","RES 0,A",
"RES 1,B","RES 1,C","RES 1,D","RES 1,E","RES 1,H","RES 1,L","RES 1,(I%@h)","RES 1,A",
"RES 2,B","RES 2,C","RES 2,D","RES 2,E","RES 2,H","RES 2,L","RES 2,(I%@h)","RES 2,A",
"RES 3,B","RES 3,C","RES 3,D","RES 3,E","RES 3,H","RES 3,L","RES 3,(I%@h)","RES 3,A",
"RES 4,B","RES 4,C","RES 4,D","RES 4,E","RES 4,H","RES 4,L","RES 4,(I%@h)","RES 4,A",
"RES 5,B","RES 5,C","RES 5,D","RES 5,E","RES 5,H","RES 5,L","RES 5,(I%@h)","RES 5,A",
"RES 6,B","RES 6,C","RES 6,D","RES 6,E","RES 6,H","RES 6,L","RES 6,(I%@h)","RES 6,A",
"RES 7,B","RES 7,C","RES 7,D","RES 7,E","RES 7,H","RES 7,L","RES 7,(I%@h)","RES 7,A",
"SET 0,B","SET 0,C","SET 0,D","SET 0,E","SET 0,H","SET 0,L","SET 0,(I%@h)","SET 0,A",
"SET 1,B","SET 1,C","SET 1,D","SET 1,E","SET 1,H","SET 1,L","SET 1,(I%@h)","SET 1,A",
"SET 2,B","SET 2,C","SET 2,D","SET 2,E","SET 2,H","SET 2,L","SET 2,(I%@h)","SET 2,A",
"SET 3,B","SET 3,C","SET 3,D","SET 3,E","SET 3,H","SET 3,L","SET 3,(I%@h)","SET 3,A",
"SET 4,B","SET 4,C","SET 4,D","SET 4,E","SET 4,H","SET 4,L","SET 4,(I%@h)","SET 4,A",
"SET 5,B","SET 5,C","SET 5,D","SET 5,E","SET 5,H","SET 5,L","SET 5,(I%@h)","SET 5,A",
"SET 6,B","SET 6,C","SET 6,D","SET 6,E","SET 6,H","SET 6,L","SET 6,(I%@h)","SET 6,A",
"SET 7,B","SET 7,C","SET 7,D","SET 7,E","SET 7,H","SET 7,L","SET 7,(I%@h)","SET 7,A"
};
/** DAsm() ***************************************************/
/** DAsm() will disassemble the code at adress A and put **/
/** the output text into S. It will return the number of **/
/** bytes disassembled. **/
/*************************************************************/
static int DAsm(char *S,word A)
{
  /* The mnemonic templates contain placeholder characters that are
     expanded from the instruction stream below:
       '*' -> 8-bit immediate operand,
       '@' -> signed displacement (relative jump target / IX+d form),
       '#' -> 16-bit immediate or address,
       '^' -> IX/IY index offset byte (DD/FD CB prefixed opcodes),
       '%' -> replaced by 'X' or 'Y' depending on the DD/FD prefix. */
  char R[128],H[10],C,*P;
  const char *T;
  byte J,Offset;
  word B;
  Offset=0;
  B=A;
  C='\0';
  J=0;
  /* Pick the mnemonic table based on the prefix byte(s). For DD/FD CB
     the offset byte comes BEFORE the opcode, so it is fetched here and
     J is set to mark that the '@' operand was already consumed. */
  switch(RdZ80(B))
  {
    case 0xCB: B++;T=MnemonicsCB[RdZ80(B++)];break;
    case 0xED: B++;T=MnemonicsED[RdZ80(B++)];break;
    case 0xDD: B++;C='X';
               if(RdZ80(B)!=0xCB) T=MnemonicsXX[RdZ80(B++)];
               else
               { B++;Offset=RdZ80(B++);J=1;T=MnemonicsXCB[RdZ80(B++)]; }
               break;
    case 0xFD: B++;C='Y';
               if(RdZ80(B)!=0xCB) T=MnemonicsXX[RdZ80(B++)];
               else
               { B++;Offset=RdZ80(B++);J=1;T=MnemonicsXCB[RdZ80(B++)]; }
               break;
    default:   T=Mnemonics[RdZ80(B++)];
  }
  /* '^' : splice in the index offset byte as two hex digits. */
  if((P=strchr(T,'^')))
  {
    strncpy(R,T,P-T);R[P-T]='\0';
    sprintf(H,"%02X",RdZ80(B++));
    strcat(R,H);strcat(R,P+1);
  }
  else strcpy(R,T);
  /* '%' : substitute the IX/IY register letter captured in C. */
  if((P=strchr(R,'%'))) *P=C;
  if((P=strchr(R,'*')))
  {
    /* '*' : 8-bit immediate. */
    strncpy(S,R,P-R);S[P-R]='\0';
    sprintf(H,"%02X",RdZ80(B++));
    strcat(S,H);strcat(S,P+1);
  }
  else if((P=strchr(R,'@')))
  {
    /* '@' : signed displacement, rendered as +NN / -NN. */
    strncpy(S,R,P-R);S[P-R]='\0';
    if(!J) Offset=RdZ80(B++);
    strcat(S,Offset&0x80? "-":"+");
    J=Offset&0x80? 256-Offset:Offset;
    sprintf(H,"%02X",J);
    strcat(S,H);strcat(S,P+1);
  }
  else if((P=strchr(R,'#')))
  {
    /* '#' : little-endian 16-bit operand. */
    strncpy(S,R,P-R);S[P-R]='\0';
    sprintf(H,"%04X",RdZ80(B)+256*RdZ80(B+1));
    strcat(S,H);strcat(S,P+1);
    B+=2;
  }
  else strcpy(S,R);
  /* Number of bytes consumed by the disassembled instruction. */
  return(B-A);
}
/** DebugZ80() ***********************************************/
/** This function should exist if DEBUG is #defined. When **/
/** Trace!=0, it is called after each command executed by **/
/** the CPU, and given the Z80 registers. **/
/*************************************************************/
/* Interactive debugger step, invoked per instruction while tracing (see
   the banner above). Returns 1 to continue emulation, 0 to quit it. */
byte DebugZ80(Z80 *R)
{
  static const char Flags[9] = "SZ.H.PNC";
  char S[128],T[10];
  byte J,I;
  /* Disassemble the instruction at PC into S. */
  DAsm(S,R->PC.W);
  /* Render F as a string: flag letter if the bit is set, '.' otherwise. */
  for(J=0,I=R->AF.B.l;J<8;J++,I<<=1) T[J]=I&0x80? Flags[J]:'.';
  T[8]='\0';
  printf
  (
    "AF:%04X HL:%04X DE:%04X BC:%04X PC:%04X SP:%04X IX:%04X IY:%04X I:%02X\n",
    R->AF.W,R->HL.W,R->DE.W,R->BC.W,R->PC.W,R->SP.W,R->IX.W,R->IY.W,R->I
  );
  printf
  (
    "AT PC: [%02X - %s] AT SP: [%04X] FLAGS: [%s] %s: %s\n\n",
    RdZ80(R->PC.W),S,RdZ80(R->SP.W)+RdZ80(R->SP.W+1)*256,T,
    R->IFF&0x04? "IM2":R->IFF&0x02? "IM1":"IM0",
    R->IFF&0x01? "EI":"DI"
  );
  /* Command loop: every accepted command returns; only unknown input
     (or a 'break'ing command like help/M/D) re-prompts. */
  while(1)
  {
    printf("\n[Command,'?']-> ");
    /* NOTE(review): fflush(stdin) is undefined by ISO C; it only works
       on some libcs. Confirm it is intentional here. */
    fflush(stdout);fflush(stdin);
    if(!fgets(S,50,stdin)) return(1);
    /* Uppercase the line up to the first control character. */
    for(J=0;S[J]>=' ';J++)
      S[J]=toupper(S[J]);
    S[J]='\0';
    switch(S[0])
    {
      case 'H':
      case '?':
        puts("\n***** Built-in Z80 Debugger Commands *****");
        puts("<CR> : Break at next instruction");
        puts("= <addr> : Break at addr");
        puts("+ <offset> : Break at PC + offset");
        puts("c : Continue without break");
        puts("j <addr> : Continue from addr");
        puts("m <addr> : Memory dump at addr");
        puts("d <addr> : Disassembly at addr");
        puts("?,h : Show this help text");
        puts("q : Exit Z80 emulation");
        break;
      case '\0': return(1);
      /* NOTE(review): %hX requires Trap/PC to be unsigned-short sized;
         confirm against the "word" typedef in Z80.h. */
      case '=': if(strlen(S)>=2)
                { sscanf(S+1,"%hX",&(R->Trap));R->Trace=0;return(1); }
                break;
      case '+': if(strlen(S)>=2)
                {
                  sscanf(S+1,"%hX",&(R->Trap));
                  R->Trap+=R->PC.W;R->Trace=0;
                  return(1);
                }
                break;
      case 'J': if(strlen(S)>=2)
                { sscanf(S+1,"%hX",&(R->PC.W));R->Trace=0;return(1); }
                break;
      case 'C': R->Trap=0xFFFF;R->Trace=0;return(1);
      case 'Q': return(0);
      case 'M':
        {
          /* Hex + ASCII dump: 16 rows of 16 bytes from Addr (default PC). */
          word Addr;
          if(strlen(S)>1) sscanf(S+1,"%hX",&Addr); else Addr=R->PC.W;
          puts("");
          for(J=0;J<16;J++)
          {
            printf("%04X: ",Addr);
            for(I=0;I<16;I++,Addr++)
              printf("%02X ",RdZ80(Addr));
            printf(" | ");Addr-=16;
            for(I=0;I<16;I++,Addr++)
              putchar(isprint(RdZ80(Addr))? RdZ80(Addr):'.');
            puts("");
          }
        }
        break;
      case 'D':
        {
          /* Disassemble 16 consecutive instructions from Addr (default PC). */
          word Addr;
          if(strlen(S)>1) sscanf(S+1,"%hX",&Addr); else Addr=R->PC.W;
          puts("");
          for(J=0;J<16;J++)
          {
            printf("%04X: ",Addr);
            Addr+=DAsm(S,Addr);
            puts(S);
          }
        }
        break;
#ifdef FMSX
      case 'S':
        for(J=0;J<AY8910_CHANNELS;J++)
        {
          printf("Channel %d: Volume %d, Frequency %dHz",J,PSG.Volume[J],PSG.Freq[J]);
          if(!(PSG.R[8+(J%3)]&0x10)) printf("\n");
          else printf(", Envelope %d\n",PSG.R[8+(J%3)]&0x0F);
        }
        printf("Envelope period %dms\n",PSG.EPeriod);
        break;
#endif /* FMSX */
    }
  }
  /* Continue emulation */
  /* Not reached: the loop above only exits via return. */
  return(1);
}
#endif /* DEBUG */
|
the_stack_data/97012892.c
|
/*Exercise 3 - Repetition
Write a C program to calculate the sum of the numbers from 1 to n.
Where n is a keyboard input.
e.g.
n -> 100
sum = 1+2+3+....+ 99+100 = 5050
n -> 1-
sum = 1+2+3+...+10 = 55 */
#include <stdio.h>
int main() {
int num,i,sum=0;
printf("Input a value for n: ");
scanf("%d",&num);
for(i=1;i<=num;i++)
{
sum=sum+i;
}
printf("\nSum = %d ",sum);
return 0;
}
|
the_stack_data/904981.c
|
// KASAN: slab-out-of-bounds Read in resample_shrink
// https://syzkaller.appspot.com/bug?id=e1fe9f44fb8ecf4fb5dd
// status:6
// autogenerated by syzkaller (https://github.com/google/syzkaller)
#define _GNU_SOURCE
#include <dirent.h>
#include <endian.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
#include <linux/futex.h>
unsigned long long procid;
/* Blocks the calling thread for roughly `ms` milliseconds. */
static void sleep_ms(uint64_t ms)
{
  uint64_t us = ms * 1000;
  usleep(us);
}
/* Returns a monotonic timestamp in milliseconds (unaffected by wall-clock
 * adjustments); aborts the process if the clock cannot be read. */
static uint64_t current_time_ms(void)
{
  struct timespec now;
  if (clock_gettime(CLOCK_MONOTONIC, &now) != 0)
    exit(1);
  uint64_t ms = (uint64_t)now.tv_sec * 1000;
  ms += (uint64_t)now.tv_nsec / 1000000;
  return ms;
}
/* Starts fn(arg) on a new thread with a small (128KB) stack.
 * Retries pthread_create() up to 100 times while it fails with EAGAIN
 * (transient thread/resource shortage); any other failure, or exhausting
 * the retries, aborts the process via exit(1). */
static void thread_start(void* (*fn)(void*), void* arg)
{
pthread_t th;
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setstacksize(&attr, 128 << 10);
int i;
for (i = 0; i < 100; i++) {
if (pthread_create(&th, &attr, fn, arg) == 0) {
pthread_attr_destroy(&attr);
return;
}
/* EAGAIN: back off briefly and retry; other errno values are fatal. */
if (errno == EAGAIN) {
usleep(50);
continue;
}
break;
}
exit(1);
}
/* Minimal one-shot event built on a futex word: 0 = unset, 1 = set. */
typedef struct {
int state;
} event_t;
/* Puts the event into the "unset" state; call once before first use. */
static void event_init(event_t* ev)
{
ev->state = 0;
}
/* Re-arms a consumed event so it can be set and waited on again. */
static void event_reset(event_t* ev)
{
ev->state = 0;
}
/* Marks the event as set and wakes all futex waiters.
 * Setting an already-set event indicates a protocol bug, so it aborts. */
static void event_set(event_t* ev)
{
if (ev->state)
exit(1);
/* RELEASE store pairs with the ACQUIRE loads in event_wait()/event_isset(). */
__atomic_store_n(&ev->state, 1, __ATOMIC_RELEASE);
syscall(SYS_futex, &ev->state, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1000000);
}
/* Blocks until the event becomes set, sleeping on the futex between
 * (possibly spurious) wakeups. */
static void event_wait(event_t* ev)
{
while (!__atomic_load_n(&ev->state, __ATOMIC_ACQUIRE))
syscall(SYS_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, 0);
}
/* Non-blocking check: returns non-zero iff the event is currently set. */
static int event_isset(event_t* ev)
{
return __atomic_load_n(&ev->state, __ATOMIC_ACQUIRE);
}
/* Waits for the event for at most `timeout` milliseconds.
 * Returns 1 if the event was set in time, 0 on timeout. */
static int event_timedwait(event_t* ev, uint64_t timeout)
{
uint64_t start = current_time_ms();
uint64_t now = start;
for (;;) {
uint64_t remain = timeout - (now - start);
struct timespec ts;
ts.tv_sec = remain / 1000;
ts.tv_nsec = (remain % 1000) * 1000 * 1000;
/* FUTEX_WAIT may return early (wakeup, signal, EAGAIN); re-check state. */
syscall(SYS_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, &ts);
if (__atomic_load_n(&ev->state, __ATOMIC_RELAXED))
return 1;
now = current_time_ms();
if (now - start > timeout)
return 0;
}
}
/* Formats `what` (printf-style) and writes the result to `file`.
 * Returns true on a full write, false otherwise; on a failed write the
 * write()'s errno is preserved for the caller. */
static bool write_file(const char* file, const char* what, ...)
{
  char msg[1024];
  va_list ap;
  va_start(ap, what);
  vsnprintf(msg, sizeof(msg), what, ap);
  va_end(ap);
  msg[sizeof(msg) - 1] = 0;
  int want = strlen(msg);
  int fd = open(file, O_WRONLY | O_CLOEXEC);
  if (fd == -1)
    return false;
  bool ok = (write(fd, msg, want) == want);
  if (!ok) {
    int saved = errno; /* close() may clobber errno; keep write()'s value */
    close(fd);
    errno = saved;
    return false;
  }
  close(fd);
  return true;
}
/* Forcefully kills test child `pid` (and its whole process group) and
 * reaps it. If the child does not die promptly, writes to every abort
 * file under /sys/fs/fuse/connections -- a hung FUSE connection can
 * otherwise keep the child stuck -- then blocks until waitpid() reaps it. */
static void kill_and_wait(int pid, int* status)
{
kill(-pid, SIGKILL);
kill(pid, SIGKILL);
int i;
/* Give the child ~100ms to exit before the FUSE-abort fallback. */
for (i = 0; i < 100; i++) {
if (waitpid(-1, status, WNOHANG | __WALL) == pid)
return;
usleep(1000);
}
DIR* dir = opendir("/sys/fs/fuse/connections");
if (dir) {
for (;;) {
struct dirent* ent = readdir(dir);
if (!ent)
break;
if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0)
continue;
char abort[300];
snprintf(abort, sizeof(abort), "/sys/fs/fuse/connections/%s/abort",
ent->d_name);
int fd = open(abort, O_WRONLY);
if (fd == -1) {
continue;
}
/* Any byte written to the abort file tears down the connection; the
 * content is irrelevant and failure is deliberately ignored. */
if (write(fd, abort, 1) < 0) {
}
close(fd);
}
closedir(dir);
} else {
}
/* The child must exit now; block until it is reaped. */
while (waitpid(-1, status, __WALL) != pid) {
}
}
/* Per-child setup: request SIGKILL when the parent dies, start a fresh
 * process group so kill(-pid) reaches all descendants, and make this
 * process the OOM killer's preferred victim. */
static void setup_test()
{
prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
setpgrp();
write_file("/proc/self/oom_score_adj", "1000");
}
/* Worker-thread slot: `ready` is signalled to hand the thread a call
 * number, `done` is signalled back when that call has finished. */
struct thread_t {
int created, call;
event_t ready, done;
};
static struct thread_t threads[16];
static void execute_call(int call);
/* Count of calls currently executing across all worker threads. */
static int running;
/* Worker-thread loop: wait for a call number, run it, report completion. */
static void* thr(void* arg)
{
struct thread_t* th = (struct thread_t*)arg;
for (;;) {
event_wait(&th->ready);
event_reset(&th->ready);
execute_call(th->call);
__atomic_fetch_sub(&running, 1, __ATOMIC_RELAXED);
event_set(&th->done);
}
return 0;
}
/* Runs one iteration of the reproducer: dispatches each of the 5 calls to
 * the first idle worker thread (threads are created lazily), waiting at
 * most 45ms per call, then gives stragglers up to ~100ms to drain. */
static void execute_one(void)
{
int i, call, thread;
for (call = 0; call < 5; call++) {
for (thread = 0; thread < (int)(sizeof(threads) / sizeof(threads[0]));
thread++) {
struct thread_t* th = &threads[thread];
if (!th->created) {
th->created = 1;
event_init(&th->ready);
event_init(&th->done);
/* Mark the fresh thread idle so it can be picked immediately. */
event_set(&th->done);
thread_start(thr, th);
}
if (!event_isset(&th->done))
continue;
event_reset(&th->done);
th->call = call;
__atomic_fetch_add(&running, 1, __ATOMIC_RELAXED);
event_set(&th->ready);
/* The call may hang in the kernel; cap the wait at 45ms and move on. */
event_timedwait(&th->done, 45);
break;
}
}
for (i = 0; i < 100 && __atomic_load_n(&running, __ATOMIC_RELAXED); i++)
sleep_ms(1);
}
/* Redundant re-declaration (execute_one is defined just above); kept as-is
 * from the generator's output. */
static void execute_one(void);
#define WAIT_FLAGS __WALL
/* Top-level fuzz loop: forever fork a child that runs one iteration of
 * the reproducer, and forcibly kill it if it exceeds a 5 second budget. */
static void loop(void)
{
int iter;
for (iter = 0;; iter++) {
int pid = fork();
if (pid < 0)
exit(1);
if (pid == 0) {
setup_test();
execute_one();
exit(0);
}
int status = 0;
uint64_t start = current_time_ms();
for (;;) {
if (waitpid(-1, &status, WNOHANG | WAIT_FLAGS) == pid)
break;
sleep_ms(1);
/* 5 second budget per child before it is killed and reaped. */
if (current_time_ms() - start < 5 * 1000)
continue;
kill_and_wait(pid, &status);
break;
}
}
}
uint64_t r[1] = {0xffffffffffffffff};
/* Runs one numbered syscall of the reproducer.
 *
 * The hard-coded 0x20000000-range addresses lie inside the region that
 * main() maps with mmap() before forking; each case writes its syscall
 * arguments there directly, so this function is only valid inside that
 * process tree.
 *
 * Fix: defined as `static` to match the earlier forward declaration
 * `static void execute_call(int call);` -- a static declaration followed
 * by a non-static definition is legal (linkage stays internal) but
 * confusing, and some compilers warn about it.
 */
static void execute_call(int call)
{
  intptr_t res;
  switch (call) {
  case 0:
    /* openat(AT_FDCWD, "/dev/audio", 0, 0); remember the fd in r[0]. */
    memcpy((void*)0x20000080, "/dev/audio\000", 11);
    res = syscall(__NR_openat, 0xffffffffffffff9cul, 0x20000080ul, 0ul, 0ul);
    if (res != -1)
      r[0] = res;
    break;
  case 1:
    *(uint64_t*)0x20000300 = 0x52e5cf88;
    syscall(__NR_ioctl, r[0], 0x800000c004500aul, 0x20000300ul);
    break;
  case 2:
    *(uint32_t*)0x200000c0 = 0;
    syscall(__NR_ioctl, r[0], 0xc0045002ul, 0x200000c0ul);
    break;
  case 3:
    /* readv(fd, iov, 1) with a single 0x1000-byte buffer at 0x204d2000. */
    *(uint64_t*)0x20395000 = 0x204d2000;
    *(uint64_t*)0x20395008 = 0x1000;
    syscall(__NR_readv, r[0], 0x20395000ul, 1ul);
    break;
  case 4:
    syscall(__NR_ioctl, r[0], 0x5001ul, 0ul);
    break;
  default:
    break;
  }
}
/* Entry point: maps the fixed region at 0x20000000 that execute_call()'s
 * hard-coded pointers target, then forks 6 concurrent fuzzing processes
 * that each run loop() forever while the parent sleeps. */
int main(void)
{
syscall(__NR_mmap, 0x20000000ul, 0x1000000ul, 3ul, 0x32ul, -1, 0);
for (procid = 0; procid < 6; procid++) {
if (fork() == 0) {
loop();
}
}
sleep(1000000);
return 0;
}
|
the_stack_data/3262602.c
|
/*
This is a version (aka dlmalloc) of malloc/free/realloc written by
Doug Lea and released to the public domain, as explained at
http://creativecommons.org/publicdomain/zero/1.0/ Send questions,
comments, complaints, performance data, etc to [email protected]
* Version 2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea
Note: There may be an updated version of this malloc obtainable at
ftp://gee.cs.oswego.edu/pub/misc/malloc.c
Check before installing!
* Quickstart
This library is all in one file to simplify the most common usage:
ftp it, compile it (-O3), and link it into another program. All of
the compile-time options default to reasonable values for use on
most platforms. You might later want to step through various
compile-time and dynamic tuning options.
For convenience, an include file for code using this malloc is at:
ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.6.h
You don't really need this .h file unless you call functions not
defined in your system include files. The .h file contains only the
excerpts from this file needed for using this malloc on ANSI C/C++
systems, so long as you haven't changed compile-time options about
naming and tuning parameters. If you do, then you can create your
own malloc.h that does include all settings by cutting at the point
indicated below. Note that you may already by default be using a C
library containing a malloc that is based on some version of this
malloc (for example in linux). You might still want to use the one
in this file to customize settings or to avoid overheads associated
with library versions.
* Vital statistics:
Supported pointer/size_t representation: 4 or 8 bytes
size_t MUST be an unsigned type of the same width as
pointers. (If you are using an ancient system that declares
size_t as a signed type, or need it to be a different width
than pointers, you can use a previous release of this malloc
(e.g. 2.7.2) supporting these.)
Alignment: 8 bytes (minimum)
This suffices for nearly all current machines and C compilers.
However, you can define MALLOC_ALIGNMENT to be wider than this
if necessary (up to 128bytes), at the expense of using more space.
Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes)
8 or 16 bytes (if 8byte sizes)
Each malloced chunk has a hidden word of overhead holding size
and status information, and additional cross-check word
if FOOTERS is defined.
Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead)
8-byte ptrs: 32 bytes (including overhead)
Even a request for zero bytes (i.e., malloc(0)) returns a
pointer to something of the minimum allocatable size.
The maximum overhead wastage (i.e., number of extra bytes
allocated than were requested in malloc) is less than or equal
to the minimum size, except for requests >= mmap_threshold that
are serviced via mmap(), where the worst case wastage is about
32 bytes plus the remainder from a system page (the minimal
mmap unit); typically 4096 or 8192 bytes.
Security: static-safe; optionally more or less
The "security" of malloc refers to the ability of malicious
code to accentuate the effects of errors (for example, freeing
space that is not currently malloc'ed or overwriting past the
ends of chunks) in code that calls malloc. This malloc
guarantees not to modify any memory locations below the base of
heap, i.e., static variables, even in the presence of usage
errors. The routines additionally detect most improper frees
and reallocs. All this holds as long as the static bookkeeping
for malloc itself is not corrupted by some other means. This
is only one aspect of security -- these checks do not, and
cannot, detect all possible programming errors.
If FOOTERS is defined nonzero, then each allocated chunk
carries an additional check word to verify that it was malloced
from its space. These check words are the same within each
execution of a program using malloc, but differ across
executions, so externally crafted fake chunks cannot be
freed. This improves security by rejecting frees/reallocs that
could corrupt heap memory, in addition to the checks preventing
writes to statics that are always on. This may further improve
security at the expense of time and space overhead. (Note that
FOOTERS may also be worth using with MSPACES.)
By default detected errors cause the program to abort (calling
"abort()"). You can override this to instead proceed past
errors by defining PROCEED_ON_ERROR. In this case, a bad free
has no effect, and a malloc that encounters a bad address
caused by user overwrites will ignore the bad address by
dropping pointers and indices to all known memory. This may
be appropriate for programs that should continue if at all
possible in the face of programming errors, although they may
run out of memory because dropped memory is never reclaimed.
If you don't like either of these options, you can define
CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
else. And if you are sure that your program using malloc has
no errors or vulnerabilities, you can define INSECURE to 1,
which might (or might not) provide a small performance improvement.
It is also possible to limit the maximum total allocatable
space, using malloc_set_footprint_limit. This is not
designed as a security feature in itself (calls to set limits
are not screened or privileged), but may be useful as one
aspect of a secure implementation.
Thread-safety: NOT thread-safe unless USE_LOCKS defined non-zero
When USE_LOCKS is defined, each public call to malloc, free,
etc is surrounded with a lock. By default, this uses a plain
pthread mutex, win32 critical section, or a spin-lock if
available for the platform and not disabled by setting
USE_SPIN_LOCKS=0. However, if USE_RECURSIVE_LOCKS is defined,
recursive versions are used instead (which are not required for
base functionality but may be needed in layered extensions).
Using a global lock is not especially fast, and can be a major
bottleneck. It is designed only to provide minimal protection
in concurrent environments, and to provide a basis for
extensions. If you are using malloc in a concurrent program,
consider instead using nedmalloc
(http://www.nedprod.com/programs/portable/nedmalloc/) or
ptmalloc (See http://www.malloc.de), which are derived from
versions of this malloc.
System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
This malloc can use unix sbrk or any emulation (invoked using
the CALL_MORECORE macro) and/or mmap/munmap or any emulation
(invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
memory. On most unix systems, it tends to work best if both
MORECORE and MMAP are enabled. On Win32, it uses emulations
based on VirtualAlloc. It also uses common C library functions
like memset.
Compliance: I believe it is compliant with the Single Unix Specification
(See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
others as well.
* Overview of algorithms
This is not the fastest, most space-conserving, most portable, or
most tunable malloc ever written. However it is among the fastest
while also being among the most space-conserving, portable and
tunable. Consistent balance across these factors results in a good
general-purpose allocator for malloc-intensive programs.
In most ways, this malloc is a best-fit allocator. Generally, it
chooses the best-fitting existing chunk for a request, with ties
broken in approximately least-recently-used order. (This strategy
normally maintains low fragmentation.) However, for requests less
than 256bytes, it deviates from best-fit when there is not an
exactly fitting available chunk by preferring to use space adjacent
to that used for the previous small request, as well as by breaking
ties in approximately most-recently-used order. (These enhance
locality of series of small allocations.) And for very large requests
(>= 256Kb by default), it relies on system memory mapping
facilities, if supported. (This helps avoid carrying around and
possibly fragmenting memory used only for large chunks.)
All operations (except malloc_stats and mallinfo) have execution
times that are bounded by a constant factor of the number of bits in
a size_t, not counting any clearing in calloc or copying in realloc,
or actions surrounding MORECORE and MMAP that have times
proportional to the number of non-contiguous regions returned by
system allocation routines, which is often just 1. In real-time
applications, you can optionally suppress segment traversals using
NO_SEGMENT_TRAVERSAL, which assures bounded execution even when
system allocators return non-contiguous spaces, at the typical
expense of carrying around more memory and increased fragmentation.
The implementation is not very modular and seriously overuses
macros. Perhaps someday all C compilers will do as good a job
inlining modular code as can now be done by brute-force expansion,
but now, enough of them seem not to.
Some compilers issue a lot of warnings about code that is
dead/unreachable only on some platforms, and also about intentional
uses of negation on unsigned types. All known cases of each can be
ignored.
For a longer but out of date high-level description, see
http://gee.cs.oswego.edu/dl/html/malloc.html
* MSPACES
If MSPACES is defined, then in addition to malloc, free, etc.,
this file also defines mspace_malloc, mspace_free, etc. These
are versions of malloc routines that take an "mspace" argument
obtained using create_mspace, to control all internal bookkeeping.
If ONLY_MSPACES is defined, only these versions are compiled.
So if you would like to use this allocator for only some allocations,
and your system malloc for others, you can compile with
ONLY_MSPACES and then do something like...
static mspace mymspace = create_mspace(0,0); // for example
#define mymalloc(bytes) mspace_malloc(mymspace, bytes)
(Note: If you only need one instance of an mspace, you can instead
use "USE_DL_PREFIX" to relabel the global malloc.)
You can similarly create thread-local allocators by storing
mspaces as thread-locals. For example:
static __thread mspace tlms = 0;
void* tlmalloc(size_t bytes) {
if (tlms == 0) tlms = create_mspace(0, 0);
return mspace_malloc(tlms, bytes);
}
void tlfree(void* mem) { mspace_free(tlms, mem); }
Unless FOOTERS is defined, each mspace is completely independent.
You cannot allocate from one and free to another (although
conformance is only weakly checked, so usage errors are not always
caught). If FOOTERS is defined, then each chunk carries around a tag
indicating its originating mspace, and frees are directed to their
originating spaces. Normally, this requires use of locks.
------------------------- Compile-time options ---------------------------
Be careful in setting #define values for numerical constants of type
size_t. On some systems, literal values are not automatically extended
to size_t precision unless they are explicitly casted. You can also
use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc below.
WIN32 default: defined if _WIN32 defined
Defining WIN32 sets up defaults for MS environment and compilers.
Otherwise defaults are for unix. Beware that there seem to be some
cases where this malloc might not be a pure drop-in replacement for
Win32 malloc: Random-looking failures from Win32 GDI API's (eg;
SetDIBits()) may be due to bugs in some video driver implementations
when pixel buffers are malloc()ed, and the region spans more than
one VirtualAlloc()ed region. Because dlmalloc uses a small (64Kb)
default granularity, pixel buffers may straddle virtual allocation
regions more often than when using the Microsoft allocator. You can
avoid this by using VirtualAlloc() and VirtualFree() for all pixel
buffers rather than using malloc(). If this is not possible,
recompile this malloc with a larger DEFAULT_GRANULARITY. Note:
in cases where MSC and gcc (cygwin) are known to differ on WIN32,
conditions use _MSC_VER to distinguish them.
DLMALLOC_EXPORT default: extern
Defines how public APIs are declared. If you want to export via a
Windows DLL, you might define this as
#define DLMALLOC_EXPORT extern __declspec(dllexport)
If you want a POSIX ELF shared object, you might use
#define DLMALLOC_EXPORT extern __attribute__((visibility("default")))
MALLOC_ALIGNMENT default: (size_t)(2 * sizeof(void *))
Controls the minimum alignment for malloc'ed chunks. It must be a
power of two and at least 8, even on machines for which smaller
alignments would suffice. It may be defined as larger than this
though. Note however that code and data structures are optimized for
the case of 8-byte alignment.
MSPACES default: 0 (false)
If true, compile in support for independent allocation spaces.
This is only supported if HAVE_MMAP is true.
ONLY_MSPACES default: 0 (false)
If true, only compile in mspace versions, not regular versions.
USE_LOCKS default: 0 (false)
Causes each call to each public routine to be surrounded with
pthread or WIN32 mutex lock/unlock. (If set true, this can be
overridden on a per-mspace basis for mspace versions.) If set to a
non-zero value other than 1, locks are used, but their
implementation is left out, so lock functions must be supplied manually,
as described below.
USE_SPIN_LOCKS default: 1 iff USE_LOCKS and spin locks available
If true, uses custom spin locks for locking. This is currently
supported only gcc >= 4.1, older gccs on x86 platforms, and recent
MS compilers. Otherwise, posix locks or win32 critical sections are
used.
USE_RECURSIVE_LOCKS default: not defined
If defined nonzero, uses recursive (aka reentrant) locks, otherwise
uses plain mutexes. This is not required for malloc proper, but may
be needed for layered allocators such as nedmalloc.
LOCK_AT_FORK default: not defined
If defined nonzero, performs pthread_atfork upon initialization
to initialize child lock while holding parent lock. The implementation
assumes that pthread locks (not custom locks) are being used. In other
cases, you may need to customize the implementation.
FOOTERS default: 0
If true, provide extra checking and dispatching by placing
information in the footers of allocated chunks. This adds
space and time overhead.
INSECURE default: 0
If true, omit checks for usage errors and heap space overwrites.
USE_DL_PREFIX default: NOT defined
Causes compiler to prefix all public routines with the string 'dl'.
This can be useful when you only want to use this malloc in one part
of a program, using your regular system malloc elsewhere.
MALLOC_INSPECT_ALL default: NOT defined
If defined, compiles malloc_inspect_all and mspace_inspect_all, that
perform traversal of all heap space. Unless access to these
functions is otherwise restricted, you probably do not want to
include them in secure implementations.
ABORT default: defined as abort()
Defines how to abort on failed checks. On most systems, a failed
check cannot die with an "assert" or even print an informative
message, because the underlying print routines in turn call malloc,
which will fail again. Generally, the best policy is to simply call
abort(). It's not very useful to do more than this because many
errors due to overwriting will show up as address faults (null, odd
addresses etc) rather than malloc-triggered checks, so will also
abort. Also, most compilers know that abort() does not return, so
can better optimize code conditionally calling it.
PROCEED_ON_ERROR default: defined as 0 (false)
Controls whether detected bad addresses cause them to bypassed
rather than aborting. If set, detected bad arguments to free and
realloc are ignored. And all bookkeeping information is zeroed out
upon a detected overwrite of freed heap space, thus losing the
ability to ever return it from malloc again, but enabling the
application to proceed. If PROCEED_ON_ERROR is defined, the
static variable malloc_corruption_error_count is compiled in
and can be examined to see if errors have occurred. This option
generates slower code than the default abort policy.
DEBUG default: NOT defined
The DEBUG setting is mainly intended for people trying to modify
this code or diagnose problems when porting to new platforms.
However, it may also be able to better isolate user errors than just
using runtime checks. The assertions in the check routines spell
out in more detail the assumptions and invariants underlying the
algorithms. The checking is fairly extensive, and will slow down
execution noticeably. Calling malloc_stats or mallinfo with DEBUG
set will attempt to check every non-mmapped allocated and free chunk
in the course of computing the summaries.
ABORT_ON_ASSERT_FAILURE default: defined as 1 (true)
Debugging assertion failures can be nearly impossible if your
version of the assert macro causes malloc to be called, which will
lead to a cascade of further failures, blowing the runtime stack.
ABORT_ON_ASSERT_FAILURE cause assertions failures to call abort(),
which will usually make debugging easier.
MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32
The action to take before "return 0" when malloc fails to be able to
return memory because there is none available.
HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES
True if this system supports sbrk or an emulation of it.
MORECORE default: sbrk
The name of the sbrk-style system routine to call to obtain more
memory. See below for guidance on writing custom MORECORE
functions. The type of the argument to sbrk/MORECORE varies across
systems. It cannot be size_t, because it supports negative
arguments, so it is normally the signed type of the same width as
size_t (sometimes declared as "intptr_t"). It doesn't much matter
though. Internally, we only call it with arguments less than half
the max value of a size_t, which should work across all reasonable
possibilities, although sometimes generating compiler warnings.
MORECORE_CONTIGUOUS default: 1 (true) if HAVE_MORECORE
If true, take advantage of fact that consecutive calls to MORECORE
with positive arguments always return contiguous increasing
addresses. This is true of unix sbrk. It does not hurt too much to
set it true anyway, since malloc copes with non-contiguities.
Setting it false when definitely non-contiguous saves time
and possibly wasted space it would take to discover this though.
MORECORE_CANNOT_TRIM default: NOT defined
True if MORECORE cannot release space back to the system when given
negative arguments. This is generally necessary only if you are
using a hand-crafted MORECORE function that cannot handle negative
arguments.
NO_SEGMENT_TRAVERSAL default: 0
If non-zero, suppresses traversals of memory segments
returned by either MORECORE or CALL_MMAP. This disables
merging of segments that are contiguous, and selectively
releasing them to the OS if unused, but bounds execution times.
HAVE_MMAP default: 1 (true)
True if this system supports mmap or an emulation of it. If so, and
HAVE_MORECORE is not true, MMAP is used for all system
allocation. If set and HAVE_MORECORE is true as well, MMAP is
primarily used to directly allocate very large blocks. It is also
used as a backup strategy in cases where MORECORE fails to provide
space from system. Note: A single call to MUNMAP is assumed to be
able to unmap memory that may have be allocated using multiple calls
to MMAP, so long as they are adjacent.
HAVE_MREMAP default: 1 on linux, else 0
If true realloc() uses mremap() to re-allocate large blocks and
extend or shrink allocation spaces.
MMAP_CLEARS default: 1 except on WINCE.
True if mmap clears memory so calloc doesn't need to. This is true
for standard unix mmap using /dev/zero and on WIN32 except for WINCE.
USE_BUILTIN_FFS default: 0 (i.e., not used)
Causes malloc to use the builtin ffs() function to compute indices.
Some compilers may recognize and intrinsify ffs to be faster than the
supplied C version. Also, the case of x86 using gcc is special-cased
to an asm instruction, so is already as fast as it can be, and so
this setting has no effect. Similarly for Win32 under recent MS compilers.
(On most x86s, the asm version is only slightly faster than the C version.)
malloc_getpagesize default: derive from system includes, or 4096.
The system page size. To the extent possible, this malloc manages
memory from the system in page-size units. This may be (and
usually is) a function rather than a constant. This is ignored
if WIN32, where page size is determined using getSystemInfo during
initialization.
USE_DEV_RANDOM default: 0 (i.e., not used)
Causes malloc to use /dev/random to initialize secure magic seed for
stamping footers. Otherwise, the current time is used.
NO_MALLINFO default: 0
If defined, don't compile "mallinfo". This can be a simple way
of dealing with mismatches between system declarations and
those in this file.
MALLINFO_FIELD_TYPE default: size_t
The type of the fields in the mallinfo struct. This was originally
defined as "int" in SVID etc, but is more usefully defined as
size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set
NO_MALLOC_STATS default: 0
If defined, don't compile "malloc_stats". This avoids calls to
fprintf and bringing in stdio dependencies you might not want.
REALLOC_ZERO_BYTES_FREES default: not defined
This should be set if a call to realloc with zero bytes should
be the same as a call to free. Some people think it should. Otherwise,
since this malloc returns a unique pointer for malloc(0), so does
realloc(p, 0).
LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H
LACKS_STDLIB_H LACKS_SCHED_H LACKS_TIME_H default: NOT defined unless on WIN32
Define these if your system does not have these header files.
You might need to manually insert some of the declarations they provide.
DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS,
system_info.dwAllocationGranularity in WIN32,
otherwise 64K.
Also settable using mallopt(M_GRANULARITY, x)
The unit for allocating and deallocating memory from the system. On
most systems with contiguous MORECORE, there is no reason to
make this more than a page. However, systems with MMAP tend to
either require or encourage larger granularities. You can increase
this value to prevent system allocation functions to be called so
often, especially if they are slow. The value must be at least one
page and must be a power of two. Setting to 0 causes initialization
to either page size or win32 region size. (Note: In previous
versions of malloc, the equivalent of this option was called
"TOP_PAD")
DEFAULT_TRIM_THRESHOLD default: 2MB
Also settable using mallopt(M_TRIM_THRESHOLD, x)
The maximum amount of unused top-most memory to keep before
releasing via malloc_trim in free(). Automatic trimming is mainly
useful in long-lived programs using contiguous MORECORE. Because
trimming via sbrk can be slow on some systems, and can sometimes be
wasteful (in cases where programs immediately afterward allocate
more large chunks) the value should be high enough so that your
overall system performance would improve by releasing this much
memory. As a rough guide, you might set to a value close to the
average size of a process (program) running on your system.
Releasing this much memory would allow such a process to run in
memory. Generally, it is worth tuning trim thresholds when a
program undergoes phases where several large chunks are allocated
and released in ways that can reuse each other's storage, perhaps
mixed with phases where there are no such chunks at all. The trim
value must be greater than page size to have any useful effect. To
disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
some people use of mallocing a huge space and then freeing it at
program startup, in an attempt to reserve system memory, doesn't
have the intended effect under automatic trimming, since that memory
will immediately be returned to the system.
DEFAULT_MMAP_THRESHOLD default: 256K
Also settable using mallopt(M_MMAP_THRESHOLD, x)
The request size threshold for using MMAP to directly service a
request. Requests of at least this size that cannot be allocated
using already-existing space will be serviced via mmap. (If enough
normal freed space already exists it is used instead.) Using mmap
segregates relatively large chunks of memory so that they can be
individually obtained and released from the host system. A request
serviced through mmap is never reused by any other request (at least
not directly; the system may just so happen to remap successive
requests to the same locations). Segregating space in this way has
the benefits that: Mmapped space can always be individually released
back to the system, which helps keep the system level memory demands
of a long-lived program low. Also, mapped memory doesn't become
`locked' between other chunks, as can happen with normally allocated
chunks, which means that even trimming via malloc_trim would not
release them. However, it has the disadvantage that the space
cannot be reclaimed, consolidated, and then used to service later
requests, as happens with normal chunks. The advantages of mmap
nearly always outweigh disadvantages for "large" chunks, but the
value of "large" may vary across systems. The default is an
empirically derived value that works well in most systems. You can
disable mmap by setting to MAX_SIZE_T.
MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP
The number of consolidated frees between checks to release
unused segments when freeing. When using non-contiguous segments,
especially with multiple mspaces, checking only for topmost space
doesn't always suffice to trigger trimming. To compensate for this,
free() will, with a period of MAX_RELEASE_CHECK_RATE (or the
current number of segments, if greater) try to release unused
segments to the OS when freeing chunks that result in
consolidation. The best value for this parameter is a compromise
between slowing down frees with relatively costly checks that
rarely trigger versus holding on to unused memory. To effectively
disable, set to MAX_SIZE_T. This may lead to a very slight speed
improvement at the expense of carrying around more memory.
*/
#define USE_DL_PREFIX
#define USE_LOCKS 1
/* Version identifier to allow people to support multiple versions */
#ifndef DLMALLOC_VERSION
#define DLMALLOC_VERSION 20806
#endif /* DLMALLOC_VERSION */
#ifndef DLMALLOC_EXPORT
#define DLMALLOC_EXPORT extern
#endif
#ifndef WIN32
#ifdef _WIN32
#define WIN32 1
#endif /* _WIN32 */
#ifdef _WIN32_WCE
#define LACKS_FCNTL_H
#define WIN32 1
#endif /* _WIN32_WCE */
#endif /* WIN32 */
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <tchar.h>
#define HAVE_MMAP 1
#define HAVE_MORECORE 0
#define LACKS_UNISTD_H
#define LACKS_SYS_PARAM_H
#define LACKS_SYS_MMAN_H
#define LACKS_STRING_H
#define LACKS_STRINGS_H
#define LACKS_SYS_TYPES_H
#define LACKS_ERRNO_H
#define LACKS_SCHED_H
#ifndef MALLOC_FAILURE_ACTION
#define MALLOC_FAILURE_ACTION
#endif /* MALLOC_FAILURE_ACTION */
#ifndef MMAP_CLEARS
#ifdef _WIN32_WCE /* WINCE reportedly does not clear */
#define MMAP_CLEARS 0
#else
#define MMAP_CLEARS 1
#endif /* _WIN32_WCE */
#endif /*MMAP_CLEARS */
#endif /* WIN32 */
#if defined(DARWIN) || defined(_DARWIN)
/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
#ifndef HAVE_MORECORE
#define HAVE_MORECORE 0
#define HAVE_MMAP 1
/* OSX allocators provide 16 byte alignment */
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t)16U)
#endif
#endif /* HAVE_MORECORE */
#endif /* DARWIN */
#ifndef LACKS_SYS_TYPES_H
#include <sys/types.h> /* For size_t */
#endif /* LACKS_SYS_TYPES_H */
/* The maximum possible size_t value has all bits set */
#define MAX_SIZE_T (~(size_t)0)
#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */
#define USE_LOCKS ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \
(defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0))
#endif /* USE_LOCKS */
#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */
#if ((defined(__GNUC__) && \
((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \
defined(__i386__) || defined(__x86_64__))) || \
(defined(_MSC_VER) && _MSC_VER>=1310))
#ifndef USE_SPIN_LOCKS
#define USE_SPIN_LOCKS 1
#endif /* USE_SPIN_LOCKS */
#elif USE_SPIN_LOCKS
#error "USE_SPIN_LOCKS defined without implementation"
#endif /* ... locks available... */
#elif !defined(USE_SPIN_LOCKS)
#define USE_SPIN_LOCKS 0
#endif /* USE_LOCKS */
#ifndef ONLY_MSPACES
#define ONLY_MSPACES 0
#endif /* ONLY_MSPACES */
#ifndef MSPACES
#if ONLY_MSPACES
#define MSPACES 1
#else /* ONLY_MSPACES */
#define MSPACES 0
#endif /* ONLY_MSPACES */
#endif /* MSPACES */
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *)))
#endif /* MALLOC_ALIGNMENT */
#ifndef FOOTERS
#define FOOTERS 0
#endif /* FOOTERS */
#ifndef ABORT
#define ABORT abort()
#endif /* ABORT */
#ifndef ABORT_ON_ASSERT_FAILURE
#define ABORT_ON_ASSERT_FAILURE 1
#endif /* ABORT_ON_ASSERT_FAILURE */
#ifndef PROCEED_ON_ERROR
#define PROCEED_ON_ERROR 0
#endif /* PROCEED_ON_ERROR */
#ifndef INSECURE
#define INSECURE 0
#endif /* INSECURE */
#ifndef MALLOC_INSPECT_ALL
#define MALLOC_INSPECT_ALL 0
#endif /* MALLOC_INSPECT_ALL */
#ifndef HAVE_MMAP
#define HAVE_MMAP 1
#endif /* HAVE_MMAP */
#ifndef MMAP_CLEARS
#define MMAP_CLEARS 1
#endif /* MMAP_CLEARS */
#ifndef HAVE_MREMAP
#ifdef linux
#define HAVE_MREMAP 1
#define _GNU_SOURCE /* Turns on mremap() definition */
#else /* linux */
#define HAVE_MREMAP 0
#endif /* linux */
#endif /* HAVE_MREMAP */
#ifndef MALLOC_FAILURE_ACTION
#define MALLOC_FAILURE_ACTION errno = ENOMEM;
#endif /* MALLOC_FAILURE_ACTION */
#ifndef HAVE_MORECORE
#if ONLY_MSPACES
#define HAVE_MORECORE 0
#else /* ONLY_MSPACES */
#define HAVE_MORECORE 1
#endif /* ONLY_MSPACES */
#endif /* HAVE_MORECORE */
#if !HAVE_MORECORE
#define MORECORE_CONTIGUOUS 0
#else /* !HAVE_MORECORE */
#define MORECORE_DEFAULT sbrk
#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#endif /* MORECORE_CONTIGUOUS */
#endif /* HAVE_MORECORE */
#ifndef DEFAULT_GRANULARITY
#if (MORECORE_CONTIGUOUS || defined(WIN32))
#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */
#else /* MORECORE_CONTIGUOUS */
#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
#endif /* MORECORE_CONTIGUOUS */
#endif /* DEFAULT_GRANULARITY */
#ifndef DEFAULT_TRIM_THRESHOLD
#ifndef MORECORE_CANNOT_TRIM
#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
#else /* MORECORE_CANNOT_TRIM */
#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
#endif /* MORECORE_CANNOT_TRIM */
#endif /* DEFAULT_TRIM_THRESHOLD */
#ifndef DEFAULT_MMAP_THRESHOLD
#if HAVE_MMAP
#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
#else /* HAVE_MMAP */
#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
#endif /* HAVE_MMAP */
#endif /* DEFAULT_MMAP_THRESHOLD */
#ifndef MAX_RELEASE_CHECK_RATE
#if HAVE_MMAP
#define MAX_RELEASE_CHECK_RATE 4095
#else
#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
#endif /* HAVE_MMAP */
#endif /* MAX_RELEASE_CHECK_RATE */
#ifndef USE_BUILTIN_FFS
#define USE_BUILTIN_FFS 0
#endif /* USE_BUILTIN_FFS */
#ifndef USE_DEV_RANDOM
#define USE_DEV_RANDOM 0
#endif /* USE_DEV_RANDOM */
#ifndef NO_MALLINFO
#define NO_MALLINFO 0
#endif /* NO_MALLINFO */
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#endif /* MALLINFO_FIELD_TYPE */
#ifndef NO_MALLOC_STATS
#define NO_MALLOC_STATS 0
#endif /* NO_MALLOC_STATS */
#ifndef NO_SEGMENT_TRAVERSAL
#define NO_SEGMENT_TRAVERSAL 0
#endif /* NO_SEGMENT_TRAVERSAL */
/*
mallopt tuning options. SVID/XPG defines four standard parameter
numbers for mallopt, normally defined in malloc.h. None of these
are used in this malloc, so setting them has no effect. But this
malloc does support the following options.
*/
#define M_TRIM_THRESHOLD (-1)
#define M_GRANULARITY (-2)
#define M_MMAP_THRESHOLD (-3)
/* ------------------------ Mallinfo declarations ------------------------ */
#if !NO_MALLINFO
/*
This version of malloc supports the standard SVID/XPG mallinfo
routine that returns a struct containing usage properties and
statistics. It should work on any system that has a
/usr/include/malloc.h defining struct mallinfo. The main
declaration needed is the mallinfo struct that is returned (by-copy)
by mallinfo(). The mallinfo struct contains a bunch of fields that
are not even meaningful in this version of malloc. These fields are
instead filled by mallinfo() with other numbers that might be of
interest.
HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
/usr/include/malloc.h file that includes a declaration of struct
mallinfo. If so, it is included; else a compliant version is
declared below. These must be precisely the same for mallinfo() to
work. The original SVID version of this struct, defined on most
systems with mallinfo, declares all fields as ints. But some others
define as unsigned long. If your system defines the fields using a
type of different width than listed here, you MUST #include your
system version and #define HAVE_USR_INCLUDE_MALLOC_H.
*/
/* #define HAVE_USR_INCLUDE_MALLOC_H */
#ifdef HAVE_USR_INCLUDE_MALLOC_H
#include "/usr/include/malloc.h"
#else /* HAVE_USR_INCLUDE_MALLOC_H */
#ifndef STRUCT_MALLINFO_DECLARED
/* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is defined */
#define _STRUCT_MALLINFO
#define STRUCT_MALLINFO_DECLARED 1
/*
  Usage statistics returned (by copy) from mallinfo()/mspace_mallinfo().
  Field names follow the SVID/XPG struct mallinfo convention; fields
  noted "always 0" exist only for layout compatibility with system
  malloc.h declarations and carry no information in this malloc.
  MALLINFO_FIELD_TYPE defaults to size_t (see definition above) but may
  be overridden to match a platform's own struct mallinfo field width.
*/
struct mallinfo {
	MALLINFO_FIELD_TYPE arena;	/* non-mmapped space allocated from system */
	MALLINFO_FIELD_TYPE ordblks;	/* number of free chunks */
	MALLINFO_FIELD_TYPE smblks;	/* always 0 */
	MALLINFO_FIELD_TYPE hblks;	/* always 0 */
	MALLINFO_FIELD_TYPE hblkhd;	/* space in mmapped regions */
	MALLINFO_FIELD_TYPE usmblks;	/* maximum total allocated space */
	MALLINFO_FIELD_TYPE fsmblks;	/* always 0 */
	MALLINFO_FIELD_TYPE uordblks;	/* total allocated space */
	MALLINFO_FIELD_TYPE fordblks;	/* total free space */
	MALLINFO_FIELD_TYPE keepcost;	/* releasable (via malloc_trim) space */
};
#endif /* STRUCT_MALLINFO_DECLARED */
#endif /* HAVE_USR_INCLUDE_MALLOC_H */
#endif /* NO_MALLINFO */
/*
Try to persuade compilers to inline. The most critical functions for
inlining are defined as macros, so these aren't used for them.
*/
#ifndef FORCEINLINE
#if defined(__GNUC__)
#define FORCEINLINE __inline __attribute__ ((always_inline))
#elif defined(_MSC_VER)
#define FORCEINLINE __forceinline
#endif
#endif
#ifndef NOINLINE
#if defined(__GNUC__)
#define NOINLINE __attribute__ ((noinline))
#elif defined(_MSC_VER)
#define NOINLINE __declspec(noinline)
#else
#define NOINLINE
#endif
#endif
#ifdef __cplusplus
extern "C" {
#ifndef FORCEINLINE
#define FORCEINLINE inline
#endif
#endif /* __cplusplus */
#ifndef FORCEINLINE
#define FORCEINLINE
#endif
#if !ONLY_MSPACES
/* ------------------- Declarations of public routines ------------------- */
#ifndef USE_DL_PREFIX
#define dlcalloc calloc
#define dlfree free
#define dlmalloc malloc
#define dlmemalign memalign
#define dlposix_memalign posix_memalign
#define dlrealloc realloc
#define dlrealloc_in_place realloc_in_place
#define dlvalloc valloc
#define dlpvalloc pvalloc
#define dlmallinfo mallinfo
#define dlmallopt mallopt
#define dlmalloc_trim malloc_trim
#define dlmalloc_stats malloc_stats
#define dlmalloc_usable_size malloc_usable_size
#define dlmalloc_footprint malloc_footprint
#define dlmalloc_max_footprint malloc_max_footprint
#define dlmalloc_footprint_limit malloc_footprint_limit
#define dlmalloc_set_footprint_limit malloc_set_footprint_limit
#define dlmalloc_inspect_all malloc_inspect_all
#define dlindependent_calloc independent_calloc
#define dlindependent_comalloc independent_comalloc
#define dlbulk_free bulk_free
#endif /* USE_DL_PREFIX */
/*
malloc(size_t n)
Returns a pointer to a newly allocated chunk of at least n bytes, or
null if no space is available, in which case errno is set to ENOMEM
on ANSI C systems.
If n is zero, malloc returns a minimum-sized chunk. (The minimum
size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
systems.) Note that size_t is an unsigned type, so calls with
arguments that would be negative if signed are interpreted as
requests for huge amounts of space, which will often fail. The
maximum supported value of n differs across systems, but is in all
cases less than the maximum representable value of a size_t.
*/
DLMALLOC_EXPORT void* dlmalloc(size_t);
/*
free(void* p)
Releases the chunk of memory pointed to by p, that had been previously
allocated using malloc or a related routine such as realloc.
It has no effect if p is null. If p was not malloced or already
freed, free(p) will by default cause the current program to abort.
*/
DLMALLOC_EXPORT void dlfree(void*);
/*
calloc(size_t n_elements, size_t element_size);
Returns a pointer to n_elements * element_size bytes, with all locations
set to zero.
*/
DLMALLOC_EXPORT void* dlcalloc(size_t, size_t);
/*
realloc(void* p, size_t n)
Returns a pointer to a chunk of size n that contains the same data
as does chunk p up to the minimum of (n, p's size) bytes, or null
if no space is available.
The returned pointer may or may not be the same as p. The algorithm
prefers extending p in most cases when possible, otherwise it
employs the equivalent of a malloc-copy-free sequence.
If p is null, realloc is equivalent to malloc.
If space is not available, realloc returns null, errno is set (if on
ANSI) and p is NOT freed.
if n is for fewer bytes than already held by p, the newly unused
space is lopped off and freed if possible. realloc with a size
argument of zero (re)allocates a minimum-sized chunk.
The old unix realloc convention of allowing the last-free'd chunk
to be used as an argument to realloc is not supported.
*/
DLMALLOC_EXPORT void* dlrealloc(void*, size_t);
/*
realloc_in_place(void* p, size_t n)
Resizes the space allocated for p to size n, only if this can be
done without moving p (i.e., only if there is adjacent space
available if n is greater than p's current allocated size, or n is
less than or equal to p's size). This may be used instead of plain
realloc if an alternative allocation strategy is needed upon failure
to expand space; for example, reallocation of a buffer that must be
memory-aligned or cleared. You can use realloc_in_place to trigger
these alternatives only when needed.
Returns p if successful; otherwise null.
*/
DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t);
/*
memalign(size_t alignment, size_t n);
Returns a pointer to a newly allocated chunk of n bytes, aligned
in accord with the alignment argument.
The alignment argument should be a power of two. If the argument is
not a power of two, the nearest greater power is used.
8-byte alignment is guaranteed by normal malloc calls, so don't
bother calling memalign with an argument of 8 or less.
Overreliance on memalign is a sure way to fragment space.
*/
DLMALLOC_EXPORT void* dlmemalign(size_t, size_t);
/*
int posix_memalign(void** pp, size_t alignment, size_t n);
Allocates a chunk of n bytes, aligned in accord with the alignment
argument. Differs from memalign only in that it (1) assigns the
allocated memory to *pp rather than returning it, (2) fails and
returns EINVAL if the alignment is not a power of two (3) fails and
returns ENOMEM if memory cannot be allocated.
*/
DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t);
/*
valloc(size_t n);
Equivalent to memalign(pagesize, n), where pagesize is the page
size of the system. If the pagesize is unknown, 4096 is used.
*/
DLMALLOC_EXPORT void* dlvalloc(size_t);
/*
mallopt(int parameter_number, int parameter_value)
Sets tunable parameters. The format is to provide a
(parameter-number, parameter-value) pair. mallopt then sets the
corresponding parameter to the argument value if it can (i.e., so
long as the value is meaningful), and returns 1 if successful else
0. To workaround the fact that mallopt is specified to use int,
not size_t parameters, the value -1 is specially treated as the
maximum unsigned size_t value.
SVID/XPG/ANSI defines four standard param numbers for mallopt,
normally defined in malloc.h. None of these are used in this malloc,
so setting them has no effect. But this malloc also supports other
options in mallopt. See below for details. Briefly, supported
parameters are as follows (listed defaults are for "typical"
configurations).
Symbol param # default allowed param values
M_TRIM_THRESHOLD -1 2*1024*1024 any (-1 disables)
M_GRANULARITY -2 page size any power of 2 >= page size
M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
*/
DLMALLOC_EXPORT int dlmallopt(int, int);
/*
malloc_footprint();
Returns the number of bytes obtained from the system. The total
number of bytes allocated by malloc, realloc etc., is less than this
value. Unlike mallinfo, this function returns only a precomputed
result, so can be called frequently to monitor memory consumption.
Even if locks are otherwise defined, this function does not use them,
so results might not be up to date.
*/
DLMALLOC_EXPORT size_t dlmalloc_footprint(void);
/*
malloc_max_footprint();
Returns the maximum number of bytes obtained from the system. This
value will be greater than current footprint if deallocated space
has been reclaimed by the system. The peak number of bytes allocated
by malloc, realloc etc., is less than this value. Unlike mallinfo,
this function returns only a precomputed result, so can be called
frequently to monitor memory consumption. Even if locks are
otherwise defined, this function does not use them, so results might
not be up to date.
*/
DLMALLOC_EXPORT size_t dlmalloc_max_footprint(void);
/*
malloc_footprint_limit();
Returns the number of bytes that the heap is allowed to obtain from
the system, returning the last value returned by
malloc_set_footprint_limit, or the maximum size_t value if
never set. The returned value reflects a permission. There is no
guarantee that this number of bytes can actually be obtained from
the system.
*/
DLMALLOC_EXPORT size_t dlmalloc_footprint_limit();
/*
malloc_set_footprint_limit();
Sets the maximum number of bytes to obtain from the system, causing
failure returns from malloc and related functions upon attempts to
exceed this value. The argument value may be subject to page
rounding to an enforceable limit; this actual value is returned.
Using an argument of the maximum possible size_t effectively
disables checks. If the argument is less than or equal to the
current malloc_footprint, then all future allocations that require
additional system memory will fail. However, invocation cannot
retroactively deallocate existing used memory.
*/
DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes);
#if MALLOC_INSPECT_ALL
/*
malloc_inspect_all(void(*handler)(void *start,
void *end,
size_t used_bytes,
void* callback_arg),
void* arg);
Traverses the heap and calls the given handler for each managed
region, skipping all bytes that are (or may be) used for bookkeeping
purposes. Traversal does not include chunks that have been
directly memory mapped. Each reported region begins at the start
address, and continues up to but not including the end address. The
first used_bytes of the region contain allocated data. If
used_bytes is zero, the region is unallocated. The handler is
invoked with the given callback argument. If locks are defined, they
are held during the entire traversal. It is a bad idea to invoke
other malloc functions from within the handler.
For example, to count the number of in-use chunks with size greater
than 1000, you could write:
static int count = 0;
void count_chunks(void* start, void* end, size_t used, void* arg) {
if (used >= 1000) ++count;
}
then:
malloc_inspect_all(count_chunks, NULL);
malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined.
*/
DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*),
void* arg);
#endif /* MALLOC_INSPECT_ALL */
#if !NO_MALLINFO
/*
mallinfo()
Returns (by copy) a struct containing various summary statistics:
arena: current total non-mmapped bytes allocated from system
ordblks: the number of free chunks
smblks: always zero.
hblks: current number of mmapped regions
hblkhd: total bytes held in mmapped regions
usmblks: the maximum total allocated space. This will be greater
than current total if trimming has occurred.
fsmblks: always zero
uordblks: current total allocated space (normal or mmapped)
fordblks: total free space
keepcost: the maximum number of bytes that could ideally be released
back to system via malloc_trim. ("ideally" means that
it ignores page restrictions etc.)
Because these fields are ints, but internal bookkeeping may
be kept as longs, the reported values may wrap around zero and
thus be inaccurate.
*/
DLMALLOC_EXPORT struct mallinfo dlmallinfo(void);
#endif /* NO_MALLINFO */
/*
independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
independent_calloc is similar to calloc, but instead of returning a
single cleared space, it returns an array of pointers to n_elements
independent elements that can hold contents of size elem_size, each
of which starts out cleared, and can be independently freed,
realloc'ed etc. The elements are guaranteed to be adjacently
allocated (this is not guaranteed to occur with multiple callocs or
mallocs), which may also improve cache locality in some
applications.
The "chunks" argument is optional (i.e., may be null, which is
probably the most typical usage). If it is null, the returned array
is itself dynamically allocated and should also be freed when it is
no longer needed. Otherwise, the chunks array must be of at least
n_elements in length. It is filled in with the pointers to the
chunks.
In either case, independent_calloc returns this pointer array, or
null if the allocation failed. If n_elements is zero and "chunks"
is null, it returns a chunk representing an array with zero elements
(which should be freed if not wanted).
Each element must be freed when it is no longer needed. This can be
done all at once using bulk_free.
independent_calloc simplifies and speeds up implementations of many
kinds of pools. It may also be useful when constructing large data
structures that initially have a fixed number of fixed-sized nodes,
but the number is not known at compile time, and some of the nodes
may later need to be freed. For example:
struct Node { int item; struct Node* next; };
struct Node* build_list() {
struct Node** pool;
int n = read_number_of_nodes_needed();
if (n <= 0) return 0;
pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
if (pool == 0) die();
// organize into a linked list...
struct Node* first = pool[0];
for (i = 0; i < n-1; ++i)
pool[i]->next = pool[i+1];
free(pool); // Can now free the array (or not, if it is needed later)
return first;
}
*/
DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**);
/*
independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
independent_comalloc allocates, all at once, a set of n_elements
chunks with sizes indicated in the "sizes" array. It returns
an array of pointers to these elements, each of which can be
independently freed, realloc'ed etc. The elements are guaranteed to
be adjacently allocated (this is not guaranteed to occur with
multiple callocs or mallocs), which may also improve cache locality
in some applications.
The "chunks" argument is optional (i.e., may be null). If it is null
the returned array is itself dynamically allocated and should also
be freed when it is no longer needed. Otherwise, the chunks array
must be of at least n_elements in length. It is filled in with the
pointers to the chunks.
In either case, independent_comalloc returns this pointer array, or
null if the allocation failed. If n_elements is zero and chunks is
null, it returns a chunk representing an array with zero elements
(which should be freed if not wanted).
Each element must be freed when it is no longer needed. This can be
done all at once using bulk_free.
independent_comalloc differs from independent_calloc in that each
element may have a different size, and also that it does not
automatically clear elements.
independent_comalloc can be used to speed up allocation in cases
where several structs or objects must always be allocated at the
same time. For example:
struct Head { ... }
struct Foot { ... }
void send_message(char* msg) {
int msglen = strlen(msg);
size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
void* chunks[3];
if (independent_comalloc(3, sizes, chunks) == 0)
die();
struct Head* head = (struct Head*)(chunks[0]);
char* body = (char*)(chunks[1]);
struct Foot* foot = (struct Foot*)(chunks[2]);
// ...
}
In general though, independent_comalloc is worth using only for
larger values of n_elements. For small values, you probably won't
detect enough difference from series of malloc calls to bother.
Overuse of independent_comalloc can increase overall memory usage,
since it cannot reuse existing noncontiguous small chunks that
might be available for some of the elements.
*/
DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**);
/*
bulk_free(void* array[], size_t n_elements)
Frees and clears (sets to null) each non-null pointer in the given
array. This is likely to be faster than freeing them one-by-one.
If footers are used, pointers that have been allocated in different
mspaces are not freed or cleared, and the count of all such pointers
is returned. For large arrays of pointers with poor locality, it
may be worthwhile to sort this array before calling bulk_free.
*/
DLMALLOC_EXPORT size_t dlbulk_free(void**, size_t n_elements);
/*
pvalloc(size_t n);
Equivalent to valloc(minimum-page-that-holds(n)), that is,
round up n to nearest pagesize.
*/
DLMALLOC_EXPORT void* dlpvalloc(size_t);
/*
malloc_trim(size_t pad);
If possible, gives memory back to the system (via negative arguments
to sbrk) if there is unused memory at the `high' end of the malloc
pool or in unused MMAP segments. You can call this after freeing
large blocks of memory to potentially reduce the system-level memory
requirements of a program. However, it cannot guarantee to reduce
memory. Under some allocation patterns, some large free blocks of
memory will be locked between two used chunks, so they cannot be
given back to the system.
The `pad' argument to malloc_trim represents the amount of free
trailing space to leave untrimmed. If this argument is zero, only
the minimum amount of memory to maintain internal data structures
will be left. Non-zero arguments can be supplied to maintain enough
trailing space to service future expected allocations without having
to re-obtain memory from the system.
Malloc_trim returns 1 if it actually released any memory, else 0.
*/
DLMALLOC_EXPORT int dlmalloc_trim(size_t);
/*
malloc_stats();
Prints on stderr the amount of space obtained from the system (both
via sbrk and mmap), the maximum amount (which may be more than
current if malloc_trim and/or munmap got called), and the current
number of bytes allocated via malloc (or realloc, etc) but not yet
freed. Note that this is the number of bytes allocated, not the
number requested. It will be larger than the number requested
because of alignment and bookkeeping overhead. Because it includes
alignment wastage as being in use, this figure may be greater than
zero even when no user-level chunks are allocated.
The reported current and maximum system memory can be inaccurate if
a program makes other calls to system memory allocation functions
(normally sbrk) outside of malloc.
malloc_stats prints only the most commonly interesting statistics.
More information can be obtained by calling mallinfo.
*/
DLMALLOC_EXPORT void dlmalloc_stats(void);
/*
malloc_usable_size(void* p);
Returns the number of bytes you can actually use in
an allocated chunk, which may be more than you requested (although
often not) due to alignment and minimum size constraints.
You can use this many bytes without worrying about
overwriting other allocated objects. This is not a particularly great
programming practice. malloc_usable_size can be more useful in
debugging and assertions, for example:
p = malloc(n);
assert(malloc_usable_size(p) >= 256);
*/
size_t dlmalloc_usable_size(void*);
#endif /* ONLY_MSPACES */
#if MSPACES
/*
mspace is an opaque type representing an independent
region of space that supports mspace_malloc, etc.
*/
typedef void* mspace;
/*
create_mspace creates and returns a new independent space with the
given initial capacity, or, if 0, the default granularity size. It
returns null if there is no system memory available to create the
space. If argument locked is non-zero, the space uses a separate
lock to control access. The capacity of the space will grow
dynamically as needed to service mspace_malloc requests. You can
control the sizes of incremental increases of this space by
compiling with a different DEFAULT_GRANULARITY or dynamically
setting with mallopt(M_GRANULARITY, value).
*/
DLMALLOC_EXPORT mspace create_mspace(size_t capacity, int locked);
/*
destroy_mspace destroys the given space, and attempts to return all
of its memory back to the system, returning the total number of
bytes freed. After destruction, the results of access to all memory
used by the space become undefined.
*/
DLMALLOC_EXPORT size_t destroy_mspace(mspace msp);
/*
create_mspace_with_base uses the memory supplied as the initial base
of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
space is used for bookkeeping, so the capacity must be at least this
large. (Otherwise 0 is returned.) When this initial space is
exhausted, additional memory will be obtained from the system.
Destroying this space will deallocate all additionally allocated
space (if possible) but not the initial base.
*/
DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked);
/*
mspace_track_large_chunks controls whether requests for large chunks
are allocated in their own untracked mmapped regions, separate from
others in this mspace. By default large chunks are not tracked,
which reduces fragmentation. However, such chunks are not
necessarily released to the system upon destroy_mspace. Enabling
tracking by setting to true may increase fragmentation, but avoids
leakage when relying on destroy_mspace to release all memory
allocated using this space. The function returns the previous
setting.
*/
DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable);
/*
mspace_malloc behaves as malloc, but operates within
the given space.
*/
DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes);
/*
mspace_free behaves as free, but operates within
the given space.
If compiled with FOOTERS==1, mspace_free is not actually needed.
free may be called instead of mspace_free because freed chunks from
any space are handled by their originating spaces.
*/
DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem);
/*
mspace_realloc behaves as realloc, but operates within
the given space.
If compiled with FOOTERS==1, mspace_realloc is not actually
needed. realloc may be called instead of mspace_realloc because
realloced chunks from any space are handled by their originating
spaces.
*/
DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize);
/*
mspace_calloc behaves as calloc, but operates within
the given space.
*/
DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
/*
mspace_memalign behaves as memalign, but operates within
the given space.
*/
DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
/*
mspace_independent_calloc behaves as independent_calloc, but
operates within the given space.
*/
DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements,
size_t elem_size, void* chunks[]);
/*
mspace_independent_comalloc behaves as independent_comalloc, but
operates within the given space.
*/
DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements,
size_t sizes[], void* chunks[]);
/*
mspace_footprint() returns the number of bytes obtained from the
system for this space.
*/
DLMALLOC_EXPORT size_t mspace_footprint(mspace msp);
/*
mspace_max_footprint() returns the peak number of bytes obtained from the
system for this space.
*/
DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp);
#if !NO_MALLINFO
/*
mspace_mallinfo behaves as mallinfo, but reports properties of
the given space.
*/
DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp);
#endif /* NO_MALLINFO */
/*
mspace_usable_size(const void* p) behaves the same as malloc_usable_size,
but for chunks allocated within mspaces.
*/
DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem);
/*
mspace_malloc_stats behaves as malloc_stats, but reports
properties of the given space.
*/
DLMALLOC_EXPORT void mspace_malloc_stats(mspace msp);
/*
mspace_trim behaves as malloc_trim, but
operates within the given space.
*/
DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad);
/*
An alias for mallopt.
*/
DLMALLOC_EXPORT int mspace_mallopt(int, int);
#endif /* MSPACES */
#ifdef __cplusplus
} /* end of extern "C" */
#endif /* __cplusplus */
/*
========================================================================
To make a fully customizable malloc.h header file, cut everything
above this line, put into file malloc.h, edit to suit, and #include it
on the next line, as well as in programs that use this malloc.
========================================================================
*/
/* #include "malloc.h" */
/*------------------------------ internal #includes ---------------------- */
#ifdef _MSC_VER
#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
#endif /* _MSC_VER */
#if !NO_MALLOC_STATS
#include <stdio.h> /* for printing in malloc_stats */
#endif /* NO_MALLOC_STATS */
#ifndef LACKS_ERRNO_H
#include <errno.h> /* for MALLOC_FAILURE_ACTION */
#endif /* LACKS_ERRNO_H */
#ifdef DEBUG
#if ABORT_ON_ASSERT_FAILURE
#undef assert
#define assert(x) if(!(x)) ABORT
#else /* ABORT_ON_ASSERT_FAILURE */
#include <assert.h>
#endif /* ABORT_ON_ASSERT_FAILURE */
#else /* DEBUG */
#ifndef assert
#define assert(x)
#endif
#define DEBUG 0
#endif /* DEBUG */
#if !defined(WIN32) && !defined(LACKS_TIME_H)
#include <time.h> /* for magic initialization */
#endif /* WIN32 */
#ifndef LACKS_STDLIB_H
#include <stdlib.h> /* for abort() */
#endif /* LACKS_STDLIB_H */
#ifndef LACKS_STRING_H
#include <string.h> /* for memset etc */
#endif /* LACKS_STRING_H */
#if USE_BUILTIN_FFS
#ifndef LACKS_STRINGS_H
#include <strings.h> /* for ffs */
#endif /* LACKS_STRINGS_H */
#endif /* USE_BUILTIN_FFS */
#if HAVE_MMAP
#ifndef LACKS_SYS_MMAN_H
/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
#if (defined(linux) && !defined(__USE_GNU))
#define __USE_GNU 1
#include <sys/mman.h> /* for mmap */
#undef __USE_GNU
#else
#include <sys/mman.h> /* for mmap */
#endif /* linux */
#endif /* LACKS_SYS_MMAN_H */
#ifndef LACKS_FCNTL_H
#include <fcntl.h>
#endif /* LACKS_FCNTL_H */
#endif /* HAVE_MMAP */
#ifndef LACKS_UNISTD_H
#include <unistd.h> /* for sbrk, sysconf */
#else /* LACKS_UNISTD_H */
#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
extern void* sbrk(ptrdiff_t);
#endif /* FreeBSD etc */
#endif /* LACKS_UNISTD_H */
/* Declarations for locking */
#if USE_LOCKS
#ifndef WIN32
#if defined (__SVR4) && defined (__sun) /* solaris */
#include <thread.h>
#elif !defined(LACKS_SCHED_H)
#include <sched.h>
#endif /* solaris or LACKS_SCHED_H */
#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
#include <pthread.h>
#endif /* USE_RECURSIVE_LOCKS ... */
#elif defined(_MSC_VER)
#ifndef _M_AMD64
/* These are already defined on AMD64 builds */
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* _M_AMD64 */
#pragma intrinsic (_InterlockedCompareExchange)
#pragma intrinsic (_InterlockedExchange)
#define interlockedcompareexchange _InterlockedCompareExchange
#define interlockedexchange _InterlockedExchange
#elif defined(WIN32) && defined(__GNUC__)
#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
#define interlockedexchange __sync_lock_test_and_set
#endif /* Win32 */
#else /* USE_LOCKS */
#endif /* USE_LOCKS */
#ifndef LOCK_AT_FORK
#define LOCK_AT_FORK 0
#endif
/* Declarations for bit scanning on win32 */
#if defined(_MSC_VER) && _MSC_VER>=1300
#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#ifdef __cplusplus
}
#endif /* __cplusplus */
#define BitScanForward _BitScanForward
#define BitScanReverse _BitScanReverse
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse)
#endif /* BitScanForward */
#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
#ifndef WIN32
#ifndef malloc_getpagesize
# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
# ifndef _SC_PAGE_SIZE
# define _SC_PAGE_SIZE _SC_PAGESIZE
# endif
# endif
# ifdef _SC_PAGE_SIZE
# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
# else
# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
extern size_t getpagesize();
# define malloc_getpagesize getpagesize()
# else
# ifdef WIN32 /* use supplied emulation of getpagesize */
# define malloc_getpagesize getpagesize()
# else
# ifndef LACKS_SYS_PARAM_H
# include <sys/param.h>
# endif
# ifdef EXEC_PAGESIZE
# define malloc_getpagesize EXEC_PAGESIZE
# else
# ifdef NBPG
# ifndef CLSIZE
# define malloc_getpagesize NBPG
# else
# define malloc_getpagesize (NBPG * CLSIZE)
# endif
# else
# ifdef NBPC
# define malloc_getpagesize NBPC
# else
# ifdef PAGESIZE
# define malloc_getpagesize PAGESIZE
# else /* just guess */
# define malloc_getpagesize ((size_t)4096U)
# endif
# endif
# endif
# endif
# endif
# endif
# endif
#endif
#endif
/* ------------------- size_t and alignment properties -------------------- */
/* The byte and bit size of a size_t */
#define SIZE_T_SIZE (sizeof(size_t))
#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO ((size_t)0)
#define SIZE_T_ONE ((size_t)1)
#define SIZE_T_TWO ((size_t)2)
#define SIZE_T_FOUR ((size_t)4)
#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
/* True if address a has acceptable alignment */
#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
/* the number of bytes to offset an address to align it */
#define align_offset(A)\
((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
/* -------------------------- MMAP preliminaries ------------------------- */
/*
If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
checks to fail so compiler optimizer can delete code rather than
using so many "#if"s.
*/
/* MORECORE and MMAP must return MFAIL on failure */
#define MFAIL ((void*)(MAX_SIZE_T))
#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */
#if HAVE_MMAP
#ifndef WIN32
#define MUNMAP_DEFAULT(a, s) munmap((a), (s))
#define MMAP_PROT (PROT_READ|PROT_WRITE)
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif /* MAP_ANON */
#ifdef MAP_ANONYMOUS
#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
#define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
#else /* MAP_ANONYMOUS */
/*
Nearly all versions of mmap support MAP_ANONYMOUS, so the following
is unlikely to be needed, but is supplied just in case.
*/
#define MMAP_FLAGS (MAP_PRIVATE)
static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
(dev_zero_fd = open("/dev/zero", O_RDWR), \
mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
#endif /* MAP_ANONYMOUS */
#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
#else /* WIN32 */
/* Win32 MMAP via VirtualAlloc */
/* Win32 substitute for mmap: reserve and commit zero-filled,
   read/write pages via VirtualAlloc.  Returns MFAIL on failure,
   matching the mmap-style contract used by CALL_MMAP. */
static FORCEINLINE void* win32mmap(size_t size) {
  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  if (ptr == 0) {
    return MFAIL;
  }
  return ptr;
}
/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
/* Win32 substitute for direct (large-request) mmap.  MEM_TOP_DOWN asks
   for high addresses so large one-off allocations do not interfere
   with the normally-growing heap segments.  Returns MFAIL on failure. */
static FORCEINLINE void* win32direct_mmap(size_t size) {
  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
                           PAGE_READWRITE);
  if (ptr == 0) {
    return MFAIL;
  }
  return ptr;
}
/* This function supports releasing coalesced segments */
/* Win32 substitute for munmap.  A coalesced segment may span several
   distinct VirtualAlloc allocations, so walk the range and release one
   allocation per iteration.  Returns 0 on success, -1 on any failure
   (mmap-style result codes). */
static FORCEINLINE int win32munmap(void* ptr, size_t size) {
  MEMORY_BASIC_INFORMATION minfo;
  char* cptr = (char*)ptr;
  while (size) {
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
      return -1;
    /* Only proceed if cptr is exactly the base of a committed
       allocation that fits entirely within the remaining range;
       anything else indicates the range was not obtained from
       win32mmap/win32direct_mmap and must not be freed. */
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
      return -1;
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
      return -1;
    /* Advance past the allocation just released. */
    cptr += minfo.RegionSize;
    size -= minfo.RegionSize;
  }
  return 0;
}
#define MMAP_DEFAULT(s) win32mmap(s)
#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s))
#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)
#endif /* WIN32 */
#endif /* HAVE_MMAP */
#if HAVE_MREMAP
#ifndef WIN32
#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
#endif /* WIN32 */
#endif /* HAVE_MREMAP */
/**
* Define CALL_MORECORE
*/
#if HAVE_MORECORE
#ifdef MORECORE
#define CALL_MORECORE(S) MORECORE(S)
#else /* MORECORE */
#define CALL_MORECORE(S) MORECORE_DEFAULT(S)
#endif /* MORECORE */
#else /* HAVE_MORECORE */
#define CALL_MORECORE(S) MFAIL
#endif /* HAVE_MORECORE */
/**
* Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
*/
#if HAVE_MMAP
#define USE_MMAP_BIT (SIZE_T_ONE)
#ifdef MMAP
#define CALL_MMAP(s) MMAP(s)
#else /* MMAP */
#define CALL_MMAP(s) MMAP_DEFAULT(s)
#endif /* MMAP */
#ifdef MUNMAP
#define CALL_MUNMAP(a, s) MUNMAP((a), (s))
#else /* MUNMAP */
#define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))
#endif /* MUNMAP */
#ifdef DIRECT_MMAP
#define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
#else /* DIRECT_MMAP */
#define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
#endif /* DIRECT_MMAP */
#else /* HAVE_MMAP */
#define USE_MMAP_BIT (SIZE_T_ZERO)
#define MMAP(s) MFAIL
#define MUNMAP(a, s) (-1)
#define DIRECT_MMAP(s) MFAIL
#define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
#define CALL_MMAP(s) MMAP(s)
#define CALL_MUNMAP(a, s) MUNMAP((a), (s))
#endif /* HAVE_MMAP */
/**
* Define CALL_MREMAP
*/
#if HAVE_MMAP && HAVE_MREMAP
#ifdef MREMAP
#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
#else /* MREMAP */
#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
#endif /* MREMAP */
#else /* HAVE_MMAP && HAVE_MREMAP */
#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
#endif /* HAVE_MMAP && HAVE_MREMAP */
/* mstate bit set if contiguous morecore disabled or failed */
#define USE_NONCONTIGUOUS_BIT (4U)
/* segment bit set in create_mspace_with_base */
#define EXTERN_BIT (8U)
/* --------------------------- Lock preliminaries ------------------------ */
/*
When locks are defined, there is one global lock, plus
one per-mspace lock.
  The global lock ensures that mparams.magic and other unique
mparams values are initialized only once. It also protects
sequences of calls to MORECORE. In many cases sys_alloc requires
two calls, that should not be interleaved with calls by other
threads. This does not protect against direct calls to MORECORE
by other threads not using this lock, so there is still code to
cope the best we can on interference.
Per-mspace locks surround calls to malloc, free, etc.
By default, locks are simple non-reentrant mutexes.
Because lock-protected regions generally have bounded times, it is
OK to use the supplied simple spinlocks. Spinlocks are likely to
improve performance for lightly contended applications, but worsen
performance under heavy contention.
If USE_LOCKS is > 1, the definitions of lock routines here are
bypassed, in which case you will need to define the type MLOCK_T,
and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK
and TRY_LOCK. You must also declare a
static MLOCK_T malloc_global_mutex = { initialization values };.
*/
#if !USE_LOCKS
#define USE_LOCK_BIT (0U)
#define INITIAL_LOCK(l) (0)
#define DESTROY_LOCK(l) (0)
#define ACQUIRE_MALLOC_GLOBAL_LOCK()
#define RELEASE_MALLOC_GLOBAL_LOCK()
#else
#if USE_LOCKS > 1
/* ----------------------- User-defined locks ------------------------ */
/* Define your own lock implementation here */
/* #define INITIAL_LOCK(lk) ... */
/* #define DESTROY_LOCK(lk) ... */
/* #define ACQUIRE_LOCK(lk) ... */
/* #define RELEASE_LOCK(lk) ... */
/* #define TRY_LOCK(lk) ... */
/* static MLOCK_T malloc_global_mutex = ... */
#elif USE_SPIN_LOCKS
/* First, define CAS_LOCK and CLEAR_LOCK on ints */
/* Note CAS_LOCK defined to return 0 on success */
#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
#define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1)
#define CLEAR_LOCK(sl) __sync_lock_release(sl)
#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
/* Custom spin locks for older gcc on x86 */
/* Try to acquire the spin lock word with a lock-prefixed cmpxchg:
   if *sl == 0, atomically set it to 1.  Returns the prior value of
   *sl (cmpxchg leaves it in EAX), so 0 means the lock was acquired
   and nonzero means it was already held. */
static FORCEINLINE int x86_cas_lock(int *sl) {
  int ret;
  int val = 1;
  int cmp = 0;
  __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
                        : "=a" (ret)
                        : "r" (val), "m" (*(sl)), "0"(cmp)
                        : "memory", "cc");
  return ret;
}
/* Release the spin lock by atomically exchanging 0 into *sl.
   xchg carries an implicit lock prefix and acts as a full barrier;
   the previous value read into `ret` is intentionally discarded. */
static FORCEINLINE void x86_clear_lock(int* sl) {
  assert(*sl != 0); /* must only be called while the lock is held */
  int prev = 0;
  int ret;
  __asm__ __volatile__ ("lock; xchgl %0, %1"
                        : "=r" (ret)
                        : "m" (*(sl)), "0"(prev)
                        : "memory");
}
#define CAS_LOCK(sl) x86_cas_lock(sl)
#define CLEAR_LOCK(sl) x86_clear_lock(sl)
#else /* Win32 MSC */
#define CAS_LOCK(sl) interlockedexchange(sl, (LONG)1)
#define CLEAR_LOCK(sl) interlockedexchange (sl, (LONG)0)
#endif /* ... gcc spins locks ... */
/* How to yield for a spin lock */
#define SPINS_PER_YIELD 63
#if defined(_MSC_VER)
#define SLEEP_EX_DURATION 50 /* delay for yield/sleep */
#define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE)
#elif defined (__SVR4) && defined (__sun) /* solaris */
#define SPIN_LOCK_YIELD thr_yield();
#elif !defined(LACKS_SCHED_H)
#define SPIN_LOCK_YIELD sched_yield();
#else
#define SPIN_LOCK_YIELD
#endif /* ... yield ... */
#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
/* Plain spin locks use single word (embedded in malloc_states) */
/* Spin until the plain (non-recursive) lock word is acquired, yielding
   the CPU every SPINS_PER_YIELD failed attempts so the current holder
   can make progress.  Always returns 0 (acquisition eventually
   succeeds). */
static int spin_acquire_lock(int *sl) {
  int attempts = 0;
  for (;;) {
    /* Test-and-test-and-set: only attempt the atomic operation when
       the lock word appears free, reducing cache-line contention. */
    if (*(volatile int *)sl == 0 && !CAS_LOCK(sl)) {
      return 0;
    }
    if ((++attempts & SPINS_PER_YIELD) == 0) {
      SPIN_LOCK_YIELD;
    }
  }
}
#define MLOCK_T int
#define TRY_LOCK(sl) !CAS_LOCK(sl)
#define RELEASE_LOCK(sl) CLEAR_LOCK(sl)
#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)
#define INITIAL_LOCK(sl) (*sl = 0)
#define DESTROY_LOCK(sl) (0)
static MLOCK_T malloc_global_mutex = 0;
#else /* USE_RECURSIVE_LOCKS */
/* types for lock owners */
#ifdef WIN32
#define THREAD_ID_T DWORD
#define CURRENT_THREAD GetCurrentThreadId()
#define EQ_OWNER(X,Y) ((X) == (Y))
#else
/*
Note: the following assume that pthread_t is a type that can be
initialized to (casted) zero. If this is not the case, you will need to
somehow redefine these or not use spin locks.
*/
#define THREAD_ID_T pthread_t
#define CURRENT_THREAD pthread_self()
#define EQ_OWNER(X,Y) pthread_equal(X, Y)
#endif
struct malloc_recursive_lock {
int sl;
unsigned int c;
THREAD_ID_T threadid;
};
#define MLOCK_T struct malloc_recursive_lock
static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};
/* Release one level of a recursive lock: decrement the hold count and
   only clear the underlying spin word when the outermost hold is
   released. */
static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) {
  assert(lk->sl != 0); /* caller must actually hold the lock */
  if (--lk->c == 0) {
    CLEAR_LOCK(&lk->sl);
  }
}
/* Acquire a recursive lock, spinning (with periodic yields) until it
   is obtained.  If the calling thread already owns the lock, just
   bump the hold count.  Always returns 0. */
static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
  THREAD_ID_T mythreadid = CURRENT_THREAD;
  int spins = 0;
  for (;;) {
    if (*((volatile int *)(&lk->sl)) == 0) {
      /* Lock word appears free: try to take it. */
      if (!CAS_LOCK(&lk->sl)) {
        /* Acquired: record ownership; threadid is only read while
           sl != 0, so this ordering is safe. */
        lk->threadid = mythreadid;
        lk->c = 1;
        return 0;
      }
    }
    else if (EQ_OWNER(lk->threadid, mythreadid)) {
      /* Re-entrant acquisition by the current owner. */
      ++lk->c;
      return 0;
    }
    if ((++spins & SPINS_PER_YIELD) == 0) {
      SPIN_LOCK_YIELD;
    }
  }
}
/* Non-blocking acquire of a recursive lock.  Returns 1 if the lock was
   obtained (either freshly or re-entrantly by the current owner),
   0 if it is held by another thread.  Note: the success/failure sense
   is inverted relative to CAS_LOCK. */
static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) {
  THREAD_ID_T mythreadid = CURRENT_THREAD;
  if (*((volatile int *)(&lk->sl)) == 0) {
    if (!CAS_LOCK(&lk->sl)) {
      /* Acquired: record ownership and start the hold count. */
      lk->threadid = mythreadid;
      lk->c = 1;
      return 1;
    }
  }
  else if (EQ_OWNER(lk->threadid, mythreadid)) {
    /* Already owned by this thread: nest one level deeper. */
    ++lk->c;
    return 1;
  }
  return 0;
}
#define RELEASE_LOCK(lk) recursive_release_lock(lk)
#define TRY_LOCK(lk) recursive_try_lock(lk)
#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk)
#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
#define DESTROY_LOCK(lk) (0)
#endif /* USE_RECURSIVE_LOCKS */
#elif defined(WIN32) /* Win32 critical sections */
#define MLOCK_T CRITICAL_SECTION
#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0)
#define RELEASE_LOCK(lk) LeaveCriticalSection(lk)
#define TRY_LOCK(lk) TryEnterCriticalSection(lk)
#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0)
#define NEED_GLOBAL_LOCK_INIT
static MLOCK_T malloc_global_mutex;
static volatile LONG malloc_global_mutex_status;
/* Use spin loop to initialize global lock */
/* One-time, race-free initialization of the Win32 global critical
   section.  malloc_global_mutex_status acts as a tri-state flag:
   0 = uninitialized, < 0 = initialization in progress, > 0 = ready.
   Competing threads spin (sleeping briefly) until one of them wins
   the compare-exchange and completes the initialization. */
static void init_malloc_global_mutex() {
  for (;;) {
    long stat = malloc_global_mutex_status;
    if (stat > 0)
      return;
    /* transition to < 0 while initializing, then to > 0 when done */
    if (stat == 0 &&
        interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
      InitializeCriticalSection(&malloc_global_mutex);
      interlockedexchange(&malloc_global_mutex_status, (LONG)1);
      return;
    }
    /* Another thread is initializing: yield and re-check. */
    SleepEx(0, FALSE);
  }
}
#else /* pthreads-based locks */
#define MLOCK_T pthread_mutex_t
#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)
#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)
#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk))
#define INITIAL_LOCK(lk) pthread_init_lock(lk)
#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)
#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
/* Cope with old-style linux recursive lock initialization by adding */
/* skipped internal declaration from pthread.h */
extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
int __kind));
#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
#endif /* USE_RECURSIVE_LOCKS ... */
static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Initialize a pthread-mutex-based lock, optionally recursive when
   USE_RECURSIVE_LOCKS is enabled.  Returns 0 on success, 1 on failure.
   Fix: the original leaked the initialized pthread_mutexattr_t when
   pthread_mutexattr_settype or pthread_mutex_init failed; POSIX
   requires every successful attr_init to be paired with attr_destroy. */
static int pthread_init_lock (MLOCK_T *lk) {
  pthread_mutexattr_t attr;
  if (pthread_mutexattr_init(&attr)) return 1;
#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) {
    pthread_mutexattr_destroy(&attr);
    return 1;
  }
#endif
  if (pthread_mutex_init(lk, &attr)) {
    pthread_mutexattr_destroy(&attr);
    return 1;
  }
  if (pthread_mutexattr_destroy(&attr)) return 1;
  return 0;
}
#endif /* ... lock types ... */
/* Common code for all lock types */
#define USE_LOCK_BIT (2U)
#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
#endif
#ifndef RELEASE_MALLOC_GLOBAL_LOCK
#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
#endif
#endif /* USE_LOCKS */
/* ----------------------- Chunk representations ------------------------ */
/*
(The following includes lightly edited explanations by Colin Plumb.)
The malloc_chunk declaration below is misleading (but accurate and
necessary). It declares a "view" into memory allowing access to
necessary fields at known offsets from a given base.
Chunks of memory are maintained using a `boundary tag' method as
originally described by Knuth. (See the paper by Paul Wilson
ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
techniques.) Sizes of free chunks are stored both in the front of
each chunk and at the end. This makes consolidating fragmented
chunks into bigger chunks fast. The head fields also hold bits
representing whether chunks are free or in use.
Here are some pictures to make it clearer. They are "exploded" to
show that the state of a chunk can be thought of as extending from
the high 31 bits of the head field of its header through the
prev_foot and PINUSE_BIT bit of the following chunk header.
A chunk that's in use looks like:
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Size of previous chunk (if P = 0) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
| Size of this chunk 1| +-+
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+- -+
| |
+- -+
| :
+- size - sizeof(size_t) available payload bytes -+
: |
chunk-> +- -+
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
| Size of next chunk (may or may not be in use) | +-+
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
And if it's free, it looks like this:
chunk-> +- -+
| User payload (must be in use, or we would have merged!) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
| Size of this chunk 0| +-+
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Next pointer |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Prev pointer |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| :
+- size - sizeof(struct chunk) unused bytes -+
: |
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Size of this chunk |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
| Size of next chunk (must be in use, or we would have merged)| +-+
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| :
+- User payload -+
: |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|0|
+-+
Note that since we always merge adjacent free chunks, the chunks
adjacent to a free chunk must be in use.
Given a pointer to a chunk (which can be derived trivially from the
payload pointer) we can, in O(1) time, find out whether the adjacent
chunks are free, and if so, unlink them from the lists that they
are on and merge them with the current chunk.
Chunks always begin on even word boundaries, so the mem portion
(which is returned to the user) is also on an even word boundary, and
thus at least double-word aligned.
The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
chunk size (which is always a multiple of two words), is an in-use
bit for the *previous* chunk. If that bit is *clear*, then the
word before the current chunk size contains the previous chunk
size, and can be used to find the front of the previous chunk.
The very first chunk allocated always has this bit set, preventing
access to non-existent (or non-owned) memory. If pinuse is set for
any given chunk, then you CANNOT determine the size of the
previous chunk, and might even get a memory addressing fault when
trying to do so.
The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
the chunk size redundantly records whether the current chunk is
inuse (unless the chunk is mmapped). This redundancy enables usage
checks within free and realloc, and reduces indirection when freeing
and consolidating chunks.
Each freshly allocated chunk must have both cinuse and pinuse set.
That is, each allocated chunk borders either a previously allocated
and still in-use chunk, or the base of its memory arena. This is
ensured by making all allocations from the `lowest' part of any
found chunk. Further, no free chunk physically borders another one,
so each free chunk is known to be preceded and followed by either
inuse chunks or the ends of memory.
Note that the `foot' of the current chunk is actually represented
as the prev_foot of the NEXT chunk. This makes it easier to
deal with alignments etc but can be very confusing when trying
to extend or adapt this code.
The exceptions to all this are
1. The special chunk `top' is the top-most available chunk (i.e.,
the one bordering the end of available memory). It is treated
specially. Top is never included in any bin, is used only if
no other chunk is available, and is released back to the
system if it is very large (see M_TRIM_THRESHOLD). In effect,
the top chunk is treated as larger (and thus less well
fitting) than any other available chunk. The top chunk
doesn't update its trailing size field since there is no next
contiguous chunk that would have to index off it. However,
space is still allocated for it (TOP_FOOT_SIZE) to enable
separation or merging when space is extended.
  2. Chunks allocated via mmap, have both cinuse and pinuse bits
cleared in their head fields. Because they are allocated
one-by-one, each must carry its own prev_foot field, which is
also used to hold the offset this chunk has within its mmapped
region, which is needed to preserve alignment. Each mmapped
chunk is trailed by the first two fields of a fake next-chunk
for sake of usage checks.
*/
/* Overlaid "view" of a chunk header (see the explanation above).
   prev_foot/head are always meaningful; fd/bk overlay the start of the
   user payload and are valid ONLY while the chunk is free and linked
   into a smallbin list. */
struct malloc_chunk {
  size_t prev_foot; /* Size of previous chunk (if free). */
  size_t head; /* Size and inuse bits. */
  struct malloc_chunk* fd; /* double links -- used only if free. */
  struct malloc_chunk* bk;
};
typedef struct malloc_chunk mchunk;
typedef struct malloc_chunk* mchunkptr;
typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
typedef unsigned int bindex_t; /* Described below */
typedef unsigned int binmap_t; /* Described below */
typedef unsigned int flag_t; /* The type of various bit flag sets */
/* ------------------- Chunks sizes and alignments ----------------------- */
#define MCHUNK_SIZE (sizeof(mchunk))
#if FOOTERS
#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
#else /* FOOTERS */
#define CHUNK_OVERHEAD (SIZE_T_SIZE)
#endif /* FOOTERS */
/* MMapped chunks need a second word of overhead ... */
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
/* ... and additional padding for fake next-chunk at foot */
#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
/* The smallest size we can malloc is an aligned minimal chunk */
#define MIN_CHUNK_SIZE\
((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
/* conversion from malloc headers to user pointers, and back */
#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
/* chunk associated with aligned address A */
#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
/* Bounds on request (not chunk) sizes. */
#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
/* pad request bytes into a usable size */
#define pad_request(req) \
(((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
/* pad request, checking for minimum (but not maximum) */
#define request2size(req) \
(((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
/* ------------------ Operations on head and foot fields ----------------- */
/*
The head field of a chunk is or'ed with PINUSE_BIT when previous
adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
use, unless mmapped, in which case both bits are cleared.
FLAG4_BIT is not used by this malloc, but might be useful in extensions.
*/
#define PINUSE_BIT (SIZE_T_ONE)
#define CINUSE_BIT (SIZE_T_TWO)
#define FLAG4_BIT (SIZE_T_FOUR)
#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
/* Head value for fenceposts */
#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
/* extraction of fields from head words */
#define cinuse(p) ((p)->head & CINUSE_BIT)
#define pinuse(p) ((p)->head & PINUSE_BIT)
#define flag4inuse(p) ((p)->head & FLAG4_BIT)
#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
#define chunksize(p) ((p)->head & ~(FLAG_BITS))
#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
#define set_flag4(p) ((p)->head |= FLAG4_BIT)
#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT)
/* Treat space at ptr +/- offset as a chunk */
#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
/* Ptr to next or previous physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
/* extract next chunk's pinuse bit */
#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
/* Get/set size at footer */
#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
/* Set size, pinuse bit, and foot */
#define set_size_and_pinuse_of_free_chunk(p, s)\
((p)->head = (s|PINUSE_BIT), set_foot(p, s))
/* Set size, pinuse bit, foot, and clear next pinuse */
#define set_free_with_pinuse(p, s, n)\
(clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
/* Get the internal overhead associated with chunk p */
#define overhead_for(p)\
(is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
/* Return true if malloced space is not necessarily cleared */
#if MMAP_CLEARS
#define calloc_must_clear(p) (!is_mmapped(p))
#else /* MMAP_CLEARS */
#define calloc_must_clear(p) (1)
#endif /* MMAP_CLEARS */
/* ---------------------- Overlaid data structures ----------------------- */
/*
When chunks are not in use, they are treated as nodes of either
lists or trees.
"Small" chunks are stored in circular doubly-linked lists, and look
like this:
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Size of previous chunk |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
`head:' | Size of chunk, in bytes |P|
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Forward pointer to next chunk in list |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Back pointer to previous chunk in list |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Unused space (may be 0 bytes long) .
. .
. |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
`foot:' | Size of chunk, in bytes |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Larger chunks are kept in a form of bitwise digital trees (aka
tries) keyed on chunksizes. Because malloc_tree_chunks are only for
free chunks greater than 256 bytes, their size doesn't impose any
constraints on user chunk sizes. Each node looks like:
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Size of previous chunk |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
`head:' | Size of chunk, in bytes |P|
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Forward pointer to next chunk of same size |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Back pointer to previous chunk of same size |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Pointer to left child (child[0]) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Pointer to right child (child[1]) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Pointer to parent |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| bin index of this chunk |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Unused space .
. |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
`foot:' | Size of chunk, in bytes |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Each tree holding treenodes is a tree of unique chunk sizes. Chunks
of the same size are arranged in a circularly-linked list, with only
the oldest chunk (the next to be used, in our FIFO ordering)
actually in the tree. (Tree members are distinguished by a non-null
  parent pointer.)  If a chunk with the same size as an existing node
is inserted, it is linked off the existing node using pointers that
work in the same way as fd/bk pointers of small chunks.
Each tree contains a power of 2 sized range of chunk sizes (the
  smallest is 0x100 <= x < 0x180), which is divided in half at each
  tree level, with the chunks in the smaller half of the range (0x100
  <= x < 0x140 for the top node) in the left subtree and the larger
half (0x140 <= x < 0x180) in the right subtree. This is, of course,
done by inspecting individual bits.
Using these rules, each node's left subtree contains all smaller
sizes than its right subtree. However, the node at the root of each
subtree has no particular ordering relationship to either. (The
dividing line between the subtree sizes is based on trie relation.)
If we remove the last chunk of a given size from the interior of the
tree, we need to replace it with a leaf node. The tree ordering
rules permit a node to be replaced by any leaf below it.
The smallest chunk in a tree (a common operation in a best-fit
allocator) can be found by walking a path to the leftmost leaf in
the tree. Unlike a usual binary tree, where we follow left child
pointers until we reach a null, here we follow the right child
pointer any time the left one is null, until we reach a leaf with
both child pointers null. The smallest chunk in the tree will be
somewhere along that path.
The worst case number of steps to add, find, or remove a node is
bounded by the number of bits differentiating chunks within
bins. Under current bin calculations, this ranges from 6 up to 21
(for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
is of course much better.
*/
/* Node of the size-keyed bitwise trie used for large free chunks.
   child/parent/index overlay the free chunk's payload and are valid
   only while the chunk is free and linked into a treebin. */
struct malloc_tree_chunk {
  /* The first four fields must be compatible with malloc_chunk */
  size_t prev_foot;
  size_t head;
  struct malloc_tree_chunk* fd;
  struct malloc_tree_chunk* bk;
  struct malloc_tree_chunk* child[2]; /* left/right subtrie links */
  struct malloc_tree_chunk* parent; /* non-null iff node is in the tree */
  bindex_t index; /* treebin index this node belongs to */
};
typedef struct malloc_tree_chunk tchunk;
typedef struct malloc_tree_chunk* tchunkptr;
typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
/* A little helper macro for trees */
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
/* ----------------------------- Segments -------------------------------- */
/*
Each malloc space may include non-contiguous segments, held in a
list headed by an embedded malloc_segment record representing the
top-most space. Segments also include flags holding properties of
the space. Large chunks that are directly allocated by mmap are not
included in this list. They are instead independently created and
destroyed without otherwise keeping track of them.
Segment management mainly comes into play for spaces allocated by
MMAP. Any call to MMAP might or might not return memory that is
adjacent to an existing segment. MORECORE normally contiguously
extends the current space, so this space is almost always adjacent,
which is simpler and faster to deal with. (This is why MORECORE is
used preferentially to MMAP when both are available -- see
sys_alloc.) When allocating using MMAP, we don't use any of the
hinting mechanisms (inconsistently) supported in various
implementations of unix mmap, or distinguish reserving from
committing memory. Instead, we just ask for space, and exploit
contiguity when we get it. It is probably possible to do
better than this on some systems, but no general scheme seems
to be significantly better.
Management entails a simpler variant of the consolidation scheme
used for chunks to reduce fragmentation -- new adjacent memory is
normally prepended or appended to an existing segment. However,
there are limitations compared to chunk consolidation that mostly
reflect the fact that segment processing is relatively infrequent
(occurring only when getting memory from system) and that we
don't expect to have huge numbers of segments:
* Segments are not indexed, so traversal requires linear scans. (It
would be possible to index these, but is not worth the extra
overhead and complexity for most programs on most platforms.)
* New segments are only appended to old ones when holding top-most
memory; if they cannot be prepended to others, they are held in
different segments.
Except for the top-most segment of an mstate, each segment record
is kept at the tail of its segment. Segments are added by pushing
segment records onto the list headed by &mstate.seg for the
containing mstate.
Segment flags control allocation/merge/deallocation policies:
* If EXTERN_BIT set, then we did not allocate this segment,
and so should not try to deallocate or merge with others.
(This currently holds only for the initial segment passed
into create_mspace_with_base.)
* If USE_MMAP_BIT set, the segment may be merged with
other surrounding mmapped segments and trimmed/de-allocated
using munmap.
* If neither bit is set, then the segment was obtained using
MORECORE so can be merged with surrounding MORECORE'd segments
and deallocated/trimmed using MORECORE with negative arguments.
*/
struct malloc_segment {
char* base; /* base address */
size_t size; /* allocated size */
struct malloc_segment* next; /* ptr to next segment */
flag_t sflags; /* mmap and extern flag */
};
/* Nonzero if segment S was obtained via mmap (may be munmapped/merged) */
#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
/* Nonzero if segment S was supplied externally (never deallocated here) */
#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
typedef struct malloc_segment msegment;
typedef struct malloc_segment* msegmentptr;
/* ---------------------------- malloc_state ----------------------------- */
/*
A malloc_state holds all of the bookkeeping for a space.
The main fields are:
Top
The topmost chunk of the currently active segment. Its size is
cached in topsize. The actual size of topmost space is
topsize+TOP_FOOT_SIZE, which includes space reserved for adding
fenceposts and segment records if necessary when getting more
space from the system. The size at which to autotrim top is
cached from mparams in trim_check, except that it is disabled if
an autotrim fails.
Designated victim (dv)
This is the preferred chunk for servicing small requests that
don't have exact fits. It is normally the chunk split off most
recently to service another small request. Its size is cached in
dvsize. The link fields of this chunk are not maintained since it
is not kept in a bin.
SmallBins
An array of bin headers for free chunks. These bins hold chunks
with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
chunks of all the same size, spaced 8 bytes apart. To simplify
use in double-linked lists, each bin header acts as a malloc_chunk
pointing to the real first node, if it exists (else pointing to
itself). This avoids special-casing for headers. But to avoid
waste, we allocate only the fd/bk pointers of bins, and then use
repositioning tricks to treat these as the fields of a chunk.
TreeBins
Treebins are pointers to the roots of trees holding a range of
sizes. There are 2 equally spaced treebins for each power of two
from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
larger.
Bin maps
There is one bit map for small bins ("smallmap") and one for
treebins ("treemap"). Each bin sets its bit when non-empty, and
clears the bit when empty. Bit operations are then used to avoid
bin-by-bin searching -- nearly all "search" is done without ever
looking at bins that won't be selected. The bit maps
conservatively use 32 bits per map word, even if on 64bit system.
For a good description of some of the bit-based techniques used
here, see Henry S. Warren Jr's book "Hacker's Delight" (and
supplement at http://hackersdelight.org/). Many of these are
intended to reduce the branchiness of paths through malloc etc, as
well as to reduce the number of memory locations read or written.
Segments
A list of segments headed by an embedded malloc_segment record
representing the initial space.
Address check support
The least_addr field is the least address ever obtained from
MORECORE or MMAP. Attempted frees and reallocs of any address less
than this are trapped (unless INSECURE is defined).
Magic tag
A cross-check field that should always hold same value as mparams.magic.
Max allowed footprint
The maximum allowed bytes to allocate from system (zero means no limit)
Flags
Bits recording whether to use MMAP, locks, or contiguous MORECORE
Statistics
Each space keeps track of current and maximum system memory
obtained via MORECORE or MMAP.
Trim support
Fields holding the amount of unused topmost memory that should trigger
trimming, and a counter to force periodic scanning to release unused
non-topmost segments.
Locking
If USE_LOCKS is defined, the "mutex" lock is acquired and released
around every public call using this mspace.
Extension support
A void* pointer and a size_t field that can be used to help implement
extensions to this malloc.
*/
/* Bin types, widths and sizes */
#define NSMALLBINS (32U)
#define NTREEBINS (32U)
#define SMALLBIN_SHIFT (3U)
#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT (8U)
/* Chunks of at least MIN_LARGE_SIZE bytes are kept in treebins */
#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
struct malloc_state {
binmap_t smallmap; /* bitmap of non-empty smallbins */
binmap_t treemap; /* bitmap of non-empty treebins */
size_t dvsize; /* cached size of the designated-victim chunk */
size_t topsize; /* cached size of the top chunk */
char* least_addr; /* least address ever obtained from MORECORE/MMAP */
mchunkptr dv; /* designated victim (preferred chunk for small splits) */
mchunkptr top; /* topmost chunk of the currently active segment */
size_t trim_check; /* threshold above which top may be auto-trimmed */
size_t release_checks; /* countdown to periodic unused-segment scan */
size_t magic; /* cross-check; must equal mparams.magic */
mchunkptr smallbins[(NSMALLBINS+1)*2]; /* fd/bk pairs only; see smallbin_at */
tbinptr treebins[NTREEBINS]; /* roots of the size-range trees */
size_t footprint; /* current bytes obtained from the system */
size_t max_footprint; /* peak bytes obtained from the system */
size_t footprint_limit; /* zero means no limit */
flag_t mflags; /* USE_LOCK / USE_MMAP / NONCONTIGUOUS bits */
#if USE_LOCKS
MLOCK_T mutex; /* locate lock among fields that rarely change */
#endif /* USE_LOCKS */
msegment seg; /* head of segment list (the top-most space) */
void* extp; /* Unused but available for extensions */
size_t exts;
};
typedef struct malloc_state* mstate;
/* ------------- Global malloc_state and malloc_params ------------------- */
/*
malloc_params holds global properties, including those that can be
dynamically set using mallopt. There is a single instance, mparams,
initialized in init_mparams. Note that the non-zeroness of "magic"
also serves as an initialization flag.
*/
struct malloc_params {
size_t magic; /* random seed; nonzero doubles as the initialized flag */
size_t page_size;
size_t granularity;
size_t mmap_threshold;
size_t trim_threshold;
flag_t default_mflags;
};
/* Single global instance, filled in lazily by init_mparams() */
static struct malloc_params mparams;
/* Ensure mparams initialized (magic is nonzero once init_mparams has run) */
#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
#if !ONLY_MSPACES
/* The global malloc_state used for all non-"mspace" calls */
static struct malloc_state _gm_;
#define gm (&_gm_)
#define is_global(M) ((M) == &_gm_)
#endif /* !ONLY_MSPACES */
/* Nonzero once M has a top chunk, i.e. after its first sys_alloc */
#define is_initialized(M) ((M)->top != 0)
/* -------------------------- system alloc setup ------------------------- */
/* Operations on mflags */
#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
#if USE_LOCKS
#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
#else
#define disable_lock(M)
#endif
#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
#if HAVE_MMAP
#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
#else
#define disable_mmap(M)
#endif
#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
/* Set or clear the lock bit of M according to boolean L */
#define set_lock(M,L)\
((M)->mflags = (L)?\
((M)->mflags | USE_LOCK_BIT) :\
((M)->mflags & ~USE_LOCK_BIT))
/* page-align a size (page_size is a power of two; see init_mparams) */
#define page_align(S)\
(((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
/* granularity-align a size */
#define granularity_align(S)\
(((S) + (mparams.granularity - SIZE_T_ONE))\
& ~(mparams.granularity - SIZE_T_ONE))
/* For mmap, use granularity alignment on windows, else page-align */
#ifdef WIN32
#define mmap_align(S) granularity_align(S)
#else
#define mmap_align(S) page_align(S)
#endif
/* For sys_alloc, enough padding to ensure can malloc request on success */
#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
#define is_page_aligned(S)\
(((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
#define is_granularity_aligned(S)\
(((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
/* True if segment S holds address A */
#define segment_holds(S, A)\
((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
/* Return the segment of m containing addr, or 0 if no segment does. */
static msegmentptr segment_holding(mstate m, char* addr) {
msegmentptr sp;
/* scan the singly-linked segment list headed by the embedded record */
for (sp = &m->seg; sp != 0; sp = sp->next) {
if (addr >= sp->base && addr < sp->base + sp->size)
return sp;
}
return 0;
}
/* Return nonzero if the record of some segment of m lies inside ss. */
static int has_segment_link(mstate m, msegmentptr ss) {
msegmentptr sp;
/* a segment record embedded within ss means ss cannot be released alone */
for (sp = &m->seg; sp != 0; sp = sp->next) {
if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
return 1;
}
return 0;
}
#ifndef MORECORE_CANNOT_TRIM
/* True if topsize s exceeds the cached auto-trim threshold of M */
#define should_trim(M,s) ((s) > (M)->trim_check)
#else /* MORECORE_CANNOT_TRIM */
#define should_trim(M,s) (0)
#endif /* MORECORE_CANNOT_TRIM */
/*
TOP_FOOT_SIZE is padding at the end of a segment, including space
that may be needed to place segment records and fenceposts when new
noncontiguous segments are added.
*/
#define TOP_FOOT_SIZE\
(align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
/* ------------------------------- Hooks -------------------------------- */
/*
PREACTION should be defined to return 0 on success, and nonzero on
failure. If you are not using locking, you can redefine these to do
anything you like. POSTACTION undoes a successful PREACTION.
*/
#if USE_LOCKS
#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
#else /* USE_LOCKS */
#ifndef PREACTION
#define PREACTION(M) (0)
#endif /* PREACTION */
#ifndef POSTACTION
#define POSTACTION(M)
#endif /* POSTACTION */
#endif /* USE_LOCKS */
/*
CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
USAGE_ERROR_ACTION is triggered on detected bad frees and
reallocs. The argument p is an address that might have triggered the
fault. It is ignored by the two predefined actions, but might be
useful in custom actions that try to help diagnose errors.
*/
#if PROCEED_ON_ERROR
/* A count of the number of corruption errors causing resets */
int malloc_corruption_error_count;
/* default corruption action */
static void reset_on_error(mstate m);
#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
#define USAGE_ERROR_ACTION(m, p)
#else /* PROCEED_ON_ERROR */
#ifndef CORRUPTION_ERROR_ACTION
#define CORRUPTION_ERROR_ACTION(m) ABORT
#endif /* CORRUPTION_ERROR_ACTION */
#ifndef USAGE_ERROR_ACTION
#define USAGE_ERROR_ACTION(m,p) ABORT
#endif /* USAGE_ERROR_ACTION */
#endif /* PROCEED_ON_ERROR */
/* -------------------------- Debugging setup ---------------------------- */
#if ! DEBUG
/* Without DEBUG, all consistency checks compile away to nothing */
#define check_free_chunk(M,P)
#define check_inuse_chunk(M,P)
#define check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P)
#define check_malloc_state(M)
#define check_top_chunk(M,P)
#else /* DEBUG */
#define check_free_chunk(M,P) do_check_free_chunk(M,P)
#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
#define check_top_chunk(M,P) do_check_top_chunk(M,P)
#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
#define check_malloc_state(M) do_check_malloc_state(M)
/* Forward declarations for the DEBUG-only checkers defined later */
static void do_check_any_chunk(mstate m, mchunkptr p);
static void do_check_top_chunk(mstate m, mchunkptr p);
static void do_check_mmapped_chunk(mstate m, mchunkptr p);
static void do_check_inuse_chunk(mstate m, mchunkptr p);
static void do_check_free_chunk(mstate m, mchunkptr p);
static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
static void do_check_tree(mstate m, tchunkptr t);
static void do_check_treebin(mstate m, bindex_t i);
static void do_check_smallbin(mstate m, bindex_t i);
static void do_check_malloc_state(mstate m);
static int bin_find(mstate m, mchunkptr x);
static size_t traverse_and_check(mstate m);
#endif /* DEBUG */
/* ---------------------------- Indexing Bins ---------------------------- */
#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
/* addressing by index. See above about smallbin repositioning */
#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i) (&((M)->treebins[i]))
/* assign tree index for size S to variable I. Use x86 asm if possible */
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define compute_tree_index(S, I)\
{\
unsigned int X = S >> TREEBIN_SHIFT;\
if (X == 0)\
I = 0;\
else if (X > 0xFFFF)\
I = NTREEBINS-1;\
else {\
unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \
I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
}\
}
#elif defined (__INTEL_COMPILER)
#define compute_tree_index(S, I)\
{\
size_t X = S >> TREEBIN_SHIFT;\
if (X == 0)\
I = 0;\
else if (X > 0xFFFF)\
I = NTREEBINS-1;\
else {\
unsigned int K = _bit_scan_reverse (X); \
I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
}\
}
#elif defined(_MSC_VER) && _MSC_VER>=1300
#define compute_tree_index(S, I)\
{\
size_t X = S >> TREEBIN_SHIFT;\
if (X == 0)\
I = 0;\
else if (X > 0xFFFF)\
I = NTREEBINS-1;\
else {\
unsigned int K;\
_BitScanReverse((DWORD *) &K, (DWORD) X);\
I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
}\
}
#else /* GNUC */
/* Portable fallback: locate the highest set bit by branchless binary search */
#define compute_tree_index(S, I)\
{\
size_t X = S >> TREEBIN_SHIFT;\
if (X == 0)\
I = 0;\
else if (X > 0xFFFF)\
I = NTREEBINS-1;\
else {\
unsigned int Y = (unsigned int)X;\
unsigned int N = ((Y - 0x100) >> 16) & 8;\
unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
N += K;\
N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
K = 14 - N + ((Y <<= K) >> 15);\
I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
}\
}
#endif /* GNUC */
/* Bit representing maximum resolved size in a treebin at i */
#define bit_for_tree_index(i) \
(i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
/* Shift placing maximum resolved bit in a treebin at i as sign bit */
#define leftshift_for_tree_index(i) \
((i == NTREEBINS-1)? 0 : \
((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
/* The size of the smallest chunk held in bin with index i */
#define minsize_for_tree_index(i) \
((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
(((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
/* ------------------------ Operations on bin maps ----------------------- */
/* bit corresponding to given index */
#define idx2bit(i) ((binmap_t)(1) << (i))
/* Mark/Clear bits with given index */
#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
/* isolate the least set bit of a bitmap */
#define least_bit(x) ((x) & -(x))
/* mask with all bits to left of least bit of x on */
#define left_bits(x) ((x<<1) | -(x<<1))
/* mask with all bits to left of or equal to least bit of x on */
#define same_or_left_bits(x) ((x) | -(x))
/* index corresponding to given bit. Use x86 asm if possible */
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define compute_bit2idx(X, I)\
{\
unsigned int J;\
J = __builtin_ctz(X); \
I = (bindex_t)J;\
}
#elif defined (__INTEL_COMPILER)
#define compute_bit2idx(X, I)\
{\
unsigned int J;\
J = _bit_scan_forward (X); \
I = (bindex_t)J;\
}
#elif defined(_MSC_VER) && _MSC_VER>=1300
#define compute_bit2idx(X, I)\
{\
unsigned int J;\
_BitScanForward((DWORD *) &J, X);\
I = (bindex_t)J;\
}
#elif USE_BUILTIN_FFS
#define compute_bit2idx(X, I) I = ffs(X)-1
#else
/* Portable fallback: branchless binary search for the single set bit */
#define compute_bit2idx(X, I)\
{\
unsigned int Y = X - 1;\
unsigned int K = Y >> (16-4) & 16;\
unsigned int N = K; Y >>= K;\
N += K = Y >> (8-3) & 8; Y >>= K;\
N += K = Y >> (4-2) & 4; Y >>= K;\
N += K = Y >> (2-1) & 2; Y >>= K;\
N += K = Y >> (1-0) & 1; Y >>= K;\
I = (bindex_t)(N + Y);\
}
#endif /* GNUC */
/* ----------------------- Runtime Check Support ------------------------- */
/*
For security, the main invariant is that malloc/free/etc never
writes to a static address other than malloc_state, unless static
malloc_state itself has been corrupted, which cannot occur via
malloc (because of these checks). In essence this means that we
believe all pointers, sizes, maps etc held in malloc_state, but
check all of those linked or offsetted from other embedded data
structures. These checks are interspersed with main code in a way
that tends to minimize their run-time cost.
When FOOTERS is defined, in addition to range checking, we also
verify footer fields of inuse chunks, which can be used to guarantee
that the mstate controlling malloc/free is intact. This is a
streamlined version of the approach described by William Robertson
et al in "Run-time Detection of Heap-based Overflows" LISA'03
http://www.usenix.org/events/lisa03/tech/robertson.html The footer
of an inuse chunk holds the xor of its mstate and a random seed,
that is checked upon calls to free() and realloc(). This is
(probabilistically) unguessable from outside the program, but can be
computed by any code successfully malloc'ing any chunk, so does not
itself provide protection against code that has already broken
security through some other means. Unlike Robertson et al, we
always dynamically check addresses of all offset chunks (previous,
next, etc). This turns out to be cheaper than relying on hashes.
*/
#if !INSECURE
/* Check if address a is at least as high as any from MORECORE or MMAP */
#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
/* Check if address of next chunk n is higher than base chunk p */
#define ok_next(p, n) ((char*)(p) < (char*)(n))
/* Check if p has inuse status */
#define ok_inuse(p) is_inuse(p)
/* Check if p has its pinuse bit on */
#define ok_pinuse(p) pinuse(p)
#else /* !INSECURE */
/* With INSECURE, all pointer sanity checks are disabled */
#define ok_address(M, a) (1)
#define ok_next(b, n) (1)
#define ok_inuse(p) (1)
#define ok_pinuse(p) (1)
#endif /* !INSECURE */
#if (FOOTERS && !INSECURE)
/* Check if (alleged) mstate m has expected magic field */
#define ok_magic(M) ((M)->magic == mparams.magic)
#else /* (FOOTERS && !INSECURE) */
#define ok_magic(M) (1)
#endif /* (FOOTERS && !INSECURE) */
/* In gcc, use __builtin_expect to minimize impact of checks */
#if !INSECURE
#if defined(__GNUC__) && __GNUC__ >= 3
#define RTCHECK(e) __builtin_expect(e, 1)
#else /* GNUC */
#define RTCHECK(e) (e)
#endif /* GNUC */
#else /* !INSECURE */
#define RTCHECK(e) (1)
#endif /* !INSECURE */
/* macros to set up inuse chunks with or without footers */
#if !FOOTERS
#define mark_inuse_foot(M,p,s)
/* Macros for setting head/foot of non-mmapped chunks */
/* Set cinuse bit and pinuse bit of next chunk */
#define set_inuse(M,p,s)\
((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
#define set_inuse_and_pinuse(M,p,s)\
((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
/* Set size, cinuse and pinuse bit of this chunk */
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
#else /* FOOTERS */
/* Set foot of inuse chunk to be xor of mstate and seed */
#define mark_inuse_foot(M,p,s)\
(((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
/* Recover the owning mstate from an inuse chunk's footer */
#define get_mstate_for(p)\
((mstate)(((mchunkptr)((char*)(p) +\
(chunksize(p))))->prev_foot ^ mparams.magic))
#define set_inuse(M,p,s)\
((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
(((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
mark_inuse_foot(M,p,s))
#define set_inuse_and_pinuse(M,p,s)\
((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
(((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
mark_inuse_foot(M,p,s))
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
mark_inuse_foot(M, p, s))
#endif /* !FOOTERS */
/* ---------------------------- setting mparams -------------------------- */
#if LOCK_AT_FORK
/* pthread_atfork handlers keeping gm's lock in a sane state across fork */
static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); }
static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); }
#endif /* LOCK_AT_FORK */
/* Initialize mparams. Runs once under the global lock; idempotent because
mparams.magic doubles as the "already initialized" flag. Returns 1. */
static int init_mparams(void) {
#ifdef NEED_GLOBAL_LOCK_INIT
if (malloc_global_mutex_status <= 0)
init_malloc_global_mutex();
#endif
ACQUIRE_MALLOC_GLOBAL_LOCK();
if (mparams.magic == 0) { /* not yet initialized */
size_t magic;
size_t psize;
size_t gsize;
#ifndef WIN32
psize = malloc_getpagesize;
gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
#else /* WIN32 */
{
SYSTEM_INFO system_info;
GetSystemInfo(&system_info);
psize = system_info.dwPageSize;
gsize = ((DEFAULT_GRANULARITY != 0)?
DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
}
#endif /* WIN32 */
/* Sanity-check configuration:
size_t must be unsigned and as wide as pointer type.
ints must be at least 4 bytes.
alignment must be at least 8.
Alignment, min chunk size, and page size must all be powers of 2.
*/
if ((sizeof(size_t) != sizeof(char*)) ||
(MAX_SIZE_T < MIN_CHUNK_SIZE) ||
(sizeof(int) < 4) ||
(MALLOC_ALIGNMENT < (size_t)8U) ||
((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
((gsize & (gsize-SIZE_T_ONE)) != 0) ||
((psize & (psize-SIZE_T_ONE)) != 0))
ABORT;
mparams.granularity = gsize;
mparams.page_size = psize;
mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
#if MORECORE_CONTIGUOUS
mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
#else /* MORECORE_CONTIGUOUS */
mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
#endif /* MORECORE_CONTIGUOUS */
#if !ONLY_MSPACES
/* Set up lock for main malloc area */
gm->mflags = mparams.default_mflags;
(void)INITIAL_LOCK(&gm->mutex);
#endif
#if LOCK_AT_FORK
pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
#endif
{
#if USE_DEV_RANDOM
int fd;
unsigned char buf[sizeof(size_t)];
/* Try to use /dev/urandom, else fall back on using time */
if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
read(fd, buf, sizeof(buf)) == sizeof(buf)) {
magic = *((size_t *) buf);
close(fd);
}
else
#endif /* USE_DEV_RANDOM */
#ifdef WIN32
magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
#elif defined(LACKS_TIME_H)
magic = (size_t)&magic ^ (size_t)0x55555555U;
#else
magic = (size_t)(time(0) ^ (size_t)0x55555555U);
#endif
magic |= (size_t)8U; /* ensure nonzero */
magic &= ~(size_t)7U; /* improve chances of fault for bad values */
/* Until memory modes commonly available, use volatile-write */
(*(volatile size_t *)(&(mparams.magic))) = magic;
}
}
RELEASE_MALLOC_GLOBAL_LOCK();
return 1;
}
/* support for mallopt: set the tunable selected by param_number to value.
Returns 1 on success, 0 if the parameter is unknown or the value invalid. */
static int change_mparam(int param_number, int value) {
size_t val;
ensure_initialization();
/* -1 conventionally means "as large as possible" */
val = (value == -1)? MAX_SIZE_T : (size_t)value;
if (param_number == M_TRIM_THRESHOLD) {
mparams.trim_threshold = val;
return 1;
}
if (param_number == M_GRANULARITY) {
/* granularity must be a power of two no smaller than the page size */
if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
mparams.granularity = val;
return 1;
}
return 0;
}
if (param_number == M_MMAP_THRESHOLD) {
mparams.mmap_threshold = val;
return 1;
}
return 0;
}
#if DEBUG
/* ------------------------- Debugging Support --------------------------- */
/* Check properties of any chunk, whether free, inuse, mmapped etc */
static void do_check_any_chunk(mstate m, mchunkptr p) {
/* payload must be aligned unless this is a fencepost marker */
assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
assert(ok_address(m, p));
}
/* Check properties of top chunk */
static void do_check_top_chunk(mstate m, mchunkptr p) {
msegmentptr sp = segment_holding(m, (char*)p);
size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */
assert(sp != 0); /* top must live inside some segment */
assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
assert(ok_address(m, p));
assert(sz == m->topsize); /* cached topsize must agree with the header */
assert(sz > 0);
/* top fills its segment up to the reserved TOP_FOOT_SIZE tail */
assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
assert(pinuse(p));
assert(!pinuse(chunk_plus_offset(p, sz)));
}
/* Check properties of (inuse) mmapped chunks */
static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
size_t sz = chunksize(p);
size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD); /* whole mapping length */
assert(is_mmapped(p));
assert(use_mmap(m));
assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
assert(ok_address(m, p));
assert(!is_small(sz)); /* only large requests are served by mmap */
assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); /* page-sized mapping */
/* mmapped chunks end with a fencepost followed by a zero head */
assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
}
/* Check properties of inuse chunks */
static void do_check_inuse_chunk(mstate m, mchunkptr p) {
do_check_any_chunk(m, p);
assert(is_inuse(p));
assert(next_pinuse(p)); /* next chunk must record that p is in use */
/* If not pinuse and not mmapped, previous chunk has OK offset */
assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
if (is_mmapped(p))
do_check_mmapped_chunk(m, p);
}
/* Check properties of free chunks */
static void do_check_free_chunk(mstate m, mchunkptr p) {
size_t sz = chunksize(p);
mchunkptr next = chunk_plus_offset(p, sz);
do_check_any_chunk(m, p);
assert(!is_inuse(p));
assert(!next_pinuse(p));
assert (!is_mmapped(p));
/* dv and top are free but deliberately kept out of any bin */
if (p != m->dv && p != m->top) {
if (sz >= MIN_CHUNK_SIZE) {
/* a binned free chunk must be well-formed and correctly linked */
assert((sz & CHUNK_ALIGN_MASK) == 0);
assert(is_aligned(chunk2mem(p)));
assert(next->prev_foot == sz);
assert(pinuse(p));
assert (next == m->top || is_inuse(next));
assert(p->fd->bk == p);
assert(p->bk->fd == p);
}
else /* markers are always of size SIZE_T_SIZE */
assert(sz == SIZE_T_SIZE);
}
}
/* Check properties of malloced chunks at the point they are malloced */
static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
if (mem != 0) {
mchunkptr p = mem2chunk(mem);
size_t sz = p->head & ~INUSE_BITS;
do_check_inuse_chunk(m, p);
assert((sz & CHUNK_ALIGN_MASK) == 0);
assert(sz >= MIN_CHUNK_SIZE);
assert(sz >= s); /* chunk must be large enough for request s */
/* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
}
}
/* Check a tree and its subtrees. */
static void do_check_tree(mstate m, tchunkptr t) {
tchunkptr head = 0;
tchunkptr u = t;
bindex_t tindex = t->index;
size_t tsize = chunksize(t);
bindex_t idx;
compute_tree_index(tsize, idx);
assert(tindex == idx); /* stored index must match its recomputation */
assert(tsize >= MIN_LARGE_SIZE);
/* size must fall inside the range covered by this treebin */
assert(tsize >= minsize_for_tree_index(idx));
assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
do { /* traverse through chain of same-sized nodes */
do_check_any_chunk(m, ((mchunkptr)u));
assert(u->index == tindex);
assert(chunksize(u) == tsize);
assert(!is_inuse(u));
assert(!next_pinuse(u));
assert(u->fd->bk == u);
assert(u->bk->fd == u);
if (u->parent == 0) {
assert(u->child[0] == 0);
assert(u->child[1] == 0);
}
else {
assert(head == 0); /* only one node on chain has parent */
head = u;
assert(u->parent != u);
/* parent links back via a child slot, or is the treebin root pointer */
assert (u->parent->child[0] == u ||
u->parent->child[1] == u ||
*((tbinptr*)(u->parent)) == u);
if (u->child[0] != 0) {
assert(u->child[0]->parent == u);
assert(u->child[0] != u);
do_check_tree(m, u->child[0]);
}
if (u->child[1] != 0) {
assert(u->child[1]->parent == u);
assert(u->child[1] != u);
do_check_tree(m, u->child[1]);
}
if (u->child[0] != 0 && u->child[1] != 0) {
assert(chunksize(u->child[0]) < chunksize(u->child[1]));
}
}
u = u->fd;
} while (u != t);
assert(head != 0); /* exactly one chain member is linked into the tree */
}
/* Verify treebin i: its treemap bit must reflect emptiness, and a
non-empty bin's tree must satisfy all tree invariants. */
static void do_check_treebin(mstate m, bindex_t i) {
tbinptr* root = treebin_at(m, i);
tchunkptr t = *root;
int marked = (m->treemap & (1U << i)) != 0;
if (t == 0)
assert(!marked); /* a null root requires a clear map bit */
if (marked)
do_check_tree(m, t);
}
/* Check all the chunks in a smallbin. */
static void do_check_smallbin(mstate m, bindex_t i) {
sbinptr b = smallbin_at(m, i);
mchunkptr p = b->bk;
unsigned int empty = (m->smallmap & (1U << i)) == 0;
if (p == b)
assert(empty); /* an empty bin must have its map bit clear */
if (!empty) {
/* walk the circular doubly-linked list back to the header */
for (; p != b; p = p->bk) {
size_t size = chunksize(p);
mchunkptr q;
/* each chunk claims to be free */
do_check_free_chunk(m, p);
/* chunk belongs in bin */
assert(small_index(size) == i);
assert(p->bk == b || chunksize(p->bk) == chunksize(p));
/* chunk is followed by an inuse chunk */
q = next_chunk(p);
if (q->head != FENCEPOST_HEAD)
do_check_inuse_chunk(m, q);
}
}
}
/* Find x in a bin. Used in other check functions. Returns 1 if found. */
static int bin_find(mstate m, mchunkptr x) {
size_t size = chunksize(x);
if (is_small(size)) {
bindex_t sidx = small_index(size);
sbinptr b = smallbin_at(m, sidx);
if (smallmap_is_marked(m, sidx)) {
mchunkptr p = b;
do { /* scan the circular list starting at the bin header */
if (p == x)
return 1;
} while ((p = p->fd) != b);
}
}
else {
bindex_t tidx;
compute_tree_index(size, tidx);
if (treemap_is_marked(m, tidx)) {
tchunkptr t = *treebin_at(m, tidx);
size_t sizebits = size << leftshift_for_tree_index(tidx);
/* descend by successive size bits, mirroring tree insertion */
while (t != 0 && chunksize(t) != size) {
t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
sizebits <<= 1;
}
if (t != 0) {
tchunkptr u = t;
do { /* scan the same-size chain hanging off the tree node */
if (u == (tchunkptr)x)
return 1;
} while ((u = u->fd) != t);
}
}
}
return 0;
}
/* Traverse each chunk and check it; return total accounted bytes */
static size_t traverse_and_check(mstate m) {
size_t sum = 0;
if (is_initialized(m)) {
msegmentptr s = &m->seg;
sum += m->topsize + TOP_FOOT_SIZE;
while (s != 0) {
mchunkptr q = align_as_chunk(s->base);
mchunkptr lastq = 0;
assert(pinuse(q)); /* first chunk of a segment always has pinuse set */
/* walk chunks until the top chunk or the trailing fencepost */
while (segment_holds(s, q) &&
q != m->top && q->head != FENCEPOST_HEAD) {
sum += chunksize(q);
if (is_inuse(q)) {
assert(!bin_find(m, q)); /* inuse chunks must never be binned */
do_check_inuse_chunk(m, q);
}
else {
/* free chunks must be the dv or reachable from a bin */
assert(q == m->dv || bin_find(m, q));
assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
do_check_free_chunk(m, q);
}
lastq = q;
q = next_chunk(q);
}
s = s->next;
}
}
return sum;
}
/* Check all properties of malloc_state. */
static void do_check_malloc_state(mstate m) {
bindex_t i;
size_t total;
/* check bins */
for (i = 0; i < NSMALLBINS; ++i)
do_check_smallbin(m, i);
for (i = 0; i < NTREEBINS; ++i)
do_check_treebin(m, i);
if (m->dvsize != 0) { /* check dv chunk */
do_check_any_chunk(m, m->dv);
assert(m->dvsize == chunksize(m->dv));
assert(m->dvsize >= MIN_CHUNK_SIZE);
assert(bin_find(m, m->dv) == 0); /* dv is never kept in a bin */
}
if (m->top != 0) { /* check top chunk */
do_check_top_chunk(m, m->top);
/*assert(m->topsize == chunksize(m->top)); redundant */
assert(m->topsize > 0);
assert(bin_find(m, m->top) == 0); /* top is never kept in a bin */
}
total = traverse_and_check(m);
assert(total <= m->footprint); /* accounted bytes fit within footprint */
assert(m->footprint <= m->max_footprint);
}
#endif /* DEBUG */
/* ----------------------------- statistics ------------------------------ */
#if !NO_MALLINFO
/* Compute mallinfo statistics for space m by traversing all segments.
   Returns a zeroed struct if m is uninitialized or the lock cannot be
   taken. Fields: arena = total traversed bytes, ordblks = free chunk
   count, hblkhd = footprint not in segments, usmblks = peak footprint,
   uordblks = bytes in use, fordblks = free bytes, keepcost = topsize. */
static struct mallinfo internal_mallinfo(mstate m) {
  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  ensure_initialization();
  if (!PREACTION(m)) {                 /* acquire lock */
    check_malloc_state(m);
    if (is_initialized(m)) {
      size_t nfree = SIZE_T_ONE; /* top always free */
      size_t mfree = m->topsize + TOP_FOOT_SIZE;
      size_t sum = mfree;
      msegmentptr s = &m->seg;
      while (s != 0) {                 /* walk every segment */
        mchunkptr q = align_as_chunk(s->base);
        while (segment_holds(s, q) &&
               q != m->top && q->head != FENCEPOST_HEAD) {
          size_t sz = chunksize(q);
          sum += sz;
          if (!is_inuse(q)) {
            mfree += sz;
            ++nfree;
          }
          q = next_chunk(q);
        }
        s = s->next;
      }
      nm.arena = sum;
      nm.ordblks = nfree;
      nm.hblkhd = m->footprint - sum;
      nm.usmblks = m->max_footprint;
      nm.uordblks = m->footprint - mfree;
      nm.fordblks = mfree;
      nm.keepcost = m->topsize;
    }
    POSTACTION(m);                     /* release lock */
  }
  return nm;
}
#endif /* !NO_MALLINFO */
#if !NO_MALLOC_STATS
/* Print max/current footprint and bytes in use for space m to stderr.
   The statistics are gathered under the lock, then printed after the
   lock is dropped so fprintf cannot deadlock against the allocator. */
static void internal_malloc_stats(mstate m) {
  ensure_initialization();
  if (!PREACTION(m)) {
    size_t maxfp = 0;
    size_t fp = 0;
    size_t used = 0;
    check_malloc_state(m);
    if (is_initialized(m)) {
      msegmentptr s = &m->seg;
      maxfp = m->max_footprint;
      fp = m->footprint;
      used = fp - (m->topsize + TOP_FOOT_SIZE);  /* start from "all used" */
      while (s != 0) {                           /* subtract each free chunk */
        mchunkptr q = align_as_chunk(s->base);
        while (segment_holds(s, q) &&
               q != m->top && q->head != FENCEPOST_HEAD) {
          if (!is_inuse(q))
            used -= chunksize(q);
          q = next_chunk(q);
        }
        s = s->next;
      }
    }
    POSTACTION(m); /* drop lock */
    fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
    fprintf(stderr, "system bytes     = %10lu\n", (unsigned long)(fp));
    fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long)(used));
  }
}
#endif /* NO_MALLOC_STATS */
/* ----------------------- Operations on smallbins ----------------------- */
/*
Various forms of linking and unlinking are defined as macros. Even
the ones for trees, which are very long but have very short typical
paths. This is ugly but reduces reliance on inlining support of
compilers.
*/
/* Link a free chunk into a smallbin */
/* Inserts free chunk P of size S at the front of the smallbin for
   small_index(S), marking the smallmap bit if the bin was empty.
   Macro arguments are evaluated more than once; pass simple lvalues. */
#define insert_small_chunk(M, P, S) {\
  bindex_t I  = small_index(S);\
  mchunkptr B = smallbin_at(M, I);\
  mchunkptr F = B;\
  assert(S >= MIN_CHUNK_SIZE);\
  if (!smallmap_is_marked(M, I))\
    mark_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, B->fd)))\
    F = B->fd;\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
  B->fd = P;\
  F->bk = P;\
  P->fd = F;\
  P->bk = B;\
}
/* Unlink a chunk from a smallbin */
/* Removes chunk P of size S from its smallbin, clearing the smallmap
   bit when the bin becomes empty. Neighbor links are sanity-checked
   (RTCHECK) before being rewritten; failures trigger the corruption
   action instead of corrupting the free lists further. */
#define unlink_small_chunk(M, P, S) {\
  mchunkptr F = P->fd;\
  mchunkptr B = P->bk;\
  bindex_t I = small_index(S);\
  assert(P != B);\
  assert(P != F);\
  assert(chunksize(P) == small_index2size(I));\
  if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \
    if (B == F) {\
      clear_smallmap(M, I);\
    }\
    else if (RTCHECK(B == smallbin_at(M,I) ||\
                     (ok_address(M, B) && B->fd == P))) {\
      F->bk = B;\
      B->fd = F;\
    }\
    else {\
      CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}
/* Unlink the first chunk from a smallbin */
/* Faster variant of unlink_small_chunk for when P is known to be the
   first chunk (B->fd) of smallbin B at index I. */
#define unlink_first_small_chunk(M, B, P, I) {\
  mchunkptr F = P->fd;\
  assert(P != B);\
  assert(P != F);\
  assert(chunksize(P) == small_index2size(I));\
  if (B == F) {\
    clear_smallmap(M, I);\
  }\
  else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\
    F->bk = B;\
    B->fd = F;\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}
/* Replace dv node, binning the old one */
/* Used only when dvsize known to be small */
/* Makes P (of size S) the new designated-victim chunk, inserting the
   previous dv (if any) into its smallbin first. */
#define replace_dv(M, P, S) {\
  size_t DVS = M->dvsize;\
  assert(is_small(DVS));\
  if (DVS != 0) {\
    mchunkptr DV = M->dv;\
    insert_small_chunk(M, DV, DVS);\
  }\
  M->dvsize = S;\
  M->dv = P;\
}
/* ------------------------- Operations on trees ------------------------- */
/* Insert chunk into tree */
/* Inserts chunk X of size S into the treebin trie for its index.
   If the treebin is empty X becomes the root; otherwise descend the
   trie keyed on successive high bits of S until either an empty child
   slot is found (X becomes a leaf) or a node of equal size is found
   (X joins that node's circular same-size fd/bk list with parent 0). */
#define insert_large_chunk(M, X, S) {\
  tbinptr* H;\
  bindex_t I;\
  compute_tree_index(S, I);\
  H = treebin_at(M, I);\
  X->index = I;\
  X->child[0] = X->child[1] = 0;\
  if (!treemap_is_marked(M, I)) {\
    mark_treemap(M, I);\
    *H = X;\
    X->parent = (tchunkptr)H;\
    X->fd = X->bk = X;\
  }\
  else {\
    tchunkptr T = *H;\
    size_t K = S << leftshift_for_tree_index(I);\
    for (;;) {\
      if (chunksize(T) != S) {\
        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
        K <<= 1;\
        if (*C != 0)\
          T = *C;\
        else if (RTCHECK(ok_address(M, C))) {\
          *C = X;\
          X->parent = T;\
          X->fd = X->bk = X;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
      else {\
        tchunkptr F = T->fd;\
        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
          T->fd = F->bk = X;\
          X->fd = F;\
          X->bk = T;\
          X->parent = 0;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
    }\
  }\
}
/*
Unlink steps:
1. If x is a chained node, unlink it from its same-sized fd/bk links
and choose its bk node as its replacement.
2. If x was the last node of its size, but not a leaf node, it must
be replaced with a leaf node (not merely one with an open left or
right), to make sure that lefts and rights of descendents
correspond properly to bit masks. We use the rightmost descendent
of x. We could use any other leaf, but this is easy to locate and
tends to counteract removal of leftmosts elsewhere, and so keeps
paths shorter than minimally guaranteed. This doesn't loop much
because on average a node in a tree is near the bottom.
3. If x is the base of a chain (i.e., has parent links) relink
x's parent and children to x's replacement (or null if none).
*/
#define unlink_large_chunk(M, X) {\
tchunkptr XP = X->parent;\
tchunkptr R;\
if (X->bk != X) {\
tchunkptr F = X->fd;\
R = X->bk;\
if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\
F->bk = R;\
R->fd = F;\
}\
else {\
CORRUPTION_ERROR_ACTION(M);\
}\
}\
else {\
tchunkptr* RP;\
if (((R = *(RP = &(X->child[1]))) != 0) ||\
((R = *(RP = &(X->child[0]))) != 0)) {\
tchunkptr* CP;\
while ((*(CP = &(R->child[1])) != 0) ||\
(*(CP = &(R->child[0])) != 0)) {\
R = *(RP = CP);\
}\
if (RTCHECK(ok_address(M, RP)))\
*RP = 0;\
else {\
CORRUPTION_ERROR_ACTION(M);\
}\
}\
}\
if (XP != 0) {\
tbinptr* H = treebin_at(M, X->index);\
if (X == *H) {\
if ((*H = R) == 0) \
clear_treemap(M, X->index);\
}\
else if (RTCHECK(ok_address(M, XP))) {\
if (XP->child[0] == X) \
XP->child[0] = R;\
else \
XP->child[1] = R;\
}\
else\
CORRUPTION_ERROR_ACTION(M);\
if (R != 0) {\
if (RTCHECK(ok_address(M, R))) {\
tchunkptr C0, C1;\
R->parent = XP;\
if ((C0 = X->child[0]) != 0) {\
if (RTCHECK(ok_address(M, C0))) {\
R->child[0] = C0;\
C0->parent = R;\
}\
else\
CORRUPTION_ERROR_ACTION(M);\
}\
if ((C1 = X->child[1]) != 0) {\
if (RTCHECK(ok_address(M, C1))) {\
R->child[1] = C1;\
C1->parent = R;\
}\
else\
CORRUPTION_ERROR_ACTION(M);\
}\
}\
else\
CORRUPTION_ERROR_ACTION(M);\
}\
}\
}
/* Relays to large vs small bin operations */
/* Dispatch on chunk size: small chunks go to smallbins, large ones to
   the treebin trie. Each expands to a statement, not an expression. */
#define insert_chunk(M, P, S)\
  if (is_small(S)) insert_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
#define unlink_chunk(M, P, S)\
  if (is_small(S)) unlink_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
/* Relays to internal calls to malloc/free from realloc, memalign etc */
/* Route internal requests to dlmalloc/dlfree for the global space gm,
   or to the mspace variants otherwise, depending on build configuration. */
#if ONLY_MSPACES
#define internal_malloc(m, b) mspace_malloc(m, b)
#define internal_free(m, mem) mspace_free(m,mem);
#else /* ONLY_MSPACES */
#if MSPACES
#define internal_malloc(m, b)\
  ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
#define internal_free(m, mem)\
  if (m == gm) dlfree(mem); else mspace_free(m,mem);
#else /* MSPACES */
#define internal_malloc(m, b) dlmalloc(b)
#define internal_free(m, mem) dlfree(mem)
#endif /* MSPACES */
#endif /* ONLY_MSPACES */
/* ----------------------- Direct-mmapping chunks ----------------------- */
/*
Directly mmapped chunks are set up with an offset to the start of
the mmapped region stored in the prev_foot field of the chunk. This
allows reconstruction of the required argument to MUNMAP when freed,
and also allows adjustment of the returned chunk to meet alignment
requirements (especially in memalign).
*/
/* Malloc using mmap */
/* Satisfy an nb-byte request with a dedicated mmapped region.
   The chunk records its offset from the region start in prev_foot so
   free can reconstruct the munmap arguments. Returns the user pointer,
   or 0 on failure or if the footprint limit would be exceeded. */
static void* mmap_alloc(mstate m, size_t nb) {
  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  if (m->footprint_limit != 0) {
    size_t fp = m->footprint + mmsize;
    if (fp <= m->footprint || fp > m->footprint_limit)  /* overflow or over limit */
      return 0;
  }
  if (mmsize > nb) {     /* Check for wrap around 0 */
    char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));
    if (mm != CMFAIL) {
      size_t offset = align_offset(chunk2mem(mm));
      size_t psize = mmsize - offset - MMAP_FOOT_PAD;
      mchunkptr p = (mchunkptr)(mm + offset);
      p->prev_foot = offset;           /* remember offset for munmap on free */
      p->head = psize;
      mark_inuse_foot(m, p, psize);
      /* terminate the region with fencepost + zero header */
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
      if (m->least_addr == 0 || mm < m->least_addr)
        m->least_addr = mm;
      if ((m->footprint += mmsize) > m->max_footprint)
        m->max_footprint = m->footprint;
      assert(is_aligned(chunk2mem(p)));
      check_mmapped_chunk(m, p);
      return chunk2mem(p);
    }
  }
  return 0;
}
/* Realloc using mmap */
/* Resize a directly-mmapped chunk oldp to hold nb bytes.
   Keeps the old region if it already fits without excessive slack,
   otherwise tries CALL_MREMAP. Returns the (possibly moved) chunk,
   or 0 when the chunk cannot be resized this way. */
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
  size_t oldsize = chunksize(oldp);
  (void)flags; /* placate people compiling -Wunused */
  if (is_small(nb)) /* Can't shrink mmap regions below small size */
    return 0;
  /* Keep old chunk if big enough but not too big */
  if (oldsize >= nb + SIZE_T_SIZE &&
      (oldsize - nb) <= (mparams.granularity << 1))
    return oldp;
  else {
    size_t offset = oldp->prev_foot;   /* distance back to mmap base */
    size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
    size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
                                  oldmmsize, newmmsize, flags);
    if (cp != CMFAIL) {
      mchunkptr newp = (mchunkptr)(cp + offset);
      size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
      newp->head = psize;
      mark_inuse_foot(m, newp, psize);
      /* re-terminate with fencepost + zero header at the new end */
      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
      if (cp < m->least_addr)
        m->least_addr = cp;
      if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
        m->max_footprint = m->footprint;
      check_mmapped_chunk(m, newp);
      return newp;
    }
  }
  return 0;
}
/* -------------------------- mspace management -------------------------- */
/* Initialize top chunk and its size */
/* Makes chunk p (of psize bytes) the top chunk of m, after adjusting
   for alignment. Also writes the fake trailing chunk that reserves
   TOP_FOOT_SIZE of overhead, and resets the trim threshold check. */
static void init_top(mstate m, mchunkptr p, size_t psize) {
  /* Ensure alignment */
  size_t offset = align_offset(chunk2mem(p));
  p = (mchunkptr)((char*)p + offset);
  psize -= offset;
  m->top = p;
  m->topsize = psize;
  p->head = psize | PINUSE_BIT;        /* top always has pinuse set */
  /* set size of fake trailing chunk holding overhead space only once */
  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
  m->trim_check = mparams.trim_threshold; /* reset on each update */
}
/* Initialize bins for a new mstate that is otherwise zeroed out */
/* Turn every smallbin head into a one-element circular list that
   points at itself; treebins need no setup since zero means empty. */
static void init_bins(mstate m) {
  bindex_t idx = 0;
  while (idx < NSMALLBINS) {
    sbinptr head = smallbin_at(m, idx);
    head->bk = head;
    head->fd = head;
    ++idx;
  }
}
#if PROCEED_ON_ERROR
/* default corruption action */
/* Abandon all current memory and reinitialize m to an empty state so
   the allocator can keep running after detected heap corruption.
   Any previously allocated memory is leaked by design. */
static void reset_on_error(mstate m) {
  int i;
  ++malloc_corruption_error_count;
  /* Reinitialize fields to forget about all memory */
  m->smallmap = m->treemap = 0;
  m->dvsize = m->topsize = 0;
  m->seg.base = 0;
  m->seg.size = 0;
  m->seg.next = 0;
  m->top = m->dv = 0;
  for (i = 0; i < NTREEBINS; ++i)
    *treebin_at(m, i) = 0;
  init_bins(m);
}
#endif /* PROCEED_ON_ERROR */
/* Allocate chunk and prepend remainder with chunk in successor base. */
/* Used when a new segment at newbase was obtained immediately below an
   existing segment at oldbase: carve an nb-byte in-use chunk at the
   start of the new space and consolidate the leftover with the first
   chunk of the old base (top, dv, or an ordinary chunk).
   Returns the user pointer for the allocated chunk. */
static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
                           size_t nb) {
  mchunkptr p = align_as_chunk(newbase);
  mchunkptr oldfirst = align_as_chunk(oldbase);
  size_t psize = (char*)oldfirst - (char*)p;
  mchunkptr q = chunk_plus_offset(p, nb);    /* remainder after the allocation */
  size_t qsize = psize - nb;
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
  assert((char*)oldfirst > (char*)q);
  assert(pinuse(oldfirst));
  assert(qsize >= MIN_CHUNK_SIZE);
  /* consolidate remainder with first chunk of old base */
  if (oldfirst == m->top) {
    size_t tsize = m->topsize += qsize;
    m->top = q;
    q->head = tsize | PINUSE_BIT;
    check_top_chunk(m, q);
  }
  else if (oldfirst == m->dv) {
    size_t dsize = m->dvsize += qsize;
    m->dv = q;
    set_size_and_pinuse_of_free_chunk(q, dsize);
  }
  else {
    if (!is_inuse(oldfirst)) {               /* absorb a free neighbor too */
      size_t nsize = chunksize(oldfirst);
      unlink_chunk(m, oldfirst, nsize);
      oldfirst = chunk_plus_offset(oldfirst, nsize);
      qsize += nsize;
    }
    set_free_with_pinuse(q, qsize, oldfirst);
    insert_chunk(m, q, qsize);
    check_free_chunk(m, q);
  }
  check_malloced_chunk(m, chunk2mem(p), nb);
  return chunk2mem(p);
}
/* Add a segment to hold a new noncontiguous region */
/* Records [tbase, tbase+tsize) as a new segment of m (flags in
   `mmapped`), switches top into the new space, embeds the segment
   record inside the tail of the old top, fences off the old segment
   end, and frees whatever slice of the old top remains usable. */
static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
  /* Determine locations and sizes of segment, fenceposts, old top */
  char* old_top = (char*)m->top;
  msegmentptr oldsp = segment_holding(m, old_top);
  char* old_end = oldsp->base + oldsp->size;
  size_t ssize = pad_request(sizeof(struct malloc_segment));
  char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  size_t offset = align_offset(chunk2mem(rawsp));
  char* asp = rawsp + offset;
  /* if the aligned spot would overlap old_top's minimum chunk, use old_top */
  char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
  mchunkptr sp = (mchunkptr)csp;
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
  mchunkptr p = tnext;
  int nfences = 0;
  /* reset top to new space */
  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
  /* Set up segment record */
  assert(is_aligned(ss));
  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  *ss = m->seg; /* Push current record */
  m->seg.base = tbase;
  m->seg.size = tsize;
  m->seg.sflags = mmapped;
  m->seg.next = ss;
  /* Insert trailing fenceposts */
  for (;;) {
    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
    p->head = FENCEPOST_HEAD;
    ++nfences;
    if ((char*)(&(nextp->head)) < old_end)
      p = nextp;
    else
      break;
  }
  assert(nfences >= 2);
  /* Insert the rest of old top into a bin as an ordinary free chunk */
  if (csp != old_top) {
    mchunkptr q = (mchunkptr)old_top;
    size_t psize = csp - old_top;
    mchunkptr tn = chunk_plus_offset(q, psize);
    set_free_with_pinuse(q, psize, tn);
    insert_chunk(m, q, psize);
  }
  check_top_chunk(m, m->top);
}
/* -------------------------- System allocation -------------------------- */
/* Get memory from system using MORECORE or MMAP */
/* Obtain more memory for space m and allocate nb bytes from it.
   Tries, in order: direct mmap for huge requests, contiguous MORECORE,
   plain MMAP, then noncontiguous MORECORE. On success the new space is
   merged into an existing segment or added as a new one, and the
   request is served from the (new or extended) top chunk.
   Returns the user pointer or 0 on failure. */
static void* sys_alloc(mstate m, size_t nb) {
  char* tbase = CMFAIL;
  size_t tsize = 0;
  flag_t mmap_flag = 0;
  size_t asize; /* allocation size */
  ensure_initialization();
  /* Directly map large chunks, but only if already initialized */
  if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
    void* mem = mmap_alloc(m, nb);
    if (mem != 0)
      return mem;
  }
  asize = granularity_align(nb + SYS_ALLOC_PADDING);
  if (asize <= nb)
    return 0; /* wraparound */
  if (m->footprint_limit != 0) {
    size_t fp = m->footprint + asize;
    if (fp <= m->footprint || fp > m->footprint_limit)
      return 0;
  }
  /*
    Try getting memory in any of three ways (in most-preferred to
    least-preferred order):
    1. A call to MORECORE that can normally contiguously extend memory.
       (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
       or main space is mmapped or a previous contiguous call failed)
    2. A call to MMAP new space (disabled if not HAVE_MMAP).
       Note that under the default settings, if MORECORE is unable to
       fulfill a request, and HAVE_MMAP is true, then mmap is
       used as a noncontiguous system allocator. This is a useful backup
       strategy for systems with holes in address spaces -- in this case
       sbrk cannot contiguously expand the heap, but mmap may be able to
       find space.
    3. A call to MORECORE that cannot usually contiguously extend memory.
       (disabled if not HAVE_MORECORE)
    In all cases, we need to request enough bytes from system to ensure
    we can malloc nb bytes upon success, so pad with enough space for
    top_foot, plus alignment-pad to make sure we don't lose bytes if
    not on boundary, and round this up to a granularity unit.
  */
  if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
    char* br = CMFAIL;
    size_t ssize = asize; /* sbrk call size */
    msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
    ACQUIRE_MALLOC_GLOBAL_LOCK();
    if (ss == 0) {  /* First time through or recovery */
      char* base = (char*)CALL_MORECORE(0);
      if (base != CMFAIL) {
        size_t fp;
        /* Adjust to end on a page boundary */
        if (!is_page_aligned(base))
          ssize += (page_align((size_t)base) - (size_t)base);
        fp = m->footprint + ssize; /* recheck limits */
        if (ssize > nb && ssize < HALF_MAX_SIZE_T &&
            (m->footprint_limit == 0 ||
             (fp > m->footprint && fp <= m->footprint_limit)) &&
            (br = (char*)(CALL_MORECORE(ssize))) == base) {
          tbase = base;
          tsize = ssize;
        }
      }
    }
    else {
      /* Subtract out existing available top space from MORECORE request. */
      ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
      /* Use mem here only if it did continuously extend old space */
      if (ssize < HALF_MAX_SIZE_T &&
          (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) {
        tbase = br;
        tsize = ssize;
      }
    }
    if (tbase == CMFAIL) {    /* Cope with partial failure */
      if (br != CMFAIL) {    /* Try to use/extend the space we did get */
        if (ssize < HALF_MAX_SIZE_T &&
            ssize < nb + SYS_ALLOC_PADDING) {
          size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize);
          if (esize < HALF_MAX_SIZE_T) {
            char* end = (char*)CALL_MORECORE(esize);
            if (end != CMFAIL)
              ssize += esize;
            else {            /* Can't use; try to release */
              (void) CALL_MORECORE(-ssize);
              br = CMFAIL;
            }
          }
        }
      }
      if (br != CMFAIL) {    /* Use the space we did get */
        tbase = br;
        tsize = ssize;
      }
      else
        disable_contiguous(m); /* Don't try contiguous path in the future */
    }
    RELEASE_MALLOC_GLOBAL_LOCK();
  }
  if (HAVE_MMAP && tbase == CMFAIL) {  /* Try MMAP */
    char* mp = (char*)(CALL_MMAP(asize));
    if (mp != CMFAIL) {
      tbase = mp;
      tsize = asize;
      mmap_flag = USE_MMAP_BIT;
    }
  }
  if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
    if (asize < HALF_MAX_SIZE_T) {
      char* br = CMFAIL;
      char* end = CMFAIL;
      ACQUIRE_MALLOC_GLOBAL_LOCK();
      br = (char*)(CALL_MORECORE(asize));
      end = (char*)(CALL_MORECORE(0));
      RELEASE_MALLOC_GLOBAL_LOCK();
      if (br != CMFAIL && end != CMFAIL && br < end) {
        size_t ssize = end - br;
        if (ssize > nb + TOP_FOOT_SIZE) {
          tbase = br;
          tsize = ssize;
        }
      }
    }
  }
  if (tbase != CMFAIL) {
    if ((m->footprint += tsize) > m->max_footprint)
      m->max_footprint = m->footprint;
    if (!is_initialized(m)) { /* first-time initialization */
      if (m->least_addr == 0 || tbase < m->least_addr)
        m->least_addr = tbase;
      m->seg.base = tbase;
      m->seg.size = tsize;
      m->seg.sflags = mmap_flag;
      m->magic = mparams.magic;
      m->release_checks = MAX_RELEASE_CHECK_RATE;
      init_bins(m);
#if !ONLY_MSPACES
      if (is_global(m))
        init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
      else
#endif
      {
        /* Offset top by embedded malloc_state */
        mchunkptr mn = next_chunk(mem2chunk(m));
        init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
      }
    }
    else {
      /* Try to merge with an existing segment */
      msegmentptr sp = &m->seg;
      /* Only consider most recent segment if traversal suppressed */
      while (sp != 0 && tbase != sp->base + sp->size)
        sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
      if (sp != 0 &&
          !is_extern_segment(sp) &&
          (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
          segment_holds(sp, m->top)) { /* append */
        sp->size += tsize;
        init_top(m, m->top, m->topsize + tsize);
      }
      else {
        if (tbase < m->least_addr)
          m->least_addr = tbase;
        sp = &m->seg;
        while (sp != 0 && sp->base != tbase + tsize)
          sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
        if (sp != 0 &&
            !is_extern_segment(sp) &&
            (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
          /* new space sits directly below an old segment: prepend */
          char* oldbase = sp->base;
          sp->base = tbase;
          sp->size += tsize;
          return prepend_alloc(m, tbase, oldbase, nb);
        }
        else
          add_segment(m, tbase, tsize, mmap_flag);
      }
    }
    if (nb < m->topsize) { /* Allocate from new or extended top space */
      size_t rsize = m->topsize -= nb;
      mchunkptr p = m->top;
      mchunkptr r = m->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(m, p, nb);
      check_top_chunk(m, m->top);
      check_malloced_chunk(m, chunk2mem(p), nb);
      return chunk2mem(p);
    }
  }
  MALLOC_FAILURE_ACTION;
  return 0;
}
/* ----------------------- system deallocation -------------------------- */
/* Unmap and unlink any mmapped segments that don't contain used chunks */
/* A segment can be released when its single chunk is free and spans the
   whole segment (minus overhead). Returns total bytes released and
   resets m->release_checks based on the number of segments seen. */
static size_t release_unused_segments(mstate m) {
  size_t released = 0;
  int nsegs = 0;
  msegmentptr pred = &m->seg;
  msegmentptr sp = pred->next;
  while (sp != 0) {
    char* base = sp->base;
    size_t size = sp->size;
    msegmentptr next = sp->next;
    ++nsegs;
    if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
      mchunkptr p = align_as_chunk(base);
      size_t psize = chunksize(p);
      /* Can unmap if first chunk holds entire segment and not pinned */
      if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
        tchunkptr tp = (tchunkptr)p;
        assert(segment_holds(sp, (char*)sp));
        if (p == m->dv) {              /* first take p out of bin/dv */
          m->dv = 0;
          m->dvsize = 0;
        }
        else {
          unlink_large_chunk(m, tp);
        }
        if (CALL_MUNMAP(base, size) == 0) {
          released += size;
          m->footprint -= size;
          /* unlink obsoleted record */
          sp = pred;
          sp->next = next;
        }
        else { /* back out if cannot unmap */
          insert_large_chunk(m, tp, psize);
        }
      }
    }
    if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
      break;
    pred = sp;
    sp = next;
  }
  /* Reset check counter */
  m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)?
                       (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE);
  return released;
}
/* Give back unused top-chunk memory to the system, keeping at least
   `pad` bytes (plus segment overhead) available. Shrinks via mremap /
   munmap for mmapped segments or negative MORECORE for sbrk-style
   heaps, then releases any whole unused mmapped segments.
   Returns 1 if any memory was released, else 0. */
static int sys_trim(mstate m, size_t pad) {
  size_t released = 0;
  ensure_initialization();
  if (pad < MAX_REQUEST && is_initialized(m)) {
    pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
    if (m->topsize > pad) {
      /* Shrink top space in granularity-size units, keeping at least one */
      size_t unit = mparams.granularity;
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
                      SIZE_T_ONE) * unit;
      msegmentptr sp = segment_holding(m, (char*)m->top);
      if (!is_extern_segment(sp)) {
        if (is_mmapped_segment(sp)) {
          if (HAVE_MMAP &&
              sp->size >= extra &&
              !has_segment_link(m, sp)) { /* can't shrink if pinned */
            size_t newsize = sp->size - extra;
            (void)newsize; /* placate people compiling -Wunused-variable */
            /* Prefer mremap, fall back to munmap */
            if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
                (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
              released = extra;
            }
          }
        }
        else if (HAVE_MORECORE) {
          if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
            extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
          ACQUIRE_MALLOC_GLOBAL_LOCK();
          {
            /* Make sure end of memory is where we last set it. */
            char* old_br = (char*)(CALL_MORECORE(0));
            if (old_br == sp->base + sp->size) {
              char* rel_br = (char*)(CALL_MORECORE(-extra));
              char* new_br = (char*)(CALL_MORECORE(0));
              if (rel_br != CMFAIL && new_br < old_br)
                released = old_br - new_br;
            }
          }
          RELEASE_MALLOC_GLOBAL_LOCK();
        }
      }
      if (released != 0) {
        sp->size -= released;
        m->footprint -= released;
        init_top(m, m->top, m->topsize - released);
        check_top_chunk(m, m->top);
      }
    }
    /* Unmap any unused mmapped segments */
    if (HAVE_MMAP)
      released += release_unused_segments(m);
    /* On failure, disable autotrim to avoid repeated failed future calls */
    if (released == 0 && m->topsize > m->trim_check)
      m->trim_check = MAX_SIZE_T;
  }
  return (released != 0)? 1 : 0;
}
/* Consolidate and bin a chunk. Differs from exported versions
   of free mainly in that the chunk need not be marked as inuse.
*/
/* Coalesce chunk p (psize bytes) with free neighbors and either merge
   it into top/dv, unmap it (mmapped chunks), or bin it. */
static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
  mchunkptr next = chunk_plus_offset(p, psize);
  if (!pinuse(p)) {                    /* previous chunk is free: merge back */
    mchunkptr prev;
    size_t prevsize = p->prev_foot;
    if (is_mmapped(p)) {               /* whole region is mmapped: unmap it */
      psize += prevsize + MMAP_FOOT_PAD;
      if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
        m->footprint -= psize;
      return;
    }
    prev = chunk_minus_offset(p, prevsize);
    psize += prevsize;
    p = prev;
    if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */
      if (p != m->dv) {
        unlink_chunk(m, p, prevsize);
      }
      else if ((next->head & INUSE_BITS) == INUSE_BITS) {
        /* next is in use: merged chunk simply becomes the bigger dv */
        m->dvsize = psize;
        set_free_with_pinuse(p, psize, next);
        return;
      }
    }
    else {
      CORRUPTION_ERROR_ACTION(m);
      return;
    }
  }
  if (RTCHECK(ok_address(m, next))) {
    if (!cinuse(next)) {  /* consolidate forward */
      if (next == m->top) {
        size_t tsize = m->topsize += psize;
        m->top = p;
        p->head = tsize | PINUSE_BIT;
        if (p == m->dv) {              /* dv was absorbed into top */
          m->dv = 0;
          m->dvsize = 0;
        }
        return;
      }
      else if (next == m->dv) {
        size_t dsize = m->dvsize += psize;
        m->dv = p;
        set_size_and_pinuse_of_free_chunk(p, dsize);
        return;
      }
      else {
        size_t nsize = chunksize(next);
        psize += nsize;
        unlink_chunk(m, next, nsize);
        set_size_and_pinuse_of_free_chunk(p, psize);
        if (p == m->dv) {              /* p was dv: just record new size */
          m->dvsize = psize;
          return;
        }
      }
    }
    else {
      set_free_with_pinuse(p, psize, next);
    }
    insert_chunk(m, p, psize);
  }
  else {
    CORRUPTION_ERROR_ACTION(m);
  }
}
/* ---------------------------- malloc --------------------------- */
/* allocate a large request from the best fitting chunk in a treebin */
/* Searches the treebin trie for the chunk with the smallest size >= nb,
   unlinks and splits it. Returns user memory, or 0 when no tree chunk
   fits better than the current dv (so the caller can use dv instead). */
static void* tmalloc_large(mstate m, size_t nb) {
  tchunkptr v = 0;
  size_t rsize = -nb; /* Unsigned negation */
  tchunkptr t;
  bindex_t idx;
  compute_tree_index(nb, idx);
  if ((t = *treebin_at(m, idx)) != 0) {
    /* Traverse tree for this bin looking for node with size == nb */
    size_t sizebits = nb << leftshift_for_tree_index(idx);
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
    for (;;) {
      tchunkptr rt;
      size_t trem = chunksize(t) - nb;
      if (trem < rsize) {              /* t is the best fit so far */
        v = t;
        if ((rsize = trem) == 0)       /* exact fit: stop searching */
          break;
      }
      rt = t->child[1];
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
      if (rt != 0 && rt != t)
        rst = rt;
      if (t == 0) {
        t = rst; /* set t to least subtree holding sizes > nb */
        break;
      }
      sizebits <<= 1;
    }
  }
  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
    if (leftbits != 0) {
      bindex_t i;
      binmap_t leastbit = least_bit(leftbits);
      compute_bit2idx(leastbit, i);
      t = *treebin_at(m, i);
    }
  }
  while (t != 0) { /* find smallest of tree or subtree */
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
    t = leftmost_child(t);
  }
  /* If dv is a better fit, return 0 so malloc will use it */
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
    if (RTCHECK(ok_address(m, v))) { /* split */
      mchunkptr r = chunk_plus_offset(v, nb);
      assert(chunksize(v) == rsize + nb);
      if (RTCHECK(ok_next(v, r))) {
        unlink_large_chunk(m, v);
        if (rsize < MIN_CHUNK_SIZE)    /* remainder too small to split off */
          set_inuse_and_pinuse(m, v, (rsize + nb));
        else {
          set_size_and_pinuse_of_inuse_chunk(m, v, nb);
          set_size_and_pinuse_of_free_chunk(r, rsize);
          insert_chunk(m, r, rsize);
        }
        return chunk2mem(v);
      }
    }
    CORRUPTION_ERROR_ACTION(m);
  }
  return 0;
}
/* allocate a small request from the best fitting chunk in a treebin */
/* Called only when no smallbin fits and the treemap is nonzero: takes
   the smallest tree chunk, splits off nb bytes, and makes the remainder
   the new dv. Returns user memory or 0 on corruption. */
static void* tmalloc_small(mstate m, size_t nb) {
  tchunkptr t, v;
  size_t rsize;
  bindex_t i;
  binmap_t leastbit = least_bit(m->treemap);
  compute_bit2idx(leastbit, i);        /* smallest non-empty treebin */
  v = t = *treebin_at(m, i);
  rsize = chunksize(t) - nb;
  while ((t = leftmost_child(t)) != 0) { /* descend to smallest chunk */
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
  }
  if (RTCHECK(ok_address(m, v))) {
    mchunkptr r = chunk_plus_offset(v, nb);
    assert(chunksize(v) == rsize + nb);
    if (RTCHECK(ok_next(v, r))) {
      unlink_large_chunk(m, v);
      if (rsize < MIN_CHUNK_SIZE)      /* remainder too small to split off */
        set_inuse_and_pinuse(m, v, (rsize + nb));
      else {
        set_size_and_pinuse_of_inuse_chunk(m, v, nb);
        set_size_and_pinuse_of_free_chunk(r, rsize);
        replace_dv(m, r, rsize);       /* remainder becomes new dv */
      }
      return chunk2mem(v);
    }
  }
  CORRUPTION_ERROR_ACTION(m);
  return 0;
}
#if !ONLY_MSPACES
/* Allocate at least `bytes` bytes from the global space gm.
   Returns a pointer aligned for any type, or 0 on failure. */
void* dlmalloc(size_t bytes) {
  /*
    Basic algorithm:
    If a small request (< 256 bytes minus per-chunk overhead):
    1. If one exists, use a remainderless chunk in associated smallbin.
       (Remainderless means that there are too few excess bytes to
       represent as a chunk.)
    2. If it is big enough, use the dv chunk, which is normally the
       chunk adjacent to the one used for the most recent small request.
    3. If one exists, split the smallest available chunk in a bin,
       saving remainder in dv.
    4. If it is big enough, use the top chunk.
    5. If available, get memory from system and use it
    Otherwise, for a large request:
    1. Find the smallest available binned chunk that fits, and use it
       if it is better fitting than dv chunk, splitting if necessary.
    2. If better fitting than any binned chunk, use the dv chunk.
    3. If it is big enough, use the top chunk.
    4. If request size >= mmap threshold, try to directly mmap this chunk.
    5. If available, get memory from system and use it
    The ugly goto's here ensure that postaction occurs along all paths.
  */
#if USE_LOCKS
  ensure_initialization(); /* initialize in sys_alloc if not using locks */
#endif
  if (!PREACTION(gm)) {
    void* mem;
    size_t nb;                         /* padded request size */
    if (bytes <= MAX_SMALL_REQUEST) {
      bindex_t idx;
      binmap_t smallbits;
      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
      idx = small_index(nb);
      smallbits = gm->smallmap >> idx;
      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
        mchunkptr b, p;
        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
        b = smallbin_at(gm, idx);
        p = b->fd;
        assert(chunksize(p) == small_index2size(idx));
        unlink_first_small_chunk(gm, b, p, idx);
        set_inuse_and_pinuse(gm, p, small_index2size(idx));
        mem = chunk2mem(p);
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      }
      else if (nb > gm->dvsize) {
        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
          mchunkptr b, p, r;
          size_t rsize;
          bindex_t i;
          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
          binmap_t leastbit = least_bit(leftbits);
          compute_bit2idx(leastbit, i);
          b = smallbin_at(gm, i);
          p = b->fd;
          assert(chunksize(p) == small_index2size(i));
          unlink_first_small_chunk(gm, b, p, i);
          rsize = small_index2size(i) - nb;
          /* Fit here cannot be remainderless if 4byte sizes */
          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
            set_inuse_and_pinuse(gm, p, small_index2size(i));
          else {
            set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
            r = chunk_plus_offset(p, nb);
            set_size_and_pinuse_of_free_chunk(r, rsize);
            replace_dv(gm, r, rsize);  /* remainder becomes dv */
          }
          mem = chunk2mem(p);
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }
        else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }
      }
    }
    else if (bytes >= MAX_REQUEST)
      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
    else {
      nb = pad_request(bytes);
      if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      }
    }
    if (nb <= gm->dvsize) {            /* serve from designated victim */
      size_t rsize = gm->dvsize - nb;
      mchunkptr p = gm->dv;
      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
        mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
        gm->dvsize = rsize;
        set_size_and_pinuse_of_free_chunk(r, rsize);
        set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
      }
      else { /* exhaust dv */
        size_t dvs = gm->dvsize;
        gm->dvsize = 0;
        gm->dv = 0;
        set_inuse_and_pinuse(gm, p, dvs);
      }
      mem = chunk2mem(p);
      check_malloced_chunk(gm, mem, nb);
      goto postaction;
    }
    else if (nb < gm->topsize) { /* Split top */
      size_t rsize = gm->topsize -= nb;
      mchunkptr p = gm->top;
      mchunkptr r = gm->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
      mem = chunk2mem(p);
      check_top_chunk(gm, gm->top);
      check_malloced_chunk(gm, mem, nb);
      goto postaction;
    }
    mem = sys_alloc(gm, nb);           /* last resort: grow the heap */
  postaction:
    POSTACTION(gm);
    return mem;
  }
  return 0;
}
/* ---------------------------- free --------------------------- */
/* Free memory previously returned by dlmalloc/dlrealloc/etc.
   A null `mem` is a no-op. Coalesces with free neighbors, merges into
   top or dv where possible, otherwise bins the result; may trim top or
   release unused segments afterward. */
void dlfree(void* mem) {
  /*
    Consolidate freed chunks with preceeding or succeeding bordering
    free chunks, if they exist, and then place in a bin. Intermixed
    with special cases for top, dv, mmapped chunks, and usage errors.
  */
  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
#if FOOTERS
    mstate fm = get_mstate_for(p);     /* recover owning space from footer */
    if (!ok_magic(fm)) {
      USAGE_ERROR_ACTION(fm, p);
      return;
    }
#else /* FOOTERS */
#define fm gm
#endif /* FOOTERS */
    if (!PREACTION(fm)) {
      check_inuse_chunk(fm, p);
      if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
        size_t psize = chunksize(p);
        mchunkptr next = chunk_plus_offset(p, psize);
        if (!pinuse(p)) {              /* previous chunk is free */
          size_t prevsize = p->prev_foot;
          if (is_mmapped(p)) {         /* directly mmapped chunk: unmap */
            psize += prevsize + MMAP_FOOT_PAD;
            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
              fm->footprint -= psize;
            goto postaction;
          }
          else {
            mchunkptr prev = chunk_minus_offset(p, prevsize);
            psize += prevsize;
            p = prev;
            if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
              if (p != fm->dv) {
                unlink_chunk(fm, p, prevsize);
              }
              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
                /* next in use: merged chunk just becomes the bigger dv */
                fm->dvsize = psize;
                set_free_with_pinuse(p, psize, next);
                goto postaction;
              }
            }
            else
              goto erroraction;
          }
        }
        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
          if (!cinuse(next)) {  /* consolidate forward */
            if (next == fm->top) {
              size_t tsize = fm->topsize += psize;
              fm->top = p;
              p->head = tsize | PINUSE_BIT;
              if (p == fm->dv) {       /* dv was absorbed into top */
                fm->dv = 0;
                fm->dvsize = 0;
              }
              if (should_trim(fm, tsize))
                sys_trim(fm, 0);
              goto postaction;
            }
            else if (next == fm->dv) {
              size_t dsize = fm->dvsize += psize;
              fm->dv = p;
              set_size_and_pinuse_of_free_chunk(p, dsize);
              goto postaction;
            }
            else {
              size_t nsize = chunksize(next);
              psize += nsize;
              unlink_chunk(fm, next, nsize);
              set_size_and_pinuse_of_free_chunk(p, psize);
              if (p == fm->dv) {
                fm->dvsize = psize;
                goto postaction;
              }
            }
          }
          else
            set_free_with_pinuse(p, psize, next);
          if (is_small(psize)) {
            insert_small_chunk(fm, p, psize);
            check_free_chunk(fm, p);
          }
          else {
            tchunkptr tp = (tchunkptr)p;
            insert_large_chunk(fm, tp, psize);
            check_free_chunk(fm, p);
            /* periodically scan for whole segments to give back */
            if (--fm->release_checks == 0)
              release_unused_segments(fm);
          }
          goto postaction;
        }
      }
    erroraction:
      USAGE_ERROR_ACTION(fm, p);
    postaction:
      POSTACTION(fm);
    }
  }
#if !FOOTERS
#undef fm
#endif /* FOOTERS */
}
/* Allocate zeroed space for n_elements objects of elem_size bytes each.
   An overflowing n_elements*elem_size forces a downstream allocation
   failure by requesting MAX_SIZE_T. */
void* dlcalloc(size_t n_elements, size_t elem_size) {
  size_t req = 0;
  void* mem;
  if (n_elements != 0) {
    /* Only bother with the division check when either operand is large
       enough that the product could have wrapped. */
    int may_overflow = (((n_elements | elem_size) & ~(size_t)0xffff) != 0);
    req = n_elements * elem_size;
    if (may_overflow && req / n_elements != elem_size)
      req = MAX_SIZE_T; /* force downstream failure on overflow */
  }
  mem = dlmalloc(req);
  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
    memset(mem, 0, req);
  return mem;
}
#endif /* !ONLY_MSPACES */
/* ------------ Internal support for realloc, memalign, etc -------------- */
/* Try to realloc; only in-place unless can_move true */
/* Try to resize chunk p to hold at least nb bytes without moving its
   payload (or, for mmapped chunks when can_move is nonzero, by letting
   mmap_resize remap it).  Returns the resulting chunk on success, 0 if
   no in-place strategy applied; caller handles the alloc-copy-free
   fallback.  Caller holds the lock. */
static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
                                   int can_move) {
  mchunkptr newp = 0;
  size_t oldsize = chunksize(p);
  mchunkptr next = chunk_plus_offset(p, oldsize);
  if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
              ok_next(p, next) && ok_pinuse(next))) {
    if (is_mmapped(p)) {
      newp = mmap_resize(m, p, nb, can_move);
    }
    else if (oldsize >= nb) {             /* already big enough */
      size_t rsize = oldsize - nb;
      if (rsize >= MIN_CHUNK_SIZE) {      /* split off remainder */
        mchunkptr r = chunk_plus_offset(p, nb);
        set_inuse(m, p, nb);
        set_inuse(m, r, rsize);
        dispose_chunk(m, r, rsize);
      }
      newp = p;
    }
    else if (next == m->top) {            /* extend into top */
      if (oldsize + m->topsize > nb) {
        size_t newsize = oldsize + m->topsize;
        size_t newtopsize = newsize - nb;
        mchunkptr newtop = chunk_plus_offset(p, nb);
        set_inuse(m, p, nb);
        newtop->head = newtopsize |PINUSE_BIT;
        m->top = newtop;
        m->topsize = newtopsize;
        newp = p;
      }
    }
    else if (next == m->dv) {             /* extend into dv */
      size_t dvs = m->dvsize;
      if (oldsize + dvs >= nb) {
        size_t dsize = oldsize + dvs - nb;
        if (dsize >= MIN_CHUNK_SIZE) {    /* keep a smaller dv remainder */
          mchunkptr r = chunk_plus_offset(p, nb);
          mchunkptr n = chunk_plus_offset(r, dsize);
          set_inuse(m, p, nb);
          set_size_and_pinuse_of_free_chunk(r, dsize);
          clear_pinuse(n);
          m->dvsize = dsize;
          m->dv = r;
        }
        else { /* exhaust dv */
          size_t newsize = oldsize + dvs;
          set_inuse(m, p, newsize);
          m->dvsize = 0;
          m->dv = 0;
        }
        newp = p;
      }
    }
    else if (!cinuse(next)) {             /* extend into next free chunk */
      size_t nextsize = chunksize(next);
      if (oldsize + nextsize >= nb) {
        size_t rsize = oldsize + nextsize - nb;
        unlink_chunk(m, next, nextsize);
        if (rsize < MIN_CHUNK_SIZE) {     /* absorb all of next */
          size_t newsize = oldsize + nextsize;
          set_inuse(m, p, newsize);
        }
        else {                            /* split off remainder */
          mchunkptr r = chunk_plus_offset(p, nb);
          set_inuse(m, p, nb);
          set_inuse(m, r, rsize);
          dispose_chunk(m, r, rsize);
        }
        newp = p;
      }
    }
  }
  else {
    USAGE_ERROR_ACTION(m, chunk2mem(p));
  }
  return newp;
}
/* Allocate bytes with at least the requested alignment (rounded up to a
   power of two >= MIN_CHUNK_SIZE).  Over-allocates enough slack that an
   aligned spot always exists inside the chunk, then gives back the
   leading and trailing remainders. */
static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
  void* mem = 0;
  if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
    alignment = MIN_CHUNK_SIZE;
  if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
    size_t a = MALLOC_ALIGNMENT << 1;
    while (a < alignment) a <<= 1;
    alignment = a;
  }
  if (bytes >= MAX_REQUEST - alignment) {
    if (m != 0) { /* Test isn't needed but avoids compiler warning */
      MALLOC_FAILURE_ACTION;
    }
  }
  else {
    size_t nb = request2size(bytes);
    /* enough slack to always find an aligned spot with room to split */
    size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
    mem = internal_malloc(m, req);
    if (mem != 0) {
      mchunkptr p = mem2chunk(mem);
      if (PREACTION(m))
        return 0;
      if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */
        /*
          Find an aligned spot inside chunk. Since we need to give
          back leading space in a chunk of at least MIN_CHUNK_SIZE, if
          the first calculation places us at a spot with less than
          MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
          We've allocated enough total room so that this is always
          possible.
        */
        char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment -
                                                       SIZE_T_ONE)) &
                                             -alignment));
        char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
          br : br+alignment;
        mchunkptr newp = (mchunkptr)pos;
        size_t leadsize = pos - (char*)(p);
        size_t newsize = chunksize(p) - leadsize;
        if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
          newp->prev_foot = p->prev_foot + leadsize;
          newp->head = newsize;
        }
        else { /* Otherwise, give back leader, use the rest */
          set_inuse(m, newp, newsize);
          set_inuse(m, p, leadsize);
          dispose_chunk(m, p, leadsize);
        }
        p = newp;
      }
      /* Give back spare room at the end */
      if (!is_mmapped(p)) {
        size_t size = chunksize(p);
        if (size > nb + MIN_CHUNK_SIZE) {
          size_t remainder_size = size - nb;
          mchunkptr remainder = chunk_plus_offset(p, nb);
          set_inuse(m, p, nb);
          set_inuse(m, remainder, remainder_size);
          dispose_chunk(m, remainder, remainder_size);
        }
      }
      mem = chunk2mem(p);
      assert (chunksize(p) >= nb);
      assert(((size_t)mem & (alignment - 1)) == 0);
      check_inuse_chunk(m, p);
      POSTACTION(m);
    }
  }
  return mem;
}
/*
Common support for independent_X routines, handling
all of the combinations that can result.
The opts arg has:
bit 0 set if all elements are same size (using sizes[0])
bit 1 set if elements should be zeroed
*/
/* Allocate n_elements independently-freeable chunks carved out of one
   aggregate chunk; supports dlindependent_calloc/comalloc.  See the
   opts description in the comment above this function. */
static void** ialloc(mstate m,
                     size_t n_elements,
                     size_t* sizes,
                     int opts,
                     void* chunks[]) {
  size_t    element_size;   /* chunksize of each element, if all same */
  size_t    contents_size;  /* total size of elements */
  size_t    array_size;     /* request size of pointer array */
  void*     mem;            /* malloced aggregate space */
  mchunkptr p;              /* corresponding chunk */
  size_t    remainder_size; /* remaining bytes while splitting */
  void**    marray;         /* either "chunks" or malloced ptr array */
  mchunkptr array_chunk;    /* chunk for malloced ptr array */
  flag_t    was_enabled;    /* to disable mmap */
  size_t    size;
  size_t    i;

  ensure_initialization();
  /* compute array length, if needed */
  if (chunks != 0) {
    if (n_elements == 0)
      return chunks; /* nothing to do */
    marray = chunks;
    array_size = 0;
  }
  else {
    /* if empty req, must still return chunk representing empty array */
    if (n_elements == 0)
      return (void**)internal_malloc(m, 0);
    marray = 0;
    array_size = request2size(n_elements * (sizeof(void*)));
  }
  /* compute total element size */
  if (opts & 0x1) { /* all-same-size */
    element_size = request2size(*sizes);
    contents_size = n_elements * element_size;
  }
  else { /* add up all the sizes */
    element_size = 0;
    contents_size = 0;
    for (i = 0; i != n_elements; ++i)
      contents_size += request2size(sizes[i]);
  }
  size = contents_size + array_size;
  /*
     Allocate the aggregate chunk. First disable direct-mmapping so
     malloc won't use it, since we would not be able to later
     free/realloc space internal to a segregated mmap region.
  */
  was_enabled = use_mmap(m);
  disable_mmap(m);
  mem = internal_malloc(m, size - CHUNK_OVERHEAD);
  if (was_enabled)
    enable_mmap(m);
  if (mem == 0)
    return 0;
  if (PREACTION(m)) return 0;
  p = mem2chunk(mem);
  remainder_size = chunksize(p);
  assert(!is_mmapped(p));
  if (opts & 0x2) { /* optionally clear the elements */
    memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
  }
  /* If not provided, allocate the pointer array as final part of chunk */
  if (marray == 0) {
    size_t array_chunk_size;
    array_chunk = chunk_plus_offset(p, contents_size);
    array_chunk_size = remainder_size - contents_size;
    marray = (void**) (chunk2mem(array_chunk));
    set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
    remainder_size = contents_size;
  }
  /* split out elements */
  for (i = 0; ; ++i) {
    marray[i] = chunk2mem(p);
    if (i != n_elements-1) {
      if (element_size != 0)
        size = element_size;
      else
        size = request2size(sizes[i]);
      remainder_size -= size;
      set_size_and_pinuse_of_inuse_chunk(m, p, size);
      p = chunk_plus_offset(p, size);
    }
    else { /* the final element absorbs any overallocation slop */
      set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
      break;
    }
  }

#if DEBUG
  if (marray != chunks) {
    /* final element must have exactly exhausted chunk */
    if (element_size != 0) {
      assert(remainder_size == element_size);
    }
    else {
      assert(remainder_size == request2size(sizes[i]));
    }
    check_inuse_chunk(m, mem2chunk(marray));
  }
  for (i = 0; i != n_elements; ++i)
    check_inuse_chunk(m, mem2chunk(marray[i]));
#endif /* DEBUG */

  POSTACTION(m);
  return marray;
}
/* Try to free all pointers in the given array.
Note: this could be made faster, by delaying consolidation,
at the price of disabling some user integrity checks, We
still optimize some consolidations by combining adjacent
chunks before freeing, which will occur often if allocated
with ialloc or the array is sorted.
*/
/* Free every non-null pointer in array[0..nelem).  Returns the number
   of entries that could not be freed (only possible with FOOTERS, for
   pointers belonging to a different mstate).  Adjacent array entries
   that are also adjacent in memory are coalesced before disposal. */
static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) {
  size_t unfreed = 0;
  if (!PREACTION(m)) {
    void** a;
    void** fence = &(array[nelem]);
    for (a = array; a != fence; ++a) {
      void* mem = *a;
      if (mem != 0) {
        mchunkptr p = mem2chunk(mem);
        size_t psize = chunksize(p);
#if FOOTERS
        if (get_mstate_for(p) != m) {  /* foreign mspace: skip and count */
          ++unfreed;
          continue;
        }
#endif
        check_inuse_chunk(m, p);
        *a = 0;                        /* clear slot against double free */
        if (RTCHECK(ok_address(m, p) && ok_inuse(p))) {
          void ** b = a + 1; /* try to merge with next chunk */
          mchunkptr next = next_chunk(p);
          if (b != fence && *b == chunk2mem(next)) {
            /* next array entry is the adjacent chunk: merge now and let
               the merged chunk be disposed on a later iteration */
            size_t newsize = chunksize(next) + psize;
            set_inuse(m, p, newsize);
            *b = chunk2mem(p);
          }
          else
            dispose_chunk(m, p, psize);
        }
        else {
          CORRUPTION_ERROR_ACTION(m);
          break;
        }
      }
    }
    if (should_trim(m, m->topsize))
      sys_trim(m, 0);
    POSTACTION(m);
  }
  return unfreed;
}
/* Traversal */
#if MALLOC_INSPECT_ALL
/* Walk every chunk of every segment in m, invoking handler(start, end,
   used_bytes, arg) for each; used_bytes is 0 for free chunks.  Caller
   must hold the lock. */
static void internal_inspect_all(mstate m,
                                 void(*handler)(void *start,
                                                void *end,
                                                size_t used_bytes,
                                                void* callback_arg),
                                 void* arg) {
  if (is_initialized(m)) {
    mchunkptr top = m->top;
    msegmentptr s;
    for (s = &m->seg; s != 0; s = s->next) {
      mchunkptr q = align_as_chunk(s->base);
      while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) {
        mchunkptr next = next_chunk(q);
        size_t sz = chunksize(q);
        size_t used;
        void* start;
        if (is_inuse(q)) {
          used = sz - CHUNK_OVERHEAD; /* must not be mmapped */
          start = chunk2mem(q);
        }
        else {
          used = 0;
          if (is_small(sz)) {     /* offset by possible bookkeeping */
            start = (void*)((char*)q + sizeof(struct malloc_chunk));
          }
          else {
            start = (void*)((char*)q + sizeof(struct malloc_tree_chunk));
          }
        }
        if (start < (void*)next)  /* skip if all space is bookkeeping */
          handler(start, next, used, arg);
        if (q == top)
          break;
        q = next;
      }
    }
  }
}
#endif /* MALLOC_INSPECT_ALL */
/* ------------------ Exported realloc, memalign, etc -------------------- */
#if !ONLY_MSPACES
/* Resize the allocation at oldmem to hold at least bytes.  Tries an
   in-place (or mmap-remap) resize first; otherwise allocates a new
   block, copies the payload, and frees the old one. */
void* dlrealloc(void* oldmem, size_t bytes) {
  void* mem = 0;
  if (oldmem == 0) {
    mem = dlmalloc(bytes);     /* realloc(NULL, n) behaves as malloc(n) */
  }
  else if (bytes >= MAX_REQUEST) {
    MALLOC_FAILURE_ACTION;
  }
#ifdef REALLOC_ZERO_BYTES_FREES
  else if (bytes == 0) {
    dlfree(oldmem);            /* optional realloc(p, 0) == free(p) */
  }
#endif /* REALLOC_ZERO_BYTES_FREES */
  else {
    size_t nb = request2size(bytes);
    mchunkptr oldp = mem2chunk(oldmem);
#if ! FOOTERS
    mstate m = gm;
#else /* FOOTERS */
    mstate m = get_mstate_for(oldp);
    if (!ok_magic(m)) {
      USAGE_ERROR_ACTION(m, oldmem);
      return 0;
    }
#endif /* FOOTERS */
    if (!PREACTION(m)) {
      mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); /* may move mmap */
      POSTACTION(m);
      if (newp != 0) {
        check_inuse_chunk(m, newp);
        mem = chunk2mem(newp);
      }
      else {                   /* fall back to alloc-copy-free */
        mem = internal_malloc(m, bytes);
        if (mem != 0) {
          size_t oc = chunksize(oldp) - overhead_for(oldp);
          memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
          internal_free(m, oldmem);
        }
      }
    }
  }
  return mem;
}
/* Like dlrealloc but never moves the payload: returns oldmem when the
   chunk could be resized in place, else 0 (oldmem remains valid). */
void* dlrealloc_in_place(void* oldmem, size_t bytes) {
  void* mem = 0;
  if (oldmem != 0) {
    if (bytes >= MAX_REQUEST) {
      MALLOC_FAILURE_ACTION;
    }
    else {
      size_t nb = request2size(bytes);
      mchunkptr oldp = mem2chunk(oldmem);
#if ! FOOTERS
      mstate m = gm;
#else /* FOOTERS */
      mstate m = get_mstate_for(oldp);
      if (!ok_magic(m)) {
        USAGE_ERROR_ACTION(m, oldmem);
        return 0;
      }
#endif /* FOOTERS */
      if (!PREACTION(m)) {
        /* can_move == 0: only accept results that kept the same chunk */
        mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
        POSTACTION(m);
        if (newp == oldp) {
          check_inuse_chunk(m, newp);
          mem = oldmem;
        }
      }
    }
  }
  return mem;
}
/* Allocate bytes aligned to at least alignment; plain dlmalloc already
   satisfies requests at or below the default alignment. */
void* dlmemalign(size_t alignment, size_t bytes) {
  return (alignment <= MALLOC_ALIGNMENT)
         ? dlmalloc(bytes)
         : internal_memalign(gm, alignment, bytes);
}
/* POSIX-style aligned allocation: stores the result through pp and
   returns 0, EINVAL (bad alignment), or ENOMEM (allocation failed). */
int dlposix_memalign(void** pp, size_t alignment, size_t bytes) {
  void* mem = 0;
  if (alignment == MALLOC_ALIGNMENT) {
    mem = dlmalloc(bytes);
  }
  else {
    size_t quot = alignment / sizeof(void*);
    size_t rem  = alignment % sizeof(void*);
    /* POSIX: alignment must be a power-of-two multiple of sizeof(void*) */
    if (rem != 0 || quot == 0 || (quot & (quot-SIZE_T_ONE)) != 0)
      return EINVAL;
    if (bytes <= MAX_REQUEST - alignment) {
      size_t al = (alignment < MIN_CHUNK_SIZE) ? MIN_CHUNK_SIZE : alignment;
      mem = internal_memalign(gm, al, bytes);
    }
  }
  if (mem == 0)
    return ENOMEM;
  *pp = mem;
  return 0;
}
/* Allocate bytes aligned to the system page size. */
void* dlvalloc(size_t bytes) {
  ensure_initialization();          /* mparams.page_size needs init */
  return dlmemalign(mparams.page_size, bytes);
}
/* Allocate a whole number of pages, page-aligned, covering bytes. */
void* dlpvalloc(size_t bytes) {
  size_t psz;
  ensure_initialization();
  psz = mparams.page_size;
  /* round the request up to the next page multiple */
  return dlmemalign(psz, (bytes + psz - SIZE_T_ONE) & ~(psz - SIZE_T_ONE));
}
/* Allocate n_elements zeroed chunks of elem_size bytes each, returning
   an array of their payload pointers (in chunks[] if supplied). */
void** dlindependent_calloc(size_t n_elements, size_t elem_size,
                            void* chunks[]) {
  size_t sz = elem_size; /* serves as 1-element array */
  /* opts 3: all elements same size (bit 0) and zero-filled (bit 1) */
  return ialloc(gm, n_elements, &sz, 3, chunks);
}
/* Allocate n_elements chunks with per-element sizes[], returning an
   array of their payload pointers (in chunks[] if supplied). */
void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
                              void* chunks[]) {
  /* opts 0: individual sizes, no zeroing */
  return ialloc(gm, n_elements, sizes, 0, chunks);
}
/* Free all pointers in array; returns the count that could not be freed. */
size_t dlbulk_free(void* array[], size_t nelem) {
  return internal_bulk_free(gm, array, nelem);
}
#if MALLOC_INSPECT_ALL
/* Invoke handler on every in-use and free region of the global mspace,
   under the allocator lock. */
void dlmalloc_inspect_all(void(*handler)(void *start,
                                         void *end,
                                         size_t used_bytes,
                                         void* callback_arg),
                          void* arg) {
  ensure_initialization();
  if (!PREACTION(gm)) {
    internal_inspect_all(gm, handler, arg);
    POSTACTION(gm);
  }
}
#endif /* MALLOC_INSPECT_ALL */
/* Release unused memory at the top of the heap back to the system,
   keeping at least pad bytes of slack.  Returns 1 if anything was
   released, 0 otherwise. */
int dlmalloc_trim(size_t pad) {
  int released = 0;
  ensure_initialization();
  if (!PREACTION(gm)) {
    released = sys_trim(gm, pad);
    POSTACTION(gm);
  }
  return released;
}
/* Total bytes currently obtained from the system for the global mspace. */
size_t dlmalloc_footprint(void) {
  return gm->footprint;
}
/* Peak value of dlmalloc_footprint over the lifetime of the program. */
size_t dlmalloc_max_footprint(void) {
  return gm->max_footprint;
}
/* Current footprint limit; a stored 0 means "no limit" and is reported
   as MAX_SIZE_T. */
size_t dlmalloc_footprint_limit(void) {
  size_t limit = gm->footprint_limit;
  if (limit == 0)
    return MAX_SIZE_T;
  return limit;
}
/* Set the footprint limit for the global mspace and return the value
   actually stored.  bytes == 0 requests the minimal possible limit;
   bytes == MAX_SIZE_T disables the limit (stored as 0). */
size_t dlmalloc_set_footprint_limit(size_t bytes) {
  size_t result; /* invert sense of 0 */
  if (bytes == 0)
    result = granularity_align(1); /* Use minimal size */
  /* BUGFIX: this must be an else-if.  Previously bytes == 0 fell
     through to the final else and result was clobbered with
     granularity_align(0) == 0, i.e. "no limit" instead of minimal. */
  else if (bytes == MAX_SIZE_T)
    result = 0;                    /* disable */
  else
    result = granularity_align(bytes);
  return gm->footprint_limit = result;
}
#if !NO_MALLINFO
/* mallinfo-compatible statistics snapshot for the global mspace. */
struct mallinfo dlmallinfo(void) {
  return internal_mallinfo(gm);
}
#endif /* NO_MALLINFO */
#if !NO_MALLOC_STATS
/* Print summary allocation statistics for the global mspace.
   Declared with (void): empty parentheses in a definition do not
   provide a prototype before C23. */
void dlmalloc_stats(void) {
  internal_malloc_stats(gm);
}
#endif /* NO_MALLOC_STATS */
/* mallopt analog: adjust a tunable parameter; returns 1 on success. */
int dlmallopt(int param_number, int value) {
  return change_mparam(param_number, value);
}
/* Number of payload bytes actually available in the allocation at mem;
   0 for a null pointer or a chunk that is not in use. */
size_t dlmalloc_usable_size(void* mem) {
  if (mem == 0)
    return 0;
  {
    mchunkptr p = mem2chunk(mem);
    return is_inuse(p) ? chunksize(p) - overhead_for(p) : 0;
  }
}
#endif /* !ONLY_MSPACES */
/* ----------------------------- user mspaces ---------------------------- */
#if MSPACES
/* Construct a fresh mstate at the start of [tbase, tbase+tsize): the
   state struct itself occupies an in-use chunk at the front and the
   remainder becomes the initial top chunk. */
static mstate init_user_mstate(char* tbase, size_t tsize) {
  size_t msize = pad_request(sizeof(struct malloc_state));
  mchunkptr mn;
  mchunkptr msp = align_as_chunk(tbase);
  mstate m = (mstate)(chunk2mem(msp));
  memset(m, 0, msize);
  (void)INITIAL_LOCK(&m->mutex);
  msp->head = (msize|INUSE_BITS);   /* state struct lives in an inuse chunk */
  m->seg.base = m->least_addr = tbase;
  m->seg.size = m->footprint = m->max_footprint = tsize;
  m->magic = mparams.magic;
  m->release_checks = MAX_RELEASE_CHECK_RATE;
  m->mflags = mparams.default_mflags;
  m->extp = 0;
  m->exts = 0;
  disable_contiguous(m);
  init_bins(m);
  mn = next_chunk(mem2chunk(m));
  init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
  check_top_chunk(m, m->top);
  return m;
}
/* Create an independent mspace backed by a freshly mmapped region of
   at least capacity bytes (default granularity if capacity is 0).
   Returns 0 on failure. */
mspace create_mspace(size_t capacity, int locked) {
  mstate m = 0;
  size_t msize;
  ensure_initialization();
  msize = pad_request(sizeof(struct malloc_state));
  /* reject capacities so large that adding overhead would wrap */
  if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
    size_t rs = ((capacity == 0)? mparams.granularity :
                 (capacity + TOP_FOOT_SIZE + msize));
    size_t tsize = granularity_align(rs);
    char* tbase = (char*)(CALL_MMAP(tsize));
    if (tbase != CMFAIL) {
      m = init_user_mstate(tbase, tsize);
      m->seg.sflags = USE_MMAP_BIT; /* segment is unmapped on destroy */
      set_lock(m, locked);
    }
  }
  return (mspace)m;
}
/* Create an mspace inside caller-supplied memory [base, base+capacity).
   The memory is never unmapped by destroy_mspace (EXTERN_BIT).
   Returns 0 if capacity is too small or implausibly large. */
mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
  mstate m = 0;
  size_t msize;
  ensure_initialization();
  msize = pad_request(sizeof(struct malloc_state));
  if (capacity > msize + TOP_FOOT_SIZE &&
      capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
    m = init_user_mstate((char*)base, capacity);
    m->seg.sflags = EXTERN_BIT;   /* caller owns the backing memory */
    set_lock(m, locked);
  }
  return (mspace)m;
}
/* Control whether large allocations bypass the mspace via direct mmap.
   When tracking is enabled, direct mmap is disabled so every chunk
   stays inside the mspace's segments.  Returns the previous tracking
   state (1 if mmap was disabled). */
int mspace_track_large_chunks(mspace msp, int enable) {
  int was_tracking = 0;
  mstate ms = (mstate)msp;
  if (!PREACTION(ms)) {
    if (!use_mmap(ms))
      was_tracking = 1;
    if (enable)
      disable_mmap(ms);
    else
      enable_mmap(ms);
    POSTACTION(ms);
  }
  return was_tracking;
}
/* Destroy an mspace, unmapping every segment the mspace mmapped itself
   (extern segments are left alone).  Returns bytes released. */
size_t destroy_mspace(mspace msp) {
  size_t freed = 0;
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    msegmentptr sp = &ms->seg;
    (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */
    while (sp != 0) {
      char* base = sp->base;
      size_t size = sp->size;
      flag_t flag = sp->sflags;
      (void)base; /* placate people compiling -Wunused-variable */
      sp = sp->next;        /* read the link before the segment may vanish */
      if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
          CALL_MUNMAP(base, size) == 0)
        freed += size;
    }
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return freed;
}
/*
mspace versions of routines are near-clones of the global
versions. This is not so nice but better than the alternatives.
*/
/* mspace analog of dlmalloc: allocate at least bytes from mspace msp.
   Search order: exact smallbin fit, next smallbin / small tree chunk,
   dv, large tree chunk, top, then sys_alloc. */
void* mspace_malloc(mspace msp, size_t bytes) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  if (!PREACTION(ms)) {
    void* mem;
    size_t nb;      /* padded request size */
    if (bytes <= MAX_SMALL_REQUEST) {
      bindex_t idx;
      binmap_t smallbits;
      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
      idx = small_index(nb);
      smallbits = ms->smallmap >> idx;
      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
        mchunkptr b, p;
        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
        b = smallbin_at(ms, idx);
        p = b->fd;
        assert(chunksize(p) == small_index2size(idx));
        unlink_first_small_chunk(ms, b, p, idx);
        set_inuse_and_pinuse(ms, p, small_index2size(idx));
        mem = chunk2mem(p);
        check_malloced_chunk(ms, mem, nb);
        goto postaction;
      }
      else if (nb > ms->dvsize) {
        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
          mchunkptr b, p, r;
          size_t rsize;
          bindex_t i;
          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
          binmap_t leastbit = least_bit(leftbits);
          compute_bit2idx(leastbit, i);
          b = smallbin_at(ms, i);
          p = b->fd;
          assert(chunksize(p) == small_index2size(i));
          unlink_first_small_chunk(ms, b, p, i);
          rsize = small_index2size(i) - nb;
          /* Fit here cannot be remainderless if 4byte sizes */
          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
            set_inuse_and_pinuse(ms, p, small_index2size(i));
          else {
            set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
            r = chunk_plus_offset(p, nb);
            set_size_and_pinuse_of_free_chunk(r, rsize);
            replace_dv(ms, r, rsize);   /* remainder becomes the new dv */
          }
          mem = chunk2mem(p);
          check_malloced_chunk(ms, mem, nb);
          goto postaction;
        }
        else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
          check_malloced_chunk(ms, mem, nb);
          goto postaction;
        }
      }
    }
    else if (bytes >= MAX_REQUEST)
      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
    else {
      nb = pad_request(bytes);
      if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
        check_malloced_chunk(ms, mem, nb);
        goto postaction;
      }
    }
    if (nb <= ms->dvsize) {
      size_t rsize = ms->dvsize - nb;
      mchunkptr p = ms->dv;
      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
        mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
        ms->dvsize = rsize;
        set_size_and_pinuse_of_free_chunk(r, rsize);
        set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
      }
      else { /* exhaust dv */
        size_t dvs = ms->dvsize;
        ms->dvsize = 0;
        ms->dv = 0;
        set_inuse_and_pinuse(ms, p, dvs);
      }
      mem = chunk2mem(p);
      check_malloced_chunk(ms, mem, nb);
      goto postaction;
    }
    else if (nb < ms->topsize) { /* Split top */
      size_t rsize = ms->topsize -= nb;
      mchunkptr p = ms->top;
      mchunkptr r = ms->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
      mem = chunk2mem(p);
      check_top_chunk(ms, ms->top);
      check_malloced_chunk(ms, mem, nb);
      goto postaction;
    }
    mem = sys_alloc(ms, nb);   /* last resort: get memory from the system */
  postaction:
    POSTACTION(ms);
    return mem;
  }
  return 0;
}
/* mspace analog of dlfree: consolidate the freed chunk with free
   neighbors and bin it, with special cases for top, dv, and mmapped
   chunks. */
void mspace_free(mspace msp, void* mem) {
  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
#if FOOTERS
    mstate fm = get_mstate_for(p);  /* owning mstate from footer */
    (void)msp; /* placate people compiling -Wunused */
#else /* FOOTERS */
    mstate fm = (mstate)msp;
#endif /* FOOTERS */
    if (!ok_magic(fm)) {
      USAGE_ERROR_ACTION(fm, p);
      return;
    }
    if (!PREACTION(fm)) {
      check_inuse_chunk(fm, p);
      if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
        size_t psize = chunksize(p);
        mchunkptr next = chunk_plus_offset(p, psize);
        if (!pinuse(p)) {           /* previous chunk free, or p mmapped */
          size_t prevsize = p->prev_foot;
          if (is_mmapped(p)) {      /* direct-mmapped: unmap whole region */
            psize += prevsize + MMAP_FOOT_PAD;
            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
              fm->footprint -= psize;
            goto postaction;
          }
          else {
            mchunkptr prev = chunk_minus_offset(p, prevsize);
            psize += prevsize;
            p = prev;
            if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
              if (p != fm->dv) {
                unlink_chunk(fm, p, prevsize);
              }
              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
                /* merged chunk stays as dv; nothing more to do */
                fm->dvsize = psize;
                set_free_with_pinuse(p, psize, next);
                goto postaction;
              }
            }
            else
              goto erroraction;
          }
        }
        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
          if (!cinuse(next)) {      /* consolidate forward */
            if (next == fm->top) {  /* merge into top chunk */
              size_t tsize = fm->topsize += psize;
              fm->top = p;
              p->head = tsize | PINUSE_BIT;
              if (p == fm->dv) {
                fm->dv = 0;
                fm->dvsize = 0;
              }
              if (should_trim(fm, tsize))
                sys_trim(fm, 0);
              goto postaction;
            }
            else if (next == fm->dv) { /* merge into dv */
              size_t dsize = fm->dvsize += psize;
              fm->dv = p;
              set_size_and_pinuse_of_free_chunk(p, dsize);
              goto postaction;
            }
            else {                  /* merge with an ordinary free chunk */
              size_t nsize = chunksize(next);
              psize += nsize;
              unlink_chunk(fm, next, nsize);
              set_size_and_pinuse_of_free_chunk(p, psize);
              if (p == fm->dv) {
                fm->dvsize = psize;
                goto postaction;
              }
            }
          }
          else
            set_free_with_pinuse(p, psize, next);
          if (is_small(psize)) {
            insert_small_chunk(fm, p, psize);
            check_free_chunk(fm, p);
          }
          else {
            tchunkptr tp = (tchunkptr)p;
            insert_large_chunk(fm, tp, psize);
            check_free_chunk(fm, p);
            /* occasionally scan for whole unused segments to release */
            if (--fm->release_checks == 0)
              release_unused_segments(fm);
          }
          goto postaction;
        }
      }
    erroraction:
      USAGE_ERROR_ACTION(fm, p);
    postaction:
      POSTACTION(fm);
    }
  }
}
/* mspace analog of calloc: zeroed space for n_elements * elem_size
   bytes, with multiplication-overflow forcing allocation failure. */
void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
  void* mem;
  size_t req = 0;
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  if (n_elements != 0) {
    /* division check only needed when an operand is large enough
       that the product could have wrapped */
    int may_overflow = (((n_elements | elem_size) & ~(size_t)0xffff) != 0);
    req = n_elements * elem_size;
    if (may_overflow && req / n_elements != elem_size)
      req = MAX_SIZE_T; /* force downstream failure on overflow */
  }
  mem = internal_malloc(ms, req);
  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
    memset(mem, 0, req);
  return mem;
}
/* mspace analog of dlrealloc: try in-place resize, else allocate a new
   block, copy the payload, and free the old one. */
void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
  void* mem = 0;
  if (oldmem == 0) {
    mem = mspace_malloc(msp, bytes);  /* realloc(NULL, n) == malloc(n) */
  }
  else if (bytes >= MAX_REQUEST) {
    MALLOC_FAILURE_ACTION;
  }
#ifdef REALLOC_ZERO_BYTES_FREES
  else if (bytes == 0) {
    mspace_free(msp, oldmem);         /* optional realloc(p, 0) == free(p) */
  }
#endif /* REALLOC_ZERO_BYTES_FREES */
  else {
    size_t nb = request2size(bytes);
    mchunkptr oldp = mem2chunk(oldmem);
#if ! FOOTERS
    mstate m = (mstate)msp;
#else /* FOOTERS */
    mstate m = get_mstate_for(oldp);
    if (!ok_magic(m)) {
      USAGE_ERROR_ACTION(m, oldmem);
      return 0;
    }
#endif /* FOOTERS */
    if (!PREACTION(m)) {
      mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); /* may move mmap */
      POSTACTION(m);
      if (newp != 0) {
        check_inuse_chunk(m, newp);
        mem = chunk2mem(newp);
      }
      else {                          /* fall back to alloc-copy-free */
        mem = mspace_malloc(m, bytes);
        if (mem != 0) {
          size_t oc = chunksize(oldp) - overhead_for(oldp);
          memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
          mspace_free(m, oldmem);
        }
      }
    }
  }
  return mem;
}
/* Like mspace_realloc but never moves the payload: returns oldmem when
   the chunk could be resized in place, else 0 (oldmem remains valid). */
void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {
  void* mem = 0;
  if (oldmem != 0) {
    if (bytes >= MAX_REQUEST) {
      MALLOC_FAILURE_ACTION;
    }
    else {
      size_t nb = request2size(bytes);
      mchunkptr oldp = mem2chunk(oldmem);
#if ! FOOTERS
      mstate m = (mstate)msp;
#else /* FOOTERS */
      mstate m = get_mstate_for(oldp);
      (void)msp; /* placate people compiling -Wunused */
      if (!ok_magic(m)) {
        USAGE_ERROR_ACTION(m, oldmem);
        return 0;
      }
#endif /* FOOTERS */
      if (!PREACTION(m)) {
        /* can_move == 0: only accept results that kept the same chunk */
        mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
        POSTACTION(m);
        if (newp == oldp) {
          check_inuse_chunk(m, newp);
          mem = oldmem;
        }
      }
    }
  }
  return mem;
}
/* mspace analog of dlmemalign; default alignment falls back to plain
   mspace_malloc. */
void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  return (alignment <= MALLOC_ALIGNMENT)
         ? mspace_malloc(msp, bytes)
         : internal_memalign(ms, alignment, bytes);
}
/* mspace analog of dlindependent_calloc. */
void** mspace_independent_calloc(mspace msp, size_t n_elements,
                                 size_t elem_size, void* chunks[]) {
  size_t sz = elem_size; /* serves as 1-element array */
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  /* opts 3: all elements same size (bit 0) and zero-filled (bit 1) */
  return ialloc(ms, n_elements, &sz, 3, chunks);
}
/* mspace analog of dlindependent_comalloc. */
void** mspace_independent_comalloc(mspace msp, size_t n_elements,
                                   size_t sizes[], void* chunks[]) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  /* opts 0: per-element sizes, no zeroing */
  return ialloc(ms, n_elements, sizes, 0, chunks);
}
/* Free all pointers in array from msp; returns the count not freed. */
size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) {
  return internal_bulk_free((mstate)msp, array, nelem);
}
#if MALLOC_INSPECT_ALL
/* Invoke handler on every in-use and free region of mspace msp, under
   the allocator lock. */
void mspace_inspect_all(mspace msp,
                        void(*handler)(void *start,
                                       void *end,
                                       size_t used_bytes,
                                       void* callback_arg),
                        void* arg) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return;
  }
  if (!PREACTION(ms)) {
    internal_inspect_all(ms, handler, arg);
    POSTACTION(ms);
  }
}
#endif /* MALLOC_INSPECT_ALL */
/* Release unused top memory of mspace msp back to the system, keeping
   at least pad bytes of slack; returns 1 if anything was released. */
int mspace_trim(mspace msp, size_t pad) {
  int released = 0;
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
  }
  else if (!PREACTION(ms)) {
    released = sys_trim(ms, pad);
    POSTACTION(ms);
  }
  return released;
}
#if !NO_MALLOC_STATS
/* Print summary allocation statistics for mspace msp. */
void mspace_malloc_stats(mspace msp) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return;
  }
  internal_malloc_stats(ms);
}
#endif /* NO_MALLOC_STATS */
/* Total bytes obtained from the system for mspace msp (0 on a bad
   mspace after the usage-error action). */
size_t mspace_footprint(mspace msp) {
  mstate ms = (mstate)msp;
  if (ok_magic(ms))
    return ms->footprint;
  USAGE_ERROR_ACTION(ms,ms);
  return 0;
}
/* Peak footprint of mspace msp (0 on a bad mspace after the
   usage-error action). */
size_t mspace_max_footprint(mspace msp) {
  mstate ms = (mstate)msp;
  if (ok_magic(ms))
    return ms->max_footprint;
  USAGE_ERROR_ACTION(ms,ms);
  return 0;
}
/* Current footprint limit of msp; a stored 0 means "no limit" and is
   reported as MAX_SIZE_T. */
size_t mspace_footprint_limit(mspace msp) {
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    size_t limit = ms->footprint_limit;
    return (limit == 0) ? MAX_SIZE_T : limit;
  }
  USAGE_ERROR_ACTION(ms,ms);
  return 0;
}
/* Set the footprint limit for mspace msp and return the value actually
   stored.  bytes == 0 requests the minimal possible limit;
   bytes == MAX_SIZE_T disables the limit (stored as 0). */
size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {
  size_t result = 0;
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    if (bytes == 0)
      result = granularity_align(1); /* Use minimal size */
    /* BUGFIX: this must be an else-if.  Previously bytes == 0 fell
       through to the final else and result was clobbered with
       granularity_align(0) == 0, i.e. "no limit" instead of minimal. */
    else if (bytes == MAX_SIZE_T)
      result = 0;                    /* disable */
    else
      result = granularity_align(bytes);
    ms->footprint_limit = result;
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return result;
}
#if !NO_MALLINFO
/* mallinfo-compatible statistics snapshot for mspace msp. */
struct mallinfo mspace_mallinfo(mspace msp) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return internal_mallinfo(ms);
}
#endif /* NO_MALLINFO */
/* Number of payload bytes actually available in the allocation at mem;
   0 for a null pointer or a chunk that is not in use. */
size_t mspace_usable_size(const void* mem) {
  if (mem == 0)
    return 0;
  {
    mchunkptr p = mem2chunk(mem);
    return is_inuse(p) ? chunksize(p) - overhead_for(p) : 0;
  }
}
/* mallopt analog; parameters are global, not per-mspace. */
int mspace_mallopt(int param_number, int value) {
  return change_mparam(param_number, value);
}
#endif /* MSPACES */
/* -------------------- Alternative MORECORE functions ------------------- */
/*
Guidelines for creating a custom version of MORECORE:
* For best performance, MORECORE should allocate in multiples of pagesize.
* MORECORE may allocate more memory than requested. (Or even less,
but this will usually result in a malloc failure.)
* MORECORE must not allocate memory when given argument zero, but
instead return one past the end address of memory from previous
nonzero call.
* For best performance, consecutive calls to MORECORE with positive
arguments should return increasing addresses, indicating that
space has been contiguously extended.
* Even though consecutive calls to MORECORE need not return contiguous
addresses, it must be OK for malloc'ed chunks to span multiple
regions in those cases where they do happen to be contiguous.
* MORECORE need not handle negative arguments -- it may instead
just return MFAIL when given negative arguments.
Negative arguments are always multiples of pagesize. MORECORE
must not misinterpret negative args as large positive unsigned
args. You can suppress all such calls from even occurring by defining
MORECORE_CANNOT_TRIM.
As an example alternative MORECORE, here is a custom allocator
kindly contributed for pre-OSX macOS. It uses virtually but not
necessarily physically contiguous non-paged memory (locked in,
present and won't get swapped out). You can use it by uncommenting
this section, adding some #includes, and setting up the appropriate
defines above:
#define MORECORE osMoreCore
There is also a shutdown routine that should somehow be called for
cleanup upon program exit.
#define MAX_POOL_ENTRIES 100
#define MINIMUM_MORECORE_SIZE (64 * 1024U)
static int next_os_pool;
void *our_os_pools[MAX_POOL_ENTRIES];
void *osMoreCore(int size)
{
void *ptr = 0;
static void *sbrk_top = 0;
if (size > 0)
{
if (size < MINIMUM_MORECORE_SIZE)
size = MINIMUM_MORECORE_SIZE;
if (CurrentExecutionLevel() == kTaskLevel)
ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
if (ptr == 0)
{
return (void *) MFAIL;
}
// save ptrs so they can be freed during cleanup
our_os_pools[next_os_pool] = ptr;
next_os_pool++;
ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
sbrk_top = (char *) ptr + size;
return ptr;
}
else if (size < 0)
{
// we don't currently support shrink behavior
return (void *) MFAIL;
}
else
{
return sbrk_top;
}
}
// cleanup any allocated memory pools
// called as last thing before shutting down driver
void osCleanupMem(void)
{
void **ptr;
for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
if (*ptr)
{
PoolDeallocate(*ptr);
*ptr = 0;
}
}
*/
/* -----------------------------------------------------------------------
History:
v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea
* fix bad comparison in dlposix_memalign
* don't reuse adjusted asize in sys_alloc
* add LOCK_AT_FORK -- thanks to Kirill Artamonov for the suggestion
* reduce compiler warnings -- thanks to all who reported/suggested these
v2.8.5 Sun May 22 10:26:02 2011 Doug Lea (dl at gee)
* Always perform unlink checks unless INSECURE
* Add posix_memalign.
* Improve realloc to expand in more cases; expose realloc_in_place.
Thanks to Peter Buhr for the suggestion.
* Add footprint_limit, inspect_all, bulk_free. Thanks
to Barry Hayes and others for the suggestions.
* Internal refactorings to avoid calls while holding locks
* Use non-reentrant locks by default. Thanks to Roland McGrath
for the suggestion.
* Small fixes to mspace_destroy, reset_on_error.
* Various configuration extensions/changes. Thanks
to all who contributed these.
V2.8.4a Thu Apr 28 14:39:43 2011 (dl at gee.cs.oswego.edu)
* Update Creative Commons URL
V2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee)
* Use zeros instead of prev foot for is_mmapped
* Add mspace_track_large_chunks; thanks to Jean Brouwers
* Fix set_inuse in internal_realloc; thanks to Jean Brouwers
* Fix insufficient sys_alloc padding when using 16byte alignment
* Fix bad error check in mspace_footprint
* Adaptations for ptmalloc; thanks to Wolfram Gloger.
* Reentrant spin locks; thanks to Earl Chew and others
* Win32 improvements; thanks to Niall Douglas and Earl Chew
* Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options
* Extension hook in malloc_state
* Various small adjustments to reduce warnings on some compilers
* Various configuration extensions/changes for more platforms. Thanks
to all who contributed these.
V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee)
* Add max_footprint functions
* Ensure all appropriate literals are size_t
* Fix conditional compilation problem for some #define settings
* Avoid concatenating segments with the one provided
in create_mspace_with_base
* Rename some variables to avoid compiler shadowing warnings
* Use explicit lock initialization.
* Better handling of sbrk interference.
* Simplify and fix segment insertion, trimming and mspace_destroy
* Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
* Thanks especially to Dennis Flanagan for help on these.
V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee)
* Fix memalign brace error.
V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
* Fix improper #endif nesting in C++
* Add explicit casts needed for C++
V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
* Use trees for large bins
* Support mspaces
* Use segments to unify sbrk-based and mmap-based system allocation,
removing need for emulation on most platforms without sbrk.
* Default safety checks
* Optional footer checks. Thanks to William Robertson for the idea.
* Internal code refactoring
* Incorporate suggestions and platform-specific changes.
Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
Aaron Bachmann, Emery Berger, and others.
* Speed up non-fastbin processing enough to remove fastbins.
* Remove useless cfree() to avoid conflicts with other apps.
* Remove internal memcpy, memset. Compilers handle builtins better.
* Remove some options that no one ever used and rename others.
V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
* Fix malloc_state bitmap array misdeclaration
V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
* Allow tuning of FIRST_SORTED_BIN_SIZE
* Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
* Better detection and support for non-contiguousness of MORECORE.
Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
* Bypass most of malloc if no frees. Thanks To Emery Berger.
      * Fix freeing of old top non-contiguous chunk in sysmalloc.
* Raised default trim and map thresholds to 256K.
* Fix mmap-related #defines. Thanks to Lubos Lunak.
* Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
* Branch-free bin calculation
* Default trim and mmap thresholds now 256K.
V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
* Introduce independent_comalloc and independent_calloc.
Thanks to Michael Pachos for motivation and help.
* Make optional .h file available
* Allow > 2GB requests on 32bit systems.
* new WIN32 sbrk, mmap, munmap, lock code from <[email protected]>.
Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
and Anonymous.
* Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
helping test this.)
* memalign: check alignment arg
* realloc: don't try to shift chunks backwards, since this
leads to more fragmentation in some programs and doesn't
seem to help in any others.
* Collect all cases in malloc requiring system memory into sysmalloc
* Use mmap as backup to sbrk
* Place all internal state in malloc_state
* Introduce fastbins (although similar to 2.5.1)
* Many minor tunings and cosmetic improvements
* Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
* Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
Thanks to Tony E. Bennett <[email protected]> and others.
* Include errno.h to support default failure action.
V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
* return null for negative arguments
* Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
* Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
(e.g. WIN32 platforms)
* Cleanup header file inclusion for WIN32 platforms
* Cleanup code to avoid Microsoft Visual C++ compiler complaints
* Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
memory allocation routines
* Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
* Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
usage of 'assert' in non-WIN32 code
* Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
avoid infinite loop
* Always call 'fREe()' rather than 'free()'
V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
* Fixed ordering problem with boundary-stamping
V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
* Added pvalloc, as recommended by H.J. Liu
* Added 64bit pointer support mainly from Wolfram Gloger
* Added anonymously donated WIN32 sbrk emulation
* Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
* malloc_extend_top: fix mask error that caused wastage after
foreign sbrks
* Add linux mremap support code from HJ Liu
V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
* Integrated most documentation with the code.
* Add support for mmap, with help from
Wolfram Gloger ([email protected]).
* Use last_remainder in more cases.
* Pack bins using idea from [email protected]
      * Use ordered bins instead of best-fit threshold
* Eliminate block-local decls to simplify tracing and debugging.
* Support another case of realloc via move into top
      * Fix error occurring when initial sbrk_base not word-aligned.
* Rely on page size for units instead of SBRK_UNIT to
avoid surprises about sbrk alignment conventions.
* Add mallinfo, mallopt. Thanks to Raymond Nijssen
([email protected]) for the suggestion.
* Add `pad' argument to malloc_trim and top_pad mallopt parameter.
* More precautions for cases where other routines call sbrk,
courtesy of Wolfram Gloger ([email protected]).
* Added macros etc., allowing use in linux libc from
H.J. Lu ([email protected])
* Inverted this history list
V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
* Re-tuned and fixed to behave more nicely with V2.6.0 changes.
* Removed all preallocation code since under current scheme
the work required to undo bad preallocations exceeds
the work saved in good cases for most test programs.
* No longer use return list or unconsolidated bins since
no scheme using them consistently outperforms those that don't
given above changes.
* Use best fit for very large chunks to prevent some worst-cases.
* Added some support for debugging
V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
* Removed footers when chunks are in use. Thanks to
Paul Wilson ([email protected]) for the suggestion.
V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
* Added malloc_trim, with help from Wolfram Gloger
([email protected]).
V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
* realloc: try to expand in both directions
* malloc: swap order of clean-bin strategy;
* realloc: only conditionally expand backwards
* Try not to scavenge used bins
* Use bin counts as a guide to preallocation
* Occasionally bin return list chunks in first scan
* Add a few optimizations from [email protected]
V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
* faster bin computation & slightly different binning
* merged all consolidations to one part of malloc proper
(eliminating old malloc_find_space & malloc_clean_bin)
* Scan 2 returns chunks (not just 1)
* Propagate failure in realloc if malloc returns 0
* Add stuff to allow compilation on non-ANSI compilers
from [email protected]
V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
* removed potential for odd address access in prev_chunk
* removed dependency on getpagesize.h
* misc cosmetics and a bit more internal documentation
* anticosmetics: mangled names in macros to evade debugger strangeness
* tested on sparc, hp-700, dec-mips, rs6000
with gcc & native cc (hp, dec only) allowing
Detlefs & Zorn comparison study (in SIGPLAN Notices.)
Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
* Based loosely on libg++-1.2X malloc. (It retains some of the overall
structure of old version, but most details differ.)
*/
|
the_stack_data/6312.c
|
#include <stdio.h>
/*
 * Reads three numbers (limit, amount, block size) from stdin and
 * accumulates `amount` once per step of each block, applying a discount
 * that grows by 2% per block; blocks advance the position by block+1.
 * Prints the rounded total.
 */
int main(void) {
    float limit, amount, per_block;
    float discount = 0, total = 0;

    scanf("%f", &limit);
    scanf("%f", &amount);
    scanf("%f", &per_block);

    for (float pos = 0; pos <= limit; ) {
        float steps = 0;
        /* Add the discounted amount once per unit inside this block. */
        while (steps < per_block) {
            total = total + amount * (100 - discount) / 100;
            steps++;
        }
        discount = discount + 2;
        pos = pos + steps + 1;   /* skip the block plus one separator unit */
    }

    printf("%.0f", total);
    return 0;
}
|
the_stack_data/118928.c
|
int a;
int b;
int getint();

/* Reads two integers via the external getint() helper and reports
 * whether both are nonzero: returns 1 if so, 0 otherwise. */
int main(){
    a = getint();
    b = getint();
    return (a != 0 && b != 0) ? 1 : 0;
}
|
the_stack_data/79553.c
|
/* b03902086 李鈺昇 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#define FIFO_LEN 20
#define LINE_LEN 30
#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))
/*
 * Bidding bot for a 4-player FIFO-based auction game.
 * argv: [1] host id, [2] player letter ('A'..'D'), [3] shared random key.
 * Each of 10 rounds: read the four players' balances from the host FIFO,
 * then bid just above the richest opponent if affordable, else 999.
 */
int main(int argc, char* argv[]) {
    if (argc < 4) {
        fprintf(stderr, "usage: %s host_id player_index random_key\n", argv[0]);
        return 1;
    }
    char* host_id = argv[1];
    char player_index = argv[2][0];
    char* random_key = argv[3];
    char r_fifo[FIFO_LEN], w_fifo[FIFO_LEN];
    /* snprintf instead of sprintf: a long host id cannot overflow the buffers. */
    snprintf(r_fifo, sizeof r_fifo, "host%s_%c.FIFO", host_id, player_index);
    int r_fd = open(r_fifo, O_RDONLY);
    if (r_fd < 0) {
        perror("open read FIFO");
        return 1;
    }
    snprintf(w_fifo, sizeof w_fifo, "host%s.FIFO", host_id);
    int w_fd = open(w_fifo, O_WRONLY);
    if (w_fd < 0) {
        perror("open write FIFO");
        close(r_fd);
        return 1;
    }
    char line[LINE_LEN];
    int player_int = player_index - 'A';
    int money[4] = {0, 0, 0, 0}, pay;
    for (int i = 0; i < 10; i++) {
        /* Busy-wait until the host publishes this round's status line. */
        while (read(r_fd, line, LINE_LEN) <= 0)
            ;
        /* BUG FIX: the original called sscanf(line, "%d", &money[j]) inside
         * the j-loop, so every money[j] received the FIRST number on the
         * line.  Parse all four balances in a single call instead. */
        sscanf(line, "%d %d %d %d", &money[0], &money[1], &money[2], &money[3]);
        int other_max = -1;
        for (int j = 0; j < 4; j++) {
            if (j != player_int)
                other_max = max(other_max, money[j]);
        }
        /* Bid one above the richest opponent when we can afford it. */
        pay = ( (money[player_int] <= other_max) ? 999 :
                min(money[player_int], other_max + 1)
              );
        snprintf(line, sizeof line, "%c %s %d\n", player_index, random_key, pay);
        write(w_fd, line, strlen(line));
    }
    close(w_fd);
    close(r_fd);
    return 0;
}
|
the_stack_data/243892721.c
|
// include lib for using in app
#include <stdio.h>
// define main, the program entry point, returning int
// Entry point: writes the greeting to stdout and exits successfully.
int main(){
    // fputs emits the string without appending a newline, exactly like
    // the original printf("hello").
    fputs("hello", stdout);
    // zero exit status signals success to the caller
    return 0;
}
|
the_stack_data/510288.c
|
/* Program to demonstrate passing structure values to a function.
Program reads and displays data for a student. */
#include <stdio.h>
struct student_rec /* Global structure template. */
{
	int number ;          /* Student ID number. */
	char surname[21] ;    /* Up to 20 characters plus the terminating NUL. */
	char first_name[11] ; /* Up to 10 characters plus the terminating NUL. */
	int scores[5] ;       /* Five test scores. */
} ;
/* Reads one student record through a pointer, then displays it by value.
 * Fixed: implicit-int `main()` is invalid since C99 -- declare the return
 * type and return explicitly. */
int main(void)
{
	void display_student_data( struct student_rec student );
	void get_student_data( struct student_rec *ptr ) ;
	struct student_rec student ;
	struct student_rec *student_ptr ;

	student_ptr = &student ;
	/* Use a pointer to a structure variable as an argument. */
	get_student_data( student_ptr ) ;
	/* Use a structure variable as an argument. */
	display_student_data( student ) ;
	return 0 ;
}
/* Function : display_student_data()
   Purpose  : Prints every field of a student record to stdout.
   Arguments: The student record, passed by value (a copy). */
void display_student_data ( struct student_rec student)
{
	printf( "\nThe data in the student structure is:" ) ;
	printf( "\nNumber is %d", student.number ) ;
	printf( "\nSurname is %s", student.surname ) ;
	printf( "\nFirst name is %s", student.first_name ) ;
	printf( "\nScores are: " ) ;
	for ( int i = 0 ; i < 5 ; i++ )
		printf( " %d ", student.scores[i] ) ;
	printf( "\n" ) ;
}
/* Function : get_student_data()
   Purpose  : Prompts for and reads every field of a student record.
   Arguments: A pointer to the record to fill in. */
void get_student_data( struct student_rec *ptr )
{
	printf( "Number: " ) ;
	scanf( "%d", &(ptr->number) ) ;
	printf( "Surname: " ) ;
	scanf( "%20s", ptr->surname ) ;
	printf( "First name: " ) ;
	scanf( "%10s", ptr->first_name ) ;
	printf( "Five test scores: " ) ;
	for ( int i = 0 ; i < 5 ; i++ )
		scanf( "%d", &(ptr->scores[i]) ) ;
}
|
the_stack_data/99015.c
|
#include <stdio.h>
/* Returns 1 if a[0..n-1] is in non-decreasing order, 0 otherwise.
 * Always scans the full array (no early exit), like the original. */
int check(int *a,int n)
{
    int sorted = 1;
    for (int idx = 1; idx < n; idx++)
        sorted &= (a[idx] >= a[idx - 1]);
    return sorted;
}
/* Determines whether the array can be made non-decreasing by a single
   "swap x y" or a single "reverse x y" operation and prints which one.
   u counts local maxima (rise then fall), d counts local minima;
   x remembers the first local maximum, y the last local minimum --
   together they bracket the out-of-order region.
   NOTE(review): assumes n >= 2; a[1] and a[n-2] are read unconditionally. */
int main(){
    int n; scanf("%d",&n);
    int a[100010];
    for(int i = 0;i<n;i++) scanf("%d",&a[i]);
    int u = 0,d=0,x=0,y=n;
    /* Scan the interior elements for direction changes. */
    for(int i = 1;i<n-1;i++){
        if(a[i-1]<a[i] && a[i]>a[i+1]){   /* local maximum */
            if(x==0) x = i;
            u++;
        }
        if(a[i-1]>a[i] && a[i]<a[i+1]){   /* local minimum */
            d++;
            y=i;
        }
    }
    /* The two endpoints can also break monotonicity. */
    if(a[n-1]<a[n-2]) {
        d++;
        y = n-1;
    }
    if(a[0]>a[1]) {
        u++;
        x = 0;
    }
    // printf("%d %d\n %d %d \n",u,d,x,y);
    if(u == 0 && d == 0 ) puts("yes");    /* already sorted: nothing to do */
    else if(u == 1 && d == 1){
        /// swap: first try exchanging the peak with the valley
        int t = a[x];
        a[x] = a[y];
        a[y]=t;
        if(check(a,n)){
            puts("yes");
            printf("swap %d %d\n",x+1,y+1);
        }
        else{
            /* Undo the swap, then try reversing the segment between x and y. */
            t = a[x];
            a[x] = a[y];
            a[y]=t;
            if(x>y) for(int j =x,k = y;k<j;k++,j--) t = a[k],a[k]=a[j],a[j]=t;
            else for(int j =y,k = x;k<j;k++,j--) t = a[k],a[k]=a[j],a[j]=t;
            if(check(a,n)){
                puts("yes");
                printf("reverse %d %d\n",x+1,y+1);
            }
            else puts("no");
        }
    }
    else if(d==2 && u == 2){
        /* Two separate disturbances: only swapping the two break points can fix it. */
        int t;
        t = a[x];
        a[x] = a[y];
        a[y]=t;
        if(check(a,n)){
            puts("yes");
            printf("swap %d %d\n",x+1,y+1);
        }
        else puts("no");
    }
    else if(d==1 && u ==0){
        /* A single descending run ending at y: try reversing [x, y]. */
        int t;
        for(int j =y,k = x;k<j;k++,j--) t = a[k],a[k]=a[j],a[j]=t;
        if(check(a,n)){
            puts("yes");
            printf("reverse %d %d\n",x+1,y+1);
        }
        else puts("no");
    }
    else if(d==0 && u == 1){
        /* A single descending run starting at x: try reversing [x, y]. */
        int t;
        for(int j =x,k = y;k<j;k++,j--) t = a[k],a[k]=a[j],a[j]=t;
        if(check(a,n)){
            puts("yes");
            printf("reverse %d %d\n",x+1,y+1);
        }
        else puts("no");
    }
    else puts("no");
    return 0;
}
|
the_stack_data/36857.c
|
#include<stdio.h>
/* Iteratively computes n! (factorial); returns 1 for n <= 0.
 * Overflows long long for n > 20. */
long long jiecheng(long long n)
{
    long long result = 1;
    for (long long k = 2; k <= n; k++)
        result *= k;
    return result;
}
/*
 * Reads m and n, then prints the binomial coefficient
 * C(m, n) = m! / (n! * (m-n)!).
 * NOTE: factorial-based, so it overflows long long for m > 20.
 * Fixed: the scanf result is now checked (the original used the
 * variables uninitialized on bad input -- undefined behavior) and
 * main returns an explicit status.
 */
int main()
{
	long long m, n, a, b, c, d;
	if (scanf("%lld %lld", &m, &n) != 2)
		return 1;	/* bad input: bail out instead of using garbage */
	a = jiecheng(m);
	b = jiecheng(n);
	c = jiecheng(m - n);
	d = a / b / c;
	printf("%lld", d);
	return 0;
}
|
the_stack_data/154828587.c
|
#include <stdio.h>
int sum(int,int);
/* Reads two integers from stdin and prints their sum as "x+y=z". */
int main(void)
{
	int first = 0, second = 0;

	scanf("%d %d", &first, &second);
	int total = sum(first, second);
	printf("%d+%d=%d\n", first, second, total);
	return 0;
}
/* Returns the arithmetic sum of its two integer arguments. */
int sum(int x, int y)
{
	int result = x + y;
	return result;
}
|
the_stack_data/19928.c
|
/*
============================================================================
Author : Ztiany
Description : math.h系列函数
============================================================================
*/
//math.h 头文件定义了各种数学函数和一个宏。在这个库中所有可用的功能都带有一个 double 类型的参数,且都返回 double 类型的结果。
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#define PI 3.14
/* Converts an angle in degrees to radians, using the file's coarse
 * PI approximation (3.14). */
static double toRadians(double angdeg) {
    double turns = angdeg / 180.0;
    return turns * PI;
}
/* Converts an angle in radians to degrees, using the file's coarse
 * PI approximation (3.14). */
static double toDegrees(double angrad) {
    double scaled = angrad * 180.0;
    return scaled / PI;
}
/* Demonstrates sin() from math.h on a 60-degree angle.
 * Fixed: the output label said "san(30)" although the value printed is
 * the sine of 60 degrees (a = toRadians(60)). */
int main() {
    // Convert 60 degrees to radians, then take its sine.
    double a = toRadians(60);
    double_t sin_a = sin(a);
    printf("sin(60) =%lf", sin_a);
    return EXIT_SUCCESS;
}
|
the_stack_data/194.c
|
#include <stdio.h>
#include <stdlib.h>
/* Demonstrates that separately malloc'ed blocks receive distinct
 * addresses by printing three allocations.
 * Fixed: allocation results are checked before use, the redundant
 * malloc casts are removed, and all blocks are freed (the original
 * leaked them). */
int main(int argc, char ** argv){
    (void)argc;
    (void)argv;
    char * p1 = malloc(1);
    char * p2 = malloc(26);
    char * p3 = malloc(1);
    if (p1 == NULL || p2 == NULL || p3 == NULL) {
        /* free(NULL) is a no-op, so unconditional cleanup is safe. */
        free(p1);
        free(p2);
        free(p3);
        return 1;
    }
    printf("p1 : %p \n",p1);
    printf("p2 : %p \n",p2);
    printf("p3 : %p \n",p3);
    free(p1);
    free(p2);
    free(p3);
    return 0;
}
|
the_stack_data/170454041.c
|
#include<stdio.h>
#define max 10
int queue_array[max];   /* circular-free linear queue storage */
int rear=-1;            /* index of the last stored element, -1 when empty */
int front=-1;           /* index of the next element to remove, -1 when empty */

/* Appends x at the rear of the queue.
 * Fixed: the original never stored x nor advanced `rear` (it only
 * initialized `front`), so the queue stayed empty forever.  Also adds
 * the missing overflow check. */
void insert(int x)
{
    if (rear == max - 1)
    {
        printf("queue Overflow \n");
        return;
    }
    if (front == -1)
        front++;            /* first insertion activates the queue */
    rear = rear + 1;
    queue_array[rear] = x;
}
/* Removes and prints the front element; reports underflow when the
 * queue is empty or exhausted. */
void delete()
{
    if(front == -1 || front > rear)
    {
        printf("queue Underflow \n");
        return ;
    }
    printf("elements deleted from queue : %d\n", queue_array[front]);
    front++;
}
|
the_stack_data/162639215.c
|
/* MDH WCET BENCHMARK SUITE. File version $Id: insertsort.c,v 1.3 2005/11/11 10:30:41 ael01 Exp $ */
/*************************************************************************/
/* */
/* SNU-RT Benchmark Suite for Worst Case Timing Analysis */
/* ===================================================== */
/* Collected and Modified by S.-S. Lim */
/* [email protected] */
/* Real-Time Research Group */
/* Seoul National University */
/* */
/* */
/* < Features > - restrictions for our experimental environment */
/* */
/* 1. Completely structured. */
/* - There are no unconditional jumps. */
/* - There are no exit from loop bodies. */
/* (There are no 'break' or 'return' in loop bodies) */
/* 2. No 'switch' statements. */
/* 3. No 'do..while' statements. */
/* 4. Expressions are restricted. */
/* - There are no multiple expressions joined by 'or', */
/* 'and' operations. */
/* 5. No library calls. */
/* - All the functions needed are implemented in the */
/* source file. */
/* */
/* */
/*************************************************************************/
/* */
/* FILE: insertsort.c */
/* SOURCE : Public Domain Code */
/* */
/* DESCRIPTION : */
/* */
/* Insertion sort for 10 integer numbers. */
/* The integer array a[] is initialized in main function. */
/* */
/* REMARK : */
/* */
/* EXECUTION TIME : */
/* */
/* */
/*************************************************************************/
/* Changes:
* JG 2005/12/12: Indented program.
*/
#ifdef DEBUG
int cnt1, cnt2;
#endif

/* Work buffer: a[0] is a sentinel, a[1..10] hold the values to sort. */
unsigned int a[11];

/*
 * Fills a[] with a fixed descending sequence (11 down to 2) and sorts
 * a[1..10] into ascending order with insertion sort.  a[0] = 0 acts as a
 * sentinel so the inner scan never runs off the left end (all data is
 * positive).  Always returns 1.
 */
int
insertsort_main()
{
	int i, j, temp;

	a[0] = 0;		/* sentinel: assume all data is positive */
	for (i = 1; i <= 10; i++)
		a[i] = (unsigned int)(12 - i);	/* 11, 10, ..., 2 */

	for (i = 2; i <= 10; i++) {
#ifdef DEBUG
		cnt1++;
#endif
#ifdef DEBUG
		cnt2 = 0;
#endif
		/* Sink a[i] leftwards until it is no smaller than its neighbor. */
		for (j = i; a[j] < a[j - 1]; j--) {
#ifdef DEBUG
			cnt2++;
#endif
			temp = a[j];
			a[j] = a[j - 1];
			a[j - 1] = temp;
		}
#ifdef DEBUG
		printf("Inner Loop Counts: %d\n", cnt2);
#endif
	}
#ifdef DEBUG
	printf("Outer Loop : %d , Inner Loop : %d\n", cnt1, cnt2);
#endif
	return 1;
}
|
the_stack_data/677258.c
|
/* ************************************************************************** */
/* */
/* ::: :::::::: */
/* ft_putchar.c :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: cacharle <[email protected]> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2019/07/02 22:03:32 by cacharle #+# #+# */
/* Updated: 2019/07/03 14:21:40 by cacharle ### ########.fr */
/* */
/* ************************************************************************** */
#include <unistd.h>
/*
** Writes the single character C to standard output (file descriptor 1).
** The write() result is deliberately discarded: the interface is void.
*/
void ft_putchar(char c)
{
	(void)write(1, &c, 1);
}
|
the_stack_data/216032.c
|
/* ************************************************************************** */
/* */
/* ::: :::::::: */
/* ft_strncat.c :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: erli <[email protected]> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2018/11/07 14:56:00 by erli #+# #+# */
/* Updated: 2018/11/15 10:20:10 by erli ### ########.fr */
/* */
/* ************************************************************************** */
#include <string.h>
/*
** Appends at most N characters of S2 to the end of S1, always writing a
** terminating NUL afterwards, and returns S1.  S1 must have room for the
** result.  (Mirrors strncat's contract.)
*/
char	*ft_strncat(char *s1, const char *s2, size_t n)
{
	char	*dst;

	dst = s1;
	while (*dst != '\0')
		dst++;
	while (n-- > 0 && *s2 != '\0')
		*dst++ = *s2++;
	*dst = '\0';
	return (s1);
}
|
the_stack_data/200141880.c
|
#include <stdio.h>
/* Prompts for an integer and echoes it back (Japanese prompts kept as-is).
 * Fixed: the original printed `no` even when scanf failed, reading an
 * uninitialized variable (undefined behavior).  `no` is now initialized
 * and the scanf result is checked. */
int main (void)
{
	int no = 0;

	printf("整数を入力してください。");
	if (scanf("%d", &no) != 1)
		return 1;	/* invalid input: do not echo garbage */
	printf("あなたは%dと入力しましたね。\n", no);
	return 0;
}
|
the_stack_data/100139215.c
|
#include <stdio.h>
#include <stdbool.h>
#include <assert.h>
/* Five fixture pairs: each unsortedN holds raw data and sortedN holds the
 * same values in ascending order (expected result of sorting unsortedN). */
int unsorted0[] = {91, 39, 99, 66, 95, 65, 61, 49, 79, 18, 78, 35, 58, 29, 88};
int sorted0[] = {18, 29, 35, 39, 49, 58, 61, 65, 66, 78, 79, 88, 91, 95, 99};
int unsorted1[] = {50, 87, 16, 12, 96, 15, 12, 33, 96, 10, 82, 61, 65, 51, 87};
int sorted1[] = {10, 12, 12, 15, 16, 33, 50, 51, 61, 65, 82, 87, 87, 96, 96};
int unsorted2[] = {17, 91, 42, 42, 76, 46, 15, 33, 19, 39, 11, 70, 20, 48, 5};
int sorted2[] = {5, 11, 15, 17, 19, 20, 33, 39, 42, 42, 46, 48, 70, 76, 91};
int unsorted3[] = {61, 17, 71, 17, 57, 99, 19, 86, 71, 26, 74, 1, 65, 3, 4};
int sorted3[] = {1, 3, 4, 17, 17, 19, 26, 57, 61, 65, 71, 71, 74, 86, 99};
int unsorted4[] = {29, 65, 0, 45, 20, 92, 30, 86, 34, 61, 41, 5, 97, 92, 25};
int sorted4[] = {0, 5, 20, 25, 29, 30, 34, 41, 45, 61, 65, 86, 92, 92, 97};
/* Returns true iff the two int arrays have the same length and identical
 * contents element by element. */
bool cmplist(int list1[], int list1len, int list2[], int list2len){
    if (list1len != list2len)
        return false;
    int idx = 0;
    while (idx < list1len) {
        if (list1[idx] != list2[idx])
            return false;
        idx++;
    }
    return true;
}
/* Prints the array as comma-terminated values followed by a newline. */
void printlist(int list[], int listlen) {
    int idx = 0;
    while (idx < listlen) {
        printf("%d,", list[idx]);
        idx++;
    }
    printf("\n");
}
/* In-place insertion sort, ascending; helper for the test harness below. */
static void sortlist(int list[], int listlen) {
    for (int i = 1; i < listlen; i++) {
        int key = list[i];
        int j = i - 1;
        while (j >= 0 && list[j] > key) {
            list[j + 1] = list[j];
            j--;
        }
        list[j + 1] = key;
    }
}

/* Sorts each unsorted fixture, then asserts it matches its expected
 * sorted counterpart.  Fixed: the original asserted that the UNSORTED
 * arrays equalled the sorted ones without ever sorting, so the first
 * assert always aborted and "Done!" was unreachable. */
int main(void) {
    printlist(unsorted0, sizeof(unsorted0)/sizeof(unsorted0[0]));
    printlist(sorted0, sizeof(sorted0)/sizeof(sorted0[0]));
    sortlist(unsorted0, sizeof(unsorted0)/sizeof(unsorted0[0]));
    sortlist(unsorted1, sizeof(unsorted1)/sizeof(unsorted1[0]));
    sortlist(unsorted2, sizeof(unsorted2)/sizeof(unsorted2[0]));
    sortlist(unsorted3, sizeof(unsorted3)/sizeof(unsorted3[0]));
    sortlist(unsorted4, sizeof(unsorted4)/sizeof(unsorted4[0]));
    assert(cmplist(unsorted0, sizeof(unsorted0)/sizeof(unsorted0[0]), sorted0, sizeof(sorted0)/sizeof(sorted0[0])));
    assert(cmplist(unsorted1, sizeof(unsorted1)/sizeof(unsorted1[0]), sorted1, sizeof(sorted1)/sizeof(sorted1[0])));
    assert(cmplist(unsorted2, sizeof(unsorted2)/sizeof(unsorted2[0]), sorted2, sizeof(sorted2)/sizeof(sorted2[0])));
    assert(cmplist(unsorted3, sizeof(unsorted3)/sizeof(unsorted3[0]), sorted3, sizeof(sorted3)/sizeof(sorted3[0])));
    assert(cmplist(unsorted4, sizeof(unsorted4)/sizeof(unsorted4[0]), sorted4, sizeof(sorted4)/sizeof(sorted4[0])));
    printf("Done!\n");
    return 0;
}
|
the_stack_data/137857.c
|
/* $FreeBSD$ */
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <semaphore.h>
#include <stdio.h>
#include <stdlib.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#define SEM_NAME "/semtst"
int test_unnamed(void);
int test_named(void);
int test_named2(void);
/*
 * Exercises an unnamed (anonymous) process-shared POSIX semaphore.
 * The semaphore lives in a shared anonymous mapping so the forked child
 * sees the same object.  The child blocks in sem_wait(); after one
 * second the parent posts, then verifies the child exited cleanly.
 * Returns 0; fatal setup errors terminate the process via err().
 */
int
test_unnamed(void)
{
	sem_t *s;
	pid_t pid;
	int status;

	printf("testing unnamed process-shared semaphore\n");
	/* MAP_ANON|MAP_SHARED: the mapping is inherited across fork(). */
	s = (sem_t *)mmap(NULL, sizeof(sem_t), PROT_READ|PROT_WRITE, MAP_ANON|MAP_SHARED,
	    -1, 0);
	if (s == MAP_FAILED)
		err(1, "mmap failed");
	/* pshared = 1: the semaphore is usable by multiple processes. */
	if (sem_init(s, 1, 0))
		err(2, "sem_init failed");
	if ((pid = fork()) == 0) {
		printf("child: sem_wait()\n");
		if (sem_wait(s))
			err(3, "sem_wait failed");
		printf("child: sem_wait() returned\n");
		exit(0);
	} else {
		sleep(1);	/* give the child time to block in sem_wait() */
		printf("parent: sem_post()\n");
		if (sem_post(s))
			err(4, "sem_post failed");
		waitpid(pid, &status, 0);
		if (WIFEXITED(status) && WEXITSTATUS(status) == 0)
			printf("OK.\n");
		else
			printf("Failure.");
	}
	return (0);
}
/*
 * Exercises a named process-shared POSIX semaphore (SEM_NAME).
 * Opens the semaphore twice, closes the duplicate, then has a forked
 * child block in sem_wait() until the parent posts one second later.
 * Returns 0; fatal setup errors terminate the process via err()/errx().
 */
int
test_named(void)
{
	sem_t *s, *s2;
	pid_t pid;
	int status;

	printf("testing named process-shared semaphore\n");
	sem_unlink(SEM_NAME);	/* remove any stale semaphore from a prior run */
	s = sem_open(SEM_NAME, O_CREAT, 0777, 0);
	if (s == SEM_FAILED)
		err(1, "sem_open failed");
	s2 = sem_open(SEM_NAME, O_CREAT, 0777, 0);
	if (s2 == SEM_FAILED)
		err(2, "second sem_open call failed");
	/* NOTE(review): returning the same address for a second sem_open of
	 * the same name is the behavior this (FreeBSD) test asserts; POSIX
	 * itself does not guarantee it. */
	if (s != s2)
		errx(3,
    "two sem_open calls for same semaphore do not return same address");
	if (sem_close(s2))
		err(4, "sem_close failed");
	if ((pid = fork()) == 0) {
		printf("child: sem_wait()\n");
		if (sem_wait(s))
			err(5, "sem_wait failed");
		printf("child: sem_wait() returned\n");
		exit(0);
	} else {
		sleep(1);	/* give the child time to block in sem_wait() */
		printf("parent: sem_post()\n");
		if (sem_post(s))
			err(6, "sem_post failed");
		waitpid(pid, &status, 0);
		if (WIFEXITED(status) && WEXITSTATUS(status) == 0)
			printf("OK.\n");
		else
			printf("Failure.");
	}
	if (sem_close(s))
		err(7, "sem_close failed");
	return (0);
}
/*
 * Verifies O_EXCL handling for named semaphores: re-creating an existing
 * semaphore with O_CREAT|O_EXCL must fail with EEXIST, while a plain
 * sem_open() of the same name must return the already-open handle.
 */
int
test_named2(void)
{
	sem_t *created, *excl_attempt, *reopened;

	printf("testing named process-shared semaphore, O_EXCL cases\n");
	sem_unlink(SEM_NAME);

	created = sem_open(SEM_NAME, O_CREAT | O_EXCL, 0777, 0);
	if (created == SEM_FAILED)
		err(1, "sem_open failed");

	excl_attempt = sem_open(SEM_NAME, O_CREAT | O_EXCL, 0777, 0);
	if (excl_attempt != SEM_FAILED)
		errx(2, "second sem_open call wrongly succeeded");
	if (errno != EEXIST)
		err(3, "second sem_open call failed with wrong errno");

	reopened = sem_open(SEM_NAME, 0);
	if (reopened == SEM_FAILED)
		err(4, "third sem_open call failed");
	if (created != reopened)
		errx(5,
	    "two sem_open calls for same semaphore do not return same address");

	if (sem_close(reopened))
		err(6, "sem_close failed");
	if (sem_close(created))
		err(7, "sem_close failed");
	printf("OK.\n");
	return (0);
}
/*
 * Runs the three process-shared semaphore tests in sequence.  Each test
 * prints its own OK/Failure verdict; fatal setup errors terminate the
 * whole program early via err()/errx().
 */
int
main(void)
{
	test_unnamed();
	test_named();
	test_named2();
	return (0);
}
|
the_stack_data/215767153.c
|
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <complex.h>
#ifdef complex
#undef complex
#endif
#ifdef I
#undef I
#endif
#if defined(_WIN64)
typedef long long BLASLONG;
typedef unsigned long long BLASULONG;
#else
typedef long BLASLONG;
typedef unsigned long BLASULONG;
#endif
#ifdef LAPACK_ILP64
typedef BLASLONG blasint;
#if defined(_WIN64)
#define blasabs(x) llabs(x)
#else
#define blasabs(x) labs(x)
#endif
#else
typedef int blasint;
#define blasabs(x) abs(x)
#endif
typedef blasint integer;
typedef unsigned int uinteger;
typedef char *address;
typedef short int shortint;
typedef float real;
typedef double doublereal;
typedef struct { real r, i; } complex;
typedef struct { doublereal r, i; } doublecomplex;
#ifdef _MSC_VER
static inline _Fcomplex Cf(complex *z) {_Fcomplex zz={z->r , z->i}; return zz;}
static inline _Dcomplex Cd(doublecomplex *z) {_Dcomplex zz={z->r , z->i};return zz;}
static inline _Fcomplex * _pCf(complex *z) {return (_Fcomplex*)z;}
static inline _Dcomplex * _pCd(doublecomplex *z) {return (_Dcomplex*)z;}
#else
static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;}
static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;}
static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;}
static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;}
#endif
#define pCf(z) (*_pCf(z))
#define pCd(z) (*_pCd(z))
typedef int logical;
typedef short int shortlogical;
typedef char logical1;
typedef char integer1;
#define TRUE_ (1)
#define FALSE_ (0)
/* Extern is for use with -E */
#ifndef Extern
#define Extern extern
#endif
/* I/O stuff */
typedef int flag;
typedef int ftnlen;
typedef int ftnint;
/*external read, write*/
typedef struct
{ flag cierr;
ftnint ciunit;
flag ciend;
char *cifmt;
ftnint cirec;
} cilist;
/*internal read, write*/
typedef struct
{ flag icierr;
char *iciunit;
flag iciend;
char *icifmt;
ftnint icirlen;
ftnint icirnum;
} icilist;
/*open*/
typedef struct
{ flag oerr;
ftnint ounit;
char *ofnm;
ftnlen ofnmlen;
char *osta;
char *oacc;
char *ofm;
ftnint orl;
char *oblnk;
} olist;
/*close*/
typedef struct
{ flag cerr;
ftnint cunit;
char *csta;
} cllist;
/*rewind, backspace, endfile*/
typedef struct
{ flag aerr;
ftnint aunit;
} alist;
/* inquire */
typedef struct
{ flag inerr;
ftnint inunit;
char *infile;
ftnlen infilen;
ftnint *inex; /*parameters in standard's order*/
ftnint *inopen;
ftnint *innum;
ftnint *innamed;
char *inname;
ftnlen innamlen;
char *inacc;
ftnlen inacclen;
char *inseq;
ftnlen inseqlen;
char *indir;
ftnlen indirlen;
char *infmt;
ftnlen infmtlen;
char *inform;
ftnint informlen;
char *inunf;
ftnlen inunflen;
ftnint *inrecl;
ftnint *innrec;
char *inblank;
ftnlen inblanklen;
} inlist;
#define VOID void
union Multitype { /* for multiple entry points */
integer1 g;
shortint h;
integer i;
/* longint j; */
real r;
doublereal d;
complex c;
doublecomplex z;
};
typedef union Multitype Multitype;
struct Vardesc { /* for Namelist */
char *name;
char *addr;
ftnlen *dims;
int type;
};
typedef struct Vardesc Vardesc;
struct Namelist {
char *name;
Vardesc **vars;
int nvars;
};
typedef struct Namelist Namelist;
#define abs(x) ((x) >= 0 ? (x) : -(x))
#define dabs(x) (fabs(x))
#define f2cmin(a,b) ((a) <= (b) ? (a) : (b))
#define f2cmax(a,b) ((a) >= (b) ? (a) : (b))
#define dmin(a,b) (f2cmin(a,b))
#define dmax(a,b) (f2cmax(a,b))
#define bit_test(a,b) ((a) >> (b) & 1)
#define bit_clear(a,b) ((a) & ~((uinteger)1 << (b)))
#define bit_set(a,b) ((a) | ((uinteger)1 << (b)))
#define abort_() { sig_die("Fortran abort routine called", 1); }
#define c_abs(z) (cabsf(Cf(z)))
#define c_cos(R,Z) { pCf(R)=ccos(Cf(Z)); }
#ifdef _MSC_VER
/* MSVC lacks C99 complex operators, so division is spelled out on the
 * _Val[] members.  FIX: the original z_div referenced the undefined
 * identifier `df(b)`, so the macro could not compile under MSVC at all;
 * corrected to Cd(b).  NOTE(review): both MSVC macros divide
 * component-wise, which is NOT true complex division ((a+bi)/(c+di));
 * that mirrors the upstream f2c translation and is left unchanged here. */
#define c_div(c, a, b) {Cf(c)._Val[0] = (Cf(a)._Val[0]/Cf(b)._Val[0]); Cf(c)._Val[1]=(Cf(a)._Val[1]/Cf(b)._Val[1]);}
#define z_div(c, a, b) {Cd(c)._Val[0] = (Cd(a)._Val[0]/Cd(b)._Val[0]); Cd(c)._Val[1]=(Cd(a)._Val[1]/Cd(b)._Val[1]);}
#else
#define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
#define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
#endif
#define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
#define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
#define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
//#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
#define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
#define d_abs(x) (fabs(*(x)))
#define d_acos(x) (acos(*(x)))
#define d_asin(x) (asin(*(x)))
#define d_atan(x) (atan(*(x)))
#define d_atn2(x, y) (atan2(*(x),*(y)))
#define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
#define r_cnjg(R, Z) { pCf(R) = conjf(Cf(Z)); }
#define d_cos(x) (cos(*(x)))
#define d_cosh(x) (cosh(*(x)))
#define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
#define d_exp(x) (exp(*(x)))
#define d_imag(z) (cimag(Cd(z)))
#define r_imag(z) (cimagf(Cf(z)))
#define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
#define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
#define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
#define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
#define d_log(x) (log(*(x)))
#define d_mod(x, y) (fmod(*(x), *(y)))
#define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
#define d_nint(x) u_nint(*(x))
#define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a)))
#define d_sign(a,b) u_sign(*(a),*(b))
#define r_sign(a,b) u_sign(*(a),*(b))
#define d_sin(x) (sin(*(x)))
#define d_sinh(x) (sinh(*(x)))
#define d_sqrt(x) (sqrt(*(x)))
#define d_tan(x) (tan(*(x)))
#define d_tanh(x) (tanh(*(x)))
#define i_abs(x) abs(*(x))
#define i_dnnt(x) ((integer)u_nint(*(x)))
#define i_len(s, n) (n)
#define i_nint(x) ((integer)u_nint(*(x)))
#define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b)))
#define pow_dd(ap, bp) ( pow(*(ap), *(bp)))
#define pow_si(B,E) spow_ui(*(B),*(E))
#define pow_ri(B,E) spow_ui(*(B),*(E))
#define pow_di(B,E) dpow_ui(*(B),*(E))
#define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));}
#define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));}
#define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),*(B));}
#define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; }
#define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d))))
#define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; }
#define sig_die(s, kill) { exit(1); }
#define s_stop(s, n) {exit(0);}
static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n";
#define z_abs(z) (cabs(Cd(z)))
#define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));}
#define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));}
#define myexit_() break;
#define mycycle() continue;
#define myceiling(w) {ceil(w)}
#define myhuge(w) {HUGE_VAL}
//#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);}
#define mymaxloc(w,s,e,n) {dmaxloc_(w,*(s),*(e),n)}
/* procedure parameter types for -A and -C++ */
#define F2C_proc_par_types 1
#ifdef __cplusplus
typedef logical (*L_fp)(...);
#else
typedef logical (*L_fp)();
#endif
/* Single-precision x raised to an integer power via binary exponentiation;
 * negative exponents are handled by inverting x first.  Returns 1 for n == 0. */
static float spow_ui(float x, integer n) {
	float result = 1.0;
	unsigned long int bits;
	if (n != 0) {
		if (n < 0) {
			n = -n;
			x = 1 / x;
		}
		bits = n;
		for (;;) {
			if (bits & 01) result *= x;
			bits >>= 1;
			if (bits == 0) break;
			x *= x;
		}
	}
	return result;
}
/* Double-precision x raised to an integer power via binary exponentiation;
 * negative exponents are handled by inverting x first.  Returns 1 for n == 0. */
static double dpow_ui(double x, integer n) {
	double result = 1.0;
	unsigned long int bits;
	if (n != 0) {
		if (n < 0) {
			n = -n;
			x = 1 / x;
		}
		bits = n;
		for (;;) {
			if (bits & 01) result *= x;
			bits >>= 1;
			if (bits == 0) break;
			x *= x;
		}
	}
	return result;
}
#ifdef _MSC_VER
/* x**n for single complex under MSVC (no _Complex support): binary
   exponentiation with explicit real/imaginary parts.
   FIX: the previous code multiplied component-wise (pow.r*x.r, pow.i*x.i)
   and inverted component-wise for n < 0 -- both are wrong for any operand
   with a nonzero imaginary part.  True complex products are used below. */
static _Fcomplex cpow_ui(complex x, integer n) {
	complex pow = {1.0, 0.0};
	unsigned long int u;
	float t;
	if (n != 0) {
		if (n < 0) {
			/* 1/z = conj(z) / |z|^2, not an element-wise reciprocal. */
			t = x.r * x.r + x.i * x.i;
			n = -n;
			x.r = x.r / t;
			x.i = -x.i / t;
		}
		for (u = n;;) {
			if (u & 01) {
				/* pow *= x (full complex multiply) */
				t = pow.r * x.r - pow.i * x.i;
				pow.i = pow.r * x.i + pow.i * x.r;
				pow.r = t;
			}
			if (u >>= 1) {
				/* x *= x (complex square) */
				t = x.r * x.r - x.i * x.i;
				x.i = 2.f * x.r * x.i;
				x.r = t;
			} else
				break;
		}
	}
	_Fcomplex p = {pow.r, pow.i};
	return p;
}
#else
/* x**n for _Complex float via binary exponentiation; the compiler's native
   complex arithmetic already handles the products and the reciprocal. */
static _Complex float cpow_ui(_Complex float x, integer n) {
	_Complex float pow=1.0; unsigned long int u;
	if(n != 0) {
		if(n < 0) n = -n, x = 1/x;
		for(u = n; ; ) {
			if(u & 01) pow *= x;
			if(u >>= 1) x *= x;
			else break;
		}
	}
	return pow;
}
#endif
#ifdef _MSC_VER
/* x**n for double complex under MSVC (no _Complex support).
   FIX: the previous code multiplied the _Val components independently and
   inverted them independently for n < 0; that is not complex arithmetic
   and gave wrong results for any non-real operand. */
static _Dcomplex zpow_ui(_Dcomplex x, integer n) {
	_Dcomplex pow = {1.0, 0.0};
	unsigned long int u;
	double t;
	if (n != 0) {
		if (n < 0) {
			/* 1/z = conj(z) / |z|^2, not an element-wise reciprocal. */
			t = x._Val[0] * x._Val[0] + x._Val[1] * x._Val[1];
			n = -n;
			x._Val[0] = x._Val[0] / t;
			x._Val[1] = -x._Val[1] / t;
		}
		for (u = n;;) {
			if (u & 01) {
				/* pow *= x (full complex multiply) */
				t = pow._Val[0] * x._Val[0] - pow._Val[1] * x._Val[1];
				pow._Val[1] = pow._Val[0] * x._Val[1] + pow._Val[1] * x._Val[0];
				pow._Val[0] = t;
			}
			if (u >>= 1) {
				/* x *= x (complex square) */
				t = x._Val[0] * x._Val[0] - x._Val[1] * x._Val[1];
				x._Val[1] = 2. * x._Val[0] * x._Val[1];
				x._Val[0] = t;
			} else
				break;
		}
	}
	_Dcomplex p = {pow._Val[0], pow._Val[1]};
	return p;
}
#else
/* x**n for _Complex double via binary exponentiation; native complex
   arithmetic handles the products and the reciprocal. */
static _Complex double zpow_ui(_Complex double x, integer n) {
	_Complex double pow=1.0; unsigned long int u;
	if(n != 0) {
		if(n < 0) n = -n, x = 1/x;
		for(u = n; ; ) {
			if(u & 01) pow *= x;
			if(u >>= 1) x *= x;
			else break;
		}
	}
	return pow;
}
#endif
/* x**n for f2c integers, mirroring libF77's pow_ii:
 * - n == 0 or x == 1 always yields 1;
 * - 0**negative deliberately evaluates 1/0 so the program traps, matching
 *   the Fortran runtime's behavior for this invalid case;
 * - other x with negative n yields 0 (integer truncation of |x|**n < 1);
 * - x == -1 with negative n is folded into the positive-exponent loop
 *   (the result only depends on the parity of n). */
static integer pow_ii(integer x, integer n) {
	integer pow; unsigned long int u;
	if (n <= 0) {
		if (n == 0 || x == 1) pow = 1;
		else if (x != -1) pow = x == 0 ? 1/x : 0; /* 1/0 is intentional */
		else n = -n; /* (-1)**|n|: handled by the loop below */
	}
	/* Loop runs for n > 0, or for the folded x == -1 case (n was negated
	   above, so the second clause is only reached when n became > 0). */
	if ((n > 0) || !(n == 0 || x == 1 || x != -1)) {
		u = n;
		for(pow = 1; ; ) {
			if(u & 01) pow *= x;
			if(u >>= 1) x *= x;
			else break;
		}
	}
	return pow;
}
/* Fortran MAXLOC shim for doubles: scan the 1-based slice w(s:e) and return
   the position (relative to s, 1-based) of the first maximum.  The trailing
   n parameter is unused but kept for the generated call sites. */
static integer dmaxloc_(double *w, integer s, integer e, integer *n)
{
	integer best = s;
	double best_val = w[s - 1];
	integer idx;
	for (idx = s + 1; idx <= e; idx++) {
		if (w[idx - 1] > best_val) {
			best_val = w[idx - 1];
			best = idx;
		}
	}
	return best - s + 1;
}
/* Fortran MAXLOC shim for floats: scan the 1-based slice w(s:e) and return
   the position (relative to s, 1-based) of the first maximum.  The trailing
   n parameter is unused but kept for the generated call sites. */
static integer smaxloc_(float *w, integer s, integer e, integer *n)
{
	integer best = s;
	float best_val = w[s - 1];
	integer idx;
	for (idx = s + 1; idx <= e; idx++) {
		if (w[idx - 1] > best_val) {
			best_val = w[idx - 1];
			best = idx;
		}
	}
	return best - s + 1;
}
/* BLAS CDOTC shim: *z = sum over i of conj(x(i)) * y(i), n elements with
   strides incx/incy (result returned through the first argument). */
static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
	integer n = *n_, incx = *incx_, incy = *incy_, i;
#ifdef _MSC_VER
	/* MSVC has no _Complex; accumulate parts explicitly.
	   FIX: the previous code summed component-wise products, which is not
	   a complex multiply.  conj(x)*y expands to
	   (xr*yr + xi*yi) + i*(xr*yi - xi*yr). */
	_Fcomplex zdotc = {0.0, 0.0};
	if (incx == 1 && incy == 1) {
		for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
			float xr = Cf(&x[i])._Val[0], xi = Cf(&x[i])._Val[1];
			float yr = Cf(&y[i])._Val[0], yi = Cf(&y[i])._Val[1];
			zdotc._Val[0] += xr * yr + xi * yi;
			zdotc._Val[1] += xr * yi - xi * yr;
		}
	} else {
		for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
			float xr = Cf(&x[i*incx])._Val[0], xi = Cf(&x[i*incx])._Val[1];
			float yr = Cf(&y[i*incy])._Val[0], yi = Cf(&y[i*incy])._Val[1];
			zdotc._Val[0] += xr * yr + xi * yi;
			zdotc._Val[1] += xr * yi - xi * yr;
		}
	}
	pCf(z) = zdotc;
}
#else
	_Complex float zdotc = 0.0;
	if (incx == 1 && incy == 1) {
		for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
			zdotc += conjf(Cf(&x[i])) * Cf(&y[i]);
		}
	} else {
		for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
			zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]);
		}
	}
	pCf(z) = zdotc;
}
#endif
/* BLAS ZDOTC shim: *z = sum over i of conj(x(i)) * y(i), n elements with
   strides incx/incy (result returned through the first argument). */
static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
	integer n = *n_, incx = *incx_, incy = *incy_, i;
#ifdef _MSC_VER
	/* MSVC has no _Complex; accumulate parts explicitly.
	   FIX: the previous code summed component-wise products, which is not
	   a complex multiply.  conj(x)*y expands to
	   (xr*yr + xi*yi) + i*(xr*yi - xi*yr). */
	_Dcomplex zdotc = {0.0, 0.0};
	if (incx == 1 && incy == 1) {
		for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
			double xr = Cd(&x[i])._Val[0], xi = Cd(&x[i])._Val[1];
			double yr = Cd(&y[i])._Val[0], yi = Cd(&y[i])._Val[1];
			zdotc._Val[0] += xr * yr + xi * yi;
			zdotc._Val[1] += xr * yi - xi * yr;
		}
	} else {
		for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
			double xr = Cd(&x[i*incx])._Val[0], xi = Cd(&x[i*incx])._Val[1];
			double yr = Cd(&y[i*incy])._Val[0], yi = Cd(&y[i*incy])._Val[1];
			zdotc._Val[0] += xr * yr + xi * yi;
			zdotc._Val[1] += xr * yi - xi * yr;
		}
	}
	pCd(z) = zdotc;
}
#else
	_Complex double zdotc = 0.0;
	if (incx == 1 && incy == 1) {
		for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
			zdotc += conj(Cd(&x[i])) * Cd(&y[i]);
		}
	} else {
		for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
			zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]);
		}
	}
	pCd(z) = zdotc;
}
#endif
/* BLAS CDOTU shim: *z = sum over i of x(i) * y(i) (unconjugated), n
   elements with strides incx/incy (result through the first argument). */
static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
	integer n = *n_, incx = *incx_, incy = *incy_, i;
#ifdef _MSC_VER
	/* MSVC has no _Complex; accumulate parts explicitly.
	   FIX: the previous code summed component-wise products, which is not
	   a complex multiply.  x*y expands to
	   (xr*yr - xi*yi) + i*(xr*yi + xi*yr). */
	_Fcomplex zdotc = {0.0, 0.0};
	if (incx == 1 && incy == 1) {
		for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
			float xr = Cf(&x[i])._Val[0], xi = Cf(&x[i])._Val[1];
			float yr = Cf(&y[i])._Val[0], yi = Cf(&y[i])._Val[1];
			zdotc._Val[0] += xr * yr - xi * yi;
			zdotc._Val[1] += xr * yi + xi * yr;
		}
	} else {
		for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
			float xr = Cf(&x[i*incx])._Val[0], xi = Cf(&x[i*incx])._Val[1];
			float yr = Cf(&y[i*incy])._Val[0], yi = Cf(&y[i*incy])._Val[1];
			zdotc._Val[0] += xr * yr - xi * yi;
			zdotc._Val[1] += xr * yi + xi * yr;
		}
	}
	pCf(z) = zdotc;
}
#else
	_Complex float zdotc = 0.0;
	if (incx == 1 && incy == 1) {
		for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
			zdotc += Cf(&x[i]) * Cf(&y[i]);
		}
	} else {
		for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
			zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]);
		}
	}
	pCf(z) = zdotc;
}
#endif
/* BLAS ZDOTU shim: *z = sum over i of x(i) * y(i) (unconjugated), n
   elements with strides incx/incy (result through the first argument). */
static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
	integer n = *n_, incx = *incx_, incy = *incy_, i;
#ifdef _MSC_VER
	/* MSVC has no _Complex; accumulate parts explicitly.
	   FIX: the previous code summed component-wise products, which is not
	   a complex multiply.  x*y expands to
	   (xr*yr - xi*yi) + i*(xr*yi + xi*yr). */
	_Dcomplex zdotc = {0.0, 0.0};
	if (incx == 1 && incy == 1) {
		for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
			double xr = Cd(&x[i])._Val[0], xi = Cd(&x[i])._Val[1];
			double yr = Cd(&y[i])._Val[0], yi = Cd(&y[i])._Val[1];
			zdotc._Val[0] += xr * yr - xi * yi;
			zdotc._Val[1] += xr * yi + xi * yr;
		}
	} else {
		for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
			double xr = Cd(&x[i*incx])._Val[0], xi = Cd(&x[i*incx])._Val[1];
			double yr = Cd(&y[i*incy])._Val[0], yi = Cd(&y[i*incy])._Val[1];
			zdotc._Val[0] += xr * yr - xi * yi;
			zdotc._Val[1] += xr * yi + xi * yr;
		}
	}
	pCd(z) = zdotc;
}
#else
	_Complex double zdotc = 0.0;
	if (incx == 1 && incy == 1) {
		for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
			zdotc += Cd(&x[i]) * Cd(&y[i]);
		}
	} else {
		for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
			zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]);
		}
	}
	pCd(z) = zdotc;
}
#endif
/* -- translated by f2c (version 20000121).
You must link the resulting object file with the libraries:
-lf2c -lm (in that order)
*/
/* Table of constant values */
/* Scalar coefficients passed by reference to the Level-3 BLAS calls in
   spftrf_ below (alpha = 1 for STRSM, alpha = -1 / beta = 1 for SSYRK). */
static real c_b12 = 1.f;
static real c_b15 = -1.f;
/* > \brief \b SPFTRF */
/* =========== DOCUMENTATION =========== */
/* Online html documentation available at */
/* http://www.netlib.org/lapack/explore-html/ */
/* > \htmlonly */
/* > Download SPFTRF + dependencies */
/* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/spftrf.
f"> */
/* > [TGZ]</a> */
/* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/spftrf.
f"> */
/* > [ZIP]</a> */
/* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/spftrf.
f"> */
/* > [TXT]</a> */
/* > \endhtmlonly */
/* Definition: */
/* =========== */
/* SUBROUTINE SPFTRF( TRANSR, UPLO, N, A, INFO ) */
/* CHARACTER TRANSR, UPLO */
/* INTEGER N, INFO */
/* REAL A( 0: * ) */
/* > \par Purpose: */
/* ============= */
/* > */
/* > \verbatim */
/* > */
/* > SPFTRF computes the Cholesky factorization of a real symmetric */
/* > positive definite matrix A. */
/* > */
/* > The factorization has the form */
/* > A = U**T * U, if UPLO = 'U', or */
/* > A = L * L**T, if UPLO = 'L', */
/* > where U is an upper triangular matrix and L is lower triangular. */
/* > */
/* > This is the block version of the algorithm, calling Level 3 BLAS. */
/* > \endverbatim */
/* Arguments: */
/* ========== */
/* > \param[in] TRANSR */
/* > \verbatim */
/* > TRANSR is CHARACTER*1 */
/* > = 'N': The Normal TRANSR of RFP A is stored; */
/* > = 'T': The Transpose TRANSR of RFP A is stored. */
/* > \endverbatim */
/* > */
/* > \param[in] UPLO */
/* > \verbatim */
/* > UPLO is CHARACTER*1 */
/* > = 'U': Upper triangle of RFP A is stored; */
/* > = 'L': Lower triangle of RFP A is stored. */
/* > \endverbatim */
/* > */
/* > \param[in] N */
/* > \verbatim */
/* > N is INTEGER */
/* > The order of the matrix A. N >= 0. */
/* > \endverbatim */
/* > */
/* > \param[in,out] A */
/* > \verbatim */
/* > A is REAL array, dimension ( N*(N+1)/2 ); */
/* > On entry, the symmetric matrix A in RFP format. RFP format is */
/* > described by TRANSR, UPLO, and N as follows: If TRANSR = 'N' */
/* > then RFP A is (0:N,0:k-1) when N is even; k=N/2. RFP A is */
/* > (0:N-1,0:k) when N is odd; k=N/2. IF TRANSR = 'T' then RFP is */
/* > the transpose of RFP A as defined when */
/* > TRANSR = 'N'. The contents of RFP A are defined by UPLO as */
/* > follows: If UPLO = 'U' the RFP A contains the NT elements of */
/* > upper packed A. If UPLO = 'L' the RFP A contains the elements */
/* > of lower packed A. The LDA of RFP A is (N+1)/2 when TRANSR = */
/* > 'T'. When TRANSR is 'N' the LDA is N+1 when N is even and N */
/* > is odd. See the Note below for more details. */
/* > */
/* > On exit, if INFO = 0, the factor U or L from the Cholesky */
/* > factorization RFP A = U**T*U or RFP A = L*L**T. */
/* > \endverbatim */
/* > */
/* > \param[out] INFO */
/* > \verbatim */
/* > INFO is INTEGER */
/* > = 0: successful exit */
/* > < 0: if INFO = -i, the i-th argument had an illegal value */
/* > > 0: if INFO = i, the leading minor of order i is not */
/* > positive definite, and the factorization could not be */
/* > completed. */
/* > \endverbatim */
/* Authors: */
/* ======== */
/* > \author Univ. of Tennessee */
/* > \author Univ. of California Berkeley */
/* > \author Univ. of Colorado Denver */
/* > \author NAG Ltd. */
/* > \date December 2016 */
/* > \ingroup realOTHERcomputational */
/* > \par Further Details: */
/* ===================== */
/* > */
/* > \verbatim */
/* > */
/* > We first consider Rectangular Full Packed (RFP) Format when N is */
/* > even. We give an example where N = 6. */
/* > */
/* > AP is Upper AP is Lower */
/* > */
/* > 00 01 02 03 04 05 00 */
/* > 11 12 13 14 15 10 11 */
/* > 22 23 24 25 20 21 22 */
/* > 33 34 35 30 31 32 33 */
/* > 44 45 40 41 42 43 44 */
/* > 55 50 51 52 53 54 55 */
/* > */
/* > */
/* > Let TRANSR = 'N'. RFP holds AP as follows: */
/* > For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
/* > three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
/* > the transpose of the first three columns of AP upper. */
/* > For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
/* > three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
/* > the transpose of the last three columns of AP lower. */
/* > This covers the case N even and TRANSR = 'N'. */
/* > */
/* > RFP A RFP A */
/* > */
/* > 03 04 05 33 43 53 */
/* > 13 14 15 00 44 54 */
/* > 23 24 25 10 11 55 */
/* > 33 34 35 20 21 22 */
/* > 00 44 45 30 31 32 */
/* > 01 11 55 40 41 42 */
/* > 02 12 22 50 51 52 */
/* > */
/* > Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
/* > transpose of RFP A above. One therefore gets: */
/* > */
/* > */
/* > RFP A RFP A */
/* > */
/* > 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
/* > 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
/* > 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
/* > */
/* > */
/* > We then consider Rectangular Full Packed (RFP) Format when N is */
/* > odd. We give an example where N = 5. */
/* > */
/* > AP is Upper AP is Lower */
/* > */
/* > 00 01 02 03 04 00 */
/* > 11 12 13 14 10 11 */
/* > 22 23 24 20 21 22 */
/* > 33 34 30 31 32 33 */
/* > 44 40 41 42 43 44 */
/* > */
/* > */
/* > Let TRANSR = 'N'. RFP holds AP as follows: */
/* > For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
/* > three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
/* > the transpose of the first two columns of AP upper. */
/* > For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
/* > three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
/* > the transpose of the last two columns of AP lower. */
/* > This covers the case N odd and TRANSR = 'N'. */
/* > */
/* > RFP A RFP A */
/* > */
/* > 02 03 04 00 33 43 */
/* > 12 13 14 10 11 44 */
/* > 22 23 24 20 21 22 */
/* > 00 33 34 30 31 32 */
/* > 01 11 44 40 41 42 */
/* > */
/* > Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
/* > transpose of RFP A above. One therefore gets: */
/* > */
/* > RFP A RFP A */
/* > */
/* > 02 12 22 00 01 00 10 20 30 40 50 */
/* > 03 13 23 33 11 33 11 21 31 41 51 */
/* > 04 14 24 34 44 43 44 22 32 42 52 */
/* > \endverbatim */
/* > */
/* ===================================================================== */
/* SPFTRF: Cholesky factorization (A = U**T*U or A = L*L**T) of a real
   symmetric positive definite matrix stored in Rectangular Full Packed
   (RFP) format.  The eight code paths below cover
   {N odd, N even} x {TRANSR = 'N','T'} x {UPLO = 'U','L'}; the RFP layout
   for each case is described in the documentation block above. */
/* Subroutine */ int spftrf_(char *transr, char *uplo, integer *n, real *a,
integer *info)
{
/* System generated locals */
integer i__1, i__2;
/* Local variables */
integer k;
logical normaltransr;
extern logical lsame_(char *, char *);
logical lower;
integer n1, n2;
extern /* Subroutine */ int strsm_(char *, char *, char *, char *,
integer *, integer *, real *, real *, integer *, real *, integer *
), ssyrk_(char *, char *, integer
*, integer *, real *, real *, integer *, real *, real *, integer *
), xerbla_(char *, integer *, ftnlen);
logical nisodd;
extern /* Subroutine */ int spotrf_(char *, integer *, real *, integer *,
integer *);
/* -- LAPACK computational routine (version 3.7.0) -- */
/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
/* December 2016 */
/* ===================================================================== */
/* Test the input parameters. */
*info = 0;
normaltransr = lsame_(transr, "N");
lower = lsame_(uplo, "L");
if (! normaltransr && ! lsame_(transr, "T")) {
*info = -1;
} else if (! lower && ! lsame_(uplo, "U")) {
*info = -2;
} else if (*n < 0) {
*info = -3;
}
if (*info != 0) {
i__1 = -(*info);
xerbla_("SPFTRF", &i__1, (ftnlen)6);
return 0;
}
/* Quick return if possible */
if (*n == 0) {
return 0;
}
/* If N is odd, set NISODD = .TRUE. */
/* If N is even, set K = N/2 and NISODD = .FALSE. */
if (*n % 2 == 0) {
k = *n / 2;
nisodd = FALSE_;
} else {
nisodd = TRUE_;
}
/* Set N1 and N2 depending on LOWER */
if (lower) {
n2 = *n / 2;
n1 = *n - n2;
} else {
n1 = *n / 2;
n2 = *n - n1;
}
/* start execution: there are eight cases */
/* Each case factors the leading triangle with SPOTRF, solves for the
   off-diagonal rectangle with STRSM, downdates the trailing triangle
   with SSYRK (alpha=-1, beta=1), and factors it with a second SPOTRF.
   When that second SPOTRF fails, its local pivot index is shifted
   (+n1 or +k) so INFO refers to the full matrix. */
if (nisodd) {
/* N is odd */
if (normaltransr) {
/* N is odd and TRANSR = 'N' */
if (lower) {
/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:n1-1) ) */
/* T1 -> a(0,0), T2 -> a(0,1), S -> a(n1,0) */
/* T1 -> a(0), T2 -> a(n), S -> a(n1) */
spotrf_("L", &n1, a, n, info);
if (*info > 0) {
return 0;
}
strsm_("R", "L", "T", "N", &n2, &n1, &c_b12, a, n, &a[n1], n);
ssyrk_("U", "N", &n2, &n1, &c_b15, &a[n1], n, &c_b12, &a[*n],
n);
spotrf_("U", &n2, &a[*n], n, info);
if (*info > 0) {
*info += n1;
}
} else {
/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:n2-1) */
/* T1 -> a(n1+1,0), T2 -> a(n1,0), S -> a(0,0) */
/* T1 -> a(n2), T2 -> a(n1), S -> a(0) */
spotrf_("L", &n1, &a[n2], n, info);
if (*info > 0) {
return 0;
}
strsm_("L", "L", "N", "N", &n1, &n2, &c_b12, &a[n2], n, a, n);
ssyrk_("U", "T", &n2, &n1, &c_b15, a, n, &c_b12, &a[n1], n);
spotrf_("U", &n2, &a[n1], n, info);
if (*info > 0) {
*info += n1;
}
}
} else {
/* N is odd and TRANSR = 'T' */
if (lower) {
/* SRPA for LOWER, TRANSPOSE and N is odd */
/* T1 -> A(0,0) , T2 -> A(1,0) , S -> A(0,n1) */
/* T1 -> a(0+0) , T2 -> a(1+0) , S -> a(0+n1*n1); lda=n1 */
spotrf_("U", &n1, a, &n1, info);
if (*info > 0) {
return 0;
}
strsm_("L", "U", "T", "N", &n1, &n2, &c_b12, a, &n1, &a[n1 *
n1], &n1);
ssyrk_("L", "T", &n2, &n1, &c_b15, &a[n1 * n1], &n1, &c_b12, &
a[1], &n1);
spotrf_("L", &n2, &a[1], &n1, info);
if (*info > 0) {
*info += n1;
}
} else {
/* SRPA for UPPER, TRANSPOSE and N is odd */
/* T1 -> A(0,n1+1), T2 -> A(0,n1), S -> A(0,0) */
/* T1 -> a(n2*n2), T2 -> a(n1*n2), S -> a(0); lda = n2 */
spotrf_("U", &n1, &a[n2 * n2], &n2, info);
if (*info > 0) {
return 0;
}
strsm_("R", "U", "N", "N", &n2, &n1, &c_b12, &a[n2 * n2], &n2,
a, &n2);
ssyrk_("L", "N", &n2, &n1, &c_b15, a, &n2, &c_b12, &a[n1 * n2]
, &n2);
spotrf_("L", &n2, &a[n1 * n2], &n2, info);
if (*info > 0) {
*info += n1;
}
}
}
} else {
/* N is even */
if (normaltransr) {
/* N is even and TRANSR = 'N' */
if (lower) {
/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
/* T1 -> a(1), T2 -> a(0), S -> a(k+1) */
/* LDA of RFP A is N+1 when TRANSR = 'N' and N is even. */
i__1 = *n + 1;
spotrf_("L", &k, &a[1], &i__1, info);
if (*info > 0) {
return 0;
}
i__1 = *n + 1;
i__2 = *n + 1;
strsm_("R", "L", "T", "N", &k, &k, &c_b12, &a[1], &i__1, &a[k
+ 1], &i__2);
i__1 = *n + 1;
i__2 = *n + 1;
ssyrk_("U", "N", &k, &k, &c_b15, &a[k + 1], &i__1, &c_b12, a,
&i__2);
i__1 = *n + 1;
spotrf_("U", &k, a, &i__1, info);
if (*info > 0) {
*info += k;
}
} else {
/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
/* T1 -> a(k+1,0) , T2 -> a(k,0), S -> a(0,0) */
/* T1 -> a(k+1), T2 -> a(k), S -> a(0) */
i__1 = *n + 1;
spotrf_("L", &k, &a[k + 1], &i__1, info);
if (*info > 0) {
return 0;
}
i__1 = *n + 1;
i__2 = *n + 1;
strsm_("L", "L", "N", "N", &k, &k, &c_b12, &a[k + 1], &i__1,
a, &i__2);
i__1 = *n + 1;
i__2 = *n + 1;
ssyrk_("U", "T", &k, &k, &c_b15, a, &i__1, &c_b12, &a[k], &
i__2);
i__1 = *n + 1;
spotrf_("U", &k, &a[k], &i__1, info);
if (*info > 0) {
*info += k;
}
}
} else {
/* N is even and TRANSR = 'T' */
if (lower) {
/* SRPA for LOWER, TRANSPOSE and N is even (see paper) */
/* T1 -> B(0,1), T2 -> B(0,0), S -> B(0,k+1) */
/* T1 -> a(0+k), T2 -> a(0+0), S -> a(0+k*(k+1)); lda=k */
spotrf_("U", &k, &a[k], &k, info);
if (*info > 0) {
return 0;
}
/* NOTE(review): &n1 as LDA below is numerically correct here
   because n1 == *n - *n/2 == k when N is even and LOWER,
   but reference LAPACK passes K at this spot. */
strsm_("L", "U", "T", "N", &k, &k, &c_b12, &a[k], &n1, &a[k *
(k + 1)], &k);
ssyrk_("L", "T", &k, &k, &c_b15, &a[k * (k + 1)], &k, &c_b12,
a, &k);
spotrf_("L", &k, a, &k, info);
if (*info > 0) {
*info += k;
}
} else {
/* SRPA for UPPER, TRANSPOSE and N is even (see paper) */
/* T1 -> B(0,k+1), T2 -> B(0,k), S -> B(0,0) */
/* T1 -> a(0+k*(k+1)), T2 -> a(0+k*k), S -> a(0+0)); lda=k */
spotrf_("U", &k, &a[k * (k + 1)], &k, info);
if (*info > 0) {
return 0;
}
strsm_("R", "U", "N", "N", &k, &k, &c_b12, &a[k * (k + 1)], &
k, a, &k);
ssyrk_("L", "N", &k, &k, &c_b15, a, &k, &c_b12, &a[k * k], &k);
spotrf_("L", &k, &a[k * k], &k, info);
if (*info > 0) {
*info += k;
}
}
}
}
return 0;
/* End of SPFTRF */
} /* spftrf_ */
|
the_stack_data/176706052.c
|
#include <stdio.h>
/* Read up to 100 integers from stdin and print the largest value followed by
   the 1-based position at which it was read.  Only values greater than 0 can
   be reported (maior starts at 0, preserving the original semantics). */
int main()
{
	int num;
	int i;
	int maior;
	int pos;

	maior = 0;
	pos = 0; /* was uninitialized: UB when no input exceeded 0 */
	for (i = 1; i <= 100; i++)
	{
		/* Stop on EOF/bad input instead of reusing an indeterminate num. */
		if (scanf("%d", &num) != 1)
			break;
		if (num > maior)
		{
			maior = num;
			pos = i;
		}
	}
	printf("%d\n", maior);
	printf("%d\n", pos);
	return 0;
}
|
the_stack_data/20449198.c
|
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>
/* Fork once; the child prints its parent's PID.
   FIX: the original assigned pid = fork() but then called fork() AGAIN in
   the condition, spawning up to four processes and leaving pid unused; the
   test now uses the saved pid.  The parent also waits so the child is
   reaped and getppid() in the child reports this process, not init. */
int main()
{
	pid_t pid;

	pid = fork();
	if (0 == pid)
	{
		/* Child process. */
		printf("%d", getppid());
	}
	else if (pid > 0)
	{
		/* Parent: reap the child to avoid a zombie. */
		wait(NULL);
	}
	return 0;
}
|
the_stack_data/140359.c
|
/**
*
* @copyright © 2010 - 2021, Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V.
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* We kindly request you to use one or more of the following phrases to refer to
* foxBMS in your hardware, software, documentation or advertising materials:
*
* - ″This product uses parts of foxBMS®″
* - ″This product includes parts of foxBMS®″
* - ″This product is derived from foxBMS®″
*
*/
/**
* @file c-004.c
* @author foxBMS Team
* @date 2021-04-06 (date of creation)
* @updated 2021-04-06 (date of last update)
* @ingroup SOME_GROUP
* @prefix ABC
*
* @brief Implementation of some software
*
*/
|
the_stack_data/167329677.c
|
// RUN: %clang_cc1 %s -emit-llvm -o -
/* Clang codegen fixture: an empty struct (GNU extension, size 0 in C)
   followed by a packed struct containing it plus a 1-bit bitfield. */
struct W {};
struct Y {
struct W w;
int i:1;
} __attribute__ ((packed)) y;
|
the_stack_data/175144423.c
|
/* This testcase is part of GDB, the GNU debugger.
Copyright 2011-2019 Free Software Foundation, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* GDB skip-testsuite fixture: always yields 1. */
int
bar (void)
{
  int result;

  result = 1;
  return result;
}
/* GDB skip-testsuite fixture: returns its argument incremented by one. */
int
baz (int a)
{
  return 1 + a;
}
/* Intentionally empty: the GDB testsuite marks this function to be
   bypassed by the "skip" command when stepping. */
static void
test_skip (void)
{
}
/* Entry point for the skip test: stepping into this call should land past
   test_skip when both its file and function are on the skip list. */
void
skip1_test_skip_file_and_function (void)
{
test_skip ();
}
|
the_stack_data/237644424.c
|
void ft_print_comb2(void);
int main()
{
/* ft_print_comb2 is declared above and defined in a separate file. */
ft_print_comb2();
return 0;
}
|
the_stack_data/49560.c
|
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
/* This file will contain the code to implement the functions declared in ia_css_shading.h
and associated helper functions */
|
the_stack_data/22012620.c
|
/* $NetBSD: strlcpy.c,v 1.4 2020/05/24 21:10:17 nia Exp $ */
/* $OpenBSD: strlcpy.c,v 1.7 2003/04/12 21:56:39 millert Exp $ */
/*
* Copyright (c) 1998 Todd C. Miller <[email protected]>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND TODD C. MILLER DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL TODD C. MILLER BE LIABLE
* FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
#ifndef HAVE_STRLCPY
#include <sys/cdefs.h>
#include <sys/types.h>
#include <string.h>
/*
* Copy src to string dst of size siz. At most siz-1 characters
* will be copied. Always NUL terminates (unless siz == 0).
* Returns strlen(src); if retval >= siz, truncation occurred.
*/
/*
 * Copy src into the buffer dst of capacity siz, copying at most siz-1
 * characters and always NUL-terminating when siz > 0.  Returns strlen(src)
 * (so retval >= siz signals truncation), or 0 when either pointer is NULL
 * (local extension to the OpenBSD original).
 */
size_t
strlcpy(char *dst, const char *src, size_t siz)
{
	const char *from = src;
	char *to = dst;
	size_t room = siz;

	if (dst == NULL || src == NULL)
		return 0;

	/* Copy up to siz-1 bytes, stopping early at the terminator. */
	if (room != 0) {
		while (--room != 0) {
			if ((*to++ = *from++) == '\0')
				break;
		}
	}

	/* Source did not fit: terminate dst, then walk to the end of src so
	   the return value is still the full source length. */
	if (room == 0) {
		if (siz != 0)
			*to = '\0';
		while (*from++)
			continue;
	}

	return (size_t)(from - src - 1);
}
#endif
|
the_stack_data/709923.c
|
#include <stdio.h>
#define WEIGHT_A 2
#define WEIGHT_B 3
#define WEIGHT_C 5
#define avrg(a, b, c) ((WEIGHT_A * (a) + WEIGHT_B * (b) + WEIGHT_C * (c)) / (WEIGHT_A + WEIGHT_B + WEIGHT_C))
/* Read three grades from stdin and print their weighted average (weights
   2/3/5 via the avrg macro above).
   FIX: the scanf return value is now checked (CERT EXP12-C) so garbage is
   not averaged, and the unused local `average` was removed. */
int main()
{
	double grade_a, grade_b, grade_c;

	if (scanf("%lf %lf %lf", &grade_a, &grade_b, &grade_c) != 3)
		return 1; /* malformed input: nothing sensible to print */
	printf("MEDIA = %.1lf\n", avrg(grade_a, grade_b, grade_c));
	return 0;
}
|
the_stack_data/23271.c
|
#include <stdio.h>
int a = 0; // global variable: lives in static (global) storage
int main() {
// locals are allocated on the stack
int x = 6;
int y = 'd';
// & takes a value's address; * dereferences an address back to the value
// NOTE(review): %p formally expects the pointer cast to (void *)
printf("%d %p %d\n", a, &a, *&a);
printf("%d %p %d\n", x, &x, *&x);
printf("%c %p %c", y, &y, *&y);
return 0;
}
|
the_stack_data/198580636.c
|
#include <stdio.h>
#include <stdlib.h>
/* Read n doubles into a calloc'd array and print the largest.
   FIXES: scanf results are now checked, n must be >= 1 before calloc, and
   the maximum is tracked in a separate variable instead of overwriting
   data[0] while scanning (same printed output, no clobbered array). */
int main() {
	int n;
	double *data;
	double largest;

	printf("Enter the total number of elements: ");
	if (scanf("%d", &n) != 1 || n < 1) {
		printf("Error!!! invalid number of elements.");
		return 1;
	}

	// Allocating memory for n elements
	data = calloc(n, sizeof *data);
	if (data == NULL) {
		printf("Error!!! memory not allocated.");
		exit(0);
	}

	// Storing numbers entered by the user.
	for (int i = 0; i < n; ++i) {
		printf("Enter number%d: ", i + 1);
		if (scanf("%lf", data + i) != 1) {
			printf("Error!!! invalid number.");
			free(data);
			return 1;
		}
	}

	// Finding the largest number
	largest = data[0];
	for (int i = 1; i < n; ++i) {
		if (largest < data[i]) {
			largest = data[i];
		}
	}

	printf("Largest number = %.2lf", largest);
	free(data);
	return 0;
}
|
the_stack_data/151706236.c
|
/* APPLE LOCAL file 4525731 */
/* { dg-do compile { target i?86-*-* } } */
/* { dg-require-effective-target ilp32 } */
/* { dg-options "-O3" } */
#include <stdlib.h>
/* DejaGnu fixture: combining force_align_arg_pointer with regparm(3) is
   rejected because the compiler needs a spare register to realign the
   stack; the dg-error annotation below is the expected diagnostic. */
static void __attribute__ ((__force_align_arg_pointer__, __noinline__, __regparm__(3)))
callee (int i1, int i2, int i3, int i4, int i5, int i6, int i7)
{ /* { dg-error "force_align_arg_pointer functions limited to 2 register parameters" } */
if (i1 != 1)
abort ();
if (i2 != 2)
abort ();
if (i3 != 3)
abort ();
if (i4 != 4)
abort ();
if (i5 != 5)
abort ();
if (i6 != 6)
abort ();
if (i7 != 7)
abort ();
}
/* Implicit-int main is deliberate in this legacy compiler testcase. */
main()
{
callee (1, 2, 3, 4, 5, 6, 7);
return 0;
}
|
the_stack_data/37637128.c
|
/* Verify that structure return doesn't invoke memcpy on
overlapping objects. */
extern void abort (void);
struct S {
char stuff[1024];
};
/* a.s and b.s deliberately overlap inside the union (offset by one int),
   so a struct return stored into one while sourced from the other covers
   overlapping storage. */
union U {
struct {
int space;
struct S s;
} a;
struct {
struct S s;
int space;
} b;
};
/* Defined in another translation unit: f returns struct S by value. */
struct S f(struct S *);
void g(union U *);
/* Storing f()'s by-value struct return into a union member that overlaps
   f's argument must not be lowered to memcpy on overlapping objects (a
   temporary or memmove semantics are required). */
void main_test(void)
{
union U u;
u.b.s = f(&u.a.s);
u.a.s = f(&u.b.s);
g(&u);
}
|
the_stack_data/206391939.c
|
#include <stdio.h>
#include <stdlib.h>
/* APUE-style demo: echo the command-line arguments, then dump the whole
   environment, one entry per line. */
int
main(int argc, char *argv[])
{
	extern char **environ;
	char **envp;
	int argi;

	/* Command-line arguments. */
	for (argi = 0; argi < argc; argi++)
		printf("argv[%d]: %s\n", argi, argv[argi]);

	/* environ is a NULL-terminated vector of "NAME=value" strings. */
	for (envp = environ; *envp != 0; envp++)
		printf("%s\n", *envp);

	exit(0);
}
|
the_stack_data/1105859.c
|
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
#include <stdio.h>
#include <math.h>
#include <limits.h>
/*int findKthLargest(int nums[], int size, int k);
//void merge(int arr[], int low, int medium, int high);
//void mergeSort(int arr[], int low, int high);
int partition(int arr[], int size);
int main(int argc, char** argv) {
int arr[] = {12, 3, 5, 19, 4}; //{26,19,7,5,4,3,12};{19,26,7};{12,4,3,5}
int size = sizeof(arr)/sizeof(arr[0]);
int k =1;
printf("the kth largest is %d", findKthLargest(arr,size,k));
}
void swap(int *a, int *b)
{
int temp = *a;
*a = *b;
*b = temp;
}
int partition(int arr[], int size){//all greater to the left and smaller to the right
int pivot=arr[0];
int left=0;
int right=size-1;
printf("the pivot is %d", pivot);
while(left<=right){
while(arr[left]>pivot){
left++;
}
while(arr[right]<pivot){
right--;
}
if(arr[left]<=arr[right]) {
swap(&arr[left], &arr[right]);
//increment left index and decrement right index
left++;
right--;
}
}
return left;
}
/*void mergeSort(int arr[], int low, int high){
if(low<high){
int med=low+(high-low)/2;
mergeSort(arr,low, med);
mergeSort(arr,med,high);
merge(arr,low, med, high );
}
}
void merge(int arr[], int low, int medium, int high){
int l1=medium-low+1;
int l2=high-medium;
int leftArr[l1];
int rightArr[l2];
for(int i=0;i<l1;i++){
leftArr[i]=arr[i+low];
}
for(int i=0;i<l2;i++){
rightArr[i]=arr[medium+1+i];
}
int i=0;
int j=0;
int w=low;
while (i < l1 && j < high-l2)
{
if (leftArr[i] <= rightArr[j])
{
arr[w] = leftArr[i];
i++;
}
else
{
arr[w] = rightArr[j];
j++;
}
w++;
}
while (i < l1)
{
arr[w] = leftArr[i];
i++;
w++;
}
while (j < l2)
{
arr[w] = rightArr[j];
j++;
w++;
}
}*/
/*int findKthLargest(int nums[], int size, int k){
if(size==1){
return nums[0];
}
else if(k>0 && k<=size){
/*mergeSort(nums,0,size-1);
return nums[size-k];*/
/*int med=partition(nums,size);
printf("med is %d",med);*/
/* if(k<med){
return nums[k-1];
}
else if(med>1){
int newNums[size-med+1];
for(int i=med-1;i<size;i++){
newNums[i-med+1]=nums[i];
}
return findKthLargest(newNums, size-med+1, k-med+1);
}
else{
int newNums[size-med];
for(int i=med;i<size;i++){
newNums[i-med]=nums[i];
}
return findKthLargest(newNums, size-med, k-med);
}*/
/*if(k==med){
return nums[med-1];
}
else if(med>k){
int newNums[size/2];
for(int i=0;i<size/2;i++){
newNums[i]=nums[i];
}
return findKthLargest(newNums,size/2,k);
}
else{
if(size%2==1){
int newNums[size/2+1];
for(int i=size/2;i<size;i++){
newNums[i-size/2]=nums[i];
}
return findKthLargest(newNums,size/2+1,k-med+1);
}
else{
int newNums[size/2];
for(int i=size/2;i<size;i++){
newNums[i-size/2]=nums[i];
}
return findKthLargest(newNums,size/2,k-med+1);
}
}
}
}*/
|
the_stack_data/26699021.c
|
/*
* Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Oracle nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* **************************************************************************
*
* Set of malloc/realloc/calloc/strdup/free replacement macros that
* insert some extra words around each allocation for debugging purposes
* and also attempt to detect invalid uses of the malloc heap through
* various tricks like inserting clobber words at the head and tail of
* the user's area, delayed free() calls, and setting the memory to
* a fixed pattern on allocation and when freed. The allocations also
* can include warrants so that when an area is clobbered, this
* package can report where the allocation took place.
* The macros included are:
* malloc(size)
* realloc(ptr,size)
* calloc(nelem,elsize)
* strdup(s1)
* free(ptr)
* malloc_police() <--- Not a system function
* The above macros match the standard behavior of the system functions.
*
* They should be used through the include file "debug_malloc.h".
*
* IMPORTANT: All source files that call any of these macros
* should include debug_malloc.h. This package will
* not work if the memory isn't allocated and freed
* by the macros in debug_malloc.h. The important issue
* is that any malloc() from debug_malloc.h must be
* freed by the free() in debug_malloc.h.
*
* The macros in debug_malloc.h will override the normal use of
* malloc, realloc, calloc, strdup, and free with the functions below.
*
* These functions include:
* void *debug_malloc(size_t, void*, int);
* void *debug_realloc(void*, size_t, void*, int);
* void *debug_calloc(size_t, size_t, void*, int);
* void debug_free(void *, void*, int);
*
* In addition the function debug_malloc_police() can be called to
* tell you what memory has not been freed.
* void debug_malloc_police(void*, int);
* The function debug_malloc_police() is available through the macro
* malloc_police(). Normally you would want to call this at exit()
* time to find out what memory is still allocated.
*
* The variable malloc_watch determines if the warrants are generated.
* warrants are structures that include the filename and line number
* of the caller who allocated the memory. This structure is stored
* at the tail of the malloc space, which is allocated large enough
* to hold some clobber words at the head and tail, the user's request
* and the warrant record (if malloc_watch is non-zero).
*
* The macro LEFT_OVER_CHAR is what the trailing bytes of an allocation
* are set to (when the allocation is not a multiple of 8) on allocation.
* At free(0 time, these bytes are double checked to make sure they were
* not clobbered. To remove this feature #undef LEFT_OVER_CHAR.
*
* The memory freed will have the FREED_CHAR put into it. To remove this
* feature #undef FREED_CHAR.
*
* The memory allocated (not calloc'd) will have the ALLOC_CHAR put into it
* at the time of allocation. To remove this feature #undef ALLOC_CHAR.
*
* The macro MAX_FREE_DELAY_COUNT controls how many free blocks will
* be kept around before being freed. This creates a delayed affect
* so that free space that gets clobbered just might get detected.
* The free() call will immediately set the user space to the FREED_CHAR,
* leaving the clobber words and warrant in place (making sure they
* haven't been clobbered). Then the free() pointer is added to a
* queue of MAX_FREE_DELAY_COUNT long, and if the queue was full, the
* oldest free()'d memory is actually freed, getting it's entire
* memory length set to the FREED_CHAR.
*
* WARNING: This can significantly slow down an application, depending
* on how many allocations are made. Also the additional memory
* needed for the clobber words and the warrants can be significant
* again, depending on how many allocations are made.
* In addition, the delayed free calls can create situations
* where you might run out of memory prematurely.
*
* **************************************************************************
*/
#ifdef DEBUG
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <stdarg.h>
#include "hprof.h"
/* ***************************************************************************
* Space normally looks like (clobber Word is 64 bits and aligned to 8 bytes):
*
* -----------------
* malloc/free get->| clobber Word | ---> contains -size requested by user
* -----------------
* User gets --->| user space |
* | |
* | | left_over | ---> left_over bytes will be <= 7
* -----------------
* | clobber Word | ---> contains -size requested by user
* -----------------
* | Warrant | ---> Optional (malloc_watch!=0)
* | | Contains filename and line number
* | | where allocation happened
* | |
* -----------------
***************************************************************************/
/*
* Flag that tells debug_malloc/debug_free/debug_realloc to police
* heap space usage. (This is a dynamic flag that can be turned on/off)
*/
static int malloc_watch = 1;
/* Character to stuff into freed space */
#define FREED_CHAR 'F'
/* Character to stuff into allocated space */
#define ALLOC_CHAR 'A'
/* Character to stuff into left over trailing bytes */
#define LEFT_OVER_CHAR 'Z'
/* Number of 'free' calls that will be delayed until the end */
#define MAX_FREE_DELAY_COUNT 1
#undef MAX_FREE_DELAY_COUNT
/* Maximum name of __FILE_ stored in each malloc'd area */
#define WARRANT_NAME_MAX (32-1) /* 1 less than multiple of 8 is best */
/* Macro to convert a user pointer to the malloc pointer */
#define user2malloc_(uptr) (((char*)(void*)uptr)-sizeof(Word))
/* Macro to convert a macro pointer to the user pointer */
#define malloc2user_(mptr) (((char*)(void*)(mptr))+sizeof(Word))
/* Size of the warrant record (this is dynamic) */
#define warrant_space ( malloc_watch?sizeof(Warrant_Record):0 )
/* Macro to round up a number of bytes to a multiple of sizeof(Word) bytes */
#define round_up_(n) \
((n)==0?0:(sizeof(Word)+(((n)-1)/sizeof(Word))*sizeof(Word)))
/* Macro to calculate the needed malloc bytes from the user's request. */
#define rbytes_(nbytes) \
(size_t)( sizeof(Word) + round_up_(nbytes) + sizeof(Word) + warrant_space )
/* Macro to get the -size stored in space through the malloc pointer */
#define nsize1_(mptr) (((Word*)(void*)(mptr))->nsize1)
#define nsize2_(mptr) (((Word*)(void*)(mptr))->nsize2)
/* Macro to get the -size stored in the tail of the space through */
/* the malloc pointer */
#define tail_nsize1_(mptr) \
nsize1_(((char*)(void*)(mptr))+round_up_(-nsize1_(mptr))+sizeof(Word))
#define tail_nsize2_(mptr) \
nsize2_(((char*)(void*)(mptr))+round_up_(-nsize1_(mptr))+sizeof(Word))
/* Macro to get the -size stored in space through the user pointer */
#define user_nsize1_(uptr) nsize1_(user2malloc_(uptr))
#define user_nsize2_(uptr) nsize2_(user2malloc_(uptr))
/* Macro to get the -size stored in the tail of the space through */
/* the user pointer */
#define user_tail_nsize1_(uptr) tail_nsize1_(user2malloc_(uptr))
#define user_tail_nsize2_(uptr) tail_nsize2_(user2malloc_(uptr))
/* Macro to get the int* of the last 32bit word of user space */
#define last_user_word_(mptr) \
((int*)(((char*)(void*)(mptr))+round_up_(-nsize1_(mptr))))
/* Macros to get at the warrant contents from the malloc pointer */
#define warrant_(mptr) \
(*((Warrant_Record*)(void*)(((char*)(void*)(mptr))+round_up_(-nsize1_(mptr))+sizeof(Word)*2)))
/* This struct is allocated after the tail clobber word if malloc_watch */
/* is true. */
typedef struct {
void *link; /* Next mptr in list */
char name[WARRANT_NAME_MAX + 1]; /* Name of allocator */
int line; /* Line number where allocated */
int id; /* Nth allocation */
} Warrant_Record;
#define warrant_link_(mptr) warrant_(mptr).link
#define warrant_name_(mptr) warrant_(mptr).name
#define warrant_line_(mptr) warrant_(mptr).line
#define warrant_id_(mptr) warrant_(mptr).id
#define MFILE(mptr) (malloc_watch?warrant_name_(mptr):"?")
#define MLINE(mptr) (malloc_watch?warrant_line_(mptr):0)
#define MID(mptr) (malloc_watch?warrant_id_(mptr):0)
/* This should be one machine word and is also the clobber word struct */
typedef struct {
int nsize1;
int nsize2;
} Word; /* Largest basic type , sizeof(double)? */
/* The first malloc pointer for the warrants */
static void *first_warrant_mptr = NULL;
/* Counter of allocations */
static int id_counter = 0;
static int largest_size = 0;
static void * largest_addr = NULL;
static void * smallest_addr = NULL;
/* Used to isolate what the error is */
static char *debug_check;
static void *clobbered_ptr;
/* Minumum macro */
#define minimum(a,b) ((a)<(b)?(a):(b))
/* Message routine */
/* Print one "debug_malloc: ..." diagnostic line to stderr and flush it. */
static void
error_message(const char *format, ...)
{
    FILE *out = stderr; /* destination for all debug_malloc.c messages */
    va_list args;

    (void)fprintf(out, "debug_malloc: ");
    va_start(args, format);
    (void)vfprintf(out, format, args);
    va_end(args);
    (void)fprintf(out, "\n");
    (void)fflush(out);
}
/* This function prints out a memory error for the memory function
* 'name' which was called in file 'file' at line number 'line'. The malloc
* pointer with the error is in 'mptr'.
*/
/*
 * Report a fatal heap error found in the malloc block 'mptr', dump the
 * block's user data, optionally walk the outstanding-allocation list,
 * then abort().  Never returns.
 *
 * name        - debug function that detected the error
 * mid         - allocation id of the offending block
 * mfile/mline - where the offending block was allocated
 * file/line   - where the detecting debug call was made
 *
 * Fix: the hex-dump loop below could emit up to 4 output characters
 * ("\xNN") per input byte while iterating sizeof(temp) input bytes,
 * overflowing temp[]; the loop now stops when the output buffer is
 * nearly full.
 */
static void
memory_error(void *mptr, const char *name, int mid, const char *mfile, int mline, const char *file, int line)
{
    char nice_words[512];
    char temp[256];
    int len;
    void *mptr_walk;

    if (name == NULL)
        name = "UNKNOWN_NAME";
    if (file == NULL)
        file = "UNKNOWN_FILE";
    /* Start with the system's description of the current errno. */
    md_system_error(temp, (int)sizeof(temp));
    (void)strcpy(nice_words, temp);
    /* debug_check/clobbered_ptr identify which guard failed, if any. */
    if ( debug_check!=NULL ) {
        (void)md_snprintf(nice_words, sizeof(nice_words),
                "%s The %s at %p appears to have been hit.",
                temp, debug_check, clobbered_ptr);
    }
    len = -nsize1_(mptr);   /* sizes are stored negated in the guard word */
    error_message("Error: "
            "%s The malloc space #%d is at %p [user size=%d(0x%x)],"
            " and was allocated from file \"%s\" at line %d."
            " [The debug function %s() detected this error "
            "in file \"%s\" at line %d.]",
            nice_words, mid, mptr, len, len, mfile, mline,
            name, file, line);
    /* Print out contents of this allocation, escaping non-printables. */
    {
        int i;
        void *uptr = malloc2user_(mptr);
        char *pmess;
        pmess = temp;
        for(i=0;i<(int)sizeof(temp);i++) {
            int ch = ((unsigned char*)uptr)[i];
            /* Leave room for a 4-char "\xNN" escape plus the NUL. */
            if (pmess + 5 > temp + sizeof(temp))
                break;
            if ( isprint(ch) ) {
                *pmess++ = ch;
            } else {
                *pmess++ = '\\';
                *pmess++ = 'x';
                (void)sprintf(pmess,"%02x",ch);
                pmess+=2;
            }
        }
        *pmess = 0;
        error_message("Error: %p contains user data: %s", uptr, temp);
    }
    /* Try and print out the table of outstanding allocations. */
    if (!malloc_watch) {
        return;
    }
    mptr_walk = first_warrant_mptr;
    if (mptr_walk != NULL) {
        error_message("Active allocations: "
                "count=%d, largest_size=%d, address range (%p,%p)",
                id_counter, largest_size, smallest_addr, largest_addr);
        do {
            int size1;
            int size2;
            char *mfile_walk;
            /* The list itself may be corrupted; bail out of the walk
             * when a link points outside the known address range. */
            if ( mptr_walk > largest_addr || mptr_walk < smallest_addr ) {
                error_message("Terminating list due to pointer corruption");
                break;
            }
            size1 = -nsize1_(mptr_walk);
            size2 = -nsize2_(mptr_walk);
            mfile_walk = MFILE(mptr_walk);
            error_message("#%d: addr=%p size1=%d size2=%d file=\"%.*s\" line=%d",
                    MID(mptr_walk), mptr_walk, size1, size2,
                    WARRANT_NAME_MAX, mfile_walk, MLINE(mptr_walk));
            /* The two size copies must agree and be plausible. */
            if ( size1 != size2 || size1 > largest_size || size1 < 0 ) {
                error_message("Terminating list due to size corruption");
                break;
            }
            mptr_walk = warrant_link_(mptr_walk);
        } while (mptr_walk != NULL);
    }
    abort();
}
/* This function sets the clobber word and sets up the warrant for the input
* malloc pointer "mptr".
*/
/*
 * Initialize the guard structure for a freshly allocated malloc block
 * 'mptr' of user size 'size': store the negated size in the head and
 * tail clobber words, stamp the round-up padding bytes with
 * LEFT_OVER_CHAR, and (when malloc_watch is on) fill in a warrant
 * recording the allocating file/line and link the block onto the
 * outstanding-allocation list.  Also updates the largest/smallest
 * address and size bookkeeping used by memory_error()'s corruption
 * guards.
 */
static void
setup_space_and_issue_warrant(void *mptr, size_t size, const char *file, int line)
{
    register int nbytes;
    /*LINTED*/
    nbytes = (int)size;
    /* largest_addr == NULL doubles as a "first allocation" test here. */
    if ( nbytes > largest_size || largest_addr == NULL ) largest_size = nbytes;
    /*LINTED*/
    if ( mptr > largest_addr ) largest_addr = mptr;
    /*LINTED*/
    if ( mptr < smallest_addr || smallest_addr == NULL ) smallest_addr = mptr;
    /* Must be done first: the tail_nsize* macros locate the tail word
     * through the head word, so the head must hold -nbytes already. */
    nsize1_(mptr) = -nbytes;
    nsize2_(mptr) = -nbytes;
    tail_nsize1_(mptr) = -nbytes;
    tail_nsize2_(mptr) = -nbytes;
#ifdef LEFT_OVER_CHAR
    /* Fill in those few extra bytes just before the tail Word structure */
    {
        register int trailing_extra_bytes;
        /* LINTED */
        trailing_extra_bytes = (int) (round_up_(nbytes) - nbytes);
        if ( trailing_extra_bytes > 0 ) {
            register char *p;
            register int i;
            p = ((char *) mptr) + sizeof(Word) + nbytes;
            for (i = 0; i < trailing_extra_bytes; i++)
                p[i] = LEFT_OVER_CHAR;
        }
    }
#endif
    /* Fill out warrant: record who allocated this block and link it in. */
    if (malloc_watch) {
        static Warrant_Record zero_warrant;
        register void *p1,
                *p2;
        size_t len;
        int start_pos = 0;
        warrant_(mptr) = zero_warrant;
        p1 = warrant_name_(mptr);
        len = strlen(file);
        /* Keep only the last WARRANT_NAME_MAX chars of a long filename. */
        if ( len > WARRANT_NAME_MAX ) {
            /*LINTED*/
            start_pos = (int)len - WARRANT_NAME_MAX;
        }
        p2 = ((char*)file) + start_pos;
        /*LINTED*/
        (void) memcpy(p1, p2, minimum(((int)len), WARRANT_NAME_MAX));
        warrant_line_(mptr) = line;
        warrant_id_(mptr) = ++id_counter;
        /* Push onto the head of the singly linked warrant list. */
        warrant_link_(mptr) = first_warrant_mptr;
        first_warrant_mptr = mptr;
    }
}
/* This function checks the clobber words at the beginning and end of the
* allocated space.
*/
/*
 * Verify the integrity of the allocation behind user pointer 'uptr':
 * both head clobber words, both tail clobber words, and the
 * LEFT_OVER_CHAR padding bytes.  Any mismatch calls memory_error()
 * (which aborts).  Sets debug_check/clobbered_ptr before each test so
 * memory_error() can report exactly which guard was hit.
 */
static void
memory_check(void *uptr, int mid, const char *mfile, int mline, const char *file, int line)
{
    int neg_nbytes;
    int nbytes;
    debug_check = "pointer value itself";
    clobbered_ptr = uptr;
    if (uptr == NULL)
        memory_error((void *) NULL, "memory_check", mid, mfile, mline, file, line);
    /* Check both Word structures (head guard stores the negated size). */
    debug_check = "first beginning clobber word";
    clobbered_ptr = (char*)&user_nsize1_(uptr);
    neg_nbytes = user_nsize1_(uptr);
    if (neg_nbytes >= 0)
        memory_error(user2malloc_(uptr), "memory_check", mid, mfile, mline, file, line);
    debug_check = "second beginning clobber word";
    clobbered_ptr = (char*)&user_nsize2_(uptr);
    if (neg_nbytes != user_nsize2_(uptr))
        memory_error(user2malloc_(uptr), "memory_check", mid, mfile, mline, file, line);
    debug_check = "first ending clobber word";
    clobbered_ptr = (char*)&user_tail_nsize1_(uptr);
    if (neg_nbytes != user_tail_nsize1_(uptr))
        memory_error(user2malloc_(uptr), "memory_check", mid, mfile, mline, file, line);
    debug_check = "second ending clobber word";
    clobbered_ptr = (char*)&user_tail_nsize2_(uptr);
    if (neg_nbytes != user_tail_nsize2_(uptr))
        memory_error(user2malloc_(uptr), "memory_check", mid, mfile, mline, file, line);
    /* Get a positive count of bytes */
    nbytes = -neg_nbytes;
#ifdef LEFT_OVER_CHAR
    {
        /* Check those few extra bytes just before the tail Word structure */
        register int trailing_extra_bytes;
        register int i;
        register char *p;
        /* LINTED */
        trailing_extra_bytes = (int) (round_up_(nbytes) - nbytes);
        p = ((char *) (uptr)) + nbytes;
        debug_check = "trailing left over area";
        for (i = 0; i < trailing_extra_bytes; i++) {
            /* NOTE(review): "p+1" looks like it should be "p+i" so the
             * report points at the clobbered byte -- confirm. */
            clobbered_ptr = p+1;
            if (p[i] != LEFT_OVER_CHAR) {
                memory_error(user2malloc_(uptr), "memory_check", mid, mfile, mline, file, line);
            }
        }
    }
#endif
    /* Make sure debug_check is cleared */
    debug_check = NULL;
}
/* This function looks for the given malloc pointer in the police line up
* and removes it from the warrant list.
* mptr The pointer to the malloc space being removed
*/
/*
 * Unlink the malloc pointer 'mptr' from the outstanding-warrant list.
 * Returns 1 when it was found and removed -- or when nothing needed
 * doing (warrants disabled, or mptr is NULL) -- and 0 when the pointer
 * was not on the list.
 */
static int
remove_warrant(void *mptr)
{
    void *cur;
    void *prev;

    if (!malloc_watch || mptr == NULL)
        return 1;

    prev = NULL;
    for (cur = first_warrant_mptr; cur != NULL; cur = warrant_link_(cur)) {
        if (cur == mptr) {
            /* Splice the node out of the singly linked list. */
            if (prev == NULL)
                first_warrant_mptr = warrant_link_(cur);
            else
                warrant_link_(prev) = warrant_link_(cur);
            return 1;
        }
        prev = cur;
    }
    return 0;
}
/*
 * Validate and really free the user pointer 'uptr': check all guard
 * words, unlink the block's warrant, overwrite the whole block
 * (guards and all) with FREED_CHAR, then hand the underlying malloc
 * pointer back to the real free().  A NULL uptr is a no-op here.
 */
static void
actual_free(void *uptr, const char *file, int line)
{
    void *mptr;
    const char *mfile;
    int mline;
    int mid;
    if ( uptr == NULL )
        return;
    mptr = user2malloc_(uptr);
    /* Verify the guard words before touching the warrant list. */
    memory_check(uptr, (mid=MID(mptr)), (mfile=MFILE(mptr)), (mline=MLINE(mptr)), file, line);
    /* No warrant found: the block was not allocated by this package (or
     * was already freed); re-check, presumably to force the error
     * report -- TODO confirm the intent of the second call. */
    if (malloc_watch && remove_warrant(mptr)==0 )
        memory_check(uptr, mid, mfile, mline, file, line);
#ifdef FREED_CHAR
    if ( mptr!=NULL ) {
        size_t nbytes = -nsize1_(mptr);
        /* LINTED */
        (void)memset(mptr, FREED_CHAR, rbytes_(nbytes));
    }
#endif
    free(mptr);
}
#ifdef MAX_FREE_DELAY_COUNT
/* Circular queue of user pointers whose free() has been postponed, so
 * that writes to already-freed space have a chance to be detected. */
static void *free_delay[MAX_FREE_DELAY_COUNT];
static int free_delay_pos = 0;  /* next slot to evict/fill */
/*
 * Queue 'uptr' for deferred freeing: verify its guards, actually free
 * the oldest queued pointer (if any), store 'uptr' in its slot, and
 * stamp the user area with FREED_CHAR while leaving the guard words and
 * warrant intact for later re-checking.
 */
static void
delayed_free(void *uptr, const char* file, int line)
{
    void *mptr;
    void *olduptr = free_delay[free_delay_pos];
    size_t nbytes;
    if ( uptr==NULL )
        return;
    mptr = user2malloc_(uptr);
    memory_check(uptr, MID(mptr), MFILE(mptr), MLINE(mptr), file, line);
    /* Evict the oldest entry from this slot before reusing it. */
    if ( olduptr!=NULL ) {
        actual_free(olduptr, file, line);
    }
    free_delay[free_delay_pos] = uptr;
    free_delay_pos++;
    free_delay_pos = free_delay_pos % MAX_FREE_DELAY_COUNT;
    nbytes = -user_nsize1_(uptr);
#ifdef FREED_CHAR
    /* Only the user area is stamped; guards/warrant stay checkable. */
    (void)memset(uptr, FREED_CHAR, (size_t)nbytes);
#endif
}
/*
 * Flush the delay queue: actually free every queued pointer.  Called
 * before verification/police passes so the queue does not show up as
 * outstanding allocations.
 */
static void
delayed_free_all(const char *file, int line)
{
    int i;
    for ( i=0; i< MAX_FREE_DELAY_COUNT; i++) {
        void *olduptr = free_delay[i];
        free_delay[i] = NULL;
        if ( olduptr!=NULL ) {
            actual_free(olduptr, file, line);
        }
    }
}
#endif
/*
 * Debug replacement for free().  Unlike standard free(), a NULL pointer
 * is reported as an error.  Depending on MAX_FREE_DELAY_COUNT the block
 * is either queued for deferred release or freed immediately.
 * (file, line) identify the caller for error reports.
 */
void
debug_free(void *uptr, const char *file, int line)
{
    int mid = 0;
    if (uptr == NULL)
        memory_error((void *) NULL, "debug_free", mid, file, line, file, line);
#ifdef MAX_FREE_DELAY_COUNT
    delayed_free(uptr, file, line);
#else
    actual_free(uptr, file, line);
#endif
}
/* This function calls malloc(). */
/*
 * Debug replacement for malloc(): allocates the user's nbytes plus room
 * for the guard words (and a warrant when malloc_watch is on), stamps
 * the user area with ALLOC_CHAR, and returns the user pointer.
 * Aborts via memory_error() on a non-positive request or allocation
 * failure.  (file, line) identify the caller for the warrant.
 */
void *
debug_malloc(size_t nbytes, const char *file, int line)
{
    void *mptr;
    void *uptr;
    int mid = id_counter;
    /*LINTED*/
    if ((int)nbytes <= 0)
        memory_error((void *) NULL, "debug_malloc", mid, file, line, file, line);
    /* rbytes_() adds the guard words and optional warrant space. */
    /* LINTED */
    mptr = malloc(rbytes_(nbytes));
    if (mptr == NULL)
        memory_error((void *) NULL, "debug_malloc", mid, file, line, file, line);
    setup_space_and_issue_warrant(mptr, nbytes, file, line);
    uptr = malloc2user_(mptr);
#ifdef ALLOC_CHAR
    /* Stamp the user area so reads of uninitialized memory stand out. */
    (void)memset(uptr, ALLOC_CHAR, (size_t)nbytes);
#endif
    return uptr;
}
/*
 * Debug replacement for realloc(): verifies the old block's guards,
 * removes its warrant, reallocates with guard/warrant space, re-issues
 * the guards and warrant for the new block, and ALLOC_CHAR-stamps any
 * newly grown tail.  A NULL uptr behaves like debug_malloc().
 * Aborts via memory_error() on a non-positive size or failure.
 */
void *
debug_realloc(void *uptr, size_t nbytes, const char *file, int line)
{
    void *mptr;
    void *oldmptr;
    void *newuptr;
    size_t oldnbytes;
    int mid = id_counter;
    /* NOTE(review): user2malloc_ does pointer arithmetic even when uptr
     * is NULL; oldmptr is then only used as a label in error reports,
     * but computing it this early is technically suspect -- confirm. */
    oldmptr = user2malloc_(uptr);
    oldnbytes = 0;
    if ((int)nbytes <= 0)
        memory_error(oldmptr, "debug_realloc", mid, file, line, file, line);
    if (uptr != NULL) {
        memory_check(uptr, MID(oldmptr), MFILE(oldmptr), MLINE(oldmptr), file, line);
        oldnbytes = -user_nsize1_(uptr);
        /* Missing warrant: block not allocated by this package. */
        if ( malloc_watch && remove_warrant(oldmptr)==0 )
            memory_check(uptr, MID(oldmptr), MFILE(oldmptr), MLINE(oldmptr), file, line);
    }
    if (uptr == NULL) {
        /* LINTED */
        mptr = malloc(rbytes_(nbytes));
    } else {
        /* LINTED */
        mptr = realloc(oldmptr, rbytes_(nbytes));
    }
    if (mptr == NULL)
        memory_error(oldmptr, "debug_realloc", mid, file, line, file, line);
    setup_space_and_issue_warrant(mptr, nbytes, file, line);
    newuptr = malloc2user_(mptr);
#ifdef ALLOC_CHAR
    /* Stamp only the portion that did not carry over old user data. */
    if (uptr == NULL)
        (void)memset(newuptr, ALLOC_CHAR, (size_t)nbytes);
    else if ( nbytes > oldnbytes )
        (void)memset(((char*)newuptr)+oldnbytes, ALLOC_CHAR, (size_t)nbytes-oldnbytes);
#endif
    return newuptr;
}
/* This function calls calloc(). */
/*
 * Debug replacement for calloc(): allocates zeroed space for nelem
 * elements of elsize bytes plus guard words and an optional warrant,
 * and returns the user pointer.  Aborts via memory_error() on a
 * zero/negative request, size overflow, or allocation failure.
 * (file, line) identify the caller for the warrant.
 *
 * Fix: nelem*elsize is now checked for overflow before the multiply;
 * previously an overflowing product silently allocated a block smaller
 * than requested.
 */
void *
debug_calloc(size_t nelem, size_t elsize, const char *file, int line)
{
    void *mptr;
    size_t nbytes;
    int mid = id_counter;
    /* Reject nelem*elsize wrap-around (CERT MEM07-C). */
    if (nelem != 0 && elsize > (size_t)-1 / nelem)
        memory_error((void *) NULL, "debug_calloc", mid, file, line, file, line);
    nbytes = nelem*elsize;
    /*LINTED*/
    if ((int)nbytes <= 0)
        memory_error((void *) NULL, "debug_calloc", mid, file, line, file, line);
    /* calloc() zero-fills, so no ALLOC_CHAR stamping is done here. */
    /* LINTED */
    mptr = calloc(rbytes_(nbytes),1);
    if (mptr == NULL)
        memory_error((void *) NULL, "debug_calloc", mid, file, line, file, line);
    setup_space_and_issue_warrant(mptr, nbytes, file, line);
    return malloc2user_(mptr);
}
/* This function replaces strdup(). */
/* This function replaces strdup(). */
/*
 * Allocates guarded space for a copy of s1 (including the NUL) via the
 * same scheme as debug_malloc(), copies the string in, and returns the
 * user pointer.  Aborts via memory_error() on a NULL input, length
 * overflow, or allocation failure.  Free the result with the package's
 * free() macro, not the system free().
 */
char *
debug_strdup(const char *s1, const char *file, int line)
{
    void *mptr;
    void *uptr;
    size_t nbytes;
    int mid = id_counter;
    if (s1 == NULL)
        memory_error((void *) NULL, "debug_strdup", mid, file, line, file, line);
    nbytes = strlen(s1)+1;
    /*LINTED*/
    if ((int)nbytes < 0)
        memory_error((void *) NULL, "debug_strdup", mid, file, line, file, line);
    /* LINTED */
    mptr = malloc(rbytes_(nbytes));
    if (mptr == NULL)
        memory_error((void *) NULL, "debug_strdup", mid, file, line, file, line);
    setup_space_and_issue_warrant(mptr, nbytes, file, line);
    uptr = malloc2user_(mptr);
    (void)strcpy((char*)uptr, s1);
    return (char*)uptr;
}
/*
 * Verify the guard words of every outstanding allocation (after first
 * flushing any delayed frees).  Aborts via memory_check()/memory_error()
 * on the first corrupted block; returns silently when all is well or
 * when warrants are disabled.
 */
void
debug_malloc_verify(const char *file, int line)
{
    void *mptr;
#ifdef MAX_FREE_DELAY_COUNT
    delayed_free_all(file,line);
#endif
    if (!malloc_watch) {
        return;
    }
    mptr = first_warrant_mptr;
    if (mptr != NULL) {
        /* Check all this memory first */
        do {
            memory_check(malloc2user_(mptr), MID(mptr), MFILE(mptr), MLINE(mptr), file, line);
            mptr = warrant_link_(mptr);
        } while (mptr != NULL);
    }
}
/* Report outstanding space warrants to console. */
/* Report outstanding space warrants to console. */
/*
 * Flush delayed frees, verify every live allocation, then print one
 * "Outstanding space warrant" line per block still allocated.  Intended
 * to be called at exit() time to surface leaks.  No-op when warrants
 * are disabled.
 */
void
debug_malloc_police(const char *file, int line)
{
    void *mptr;
#ifdef MAX_FREE_DELAY_COUNT
    delayed_free_all(file,line);
#endif
    if (!malloc_watch) {
        return;
    }
    mptr = first_warrant_mptr;
    if (mptr != NULL) {
        /* Verify integrity before trusting the warrant contents. */
        debug_malloc_verify(file, line);
        /* Now issue warrants */
        mptr = first_warrant_mptr;
        do {
            error_message("Outstanding space warrant: %p (%d bytes) allocated by %s at line %d, allocation #%d",
                    mptr, -nsize1_(mptr), warrant_name_(mptr),
                    warrant_line_(mptr), warrant_id_(mptr));
            mptr = warrant_link_(mptr);
        } while (mptr != NULL);
    }
}
#else
/* No-op stub used when DEBUG is not defined. */
void
debug_malloc_verify(const char *file, int line)
{
    /* (void) casts are the idiomatic unused-parameter suppression;
     * the old "file = file;" self-assignments trip -Wself-assign. */
    (void)file;
    (void)line;
}
/* No-op stub used when DEBUG is not defined. */
void
debug_malloc_police(const char *file, int line)
{
    /* (void) casts are the idiomatic unused-parameter suppression;
     * the old "file = file;" self-assignments trip -Wself-assign. */
    (void)file;
    (void)line;
}
#endif
|
the_stack_data/573028.c
|
/*******************************************************************************
*
* Program: Replace char
*
* Description: Example of how to replace one character with another in a string
* with C.
*
* YouTube Lesson: https://www.youtube.com/watch?v=0OjJc-Qyd4E
*
* Author: Kevin Browne @ https://portfoliocourses.com
*
*******************************************************************************/
#include <stdio.h>
#include <string.h>
void replace_char(char *string, char replace, char new);
/* Demonstrate replace_char() on a couple of sample strings. */
int main(void)
{
    char first[] = "This is my string!";
    replace_char(first, 'i', 'Z');
    printf("s1: %s\n", first);

    char second[] = "aaaaBBBBaBBBBaaaBBBBcdea";
    replace_char(second, 'a', 'Z');
    printf("s2: %s\n", second);

    return 0;
}
// replaces occurrences of replace in string with new
// Overwrite every occurrence of 'replace' in the NUL-terminated
// string with 'new'; the string is modified in place.
void replace_char(char *string, char replace, char new)
{
    for (char *p = string; *p != '\0'; p++) {
        if (*p == replace)
            *p = new;
    }
}
|
the_stack_data/36076509.c
|
#include <stdio.h>
#include <stdlib.h> // needed for exit()
#include <stdbool.h> // needed for is1butf8() etc.
/***************************************************************
* int decode_utf8(unsigned int)
* This function takes an unsigned int representing a UTF-8
* character and converts it into the corresponding Unicode
* code point in the form U+(hex value).
* For example, 0xD096, which corresponds to Russian capital
* letter Ж (ZH) is converted into Unicode code point U+0416.
* Written by Thomas Hedden April 2021, modified Jan 2022.
* Pre: an unsigned int representing a UTF-8
* character.
* Post: an unsigned int representing the
* Unicode code point corrsponding
* to that UTF-8 character
* Functions used: standard library functions, is1butf8(), etc.
* Includes: stdio.h, stdlib.h, stdbool.h
* Used in: utod.c, xtoU.c */
/***************************************************************/
/* THINGS THAT NEED ATTENTION */
/***************************************************************/
/* #include's and #define's should be bracketed by INFDEFINE's
*/
/***************************************************************
* FUNCTION DECLARATIONS *
***************************************************************/
bool is1butf8(unsigned int); // returns true if 1-byte UTF-8
bool is2butf8(unsigned int); // returns true if 2-byte UTF-8
bool is3butf8(unsigned int); // returns true if 3-byte UTF-8
bool is4butf8(unsigned int); // returns true if 4-byte UTF-8
/*
 * Convert one UTF-8 character, packed into the low-order bytes of an
 * unsigned int (e.g. 0xD096 for Cyrillic Ж), into its Unicode code
 * point (e.g. 0x0416).  Classification is delegated to the external
 * is1butf8()..is4butf8() helpers.  If the value is not recognized as
 * UTF-8, prints a message to stderr and returns 0xFFFD (REPLACEMENT
 * CHARACTER).
 *
 * Fix: in the 4-byte branch the original shifted the three 'uuu' bits
 * left by 3 instead of 2 when assembling bits 16..20, so every code
 * point >= U+40000 decoded wrong (e.g. F1 80 80 80 yielded 0x80000
 * instead of U+40000).  The masks below follow RFC 3629's bit layout.
 */
int decode_utf8(unsigned int u) {
    /* The fixed byte extraction below assumes 4-byte ints. */
    if( sizeof(int) != 4 ) {
        fprintf(stderr, "sizeof(int) is not 4!\n");
        exit(EXIT_FAILURE);
    }
    int ucp;   /* Unicode code point being assembled */
    /* Byte 1 is the leftmost (highest-order) byte of the int; a 1-byte
     * UTF-8 character therefore lives entirely in byte4. */
    unsigned int byte1 = (u & 0xFF000000) >> 24;
    unsigned int byte2 = (u & 0x00FF0000) >> 16;
    unsigned int byte3 = (u & 0x0000FF00) >> 8;
    unsigned int byte4 =  u & 0x000000FF;
    if(is1butf8(u)) {
        /* 0xxxxxxx -> 0xxxxxxx (7-bit ASCII). */
        ucp = byte4;
        return(ucp);
    }
    if(is2butf8(u)) {
        /* 110yyyyy 10xxxxxx -> 00000yyy yyxxxxxx */
        ucp = ((byte3 & 0x1F) << 6) + (byte4 & 0x3F);
        return(ucp);
    }
    if(is3butf8(u)) {
        /* 1110zzzz 10yyyyyy 10xxxxxx -> zzzzyyyy yyxxxxxx */
        ucp = ((byte2 & 0x0F) << 12) + ((byte3 & 0x3F) << 6) + (byte4 & 0x3F);
        return(ucp);
    }
    if(is4butf8(u)) {
        /* 11110uuu 10wwzzzz 10yyyyyy 10xxxxxx
         *   -> 000uuuww zzzzyyyy yyxxxxxx
         * uuu occupies bits 18..20, hence the << 18 (the buggy version
         * effectively placed uuu one bit too high). */
        ucp = ((byte1 & 0x07) << 18) + ((byte2 & 0x3F) << 12) +
              ((byte3 & 0x3F) << 6) + (byte4 & 0x3F);
        return(ucp);
    }
    fprintf(stderr, "0x%X is not a UTF-8 character\n", u);
    return(0xFFFD);   /* U+FFFD REPLACEMENT CHARACTER */
}
|
the_stack_data/112242.c
|
// general protection fault in __hrtimer_run_queues
// https://syzkaller.appspot.com/bug?id=576e2155c70f83dfd48ac57ab2fd754649ffeb89
// status:invalid
// autogenerated by syzkaller (http://github.com/google/syzkaller)
#define _GNU_SOURCE
#include <dirent.h>
#include <endian.h>
#include <errno.h>
#include <errno.h>
#include <linux/futex.h>
#include <linux/net.h>
#include <netinet/in.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <signal.h>
#include <stdarg.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
/* Terminate the entire process (all threads) via exit_group, then spin
 * on a volatile counter so the function provably never returns even if
 * the syscall somehow fails. */
__attribute__((noreturn)) static void doexit(int status)
{
    volatile unsigned spin;

    syscall(__NR_exit_group, status);
    for (spin = 0;; spin++) {
        /* unreachable in practice */
    }
}
#include <errno.h>
#include <setjmp.h>
#include <signal.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string.h>
#include <sys/stat.h>
const int kFailStatus = 67;   /* exit status: hard failure (see fail()) */
const int kRetryStatus = 69;  /* exit status: transient error (ENOMEM/EAGAIN), retry */
/* Print a formatted message plus the current errno to stderr and
 * terminate: kRetryStatus for transient errors (ENOMEM/EAGAIN),
 * kFailStatus otherwise.  Never returns. */
static void fail(const char* msg, ...)
{
    int saved_errno = errno;   /* vfprintf may clobber errno */
    va_list ap;

    va_start(ap, msg);
    vfprintf(stderr, msg, ap);
    va_end(ap);
    fprintf(stderr, " (errno %d)\n", saved_errno);
    if (saved_errno == ENOMEM || saved_errno == EAGAIN)
        doexit(kRetryStatus);
    doexit(kFailStatus);
}
/* Print a formatted message plus the current errno to stderr and
 * terminate with kRetryStatus unconditionally.  Never returns. */
static void exitf(const char* msg, ...)
{
    int saved_errno = errno;   /* vfprintf may clobber errno */
    va_list ap;

    va_start(ap, msg);
    vfprintf(stderr, msg, ap);
    va_end(ap);
    fprintf(stderr, " (errno %d)\n", saved_errno);
    doexit(kRetryStatus);
}
static __thread int skip_segv;
static __thread jmp_buf segv_env;
/*
 * SIGSEGV/SIGBUS handler.  When a NONFAILING region is active
 * (skip_segv > 0) and the faulting address lies outside the fuzzer's
 * fixed data area (1 MB .. 100 MB), longjmp back to the recovery point
 * set by NONFAILING; otherwise kill the process with the signal number
 * as the exit status.  'uctx' is unused.
 */
static void segv_handler(int sig, siginfo_t* info, void* uctx)
{
    uintptr_t addr = (uintptr_t)info->si_addr;
    const uintptr_t prog_start = 1 << 20;    /* start of program data area */
    const uintptr_t prog_end = 100 << 20;    /* end of program data area */
    /* Relaxed load is sufficient: skip_segv is thread-local. */
    if (__atomic_load_n(&skip_segv, __ATOMIC_RELAXED) &&
            (addr < prog_start || addr > prog_end)) {
        _longjmp(segv_env, 1);
    }
    doexit(sig);
}
/*
 * Install segv_handler for SIGSEGV and SIGBUS so NONFAILING regions can
 * recover from bad accesses.  First, signals 32 (0x20) and 33 (0x21)
 * are set to SIG_IGN via the raw rt_sigaction syscall -- presumably the
 * realtime signals the C library reserves internally, which plain
 * sigaction() refuses to touch (TODO confirm).
 */
static void install_segv_handler()
{
    struct sigaction sa;
    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = SIG_IGN;
    syscall(SYS_rt_sigaction, 0x20, &sa, NULL, 8);
    syscall(SYS_rt_sigaction, 0x21, &sa, NULL, 8);
    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = segv_handler;
    /* SA_NODEFER: allow the handler to fault again; SA_SIGINFO: get
     * the faulting address via siginfo_t. */
    sa.sa_flags = SA_NODEFER | SA_SIGINFO;
    sigaction(SIGSEGV, &sa, NULL);
    sigaction(SIGBUS, &sa, NULL);
}
#define NONFAILING(...) \
{ \
__atomic_fetch_add(&skip_segv, 1, __ATOMIC_SEQ_CST); \
if (_setjmp(segv_env) == 0) { \
__VA_ARGS__; \
} \
__atomic_fetch_sub(&skip_segv, 1, __ATOMIC_SEQ_CST); \
}
/* Return the monotonic clock reading in milliseconds; dies via fail()
 * if the clock cannot be read. */
static uint64_t current_time_ms()
{
    struct timespec now;

    if (clock_gettime(CLOCK_MONOTONIC, &now) != 0)
        fail("clock_gettime failed");
    return (uint64_t)now.tv_sec * 1000 + (uint64_t)now.tv_nsec / 1000000;
}
/* Create a fresh ./syzkaller.XXXXXX directory, make it world-writable,
 * and chdir into it; dies via fail() on any step. */
static void use_temporary_dir()
{
    char name_template[] = "./syzkaller.XXXXXX";
    char* dir = mkdtemp(name_template);

    if (dir == NULL)
        fail("failed to mkdtemp");
    if (chmod(dir, 0777) != 0)
        fail("failed to chmod");
    if (chdir(dir) != 0)
        fail("failed to chdir");
}
static void loop();
/*
 * Common per-sandbox process setup: arrange to die with the parent,
 * detach into a new process group and session, clamp resource limits,
 * and move into fresh namespaces.  unshare() failures are deliberately
 * ignored -- the kernel may lack the corresponding config options.
 */
static void sandbox_common()
{
    prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);  /* die when parent dies */
    setpgrp();
    setsid();
    struct rlimit rlim;
    rlim.rlim_cur = rlim.rlim_max = 160 << 20;  /* 160 MB address space */
    setrlimit(RLIMIT_AS, &rlim);
    rlim.rlim_cur = rlim.rlim_max = 8 << 20;    /* 8 MB locked memory */
    setrlimit(RLIMIT_MEMLOCK, &rlim);
    rlim.rlim_cur = rlim.rlim_max = 136 << 20;  /* 136 MB max file size */
    setrlimit(RLIMIT_FSIZE, &rlim);
    rlim.rlim_cur = rlim.rlim_max = 1 << 20;    /* 1 MB stack */
    setrlimit(RLIMIT_STACK, &rlim);
    rlim.rlim_cur = rlim.rlim_max = 0;          /* no core dumps */
    setrlimit(RLIMIT_CORE, &rlim);
    if (unshare(CLONE_NEWNS)) {
    }
    if (unshare(CLONE_NEWIPC)) {
    }
    /* 0x02000000 is presumably CLONE_NEWCGROUP, spelled numerically for
     * older headers -- TODO confirm. */
    if (unshare(0x02000000)) {
    }
    if (unshare(CLONE_NEWUTS)) {
    }
    if (unshare(CLONE_SYSVSEM)) {
    }
}
/* Reap children until the one with the given pid exits, then return its
 * exit status.  A negative pid (failed fork) is fatal. */
int wait_for_loop(int pid)
{
    int status = 0;

    if (pid < 0)
        fail("sandbox fork failed");
    for (;;) {
        if (waitpid(-1, &status, __WALL) == pid)
            break;
    }
    return WEXITSTATUS(status);
}
// "none" sandbox: fork a child that runs loop() inside fresh PID/net
// namespaces (best effort) while the parent waits for it to finish.
static int do_sandbox_none(void)
{
  // Must unshare the PID namespace before fork so the child becomes pid 1
  // in it; failure (e.g. no privileges) is ignored.
  if (unshare(CLONE_NEWPID)) {
  }
  int pid = fork();
  if (pid != 0)
    return wait_for_loop(pid);
  sandbox_common();
  if (unshare(CLONE_NEWNET)) {
  }
  loop();
  doexit(1);
}
// Maximum snapshot size for one xtables ruleset and a cap on the number of
// rule entries we are willing to checkpoint.
#define XT_TABLE_SIZE 1536
#define XT_MAX_ENTRIES 10
// Local mirrors of the kernel uapi netfilter structures (uapi headers are
// not included directly) — layouts must match the kernel's.
struct xt_counters {
  uint64_t pcnt, bcnt;
};
struct ipt_getinfo {
  char name[32];
  unsigned int valid_hooks;
  unsigned int hook_entry[5];
  unsigned int underflow[5];
  unsigned int num_entries;
  unsigned int size;
};
struct ipt_get_entries {
  char name[32];
  unsigned int size;
  void* entrytable[XT_TABLE_SIZE / sizeof(void*)];
};
struct ipt_replace {
  char name[32];
  unsigned int valid_hooks;
  unsigned int num_entries;
  unsigned int size;
  unsigned int hook_entry[5];
  unsigned int underflow[5];
  unsigned int num_counters;
  struct xt_counters* counters;
  char entrytable[XT_TABLE_SIZE];
};
// Per-table checkpoint state: info + full replace blob captured before the
// fuzzed program runs, restored afterwards.
struct ipt_table_desc {
  const char* name;
  struct ipt_getinfo info;
  struct ipt_replace replace;
};
static struct ipt_table_desc ipv4_tables[] = {
    {.name = "filter"}, {.name = "nat"}, {.name = "mangle"},
    {.name = "raw"},    {.name = "security"},
};
static struct ipt_table_desc ipv6_tables[] = {
    {.name = "filter"}, {.name = "nat"}, {.name = "mangle"},
    {.name = "raw"},    {.name = "security"},
};
// iptables get/setsockopt command numbers (ip_tables.h uapi values).
#define IPT_BASE_CTL 64
#define IPT_SO_SET_REPLACE (IPT_BASE_CTL)
#define IPT_SO_GET_INFO (IPT_BASE_CTL)
#define IPT_SO_GET_ENTRIES (IPT_BASE_CTL + 1)
// arptables mirrors of the kernel uapi structures; note only 3 hooks
// (vs 5 for iptables).
struct arpt_getinfo {
  char name[32];
  unsigned int valid_hooks;
  unsigned int hook_entry[3];
  unsigned int underflow[3];
  unsigned int num_entries;
  unsigned int size;
};
struct arpt_get_entries {
  char name[32];
  unsigned int size;
  void* entrytable[XT_TABLE_SIZE / sizeof(void*)];
};
struct arpt_replace {
  char name[32];
  unsigned int valid_hooks;
  unsigned int num_entries;
  unsigned int size;
  unsigned int hook_entry[3];
  unsigned int underflow[3];
  unsigned int num_counters;
  struct xt_counters* counters;
  char entrytable[XT_TABLE_SIZE];
};
struct arpt_table_desc {
  const char* name;
  struct arpt_getinfo info;
  struct arpt_replace replace;
};
static struct arpt_table_desc arpt_tables[] = {
    {.name = "filter"},
};
// arptables get/setsockopt command numbers (arp_tables.h uapi values).
#define ARPT_BASE_CTL 96
#define ARPT_SO_SET_REPLACE (ARPT_BASE_CTL)
#define ARPT_SO_GET_INFO (ARPT_BASE_CTL)
#define ARPT_SO_GET_ENTRIES (ARPT_BASE_CTL + 1)
// Snapshot the current rulesets of the given iptables tables via
// IPT_SO_GET_INFO/IPT_SO_GET_ENTRIES so they can later be restored by
// reset_iptables(). Silently skips tables/protocols the kernel does not
// support or that we lack privileges for.
static void checkpoint_iptables(struct ipt_table_desc* tables, int num_tables,
                                int family, int level)
{
  struct ipt_get_entries entries;
  socklen_t optlen;
  int fd, i;
  fd = socket(family, SOCK_STREAM, IPPROTO_TCP);
  if (fd == -1) {
    switch (errno) {
    case EAFNOSUPPORT:
    case ENOPROTOOPT:
      return;
    }
    fail("socket(%d, SOCK_STREAM, IPPROTO_TCP)", family);
  }
  for (i = 0; i < num_tables; i++) {
    struct ipt_table_desc* table = &tables[i];
    strcpy(table->info.name, table->name);
    strcpy(table->replace.name, table->name);
    optlen = sizeof(table->info);
    if (getsockopt(fd, level, IPT_SO_GET_INFO, &table->info, &optlen)) {
      switch (errno) {
      case EPERM:
      case ENOENT:
      case ENOPROTOOPT:
        continue;
      }
      fail("getsockopt(IPT_SO_GET_INFO)");
    }
    // Sanity-check against our fixed-size snapshot buffers.
    if (table->info.size > sizeof(table->replace.entrytable))
      fail("table size is too large: %u", table->info.size);
    if (table->info.num_entries > XT_MAX_ENTRIES)
      fail("too many counters: %u", table->info.num_entries);
    memset(&entries, 0, sizeof(entries));
    strcpy(entries.name, table->name);
    entries.size = table->info.size;
    // Only the used part of entrytable is passed to the kernel.
    optlen = sizeof(entries) - sizeof(entries.entrytable) + table->info.size;
    if (getsockopt(fd, level, IPT_SO_GET_ENTRIES, &entries, &optlen))
      fail("getsockopt(IPT_SO_GET_ENTRIES)");
    // Pre-build the replace blob used later to restore the table.
    table->replace.valid_hooks = table->info.valid_hooks;
    table->replace.num_entries = table->info.num_entries;
    table->replace.size = table->info.size;
    memcpy(table->replace.hook_entry, table->info.hook_entry,
           sizeof(table->replace.hook_entry));
    memcpy(table->replace.underflow, table->info.underflow,
           sizeof(table->replace.underflow));
    memcpy(table->replace.entrytable, entries.entrytable, table->info.size);
  }
  close(fd);
}
// Restore iptables tables to the snapshots taken by checkpoint_iptables().
// For each snapshotted table, re-reads the live state and only issues an
// IPT_SO_SET_REPLACE when the ruleset actually changed.
static void reset_iptables(struct ipt_table_desc* tables, int num_tables,
                           int family, int level)
{
  struct xt_counters counters[XT_MAX_ENTRIES];
  struct ipt_get_entries entries;
  struct ipt_getinfo info;
  socklen_t optlen;
  int fd, i;
  fd = socket(family, SOCK_STREAM, IPPROTO_TCP);
  if (fd == -1) {
    switch (errno) {
    case EAFNOSUPPORT:
    case ENOPROTOOPT:
      return;
    }
    fail("socket(%d, SOCK_STREAM, IPPROTO_TCP)", family);
  }
  for (i = 0; i < num_tables; i++) {
    struct ipt_table_desc* table = &tables[i];
    // valid_hooks == 0 means the table was never checkpointed.
    if (table->info.valid_hooks == 0)
      continue;
    memset(&info, 0, sizeof(info));
    strcpy(info.name, table->name);
    optlen = sizeof(info);
    if (getsockopt(fd, level, IPT_SO_GET_INFO, &info, &optlen))
      fail("getsockopt(IPT_SO_GET_INFO)");
    if (memcmp(&table->info, &info, sizeof(table->info)) == 0) {
      memset(&entries, 0, sizeof(entries));
      strcpy(entries.name, table->name);
      entries.size = table->info.size;
      optlen = sizeof(entries) - sizeof(entries.entrytable) + entries.size;
      if (getsockopt(fd, level, IPT_SO_GET_ENTRIES, &entries, &optlen))
        fail("getsockopt(IPT_SO_GET_ENTRIES)");
      // Info and entries both unchanged: nothing to restore.
      if (memcmp(table->replace.entrytable, entries.entrytable,
                 table->info.size) == 0)
        continue;
    }
    // num_counters must match the *current* table, not the snapshot.
    table->replace.num_counters = info.num_entries;
    table->replace.counters = counters;
    optlen = sizeof(table->replace) - sizeof(table->replace.entrytable) +
             table->replace.size;
    if (setsockopt(fd, level, IPT_SO_SET_REPLACE, &table->replace, optlen))
      fail("setsockopt(IPT_SO_SET_REPLACE)");
  }
  close(fd);
}
// Snapshot the arptables rulesets (same scheme as checkpoint_iptables, but
// over SOL_IP with the ARPT_* sockopt commands).
static void checkpoint_arptables(void)
{
  struct arpt_get_entries entries;
  socklen_t optlen;
  unsigned i;
  int fd;
  fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
  if (fd == -1) {
    switch (errno) {
    case EAFNOSUPPORT:
    case ENOPROTOOPT:
      return;
    }
    fail("socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)");
  }
  for (i = 0; i < sizeof(arpt_tables) / sizeof(arpt_tables[0]); i++) {
    struct arpt_table_desc* table = &arpt_tables[i];
    strcpy(table->info.name, table->name);
    strcpy(table->replace.name, table->name);
    optlen = sizeof(table->info);
    if (getsockopt(fd, SOL_IP, ARPT_SO_GET_INFO, &table->info, &optlen)) {
      switch (errno) {
      case EPERM:
      case ENOENT:
      case ENOPROTOOPT:
        continue;
      }
      fail("getsockopt(ARPT_SO_GET_INFO)");
    }
    // Sanity-check against our fixed-size snapshot buffers.
    if (table->info.size > sizeof(table->replace.entrytable))
      fail("table size is too large: %u", table->info.size);
    if (table->info.num_entries > XT_MAX_ENTRIES)
      fail("too many counters: %u", table->info.num_entries);
    memset(&entries, 0, sizeof(entries));
    strcpy(entries.name, table->name);
    entries.size = table->info.size;
    optlen = sizeof(entries) - sizeof(entries.entrytable) + table->info.size;
    if (getsockopt(fd, SOL_IP, ARPT_SO_GET_ENTRIES, &entries, &optlen))
      fail("getsockopt(ARPT_SO_GET_ENTRIES)");
    // Pre-build the replace blob used later to restore the table.
    table->replace.valid_hooks = table->info.valid_hooks;
    table->replace.num_entries = table->info.num_entries;
    table->replace.size = table->info.size;
    memcpy(table->replace.hook_entry, table->info.hook_entry,
           sizeof(table->replace.hook_entry));
    memcpy(table->replace.underflow, table->info.underflow,
           sizeof(table->replace.underflow));
    memcpy(table->replace.entrytable, entries.entrytable, table->info.size);
  }
  close(fd);
}
// Restore arptables to the snapshot taken by checkpoint_arptables(); only
// issues ARPT_SO_SET_REPLACE when the live ruleset differs.
static void reset_arptables()
{
  struct xt_counters counters[XT_MAX_ENTRIES];
  struct arpt_get_entries entries;
  struct arpt_getinfo info;
  socklen_t optlen;
  unsigned i;
  int fd;
  fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
  if (fd == -1) {
    switch (errno) {
    case EAFNOSUPPORT:
    case ENOPROTOOPT:
      return;
    }
    fail("socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)");
  }
  for (i = 0; i < sizeof(arpt_tables) / sizeof(arpt_tables[0]); i++) {
    struct arpt_table_desc* table = &arpt_tables[i];
    // valid_hooks == 0 means the table was never checkpointed.
    if (table->info.valid_hooks == 0)
      continue;
    memset(&info, 0, sizeof(info));
    strcpy(info.name, table->name);
    optlen = sizeof(info);
    if (getsockopt(fd, SOL_IP, ARPT_SO_GET_INFO, &info, &optlen))
      fail("getsockopt(ARPT_SO_GET_INFO)");
    if (memcmp(&table->info, &info, sizeof(table->info)) == 0) {
      memset(&entries, 0, sizeof(entries));
      strcpy(entries.name, table->name);
      entries.size = table->info.size;
      optlen = sizeof(entries) - sizeof(entries.entrytable) + entries.size;
      if (getsockopt(fd, SOL_IP, ARPT_SO_GET_ENTRIES, &entries, &optlen))
        fail("getsockopt(ARPT_SO_GET_ENTRIES)");
      // Info and entries both unchanged: nothing to restore.
      if (memcmp(table->replace.entrytable, entries.entrytable,
                 table->info.size) == 0)
        continue;
    }
    table->replace.num_counters = info.num_entries;
    table->replace.counters = counters;
    optlen = sizeof(table->replace) - sizeof(table->replace.entrytable) +
             table->replace.size;
    if (setsockopt(fd, SOL_IP, ARPT_SO_SET_REPLACE, &table->replace, optlen))
      fail("setsockopt(ARPT_SO_SET_REPLACE)");
  }
  close(fd);
}
#include <linux/if.h>
#include <linux/netfilter_bridge/ebtables.h>
// Per-table ebtables checkpoint state: the ebt_replace header plus a copy
// of the entry blob it points at.
struct ebt_table_desc {
  const char* name;
  struct ebt_replace replace;
  char entrytable[XT_TABLE_SIZE];
};
static struct ebt_table_desc ebt_tables[] = {
    {.name = "filter"}, {.name = "nat"}, {.name = "broute"},
};
// Snapshot the *initial* (pristine) ebtables rulesets via
// EBT_SO_GET_INIT_INFO/EBT_SO_GET_INIT_ENTRIES; used by reset_ebtables()
// to restore the tables after the fuzzed program runs.
static void checkpoint_ebtables(void)
{
  socklen_t optlen;
  unsigned i;
  int fd;
  fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
  if (fd == -1) {
    switch (errno) {
    case EAFNOSUPPORT:
    case ENOPROTOOPT:
      return;
    }
    fail("socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)");
  }
  for (i = 0; i < sizeof(ebt_tables) / sizeof(ebt_tables[0]); i++) {
    struct ebt_table_desc* table = &ebt_tables[i];
    strcpy(table->replace.name, table->name);
    optlen = sizeof(table->replace);
    if (getsockopt(fd, SOL_IP, EBT_SO_GET_INIT_INFO, &table->replace,
                   &optlen)) {
      switch (errno) {
      case EPERM:
      case ENOENT:
      case ENOPROTOOPT:
        continue;
      }
      fail("getsockopt(EBT_SO_GET_INIT_INFO)");
    }
    // Sanity-check against our fixed-size entry buffer.
    if (table->replace.entries_size > sizeof(table->entrytable))
      fail("table size is too large: %u", table->replace.entries_size);
    table->replace.num_counters = 0;
    table->replace.entries = table->entrytable;
    optlen = sizeof(table->replace) + table->replace.entries_size;
    if (getsockopt(fd, SOL_IP, EBT_SO_GET_INIT_ENTRIES, &table->replace,
                   &optlen))
      fail("getsockopt(EBT_SO_GET_INIT_ENTRIES)");
  }
  close(fd);
}
// Restore ebtables to the pristine state captured by checkpoint_ebtables().
// Compares the live table against the snapshot first and only performs the
// EBT_SO_SET_ENTRIES replace when something changed.
static void reset_ebtables()
{
  struct ebt_replace replace;
  char entrytable[XT_TABLE_SIZE];
  socklen_t optlen;
  unsigned i, j, h;
  int fd;
  fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
  if (fd == -1) {
    switch (errno) {
    case EAFNOSUPPORT:
    case ENOPROTOOPT:
      return;
    }
    fail("socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)");
  }
  for (i = 0; i < sizeof(ebt_tables) / sizeof(ebt_tables[0]); i++) {
    struct ebt_table_desc* table = &ebt_tables[i];
    // valid_hooks == 0 means the table was never checkpointed.
    if (table->replace.valid_hooks == 0)
      continue;
    memset(&replace, 0, sizeof(replace));
    strcpy(replace.name, table->name);
    optlen = sizeof(replace);
    if (getsockopt(fd, SOL_IP, EBT_SO_GET_INFO, &replace, &optlen))
      fail("getsockopt(EBT_SO_GET_INFO)");
    // Zero out the pointer fields so the two headers compare equal when
    // only the (userspace) addresses differ.
    replace.num_counters = 0;
    table->replace.entries = 0;
    for (h = 0; h < NF_BR_NUMHOOKS; h++)
      table->replace.hook_entry[h] = 0;
    if (memcmp(&table->replace, &replace, sizeof(table->replace)) == 0) {
      memset(&entrytable, 0, sizeof(entrytable));
      replace.entries = entrytable;
      optlen = sizeof(replace) + replace.entries_size;
      if (getsockopt(fd, SOL_IP, EBT_SO_GET_ENTRIES, &replace, &optlen))
        fail("getsockopt(EBT_SO_GET_ENTRIES)");
      if (memcmp(table->entrytable, entrytable, replace.entries_size) == 0)
        continue;
    }
    // Rebuild hook_entry pointers into our snapshot buffer: one
    // ebt_entries header per valid hook, laid out consecutively.
    for (j = 0, h = 0; h < NF_BR_NUMHOOKS; h++) {
      if (table->replace.valid_hooks & (1 << h)) {
        table->replace.hook_entry[h] =
            (struct ebt_entries*)table->entrytable + j;
        j++;
      }
    }
    table->replace.entries = table->entrytable;
    optlen = sizeof(table->replace) + table->replace.entries_size;
    if (setsockopt(fd, SOL_IP, EBT_SO_SET_ENTRIES, &table->replace, optlen))
      fail("setsockopt(EBT_SO_SET_ENTRIES)");
  }
  close(fd);
}
// Snapshot all netfilter state (ebtables, arptables, IPv4/IPv6 iptables)
// before running the fuzzed program.
static void checkpoint_net_namespace(void)
{
  checkpoint_ebtables();
  checkpoint_arptables();
  checkpoint_iptables(ipv4_tables, sizeof(ipv4_tables) / sizeof(ipv4_tables[0]),
                      AF_INET, SOL_IP);
  checkpoint_iptables(ipv6_tables, sizeof(ipv6_tables) / sizeof(ipv6_tables[0]),
                      AF_INET6, SOL_IPV6);
}
// Restore all netfilter state after each iteration so runs are independent.
static void reset_net_namespace(void)
{
  reset_ebtables();
  reset_arptables();
  reset_iptables(ipv4_tables, sizeof(ipv4_tables) / sizeof(ipv4_tables[0]),
                 AF_INET, SOL_IP);
  reset_iptables(ipv6_tables, sizeof(ipv6_tables) / sizeof(ipv6_tables[0]),
                 AF_INET6, SOL_IPV6);
}
// Recursively delete a directory tree that the fuzzed program may have
// left mounts and busy files in: repeatedly lazy-unmounts (MNT_DETACH),
// unlinks with EBUSY retries, and restarts the whole scan on ENOTEMPTY.
static void remove_dir(const char* dir)
{
  DIR* dp;
  struct dirent* ep;
  int iter = 0;
retry:
  // Peel off any mounts stacked on the directory itself.
  while (umount2(dir, MNT_DETACH) == 0) {
  }
  dp = opendir(dir);
  if (dp == NULL) {
    if (errno == EMFILE) {
      exitf("opendir(%s) failed due to NOFILE, exiting", dir);
    }
    exitf("opendir(%s) failed", dir);
  }
  while ((ep = readdir(dp))) {
    if (strcmp(ep->d_name, ".") == 0 || strcmp(ep->d_name, "..") == 0)
      continue;
    char filename[FILENAME_MAX];
    snprintf(filename, sizeof(filename), "%s/%s", dir, ep->d_name);
    struct stat st;
    if (lstat(filename, &st))
      exitf("lstat(%s) failed", filename);
    if (S_ISDIR(st.st_mode)) {
      remove_dir(filename);
      continue;
    }
    int i;
    // EBUSY usually means a mount pins the file: detach and retry
    // (up to ~100 times). EROFS files are left in place.
    for (i = 0;; i++) {
      if (unlink(filename) == 0)
        break;
      if (errno == EROFS) {
        break;
      }
      if (errno != EBUSY || i > 100)
        exitf("unlink(%s) failed", filename);
      if (umount2(filename, MNT_DETACH))
        exitf("umount(%s) failed", filename);
    }
  }
  closedir(dp);
  int i;
  for (i = 0;; i++) {
    if (rmdir(dir) == 0)
      break;
    if (i < 100) {
      if (errno == EROFS) {
        break;
      }
      if (errno == EBUSY) {
        if (umount2(dir, MNT_DETACH))
          exitf("umount(%s) failed", dir);
        continue;
      }
      // A concurrent writer re-populated the dir: rescan from scratch.
      if (errno == ENOTEMPTY) {
        if (iter < 100) {
          iter++;
          goto retry;
        }
      }
    }
    exitf("rmdir(%s) failed", dir);
  }
}
static void execute_one();
extern unsigned long long procid;
// Main fuzzing loop: for each iteration, run execute_one() in a forked
// child inside a fresh working directory, kill it after a 3 second budget,
// then clean the directory and restore netfilter state.
static void loop()
{
  checkpoint_net_namespace();
  int iter;
  for (iter = 0;; iter++) {
    char cwdbuf[32];
    sprintf(cwdbuf, "./%d", iter);
    if (mkdir(cwdbuf, 0777))
      fail("failed to mkdir");
    int pid = fork();
    if (pid < 0)
      fail("clone failed");
    if (pid == 0) {
      // Child: die with the parent, run in its own process group so the
      // whole group can be killed on timeout.
      prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
      setpgrp();
      if (chdir(cwdbuf))
        fail("failed to chdir");
      execute_one();
      doexit(0);
    }
    int status = 0;
    uint64_t start = current_time_ms();
    for (;;) {
      int res = waitpid(-1, &status, __WALL | WNOHANG);
      if (res == pid) {
        break;
      }
      usleep(1000);
      // 3 second per-iteration budget before killing the child group.
      if (current_time_ms() - start < 3 * 1000)
        continue;
      kill(-pid, SIGKILL);
      kill(pid, SIGKILL);
      while (waitpid(-1, &status, __WALL) != pid) {
      }
      break;
    }
    remove_dir(cwdbuf);
    reset_net_namespace();
  }
}
// Worker-thread slot: created once, then woken via the `running` futex to
// execute one syscall (`call`) at a time.
struct thread_t {
  int created, running, call;
  pthread_t th;
};
static struct thread_t threads[16];
static void execute_call(int call);
// Number of calls currently in flight across all worker threads.
static int running;
// Set during the second ("collide") pass of execute_one().
static int collide;
// Worker thread body: sleep on the per-thread `running` futex until the
// dispatcher hands us a call index, execute it, then publish completion
// and wake the dispatcher.
static void* thr(void* arg)
{
  struct thread_t* th = (struct thread_t*)arg;
  for (;;) {
    while (!__atomic_load_n(&th->running, __ATOMIC_ACQUIRE))
      syscall(SYS_futex, &th->running, FUTEX_WAIT, 0, 0);
    execute_call(th->call);
    __atomic_fetch_sub(&running, 1, __ATOMIC_RELAXED);
    __atomic_store_n(&th->running, 0, __ATOMIC_RELEASE);
    // FUTEX_WAKE requires an explicit waiter count; the original call
    // omitted it, passing an indeterminate register value as `val`
    // (which could be 0 and wake nobody). Wake the one dispatcher.
    syscall(SYS_futex, &th->running, FUTEX_WAKE, 1);
  }
  return 0;
}
// Dispatch `num_calls` syscalls to the worker-thread pool. Each call is
// handed to the first idle thread; normally the dispatcher waits up to
// 20ms for completion, but in collide mode every other call is fired
// without waiting to maximize concurrency.
static void execute(int num_calls)
{
  int call;
  // unsigned to match the sizeof()-derived bound (avoids an implicit
  // signed/unsigned comparison).
  unsigned thread;
  running = 0;
  for (call = 0; call < num_calls; call++) {
    for (thread = 0; thread < sizeof(threads) / sizeof(threads[0]); thread++) {
      struct thread_t* th = &threads[thread];
      // Lazily create worker threads with a small 128KB stack.
      if (!th->created) {
        th->created = 1;
        pthread_attr_t attr;
        pthread_attr_init(&attr);
        pthread_attr_setstacksize(&attr, 128 << 10);
        pthread_create(&th->th, &attr, thr, th);
      }
      if (!__atomic_load_n(&th->running, __ATOMIC_ACQUIRE)) {
        th->call = call;
        __atomic_fetch_add(&running, 1, __ATOMIC_RELAXED);
        __atomic_store_n(&th->running, 1, __ATOMIC_RELEASE);
        // Wake the worker; the explicit count was missing in the
        // original call (indeterminate `val` argument).
        syscall(SYS_futex, &th->running, FUTEX_WAKE, 1);
        if (collide && call % 2)
          break;
        // Wait up to 20ms for the worker to finish this call.
        struct timespec ts;
        ts.tv_sec = 0;
        ts.tv_nsec = 20 * 1000 * 1000;
        syscall(SYS_futex, &th->running, FUTEX_WAIT, 1, &ts);
        if (__atomic_load_n(&running, __ATOMIC_RELAXED))
          usleep((call == num_calls - 1) ? 10000 : 1000);
        break;
      }
    }
  }
}
#ifndef __NR_bpf
#define __NR_bpf 321
#endif
uint64_t r[1] = {0xffffffffffffffff};
// Execute one of the reproducer's 5 syscalls. Memory operands live in the
// fixed region mmap'ed at 0x20000000 by main(); NONFAILING() guards each
// store so a fault there doesn't kill the worker thread.
void execute_call(int call)
{
  long res;
  switch (call) {
  case 0:
    syscall(__NR_socketpair, 0, 0, 0, 0x20000140);
    break;
  case 1:
    // socket(AF_INET6, SOCK_STREAM, 0)
    syscall(__NR_socket, 0xa, 1, 0);
    break;
  case 2:
    // Build a bpf attr at 0x20000280 and call bpf() with cmd 0
    // (BPF_MAP_CREATE per uapi/linux/bpf.h); the returned map fd is
    // saved in r[0] for the later calls.
    NONFAILING(*(uint32_t*)0x20000280 = 0xf);
    NONFAILING(*(uint32_t*)0x20000284 = 4);
    NONFAILING(*(uint32_t*)0x20000288 = 4);
    NONFAILING(*(uint32_t*)0x2000028c = 0x70);
    NONFAILING(*(uint32_t*)0x20000290 = 0);
    NONFAILING(*(uint32_t*)0x20000294 = -1);
    NONFAILING(*(uint32_t*)0x20000298 = 0);
    NONFAILING(*(uint8_t*)0x2000029c = 0);
    NONFAILING(*(uint8_t*)0x2000029d = 0);
    NONFAILING(*(uint8_t*)0x2000029e = 0);
    NONFAILING(*(uint8_t*)0x2000029f = 0);
    NONFAILING(*(uint8_t*)0x200002a0 = 0);
    NONFAILING(*(uint8_t*)0x200002a1 = 0);
    NONFAILING(*(uint8_t*)0x200002a2 = 0);
    NONFAILING(*(uint8_t*)0x200002a3 = 0);
    NONFAILING(*(uint8_t*)0x200002a4 = 0);
    NONFAILING(*(uint8_t*)0x200002a5 = 0);
    NONFAILING(*(uint8_t*)0x200002a6 = 0);
    NONFAILING(*(uint8_t*)0x200002a7 = 0);
    NONFAILING(*(uint8_t*)0x200002a8 = 0);
    NONFAILING(*(uint8_t*)0x200002a9 = 0);
    NONFAILING(*(uint8_t*)0x200002aa = 0);
    NONFAILING(*(uint8_t*)0x200002ab = 0);
    res = syscall(__NR_bpf, 0, 0x20000280, 0x2c);
    if (res != -1)
      r[0] = res;
    break;
  case 3:
    // bpf() cmd 2 (presumably BPF_MAP_UPDATE_ELEM) on the map in r[0],
    // flags = 0.
    NONFAILING(*(uint32_t*)0x20000180 = r[0]);
    NONFAILING(*(uint64_t*)0x20000188 = 0x20000000);
    NONFAILING(*(uint64_t*)0x20000190 = 0x20000140);
    NONFAILING(*(uint64_t*)0x20000198 = 0);
    syscall(__NR_bpf, 2, 0x20000180, 0x20);
    break;
  case 4:
    // Same update with flags = 1.
    NONFAILING(*(uint32_t*)0x20000180 = r[0]);
    NONFAILING(*(uint64_t*)0x20000188 = 0x20000000);
    NONFAILING(*(uint64_t*)0x20000190 = 0x20000140);
    NONFAILING(*(uint64_t*)0x20000198 = 1);
    syscall(__NR_bpf, 2, 0x20000180, 0x20);
    break;
  }
}
// Run the whole program twice: once serialized, then again in "collide"
// mode where calls race against each other.
void execute_one()
{
  execute(5);
  collide = 1;
  execute(5);
}
// Entry point: map the fixed 16MB data region at 0x20000000
// (PROT_READ|PROT_WRITE, flags 0x32 — MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS),
// install fault recovery, and run sandboxed iterations forever from a
// fresh temporary directory each time.
int main()
{
  syscall(__NR_mmap, 0x20000000, 0x1000000, 3, 0x32, -1, 0);
  install_segv_handler();
  char* cwd = get_current_dir_name();
  for (;;) {
    if (chdir(cwd))
      fail("failed to chdir");
    use_temporary_dir();
    do_sandbox_none();
  }
}
|
the_stack_data/135504.c
|
// SYZFAIL: unknown binary format
// https://syzkaller.appspot.com/bug?id=cbd8b92538332fdc5281
// status:0
// autogenerated by syzkaller (https://github.com/google/syzkaller)
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <dirent.h>
#include <endian.h>
#include <errno.h>
#include <fcntl.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <netinet/in.h>
#include <pthread.h>
#include <sched.h>
#include <setjmp.h>
#include <signal.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
#include <linux/capability.h>
#include <linux/futex.h>
#include <linux/genetlink.h>
#include <linux/if_addr.h>
#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/neighbour.h>
#include <linux/net.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/tcp.h>
#include <linux/veth.h>
static unsigned long long procid;
static __thread int skip_segv;
static __thread jmp_buf segv_env;
// SIGSEGV/SIGBUS handler: if a NONFAILING() section is active and the
// faulting address lies outside the fuzzer's expected data region
// [1MB, 100MB], recover via longjmp; otherwise terminate with the signal
// number as the exit code.
static void segv_handler(int sig, siginfo_t* info, void* ctx)
{
  uintptr_t fault_addr = (uintptr_t)info->si_addr;
  const uintptr_t prog_start = 1 << 20;
  const uintptr_t prog_end = 100 << 20;
  int in_nonfailing = __atomic_load_n(&skip_segv, __ATOMIC_RELAXED) != 0;
  int outside_prog = fault_addr < prog_start || fault_addr > prog_end;
  if (in_nonfailing && outside_prog)
    _longjmp(segv_env, 1);
  exit(sig);
}
// Install SEGV/BUS handlers used by the NONFAILING() recovery mechanism,
// and ignore signals 32 (0x20) and 33 (0x21) — presumably libc-internal
// realtime signals; installed via raw rt_sigaction to bypass libc.
static void install_segv_handler(void)
{
  struct sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sa.sa_handler = SIG_IGN;
  // 8 is the sigset size expected by the kernel for rt_sigaction.
  syscall(SYS_rt_sigaction, 0x20, &sa, NULL, 8);
  syscall(SYS_rt_sigaction, 0x21, &sa, NULL, 8);
  memset(&sa, 0, sizeof(sa));
  sa.sa_sigaction = segv_handler;
  // SA_NODEFER so a fault inside the handler can be caught again.
  sa.sa_flags = SA_NODEFER | SA_SIGINFO;
  sigaction(SIGSEGV, &sa, NULL);
  sigaction(SIGBUS, &sa, NULL);
}
// Run the given statements with SEGV/BUS recovery enabled; evaluates to 1
// if the statements completed and 0 if a fault was recovered via longjmp.
#define NONFAILING(...) \
  ({ \
    int ok = 1; \
    __atomic_fetch_add(&skip_segv, 1, __ATOMIC_SEQ_CST); \
    if (_setjmp(segv_env) == 0) { \
      __VA_ARGS__; \
    } else \
      ok = 0; \
    __atomic_fetch_sub(&skip_segv, 1, __ATOMIC_SEQ_CST); \
    ok; \
  })
// Sleep for the given number of milliseconds.
static void sleep_ms(uint64_t ms)
{
  // usleep() takes microseconds.
  uint64_t micros = ms * 1000;
  usleep(micros);
}
// Return the CLOCK_MONOTONIC time in milliseconds; exits on clock failure.
static uint64_t current_time_ms(void)
{
  struct timespec now;
  if (clock_gettime(CLOCK_MONOTONIC, &now) != 0)
    exit(1);
  uint64_t ms = (uint64_t)now.tv_sec * 1000;
  ms += (uint64_t)now.tv_nsec / 1000000;
  return ms;
}
// Create a fresh world-writable temporary directory under the current
// directory and chdir into it. Exits on any failure.
static void use_temporary_dir(void)
{
  char tmpdir_template[] = "./syzkaller.XXXXXX";
  char* tmpdir = mkdtemp(tmpdir_template);
  if (!tmpdir)
    exit(1);
  // 0777 so children running under different credentials can still write.
  if (chmod(tmpdir, 0777))
    exit(1);
  if (chdir(tmpdir))
    exit(1);
}
// Start a detached-style worker thread with a small 128KB stack, retrying
// up to 100 times on transient EAGAIN; exits the process if the thread
// cannot be created.
static void thread_start(void* (*fn)(void*), void* arg)
{
  pthread_t th;
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setstacksize(&attr, 128 << 10);
  int i = 0;
  for (; i < 100; i++) {
    if (pthread_create(&th, &attr, fn, arg) == 0) {
      pthread_attr_destroy(&attr);
      return;
    }
    // EAGAIN: resource temporarily unavailable — back off briefly.
    if (errno == EAGAIN) {
      usleep(50);
      continue;
    }
    break;
  }
  exit(1);
}
// Simple one-shot event built on a futex word: 0 = unset, 1 = set.
typedef struct {
  int state;
} event_t;
static void event_init(event_t* ev)
{
  ev->state = 0;
}
static void event_reset(event_t* ev)
{
  ev->state = 0;
}
// Set the event and wake all waiters; double-set is a fatal logic error.
static void event_set(event_t* ev)
{
  if (ev->state)
    exit(1);
  __atomic_store_n(&ev->state, 1, __ATOMIC_RELEASE);
  syscall(SYS_futex, &ev->state, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1000000);
}
// Block until the event is set (loop guards against spurious wakeups).
static void event_wait(event_t* ev)
{
  while (!__atomic_load_n(&ev->state, __ATOMIC_ACQUIRE))
    syscall(SYS_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, 0);
}
static int event_isset(event_t* ev)
{
  return __atomic_load_n(&ev->state, __ATOMIC_ACQUIRE);
}
// Wait for the event with a millisecond timeout; returns 1 if set, 0 on
// timeout.
static int event_timedwait(event_t* ev, uint64_t timeout)
{
  uint64_t start = current_time_ms();
  uint64_t now = start;
  for (;;) {
    uint64_t remain = timeout - (now - start);
    struct timespec ts;
    ts.tv_sec = remain / 1000;
    ts.tv_nsec = (remain % 1000) * 1000 * 1000;
    syscall(SYS_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, &ts);
    if (__atomic_load_n(&ev->state, __ATOMIC_ACQUIRE))
      return 1;
    now = current_time_ms();
    if (now - start > timeout)
      return 0;
  }
}
// printf-style write to an *existing* file (O_WRONLY, no O_CREAT).
// The formatted message is truncated to 1023 bytes. Returns true iff the
// file was opened and the whole message was written; on a short/failed
// write, errno reflects the write error (not the close).
static bool write_file(const char* file, const char* what, ...)
{
  char msg[1024];
  va_list ap;
  va_start(ap, what);
  vsnprintf(msg, sizeof(msg), what, ap);
  va_end(ap);
  msg[sizeof(msg) - 1] = 0;
  const int want = strlen(msg);
  const int fd = open(file, O_WRONLY | O_CLOEXEC);
  if (fd < 0)
    return false;
  const ssize_t wrote = write(fd, msg, want);
  if (wrote != want) {
    const int saved_errno = errno;
    close(fd);
    errno = saved_errno;
    return false;
  }
  close(fd);
  return true;
}
// In-place netlink message builder: `pos` is the write cursor into `buf`,
// `nested` tracks up to 8 open nested attributes awaiting netlink_done().
struct nlmsg {
  char* pos;
  int nesting;
  struct nlattr* nested[8];
  char buf[4096];
};
// Start a new request message with the given type/flags and fixed header
// payload `data`; the cursor is advanced past the aligned payload.
static void netlink_init(struct nlmsg* nlmsg, int typ, int flags,
                         const void* data, int size)
{
  memset(nlmsg, 0, sizeof(*nlmsg));
  struct nlmsghdr* hdr = (struct nlmsghdr*)nlmsg->buf;
  hdr->nlmsg_type = typ;
  hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | flags;
  memcpy(hdr + 1, data, size);
  nlmsg->pos = (char*)(hdr + 1) + NLMSG_ALIGN(size);
}
// Append one attribute (type + payload) at the cursor.
static void netlink_attr(struct nlmsg* nlmsg, int typ, const void* data,
                         int size)
{
  struct nlattr* attr = (struct nlattr*)nlmsg->pos;
  attr->nla_len = sizeof(*attr) + size;
  attr->nla_type = typ;
  if (size > 0)
    memcpy(attr + 1, data, size);
  nlmsg->pos += NLMSG_ALIGN(attr->nla_len);
}
// Open a nested attribute; its length is patched in by netlink_done().
static void netlink_nest(struct nlmsg* nlmsg, int typ)
{
  struct nlattr* attr = (struct nlattr*)nlmsg->pos;
  attr->nla_type = typ;
  nlmsg->pos += sizeof(*attr);
  nlmsg->nested[nlmsg->nesting++] = attr;
}
// Close the most recently opened nested attribute.
static void netlink_done(struct nlmsg* nlmsg)
{
  struct nlattr* attr = nlmsg->nested[--nlmsg->nesting];
  attr->nla_len = nlmsg->pos - (char*)attr;
}
// Finalize and send the built message, then read one reply. On success
// returns 0; if the reply matches `reply_type` its length is stored in
// *reply_len (the caller parses nlmsg->buf). Kernel NLMSG_ERROR replies
// are converted to a negative errno return. With dofail, any transport
// failure exits the process instead of returning -1.
static int netlink_send_ext(struct nlmsg* nlmsg, int sock, uint16_t reply_type,
                            int* reply_len, bool dofail)
{
  // Overflowing the 4KB buffer or an unbalanced nest is a builder bug.
  if (nlmsg->pos > nlmsg->buf + sizeof(nlmsg->buf) || nlmsg->nesting)
    exit(1);
  struct nlmsghdr* hdr = (struct nlmsghdr*)nlmsg->buf;
  hdr->nlmsg_len = nlmsg->pos - nlmsg->buf;
  struct sockaddr_nl addr;
  memset(&addr, 0, sizeof(addr));
  addr.nl_family = AF_NETLINK;
  ssize_t n = sendto(sock, nlmsg->buf, hdr->nlmsg_len, 0,
                     (struct sockaddr*)&addr, sizeof(addr));
  if (n != (ssize_t)hdr->nlmsg_len) {
    if (dofail)
      exit(1);
    return -1;
  }
  // The reply is read into the same buffer, overwriting the request.
  n = recv(sock, nlmsg->buf, sizeof(nlmsg->buf), 0);
  if (reply_len)
    *reply_len = 0;
  if (n < 0) {
    if (dofail)
      exit(1);
    return -1;
  }
  if (n < (ssize_t)sizeof(struct nlmsghdr)) {
    errno = EINVAL;
    if (dofail)
      exit(1);
    return -1;
  }
  if (hdr->nlmsg_type == NLMSG_DONE)
    return 0;
  if (reply_len && hdr->nlmsg_type == reply_type) {
    *reply_len = n;
    return 0;
  }
  if (n < (ssize_t)(sizeof(struct nlmsghdr) + sizeof(struct nlmsgerr))) {
    errno = EINVAL;
    if (dofail)
      exit(1);
    return -1;
  }
  if (hdr->nlmsg_type != NLMSG_ERROR) {
    errno = EINVAL;
    if (dofail)
      exit(1);
    return -1;
  }
  // nlmsgerr carries a negative errno; 0 means ACK (returned as 0 here).
  errno = -((struct nlmsgerr*)(hdr + 1))->error;
  return -errno;
}
// Send with no reply parsing; exits the process on transport failure.
static int netlink_send(struct nlmsg* nlmsg, int sock)
{
  return netlink_send_ext(nlmsg, sock, 0, NULL, true);
}
// Resolve a generic-netlink family name to its numeric id via
// CTRL_CMD_GETFAMILY, scanning the reply for CTRL_ATTR_FAMILY_ID.
// Returns the id, or -1 on failure (errno set).
static int netlink_query_family_id(struct nlmsg* nlmsg, int sock,
                                   const char* family_name, bool dofail)
{
  struct genlmsghdr genlhdr;
  memset(&genlhdr, 0, sizeof(genlhdr));
  genlhdr.cmd = CTRL_CMD_GETFAMILY;
  netlink_init(nlmsg, GENL_ID_CTRL, 0, &genlhdr, sizeof(genlhdr));
  netlink_attr(nlmsg, CTRL_ATTR_FAMILY_NAME, family_name,
               strnlen(family_name, GENL_NAMSIZ - 1) + 1);
  int n = 0;
  int err = netlink_send_ext(nlmsg, sock, GENL_ID_CTRL, &n, dofail);
  if (err < 0) {
    return -1;
  }
  uint16_t id = 0;
  // Walk the attributes following the genl header in the reply.
  struct nlattr* attr = (struct nlattr*)(nlmsg->buf + NLMSG_HDRLEN +
                                         NLMSG_ALIGN(sizeof(genlhdr)));
  for (; (char*)attr < nlmsg->buf + n;
       attr = (struct nlattr*)((char*)attr + NLMSG_ALIGN(attr->nla_len))) {
    if (attr->nla_type == CTRL_ATTR_FAMILY_ID) {
      id = *(uint16_t*)(attr + 1);
      break;
    }
  }
  if (!id) {
    errno = EINVAL;
    return -1;
  }
  // Drain the trailing ACK message.
  recv(sock, nlmsg->buf, sizeof(nlmsg->buf), 0);
  return id;
}
// Return the length of the netlink message at `offset` inside the buffer,
// or -1 when the offset is at the end or the message would overrun
// `total_len`.
static int netlink_next_msg(struct nlmsg* nlmsg, unsigned int offset,
                            unsigned int total_len)
{
  struct nlmsghdr* hdr = (struct nlmsghdr*)(nlmsg->buf + offset);
  if (offset == total_len || offset + hdr->nlmsg_len > total_len)
    return -1;
  return hdr->nlmsg_len;
}
// Begin an RTM_NEWLINK create request for a device of the given `type`;
// leaves the IFLA_LINKINFO nest open for callers to add IFLA_INFO_DATA.
static void netlink_add_device_impl(struct nlmsg* nlmsg, const char* type,
                                    const char* name)
{
  struct ifinfomsg hdr;
  memset(&hdr, 0, sizeof(hdr));
  netlink_init(nlmsg, RTM_NEWLINK, NLM_F_EXCL | NLM_F_CREATE, &hdr,
               sizeof(hdr));
  if (name)
    netlink_attr(nlmsg, IFLA_IFNAME, name, strlen(name));
  netlink_nest(nlmsg, IFLA_LINKINFO);
  netlink_attr(nlmsg, IFLA_INFO_KIND, type, strlen(type));
}
// Create a simple device with no type-specific data; send errors are
// ignored (device may already exist or the kind may be unsupported).
static void netlink_add_device(struct nlmsg* nlmsg, int sock, const char* type,
                               const char* name)
{
  netlink_add_device_impl(nlmsg, type, name);
  netlink_done(nlmsg);
  int err = netlink_send(nlmsg, sock);
  if (err < 0) {
  }
}
// Create a veth pair: `name` plus its peer named `peer`.
static void netlink_add_veth(struct nlmsg* nlmsg, int sock, const char* name,
                             const char* peer)
{
  netlink_add_device_impl(nlmsg, "veth", name);
  netlink_nest(nlmsg, IFLA_INFO_DATA);
  netlink_nest(nlmsg, VETH_INFO_PEER);
  // VETH_INFO_PEER carries an embedded (zeroed) ifinfomsg before attrs.
  nlmsg->pos += sizeof(struct ifinfomsg);
  netlink_attr(nlmsg, IFLA_IFNAME, peer, strlen(peer));
  netlink_done(nlmsg);
  netlink_done(nlmsg);
  netlink_done(nlmsg);
  int err = netlink_send(nlmsg, sock);
  if (err < 0) {
  }
}
// Create an HSR device bonding the two given slave interfaces.
static void netlink_add_hsr(struct nlmsg* nlmsg, int sock, const char* name,
                            const char* slave1, const char* slave2)
{
  netlink_add_device_impl(nlmsg, "hsr", name);
  netlink_nest(nlmsg, IFLA_INFO_DATA);
  int ifindex1 = if_nametoindex(slave1);
  netlink_attr(nlmsg, IFLA_HSR_SLAVE1, &ifindex1, sizeof(ifindex1));
  int ifindex2 = if_nametoindex(slave2);
  netlink_attr(nlmsg, IFLA_HSR_SLAVE2, &ifindex2, sizeof(ifindex2));
  netlink_done(nlmsg);
  netlink_done(nlmsg);
  int err = netlink_send(nlmsg, sock);
  if (err < 0) {
  }
}
// Create a device of `type` stacked on top of `link` (IFLA_LINK).
static void netlink_add_linked(struct nlmsg* nlmsg, int sock, const char* type,
                               const char* name, const char* link)
{
  netlink_add_device_impl(nlmsg, type, name);
  netlink_done(nlmsg);
  int ifindex = if_nametoindex(link);
  netlink_attr(nlmsg, IFLA_LINK, &ifindex, sizeof(ifindex));
  int err = netlink_send(nlmsg, sock);
  if (err < 0) {
  }
}
// Create a VLAN subinterface of `link` with the given id and protocol.
static void netlink_add_vlan(struct nlmsg* nlmsg, int sock, const char* name,
                             const char* link, uint16_t id, uint16_t proto)
{
  netlink_add_device_impl(nlmsg, "vlan", name);
  netlink_nest(nlmsg, IFLA_INFO_DATA);
  netlink_attr(nlmsg, IFLA_VLAN_ID, &id, sizeof(id));
  netlink_attr(nlmsg, IFLA_VLAN_PROTOCOL, &proto, sizeof(proto));
  netlink_done(nlmsg);
  netlink_done(nlmsg);
  int ifindex = if_nametoindex(link);
  netlink_attr(nlmsg, IFLA_LINK, &ifindex, sizeof(ifindex));
  int err = netlink_send(nlmsg, sock);
  if (err < 0) {
  }
}
// Create a macvlan (bridge mode) on top of `link`.
static void netlink_add_macvlan(struct nlmsg* nlmsg, int sock, const char* name,
                                const char* link)
{
  netlink_add_device_impl(nlmsg, "macvlan", name);
  netlink_nest(nlmsg, IFLA_INFO_DATA);
  uint32_t mode = MACVLAN_MODE_BRIDGE;
  netlink_attr(nlmsg, IFLA_MACVLAN_MODE, &mode, sizeof(mode));
  netlink_done(nlmsg);
  netlink_done(nlmsg);
  int ifindex = if_nametoindex(link);
  netlink_attr(nlmsg, IFLA_LINK, &ifindex, sizeof(ifindex));
  int err = netlink_send(nlmsg, sock);
  if (err < 0) {
  }
}
// Create a geneve tunnel with the given VNI and an optional IPv4 or IPv6
// remote endpoint.
static void netlink_add_geneve(struct nlmsg* nlmsg, int sock, const char* name,
                               uint32_t vni, struct in_addr* addr4,
                               struct in6_addr* addr6)
{
  netlink_add_device_impl(nlmsg, "geneve", name);
  netlink_nest(nlmsg, IFLA_INFO_DATA);
  netlink_attr(nlmsg, IFLA_GENEVE_ID, &vni, sizeof(vni));
  if (addr4)
    netlink_attr(nlmsg, IFLA_GENEVE_REMOTE, addr4, sizeof(*addr4));
  if (addr6)
    netlink_attr(nlmsg, IFLA_GENEVE_REMOTE6, addr6, sizeof(*addr6));
  netlink_done(nlmsg);
  netlink_done(nlmsg);
  int err = netlink_send(nlmsg, sock);
  if (err < 0) {
  }
}
// ipvlan uapi constants (may be missing from older userspace headers).
#define IFLA_IPVLAN_FLAGS 2
#define IPVLAN_MODE_L3S 2
#undef IPVLAN_F_VEPA
#define IPVLAN_F_VEPA 2
// Create an ipvlan on top of `link` with the given mode/flags.
static void netlink_add_ipvlan(struct nlmsg* nlmsg, int sock, const char* name,
                               const char* link, uint16_t mode, uint16_t flags)
{
  netlink_add_device_impl(nlmsg, "ipvlan", name);
  netlink_nest(nlmsg, IFLA_INFO_DATA);
  netlink_attr(nlmsg, IFLA_IPVLAN_MODE, &mode, sizeof(mode));
  netlink_attr(nlmsg, IFLA_IPVLAN_FLAGS, &flags, sizeof(flags));
  netlink_done(nlmsg);
  netlink_done(nlmsg);
  int ifindex = if_nametoindex(link);
  netlink_attr(nlmsg, IFLA_LINK, &ifindex, sizeof(ifindex));
  int err = netlink_send(nlmsg, sock);
  if (err < 0) {
  }
}
// Modify an existing device: optionally bring it up, enslave it to
// `master`, set its MAC, and/or rename it. Errors are ignored.
static void netlink_device_change(struct nlmsg* nlmsg, int sock,
                                  const char* name, bool up, const char* master,
                                  const void* mac, int macsize,
                                  const char* new_name)
{
  struct ifinfomsg hdr;
  memset(&hdr, 0, sizeof(hdr));
  if (up)
    hdr.ifi_flags = hdr.ifi_change = IFF_UP;
  hdr.ifi_index = if_nametoindex(name);
  netlink_init(nlmsg, RTM_NEWLINK, 0, &hdr, sizeof(hdr));
  if (new_name)
    netlink_attr(nlmsg, IFLA_IFNAME, new_name, strlen(new_name));
  if (master) {
    int ifindex = if_nametoindex(master);
    netlink_attr(nlmsg, IFLA_MASTER, &ifindex, sizeof(ifindex));
  }
  if (macsize)
    netlink_attr(nlmsg, IFLA_ADDRESS, mac, macsize);
  int err = netlink_send(nlmsg, sock);
  if (err < 0) {
  }
}
// Assign an IPv4 (/24) or IPv6 (/120) address to `dev`; the address
// family is inferred from addrsize (4 vs 16 bytes).
static int netlink_add_addr(struct nlmsg* nlmsg, int sock, const char* dev,
                            const void* addr, int addrsize)
{
  struct ifaddrmsg hdr;
  memset(&hdr, 0, sizeof(hdr));
  hdr.ifa_family = addrsize == 4 ? AF_INET : AF_INET6;
  hdr.ifa_prefixlen = addrsize == 4 ? 24 : 120;
  hdr.ifa_scope = RT_SCOPE_UNIVERSE;
  hdr.ifa_index = if_nametoindex(dev);
  netlink_init(nlmsg, RTM_NEWADDR, NLM_F_CREATE | NLM_F_REPLACE, &hdr,
               sizeof(hdr));
  netlink_attr(nlmsg, IFA_LOCAL, addr, addrsize);
  netlink_attr(nlmsg, IFA_ADDRESS, addr, addrsize);
  return netlink_send(nlmsg, sock);
}
// Assign a dotted-quad IPv4 address to `dev`; errors are ignored.
static void netlink_add_addr4(struct nlmsg* nlmsg, int sock, const char* dev,
                              const char* addr)
{
  struct in_addr in_addr;
  inet_pton(AF_INET, addr, &in_addr);
  int err = netlink_add_addr(nlmsg, sock, dev, &in_addr, sizeof(in_addr));
  if (err < 0) {
  }
}
// Assign a textual IPv6 address to `dev`; errors are ignored.
static void netlink_add_addr6(struct nlmsg* nlmsg, int sock, const char* dev,
                              const char* addr)
{
  struct in6_addr in6_addr;
  inet_pton(AF_INET6, addr, &in6_addr);
  int err = netlink_add_addr(nlmsg, sock, dev, &in6_addr, sizeof(in6_addr));
  if (err < 0) {
  }
}
// Install a permanent neighbor (ARP/NDP) entry mapping addr -> mac on the
// given interface; errors are ignored.
static void netlink_add_neigh(struct nlmsg* nlmsg, int sock, const char* name,
                              const void* addr, int addrsize, const void* mac,
                              int macsize)
{
  struct ndmsg hdr;
  memset(&hdr, 0, sizeof(hdr));
  hdr.ndm_family = addrsize == 4 ? AF_INET : AF_INET6;
  hdr.ndm_ifindex = if_nametoindex(name);
  hdr.ndm_state = NUD_PERMANENT;
  netlink_init(nlmsg, RTM_NEWNEIGH, NLM_F_EXCL | NLM_F_CREATE, &hdr,
               sizeof(hdr));
  netlink_attr(nlmsg, NDA_DST, addr, addrsize);
  netlink_attr(nlmsg, NDA_LLADDR, mac, macsize);
  int err = netlink_send(nlmsg, sock);
  if (err < 0) {
  }
}
static struct nlmsg nlmsg;
static int tunfd = -1;
#define TUN_IFACE "syz_tun"
#define LOCAL_MAC 0xaaaaaaaaaaaa
#define REMOTE_MAC 0xaaaaaaaaaabb
#define LOCAL_IPV4 "172.20.20.170"
#define REMOTE_IPV4 "172.20.20.187"
#define LOCAL_IPV6 "fe80::aa"
#define REMOTE_IPV6 "fe80::bb"
#define IFF_NAPI 0x0010
/* Create the syz_tun TAP interface, pin its fd to a fixed slot, and give
 * it local/remote IPv4+IPv6 addresses plus static neighbor entries. */
static void initialize_tun(void)
{
tunfd = open("/dev/net/tun", O_RDWR | O_NONBLOCK);
if (tunfd == -1) {
printf("tun: can't open /dev/net/tun: please enable CONFIG_TUN=y\n");
printf("otherwise fuzzing or reproducing might not work as intended\n");
return;
}
/* Move the fd to a fixed number so fuzzed programs interact with a
 * deterministic descriptor and repros stay reproducible. */
const int kTunFd = 240;
if (dup2(tunfd, kTunFd) < 0)
exit(1);
close(tunfd);
tunfd = kTunFd;
struct ifreq ifr;
memset(&ifr, 0, sizeof(ifr));
strncpy(ifr.ifr_name, TUN_IFACE, IFNAMSIZ);
ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
if (ioctl(tunfd, TUNSETIFF, (void*)&ifr) < 0) {
exit(1);
}
/* Disable DAD and router solicitations: the interface must be usable
 * immediately and must not emit unsolicited IPv6 traffic. */
char sysctl[64];
sprintf(sysctl, "/proc/sys/net/ipv6/conf/%s/accept_dad", TUN_IFACE);
write_file(sysctl, "0");
sprintf(sysctl, "/proc/sys/net/ipv6/conf/%s/router_solicitations", TUN_IFACE);
write_file(sysctl, "0");
int sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
if (sock == -1)
exit(1);
netlink_add_addr4(&nlmsg, sock, TUN_IFACE, LOCAL_IPV4);
netlink_add_addr6(&nlmsg, sock, TUN_IFACE, LOCAL_IPV6);
/* Static neighbor entries for the remote addresses avoid ARP/NDP. */
uint64_t macaddr = REMOTE_MAC;
struct in_addr in_addr;
inet_pton(AF_INET, REMOTE_IPV4, &in_addr);
netlink_add_neigh(&nlmsg, sock, TUN_IFACE, &in_addr, sizeof(in_addr),
&macaddr, ETH_ALEN);
struct in6_addr in6_addr;
inet_pton(AF_INET6, REMOTE_IPV6, &in6_addr);
netlink_add_neigh(&nlmsg, sock, TUN_IFACE, &in6_addr, sizeof(in6_addr),
&macaddr, ETH_ALEN);
macaddr = LOCAL_MAC;
/* Set the local MAC and bring the interface up. */
netlink_device_change(&nlmsg, sock, TUN_IFACE, true, 0, &macaddr, ETH_ALEN,
NULL);
close(sock);
}
/* Devlink generic-netlink constants (subset of linux/devlink.h). */
#define DEVLINK_FAMILY_NAME "devlink"
#define DEVLINK_CMD_PORT_GET 5
#define DEVLINK_ATTR_BUS_NAME 1
#define DEVLINK_ATTR_DEV_NAME 2
#define DEVLINK_ATTR_NETDEV_NAME 7
/* Second scratch buffer: used while nlmsg still holds a dump reply. */
static struct nlmsg nlmsg2;
/* Rename the netdev of every devlink port of bus_name/dev_name to
 * "<netdev_prefix><index>" so later setup can address ports by name. */
static void initialize_devlink_ports(const char* bus_name, const char* dev_name,
const char* netdev_prefix)
{
struct genlmsghdr genlhdr;
int len, total_len, id, err, offset;
uint16_t netdev_index;
int sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
if (sock == -1)
exit(1);
int rtsock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
if (rtsock == -1)
exit(1);
/* Resolve the dynamic genetlink family id for "devlink". */
id = netlink_query_family_id(&nlmsg, sock, DEVLINK_FAMILY_NAME, true);
if (id == -1)
goto error;
memset(&genlhdr, 0, sizeof(genlhdr));
genlhdr.cmd = DEVLINK_CMD_PORT_GET;
/* Dump all ports of the given devlink device into nlmsg.buf. */
netlink_init(&nlmsg, id, NLM_F_DUMP, &genlhdr, sizeof(genlhdr));
netlink_attr(&nlmsg, DEVLINK_ATTR_BUS_NAME, bus_name, strlen(bus_name) + 1);
netlink_attr(&nlmsg, DEVLINK_ATTR_DEV_NAME, dev_name, strlen(dev_name) + 1);
err = netlink_send_ext(&nlmsg, sock, id, &total_len, true);
if (err < 0) {
goto error;
}
offset = 0;
netdev_index = 0;
/* Walk each message of the dump reply. The rename goes through nlmsg2
 * on the rtnetlink socket so the reply in nlmsg stays intact. */
while ((len = netlink_next_msg(&nlmsg, offset, total_len)) != -1) {
struct nlattr* attr = (struct nlattr*)(nlmsg.buf + offset + NLMSG_HDRLEN +
NLMSG_ALIGN(sizeof(genlhdr)));
for (; (char*)attr < nlmsg.buf + offset + len;
attr = (struct nlattr*)((char*)attr + NLMSG_ALIGN(attr->nla_len))) {
if (attr->nla_type == DEVLINK_ATTR_NETDEV_NAME) {
char* port_name;
char netdev_name[IFNAMSIZ];
port_name = (char*)(attr + 1);
snprintf(netdev_name, sizeof(netdev_name), "%s%d", netdev_prefix,
netdev_index);
netlink_device_change(&nlmsg2, rtsock, port_name, true, 0, 0, 0,
netdev_name);
break;
}
}
offset += len;
/* Index advances per message even if it carried no netdev name. */
netdev_index++;
}
error:
close(rtsock);
close(sock);
}
/* Per-device address templates; %d / %02x are filled with index + 10. */
#define DEV_IPV4 "172.20.20.%d"
#define DEV_IPV6 "fe80::%02x"
#define DEV_MAC 0x00aaaaaaaaaa
static void netdevsim_add(unsigned int addr, unsigned int port_count)
{
  /* Register a new netdevsim device ("<addr> <port_count>") and, on
   * success, rename its devlink ports to predictable netdevsimN names.
   *
   * Bug fix: the original buf[16] could overflow — "%u %u" with two
   * unsigned ints needs up to 22 bytes, and "netdevsim%d" up to 20.
   * Use a 32-byte buffer and snprintf for both formats. */
  char buf[32];
  snprintf(buf, sizeof(buf), "%u %u", addr, port_count);
  if (write_file("/sys/bus/netdevsim/new_device", buf)) {
    snprintf(buf, sizeof(buf), "netdevsim%d", addr);
    initialize_devlink_ports("netdevsim", buf, "netdevsim");
  }
}
/* Local mirror of the wireguard generic-netlink uapi
 * (linux/wireguard.h); only the values used below matter. */
#define WG_GENL_NAME "wireguard"
enum wg_cmd {
WG_CMD_GET_DEVICE,
WG_CMD_SET_DEVICE,
};
enum wgdevice_attribute {
WGDEVICE_A_UNSPEC,
WGDEVICE_A_IFINDEX,
WGDEVICE_A_IFNAME,
WGDEVICE_A_PRIVATE_KEY,
WGDEVICE_A_PUBLIC_KEY,
WGDEVICE_A_FLAGS,
WGDEVICE_A_LISTEN_PORT,
WGDEVICE_A_FWMARK,
WGDEVICE_A_PEERS,
};
enum wgpeer_attribute {
WGPEER_A_UNSPEC,
WGPEER_A_PUBLIC_KEY,
WGPEER_A_PRESHARED_KEY,
WGPEER_A_FLAGS,
WGPEER_A_ENDPOINT,
WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL,
WGPEER_A_LAST_HANDSHAKE_TIME,
WGPEER_A_RX_BYTES,
WGPEER_A_TX_BYTES,
WGPEER_A_ALLOWEDIPS,
WGPEER_A_PROTOCOL_VERSION,
};
enum wgallowedip_attribute {
WGALLOWEDIP_A_UNSPEC,
WGALLOWEDIP_A_FAMILY,
WGALLOWEDIP_A_IPADDR,
WGALLOWEDIP_A_CIDR_MASK,
};
/* Configure three wireguard devices (wg0/wg1/wg2) fully meshed over the
 * loopback address, with fixed keys, listen ports and allowed-ip halves
 * so the devices can exchange traffic during fuzzing.
 * Each device is configured by one WG_CMD_SET_DEVICE message with nested
 * peers/allowed-ips attributes; the nesting order below must match the
 * kernel's expected layout exactly. Errors are deliberately ignored. */
static void netlink_wireguard_setup(void)
{
const char ifname_a[] = "wg0";
const char ifname_b[] = "wg1";
const char ifname_c[] = "wg2";
/* Fixed Curve25519 private/public key pairs for the three devices. */
const char private_a[] =
"\xa0\x5c\xa8\x4f\x6c\x9c\x8e\x38\x53\xe2\xfd\x7a\x70\xae\x0f\xb2\x0f\xa1"
"\x52\x60\x0c\xb0\x08\x45\x17\x4f\x08\x07\x6f\x8d\x78\x43";
const char private_b[] =
"\xb0\x80\x73\xe8\xd4\x4e\x91\xe3\xda\x92\x2c\x22\x43\x82\x44\xbb\x88\x5c"
"\x69\xe2\x69\xc8\xe9\xd8\x35\xb1\x14\x29\x3a\x4d\xdc\x6e";
const char private_c[] =
"\xa0\xcb\x87\x9a\x47\xf5\xbc\x64\x4c\x0e\x69\x3f\xa6\xd0\x31\xc7\x4a\x15"
"\x53\xb6\xe9\x01\xb9\xff\x2f\x51\x8c\x78\x04\x2f\xb5\x42";
const char public_a[] =
"\x97\x5c\x9d\x81\xc9\x83\xc8\x20\x9e\xe7\x81\x25\x4b\x89\x9f\x8e\xd9\x25"
"\xae\x9f\x09\x23\xc2\x3c\x62\xf5\x3c\x57\xcd\xbf\x69\x1c";
const char public_b[] =
"\xd1\x73\x28\x99\xf6\x11\xcd\x89\x94\x03\x4d\x7f\x41\x3d\xc9\x57\x63\x0e"
"\x54\x93\xc2\x85\xac\xa4\x00\x65\xcb\x63\x11\xbe\x69\x6b";
const char public_c[] =
"\xf4\x4d\xa3\x67\xa8\x8e\xe6\x56\x4f\x02\x02\x11\x45\x67\x27\x08\x2f\x5c"
"\xeb\xee\x8b\x1b\xf5\xeb\x73\x37\x34\x1b\x45\x9b\x39\x22";
const uint16_t listen_a = 20001;
const uint16_t listen_b = 20002;
const uint16_t listen_c = 20003;
const uint16_t af_inet = AF_INET;
const uint16_t af_inet6 = AF_INET6;
/* Endpoints: each peer is reached via 127.0.0.1/::1 on its listen port. */
const struct sockaddr_in endpoint_b_v4 = {
.sin_family = AF_INET,
.sin_port = htons(listen_b),
.sin_addr = {htonl(INADDR_LOOPBACK)}};
const struct sockaddr_in endpoint_c_v4 = {
.sin_family = AF_INET,
.sin_port = htons(listen_c),
.sin_addr = {htonl(INADDR_LOOPBACK)}};
struct sockaddr_in6 endpoint_a_v6 = {.sin6_family = AF_INET6,
.sin6_port = htons(listen_a)};
endpoint_a_v6.sin6_addr = in6addr_loopback;
struct sockaddr_in6 endpoint_c_v6 = {.sin6_family = AF_INET6,
.sin6_port = htons(listen_c)};
endpoint_c_v6.sin6_addr = in6addr_loopback;
/* Allowed-ips: the address space split in half with a /1 mask, one half
 * per peer, for both IPv4 and IPv6. */
const struct in_addr first_half_v4 = {0};
const struct in_addr second_half_v4 = {(uint32_t)htonl(128 << 24)};
const struct in6_addr first_half_v6 = {{{0}}};
const struct in6_addr second_half_v6 = {{{0x80}}};
const uint8_t half_cidr = 1;
const uint16_t persistent_keepalives[] = {1, 3, 7, 9, 14, 19};
struct genlmsghdr genlhdr = {.cmd = WG_CMD_SET_DEVICE, .version = 1};
int sock;
int id, err;
sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
if (sock == -1) {
return;
}
/* Resolve the dynamic genetlink family id for "wireguard". */
id = netlink_query_family_id(&nlmsg, sock, WG_GENL_NAME, true);
if (id == -1)
goto error;
/* --- wg0: peers wg1 (v4 endpoint) and wg2 (v6 endpoint) --- */
netlink_init(&nlmsg, id, 0, &genlhdr, sizeof(genlhdr));
netlink_attr(&nlmsg, WGDEVICE_A_IFNAME, ifname_a, strlen(ifname_a) + 1);
netlink_attr(&nlmsg, WGDEVICE_A_PRIVATE_KEY, private_a, 32);
netlink_attr(&nlmsg, WGDEVICE_A_LISTEN_PORT, &listen_a, 2);
netlink_nest(&nlmsg, NLA_F_NESTED | WGDEVICE_A_PEERS);
netlink_nest(&nlmsg, NLA_F_NESTED | 0);
netlink_attr(&nlmsg, WGPEER_A_PUBLIC_KEY, public_b, 32);
netlink_attr(&nlmsg, WGPEER_A_ENDPOINT, &endpoint_b_v4,
sizeof(endpoint_b_v4));
netlink_attr(&nlmsg, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL,
&persistent_keepalives[0], 2);
netlink_nest(&nlmsg, NLA_F_NESTED | WGPEER_A_ALLOWEDIPS);
netlink_nest(&nlmsg, NLA_F_NESTED | 0);
netlink_attr(&nlmsg, WGALLOWEDIP_A_FAMILY, &af_inet, 2);
netlink_attr(&nlmsg, WGALLOWEDIP_A_IPADDR, &first_half_v4,
sizeof(first_half_v4));
netlink_attr(&nlmsg, WGALLOWEDIP_A_CIDR_MASK, &half_cidr, 1);
netlink_done(&nlmsg);
netlink_nest(&nlmsg, NLA_F_NESTED | 0);
netlink_attr(&nlmsg, WGALLOWEDIP_A_FAMILY, &af_inet6, 2);
netlink_attr(&nlmsg, WGALLOWEDIP_A_IPADDR, &first_half_v6,
sizeof(first_half_v6));
netlink_attr(&nlmsg, WGALLOWEDIP_A_CIDR_MASK, &half_cidr, 1);
netlink_done(&nlmsg);
netlink_done(&nlmsg);
netlink_done(&nlmsg);
netlink_nest(&nlmsg, NLA_F_NESTED | 0);
netlink_attr(&nlmsg, WGPEER_A_PUBLIC_KEY, public_c, 32);
netlink_attr(&nlmsg, WGPEER_A_ENDPOINT, &endpoint_c_v6,
sizeof(endpoint_c_v6));
netlink_attr(&nlmsg, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL,
&persistent_keepalives[1], 2);
netlink_nest(&nlmsg, NLA_F_NESTED | WGPEER_A_ALLOWEDIPS);
netlink_nest(&nlmsg, NLA_F_NESTED | 0);
netlink_attr(&nlmsg, WGALLOWEDIP_A_FAMILY, &af_inet, 2);
netlink_attr(&nlmsg, WGALLOWEDIP_A_IPADDR, &second_half_v4,
sizeof(second_half_v4));
netlink_attr(&nlmsg, WGALLOWEDIP_A_CIDR_MASK, &half_cidr, 1);
netlink_done(&nlmsg);
netlink_nest(&nlmsg, NLA_F_NESTED | 0);
netlink_attr(&nlmsg, WGALLOWEDIP_A_FAMILY, &af_inet6, 2);
netlink_attr(&nlmsg, WGALLOWEDIP_A_IPADDR, &second_half_v6,
sizeof(second_half_v6));
netlink_attr(&nlmsg, WGALLOWEDIP_A_CIDR_MASK, &half_cidr, 1);
netlink_done(&nlmsg);
netlink_done(&nlmsg);
netlink_done(&nlmsg);
netlink_done(&nlmsg);
err = netlink_send(&nlmsg, sock);
if (err < 0) {
}
/* --- wg1: peers wg0 (v6 endpoint) and wg2 (v4 endpoint) --- */
netlink_init(&nlmsg, id, 0, &genlhdr, sizeof(genlhdr));
netlink_attr(&nlmsg, WGDEVICE_A_IFNAME, ifname_b, strlen(ifname_b) + 1);
netlink_attr(&nlmsg, WGDEVICE_A_PRIVATE_KEY, private_b, 32);
netlink_attr(&nlmsg, WGDEVICE_A_LISTEN_PORT, &listen_b, 2);
netlink_nest(&nlmsg, NLA_F_NESTED | WGDEVICE_A_PEERS);
netlink_nest(&nlmsg, NLA_F_NESTED | 0);
netlink_attr(&nlmsg, WGPEER_A_PUBLIC_KEY, public_a, 32);
netlink_attr(&nlmsg, WGPEER_A_ENDPOINT, &endpoint_a_v6,
sizeof(endpoint_a_v6));
netlink_attr(&nlmsg, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL,
&persistent_keepalives[2], 2);
netlink_nest(&nlmsg, NLA_F_NESTED | WGPEER_A_ALLOWEDIPS);
netlink_nest(&nlmsg, NLA_F_NESTED | 0);
netlink_attr(&nlmsg, WGALLOWEDIP_A_FAMILY, &af_inet, 2);
netlink_attr(&nlmsg, WGALLOWEDIP_A_IPADDR, &first_half_v4,
sizeof(first_half_v4));
netlink_attr(&nlmsg, WGALLOWEDIP_A_CIDR_MASK, &half_cidr, 1);
netlink_done(&nlmsg);
netlink_nest(&nlmsg, NLA_F_NESTED | 0);
netlink_attr(&nlmsg, WGALLOWEDIP_A_FAMILY, &af_inet6, 2);
netlink_attr(&nlmsg, WGALLOWEDIP_A_IPADDR, &first_half_v6,
sizeof(first_half_v6));
netlink_attr(&nlmsg, WGALLOWEDIP_A_CIDR_MASK, &half_cidr, 1);
netlink_done(&nlmsg);
netlink_done(&nlmsg);
netlink_done(&nlmsg);
netlink_nest(&nlmsg, NLA_F_NESTED | 0);
netlink_attr(&nlmsg, WGPEER_A_PUBLIC_KEY, public_c, 32);
netlink_attr(&nlmsg, WGPEER_A_ENDPOINT, &endpoint_c_v4,
sizeof(endpoint_c_v4));
netlink_attr(&nlmsg, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL,
&persistent_keepalives[3], 2);
netlink_nest(&nlmsg, NLA_F_NESTED | WGPEER_A_ALLOWEDIPS);
netlink_nest(&nlmsg, NLA_F_NESTED | 0);
netlink_attr(&nlmsg, WGALLOWEDIP_A_FAMILY, &af_inet, 2);
netlink_attr(&nlmsg, WGALLOWEDIP_A_IPADDR, &second_half_v4,
sizeof(second_half_v4));
netlink_attr(&nlmsg, WGALLOWEDIP_A_CIDR_MASK, &half_cidr, 1);
netlink_done(&nlmsg);
netlink_nest(&nlmsg, NLA_F_NESTED | 0);
netlink_attr(&nlmsg, WGALLOWEDIP_A_FAMILY, &af_inet6, 2);
netlink_attr(&nlmsg, WGALLOWEDIP_A_IPADDR, &second_half_v6,
sizeof(second_half_v6));
netlink_attr(&nlmsg, WGALLOWEDIP_A_CIDR_MASK, &half_cidr, 1);
netlink_done(&nlmsg);
netlink_done(&nlmsg);
netlink_done(&nlmsg);
netlink_done(&nlmsg);
err = netlink_send(&nlmsg, sock);
if (err < 0) {
}
/* --- wg2: peers wg0 (v6 endpoint) and wg1 (v4 endpoint) --- */
netlink_init(&nlmsg, id, 0, &genlhdr, sizeof(genlhdr));
netlink_attr(&nlmsg, WGDEVICE_A_IFNAME, ifname_c, strlen(ifname_c) + 1);
netlink_attr(&nlmsg, WGDEVICE_A_PRIVATE_KEY, private_c, 32);
netlink_attr(&nlmsg, WGDEVICE_A_LISTEN_PORT, &listen_c, 2);
netlink_nest(&nlmsg, NLA_F_NESTED | WGDEVICE_A_PEERS);
netlink_nest(&nlmsg, NLA_F_NESTED | 0);
netlink_attr(&nlmsg, WGPEER_A_PUBLIC_KEY, public_a, 32);
netlink_attr(&nlmsg, WGPEER_A_ENDPOINT, &endpoint_a_v6,
sizeof(endpoint_a_v6));
netlink_attr(&nlmsg, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL,
&persistent_keepalives[4], 2);
netlink_nest(&nlmsg, NLA_F_NESTED | WGPEER_A_ALLOWEDIPS);
netlink_nest(&nlmsg, NLA_F_NESTED | 0);
netlink_attr(&nlmsg, WGALLOWEDIP_A_FAMILY, &af_inet, 2);
netlink_attr(&nlmsg, WGALLOWEDIP_A_IPADDR, &first_half_v4,
sizeof(first_half_v4));
netlink_attr(&nlmsg, WGALLOWEDIP_A_CIDR_MASK, &half_cidr, 1);
netlink_done(&nlmsg);
netlink_nest(&nlmsg, NLA_F_NESTED | 0);
netlink_attr(&nlmsg, WGALLOWEDIP_A_FAMILY, &af_inet6, 2);
netlink_attr(&nlmsg, WGALLOWEDIP_A_IPADDR, &first_half_v6,
sizeof(first_half_v6));
netlink_attr(&nlmsg, WGALLOWEDIP_A_CIDR_MASK, &half_cidr, 1);
netlink_done(&nlmsg);
netlink_done(&nlmsg);
netlink_done(&nlmsg);
netlink_nest(&nlmsg, NLA_F_NESTED | 0);
netlink_attr(&nlmsg, WGPEER_A_PUBLIC_KEY, public_b, 32);
netlink_attr(&nlmsg, WGPEER_A_ENDPOINT, &endpoint_b_v4,
sizeof(endpoint_b_v4));
netlink_attr(&nlmsg, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL,
&persistent_keepalives[5], 2);
netlink_nest(&nlmsg, NLA_F_NESTED | WGPEER_A_ALLOWEDIPS);
netlink_nest(&nlmsg, NLA_F_NESTED | 0);
netlink_attr(&nlmsg, WGALLOWEDIP_A_FAMILY, &af_inet, 2);
netlink_attr(&nlmsg, WGALLOWEDIP_A_IPADDR, &second_half_v4,
sizeof(second_half_v4));
netlink_attr(&nlmsg, WGALLOWEDIP_A_CIDR_MASK, &half_cidr, 1);
netlink_done(&nlmsg);
netlink_nest(&nlmsg, NLA_F_NESTED | 0);
netlink_attr(&nlmsg, WGALLOWEDIP_A_FAMILY, &af_inet6, 2);
netlink_attr(&nlmsg, WGALLOWEDIP_A_IPADDR, &second_half_v6,
sizeof(second_half_v6));
netlink_attr(&nlmsg, WGALLOWEDIP_A_CIDR_MASK, &half_cidr, 1);
netlink_done(&nlmsg);
netlink_done(&nlmsg);
netlink_done(&nlmsg);
netlink_done(&nlmsg);
err = netlink_send(&nlmsg, sock);
if (err < 0) {
}
error:
close(sock);
}
/* Create the full zoo of virtual network devices the fuzzer expects
 * (bridges, bonds, veth pairs, vlan/macvlan/ipvlan, geneve, wireguard,
 * netdevsim, ...), then assign each one an IPv4/IPv6 address and a MAC
 * derived from its table index, and bring it up. */
static void initialize_netdevices(void)
{
char netdevsim[16];
sprintf(netdevsim, "netdevsim%d", (int)procid);
/* Device types created directly via RTM_NEWLINK. */
struct {
const char* type;
const char* dev;
} devtypes[] = {
{"ip6gretap", "ip6gretap0"}, {"bridge", "bridge0"},
{"vcan", "vcan0"}, {"bond", "bond0"},
{"team", "team0"}, {"dummy", "dummy0"},
{"nlmon", "nlmon0"}, {"caif", "caif0"},
{"batadv", "batadv0"}, {"vxcan", "vxcan1"},
{"netdevsim", netdevsim}, {"veth", 0},
{"xfrm", "xfrm0"}, {"wireguard", "wg0"},
{"wireguard", "wg1"}, {"wireguard", "wg2"},
};
/* Master device kinds that get a pair of veth slaves enslaved below. */
const char* devmasters[] = {"bridge", "bond", "team", "batadv"};
/* All devices that receive addresses/MACs at the end. macsize == 0
 * skips the MAC assignment; noipv6 skips the IPv6 address. */
struct {
const char* name;
int macsize;
bool noipv6;
} devices[] = {
{"lo", ETH_ALEN},
{"sit0", 0},
{"bridge0", ETH_ALEN},
{"vcan0", 0, true},
{"tunl0", 0},
{"gre0", 0},
{"gretap0", ETH_ALEN},
{"ip_vti0", 0},
{"ip6_vti0", 0},
{"ip6tnl0", 0},
{"ip6gre0", 0},
{"ip6gretap0", ETH_ALEN},
{"erspan0", ETH_ALEN},
{"bond0", ETH_ALEN},
{"veth0", ETH_ALEN},
{"veth1", ETH_ALEN},
{"team0", ETH_ALEN},
{"veth0_to_bridge", ETH_ALEN},
{"veth1_to_bridge", ETH_ALEN},
{"veth0_to_bond", ETH_ALEN},
{"veth1_to_bond", ETH_ALEN},
{"veth0_to_team", ETH_ALEN},
{"veth1_to_team", ETH_ALEN},
{"veth0_to_hsr", ETH_ALEN},
{"veth1_to_hsr", ETH_ALEN},
{"hsr0", 0},
{"dummy0", ETH_ALEN},
{"nlmon0", 0},
{"vxcan0", 0, true},
{"vxcan1", 0, true},
{"caif0", ETH_ALEN},
{"batadv0", ETH_ALEN},
{netdevsim, ETH_ALEN},
{"xfrm0", ETH_ALEN},
{"veth0_virt_wifi", ETH_ALEN},
{"veth1_virt_wifi", ETH_ALEN},
{"virt_wifi0", ETH_ALEN},
{"veth0_vlan", ETH_ALEN},
{"veth1_vlan", ETH_ALEN},
{"vlan0", ETH_ALEN},
{"vlan1", ETH_ALEN},
{"macvlan0", ETH_ALEN},
{"macvlan1", ETH_ALEN},
{"ipvlan0", ETH_ALEN},
{"ipvlan1", ETH_ALEN},
{"veth0_macvtap", ETH_ALEN},
{"veth1_macvtap", ETH_ALEN},
{"macvtap0", ETH_ALEN},
{"macsec0", ETH_ALEN},
{"veth0_to_batadv", ETH_ALEN},
{"veth1_to_batadv", ETH_ALEN},
{"batadv_slave_0", ETH_ALEN},
{"batadv_slave_1", ETH_ALEN},
{"geneve0", ETH_ALEN},
{"geneve1", ETH_ALEN},
{"wg0", 0},
{"wg1", 0},
{"wg2", 0},
};
int sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
if (sock == -1)
exit(1);
unsigned i;
for (i = 0; i < sizeof(devtypes) / sizeof(devtypes[0]); i++)
netlink_add_device(&nlmsg, sock, devtypes[i].type, devtypes[i].dev);
/* For every master kind: create two veth pairs and enslave one end of
 * each pair ("<kind>_slave_N") to "<kind>0". */
for (i = 0; i < sizeof(devmasters) / (sizeof(devmasters[0])); i++) {
char master[32], slave0[32], veth0[32], slave1[32], veth1[32];
sprintf(slave0, "%s_slave_0", devmasters[i]);
sprintf(veth0, "veth0_to_%s", devmasters[i]);
netlink_add_veth(&nlmsg, sock, slave0, veth0);
sprintf(slave1, "%s_slave_1", devmasters[i]);
sprintf(veth1, "veth1_to_%s", devmasters[i]);
netlink_add_veth(&nlmsg, sock, slave1, veth1);
sprintf(master, "%s0", devmasters[i]);
netlink_device_change(&nlmsg, sock, slave0, false, master, 0, 0, NULL);
netlink_device_change(&nlmsg, sock, slave1, false, master, 0, 0, NULL);
}
netlink_device_change(&nlmsg, sock, "bridge_slave_0", true, 0, 0, 0, NULL);
netlink_device_change(&nlmsg, sock, "bridge_slave_1", true, 0, 0, 0, NULL);
/* hsr needs its slaves supplied at creation time, so it is set up
 * separately from the devmasters loop above. */
netlink_add_veth(&nlmsg, sock, "hsr_slave_0", "veth0_to_hsr");
netlink_add_veth(&nlmsg, sock, "hsr_slave_1", "veth1_to_hsr");
netlink_add_hsr(&nlmsg, sock, "hsr0", "hsr_slave_0", "hsr_slave_1");
netlink_device_change(&nlmsg, sock, "hsr_slave_0", true, 0, 0, 0, NULL);
netlink_device_change(&nlmsg, sock, "hsr_slave_1", true, 0, 0, 0, NULL);
netlink_add_veth(&nlmsg, sock, "veth0_virt_wifi", "veth1_virt_wifi");
netlink_add_linked(&nlmsg, sock, "virt_wifi", "virt_wifi0",
"veth1_virt_wifi");
netlink_add_veth(&nlmsg, sock, "veth0_vlan", "veth1_vlan");
netlink_add_vlan(&nlmsg, sock, "vlan0", "veth0_vlan", 0, htons(ETH_P_8021Q));
netlink_add_vlan(&nlmsg, sock, "vlan1", "veth0_vlan", 1, htons(ETH_P_8021AD));
netlink_add_macvlan(&nlmsg, sock, "macvlan0", "veth1_vlan");
netlink_add_macvlan(&nlmsg, sock, "macvlan1", "veth1_vlan");
netlink_add_ipvlan(&nlmsg, sock, "ipvlan0", "veth0_vlan", IPVLAN_MODE_L2, 0);
netlink_add_ipvlan(&nlmsg, sock, "ipvlan1", "veth0_vlan", IPVLAN_MODE_L3S,
IPVLAN_F_VEPA);
netlink_add_veth(&nlmsg, sock, "veth0_macvtap", "veth1_macvtap");
netlink_add_linked(&nlmsg, sock, "macvtap", "macvtap0", "veth0_macvtap");
netlink_add_linked(&nlmsg, sock, "macsec", "macsec0", "veth1_macvtap");
char addr[32];
sprintf(addr, DEV_IPV4, 14 + 10);
struct in_addr geneve_addr4;
if (inet_pton(AF_INET, addr, &geneve_addr4) <= 0)
exit(1);
struct in6_addr geneve_addr6;
if (inet_pton(AF_INET6, "fc00::01", &geneve_addr6) <= 0)
exit(1);
netlink_add_geneve(&nlmsg, sock, "geneve0", 0, &geneve_addr4, 0);
netlink_add_geneve(&nlmsg, sock, "geneve1", 1, 0, &geneve_addr6);
netdevsim_add((int)procid, 4);
netlink_wireguard_setup();
/* Finally: per-device addresses and MACs derived from the table index
 * (IPs start at .10 / ::0a, MACs at DEV_MAC + (index+10) << 40). */
for (i = 0; i < sizeof(devices) / (sizeof(devices[0])); i++) {
char addr[32];
sprintf(addr, DEV_IPV4, i + 10);
netlink_add_addr4(&nlmsg, sock, devices[i].name, addr);
if (!devices[i].noipv6) {
sprintf(addr, DEV_IPV6, i + 10);
netlink_add_addr6(&nlmsg, sock, devices[i].name, addr);
}
uint64_t macaddr = DEV_MAC + ((i + 10ull) << 40);
netlink_device_change(&nlmsg, sock, devices[i].name, true, 0, &macaddr,
devices[i].macsize, NULL);
}
close(sock);
}
/* Configure devices that exist per init-net-namespace only (netrom and
 * rose hamradio devices): per-proc addresses and MACs sized to each
 * device type's hardware address length. */
static void initialize_netdevices_init(void)
{
int sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
if (sock == -1)
exit(1);
struct {
const char* type;
int macsize;
bool noipv6;
bool noup;
} devtypes[] = {
{"nr", 7, true},
{"rose", 5, true, true},
};
unsigned i;
for (i = 0; i < sizeof(devtypes) / sizeof(devtypes[0]); i++) {
char dev[32], addr[32];
/* Device name and address are parameterized by proc id so parallel
 * procs don't collide. */
sprintf(dev, "%s%d", devtypes[i].type, (int)procid);
sprintf(addr, "172.30.%d.%d", i, (int)procid + 1);
netlink_add_addr4(&nlmsg, sock, dev, addr);
if (!devtypes[i].noipv6) {
sprintf(addr, "fe88::%02x:%02x", i, (int)procid + 1);
netlink_add_addr6(&nlmsg, sock, dev, addr);
}
int macsize = devtypes[i].macsize;
/* Encode the type index and proc id into the top two MAC bytes. */
uint64_t macaddr = 0xbbbbbb +
((unsigned long long)i << (8 * (macsize - 2))) +
(procid << (8 * (macsize - 1)));
netlink_device_change(&nlmsg, sock, dev, !devtypes[i].noup, 0, &macaddr,
macsize, NULL);
}
close(sock);
}
static int read_tun(char* data, int size)
{
  /* Read one packet from the tun fd.
   * Returns -1 when the device is unavailable or no data is pending;
   * any other read error is fatal. */
  if (tunfd < 0)
    return -1;
  int n = read(tunfd, data, size);
  if (n >= 0)
    return n;
  /* EBADFD: the fuzzed program may have mangled the fd. */
  if (errno == EAGAIN || errno == EBADFD)
    return -1;
  exit(1);
}
static void flush_tun()
{
  /* Drain every pending packet so stale traffic does not leak into the
   * next fuzzed program. */
  char pkt[1000];
  int got;
  do {
    got = read_tun(&pkt[0], sizeof(pkt));
  } while (got != -1);
}
#define MAX_FDS 30
/* Upper bounds for the cached netfilter rulesets below. */
#define XT_TABLE_SIZE 1536
#define XT_MAX_ENTRIES 10
/* Local mirrors of the x_tables / ip_tables uapi structs (subset of
 * linux/netfilter_ipv4/ip_tables.h); entrytable is an opaque blob. */
struct xt_counters {
uint64_t pcnt, bcnt;
};
struct ipt_getinfo {
char name[32];
unsigned int valid_hooks;
unsigned int hook_entry[5];
unsigned int underflow[5];
unsigned int num_entries;
unsigned int size;
};
struct ipt_get_entries {
char name[32];
unsigned int size;
uint64_t entrytable[XT_TABLE_SIZE / sizeof(uint64_t)];
};
struct ipt_replace {
char name[32];
unsigned int valid_hooks;
unsigned int num_entries;
unsigned int size;
unsigned int hook_entry[5];
unsigned int underflow[5];
unsigned int num_counters;
struct xt_counters* counters;
uint64_t entrytable[XT_TABLE_SIZE / sizeof(uint64_t)];
};
/* One checkpointed iptables table: its info plus a replayable replace. */
struct ipt_table_desc {
const char* name;
struct ipt_getinfo info;
struct ipt_replace replace;
};
static struct ipt_table_desc ipv4_tables[] = {
{.name = "filter"}, {.name = "nat"}, {.name = "mangle"},
{.name = "raw"}, {.name = "security"},
};
static struct ipt_table_desc ipv6_tables[] = {
{.name = "filter"}, {.name = "nat"}, {.name = "mangle"},
{.name = "raw"}, {.name = "security"},
};
/* get/setsockopt numbers from linux/netfilter_ipv4/ip_tables.h. */
#define IPT_BASE_CTL 64
#define IPT_SO_SET_REPLACE (IPT_BASE_CTL)
#define IPT_SO_GET_INFO (IPT_BASE_CTL)
#define IPT_SO_GET_ENTRIES (IPT_BASE_CTL + 1)
/* Local mirrors of the arp_tables uapi structs (subset of
 * linux/netfilter_arp/arp_tables.h); 3 hooks instead of 5. */
struct arpt_getinfo {
char name[32];
unsigned int valid_hooks;
unsigned int hook_entry[3];
unsigned int underflow[3];
unsigned int num_entries;
unsigned int size;
};
struct arpt_get_entries {
char name[32];
unsigned int size;
uint64_t entrytable[XT_TABLE_SIZE / sizeof(uint64_t)];
};
struct arpt_replace {
char name[32];
unsigned int valid_hooks;
unsigned int num_entries;
unsigned int size;
unsigned int hook_entry[3];
unsigned int underflow[3];
unsigned int num_counters;
struct xt_counters* counters;
uint64_t entrytable[XT_TABLE_SIZE / sizeof(uint64_t)];
};
struct arpt_table_desc {
const char* name;
struct arpt_getinfo info;
struct arpt_replace replace;
};
static struct arpt_table_desc arpt_tables[] = {
{.name = "filter"},
};
/* get/setsockopt numbers from linux/netfilter_arp/arp_tables.h. */
#define ARPT_BASE_CTL 96
#define ARPT_SO_SET_REPLACE (ARPT_BASE_CTL)
#define ARPT_SO_GET_INFO (ARPT_BASE_CTL)
#define ARPT_SO_GET_ENTRIES (ARPT_BASE_CTL + 1)
/* Snapshot the current ruleset of every table in 'tables' via
 * IPT_SO_GET_INFO/IPT_SO_GET_ENTRIES and cache it in table->replace so
 * reset_iptables() can later restore it verbatim.
 * family/level select IPv4 (AF_INET/SOL_IP) or IPv6 (AF_INET6/SOL_IPV6). */
static void checkpoint_iptables(struct ipt_table_desc* tables, int num_tables,
int family, int level)
{
int fd = socket(family, SOCK_STREAM, IPPROTO_TCP);
if (fd == -1) {
switch (errno) {
case EAFNOSUPPORT:
case ENOPROTOOPT:
/* netfilter/this family not compiled in: nothing to checkpoint. */
return;
}
exit(1);
}
for (int i = 0; i < num_tables; i++) {
struct ipt_table_desc* table = &tables[i];
strcpy(table->info.name, table->name);
strcpy(table->replace.name, table->name);
socklen_t optlen = sizeof(table->info);
if (getsockopt(fd, level, IPT_SO_GET_INFO, &table->info, &optlen)) {
switch (errno) {
case EPERM:
case ENOENT:
case ENOPROTOOPT:
/* table missing or inaccessible: skip it. */
continue;
}
exit(1);
}
/* Sanity: the cached buffers must be able to hold the ruleset. */
if (table->info.size > sizeof(table->replace.entrytable))
exit(1);
if (table->info.num_entries > XT_MAX_ENTRIES)
exit(1);
struct ipt_get_entries entries;
memset(&entries, 0, sizeof(entries));
strcpy(entries.name, table->name);
entries.size = table->info.size;
optlen = sizeof(entries) - sizeof(entries.entrytable) + table->info.size;
if (getsockopt(fd, level, IPT_SO_GET_ENTRIES, &entries, &optlen))
exit(1);
/* Build a ready-to-send ipt_replace from the snapshot. */
table->replace.valid_hooks = table->info.valid_hooks;
table->replace.num_entries = table->info.num_entries;
table->replace.size = table->info.size;
memcpy(table->replace.hook_entry, table->info.hook_entry,
sizeof(table->replace.hook_entry));
memcpy(table->replace.underflow, table->info.underflow,
sizeof(table->replace.underflow));
memcpy(table->replace.entrytable, entries.entrytable, table->info.size);
}
close(fd);
}
/* Restore every checkpointed table: if the live table info or entry
 * blob differs from the snapshot taken by checkpoint_iptables(), replay
 * the saved ruleset with IPT_SO_SET_REPLACE. */
static void reset_iptables(struct ipt_table_desc* tables, int num_tables,
int family, int level)
{
int fd = socket(family, SOCK_STREAM, IPPROTO_TCP);
if (fd == -1) {
switch (errno) {
case EAFNOSUPPORT:
case ENOPROTOOPT:
return;
}
exit(1);
}
for (int i = 0; i < num_tables; i++) {
struct ipt_table_desc* table = &tables[i];
/* valid_hooks == 0 means the table was never checkpointed. */
if (table->info.valid_hooks == 0)
continue;
struct ipt_getinfo info;
memset(&info, 0, sizeof(info));
strcpy(info.name, table->name);
socklen_t optlen = sizeof(info);
if (getsockopt(fd, level, IPT_SO_GET_INFO, &info, &optlen))
exit(1);
/* If info still matches, compare entry blobs; skip the (expensive)
 * replace when nothing changed. */
if (memcmp(&table->info, &info, sizeof(table->info)) == 0) {
struct ipt_get_entries entries;
memset(&entries, 0, sizeof(entries));
strcpy(entries.name, table->name);
entries.size = table->info.size;
optlen = sizeof(entries) - sizeof(entries.entrytable) + entries.size;
if (getsockopt(fd, level, IPT_SO_GET_ENTRIES, &entries, &optlen))
exit(1);
if (memcmp(table->replace.entrytable, entries.entrytable,
table->info.size) == 0)
continue;
}
/* Counters are required by the API but their values are discarded. */
struct xt_counters counters[XT_MAX_ENTRIES];
table->replace.num_counters = info.num_entries;
table->replace.counters = counters;
optlen = sizeof(table->replace) - sizeof(table->replace.entrytable) +
table->replace.size;
if (setsockopt(fd, level, IPT_SO_SET_REPLACE, &table->replace, optlen))
exit(1);
}
close(fd);
}
/* Snapshot the arptables ruleset (same scheme as checkpoint_iptables(),
 * but with the ARPT_* socket options and 3-hook structs). */
static void checkpoint_arptables(void)
{
int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (fd == -1) {
switch (errno) {
case EAFNOSUPPORT:
case ENOPROTOOPT:
return;
}
exit(1);
}
for (unsigned i = 0; i < sizeof(arpt_tables) / sizeof(arpt_tables[0]); i++) {
struct arpt_table_desc* table = &arpt_tables[i];
strcpy(table->info.name, table->name);
strcpy(table->replace.name, table->name);
socklen_t optlen = sizeof(table->info);
if (getsockopt(fd, SOL_IP, ARPT_SO_GET_INFO, &table->info, &optlen)) {
switch (errno) {
case EPERM:
case ENOENT:
case ENOPROTOOPT:
continue;
}
exit(1);
}
/* Sanity: the cached buffers must be able to hold the ruleset. */
if (table->info.size > sizeof(table->replace.entrytable))
exit(1);
if (table->info.num_entries > XT_MAX_ENTRIES)
exit(1);
struct arpt_get_entries entries;
memset(&entries, 0, sizeof(entries));
strcpy(entries.name, table->name);
entries.size = table->info.size;
optlen = sizeof(entries) - sizeof(entries.entrytable) + table->info.size;
if (getsockopt(fd, SOL_IP, ARPT_SO_GET_ENTRIES, &entries, &optlen))
exit(1);
/* Build a ready-to-send arpt_replace from the snapshot. */
table->replace.valid_hooks = table->info.valid_hooks;
table->replace.num_entries = table->info.num_entries;
table->replace.size = table->info.size;
memcpy(table->replace.hook_entry, table->info.hook_entry,
sizeof(table->replace.hook_entry));
memcpy(table->replace.underflow, table->info.underflow,
sizeof(table->replace.underflow));
memcpy(table->replace.entrytable, entries.entrytable, table->info.size);
}
close(fd);
}
/* Restore the arptables ruleset from the snapshot taken by
 * checkpoint_arptables() if anything changed (mirror of reset_iptables()). */
static void reset_arptables()
{
int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (fd == -1) {
switch (errno) {
case EAFNOSUPPORT:
case ENOPROTOOPT:
return;
}
exit(1);
}
for (unsigned i = 0; i < sizeof(arpt_tables) / sizeof(arpt_tables[0]); i++) {
struct arpt_table_desc* table = &arpt_tables[i];
/* valid_hooks == 0 means the table was never checkpointed. */
if (table->info.valid_hooks == 0)
continue;
struct arpt_getinfo info;
memset(&info, 0, sizeof(info));
strcpy(info.name, table->name);
socklen_t optlen = sizeof(info);
if (getsockopt(fd, SOL_IP, ARPT_SO_GET_INFO, &info, &optlen))
exit(1);
/* If info still matches, compare entry blobs; skip the replace when
 * nothing changed. */
if (memcmp(&table->info, &info, sizeof(table->info)) == 0) {
struct arpt_get_entries entries;
memset(&entries, 0, sizeof(entries));
strcpy(entries.name, table->name);
entries.size = table->info.size;
optlen = sizeof(entries) - sizeof(entries.entrytable) + entries.size;
if (getsockopt(fd, SOL_IP, ARPT_SO_GET_ENTRIES, &entries, &optlen))
exit(1);
if (memcmp(table->replace.entrytable, entries.entrytable,
table->info.size) == 0)
continue;
} else {
}
/* Counters are required by the API but their values are discarded. */
struct xt_counters counters[XT_MAX_ENTRIES];
table->replace.num_counters = info.num_entries;
table->replace.counters = counters;
optlen = sizeof(table->replace) - sizeof(table->replace.entrytable) +
table->replace.size;
if (setsockopt(fd, SOL_IP, ARPT_SO_SET_REPLACE, &table->replace, optlen))
exit(1);
}
close(fd);
}
/* ebtables (bridge netfilter) uapi mirror: constants and structs from
 * linux/netfilter_bridge/ebtables.h (subset). */
#define NF_BR_NUMHOOKS 6
#define EBT_TABLE_MAXNAMELEN 32
#define EBT_CHAIN_MAXNAMELEN 32
#define EBT_BASE_CTL 128
#define EBT_SO_SET_ENTRIES (EBT_BASE_CTL)
#define EBT_SO_GET_INFO (EBT_BASE_CTL)
#define EBT_SO_GET_ENTRIES (EBT_SO_GET_INFO + 1)
#define EBT_SO_GET_INIT_INFO (EBT_SO_GET_ENTRIES + 1)
#define EBT_SO_GET_INIT_ENTRIES (EBT_SO_GET_INIT_INFO + 1)
struct ebt_replace {
char name[EBT_TABLE_MAXNAMELEN];
unsigned int valid_hooks;
unsigned int nentries;
unsigned int entries_size;
/* In-kernel pointers into 'entries'; rebuilt before every set. */
struct ebt_entries* hook_entry[NF_BR_NUMHOOKS];
unsigned int num_counters;
struct ebt_counter* counters;
char* entries;
};
struct ebt_entries {
unsigned int distinguisher;
char name[EBT_CHAIN_MAXNAMELEN];
unsigned int counter_offset;
int policy;
unsigned int nentries;
/* Flexible trailing rule data, aligned like ebt_replace. */
char data[0] __attribute__((aligned(__alignof__(struct ebt_replace))));
};
/* One checkpointed ebtables table plus its cached entry blob. */
struct ebt_table_desc {
const char* name;
struct ebt_replace replace;
char entrytable[XT_TABLE_SIZE];
};
static struct ebt_table_desc ebt_tables[] = {
{.name = "filter"},
{.name = "nat"},
{.name = "broute"},
};
/* Snapshot the pristine ("init") ruleset of each ebtables table via
 * EBT_SO_GET_INIT_INFO/ENTRIES so reset_ebtables() can restore it. */
static void checkpoint_ebtables(void)
{
int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (fd == -1) {
switch (errno) {
case EAFNOSUPPORT:
case ENOPROTOOPT:
return;
}
exit(1);
}
for (size_t i = 0; i < sizeof(ebt_tables) / sizeof(ebt_tables[0]); i++) {
struct ebt_table_desc* table = &ebt_tables[i];
strcpy(table->replace.name, table->name);
socklen_t optlen = sizeof(table->replace);
if (getsockopt(fd, SOL_IP, EBT_SO_GET_INIT_INFO, &table->replace,
&optlen)) {
switch (errno) {
case EPERM:
case ENOENT:
case ENOPROTOOPT:
/* table missing or inaccessible: skip it. */
continue;
}
exit(1);
}
/* Sanity: the cached blob must be able to hold the ruleset. */
if (table->replace.entries_size > sizeof(table->entrytable))
exit(1);
table->replace.num_counters = 0;
table->replace.entries = table->entrytable;
optlen = sizeof(table->replace) + table->replace.entries_size;
if (getsockopt(fd, SOL_IP, EBT_SO_GET_INIT_ENTRIES, &table->replace,
&optlen))
exit(1);
}
close(fd);
}
/* Restore each checkpointed ebtables table if it changed. The kernel
 * returns hook_entry as pointers into its own buffer, so the saved
 * pointers are zeroed before comparing and rebuilt (as offsets into our
 * cached entrytable) before the EBT_SO_SET_ENTRIES call. */
static void reset_ebtables()
{
int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (fd == -1) {
switch (errno) {
case EAFNOSUPPORT:
case ENOPROTOOPT:
return;
}
exit(1);
}
for (unsigned i = 0; i < sizeof(ebt_tables) / sizeof(ebt_tables[0]); i++) {
struct ebt_table_desc* table = &ebt_tables[i];
/* valid_hooks == 0 means the table was never checkpointed. */
if (table->replace.valid_hooks == 0)
continue;
struct ebt_replace replace;
memset(&replace, 0, sizeof(replace));
strcpy(replace.name, table->name);
socklen_t optlen = sizeof(replace);
if (getsockopt(fd, SOL_IP, EBT_SO_GET_INFO, &replace, &optlen))
exit(1);
replace.num_counters = 0;
/* Neutralize pointer fields so memcmp compares only real metadata. */
table->replace.entries = 0;
for (unsigned h = 0; h < NF_BR_NUMHOOKS; h++)
table->replace.hook_entry[h] = 0;
if (memcmp(&table->replace, &replace, sizeof(table->replace)) == 0) {
char entrytable[XT_TABLE_SIZE];
memset(&entrytable, 0, sizeof(entrytable));
replace.entries = entrytable;
optlen = sizeof(replace) + replace.entries_size;
if (getsockopt(fd, SOL_IP, EBT_SO_GET_ENTRIES, &replace, &optlen))
exit(1);
if (memcmp(table->entrytable, entrytable, replace.entries_size) == 0)
continue;
}
/* Point each valid hook at its chain header inside our cached blob. */
for (unsigned j = 0, h = 0; h < NF_BR_NUMHOOKS; h++) {
if (table->replace.valid_hooks & (1 << h)) {
table->replace.hook_entry[h] =
(struct ebt_entries*)table->entrytable + j;
j++;
}
}
table->replace.entries = table->entrytable;
optlen = sizeof(table->replace) + table->replace.entries_size;
if (setsockopt(fd, SOL_IP, EBT_SO_SET_ENTRIES, &table->replace, optlen))
exit(1);
}
close(fd);
}
static void checkpoint_net_namespace(void)
{
  /* Snapshot all netfilter state of the current net namespace so it can
   * be restored by reset_net_namespace() after each program. */
  const int n_v4 = sizeof(ipv4_tables) / sizeof(ipv4_tables[0]);
  const int n_v6 = sizeof(ipv6_tables) / sizeof(ipv6_tables[0]);
  checkpoint_ebtables();
  checkpoint_arptables();
  checkpoint_iptables(ipv4_tables, n_v4, AF_INET, SOL_IP);
  checkpoint_iptables(ipv6_tables, n_v6, AF_INET6, SOL_IPV6);
}
static void reset_net_namespace(void)
{
  /* Undo whatever netfilter changes the last program made, restoring the
   * state captured by checkpoint_net_namespace(). */
  const int n_v4 = sizeof(ipv4_tables) / sizeof(ipv4_tables[0]);
  const int n_v6 = sizeof(ipv6_tables) / sizeof(ipv6_tables[0]);
  reset_ebtables();
  reset_arptables();
  reset_iptables(ipv4_tables, n_v4, AF_INET, SOL_IP);
  reset_iptables(ipv6_tables, n_v6, AF_INET6, SOL_IPV6);
}
static void setup_cgroups()
{
  /* Best-effort creation and mounting of the three cgroup hierarchies
   * used by the fuzzer. Every step may legitimately fail (already
   * mounted, feature missing), so errors are deliberately ignored. */
  if (mkdir("/syzcgroup", 0777) != 0) {
  }
  /* cgroup v2 unified hierarchy. */
  if (mkdir("/syzcgroup/unified", 0777) != 0) {
  }
  if (mount("none", "/syzcgroup/unified", "cgroup2", 0, NULL) != 0) {
  }
  if (chmod("/syzcgroup/unified", 0777) != 0) {
  }
  write_file("/syzcgroup/unified/cgroup.subtree_control",
             "+cpu +memory +io +pids +rdma");
  /* cgroup v1 cpu-related controllers. */
  if (mkdir("/syzcgroup/cpu", 0777) != 0) {
  }
  if (mount("none", "/syzcgroup/cpu", "cgroup", 0,
            "cpuset,cpuacct,perf_event,hugetlb") != 0) {
  }
  write_file("/syzcgroup/cpu/cgroup.clone_children", "1");
  write_file("/syzcgroup/cpu/cpuset.memory_pressure_enabled", "1");
  if (chmod("/syzcgroup/cpu", 0777) != 0) {
  }
  /* cgroup v1 network-related controllers. */
  if (mkdir("/syzcgroup/net", 0777) != 0) {
  }
  if (mount("none", "/syzcgroup/net", "cgroup", 0,
            "net_cls,net_prio,devices,freezer") != 0) {
  }
  if (chmod("/syzcgroup/net", 0777) != 0) {
  }
}
static void setup_cgroups_loop()
{
  /* Move the current (per-proc loop) process into its own cgroup in
   * each hierarchy and apply pid/memory limits. Failures are ignored:
   * cgroups may be unavailable on this kernel. */
  char path[128];
  char dir[64];
  int self = getpid();
  /* Unified hierarchy: limits + membership. */
  snprintf(dir, sizeof(dir), "/syzcgroup/unified/syz%llu", procid);
  if (mkdir(dir, 0777)) {
  }
  snprintf(path, sizeof(path), "%s/pids.max", dir);
  write_file(path, "32");
  snprintf(path, sizeof(path), "%s/memory.low", dir);
  write_file(path, "%d", 298 << 20);
  snprintf(path, sizeof(path), "%s/memory.high", dir);
  write_file(path, "%d", 299 << 20);
  snprintf(path, sizeof(path), "%s/memory.max", dir);
  write_file(path, "%d", 300 << 20);
  snprintf(path, sizeof(path), "%s/cgroup.procs", dir);
  write_file(path, "%d", self);
  /* cpu hierarchy: membership only. */
  snprintf(dir, sizeof(dir), "/syzcgroup/cpu/syz%llu", procid);
  if (mkdir(dir, 0777)) {
  }
  snprintf(path, sizeof(path), "%s/cgroup.procs", dir);
  write_file(path, "%d", self);
  /* net hierarchy: membership only. */
  snprintf(dir, sizeof(dir), "/syzcgroup/net/syz%llu", procid);
  if (mkdir(dir, 0777)) {
  }
  snprintf(path, sizeof(path), "%s/cgroup.procs", dir);
  write_file(path, "%d", self);
}
/* From inside the per-iteration working directory, create symlinks to
 * this process's cgroup directories so the generated test can open them
 * as ./cgroup, ./cgroup.cpu and ./cgroup.net.  Errors are ignored
 * (links may exist already). */
static void setup_cgroups_test()
{
  char cgroupdir[64];
  snprintf(cgroupdir, sizeof(cgroupdir), "/syzcgroup/unified/syz%llu", procid);
  if (symlink(cgroupdir, "./cgroup")) {
  }
  snprintf(cgroupdir, sizeof(cgroupdir), "/syzcgroup/cpu/syz%llu", procid);
  if (symlink(cgroupdir, "./cgroup.cpu")) {
  }
  snprintf(cgroupdir, sizeof(cgroupdir), "/syzcgroup/net/syz%llu", procid);
  if (symlink(cgroupdir, "./cgroup.net")) {
  }
}
/* One-time setup shared by all sandbox types: mount fusectl (failure
 * ignored; may be absent or already mounted) and prepare cgroups. */
static void setup_common()
{
  if (mount(0, "/sys/fs/fuse/connections", "fusectl", 0, 0)) {
  }
  setup_cgroups();
}
static void loop();
/* Common sandboxing for the fuzzing process: die with the parent, detach
 * into a new session, bound resource usage via rlimits, and unshare
 * private namespaces.  Unshare/mount failures are ignored because the
 * kernel may lack the feature or the process may lack privileges. */
static void sandbox_common()
{
  prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
  setsid();
  struct rlimit rlim;
  rlim.rlim_cur = rlim.rlim_max = (200 << 20); /* 200MB address space */
  setrlimit(RLIMIT_AS, &rlim);
  rlim.rlim_cur = rlim.rlim_max = 32 << 20;
  setrlimit(RLIMIT_MEMLOCK, &rlim);
  rlim.rlim_cur = rlim.rlim_max = 136 << 20;
  setrlimit(RLIMIT_FSIZE, &rlim);
  rlim.rlim_cur = rlim.rlim_max = 1 << 20;
  setrlimit(RLIMIT_STACK, &rlim);
  rlim.rlim_cur = rlim.rlim_max = 0; /* no core dumps */
  setrlimit(RLIMIT_CORE, &rlim);
  rlim.rlim_cur = rlim.rlim_max = 256;
  setrlimit(RLIMIT_NOFILE, &rlim);
  if (unshare(CLONE_NEWNS)) {
  }
  /* Make all mounts private so test mounts don't propagate outside. */
  if (mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL)) {
  }
  if (unshare(CLONE_NEWIPC)) {
  }
  if (unshare(0x02000000)) { /* CLONE_NEWCGROUP; raw value, may predate libc */
  }
  if (unshare(CLONE_NEWUTS)) {
  }
  if (unshare(CLONE_SYSVSEM)) {
  }
  /* Raise SysV IPC limits so IPC syscalls have room to operate. */
  typedef struct {
    const char* name;
    const char* value;
  } sysctl_t;
  static const sysctl_t sysctls[] = {
      {"/proc/sys/kernel/shmmax", "16777216"},
      {"/proc/sys/kernel/shmall", "536870912"},
      {"/proc/sys/kernel/shmmni", "1024"},
      {"/proc/sys/kernel/msgmax", "8192"},
      {"/proc/sys/kernel/msgmni", "1024"},
      {"/proc/sys/kernel/msgmnb", "1024"},
      {"/proc/sys/kernel/sem", "1024 1048576 500 1024"},
  };
  unsigned i;
  for (i = 0; i < sizeof(sysctls) / sizeof(sysctls[0]); i++)
    write_file(sysctls[i].name, sysctls[i].value);
}
/* Block until the sandbox child `pid` exits and return its exit status.
 * A negative pid means fork() already failed.  __WALL also reaps clone
 * children regardless of their termination signal settings. */
static int wait_for_loop(int pid)
{
  if (pid < 0)
    exit(1);
  int status = 0;
  while (waitpid(-1, &status, __WALL) != pid) {
  }
  return WEXITSTATUS(status);
}
/* Drop CAP_SYS_PTRACE and CAP_SYS_NICE from the current process so the
 * test cannot ptrace arbitrary processes or change priorities.  Uses raw
 * capget/capset syscalls to avoid a libcap dependency. */
static void drop_caps(void)
{
  struct __user_cap_header_struct cap_hdr = {};
  struct __user_cap_data_struct cap_data[2] = {};
  cap_hdr.version = _LINUX_CAPABILITY_VERSION_3;
  cap_hdr.pid = getpid();
  if (syscall(SYS_capget, &cap_hdr, &cap_data))
    exit(1);
  const int drop = (1 << CAP_SYS_PTRACE) | (1 << CAP_SYS_NICE);
  cap_data[0].effective &= ~drop;
  cap_data[0].permitted &= ~drop;
  cap_data[0].inheritable &= ~drop;
  if (syscall(SYS_capset, &cap_hdr, &cap_data))
    exit(1);
}
/* "none" sandbox: fork a child into fresh pid/net namespaces, set up the
 * environment there and run the fuzzing loop; the parent only waits for
 * the child and returns its exit status. */
static int do_sandbox_none(void)
{
  if (unshare(CLONE_NEWPID)) {
  }
  int pid = fork();
  if (pid != 0)
    return wait_for_loop(pid);
  /* Child: pid 1 of the new pid namespace from here on. */
  setup_common();
  sandbox_common();
  drop_caps();
  initialize_netdevices_init();
  if (unshare(CLONE_NEWNET)) {
  }
  initialize_tun();
  initialize_netdevices();
  loop();
  exit(1); /* loop() never returns */
}
#define FS_IOC_SETFLAGS _IOW('f', 2, long)
/* Recursively delete `dir`, coping with the mess a fuzzed test can leave
 * behind: lazily unmounts anything stacked on files or the directory,
 * clears immutable/append-only inode flags on EPERM, retries EBUSY, and
 * restarts from scratch on ENOTEMPTY (files can reappear concurrently).
 * Exits the process on unrecoverable errors. */
static void remove_dir(const char* dir)
{
  int iter = 0;
  DIR* dp = 0;
retry:
  /* Peel off any mounts on the directory itself first. */
  while (umount2(dir, MNT_DETACH) == 0) {
  }
  dp = opendir(dir);
  if (dp == NULL) {
    if (errno == EMFILE) {
      exit(1);
    }
    exit(1);
  }
  struct dirent* ep = 0;
  while ((ep = readdir(dp))) {
    if (strcmp(ep->d_name, ".") == 0 || strcmp(ep->d_name, "..") == 0)
      continue;
    char filename[FILENAME_MAX];
    snprintf(filename, sizeof(filename), "%s/%s", dir, ep->d_name);
    while (umount2(filename, MNT_DETACH) == 0) {
    }
    struct stat st;
    if (lstat(filename, &st))
      exit(1);
    if (S_ISDIR(st.st_mode)) {
      remove_dir(filename);
      continue;
    }
    int i;
    for (i = 0;; i++) {
      if (unlink(filename) == 0)
        break;
      if (errno == EPERM) {
        /* Possibly an immutable/append-only file: clear its inode flags
         * and retry the unlink. */
        int fd = open(filename, O_RDONLY);
        if (fd != -1) {
          long flags = 0;
          if (ioctl(fd, FS_IOC_SETFLAGS, &flags) == 0) {
          }
          close(fd);
          continue;
        }
      }
      if (errno == EROFS) {
        break; /* read-only filesystem: leave the file behind */
      }
      if (errno != EBUSY || i > 100)
        exit(1);
      /* EBUSY: something is mounted on the file; detach and retry. */
      if (umount2(filename, MNT_DETACH))
        exit(1);
    }
  }
  closedir(dp);
  /* Remove the (hopefully now empty) directory itself, with the same
   * EPERM/EROFS/EBUSY recovery plus a full restart on ENOTEMPTY. */
  for (int i = 0;; i++) {
    if (rmdir(dir) == 0)
      break;
    if (i < 100) {
      if (errno == EPERM) {
        int fd = open(dir, O_RDONLY);
        if (fd != -1) {
          long flags = 0;
          if (ioctl(fd, FS_IOC_SETFLAGS, &flags) == 0) {
          }
          close(fd);
          continue;
        }
      }
      if (errno == EROFS) {
        break;
      }
      if (errno == EBUSY) {
        if (umount2(dir, MNT_DETACH))
          exit(1);
        continue;
      }
      if (errno == ENOTEMPTY) {
        if (iter < 100) {
          iter++;
          goto retry;
        }
      }
    }
    exit(1);
  }
}
/* Forcefully kill the test process group and reap `pid`.  If it does not
 * exit within ~100ms, abort all FUSE connections (a hung FUSE daemon can
 * make processes unkillable) and then wait indefinitely. */
static void kill_and_wait(int pid, int* status)
{
  kill(-pid, SIGKILL);
  kill(pid, SIGKILL);
  for (int i = 0; i < 100; i++) {
    if (waitpid(-1, status, WNOHANG | __WALL) == pid)
      return;
    usleep(1000);
  }
  /* Still alive: write to every fuse connection's abort file. */
  DIR* dir = opendir("/sys/fs/fuse/connections");
  if (dir) {
    for (;;) {
      struct dirent* ent = readdir(dir);
      if (!ent)
        break;
      if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0)
        continue;
      char abort[300];
      snprintf(abort, sizeof(abort), "/sys/fs/fuse/connections/%s/abort",
               ent->d_name);
      int fd = open(abort, O_WRONLY);
      if (fd == -1) {
        continue;
      }
      /* Writing any byte triggers the abort; the path buffer is reused
       * as a convenient source byte. */
      if (write(fd, abort, 1) < 0) {
      }
      close(fd);
    }
    closedir(dir);
  } else {
  }
  while (waitpid(-1, status, __WALL) != pid) {
  }
}
/* Per-loop setup: join the private cgroups and snapshot netfilter state
 * so it can be restored before each iteration. */
static void setup_loop()
{
  setup_cgroups_loop();
  checkpoint_net_namespace();
}
/* Per-iteration reset: restore the netfilter snapshot taken at startup. */
static void reset_loop()
{
  reset_net_namespace();
}
/* Per-test-process setup: die with the parent, become a process-group
 * leader (so the whole group can be killed at once), expose cgroup
 * symlinks, make this process the OOM killer's preferred victim, and
 * drain any pending tun traffic. */
static void setup_test()
{
  prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
  setpgrp();
  setup_cgroups_test();
  write_file("/proc/self/oom_score_adj", "1000");
  flush_tun();
}
/* Close every descriptor the test may have opened (3..MAX_FDS), keeping
 * stdin/stdout/stderr intact. */
static void close_fds()
{
  for (int fd = 3; fd < MAX_FDS; fd++)
    close(fd);
}
/* Register two binfmt_misc handlers (magic bytes 0x01/0x02 mapping to
 * ./file0) used by the generated program.  Mount failure is ignored
 * (may already be mounted or unsupported). */
static void setup_binfmt_misc()
{
  if (mount(0, "/proc/sys/fs/binfmt_misc", "binfmt_misc", 0, 0)) {
  }
  write_file("/proc/sys/fs/binfmt_misc/register", ":syz0:M:0:\x01::./file0:");
  write_file("/proc/sys/fs/binfmt_misc/register",
             ":syz1:M:1:\x02::./file0:POC");
}
/* Worker-thread bookkeeping for execute_one(); threads are created
 * lazily and hand work back and forth via the ready/done events. */
struct thread_t {
  int created, call; /* created: thread started; call: syscall index to run */
  event_t ready, done; /* ready: work assigned; done: work finished */
};

static struct thread_t threads[16];
static void execute_call(int call);
static int running; /* number of in-flight calls (updated atomically) */
/* Worker thread body: wait for a call to be assigned via th->ready,
 * execute it, decrement the in-flight counter, then signal completion
 * via th->done.  Loops forever; the trailing return is unreachable. */
static void* thr(void* arg)
{
  struct thread_t* th = (struct thread_t*)arg;
  for (;;) {
    event_wait(&th->ready);
    event_reset(&th->ready);
    execute_call(th->call);
    __atomic_fetch_sub(&running, 1, __ATOMIC_RELAXED);
    event_set(&th->done);
  }
  return 0;
}
/* Run the program's 9 generated syscalls once on worker threads, giving
 * each call up to 50ms, then run them again in "collide" mode where
 * every other call is issued without waiting to provoke kernel races. */
static void execute_one(void)
{
  int i, call, thread;
  int collide = 0;
again:
  for (call = 0; call < 9; call++) {
    for (thread = 0; thread < (int)(sizeof(threads) / sizeof(threads[0]));
         thread++) {
      struct thread_t* th = &threads[thread];
      if (!th->created) {
        /* Lazily start the worker the first time it is needed. */
        th->created = 1;
        event_init(&th->ready);
        event_init(&th->done);
        event_set(&th->done);
        thread_start(thr, th);
      }
      if (!event_isset(&th->done))
        continue; /* worker busy; try the next one */
      event_reset(&th->done);
      th->call = call;
      __atomic_fetch_add(&running, 1, __ATOMIC_RELAXED);
      event_set(&th->ready);
      if (collide && (call % 2) == 0)
        break; /* don't wait: let this call race with the next one */
      event_timedwait(&th->done, 50);
      break;
    }
  }
  /* Short grace period for stragglers, then reclaim descriptors. */
  for (i = 0; i < 100 && __atomic_load_n(&running, __ATOMIC_RELAXED); i++)
    sleep_ms(1);
  close_fds();
  if (!collide) {
    collide = 1;
    goto again;
  }
}
static void execute_one(void);
#define WAIT_FLAGS __WALL
/* Main fuzzing loop: for each iteration create a fresh numbered working
 * directory, fork a child to run the test there under a 5s watchdog,
 * then reap the child and clean the directory up. */
static void loop(void)
{
  setup_loop();
  int iter = 0;
  for (;; iter++) {
    char cwdbuf[32];
    sprintf(cwdbuf, "./%d", iter);
    if (mkdir(cwdbuf, 0777))
      exit(1);
    reset_loop();
    int pid = fork();
    if (pid < 0)
      exit(1);
    if (pid == 0) {
      /* Child: run one instance of the generated program. */
      if (chdir(cwdbuf))
        exit(1);
      setup_test();
      execute_one();
      exit(0);
    }
    int status = 0;
    uint64_t start = current_time_ms();
    for (;;) {
      if (waitpid(-1, &status, WNOHANG | WAIT_FLAGS) == pid)
        break;
      sleep_ms(1);
      if (current_time_ms() - start < 5000) {
        continue;
      }
      /* 5 second timeout: the test probably hung — kill it hard. */
      kill_and_wait(pid, &status);
      break;
    }
    remove_dir(cwdbuf);
  }
}
/* r[0] holds the tid captured by gettid (call 3), consumed by the
 * ptrace/tkill calls below. */
uint64_t r[1] = {0x0};
/* Execute one of the reproducer's generated syscalls.  The numeric
 * constants are machine-generated by syzkaller and not meant to be
 * edited by hand; annotations are best-effort. */
void execute_call(int call)
{
  intptr_t res = 0;
  switch (call) {
  case 0:
    NONFAILING(memcpy((void*)0x20000100, ":}@\000", 4));
    syscall(__NR_ioctl, -1, 0x40082406, 0x20000100ul);
    break;
  case 1:
    /* NOTE(review): 0x59616d61 ("Yama") matches PR_SET_PTRACER — confirm. */
    syscall(__NR_prctl, 0x59616d61ul, -1, 0, 0, 0);
    break;
  case 2:
    syscall(__NR_clone, 0x100ul, 0ul, 0x9999999999999999ul, 0ul, -1ul);
    break;
  case 3:
    res = syscall(__NR_gettid);
    if (res != -1)
      r[0] = res; /* remember our tid for the calls below */
    break;
  case 4:
    syscall(__NR_wait4, 0, 0ul, 0x80000002ul, 0ul);
    break;
  case 5:
    syscall(__NR_ptrace, 0x4206ul, r[0], 0ul, 5ul);
    break;
  case 6:
    syscall(__NR_tkill, r[0], 0x34);
    break;
  case 7:
    syscall(__NR_ptrace, 0x18ul, r[0], 0ul, 0ul);
    break;
  case 8:
    syscall(__NR_ptrace, 0xdul, r[0], 0ul, 0x20000080ul);
    break;
  }
}
/* Entry point: map the fixed address ranges the generated program uses
 * for syscall arguments, register binfmt handlers, install the SEGV
 * handler, move into a temp dir, and run under the "none" sandbox. */
int main(void)
{
  syscall(__NR_mmap, 0x1ffff000ul, 0x1000ul, 0ul, 0x32ul, -1, 0ul);
  syscall(__NR_mmap, 0x20000000ul, 0x1000000ul, 7ul, 0x32ul, -1, 0ul);
  syscall(__NR_mmap, 0x21000000ul, 0x1000ul, 0ul, 0x32ul, -1, 0ul);
  setup_binfmt_misc();
  install_segv_handler();
  use_temporary_dir();
  do_sandbox_none();
  return 0;
}
|
the_stack_data/103266226.c
|
/*
Reto Acepta534: Tras el festival
Mayor diferencia entre dos cubos, tras colocarlos para minimizar diferencias
Entrada de ejemplo
4
43 40 41 42
6
22 22 20 25 26 27
0
Salida de ejemplo
1
3
*/
// Nacho
#include <stdio.h>
#include <stdlib.h> /* Para qsort */
/*
 * qsort comparator for ints, ascending order.
 * Returns <0, 0 or >0 as *a is less than, equal to, or greater than *b.
 * Uses explicit comparisons instead of `*a - *b`: the subtraction can
 * overflow for operands of opposite sign, which is undefined behavior
 * and yields a wrong ordering.
 */
int funcionComparacion(const void * a, const void * b)
{
    int x = *(const int*)a;
    int y = *(const int*)b;
    return (x > y) - (x < y);
}
/* Sort `cant` ints in `datos` into ascending order using the shared
 * qsort comparator. */
void nSort(int datos[], int cant)
{
    qsort(datos, (size_t)cant, sizeof datos[0], funcionComparacion);
}
/*
 * Acepta el Reto 534: read groups of cube weights until a 0 count is
 * seen.  For each group, sort the weights, pair consecutive cubes (the
 * pairing that minimizes differences) and print the largest difference
 * within any pair.
 */
int main()
{
    int datos, i;
    do
    {
        scanf("%d", &datos);
        if (datos > 0)
        {
            int pesos[datos]; /* VLA sized by the input count */
            for(i = 0; i < datos; i++)
            {
                scanf("%d", &pesos[i]);
            }
            nSort(pesos, datos);
            int diferencia = 0;
            /* Adjacent pairs of the sorted array minimize gaps; keep
             * the worst (largest) pair difference. */
            for(i = 0; i < datos-1; i+=2)
            {
                if (pesos[i+1] - pesos[i] > diferencia)
                    diferencia = pesos[i+1] - pesos[i];
            }
            printf("%d\n", diferencia);
        }
    }
    while (datos > 0);
    return 0;
}
|
the_stack_data/254073.c
|
/*
* Exercise 1-8
* Write a program to count blanks, tabs, and newlines.
*/
#include <stdio.h>
/*
 * K&R Exercise 1-8: count blanks, tabs, and newlines read from stdin
 * until EOF, then print the three counts.
 *
 * Fixes over the original: `main()` relied on implicit int (invalid
 * since C99), and a return value is now provided.
 */
int main(void) {
    int c;         /* current character; int so it can hold EOF */
    int nlc, tc, bc;

    nlc = tc = bc = 0;
    while ((c = getchar()) != EOF) {
        if (c == '\n') nlc++;
        if (c == '\t') tc++;
        if (c == ' ') bc++;
    }
    printf("\n");
    printf("\\n : %d\n", nlc);
    printf("\\t : %d\n", tc);
    printf("\' \': %d\n", bc);
    return 0;
}
|
the_stack_data/192330432.c
|
#include<stdlib.h>
#include<assert.h>
typedef struct _s {
int i;
} s;
/* Static-analyzer regression fixture: l is overwritten with K->i (== 1),
 * so the assertion l == 17 is expected to FAIL — that failure is the
 * point of the test (see the expected-output comment below).  Do not
 * "fix" the assert; the malloc result is deliberately unchecked. */
int main(){
  int l = 17;
  s * K = malloc(sizeof(s));
  K->i = 1;
  l = K->i;
  assert(l == 17); // FAIL
  return 0;
}
/*
>>> update_offset: indexing on non-struct type (02-index-nonstruct.c:10)
>>> eval_offset: indexing on non-struct type (02-index-nonstruct.c:12)
*/
|
the_stack_data/89201626.c
|
#include <stdio.h>
#include <string.h>
#include <unistd.h>
/*
 * Demo: run /bin/ls in a child with its stdout redirected into a pipe,
 * and have the parent read and echo each line prefixed with "HOGE : ".
 *
 * Fixes over the original: execve() was called with argv == NULL, which
 * is non-portable (programs expect argv[0] to exist, and recent Linux
 * kernels reject an empty argv); the parent also now closes its stream
 * so the pipe fd is released on all paths.
 */
int
main() {
    int p[2];
    pid_t pid;
    char buf[1024];

    memset(buf, 0, sizeof(buf));
    if (pipe(p) != 0) {
        perror("pipe");
        return 1;
    }
    pid = fork();
    if (pid < 0) {
        perror("fork");
        return 1;
    }
    if (pid == 0) {
        /* Child: send ls output into the write end of the pipe. */
        char *child_argv[] = { "/bin/ls", NULL };
        printf("child process : my id=%d\n", getpid());
        close(p[0]);
        dup2(p[1], fileno(stdout));
        if (execve("/bin/ls", child_argv, NULL) < 0) {
            perror("exec");
            return 1;
        }
    } else {
        FILE* filep;
        close(p[1]);
        printf("parent process : child process id=%d\n", pid);
        filep = fdopen(p[0], "r");
        if (filep == NULL) {
            perror("fdopen");
            return 1;
        }
        while (fgets(buf, sizeof(buf), filep) != NULL) {
            printf("HOGE : %s", buf);
        }
        fclose(filep); /* releases the read end of the pipe */
    }
    return 0;
}
|
the_stack_data/34512750.c
|
/* Fixture for a custom compiler/interpreter's pointer semantics tests.
 * NOTE(review): this is NOT standard C — assert() takes a message
 * argument, abs() is applied to floating-point values, and the
 * `++e + ++e` / `e++ + ++e` expressions are unsequenced (formally UB in
 * ISO C); the expected values encode the target implementation's
 * particular evaluation order.  Leave the assertions as-is. */
void main()
{
  int a = 9, *b, e = 1;
  float c = 3.14, *d;
  b = &a;
  assert(*b == 9, "*b must be 9");
  (*b)++;
  assert(*b == 10, "*b must be 10");
  ++*b;
  assert(*b == 11, "*b must be 1");
  d = &c;
  assert(abs(*d - 3.14) < 0.000001, "*d must be 3.14");
  (*d)++;
  assert(abs(*d - 4.14) < 0.000001, "*d must be 4.14");
  ++*d;
  assert(abs(*d - 5.14) < 0.000001, "*d must be 5.14");
  a = ++e + ++e;
  assert(a == 5, "a must be 5");
  assert(e == 3, "e must be 3");
  a = e++ + ++e;
  assert(a == 8, "a must be 8");
  assert(e == 5, "e must be 5");
}
|
the_stack_data/634191.c
|
#include <stdio.h>
int n1,n2;
void exchange();
/* Read two integers into the globals n1/n2, print them, swap them via
 * exchange(), and print them again in swapped order. */
int main(){
    scanf("%d %d", &n1,&n2);
    printf("%d %d\n",n1,n2);
    exchange();
    printf("%d %d",n1,n2);
    return 0;
}
/* Swap the values of the global variables n1 and n2 in place. */
void exchange(){
    int aux = n2;
    n2 = n1;
    n1 = aux;
}
|
the_stack_data/723320.c
|
/* Copyright (C) 1989, 2000 Aladdin Enterprises. All rights reserved. */
/*$Id: ansi2knr.c,v 1.14 2003/09/06 05:36:56 eggert Exp $*/
/* Convert ANSI C function definitions to K&R ("traditional C") syntax */
/*
ansi2knr is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY. No author or distributor accepts responsibility to anyone for the
consequences of using it or for whether it serves any particular purpose or
works at all, unless he says so in writing. Refer to the GNU General Public
License (the "GPL") for full details.
Everyone is granted permission to copy, modify and redistribute ansi2knr,
but only under the conditions described in the GPL. A copy of this license
is supposed to have been given to you along with ansi2knr so you can know
your rights and responsibilities. It should be in a file named COPYLEFT,
or, if there is no file named COPYLEFT, a file named COPYING. Among other
things, the copyright notice and this notice must be preserved on all
copies.
We explicitly state here what we believe is already implied by the GPL: if
the ansi2knr program is distributed as a separate set of sources and a
separate executable file which are aggregated on a storage medium together
with another program, this in itself does not bring the other program under
the GPL, nor does the mere fact that such a program or the procedures for
constructing it invoke the ansi2knr executable bring any other part of the
program under the GPL.
*/
/*
* Usage:
ansi2knr [--filename FILENAME] [INPUT_FILE [OUTPUT_FILE]]
* --filename provides the file name for the #line directive in the output,
* overriding input_file (if present).
* If no input_file is supplied, input is read from stdin.
* If no output_file is supplied, output goes to stdout.
* There are no error messages.
*
* ansi2knr recognizes function definitions by seeing a non-keyword
* identifier at the left margin, followed by a left parenthesis, with a
* right parenthesis as the last character on the line, and with a left
* brace as the first token on the following line (ignoring possible
* intervening comments and/or preprocessor directives), except that a line
* consisting of only
* identifier1(identifier2)
* will not be considered a function definition unless identifier2 is
* the word "void", and a line consisting of
* identifier1(identifier2, <<arbitrary>>)
* will not be considered a function definition.
* ansi2knr will recognize a multi-line header provided that no intervening
* line ends with a left or right brace or a semicolon. These algorithms
* ignore whitespace, comments, and preprocessor directives, except that
* the function name must be the first thing on the line. The following
* constructs will confuse it:
* - Any other construct that starts at the left margin and
* follows the above syntax (such as a macro or function call).
* - Some macros that tinker with the syntax of function headers.
*/
/*
* The original and principal author of ansi2knr is L. Peter Deutsch
* <[email protected]>. Other authors are noted in the change history
* that follows (in reverse chronological order):
lpd 2000-04-12 backs out Eggert's changes because of bugs:
- concatlits didn't declare the type of its bufend argument;
- concatlits didn't recognize when it was inside a comment;
- scanstring could scan backward past the beginning of the string; when
- the check for \ + newline in scanstring was unnecessary.
2000-03-05 Paul Eggert <[email protected]>
Add support for concatenated string literals.
* ansi2knr.c (concatlits): New decl.
(main): Invoke concatlits to concatenate string literals.
(scanstring): Handle backslash-newline correctly. Work with
character constants. Fix bug when scanning backwards through
backslash-quote. Check for unterminated strings.
(convert1): Parse character constants, too.
(appendline, concatlits): New functions.
* ansi2knr.1: Document this.
lpd 1999-08-17 added code to allow preprocessor directives
wherever comments are allowed
lpd 1999-04-12 added minor fixes from Pavel Roskin
<[email protected]> for clean compilation with
gcc -W -Wall
lpd 1999-03-22 added hack to recognize lines consisting of
identifier1(identifier2, xxx) as *not* being procedures
lpd 1999-02-03 made indentation of preprocessor commands consistent
lpd 1999-01-28 fixed two bugs: a '/' in an argument list caused an
endless loop; quoted strings within an argument list
confused the parser
lpd 1999-01-24 added a check for write errors on the output,
suggested by Jim Meyering <[email protected]>
lpd 1998-11-09 added further hack to recognize identifier(void)
as being a procedure
lpd 1998-10-23 added hack to recognize lines consisting of
identifier1(identifier2) as *not* being procedures
lpd 1997-12-08 made input_file optional; only closes input and/or
output file if not stdin or stdout respectively; prints
usage message on stderr rather than stdout; adds
--filename switch (changes suggested by
<[email protected]>)
lpd 1996-01-21 added code to cope with not HAVE_CONFIG_H and with
compilers that don't understand void, as suggested by
Tom Lane
lpd 1996-01-15 changed to require that the first non-comment token
on the line following a function header be a left brace,
to reduce sensitivity to macros, as suggested by Tom Lane
<[email protected]>
lpd 1995-06-22 removed #ifndefs whose sole purpose was to define
undefined preprocessor symbols as 0; changed all #ifdefs
for configuration symbols to #ifs
lpd 1995-04-05 changed copyright notice to make it clear that
including ansi2knr in a program does not bring the entire
program under the GPL
lpd 1994-12-18 added conditionals for systems where ctype macros
don't handle 8-bit characters properly, suggested by
Francois Pinard <[email protected]>;
removed --varargs switch (this is now the default)
lpd 1994-10-10 removed CONFIG_BROKETS conditional
lpd 1994-07-16 added some conditionals to help GNU `configure',
suggested by Francois Pinard <[email protected]>;
properly erase prototype args in function parameters,
contributed by Jim Avera <[email protected]>;
correct error in writeblanks (it shouldn't erase EOLs)
lpd 1989-xx-xx original version
*/
/* Most of the conditionals here are to make ansi2knr work with */
/* or without the GNU configure machinery. */
#if HAVE_CONFIG_H
# include <config.h>
#endif
#include <stdio.h>
#include <ctype.h>
#if HAVE_CONFIG_H
/*
For properly autoconfiguring ansi2knr, use AC_CONFIG_HEADER(config.h).
This will define HAVE_CONFIG_H and so, activate the following lines.
*/
# if STDC_HEADERS || HAVE_STRING_H
# include <string.h>
# else
# include <strings.h>
# endif
#else /* not HAVE_CONFIG_H */
/* Otherwise do it the hard way */
# ifdef BSD
# include <strings.h>
# else
# ifdef VMS
extern int strlen(), strncmp();
# else
# include <string.h>
# endif
# endif
#endif /* not HAVE_CONFIG_H */
#if STDC_HEADERS
# include <stdlib.h>
#else
/*
malloc and free should be declared in stdlib.h,
but if you've got a K&R compiler, they probably aren't.
*/
# ifdef MSDOS
# include <malloc.h>
# else
# ifdef VMS
extern char *malloc();
extern void free();
# else
extern char *malloc();
extern int free();
# endif
# endif
#endif
/* Define NULL (for *very* old compilers). */
#ifndef NULL
# define NULL (0)
#endif
/*
* The ctype macros don't always handle 8-bit characters correctly.
* Compensate for this here.
*/
#ifdef isascii
# undef HAVE_ISASCII /* just in case */
# define HAVE_ISASCII 1
#else
#endif
#if STDC_HEADERS || !HAVE_ISASCII
# define is_ascii(c) 1
#else
# define is_ascii(c) isascii(c)
#endif
#define is_space(c) (is_ascii(c) && isspace(c))
#define is_alpha(c) (is_ascii(c) && isalpha(c))
#define is_alnum(c) (is_ascii(c) && isalnum(c))
/* Scanning macros */
#define isidchar(ch) (is_alnum(ch) || (ch) == '_')
#define isidfirstchar(ch) (is_alpha(ch) || (ch) == '_')
/* Forward references */
char *ppdirforward();
char *ppdirbackward();
char *skipspace();
char *scanstring();
int writeblanks();
int test1();
int convert1();
/* The main program */
/*
 * Driver: parse switches, open the input/output streams, then read the
 * input line by line, accumulating lines in `buf` until test1() can
 * classify what has been read; recognized function headers are rewritten
 * by convert1(), everything else is copied through unchanged.
 */
int
main(argc, argv)
    int argc;
    char *argv[];
{   FILE *in = stdin;
    FILE *out = stdout;
    char *filename = 0;
    char *program_name = argv[0];
    char *output_name = 0;
#define bufsize 5000 /* arbitrary size */
    char *buf;
    char *line;
    char *more;
    char *usage =
      "Usage: ansi2knr [--filename FILENAME] [INPUT_FILE [OUTPUT_FILE]]\n";
    /*
     * In previous versions, ansi2knr recognized a --varargs switch.
     * If this switch was supplied, ansi2knr would attempt to convert
     * a ... argument to va_alist and va_dcl; if this switch was not
     * supplied, ansi2knr would simply drop any such arguments.
     * Now, ansi2knr always does this conversion, and we only
     * check for this switch for backward compatibility.
     */
    int convert_varargs = 1;
    int output_error;

    /* Consume leading switches (--varargs is accepted but a no-op). */
    while ( argc > 1 && argv[1][0] == '-' ) {
	if ( !strcmp(argv[1], "--varargs") ) {
	    convert_varargs = 1;
	    argc--;
	    argv++;
	    continue;
	}
	if ( !strcmp(argv[1], "--filename") && argc > 2 ) {
	    filename = argv[2];
	    argc -= 2;
	    argv += 2;
	    continue;
	}
	fprintf(stderr, "%s: Unrecognized switch: %s\n", program_name,
		argv[1]);
	fprintf(stderr, usage);
	exit(1);
    }
    /* Bind input/output streams from the remaining positional args. */
    switch ( argc )
       {
    default:
	fprintf(stderr, usage);
	exit(0);
    case 3:
	output_name = argv[2];
	out = fopen(output_name, "w");
	if ( out == NULL ) {
	    fprintf(stderr, "%s: Cannot open output file %s\n",
		    program_name, output_name);
	    exit(1);
	}
	/* falls through */
    case 2:
	in = fopen(argv[1], "r");
	if ( in == NULL ) {
	    fprintf(stderr, "%s: Cannot open input file %s\n",
		    program_name, argv[1]);
	    exit(1);
	}
	if ( filename == 0 )
	    filename = argv[1];
	/* falls through */
    case 1:
	break;
       }
    if ( filename )
	fprintf(out, "#line 1 \"%s\"\n", filename);
    buf = malloc(bufsize);
    if ( buf == NULL )
       {
	fprintf(stderr, "Unable to allocate read buffer!\n");
	exit(1);
       }
    line = buf;
    /* Main loop: `buf` holds the pending (possibly multi-line) text,
     * `line` points past what has been read so far. */
    while ( fgets(line, (unsigned)(buf + bufsize - line), in) != NULL )
       {
test:	line += strlen(line);
	switch ( test1(buf) )
	   {
	case 2:			/* a function header */
	    convert1(buf, out, 1, convert_varargs);
	    break;
	case 1:			/* a function */
	    /* Check for a { at the start of the next line. */
	    more = ++line;
f:	    if ( line >= buf + (bufsize - 1) ) /* overflow check */
		goto wl;
	    if ( fgets(line, (unsigned)(buf + bufsize - line), in) == NULL )
		goto wl;
	    switch ( *skipspace(ppdirforward(more), 1) )
	       {
	       case '{':
		  /* Definitely a function header. */
		  convert1(buf, out, 0, convert_varargs);
		  fputs(more, out);
		  break;
	       case 0:
		  /* The next line was blank or a comment: */
		  /* keep scanning for a non-comment. */
		  line += strlen(line);
		  goto f;
	       default:
		  /* buf isn't a function header, but */
		  /* more might be. */
		  fputs(buf, out);
		  strcpy(buf, more);
		  line = buf;
		  goto test;
	       }
	    break;
	case -1:		/* maybe the start of a function */
	    if ( line != buf + (bufsize - 1) ) /* overflow check */
		continue;
	    /* falls through */
	default:		/* not a function */
wl:	    fputs(buf, out);
	    break;
	   }
	line = buf;
       }
    if ( line != buf )
	fputs(buf, out);
    free(buf);
    /* Detect and report write errors before declaring success. */
    if ( output_name ) {
	output_error = ferror(out);
	output_error |= fclose(out);
    } else {		/* out == stdout */
	fflush(out);
	output_error = ferror(out);
    }
    if ( output_error ) {
	fprintf(stderr, "%s: error writing to %s\n", program_name,
		(output_name ? output_name : "stdout"));
	exit(1);
    }
    if ( in != stdin )
	fclose(in);
    return 0;
}
/*
* Skip forward or backward over one or more preprocessor directives.
*/
/*
 * Advance forward over any preprocessor directive lines (lines whose
 * first character is '#') and return a pointer to the first character
 * after them.  If the buffer ends inside a directive, returns a pointer
 * to the terminating NUL instead.
 */
char *
ppdirforward(p)
    char *p;
{
    while (*p == '#') {
	while (*p != '\r' && *p != '\n') {
	    if (*p == 0)
		return p;
	    ++p;
	}
	/* Treat a CRLF pair as a single line terminator. */
	if (*p == '\r' && p[1] == '\n')
	    ++p;
	++p;
    }
    return p;
}
/*
 * Starting at p, skip backward over complete preprocessor directive
 * lines, never moving before `limit`.  Returns the position just before
 * the run of directives, or p itself when the current line is not one.
 */
char *
ppdirbackward(p, limit)
    char *p;
    char *limit;
{
    char *np = p;

    for (;; p = --np) {
	/* Step over a CRLF pair as a single line terminator. */
	if (*np == '\n' && np[-1] == '\r')
	    --np;
	/* Back up to the start of the current line (or to the limit). */
	for (; np > limit && np[-1] != '\r' && np[-1] != '\n'; --np)
	    if (np[-1] == 0)
		return np;
	/* If this line isn't a directive, the previous p was the answer. */
	if (*np != '#')
	    return p;
    }
}
/*
* Skip over whitespace, comments, and preprocessor directives,
* in either direction.
*/
/*
 * Skip over whitespace and comments, moving forward (dir == 1) or
 * backward (dir == -1).  Returns the first position that is neither
 * whitespace nor inside a comment; an unterminated comment stops at the
 * string terminator.
 */
char *
skipspace(p, dir)
    char *p;
    int dir;			/* 1 for forward, -1 for backward */
{
    for (;;) {
	while (is_space(*p))
	    p += dir;
	/* A comment opener in the scan direction: slash then star. */
	if (*p != '/' || p[dir] != '*')
	    return p;
	p += dir;
	p += dir;
	/* Scan to the matching closer: star then slash. */
	while (*p != '*' || p[dir] != '/') {
	    if (*p == 0)
		return p;	/* unterminated comment */
	    p += dir;
	}
	p += dir;
	p += dir;
    }
}
/* Scan over a quoted string, in either direction. */
/*
 * Scan over a double-quoted string in either direction (dir is +1 or
 * -1).  p points at the opening quote; the return value points one
 * position past the matching unescaped closing quote, in the scan
 * direction.
 */
char *
scanstring(p, dir)
    char *p;
    int dir;
{
    do {
	p += dir;
    } while (!(*p == '"' && p[-dir] != '\\'));
    return p + dir;
}
/*
* Write blanks over part of a string.
* Don't overwrite end-of-line characters.
*/
/*
 * Overwrite the characters in [start, end) with spaces, preserving any
 * CR/LF characters so line structure (and #line numbering) is kept.
 * Always returns 0.
 */
int
writeblanks(start, end)
    char *start;
    char *end;
{
    char *cp = start;

    while (cp < end) {
	if (*cp != '\r' && *cp != '\n')
	    *cp = ' ';
	cp++;
    }
    return 0;
}
/*
* Test whether the string in buf is a function definition.
* The string may contain and/or end with a newline.
* Return as follows:
* 0 - definitely not a function definition;
* 1 - definitely a function definition;
* 2 - definitely a function prototype (NOT USED);
* -1 - may be the beginning of a function definition,
* append another line and look again.
* The reason we don't attempt to convert function prototypes is that
* Ghostscript's declaration-generating macros look too much like
* prototypes, and confuse the algorithms.
*/
/* Classify the text accumulated in buf: 0 = not a function definition,
 * 1 = definitely one, -1 = possibly the start of one (read more input).
 * See the block comment above for the full contract. */
int
test1(buf)
    char *buf;
{   char *p = buf;
    char *bend;		/* last significant (non-space/comment) char */
    char *endfn;	/* just past the candidate function name */
    int contin;		/* provisional classification result */

    if ( !isidfirstchar(*p) )
	return 0;		/* no name at left margin */
    bend = skipspace(ppdirbackward(buf + strlen(buf) - 1, buf), -1);
    switch ( *bend )
       {
       case ';': contin = 0 /*2*/; break;
       case ')': contin = 1; break;
       case '{': return 0;		/* not a function */
       case '}': return 0;		/* not a function */
       default: contin = -1;
       }
    while ( isidchar(*p) )
	p++;
    endfn = p;
    p = skipspace(p, 1);
    if ( *p++ != '(' )
	return 0;		/* not a function */
    p = skipspace(p, 1);
    if ( *p == ')' )
	return 0;		/* no parameters */
    /* Check that the apparent function name isn't a keyword. */
    /* We only need to check for keywords that could be followed */
    /* by a left parenthesis (which, unfortunately, is most of them). */
       {	static char *words[] =
	   {	"asm", "auto", "case", "char", "const", "double",
		"extern", "float", "for", "if", "int", "long",
		"register", "return", "short", "signed", "sizeof",
		"static", "switch", "typedef", "unsigned",
		"void", "volatile", "while", 0
	   };
	char **key = words;
	char *kp;
	unsigned len = endfn - buf;

	while ( (kp = *key) != 0 )
	   {	if ( strlen(kp) == len && !strncmp(kp, buf, len) )
		    return 0;	/* name is a keyword */
		key++;
	   }
       }
       {	char *id = p;
	int len;
	/*
	 * Check for identifier1(identifier2) and not
	 * identifier1(void), or identifier1(identifier2, xxxx).
	 */

	while ( isidchar(*p) )
	    p++;
	len = p - id;
	p = skipspace(p, 1);
	if (*p == ',' ||
	    (*p == ')' && (len != 4 || strncmp(id, "void", 4)))
	    )
	    return 0;		/* not a function */
       }
    /*
     * If the last significant character was a ), we need to count
     * parentheses, because it might be part of a formal parameter
     * that is a procedure.
     */
    if (contin > 0) {
	int level = 0;

	for (p = skipspace(buf, 1); *p; p = skipspace(p + 1, 1))
	    level += (*p == '(' ? 1 : *p == ')' ? -1 : 0);
	if (level > 0)
	    contin = -1;	/* unbalanced so far: need more input */
    }
    return contin;
}
/* Convert a recognized function definition or header to K&R syntax. */
/* Rewrite one recognized ANSI definition/header in `buf` to K&R form on
 * `out`.  header != 0 emits a declaration ("...);"); otherwise emits the
 * K&R parameter list plus old-style declarations.  convert_varargs != 0
 * turns a trailing "..." into va_alist/va_dcl.  Returns 0 on success,
 * -1 if the break table could not be allocated (buf is then copied
 * through unmodified). */
int
convert1(buf, out, header, convert_varargs)
    char *buf;
    FILE *out;
    int header;			/* Boolean */
    int convert_varargs;	/* Boolean */
{   char *endfn;
    char *p;
    /*
     * The breaks table contains pointers to the beginning and end
     * of each argument.
     */
    char **breaks;
    unsigned num_breaks = 2;	/* for testing */
    char **btop;
    char **bp;
    char **ap;
    char *vararg = 0;

    /* Pre-ANSI implementations don't agree on whether strchr */
    /* is called strchr or index, so we open-code it here. */
    for ( endfn = buf; *(endfn++) != '('; )
	;
top:	p = endfn;
    breaks = (char **)malloc(sizeof(char *) * num_breaks * 2);
    if ( breaks == NULL )
       {	/* Couldn't allocate break table, give up */
	fprintf(stderr, "Unable to allocate break table!\n");
	fputs(buf, out);
	return -1;
       }
    btop = breaks + num_breaks * 2 - 2;
    bp = breaks;
    /* Parse the argument list */
    do
       {	int level = 0;
	char *lp = NULL;
	char *rp = NULL;
	char *end = NULL;

	if ( bp >= btop )
	   {	/* Filled up break table. */
		/* Allocate a bigger one and start over. */
		free((char *)breaks);
		num_breaks <<= 1;
		goto top;
	   }
	*bp++ = p;
	/* Find the end of the argument */
	for ( ; end == NULL; p++ )
	   {	switch(*p)
		   {
		   case ',':
			if ( !level ) end = p;
			break;
		   case '(':
			if ( !level ) lp = p;
			level++;
			break;
		   case ')':
			if ( --level < 0 ) end = p;
			else rp = p;
			break;
		   case '/':
			if (p[1] == '*')
			    p = skipspace(p, 1) - 1;
			break;
		   case '"':
			p = scanstring(p, 1) - 1;
			break;
		   default:
			;
		   }
	   }
	/* Erase any embedded prototype parameters. */
	if ( lp && rp )
	    writeblanks(lp + 1, rp);
	p--;			/* back up over terminator */
	/* Find the name being declared. */
	/* This is complicated because of procedure and */
	/* array modifiers. */
	for ( ; ; )
	   {	p = skipspace(p - 1, -1);
		switch ( *p )
		   {
		   case ']':	/* skip array dimension(s) */
		   case ')':	/* skip procedure args OR name */
		       {	int level = 1;
			while ( level )
			 switch ( *--p )
			   {
			   case ']': case ')':
				level++;
				break;
			   case '[': case '(':
				level--;
				break;
			   case '/':
				if (p > buf && p[-1] == '*')
				    p = skipspace(p, -1) + 1;
				break;
			   case '"':
				p = scanstring(p, -1) + 1;
				break;
			   default: ;
			   }
		       }
		       if ( *p == '(' && *skipspace(p + 1, 1) == '*' )
			  {	/* We found the name being declared */
			   while ( !isidfirstchar(*p) )
			       p = skipspace(p, 1) + 1;
			   goto found;
			  }
		       break;
		   default:
		       goto found;
		   }
	   }
found:	if ( *p == '.' && p[-1] == '.' && p[-2] == '.' )
	   {	if ( convert_varargs )
		   {	*bp++ = "va_alist";
			vararg = p-2;
		   }
	    else
	       {	p++;
		if ( bp == breaks + 1 )	/* sole argument */
		    writeblanks(breaks[0], p);
		else
		    writeblanks(bp[-1] - 1, p);
		bp--;
	       }
	   }
	else
	   {	while ( isidchar(*p) ) p--;
	    *bp++ = p+1;
	   }
	p = end;
       }
    while ( *p++ == ',' );
    *bp = p;
    /* Make a special check for 'void' arglist */
    if ( bp == breaks+2 )
       {	p = skipspace(breaks[0], 1);
	if ( !strncmp(p, "void", 4) )
	   {	p = skipspace(p+4, 1);
		if ( p == breaks[2] - 1 )
		   {	bp = breaks;	/* yup, pretend arglist is empty */
			writeblanks(breaks[0], p + 1);
		   }
	   }
       }
    /* Put out the function name and left parenthesis. */
    p = buf;
    while ( p != endfn ) putc(*p, out), p++;
    /* Put out the declaration. */
    if ( header )
       {	fputs(");", out);
	/* Preserve the original line breaks for #line accuracy. */
	for ( p = breaks[0]; *p; p++ )
	    if ( *p == '\r' || *p == '\n' )
		putc(*p, out);
       }
    else
       {	for ( ap = breaks+1; ap < bp; ap += 2 )
	   {	p = *ap;
		while ( isidchar(*p) )
		    putc(*p, out), p++;
		if ( ap < bp - 1 )
		    fputs(", ", out);
	   }
	fputs(") ", out);
	/* Put out the argument declarations */
	for ( ap = breaks+2; ap <= bp; ap += 2 )
	    (*ap)[-1] = ';';
	if ( vararg != 0 )
	   {	*vararg = 0;
		fputs(breaks[0], out);	/* any prior args */
		fputs("va_dcl", out);	/* the final arg */
		fputs(bp[0], out);
	   }
	else
	    fputs(breaks[0], out);
       }
    free((char *)breaks);
    return 0;
}
|
the_stack_data/44825.c
|
/* Bluetooth Mesh */
/*
* Copyright (c) 2017 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifdef CONFIG_BT_MESH_FRIEND
#include <stdint.h>
#include <zephyr.h>
#include <misc/byteorder.h>
#include <net/buf.h>
#include <bluetooth.h>
#include <api/mesh.h>
#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_MESH_DEBUG_FRIEND)
#include "common/log.h"
#include "mesh_crypto.h"
#include "adv.h"
#include "mesh.h"
#include "net.h"
#include "transport.h"
#include "access.h"
#include "foundation.h"
#include "friend.h"
#include <errno.h>
//#include "bt_mesh_custom_log.h"
#define FRIEND_BUF_SIZE (BT_MESH_ADV_DATA_SIZE - BT_MESH_NET_HDR_LEN)
/* We reserve one extra buffer for each friendship, since we need to be able
* to resend the last sent PDU, which sits separately outside of the queue.
*/
#define FRIEND_BUF_COUNT ((CONFIG_BT_MESH_FRIEND_QUEUE_SIZE + 1) * \
CONFIG_BT_MESH_FRIEND_LPN_COUNT)
#define FRIEND_ADV(buf) CONTAINER_OF(BT_MESH_ADV(buf), \
struct friend_adv, adv)
struct friend_pdu_info {
u16_t src;
u16_t dst;
u8_t seq[3];
u8_t ttl:7,
ctl:1;
u32_t iv_index;
};
NET_BUF_POOL_DEFINE(friend_buf_pool, FRIEND_BUF_COUNT,
BT_MESH_ADV_DATA_SIZE, BT_MESH_ADV_USER_DATA_SIZE, NULL);
static struct friend_adv {
struct bt_mesh_adv adv;
u64_t seq_auth;
} adv_pool[FRIEND_BUF_COUNT];
static struct bt_mesh_adv *adv_alloc(int id)
{
return &adv_pool[id].adv;
}
/* Free one pool buffer by dropping the oldest queued PDU from whichever
 * Friend context currently holds the most queued buffers.
 */
static void discard_buffer(void)
{
    struct bt_mesh_friend *victim = &bt_mesh.frnd[0];
    struct net_buf *buf;
    int i;

    /* Find the Friend context with the most queued buffers */
    for (i = 1; i < ARRAY_SIZE(bt_mesh.frnd); i++) {
        struct bt_mesh_friend *frnd = &bt_mesh.frnd[i];

        if (frnd->queue_size > victim->queue_size) {
            victim = frnd;
        }
    }

    buf = net_buf_slist_get(&victim->queue);
    __ASSERT_NO_MSG(buf != NULL);

    BT_WARN("Discarding buffer %p for LPN 0x%04x", buf, victim->lpn);
    net_buf_unref(buf);

    /* should reduce queue size when discard buffer*/
    victim->queue_size--;
}
/* Allocate a buffer for an LPN-bound PDU, evicting queued buffers from
 * the busiest friendship until the allocation succeeds.
 */
static struct net_buf *friend_buf_alloc(u16_t src)
{
    u8_t xmit = bt_mesh_net_transmit_get();
    struct net_buf *buf = NULL;

    BT_DBG("src 0x%04x", src);

    while (!buf) {
        buf = bt_mesh_adv_create_from_pool(&friend_buf_pool, adv_alloc,
                                           BT_MESH_ADV_DATA,
                                           BT_MESH_TRANSMIT_COUNT(xmit),
                                           BT_MESH_TRANSMIT_INT(xmit),
                                           K_NO_WAIT);
        if (!buf) {
            /* Pool exhausted: make room and retry */
            discard_buffer();
        }
    }

    BT_MESH_ADV(buf)->addr = src;
    FRIEND_ADV(buf)->seq_auth = TRANS_SEQ_AUTH_NVAL;

    BT_DBG("allocated buf %p", buf);

    return buf;
}
/* Look up a Friend context by LPN address, optionally restricted to a
 * specific NetKey index and to valid/established friendships only.
 * Returns NULL when no context matches.
 */
struct bt_mesh_friend *bt_mesh_friend_find(u16_t net_idx, u16_t lpn_addr,
                                           bool valid, bool established)
{
    int i;

    BT_DBG("net_idx 0x%04x lpn_addr 0x%04x", net_idx, lpn_addr);

    for (i = 0; i < ARRAY_SIZE(bt_mesh.frnd); i++) {
        struct bt_mesh_friend *frnd = &bt_mesh.frnd[i];

        if (frnd->lpn != lpn_addr) {
            continue;
        }

        if ((valid && !frnd->valid) ||
            (established && !frnd->established)) {
            continue;
        }

        if (net_idx == BT_MESH_KEY_ANY || frnd->net_idx == net_idx) {
            return frnd;
        }
    }

    return NULL;
}
/* Intentionally start a little bit late into the ReceiveWindow when
* it's large enough. This may improve reliability with some platforms,
* like the PTS, where the receiver might not have sufficiently compensated
* for internal latencies required to start scanning.
* Nordic 52832 have about 100ms delay in adv packet sending.
*/
static s32_t recv_delay(struct bt_mesh_friend *frnd)
{
#if CONFIG_BT_MESH_FRIEND_RECV_WIN > 50
return (s32_t)frnd->recv_delay + (CONFIG_BT_MESH_FRIEND_RECV_WIN / 5) - CONFIG_BOARD_ADV_DELAY;
#else
return frnd->recv_delay - CONFIG_BOARD_ADV_DELAY;
#endif
}
/* Tear down a friendship: cancel timers, delete friendship credentials,
 * release every queued/staged buffer and reset the context state so the
 * slot can be reused.
 */
static void friend_clear(struct bt_mesh_friend *frnd)
{
    int seg_idx;

    BT_DBG("LPN 0x%04x", frnd->lpn);

    k_delayed_work_cancel(&frnd->timer);

    friend_cred_del(frnd->net_idx, frnd->lpn);

    if (frnd->last) {
        /* Cancel the sending if necessary */
        if (frnd->pending_buf) {
            BT_MESH_ADV(frnd->last)->busy = 0;
        }

        net_buf_unref(frnd->last);
        frnd->last = NULL;
    }

    /* Drain the Friend Queue */
    while (!sys_slist_is_empty(&frnd->queue)) {
        net_buf_unref(net_buf_slist_get(&frnd->queue));
    }

    /* Drain every per-segment staging queue */
    for (seg_idx = 0; seg_idx < ARRAY_SIZE(frnd->seg); seg_idx++) {
        struct bt_mesh_friend_seg *seg = &frnd->seg[seg_idx];

        while (!sys_slist_is_empty(&seg->queue)) {
            net_buf_unref(net_buf_slist_get(&seg->queue));
        }
    }

    /* Reset the context to its pristine state */
    frnd->valid = 0;
    frnd->established = 0;
    frnd->pending_buf = 0;
    frnd->pending_req = 0;
    frnd->fsn = 0;
    frnd->queue_size = 0;
    memset(frnd->sub_list, 0, sizeof(frnd->sub_list));
}
/* Clear every friendship on the given subnet (or on all subnets when
 * net_idx is BT_MESH_KEY_ANY).
 */
void bt_mesh_friend_clear_net_idx(u16_t net_idx)
{
    int i;

    BT_DBG("net_idx 0x%04x", net_idx);

    for (i = 0; i < ARRAY_SIZE(bt_mesh.frnd); i++) {
        struct bt_mesh_friend *frnd = &bt_mesh.frnd[i];

        /* Unused contexts are marked with BT_MESH_KEY_UNUSED */
        if (frnd->net_idx == BT_MESH_KEY_UNUSED) {
            continue;
        }

        if (net_idx != BT_MESH_KEY_ANY && frnd->net_idx != net_idx) {
            continue;
        }

        friend_clear(frnd);
    }
}
/* Flag every friendship on the given subnet (or all subnets when
 * net_idx is BT_MESH_KEY_ANY) as needing a Friend Update with the new
 * security state; the update PDU is generated lazily on enqueue.
 */
void bt_mesh_friend_sec_update(u16_t net_idx)
{
    int i;

    BT_DBG("net_idx 0x%04x", net_idx);

    for (i = 0; i < ARRAY_SIZE(bt_mesh.frnd); i++) {
        struct bt_mesh_friend *frnd = &bt_mesh.frnd[i];

        /* Unused contexts are marked with BT_MESH_KEY_UNUSED */
        if (frnd->net_idx == BT_MESH_KEY_UNUSED) {
            continue;
        }

        if (net_idx != BT_MESH_KEY_ANY && frnd->net_idx != net_idx) {
            continue;
        }

        frnd->sec_update = 1;
    }
}
int bt_mesh_friend_clear(struct bt_mesh_net_rx *rx, struct net_buf_simple *buf)
{
struct bt_mesh_ctl_friend_clear *msg = (void *)buf->data;
struct bt_mesh_friend *frnd;
u16_t lpn_addr, lpn_counter;
struct bt_mesh_net_tx tx = {
.sub = rx->sub,
.ctx = &rx->ctx,
.src = bt_mesh_primary_addr(),
.xmit = bt_mesh_net_transmit_get(),
};
struct bt_mesh_ctl_friend_clear_confirm cfm;
if (buf->len < sizeof(*msg)) {
BT_WARN("Too short Friend Clear");
return -EINVAL;
}
lpn_addr = sys_be16_to_cpu(msg->lpn_addr);
lpn_counter = sys_be16_to_cpu(msg->lpn_counter);
BT_DBG("LPN addr 0x%04x counter 0x%04x", lpn_addr, lpn_counter);
frnd = bt_mesh_friend_find(rx->sub->net_idx, lpn_addr, false, false);
if (!frnd) {
BT_WARN("No matching LPN addr 0x%04x", lpn_addr);
return 0;
}
/* A Friend Clear message is considered valid if the result of the
* subtraction of the value of the LPNCounter field of the Friend
* Request message (the one that initiated the friendship) from the
* value of the LPNCounter field of the Friend Clear message, modulo
* 65536, is in the range 0 to 255 inclusive.
*/
if (lpn_counter - frnd->lpn_counter > 255) {
BT_WARN("LPN Counter out of range (old %u new %u)",
frnd->lpn_counter, lpn_counter);
return 0;
}
tx.ctx->send_ttl = BT_MESH_TTL_MAX;
cfm.lpn_addr = msg->lpn_addr;
cfm.lpn_counter = msg->lpn_counter;
bt_mesh_ctl_send(&tx, TRANS_CTL_OP_FRIEND_CLEAR_CFM, &cfm,
sizeof(cfm), NULL, NULL, NULL);
friend_clear(frnd);
return 0;
}
/* Add @addr to the LPN's subscription list, using the first free slot.
 * Logs a warning when the list is full (the address is then dropped).
 */
static void friend_sub_add(struct bt_mesh_friend *frnd, u16_t addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(frnd->sub_list); i++) {
        u16_t *slot = &frnd->sub_list[i];

        if (*slot == BT_MESH_ADDR_UNASSIGNED) {
            *slot = addr;
            return;
        }
    }

    BT_WARN("No space in friend subscription list");
}
/* Remove the first occurrence of @addr from the LPN's subscription
 * list; a miss is silently ignored.
 */
static void friend_sub_rem(struct bt_mesh_friend *frnd, u16_t addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(frnd->sub_list); i++) {
        u16_t *slot = &frnd->sub_list[i];

        if (*slot == addr) {
            *slot = BT_MESH_ADDR_UNASSIGNED;
            return;
        }
    }
}
/* Re-encode a PDU for an LPN's Friend Queue: rebuild the network header
 * from @info, append the transport SDU, then encrypt and obfuscate with
 * the appropriate credentials for this friendship.
 *
 * Returns a new buffer reference on success, NULL on failure.
 */
static struct net_buf *create_friend_pdu(struct bt_mesh_friend *frnd,
                                         struct friend_pdu_info *info,
                                         struct net_buf_simple *sdu)
{
    struct bt_mesh_subnet *sub;
    const u8_t *enc, *priv;
    struct net_buf *buf;
    u8_t nid;

    sub = bt_mesh_subnet_get(frnd->net_idx);
    __ASSERT_NO_MSG(sub != NULL);

    buf = friend_buf_alloc(info->src);

    /* Friend Offer needs master security credentials */
    if (info->ctl && TRANS_CTL_OP(sdu->data) == TRANS_CTL_OP_FRIEND_OFFER) {
        enc = sub->keys[sub->kr_flag].enc;
        priv = sub->keys[sub->kr_flag].privacy;
        nid = sub->keys[sub->kr_flag].nid;
    } else {
        /* Everything else uses the friendship credentials */
        if (friend_cred_get(sub, frnd->lpn, &nid, &enc, &priv)) {
            BT_ERR("friend_cred_get failed");
            goto failed;
        }
    }

    /* Network header: IVI|NID octet first */
    net_buf_add_u8(buf, (nid | (info->iv_index & 1) << 7));

    /* CTL|TTL octet: top bit marks a control PDU */
    if (info->ctl) {
        net_buf_add_u8(buf, info->ttl | 0x80);
    } else {
        net_buf_add_u8(buf, info->ttl);
    }

    /* 24-bit SEQ, then SRC and DST in big-endian */
    net_buf_add_mem(buf, info->seq, sizeof(info->seq));

    net_buf_add_be16(buf, info->src);
    net_buf_add_be16(buf, info->dst);

    net_buf_add_mem(buf, sdu->data, sdu->len);

    /* We re-encrypt and obfuscate using the received IVI rather than
     * the normal TX IVI (which may be different) since the transport
     * layer nonce includes the IVI.
     */
    if (bt_mesh_net_encrypt(enc, &buf->b, info->iv_index, false)) {
        BT_ERR("Re-encrypting failed");
        goto failed;
    }

    if (bt_mesh_net_obfuscate(buf->data, info->iv_index, priv)) {
        BT_ERR("Re-obfuscating failed");
        goto failed;
    }

    return buf;

failed:
    net_buf_unref(buf);
    return NULL;
}
/* Wrap a locally generated transport control payload in a network PDU
 * destined for the LPN, consuming one local sequence number.
 */
static struct net_buf *encode_friend_ctl(struct bt_mesh_friend *frnd,
                                         u8_t ctl_op,
                                         struct net_buf_simple *sdu)
{
    struct friend_pdu_info info;
    u32_t seq;

    BT_DBG("LPN 0x%04x", frnd->lpn);

    /* Prepend the transport control header */
    net_buf_simple_push_u8(sdu, TRANS_CTL_HDR(ctl_op, 0));

    info.src = bt_mesh_primary_addr();
    info.dst = frnd->lpn;

    info.ctl = 1;
    info.ttl = 0;

    /* Capture the current sequence number and advance it */
    seq = bt_mesh.seq++;
    info.seq[0] = seq >> 16;
    info.seq[1] = seq >> 8;
    info.seq[2] = seq;

    info.iv_index = BT_MESH_NET_IVI_TX;

    return create_friend_pdu(frnd, &info, sdu);
}
/* Build a Friend Update PDU carrying the subnet's current flags and IV
 * Index; @md tells the LPN whether more data is queued for it.
 */
static struct net_buf *encode_update(struct bt_mesh_friend *frnd, u8_t md)
{
    struct bt_mesh_ctl_friend_update *upd;
    struct net_buf_simple *sdu = NET_BUF_SIMPLE(1 + sizeof(*upd));
    struct bt_mesh_subnet *sub = bt_mesh_subnet_get(frnd->net_idx);

    __ASSERT_NO_MSG(sub != NULL);

    BT_DBG("lpn 0x%04x md 0x%02x", frnd->lpn, md);

    /* Reserve one byte of headroom for the transport control header */
    net_buf_simple_init(sdu, 1);

    upd = net_buf_simple_add(sdu, sizeof(*upd));
    upd->md = md;
    upd->flags = bt_mesh_net_flags(sub);
    upd->iv_index = sys_cpu_to_be32(bt_mesh.iv_index);

    return encode_friend_ctl(frnd, TRANS_CTL_OP_FRIEND_UPDATE, sdu);
}
/* Stage a Friend Subscription List Confirmation for the given
 * transaction number; it replaces any PDU pending retransmission.
 */
static void enqueue_sub_cfm(struct bt_mesh_friend *frnd, u8_t xact)
{
    struct bt_mesh_ctl_friend_sub_confirm *cfm;
    struct net_buf_simple *sdu = NET_BUF_SIMPLE(1 + sizeof(*cfm));
    struct net_buf *buf;

    BT_DBG("lpn 0x%04x xact 0x%02x", frnd->lpn, xact);

    /* One byte of headroom for the transport control header */
    net_buf_simple_init(sdu, 1);

    cfm = net_buf_simple_add(sdu, sizeof(*cfm));
    cfm->xact = xact;

    buf = encode_friend_ctl(frnd, TRANS_CTL_OP_FRIEND_SUB_CFM, sdu);
    if (buf == NULL) {
        BT_ERR("Unable to encode Subscription List Confirmation");
        return;
    }

    if (frnd->last != NULL) {
        BT_DBG("Discarding last PDU");
        net_buf_unref(frnd->last);
    }

    frnd->last = buf;
    frnd->send_last = 1;
}
static void friend_recv_delay(struct bt_mesh_friend *frnd)
{
frnd->pending_req = 1;
k_delayed_work_submit(&frnd->timer, recv_delay(frnd));
BT_DBG("Waiting RecvDelay of %d ms", recv_delay(frnd));
}
int bt_mesh_friend_sub_add(struct bt_mesh_net_rx *rx,
struct net_buf_simple *buf)
{
struct bt_mesh_friend *frnd;
u8_t xact;
if (buf->len < BT_MESH_FRIEND_SUB_MIN_LEN) {
BT_WARN("Too short Friend Subscription Add");
return -EINVAL;
}
frnd = bt_mesh_friend_find(rx->sub->net_idx, rx->ctx.addr, true, true);
if (!frnd) {
BT_WARN("No matching LPN addr 0x%04x", rx->ctx.addr);
return 0;
}
if (frnd->pending_buf) {
BT_WARN("Previous buffer not yet sent!");
return 0;
}
friend_recv_delay(frnd);
xact = net_buf_simple_pull_u8(buf);
while (buf->len >= 2) {
friend_sub_add(frnd, net_buf_simple_pull_be16(buf));
}
enqueue_sub_cfm(frnd, xact);
return 0;
}
int bt_mesh_friend_sub_rem(struct bt_mesh_net_rx *rx,
struct net_buf_simple *buf)
{
struct bt_mesh_friend *frnd;
u8_t xact;
if (buf->len < BT_MESH_FRIEND_SUB_MIN_LEN) {
BT_WARN("Too short Friend Subscription Remove");
return -EINVAL;
}
frnd = bt_mesh_friend_find(rx->sub->net_idx, rx->ctx.addr, true, true);
if (!frnd) {
BT_WARN("No matching LPN addr 0x%04x", rx->ctx.addr);
return 0;
}
if (frnd->pending_buf) {
BT_WARN("Previous buffer not yet sent!");
return 0;
}
friend_recv_delay(frnd);
xact = net_buf_simple_pull_u8(buf);
while (buf->len >= 2) {
friend_sub_rem(frnd, net_buf_simple_pull_be16(buf));
}
enqueue_sub_cfm(frnd, xact);
return 0;
}
/* Append @buf to the LPN's Friend Queue and account for it. */
static void enqueue_buf(struct bt_mesh_friend *frnd, struct net_buf *buf)
{
    net_buf_slist_put(&frnd->queue, buf);
    frnd->queue_size += 1;
}
/* Encode a Friend Update (with the given MD flag) and put it on the
 * Friend Queue; a pending security update is considered delivered.
 */
static void enqueue_update(struct bt_mesh_friend *frnd, u8_t md)
{
    struct net_buf *buf = encode_update(frnd, md);

    if (buf == NULL) {
        BT_ERR("Unable to encode Friend Update");
        return;
    }

    frnd->sec_update = 0;
    enqueue_buf(frnd, buf);
}
/* Handle a Friend Poll from an LPN: start the ReceiveDelay timer and,
 * based on the FSN, either retransmit the previous PDU or line up the
 * next one from the Friend Queue (friend_timeout() does the actual TX).
 *
 * Returns 0 (ignorable conditions included) or -EINVAL when malformed.
 */
int bt_mesh_friend_poll(struct bt_mesh_net_rx *rx, struct net_buf_simple *buf)
{
    struct bt_mesh_ctl_friend_poll *msg = (void *)buf->data;
    struct bt_mesh_friend *frnd;

    BT_DBG("%s: friend poll pkt received.", __func__);

    if (buf->len < sizeof(*msg)) {
        BT_WARN("Too short Friend Poll");
        return -EINVAL;
    }

    frnd = bt_mesh_friend_find(rx->sub->net_idx, rx->ctx.addr, true, false);
    if (!frnd) {
        BT_WARN("No matching LPN addr 0x%04x", rx->ctx.addr);
        return 0;
    }

    /* Only bit 0 (the FSN itself) may be set in the FSN field */
    if (msg->fsn & ~1) {
        BT_WARN("Prohibited (non-zero) padding bits");
        return -EINVAL;
    }

    if (frnd->pending_buf) {
        BT_WARN("Previous buffer not yet sent");
        return 0;
    }

    BT_DBG("msg->fsn %u frnd->fsn %u", (msg->fsn & 1), frnd->fsn);

    /* Response goes out after ReceiveDelay, via friend_timeout() */
    friend_recv_delay(frnd);

    /* The first Friend Poll completes friendship establishment */
    if (!frnd->established) {
        BT_DBG("Friendship established with 0x%04x", frnd->lpn);
        frnd->established = 1;
    }

    if (msg->fsn == frnd->fsn && frnd->last) {
        /* Unchanged FSN: the LPN did not receive the previous PDU */
        BT_DBG("Re-sending last PDU");
        frnd->send_last = 1;
    } else {
        /* FSN advanced: the previous PDU was received; release it */
        if (frnd->last) {
            net_buf_unref(frnd->last);
            frnd->last = NULL;
        }

        frnd->fsn = msg->fsn;

        /* Every Poll must be answered; pad an empty queue with a
         * Friend Update (MD = 0, i.e. "no more data").
         */
        if (sys_slist_is_empty(&frnd->queue)) {
            enqueue_update(frnd, 0);
            BT_DBG("Enqueued Friend Update to empty queue");
        }
    }

    return 0;
}
/* Find the Friend context whose Clear Procedure targets @prev_friend,
 * or NULL when no such procedure is in progress.
 */
static struct bt_mesh_friend *find_clear(u16_t prev_friend)
{
    int idx;

    for (idx = 0; idx < ARRAY_SIZE(bt_mesh.frnd); idx++) {
        struct bt_mesh_friend *frnd = &bt_mesh.frnd[idx];

        if (frnd->clear.frnd != prev_friend) {
            continue;
        }

        return frnd;
    }

    return NULL;
}
/* TX-complete callback for an outgoing Friend Clear: schedule the next
 * retransmission with a doubling back-off (1 s, 2 s, 4 s, ...) until
 * clear_timeout() decides the procedure has run long enough.
 */
static void friend_clear_sent(int err, void *user_data)
{
    struct bt_mesh_friend *frnd = user_data;

    k_delayed_work_submit(&frnd->clear.timer,
                          K_SECONDS(frnd->clear.repeat_sec));
    frnd->clear.repeat_sec *= 2;
}

/* Send callbacks used for Friend Clear transmissions */
static const struct bt_mesh_send_cb clear_sent_cb = {
    .end = friend_clear_sent,
};
/* Transmit one Friend Clear towards the LPN's previous friend over the
 * primary subnet; clear_sent_cb schedules the next retransmission.
 */
static void send_friend_clear(struct bt_mesh_friend *frnd)
{
    struct bt_mesh_msg_ctx ctx = {
        .net_idx = frnd->net_idx,
        .app_idx = BT_MESH_KEY_UNUSED,   /* device-key level control PDU */
        .addr = frnd->clear.frnd,        /* the previous friend */
        .send_ttl = BT_MESH_TTL_MAX,
    };
    struct bt_mesh_net_tx tx = {
        .sub = &bt_mesh.sub[0],
        .ctx = &ctx,
        .src = bt_mesh_primary_addr(),
        .xmit = bt_mesh_net_transmit_get(),
    };
    /* Payload fields are big-endian on the wire */
    struct bt_mesh_ctl_friend_clear req = {
        .lpn_addr = sys_cpu_to_be16(frnd->lpn),
        .lpn_counter = sys_cpu_to_be16(frnd->lpn_counter),
    };

    BT_DBG("");

    bt_mesh_ctl_send(&tx, TRANS_CTL_OP_FRIEND_CLEAR, &req,
                     sizeof(req), NULL, &clear_sent_cb, frnd);
}
/* Retransmission tick for the Friend Clear procedure: give up once more
 * than twice the LPN's PollTimeout has elapsed, otherwise send another
 * Friend Clear (which re-arms this timer via clear_sent_cb).
 */
static void clear_timeout(struct k_work *work)
{
    struct bt_mesh_friend *frnd = CONTAINER_OF(work, struct bt_mesh_friend,
                                               clear.timer.work);
    u32_t duration;

    BT_DBG("LPN 0x%04x (old) Friend 0x%04x", frnd->lpn, frnd->clear.frnd);

    /* Elapsed time since the recorded procedure start (u32 wrap-around
     * arithmetic). NOTE(review): verify that clear.start actually holds
     * the start time -- a future-dated timestamp would make this
     * unsigned subtraction wrap and expire the procedure immediately.
     */
    duration = k_uptime_get_32() - frnd->clear.start;
    if (duration > 2 * frnd->poll_to) {
        BT_DBG("Clear Procedure timer expired");
        /* Unassigned target marks the procedure as finished */
        frnd->clear.frnd = BT_MESH_ADDR_UNASSIGNED;
        return;
    }

    send_friend_clear(frnd);
}
/* Begin the Friend Clear procedure towards the LPN's previous friend:
 * record the start time (so clear_timeout() can give up after twice the
 * PollTimeout) and send the first Friend Clear with a 1 s initial
 * retransmit interval.
 */
static void clear_procedure_start(struct bt_mesh_friend *frnd)
{
    BT_DBG("LPN 0x%04x (old) Friend 0x%04x", frnd->lpn, frnd->clear.frnd);

    /* Store the *current* uptime. The previous code stored
     * "now + 2 * poll_to", a future timestamp, which made the unsigned
     * "now - start" subtraction in clear_timeout() wrap around and
     * terminate the procedure on its very first tick instead of after
     * 2 * PollTimeout worth of retransmissions.
     */
    frnd->clear.start = k_uptime_get_32();
    frnd->clear.repeat_sec = 1;

    send_friend_clear(frnd);
}
/* Handle a Friend Clear Confirmation from the LPN's previous friend:
 * when it matches the pending procedure, stop retransmitting Friend
 * Clear messages.
 *
 * Returns 0 (mismatches are ignored) or -EINVAL on a malformed PDU.
 */
int bt_mesh_friend_clear_cfm(struct bt_mesh_net_rx *rx,
                             struct net_buf_simple *buf)
{
    struct bt_mesh_ctl_friend_clear_confirm *msg = (void *)buf->data;
    struct bt_mesh_friend *frnd;
    u16_t lpn_addr, lpn_counter;

    BT_DBG("");

    if (buf->len < sizeof(*msg)) {
        BT_WARN("Too short Friend Clear Confirm");
        return -EINVAL;
    }

    frnd = find_clear(rx->ctx.addr);
    if (!frnd) {
        /* Format fix: addr is a 16-bit value, so use 0x%04x (the
         * original used 0x%02x and printed a truncated-looking addr).
         */
        BT_WARN("No pending clear procedure for 0x%04x", rx->ctx.addr);
        return 0;
    }

    /* The confirmation must echo the exact LPN address and counter */
    lpn_addr = sys_be16_to_cpu(msg->lpn_addr);
    if (lpn_addr != frnd->lpn) {
        BT_WARN("LPN address mismatch (0x%04x != 0x%04x)",
                lpn_addr, frnd->lpn);
        return 0;
    }

    lpn_counter = sys_be16_to_cpu(msg->lpn_counter);
    if (lpn_counter != frnd->lpn_counter) {
        BT_WARN("LPN counter mismatch (0x%04x != 0x%04x)",
                lpn_counter, frnd->lpn_counter);
        return 0;
    }

    /* Procedure complete: stop the retransmission timer */
    k_delayed_work_cancel(&frnd->clear.timer);
    frnd->clear.frnd = BT_MESH_ADDR_UNASSIGNED;

    return 0;
}
/* Build and stage a Friend Offer for the requesting LPN. The Offer is
 * kept in frnd->last (flagged via send_last) rather than the Friend
 * Queue, since it is sent with master credentials outside normal queue
 * flow. Increments the local FriendCounter for each Offer produced.
 */
static void enqueue_offer(struct bt_mesh_friend *frnd, s8_t rssi)
{
    struct bt_mesh_ctl_friend_offer *off;
    struct net_buf_simple *sdu = NET_BUF_SIMPLE(1 + sizeof(*off));
    struct net_buf *buf;

    BT_DBG("");

    /* One byte of headroom for the transport control header */
    net_buf_simple_init(sdu, 1);

    off = net_buf_simple_add(sdu, sizeof(*off));

    /* Proper statements instead of one comma-operator expression: the
     * trailing commas in the original were almost certainly typos for
     * semicolons (behavior was identical, readability was not).
     */
    off->recv_win = CONFIG_BT_MESH_FRIEND_RECV_WIN;
    off->queue_size = CONFIG_BT_MESH_FRIEND_QUEUE_SIZE;
    off->sub_list_size = ARRAY_SIZE(frnd->sub_list);
    off->rssi = rssi;
    off->frnd_counter = sys_cpu_to_be16(frnd->counter);

    buf = encode_friend_ctl(frnd, TRANS_CTL_OP_FRIEND_OFFER, sdu);
    if (!buf) {
        BT_ERR("Unable to encode Friend Offer");
        return;
    }

    frnd->counter++;

    if (frnd->last) {
        net_buf_unref(frnd->last);
    }

    frnd->last = buf;
    frnd->send_last = 1;
}
/* Inputs to the Friend Offer Delay calculation, extracted from the
 * Criteria field of the Friend Request.
 */
#define RECV_WIN CONFIG_BT_MESH_FRIEND_RECV_WIN
/* RSSIFactor: two bits starting at bit 5 of the Criteria field */
#define RSSI_FACT(crit) (((crit) >> 5) & (u8_t)MESH_BIT_MASK(2))
/* ReceiveWindowFactor: two bits starting at bit 3 */
#define RECV_WIN_FACT(crit) (((crit) >> 3) & (u8_t)MESH_BIT_MASK(2))
/* MinQueueSizeLog: low three bits; the queue size is 2^MinQueueSizeLog */
#define MIN_QUEUE_SIZE_LOG(crit) ((crit) & (u8_t)MESH_BIT_MASK(3))
#define MIN_QUEUE_SIZE(crit) ((u32_t)MESH_BIT(MIN_QUEUE_SIZE_LOG(crit)))
/* Compute the Local Delay before sending a Friend Offer, with a 100 ms
 * floor. Based on the Criteria factors supplied by the LPN.
 */
static s32_t offer_delay(struct bt_mesh_friend *frnd, s8_t rssi, u8_t crit)
{
    /* Scaling factors. The actual values are 1, 1.5, 2 & 2.5, but we
     * want to avoid floating-point arithmetic.
     */
    static const u8_t fact[] = { 10, 15, 20, 25 };
    s32_t win_term, rssi_term, delay;

    BT_DBG("ReceiveWindowFactor %u ReceiveWindow %u RSSIFactor %u RSSI %d",
           fact[RECV_WIN_FACT(crit)], RECV_WIN,
           fact[RSSI_FACT(crit)], rssi);

    /* Delay = ReceiveWindowFactor * ReceiveWindow - RSSIFactor * RSSI */
    win_term = (s32_t)fact[RECV_WIN_FACT(crit)] * RECV_WIN;
    rssi_term = (s32_t)fact[RSSI_FACT(crit)] * rssi;
    delay = (win_term - rssi_term) / 10;

    BT_DBG("Local Delay calculated as %d ms", delay);

    /* Never answer sooner than 100 ms */
    return K_MSEC(delay < 100 ? 100 : delay);
}
/* Handle a Friend Request from an LPN looking for a friend.
 *
 * Validates the request parameters, allocates (or recycles) a Friend
 * context, starts the Friend Clear procedure towards any previous
 * friend, creates friendship credentials and schedules a Friend Offer
 * after the spec-defined delay.
 *
 * Returns 0 on success or silent ignore, -EINVAL on malformed input,
 * -ENOMEM when all Friend contexts are occupied.
 */
int bt_mesh_friend_req(struct bt_mesh_net_rx *rx, struct net_buf_simple *buf)
{
    struct bt_mesh_ctl_friend_req *msg = (void *)buf->data;
    struct bt_mesh_friend *frnd = NULL;
    u16_t old_friend;
    u32_t poll_to;
    int i;

    BT_DBG("%s, friend request packaet received.", __func__);

    if (buf->len < sizeof(*msg)) {
        BT_WARN("Too short Friend Request");
        return -EINVAL;
    }

    /* ReceiveDelay values below 0x0A are prohibited */
    if (msg->recv_delay <= 0x09) {
        BT_WARN("Prohibited ReceiveDelay (0x%02x)", msg->recv_delay);
        return -EINVAL;
    }

    /* PollTimeout: 24-bit big-endian value in units of 100 ms */
    poll_to = (((u32_t)msg->poll_to[0] << 16) |
               ((u32_t)msg->poll_to[1] << 8) |
               ((u32_t)msg->poll_to[2]));

    if (poll_to <= 0x000009 || poll_to >= 0x34bc00) {
        BT_WARN("Prohibited PollTimeout (0x%06x)", poll_to);
        return -EINVAL;
    }

    if (msg->num_elem == 0x00) {
        BT_WARN("Prohibited NumElements value (0x00)");
        return -EINVAL;
    }

    if (!MIN_QUEUE_SIZE_LOG(msg->criteria)) {
        BT_WARN("Prohibited Minimum Queue Size in Friend Request");
        return -EINVAL;
    }

    /* Silently ignore requests we cannot satisfy */
    if (CONFIG_BT_MESH_FRIEND_QUEUE_SIZE < MIN_QUEUE_SIZE(msg->criteria)) {
        BT_WARN("We have a too small Friend Queue size (%u < %u)",
                CONFIG_BT_MESH_FRIEND_QUEUE_SIZE,
                MIN_QUEUE_SIZE(msg->criteria));
        return 0;
    }

    /* Look for an existing context, keyed on the previous friend's
     * address when one was given, otherwise on the LPN itself.
     */
    old_friend = sys_be16_to_cpu(msg->prev_addr);
    if (BT_MESH_ADDR_IS_UNICAST(old_friend)) {
        frnd = bt_mesh_friend_find(rx->sub->net_idx, old_friend,
                                   true, false);
    } else {
        frnd = bt_mesh_friend_find(rx->sub->net_idx, rx->ctx.addr,
                                   true, false);
    }

    if (frnd) {
        /* Recycle the existing context for the new friendship */
        BT_WARN("Existing LPN re-requesting Friendship");
        friend_clear(frnd);
        goto init_friend;
    }

    /* Otherwise claim the first free context */
    for (i = 0; i < ARRAY_SIZE(bt_mesh.frnd); i++) {
        if (!bt_mesh.frnd[i].valid) {
            frnd = &bt_mesh.frnd[i];
            frnd->valid = 1;
            break;
        }
    }

    if (!frnd) {
        BT_WARN("No free Friend contexts for new LPN");
        return -ENOMEM;
    }

init_friend:
    frnd->lpn = rx->ctx.addr;
    frnd->net_idx = rx->sub->net_idx;
    frnd->recv_delay = msg->recv_delay;
    frnd->poll_to = poll_to * 100;  /* convert to milliseconds */
    frnd->lpn_counter = sys_be16_to_cpu(msg->lpn_counter);
    frnd->clear.frnd = sys_be16_to_cpu(msg->prev_addr);

    BT_DBG("LPN 0x%04x rssi %d recv_delay %u poll_to %ums",
           frnd->lpn, rx->rssi, frnd->recv_delay, frnd->poll_to);

    /* Ask the previous friend (if any, and if that friend is not one
     * of our own elements) to drop its state for this LPN.
     */
    if (BT_MESH_ADDR_IS_UNICAST(old_friend) &&
        !bt_mesh_elem_find(old_friend)) {
        clear_procedure_start(frnd);
    }

    k_delayed_work_submit(&frnd->timer,
                          offer_delay(frnd, rx->rssi, msg->criteria));

    friend_cred_create(rx->sub, frnd->lpn, frnd->lpn_counter,
                       frnd->counter);

    enqueue_offer(frnd, rx->rssi);

    return 0;
}
/* Find the per-segment staging queue already collecting segments for
 * (src, *seq_auth); when none matches, return the first empty context,
 * or NULL when every context is busy.
 */
static struct bt_mesh_friend_seg *get_seg(struct bt_mesh_friend *frnd,
                                          u16_t src, u64_t *seq_auth)
{
    struct bt_mesh_friend_seg *free_seg = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(frnd->seg); i++) {
        struct bt_mesh_friend_seg *seg = &frnd->seg[i];
        struct net_buf *buf = (void *)sys_slist_peek_head(&seg->queue);

        if (!buf) {
            /* Remember the first empty context as a fallback */
            if (!free_seg) {
                free_seg = seg;
            }
            continue;
        }

        if (BT_MESH_ADV(buf)->addr == src &&
            FRIEND_ADV(buf)->seq_auth == *seq_auth) {
            return seg;
        }
    }

    return free_seg;
}
/* Place a freshly created Friend PDU either directly on the Friend
 * Queue (single/unsegmented PDUs) or on a per-segment staging queue
 * that gets merged into the Friend Queue once the final segment of the
 * message has arrived.
 */
static void enqueue_friend_pdu(struct bt_mesh_friend *frnd,
                               enum bt_mesh_friend_pdu_type type,
                               struct net_buf *buf)
{
    struct bt_mesh_friend_seg *seg;
    struct friend_adv *adv;

    BT_DBG("type %u", type);

    if (type == BT_MESH_FRIEND_PDU_SINGLE) {
        /* A pending security update is delivered ahead of data */
        if (frnd->sec_update) {
            enqueue_update(frnd, 1);
        }

        enqueue_buf(frnd, buf);
        return;
    }

    /* Segmented PDU: locate (or allocate) the staging context */
    adv = FRIEND_ADV(buf);
    seg = get_seg(frnd, BT_MESH_ADV(buf)->addr, &adv->seq_auth);
    if (!seg) {
        BT_ERR("No free friend segment RX contexts for 0x%04x",
               BT_MESH_ADV(buf)->addr);
        net_buf_unref(buf);
        return;
    }

    net_buf_slist_put(&seg->queue, buf);

    if (type == BT_MESH_FRIEND_PDU_COMPLETE) {
        if (frnd->sec_update) {
            enqueue_update(frnd, 1);
        }

        /* Only acks should have a valid SeqAuth in the Friend queue
         * (otherwise we can't easily detect them there), so clear
         * the SeqAuth information from the segments before merging.
         */
        SYS_SLIST_FOR_EACH_CONTAINER(&seg->queue, buf, node) {
            FRIEND_ADV(buf)->seq_auth = TRANS_SEQ_AUTH_NVAL;
            /* Each merged segment counts towards the queue size */
            frnd->queue_size++;
        }

        sys_slist_merge_slist(&frnd->queue, &seg->queue);
    }
}
/* TX-start callback for LPN-bound PDUs: the buffer is now with the
 * advertiser, so clear pending_buf; pre-establishment PDUs (the Friend
 * Offer) are released immediately since they are never retransmitted.
 */
static void buf_send_start(u16_t duration, int err, void *user_data)
{
    struct bt_mesh_friend *frnd = user_data;

    BT_DBG("err %d", err);

    frnd->pending_buf = 0;

    /* Friend Offer doesn't follow the re-sending semantics */
    if (frnd->established) {
        return;
    }

    net_buf_unref(frnd->last);
    frnd->last = NULL;
}
/* TX-end callback for LPN-bound PDUs: re-arm the friendship timer as a
 * PollTimeout watchdog (established) or first-poll timeout (new).
 */
static void buf_send_end(int err, void *user_data)
{
    struct bt_mesh_friend *frnd = user_data;

    BT_DBG("err %d", err);

    /* The timer already belongs to the newer request's ReceiveDelay */
    if (frnd->pending_req) {
        BT_WARN("Another request before previous completed sending");
        return;
    }

    if (!frnd->established) {
        /* Friend offer timeout is 1 second */
        /**
         * TODO: spec requires friend offer to poll to be within 1 s,
         * but outs needs more than 1, hmm ...
         */
        k_delayed_work_submit(&frnd->timer, K_SECONDS(5));
        BT_DBG("Waiting for first poll");
        return;
    }

    k_delayed_work_submit(&frnd->timer, frnd->poll_to);
    BT_DBG("Waiting %u ms for next poll", frnd->poll_to);
}
/* Work handler for the main friendship timer. Depending on state it
 * either fires after ReceiveDelay and transmits the PDU owed to the
 * LPN, or fires as a PollTimeout/establishment timeout and tears the
 * friendship down.
 */
static void friend_timeout(struct k_work *work)
{
    struct bt_mesh_friend *frnd = CONTAINER_OF(work, struct bt_mesh_friend,
                                               timer.work);
    static const struct bt_mesh_send_cb buf_sent_cb = {
        .start = buf_send_start,
        .end = buf_send_end,
    };

    __ASSERT_NO_MSG(frnd->pending_buf == 0);

    BT_DBG("lpn 0x%04x send_last %u last %p", frnd->lpn,
           frnd->send_last, frnd->last);

    /* The LPN repeated its FSN: retransmit the previous PDU */
    if (frnd->send_last && frnd->last) {
        BT_DBG("Sending frnd->last %p", frnd->last);
        frnd->send_last = 0;
        goto send_last;
    }

    /* No request arrived before the timer fired: PollTimeout expired */
    if (frnd->established && !frnd->pending_req) {
        BT_WARN("Friendship lost with 0x%04x", frnd->lpn);
        friend_clear(frnd);
        return;
    }

    frnd->last = net_buf_slist_get(&frnd->queue);
    if (!frnd->last) {
        /* Nothing to send: the LPN never completed establishment */
        BT_WARN("Friendship not established with 0x%04x", frnd->lpn);
        friend_clear(frnd);
        return;
    }

    BT_DBG("Sending buf %p from Friend Queue of LPN 0x%04x",
           frnd->last, frnd->lpn);
    frnd->queue_size--;

send_last:
    frnd->pending_req = 0;
    frnd->pending_buf = 1;  /* cleared again in buf_send_start() */
    bt_mesh_adv_send(frnd->last, &buf_sent_cb, frnd);
}
/* One-time initialization of the Friend feature: set up the buffer
 * pool and put every Friend context into its unused state.
 * Always returns 0.
 */
int bt_mesh_friend_init(void)
{
    int i, j;

    k_lifo_init(&friend_buf_pool.free);

    for (i = 0; i < ARRAY_SIZE(bt_mesh.frnd); i++) {
        struct bt_mesh_friend *frnd = &bt_mesh.frnd[i];

        /* Unused contexts are marked with BT_MESH_KEY_UNUSED */
        frnd->net_idx = BT_MESH_KEY_UNUSED;

        sys_slist_init(&frnd->queue);

        k_delayed_work_init(&frnd->timer, friend_timeout);
        k_delayed_work_init(&frnd->clear.timer, clear_timeout);

        for (j = 0; j < ARRAY_SIZE(frnd->seg); j++) {
            sys_slist_init(&frnd->seg[j].queue);
        }
    }

    return 0;
}
/* Remove a previously queued Segment Ack for (src, *seq_auth) from the
 * Friend Queue: a newer ack for the same segmented message supersedes
 * the old one.
 */
static void friend_purge_old_ack(struct bt_mesh_friend *frnd, u64_t *seq_auth,
                                 u16_t src)
{
    sys_snode_t *cur, *prev = NULL;

    BT_DBG("SeqAuth %llx src 0x%04x", *seq_auth, src);

    /* Manual walk keeping `prev` so the node can be unlinked in place */
    for (cur = sys_slist_peek_head(&frnd->queue);
         cur != NULL; prev = cur, cur = sys_slist_peek_next(cur)) {
        struct net_buf *buf = (void *)cur;

        /* Only acks carry a valid SeqAuth in the Friend Queue, so this
         * comparison cannot match ordinary data PDUs.
         */
        if (BT_MESH_ADV(buf)->addr == src &&
            FRIEND_ADV(buf)->seq_auth == *seq_auth) {
            BT_DBG("Removing old ack from Friend Queue");

            sys_slist_remove(&frnd->queue, prev, cur);
            frnd->queue_size--;
            /* Make sure old slist entry state doesn't remain */
            buf->frags = NULL;

            net_buf_unref(buf);
            break;
        }
    }
}
/* Queue a PDU received from the network (or local interface) for later
 * delivery to the given LPN, re-encoding it with LPN-appropriate
 * credentials and the received IV Index.
 */
static void friend_lpn_enqueue_rx(struct bt_mesh_friend *frnd,
                                  struct bt_mesh_net_rx *rx,
                                  enum bt_mesh_friend_pdu_type type,
                                  u64_t *seq_auth, struct net_buf_simple *sbuf)
{
    struct friend_pdu_info info;
    struct net_buf *buf;

    BT_DBG("LPN 0x%04x queue_size %u", frnd->lpn, frnd->queue_size);

    /* A fresh ack supersedes any older queued ack for the same message */
    if (type == BT_MESH_FRIEND_PDU_SINGLE && seq_auth) {
        friend_purge_old_ack(frnd, seq_auth, rx->ctx.addr);
    }

    info.src = rx->ctx.addr;
    info.dst = rx->dst;

    /* Locally originated PDUs keep their TTL; PDUs from the network
     * consume one hop when being relayed to the LPN.
     */
    if (rx->net_if == BT_MESH_NET_IF_LOCAL) {
        info.ttl = rx->ctx.recv_ttl;
    } else {
        info.ttl = rx->ctx.recv_ttl - 1;
    }

    info.ctl = rx->ctl;

    /* Preserve the original 24-bit sequence number */
    info.seq[0] = (rx->seq >> 16);
    info.seq[1] = (rx->seq >> 8);
    info.seq[2] = rx->seq;

    info.iv_index = BT_MESH_NET_IVI_RX(rx);

    buf = create_friend_pdu(frnd, &info, sbuf);
    if (!buf) {
        BT_ERR("Failed to encode Friend buffer");
        return;
    }

    if (seq_auth) {
        FRIEND_ADV(buf)->seq_auth = *seq_auth;
    }

    enqueue_friend_pdu(frnd, type, buf);

    BT_DBG("Queued message for LPN 0x%04x, queue_size %u",
           frnd->lpn, frnd->queue_size);
}
/* Queue a locally originated PDU for the given LPN, re-encoding it with
 * LPN-appropriate credentials and a fresh local sequence number.
 */
static void friend_lpn_enqueue_tx(struct bt_mesh_friend *frnd,
                                  struct bt_mesh_net_tx *tx,
                                  enum bt_mesh_friend_pdu_type type,
                                  u64_t *seq_auth, struct net_buf_simple *sbuf)
{
    struct friend_pdu_info info;
    struct net_buf *buf;
    u32_t seq;

    BT_DBG("LPN 0x%04x", frnd->lpn);

    /* A fresh ack supersedes any older queued ack for the same message */
    if (seq_auth && type == BT_MESH_FRIEND_PDU_SINGLE) {
        friend_purge_old_ack(frnd, seq_auth, tx->src);
    }

    info.src = tx->src;
    info.dst = tx->ctx->addr;

    info.ttl = tx->ctx->send_ttl;
    /* No AppKey bound means this is a control PDU */
    info.ctl = (tx->ctx->app_idx == BT_MESH_KEY_UNUSED);

    /* Capture the current sequence number and advance it */
    seq = bt_mesh.seq++;
    info.seq[0] = seq >> 16;
    info.seq[1] = seq >> 8;
    info.seq[2] = seq;

    info.iv_index = BT_MESH_NET_IVI_TX;

    buf = create_friend_pdu(frnd, &info, sbuf);
    if (buf == NULL) {
        BT_ERR("Failed to encode Friend buffer");
        return;
    }

    if (seq_auth != NULL) {
        FRIEND_ADV(buf)->seq_auth = *seq_auth;
    }

    enqueue_friend_pdu(frnd, type, buf);

    BT_DBG("Queued message for LPN 0x%04x", frnd->lpn);
}
/* Check whether an established friendship on the given subnet should
 * receive PDUs for @addr: either the LPN's own unicast address or an
 * entry in its subscription list.
 */
static bool friend_lpn_matches(struct bt_mesh_friend *frnd, u16_t net_idx,
                               u16_t addr)
{
    int i;

    if (!frnd->established || net_idx != frnd->net_idx) {
        return false;
    }

    if (BT_MESH_ADDR_IS_UNICAST(addr)) {
        return addr == frnd->lpn;
    }

    /* Group/virtual destination: consult the subscription list */
    for (i = 0; i < ARRAY_SIZE(frnd->sub_list); i++) {
        if (frnd->sub_list[i] == addr) {
            return true;
        }
    }

    return false;
}
/* Check whether any established friendship on the given subnet should
 * receive PDUs addressed to @addr.
 */
bool bt_mesh_friend_match(u16_t net_idx, u16_t addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(bt_mesh.frnd); i++) {
        struct bt_mesh_friend *frnd = &bt_mesh.frnd[i];

        if (!friend_lpn_matches(frnd, net_idx, addr)) {
            continue;
        }

        BT_DBG("LPN 0x%04x matched address 0x%04x",
               frnd->lpn, addr);
        return true;
    }

    BT_DBG("No matching LPN for address 0x%04x", addr);
    return false;
}
/* Fan a received PDU out to the Friend Queue of every established LPN
 * whose addresses match its destination. Only applies when the Friend
 * feature is enabled and the TTL permits onward delivery (PDUs from the
 * local interface are exempt from the TTL requirement).
 */
void bt_mesh_friend_enqueue_rx(struct bt_mesh_net_rx *rx,
                               enum bt_mesh_friend_pdu_type type,
                               u64_t *seq_auth, struct net_buf_simple *sbuf)
{
    int i;

    if (!rx->friend_match ||
        (rx->ctx.recv_ttl <= 1 && rx->net_if != BT_MESH_NET_IF_LOCAL) ||
        bt_mesh_friend_get() != BT_MESH_FRIEND_ENABLED) {
        return;
    }

    BT_DBG("recv_ttl %u net_idx 0x%04x src 0x%04x dst 0x%04x",
           rx->ctx.recv_ttl, rx->sub->net_idx, rx->ctx.addr, rx->dst);

    for (i = 0; i < ARRAY_SIZE(bt_mesh.frnd); i++) {
        struct bt_mesh_friend *frnd = &bt_mesh.frnd[i];

        if (!friend_lpn_matches(frnd, rx->sub->net_idx, rx->dst)) {
            continue;
        }

        friend_lpn_enqueue_rx(frnd, rx, type, seq_auth, sbuf);
    }
}
/* Fan a locally originated PDU out to the Friend Queue of every
 * established LPN whose addresses match its destination.
 *
 * Returns true when at least one LPN received the PDU.
 */
bool bt_mesh_friend_enqueue_tx(struct bt_mesh_net_tx *tx,
                               enum bt_mesh_friend_pdu_type type,
                               u64_t *seq_auth, struct net_buf_simple *sbuf)
{
    bool matched = false;
    int i;

    if (!bt_mesh_friend_match(tx->sub->net_idx, tx->ctx->addr) ||
        bt_mesh_friend_get() != BT_MESH_FRIEND_ENABLED) {
        return matched;
    }

    BT_DBG("net_idx 0x%04x dst 0x%04x src 0x%04x", tx->sub->net_idx,
           tx->ctx->addr, tx->src);

    for (i = 0; i < ARRAY_SIZE(bt_mesh.frnd); i++) {
        struct bt_mesh_friend *frnd = &bt_mesh.frnd[i];

        if (!friend_lpn_matches(frnd, tx->sub->net_idx, tx->ctx->addr)) {
            continue;
        }

        friend_lpn_enqueue_tx(frnd, tx, type, seq_auth, sbuf);
        matched = true;
    }

    return matched;
}
/* Drop partially collected segments for the (src, *seq_auth) message
 * from every matching LPN's staging queues, e.g. when segment reception
 * is abandoned at the transport layer.
 */
void bt_mesh_friend_clear_incomplete(struct bt_mesh_subnet *sub, u16_t src,
                                     u16_t dst, u64_t *seq_auth)
{
    int i;

    BT_DBG("");

    for (i = 0; i < ARRAY_SIZE(bt_mesh.frnd); i++) {
        struct bt_mesh_friend *frnd = &bt_mesh.frnd[i];
        int j;

        if (!friend_lpn_matches(frnd, sub->net_idx, dst)) {
            continue;
        }

        for (j = 0; j < ARRAY_SIZE(frnd->seg); j++) {
            struct bt_mesh_friend_seg *seg = &frnd->seg[j];
            struct net_buf *buf =
                (void *)sys_slist_peek_head(&seg->queue);

            /* Skip staging queues for other sources/messages */
            if (!buf ||
                BT_MESH_ADV(buf)->addr != src ||
                FRIEND_ADV(buf)->seq_auth != *seq_auth) {
                continue;
            }

            BT_WARN("Clearing incomplete segments for 0x%04x", src);

            while (!sys_slist_is_empty(&seg->queue)) {
                net_buf_unref(net_buf_slist_get(&seg->queue));
            }
        }
    }
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.