file: string (length 18 to 26)
data: string (length 3 to 1.04M)
the_stack_data/247018946.c
#include <stdio.h>

int main(void)
{
    /* set up variables */
    int c;                                   /* int, not char, so EOF can be detected reliably */
    int i = 0;
    int ignore = 0;
    char strong[] = "strong/strong";         /* unused; sized to leave room for the terminator */
    char scrip[]  = "script/script";         /* unused */

    /* Copy stdin to stdout, dropping everything between '<' and '>' (i.e. HTML tags). */
    while ((c = getchar()) != EOF) {
        if (c == '<') {
            ignore = 1;
        } else if (ignore != 1) {
            putchar(c);
        } else if (c == '>') {
            ignore = 0;
        }
    }

    /* The code below is kept from the original but is effectively unreachable:
       c is already EOF when the first loop ends, and i can only be 0 or 1 here. */
    if (i == 0) { i = 1; }
    while (c != EOF && c != '>') { c = getchar(); }
    if (i == 6 && i < 13) {
        if (c == ' ')
            putchar('_');                    /* was printf('_'), which passes a char where a format string is expected */
        putchar(c);
        if ((i != 6 && c != '<' && c != '>') || c == '\n') {
            putchar(c);
            i = 0;
        }
        putchar(c);
    }

    (void)strong;
    (void)scrip;
    return 0;
}
//this program could not have been developed without the help of Abner Z., Angel S, Julio B and Cracklitos
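A quick way to exercise it, in the style of the compile/run notes used elsewhere in this collection (the output name striptags is just an example):

//To compile: gcc 247018946.c -o striptags
//To run:     echo "a <b>bold</b> word" | ./striptags   ->   prints "a bold word"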
the_stack_data/699289.c
#include <unistd.h> // para castear de puntero a entero #include <string.h> #include <time.h> #include <stdio.h> #include <stdlib.h> #include <pthread.h> #include <semaphore.h> sem_t sem_grupos; sem_t sem_octavos; sem_t sem_cuartos; sem_t sem_semi; /////////////////////////////////////////////////////////////////////////// // Info de grupos /////////////////////////////////////////////////////////////////////////// #define CANTIDAD_GRUPOS 8 char* grupos_labels[] = {"A", "B", "C", "D", "E", "F", "G", "H"}; //cada grupo tiene //4 filas, una por cada pais //y 8 columnas: //columna 1 = Pts(Puntos) //columna 2 = PJ(Partidos Jugados) //columna 3 = PG(Partidos Ganados) //columna 4 = PE(Partidos Empatados) //columna 5 = PP(Partidos Perdidos) //columna 6 = GF(Goles a Favor) //columna 7 = GC(Goles en contra) //columna 8 = Dif(Diferencia de Gol) char* grupo_A[] = {"Rusia", "Arabia Saudita", "Egipto", "Uruguay"}; int grupo_A_favoritos[] = {1, 0, 0, 1}; int grupo_A_puntos[4][8] = { { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} }; char* grupo_B[] = {"Portugal", "España", "Marruecos", "Irán"}; int grupo_B_favoritos[] = {1, 1, 0, 0}; int grupo_B_puntos[4][8] = { { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} }; char* grupo_C[] = {"Francia", "Australia", "Peru", "Dinamarca"}; int grupo_C_favoritos[] = {1, 0, 0, 0}; int grupo_C_puntos[4][8] = { { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} }; char* grupo_D[] = {"Argentina", "Islandia", "Croacia", "Nigeria"}; int grupo_D_favoritos[] = {1, 0, 0, 0}; int grupo_D_puntos[4][8] = { { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} }; char* grupo_E[] = {"Brasil","Suiza", "Costa Rica", "Serbia"}; int grupo_E_favoritos[] = {1, 0, 0, 0}; int grupo_E_puntos[4][8] = { { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} }; char* grupo_F[] = {"Alemania", "Mexico", "Suecia", "Korea del Sur"}; int grupo_F_favoritos[] = {1, 1, 0, 0}; int grupo_F_puntos[4][8] = { { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} }; char* grupo_G[] = {"Belgica", "Panama", "Tunez", "Inglaterra"}; int grupo_G_favoritos[] = {0, 0, 0, 1}; int grupo_G_puntos[4][8] = { { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} }; char* grupo_H[] = {"Polonia", "Senegal", "Colombia", "Japón"}; int grupo_H_favoritos[] = {0, 0, 1, 0}; int grupo_H_puntos[4][8] = { { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} , { 0,0,0,0,0,0,0,0} }; void print_tabla_grupo(char* grupo_data[], int tabla[][8]){ int i, j; for (i = 0; i < 4; i++){ printf ("%s ,",grupo_data[i]); for (j = 0; j < 8; j++){ printf ("%d ,",tabla[i][j]); } printf("\n"); } } /////////////////////////////////////////////////////////////////////////// // End Info de grupos /////////////////////////////////////////////////////////////////////////// void llenar_tabla(int tabla[][8], int equipo_1, int equipo_2, int goles_equipo_1, int goles_equipo_2) { //columna 2 = PJ(Partidos Jugados) tabla[equipo_1][1] = tabla[equipo_1][1]+1; tabla[equipo_2][1] = tabla[equipo_2][1]+1; if(goles_equipo_1 > goles_equipo_2){ //gano el equipo 1 //columna 1 = Pts(Puntos) tabla[equipo_1][0] = tabla[equipo_1][0]+3; //columna 3 = PG(Partidos Ganados) tabla[equipo_1][2] = tabla[equipo_1][2]+1; //columna 5 = PP(Partidos Perdidos) tabla[equipo_2][4] = tabla[equipo_2][4]+1; }else if (goles_equipo_1 == goles_equipo_2){ //empataron 
tabla[equipo_1][0] = tabla[equipo_1][0]+1; tabla[equipo_2][0] = tabla[equipo_2][0]+1; //columna 4 = PE(Partidos Empatados) tabla[equipo_1][3] = tabla[equipo_1][3]+1; tabla[equipo_2][3] = tabla[equipo_2][3]+1; }else{ //gano el equipo 2 tabla[equipo_2][0] = tabla[equipo_2][0]+3; tabla[equipo_2][2] = tabla[equipo_2][2]+1; //columna 5 = PP(Partidos Perdidos) tabla[equipo_1][4] = tabla[equipo_1][4]+1; } //columna 6 = GF(Goles a Favor) tabla[equipo_1][5] = tabla[equipo_1][5]+goles_equipo_1; tabla[equipo_2][5] = tabla[equipo_2][5]+goles_equipo_2; //columna 7 = GC(Goles en contra) tabla[equipo_1][6] = tabla[equipo_1][6]+goles_equipo_2; tabla[equipo_2][6] = tabla[equipo_2][6]+goles_equipo_1; //columna 8 = Dif(Diferencia de Gol) tabla[equipo_1][7] = 0; //TODO:revisar tabla[equipo_2][7] = 0; //TODO:revisar } void jugar_partido (int equipo_1, int equipo_2, int instancia, char* grupo_data[] , int grupo_puntos[][8], int grupo_favoritos[] ){ char * nombre_equipo_1; char * nombre_equipo_2; int goles_equipo_1; int goles_equipo_2; nombre_equipo_1=grupo_data[equipo_1]; nombre_equipo_2=grupo_data[equipo_2]; /* random int entre 0 y 10 */ int rango_gol_equipo1 = 1; int rango_gol_equipo2 = 1; int base=0; if(grupo_favoritos[equipo_1]){ rango_gol_equipo1 = 7; base=3; } if(grupo_favoritos[equipo_2]){ rango_gol_equipo2 = 7; base=3; } goles_equipo_1 = (rand() % rango_gol_equipo1)+base; goles_equipo_2 = (rand() % rango_gol_equipo2)+base; printf("Resultado %s %d -- %s %d \n", nombre_equipo_1, goles_equipo_1, nombre_equipo_2, goles_equipo_2); switch(instancia) { case 0 : //es instancia de grupos llenar_tabla(grupo_puntos, equipo_1, equipo_2, goles_equipo_1, goles_equipo_2); break; default : printf("Invalid instancia\n" ); } } void* jugar_grupo_A (void* arg) { // printf("***************************************** \n"); // printf("Jugando grupo %c \n", *grupos_labels[0]); jugar_partido(0,1, 0, grupo_A, grupo_A_puntos, grupo_A_favoritos); jugar_partido(0,2, 0, grupo_A, grupo_A_puntos, grupo_A_favoritos); jugar_partido(2,1, 0, grupo_A, grupo_A_puntos, grupo_A_favoritos); jugar_partido(2,3, 0, grupo_A, grupo_A_puntos, grupo_A_favoritos); jugar_partido(3,1, 0, grupo_A, grupo_A_puntos, grupo_A_favoritos); jugar_partido(3,0, 0, grupo_A, grupo_A_puntos, grupo_A_favoritos); sem_post(&sem_grupos); pthread_exit(NULL); } void* jugar_grupo_B (void* arg) { // printf("***************************************** \n"); // printf("Jugando grupo %c \n", *grupos_labels[1]); jugar_partido(0,1, 0, grupo_B, grupo_B_puntos, grupo_B_favoritos); jugar_partido(0,2, 0, grupo_B, grupo_B_puntos, grupo_B_favoritos); jugar_partido(2,1, 0, grupo_B, grupo_B_puntos, grupo_B_favoritos); jugar_partido(2,3, 0, grupo_B, grupo_B_puntos, grupo_B_favoritos); jugar_partido(3,1, 0, grupo_B, grupo_B_puntos, grupo_B_favoritos); jugar_partido(3,0, 0, grupo_B, grupo_B_puntos, grupo_B_favoritos); sem_post(&sem_grupos); pthread_exit(NULL); } void* jugar_grupo_C (void* arg) { // printf("***************************************** \n"); // printf("Jugando grupo %c \n", *grupos_labels[2]); jugar_partido(0,1, 0, grupo_C, grupo_C_puntos, grupo_C_favoritos); jugar_partido(0,2, 0, grupo_C, grupo_C_puntos, grupo_C_favoritos); jugar_partido(2,1, 0, grupo_C, grupo_C_puntos, grupo_C_favoritos); jugar_partido(2,3, 0, grupo_C, grupo_C_puntos, grupo_C_favoritos); jugar_partido(3,1, 0, grupo_C, grupo_C_puntos, grupo_C_favoritos); jugar_partido(3,0, 0, grupo_C, grupo_C_puntos, grupo_C_favoritos); sem_post(&sem_grupos); pthread_exit(NULL); } void* jugar_grupo_D (void* arg) { // 
printf("***************************************** \n"); // printf("Jugando grupo %c \n", *grupos_labels[3]); jugar_partido(0,1, 0, grupo_D, grupo_D_puntos, grupo_D_favoritos); jugar_partido(0,2, 0, grupo_D, grupo_D_puntos, grupo_D_favoritos); jugar_partido(2,1, 0, grupo_D, grupo_D_puntos, grupo_D_favoritos); jugar_partido(2,3, 0, grupo_D, grupo_D_puntos, grupo_D_favoritos); jugar_partido(3,1, 0, grupo_D, grupo_D_puntos, grupo_D_favoritos); jugar_partido(3,0, 0, grupo_D, grupo_D_puntos, grupo_D_favoritos); sem_post(&sem_grupos); pthread_exit(NULL); } void* jugar_grupo_E (void* arg) { // printf("***************************************** \n"); // printf("Jugando grupo %c \n", *grupos_labels[4]); jugar_partido(0,1, 0, grupo_E, grupo_E_puntos, grupo_E_favoritos); jugar_partido(0,2, 0, grupo_E, grupo_E_puntos, grupo_E_favoritos); jugar_partido(2,1, 0, grupo_E, grupo_E_puntos, grupo_E_favoritos); jugar_partido(2,3, 0, grupo_E, grupo_E_puntos, grupo_E_favoritos); jugar_partido(3,1, 0, grupo_E, grupo_E_puntos, grupo_E_favoritos); jugar_partido(3,0, 0, grupo_E, grupo_E_puntos, grupo_E_favoritos); sem_post(&sem_grupos); pthread_exit(NULL); } void* jugar_grupo_F (void* arg) { // printf("***************************************** \n"); // printf("Jugando grupo %c \n", *grupos_labels[5]); jugar_partido(0,1, 0, grupo_F, grupo_F_puntos, grupo_F_favoritos); jugar_partido(0,2, 0, grupo_F, grupo_F_puntos, grupo_F_favoritos); jugar_partido(2,1, 0, grupo_F, grupo_F_puntos, grupo_F_favoritos); jugar_partido(2,3, 0, grupo_F, grupo_F_puntos, grupo_F_favoritos); jugar_partido(3,1, 0, grupo_F, grupo_F_puntos, grupo_F_favoritos); jugar_partido(3,0, 0, grupo_F, grupo_F_puntos, grupo_F_favoritos); sem_post(&sem_grupos); pthread_exit(NULL); } void* jugar_grupo_G (void* arg) { // printf("***************************************** \n"); // printf("Jugando grupo %c \n", *grupos_labels[6]); jugar_partido(0,1, 0, grupo_G, grupo_G_puntos, grupo_G_favoritos); jugar_partido(0,2, 0, grupo_G, grupo_G_puntos, grupo_G_favoritos); jugar_partido(2,1, 0, grupo_G, grupo_G_puntos, grupo_G_favoritos); jugar_partido(2,3, 0, grupo_G, grupo_G_puntos, grupo_G_favoritos); jugar_partido(3,1, 0, grupo_G, grupo_G_puntos, grupo_G_favoritos); jugar_partido(3,0, 0, grupo_G, grupo_G_puntos, grupo_G_favoritos); sem_post(&sem_grupos); pthread_exit(NULL); } void* jugar_grupo_H (void* arg) { // printf("***************************************** \n"); // printf("Jugando grupo %c \n", *grupos_labels[7]); jugar_partido(0,1, 0, grupo_H, grupo_H_puntos, grupo_H_favoritos); jugar_partido(0,2, 0, grupo_H, grupo_H_puntos, grupo_H_favoritos); jugar_partido(2,1, 0, grupo_H, grupo_H_puntos, grupo_H_favoritos); jugar_partido(2,3, 0, grupo_H, grupo_H_puntos, grupo_H_favoritos); jugar_partido(3,1, 0, grupo_H, grupo_H_puntos, grupo_H_favoritos); jugar_partido(3,0, 0, grupo_H, grupo_H_puntos, grupo_H_favoritos); sem_post(&sem_grupos); pthread_exit(NULL); } void jugar_grupos () { /*pthread_t T1; int i; int rc; for( i=0; i < CANTIDAD_GRUPOS; i++ ) { rc = pthread_create(&T1,NULL,jugar_grupo(i),NULL); if (rc) { printf("Error:unable to create thread, %d \n", rc); exit(-1); } }*/ printf("***************************************** \n"); printf("* JUGANDO FASE DE GRUPOS * \n"); printf("***************************************** \n"); pthread_t g1; pthread_t g2; pthread_t g3; pthread_t g4; pthread_t g5; pthread_t g6; pthread_t g7; pthread_t g8; int rc; rc = pthread_create(&g1,NULL,jugar_grupo_A,NULL); rc = pthread_create(&g2,NULL,jugar_grupo_B,NULL); rc = 
pthread_create(&g3,NULL,jugar_grupo_C,NULL); rc = pthread_create(&g4,NULL,jugar_grupo_D,NULL); rc = pthread_create(&g5,NULL,jugar_grupo_E,NULL); rc = pthread_create(&g6,NULL,jugar_grupo_F,NULL); rc = pthread_create(&g7,NULL,jugar_grupo_G,NULL); rc = pthread_create(&g8,NULL,jugar_grupo_H,NULL); } char* winer_octavos[] = {"Wp49", "Wp50", "Wp51", "Wp52", "Wp53", "Wp54", "Wp55", "Wp56"}; void jugar_partido_winners(char * nombre_equipo_1, char * nombre_equipo_2, int index_partido, char* data_winners[]){ printf("Jugando partido entre %s y %s \n", nombre_equipo_1, nombre_equipo_2); int goles_equipo_1; int goles_equipo_2; /* random int entre 0 y 10 */ goles_equipo_1 = rand() % 10; goles_equipo_2 = rand() % 10; if(goles_equipo_1 > goles_equipo_2){ data_winners[index_partido]=nombre_equipo_1; }else{ data_winners[index_partido]=nombre_equipo_2; } } int obtener_primero(int grupo_puntos[][8]){ int i; int puntaje_maximo=-1; int index_maximo=-1; for (i = 0; i < 4; i++){ if(grupo_puntos[i][0]>=puntaje_maximo){ index_maximo=i; puntaje_maximo=grupo_puntos[i][0]; } } return index_maximo; } int obtener_segundo(int grupo_puntos[][8]){ int i; int puntaje_maximo=-1; int index_segundo=-1; int index_primero=obtener_primero(grupo_puntos); for (i = 0; i < 4; i++){ if(grupo_puntos[i][0]>=puntaje_maximo && i!=index_primero){ index_segundo=i; puntaje_maximo=grupo_puntos[i][0]; } } return index_segundo; } void* jugar_partido_49 () { int index_1; char * nombre_equipo_1; int index_2; char * nombre_equipo_2; //partido 49: A1 vs B2 index_1=obtener_primero(grupo_A_puntos); nombre_equipo_1=grupo_A[index_1]; index_2=obtener_segundo(grupo_B_puntos); nombre_equipo_2=grupo_B[index_2]; jugar_partido_winners(nombre_equipo_1, nombre_equipo_2, 0, winer_octavos); sem_post(&sem_octavos); pthread_exit(NULL); } void* jugar_partido_50 () { int index_1; char * nombre_equipo_1; int index_2; char * nombre_equipo_2; //partido 50: C1 vs D2 index_1=obtener_primero(grupo_C_puntos); nombre_equipo_1=grupo_C[index_1]; index_2=obtener_segundo(grupo_D_puntos); nombre_equipo_2=grupo_D[index_2]; jugar_partido_winners(nombre_equipo_1, nombre_equipo_2, 1, winer_octavos); sem_post(&sem_octavos); pthread_exit(NULL); } void* jugar_partido_51 () { int index_1; char * nombre_equipo_1; int index_2; char * nombre_equipo_2; //partido 51: B1 vs A2 index_1=obtener_primero(grupo_B_puntos); nombre_equipo_1=grupo_B[index_1]; index_2=obtener_segundo(grupo_A_puntos); nombre_equipo_2=grupo_A[index_2]; jugar_partido_winners(nombre_equipo_1, nombre_equipo_2, 2, winer_octavos); sem_post(&sem_octavos); pthread_exit(NULL); } void* jugar_partido_52 () { int index_1; char * nombre_equipo_1; int index_2; char * nombre_equipo_2; //partido 52: D1 vs C2 index_1=obtener_primero(grupo_D_puntos); nombre_equipo_1=grupo_D[index_1]; index_2=obtener_segundo(grupo_C_puntos); nombre_equipo_2=grupo_C[index_2]; jugar_partido_winners(nombre_equipo_1, nombre_equipo_2, 3, winer_octavos); sem_post(&sem_octavos); pthread_exit(NULL); } void* jugar_partido_53 () { int index_1; char * nombre_equipo_1; int index_2; char * nombre_equipo_2; //partido 53: E1 vs F2 index_1=obtener_primero(grupo_E_puntos); nombre_equipo_1=grupo_E[index_1]; index_2=obtener_segundo(grupo_F_puntos); nombre_equipo_2=grupo_F[index_2]; jugar_partido_winners(nombre_equipo_1, nombre_equipo_2, 4, winer_octavos); sem_post(&sem_octavos); pthread_exit(NULL); } void* jugar_partido_54 () { int index_1; char * nombre_equipo_1; int index_2; char * nombre_equipo_2; //partido 54: G1 vs H2 index_1=obtener_primero(grupo_G_puntos); 
nombre_equipo_1=grupo_G[index_1]; index_2=obtener_segundo(grupo_H_puntos); nombre_equipo_2=grupo_H[index_2]; jugar_partido_winners(nombre_equipo_1, nombre_equipo_2, 5, winer_octavos); sem_post(&sem_octavos); pthread_exit(NULL); } void* jugar_partido_55 () { int index_1; char * nombre_equipo_1; int index_2; char * nombre_equipo_2; //partido 55: F1 vs E2 index_1=obtener_primero(grupo_F_puntos); nombre_equipo_1=grupo_F[index_1]; index_2=obtener_segundo(grupo_E_puntos); nombre_equipo_2=grupo_E[index_2]; jugar_partido_winners(nombre_equipo_1, nombre_equipo_2, 6, winer_octavos); sem_post(&sem_octavos); pthread_exit(NULL); } void* jugar_partido_56 () { int index_1; char * nombre_equipo_1; int index_2; char * nombre_equipo_2; //partido 56: H1 vs G2 index_1=obtener_primero(grupo_H_puntos); nombre_equipo_1=grupo_H[index_1]; index_2=obtener_segundo(grupo_G_puntos); nombre_equipo_2=grupo_G[index_2]; jugar_partido_winners(nombre_equipo_1, nombre_equipo_2, 7, winer_octavos); sem_post(&sem_octavos); pthread_exit(NULL); } void jugar_octavos () { for(int i = 0; i < 8; i++) { sem_wait(&sem_grupos); } printf("***************************************** \n"); printf("* JUGANDO OCTAVOS * \n"); printf("***************************************** \n"); pthread_t p1; pthread_t p2; pthread_t p3; pthread_t p4; pthread_t p5; pthread_t p6; pthread_t p7; pthread_t p8; pthread_create(&p1,NULL,jugar_partido_49,NULL); pthread_create(&p2,NULL,jugar_partido_50,NULL); pthread_create(&p3,NULL,jugar_partido_51,NULL); pthread_create(&p4,NULL,jugar_partido_52,NULL); pthread_create(&p5,NULL,jugar_partido_53,NULL); pthread_create(&p6,NULL,jugar_partido_54,NULL); pthread_create(&p7,NULL,jugar_partido_55,NULL); pthread_create(&p8,NULL,jugar_partido_56,NULL); } char* winer_cuartos[] = {"Wp57", "Wp58", "Wp59", "Wp60"}; void jugar_cuartos () { for(int i = 0; i < 8; i++) { sem_wait(&sem_octavos); } printf("***************************************** \n"); printf("Jugando cuartos \n"); int index_1; char * nombre_equipo_1; int index_2; char * nombre_equipo_2; //partido 57: Wp49 vs Wp50 jugar_partido_winners(winer_octavos[0], winer_octavos[1], 0, winer_cuartos); //partido 58: Wp53 vs Wp54 jugar_partido_winners(winer_octavos[4], winer_octavos[5], 1, winer_cuartos); //partido 59: Wp51 vs Wp52 jugar_partido_winners(winer_octavos[2], winer_octavos[3], 2, winer_cuartos); //partido 60: Wp55 vs Wp56 jugar_partido_winners(winer_octavos[6], winer_octavos[7], 3, winer_cuartos); } char* winer_semis[] = {"Wp61", "Wp62"}; char* champion[] = {"Wfinal"}; void jugar_semis () { printf("***************************************** \n"); printf("Jugando semis \n"); //partido 61: Wp57 vs Wp58 jugar_partido_winners(winer_cuartos[0], winer_cuartos[1], 0, winer_semis); //partido 62: Wp59 vs Wp60 jugar_partido_winners(winer_cuartos[2], winer_cuartos[3], 1, winer_semis); } void jugar_final () { printf("***************************************** \n"); printf("Jugando final \n"); //final: Wp91 vs Wp62 jugar_partido_winners(winer_semis[0], winer_semis[1], 0, champion); printf("***************************************** \n"); printf("El nuevo campeon del mundo es %s \n", champion[0]); printf("***************************************** \n"); } int main () { sem_init(&sem_grupos,0,0); sem_init(&sem_octavos,0,0); sem_init(&sem_cuartos,0,0); sem_init(&sem_semi,0,0); ////////////////////////////////////////////////////////////////////// // start mundial ////////////////////////////////////////////////////////////////////// srand(time(NULL)); jugar_grupos(); jugar_octavos(); 
jugar_cuartos(); jugar_semis(); jugar_final(); ///////////////////////////////////////////////////////////////////// // end mundial ////////////////////////////////////////////////////////////////////// } //Para compilar: gcc simulador_paralelo.c -o simulador_paralelo -pthread //Para ejecutar: ./simulador_paralelo
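The two "//TODO:revisar" lines in llenar_tabla() leave the goal-difference column (index 7) at zero. A small sketch (not in the original) of one way to fill it, derived from the GF (index 5) and GC (index 6) columns the function already maintains:

//columna 8 = Dif(Diferencia de Gol)
tabla[equipo_1][7] = tabla[equipo_1][5] - tabla[equipo_1][6];
tabla[equipo_2][7] = tabla[equipo_2][5] - tabla[equipo_2][6];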
the_stack_data/196582.c
/*
 * IMPLEMENTATION OF THE RUNGE-KUTTA (RK4) METHOD
 * FOR CALCULATING A FUNCTIONAL VALUE
 * WHEN THE DERIVATIVE (SLOPE) AND AN INITIAL VALUE ARE GIVEN
 *
 * F  : USER-GIVEN DERIVATIVE FUNCTION
 * x0 : LOWER LIMIT OF INTEGRATION
 * y0 : FUNCTIONAL VALUE AT x0
 * xn : UPPER LIMIT OF INTEGRATION
 * h  : FIXED STEP SIZE (LIMIT -> 0)
 * n  : FIXED NUMBER OF INTERVALS (LIMIT -> infinity)
 */
#include <stdio.h>

double RungeK(const double x0, const double y0, const double xn, const double h,
              double F(double, double)) {
    double y = y0, k1, k2, k3, k4, k;
    for (double x = x0; x <= xn; x += h) {
        k1 = h * F(x, y);
        k2 = h * F(x + h / 2, y + k1 / 2);
        k3 = h * F(x + h / 2, y + k2 / 2);
        k4 = h * F(x + h, y + k3);
        k = (k1 + 2 * k2 + 2 * k3 + k4) / 6;
        y += k;
    }
    return y;
}

// User-defined function
double func(const double x, const double y) {
    return x * y;
}

int main(void) {
    printf("RungeK: y = %g\n", RungeK(1, 2, 1.4, 0.1, func));
    return 0;
}
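A quick sanity check (a sketch, not part of the original file; the helper name check_rk4 is made up, and it links against the definitions above plus -lm): for F(x, y) = x*y with y(1) = 2, the exact solution is y(x) = 2*exp((x*x - 1)/2), so the RK4 result at x = 1.4 (about 3.2321) can be compared against the closed form.

#include <stdio.h>
#include <math.h>

double RungeK(double x0, double y0, double xn, double h, double F(double, double));
double func(double x, double y);

void check_rk4(void)
{
    double approx = RungeK(1, 2, 1.4, 0.1, func);
    double exact  = 2.0 * exp((1.4 * 1.4 - 1.0) / 2.0);   /* closed-form solution at x = 1.4 */
    printf("RK4: %.6f  exact: %.6f  error: %.2e\n", approx, exact, approx - exact);
}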
the_stack_data/31871.c
/*
 * FreeRTOS V202111.00
 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * http://www.FreeRTOS.org
 * http://aws.amazon.com/freertos
 *
 * 1 tab == 4 spaces!
 */

/**
 * @brief Mem fault handler.
 */
void MemManage_Handler( void ) __attribute__ (( naked ));
/*-----------------------------------------------------------*/

void MemManage_Handler( void )
{
    __asm volatile
    (
        " tst lr, #4                                        \n"
        " ite eq                                            \n"
        " mrseq r0, msp                                     \n"
        " mrsne r0, psp                                     \n"
        " ldr r1, handler_address_const                     \n"
        " bx r1                                             \n"
        "                                                   \n"
        " .align 4                                          \n"
        " handler_address_const: .word vHandleMemoryFault   \n"
    );
}
/*-----------------------------------------------------------*/
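The assembly above only selects the active stack pointer (MSP or PSP) and branches to vHandleMemoryFault with that pointer in r0. A minimal sketch of the C side (an assumption for illustration, not the handler the FreeRTOS demos actually ship): it relies on the standard Cortex-M exception stack frame layout r0, r1, r2, r3, r12, lr, pc, xpsr, so index 6 is the faulting PC.

#include <stdint.h>

void vHandleMemoryFault( uint32_t * pulFaultStackAddress )
{
    volatile uint32_t ulStackedPC = pulFaultStackAddress[ 6 ];  /* address of the faulting instruction */
    ( void ) ulStackedPC;                                       /* inspect in a debugger */

    for( ; ; )
    {
        /* Halt here; a real handler might log the fault, recover, or reset. */
    }
}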
the_stack_data/1069922.c
#include <stdio.h>

void computeranks(int count, int freq[], int rank[], int topten[]) {
    int i, j;
    for(i = 0; i < count; i++) {
        for(j = 0; j < count; j++) {
            if(i != j)
                if(freq[j] < freq[i])
                    rank[i]++;
        }
    }
    for(i = 0; i < count; i++) {
        if(rank[i] >= count - 10)
            topten[count - rank[i] - 1] = i;
    }
}

int main(void) {
    int topalpha[10];
    int topbigram[10];
    int toptrigram[10];
    int inputalphafreq[26], inputalpharank[26];
    int inputbigramfreq[26*26], inputbigramrank[26*26];
    int inputtrigramfreq[26*26*26], inputtrigramrank[26*26*26];
    char data[100000], pc, ppc;
    int c;                           /* int, not char, so EOF can be detected reliably */
    int i, j, k, datacount;

    for(i = 0; i < 26; i++) {
        inputalphafreq[i] = 0;
        inputalpharank[i] = 0;
        for(j = 0; j < 26; j++) {
            inputbigramfreq[i*26 + j] = 0;
            inputbigramrank[i*26 + j] = 0;
            for(k = 0; k < 26; k++) {
                inputtrigramfreq[i*26*26 + j*26 + k] = 0;
                inputtrigramrank[i*26*26 + j*26 + k] = 0;   /* was k*26 + k, which left most rank entries uninitialized */
            }
        }
    }

    i = 0;
    pc = 0;
    ppc = 0;
    while((c = getchar()) != EOF) {
        data[i++] = (char)c;         /* note: no bounds check against sizeof data */
        if(c >= 'a' && c <= 'z') {
            inputalphafreq[c - 'a']++;
            if(pc > 0 && pc >= 'a' && pc <= 'z') {
                inputbigramfreq[(pc - 'a')*26 + c - 'a']++;
                if(ppc > 0 && ppc >= 'a' && ppc <= 'z') {
                    inputtrigramfreq[(ppc - 'a')*26*26 + (pc - 'a')*26 + c - 'a']++;
                }
            }
        }
        ppc = pc;
        pc = (char)c;
    }
    datacount = i;

    computeranks(26, inputalphafreq, inputalpharank, topalpha);
    computeranks(26*26, inputbigramfreq, inputbigramrank, topbigram);
    computeranks(26*26*26, inputtrigramfreq, inputtrigramrank, toptrigram);

    printf("top ten alphabets:\t");
    for(i = 0; i < 10; i++)
        printf(" %c ", topalpha[i] + 'a');
    printf("\n\t\t\t[e] [a t] [i o n] [s r h] [l d]\n");

    printf("\ntop ten bigrams:\t");
    for(i = 0; i < 10; i++)
        printf(" %c%c ", topbigram[i] / 26 + 'a', topbigram[i] % 26 + 'a');
    printf("\n\t\t\t[th] [he in] [an er on] [re ed nd ou]\n");

    printf("\ntop ten trigrams:\t");
    for(i = 0; i < 10; i++)
        printf(" %c%c%c ", toptrigram[i] / (26*26) + 'a',
               (toptrigram[i] - (toptrigram[i] / (26*26)) * 26*26) / 26 + 'a',
               toptrigram[i] % 26 + 'a');
    printf("\n\t\t\t[the] [ing and] [ion hat] [tha] [ent] [tio]\n");

    // for(i = 0; i < datacount; i++) {
    //     if(data[i] >= 'a' && data[i] <= 'z') {
    //         printf("%c", alphafreq[25 - inputrank[data[i] - 'a']]);
    //     }
    //     else {
    //         printf("%c", data[i]);
    //     }
    // }
    return 0;
}
the_stack_data/76700487.c
#include <stdio.h>
#define MAX (1000009)

int top = 1;
int w[MAX], v[MAX];
int dp[10000009];

int max(int a, int b)
{
    return a > b ? a : b;
}

int main(void)
{
    int n, ww;
    int i, j;
    scanf("%d%d", &n, &ww);
    for (i = 0; i < n; ++i) {
        /* Bounded knapsack: each item has value a, weight b, count c.
           The count is split into binary pieces (1, 2, 4, ... plus a remainder)
           so the problem reduces to a 0/1 knapsack over the pieces. */
        int a, b, c;
        scanf("%d%d%d", &a, &b, &c);
        for (j = 0; c >= (1 << j); ++j) {
            v[top] = a * (1 << j);
            w[top] = b * (1 << j);
            ++top;
            c -= (1 << j);
        }
        if (c) {
            v[top] = a * c;
            w[top] = b * c;
            ++top;
        }
    }
    /* Standard 0/1 knapsack DP; the weight loop runs downwards so each piece is used at most once. */
    for (i = 1; i < top; ++i)
        for (j = ww; j >= 0; --j)
            if (j >= w[i])
                dp[j] = max(dp[j], dp[j - w[i]] + v[i]);
    printf("%d\n", dp[ww]);
    return 0;
}
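As a worked example of the binary splitting (assuming the per-item input order is value, weight, count): for capacity 10 and a single item with value 6, weight 2, count 3, the count splits into pieces of size 1 and 2, producing 0/1 items (value 6, weight 2) and (value 12, weight 4); the downward-scanning DP takes both, and the program prints 18.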
the_stack_data/29826661.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <stdbool.h>

double randnum(const double min, const double max)
{
    double temp = (double)rand() / (double)RAND_MAX;
    temp = min + (max - min)*temp;
    return temp;
}

bool chkcir(const double x, const double y)
{
    const double x_c = 0.5;
    const double y_c = 0.5;
    const double r = 0.5;
    const double f = (x - x_c)*(x - x_c) + (y - y_c)*(y - y_c) - r*r;
    if (f > 0.0) {
        return false;
    }
    else {
        return true;
    }
}

bool chk_square(const double x, const double y)
{
    if (y > 0.25 && y < 0.75 && x < 2.5 && x > 0.5) {
        return true;
    }
    else {
        return false;
    }
}

int main(void)
{
    FILE *of = fopen("quiz.txt", "w");
    if (of == NULL) {
        return 1;
    }
    srand((unsigned int)time(NULL));
    for (int i = 0; i < 100000; i++) {
        double x = randnum(0.0, 3.0);
        double y = randnum(0.0, 3.0);
        if (chkcir(x, y) == true) {
            fprintf(of, "%f %f\n", x, y);
            fprintf(of, "%f %f\n", x + 2.0, y);
        }
        else if (chk_square(x, y) == true) {
            fprintf(of, "%f %f\n", x, y);
        }
    }
    fclose(of);
    return 0;
}
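The same accept/reject test doubles as a Monte Carlo area estimator. A minimal sketch (not part of the original file; it assumes randnum() and chkcir() above are in scope): points are uniform over the 3x3 box, so the hit fraction times 9 should approach the circle's area, pi * 0.5 * 0.5, roughly 0.785.

#include <stdbool.h>

double randnum(double min, double max);
bool chkcir(double x, double y);

/* Estimate the circle's area by rejection sampling over the [0,3]x[0,3] box. */
double estimate_circle_area(int samples)
{
    int hits = 0;
    for (int i = 0; i < samples; i++) {
        double x = randnum(0.0, 3.0);
        double y = randnum(0.0, 3.0);
        if (chkcir(x, y))
            hits++;
    }
    return 9.0 * (double)hits / (double)samples;   /* box area is 9 */
}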
the_stack_data/1334.c
#include <stdio.h> #include <string.h> #define MAX 500 /* En este programa los bits se trabajan de forma invertida: MSB...LSB -> LSB...MSB */ // MARTES 25 MZO: TRAMA 1 INDICA SI ES O NO MODO EXTENDIDO, en caso de SNRME | SABME | SARME then activar una bandera int modoExtendido = 0; enum OrdenS { rrS = 0, rejS = 2, rnrS = 1, srejS = 3, }; enum OrdenU { snrmO = 16, snrmeO = 27, sarmO = 3, sarmeO = 11, sabmO = 7, sabmeO = 15, uiO = 0, discO = 8, simO = 1, upO = 4, rsetO = 19, xidO = 23, }; enum RespuestaU { dmR = 3, uiR = 0, uaR = 12, rdR = 8, rimR = 1, xidR = 23, frmrR = 17, }; int checkTamTipo(const unsigned char *tr) // Se comporta distinto si no se considera del tipo 'unsigned' { if((tr[12]*256)+tr[13] < 1500) return 1; else return 0; } unsigned int getEthernetType(const unsigned char *tr) { unsigned int d, x[2]; x[0] = (unsigned int) tr[12]; x[1] = (unsigned int) tr[13]; return d = (x[0]<<8) | x[1]; } char checkTipoTrama(const unsigned char *tr) // Saber si la trama es de Informacion, Supervision o Unsigned (HDLC) { /* Analizando con bits volteados: Informacion: [nr][nr][nr][p/f][ns][ns][ns][0] Supervision: [nr][nr][nr][p/f][s][s][0][1] Unnumbered: [m][m][m][p/f][m][m][1][1] Hacer dos comparaciones a la vez a7 a6 a5 a4 a3 a2 a1 a0 & 0 0 0 0 0 0 1 1 ___________________________ 0 0 0 0 0 0 a1 a0 a1 a0 == 00 -> Informacion a1 a0 == 10 -> Informacion a1 a0 == 01 -> Supervision a1 a0 == 11 -> Unnumbered */ switch(tr[16] & 3) { case 0: return 'I'; break; case 1: return 'S'; break; case 2: return 'I'; break; case 3: return 'U'; break; } } void getOrdenRespuestaTramaU(const unsigned char pF, unsigned char campoControl, char *ordenOrRespuesta) { // Juntar bits [MM MMM] campo control... --> MMPMMM 101010 --> MM = 10 --> MMM = 010 unsigned char or = (campoControl >> 3) & 28, tmp = (campoControl >> 2) & 3, mmmmm = or | tmp; if(pF == 0) { enum OrdenU ou; switch(mmmmm) { case snrmO: strcpy(ordenOrRespuesta, "SNRM"); break; case snrmeO: strcpy(ordenOrRespuesta, "SNRME"); modoExtendido = 1; break; case sarmO: strcpy(ordenOrRespuesta, "SARM"); break; case sarmeO: strcpy(ordenOrRespuesta, "SARME"); modoExtendido = 1; break; case sabmO: strcpy(ordenOrRespuesta, "SABM"); break; case 15: strcpy(ordenOrRespuesta, "SABME"); modoExtendido = 1; break; case uiO: strcpy(ordenOrRespuesta, "UI"); break; case discO: strcpy(ordenOrRespuesta, "DISC"); break; case simO: strcpy(ordenOrRespuesta, "SIM"); break; case upO: strcpy(ordenOrRespuesta, "UP0"); break; case rsetO: strcpy(ordenOrRespuesta, "RSET"); break; case xidO: strcpy(ordenOrRespuesta, "XID"); break; default: strcpy(ordenOrRespuesta, "---"); break; } } else if(pF == 1) { enum RespuestaU ru; switch(mmmmm) { case dmR: strcpy(ordenOrRespuesta, "DM"); break; case uiR: strcpy(ordenOrRespuesta, "UI"); break; case uaR: strcpy(ordenOrRespuesta, "UA"); break; case rdR: strcpy(ordenOrRespuesta, "RD"); break; case rimR: strcpy(ordenOrRespuesta, "RIM"); break; case xidR: strcpy(ordenOrRespuesta, "XID"); break; case frmrR: strcpy(ordenOrRespuesta, "FRMR"); break; default: strcpy(ordenOrRespuesta, "---"); break; } } } void getOrdenTramaS(unsigned char campoControl, char *ordenOrRespuesta) { enum OrdenS os; switch(campoControl & 12) { case rrS: strcpy(ordenOrRespuesta, "RR"); break; case rejS: strcpy(ordenOrRespuesta, "REJ"); break; case rnrS: strcpy(ordenOrRespuesta, "RNR"); break; case srejS: strcpy(ordenOrRespuesta, "SREJ"); break; default: strcpy(ordenOrRespuesta, "---"); break; } } unsigned char voltearBits(unsigned char o) { unsigned char orig = o, tmp1, final = 0, i; 
for(i = 7; i > 0; i--) { tmp1 = orig & 1; tmp1 <<= i; final = tmp1 | final; orig >>= 1; } return final; } unsigned char getNr(unsigned int tam, const unsigned char *tr) { if(tam < 4) return voltearBits(tr[16]) & 7; else return voltearBits(tr[17]) & 127; } unsigned char getNs(unsigned int tam, const unsigned char *tr) { if(tam < 4) return voltearBits(tr[16]) & 112; else return voltearBits(tr[17]) & 127; } int main(void) { unsigned char trama[MAX], z, hex[3]; unsigned int eth, flag = 0, sync = 0, pos = 0, j; FILE *arch = fopen("tramas", "r"), *salida = fopen("analisisTramas", "w"); if(arch == NULL || salida == NULL) { perror("Error [Archivo]"); return -1; } hex[2] = '\0'; while(feof(arch) == 0) { z = (char) fgetc(arch); if(z != ' ') { if(z == '\n') { if(flag == 1) { unsigned char pfBit = trama[15] & 1; unsigned int tamanio = (trama[12] << 8) | trama[13]; fprintf(salida, "MAC destino:\t"); for(j = 0; j < 6; j++) { fprintf(salida, "%02X", trama[j]); if(j < 5) fprintf(salida, ":"); } fprintf(salida, "\n"); fprintf(salida, "MAC origen:\t"); for(j = 6; j < 12; j++) { fprintf(salida, "%02X", trama[j]); if(j < 11) fprintf(salida, ":"); } fprintf(salida, "\n"); if(checkTamTipo(trama)) { char tipoTrama = checkTipoTrama(trama), ordenOrRespuesta[6]; fprintf(salida, "Trama:\t\tIEEE 802.3\n"); fprintf(salida, "Tamanio:\t%d bytes\n", tamanio); if(tipoTrama == 'U') { getOrdenRespuestaTramaU(pfBit, trama[16], ordenOrRespuesta); fprintf(salida, "Tipo:\t\t%c - %s\n", tipoTrama, ordenOrRespuesta); memset(ordenOrRespuesta, '\0', strlen(ordenOrRespuesta)); // Limpiar la cadena } else if(tipoTrama == 'S') { fprintf(salida, "N(r): \t\t%u\n", getNr(tamanio, ordenOrRespuesta)); getOrdenTramaS(trama[16], ordenOrRespuesta); fprintf(salida, "Tipo:\t\t%c - %s\n", tipoTrama, ordenOrRespuesta); memset(ordenOrRespuesta, '\0', strlen(ordenOrRespuesta)); } else { fprintf(salida, "N(r): \t\t%u\n", getNr(tamanio, ordenOrRespuesta)); fprintf(salida, "N(s): \t\t%u\n", getNs(tamanio, ordenOrRespuesta)); fprintf(salida, "Tipo:\t\t%c\n", tipoTrama); } } else { eth = getEthernetType(trama); fprintf(salida, "Trama:\t\tEthernet\n"); fprintf(salida, "Ethertype:\t0x%04X = %d ", eth, eth); if(eth == 2048) fprintf(salida, "(DOD IP)\n"); else if(eth == 2054) fprintf(salida, "(ARP)\n"); } if(pfBit == 0) fprintf(salida, "P/F:\t\t[P]\n"); else if(pfBit == 1) fprintf(salida, "P/F:\t\t[F]\n"); fprintf(salida, "\n"); pos = 0; } else flag = 1; } else { if(sync == 0) { hex[sync] = z; sync = 1; } else { hex[sync] = z; sscanf(hex, "%2x", &trama[pos]); sync = 0; pos++; } flag = 0; } } } fprintf(salida, "Ortega Ortunio Eder - 2CM9 | multiaportes.com\n"); fclose(salida); fclose(arch); return 0; }
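One reading note on the N(r)/N(s) output above: getNr() and getNs() index bytes 16-17 of a frame buffer, but the calls pass ordenOrRespuesta (a 6-byte string), which reads past its end. A hedged sketch of what was presumably intended, using the trama buffer parsed in the same block:

fprintf(salida, "N(r): \t\t%u\n", getNr(tamanio, trama));
fprintf(salida, "N(s): \t\t%u\n", getNs(tamanio, trama));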
the_stack_data/125140063.c
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#endif

#if defined(__cplusplus)
extern "C" {
#endif

#ifdef _WIN32
int entropy_fun(unsigned char buf[], unsigned int len)
{
    HCRYPTPROV provider;
    unsigned __int64 pentium_tsc[1];
    unsigned int i;
    int result = 0;

    if (CryptAcquireContext(&provider, NULL, NULL, PROV_RSA_FULL,
                            CRYPT_VERIFYCONTEXT | CRYPT_SILENT))
    {
        result = CryptGenRandom(provider, len, buf);
        CryptReleaseContext(provider, 0);
        if (result)
            return len;
    }

    QueryPerformanceCounter((LARGE_INTEGER *)pentium_tsc);
    for(i = 0; i < 8 && i < len; ++i)
        buf[i] = ((unsigned char*)pentium_tsc)[i];
    return i;
}
#else
int entropy_fun(unsigned char buf[], unsigned int len)
{
    int frand = open("/dev/random", O_RDONLY);
    int rlen = 0;
    if (frand != -1)
    {
        rlen = (int)read(frand, buf, len);
        close(frand);
    }
    return rlen;
}
#endif

#if defined(__cplusplus)
}
#endif
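A minimal usage sketch (not part of the file above; main() and the 16-byte buffer are illustrative): request a few bytes from entropy_fun() and print them in hex. Note that either path may return fewer bytes than requested, which is why the loop uses the returned count.

#include <stdio.h>

int entropy_fun(unsigned char buf[], unsigned int len);

int main(void)
{
    unsigned char seed[16];
    int got = entropy_fun(seed, sizeof seed);
    printf("entropy_fun returned %d byte(s):", got);
    for (int i = 0; i < got; i++)
        printf(" %02x", seed[i]);
    printf("\n");
    return 0;
}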
the_stack_data/59233.c
// RUN: env SDKROOT="/" %clang -arch arm64 -c -### %s 2>&1 | \
// RUN:   FileCheck %s
//
// REQUIRES: system-darwin
//
// CHECK: "-triple" "arm64-apple-macosx{{[0-9.]+}}"
the_stack_data/151782.c
/* miniz.c v1.11 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing See "unlicense" statement at the end of this file. Rich Geldreich <[email protected]>, last updated May 27, 2011 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros). * Change History May 15, v1.09 - Initial stable release. May 27, v1.10 - Substantial compressor optimizations: Level 1 is now ~4x faster than before. The L1 compressor's throughput now varies between 70-110MB/sec. on a Core i7 (actual throughput varies depending on the type of data, and x64 vs. x86). Improved baseline L2-L9 compression perf. Also, greatly improved compression perf. issues on some file types. Refactored the compression code for better readability and maintainability. Added level 10 compression level (L10 has slightly better ratio than level 9, but could have a potentially large drop in throughput on some files). May 28, v1.11 - Added statement from unlicense.org * Deflate/Inflate implementation notes: Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses approximately as well as zlib. Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function coroutine: see tinfl_decompress(). It supports decompression into a 32KB wrapping buffer or into a memory block large enough to hold the entire file. The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation. * zlib-style API notes: miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in zlib replacement in many apps: The z_stream struct, optional memory allocation callbacks deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound inflateInit/inflateInit2/inflate/inflateEnd compress, compress2, compressBound, uncompress CRC-32, Adler-32 Supports raw deflate streams or standard zlib streams with adler-32 checking. Limitations: The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries. I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but there are no guarantees that miniz.c pulls this off perfectly. * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by Alex Evans. Supports 1-4 bytes/pixel images. * ZIP archive API notes: The ZIP archive API's where designed with simplicity and efficiency in mind, with just enough abstraction to get the job done with minimal fuss. There are simple API's to retrieve file information, read files from existing archives, create new archives, append new files to existing archives, or clone archive data from one archive to another. It supports archives located in memory or the heap, on disk (using stdio.h), or you can specify custom file read/write callbacks. - Archive reading: Just call this function to read a single file from a disk archive: void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); For more complex cases, use the "mz_zip_reader" functions. 
Upon opening an archive, the entire central directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files. - Archives file scanning: The simple way is to use this function to scan a loaded archive for a specific file: int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); The locate operation can optionally check file comments too, which (as one example) can be used to identify multiple versions of the same file in an archive. This function uses a simple linear search through the central directory, so it's not very fast. Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and retrieve detailed info on each file by calling mz_zip_reader_file_stat(). - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data to disk and builds an exact image of the central directory in memory. The central directory image is written all at once at the end of the archive file when the archive is finalized. The archive writer can optionally align each file's local header and file data to any power of 2 alignment, which can be useful when the archive will be read from optical media. Also, the writer supports placing arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still readable by any ZIP tool. - Archive appending: The simple way to add a single file to an archive is to call this function: mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); The archive will be created if it doesn't already exist, otherwise it'll be appended to. Note the appending is done in-place and is not an atomic operation, so if something goes wrong during the operation it's possible the archive could be left without a central directory (although the local file headers and file data will be fine, so the archive will be recoverable). For more complex archive modification scenarios: 1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to preserve into a new archive using using the mz_zip_writer_add_from_zip_reader() function (which compiles the compressed file data as-is). When you're done, delete the old archive and rename the newly written archive, and you're done. This is safe but requires a bunch of temporary disk space or heap memory. 2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(), append new files as needed, then finalize the archive which will write an updated central directory to the original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a possibility that the archive's central directory could be lost with this method if anything goes wrong, though. - ZIP archive support limitations: No zip64 or spanning support. Extraction functions can only handle unencrypted, stored or deflated files. Requires streams capable of seeking. * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it. * Important: For best perf. 
be sure to customize the below macros for your target platform: #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_LITTLE_ENDIAN 1 #define MINIZ_HAS_64BIT_REGISTERS 1 */ #ifndef MINIZ_HEADER_INCLUDED #define MINIZ_HEADER_INCLUDED #include <stdlib.h> // Defines to completely disable specific portions of miniz.c: // If all macros here are defined the only functionality remaining will be CRC-32, adler-32, tinfl, and tdefl. // Define MINIZ_NO_STDIO to disable all usage and any functions which rely on stdio for file I/O. //#define MINIZ_NO_STDIO // If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able to get the current time, or // get/set file times. #define MINIZ_NO_TIME // Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's. #define MINIZ_NO_ARCHIVE_APIS // Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive API's. #define MINIZ_NO_ARCHIVE_WRITING_APIS // Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression API's. //#define MINIZ_NO_ZLIB_APIS // Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent conflicts against stock zlib. #define MINIZ_NO_ZLIB_COMPATIBLE_NAMES // Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc. // Note if MINIZ_NO_MALLOC is defined then the user must always provide custom user alloc/free/realloc // callbacks to the zlib and archive API's, and a few stand-alone helper API's which don't provide custom user // functions (such as tdefl_compress_mem_to_heap() and tinfl_decompress_mem_to_heap()) won't work. //#define MINIZ_NO_MALLOC #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || defined(__i386) || defined(__i486__) || defined(__i486) || defined(i386) || defined(__ia64__) || defined(__x86_64__) // MINIZ_X86_OR_X64_CPU is only used to help set the below macros. #define MINIZ_X86_OR_X64_CPU 1 #endif #if (__BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. #define MINIZ_LITTLE_ENDIAN 1 #endif #if MINIZ_X86_OR_X64_CPU // Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient integer loads and stores from unaligned addresses. #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #endif #if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || defined(_LP64) || defined(__LP64__) || defined(__ia64__) || defined(__x86_64__) // Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are reasonably fast (and don't involve compiler generated calls to helper functions). #define MINIZ_HAS_64BIT_REGISTERS 1 #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API Definitions. // For more compatibility with zlib, miniz.c uses unsigned long for some parameters/struct members. typedef unsigned long mz_ulong; // Heap allocation callbacks. // Note that mz_alloc_func parameter types purpsosely differ from zlib's: items/size is size_t, not unsigned long. typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size); typedef void (*mz_free_func)(void *opaque, void *address); typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items, size_t size); #define MZ_ADLER32_INIT (1) // mz_adler32() returns the initial adler-32 value to use when called with ptr==NULL. mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len); #define MZ_CRC32_INIT (0) // mz_crc32() returns the initial CRC-32 value to use when called with ptr==NULL. 
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len); // Compression strategies. enum { MZ_DEFAULT_STRATEGY = 0, MZ_FILTERED = 1, MZ_HUFFMAN_ONLY = 2, MZ_RLE = 3, MZ_FIXED = 4 }; // Method #define MZ_DEFLATED 8 #ifndef MINIZ_NO_ZLIB_APIS #define MZ_VERSION "9.1.11" #define MZ_VERNUM 0x91B0 #define MZ_VER_MAJOR 9 #define MZ_VER_MINOR 1 #define MZ_VER_REVISION 11 #define MZ_VER_SUBREVISION 0 // Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The other stuff is for advanced use. enum { MZ_NO_FLUSH = 0, MZ_PARTIAL_FLUSH = 1, MZ_SYNC_FLUSH = 2, MZ_FULL_FLUSH = 3, MZ_FINISH = 4, MZ_BLOCK = 5 }; // Return status codes. MZ_PARAM_ERROR is non-standard. enum { MZ_OK = 0, MZ_STREAM_END = 1, MZ_NEED_DICT = 2, MZ_ERRNO = -1, MZ_STREAM_ERROR = -2, MZ_DATA_ERROR = -3, MZ_MEM_ERROR = -4, MZ_BUF_ERROR = -5, MZ_VERSION_ERROR = -6, MZ_PARAM_ERROR = -10000 }; // Compression levels. enum { MZ_NO_COMPRESSION = 0, MZ_BEST_SPEED = 1, MZ_BEST_COMPRESSION = 9, MZ_DEFAULT_COMPRESSION = -1 }; // Window bits #define MZ_DEFAULT_WINDOW_BITS 15 struct mz_internal_state; // Compression/decompression stream struct. typedef struct mz_stream_s { const unsigned char *next_in; // pointer to next byte to read unsigned int avail_in; // number of bytes available at next_in mz_ulong total_in; // total number of bytes consumed so far unsigned char *next_out; // pointer to next byte to write unsigned int avail_out; // number of bytes that can be written to next_out mz_ulong total_out; // total number of bytes produced so far char *msg; // error msg (unused) struct mz_internal_state *state; // internal state, allocated by zalloc/zfree mz_alloc_func zalloc; // optional heap allocation function (defaults to malloc) mz_free_func zfree; // optional heap free function (defaults to free) void *opaque; // heap alloc function user pointer int data_type; // data_type (unused) mz_ulong adler; // adler32 of the source or uncompressed data mz_ulong reserved; // not used } mz_stream; typedef mz_stream *mz_streamp; // Returns the version string of miniz.c. const char *mz_version(void); // mz_deflateInit() initializes a compressor with default options: // Parameters: // pStream must point to an initialized mz_stream struct. // level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. // level 1 enables a specially optimized compression function that's been optimized purely for performance, not ratio. // (This special func. is currently only enabled when MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.) // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if the input parameters are bogus. // MZ_MEM_ERROR on out of memory. int mz_deflateInit(mz_streamp pStream, int level); // mz_deflateInit2() is like mz_deflate(), except with more control: // Additional parameters: // method must be MZ_DEFLATED // window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no header or footer) // mem_level must be between [1, 9] (it's checked but ignored by miniz.c) int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy); // Quickly resets a compressor without having to reallocate anything. Same as calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). 
int mz_deflateReset(mz_streamp pStream); // mz_deflate() compresses the input to output, consuming as much of the input and producing as much output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or MZ_FINISH. // Return values: // MZ_OK on success (when flushing, or if more input is needed but not available, and/or there's more output to be written but the output buffer is full). // MZ_STREAM_END if all input has been consumed and all output bytes have been written. Don't call mz_deflate() on the stream anymore. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input and/or output buffers are empty. (Fill up the input buffer or free up some output space and try again.) int mz_deflate(mz_streamp pStream, int flush); // mz_deflateEnd() deinitializes a compressor: // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. int mz_deflateEnd(mz_streamp pStream); // mz_deflateBound() returns a (very) conservative upper bound on the amount of data that could be generated by deflate(), assuming flush is set to only MZ_NO_FLUSH or MZ_FINISH. mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len); // Single-call compression functions mz_compress() and mz_compress2(): // Returns MZ_OK on success, or one of the error codes from mz_deflate() on failure. int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level); // mz_compressBound() returns a (very) conservative upper bound on the amount of data that could be generated by calling mz_compress(). mz_ulong mz_compressBound(mz_ulong source_len); // Initializes a decompressor. int mz_inflateInit(mz_streamp pStream); // mz_inflateInit2() is like mz_inflateInit() with an additional option that controls the window size and whether or not the stream has been wrapped with a zlib header/footer: // window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate). int mz_inflateInit2(mz_streamp pStream, int window_bits); // Decompresses the input stream to the output, consuming only as much of the input as needed, and writing as much to the output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. // On the first call, if flush is MZ_FINISH it's assumed the input and output buffers are both sized large enough to decompress the entire stream in a single call (this is slightly faster). // MZ_FINISH implies that there are no more source bytes available beside what's already in the input buffer, and that the output buffer is large enough to hold the rest of the decompressed data. // Return values: // MZ_OK on success. Either more input is needed but not available, and/or there's more output to be written but the output buffer is full. // MZ_STREAM_END if all needed input has been consumed and all output bytes have been written. For zlib streams, the adler-32 of the decompressed data has also been verified. // MZ_STREAM_ERROR if the stream is bogus. 
// MZ_DATA_ERROR if the deflate stream is invalid. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input buffer is empty but the inflater needs more input to continue, or if the output buffer is not large enough. Call mz_inflate() again // with more input data, or with more room in the output buffer (except when using single call decompression, described above). int mz_inflate(mz_streamp pStream, int flush); // Deinitializes a decompressor. int mz_inflateEnd(mz_streamp pStream); // Single-call decompression. // Returns MZ_OK on success, or one of the error codes from mz_inflate() on failure. int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); // Returns a string description of the specified error code, or NULL if the error code is invalid. const char *mz_error(int err); // Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used as a drop-in replacement for the subset of zlib that miniz.c supports. // Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you use zlib in the same project. #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES typedef unsigned char Byte; typedef unsigned int uInt; typedef mz_ulong uLong; typedef Byte Bytef; typedef uInt uIntf; typedef char charf; typedef int intf; typedef void *voidpf; typedef uLong uLongf; typedef void *voidp; typedef void *const voidpc; #define Z_NULL 0 #define Z_NO_FLUSH MZ_NO_FLUSH #define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH #define Z_SYNC_FLUSH MZ_SYNC_FLUSH #define Z_FULL_FLUSH MZ_FULL_FLUSH #define Z_FINISH MZ_FINISH #define Z_BLOCK MZ_BLOCK #define Z_OK MZ_OK #define Z_STREAM_END MZ_STREAM_END #define Z_NEED_DICT MZ_NEED_DICT #define Z_ERRNO MZ_ERRNO #define Z_STREAM_ERROR MZ_STREAM_ERROR #define Z_DATA_ERROR MZ_DATA_ERROR #define Z_MEM_ERROR MZ_MEM_ERROR #define Z_BUF_ERROR MZ_BUF_ERROR #define Z_VERSION_ERROR MZ_VERSION_ERROR #define Z_PARAM_ERROR MZ_PARAM_ERROR #define Z_NO_COMPRESSION MZ_NO_COMPRESSION #define Z_BEST_SPEED MZ_BEST_SPEED #define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION #define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION #define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY #define Z_FILTERED MZ_FILTERED #define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY #define Z_RLE MZ_RLE #define Z_FIXED MZ_FIXED #define Z_DEFLATED MZ_DEFLATED #define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS #define alloc_func mz_alloc_func #define free_func mz_free_func #define internal_state mz_internal_state #define z_stream mz_stream #define deflateInit mz_deflateInit #define deflateInit2 mz_deflateInit2 #define deflateReset mz_deflateReset #define deflate mz_deflate #define deflateEnd mz_deflateEnd #define deflateBound mz_deflateBound #define compress mz_compress #define compress2 mz_compress2 #define compressBound mz_compressBound #define inflateInit mz_inflateInit #define inflateInit2 mz_inflateInit2 #define inflate mz_inflate #define inflateEnd mz_inflateEnd #define uncompress mz_uncompress #define crc32 mz_crc32 #define adler32 mz_adler32 #define MAX_WBITS 15 #define MAX_MEM_LEVEL 9 #define zError mz_error #define ZLIB_VERSION MZ_VERSION #define ZLIB_VERNUM MZ_VERNUM #define ZLIB_VER_MAJOR MZ_VER_MAJOR #define ZLIB_VER_MINOR MZ_VER_MINOR #define ZLIB_VER_REVISION MZ_VER_REVISION #define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION #define zlibVersion mz_version #define zlib_version mz_version() #endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES #endif // MINIZ_NO_ZLIB_APIS // ------------------- Types and macros 
typedef unsigned char mz_uint8; typedef signed short mz_int16; typedef unsigned short mz_uint16; typedef unsigned int mz_uint32; typedef unsigned int mz_uint; typedef long long mz_int64; typedef unsigned long long mz_uint64; typedef int mz_bool; #define MZ_FALSE (0) #define MZ_TRUE (1) // Works around MSVC's spammy "warning C4127: conditional expression is constant" message. #ifdef _MSC_VER #define MZ_MACRO_END while (0, 0) #else #define MZ_MACRO_END while (0) #endif // ------------------- ZIP archive reading/writing #ifndef MINIZ_NO_ARCHIVE_APIS enum { MZ_ZIP_MAX_IO_BUF_SIZE = 64*1024, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256 }; typedef struct { mz_uint32 m_file_index; mz_uint32 m_central_dir_ofs; mz_uint16 m_version_made_by; mz_uint16 m_version_needed; mz_uint16 m_bit_flag; mz_uint16 m_method; #ifndef MINIZ_NO_TIME time_t m_time; #endif mz_uint32 m_crc32; mz_uint64 m_comp_size; mz_uint64 m_uncomp_size; mz_uint16 m_internal_attr; mz_uint32 m_external_attr; mz_uint64 m_local_header_ofs; mz_uint32 m_comment_size; char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE]; char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE]; } mz_zip_archive_file_stat; typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n); typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n); struct mz_zip_internal_state_tag; typedef struct mz_zip_internal_state_tag mz_zip_internal_state; typedef enum { MZ_ZIP_MODE_INVALID = 0, MZ_ZIP_MODE_READING = 1, MZ_ZIP_MODE_WRITING = 2, MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3 } mz_zip_mode; typedef struct { mz_uint64 m_archive_size; mz_uint64 m_central_directory_file_ofs; mz_uint m_total_files; mz_zip_mode m_zip_mode; mz_uint m_file_offset_alignment; mz_alloc_func m_pAlloc; mz_free_func m_pFree; mz_realloc_func m_pRealloc; void *m_pAlloc_opaque; mz_file_read_func m_pRead; mz_file_write_func m_pWrite; void *m_pIO_opaque; mz_zip_internal_state *m_pState; } mz_zip_archive; typedef enum { MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100, MZ_ZIP_FLAG_IGNORE_PATH = 0x0200, MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400, MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800 } mz_zip_flags; // ZIP archive reading // Inits a ZIP archive reader. // These functions read and validate the archive's central directory. mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags); mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags); #endif // Returns the total number of files in the archive. mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip); // Returns detailed information about an archive file entry. mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat); // Determines if an archive file entry is a directory entry. mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index); mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index); // Retrieves the filename of an archive file entry. // Returns the number of bytes written to pFilename, or if filename_buf_size is 0 this function returns the number of bytes needed to fully store the filename. 
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size);

// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags);

// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);

// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags);

// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags);

// Extracts an archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags);

#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags);
#endif

// Ends archive reading, freeing all allocations, and closing the input archive file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);

// ZIP archive writing

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size);

#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning);
#endif

// Converts a ZIP archive reader object into a writer object, to allow efficient in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the archive's filename so it can be reopened for writing. If the file can't be reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be growable using the realloc callback (which defaults to realloc unless you've overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's user-provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what you're doing, because if execution stops or something goes wrong before
// the archive is finalized, the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename);

// Adds the contents of a memory buffer to an archive. These functions record the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a forward slash and an empty buffer.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32);

#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records the disk file's modified time into the archive.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags);
#endif

// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index);

// Finalizes the archive by writing the central directory records followed by the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize);

// Ends archive writing, freeing all allocations, and closing the output file if mz_zip_writer_init_file() was used.
// Note: for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);

// Misc. high-level helper functions:

// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically) appends a memory blob to a ZIP archive.
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags);

// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags);

#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

#endif // #ifndef MINIZ_NO_ARCHIVE_APIS

// ------------------- Low-level Decompression API Definitions

// Decompression flags.
enum { TINFL_FLAG_PARSE_ZLIB_HEADER = 1, TINFL_FLAG_HAS_MORE_INPUT = 2, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, TINFL_FLAG_COMPUTE_ADLER32 = 8 };

// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block allocated via malloc().
// On entry:
//  pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data to decompress.
// On return:
//  Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size, which could be larger than src_buf_len on uncompressible data. // The caller must free() the returned block when it's no longer needed. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tinfl_decompress_mem_to_mem() decompresses a block in memory to another block in memory. // Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes written on success. #define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1)) size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // tinfl_decompress_mem_to_callback() decompresses a block in memory to an internal 32KB buffer, and a user provided callback function will be called to flush the buffer. // Returns 1 on success or 0 on failure. typedef int (*tinfl_put_buf_func_ptr)(const void* pBuf, int len, void *pUser); int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor; // Max size of LZ dictionary. #define TINFL_LZ_DICT_SIZE 32768 // Return status. typedef enum { TINFL_STATUS_BAD_PARAM = -3, TINFL_STATUS_ADLER32_MISMATCH = -2, TINFL_STATUS_FAILED = -1, TINFL_STATUS_DONE = 0, TINFL_STATUS_NEEDS_MORE_INPUT = 1, TINFL_STATUS_HAS_MORE_OUTPUT = 2 } tinfl_status; // Initializes the decompressor to its initial state. #define tinfl_init(r) do { (r)->m_state = 0; } MZ_MACRO_END #define tinfl_get_adler32(r) (r)->m_check_adler32 // Main low-level decompressor coroutine function. This is the only function actually needed for decompression. All the other functions are just high-level helpers for improved usability. // This is a universal API, i.e. it can be used as a building block to build any desired higher level decompression API. In the limit case, it can be called once per every byte input or output. tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags); // Internal/private bits follow. enum { TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19, TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS }; typedef struct { mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0]; mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2]; } tinfl_huff_table; #if MINIZ_HAS_64BIT_REGISTERS #define TINFL_USE_64BIT_BITBUF 1 #endif #if TINFL_USE_64BIT_BITBUF typedef mz_uint64 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (64) #else typedef mz_uint32 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (32) #endif struct tinfl_decompressor_tag { mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES]; tinfl_bit_buf_t m_bit_buf; size_t m_dist_from_out_buf_start; tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES]; mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137]; }; // ------------------- Low-level Compression API Definitions // Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly slower, and raw/dynamic blocks will be output more frequently). 
#define TDEFL_LESS_MEMORY 0 // Compression flags logically OR'd together (low 12 bits contain the max. number of probes per dictionary search): // TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap compression), 4095=Huffman+LZ (slowest/best compression). enum { TDEFL_HUFFMAN_ONLY = 0, TDEFL_DEFAULT_MAX_PROBES = 128, TDEFL_MAX_PROBES_MASK = 0xFFF }; // TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before the deflate data, and the Adler-32 of the source data at the end. Otherwise, you'll get raw deflate data. // TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even when not writing zlib headers). // TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more efficient lazy parsing. // TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's initialization time to the minimum, but the output may vary from run to run given the same input (depending on the contents of memory). // TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. enum { TDEFL_WRITE_ZLIB_HEADER = 0x01000, TDEFL_COMPUTE_ADLER32 = 0x02000, TDEFL_GREEDY_PARSING_FLAG = 0x04000, TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000, TDEFL_RLE_MATCHES = 0x10000, TDEFL_FILTER_MATCHES = 0x20000, TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000, TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000 }; // High level compression functions: // tdefl_compress_mem_to_heap() compresses a block in memory to a heap block allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of source block to compress. // flags: The max match finder probes (default is 128) logically OR'd against the above flags. Higher probes are slower but improve compression. // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pOut_len will be set to the compressed data's size, which could be larger than src_buf_len on uncompressible data. // The caller must free() the returned block when it's no longer needed. void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tdefl_compress_mem_to_mem() compresses a block in memory to another block in memory. // Returns 0 on failure. size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // Compresses an image to a compressed PNG file in memory. // On entry: // pImage, w, h, and num_chans describe the image to compress. num_chans may be 1, 2, 3, or 4. // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pLen_out will be set to the size of the PNG image file. // The caller must free() the returned heap block (which will typically be larger than *pLen_out) when it's no longer needed. void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out); // Output stream interface. The compressor uses this interface to write compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time. typedef mz_bool (*tdefl_put_buf_func_ptr)(const void* pBuf, int len, void *pUser); // tdefl_compress_mem_to_output() compresses a block to an output stream. The above helpers use this function internally. 
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); enum { TDEFL_MAX_HUFF_TABLES = 3, TDEFL_MAX_HUFF_SYMBOLS_0 = 288, TDEFL_MAX_HUFF_SYMBOLS_1 = 32, TDEFL_MAX_HUFF_SYMBOLS_2 = 19, TDEFL_LZ_DICT_SIZE = 32768, TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1, TDEFL_MIN_MATCH_LEN = 3, TDEFL_MAX_MATCH_LEN = 258 }; // TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed output block (using static/fixed Huffman codes). #if TDEFL_LESS_MEMORY enum { TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13 ) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 12, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #else enum { TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13 ) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 15, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #endif // The low-level tdefl functions below may be used directly if the above helper functions aren't flexible enough. The low-level functions don't make any heap allocations, unlike the above helper functions. typedef enum { TDEFL_STATUS_BAD_PARAM = -2, TDEFL_STATUS_PUT_BUF_FAILED = -1, TDEFL_STATUS_OKAY = 0, TDEFL_STATUS_DONE = 1, } tdefl_status; // Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums typedef enum { TDEFL_NO_FLUSH = 0, TDEFL_SYNC_FLUSH = 2, TDEFL_FULL_FLUSH = 3, TDEFL_FINISH = 4 } tdefl_flush; typedef struct { tdefl_put_buf_func_ptr m_pPut_buf_func; void *m_pPut_buf_user; mz_uint m_flags, m_max_probes[2]; int m_greedy_parsing; mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size; mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end; mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in, m_bit_buffer; mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit, m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index, m_wants_to_finish; tdefl_status m_prev_return_status; const void *m_pIn_buf; void *m_pOut_buf; size_t *m_pIn_buf_size, *m_pOut_buf_size; tdefl_flush m_flush; const mz_uint8 *m_pSrc; size_t m_src_buf_left, m_out_buf_ofs; mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1]; mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE]; mz_uint16 m_next[TDEFL_LZ_DICT_SIZE]; mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE]; mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE]; } tdefl_compressor; // Initializes the compressor. tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); // Compresses a block of data, consuming as much of the input as possible, and writing as much compressed data as possible. tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush); // tdefl_compress_buffer() is only usable when the tdefl_init() is called with a valid tdefl_put_buf_func_ptr. // tdefl_compress_buffer() always consumes the entire input buffer. 
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush); tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d); mz_uint32 tdefl_get_adler32(tdefl_compressor *d); // Create tdefl_compress() flags given zlib-style compression parameters. // level may range from [0,10] (where 10 is absolute max compression, but may be much slower on some files) // window_bits may be -15 (raw deflate) or 15 (zlib) // strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY, MZ_RLE, or MZ_FIXED mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy); #ifdef __cplusplus } #endif #endif // MINIZ_HEADER_INCLUDED // ------------------- End of Header: Implementation follows. (If you only want the header, define MINIZ_HEADER_FILE_ONLY.) #ifndef MINIZ_HEADER_FILE_ONLY typedef unsigned char mz_validate_uint16[sizeof(mz_uint16)==2 ? 1 : -1]; typedef unsigned char mz_validate_uint32[sizeof(mz_uint32)==4 ? 1 : -1]; typedef unsigned char mz_validate_uint64[sizeof(mz_uint64)==8 ? 1 : -1]; #include <string.h> #include <assert.h> #define MZ_ASSERT(x) assert(x) #ifdef MINIZ_NO_MALLOC #define MZ_MALLOC(x) NULL #define MZ_FREE(x) x, ((void)0) #define MZ_REALLOC(p, x) NULL #else #define MZ_MALLOC(x) malloc(x) #define MZ_FREE(x) free(x) #define MZ_REALLOC(p, x) realloc(p, x) #endif #define MZ_MAX(a,b) (((a)>(b))?(a):(b)) #define MZ_MIN(a,b) (((a)<(b))?(a):(b)) #define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj)) #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN #define MZ_READ_LE16(p) *((const mz_uint16 *)(p)) #define MZ_READ_LE32(p) *((const mz_uint32 *)(p)) #else #define MZ_READ_LE16(p) ((mz_uint32)(((const mz_uint8 *)(p))[0]) | ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U)) #define MZ_READ_LE32(p) ((mz_uint32)(((const mz_uint8 *)(p))[0]) | ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U)) #endif #if !defined(_MSC_VER) && !defined(__MINGW32__) && !defined(__MINGW64__) && !defined(__forceinline) #ifdef __cplusplus #define __forceinline inline #else #define __forceinline #endif #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API's static void *def_alloc_func(void *opaque, size_t items, size_t size) { (void)opaque; return MZ_MALLOC(items * size); } static void def_free_func(void *opaque, void *address) { (void)opaque, MZ_FREE(address); } static void *def_realloc_func(void *opaque, void *address, size_t items, size_t size) { (void)opaque; return MZ_REALLOC(address, items * size); } mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) { mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16); size_t block_len = buf_len % 5552; if (!ptr) return MZ_ADLER32_INIT; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for ( ; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } return (s2 << 16) + s1; } // Karl Malbrain's compact CRC-32. 
See "A compact CCITT crc16 and crc32 C implementation that balances processor cache usage against speed": http://www.geocities.com/malbrain/ mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) { static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c }; if (!ptr) return MZ_CRC32_INIT; crc = ~crc; while (buf_len--) { mz_uint8 b = *ptr++; crc = (crc >> 4) ^ s_crc32[(crc & 0xF) ^ (b & 0xF)]; crc = (crc >> 4) ^ s_crc32[(crc & 0xF) ^ (b >> 4)]; } return ~crc; } #ifndef MINIZ_NO_ZLIB_APIS const char *mz_version(void) { return MZ_VERSION; } int mz_deflateInit(mz_streamp pStream, int level) { return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY); } int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy) { tdefl_compressor *pComp; mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 | tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy); if (!pStream) return MZ_STREAM_ERROR; if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = MZ_ADLER32_INIT; pStream->msg = NULL; pStream->reserved = 0; pStream->total_in = 0; pStream->total_out = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1, sizeof(tdefl_compressor)); if (!pComp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pComp; if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) { mz_deflateEnd(pStream); return MZ_PARAM_ERROR; } return MZ_OK; } int mz_deflateReset(mz_streamp pStream) { if ((!pStream) || (!pStream->state) || (!pStream->zalloc) || (!pStream->zfree)) return MZ_STREAM_ERROR; pStream->total_in = pStream->total_out = 0; tdefl_init((tdefl_compressor*)pStream->state, NULL, NULL, ((tdefl_compressor*)pStream->state)->m_flags); return MZ_OK; } int mz_deflate(mz_streamp pStream, int flush) { size_t in_bytes, out_bytes; mz_ulong orig_total_in, orig_total_out; int mz_status = MZ_OK; if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) || (!pStream->next_out)) return MZ_STREAM_ERROR; if (!pStream->avail_out) return MZ_BUF_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if (((tdefl_compressor*)pStream->state)->m_prev_return_status == TDEFL_STATUS_DONE) return (flush == MZ_FINISH) ? 
MZ_STREAM_END : MZ_BUF_ERROR; orig_total_in = pStream->total_in; orig_total_out = pStream->total_out; for ( ; ; ) { tdefl_status defl_status; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; defl_status = tdefl_compress((tdefl_compressor*)pStream->state, pStream->next_in, &in_bytes, pStream->next_out, &out_bytes, (tdefl_flush)flush); pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tdefl_get_adler32((tdefl_compressor*)pStream->state); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (defl_status < 0) { mz_status = MZ_STREAM_ERROR; break; } else if (defl_status == TDEFL_STATUS_DONE) { mz_status = MZ_STREAM_END; break; } else if (!pStream->avail_out) break; else if ((!pStream->avail_in) && (flush != MZ_FINISH)) { if ((flush) || (pStream->total_in != orig_total_in) || (pStream->total_out != orig_total_out)) break; return MZ_BUF_ERROR; // Can't make forward progress without some input. } } return mz_status; } int mz_deflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) { (void)pStream; // This is really over conservative. (And lame, but it's actually pretty tricky to compute a true upper bound given the way tdefl's blocking works.) return MZ_MAX(128 + (source_len * 110) / 100, 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5); } int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level) { int status; mz_stream stream; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_deflateInit(&stream, level); if (status != MZ_OK) return status; status = mz_deflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_deflateEnd(&stream); return (status == MZ_OK) ? 
MZ_BUF_ERROR : status; } *pDest_len = stream.total_out; return mz_deflateEnd(&stream); } int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { return mz_compress2(pDest, pDest_len, pSource, source_len, MZ_DEFAULT_COMPRESSION); } mz_ulong mz_compressBound(mz_ulong source_len) { return mz_deflateBound(NULL, source_len); } typedef struct { tinfl_decompressor m_decomp; mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed; int m_window_bits; mz_uint8 m_dict[TINFL_LZ_DICT_SIZE]; tinfl_status m_last_status; } inflate_state; int mz_inflateInit2(mz_streamp pStream, int window_bits) { inflate_state *pDecomp; if (!pStream) return MZ_STREAM_ERROR; if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS)) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = 0; pStream->msg = NULL; pStream->total_in = 0; pStream->total_out = 0; pStream->reserved = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pDecomp = (inflate_state*)pStream->zalloc(pStream->opaque, 1, sizeof(inflate_state)); if (!pDecomp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pDecomp; tinfl_init(&pDecomp->m_decomp); pDecomp->m_dict_ofs = 0; pDecomp->m_dict_avail = 0; pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT; pDecomp->m_first_call = 1; pDecomp->m_has_flushed = 0; pDecomp->m_window_bits = window_bits; return MZ_OK; } int mz_inflateInit(mz_streamp pStream) { return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS); } int mz_inflate(mz_streamp pStream, int flush) { inflate_state* pState; mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32; size_t in_bytes, out_bytes, orig_avail_in; tinfl_status status; if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState = (inflate_state*)pStream->state; if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER; orig_avail_in = pStream->avail_in; first_call = pState->m_first_call; pState->m_first_call = 0; if (pState->m_last_status < 0) return MZ_DATA_ERROR; if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState->m_has_flushed |= (flush == MZ_FINISH); if ((flush == MZ_FINISH) && (first_call)) { // MZ_FINISH on the first call implies that the input and output buffers are large enough to hold the entire compressed/decompressed file. decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pStream->next_out, pStream->next_out, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (status < 0) return MZ_DATA_ERROR; else if (status != TINFL_STATUS_DONE) { pState->m_last_status = TINFL_STATUS_FAILED; return MZ_BUF_ERROR; } return MZ_STREAM_END; } // flush != MZ_FINISH then we must assume there's more input. 
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT; if (pState->m_dict_avail) { n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); return ((pState->m_last_status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK; } for ( ; ; ) { in_bytes = pStream->avail_in; out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs; status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict, pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pState->m_dict_avail = (mz_uint)out_bytes; n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); if (status < 0) return MZ_DATA_ERROR; // Stream is corrupted (there could be some uncompressed data left in the output dictionary - oh well). else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in)) return MZ_BUF_ERROR; // Signal caller that we can't make forward progress without supplying more input or by setting flush to MZ_FINISH. else if (flush == MZ_FINISH) { // The output buffer MUST be large to hold the remaining uncompressed data when flush==MZ_FINISH. if (status == TINFL_STATUS_DONE) return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END; // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's at least 1 more byte on the way. If there's no more room left in the output buffer then something is wrong. else if (!pStream->avail_out) return MZ_BUF_ERROR; } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) || (!pStream->avail_out) || (pState->m_dict_avail)) break; } return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK; } int mz_inflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { mz_stream stream; int status; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_inflateInit(&stream); if (status != MZ_OK) return status; status = mz_inflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_inflateEnd(&stream); return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? 
MZ_DATA_ERROR : status; } *pDest_len = stream.total_out; return mz_inflateEnd(&stream); } const char *mz_error(int err) { static struct { int m_err; const char *m_pDesc; } s_error_descs[] = { { MZ_OK, "" }, { MZ_STREAM_END, "stream end" }, { MZ_NEED_DICT, "need dictionary" }, { MZ_ERRNO, "file error" }, { MZ_STREAM_ERROR, "stream error" }, { MZ_DATA_ERROR, "data error" }, { MZ_MEM_ERROR, "out of memory" }, { MZ_BUF_ERROR, "buf error" }, { MZ_VERSION_ERROR, "version error" }, { MZ_PARAM_ERROR, "parameter error" } }; mz_uint i; for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i) if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc; return NULL; } #endif //MINIZ_NO_ZLIB_APIS // ------------------- Low-level Decompression (completely independent from all compression API's) #define TINFL_MEMCPY(d, s, l) memcpy(d, s, l) #define TINFL_MEMSET(p, c, l) memset(p, c, l) #define TINFL_CR_BEGIN switch(r->m_state) { case 0: #define TINFL_CR_RETURN(state_index, result) do { status = result; r->m_state = state_index; goto common_exit; case state_index:; } MZ_MACRO_END #define TINFL_CR_RETURN_FOREVER(state_index, result) do { for ( ; ; ) { TINFL_CR_RETURN(state_index, result); } } MZ_MACRO_END #define TINFL_CR_FINISH } // TODO: If the caller has indicated that there's no more input, and we attempt to read beyond the input buf, then something is wrong with the input because the inflator never // reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of the stream with 0's in this scenario. #define TINFL_GET_BYTE(state_index, c) do { \ if (pIn_buf_cur >= pIn_buf_end) { \ for ( ; ; ) { \ if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \ TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \ if (pIn_buf_cur < pIn_buf_end) { \ c = *pIn_buf_cur++; \ break; \ } \ } else { \ c = 0; \ break; \ } \ } \ } else c = *pIn_buf_cur++; } MZ_MACRO_END #define TINFL_NEED_BITS(state_index, n) do { mz_uint c; TINFL_GET_BYTE(state_index, c); bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); num_bits += 8; } while (num_bits < (mz_uint)(n)) #define TINFL_SKIP_BITS(state_index, n) do { if (num_bits < (mz_uint)(n)) { TINFL_NEED_BITS(state_index, n); } bit_buf >>= (n); num_bits -= (n); } MZ_MACRO_END #define TINFL_GET_BITS(state_index, b, n) do { if (num_bits < (mz_uint)(n)) { TINFL_NEED_BITS(state_index, n); } b = bit_buf & ((1 << (n)) - 1); bit_buf >>= (n); num_bits -= (n); } MZ_MACRO_END // TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes remaining in the input buffer falls below 2. // It reads just enough bytes from the input stream that are needed to decode the next Huffman code (and absolutely no more). It works by trying to fully decode a // Huffman code by using whatever bits are currently present in the bit buffer. If this fails, it reads another byte, and tries again until it succeeds or until the // bit buffer contains >=15 bits (deflate's max. Huffman code size). 
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \ do { \ temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \ if (temp >= 0) { \ code_len = temp >> 9; \ if ((code_len) && (num_bits >= code_len)) \ break; \ } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while ((temp < 0) && (num_bits >= (code_len + 1))); if (temp >= 0) break; \ } TINFL_GET_BYTE(state_index, c); bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); num_bits += 8; \ } while (num_bits < 15); // TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex than you would initially expect because the zlib API expects the decompressor to never read // beyond the final byte of the deflate stream. (In other words, when this macro wants to read another byte from the input, it REALLY needs another byte in order to fully // decode the next Huffman code.) Handling this properly is particularly important on raw deflate (non-zlib) streams, which aren't followed by a byte aligned adler-32. // The slow path is only executed at the very end of the input buffer. #define TINFL_HUFF_DECODE(state_index, sym, pHuff) do { \ int temp; mz_uint code_len, c; \ if (num_bits < 15) { \ if ((pIn_buf_end - pIn_buf_cur) < 2) { \ TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \ } else { \ bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); pIn_buf_cur += 2; num_bits += 16; \ } \ } \ if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) \ code_len = temp >> 9, temp &= 511; \ else { \ code_len = TINFL_FAST_LOOKUP_BITS; do { temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; } while (temp < 0); \ } sym = temp; bit_buf >>= code_len; num_bits -= code_len; } MZ_MACRO_END tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags) { static const int s_length_base[31] = { 3,4,5,6,7,8,9,10,11,13, 15,17,19,23,27,31,35,43,51,59, 67,83,99,115,131,163,195,227,258,0,0 }; static const int s_length_extra[31]= { 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 }; static const int s_dist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, 257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0}; static const int s_dist_extra[32] = { 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; static const mz_uint8 s_length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 }; static const int s_min_table_sizes[3] = { 257, 1, 4 }; tinfl_status status = TINFL_STATUS_FAILED; mz_uint32 num_bits, dist, counter, num_extra; tinfl_bit_buf_t bit_buf; const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size; mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size; size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? (size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start; // Ensure the output buffer's size is a power of 2, unless the output buffer is large enough to hold the entire output file (in which case it doesn't matter). 
if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) { *pIn_buf_size = *pOut_buf_size = 0; return TINFL_STATUS_BAD_PARAM; } num_bits = r->m_num_bits; bit_buf = r->m_bit_buf; dist = r->m_dist; counter = r->m_counter; num_extra = r->m_num_extra; dist_from_out_buf_start = r->m_dist_from_out_buf_start; TINFL_CR_BEGIN bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; r->m_z_adler32 = r->m_check_adler32 = 1; if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1); counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8)); if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1U << (8U + (r->m_zhdr0 >> 4))))); if (counter) { TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); } } do { TINFL_GET_BITS(3, r->m_final, 3); r->m_type = r->m_final >> 1; if (r->m_type == 0) { TINFL_SKIP_BITS(5, num_bits & 7); for (counter = 0; counter < 4; ++counter) { if (num_bits) TINFL_GET_BITS(6, r->m_raw_header[counter], 8); else TINFL_GET_BYTE(7, r->m_raw_header[counter]); } if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); } while ((counter) && (num_bits)) { TINFL_GET_BITS(51, dist, 8); while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)dist; counter--; } while (counter) { size_t n; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); } while (pIn_buf_cur >= pIn_buf_end) { if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT); } else { TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED); } } n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter); TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); pIn_buf_cur += n; pOut_buf_cur += n; counter -= (mz_uint)n; } } else if (r->m_type == 3) { TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED); } else { if (r->m_type == 1) { mz_uint8 *p = r->m_tables[0].m_code_size; mz_uint i; r->m_table_sizes[0] = 288; r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32); for ( i = 0; i <= 143; ++i) *p++ = 8; for ( ; i <= 255; ++i) *p++ = 9; for ( ; i <= 279; ++i) *p++ = 7; for ( ; i <= 287; ++i) *p++ = 8; } else { for (counter = 0; counter < 3; counter++) { TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); r->m_table_sizes[counter] += s_min_table_sizes[counter]; } MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); for (counter = 0; counter < r->m_table_sizes[2]; counter++) { mz_uint s; TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; } r->m_table_sizes[2] = 19; } for ( ; (int)r->m_type >= 0; r->m_type--) { int tree_next, tree_cur; tinfl_huff_table *pTable; mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16]; pTable = &r->m_tables[r->m_type]; MZ_CLEAR_OBJ(total_syms); MZ_CLEAR_OBJ(pTable->m_look_up); MZ_CLEAR_OBJ(pTable->m_tree); for (i = 0; i < r->m_table_sizes[r->m_type]; ++i) total_syms[pTable->m_code_size[i]]++; used_syms = 0, total = 0; next_code[0] = next_code[1] = 0; for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); } if ((65536 != 
total) && (used_syms > 1)) { TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED); } for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index) { mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index]; if (!code_size) continue; cur_code = next_code[code_size]++; for (l = code_size; l > 0; l--, cur_code >>= 1) rev_code = (rev_code << 1) | (cur_code & 1); if (code_size <= TINFL_FAST_LOOKUP_BITS) { mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { pTable->m_look_up[rev_code] = k; rev_code += (1 << code_size); } continue; } if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) { pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1); for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) { tree_cur -= ((rev_code >>= 1) & 1); if (!pTable->m_tree[-tree_cur - 1]) { pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } else tree_cur = pTable->m_tree[-tree_cur - 1]; } tree_cur -= ((rev_code >>= 1) & 1); pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index; } if (r->m_type == 2) { for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]); ) { mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; } if ((dist == 16) && (!counter)) { TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED); } num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); s += "\03\03\013"[dist - 16]; TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? r->m_len_codes[counter - 1] : 0, s); counter += s; } if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) { TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED); } TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]); TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]); } } for ( ; ; ) { mz_uint8 *pSrc; for ( ; ; ) { if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2)) { TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]); if (counter >= 256) break; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)counter; } else { int sym2; mz_uint code_len; #if TINFL_USE_64BIT_BITBUF if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; num_bits += 32; } #else if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } counter = sym2; bit_buf >>= code_len; num_bits -= code_len; if (counter & 256) break; #if !TINFL_USE_64BIT_BITBUF if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } bit_buf >>= code_len; num_bits -= code_len; pOut_buf_cur[0] = (mz_uint8)counter; if (sym2 & 
256) { pOut_buf_cur++; counter = sym2; break; } pOut_buf_cur[1] = (mz_uint8)sym2; pOut_buf_cur += 2; } } if ((counter &= 511) == 256) break; num_extra = s_length_extra[counter - 257]; counter = s_length_base[counter - 257]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(25, extra_bits, num_extra); counter += extra_bits; } TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]); num_extra = s_dist_extra[dist]; dist = s_dist_base[dist]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(27, extra_bits, num_extra); dist += extra_bits; } dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start; if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) { TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED); } pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask); if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) { while (counter--) { while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask]; } continue; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES else if ((counter >= 9) && (counter <= dist)) { const mz_uint8 *pSrc_end = pSrc + (counter & ~7); do { ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0]; ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1]; pOut_buf_cur += 8; } while ((pSrc += 8) < pSrc_end); if ((counter &= 7) < 3) { if (counter) { pOut_buf_cur[0] = pSrc[0]; if (counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } continue; } } #endif do { pOut_buf_cur[0] = pSrc[0]; pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur[2] = pSrc[2]; pOut_buf_cur += 3; pSrc += 3; } while ((int)(counter -= 3) > 2); if ((int)counter > 0) { pOut_buf_cur[0] = pSrc[0]; if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } } } } while (!(r->m_final & 1)); if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_SKIP_BITS(32, num_bits & 7); for (counter = 0; counter < 4; ++counter) { mz_uint s; if (num_bits) TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); r->m_z_adler32 = (r->m_z_adler32 << 8) | s; } } TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE); TINFL_CR_FINISH common_exit: r->m_num_bits = num_bits; r->m_bit_buf = bit_buf; r->m_dist = dist; r->m_counter = counter; r->m_num_extra = num_extra; r->m_dist_from_out_buf_start = dist_from_out_buf_start; *pIn_buf_size = pIn_buf_cur - pIn_buf_next; *pOut_buf_size = pOut_buf_cur - pOut_buf_next; if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0)) { const mz_uint8 *ptr = pOut_buf_next; size_t buf_len = *pOut_buf_size; mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16; size_t block_len = buf_len % 5552; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for ( ; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } r->m_check_adler32 = (s2 << 16) + s1; if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32)) status = TINFL_STATUS_ADLER32_MISMATCH; } return status; } // Higher level helper functions. 
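// Example (illustrative sketch, not part of miniz itself): decompressing a zlib stream that is
// already fully resident in memory via tinfl_decompress_mem_to_heap(). The helper name
// example_inflate_to_heap() and the zlib_data/zlib_size inputs are assumptions for this sketch.
// Assuming the default MZ_MALLOC/MZ_FREE configuration, the returned block is released with free().
//
//   #include <stdlib.h>
//   static unsigned char *example_inflate_to_heap(const void *zlib_data, size_t zlib_size, size_t *pOut_size)
//   {
//     // TINFL_FLAG_PARSE_ZLIB_HEADER tells the inflator to expect (and verify) a zlib wrapper;
//     // drop that flag when the input is raw deflate data with no header or trailing adler-32.
//     void *p = tinfl_decompress_mem_to_heap(zlib_data, zlib_size, pOut_size,
//                                            TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32);
//     return (unsigned char *)p; // NULL on failure; on success the caller owns (and frees) the block.
//   }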
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) { tinfl_decompressor decomp; void *pBuf = NULL, *pNew_buf; size_t src_buf_ofs = 0, out_buf_capacity = 0; *pOut_len = 0; tinfl_init(&decomp); for ( ; ; ) { size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity; tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8*)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8*)pBuf, pBuf ? (mz_uint8*)pBuf + *pOut_len : NULL, &dst_buf_size, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } src_buf_ofs += src_buf_size; *pOut_len += dst_buf_size; if (status == TINFL_STATUS_DONE) break; new_out_buf_capacity = out_buf_capacity * 2; if (new_out_buf_capacity < 128) new_out_buf_capacity = 128; pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity); if (!pNew_buf) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } pBuf = pNew_buf; out_buf_capacity = new_out_buf_capacity; } return pBuf; } size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) { tinfl_decompressor decomp; tinfl_status status; tinfl_init(&decomp); status = tinfl_decompress(&decomp, (const mz_uint8*)pSrc_buf, &src_buf_len, (mz_uint8*)pOut_buf, (mz_uint8*)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len; } int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { int result = 0; tinfl_decompressor decomp; mz_uint8 *pDict = (mz_uint8*)MZ_MALLOC(TINFL_LZ_DICT_SIZE); size_t in_buf_ofs = 0, dict_ofs = 0; if (!pDict) return TINFL_STATUS_FAILED; tinfl_init(&decomp); for ( ; ; ) { size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs; tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8*)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size, (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))); in_buf_ofs += in_buf_size; if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user))) break; if (status != TINFL_STATUS_HAS_MORE_OUTPUT) { result = (status == TINFL_STATUS_DONE); break; } dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1); } MZ_FREE(pDict); *pIn_buf_size = in_buf_ofs; return result; } // ------------------- Low-level Compression (independent from all decompression API's) // Purposely making these tables static for faster init and thread safety. 
static const mz_uint16 s_tdefl_len_sym[256] = { 257,258,259,260,261,262,263,264,265,265,266,266,267,267,268,268,269,269,269,269,270,270,270,270,271,271,271,271,272,272,272,272, 273,273,273,273,273,273,273,273,274,274,274,274,274,274,274,274,275,275,275,275,275,275,275,275,276,276,276,276,276,276,276,276, 277,277,277,277,277,277,277,277,277,277,277,277,277,277,277,277,278,278,278,278,278,278,278,278,278,278,278,278,278,278,278,278, 279,279,279,279,279,279,279,279,279,279,279,279,279,279,279,279,280,280,280,280,280,280,280,280,280,280,280,280,280,280,280,280, 281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281, 282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282, 283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283, 284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,285 }; static const mz_uint8 s_tdefl_len_extra[256] = { 0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4, 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,0 }; static const mz_uint8 s_tdefl_small_dist_sym[512] = { 0,1,2,3,4,4,5,5,6,6,6,6,7,7,7,7,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,11,11,11,11,11,11, 11,11,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13, 13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,14,14,14,14,14,14,14,14,14,14,14,14, 14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14, 14,14,14,14,14,14,14,14,14,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15, 15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,16,16,16,16,16,16,16,16,16,16,16,16,16, 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16, 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16, 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,17,17,17,17,17,17,17,17,17,17,17,17,17,17, 17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17, 17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17, 17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17 }; static const mz_uint8 s_tdefl_small_dist_extra[512] = { 0,0,0,0,1,1,1,1,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5, 
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7 }; static const mz_uint8 s_tdefl_large_dist_sym[128] = { 0,0,18,19,20,20,21,21,22,22,22,22,23,23,23,23,24,24,24,24,24,24,24,24,25,25,25,25,25,25,25,25,26,26,26,26,26,26,26,26,26,26,26,26, 26,26,26,26,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28, 28,28,28,28,28,28,28,28,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29 }; static const mz_uint8 s_tdefl_large_dist_extra[128] = { 0,0,8,8,9,9,9,9,10,10,10,10,10,10,10,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12, 12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13, 13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13 }; // Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted values. typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq; static tdefl_sym_freq* tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq* pSyms0, tdefl_sym_freq* pSyms1) { mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; tdefl_sym_freq* pCur_syms = pSyms0, *pNew_syms = pSyms1; MZ_CLEAR_OBJ(hist); for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; } while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--; for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) { const mz_uint32* pHist = &hist[pass << 8]; mz_uint offsets[256], cur_ofs = 0; for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; } for (i = 0; i < num_syms; i++) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; { tdefl_sym_freq* t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; } } return pCur_syms; } // tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, [email protected], Jyrki Katajainen, [email protected], November 1996. 
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) { int root, leaf, next, avbl, used, dpth; if (n==0) return; else if (n==1) { A[0].m_key = 1; return; } A[0].m_key += A[1].m_key; root = 0; leaf = 2; for (next=1; next < n-1; next++) { if (leaf>=n || A[root].m_key<A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = (mz_uint16)next; } else A[next].m_key = A[leaf++].m_key; if (leaf>=n || (root<next && A[root].m_key<A[leaf].m_key)) { A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); A[root++].m_key = (mz_uint16)next; } else A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key); } A[n-2].m_key = 0; for (next=n-3; next>=0; next--) A[next].m_key = A[A[next].m_key].m_key+1; avbl = 1; used = dpth = 0; root = n-2; next = n-1; while (avbl>0) { while (root>=0 && (int)A[root].m_key==dpth) { used++; root--; } while (avbl>used) { A[next--].m_key = (mz_uint16)(dpth); avbl--; } avbl = 2*used; dpth++; used = 0; } } // Limits canonical Huffman code table's max code size. enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 }; static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size) { int i; mz_uint32 total = 0; if (code_list_len <= 1) return; for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i]; for (i = max_code_size; i > 0; i--) total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i)); while (total != (1UL << max_code_size)) { pNum_codes[max_code_size]--; for (i = max_code_size - 1; i > 0; i--) if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; } total--; } } static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table) { int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE]; mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1]; MZ_CLEAR_OBJ(num_codes); if (static_table) { for (i = 0; i < table_len; i++) num_codes[d->m_huff_code_sizes[table_num][i]]++; } else { tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms; int num_used_syms = 0; const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0]; for (i = 0; i < table_len; i++) if (pSym_count[i]) { syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i]; syms0[num_used_syms++].m_sym_index = (mz_uint16)i; } pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1); tdefl_calculate_minimum_redundancy(pSyms, num_used_syms); for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++; tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit); MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]); MZ_CLEAR_OBJ(d->m_huff_codes[table_num]); for (i = 1, j = num_used_syms; i <= code_size_limit; i++) for (l = num_codes[i]; l > 0; l--) d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i); } next_code[1] = 0; for (j = 0, i = 2; i <= code_size_limit; i++) next_code[i] = j = ((j + num_codes[i - 1]) << 1); for (i = 0; i < table_len; i++) { mz_uint rev_code = 0, code, code_size; if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue; code = next_code[code_size]++; for (l = code_size; l > 0; l--, code >>= 1) rev_code = (rev_code << 1) | (code & 1); d->m_huff_codes[table_num][i] = (mz_uint16)rev_code; } } #define TDEFL_PUT_BITS(b, l) do { \ mz_uint bits = b; mz_uint len = l; MZ_ASSERT(bits <= ((1U << len) - 1U)); \ d->m_bit_buffer |= (bits << d->m_bits_in); d->m_bits_in += len; \ while (d->m_bits_in >= 8) { \ if (d->m_pOutput_buf < 
d->m_pOutput_buf_end) \ *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \ d->m_bit_buffer >>= 8; \ d->m_bits_in -= 8; \ } \ } MZ_MACRO_END #define TDEFL_RLE_PREV_CODE_SIZE() { if (rle_repeat_count) { \ if (rle_repeat_count < 3) { \ d->m_huff_count[2][prev_code_size] = (mz_uint16)(d->m_huff_count[2][prev_code_size] + rle_repeat_count); \ while (rle_repeat_count--) packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \ } else { \ d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); packed_code_sizes[num_packed_code_sizes++] = 16; packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_repeat_count - 3); \ } rle_repeat_count = 0; } } #define TDEFL_RLE_ZERO_CODE_SIZE() { if (rle_z_count) { \ if (rle_z_count < 3) { \ d->m_huff_count[2][0] = (mz_uint16)(d->m_huff_count[2][0] + rle_z_count); while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \ } else if (rle_z_count <= 10) { \ d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); packed_code_sizes[num_packed_code_sizes++] = 17; packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_z_count - 3); \ } else { \ d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); packed_code_sizes[num_packed_code_sizes++] = 18; packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_z_count - 11); \ } rle_z_count = 0; } } static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 }; static void tdefl_start_dynamic_block(tdefl_compressor *d) { int num_lit_codes, num_dist_codes, num_bit_lengths; mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index; mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF; d->m_huff_count[0][256] = 1; tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE); tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE); for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--) if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break; for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--) if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break; memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes); memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes); total_code_sizes_to_pack = num_lit_codes + num_dist_codes; num_packed_code_sizes = 0; rle_z_count = 0; rle_repeat_count = 0; memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2); for (i = 0; i < total_code_sizes_to_pack; i++) { mz_uint8 code_size = code_sizes_to_pack[i]; if (!code_size) { TDEFL_RLE_PREV_CODE_SIZE(); if (++rle_z_count == 138) { TDEFL_RLE_ZERO_CODE_SIZE(); } } else { TDEFL_RLE_ZERO_CODE_SIZE(); if (code_size != prev_code_size) { TDEFL_RLE_PREV_CODE_SIZE(); d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1); packed_code_sizes[num_packed_code_sizes++] = code_size; } else if (++rle_repeat_count == 6) { TDEFL_RLE_PREV_CODE_SIZE(); } } prev_code_size = code_size; } if (rle_repeat_count) { TDEFL_RLE_PREV_CODE_SIZE(); } else { TDEFL_RLE_ZERO_CODE_SIZE(); } tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE); TDEFL_PUT_BITS(2, 2); TDEFL_PUT_BITS(num_lit_codes - 257, 5); TDEFL_PUT_BITS(num_dist_codes - 1, 5); for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--) if 
(d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) break; num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1)); TDEFL_PUT_BITS(num_bit_lengths - 4, 4); for (i = 0; (int)i < num_bit_lengths; i++) TDEFL_PUT_BITS(d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3); for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes; ) { mz_uint code = packed_code_sizes[packed_code_sizes_index++]; MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2); TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]); if (code >= 16) TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]); } } static void tdefl_start_static_block(tdefl_compressor *d) { mz_uint i; mz_uint8 *p = &d->m_huff_code_sizes[0][0]; for (i = 0; i <= 143; ++i) *p++ = 8; for ( ; i <= 255; ++i) *p++ = 9; for ( ; i <= 279; ++i) *p++ = 7; for ( ; i <= 287; ++i) *p++ = 8; memset(d->m_huff_code_sizes[1], 5, 32); tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE); tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE); TDEFL_PUT_BITS(1, 2); } static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF }; #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; mz_uint8 *pOutput_buf = d->m_pOutput_buf; mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf; mz_uint64 bit_buffer = d->m_bit_buffer; mz_uint bits_in = d->m_bits_in; #define TDEFL_PUT_BITS_FAST(b, l) { bit_buffer |= (((mz_uint64)(b)) << bits_in); bits_in += (l); } flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint s0, s1, n0, n1, sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); // This sequence coaxes MSVC into using cmov's vs. jmp's. s0 = s_tdefl_small_dist_sym[match_dist & 511]; n0 = s_tdefl_small_dist_extra[match_dist & 511]; s1 = s_tdefl_large_dist_sym[match_dist >> 8]; n1 = s_tdefl_large_dist_extra[match_dist >> 8]; sym = (match_dist < 512) ? s0 : s1; num_extra_bits = (match_dist < 512) ? 
n0 : n1; MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } } if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE; *(mz_uint64*)pOutput_buf = bit_buffer; pOutput_buf += (bits_in >> 3); bit_buffer >>= (bits_in & ~7); bits_in &= 7; } #undef TDEFL_PUT_BITS_FAST d->m_pOutput_buf = pOutput_buf; d->m_bits_in = 0; d->m_bit_buffer = 0; while (bits_in) { mz_uint32 n = MZ_MIN(bits_in, 16); TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n); bit_buffer >>= n; bits_in -= n; } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #else static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8)); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); if (match_dist < 512) { sym = s_tdefl_small_dist_sym[match_dist]; num_extra_bits = s_tdefl_small_dist_extra[match_dist]; } else { sym = s_tdefl_large_dist_sym[match_dist >> 8]; num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8]; } MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) { if (static_block) tdefl_start_static_block(d); else tdefl_start_dynamic_block(d); return tdefl_compress_lz_codes(d); } static int tdefl_flush_block(tdefl_compressor *d, int flush) { mz_uint saved_bit_buf, saved_bits_in; mz_uint8 *pSaved_output_buf; mz_bool comp_block_succeeded = MZ_FALSE; int n, use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size; mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? 
((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf; d->m_pOutput_buf = pOutput_buf_start; d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16; MZ_ASSERT(!d->m_output_flush_remaining); d->m_output_flush_ofs = 0; d->m_output_flush_remaining = 0; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left); d->m_pLZ_code_buf -= (d->m_num_flags_left == 8); if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) { TDEFL_PUT_BITS(0x78, 8); TDEFL_PUT_BITS(0x01, 8); } TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1); pSaved_output_buf = d->m_pOutput_buf; saved_bit_buf = d->m_bit_buffer; saved_bits_in = d->m_bits_in; if (!use_raw_block) comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48)); // If the block gets expanded, forget the current contents of the output buffer and send a raw block instead. if ( ((use_raw_block) || ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= d->m_total_lz_bytes))) && ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size) ) { mz_uint i; d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; TDEFL_PUT_BITS(0, 2); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) { TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16); } for (i = 0; i < d->m_total_lz_bytes; ++i) { TDEFL_PUT_BITS(d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8); } } // Check for the extremely unlikely (if not impossible) case of the compressed block not fitting into the output buffer when using dynamic codes. else if (!comp_block_succeeded) { d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; tdefl_compress_block(d, MZ_TRUE); } if (flush) { if (flush == TDEFL_FINISH) { if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) { mz_uint i, a = d->m_adler32; for (i = 0; i < 4; i++) { TDEFL_PUT_BITS((a >> 24) & 0xFF, 8); a <<= 8; } } } else { mz_uint i, z = 0; TDEFL_PUT_BITS(0, 3); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, z ^= 0xFFFF) { TDEFL_PUT_BITS(z & 0xFFFF, 16); } } } MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end); memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0); memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1); d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes; d->m_total_lz_bytes = 0; d->m_block_index++; if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) { if (d->m_pPut_buf_func) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user)) return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED); } else if (pOutput_buf_start == d->m_output_buf) { int bytes_to_copy = (int)MZ_MIN((size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs)); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy); d->m_out_buf_ofs += bytes_to_copy; if ((n -= bytes_to_copy) != 0) { d->m_output_flush_ofs = bytes_to_copy; d->m_output_flush_remaining = n; } } else { d->m_out_buf_ofs += n; } } return d->m_output_flush_remaining; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #define TDEFL_READ_UNALIGNED_WORD(p) *(const 
mz_uint16*)(p) static __forceinline void tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint16 *s = (const mz_uint16*)(d->m_dict + pos), *p, *q; mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD(s); MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for ( ; ; ) { for ( ; ; ) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; q = (const mz_uint16*)(d->m_dict + probe_pos); if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue; p = s; probe_len = 32; do { } while ( (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0) ); if (!probe_len) { *pMatch_dist = dist; *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN); break; } else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8*)p == *(const mz_uint8*)q)) > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len) break; c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]); } } } #else static __forceinline void tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint8 *s = d->m_dict + pos, *p, *q; mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1]; MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for ( ; ; ) { for ( ; ; ) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if ((d->m_dict[probe_pos + match_len] == c0) && (d->m_dict[probe_pos + match_len - 1] == c1)) break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; p = s; q = d->m_dict + probe_pos; for (probe_len = 0; probe_len < max_match_len; probe_len++) if (*p++ != *q++) break; if (probe_len > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = probe_len) == max_match_len) return; c0 = d->m_dict[pos + match_len]; c1 = d->m_dict[pos + match_len - 1]; } } } #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static mz_bool tdefl_compress_fast(tdefl_compressor *d) { // Faster, minimally featured LZRW1-style match+parse loop with better register utilization. 
Intended for applications where raw throughput is valued more highly than ratio. mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left; mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags; mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) { const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096; mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size); d->m_src_buf_left -= num_bytes_to_process; lookahead_size += num_bytes_to_process; while (num_bytes_to_process) { mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process); memcpy(d->m_dict + dst_pos, d->m_pSrc, n); if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos)); d->m_pSrc += n; dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK; num_bytes_to_process -= n; } dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size); if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE)) break; while (lookahead_size >= 4) { mz_uint cur_match_dist, cur_match_len = 1; mz_uint8 *pCur_dict = d->m_dict + cur_pos; mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF; mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK; mz_uint probe_pos = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)lookahead_pos; if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((*(const mz_uint32 *)(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram)) { const mz_uint16 *p = (const mz_uint16 *)pCur_dict; const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos); mz_uint32 probe_len = 32; do { } while ( (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0) ); cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q); if (!probe_len) cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0; if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U*1024U))) { cur_match_len = 1; *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } else { mz_uint32 s0, s1; cur_match_len = MZ_MIN(cur_match_len, lookahead_size); MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE)); cur_match_dist--; pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN); *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist; pLZ_code_buf += 3; *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80); s0 = s_tdefl_small_dist_sym[cur_match_dist & 511]; s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8]; d->m_huff_count[1][(cur_match_dist < 512) ? 
s0 : s1]++; d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++; } } else { *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } total_lz_bytes += cur_match_len; lookahead_pos += cur_match_len; dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK; MZ_ASSERT(lookahead_size >= cur_match_len); lookahead_size -= cur_match_len; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } while (lookahead_size) { mz_uint8 lit = d->m_dict[cur_pos]; total_lz_bytes++; *pLZ_code_buf++ = lit; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } d->m_huff_count[0][lit]++; lookahead_pos++; dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; lookahead_size--; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } } d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; return MZ_TRUE; } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static __forceinline void tdefl_record_literal(tdefl_compressor *d, mz_uint8 lit) { d->m_total_lz_bytes++; *d->m_pLZ_code_buf++ = lit; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } d->m_huff_count[0][lit]++; } static __forceinline void tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist) { mz_uint32 s0, s1; MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && (match_dist <= TDEFL_LZ_DICT_SIZE)); d->m_total_lz_bytes += match_len; d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN); match_dist -= 1; d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF); d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8); d->m_pLZ_code_buf += 3; *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } s0 = s_tdefl_small_dist_sym[match_dist & 511]; s1 = s_tdefl_large_dist_sym[match_dist >> 8]; d->m_huff_count[1][(match_dist < 512) ? 
s0 : s1]++; d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++; } static mz_bool tdefl_compress_normal(tdefl_compressor *d) { const mz_uint8 *pSrc = d->m_pSrc; size_t src_buf_left = d->m_src_buf_left; tdefl_flush flush = d->m_flush; while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) { mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos; // Update dictionary and hash chains. Keeps the lookahead size equal to TDEFL_MAX_MATCH_LEN. if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) { mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2; mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size); const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process; src_buf_left -= num_bytes_to_process; d->m_lookahead_size += num_bytes_to_process; while (pSrc != pSrc_end) { mz_uint8 c = *pSrc++; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; ins_pos++; } } else { while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) { mz_uint8 c = *pSrc++; mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; src_buf_left--; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) { mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2; mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); } } } d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size); if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break; // Simple lazy/greedy parsing state machine. len_to_move = 1; cur_match_dist = 0; cur_match_len = d->m_saved_match_len ? 
d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1); cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) { if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) { mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK]; cur_match_len = 0; while (cur_match_len < d->m_lookahead_size) { if (d->m_dict[cur_pos + cur_match_len] != c) break; cur_match_len++; } if (cur_match_len < TDEFL_MIN_MATCH_LEN) cur_match_len = 0; else cur_match_dist = 1; } } else { tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len); } if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U*1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) { cur_match_dist = cur_match_len = 0; } if (d->m_saved_match_len) { if (cur_match_len > d->m_saved_match_len) { tdefl_record_literal(d, (mz_uint8)d->m_saved_lit); if (cur_match_len >= 128) { tdefl_record_match(d, cur_match_len, cur_match_dist); d->m_saved_match_len = 0; len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[cur_pos]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } } else { tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist); len_to_move = d->m_saved_match_len - 1; d->m_saved_match_len = 0; } } else if (!cur_match_dist) tdefl_record_literal(d, d->m_dict[cur_pos]); else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128)) { tdefl_record_match(d, cur_match_len, cur_match_dist); len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[cur_pos]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } // Move the lookahead forward by len_to_move bytes. d->m_lookahead_pos += len_to_move; MZ_ASSERT(d->m_lookahead_size >= len_to_move); d->m_lookahead_size -= len_to_move; d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, TDEFL_LZ_DICT_SIZE); // Check if it's time to flush the current LZ codes to the internal output buffer. if ( (d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) || ( (d->m_total_lz_bytes > 31*1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) ) { int n; d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; } } d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; return MZ_TRUE; } static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) { if (d->m_pIn_buf_size) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; } if (d->m_pOut_buf_size) { size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n); d->m_output_flush_ofs += (mz_uint)n; d->m_output_flush_remaining -= (mz_uint)n; d->m_out_buf_ofs += n; *d->m_pOut_buf_size = d->m_out_buf_ofs; } return (d->m_finished && !d->m_output_flush_remaining) ? 
TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY; } tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush) { if (!d) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return TDEFL_STATUS_BAD_PARAM; } d->m_pIn_buf = pIn_buf; d->m_pIn_buf_size = pIn_buf_size; d->m_pOut_buf = pOut_buf; d->m_pOut_buf_size = pOut_buf_size; d->m_pSrc = (const mz_uint8 *)(pIn_buf); d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0; d->m_out_buf_ofs = 0; d->m_flush = flush; if ( ((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) || (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf) ) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM); } d->m_wants_to_finish |= (flush == TDEFL_FINISH); if ((d->m_output_flush_remaining) || (d->m_finished)) return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) && ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) && ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0)) { if (!tdefl_compress_fast(d)) return d->m_prev_return_status; } else #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN { if (!tdefl_compress_normal(d)) return d->m_prev_return_status; } if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf)) d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf); if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining)) { if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status; d->m_finished = (flush == TDEFL_FINISH); if (flush == TDEFL_FULL_FLUSH) { MZ_CLEAR_OBJ(d->m_hash); MZ_CLEAR_OBJ(d->m_next); d->m_dict_size = 0; } } return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); } tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush) { MZ_ASSERT(d->m_pPut_buf_func); return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush); } tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { d->m_pPut_buf_func = pPut_buf_func; d->m_pPut_buf_user = pPut_buf_user; d->m_flags = (mz_uint)(flags); d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3; d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0; d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3; if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash); d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0; d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0; d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_pOutput_buf = d->m_output_buf; d->m_pOutput_buf_end = d->m_output_buf; d->m_prev_return_status = TDEFL_STATUS_OKAY; d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0; d->m_adler32 = 1; d->m_pIn_buf = NULL; d->m_pOut_buf = NULL; d->m_pIn_buf_size = NULL; 
d->m_pOut_buf_size = NULL; d->m_flush = TDEFL_NO_FLUSH; d->m_pSrc = NULL; d->m_src_buf_left = 0; d->m_out_buf_ofs = 0; memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0); memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1); return TDEFL_STATUS_OKAY; } tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) { return d->m_prev_return_status; } mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; } mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { tdefl_compressor *pComp; mz_bool succeeded; if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE; pComp = (tdefl_compressor*)MZ_MALLOC(sizeof(tdefl_compressor)); if (!pComp) return MZ_FALSE; succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) == TDEFL_STATUS_OKAY); succeeded = succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) == TDEFL_STATUS_DONE); MZ_FREE(pComp); return succeeded; } typedef struct { size_t m_size, m_capacity; mz_uint8 *m_pBuf; mz_bool m_expandable; } tdefl_output_buffer; static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len, void *pUser) { tdefl_output_buffer *p = (tdefl_output_buffer *)pUser; size_t new_size = p->m_size + len; if (new_size > p->m_capacity) { size_t new_capacity = p->m_capacity; mz_uint8 *pNew_buf; if (!p->m_expandable) return MZ_FALSE; do { new_capacity = MZ_MAX(128U, new_capacity << 1U); } while (new_size > new_capacity); pNew_buf = (mz_uint8*)MZ_REALLOC(p->m_pBuf, new_capacity); if (!pNew_buf) return MZ_FALSE; p->m_pBuf = pNew_buf; p->m_capacity = new_capacity; } memcpy((mz_uint8*)p->m_pBuf + p->m_size, pBuf, len); p->m_size = new_size; return MZ_TRUE; } void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) { tdefl_output_buffer out_buf; MZ_CLEAR_OBJ(out_buf); if (!pOut_len) return MZ_FALSE; else *pOut_len = 0; out_buf.m_expandable = MZ_TRUE; if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) return NULL; *pOut_len = out_buf.m_size; return out_buf.m_pBuf; } size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) { tdefl_output_buffer out_buf; MZ_CLEAR_OBJ(out_buf); if (!pOut_buf) return 0; out_buf.m_pBuf = (mz_uint8*)pOut_buf; out_buf.m_capacity = out_buf_len; if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) return 0; return out_buf.m_size; } static const mz_uint s_tdefl_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500 }; // level may actually range from [0,10] (10 is a "hidden" max level, where we want a bit more compression and it's fine if throughput to fall off a cliff on some files). mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy) { mz_uint comp_flags = s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : 6] | ((level <= 3) ? 
TDEFL_GREEDY_PARSING_FLAG : 0); if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER; if (!level) comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; else if (strategy == MZ_FILTERED) comp_flags |= TDEFL_FILTER_MATCHES; else if (strategy == MZ_HUFFMAN_ONLY) comp_flags &= ~TDEFL_MAX_PROBES_MASK; else if (strategy == MZ_FIXED) comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS; else if (strategy == MZ_RLE) comp_flags |= TDEFL_RLE_MATCHES; return comp_flags; } #ifdef _MSC_VER #pragma warning (push) #pragma warning (disable:4204) // nonstandard extension used : non-constant aggregate initializer (also supported by GNU C and C99, so no big deal) #endif // Simple PNG writer function by Alex Evans, 2011. Released into the public domain: https://gist.github.com/908299, more context at // http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/. void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out) { tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57+MZ_MAX(64, (1+bpl)*h); if (NULL == (out_buf.m_pBuf = (mz_uint8*)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } // write dummy header for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); // compress image data tdefl_init(pComp, tdefl_output_buffer_putter, &out_buf, TDEFL_DEFAULT_MAX_PROBES | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8*)pImage + y * bpl, bpl, TDEFL_NO_FLUSH); } if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != TDEFL_STATUS_DONE) { MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } // write real header *pLen_out = out_buf.m_size-41; { mz_uint8 pnghdr[41]={0x89,0x50,0x4e,0x47,0x0d,0x0a,0x1a,0x0a,0x00,0x00,0x00,0x0d,0x49,0x48,0x44,0x52, 0,0,(mz_uint8)(w>>8),(mz_uint8)w,0,0,(mz_uint8)(h>>8),(mz_uint8)h,8,"\0\0\04\02\06"[num_chans],0,0,0,0,0,0,0, (mz_uint8)(*pLen_out>>24),(mz_uint8)(*pLen_out>>16),(mz_uint8)(*pLen_out>>8),(mz_uint8)*pLen_out,0x49,0x44,0x41,0x54}; c=(mz_uint32)mz_crc32(MZ_CRC32_INIT,pnghdr+12,17); for (i=0; i<4; ++i, c<<=8) ((mz_uint8*)(pnghdr+29))[i]=(mz_uint8)(c>>24); memcpy(out_buf.m_pBuf, pnghdr, 41); } // write footer (IDAT CRC-32, followed by IEND chunk) if (!tdefl_output_buffer_putter("\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) { *pLen_out = 0; MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } c = (mz_uint32)mz_crc32(MZ_CRC32_INIT,out_buf.m_pBuf+41-4, *pLen_out+4); for (i=0; i<4; ++i, c<<=8) (out_buf.m_pBuf+out_buf.m_size-16)[i] = (mz_uint8)(c >> 24); // compute final size of file, grab compressed data buffer and return *pLen_out += 57; MZ_FREE(pComp); return out_buf.m_pBuf; } #ifdef _MSC_VER #pragma warning (pop) #endif // ------------------- .ZIP archive reading #ifndef MINIZ_NO_ARCHIVE_APIS #ifndef MINIZ_NO_TIME #include <time.h> #endif #ifdef MINIZ_NO_STDIO #define MZ_FILE void * #else #include <stdio.h> #include <sys/stat.h> #if defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__) #include <sys/utime.h> #define MZ_FILE FILE #define MZ_FOPEN fopen #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 _ftelli64 #define MZ_FSEEK64 _fseeki64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush 
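// _ftelli64/_fseeki64 above are the MSVC counterparts of the POSIX ftello/fseeko used in the #else
// branch below; the MZ_FTELL64/MZ_FSEEK64 wrappers give the archive code 64-bit file offsets on both
// platforms.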
#define MZ_FREOPEN freopen #define MZ_DELETE_FILE remove #else #include <utime.h> #define MZ_FILE FILE #define MZ_FOPEN fopen #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello #define MZ_FSEEK64 fseeko #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN freopen #define MZ_DELETE_FILE remove #endif // #ifdef _MSC_VER #endif // #ifdef MINIZ_NO_STDIO #define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c)) // Various ZIP archive enums. To completely avoid cross platform compiler alignment and platform endian issues, miniz.c doesn't use structs for any of this stuff. enum { // ZIP archive identifiers and record sizes MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50, MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50, MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50, MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22, // Central directory header record offsets MZ_ZIP_CDH_SIG_OFS = 0, MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4, MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6, MZ_ZIP_CDH_BIT_FLAG_OFS = 8, MZ_ZIP_CDH_METHOD_OFS = 10, MZ_ZIP_CDH_FILE_TIME_OFS = 12, MZ_ZIP_CDH_FILE_DATE_OFS = 14, MZ_ZIP_CDH_CRC32_OFS = 16, MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20, MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24, MZ_ZIP_CDH_FILENAME_LEN_OFS = 28, MZ_ZIP_CDH_EXTRA_LEN_OFS = 30, MZ_ZIP_CDH_COMMENT_LEN_OFS = 32, MZ_ZIP_CDH_DISK_START_OFS = 34, MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36, MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38, MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42, // Local directory header offsets MZ_ZIP_LDH_SIG_OFS = 0, MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4, MZ_ZIP_LDH_BIT_FLAG_OFS = 6, MZ_ZIP_LDH_METHOD_OFS = 8, MZ_ZIP_LDH_FILE_TIME_OFS = 10, MZ_ZIP_LDH_FILE_DATE_OFS = 12, MZ_ZIP_LDH_CRC32_OFS = 14, MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18, MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22, MZ_ZIP_LDH_FILENAME_LEN_OFS = 26, MZ_ZIP_LDH_EXTRA_LEN_OFS = 28, // End of central directory offsets MZ_ZIP_ECDH_SIG_OFS = 0, MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4, MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6, MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8, MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10, MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12, MZ_ZIP_ECDH_CDIR_OFS_OFS = 16, MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20, }; typedef struct { void *m_p; size_t m_size, m_capacity; mz_uint m_element_size; } mz_zip_array; struct mz_zip_internal_state_tag { mz_zip_array m_central_dir; mz_zip_array m_central_dir_offsets; mz_zip_array m_sorted_central_dir_offsets; MZ_FILE *m_pFile; void *m_pMem; size_t m_mem_size; size_t m_mem_capacity; }; #define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) (array_ptr)->m_element_size = element_size #define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) ((element_type *)((array_ptr)->m_p))[index] static __forceinline void mz_zip_array_clear(mz_zip_archive *pZip, mz_zip_array *pArray) { pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p); memset(pArray, 0, sizeof(mz_zip_array)); } static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip, mz_zip_array *pArray, size_t min_new_capacity, mz_uint growing) { void *pNew_p; size_t new_capacity = min_new_capacity; MZ_ASSERT(pArray->m_element_size); if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE; if (growing) { new_capacity = MZ_MAX(1, pArray->m_capacity); while (new_capacity < min_new_capacity) new_capacity *= 2; } if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p, pArray->m_element_size, new_capacity))) return MZ_FALSE; pArray->m_p = 
pNew_p; pArray->m_capacity = new_capacity; return MZ_TRUE; } static __forceinline mz_bool mz_zip_array_reserve(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_capacity, mz_uint growing) { if (new_capacity > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing)) return MZ_FALSE; } return MZ_TRUE; } static __forceinline mz_bool mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size, mz_uint growing) { if (new_size > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)) return MZ_FALSE; } pArray->m_size = new_size; return MZ_TRUE; } static __forceinline mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip, mz_zip_array *pArray, size_t n) { return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE); } static __forceinline mz_bool mz_zip_array_push_back(mz_zip_archive *pZip, mz_zip_array *pArray, const void *pElements, size_t n) { size_t orig_size = pArray->m_size; if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE)) return MZ_FALSE; memcpy((mz_uint8*)pArray->m_p + orig_size * pArray->m_element_size, pElements, n * pArray->m_element_size); return MZ_TRUE; } #ifndef MINIZ_NO_TIME static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) { struct tm tm; memset(&tm, 0, sizeof(tm)); tm.tm_isdst = -1; tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900; tm.tm_mon = ((dos_date >> 5) & 15) - 1; tm.tm_mday = dos_date & 31; tm.tm_hour = (dos_time >> 11) & 31; tm.tm_min = (dos_time >> 5) & 63; tm.tm_sec = (dos_time << 1) & 62; return mktime(&tm); } static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { struct tm *tm = localtime(&time); *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) + ((tm->tm_sec) >> 1)); *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) + ((tm->tm_mon + 1) << 5) + tm->tm_mday); } #endif #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_get_file_modified_time(const char *pFilename, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef MINIZ_NO_TIME (void)pFilename; *pDOS_date = *pDOS_time = 0; #else struct MZ_FILE_STAT_STRUCT file_stat; if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE; mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date); #endif // #ifdef MINIZ_NO_TIME return MZ_TRUE; } static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time, time_t modified_time) { #ifndef MINIZ_NO_TIME struct utimbuf t; t.actime = access_time; t.modtime = modified_time; return !utime(pFilename, &t); #else pFilename, access_time, modified_time; return MZ_TRUE; #endif // #ifndef MINIZ_NO_TIME } #endif static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip, mz_uint32 flags) { (void)flags; if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_READING; pZip->m_archive_size = 0; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); 
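// m_sorted_central_dir_offsets (set up next) holds one mz_uint32 index per archive entry; it is later
// sorted by lowercased filename (see mz_zip_reader_sort_central_dir_offsets_by_filename() below) so
// that plain central directory lookups in mz_zip_reader_locate_file() don't have to scan every entry.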
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static __forceinline mz_bool mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, mz_uint r_index) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index)); mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS), r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? (l_len < r_len) : (l < r); } #define MZ_SWAP_UINT32(a, b) do { mz_uint32 t = a; a = b; b = t; } MZ_MACRO_END // Heap sort of lowercased filenames, used to help accelerate plain central directory searches by mz_zip_reader_locate_file(). (Could also use qsort(), but it could allocate memory.) static void mz_zip_reader_sort_central_dir_offsets_by_filename(mz_zip_archive *pZip) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(&pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; int start = (size - 2) >> 1, end; while (start >= 0) { int child, root = start; for ( ; ; ) { if ((child = (root << 1) + 1) >= size) break; child += (((child + 1) < size) && (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1]))); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } start--; } end = size - 1; while (end > 0) { int child, root = 0; MZ_SWAP_UINT32(pIndices[end], pIndices[0]); for ( ; ; ) { if ((child = (root << 1) + 1) >= end) break; child += (((child + 1) < end) && mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1])); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } end--; } } static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint32 flags) { mz_uint i, n, cdir_size, num_this_disk, cdir_disk_index; mz_uint64 cdir_ofs; mz_int64 cur_file_ofs; const mz_uint8 *p; mz_uint32 buf_u32[4096 / sizeof(mz_uint32)]; mz_uint8 *pBuf = (mz_uint8 *)buf_u32; // Basic sanity checks - reject files which are too small, and check the first 4 bytes of the file to make sure a local header is there. if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; // Find the end of central directory record by scanning the file from the end towards the beginning. 
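// The end-of-central-directory record (signature MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG, i.e. the bytes
// "PK\x05\x06") sits at the very end of the archive, optionally followed by an archive comment of at
// most 0xFFFF bytes. The loop below therefore reads backwards in sizeof(buf_u32)-byte chunks looking
// for the 32-bit signature, stepping back by sizeof(buf_u32) - 3 so a signature split across two reads
// is not missed, and gives up if nothing is found within the last 0xFFFF + 22 bytes (max comment
// length plus the fixed 22-byte record).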
cur_file_ofs = MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0); for ( ; ; ) { int i, n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n) return MZ_FALSE; for (i = n - 4; i >= 0; --i) if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break; if (i >= 0) { cur_file_ofs += i; break; } if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >= (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE))) return MZ_FALSE; cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0); } // Read and verify the end of central directory record. if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) || ((pZip->m_total_files = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) != MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS))) return MZ_FALSE; num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS); cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS); if (((num_this_disk | cdir_disk_index) != 0) && ((num_this_disk != 1) || (cdir_disk_index != 1))) return MZ_FALSE; if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) < pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS); if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE; pZip->m_central_directory_file_ofs = cdir_ofs; if (pZip->m_total_files) { // Read the entire central directory into a heap block, and allocate another heap block to hold the unsorted central dir file record offsets, and another to hold the sorted indices. if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size, MZ_FALSE)) || (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets, pZip->m_total_files, MZ_FALSE)) || (!mz_zip_array_resize(pZip, &pZip->m_pState->m_sorted_central_dir_offsets, pZip->m_total_files, MZ_FALSE))) return MZ_FALSE; if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs, pZip->m_pState->m_central_dir.m_p, cdir_size) != cdir_size) return MZ_FALSE; // Now create an index into the central directory file records, do some basic sanity checking on each record, and check for zip64 entries (which are not yet supported). 
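// For each entry the loop below records the byte offset of its central directory header in
// m_central_dir_offsets, then sanity-checks the record: the header signature must be present, a
// "stored" (method 0) entry must have equal compressed and uncompressed sizes, sizes of 0xFFFFFFFF are
// rejected (they signal zip64, which this reader does not support), and local-header offset plus
// compressed size must lie inside the archive. The walk then advances past the 46-byte fixed header
// plus the variable-length filename, extra and comment fields to reach the next record.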
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p; for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) { mz_uint total_header_size, comp_size, decomp_size, disk_index; if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) || (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG)) return MZ_FALSE; MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, i) = (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p); MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets, mz_uint32, i) = i; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) && (decomp_size != comp_size)) || (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) || (comp_size == 0xFFFFFFFF)) return MZ_FALSE; disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS); if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE; if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n) return MZ_FALSE; n -= total_header_size; p += total_header_size; } } if ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0) mz_zip_reader_sort_central_dir_offsets_by_filename(pZip); return MZ_TRUE; } mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags) { if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE; if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; size_t s = (file_ofs >= pZip->m_archive_size) ? 
0 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n); memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s); return s; } mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags) { if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; pZip->m_pRead = mz_zip_mem_read_func; pZip->m_pIO_opaque = pZip; pZip->m_pState->m_pMem = (void *)pMem; pZip->m_pState->m_mem_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags) { mz_uint64 file_size; MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb"); if (!pFile) return MZ_FALSE; if (MZ_FSEEK64(pFile, 0, SEEK_END)) return MZ_FALSE; file_size = MZ_FTELL64(pFile); if (!mz_zip_reader_init_internal(pZip, flags)) { MZ_FCLOSE(pFile); return MZ_FALSE; } pZip->m_pRead = mz_zip_file_read_func; pZip->m_pIO_opaque = pZip; pZip->m_pState->m_pFile = pFile; pZip->m_archive_size = file_size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) { return pZip ? pZip->m_total_files : 0; } static __forceinline const mz_uint8 *mz_zip_reader_get_cdh(mz_zip_archive *pZip, mz_uint file_index) { if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return NULL; return &MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); } mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index) { mz_uint m_bit_flag; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); return (m_bit_flag & 1); } mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index) { mz_uint filename_len, internal_attr, external_attr; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS); external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); if ((!internal_attr) && ((external_attr & 0x10) != 0)) return MZ_TRUE; filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_len) { if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/') return MZ_TRUE; } return MZ_FALSE; } mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if ((!p) || (!pStat)) return MZ_FALSE; // Unpack the central directory record. 
pStat->m_file_index = file_index; pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index); pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS); pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS); pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS); #ifndef MINIZ_NO_TIME pStat->m_time = mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS), MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS)); #endif pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS); pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS); pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS); // Copy as much of the filename and comment as possible. n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1); memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pStat->m_filename[n] = '\0'; n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1); pStat->m_comment_size = n; memcpy(pStat->m_comment, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS), n); pStat->m_comment[n] = '\0'; return MZ_TRUE; } mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) { if (filename_buf_size) pFilename[0] = '\0'; return 0; } n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_buf_size) { n = MZ_MIN(n, filename_buf_size - 1); memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pFilename[n] = '\0'; } return n + 1; } static __forceinline mz_bool mz_zip_reader_string_equal(const char *pA, const char *pB, mz_uint len, mz_uint flags) { mz_uint i; if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len); for (i = 0; i < len; ++i) if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE; return MZ_TRUE; } static __forceinline int mz_zip_reader_filename_compare(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR, mz_uint r_len) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? 
(int)(l_len - r_len) : (l - r); } static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(&pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; const mz_uint filename_len = (mz_uint)strlen(pFilename); int l = 0, h = size - 1; while (l <= h) { int m = (l + h) >> 1, file_index = pIndices[m], comp = mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets, file_index, pFilename, filename_len); if (!comp) return file_index; else if (comp < 0) l = m + 1; else h = m - 1; } return -1; } int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags) { mz_uint file_index; size_t name_len, comment_len; if ((!pZip) || (!pZip->m_pState) || (!pName) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return -1; if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) && (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_p)) return mz_zip_reader_locate_file_binary_search(pZip, pName); name_len = strlen(pName); if (name_len > 0xFFFF) return -1; comment_len = pComment ? strlen(pComment) : 0; if (comment_len > 0xFFFF) return -1; for (file_index = 0; file_index < pZip->m_total_files; file_index++) { const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS); const char *pFilename = (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; if (filename_len < name_len) continue; if (comment_len) { mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS), file_comment_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS); const char *pFile_comment = pFilename + filename_len + file_extra_len; if ((file_comment_len != comment_len) || (!mz_zip_reader_string_equal(pComment, pFile_comment, file_comment_len, flags))) continue; } if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) { int ofs = filename_len - 1; do { if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') || (pFilename[ofs] == ':')) break; } while (--ofs >= 0); ofs++; pFilename += ofs; filename_len -= ofs; } if ((filename_len == name_len) && (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags))) return file_index; } return -1; } mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int status = TINFL_STATUS_DONE; mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail; mz_zip_archive_file_stat file_stat; void *pRead_buf; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; tinfl_decompressor inflator; if ((buf_size) && (!pBuf)) return MZ_FALSE; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; if (!file_stat.m_comp_size) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. 
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Ensure supplied output buffer is large enough. needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size : file_stat.m_uncomp_size; if (buf_size < needed_size) return MZ_FALSE; // Read and parse the local directory entry. cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, (size_t)needed_size) != needed_size) return MZ_FALSE; return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32); } // Decompress the file either directly from memory or from a file input buffer. tinfl_init(&inflator); if (pZip->m_pState->m_pMem) { // Read directly from the archive in memory. pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else if (pUser_read_buf) { // Use a user provided read buffer. if (!user_read_buf_size) return MZ_FALSE; pRead_buf = (mz_uint8 *)pUser_read_buf; read_buf_size = user_read_buf_size; read_buf_avail = 0; comp_remaining = file_stat.m_uncomp_size; } else { // Temporarily allocate a read buffer. read_buf_size = MZ_MIN(file_stat.m_comp_size, MZ_ZIP_MAX_IO_BUF_SIZE); if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) return MZ_FALSE; if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } do { size_t in_buf_size, out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress(&inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0)); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; out_buf_ofs += out_buf_size; } while (status == TINFL_STATUS_NEEDS_MORE_INPUT); if (status == TINFL_STATUS_DONE) { // Make sure the entire file was decompressed, and check its CRC. 
if ((out_buf_ofs != file_stat.m_uncomp_size) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf)) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, pUser_read_buf, user_read_buf_size); } mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, NULL, 0); } mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf, buf_size, flags, NULL, 0); } void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags) { mz_uint64 comp_size, uncomp_size, alloc_size; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); void *pBuf; if (pSize) *pSize = 0; if (!p) return NULL; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size; if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) return NULL; if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size))) return NULL; if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size, flags)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return NULL; } if (pSize) *pSize = (size_t)alloc_size; return pBuf; } void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) { if (pSize) *pSize = 0; return MZ_FALSE; } return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags); } mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int status = TINFL_STATUS_DONE; mz_uint file_crc32 = MZ_CRC32_INIT; mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining, out_buf_ofs = 0, cur_file_ofs; mz_zip_archive_file_stat file_stat; void *pRead_buf = NULL; void *pWrite_buf = NULL; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; if (!file_stat.m_comp_size) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Read and parse the local directory entry. 
cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; // Decompress the file either directly from memory or from a file input buffer. if (pZip->m_pState->m_pMem) { pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else { read_buf_size = MZ_MIN(file_stat.m_comp_size, MZ_ZIP_MAX_IO_BUF_SIZE); if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. if (pZip->m_pState->m_pMem) { if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) return MZ_FALSE; if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)file_stat.m_comp_size) != file_stat.m_comp_size) status = TINFL_STATUS_FAILED; else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, (size_t)file_stat.m_comp_size); cur_file_ofs += file_stat.m_comp_size; out_buf_ofs += file_stat.m_comp_size; comp_remaining = 0; } else { while (comp_remaining) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail); if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; out_buf_ofs += read_buf_avail; comp_remaining -= read_buf_avail; } } } else { tinfl_decompressor inflator; tinfl_init(&inflator); if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, TINFL_LZ_DICT_SIZE))) status = TINFL_STATUS_FAILED; else { do { mz_uint8 *pWrite_buf_cur = (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); size_t in_buf_size, out_buf_size = TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress(&inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size, comp_remaining ? 
TINFL_FLAG_HAS_MORE_INPUT : 0); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; if (out_buf_size) { if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) != out_buf_size) { status = TINFL_STATUS_FAILED; break; } file_crc32 = (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size); if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) { status = TINFL_STATUS_FAILED; break; } } } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) || (status == TINFL_STATUS_HAS_MORE_OUTPUT)); } } if ((status == TINFL_STATUS_DONE) && (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) { // Make sure the entire file was decompressed, and check its CRC. if ((out_buf_ofs != file_stat.m_uncomp_size) || (file_crc32 != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque, flags); } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs, const void *pBuf, size_t n) { (void)ofs; return MZ_FWRITE(pBuf, 1, n, (MZ_FILE*)pOpaque); } mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags) { mz_bool status; mz_zip_archive_file_stat file_stat; MZ_FILE *pFile; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; pFile = MZ_FOPEN(pDst_filename, "wb"); if (!pFile) return MZ_FALSE; status = mz_zip_reader_extract_to_callback(pZip, file_index, mz_zip_file_write_callback, pFile, flags); if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE; #ifndef MINIZ_NO_TIME if (status) mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time); #endif return status; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_end(mz_zip_archive *pZip) { if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; if (pZip->m_pState) { mz_zip_internal_state *pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO pZip->m_pFree(pZip->m_pAlloc_opaque, pState); } pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return MZ_TRUE; } mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags); } // ------------------- .ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS static void mz_write_le16(mz_uint8 *p, mz_uint16 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); } static void mz_write_le32(mz_uint8 *p, mz_uint32 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); p[2] = (mz_uint8)(v >> 16); p[3] = (mz_uint8)(v >> 24); } #define MZ_WRITE_LE16(p, 
v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v)) #define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v)) mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) { if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (pZip->m_file_offset_alignment) { // Ensure user specified file offset alignment is a power of 2. if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1)) return MZ_FALSE; } if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_archive_size = existing_size; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_zip_internal_state *pState = pZip->m_pState; mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size); if ((!n) || ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) return 0; if (new_size > pState->m_mem_capacity) { void *pNew_block; size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity); while (new_capacity < new_size) new_capacity *= 2; if (NULL == (pNew_block = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity))) return 0; pState->m_pMem = pNew_block; pState->m_mem_capacity = new_capacity; } memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n); pState->m_mem_size = (size_t)new_size; return n; } mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size) { pZip->m_pWrite = mz_zip_heap_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size, size_to_reserve_at_beginning))) { if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, initial_allocation_size))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_mem_capacity = initial_allocation_size; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning) { MZ_FILE *pFile; pZip->m_pWrite = mz_zip_file_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) { mz_zip_writer_end(pZip); return MZ_FALSE; } 
pZip->m_pState->m_pFile = pFile; if (size_to_reserve_at_beginning) { mz_uint64 cur_ofs = 0; char buf[4096]; MZ_CLEAR_OBJ(buf); do { size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) { mz_zip_writer_end(pZip); return MZ_FALSE; } cur_ofs += n; size_to_reserve_at_beginning -= n; } while (size_to_reserve_at_beginning); } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; // No sense in trying to write to an archive that's already at the support max size if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; pState = pZip->m_pState; if (pState->m_pFile) { #ifdef MINIZ_NO_STDIO pFilename; return MZ_FALSE; #else // Archive is being read from stdio - try to reopen as writable. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; if (!pFilename) return MZ_FALSE; pZip->m_pWrite = mz_zip_file_write_func; if (NULL == (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) { // The mz_zip_archive is now in a bogus state because pState->m_pFile is NULL, so just close it. mz_zip_reader_end(pZip); return MZ_FALSE; } #endif // #ifdef MINIZ_NO_STDIO } else if (pState->m_pMem) { // Archive lives in a memory block. Assume it's from the heap that we can resize using the realloc callback. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; pState->m_mem_capacity = pState->m_mem_size; pZip->m_pWrite = mz_zip_heap_write_func; } // Archive is being read via a user provided read function - make sure the user has specified a write function too. else if (!pZip->m_pWrite) return MZ_FALSE; // Start writing new files at the archive's current central directory location. pZip->m_archive_size = pZip->m_central_directory_file_ofs; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_central_directory_file_ofs = 0; return MZ_TRUE; } mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags) { return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0, level_and_flags, 0, 0); } typedef struct { mz_zip_archive *m_pZip; mz_uint64 m_cur_archive_file_ofs; mz_uint64 m_comp_size; } mz_zip_writer_add_state; static mz_bool mz_zip_writer_add_put_buf_callback(const void* pBuf, int len, void *pUser) { mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser; if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque, pState->m_cur_archive_file_ofs, pBuf, len) != len) return MZ_FALSE; pState->m_cur_archive_file_ofs += len; pState->m_comp_size += len; return MZ_TRUE; } static mz_bool mz_zip_writer_create_local_dir_header(mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date) { (void)pZip; memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 
20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size); return MZ_TRUE; } static mz_bool mz_zip_writer_create_central_dir_header(mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { (void)pZip; memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs); return MZ_TRUE; } static mz_bool mz_zip_writer_add_to_central_dir(mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size, const void *pExtra, mz_uint16 extra_size, const void *pComment, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { mz_zip_internal_state *pState = pZip->m_pState; mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size; size_t orig_central_dir_size = pState->m_central_dir.m_size; mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; // No zip64 support yet if ((local_header_ofs > 0xFFFFFFFF) || (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size + comment_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_central_dir_header(pZip, central_dir_header, filename_size, extra_size, comment_size, uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time, dos_date, local_header_ofs, ext_attributes)) return MZ_FALSE; if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename, filename_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra, extra_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment, comment_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &central_dir_ofs, 1))) { // Try to push the central directory array back into its original state. 
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } return MZ_TRUE; } static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) { // Basic ZIP archive filename validity checks: Valid filenames cannot start with a forward slash, cannot contain a drive letter, and cannot use DOS-style backward slashes. if (*pArchive_name == '/') return MZ_FALSE; while (*pArchive_name) { if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE; pArchive_name++; } return MZ_TRUE; } static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(mz_zip_archive *pZip) { mz_uint32 n; if (!pZip->m_file_offset_alignment) return 0; n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1)); return (pZip->m_file_offset_alignment - n) & (pZip->m_file_offset_alignment - 1); } static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip, mz_uint64 cur_file_ofs, mz_uint32 n) { char buf[4096]; memset(buf, 0, MZ_MIN(sizeof(buf), n)); while (n) { mz_uint32 s = MZ_MIN(sizeof(buf), n); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s) return MZ_FALSE; cur_file_ofs += s; n -= s; } return MZ_TRUE; } mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32) { mz_uint16 method = 0, dos_time = 0, dos_date = 0; mz_uint level = level_and_flags & 0xF, ext_attributes = 0, num_alignment_padding_bytes; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; tdefl_compressor *pComp = NULL; mz_bool store_data_uncompressed = ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)); mz_zip_internal_state *pState; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) || (!pArchive_name) || ((comment_size) && (!pComment)) || (pZip->m_total_files == 0xFFFF) || (level > 10)) return MZ_FALSE; pState = pZip->m_pState; if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size)) return MZ_FALSE; // No zip64 support yet if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; #ifndef MINIZ_NO_TIME { time_t cur_time; time(&cur_time); mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date); } #endif // #ifndef MINIZ_NO_TIME archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) { // Set DOS Subdirectory attribute bit. ext_attributes |= 0x10; // Subdirectories cannot contain data. if ((buf_size) || (uncomp_size)) return MZ_FALSE; } // Try to do any allocations before writing to the archive, so if an allocation fails the file remains unmodified. (A good idea if we're doing an in-place modification.) 
if ((!mz_zip_array_ensure_room(pZip, &pState->m_central_dir, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) || (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1))) return MZ_FALSE; if ((!store_data_uncompressed) && (buf_size)) { if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)))) return MZ_FALSE; } if (!mz_zip_writer_write_zeros(pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) { uncomp_crc32 = (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8*)pBuf, buf_size); uncomp_size = buf_size; if (uncomp_size <= 3) { level = 0; store_data_uncompressed = MZ_TRUE; } } if (store_data_uncompressed) { if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf, buf_size) != buf_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += buf_size; comp_size = buf_size; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED; } else if (buf_size) { mz_zip_writer_add_state state; state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params(level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) || (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) != TDEFL_STATUS_DONE)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pComp = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header(pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir(pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_uint uncomp_crc32 = MZ_CRC32_INIT, level = level_and_flags & 0xF, num_alignment_padding_bytes; mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0, comp_size = 
0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; MZ_FILE *pSrc_file = NULL; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > 10)) return MZ_FALSE; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date)) return MZ_FALSE; pSrc_file = MZ_FOPEN(pSrc_filename, "rb"); if (!pSrc_file) return MZ_FALSE; MZ_FSEEK64(pSrc_file, 0, SEEK_END); uncomp_size = MZ_FTELL64(pSrc_file); MZ_FSEEK64(pSrc_file, 0, SEEK_SET); if (uncomp_size > 0xFFFFFFFF) { // No zip64 support yet MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (uncomp_size <= 3) level = 0; if (!mz_zip_writer_write_zeros(pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) return MZ_FALSE; local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (uncomp_size) { mz_uint64 uncomp_remaining = uncomp_size; void *pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE); if (!pRead_buf) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (!level) { while (uncomp_remaining) { mz_uint n = (mz_uint)MZ_MIN(MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining); if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) || (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf, n) != n)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n); uncomp_remaining -= n; cur_archive_file_ofs += n; } comp_size = uncomp_size; } else { mz_bool result = MZ_FALSE; mz_zip_writer_add_state state; tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)); if (!pComp) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params(level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } for ( ; ; ) { size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining, MZ_ZIP_MAX_IO_BUF_SIZE); tdefl_status status; if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size) break; uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size); 
uncomp_remaining -= in_buf_size; status = tdefl_compress_buffer(pComp, pRead_buf, in_buf_size, uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH); if (status == TDEFL_STATUS_DONE) { result = MZ_TRUE; break; } else if (status != TDEFL_STATUS_OKAY) break; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); if (!result) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); } MZ_FCLOSE(pSrc_file); pSrc_file = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header(pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir(pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index) { mz_uint n, bit_flags, num_alignment_padding_bytes; mz_uint64 comp_bytes_remaining, local_dir_header_ofs; mz_uint64 cur_src_file_ofs, cur_dst_file_ofs; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; size_t orig_central_dir_size; mz_zip_internal_state *pState; void *pBuf; const mz_uint8 *pSrc_central_header; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; if (NULL == (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index))) return MZ_FALSE; pState = pZip->m_pState; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; cur_src_file_ofs = MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS); cur_dst_file_ofs = pZip->m_archive_size; if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs, num_alignment_padding_bytes)) return MZ_FALSE; cur_dst_file_ofs += num_alignment_padding_bytes; local_dir_header_ofs = cur_dst_file_ofs; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + 
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); comp_bytes_remaining = n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)MZ_MAX(sizeof(mz_uint32) * 4, MZ_MIN(MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining))))) return MZ_FALSE; while (comp_bytes_remaining) { n = (mz_uint)MZ_MIN(MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining); if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_dst_file_ofs += n; comp_bytes_remaining -= n; } bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS); if (bit_flags & 8) { // Copy data descriptor if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; cur_dst_file_ofs += n; } pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); // no zip64 support yet if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE; orig_central_dir_size = pState->m_central_dir.m_size; memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_dir_header_ofs); if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) return MZ_FALSE; n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS); if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE; n = (mz_uint32)pState->m_central_dir.m_size; if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } pZip->m_total_files++; pZip->m_archive_size = cur_dst_file_ofs; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_uint64 central_dir_ofs, central_dir_size; mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE]; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; pState = pZip->m_pState; // no zip64 support yet if ((pZip->m_total_files > 0xFFFF) || ((pZip->m_archive_size + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; central_dir_ofs = 0; central_dir_size = 0; if (pZip->m_total_files) { // Write central directory central_dir_ofs = pZip->m_archive_size; central_dir_size = pState->m_central_dir.m_size; pZip->m_central_directory_file_ofs = central_dir_ofs; if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs, pState->m_central_dir.m_p, (size_t)central_dir_size) != central_dir_size) return MZ_FALSE; pZip->m_archive_size += central_dir_size; } // Write end of central directory 
record MZ_CLEAR_OBJ(hdr); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, pZip->m_total_files); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs); if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, sizeof(hdr)) != sizeof(hdr)) return MZ_FALSE; #ifndef MINIZ_NO_STDIO if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE; #endif // #ifndef MINIZ_NO_STDIO pZip->m_archive_size += sizeof(hdr); pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize) { if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE; if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE; if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE; *pBuf = pZip->m_pState->m_pMem; *pSize = pZip->m_pState->m_mem_size; pZip->m_pState->m_pMem = NULL; pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0; return MZ_TRUE; } mz_bool mz_zip_writer_end(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_bool status = MZ_TRUE; if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) return MZ_FALSE; pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem); pState->m_pMem = NULL; } pZip->m_pFree(pZip->m_pAlloc_opaque, pState); pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return status; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_bool status, created_new_archive = MZ_FALSE; mz_zip_archive zip_archive; struct MZ_FILE_STAT_STRUCT file_stat; MZ_CLEAR_OBJ(zip_archive); if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > 9)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) { // Create a new archive. if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0)) return MZ_FALSE; created_new_archive = MZ_TRUE; } else { // Append to an existing archive. if (!mz_zip_reader_init_file(&zip_archive, pZip_filename, level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return MZ_FALSE; if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) { mz_zip_reader_end(&zip_archive); return MZ_FALSE; } } status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, 0, 0); // Always finalize, even if adding failed for some reason, so we have a valid central directory. (This may not always succeed, but we can try.) 
  status = status && mz_zip_writer_finalize_archive(&zip_archive);
  status = status && mz_zip_writer_end(&zip_archive);
  if ((!status) && (created_new_archive))
  {
    // It's a new archive and something went wrong, so just delete it.
    MZ_DELETE_FILE(pZip_filename);
  }
  return status;
}

void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags)
{
  int file_index;
  mz_zip_archive zip_archive;
  void *p = NULL;

  if (pSize)
    *pSize = 0;

  if ((!pZip_filename) || (!pArchive_name))
    return NULL;

  MZ_CLEAR_OBJ(zip_archive);
  if (!mz_zip_reader_init_file(&zip_archive, pZip_filename, flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
    return NULL;

  if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL, flags)) >= 0)
    p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);

  mz_zip_reader_end(&zip_archive);
  return p;
}

#endif // #ifndef MINIZ_NO_STDIO

#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

#endif // #ifndef MINIZ_NO_ARCHIVE_APIS

#ifdef __cplusplus
}
#endif

#endif // MINIZ_HEADER_FILE_ONLY

/*
  This is free and unencumbered software released into the public domain.

  Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
  software, either in source code form or as a compiled binary, for any purpose,
  commercial or non-commercial, and by any means.

  In jurisdictions that recognize copyright laws, the author or authors of this
  software dedicate any and all copyright interest in the software to the public
  domain. We make this dedication for the benefit of the public at large and to
  the detriment of our heirs and successors. We intend this dedication to be an
  overt act of relinquishment in perpetuity of all present and future rights to
  this software under copyright law.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

  For more information, please refer to <http://unlicense.org/>
*/
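/*
  Illustrative usage sketch (not part of the original miniz.c above): a minimal
  program showing how the archive helpers defined in this file fit together --
  mz_zip_add_mem_to_archive_file_in_place() to append a buffer to a .zip on disk,
  mz_zip_reader_init_file()/mz_zip_reader_file_stat() to walk the entries, and
  mz_zip_extract_archive_file_to_heap() to pull one entry back out. The archive
  name "test.zip", the entry name "hello.txt", and compression level 9 are
  arbitrary choices for the example. It assumes the declarations above (e.g. via
  the full miniz header) are in scope and that the default malloc-based allocator
  is in use, so free() can release the extracted buffer.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
  const char *pStr = "Hello from the miniz usage sketch.";
  mz_zip_archive zip;
  mz_zip_archive_file_stat st;
  size_t extracted_size = 0;
  void *pExtracted;
  mz_uint i;

  // Append (or create) test.zip and store the string under the name hello.txt.
  if (!mz_zip_add_mem_to_archive_file_in_place("test.zip", "hello.txt", pStr, strlen(pStr), NULL, 0, 9))
  {
    fprintf(stderr, "mz_zip_add_mem_to_archive_file_in_place failed\n");
    return EXIT_FAILURE;
  }

  // Re-open the archive read-only and list its entries.
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_reader_init_file(&zip, "test.zip", 0))
    return EXIT_FAILURE;

  for (i = 0; i < mz_zip_reader_get_num_files(&zip); i++)
  {
    if (mz_zip_reader_file_stat(&zip, i, &st))
      printf("%u: %s (%u -> %u bytes)\n", i, st.m_filename, (mz_uint)st.m_comp_size, (mz_uint)st.m_uncomp_size);
  }
  mz_zip_reader_end(&zip);

  // Extract the entry again; with the default allocator the buffer comes from malloc().
  pExtracted = mz_zip_extract_archive_file_to_heap("test.zip", "hello.txt", &extracted_size, 0);
  if (pExtracted)
  {
    printf("extracted %u bytes: %.*s\n", (mz_uint)extracted_size, (int)extracted_size, (const char *)pExtracted);
    free(pExtracted);
  }
  return EXIT_SUCCESS;
}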
the_stack_data/211080629.c
/* prog03e18.c : Tests and Conditions
 * AUTHOR: Lucas Deboni
 * DATE: 25/10/2020
 */
#include <stdio.h>

int main()
{
    int ano;

    printf("Enter the year: ");
    scanf("%d", &ano);
    /* Full Gregorian rule: divisible by 4, except century years not divisible by 400. */
    ((ano % 4 == 0 && ano % 100 != 0) || ano % 400 == 0)
        ? printf("Year %d is a leap year\n", ano)
        : printf("Year %d is not a leap year\n", ano);
    return 0;
}
the_stack_data/126702412.c
/* <@(#)abort.c 1.4> */ /* 3.0 SID # 1.4 */ /*LINTLIBRARY*/ /* * abort() - terminate current process with dump via SIGIOT */ #include <signal.h> extern int kill(), getpid(); static pass = 0; /* counts how many times abort has been called*/ int abort() { /* increment first to avoid any hassle with interupts */ if (++pass == 1) { _cleanup(); } return(kill(getpid(), SIGIOT)); }
the_stack_data/995467.c
#include <stdio.h> #include <math.h> #define pi acos(-1) int main () { double a, b, c, r1, r2, area_t, area_t1, area_c1, area_c2, s; while (scanf("%lf %lf %lf", &a, &b, &c)==3) { s= (a+b+c)/2; area_t= sqrt(s*(s-a)*(s-b)*(s-c)); r1= (area_t/s); area_c2= pi*r1*r1; r2= ((a*b*c)/(area_t))/4; area_c1= (pi*r2*r2)-area_t; area_t1= area_t-area_c2; printf("%.4lf %.4lf %.4lf\n", area_c1, area_t1, area_c2); } return 0; }
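/* Editor's note on the formulas used above, with a worked example.
 * For sides a, b, c and semiperimeter s = (a+b+c)/2:
 *   Heron's formula:  area_t = sqrt(s*(s-a)*(s-b)*(s-c))
 *   inradius:         r1 = area_t / s         -> incircle area      pi*r1*r1
 *   circumradius:     r2 = a*b*c / (4*area_t) -> circumcircle area  pi*r2*r2
 * The three printed values are therefore: circumcircle minus triangle,
 * triangle minus incircle, and the incircle itself.
 * Example: a=3, b=4, c=5 gives s=6, area_t=6, r1=1, r2=2.5, and the program
 * prints approximately "13.6350 2.8584 3.1416". */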
the_stack_data/90761992.c
#include <stdio.h> #include <stdlib.h> struct node { int value; struct node *next; struct node *prev; }; struct list { struct node *head; struct node *tail; }; void init(struct list* l) { l->head=NULL; l->tail=NULL; } void clear(struct list* l) { struct node* tmp1; struct node* tmp2; tmp1=l->head; while (tmp1->next!=NULL) { tmp2=tmp1; tmp1=tmp1->next; free(tmp2); } free(tmp1); init(l); } int isEmpty(struct list* l) { if (l->head==NULL) return 0; else return 1; } struct node* find(struct list* l, int val) { struct node* tmp; tmp=l->head; while (tmp->next!=NULL) { if (tmp->value==val) return tmp; tmp=tmp->next; } return NULL; } struct node* findk(struct list* l, int n) { int i=1; struct node* tmp; tmp=l->head; for(i=1; i<n; i++) tmp=tmp->next; return tmp; } int push_back(struct list* l, int val) { struct node* new_node; new_node=(struct node*)malloc(sizeof(struct node)); new_node->value = val; new_node->next = NULL; if (l->head==NULL) { new_node->prev = NULL; l->head=new_node; } else if (l->head->next==NULL) { new_node->prev = l->head; l->head->next = new_node; } else { new_node->prev = l->tail; l->tail->next = new_node; } l->tail=new_node; return 0; } int push_front(struct list* l, int val) { struct node* new_node; new_node=(struct node*)malloc(sizeof(struct node)); new_node->value = val; new_node->prev = NULL; if (l->head==NULL) { new_node->next = NULL; l->tail=new_node; } else if (l->head->next==NULL) { new_node->next = l->tail; l->tail->prev = new_node; } else { new_node->next = l->head; l->head->prev = new_node; } l->head=new_node; return 0; } int insertAfter(struct node* n, int val) { struct node* new_node; new_node=(struct node*)malloc(sizeof(struct node)); new_node->value = val; new_node->next = n->next; new_node->prev = n; n->next->prev = new_node; n->next = new_node; return 0; } int insertBefore(struct node* n, int val) { struct node* new_node; new_node=(struct node*)malloc(sizeof(struct node)); new_node->value = val; new_node->prev = n->prev; new_node->next = n; n->prev->next = new_node; n->prev = new_node; return 0; } int removeFirst(struct list* l, int val) { struct node* tmp; tmp=l->head; while(tmp->value!=val) tmp=tmp->next; if (tmp==l->head) { tmp->next->prev = NULL; l->head = tmp->next; } else if (tmp==l->tail) { tmp->prev->next = NULL; l->tail = tmp->prev; } else { tmp->prev->next = tmp->next; tmp->next->prev = tmp->prev; } free(tmp); return 0; } int removeLast(struct list* l, int val) { struct node* tmp; tmp=l->tail; while(tmp->value!=val) tmp=tmp->prev; if (tmp==l->head) { tmp->next->prev = NULL; l->head = tmp->next; } else if (tmp==l->tail) { tmp->prev->next = NULL; l->tail = tmp->prev; } else { tmp->prev->next = tmp->next; tmp->next->prev = tmp->prev; } free(tmp); return 0; } void print(struct list* l) { if (isEmpty(l)==1) { struct node* tmp; tmp=l->head; while (tmp->next!=NULL) { printf("%d ", tmp->value); tmp = tmp->next; } printf("%d\n", l->tail->value); } } void print_invers(struct list* l) { if (isEmpty(l)==1) { struct node* tmp; tmp=l->tail; while (tmp->prev!=NULL) { printf("%d ", tmp->value); tmp = tmp->prev; } printf("%d\n", l->head->value); } } int main() { int n, x, i, k1, k2, k3; struct list a; struct node* b; init(&a); scanf("%d", &n); for (i=1; i<=n; i=i+1) { scanf("%d", &x); push_back(&a, x); } print(&a); scanf("%d %d %d", &k1, &k2, &k3); b=find(&a, k1); if (b!=NULL) k1=1; else k1=0; b=find(&a, k2); if (b!=NULL) k2=1; else k2=0; b=find(&a, k3); if (b!=NULL) k3=1; else k3=0; printf("%d%d%d\n", k1, k2, k3); scanf("%d", &x); push_back(&a, x); 
print_invers(&a); scanf("%d", &x); push_front(&a, x); print(&a); scanf("%d %d", &n, &x); b=findk(&a, n); insertAfter(b, x); print_invers(&a); scanf("%d %d", &n, &x); b=findk(&a, n); insertBefore(b, x); print(&a); scanf("%d", &x); removeFirst(&a, x); print_invers(&a); scanf("%d", &x); removeLast(&a, x); print(&a); clear(&a); return 0; }
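/* Editor's sketch, not part of the original exercise: find() and clear()
 * above stop at the node whose next pointer is NULL, so the tail's value is
 * never compared, and clear() dereferences a NULL head on an empty list.
 * Defensive variants of the same operations could look like this: */
struct node* find_safe(struct list* l, int val)
{
    struct node* tmp;
    for (tmp = l->head; tmp != NULL; tmp = tmp->next) /* also visits the tail */
        if (tmp->value == val)
            return tmp;
    return NULL;
}

void clear_safe(struct list* l)
{
    struct node* tmp = l->head;
    while (tmp != NULL) { /* nothing to free on an empty list */
        struct node* next = tmp->next;
        free(tmp);
        tmp = next;
    }
    init(l);
}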
the_stack_data/93886431.c
#include <stdio.h> int main(void) { int n; int a[5]; int *p; a[2] = 1024; p = &n; /* * write your line of code here... * Remember: * - you are not allowed to use a * - you are not allowed to modify p * - only one statement * - you are not allowed to code anything else than this line of code */ *(p + 5) = 98; /* ...so that this prints 98\n */ printf("a[2] = %d\n", a[2]); return (0); }
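/* Editor's note: the *(p + 5) = 98; line relies on this particular compiler
 * laying n and a[] out contiguously on the stack so that p + 5 happens to be
 * &a[2]. That is exactly what the exercise asks for, but it is not guaranteed
 * by the C standard: with another compiler, optimization level, or a stack
 * protector the write can land elsewhere, so outside the exercise setting this
 * is undefined behaviour. */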
the_stack_data/168894539.c
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <stdlib.h>
#define MAX 100
struct CarLog // structure that stores the entry/exit records
{
    int carid; // car (licence plate) number
    char studentID[7]; // student ID
    char enterDate[11]; // entry date (year-month-day)
    char enterTime[6]; // entry time
    char exitDate[11]; // exit date (year-month-day)
    char exitTime[6]; // exit time
};

int eventInput(struct CarLog*, int); // function that reads the events
void findViolence(struct CarLog*, int); // function that detects the fraudulent users

int main(void)
{
    int N; // number of records to read
    struct CarLog data[MAX]; // array of structures storing the entry/exit records
    scanf("%d", &N); // read how many records will be entered
    getchar(); // remove the newline character left in the buffer
    eventInput(data, N); // call the event input function
    findViolence(data, N); // call the fraud detection function
    return 0;
}

int eventInput(struct CarLog* input, int N)
{ // function that reads the events
    int i; // local variable used in the loop (auto variable)
    for (i = 0; i < N; i++) // input loop
    {
        scanf("%d %s %s %s %s %s", &(input + i)->carid, &(input + i)->studentID, &(input + i)->enterDate, &(input + i)->enterTime, &(input + i)->exitDate, &(input + i)->exitTime);
        getchar(); // after reading the record, remove the newline in the buffer for the next input
    }
    return i; // return the number of records read; this could be extended to report an error when it does not match N
}

void findViolence(struct CarLog* input, int N) // function that detects the fraudulent users
{
    bool status[MAX] = { false, }, dup = false; // the first array marks entries already matched as duplicates so they are not checked again; the second flags the case where more than one extra car shows up
    int count = 0; // counts the number of fraudulent users
    struct CarLog violators[MAX], temp; // array of structures storing the violators and a temporary variable used for sorting
    struct CarLog* pos = input, * pos2; // both are structure pointers used in the loops
    for (; pos < input + N; pos++) // detection loop
    {
        if (*(status + (pos - input)) == true) // skip entries already marked as duplicates
            continue;
        dup = false; // reset dup to false
        for (pos2 = pos; pos2 < input + N; pos2++) // compare the structure pointed to by pos with the other elements via pos2
        {
            if (strcmp(pos->studentID, pos2->studentID) == 0 && pos->carid != pos2->carid && pos != pos2) // not itself, same student ID but a different car number
            {
                status[pos2 - input] = true; // mark as a duplicate of pos
                if (!dup) // first time the person pointed to by pos is caught
                {
                    violators[count++] = *pos; // record the violator, post-incrementing the count
                    dup = true; // only one record per person is needed, so set dup so later matches skip this block
                }
            }
        }
    }
    for (int i = 0; i < count - 1; i++) // sort the stored violators in ascending order of student ID
    {
        for (int j = 0; j < count - 1 - i; j++)
        {
            if (strcmp(violators[j].studentID, violators[j + 1].studentID) > 0)
            { // if the former student ID is larger than the latter, swap them
                temp = violators[j]; // temporary copy
                violators[j] = violators[j + 1]; // swap
                violators[j + 1] = temp; // swap complete
            }
        }
    }
    for (int i = 0; i < count; i++) // print the student IDs of the fraudulent users
        puts(violators[i].studentID); // output
}
the_stack_data/154831765.c
/* { dg-lto-do link } */ /* { dg-lto-options { { -O -flto -fexceptions -fnon-call-exceptions --param allow-store-data-races=0 } } } */ typedef struct { unsigned int e0 : 16; } s1; typedef struct { unsigned int e0 : 16; } s2; typedef struct { s1 i1; s2 i2; } io; static io *i; void f1 (void) { s1 x0; i->i1 = x0; } int main () { f1 (); return 0; }
the_stack_data/100842.c
#include<stdio.h> #include<stdint.h> int main(void) { int age = 0; printf("Enter your age here :"); scanf("%d",&age); if(age < 18){ printf("Sorry ! you are not eligible to vote\n"); }else{ printf("Congrats ! you are eligible to vote\n"); } printf("Press enter key to exit this application"); while(getchar() != '\n') { //just read the input buffer and do nothing } getchar(); }
the_stack_data/187643658.c
/* Copyright (c) 2016, 2018, 2020 Dennis Wölfing * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* libc/src/dirent/opendir.c * Opens a directory. */ #include <dirent.h> #include <fcntl.h> #include <unistd.h> DIR* opendir(const char* path) { int fd = open(path, O_RDONLY | O_CLOEXEC | O_DIRECTORY); if (fd < 0) return NULL; DIR* result = fdopendir(fd); if (!result) { close(fd); } return result; }
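/* Illustrative usage sketch added by the editor -- not part of the original
 * libc source. It shows how opendir() above is normally paired with
 * readdir() and closedir(); the guard macro is the editor's own so the
 * sketch never takes part in an actual libc build. */
#ifdef OPENDIR_USAGE_SKETCH
#include <stdio.h>
int main(void)
{
    DIR* dir = opendir(".");
    if (!dir)
        return 1;
    struct dirent* entry;
    while ((entry = readdir(dir)) != NULL) /* includes "." and ".." */
        puts(entry->d_name);
    closedir(dir);
    return 0;
}
#endif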
the_stack_data/666683.c
// KASAN: global-out-of-bounds Read in fbcon_get_font // https://syzkaller.appspot.com/bug?id=954d05ba515ec8f322f7b17c2a80ad15d57cd5ba // status:open // autogenerated by syzkaller (https://github.com/google/syzkaller) #define _GNU_SOURCE #include <arpa/inet.h> #include <endian.h> #include <errno.h> #include <fcntl.h> #include <net/if.h> #include <netinet/in.h> #include <pthread.h> #include <stdbool.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/socket.h> #include <sys/stat.h> #include <sys/syscall.h> #include <sys/types.h> #include <time.h> #include <unistd.h> #include <linux/futex.h> #include <linux/genetlink.h> #include <linux/if_addr.h> #include <linux/if_link.h> #include <linux/in6.h> #include <linux/neighbour.h> #include <linux/net.h> #include <linux/netlink.h> #include <linux/rtnetlink.h> #include <linux/veth.h> static void sleep_ms(uint64_t ms) { usleep(ms * 1000); } static uint64_t current_time_ms(void) { struct timespec ts; if (clock_gettime(CLOCK_MONOTONIC, &ts)) exit(1); return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000; } static void thread_start(void* (*fn)(void*), void* arg) { pthread_t th; pthread_attr_t attr; pthread_attr_init(&attr); pthread_attr_setstacksize(&attr, 128 << 10); int i; for (i = 0; i < 100; i++) { if (pthread_create(&th, &attr, fn, arg) == 0) { pthread_attr_destroy(&attr); return; } if (errno == EAGAIN) { usleep(50); continue; } break; } exit(1); } typedef struct { int state; } event_t; static void event_init(event_t* ev) { ev->state = 0; } static void event_reset(event_t* ev) { ev->state = 0; } static void event_set(event_t* ev) { if (ev->state) exit(1); __atomic_store_n(&ev->state, 1, __ATOMIC_RELEASE); syscall(SYS_futex, &ev->state, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1000000); } static void event_wait(event_t* ev) { while (!__atomic_load_n(&ev->state, __ATOMIC_ACQUIRE)) syscall(SYS_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, 0); } static int event_isset(event_t* ev) { return __atomic_load_n(&ev->state, __ATOMIC_ACQUIRE); } static int event_timedwait(event_t* ev, uint64_t timeout) { uint64_t start = current_time_ms(); uint64_t now = start; for (;;) { uint64_t remain = timeout - (now - start); struct timespec ts; ts.tv_sec = remain / 1000; ts.tv_nsec = (remain % 1000) * 1000 * 1000; syscall(SYS_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, &ts); if (__atomic_load_n(&ev->state, __ATOMIC_RELAXED)) return 1; now = current_time_ms(); if (now - start > timeout) return 0; } } struct nlmsg { char* pos; int nesting; struct nlattr* nested[8]; char buf[1024]; }; static struct nlmsg nlmsg; static void netlink_init(struct nlmsg* nlmsg, int typ, int flags, const void* data, int size) { memset(nlmsg, 0, sizeof(*nlmsg)); struct nlmsghdr* hdr = (struct nlmsghdr*)nlmsg->buf; hdr->nlmsg_type = typ; hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | flags; memcpy(hdr + 1, data, size); nlmsg->pos = (char*)(hdr + 1) + NLMSG_ALIGN(size); } static void netlink_attr(struct nlmsg* nlmsg, int typ, const void* data, int size) { struct nlattr* attr = (struct nlattr*)nlmsg->pos; attr->nla_len = sizeof(*attr) + size; attr->nla_type = typ; memcpy(attr + 1, data, size); nlmsg->pos += NLMSG_ALIGN(attr->nla_len); } static int netlink_send_ext(struct nlmsg* nlmsg, int sock, uint16_t reply_type, int* reply_len) { if (nlmsg->pos > nlmsg->buf + sizeof(nlmsg->buf) || nlmsg->nesting) exit(1); struct nlmsghdr* hdr = (struct nlmsghdr*)nlmsg->buf; hdr->nlmsg_len = nlmsg->pos - nlmsg->buf; struct sockaddr_nl addr; 
memset(&addr, 0, sizeof(addr)); addr.nl_family = AF_NETLINK; unsigned n = sendto(sock, nlmsg->buf, hdr->nlmsg_len, 0, (struct sockaddr*)&addr, sizeof(addr)); if (n != hdr->nlmsg_len) exit(1); n = recv(sock, nlmsg->buf, sizeof(nlmsg->buf), 0); if (hdr->nlmsg_type == NLMSG_DONE) { *reply_len = 0; return 0; } if (n < sizeof(struct nlmsghdr)) exit(1); if (reply_len && hdr->nlmsg_type == reply_type) { *reply_len = n; return 0; } if (n < sizeof(struct nlmsghdr) + sizeof(struct nlmsgerr)) exit(1); if (hdr->nlmsg_type != NLMSG_ERROR) exit(1); return -((struct nlmsgerr*)(hdr + 1))->error; } static int netlink_send(struct nlmsg* nlmsg, int sock) { return netlink_send_ext(nlmsg, sock, 0, NULL); } static int netlink_next_msg(struct nlmsg* nlmsg, unsigned int offset, unsigned int total_len) { struct nlmsghdr* hdr = (struct nlmsghdr*)(nlmsg->buf + offset); if (offset == total_len || offset + hdr->nlmsg_len > total_len) return -1; return hdr->nlmsg_len; } static void netlink_device_change(struct nlmsg* nlmsg, int sock, const char* name, bool up, const char* master, const void* mac, int macsize, const char* new_name) { struct ifinfomsg hdr; memset(&hdr, 0, sizeof(hdr)); if (up) hdr.ifi_flags = hdr.ifi_change = IFF_UP; hdr.ifi_index = if_nametoindex(name); netlink_init(nlmsg, RTM_NEWLINK, 0, &hdr, sizeof(hdr)); if (new_name) netlink_attr(nlmsg, IFLA_IFNAME, new_name, strlen(new_name)); if (master) { int ifindex = if_nametoindex(master); netlink_attr(nlmsg, IFLA_MASTER, &ifindex, sizeof(ifindex)); } if (macsize) netlink_attr(nlmsg, IFLA_ADDRESS, mac, macsize); int err = netlink_send(nlmsg, sock); (void)err; } const int kInitNetNsFd = 239; #define DEVLINK_FAMILY_NAME "devlink" #define DEVLINK_CMD_PORT_GET 5 #define DEVLINK_CMD_RELOAD 37 #define DEVLINK_ATTR_BUS_NAME 1 #define DEVLINK_ATTR_DEV_NAME 2 #define DEVLINK_ATTR_NETDEV_NAME 7 #define DEVLINK_ATTR_NETNS_FD 138 static int netlink_devlink_id_get(struct nlmsg* nlmsg, int sock) { struct genlmsghdr genlhdr; struct nlattr* attr; int err, n; uint16_t id = 0; memset(&genlhdr, 0, sizeof(genlhdr)); genlhdr.cmd = CTRL_CMD_GETFAMILY; netlink_init(nlmsg, GENL_ID_CTRL, 0, &genlhdr, sizeof(genlhdr)); netlink_attr(nlmsg, CTRL_ATTR_FAMILY_NAME, DEVLINK_FAMILY_NAME, strlen(DEVLINK_FAMILY_NAME) + 1); err = netlink_send_ext(nlmsg, sock, GENL_ID_CTRL, &n); if (err) { return -1; } attr = (struct nlattr*)(nlmsg->buf + NLMSG_HDRLEN + NLMSG_ALIGN(sizeof(genlhdr))); for (; (char*)attr < nlmsg->buf + n; attr = (struct nlattr*)((char*)attr + NLMSG_ALIGN(attr->nla_len))) { if (attr->nla_type == CTRL_ATTR_FAMILY_ID) { id = *(uint16_t*)(attr + 1); break; } } if (!id) { return -1; } recv(sock, nlmsg->buf, sizeof(nlmsg->buf), 0); /* recv ack */ return id; } static void netlink_devlink_netns_move(const char* bus_name, const char* dev_name, int netns_fd) { struct genlmsghdr genlhdr; int sock; int id, err; sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC); if (sock == -1) exit(1); id = netlink_devlink_id_get(&nlmsg, sock); if (id == -1) goto error; memset(&genlhdr, 0, sizeof(genlhdr)); genlhdr.cmd = DEVLINK_CMD_RELOAD; netlink_init(&nlmsg, id, 0, &genlhdr, sizeof(genlhdr)); netlink_attr(&nlmsg, DEVLINK_ATTR_BUS_NAME, bus_name, strlen(bus_name) + 1); netlink_attr(&nlmsg, DEVLINK_ATTR_DEV_NAME, dev_name, strlen(dev_name) + 1); netlink_attr(&nlmsg, DEVLINK_ATTR_NETNS_FD, &netns_fd, sizeof(netns_fd)); err = netlink_send(&nlmsg, sock); if (err) { } error: close(sock); } static struct nlmsg nlmsg2; static void initialize_devlink_ports(const char* bus_name, const char* dev_name, const char* 
netdev_prefix) { struct genlmsghdr genlhdr; int len, total_len, id, err, offset; uint16_t netdev_index; int sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC); if (sock == -1) exit(1); int rtsock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); if (rtsock == -1) exit(1); id = netlink_devlink_id_get(&nlmsg, sock); if (id == -1) goto error; memset(&genlhdr, 0, sizeof(genlhdr)); genlhdr.cmd = DEVLINK_CMD_PORT_GET; netlink_init(&nlmsg, id, NLM_F_DUMP, &genlhdr, sizeof(genlhdr)); netlink_attr(&nlmsg, DEVLINK_ATTR_BUS_NAME, bus_name, strlen(bus_name) + 1); netlink_attr(&nlmsg, DEVLINK_ATTR_DEV_NAME, dev_name, strlen(dev_name) + 1); err = netlink_send_ext(&nlmsg, sock, id, &total_len); if (err) { goto error; } offset = 0; netdev_index = 0; while ((len = netlink_next_msg(&nlmsg, offset, total_len)) != -1) { struct nlattr* attr = (struct nlattr*)(nlmsg.buf + offset + NLMSG_HDRLEN + NLMSG_ALIGN(sizeof(genlhdr))); for (; (char*)attr < nlmsg.buf + offset + len; attr = (struct nlattr*)((char*)attr + NLMSG_ALIGN(attr->nla_len))) { if (attr->nla_type == DEVLINK_ATTR_NETDEV_NAME) { char* port_name; char netdev_name[IFNAMSIZ]; port_name = (char*)(attr + 1); snprintf(netdev_name, sizeof(netdev_name), "%s%d", netdev_prefix, netdev_index); netlink_device_change(&nlmsg2, rtsock, port_name, true, 0, 0, 0, netdev_name); break; } } offset += len; netdev_index++; } error: close(rtsock); close(sock); } static void initialize_devlink_pci(void) { int netns = open("/proc/self/ns/net", O_RDONLY); if (netns == -1) exit(1); int ret = setns(kInitNetNsFd, 0); if (ret == -1) exit(1); netlink_devlink_netns_move("pci", "0000:00:10.0", netns); ret = setns(netns, 0); if (ret == -1) exit(1); close(netns); initialize_devlink_ports("pci", "0000:00:10.0", "netpci"); } static long syz_open_dev(volatile long a0, volatile long a1, volatile long a2) { if (a0 == 0xc || a0 == 0xb) { char buf[128]; sprintf(buf, "/dev/%s/%d:%d", a0 == 0xc ? 
"char" : "block", (uint8_t)a1, (uint8_t)a2); return open(buf, O_RDWR, 0); } else { char buf[1024]; char* hash; strncpy(buf, (char*)a0, sizeof(buf) - 1); buf[sizeof(buf) - 1] = 0; while ((hash = strchr(buf, '#'))) { *hash = '0' + (char)(a1 % 10); a1 /= 10; } return open(buf, a2, 0); } } struct thread_t { int created, call; event_t ready, done; }; static struct thread_t threads[16]; static void execute_call(int call); static int running; static void* thr(void* arg) { struct thread_t* th = (struct thread_t*)arg; for (;;) { event_wait(&th->ready); event_reset(&th->ready); execute_call(th->call); __atomic_fetch_sub(&running, 1, __ATOMIC_RELAXED); event_set(&th->done); } return 0; } static void loop(void) { int i, call, thread; int collide = 0; again: for (call = 0; call < 6; call++) { for (thread = 0; thread < (int)(sizeof(threads) / sizeof(threads[0])); thread++) { struct thread_t* th = &threads[thread]; if (!th->created) { th->created = 1; event_init(&th->ready); event_init(&th->done); event_set(&th->done); thread_start(thr, th); } if (!event_isset(&th->done)) continue; event_reset(&th->done); th->call = call; __atomic_fetch_add(&running, 1, __ATOMIC_RELAXED); event_set(&th->ready); if (collide && (call % 2) == 0) break; event_timedwait(&th->done, 45); break; } } for (i = 0; i < 100 && __atomic_load_n(&running, __ATOMIC_RELAXED); i++) sleep_ms(1); if (!collide) { collide = 1; goto again; } } uint64_t r[3] = {0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff}; void execute_call(int call) { intptr_t res; switch (call) { case 0: res = syz_open_dev(0xc, 4, 1); if (res != -1) r[0] = res; break; case 1: *(uint32_t*)0x200000c0 = 3; *(uint32_t*)0x200000c4 = 0; *(uint32_t*)0x200000c8 = 0; *(uint32_t*)0x200000cc = 5; *(uint32_t*)0x200000d0 = 0; *(uint64_t*)0x200000d8 = 0; syscall(__NR_ioctl, r[0], 0x4b72ul, 0x200000c0ul); break; case 2: res = syz_open_dev(0xc, 4, 1); if (res != -1) r[1] = res; break; case 3: *(uint32_t*)0x20000200 = 0x81; *(uint32_t*)0x20000204 = 0; *(uint32_t*)0x20000208 = 0; *(uint32_t*)0x2000020c = 0; *(uint8_t*)0x20000210 = 0; memcpy((void*)0x20000211, "\x49\xd9\xfe\x4d\xa0\x87\xf5\xd6\xf1\x0f\x99\x76" "\x12\x7b\xcf\x86\xfe\x01\xfe", 19); syscall(__NR_ioctl, r[1], 0x4b60ul, 0x20000200ul); break; case 4: res = syz_open_dev(0xc, 4, 1); if (res != -1) r[2] = res; break; case 5: *(uint32_t*)0x20000200 = 0x81; *(uint32_t*)0x20000204 = 0; *(uint32_t*)0x20000208 = 0; *(uint32_t*)0x2000020c = 0; *(uint8_t*)0x20000210 = 0; memcpy((void*)0x20000211, "\x49\xd9\xfe\x4d\xa0\x87\xf5\xd6\xf1\x0f\x99\x76" "\x12\x7b\xcf\x86\xfe\x01\xfe", 19); syscall(__NR_ioctl, r[2], 0x4b61ul, 0x20000200ul); break; } } int main(void) { syscall(__NR_mmap, 0x20000000ul, 0x1000000ul, 3ul, 0x32ul, -1, 0); loop(); return 0; }
the_stack_data/975121.c
/* hw7_27 */ #include <stdio.h> #include <stdlib.h> int main(void) { int i,j,k; for(i=1;i<=5;i++) { j=5; while(j-i!=0) { printf(" "); j--; } k=1; while(k<=j) { printf("%d",k); k++; } printf("\n"); } system("pause"); return 0; } /* 1 12 123 1234 12345 Press any key to continue . . . */
the_stack_data/61439.c
# 1 "security/mbedtls/src/hmac_drbg.c" # 1 "/home/stone/Documents/Ali_IOT//" # 1 "<built-in>" # 1 "<command-line>" # 1 "security/mbedtls/src/hmac_drbg.c" # 29 "security/mbedtls/src/hmac_drbg.c" # 1 "./security/mbedtls/include/mbedtls/config.h" 1 # 99 "./security/mbedtls/include/mbedtls/config.h" # 1 "./security/mbedtls/include/mbedtls/check_config.h" 1 # 36 "./security/mbedtls/include/mbedtls/check_config.h" # 1 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/lib/gcc/arm-none-eabi/5.4.1/include-fixed/limits.h" 1 3 4 # 34 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/lib/gcc/arm-none-eabi/5.4.1/include-fixed/limits.h" 3 4 # 1 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/lib/gcc/arm-none-eabi/5.4.1/include-fixed/syslimits.h" 1 3 4 # 1 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/lib/gcc/arm-none-eabi/5.4.1/include-fixed/limits.h" 1 3 4 # 168 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/lib/gcc/arm-none-eabi/5.4.1/include-fixed/limits.h" 3 4 # 1 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/limits.h" 1 3 4 # 1 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/newlib.h" 1 3 4 # 14 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/newlib.h" 3 4 # 1 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/_newlib_version.h" 1 3 4 # 15 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/newlib.h" 2 3 4 # 5 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/limits.h" 2 3 4 # 1 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/sys/cdefs.h" 1 3 4 # 43 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/sys/cdefs.h" 3 4 # 1 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/machine/_default_types.h" 1 3 4 # 1 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/sys/features.h" 1 3 4 # 9 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/machine/_default_types.h" 2 3 4 # 27 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/machine/_default_types.h" 3 4 # 27 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/machine/_default_types.h" 3 4 typedef signed char __int8_t; typedef unsigned char __uint8_t; # 41 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/machine/_default_types.h" 3 4 typedef short int __int16_t; typedef short unsigned int __uint16_t; # 63 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/machine/_default_types.h" 3 4 typedef long int __int32_t; typedef long unsigned int __uint32_t; # 89 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/machine/_default_types.h" 3 4 typedef long long int __int64_t; typedef long long unsigned int __uint64_t; # 120 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/machine/_default_types.h" 3 4 typedef signed char __int_least8_t; typedef unsigned char __uint_least8_t; # 146 
"/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/machine/_default_types.h" 3 4 typedef short int __int_least16_t; typedef short unsigned int __uint_least16_t; # 168 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/machine/_default_types.h" 3 4 typedef long int __int_least32_t; typedef long unsigned int __uint_least32_t; # 186 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/machine/_default_types.h" 3 4 typedef long long int __int_least64_t; typedef long long unsigned int __uint_least64_t; # 200 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/machine/_default_types.h" 3 4 typedef int __intptr_t; typedef unsigned int __uintptr_t; # 44 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/sys/cdefs.h" 2 3 4 # 1 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/lib/gcc/arm-none-eabi/5.4.1/include/stddef.h" 1 3 4 # 149 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/lib/gcc/arm-none-eabi/5.4.1/include/stddef.h" 3 4 typedef int ptrdiff_t; # 216 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/lib/gcc/arm-none-eabi/5.4.1/include/stddef.h" 3 4 typedef unsigned int size_t; # 328 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/lib/gcc/arm-none-eabi/5.4.1/include/stddef.h" 3 4 typedef unsigned int wchar_t; # 426 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/lib/gcc/arm-none-eabi/5.4.1/include/stddef.h" 3 4 typedef struct { long long __max_align_ll __attribute__((__aligned__(__alignof__(long long)))); long double __max_align_ld __attribute__((__aligned__(__alignof__(long double)))); } max_align_t; # 46 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/sys/cdefs.h" 2 3 4 # 6 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/arm-none-eabi/include/limits.h" 2 3 4 # 169 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/lib/gcc/arm-none-eabi/5.4.1/include-fixed/limits.h" 2 3 4 # 8 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/lib/gcc/arm-none-eabi/5.4.1/include-fixed/syslimits.h" 2 3 4 # 35 "/home/stone/Documents/Ali_IOT/build/compiler/gcc-arm-none-eabi/Linux64/lib/gcc/arm-none-eabi/5.4.1/include-fixed/limits.h" 2 3 4 # 37 "./security/mbedtls/include/mbedtls/check_config.h" 2 # 672 "./security/mbedtls/include/mbedtls/check_config.h" # 672 "./security/mbedtls/include/mbedtls/check_config.h" typedef int mbedtls_iso_c_forbids_empty_translation_units; # 100 "./security/mbedtls/include/mbedtls/config.h" 2 # 30 "security/mbedtls/src/hmac_drbg.c" 2
the_stack_data/92327235.c
/* f2c.h -- Standard Fortran to C header file */ /** barf [ba:rf] 2. "He suggested using FORTRAN, and everybody barfed." - From The Shogakukan DICTIONARY OF NEW ENGLISH (Second edition) */ #ifndef F2C_INCLUDE #define F2C_INCLUDE #include <math.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <complex.h> #ifdef complex #undef complex #endif #ifdef I #undef I #endif #if defined(_WIN64) typedef long long BLASLONG; typedef unsigned long long BLASULONG; #else typedef long BLASLONG; typedef unsigned long BLASULONG; #endif #ifdef LAPACK_ILP64 typedef BLASLONG blasint; #if defined(_WIN64) #define blasabs(x) llabs(x) #else #define blasabs(x) labs(x) #endif #else typedef int blasint; #define blasabs(x) abs(x) #endif typedef blasint integer; typedef unsigned int uinteger; typedef char *address; typedef short int shortint; typedef float real; typedef double doublereal; typedef struct { real r, i; } complex; typedef struct { doublereal r, i; } doublecomplex; static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;} static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;} static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;} static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;} #define pCf(z) (*_pCf(z)) #define pCd(z) (*_pCd(z)) typedef int logical; typedef short int shortlogical; typedef char logical1; typedef char integer1; #define TRUE_ (1) #define FALSE_ (0) /* Extern is for use with -E */ #ifndef Extern #define Extern extern #endif /* I/O stuff */ typedef int flag; typedef int ftnlen; typedef int ftnint; /*external read, write*/ typedef struct { flag cierr; ftnint ciunit; flag ciend; char *cifmt; ftnint cirec; } cilist; /*internal read, write*/ typedef struct { flag icierr; char *iciunit; flag iciend; char *icifmt; ftnint icirlen; ftnint icirnum; } icilist; /*open*/ typedef struct { flag oerr; ftnint ounit; char *ofnm; ftnlen ofnmlen; char *osta; char *oacc; char *ofm; ftnint orl; char *oblnk; } olist; /*close*/ typedef struct { flag cerr; ftnint cunit; char *csta; } cllist; /*rewind, backspace, endfile*/ typedef struct { flag aerr; ftnint aunit; } alist; /* inquire */ typedef struct { flag inerr; ftnint inunit; char *infile; ftnlen infilen; ftnint *inex; /*parameters in standard's order*/ ftnint *inopen; ftnint *innum; ftnint *innamed; char *inname; ftnlen innamlen; char *inacc; ftnlen inacclen; char *inseq; ftnlen inseqlen; char *indir; ftnlen indirlen; char *infmt; ftnlen infmtlen; char *inform; ftnint informlen; char *inunf; ftnlen inunflen; ftnint *inrecl; ftnint *innrec; char *inblank; ftnlen inblanklen; } inlist; #define VOID void union Multitype { /* for multiple entry points */ integer1 g; shortint h; integer i; /* longint j; */ real r; doublereal d; complex c; doublecomplex z; }; typedef union Multitype Multitype; struct Vardesc { /* for Namelist */ char *name; char *addr; ftnlen *dims; int type; }; typedef struct Vardesc Vardesc; struct Namelist { char *name; Vardesc **vars; int nvars; }; typedef struct Namelist Namelist; #define abs(x) ((x) >= 0 ? (x) : -(x)) #define dabs(x) (fabs(x)) #define f2cmin(a,b) ((a) <= (b) ? (a) : (b)) #define f2cmax(a,b) ((a) >= (b) ? 
(a) : (b)) #define dmin(a,b) (f2cmin(a,b)) #define dmax(a,b) (f2cmax(a,b)) #define bit_test(a,b) ((a) >> (b) & 1) #define bit_clear(a,b) ((a) & ~((uinteger)1 << (b))) #define bit_set(a,b) ((a) | ((uinteger)1 << (b))) #define abort_() { sig_die("Fortran abort routine called", 1); } #define c_abs(z) (cabsf(Cf(z))) #define c_cos(R,Z) { pCf(R)=ccos(Cf(Z)); } #define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);} #define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);} #define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));} #define c_log(R, Z) {pCf(R) = clogf(Cf(Z));} #define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));} //#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));} #define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));} #define d_abs(x) (fabs(*(x))) #define d_acos(x) (acos(*(x))) #define d_asin(x) (asin(*(x))) #define d_atan(x) (atan(*(x))) #define d_atn2(x, y) (atan2(*(x),*(y))) #define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); } #define r_cnjg(R, Z) { pCf(R) = conj(Cf(Z)); } #define d_cos(x) (cos(*(x))) #define d_cosh(x) (cosh(*(x))) #define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 ) #define d_exp(x) (exp(*(x))) #define d_imag(z) (cimag(Cd(z))) #define r_imag(z) (cimag(Cf(z))) #define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x))) #define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x))) #define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) ) #define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) ) #define d_log(x) (log(*(x))) #define d_mod(x, y) (fmod(*(x), *(y))) #define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x))) #define d_nint(x) u_nint(*(x)) #define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a))) #define d_sign(a,b) u_sign(*(a),*(b)) #define r_sign(a,b) u_sign(*(a),*(b)) #define d_sin(x) (sin(*(x))) #define d_sinh(x) (sinh(*(x))) #define d_sqrt(x) (sqrt(*(x))) #define d_tan(x) (tan(*(x))) #define d_tanh(x) (tanh(*(x))) #define i_abs(x) abs(*(x)) #define i_dnnt(x) ((integer)u_nint(*(x))) #define i_len(s, n) (n) #define i_nint(x) ((integer)u_nint(*(x))) #define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b))) #define pow_dd(ap, bp) ( pow(*(ap), *(bp))) #define pow_si(B,E) spow_ui(*(B),*(E)) #define pow_ri(B,E) spow_ui(*(B),*(E)) #define pow_di(B,E) dpow_ui(*(B),*(E)) #define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));} #define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));} #define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),*(B));} #define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; } #define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d)))) #define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; } #define sig_die(s, kill) { exit(1); } #define s_stop(s, n) {exit(0);} static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n"; #define z_abs(z) (cabs(Cd(z))) #define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));} #define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));} #define myexit_() break; #define mycycle() continue; #define myceiling(w) {ceil(w)} #define myhuge(w) {HUGE_VAL} //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);} #define mymaxloc(w,s,e,n) {dmaxloc_(w,*(s),*(e),n)} /* procedure parameter types for -A and -C++ */ #define F2C_proc_par_types 1 #ifdef __cplusplus typedef logical 
(*L_fp)(...); #else typedef logical (*L_fp)(); #endif static float spow_ui(float x, integer n) { float pow=1.0; unsigned long int u; if(n != 0) { if(n < 0) n = -n, x = 1/x; for(u = n; ; ) { if(u & 01) pow *= x; if(u >>= 1) x *= x; else break; } } return pow; } static double dpow_ui(double x, integer n) { double pow=1.0; unsigned long int u; if(n != 0) { if(n < 0) n = -n, x = 1/x; for(u = n; ; ) { if(u & 01) pow *= x; if(u >>= 1) x *= x; else break; } } return pow; } static _Complex float cpow_ui(_Complex float x, integer n) { _Complex float pow=1.0; unsigned long int u; if(n != 0) { if(n < 0) n = -n, x = 1/x; for(u = n; ; ) { if(u & 01) pow *= x; if(u >>= 1) x *= x; else break; } } return pow; } static _Complex double zpow_ui(_Complex double x, integer n) { _Complex double pow=1.0; unsigned long int u; if(n != 0) { if(n < 0) n = -n, x = 1/x; for(u = n; ; ) { if(u & 01) pow *= x; if(u >>= 1) x *= x; else break; } } return pow; } static integer pow_ii(integer x, integer n) { integer pow; unsigned long int u; if (n <= 0) { if (n == 0 || x == 1) pow = 1; else if (x != -1) pow = x == 0 ? 1/x : 0; else n = -n; } if ((n > 0) || !(n == 0 || x == 1 || x != -1)) { u = n; for(pow = 1; ; ) { if(u & 01) pow *= x; if(u >>= 1) x *= x; else break; } } return pow; } static integer dmaxloc_(double *w, integer s, integer e, integer *n) { double m; integer i, mi; for(m=w[s-1], mi=s, i=s+1; i<=e; i++) if (w[i-1]>m) mi=i ,m=w[i-1]; return mi-s+1; } static integer smaxloc_(float *w, integer s, integer e, integer *n) { float m; integer i, mi; for(m=w[s-1], mi=s, i=s+1; i<=e; i++) if (w[i-1]>m) mi=i ,m=w[i-1]; return mi-s+1; } static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) { integer n = *n_, incx = *incx_, incy = *incy_, i; _Complex float zdotc = 0.0; if (incx == 1 && incy == 1) { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += conjf(Cf(&x[i])) * Cf(&y[i]); } } else { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]); } } pCf(z) = zdotc; } static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) { integer n = *n_, incx = *incx_, incy = *incy_, i; _Complex double zdotc = 0.0; if (incx == 1 && incy == 1) { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += conj(Cd(&x[i])) * Cd(&y[i]); } } else { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]); } } pCd(z) = zdotc; } static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) { integer n = *n_, incx = *incx_, incy = *incy_, i; _Complex float zdotc = 0.0; if (incx == 1 && incy == 1) { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += Cf(&x[i]) * Cf(&y[i]); } } else { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]); } } pCf(z) = zdotc; } static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) { integer n = *n_, incx = *incx_, incy = *incy_, i; _Complex double zdotc = 0.0; if (incx == 1 && incy == 1) { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += Cd(&x[i]) * Cd(&y[i]); } } else { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]); } } pCd(z) = zdotc; } #endif /* -- translated by f2c (version 20000121). 
You must link the resulting object file with the libraries: -lf2c -lm (in that order) */ /* Table of constant values */ static doublereal c_b8 = 1.; static integer c__1 = 1; /* > \brief \b ZPPTRI */ /* =========== DOCUMENTATION =========== */ /* Online html documentation available at */ /* http://www.netlib.org/lapack/explore-html/ */ /* > \htmlonly */ /* > Download ZPPTRI + dependencies */ /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/zpptri. f"> */ /* > [TGZ]</a> */ /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/zpptri. f"> */ /* > [ZIP]</a> */ /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/zpptri. f"> */ /* > [TXT]</a> */ /* > \endhtmlonly */ /* Definition: */ /* =========== */ /* SUBROUTINE ZPPTRI( UPLO, N, AP, INFO ) */ /* CHARACTER UPLO */ /* INTEGER INFO, N */ /* COMPLEX*16 AP( * ) */ /* > \par Purpose: */ /* ============= */ /* > */ /* > \verbatim */ /* > */ /* > ZPPTRI computes the inverse of a complex Hermitian positive definite */ /* > matrix A using the Cholesky factorization A = U**H*U or A = L*L**H */ /* > computed by ZPPTRF. */ /* > \endverbatim */ /* Arguments: */ /* ========== */ /* > \param[in] UPLO */ /* > \verbatim */ /* > UPLO is CHARACTER*1 */ /* > = 'U': Upper triangular factor is stored in AP; */ /* > = 'L': Lower triangular factor is stored in AP. */ /* > \endverbatim */ /* > */ /* > \param[in] N */ /* > \verbatim */ /* > N is INTEGER */ /* > The order of the matrix A. N >= 0. */ /* > \endverbatim */ /* > */ /* > \param[in,out] AP */ /* > \verbatim */ /* > AP is COMPLEX*16 array, dimension (N*(N+1)/2) */ /* > On entry, the triangular factor U or L from the Cholesky */ /* > factorization A = U**H*U or A = L*L**H, packed columnwise as */ /* > a linear array. The j-th column of U or L is stored in the */ /* > array AP as follows: */ /* > if UPLO = 'U', AP(i + (j-1)*j/2) = U(i,j) for 1<=i<=j; */ /* > if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = L(i,j) for j<=i<=n. */ /* > */ /* > On exit, the upper or lower triangle of the (Hermitian) */ /* > inverse of A, overwriting the input factor U or L. */ /* > \endverbatim */ /* > */ /* > \param[out] INFO */ /* > \verbatim */ /* > INFO is INTEGER */ /* > = 0: successful exit */ /* > < 0: if INFO = -i, the i-th argument had an illegal value */ /* > > 0: if INFO = i, the (i,i) element of the factor U or L is */ /* > zero, and the inverse could not be computed. */ /* > \endverbatim */ /* Authors: */ /* ======== */ /* > \author Univ. of Tennessee */ /* > \author Univ. of California Berkeley */ /* > \author Univ. of Colorado Denver */ /* > \author NAG Ltd. 
*/ /* > \date December 2016 */ /* > \ingroup complex16OTHERcomputational */ /* ===================================================================== */ /* Subroutine */ int zpptri_(char *uplo, integer *n, doublecomplex *ap, integer *info) { /* System generated locals */ integer i__1, i__2, i__3; doublereal d__1; doublecomplex z__1; /* Local variables */ extern /* Subroutine */ int zhpr_(char *, integer *, doublereal *, doublecomplex *, integer *, doublecomplex *); integer j; extern logical lsame_(char *, char *); extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, integer *); logical upper; extern /* Subroutine */ int ztpmv_(char *, char *, char *, integer *, doublecomplex *, doublecomplex *, integer *); integer jc, jj; extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen), zdscal_( integer *, doublereal *, doublecomplex *, integer *), ztptri_( char *, char *, integer *, doublecomplex *, integer *); doublereal ajj; integer jjn; /* -- LAPACK computational routine (version 3.7.0) -- */ /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */ /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */ /* December 2016 */ /* ===================================================================== */ /* Test the input parameters. */ /* Parameter adjustments */ --ap; /* Function Body */ *info = 0; upper = lsame_(uplo, "U"); if (! upper && ! lsame_(uplo, "L")) { *info = -1; } else if (*n < 0) { *info = -2; } if (*info != 0) { i__1 = -(*info); xerbla_("ZPPTRI", &i__1, (ftnlen)6); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } /* Invert the triangular Cholesky factor U or L. */ ztptri_(uplo, "Non-unit", n, &ap[1], info); if (*info > 0) { return 0; } if (upper) { /* Compute the product inv(U) * inv(U)**H. */ jj = 0; i__1 = *n; for (j = 1; j <= i__1; ++j) { jc = jj + 1; jj += j; if (j > 1) { i__2 = j - 1; zhpr_("Upper", &i__2, &c_b8, &ap[jc], &c__1, &ap[1]); } i__2 = jj; ajj = ap[i__2].r; zdscal_(&j, &ajj, &ap[jc], &c__1); /* L10: */ } } else { /* Compute the product inv(L)**H * inv(L). */ jj = 1; i__1 = *n; for (j = 1; j <= i__1; ++j) { jjn = jj + *n - j + 1; i__2 = jj; i__3 = *n - j + 1; zdotc_(&z__1, &i__3, &ap[jj], &c__1, &ap[jj], &c__1); d__1 = z__1.r; ap[i__2].r = d__1, ap[i__2].i = 0.; if (j < *n) { i__2 = *n - j; ztpmv_("Lower", "Conjugate transpose", "Non-unit", &i__2, &ap[ jjn], &ap[jj + 1], &c__1); } jj = jjn; /* L20: */ } } return 0; /* End of ZPPTRI */ } /* zpptri_ */
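/* Illustrative calling sketch added by the editor -- not part of the
 * translated LAPACK source. It inverts a 2x2 Hermitian positive definite
 * matrix stored in packed upper form. It assumes the companion f2c
 * translation of ZPPTRF (zpptrf_) is linked in as well, since that routine
 * is not defined in this file; the guard macro name is the editor's own. */
#ifdef ZPPTRI_USAGE_SKETCH
static void zpptri_example(void)
{
    /* A = diag(2, 3); packed upper storage is AP = { a11, a12, a22 }. */
    doublecomplex ap[3] = { {2., 0.}, {0., 0.}, {3., 0.} };
    integer n = 2, info = 0;
    extern int zpptrf_(char *, integer *, doublecomplex *, integer *);

    zpptrf_("U", &n, ap, &info);     /* Cholesky factor: A = U**H * U        */
    if (info == 0)
        zpptri_("U", &n, ap, &info); /* AP now holds inv(A) = diag(0.5, 1/3) */
}
#endif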
the_stack_data/103699.c
#include <stdio.h> #include <stdlib.h> // Modifies Chapter 5 broker.c with loop de loops. int main(void) { float commission, value; printf("Enter value of trade: "); scanf("%f", &value); while (value > 0) { if (value < 2500.0) { commission = 30.0 + .017 * value; } else if (value < 6250.0) { commission = 56.0 + .0066 * value; } else if (value < 20000.0) { commission = 76.0 + .0034 * value; } else if (value < 50000.0) { commission = 100.0 + .0022 * value; } else if (value < 500000.0) { commission = 155.0 + .0011 * value; } else { commission = 255.0 + .0009 * value; } if (commission < 39.0) commission = 39.0; printf("Commission is $%.2f\n", commission); printf("Enter value of trade: "); scanf("%f", &value); } return 0; }
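/* Editor's note: a quick worked example of the brackets above. For a trade
 * value of 5000.00 the 2500-6250 bracket applies, so
 *     commission = 56.0 + 0.0066 * 5000.00 = 89.00,
 * which is above the 39.00 minimum and is printed as "Commission is $89.00". */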
the_stack_data/232954631.c
/* crypto/md4/md4_one.c */ /* Copyright (C) 1995-1998 Eric Young ([email protected]) * All rights reserved. * * This package is an SSL implementation written * by Eric Young ([email protected]). * The implementation was written so as to conform with Netscapes SSL. * * This library is free for commercial and non-commercial use as long as * the following conditions are aheared to. The following conditions * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms * except that the holder is Tim Hudson ([email protected]). * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. * If this package is used in a product, Eric Young should be given attribution * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * "This product includes cryptographic software written by * Eric Young ([email protected])" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). * 4. If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: * "This product includes software written by Tim Hudson ([email protected])" * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] 
*/ #include <stdio.h> #include <string.h> #include <openssl/md4.h> #include <openssl/crypto.h> #ifdef CHARSET_EBCDIC #include <openssl/ebcdic.h> #endif unsigned char *MD4(const unsigned char *d, size_t n, unsigned char *md) { MD4_CTX c; static unsigned char m[MD4_DIGEST_LENGTH]; if (md == NULL) md=m; if (!MD4_Init(&c)) return NULL; #ifndef CHARSET_EBCDIC MD4_Update(&c,d,n); #else { char temp[1024]; unsigned long chunk; while (n > 0) { chunk = (n > sizeof(temp)) ? sizeof(temp) : n; ebcdic2ascii(temp, d, chunk); MD4_Update(&c,temp,chunk); n -= chunk; d += chunk; } } #endif MD4_Final(md,&c); OPENSSL_cleanse(&c,sizeof(c)); /* security consideration */ return(md); }
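/* Illustrative usage sketch added by the editor -- not part of the original
 * OpenSSL source. It feeds the RFC 1320 test string "abc" through the
 * one-shot MD4() wrapper defined above; the guard macro is the editor's own
 * so the sketch is never built into the library. */
#ifdef MD4_ONE_USAGE_SKETCH
int main(void)
{
    const unsigned char msg[] = "abc";
    unsigned char digest[MD4_DIGEST_LENGTH];
    int i;

    MD4(msg, sizeof(msg) - 1, digest); /* exclude the trailing '\0' */
    for (i = 0; i < MD4_DIGEST_LENGTH; i++)
        printf("%02x", digest[i]);
    printf("\n"); /* expected: a448017aaf21d8525fc10ae87aa6729d */
    return 0;
}
#endif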
the_stack_data/104827925.c
#include <stdio.h> #include <math.h> int base10_len(int n) { int len = 0; while (n) ++len, n /= 10; return len; } int main(void) { int product = 1; int step = 1; int i1 = 1; int i2 = 10; int i; int index; for (i = 1, index = 1; index <= 1000000; index += step, ++i) { if (index >= i1) { int copy = i; int copy_len = base10_len(copy) - 1; int shift = copy_len - index + i1; while (shift--) copy /= 10; product *= copy % 10; i1 *= 10; } if (i == i2) { i2 *= 10; ++step; } } printf("%d\n", product); return 0; }
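/* Editor's note: this computes the digit product d(1)*d(10)*d(100)*...*d(1000000)
 * of the concatenation 123456789101112...  Here i walks the integers being
 * concatenated, index is the position of the first digit of i in the
 * concatenation, and step is the digit count of the current block (1 for 1-9,
 * 2 for 10-99, and so on, advanced when i reaches i2). Whenever a target
 * position i1 (1, 10, 100, ...) falls inside the current number, the inner
 * shift loop picks out that digit and multiplies it into product.
 * For example d(12) = 1 (first digit of 11) and d(15) = 2 (second digit of 12). */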
the_stack_data/231394489.c
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <dirent.h>
#include <string.h>
#include <limits.h> /* PATH_MAX */

void print_parent_prefix(int depth, bool is_parent_last)
{
    int adjusted_depth = is_parent_last ? depth - 1 : depth;
    for (int i = 0; i < adjusted_depth; ++i)
        printf("┃ ");
    if (is_parent_last)
        printf(" ");
}

struct dirent *superscalar_readdir(DIR *path)
{
    /*
     * Since readdir(3) returns both "." and "..", we need a readdir-like
     * function that will read the directory and return everything except
     * "." and "..". Otherwise, one of "." or ".." may be read last, which
     * breaks the code that checks if an entry is the last entry in the dir.
     */
    while (true) {
        struct dirent *p = readdir(path);
        if (!p)
            return NULL;
        if (strcmp(p->d_name, ".") == 0 || strcmp(p->d_name, "..") == 0)
            continue;
        return p;
    }
}

void traverse(
    char *current,
    bool exclude_hidden_files,
    int depth,
    bool is_parent_last
) {
    DIR *path;
    struct dirent *file, *next;

    path = opendir(current);
    if (!path) /* unreadable directory: nothing to print */
        return;

    file = superscalar_readdir(path);
    next = superscalar_readdir(path);
    for (; file; file = next, next = superscalar_readdir(path)) {
        if (exclude_hidden_files && strncmp(file->d_name, ".", 1) == 0)
            continue;

        bool is_last = next == NULL;
        const char *filename_prefix = is_last ? "┗━" : "┣━";

        print_parent_prefix(depth, is_parent_last);

        if (file->d_type == DT_LNK) {
            /* Resolve the link relative to the directory being walked, not the cwd. */
            char buf[PATH_MAX];
            snprintf(buf, PATH_MAX, "%s/%s", current, file->d_name);
            char *target = realpath(buf, NULL);
            printf("%s %s -> %s\n", filename_prefix, file->d_name,
                   target ? target : "(broken link)");
            free(target);
            continue;
        }

        if (file->d_type == DT_DIR) {
            printf("%s %s\n", filename_prefix, file->d_name);

            char buf[PATH_MAX];
            snprintf(buf, PATH_MAX, "%s/%s", current, file->d_name);
            traverse(buf, exclude_hidden_files, depth+1, is_last);
        } else {
            printf("%s %s\n", filename_prefix, file->d_name);
        }
    }

    closedir(path);
}

int main(int argc, char *argv[])
{
    if (argc < 2) {
        fprintf(stderr, "usage: %s <dir> [-e]\n", argv[0]);
        return 1;
    }

    bool exclude_hidden_files = false;
    if (argc > 2 && argv[2])
        exclude_hidden_files = strcmp(argv[2], "-e") == 0;

    traverse(argv[1], exclude_hidden_files, 0, false);
    return 0;
}
the_stack_data/365005.c
/* CalculiX - A 3-dimensional finite element program */ /* Copyright (C) 1998-2005 Guido Dhondt */ /* This program is free software; you can redistribute it and/or */ /* modify it under the terms of the GNU General Public License as */ /* published by the Free Software Foundation(version 2); */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifdef PARDISO #include <stdio.h> #include <math.h> #include <stdlib.h> #include "CalculiX.h" #include "pardiso.h" ITG *irowpardisoas=NULL,*pointersas=NULL,iparmas[64]; long long ptas[64]; double *aupardisoas=NULL; ITG nthread_mkl_as=0; /* factorization, solving and cleaning with PARDISO for real unsymmetric matrices */ void pardiso_factor_as(double *ad, double *au, double *adb, double *aub, double *sigma,ITG *icol, ITG *irow, ITG *neq, ITG *nzs, ITG *jq){ char *env; ITG i,j,k,maxfct=1,mnum=1,mtype=11,phase=12,nrhs=1,*perm=NULL, msglvl=0,error=0,ifortran,lfortran,index,id; long long ndim; ITG nthread,nthread_v; double *b=NULL,*x=NULL; printf(" Factoring the system of equations using the asymmetric pardiso solver\n"); iparmas[0]=0; /* set MKL_NUM_THREADS to min(CCX_NPROC_EQUATION_SOLVER,OMP_NUM_THREADS) must be done once */ if (nthread_mkl_as == 0) { nthread=1; env=getenv("MKL_NUM_THREADS"); if(env) { nthread=atoi(env);} else { env=getenv("OMP_NUM_THREADS"); if(env) {nthread=atoi(env);} } env=getenv("CCX_NPROC_EQUATION_SOLVER"); if(env) { nthread_v=atoi(env); if (nthread_v <= nthread) {nthread=nthread_v;} } if (nthread < 1) {nthread=1;} sprintf(envMKL,"MKL_NUM_THREADS=%" ITGFORMAT "",nthread); putenv(envMKL); nthread_mkl_as=nthread; } printf(" number of threads =% d\n\n",nthread_mkl_as); for(i=0;i<64;i++){ptas[i]=0;} ndim=*neq+(long long)2**nzs; NNEW(pointersas,ITG,*neq+1); NNEW(irowpardisoas,ITG,ndim); NNEW(aupardisoas,double,ndim); k=0; /* PARDISO requires the matrix to be stored row by row aupardisoas contains the entries irowpardisoas the corresponding column numbers (for each row in ascending order) pointersas(i) points to the first entry for row i in field aupardisoas */ if(*sigma==0.){ for(i=0;i<*neq;i++){ pointersas[i]=k+1; /* lower left triangular matrix */ for(j=0;j<i;j++){ ifortran=i+1; lfortran=jq[j+1]-jq[j]; FORTRAN(nident,(&irow[jq[j]-1],&ifortran,&lfortran,&id)); if(id>0){ index=jq[j]+id-2; if(irow[index]==ifortran){ irowpardisoas[k]=j+1; aupardisoas[k]=au[index]; k++; } } } /* diagonal entry */ irowpardisoas[k]=i+1; aupardisoas[k]=ad[i]; k++; /* upper right triangular matrix */ for(j=jq[i];j<jq[i+1];j++){ irowpardisoas[k]=irow[j-1]; aupardisoas[k]=au[j+*nzs-1]; k++; } } pointersas[*neq]=k+1; }else{ for(i=0;i<*neq;i++){ pointersas[i]=k+1; /* lower left triangular matrix */ for(j=0;j<i;j++){ ifortran=i+1; lfortran=jq[j+1]-jq[j]; FORTRAN(nident,(&irow[jq[j]-1],&ifortran,&lfortran,&id)); if(id>0){ index=jq[j]+id-2; if(irow[index]==ifortran){ irowpardisoas[k]=j+1; aupardisoas[k]=au[index]-*sigma*aub[index]; k++; } } } /* diagonal entry */ irowpardisoas[k]=i+1; aupardisoas[k]=ad[i]-*sigma*adb[i]; k++; /* upper right triangular matrix */ for(j=jq[i];j<jq[i+1];j++){ irowpardisoas[k]=irow[j-1]; 
aupardisoas[k]=au[j+*nzs-1]-*sigma*aub[j+*nzs-1]; k++; } } pointersas[*neq]=k+1; } FORTRAN(pardiso,(ptas,&maxfct,&mnum,&mtype,&phase,neq,aupardisoas, pointersas,irowpardisoas,perm,&nrhs,iparmas,&msglvl, b,x,&error)); return; } void pardiso_solve_as(double *b, ITG *neq){ char *env; ITG maxfct=1,mnum=1,mtype=11,phase=33,*perm=NULL,nrhs=1, msglvl=0,i,error=0; double *x=NULL; printf(" Solving the system of equations using the asymmetric pardiso solver\n"); iparmas[0]=0; /* pardiso_factor has been called befor, MKL_NUM_THREADS=nthread_mkl_as is set*/ printf(" number of threads =% d\n\n",nthread_mkl_as); NNEW(x,double,*neq); FORTRAN(pardiso,(ptas,&maxfct,&mnum,&mtype,&phase,neq,aupardisoas, pointersas,irowpardisoas,perm,&nrhs,iparmas,&msglvl, b,x,&error)); for(i=0;i<*neq;i++){b[i]=x[i];} SFREE(x); return; } void pardiso_cleanup_as(ITG *neq){ ITG maxfct=1,mnum=1,mtype=11,phase=-1,*perm=NULL,nrhs=1, msglvl=0,error=0; double *b=NULL,*x=NULL; FORTRAN(pardiso,(ptas,&maxfct,&mnum,&mtype,&phase,neq,aupardisoas, pointersas,irowpardisoas,perm,&nrhs,iparmas,&msglvl, b,x,&error)); SFREE(irowpardisoas); SFREE(aupardisoas); SFREE(pointersas); return; } void pardiso_main_as(double *ad, double *au, double *adb, double *aub, double *sigma, double *b, ITG *icol, ITG *irow, ITG *neq, ITG *nzs, ITG *jq){ if(*neq==0) return; pardiso_factor_as(ad,au,adb,aub,sigma,icol,irow, neq,nzs,jq); pardiso_solve_as(b,neq); pardiso_cleanup_as(neq); return; } #endif
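/* Editor's note: the row-by-row storage assembled above is plain CSR with
 * 1-based (Fortran) indices. For a full 2x2 matrix
 *     | a11 a12 |
 *     | a21 a22 |
 * the three arrays handed to PARDISO would be
 *     aupardisoas   = { a11, a12, a21, a22 }   values, row by row
 *     irowpardisoas = { 1,   2,   1,   2   }   column index of each value
 *     pointersas    = { 1, 3, 5 }              start of each row, plus the end
 * CalculiX itself keeps the lower triangle in au[0..nzs), the upper triangle
 * in au[nzs..2*nzs) and the diagonal in ad[], which is why the conversion
 * loop assembles every row from those three pieces separately. */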
the_stack_data/73574377.c
/*numPass=5, numTotal=5 Verdict:ACCEPTED, Visibility:1, Input:"3 23", ExpOutput:"3 5 7 11 13 17 19 23 ", Output:"3 5 7 11 13 17 19 23 " Verdict:ACCEPTED, Visibility:1, Input:"5 31", ExpOutput:"5 7 11 13 17 19 23 29 31 ", Output:"5 7 11 13 17 19 23 29 31 " Verdict:ACCEPTED, Visibility:1, Input:"1 20", ExpOutput:"2 3 5 7 11 13 17 19 ", Output:"2 3 5 7 11 13 17 19 " Verdict:ACCEPTED, Visibility:0, Input:"23 57", ExpOutput:"23 29 31 37 41 43 47 53 ", Output:"23 29 31 37 41 43 47 53 " Verdict:ACCEPTED, Visibility:0, Input:"31 47", ExpOutput:"31 37 41 43 47 ", Output:"31 37 41 43 47 " */ #include<stdio.h> int check_prime(int num) { int i; if (num==1) { return 0; } for(i=2;i<num;i=i+1) { if (num%i==0) { return 0; } } return 1; } int main() { int n1,n2,a; scanf("%d%d",&n1,&n2); a=n1; while((a>=n1)&&(a<=n2)) { int m; m=check_prime(a); if (m==1) { printf("%d ",a); } a=a+1; } return 0; }
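/* Editor's sketch, not part of the submitted solution: check_prime() above
 * tries every divisor below num, which is fine for this input range but costs
 * O(num) per call. A drop-in replacement only needs divisors up to sqrt(num): */
int check_prime_fast(int num)
{
    int i;
    if (num < 2)
        return 0;
    for (i = 2; i * i <= num; i = i + 1) /* i*i <= num  <=>  i <= sqrt(num) */
    {
        if (num % i == 0)
            return 0;
    }
    return 1;
}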
the_stack_data/90762749.c
/* vi: set sw=4 ts=4: */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Library General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ #define _GNU_SOURCE #include <stdio.h> #include <stdlib.h> #include <string.h> #include <fcntl.h> #include <getopt.h> #include <time.h> #include <pwd.h> #include <grp.h> #include <unistd.h> #include <ctype.h> #include <errno.h> #include <libgen.h> #include <stdarg.h> #include <sys/stat.h> #include <sys/types.h> #ifndef __APPLE__ #include <sys/sysmacros.h> /* major() and minor() */ #endif #include <ftw.h> const char *bb_applet_name; uid_t recursive_uid; gid_t recursive_gid; unsigned int recursive_mode; void bb_verror_msg(const char *s, va_list p) { fflush(stdout); fprintf(stderr, "%s: ", bb_applet_name); vfprintf(stderr, s, p); } void bb_error_msg(const char *s, ...) { va_list p; va_start(p, s); bb_verror_msg(s, p); va_end(p); putc('\n', stderr); } void bb_error_msg_and_die(const char *s, ...) { va_list p; va_start(p, s); bb_verror_msg(s, p); va_end(p); putc('\n', stderr); exit(1); } void bb_vperror_msg(const char *s, va_list p) { int err=errno; if(s == 0) s = ""; bb_verror_msg(s, p); if (*s) s = ": "; fprintf(stderr, "%s%s\n", s, strerror(err)); } void bb_perror_msg(const char *s, ...) { va_list p; va_start(p, s); bb_vperror_msg(s, p); va_end(p); } void bb_perror_msg_and_die(const char *s, ...) { va_list p; va_start(p, s); bb_vperror_msg(s, p); va_end(p); exit(1); } FILE *bb_xfopen(const char *path, const char *mode) { FILE *fp; if ((fp = fopen(path, mode)) == NULL) bb_perror_msg_and_die("%s", path); return fp; } enum { FILEUTILS_PRESERVE_STATUS = 1, FILEUTILS_DEREFERENCE = 2, FILEUTILS_RECUR = 4, FILEUTILS_FORCE = 8, FILEUTILS_INTERACTIVE = 16 }; int bb_make_directory (char *path, long mode, int flags) { mode_t mask; const char *fail_msg; char *s = path; char c; struct stat st; mask = umask(0); if (mode == -1) { umask(mask); mode = (S_IXUSR | S_IXGRP | S_IXOTH | S_IWUSR | S_IWGRP | S_IWOTH | S_IRUSR | S_IRGRP | S_IROTH) & ~mask; } else { umask(mask & ~0300); } do { c = 0; if (flags & FILEUTILS_RECUR) { /* Get the parent. */ /* Bypass leading non-'/'s and then subsequent '/'s. */ while (*s) { if (*s == '/') { do { ++s; } while (*s == '/'); c = *s; /* Save the current char */ *s = 0; /* and replace it with nul. */ break; } ++s; } } if (mkdir(path, 0777) < 0) { /* If we failed for any other reason than the directory * already exists, output a diagnostic and return -1.*/ if ((errno != EEXIST && errno != EISDIR) || !(flags & FILEUTILS_RECUR) || (stat(path, &st) < 0 || !S_ISDIR(st.st_mode))) { fail_msg = "create"; umask(mask); break; } /* Since the directory exists, don't attempt to change * permissions if it was the full target. Note that * this is not an error conditon. */ if (!c) { umask(mask); return 0; } } if (!c) { /* Done. If necessary, updated perms on the newly * created directory. 
Failure to update here _is_ * an error.*/ umask(mask); if ((mode != -1) && (chmod(path, mode) < 0)){ fail_msg = "set permissions of"; break; } return 0; } /* Remove any inserted nul from the path (recursive mode). */ *s = c; } while (1); bb_perror_msg ("Cannot %s directory `%s'", fail_msg, path); return -1; } const char * const bb_msg_memory_exhausted = "memory exhausted"; void *xmalloc(size_t size) { void *ptr = malloc(size); if (ptr == NULL && size != 0) bb_error_msg_and_die(bb_msg_memory_exhausted); return ptr; } void *xcalloc(size_t nmemb, size_t size) { void *ptr = calloc(nmemb, size); if (ptr == NULL && nmemb != 0 && size != 0) bb_error_msg_and_die(bb_msg_memory_exhausted); return ptr; } void *xrealloc(void *ptr, size_t size) { ptr = realloc(ptr, size); if (ptr == NULL && size != 0) bb_error_msg_and_die(bb_msg_memory_exhausted); return ptr; } char *private_get_line_from_file(FILE *file, int c) { #define GROWBY (80) /* how large we will grow strings by */ int ch; int idx = 0; char *linebuf = NULL; int linebufsz = 0; while ((ch = getc(file)) != EOF) { /* grow the line buffer as necessary */ if (idx > linebufsz - 2) { linebuf = xrealloc(linebuf, linebufsz += GROWBY); } linebuf[idx++] = (char)ch; if (!ch) return linebuf; if (c<2 && ch == '\n') { if (c) { --idx; } break; } } if (linebuf) { if (ferror(file)) { free(linebuf); return NULL; } linebuf[idx] = 0; } return linebuf; } char *bb_get_chomped_line_from_file(FILE *file) { return private_get_line_from_file(file, 1); } long my_getpwnam(const char *name) { struct passwd *myuser; myuser = getpwnam(name); if (myuser==NULL) bb_error_msg_and_die("unknown user name: %s", name); return myuser->pw_uid; } long my_getgrnam(const char *name) { struct group *mygroup; mygroup = getgrnam(name); if (mygroup==NULL) bb_error_msg_and_die("unknown group name: %s", name); return (mygroup->gr_gid); } unsigned long get_ug_id(const char *s, long (*my_getxxnam)(const char *)) { unsigned long r; char *p; r = strtoul(s, &p, 10); if (*p || (s == p)) { r = my_getxxnam(s); } return r; } char * last_char_is(const char *s, int c) { char *sret = (char *)s; if (sret) { sret = strrchr(sret, c); if(sret != NULL && *(sret+1) != 0) sret = NULL; } return sret; } void bb_xasprintf(char **string_ptr, const char *format, ...) { va_list p; int r; va_start(p, format); r = vasprintf(string_ptr, format, p); va_end(p); if (r < 0) { bb_perror_msg_and_die("bb_xasprintf"); } } char *concat_path_file(const char *path, const char *filename) { char *outbuf; char *lc; if (!path) path = ""; lc = last_char_is(path, '/'); while (*filename == '/') filename++; bb_xasprintf(&outbuf, "%s%s%s", path, (lc==NULL ? "/" : ""), filename); return outbuf; } void bb_show_usage(void) { fprintf(stderr, "%s: [-d device_table] rootdir\n\n", bb_applet_name); fprintf(stderr, "Creates a batch of special files as specified in a device table.\n"); fprintf(stderr, "Device table entries take the form of:\n"); fprintf(stderr, "name type mode user group major minor start increment count\n\n"); fprintf(stderr, "Where name is the file name, type can be one of:\n"); fprintf(stderr, " f A regular file\n"); fprintf(stderr, " d Directory\n"); fprintf(stderr, " r Directory recursively\n"); fprintf(stderr, " c Character special device file\n"); fprintf(stderr, " b Block special device file\n"); fprintf(stderr, " p Fifo (named pipe)\n"); fprintf(stderr, "uid is the user id for the target file, gid is the group id for the\n"); fprintf(stderr, "target file. 
The rest of the entries (major, minor, etc) apply to\n"); fprintf(stderr, "to device special files. A '-' may be used for blank entries.\n\n"); fprintf(stderr, "For example:\n"); fprintf(stderr, "<name> <type> <mode> <uid> <gid> <major> <minor> <start> <inc> <count>\n"); fprintf(stderr, "/dev d 755 0 0 - - - - -\n"); fprintf(stderr, "/dev/console c 666 0 0 5 1 - - -\n"); fprintf(stderr, "/dev/null c 666 0 0 1 3 0 0 -\n"); fprintf(stderr, "/dev/zero c 666 0 0 1 5 0 0 -\n"); fprintf(stderr, "/dev/hda b 640 0 0 3 0 0 0 -\n"); fprintf(stderr, "/dev/hda b 640 0 0 3 1 1 1 15\n"); fprintf(stderr, "/dev/rtp b 640 0 0 250 0 0 1 5\n"); fprintf(stderr, "/dev/gps b 640 0 0 251 0 1 1 5\n"); fprintf(stderr, "/dev/uio b 640 0 0 252 0 1 2 5\n"); fprintf(stderr, "/dev/uio b 640 0 0 252 1 6 2 5\n\n"); fprintf(stderr, "Will Produce:\n"); fprintf(stderr, "/dev\n"); fprintf(stderr, "/dev/console\n"); fprintf(stderr, "/dev/null\n"); fprintf(stderr, "/dev/zero\n"); fprintf(stderr, "/dev/hda\n"); fprintf(stderr, "/dev/hda[1-15] with minor numbers [1-15]\n"); fprintf(stderr, "/dev/rtp[0-4] with minor numbers [0-4]\n"); fprintf(stderr, "/dev/gps[1-5] with minor numbers [0-4]\n"); fprintf(stderr, "/dev/uio[1-5] with minor numbers 0,2,4,6,8\n"); fprintf(stderr, "/dev/uio[6-10] with minor numbers 1,3,5,7,9\n"); exit(1); } int bb_recursive(const char *fpath, const struct stat *sb, int tflag, struct FTW *ftwbuf){ if (chown(fpath, recursive_uid, recursive_gid) == -1) { bb_perror_msg("chown failed for %s", fpath); return -1; } if (recursive_mode != -1) { if (chmod(fpath, recursive_mode) < 0) { bb_perror_msg("chmod failed for %s", fpath); return -1; } } return 0; } int main(int argc, char **argv) { int opt; FILE *table = stdin; char *rootdir = NULL; char *line = NULL; int linenum = 0; int ret = EXIT_SUCCESS; bb_applet_name = basename(argv[0]); while ((opt = getopt(argc, argv, "d:")) != -1) { switch(opt) { case 'd': table = bb_xfopen((line=optarg), "r"); break; default: bb_show_usage(); } } if (optind >= argc || (rootdir=argv[optind])==NULL) { bb_error_msg_and_die("root directory not speficied"); } if (chdir(rootdir) != 0) { bb_perror_msg_and_die("Couldnt chdir to %s", rootdir); } umask(0); printf("rootdir=%s\n", rootdir); if (line) { printf("table='%s'\n", line); } else { printf("table=<stdin>\n"); } while ((line = bb_get_chomped_line_from_file(table))) { char type; unsigned int mode = 0755; unsigned int major = 0; unsigned int minor = 0; unsigned int count = 0; unsigned int increment = 0; unsigned int start = 0; char name[4096]; char user[41]; char group[41]; char *full_name; uid_t uid; gid_t gid; linenum++; if ((2 > sscanf(line, "%4095s %c %o %40s %40s %u %u %u %u %u", name, &type, &mode, user, group, &major, &minor, &start, &increment, &count)) || ((major | minor | start | count | increment) > 0xfffff)) { if (*line=='\0' || *line=='#' || isspace(*line)) continue; bb_error_msg("line %d invalid: '%s'\n", linenum, line); ret = EXIT_FAILURE; continue; } if (name[0] == '#') { continue; } if (*group) { gid = get_ug_id(group, my_getgrnam); } else { gid = getgid(); } if (*user) { uid = get_ug_id(user, my_getpwnam); } else { uid = getuid(); } full_name = concat_path_file(rootdir, name); if (type == 'd') { bb_make_directory(full_name, mode | S_IFDIR, FILEUTILS_RECUR); if (chown(full_name, uid, gid) == -1) { bb_perror_msg("line %d: chown failed for %s", linenum, full_name); ret = EXIT_FAILURE; goto loop; } if ((mode != -1) && (chmod(full_name, mode) < 0)){ bb_perror_msg("line %d: chmod failed for %s", linenum, full_name); ret = 
EXIT_FAILURE; goto loop; } } else if (type == 'f') { struct stat st; if ((stat(full_name, &st) < 0 || !S_ISREG(st.st_mode))) { bb_perror_msg("line %d: regular file '%s' does not exist", linenum, full_name); ret = EXIT_FAILURE; goto loop; } if (chown(full_name, uid, gid) == -1) { bb_perror_msg("line %d: chown failed for %s", linenum, full_name); ret = EXIT_FAILURE; goto loop; } if ((mode != -1) && (chmod(full_name, mode) < 0)){ bb_perror_msg("line %d: chmod failed for %s", linenum, full_name); ret = EXIT_FAILURE; goto loop; } } else if (type == 'r') { recursive_uid = uid; recursive_gid = gid; recursive_mode = mode; if (nftw(full_name, bb_recursive, 20, FTW_MOUNT | FTW_PHYS) < 0) { bb_perror_msg("line %d: recursive failed for %s", linenum, full_name); ret = EXIT_FAILURE; goto loop; } } else { dev_t rdev; if (type == 'p') { mode |= S_IFIFO; } else if (type == 'c') { mode |= S_IFCHR; } else if (type == 'b') { mode |= S_IFBLK; } else { bb_error_msg("line %d: Unsupported file type %c", linenum, type); ret = EXIT_FAILURE; goto loop; } if (count > 0) { int i; char *full_name_inc; full_name_inc = xmalloc(strlen(full_name) + 8); for (i = 0; i < count; i++) { sprintf(full_name_inc, "%s%d", full_name, start + i); rdev = makedev(major, minor + i * increment); if (mknod(full_name_inc, mode, rdev) == -1) { bb_perror_msg("line %d: Couldnt create node %s", linenum, full_name_inc); ret = EXIT_FAILURE; } else if (chown(full_name_inc, uid, gid) == -1) { bb_perror_msg("line %d: chown failed for %s", linenum, full_name_inc); ret = EXIT_FAILURE; } if ((mode != -1) && (chmod(full_name_inc, mode) < 0)){ bb_perror_msg("line %d: chmod failed for %s", linenum, full_name_inc); ret = EXIT_FAILURE; } } free(full_name_inc); } else { rdev = makedev(major, minor); if (mknod(full_name, mode, rdev) == -1) { bb_perror_msg("line %d: Couldnt create node %s", linenum, full_name); ret = EXIT_FAILURE; } else if (chown(full_name, uid, gid) == -1) { bb_perror_msg("line %d: chown failed for %s", linenum, full_name); ret = EXIT_FAILURE; } if ((mode != -1) && (chmod(full_name, mode) < 0)){ bb_perror_msg("line %d: chmod failed for %s", linenum, full_name); ret = EXIT_FAILURE; } } } loop: free(line); free(full_name); } fclose(table); return ret; }
the_stack_data/102511.c
#include <stdio.h> void scilab_rt_barh_d2d0d0s0s0_(int in00, int in01, double matrixin0[in00][in01], double scalarin0, double scalarin1, char* scalarin2, char* scalarin3) { int i; int j; double val0 = 0; for (i = 0; i < in00; ++i) { for (j = 0; j < in01; ++j) { val0 += matrixin0[i][j]; } } printf("%f", val0); printf("%f", scalarin0); printf("%f", scalarin1); printf("%s", scalarin2); printf("%s", scalarin3); }
the_stack_data/162643257.c
/* * This little program is used to parse the FreeType headers and * find the declaration of all public APIs. This is easy, because * they all look like the following: * * FT_EXPORT( return_type ) * function_name( function arguments ); * * You must pass the list of header files as arguments. Wildcards are * accepted if you are using GCC for compilation (and probably by * other compilers too). * * Author: David Turner, 2005, 2006, 2008-2013, 2015 * * This code is explicitly placed into the public domain. * */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #define PROGRAM_NAME "apinames" #define PROGRAM_VERSION "0.2" #define LINEBUFF_SIZE 1024 typedef enum OutputFormat_ { OUTPUT_LIST = 0, /* output the list of names, one per line */ OUTPUT_WINDOWS_DEF, /* output a Windows .DEF file for Visual C++ or Mingw */ OUTPUT_BORLAND_DEF, /* output a Windows .DEF file for Borland C++ */ OUTPUT_WATCOM_LBC, /* output a Watcom Linker Command File */ OUTPUT_NETWARE_IMP /* output a NetWare ImportFile */ } OutputFormat; static void panic(const char *message) { fprintf(stderr, "PANIC: %s\n", message); exit(2); } typedef struct NameRec_ { char *name; unsigned int hash; } NameRec, *Name; static Name the_names; static int num_names; static int max_names; static void names_add(const char *name, const char *end) { unsigned int h; int nn, len; Name nm; if (end <= name) return; /* compute hash value */ len = (int) (end - name); h = 0; for (nn = 0; nn < len; nn++) h = h * 33 + name[nn]; /* check for an pre-existing name */ for (nn = 0; nn < num_names; nn++) { nm = the_names + nn; if ((int) nm->hash == h && memcmp(name, nm->name, len) == 0 && nm->name[len] == 0) return; } /* add new name */ if (num_names >= max_names) { max_names += (max_names >> 1) + 4; the_names = (NameRec *) realloc(the_names, sizeof(the_names[0]) * max_names); if (the_names == NULL) panic("not enough memory"); } nm = &the_names[num_names++]; nm->hash = h; nm->name = (char *) malloc(len + 1); if (nm->name == NULL) panic("not enough memory"); memcpy(nm->name, name, len); nm->name[len] = 0; } static int name_compare(const void *name1, const void *name2) { Name n1 = (Name) name1; Name n2 = (Name) name2; return strcmp(n1->name, n2->name); } static void names_sort(void) { qsort(the_names, (size_t) num_names, sizeof(the_names[0]), name_compare); } static void names_dump(FILE *out, OutputFormat format, const char *dll_name) { int nn; switch (format) { case OUTPUT_WINDOWS_DEF: if (dll_name) fprintf(out, "LIBRARY %s\n", dll_name); fprintf(out, "DESCRIPTION FreeType 2 DLL\n"); fprintf(out, "EXPORTS\n"); for (nn = 0; nn < num_names; nn++) fprintf(out, " %s\n", the_names[nn].name); break; case OUTPUT_BORLAND_DEF: if (dll_name) fprintf(out, "LIBRARY %s\n", dll_name); fprintf(out, "DESCRIPTION FreeType 2 DLL\n"); fprintf(out, "EXPORTS\n"); for (nn = 0; nn < num_names; nn++) fprintf(out, " _%s\n", the_names[nn].name); break; case OUTPUT_WATCOM_LBC: { const char *dot; char temp[512]; if (dll_name == NULL) { fprintf(stderr, "you must provide a DLL name with the -d option!\n"); exit(4); } /* we must omit the .dll suffix from the library name */ dot = strchr(dll_name, '.'); if (dot != NULL) { int len = dot - dll_name; if (len > (int) (sizeof(temp) - 1)) len = sizeof(temp) - 1; memcpy(temp, dll_name, len); temp[len] = 0; dll_name = (const char *) temp; } for (nn = 0; nn < num_names; nn++) fprintf(out, "++_%s.%s.%s\n", the_names[nn].name, dll_name, the_names[nn].name); } break; case OUTPUT_NETWARE_IMP: { if (dll_name != NULL) fprintf(out, 
" (%s)\n", dll_name); for (nn = 0; nn < num_names - 1; nn++) fprintf(out, " %s,\n", the_names[nn].name); fprintf(out, " %s\n", the_names[num_names - 1].name); } break; default: /* LIST */ for (nn = 0; nn < num_names; nn++) fprintf(out, "%s\n", the_names[nn].name); } } /* states of the line parser */ typedef enum State_ { STATE_START = 0, /* waiting for FT_EXPORT keyword and return type */ STATE_TYPE /* type was read, waiting for function name */ } State; static int read_header_file(FILE *file, int verbose) { static char buff[LINEBUFF_SIZE + 1]; State state = STATE_START; while (!feof(file)) { char *p; if (!fgets(buff, LINEBUFF_SIZE, file)) break; p = buff; while (*p && (*p == ' ' || *p == '\\')) /* skip leading whitespace */ p++; if (*p == '\n' || *p == '\r') /* skip empty lines */ continue; switch (state) { case STATE_START: { if (memcmp(p, "FT_EXPORT(", 10) != 0) break; p += 10; for (;;) { if (*p == 0 || *p == '\n' || *p == '\r') goto NextLine; if (*p == ')') { p++; break; } p++; } state = STATE_TYPE; /* sometimes, the name is just after the FT_EXPORT(...), so * skip whitespace, and fall-through if we find an alphanumeric * character */ while (*p == ' ' || *p == '\t') p++; if (!isalpha(*p)) break; } /* fall-through */ case STATE_TYPE: { char *name = p; while (isalnum(*p) || *p == '_') p++; if (p > name) { if (verbose) fprintf(stderr, ">>> %.*s\n", (int) (p - name), name); names_add(name, p); } state = STATE_START; } break; default:; } NextLine:; } return 0; } static void usage(void) { static const char *const format = "%s %s: extract FreeType API names from header files\n\n" "this program is used to extract the list of public FreeType API\n" "functions. It receives the list of header files as argument and\n" "generates a sorted list of unique identifiers\n\n" "usage: %s header1 [options] [header2 ...]\n\n" "options: - : parse the content of stdin, ignore arguments\n" " -v : verbose mode, output sent to standard error\n" " -oFILE : write output to FILE instead of standard output\n" " -dNAME : indicate DLL file name, 'freetype.dll' by default\n" " -w : output .DEF file for Visual C++ and Mingw\n" " -wB : output .DEF file for Borland C++\n" " -wW : output Watcom Linker Response File\n" " -wN : output NetWare Import File\n" "\n"; fprintf(stderr, format, PROGRAM_NAME, PROGRAM_VERSION, PROGRAM_NAME ); exit(1); } int main(int argc, const char *const *argv) { int from_stdin = 0; int verbose = 0; OutputFormat format = OUTPUT_LIST; /* the default */ FILE *out = stdout; const char *library_name = NULL; if (argc < 2) usage(); /* '-' used as a single argument means read source file from stdin */ while (argc > 1 && argv[1][0] == '-') { const char *arg = argv[1]; switch (arg[1]) { case 'v': verbose = 1; break; case 'o': if (arg[2] == 0) { if (argc < 2) usage(); arg = argv[2]; argv++; argc--; } else arg += 2; out = fopen(arg, "wt"); if (out == NULL) { fprintf(stderr, "could not open '%s' for writing\n", argv[2]); exit(3); } break; case 'd': if (arg[2] == 0) { if (argc < 2) usage(); arg = argv[2]; argv++; argc--; } else arg += 2; library_name = arg; break; case 'w': format = OUTPUT_WINDOWS_DEF; switch (arg[2]) { case 'B': format = OUTPUT_BORLAND_DEF; break; case 'W': format = OUTPUT_WATCOM_LBC; break; case 'N': format = OUTPUT_NETWARE_IMP; break; case 0: break; default: usage(); } break; case 0: from_stdin = 1; break; default: usage(); } argc--; argv++; } if (from_stdin) { read_header_file(stdin, verbose); } else { for (--argc, argv++; argc > 0; argc--, argv++) { FILE *file = fopen(argv[0], "rb"); if 
(file == NULL) fprintf(stderr, "unable to open '%s'\n", argv[0]); else { if (verbose) fprintf(stderr, "opening '%s'\n", argv[0]); read_header_file(file, verbose); fclose(file); } } } if (num_names == 0) panic("could not find exported functions !!\n"); names_sort(); names_dump(out, format, library_name); if (out != stdout) fclose(out); return 0; }
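/*
 * A minimal usage sketch for the tool above (the header fragment, file names
 * and output name are illustrative, not taken from a real build): given
 * headers whose public entry points are declared as
 *
 *     FT_EXPORT( FT_Error )
 *     FT_Bitmap_Copy( FT_Library  library, ... );
 *
 * an invocation such as
 *
 *     apinames -w -dfreetype.dll -ofreetype.def ftbitmap.h ftglyph.h
 *
 * parses each header, collects the name that follows every FT_EXPORT(...)
 * and writes a sorted Windows .DEF file whose EXPORTS section lists
 * FT_Bitmap_Copy together with every other exported function.
 */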
the_stack_data/125257.c
#include <stdio.h>
#include <math.h>

/* Computes x to the power n by repeated squaring; n may be negative. */
long double puissance(int x, int n){
	if(n < 0)
		return (1.0/puissance(x, (-1) * n));
	else if(!n)
		return 1;
	else if(n == 2)
		return x * x;
	else if(!(n % 2))
		return pow(puissance(x, n/2),2);
	else
		return pow(puissance(x, (n-1)/2), 2) * x;
}

int main()
{
	int a,b;
	printf("This program computes x raised to the power n; enter x and n separated by a space\nx n?: ");
	scanf("%d %d",&a,&b);
	printf("%Lf\n",puissance(a,b) );
	return 0;
}
the_stack_data/37638737.c
/* * Find the Nth Catalan number based on its recursive formula * * Author: Ansor Kasimov * Created: March 27 2022 * Copyright (c) 2022 Ansor Kasimov */ #include <stdio.h> int catalan(int); int main() { printf("The 5th Catalan number is: %d\n", catalan(5)); printf("The 9th Catalan number is: %d\n", catalan(9)); printf("The 13th Catalan number is: %d\n", catalan(13)); return 0; } int catalan(int n) { int res = 0; if (n <= 1) return 1; for (int i = 0; i < n; ++i) res += catalan(i) * catalan(n - i - 1); return res; }
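/*
 * For reference, the recursion coded above is the convolution form of the
 * Catalan recurrence
 *
 *     C(0) = 1,    C(n) = sum over i = 0 .. n-1 of C(i) * C(n-1-i),
 *
 * whose first terms are 1, 1, 2, 5, 14, 42, 132, 429, ...; so catalan(5)
 * evaluates to 42 and catalan(9) to 4862. Because every call re-derives all
 * of its subproblems, the plain recursion grows roughly as fast as the
 * Catalan numbers themselves, which is why it is only practical for the
 * small arguments 5, 9 and 13 used in main().
 */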
the_stack_data/14264.c
#include <stdio.h> #include <stdlib.h> #include <string.h> int main(void){ int T, str_len, i, j, k, sum, *N_digit, counter; char N[1000] = {0}; scanf("%d", &T); for(i = 1; i <= T; i++){ sum = 0; counter = 0; memset(N, 0, sizeof(N)); scanf("%s", N); str_len = strlen(N); N_digit = malloc(str_len * sizeof(int)); // store each digit in an array, and count the sum of digits for(j = 0; j < str_len; j++){ N_digit[j] = N[j] - '0'; sum += N_digit[j]; } // count how many digits is divisible by 3 for(j = 0; j < str_len; j++){ if(N_digit[j] % 3 == 0) counter++; } // 3 possible conditions if(sum % 3 == 0){ // 1 // can only substract for (counter) amount of times if(counter % 2 == 0) printf("Case %d: T\n", i); else printf("Case %d: S\n", i); } else{ for(j = 0; j < str_len; j++){ if((sum - N_digit[j]) % 3 == 0){ // 2 if(counter % 2 == 0) printf("Case %d: S\n", i); else printf("Case %d: T\n", i); break; } else if(j == str_len - 1){ // 3 printf("Case %d: T\n", i); } } } free(N_digit); } }
the_stack_data/495304.c
#include <inttypes.h> #include <unistd.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #ifdef __GLIBC__ # include <malloc.h> # include <mcheck.h> void print_mcheck_status(enum mcheck_status s) { fprintf(stderr, "%s\n", (s == MCHECK_DISABLED) ? "N/A, you didn't enable mcheck()" : (s == MCHECK_OK) ? "No inconsistency detected" : (s == MCHECK_HEAD) ? "Memory preceding an allocated block was clobbered" : (s == MCHECK_TAIL) ? "Memory following an allocated block was clobbered" : (s == MCHECK_FREE) ? "A block of memory was freed twice" : "unknown memory check code!"); } void report_mcheck_fail(enum mcheck_status s) { fprintf(stderr, "*** PROGRAM WOULD ABORT: "); print_mcheck_status(s); } #endif int main(int argc, char ** argv) { fprintf(stderr, "pid: %d\n", getpid()); char buffer[1000]; while (1) { fprintf(stderr, "> "); fgets(buffer, sizeof(buffer), stdin); char cmd[1000]; intptr_t arg1, arg2; int num = sscanf(buffer, "%s %"SCNiPTR" %"SCNiPTR, cmd, &arg1, &arg2); if (strcmp(cmd, "malloc") == 0) { void* result = malloc(arg1); fprintf(stderr, "==> %p\n", result); } else if (strcmp(cmd, "free") == 0) { free((void*) arg1); fprintf(stderr, "==> ok\n"); } else if (strcmp(cmd, "show") == 0) { if (num == 2) { arg2 = 1; } long * src = (long*) arg1; for (int i = 0; i < arg2; i++) { fprintf(stderr, "%p: %#16.0lx\n", &src[i], src[i]); } #ifdef __GLIBC__ } else if (strcmp(cmd, "usable") == 0) { fprintf(stderr, "usable size: %zu\n", malloc_usable_size((void*) arg1)); } else if (strcmp(cmd, "stats") == 0) { malloc_stats(); } else if (strcmp(cmd, "info") == 0) { malloc_info(0, stdout); } else if (strcmp(cmd, "mcheck") == 0) { fprintf(stderr, "==> %s\n", mcheck(report_mcheck_fail) == 0 ? "OK" : "ERROR"); } else if (strcmp(cmd, "mcheck_pedantic") == 0) { fprintf(stderr, "==> %s\n", mcheck_pedantic(report_mcheck_fail) == 0 ? "OK" : "ERROR"); } else if (strcmp(cmd, "mprobe") == 0) { if (num > 1) { print_mcheck_status(mprobe((void*) arg1)); } else { mcheck_check_all(); fprintf(stderr, "==> check_all ok\n"); } #endif } else { puts("Commands: malloc n, free p, show p [n], usable p, stats, info, mprobe [p], mcheck, mcheck_pedantic"); } } }
the_stack_data/34512159.c
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <dirent.h>
#include <string.h>

void AlterarPermisos(char pathname[1024], int mask){
    struct stat atributos_antiguos, atributos_nuevos;
    DIR *directorio;
    struct dirent *entrada;
    const char* dir1 = ".";
    const char* dir2 = "..";
    const char* sucess = "Sucess";

    // Open the directory, obtaining a pointer to its DIR
    if((directorio = opendir(pathname)) == NULL) {
        perror("\nError opening the directory");
        exit(EXIT_FAILURE);
    }

    // Change the current working directory
    if((chdir(pathname)) != 0) {
        perror("\nError changing directory");
        exit(EXIT_FAILURE);
    }

    // Read the first entry of the DIR struct, the first element of the directory
    if((entrada = readdir(directorio)) == NULL) {
        perror("\nError reading the directory, or the directory is empty.");
        exit(EXIT_FAILURE);
    }

    // Iterate over every file in the directory
    while (entrada != NULL){
        // Examine the file only if it is not "." or ".."
        if (strcmp(entrada->d_name, dir1) && strcmp(entrada->d_name, dir2)) {
            // Get the file's current attributes
            if(lstat(entrada->d_name, &atributos_antiguos) != 0) {
                perror("\nError in lstat for the old attributes");
                exit(EXIT_FAILURE);
            }
            // Change the file's permissions
            if (chmod(entrada->d_name, mask) == 0) {
                // Get the new attributes so the new permissions can be printed
                if(lstat(entrada->d_name, &atributos_nuevos) != 0) {
                    perror("\nError in lstat for the new attributes");
                    exit(EXIT_FAILURE);
                }
                // Show the result of the change
                printf("%s : %d -> %d\n\n", entrada->d_name, atributos_antiguos.st_mode, atributos_nuevos.st_mode);
            } else {
                // Report that the change was not made
                printf("Permissions not changed for %s : %d %d\n\n", entrada->d_name, errno, atributos_antiguos.st_mode);
            }
        }
        // Read the next entry of the DIR struct
        // The error is not checked because, if it is NULL, we leave the loop
        entrada = readdir(directorio);
    }

    // Close the directory
    if (closedir(directorio) != 0){
        perror("\nError in close");
        exit(EXIT_FAILURE);
    }
}

int main(int argc, char *argv[])
{
    if (argc < 3) {
        printf("Usage: %s <directory> <permissions-in-octal>\n", argv[0]);
        exit(-1);
    } else {
        int permisos = strtol(argv[2], NULL, 8);
        AlterarPermisos(argv[1], permisos);
    }
    return(0);
}
the_stack_data/33522.c
/* * outputs/test_spmp_ok_share_types1_typex0_umode0.c * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. * * This test program is to test spmp_ok() when share mode (RW=01, SRWX: 1000, SRWX: 1111). * Based on other test cases for mseccfg stiky bits, this test expects following: * - RW = 01/SRWX:1000/SRWX:1111. * - Regine matched. * * Remarks: * - */ /* * Macros from encoding.h */ #define MSTATUS_MPP 0x00001800 #define SSTATUS_SPP 0x00000100 #define PMP_R 0x01 #define PMP_W 0x02 #define PMP_X 0x04 #define PMP_A 0x18 #define PMP_L 0x80 #define PMP_SHIFT 2 #define PMP_TOR 0x08 #define PMP_NA4 0x10 #define PMP_NAPOT 0x18 #define SPMP_S 0x80 #define SSTATUS_SUM 0x00040000 #define CSR_SPMPCFG0 0x1a0 #define CSR_SPMPCFG1 0x1a1 #define CSR_SPMPCFG2 0x1a2 #define CSR_SPMPCFG3 0x1a3 #define CSR_SPMPADDR0 0x1b0 #define CSR_SPMPADDR1 0x1b1 #define CSR_SPMPADDR2 0x1b2 #define CSR_SPMPADDR3 0x1b3 #define CSR_SPMPADDR4 0x1b4 #define CSR_SPMPADDR5 0x1b5 #define CSR_SPMPADDR6 0x1b6 #define CSR_SPMPADDR7 0x1b7 #define CSR_SPMPADDR8 0x1b8 #define CSR_SPMPADDR9 0x1b9 #define CSR_SPMPADDR10 0x1ba #define CSR_SPMPADDR11 0x1bb #define CSR_SPMPADDR12 0x1bc #define CSR_SPMPADDR13 0x1bd #define CSR_SPMPADDR14 0x1be #define CSR_SPMPADDR15 0x1bf #define TEST_RW (1 - 0) #define TEST_FETCH (0) #define CAUSE_LOAD_ACCESS 0x5 #define CAUSE_STORE_ACCESS 0x7 #define CAUSE_FETCH_PAGE_FAULT 0xc #define CAUSE_LOAD_PAGE_FAULT 0xd #define CAUSE_STORE_PAGE_FAULT 0xf typedef unsigned long reg_t; typedef unsigned long uintptr_t; /* * functions from syscalls.c */ #if PRINTF_SUPPORTED int printf(const char* fmt, ...); #else #define printf(...) #endif void __attribute__((noreturn)) tohost_exit(uintptr_t code); void exit(int code); /* * local status */ #define TEST_MEM_START 0x200000 #define TEST_MEM_END 0x240000 #define U_MEM_END (TEST_MEM_END + 0x10000) #define S_MEM_END (0x280000 + 0x10000) #define FAKE_ADDRESS 0x10000000 static const unsigned long expected_r_fail = 0; __attribute ((section(".data_smode"), noinline)) static unsigned actual_r_fail = 0; static const unsigned long expected_w_fail = 1; __attribute ((section(".data_smode"), noinline)) static unsigned actual_w_fail = 0; static const unsigned long expected_x_fail = 0; static unsigned actual_x_fail = 0; static void checkTestResult(void); /* * override syscalls.c. 
* currently simply skip to nexp instruction */ uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) { if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { asm volatile ("nop"); actual_x_fail = 1; checkTestResult(); //} else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { } else if (cause == CAUSE_LOAD_PAGE_FAULT || cause == CAUSE_STORE_PAGE_FAULT) { reg_t addr; asm volatile ("csrr %0, mtval\n" : "=r"(addr)); printf("got spmp except, addr = 0x%x\n", addr); if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { if (cause == CAUSE_LOAD_PAGE_FAULT) actual_r_fail = 1; else actual_w_fail = 1; return epc + 4; } if (addr == FAKE_ADDRESS) { asm volatile ("nop"); asm volatile ("nop"); checkTestResult(); } } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { reg_t addr; asm volatile ("csrr %0, mtval\n" : "=r"(addr)); if (addr == FAKE_ADDRESS) { asm volatile ("nop"); asm volatile ("nop"); checkTestResult(); } } printf("cause = %ld, epc = 0x%lx\n", cause, epc); tohost_exit(1337); } static void switch_mode_access() { #if 0 reg_t tmp; asm volatile ( "li %0, %1\n" "\tcsrc mstatus, t0\n" "\tla %0, try_access_umode \n" "\tcsrw mepc, %0\n" "\tli sp, %2\n" "\tmret\n" : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); #else //switch to U mode from S mode reg_t tmp; asm volatile ( "li %0, %1\n" "\tcsrc sstatus, t0\n" "\tla %0, try_access_umode \n" "\tcsrw sepc, %0\n" "\tli sp, %2\n" "\tsret\n" : "=r"(tmp) : "n"(SSTATUS_SPP), "n"(U_MEM_END) : "memory"); #endif } __attribute ((section(".text_test_foo"), noinline)) static void target_foo() { asm volatile ("nop"); actual_x_fail = 0; } /* * avoid to access actual_x_fail lies in M mode */ __attribute ((section(".text_test_foo"), noinline)) static void target_foo_umode() { asm volatile ("nop"); } __attribute ((section(".data_test_arr"), aligned(8))) static volatile unsigned char target_arr[100] = { 1,2,3,4,5,6,7,8, }; // from pmp_ok() side,W/R/X is similar __attribute ((section(".text_smode"))) __attribute ((noinline)) static void try_access() { #if TEST_RW target_arr[0] += 1; const unsigned long delta = 0x1020304005060708UL; *(long *)target_arr += delta; if (actual_r_fail == 0 && actual_w_fail == 0) { if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { actual_r_fail = 1; actual_w_fail = 1; } } #endif #if TEST_FETCH actual_x_fail = 1; // reset inside target_foo() target_foo(); #endif } // S-mode text __attribute ((section(".text_smode"))) void smode_main() { /* configure sPMP */ /* */ asm volatile ("csrw 0x1b3, %0 \n" :: "r"(S_MEM_END >> 2) : "memory"); //spmpaddr3 asm volatile ("csrw 0x1b2, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); //spmpaddr2 asm volatile ("csrw 0x1b1, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); //spmpaddr1 asm volatile ("csrw 0x1b0, %0 \n" :: "r"((TEST_MEM_START + 0) >> 2) : "memory"); //spmpaddr0 //reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT) << 24; // for S_MEM /* This is a shared R/W/X regions because we do not have R/W/X for S-mode only */ reg_t cfg0 = (!PMP_R | !PMP_W | !PMP_X | SPMP_S | PMP_TOR) << 24; // for S_MEM cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 16; // for U_MEM cfg0 |= ((1 ? PMP_R:0) //for TEST_MEM | (1 ? PMP_W:0) | (1 ? PMP_X:0) | PMP_TOR | (1 ? 
SPMP_S:0)) << 8; asm volatile ("csrw 0x1a0, %0 \n" : : "r"(cfg0) : "memory"); // currently dummy since tlb flushed when set_csr on mseccfg asm volatile ("fence.i \n"); #if 0 //SUM bit will not affect shared region behavior //configure SUM bit #if 0 //set SUM asm volatile ( "li t0, 0x00040000\n" "\tcsrs sstatus, t0\n" ::: "t0", "memory"); #else //clear SUM asm volatile ( "li t0, 0x00040000\n" "\tcsrc sstatus, t0\n" :::"t0", "memory"); #endif #endif /* Test */ // switch_mode(); // in case swith to u mode, branch to try_access_umode directly // try_access(); #if 0 switch_mode_access(); // access in umode and report final result #else try_access(); #endif /* End */ checkTestResult(); } static void switch_smode() { #if 0 reg_t tmp; asm volatile ( "li %0, %1\n" "\tcsrc mstatus, t0\n" "\tla %0, smode_main\n" "\tcsrw mepc, %0\n" "\tli sp, %2\n" "\tmret\n" : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(S_MEM_END) : "memory"); #else reg_t tmp; asm volatile ( "csrr %0, mstatus\n" : "=r"(tmp)::"memory"); tmp = tmp & ~(MSTATUS_MPP); // clear MPP bits tmp = tmp | (0x1 << 11); // set MPP to S-mode asm volatile ( "csrw mstatus, %0\n" ::"r"(tmp) :"memory"); //clear satp (to disable paging) asm volatile ( "csrw satp, x0\n" :::"memory"); asm volatile ( "\tla %0, smode_main\n" "\tcsrw mepc, %0\n" "\tli sp, %1\n" "\tmret\n" : "=r"(tmp) : "n"(S_MEM_END) : "memory"); #endif } /* * On processor_t::reset(): * - set_csr(CSR_PMPADDR0, ~reg_t(0)); * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); */ static void set_cfg() { // Give all the memory accessible to the S/U modes //asm volatile ("csrw pmpaddr0, %0\n" :: "r"(~reg_t(0)) : "memory"); asm volatile ("csrw pmpaddr0, %0\n" :: "r"((0x400000>>2)-1) : "memory"); /* We can not give permission to access all addresses because we rely on fake addr for trap */ asm volatile ("csrw pmpcfg0, %0\n" :: "r"(PMP_R | PMP_W | PMP_X | PMP_NAPOT) : "memory"); // currently dummy since tlb flushed when set_csr on mseccfg asm volatile ("fence.i \n"); } // in case mml set, printf cannot be used in U mode __attribute ((section(".text_umode"))) void try_access_umode() { #if TEST_RW target_arr[0] += 1; // const unsigned long delta = 0x1020304005060708UL; // *(long *)target_arr += delta; // if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { // actual_rw_fail = 1; // } #endif #if TEST_FETCH target_foo_umode(); #endif /* * switch to M mode by invoking a write access fault for special address. */ *(char *)(FAKE_ADDRESS) = 1; } static void checkTestResult() { int ret = 0; if (expected_r_fail != actual_r_fail) { ret += 1; printf("Read test fail, expected %d, actual %d.\n", expected_r_fail, actual_r_fail); } if (expected_w_fail != actual_w_fail) { ret += 2; printf("Write test fail, expected %d, actual %d.\n", expected_w_fail, actual_w_fail); } if (expected_x_fail != actual_x_fail) { ret += 4; printf("Fetch test fail, expected %d, actual %d.\n", expected_x_fail, actual_x_fail); } printf("Test done, exit %d.\n", ret); exit(ret); } int main() { // assert in M mode set_cfg(); switch_smode(); return 0; // assert 0 }
the_stack_data/152959.c
// The third order polynomial p(x) with p(0)=y1, p'(0)=y2-y0, p(1)=y2, p'(1)=y3-y1.
double catmull_rom(double x, double y0, double y1, double y2, double y3)
{
	double a3 = y3 - y2 - y0 + y1;
	y3 = y0 - y1 - a3;
	return y1 + x * (y2 - y0 + x * (y3 + x * a3));
}
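/*
 * A small self-check sketch (the function name and the tolerance are
 * illustrative additions, not part of the original file): whatever control
 * values are supplied, the interpolant above must reproduce its stated
 * endpoint conditions p(0) = y1 and p(1) = y2.
 */
#include <math.h>

static int catmull_rom_hits_endpoints(double y0, double y1, double y2, double y3)
{
	double at0 = catmull_rom(0.0, y0, y1, y2, y3);
	double at1 = catmull_rom(1.0, y0, y1, y2, y3);
	return fabs(at0 - y1) < 1e-9 && fabs(at1 - y2) < 1e-9;
}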
the_stack_data/773961.c
#include <stdlib.h> struct UnnamedTypeUnionField { int count; union { int i; char c[4]; } value; }; int extract_value(struct UnnamedTypeUnionField arg) { if (arg.count > 0) { return arg.value.i; } return 0; } union UnnamedTypeStructField { struct { int a; double b; } value1; struct { char c; int d; } value2; }; int extract_value_2(union UnnamedTypeStructField arg) { if (arg.value2.c == 0) { return arg.value1.a; } return arg.value2.d; } struct AnonymousUnionField { union { // anonymous union int i; char c[4]; }; }; int extract_value_3(struct AnonymousUnionField arg) { if (arg.i > 0) { return arg.i; } return (int)arg.c[0]; } union AnonymousStructField { struct { int a; double b; }; struct { char c; int d; }; }; int extract_value_4(union AnonymousStructField arg) { if (arg.c == 0) { return arg.a; } return arg.d; } struct Vector { int len; int data[]; }; long sum_of(struct Vector arg) { long sum = 0; for (int i = 0; i < arg.len; i++) { sum += arg.data[i]; } if (sum > 0) { return sum; } if (sum < 0) { return -sum; } return 0; } static struct Vector* create(int len) { struct Vector *vector = malloc(sizeof(struct Vector) + sizeof(int) * len); vector->len = len; for (int i = 0; i < vector->len; i++) { vector->data[i] = 0; } return vector; } struct IncompleteType; void accept_incomplete(struct IncompleteType *arg) { free(arg); } struct IncompleteType* return_incomplete() { void *ptr = malloc(100); return (struct IncompleteType *)(ptr); } struct ForwardDecl; struct ForwardDecl* pass_forward_decl(struct ForwardDecl* arg) { return arg; } struct ForwardDecl { int x, y; }; // it's a duplicate of struct in types.h struct SupportedStruct1 { int length; int x[2]; }; long mul_of(struct SupportedStruct1 arg) { long mul = 1; for (int i = 0; i < arg.length; i++) { mul *= arg.x[i]; } if (mul < 0) { return -mul; } return mul; } enum { FALSE, TRUE } option; int check_option() { if (option == FALSE) { return -1; } return +1; }
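/*
 * A short usage sketch (the demo function is an illustrative addition, not
 * part of the original translation unit): it exercises the anonymous
 * union/struct accessors and the flexible-array helper defined above.
 */
static int demo_anonymous_members(void)
{
    struct AnonymousUnionField u;
    u.i = 7;                        /* members of the anonymous union are reached directly */

    union AnonymousStructField s;
    s.c = 1;                        /* likewise for the anonymous structs inside the union */
    s.d = 5;

    struct Vector *v = create(3);   /* flexible array member: room for 3 ints after len */
    v->data[2] = 9;
    int last = v->data[2];
    free(v);

    /* extract_value_3 returns u.i (7) because it is positive;
       extract_value_4 returns s.d (5) because s.c != 0 */
    return extract_value_3(u) + extract_value_4(s) + last;   /* 7 + 5 + 9 = 21 */
}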
the_stack_data/75136523.c
#include <stdio.h> void solve() { long long d, n; scanf("%lld%lld", &n, &d); long long M[1000005]; for (int i = 1; i <= n; i++) scanf("%lld", &M[i]); for (int i = n; i > 0; i--) d -= d % M[i]; printf("%lld\n", d); } int main() { int t; scanf("%d", &t); for (int i = 1; i <= t; i++) { printf("Case #%d: ", i); solve(); } return 0; }
the_stack_data/1145342.c
#include <stdio.h>   /* needed for printf below */

int fib(int n) {
  if (n <= 2) return 1;
  else return fib(n-1) + fib(n-2);
}

int foo(int n) {
  printf("Hello World!\n");
  printf("fib(%d) = %d\n", n, fib(n));
  //printf("add(%d, %d) = %d\n", n, 2 * n, add(n, 2 * n));
  return 0;
}

int main(int argc, char** argv) {
  //printf("Hello, world, %d!\n", fib(10));
  /*printf("argc = %d\n", argc);
  int i = 0;
  for(i = 0; i < argc; i++) {
    printf("argv[%d] = %s\n", i, argv[i]);
  }
  return 3;*/
  return 0;
}
the_stack_data/132522.c
/*
   Write a C program that reads a string s1 of at most 50 characters and builds
   a new string s2 by copying the contents of s1 while replacing the vowels with
   the character *. Finally the program prints s2.
   Example: s1="straniero" -> s2="str*n**r*".
*/
#include <stdio.h>
#define DIM 50

int main(){
    char s1[DIM+1], s2[DIM+1];
    int i;

    scanf("%50s", s1);   /* width limit keeps the input within DIM characters */

    for(i=0; s1[i]!='\0'; i++){
        if(s1[i]=='a' || s1[i]=='e' || s1[i]=='i' || s1[i]=='o' || s1[i]=='u')
            s2[i] = '*';
        else
            s2[i] = s1[i];
    }
    s2[i] = '\0';

    printf("%s", s2);

    return 0;
}
the_stack_data/115264.c
#include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <fcntl.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/ioctl.h> #include <linux/vt.h> int main (int argc, char** argv) { int consfd = -1; int vtn = 0; int sleeptime = 0; FILE* consio; char vtname[100]; if(argc==1){ printf("I want a vt number\n"); return 1; } vtn = atoi(argv[1]); if(vtn==0){ printf("I want a numeric vt number\n"); return 1; } snprintf(vtname, 20, "/dev/tty%d", vtn); printf("VT name is %s\n", vtname); consio = fopen(vtname, "r+"); if(consio == NULL){ printf("Oops, cannot open the vt\n"); return 2; } consfd = open("/dev/console", O_RDWR); if (consfd == -1){ printf("Oops, cannot open /dev/console"); return 2; } printf("Starting\n"); /* just in case */ ioctl(consfd, VT_UNLOCKSWITCH); if(ioctl(consfd, VT_ACTIVATE, vtn)==-1){ printf("Couldn't switch console\n"); return 3; } fprintf(consio,"Starting\n"); if(ioctl(consfd, VT_LOCKSWITCH)==-1){ printf("Couldn't lock console\n"); fprintf(consio, "Couldn't lock console\n"); return 3; } printf("Mwahaha?\n"); fprintf(consio, "Mwahaha?\n"); if(argc>2){ sleeptime = atoi(argv[2]); } if(sleeptime==0){ sleeptime = 5; }; sleep(sleeptime); printf("Finishing\n"); fprintf(consio,"Finishing\n"); if(ioctl(consfd, VT_UNLOCKSWITCH)==-1){ printf("Couldn't unlock console, strange\n"); fprintf(consio, "Couldn't unlock console, strange\n"); return 4; } printf("Bye\n"); fprintf(consio,"Bye\n"); return 0; }
the_stack_data/53959.c
//<ctype.h> //example shows the usage of isgraph() function. #include <stdio.h> #include <ctype.h> int main () { int var1 = '3'; int var2 = 'm'; int var3 = ' '; if( isgraph(var1) ) { printf("var1 = |%c| can be printed\n", var1 ); } else { printf("var1 = |%c| can't be printed\n", var1 ); } if( isgraph(var2) ) { printf("var2 = |%c| can be printed\n", var2 ); } else { printf("var2 = |%c| can't be printed\n", var2 ); } if( isgraph(var3) ) { printf("var3 = |%c| can be printed\n", var3 ); } else { printf("var3 = |%c| can't be printed\n", var3 ); } return(0); }
the_stack_data/64201378.c
/*
 Exercise 1 - Calculations
 Write a C program to input marks of two subjects. Calculate and print
 the average of the two marks.
*/
#include <stdio.h>

int main()
{
    int no1,no2,total;
    float ave;

    printf("Enter the number 1 : ");
    scanf("%d",&no1);

    printf("Enter the number 2 : ");
    scanf("%d",&no2);

    total=no1+no2;
    ave=total/2.0f;   /* divide by 2.0f so the half mark of an odd total is not truncated */

    printf("Average is %.2f",ave);

    return 0;
}
the_stack_data/150140631.c
#include <stdio.h> #include <math.h> void start_slice(){ __asm__ __volatile__ (""); } void end_slice(){ __asm__ __volatile__ (""); } double foo(double a) { return (a*sqrt(a)); } double sum(double (*fn)(double)) { start_slice(); int i; double b = 2.3; b = fn(b); end_slice(); return b; } int main(int argc, char *argv[]) { double x = sum(foo); printf("x:%e", x); }
the_stack_data/23576499.c
#include <stdbool.h>

/* Decide whether the array can be made non-decreasing by modifying at most one element. */
bool checkPossibility(int* nums, int numsSize)
{
    int op = 1;   // number of modifications still allowed
    for (int i = 1; i < numsSize && op >= 0; ++i) {
        if (nums[i - 1] > nums[i]) {
            --op;                                   // one element has to change here
            if (i - 2 >= 0 && nums[i - 2] > nums[i])
                nums[i] = nums[i - 1];              // lowering nums[i-1] would break order, so raise nums[i]
            /* otherwise nums[i-1] is conceptually lowered to nums[i]; no write is
               needed because earlier positions are never re-examined */
        }
    }
    return op >= 0;
}
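/*
 * Two tiny illustrative cases (the helper and the arrays are additions, not
 * part of the original): {4,2,3} becomes non-decreasing by lowering the 4,
 * so one modification is enough; {4,2,1} needs two changes and must fail.
 */
static bool check_possibility_examples(void)
{
    int a[] = {4, 2, 3};
    int b[] = {4, 2, 1};
    return checkPossibility(a, 3) == true && checkPossibility(b, 3) == false;
}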
the_stack_data/6388686.c
/* Licensed under the 3-clause BSD License, see the file LICENSE.txt in the root of this directory.*/ #include <stdio.h> #include <stdint.h> struct dtm{ int y; // year unsigned int m; //month unsigned int d; // day int unsigned h; // hour unsigned int mi; // minute unsigned int s; // seconds unsigned int a100s_of_nano_sec; // 100s_of_nano_sec seconds }; // converts to Julian day number (less the half day offset) // Caller must ensure valid input uint32_t jd(int y, int m, int d) { return (1461 * (y+4800+(m-14)/12))/4 +(367 * (m-2-12 * ((m-14)/12)))/12 - (3 * ((y + 4900 + (m - 14)/12)/100))/4 + d-32075; } // converts a day, month, year and time to a UNIX time-stamp. // This will overflow in 2038. BSD in 32- and 64-bit machine are now using // 64-bit integer to avoid the overflow. Linux on 64-bit also returns 64-bit integer // Caller must ensure valid input uint32_t unix_time_stamp(int y, int m, int d, int h, int mi, int s) { uint32_t j; j = jd(y, m, d)-jd(1970,1,1); j = j*24*60*60; j += h*60*60 + mi*60 + s; return j; } // converts day, month, year and time to time stamp used in Active Directory // It is the number of '100-nano seconds' that has elapsed since 1 Jan 1601. // It returns a 64-bit unsigned integer uint64_t adtime(struct dtm z) { uint64_t x; x=jd(z.y, z.m, z.d) - jd(1601,1,1); x=x*24*60*60*10000000L; x += ((uint64_t ) z.h *60*60 *10000000UL); x += (uint64_t ) z.mi*60*10000000UL; x += (uint64_t ) z.s*10000000UL; x += (uint64_t ) z.a100s_of_nano_sec/100; return x; } int adtime_inv(uint64_t z, struct dtm *m) { uint64_t a; uint32_t e,f,g,h; int D,M,Y; a=z%10000000UL; z=z/10000000UL; m->a100s_of_nano_sec= a; a=z%60; z=z/60; m->s=a; a=z%60; z=z/60; m->mi=a; a=z%24; z=z/24; m->h=a; z=z+jd(1601,1,1); f = z + 1401 + (((4 * z + 274277) / 146097) * 3) / 4 -38; e=4*f+3; g=(e%1461)/4; h=5*g+2; D=(h%153)/5+1; M=((h/153+2)%12)+1; Y=(e/1461) - 4716 + (12+2-M)/12; m->d=D; m->m=M; m->y=Y; return 0; } #ifdef TEST int main() { uint64_t x; uint32_t u; int i; struct dtm g={1786, 8, 31,15,56,17,123456}, M; x=jd(2000,1,1); printf("jd = %lld\n", x); u=unix_time_stamp(1970,1,1,1,5,3); printf("jd = %u\n", u); x=adtime(g); printf("x = %lld\n", x); i=adtime_inv(x, &M); printf("\n\n%u/%u/%u %i:%i:%i \t%u\n",M.d, M.m, M.y, M.h,M.mi,M.s,M.a100s_of_nano_sec); u=unix_time_stamp(2009,1,21,4,5,0); printf("UTS = %u", u); return 0; } #endif
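/*
 * A quick consistency sketch (the helper name is an illustrative addition):
 * the Julian-day helper must advance by exactly one across a year boundary,
 * and the UNIX timestamp of the epoch itself is zero by construction, since
 * unix_time_stamp() subtracts jd(1970,1,1) from itself before scaling.
 */
static int date_helpers_consistent(void)
{
	return (jd(2000, 1, 1) - jd(1999, 12, 31)) == 1
	    && unix_time_stamp(1970, 1, 1, 0, 0, 0) == 0;
}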
the_stack_data/712692.c
#include <stdio.h> int main() { printf("This is a test for C program.\n"); return 0; }
the_stack_data/594548.c
/* ************************************************************************** */ /* */ /* ::: :::::::: */ /* ft_strier.c :+: :+: :+: */ /* +:+ +:+ +:+ */ /* By: habdel-i <[email protected]> +#+ +:+ +#+ */ /* +#+#+#+#+#+ +#+ */ /* Created: 2017/11/13 12:46:31 by habdel-i #+# #+# */ /* Updated: 2017/11/13 12:49:46 by habdel-i ### ########.fr */ /* */ /* ************************************************************************** */ void ft_striter(char *str, void (*f)(char *)) { if (!str || !f) return ; while (*str) { f(str); str++; } }
the_stack_data/156393953.c
#include <stdio.h> #define MAX_TREE_SIZE 100 typedef int element_type_t; typedef struct __node { element_type_t data; int parent; } node_t; typedef struct { node_t nodes[MAX_TREE_SIZE]; int r, n; } tree_t;
the_stack_data/132953193.c
/* Check that lstat:ing an nonexistent file works as expected. #notarget: cris*-*-elf */ #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <errno.h> #include <stdio.h> #include <stdlib.h> int main (void) { struct stat buf; if (lstat ("nonexistent", &buf) == 0 || errno != ENOENT) abort (); printf ("pass\n"); exit (0); }
the_stack_data/1045686.c
/* ** Copyright 2001, Travis Geiselbrecht. All rights reserved. ** Distributed under the terms of the NewOS License. */ #include <sys/types.h> #include <string.h> size_t strspn(char const *s, char const *accept) { const char *p; const char *a; size_t count = 0; for (p = s; *p != '\0'; ++p) { for (a = accept; *a != '\0'; ++a) { if (*p == *a) break; } if (*a == '\0') return count; ++count; } return count; }
the_stack_data/445671.c
// wigner_3j_m // =========== // // Evaluate the 3j symbol g(m2) = (l1 l2 l3 ) // (m1 m2 -m1-m2) // for all allowed values of m2, the other parameters being held fixed. // // Origin // ------ // SLATEC, http://www.netlib.org/slatec. // // Keywords // -------- // 3j coefficients, 3j symbols, Clebsch-Gordan coefficients, // Racah coefficients, vector addition coefficients, Wigner coefficients // // Author // ------ // Gordon, R. G., Harvard University // Schulten, K., Max Planck Institute // // Usage // ----- // int wigner_3j_m(double l1, double l2, double l3, double m1, double* m2min, // double* m2max, double* thrcof, int ndim); // // Arguments // --------- // l1 : double // Parameter in 3j symbol. // l2 : double // Parameter in 3j symbol. // l3 : double // Parameter in 3j symbol. // m1 : double // Parameter in 3j symbol. // m2min : double*, out // Smallest allowable m2 in 3j symbol. // m2max : double*, out // Largest allowable m2 in 3j symbol. // thrcof : double* out // Set of 3j coefficients generated by evaluating the 3j symbol for all // allowed values of m2. The size of `thrcof` must be `m2max-m2min+1` // and `thrcof[i]` will contain g(m2min+i), i=0, 1, ..., m2max-m2min. // If `thrcof` is `NULL`, the function will return after `m2min` and // `m2max` are set, so that an array of the correct size can be allocated // for a subsequent function call. // ndim : int // Declared length of `thrcof` in calling program. If `thrcof` is `NULL`, // then `ndim` should be set to 0. // // Returns // ------- // ier : int // Error flag. // ier=0 No errors. // ier=1 Either `l1 < abs(m1)` or `l1+abs(m1)` non-integer. // ier=2 `abs(l1-l2) <= l3 <= l1+l2` not satisfied. // ier=3 `l1+l2+l3` not an integer. // ier=4 `m2max-m2min` not an integer. // ier=5 `m2max` less than `m2min`. // ier=6 `ndim` less than `m2max-m2min+1`. // // Description // ----------- // Although conventionally the parameters of the vector addition // coefficients satisfy certain restrictions, such as being integers // or integers plus 1/2, the restrictions imposed on input to this // subroutine are somewhat weaker. See, for example, Section 27.9 of // Abramowitz and Stegun or Appendix C of Volume II of A. Messiah. // The restrictions imposed by this subroutine are // 1. `l1 >= abs(m1)` and `l1+abs(m1)` must be an integer; // 2. `abs(l1-l2)` <= l3 <= l1+l2`; // 3. `l1+l2+l3` must be an integer; // 4. `m2max-m2min` must be an integer, where // `m2max=min(l2,l3-m1)` and `m2min=max(-l2,-l3-m1)`. // If the conventional restrictions are satisfied, then these // restrictions are met. // // The user should be cautious in using input parameters that do // not satisfy the conventional restrictions. For example, the // the subroutine produces values of // g(M2) = (0.75 1.50 1.75 ) // (0.25 m2 -0.25-m2) // for m2=-1.5,-0.5,0.5,1.5 but none of the symmetry properties of the // 3j symbol, set forth on page 1056 of Messiah, is satisfied. // // The subroutine generates g(m2min), g(m2min+1), ..., g(m2max) // where `m2min` and `m2max` are defined above. The sequence g(m2) is // generated by a three-term recurrence algorithm with scaling to // control overflow. Both backward and forward recurrence are used to // maintain numerical stability. The two recurrence sequences are // matched at an interior point and are normalized from the unitary // property of 3j coefficients and Wigner's phase convention. // // The algorithm is suited to applications in which large quantum // numbers arise, such as in molecular dynamics. // // References // ---------- // 1. 
Abramowitz, M., and Stegun, I. A., Eds., Handbook of Mathematical // Functions with Formulas, Graphs and Mathematical Tables, NBS Applied // Mathematics Series 55, June 1964 and subsequent printings. // 2. Messiah, Albert., Quantum Mechanics, Volume II, North-Holland // Publishing Company, 1963. // 3. Schulten, Klaus and Gordon, Roy G., Exact recursive evaluation of 3j // and 6j coefficients for quantum-mechanical coupling of angular momenta, // J Math Phys, v 16, no. 10, October 1975, pp. 1961-1970. // 4. Schulten, Klaus and Gordon, Roy G., Semiclassical approximations to 3j // and 6j coefficients for quantum-mechanical coupling of angular momenta, // J Math Phys, v 16, no. 10, October 1975, pp. 1971-1988. // 5. Schulten, Klaus and Gordon, Roy G., Recursive evaluation of 3j and 6j // coefficients, Computer Phys Comm, v 11, 1976, pp. 269-278. // // Revision history // ---------------- // 01 Jan 1975 DATE WRITTEN // 15 May 1988 SLATEC prologue added by G. C. Nielson, NBS; parameters // HUGE and TINY revised to depend on D1MACH. // 29 Dec 1989 Prologue description rewritten; other prologue sections // revised; MMATCH (location of match point for recurrences) // removed from argument list; argument IER changed to serve // only as an error flag (previously, in cases without error, // it returned the number of scalings); number of error codes // increased to provide more precise error information; // program comments revised; SLATEC error handler calls // introduced to enable printing of error messages to meet // SLATEC standards. These changes were done by D. W. Lozier, // M. A. McClain and J. M. Smith of the National Institute // of Standards and Technology, formerly NBS. // 15 Apr 1991 Mixed type expressions eliminated; variable C1 initialized; // description of THRCOF expanded. These changes were done by // D. W. Lozier. // 04 Nov 2020 Translation to C by N. Tessore. // #include <math.h> #include <float.h> static inline int phase(int m) { return 1-2*(m&1); } int wigner_3j_m(double l1, double l2, double l3, double m1, double* m2min_out, double* m2max_out, double* thrcof, int ndim) { // variables int i, index, lstep, n, nfin, nfinp1, nfinp2, nlim, nstep2; double a1, a1s, c1, c1old, c2, cnorm, dv, m2, m2min, m2max, m3, newfac, oldfac, ratio, sign1, sign2, sum1, sum2, sumbac, sumfor, sumuni, thresh, x, x1, x2, x3, y, y1, y2, y3; // constants const double eps = .01; // HUGE is the square root of one twentieth of the largest floating // point number, approximately. const double huge = sqrt(DBL_MAX/20); const double srhuge = sqrt(huge); const double tiny = 1.0/huge; const double srtiny = 1.0/srhuge; // Check error condition 1. if((l1-fabs(m1)+eps < 0) || (fmod(l1+fabs(m1)+eps, 1) >= eps+eps)) return 1; // Check error condition 2. if((l1+l2-l3 < -eps) || (l1-l2+l3 < -eps) || (-l1+l2+l3 < -eps)) return 2; // Check error condition 3. if(fmod(l1+l2+l3+eps, 1) >= eps+eps) return 3; // Limits for m2 *m2min_out = m2min = fmax(-l2, -l3-m1); *m2max_out = m2max = fmin(l2, l3-m1); // Check error condition 4. if(fmod(m2max-m2min+eps, 1) >= eps+eps) return 4; // Check error condition 5. if(m2min >= m2max+eps) return 5; // Only report m2min and m2max. if(!thrcof) return 0; // Number of coefficients to compute. nfin = m2max-m2min+1+eps; // Check error condition 6. if(ndim < nfin) return 6; // Check whether m2 can take only one value, ie. m2min = m2max. 
if(m2min >= m2max-eps) { thrcof[0] = phase(fabs(l2-l3-m1)+eps)/sqrt(l1+l2+l3+1); return 0; } // This is reached in case that m2 and m3 take more than one value, // i.e. m2min < m2max. // Start of forward recursion from m2 = m2min m2 = m2min; thrcof[0] = srtiny; newfac = 0.; c1 = 0.; sum1 = tiny; for(lstep = 2;; lstep += 1) { m2 = m2 + 1; m3 = - m1 - m2; c1old = fabs(c1); oldfac = newfac; a1 = (l2-m2+1) * (l2+m2) * (l3+m3+1) * (l3-m3); newfac = sqrt(a1); dv = (l1+l2+l3+1)*(l2+l3-l1) - (l2-m2+1)*(l3+m3+1) - (l2+m2-1)*(l3-m3-1); c1 = - dv / newfac; // If m2 = m2min + 1, the third term in the recursion equation vanishes, // hence if(lstep == 2) { x = srtiny * c1; thrcof[1] = x; sum1 = sum1 + tiny * (c1*c1); if(lstep == nfin) goto nobac; } else { c2 = - oldfac / newfac; // Recursion to the next 3j coefficient x = c1 * thrcof[lstep-2] + c2 * thrcof[lstep-3]; thrcof[lstep-1] = x; sumfor = sum1; sum1 = sum1 + x*x; if(lstep == nfin) break; // See if last unnormalized 3j coefficient exceeds srhuge if(fabs(x) > srhuge) { // This is reached if last 3j coefficient larger than srhuge, // so that the recursion series thrcof[0], ... , thrcof[lstep-1] // has to be rescaled to prevent overflow for(i = 0; i < lstep; ++i) { if(fabs(thrcof[i]) < srtiny) thrcof[i] = 0; else thrcof[i] = thrcof[i] / srhuge; } sum1 = sum1 / huge; sumfor = sumfor / huge; x = x / srhuge; } // As long as abs(c1) is decreasing, the recursion proceeds towards // increasing 3j values and, hence, is numerically stable. Once // an increase of abs(c1) is detected, the recursion direction is // reversed. if(c1old <= fabs(c1)) break; } } // Keep three 3j coefficients around mmatch for comparison later // with backward recursion values. x1 = x; x2 = thrcof[lstep-2]; x3 = thrcof[lstep-3]; nstep2 = nfin - lstep + 3; // Starting backward recursion from m2max taking nstep2 steps, so // that forwards and backwards recursion overlap at the three points // m2 = mmatch+1, mmatch, mmatch-1. nfinp1 = nfin + 1; nfinp2 = nfin + 2; thrcof[nfin-1] = srtiny; sum2 = tiny; m2 = m2max + 2; for(lstep = 2;; lstep += 1) { m2 = m2 - 1; m3 = - m1 - m2; oldfac = newfac; a1s = (l2-m2+2) * (l2+m2-1) * (l3+m3+2) * (l3-m3-1); newfac = sqrt(a1s); dv = (l1+l2+l3+1)*(l2+l3-l1) - (l2-m2+1)*(l3+m3+1) - (l2+m2-1)*(l3-m3-1); c1 = - dv / newfac; // If m2 = m2max + 1 the third term in the recursion equation vanishes if(lstep == 2) { y = srtiny * c1; thrcof[nfin-2] = y; if(lstep == nstep2) break; sumbac = sum2; sum2 = sum2 + y*y; } else { c2 = - oldfac / newfac; // Recursion to the next 3j coefficient y = c1 * thrcof[nfinp1-lstep] + c2 * thrcof[nfinp2-lstep]; if(lstep == nstep2) break; thrcof[nfin-lstep] = y; sumbac = sum2; sum2 = sum2 + y*y; // See if last 3j coefficient exceeds srhuge if(fabs(y) > srhuge) { // This is reached if last 3j coefficient larger than SRHUGE, // so that the recursion series thrcof[nfin-1], ... , // thrcof[nfin-lstep] has to be rescaled to prevent overflow. for(i = 1; i <= lstep; ++i) { index = nfin - i; if(fabs(thrcof[index]) < srtiny) thrcof[index] = 0; else thrcof[index] = thrcof[index] / srhuge; } sum2 = sum2 / huge; sumbac = sumbac / huge; } } } // The forward recursion 3j coefficients x1, x2, x3 are to be matched // with the corresponding backward recursion values y1, y2, y3. y3 = y; y2 = thrcof[nfinp1-lstep]; y1 = thrcof[nfinp2-lstep]; // Determine now ratio such that yi = ratio * xi (i=1,2,3) holds // with minimal error. 
ratio = ( x1*y1 + x2*y2 + x3*y3 ) / ( x1*x1 + x2*x2 + x3*x3 ); nlim = nfin - nstep2 + 1; if(fabs(ratio) >= 1) { for(n = 0; n < nlim; ++n) thrcof[n] = ratio * thrcof[n]; sumuni = ratio * ratio * sumfor + sumbac; } else { ratio = 1 / ratio; for(n = nlim; n < nfin; ++n) thrcof[n] = ratio * thrcof[n]; sumuni = sumfor + ratio*ratio*sumbac; } goto norm; nobac: sumuni = sum1; // Normalize 3j coefficients norm: cnorm = 1 / sqrt((l1+l1+1) * sumuni); // Sign convention for last 3j coefficient determines overall phase sign1 = copysign(1, thrcof[nfin-1]); sign2 = phase(fabs(l2-l3-m1)+eps); if(sign1*sign2 < 0) cnorm = - cnorm; if(fabs(cnorm) < 1) { thresh = tiny / fabs(cnorm); for(n = 0; n < nfin; ++n) { if(fabs(thrcof[n]) < thresh) thrcof[n] = 0; else thrcof[n] = cnorm * thrcof[n]; } } else { for(n = 0; n < nfin; ++n) thrcof[n] = cnorm * thrcof[n]; } return 0; }
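/*
 * A minimal calling sketch following the two-pass pattern described in the
 * documentation above (the wrapper name and its error-handling style are
 * illustrative additions): first probe with thrcof == NULL to learn
 * m2min/m2max, then allocate a buffer of the right size and fill it.
 */
#include <stdlib.h>

static double* wigner_3j_m_all(double l1, double l2, double l3, double m1,
                               double* m2min, int* count)
{
    double m2max;

    /* pass 1: thrcof == NULL, ndim == 0, only m2min and m2max are set */
    if (wigner_3j_m(l1, l2, l3, m1, m2min, &m2max, NULL, 0) != 0)
        return NULL;                           /* invalid parameters */

    *count = (int)(m2max - *m2min + 1.01);     /* number of allowed m2 values */

    double* thrcof = malloc((size_t)*count * sizeof *thrcof);
    if (thrcof == NULL)
        return NULL;

    /* pass 2: fill thrcof[i] = g(m2min + i), i = 0 .. count-1 */
    if (wigner_3j_m(l1, l2, l3, m1, m2min, &m2max, thrcof, *count) != 0) {
        free(thrcof);
        return NULL;
    }
    return thrcof;
}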
the_stack_data/178265072.c
int foo(void); int foo(void) { int p = 1; int i = 0; i = p ^ 2; return i; }
the_stack_data/45451401.c
#include <stdlib.h> static unsigned temper(unsigned x) { x ^= x>>11; x ^= x<<7 & 0x9D2C5680; x ^= x<<15 & 0xEFC60000; x ^= x>>18; return x; } int rand_r(unsigned *seed) { return temper(*seed = *seed * 1103515245 + 12345)/2; }
the_stack_data/24257.c
/*
 * Level_1 Exercise 1.4.2: program to ask for text input from the keyboard. The output of this
 * program should be the amount of characters, the amount of words and the amount of
 * newlines that have been typed. Multiple consecutive spaces should not be counted as
 * multiple words. Using do while
 *
 * @file CheckAmountV2.c
 * @author Chunyu Yuan
 * @version 1.0 11/29/2020
 */

// Preprocessor for include file
#include <stdio.h>  // C style I/O

/*
 * Controls operation of the program
 * Return type of main() expects an int
 *
 * @function main()
 * @param none
 * @return 0
 */
int main()
{
    int num_char = 0;    // number of characters, initially 0
    int num_line = 1;    // number of lines, initially 1
    int num_word = 0;    // number of words, initially 0
    int cur_char;        // current typed character (int so it can hold EOF)
    int pre_char = ' ';  // previous typed character

    printf("Please input the text, (CTRL + D) stop: \n");

    // do - while loop, prompt the user to input the text
    do {
        cur_char = getchar();  // get the character which the user typed

        // CTRL + D at the start of a line makes getchar() return EOF;
        // while there is still input, update the counters
        if (cur_char != EOF) {
            ++num_char;                        // one more character typed
            if (cur_char == '\n')              // the enter key starts a new line
                ++num_line;
            // a word starts when a non-separator follows a space, tab or newline,
            // so runs of consecutive spaces are not counted as extra words
            if (cur_char != ' ' && cur_char != '\t' && cur_char != '\n' &&
                (pre_char == ' ' || pre_char == '\t' || pre_char == '\n'))
                ++num_word;
            pre_char = cur_char;               // remember the previous character
        }
    } while (cur_char != EOF);  // leave the loop at end of input (CTRL + D)

    printf(" number of lines = %d\n number of characters = %d\n number of words = %d",
           num_line, num_char, num_word);  // print the results

    return 0;
}
the_stack_data/237641951.c
/* f2c.h -- Standard Fortran to C header file */ /** barf [ba:rf] 2. "He suggested using FORTRAN, and everybody barfed." - From The Shogakukan DICTIONARY OF NEW ENGLISH (Second edition) */ #ifndef F2C_INCLUDE #define F2C_INCLUDE #include <math.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <complex.h> #ifdef complex #undef complex #endif #ifdef I #undef I #endif #if defined(_WIN64) typedef long long BLASLONG; typedef unsigned long long BLASULONG; #else typedef long BLASLONG; typedef unsigned long BLASULONG; #endif #ifdef LAPACK_ILP64 typedef BLASLONG blasint; #if defined(_WIN64) #define blasabs(x) llabs(x) #else #define blasabs(x) labs(x) #endif #else typedef int blasint; #define blasabs(x) abs(x) #endif typedef blasint integer; typedef unsigned int uinteger; typedef char *address; typedef short int shortint; typedef float real; typedef double doublereal; typedef struct { real r, i; } complex; typedef struct { doublereal r, i; } doublecomplex; static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;} static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;} static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;} static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;} #define pCf(z) (*_pCf(z)) #define pCd(z) (*_pCd(z)) typedef int logical; typedef short int shortlogical; typedef char logical1; typedef char integer1; #define TRUE_ (1) #define FALSE_ (0) /* Extern is for use with -E */ #ifndef Extern #define Extern extern #endif /* I/O stuff */ typedef int flag; typedef int ftnlen; typedef int ftnint; /*external read, write*/ typedef struct { flag cierr; ftnint ciunit; flag ciend; char *cifmt; ftnint cirec; } cilist; /*internal read, write*/ typedef struct { flag icierr; char *iciunit; flag iciend; char *icifmt; ftnint icirlen; ftnint icirnum; } icilist; /*open*/ typedef struct { flag oerr; ftnint ounit; char *ofnm; ftnlen ofnmlen; char *osta; char *oacc; char *ofm; ftnint orl; char *oblnk; } olist; /*close*/ typedef struct { flag cerr; ftnint cunit; char *csta; } cllist; /*rewind, backspace, endfile*/ typedef struct { flag aerr; ftnint aunit; } alist; /* inquire */ typedef struct { flag inerr; ftnint inunit; char *infile; ftnlen infilen; ftnint *inex; /*parameters in standard's order*/ ftnint *inopen; ftnint *innum; ftnint *innamed; char *inname; ftnlen innamlen; char *inacc; ftnlen inacclen; char *inseq; ftnlen inseqlen; char *indir; ftnlen indirlen; char *infmt; ftnlen infmtlen; char *inform; ftnint informlen; char *inunf; ftnlen inunflen; ftnint *inrecl; ftnint *innrec; char *inblank; ftnlen inblanklen; } inlist; #define VOID void union Multitype { /* for multiple entry points */ integer1 g; shortint h; integer i; /* longint j; */ real r; doublereal d; complex c; doublecomplex z; }; typedef union Multitype Multitype; struct Vardesc { /* for Namelist */ char *name; char *addr; ftnlen *dims; int type; }; typedef struct Vardesc Vardesc; struct Namelist { char *name; Vardesc **vars; int nvars; }; typedef struct Namelist Namelist; #define abs(x) ((x) >= 0 ? (x) : -(x)) #define dabs(x) (fabs(x)) #define f2cmin(a,b) ((a) <= (b) ? (a) : (b)) #define f2cmax(a,b) ((a) >= (b) ? 
(a) : (b)) #define dmin(a,b) (f2cmin(a,b)) #define dmax(a,b) (f2cmax(a,b)) #define bit_test(a,b) ((a) >> (b) & 1) #define bit_clear(a,b) ((a) & ~((uinteger)1 << (b))) #define bit_set(a,b) ((a) | ((uinteger)1 << (b))) #define abort_() { sig_die("Fortran abort routine called", 1); } #define c_abs(z) (cabsf(Cf(z))) #define c_cos(R,Z) { pCf(R)=ccos(Cf(Z)); } #define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);} #define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);} #define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));} #define c_log(R, Z) {pCf(R) = clogf(Cf(Z));} #define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));} //#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));} #define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));} #define d_abs(x) (fabs(*(x))) #define d_acos(x) (acos(*(x))) #define d_asin(x) (asin(*(x))) #define d_atan(x) (atan(*(x))) #define d_atn2(x, y) (atan2(*(x),*(y))) #define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); } #define r_cnjg(R, Z) { pCf(R) = conj(Cf(Z)); } #define d_cos(x) (cos(*(x))) #define d_cosh(x) (cosh(*(x))) #define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 ) #define d_exp(x) (exp(*(x))) #define d_imag(z) (cimag(Cd(z))) #define r_imag(z) (cimag(Cf(z))) #define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x))) #define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x))) #define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) ) #define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) ) #define d_log(x) (log(*(x))) #define d_mod(x, y) (fmod(*(x), *(y))) #define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x))) #define d_nint(x) u_nint(*(x)) #define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a))) #define d_sign(a,b) u_sign(*(a),*(b)) #define r_sign(a,b) u_sign(*(a),*(b)) #define d_sin(x) (sin(*(x))) #define d_sinh(x) (sinh(*(x))) #define d_sqrt(x) (sqrt(*(x))) #define d_tan(x) (tan(*(x))) #define d_tanh(x) (tanh(*(x))) #define i_abs(x) abs(*(x)) #define i_dnnt(x) ((integer)u_nint(*(x))) #define i_len(s, n) (n) #define i_nint(x) ((integer)u_nint(*(x))) #define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b))) #define pow_dd(ap, bp) ( pow(*(ap), *(bp))) #define pow_si(B,E) spow_ui(*(B),*(E)) #define pow_ri(B,E) spow_ui(*(B),*(E)) #define pow_di(B,E) dpow_ui(*(B),*(E)) #define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));} #define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));} #define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),*(B));} #define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; } #define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d)))) #define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; } #define sig_die(s, kill) { exit(1); } #define s_stop(s, n) {exit(0);} static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n"; #define z_abs(z) (cabs(Cd(z))) #define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));} #define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));} #define myexit_() break; #define mycycle() continue; #define myceiling(w) {ceil(w)} #define myhuge(w) {HUGE_VAL} //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);} #define mymaxloc(w,s,e,n) {dmaxloc_(w,*(s),*(e),n)} /* procedure parameter types for -A and -C++ */ #define F2C_proc_par_types 1 #ifdef __cplusplus typedef logical 
(*L_fp)(...); #else typedef logical (*L_fp)(); #endif static float spow_ui(float x, integer n) { float pow=1.0; unsigned long int u; if(n != 0) { if(n < 0) n = -n, x = 1/x; for(u = n; ; ) { if(u & 01) pow *= x; if(u >>= 1) x *= x; else break; } } return pow; } static double dpow_ui(double x, integer n) { double pow=1.0; unsigned long int u; if(n != 0) { if(n < 0) n = -n, x = 1/x; for(u = n; ; ) { if(u & 01) pow *= x; if(u >>= 1) x *= x; else break; } } return pow; } static _Complex float cpow_ui(_Complex float x, integer n) { _Complex float pow=1.0; unsigned long int u; if(n != 0) { if(n < 0) n = -n, x = 1/x; for(u = n; ; ) { if(u & 01) pow *= x; if(u >>= 1) x *= x; else break; } } return pow; } static _Complex double zpow_ui(_Complex double x, integer n) { _Complex double pow=1.0; unsigned long int u; if(n != 0) { if(n < 0) n = -n, x = 1/x; for(u = n; ; ) { if(u & 01) pow *= x; if(u >>= 1) x *= x; else break; } } return pow; } static integer pow_ii(integer x, integer n) { integer pow; unsigned long int u; if (n <= 0) { if (n == 0 || x == 1) pow = 1; else if (x != -1) pow = x == 0 ? 1/x : 0; else n = -n; } if ((n > 0) || !(n == 0 || x == 1 || x != -1)) { u = n; for(pow = 1; ; ) { if(u & 01) pow *= x; if(u >>= 1) x *= x; else break; } } return pow; } static integer dmaxloc_(double *w, integer s, integer e, integer *n) { double m; integer i, mi; for(m=w[s-1], mi=s, i=s+1; i<=e; i++) if (w[i-1]>m) mi=i ,m=w[i-1]; return mi-s+1; } static integer smaxloc_(float *w, integer s, integer e, integer *n) { float m; integer i, mi; for(m=w[s-1], mi=s, i=s+1; i<=e; i++) if (w[i-1]>m) mi=i ,m=w[i-1]; return mi-s+1; } static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) { integer n = *n_, incx = *incx_, incy = *incy_, i; _Complex float zdotc = 0.0; if (incx == 1 && incy == 1) { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += conjf(Cf(&x[i])) * Cf(&y[i]); } } else { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]); } } pCf(z) = zdotc; } static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) { integer n = *n_, incx = *incx_, incy = *incy_, i; _Complex double zdotc = 0.0; if (incx == 1 && incy == 1) { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += conj(Cd(&x[i])) * Cd(&y[i]); } } else { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]); } } pCd(z) = zdotc; } static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) { integer n = *n_, incx = *incx_, incy = *incy_, i; _Complex float zdotc = 0.0; if (incx == 1 && incy == 1) { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += Cf(&x[i]) * Cf(&y[i]); } } else { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]); } } pCf(z) = zdotc; } static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) { integer n = *n_, incx = *incx_, incy = *incy_, i; _Complex double zdotc = 0.0; if (incx == 1 && incy == 1) { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += Cd(&x[i]) * Cd(&y[i]); } } else { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]); } } pCd(z) = zdotc; } #endif /* -- translated by f2c (version 20000121). 
You must link the resulting object file with the libraries: -lf2c -lm (in that order) */ /* > \brief \b SLAQGE scales a general rectangular matrix, using row and column scaling factors computed by sg eequ. */ /* =========== DOCUMENTATION =========== */ /* Online html documentation available at */ /* http://www.netlib.org/lapack/explore-html/ */ /* > \htmlonly */ /* > Download SLAQGE + dependencies */ /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/slaqge. f"> */ /* > [TGZ]</a> */ /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/slaqge. f"> */ /* > [ZIP]</a> */ /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/slaqge. f"> */ /* > [TXT]</a> */ /* > \endhtmlonly */ /* Definition: */ /* =========== */ /* SUBROUTINE SLAQGE( M, N, A, LDA, R, C, ROWCND, COLCND, AMAX, */ /* EQUED ) */ /* CHARACTER EQUED */ /* INTEGER LDA, M, N */ /* REAL AMAX, COLCND, ROWCND */ /* REAL A( LDA, * ), C( * ), R( * ) */ /* > \par Purpose: */ /* ============= */ /* > */ /* > \verbatim */ /* > */ /* > SLAQGE equilibrates a general M by N matrix A using the row and */ /* > column scaling factors in the vectors R and C. */ /* > \endverbatim */ /* Arguments: */ /* ========== */ /* > \param[in] M */ /* > \verbatim */ /* > M is INTEGER */ /* > The number of rows of the matrix A. M >= 0. */ /* > \endverbatim */ /* > */ /* > \param[in] N */ /* > \verbatim */ /* > N is INTEGER */ /* > The number of columns of the matrix A. N >= 0. */ /* > \endverbatim */ /* > */ /* > \param[in,out] A */ /* > \verbatim */ /* > A is REAL array, dimension (LDA,N) */ /* > On entry, the M by N matrix A. */ /* > On exit, the equilibrated matrix. See EQUED for the form of */ /* > the equilibrated matrix. */ /* > \endverbatim */ /* > */ /* > \param[in] LDA */ /* > \verbatim */ /* > LDA is INTEGER */ /* > The leading dimension of the array A. LDA >= f2cmax(M,1). */ /* > \endverbatim */ /* > */ /* > \param[in] R */ /* > \verbatim */ /* > R is REAL array, dimension (M) */ /* > The row scale factors for A. */ /* > \endverbatim */ /* > */ /* > \param[in] C */ /* > \verbatim */ /* > C is REAL array, dimension (N) */ /* > The column scale factors for A. */ /* > \endverbatim */ /* > */ /* > \param[in] ROWCND */ /* > \verbatim */ /* > ROWCND is REAL */ /* > Ratio of the smallest R(i) to the largest R(i). */ /* > \endverbatim */ /* > */ /* > \param[in] COLCND */ /* > \verbatim */ /* > COLCND is REAL */ /* > Ratio of the smallest C(i) to the largest C(i). */ /* > \endverbatim */ /* > */ /* > \param[in] AMAX */ /* > \verbatim */ /* > AMAX is REAL */ /* > Absolute value of largest matrix entry. */ /* > \endverbatim */ /* > */ /* > \param[out] EQUED */ /* > \verbatim */ /* > EQUED is CHARACTER*1 */ /* > Specifies the form of equilibration that was done. */ /* > = 'N': No equilibration */ /* > = 'R': Row equilibration, i.e., A has been premultiplied by */ /* > diag(R). */ /* > = 'C': Column equilibration, i.e., A has been postmultiplied */ /* > by diag(C). */ /* > = 'B': Both row and column equilibration, i.e., A has been */ /* > replaced by diag(R) * A * diag(C). */ /* > \endverbatim */ /* > \par Internal Parameters: */ /* ========================= */ /* > */ /* > \verbatim */ /* > THRESH is a threshold value used to decide if row or column scaling */ /* > should be done based on the ratio of the row or column scaling */ /* > factors. 
If ROWCND < THRESH, row scaling is done, and if */ /* > COLCND < THRESH, column scaling is done. */ /* > */ /* > LARGE and SMALL are threshold values used to decide if row scaling */ /* > should be done based on the absolute size of the largest matrix */ /* > element. If AMAX > LARGE or AMAX < SMALL, row scaling is done. */ /* > \endverbatim */ /* Authors: */ /* ======== */ /* > \author Univ. of Tennessee */ /* > \author Univ. of California Berkeley */ /* > \author Univ. of Colorado Denver */ /* > \author NAG Ltd. */ /* > \date December 2016 */ /* > \ingroup realGEauxiliary */ /* ===================================================================== */ /* Subroutine */ int slaqge_(integer *m, integer *n, real *a, integer *lda, real *r__, real *c__, real *rowcnd, real *colcnd, real *amax, char * equed) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; /* Local variables */ integer i__, j; real large, small, cj; extern real slamch_(char *); /* -- LAPACK auxiliary routine (version 3.7.0) -- */ /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */ /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */ /* December 2016 */ /* ===================================================================== */ /* Quick return if possible */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --r__; --c__; /* Function Body */ if (*m <= 0 || *n <= 0) { *(unsigned char *)equed = 'N'; return 0; } /* Initialize LARGE and SMALL. */ small = slamch_("Safe minimum") / slamch_("Precision"); large = 1.f / small; if (*rowcnd >= .1f && *amax >= small && *amax <= large) { /* No row scaling */ if (*colcnd >= .1f) { /* No column scaling */ *(unsigned char *)equed = 'N'; } else { /* Column scaling */ i__1 = *n; for (j = 1; j <= i__1; ++j) { cj = c__[j]; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = cj * a[i__ + j * a_dim1]; /* L10: */ } /* L20: */ } *(unsigned char *)equed = 'C'; } } else if (*colcnd >= .1f) { /* Row scaling, no column scaling */ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = r__[i__] * a[i__ + j * a_dim1]; /* L30: */ } /* L40: */ } *(unsigned char *)equed = 'R'; } else { /* Row and column scaling */ i__1 = *n; for (j = 1; j <= i__1; ++j) { cj = c__[j]; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = cj * r__[i__] * a[i__ + j * a_dim1]; /* L50: */ } /* L60: */ } *(unsigned char *)equed = 'B'; } return 0; /* End of SLAQGE */ } /* slaqge_ */
the_stack_data/45450789.c
/* * Copyright (c) 2003 Gunnar Ritter * * This software is provided 'as-is', without any express or implied * warranty. In no event will the authors be held liable for any damages * arising from the use of this software. * * Permission is granted to anyone to use this software for any purpose, * including commercial applications, and to alter it and redistribute * it freely, subject to the following restrictions: * * 1. The origin of this software must not be misrepresented; you must not * claim that you wrote the original software. If you use this software * in a product, an acknowledgment in the product documentation would be * appreciated but is not required. * * 2. Altered source versions must be plainly marked as such, and must not be * misrepresented as being the original software. * * 3. This notice may not be removed or altered from any source distribution. */ /* Sccsid @(#)memalign.c 1.7 (gritter) 1/22/06 */ #if defined (__FreeBSD__) || defined (__dietlibc__) || defined (_AIX) || \ defined (__NetBSD__) || defined (__OpenBSD__) || \ defined (__DragonFly__) || defined (__APPLE__) /* * FreeBSD malloc(3) promises to page-align the return of malloc() calls * if size is at least a page. This serves for a poor man's memalign() * implementation that matches our needs. */ #include <unistd.h> #include <stdlib.h> #include "memalign.h" void * memalign(size_t alignment, size_t size) { static long pagesize; if (pagesize == 0) pagesize = sysconf(_SC_PAGESIZE); if (alignment != pagesize) return NULL; if (size < pagesize) size = pagesize; return malloc(size); } #endif /* __FreeBSD__ || __dietlibc__ || _AIX || __NetBSD__ || __OpenBSD__ || __DragonFly__ || __APPLE__ */
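/*
 * Illustrative usage sketch (added for clarity; not part of the original memalign.c).
 * It shows how the page-aligned fallback above is meant to be called: only a
 * page-sized alignment is accepted. The MEMALIGN_DEMO guard, the local prototype
 * and the demo itself are assumptions for illustration, not project code.
 */
#ifdef MEMALIGN_DEMO
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

void *memalign(size_t alignment, size_t size);

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);

	/* Request a page-aligned block; any other alignment returns NULL above. */
	void *p = memalign((size_t)pagesize, 100);
	if (p == NULL) {
		fprintf(stderr, "memalign failed\n");
		return 1;
	}
	printf("allocated page-aligned block at %p (page size %ld)\n", p, pagesize);
	free(p);
	return 0;
}
#endif /* MEMALIGN_DEMO */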
the_stack_data/175141956.c
#include<stdio.h> #include<omp.h> int main(){ int a[1000]; int b[1000]; int c[1000]; int i; // #pragma omp parallel for for(i=0;i<1000;i++){ a[i] = i; b[i] = 1; c[i] = a[i] + b[i]; } for(i=0;i<1000;i++){ printf("%d\n",c[i]); } return 0; }
the_stack_data/228596.c
//C Hello wiki #include <stdio.h> int main() { printf("Hello Hackers.wiki!\n"); return 0; }
the_stack_data/69200.c
// RUN: %clang_cc1 -E -dM -triple avr-unknown-unknown /dev/null | FileCheck -match-full-lines %s // CHECK: #define AVR 1 // CHECK: #define __AVR 1 // CHECK: #define __AVR__ 1 // CHECK: #define __ELF__ 1
the_stack_data/1150964.c
#include <stdio.h>
#include <limits.h>
#include <float.h>

int main(void)
{
	printf("\nThe range of the value of *char* is: [%d to %d]\n\n", CHAR_MIN, CHAR_MAX);
	printf("The range of the value of *short* is: [%d to %d]\n\n", SHRT_MIN, SHRT_MAX);
	printf("The range of the value of *int* is: [%d to %d]\n\n", INT_MIN, INT_MAX);
	printf("The range of the value of *long* is: [%ld to %ld]\n\n", LONG_MIN, LONG_MAX);
	printf("The range of the value of *signed char* is: [%d to %d]\n\n", SCHAR_MIN, SCHAR_MAX);
	printf("The maximum of the value of *unsigned char* is: [%d]\n\n", UCHAR_MAX);
	printf("The maximum of the value of *unsigned short* is: [%d]\n\n", USHRT_MAX);
	printf("The maximum of the value of *unsigned int* is: [%u]\n\n", UINT_MAX);
	printf("The maximum of the value of *unsigned long* is: [%lu]\n", ULONG_MAX);
	return 0;
}
the_stack_data/61075488.c
#include <stdio.h> #include <stdlib.h> struct node { int data; struct node * next; }; struct node * head; main() { head = NULL; // add at begining struct node newNode; newNode.data = 100; newNode.next = head; head = &newNode; // add at begining struct node newNode2; newNode2.data = 200; newNode2.next = head; head = &newNode2; // add at begining struct node newNode3; newNode3.data = 300; newNode3.next = head; head = &newNode3; // add at begining struct node newNode4; newNode4.data = 400; newNode4.next = head; head = &newNode4; // add at begining struct node newNode5; newNode5.data = 500; newNode5.next = head; head = &newNode5; int i; struct node * cur; for(cur= head; cur != NULL; cur= cur->next) printf("%d ", cur->data); printf("\n"); }
the_stack_data/111078581.c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/stat.h>
#include <fcntl.h>

int main() {
  int fd = open("/proc/ibpb", O_RDONLY);
  uint64_t t = 0;

  if (fd < 0) {
    perror("open");
    exit(-1);
  }
  while (1) {
    ssize_t err = read(fd, &t, sizeof(t));
    if (err != (ssize_t)sizeof(t)) {
      perror("read");
      exit(-1);
    }
    printf("%" PRIu64 "\n", t);
  }
}
the_stack_data/175143405.c
/* $Id: vuxml.c,v 1.1 2014/03/05 21:15:11 weerd Exp $ */ /* * Copyright (c) 2004 Daniel Hartmeier * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ static const char rcsid[] = "$Id: vuxml.c,v 1.1 2014/03/05 21:15:11 weerd Exp $"; #include <ctype.h> #include <errno.h> #include <stdio.h> #include <stdlib.h> #include <string.h> struct tag { unsigned char *name; unsigned char *options; struct tag *parent; struct tag *head; struct tag *next; struct tag *tail; }; struct tag *root = NULL, *parent = NULL; static void traverse(struct tag *t) { static char vid[128], date[128], topic[128]; static int state = 0; struct tag *u; switch (state) { case 0: if (!strcmp(t->name, "VULN") && !strncmp(t->options, "vid=\"", 5)) { strlcpy(vid, t->options + 5, sizeof(vid)); if (strlen(vid) > 0 && vid[strlen(vid) - 1] == '\"') vid[strlen(vid) - 1] = 0; state = 1; } break; case 1: if (!strcmp(t->name, "TOPIC")) state = 2; break; case 2: strlcpy(topic, t->options, sizeof(topic)); state = 3; break; case 3: if (!strcmp(t->name, "DATES")) state = 4; break; case 4: if (!strcmp(t->name, "ENTRY")) state = 5; break; case 5: strlcpy(date, t->options, sizeof(date)); printf("%s %s %s\n", date, vid, topic); state = 0; break; } for (u = t->head; u; u = u->next) traverse(u); } static unsigned char get(FILE *f) { static unsigned char buf[8192]; static unsigned high = 0, next = 0; if (next >= high) { next = 0; high = fread(buf, 1, sizeof(buf), f); if (high < 0) { perror("fread() failed"); exit(1); } else if (high == 0) return (0); } return buf[next++]; } static void opentag(FILE *f, unsigned char c) { unsigned char name[65535]; unsigned char opts[65535]; unsigned i; struct tag *tag; while (c == ' ' || c == '\t' || c == '\n') c = get(f); if (!c || c == '>') return; for (i = 0; c && c != ' ' && c != '>'; c = get(f)) name[i++] = toupper(c); name[i] = 0; i = 0; if (c == ' ') c = get(f); for (i = 0; c && c != '>'; c = get(f)) { if (c == '\t' || c == '\n') c = ' '; opts[i++] = c; } opts[i] = 0; tag = (struct tag *)malloc(sizeof(*tag)); if (tag == NULL) { perror("malloc() failed"); exit(1); } tag->name = strdup(name); if (tag->name == NULL) { perror("strdup() failed"); exit(1); } tag->options = strdup(opts); if (tag->options == NULL) { perror("strdup() failed"); exit(1); } tag->parent = parent; tag->head 
= tag->next = tag->tail = NULL; if (parent->tail == NULL) parent->head = parent->tail = tag; else { parent->tail->next = tag; parent->tail = tag; } if (strcmp(name, "BR") && strcmp(name, "P")) parent = tag; } static void closetag(FILE *f, unsigned char c) { unsigned char name[65535]; unsigned i; struct tag *old_parent; while (c == ' ' || c == '\t' || c == '\n') c = get(f); if (!c || c == '>') return; for (i = 0; c && c != '>'; c = get(f)) name[i++] = toupper(c); name[i] = 0; if (!strcmp(name, "BR") || !strcmp(name, "P")) return; old_parent = parent; while (parent != NULL && parent->parent != NULL && strcmp(name, parent->name)) parent = parent->parent; if (parent == NULL || parent->parent == NULL) { fprintf(stderr, "stack underflow (%s)\n", name); parent = old_parent; } else if (strcmp(name, parent->name)) fprintf(stderr, "non-matching closing tag (%s)\n", name); else parent = parent->parent; } static int empty(const char *s) { while (*s == ' ' || *s == '\t' || *s == '\n' || *s == '\r') s++; return (!*s); } static void texttag(FILE *f, unsigned char c) { unsigned char text[65535]; unsigned i; struct tag *tag; for (i = 0; c && c != '<'; c = get(f)) text[i++] = c; text[i] = 0; if (empty(text)) return; tag = (struct tag *)malloc(sizeof(*tag)); if (tag == NULL) { perror("malloc() failed"); exit(1); } tag->name = strdup(""); if (tag->name == NULL) { perror("strdup() failed"); exit(1); } tag->options = strdup(text); if (tag->options == NULL) { perror("strdup() failed"); exit(1); } tag->parent = parent; tag->head = tag->next = tag->tail = NULL; if (parent->tail == NULL) parent->head = parent->tail = tag; else { parent->tail->next = tag; parent->tail = tag; } } static struct tag * find_tag(struct tag *t, const char *name) { struct tag *h; if (!strcmp(t->name, name)) return (t); for (t = t->head; t; t = t->next) { h = find_tag(t, name); if (h) return (h); } return (NULL); } static void read_file(const char *filename) { FILE *f; unsigned char c; if (!strcmp(filename, "-")) f = stdin; else f = fopen(filename, "rb"); if (f == NULL) { fprintf(stderr, "fopen: %s: %s\n", filename, strerror(errno)); return; } root = malloc(sizeof(*root)); if (root == NULL) { fprintf(stderr, "malloc: %s\n", strerror(errno)); fclose(f); return; } root->name = strdup("root"); root->options = strdup(""); root->parent = NULL; root->head = root->next = root->tail = NULL; parent = root; while ((c = get(f))) { if (c != '<') texttag(f, c); if (!c) break; /* c == '<' */ c = get(f); if (c == '!' || c == '?') { while ((c = get(f)) != '>') ; } else if (c == '/') { c = get(f); closetag(f, c); } else opentag(f, c); } if (ferror(f)) { fprintf(stderr, "ferror: %s\n", strerror(errno)); free(root); root = NULL; } fclose(f); } int main(int argc, char *argv[]) { if (argc != 2) { fprintf(stderr, "%s <file>\n", argv[0]); return (1); } read_file(argv[1]); if (root == NULL) { fprintf(stderr, "read_file() failed, aborting\n"); return (1); } if (root->head) traverse(root->head); return (0); }
the_stack_data/237643402.c
/* Copyright 2014 Google LLC All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* american fuzzy lop - a trivial program to test the build -------------------------------------------------------- Written and maintained by Niki Gitinabard <[email protected]> */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> int main(int argc, char** argv) { // B1 char buf[128]; char* filename; filename = argv[1]; FILE *fp; /* opening file for reading */ fp = fopen(filename , "r"); int i = 0; int file_status = 1; if (fp != NULL) { // B2 i++; while (fgets(buf, 128, fp) != NULL) { // B3 i *= 2; if(strlen(buf) < 1) { //B4 printf("Hum?\n"); exit(1); } //B5 i *= 3; // if and else merged B6 set file status, B7 print if (buf[0] == '0') { // B6 file_status = 45; printf("Looks like a zero to me!\n"); } else { file_status = i; printf("A non-zero value? How quaint!\n"); } i += 5; } // B7 fclose(fp); } else { // B8 file_status = i; exit(2); } int j; for (j=0; j < 5000; j++) { // B9 i = j; } // B10 printf("%d\n", i); printf("%d\n", file_status); exit(0); }
the_stack_data/89746.c
#include <stdio.h> int max_c(int a0, int a1); int max_s(int a0, int a1); int main(int argc, char **argv) { int r; r = max_c(2, 4); printf("max_c(2, 4) = %d\n", r); r = max_s(2, 4); printf("max_s(2, 4) = %d\n", r); return 0; }
the_stack_data/61074700.c
/* * Copyright (c) 2017, 2018, Oracle and/or its affiliates. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials provided * with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors may be used to * endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. */ char main(void) { return 42; }
the_stack_data/68188.c
// Write a C program for Upper triangular matrix #include<stdio.h> int main() { int n; printf("Enter no of rows of square matrix\n"); scanf("%d",&n); int a[n][n]; printf("Enter %d elements\n",n*n); for(int i=0;i<n;i++) for(int j=0;j<n;j++) scanf("%d",&a[i][j]); printf("Entered matrix is\n"); for(int i=0;i<n;i++) { for(int j=0;j<n;j++) printf("%d\t",a[i][j]); printf("\n"); } printf("Upper triangular matrix is\n"); for(int i=0;i<n;i++) { for(int j=0;j<n;j++) if(i<=j) printf("%d\t",a[i][j]); else printf("0\t"); printf("\n"); } } /*Output: Enter no of rows of square matrix 3 Enter 9 elements 1 2 3 4 5 6 7 8 9 Entered matrix is 1 2 3 4 5 6 7 8 9 Upper triangular matrix is 1 2 3 0 5 6 0 0 9*/
the_stack_data/167330495.c
/* Matrix Vector multiplication with copying out of bounds memory on the Accelerator. Vector a allocates more space than need, doesn't affect the result of the computation. */ #include <stdio.h> #include <stdbool.h> #include <stdlib.h> #define C 512 int *a; int *b; int *c; int init(){ for(int i=0; i<C; i++){ for(int j=0; j<C; j++){ b[j+i*C]=1; } a[i]=1; c[i]=0; } return 0; } int Mult(){ #pragma omp target map(to:a[0:C*C],b[0:C*C]) map(tofrom:c[0:C]) device(0) { #pragma omp teams distribute parallel for for(int i=0; i<C; i++){ for(int j=0; j<C; j++){ c[i]+=b[j+i*C]*a[j]; } } } return 0; } int check(){ bool test = false; for(int i=0; i<C; i++){ if(c[i]!=C){ test = true; } } printf("Memory Access Issue visible: %s\n",test ? "true" : "false"); return 0; } int main(){ a = malloc(C*sizeof(int)); b = malloc(C*C*sizeof(int)); c = malloc(C*sizeof(int)); init(); Mult(); check(); free(a); free(b); free(c); return 0; }
the_stack_data/130871.c
int main() { float a, b; __ESBMC_assume(a==1 || a==0.5 || a==2 || a==3 || a==0.1); b=a; a/=2; a*=2; __ESBMC_assert(a==b, "float div works"); }
the_stack_data/97582.c
#include <assert.h> #include <stdlib.h> /* This is a different implementation for * Set. Since we don't want to copy all * of the files over, we separate this * different implementation into its own * folder, and then can compile from it * as needed, in the case where we use * this impelementation as opposed to the * one made before */ /* struct set and set are two differen things. * struct set is in the struct namespace, * and refers to the actual iplementation, * while set is a global id with type void *, * and is a type. Users can use this type if * they decide to include the header. */ struct set { unsigned count; }; struct object { unsigned count; struct set * in; }; /* We need these so that the new fn knows * how much memory to allocate for the * object based on its type */ static const size_t set_size = sizeof(struct set); static const size_t object_size = sizeof(struct object); const void * set = &set_size; const void * object = &object_size; /* NEW_H implementation */ /* Now, new is different, it allocates right * amount of memory. Of course, it has to * cast the void ptr to the right type to * do this, but this is fine, because we * know what the void ptr actually points to. */ void * new (const void * type, ...) { const size_t size = *(const size_t *)type; void * p = calloc(1, size); assert(p); return p; } void delete(void * item) { free(item); } /* SET_H implementation */ void * add(void * set_arg, const void * element_arg) { /* since we're not using the set adt type, * we can use the local variable set. But, * this level of overloading is getting bad - * we already have a struct set, a global * id set, and now a local variable set. * We'll strive to instead use the first * letter of the type for local id's, if * the id conflicts with the global type name */ struct set * s = set_arg; struct object * element = (void *)element_arg; assert(s); assert(element); if(!element -> in ) { element -> in = s; } else { assert(element -> in == s); } element->count++; s->count++; return element; } void * find (const void * set_arg, const void * element_arg) { const struct object * element = element_arg; assert(set_arg); assert(element); return element->in == set_arg ? (void *) element : 0; } void * drop(void * set_arg, const void * element_arg) { struct set * s = set_arg; struct object * element = find(s, element_arg); if(element) { element->count--; if(element->count == 0) { element->in = 0; } s->count--; } return element; } /* A new addition, the count function that lets * a client know how many elements are in a set. */ unsigned count(const void * set_arg) { const struct set * s = set_arg; assert(s); return s->count; } /* Copied over from the original set.c */ int differ(const void * a, const void * b) { return a != b; } /* Copied over from the origianl set.c */ int contains(const void * set_arg, const void * element_arg) { return find(set_arg, element_arg) != 0; }
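/*
 * Minimal usage sketch (added for illustration; not part of the original file).
 * It exercises the functions defined above to show the intended membership and
 * reference-count behaviour. The SET_DEMO guard is an assumption so the file can
 * still be linked as a library when the macro is not defined.
 */
#ifdef SET_DEMO
#include <stdio.h>

int main(void)
{
	void *s = new(set);
	void *a = new(object);
	void *b = new(object);

	add(s, a);                                   /* a->in is now s, set count == 1 */
	add(s, b);                                   /* set count == 2 */
	printf("count = %u\n", count(s));            /* prints 2 */
	printf("contains a? %d\n", contains(s, a));  /* prints 1 */

	drop(s, a);                                  /* a's count falls to 0, a leaves s */
	printf("contains a? %d\n", contains(s, a));  /* prints 0 */
	printf("count = %u\n", count(s));            /* prints 1 */

	delete(a);
	delete(b);
	delete(s);
	return 0;
}
#endif /* SET_DEMO */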
the_stack_data/154827398.c
/* * @f pkt-ccnb.c * @b CCN lite - parse, create and manipulate CCNb formatted packets * * Copyright (C) 2011-15, Christian Tschudin, University of Basel * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * File history: * 2011-03-13 created (cft): orig name ccnl-parse-ccnb.c * 2014-03-17 renamed to prepare for a world with many wire formats * 2014-03-20 extracted from ccnl-core.c * 2014-11-05 merged from pkt-ccnb-dec.c and pkt-ccnb-enc.c */ #ifdef USE_SUITE_CCNB #include "ccnl-pkt-ccnb.h" int ccnl_ccnb_consume(int typ, int num, unsigned char **buf, int *len, unsigned char **valptr, int *vallen); // ---------------------------------------------------------------------- // ccnb parsing support int ccnl_ccnb_dehead(unsigned char **buf, int *len, int *num, int *typ) { int i; int val = 0; if (*len > 0 && **buf == 0) { // end *num = *typ = 0; *buf += 1; *len -= 1; return 0; } for (i = 0; (unsigned int) i < sizeof(i) && i < *len; i++) { unsigned char c = (*buf)[i]; if ( c & 0x80 ) { *num = (val << 4) | ((c >> 3) & 0xf); *typ = c & 0x7; *buf += i+1; *len -= i+1; return 0; } val = (val << 7) | c; } return -1; } static int ccnl_ccnb_hunt_for_end(unsigned char **buf, int *len, unsigned char **valptr, int *vallen) { int typ, num; while (ccnl_ccnb_dehead(buf, len, &num, &typ) == 0) { if (num==0 && typ==0) return 0; if (ccnl_ccnb_consume(typ, num, buf, len, valptr, vallen) < 0) return -1; } return -1; } int ccnl_ccnb_consume(int typ, int num, unsigned char **buf, int *len, unsigned char **valptr, int *vallen) { if (typ == CCN_TT_BLOB || typ == CCN_TT_UDATA) { if (valptr) *valptr = *buf; if (vallen) *vallen = num; *buf += num, *len -= num; return 0; } if (typ == CCN_TT_DTAG || typ == CCN_TT_DATTR) return ccnl_ccnb_hunt_for_end(buf, len, valptr, vallen); // case CCN_TT_TAG, CCN_TT_ATTR: // case DTAG, DATTR: return -1; } int ccnl_ccnb_data2uint(unsigned char *cp, int len) { int i, val; for (i = 0, val = 0; i < len; i++) if (isdigit(cp[i])) val = 10*val + cp[i] - '0'; else return -1; return val; } struct ccnl_pkt_s* ccnl_ccnb_bytes2pkt(unsigned char *start, unsigned char **data, int *datalen) { struct ccnl_pkt_s *pkt; unsigned char *cp; int num, typ, len, oldpos; struct ccnl_prefix_s *p; DEBUGMSG(TRACE, "ccnl_ccnb_extract\n"); //pkt = (struct ccnl_pkt_s *) ccnl_calloc(1, sizeof(*pkt)); pkt = (struct ccnl_pkt_s *) ccnl_calloc(1, sizeof(*pkt)); if (!pkt) return NULL; pkt->suite = CCNL_SUITE_CCNB; pkt->val.final_block_id = -1; pkt->s.ccnb.scope = 3; pkt->s.ccnb.aok = 3; pkt->s.ccnb.maxsuffix = CCNL_MAX_NAME_COMP; pkt->pfx = p = ccnl_prefix_new(CCNL_SUITE_CCNB, CCNL_MAX_NAME_COMP); if (!p) { ccnl_free(pkt); return NULL; } p->compcnt = 0; oldpos = *data - start; while (ccnl_ccnb_dehead(data, datalen, &num, &typ) == 0) { if (num==0 && typ==0) // end break; if (typ == CCN_TT_DTAG) { switch (num) { case CCN_DTAG_NAME: p->nameptr = start + oldpos; for (;;) 
{ if (ccnl_ccnb_dehead(data, datalen, &num, &typ) != 0) goto Bail; if (num==0 && typ==0) break; if (typ == CCN_TT_DTAG && num == CCN_DTAG_COMPONENT && p->compcnt < CCNL_MAX_NAME_COMP) { if (ccnl_ccnb_hunt_for_end(data, datalen, p->comp + p->compcnt, p->complen + p->compcnt) < 0) goto Bail; p->compcnt++; } else { if (ccnl_ccnb_consume(typ, num, data, datalen, 0, 0) < 0) goto Bail; } } p->namelen = *data - p->nameptr; #ifdef USE_NFN if (p->compcnt > 0 && p->complen[p->compcnt-1] == 3 && !memcmp(p->comp[p->compcnt-1], "NFN", 3)) { p->nfnflags |= CCNL_PREFIX_NFN; p->compcnt--; } #ifdef USE_TIMEOUT_KEEPALIVE if (p->compcnt > 0 && p->complen[p->compcnt-1] == 5 && !memcmp(p->comp[p->compcnt-1], "ALIVE", 5)) { p->nfnflags |= CCNL_PREFIX_KEEPALIVE; p->compcnt--; } if (p->compcnt > 1 && p->complen[p->compcnt-2] == 12 && !memcmp(p->comp[p->compcnt-2], "INTERMEDIATE", 12)) { p->nfnflags |= CCNL_PREFIX_INTERMEDIATE; p->internum = ccnl_cmp2int(p->comp[p->compcnt-1], p->complen[p->compcnt-1]); p->compcnt -= 2; } #endif #endif break; case CCN_DTAG_CONTENT: if (ccnl_ccnb_consume(typ, num, data, datalen, &pkt->content, &pkt->contlen) < 0) goto Bail; oldpos = *data - start; continue; case CCN_DTAG_SCOPE: case CCN_DTAG_ANSWERORIGKIND: case CCN_DTAG_MINSUFFCOMP: case CCN_DTAG_MAXSUFFCOMP: case CCN_DTAG_NONCE: case CCN_DTAG_PUBPUBKDIGEST: if (ccnl_ccnb_hunt_for_end(data, datalen, &cp, &len) < 0) goto Bail; switch (num) { case CCN_DTAG_SCOPE: if (len == 1) pkt->s.ccnb.scope = isdigit(*cp) && (*cp < '3') ? *cp - '0' : -1; break; case CCN_DTAG_ANSWERORIGKIND: pkt->s.ccnb.aok = ccnl_ccnb_data2uint(cp, len); break; case CCN_DTAG_MINSUFFCOMP: pkt->s.ccnb.minsuffix = ccnl_ccnb_data2uint(cp, len); break; case CCN_DTAG_MAXSUFFCOMP: pkt->s.ccnb.maxsuffix = ccnl_ccnb_data2uint(cp, len); break; case CCN_DTAG_NONCE: if (!pkt->s.ccnb.nonce) pkt->s.ccnb.nonce = ccnl_buf_new(cp, len); break; case CCN_DTAG_PUBPUBKDIGEST: if (!pkt->s.ccnb.ppkd) pkt->s.ccnb.ppkd = ccnl_buf_new(cp, len); break; case CCN_DTAG_EXCLUDE: DEBUGMSG(WARNING, "ccnb 'exclude' field ignored\n"); break; } break; default: if (ccnl_ccnb_hunt_for_end(data, datalen, &cp, &len) < 0) goto Bail; } oldpos = *data - start; continue; } if (ccnl_ccnb_consume(typ, num, data, datalen, 0, 0) < 0) goto Bail; oldpos = *data - start; } pkt->pfx = p; pkt->buf = ccnl_buf_new(start, *data - start); // carefully rebase ptrs to new buf because of 64bit pointers: if (pkt->content) pkt->content = pkt->buf->data + (pkt->content - start); for (num = 0; num < p->compcnt; num++) p->comp[num] = pkt->buf->data + (p->comp[num] - start); if (p->nameptr) p->nameptr = pkt->buf->data + (p->nameptr - start); return pkt; Bail: free_packet(pkt); return NULL; } int ccnl_ccnb_unmkBinaryInt(unsigned char **data, int *datalen, unsigned int *result, unsigned char *width) { unsigned char *cp = *data; int len = *datalen, typ, num; unsigned int val = 0; if (ccnl_ccnb_dehead(&cp, &len, &num, &typ) != 0 || typ != CCN_TT_BLOB) return -1; if (width) { if (*width < num) num = *width; else *width = num; } // big endian (network order): while (num-- > 0 && len > 0) { val = (val << 8) | *cp++; len--; } *result = val; if (len < 1 || *cp != '\0') // no end-of-entry return -1; *data = cp+1; *datalen = len-1; return 0; } // ---------------------------------------------------------------------- #ifdef NEEDS_PREFIX_MATCHING // returns: 0=match, -1=otherwise int ccnl_ccnb_cMatch(struct ccnl_pkt_s *p, struct ccnl_content_s *c) { assert(p); assert(p->suite == CCNL_SUITE_CCNB); if (!ccnl_i_prefixof_c(p->pfx, 
p->s.ccnb.minsuffix, p->s.ccnb.maxsuffix, c)) return -1; if (p->s.ccnb.ppkd && !buf_equal(p->s.ccnb.ppkd, c->pkt->s.ccnb.ppkd)) return -1; // FIXME: should check stale bit in aok here return 0; } #endif // ---------------------------------------------------------------------- // ccnb encoding support #ifdef NEEDS_PACKET_CRAFTING int ccnl_ccnb_mkHeader(unsigned char *buf, unsigned int num, unsigned int tt) { unsigned char tmp[100]; int len = 0, i; *tmp = 0x80 | ((num & 0x0f) << 3) | tt; len = 1; num = num >> 4; while (num > 0) { tmp[len++] = num & 0x7f; num = num >> 7; } for (i = len-1; i >= 0; i--) *buf++ = tmp[i]; return len; } int ccnl_ccnb_addBlob(unsigned char *out, char *cp, int cnt) { int len; len = ccnl_ccnb_mkHeader(out, cnt, CCN_TT_BLOB); memcpy(out+len, cp, cnt); len += cnt; return len; } int ccnl_ccnb_mkField(unsigned char *out, unsigned int num, int typ, unsigned char *data, int datalen) { int len; len = ccnl_ccnb_mkHeader(out, num, CCN_TT_DTAG); len += ccnl_ccnb_mkHeader(out + len, datalen, typ); memcpy(out + len, data, datalen); len += datalen; out[len++] = 0; // end-of-field return len; } int ccnl_ccnb_mkBlob(unsigned char *out, unsigned int num, unsigned int tt, char *cp, int cnt) { (void) tt; return ccnl_ccnb_mkField(out, num, CCN_TT_BLOB, (unsigned char*) cp, cnt); } int ccnl_ccnb_mkStrBlob(unsigned char *out, unsigned int num, unsigned int tt, char *str) { (void) tt; return ccnl_ccnb_mkField(out, num, CCN_TT_BLOB, (unsigned char*) str, strlen(str)); } int ccnl_ccnb_mkBinaryInt(unsigned char *out, unsigned int num, unsigned int tt, unsigned int val, int bytes) { int len = ccnl_ccnb_mkHeader(out, num, tt); if (!bytes) { for (bytes = sizeof(val) - 1; bytes > 0; bytes--) if (val >> (8*bytes)) break; bytes++; } len += ccnl_ccnb_mkHeader(out+len, bytes, CCN_TT_BLOB); while (bytes > 0) { // big endian bytes--; out[len++] = 0x0ff & (val >> (8*bytes)); } out[len++] = 0; // end-of-entry return len; } int ccnl_ccnb_mkComponent(unsigned char *val, int vallen, unsigned char *out) { int len; len = ccnl_ccnb_mkHeader(out, CCN_DTAG_COMPONENT, CCN_TT_DTAG); // comp len += ccnl_ccnb_mkHeader(out+len, vallen, CCN_TT_BLOB); memcpy(out+len, val, vallen); len += vallen; out[len++] = 0; // end-of-component return len; } int ccnl_ccnb_mkName(struct ccnl_prefix_s *name, unsigned char *out) { int len, i; len = ccnl_ccnb_mkHeader(out, CCN_DTAG_NAME, CCN_TT_DTAG); // name for (i = 0; i < name->compcnt; i++) { len += ccnl_ccnb_mkComponent(name->comp[i], name->complen[i], out+len); } #ifdef USE_NFN if (name->nfnflags & CCNL_PREFIX_NFN) { len += ccnl_ccnb_mkComponent((unsigned char*) "NFN", 3, out+len); } #endif out[len++] = 0; // end-of-name return len; } // ---------------------------------------------------------------------- int ccnl_ccnb_fillInterest(struct ccnl_prefix_s *name, int *nonce, unsigned char *out, int outlen) { (void) outlen; int len = 0; len = ccnl_ccnb_mkHeader(out, CCN_DTAG_INTEREST, CCN_TT_DTAG); // interest len += ccnl_ccnb_mkName(name, out+len); if (nonce) { len += ccnl_ccnb_mkHeader(out+len, CCN_DTAG_NONCE, CCN_TT_DTAG); len += ccnl_ccnb_mkHeader(out+len, sizeof(unsigned int), CCN_TT_BLOB); memcpy(out+len, (void*)nonce, sizeof(unsigned int)); len += sizeof(unsigned int); } out[len++] = 0; // end-of-interest return len; } int ccnl_ccnb_fillContent(struct ccnl_prefix_s *name, unsigned char *data, int datalen, int *contentpos, unsigned char *out) { int len = 0; len = ccnl_ccnb_mkHeader(out, CCN_DTAG_CONTENTOBJ, CCN_TT_DTAG); len += ccnl_ccnb_mkName(name, out+len); len += 
ccnl_ccnb_mkHeader(out+len, CCN_DTAG_CONTENT, CCN_TT_DTAG); len += ccnl_ccnb_mkHeader(out+len, datalen, CCN_TT_BLOB); if (contentpos) *contentpos = len; memcpy(out+len, data, datalen); if (contentpos) *contentpos = len; len += datalen; out[len++] = 0; // end-of-content out[len++] = 0; // end-of-content obj return len; } #endif // NEEDS_PACKET_CRAFTING #endif // USE_SUITE_CCNB // eof
the_stack_data/8685.c
#include<stdio.h>
#include<stdlib.h>

struct po {
    int asd, ttm;
    struct po *p;
};
typedef struct po pontero;

void inserir(int val, int mrm, pontero *cha) {
    pontero *neww;
    neww = malloc(sizeof(pontero));
    neww->asd = val;
    neww->ttm = mrm;
    neww->p = cha->p;
    cha->p = neww;
}

void remover(pontero *cha) {
    if (cha->p == NULL) {
        printf("empty list");
    } else if (cha->p->p == NULL) {
        pontero *morto;
        morto = cha->p;
        cha->p = morto->p;
        free(morto);
    } else {
        remover(cha->p);
    }
}

void mostrar(pontero *cha) {
    if (cha->p != NULL) {
        printf("stored values: %i %i\n", cha->p->asd, cha->p->ttm);
        mostrar(cha->p);
    }
}

int main(void) {
    pontero *cha;
    cha = malloc(sizeof(pontero));
    cha->p = NULL;

    int val = 1, mrm = 2;
    inserir(val, mrm, cha); // exercise the list: insert one node, print, then remove it
    mostrar(cha);
    remover(cha);

    free(cha);
    return 0;
}
the_stack_data/50782.c
#ifdef __APPLE__ #include "frida-gumjs.h" #include "debug.h" #include "lib.h" #include "util.h" extern mach_port_t mach_task_self(); extern void gum_darwin_enumerate_modules(mach_port_t task, GumFoundModuleFunc func, gpointer user_data); static guint64 text_base = 0; static guint64 text_limit = 0; static gboolean lib_get_main_module(const GumModuleDetails *details, gpointer user_data) { GumDarwinModule **ret = (GumDarwinModule **)user_data; GumDarwinModule * module = gum_darwin_module_new_from_memory( details->path, mach_task_self(), details->range->base_address, GUM_DARWIN_MODULE_FLAGS_NONE, NULL); OKF("Found main module: %s", module->name); *ret = module; return FALSE; } gboolean lib_get_text_section(const GumDarwinSectionDetails *details, gpointer user_data) { UNUSED_PARAMETER(user_data); static size_t idx = 0; char text_name[] = "__text"; OKF("Section: %2lu - base: 0x%016" G_GINT64_MODIFIER "X size: 0x%016" G_GINT64_MODIFIER "X %s", idx++, details->vm_address, details->vm_address + details->size, details->section_name); if (memcmp(details->section_name, text_name, sizeof(text_name)) == 0 && text_base == 0) { text_base = details->vm_address; text_limit = details->vm_address + details->size; OKF("> text_addr: 0x%016" G_GINT64_MODIFIER "X", text_base); OKF("> text_limit: 0x%016" G_GINT64_MODIFIER "X", text_limit); } return TRUE; } void lib_config(void) { } void lib_init(void) { GumDarwinModule *module = NULL; gum_darwin_enumerate_modules(mach_task_self(), lib_get_main_module, &module); gum_darwin_module_enumerate_sections(module, lib_get_text_section, NULL); } guint64 lib_get_text_base(void) { if (text_base == 0) FATAL("Lib not initialized"); return text_base; } guint64 lib_get_text_limit(void) { if (text_limit == 0) FATAL("Lib not initialized"); return text_limit; } #endif
the_stack_data/158233.c
/* * eChronos Real-Time Operating System * Copyright (c) 2017, Commonwealth Scientific and Industrial Research * Organisation (CSIRO) ABN 41 687 119 230. * * All rights reserved. CSIRO is willing to grant you a licence to the eChronos * real-time operating system under the terms of the CSIRO_BSD_MIT license. See * the file "LICENSE_CSIRO_BSD_MIT.txt" for details. * * @TAG(CSIRO_BSD_MIT) */ /*<module> <code_gen>template</code_gen> </module>*/ #include <stdio.h> int main(void) { puts("{{phrase}}"); return 0; }
the_stack_data/170453067.c
#include<stdio.h> void fun() { printf("lmoaaoo"); }
the_stack_data/118889211.c
//70 points//Clear vers
/*
Week 16 Question 1
Description
Slitherlink (數迴) is a puzzle game I like very much; for the detailed rules see
https://zh.puzzle-loop.com/
For the last assignment of this semester, write a program that solves Slitherlink.
If you can design a data structure to store the game and explain how you plan to use it to try to solve the puzzle, you get 70 points.
If you can write a program using your data structure, with an input/output format of your own design, that tries to solve a 5 x 5 normal-difficulty puzzle but does not succeed, you get 80 points.
If you can write a program using your data structure, with an input/output format of your own design, that solves a 5 x 5 normal-difficulty puzzle, you get 95 points.
If you can write a program using your data structure, with an input/output format of your own design, that solves a 5 x 5 hard-difficulty puzzle, you get 100 points.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define wall 9
#define dot 8

int main(void)
{
	char maps[13][13];
	for (int i = 0; i < 13; i++)
		for (int j = 0; j < 13; j++)
			maps[i][j] = 32;
	for (int i = 0; i < 13; i++)
	{
		maps[0][i] = wall;
		maps[i][0] = wall;
		maps[i][12] = wall;
		maps[12][i] = wall;
	}
	for (int i = 1; i <= 11; i += 2)
	{
		for (int j = 1; j <= 11; j += 2)
			maps[i][j] = dot;
	}
	char tmp[16];
	for (int i = 1; i <= 5; i++)
	{
		fgets(tmp, sizeof(tmp), stdin); // read one row of five clue characters
		maps[2 * i][2] = tmp[0];
		//printf("%c\n", tmp[0]);
		//printf("maps:%c\n", maps[2 * i][2]);
		maps[2 * i][4] = tmp[1];
		maps[2 * i][6] = tmp[2];
		maps[2 * i][8] = tmp[3];
		maps[2 * i][10] = tmp[4];
	}
	printf("\n=================tmp=======================================\n");
	for (int i = 0; i < 13; i++)
	{
		for (int j = 0; j < 13; j++)
		{
			if (maps[i][j] == 32)
				printf("+");
			else if (maps[i][j] == wall)
				printf("#");
			else if (maps[i][j] == dot)
				printf(".");
			else if (maps[i][j] >= 48 && maps[i][j] <= 57)
				printf("%c", maps[i][j]);
			else
				printf("!");
		}
		printf("\n");
	}
	printf("\n========================================================\n");
	return 0;
}
the_stack_data/211079784.c
#include <stdio.h> // for printf function // Function prototypes void mergesort(int[], int, int); void merge(int[], int, int, int); int main() { int arr_size = 6; int arr[6] = {10, 9, 8, 7, 6, 5}; mergesort(arr, 0, arr_size); // Print sorted array for (int i = 0; i < arr_size; i++) { printf("%d ", arr[i]); } printf("\n"); return 0; } /* * Recursively split the array and call the merge function * Main entry point of Merge Sort */ void mergesort(int arr[], int l, int r) { if(l >= r) return; int m = (l + r) / 2; mergesort(arr, l, m); mergesort(arr, m+1, r); merge(arr, l, m, r); } // Merges the arrays in ascending order of elements void merge(int arr[], int l, int m, int r) { int i, j, k; int n1 = m - l + 1; int n2 = r - m; int L[n1], M[n2]; for(i = 0; i < n1; ++i) L[i] = arr[l+i]; for(j = 0; j < n2; ++j) M[j] = arr[m + 1 + j]; i = 0; j = 0; k = l; while(i < n1 && j < n2) { if(L[i] <= M[j]) { arr[k] = L[i]; ++i; } else { arr[k] = M[j]; ++j; } ++k; } while(i < n1) { arr[k] = L[i]; ++i; ++k; } while(j < n2) { arr[k] = M[j]; ++j; ++k; } }
the_stack_data/11918.c
#include <stdio.h> int main() { for(int a; scanf("%d", &a) != EOF; printf("%d\n", a)) ; return 0; }
the_stack_data/330731.c
/** * @file rand.c * @provides srand, rand. * * $Id: rand.c 2020 2009-08-13 17:50:08Z mschul $ */ /* Embedded Xinu, Copyright (C) 2009. All rights reserved. */ static unsigned long randx = 1; /** * Sets the random seed. * @param x random seed */ void srand(unsigned long x) { randx = x; } /** * Generates a random long. * @return random long */ unsigned long rand(void) { return (((randx = randx * 1103515245 + 12345) >> 16) & 077777); }
the_stack_data/7693.c
/* ************************************************************************** */ /* */ /* ::: :::::::: */ /* ft_iterative_factorial.c :+: :+: :+: */ /* +:+ +:+ +:+ */ /* By: anajmi <[email protected]> +#+ +:+ +#+ */ /* +#+#+#+#+#+ +#+ */ /* Created: 2021/07/07 11:08:20 by anajmi #+# #+# */ /* Updated: 2021/07/07 12:16:45 by anajmi ### ########.fr */ /* */ /* ************************************************************************** */ int ft_iterative_factorial(int nb) { int fact; int i; fact = 1; if (nb == 0 || nb == 1) return (fact); else if (nb < 0) return (0); i = 1; while (i <= nb) { fact = fact * i; i++; } return (fact); }
the_stack_data/3263383.c
//
// Created by jrx on 2020/5/11.
//

/*
Semaphore operations:
1. Definition
   struct semaphore sem;
2. Initialization
   static inline void sema_init(struct semaphore *sem, int val)
   // val is the initial value of the semaphore: at most val processes may access the
   // shared resource (execute the critical-section code) at the same time.
3. Acquiring the semaphore
   // If the semaphore cannot be acquired, the current process sleeps and cannot be woken
   // by a signal, so this must not be used in interrupt context.
   extern void down(struct semaphore *sem)
   // Like down(), except that the sleep can be interrupted by a signal. Returns 0 when
   // the semaphore was acquired successfully.
   extern int __must_check down_interruptible(struct semaphore *sem)
   // Returns immediately whether or not the semaphore was acquired (0 on success).
   // It never sleeps, so it can be used in interrupt context.
   extern int __must_check down_trylock(struct semaphore *sem)
4. Releasing the semaphore
   extern void up(struct semaphore *sem)

Usage:
   struct semaphore sem;
   sema_init(&sem, 1);
   down(&sem);
   // critical-section code
   ...
   up(&sem);

Mutex:
1. Definition
   struct mutex my_mutex;
2. Initialization
   mutex_init(&my_mutex)
3. Acquiring the mutex
   void mutex_lock(struct mutex *lock);
   // The only difference between these two is whether the wait can be interrupted by a
   // signal: mutex_lock() cannot be interrupted, mutex_lock_interruptible() can.
   int mutex_lock_interruptible(struct mutex *lock);
   int mutex_trylock(struct mutex *lock); // acquiring the mutex this way never blocks
4. Releasing the mutex
   void mutex_unlock(struct mutex *lock);

Completion:
// A completion lets one execution unit wait until another execution unit has finished
// some piece of work, i.e. when one code path must run before another may continue.
1. Definition
   struct completion my_completion;
2. Initialization
   init_completion(&my_completion)
   // Steps 1 and 2 can be combined with DECLARE_COMPLETION(my_completion).
3. Waiting for the completion
   // If completion.done == 0, the caller blocks until another path completes it; each
   // call to wait_for_completion() decrements done by 1.
   void wait_for_completion(struct completion *c)
   // The difference between these two is that the one above cannot be interrupted while
   // waiting, while the one below can.
   int wait_for_completion_interruptible(struct completion *c)
4. Signalling the completion
   void complete(struct completion *c)      // wakes one waiting execution unit, done += 1
   void complete_all(struct completion *c)  // wakes every unit waiting on the same
                                            // completion; done is set to its maximum value
*/
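
/*
 * Minimal illustrative sketch of the APIs described above (added for clarity; it is not
 * from the original note). It assumes it is compiled as part of a kernel module build,
 * hence the __KERNEL__ guard; the demo_* names are made up for the example.
 */
#ifdef __KERNEL__
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/mutex.h>
#include <linux/completion.h>

static struct semaphore demo_sem;
static DEFINE_MUTEX(demo_mutex);
static DECLARE_COMPLETION(demo_done);

static int __init demo_init(void)
{
	sema_init(&demo_sem, 1); /* binary semaphore: one holder at a time */

	down(&demo_sem);         /* enter critical section */
	/* ... code protected by the semaphore ... */
	up(&demo_sem);

	mutex_lock(&demo_mutex); /* same pattern with a mutex */
	/* ... code protected by the mutex ... */
	mutex_unlock(&demo_mutex);

	complete(&demo_done);    /* signal that initialization finished */
	return 0;
}

static void __exit demo_exit(void)
{
	/* returns immediately here because demo_init() already called complete() */
	wait_for_completion(&demo_done);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
#endif /* __KERNEL__ */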
the_stack_data/864637.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

void mat2d_prod(double *c, int mc, int nc, double *a, int ma, int na, double *b, int mb, int nb)
{
	// matrix product c = a*b; the shared dimension is na (columns of a), which must equal mb (rows of b)
	int i, j, k;
	double aik, bkj, cij;

	for (i = 0; i < mc; i++)
		for (j = 0; j < nc; j++) {
			cij = 0.0;
			for (k = 0; k < na; k++) {
				aik = *((a + i*na) + k);
				bkj = *((b + k*nb) + j);
				cij = cij + aik*bkj;
			}
			*((c + i*nc) + j) = cij;
		}
}
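
/*
 * Illustrative driver (added for clarity; not part of the original file). It multiplies
 * a 2x3 matrix by a 3x2 matrix with the routine above. The MAT2D_DEMO guard is an
 * assumption so the translation unit can still be used as a plain library.
 */
#ifdef MAT2D_DEMO
int main(void)
{
	/* A is 2x3, B is 3x2, so C = A*B is 2x2: expected {{58, 64}, {139, 154}} */
	double a[2][3] = { {1, 2, 3}, {4, 5, 6} };
	double b[3][2] = { {7, 8}, {9, 10}, {11, 12} };
	double c[2][2];
	int i, j;

	mat2d_prod(&c[0][0], 2, 2, &a[0][0], 2, 3, &b[0][0], 3, 2);

	for (i = 0; i < 2; i++) {
		for (j = 0; j < 2; j++)
			printf("%8.2f", c[i][j]);
		printf("\n");
	}
	return 0;
}
#endif /* MAT2D_DEMO */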
the_stack_data/243892128.c
/* { dg-do compile } */ /* { dg-options "-O -fwrapv -fdump-tree-fre-details" } */ /* LLVM LOCAL test not applicable */ /* { dg-require-fdump "" } */ /* From PR14844. */ int foo (int a, int b) { long long aa = a; long long bb = b; return aa + bb; } /* { dg-final { scan-tree-dump "Replaced \\\(int\\\) aa_.*with a_" "fre" } } */ /* { dg-final { cleanup-tree-dump "fre" } } */
the_stack_data/154829006.c
// KASAN: invalid-free in rsi_91x_deinit // https://syzkaller.appspot.com/bug?id=426fbebc1eac728afa08e52b1bcf8171c9413e29 // status:open // autogenerated by syzkaller (https://github.com/google/syzkaller) #define _GNU_SOURCE #include <endian.h> #include <errno.h> #include <fcntl.h> #include <stdarg.h> #include <stdbool.h> #include <stddef.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/mount.h> #include <sys/stat.h> #include <sys/syscall.h> #include <sys/types.h> #include <unistd.h> #include <linux/usb/ch9.h> unsigned long long procid; static void sleep_ms(uint64_t ms) { usleep(ms * 1000); } #define USB_MAX_EP_NUM 32 struct usb_device_index { struct usb_device_descriptor* dev; struct usb_config_descriptor* config; unsigned config_length; struct usb_interface_descriptor* iface; struct usb_endpoint_descriptor* eps[USB_MAX_EP_NUM]; unsigned eps_num; }; static bool parse_usb_descriptor(char* buffer, size_t length, struct usb_device_index* index) { if (length < sizeof(*index->dev) + sizeof(*index->config) + sizeof(*index->iface)) return false; index->dev = (struct usb_device_descriptor*)buffer; index->config = (struct usb_config_descriptor*)(buffer + sizeof(*index->dev)); index->config_length = length - sizeof(*index->dev); index->iface = (struct usb_interface_descriptor*)(buffer + sizeof(*index->dev) + sizeof(*index->config)); index->eps_num = 0; size_t offset = 0; while (true) { if (offset == length) break; if (offset + 1 < length) break; uint8_t length = buffer[offset]; uint8_t type = buffer[offset + 1]; if (type == USB_DT_ENDPOINT) { index->eps[index->eps_num] = (struct usb_endpoint_descriptor*)(buffer + offset); index->eps_num++; } if (index->eps_num == USB_MAX_EP_NUM) break; offset += length; } return true; } enum usb_fuzzer_event_type { USB_FUZZER_EVENT_INVALID, USB_FUZZER_EVENT_CONNECT, USB_FUZZER_EVENT_DISCONNECT, USB_FUZZER_EVENT_SUSPEND, USB_FUZZER_EVENT_RESUME, USB_FUZZER_EVENT_CONTROL, }; struct usb_fuzzer_event { uint32_t type; uint32_t length; char data[0]; }; struct usb_fuzzer_init { uint64_t speed; const char* driver_name; const char* device_name; }; struct usb_fuzzer_ep_io { uint16_t ep; uint16_t flags; uint32_t length; char data[0]; }; #define USB_FUZZER_IOCTL_INIT _IOW('U', 0, struct usb_fuzzer_init) #define USB_FUZZER_IOCTL_RUN _IO('U', 1) #define USB_FUZZER_IOCTL_EP0_READ _IOWR('U', 2, struct usb_fuzzer_event) #define USB_FUZZER_IOCTL_EP0_WRITE _IOW('U', 3, struct usb_fuzzer_ep_io) #define USB_FUZZER_IOCTL_EP_ENABLE _IOW('U', 4, struct usb_endpoint_descriptor) #define USB_FUZZER_IOCTL_EP_WRITE _IOW('U', 6, struct usb_fuzzer_ep_io) #define USB_FUZZER_IOCTL_CONFIGURE _IO('U', 8) #define USB_FUZZER_IOCTL_VBUS_DRAW _IOW('U', 9, uint32_t) int usb_fuzzer_open() { return open("/sys/kernel/debug/usb-fuzzer", O_RDWR); } int usb_fuzzer_init(int fd, uint32_t speed, const char* driver, const char* device) { struct usb_fuzzer_init arg; arg.speed = speed; arg.driver_name = driver; arg.device_name = device; return ioctl(fd, USB_FUZZER_IOCTL_INIT, &arg); } int usb_fuzzer_run(int fd) { return ioctl(fd, USB_FUZZER_IOCTL_RUN, 0); } int usb_fuzzer_ep0_read(int fd, struct usb_fuzzer_event* event) { return ioctl(fd, USB_FUZZER_IOCTL_EP0_READ, event); } int usb_fuzzer_ep0_write(int fd, struct usb_fuzzer_ep_io* io) { return ioctl(fd, USB_FUZZER_IOCTL_EP0_WRITE, io); } int usb_fuzzer_ep_write(int fd, struct usb_fuzzer_ep_io* io) { return ioctl(fd, USB_FUZZER_IOCTL_EP_WRITE, io); } int usb_fuzzer_ep_enable(int fd, struct usb_endpoint_descriptor* desc) 
{ return ioctl(fd, USB_FUZZER_IOCTL_EP_ENABLE, desc); } int usb_fuzzer_configure(int fd) { return ioctl(fd, USB_FUZZER_IOCTL_CONFIGURE, 0); } int usb_fuzzer_vbus_draw(int fd, uint32_t power) { return ioctl(fd, USB_FUZZER_IOCTL_VBUS_DRAW, power); } #define USB_MAX_PACKET_SIZE 1024 struct usb_fuzzer_control_event { struct usb_fuzzer_event inner; struct usb_ctrlrequest ctrl; }; struct usb_fuzzer_ep_io_data { struct usb_fuzzer_ep_io inner; char data[USB_MAX_PACKET_SIZE]; }; struct vusb_connect_string_descriptor { uint32_t len; char* str; } __attribute__((packed)); struct vusb_connect_descriptors { uint32_t qual_len; char* qual; uint32_t bos_len; char* bos; uint32_t strs_len; struct vusb_connect_string_descriptor strs[0]; } __attribute__((packed)); static volatile long syz_usb_connect(volatile long a0, volatile long a1, volatile long a2, volatile long a3) { int64_t speed = a0; int64_t dev_len = a1; char* dev = (char*)a2; struct vusb_connect_descriptors* conn_descs = (struct vusb_connect_descriptors*)a3; if (!dev) return -1; struct usb_device_index index; memset(&index, 0, sizeof(index)); int rv = parse_usb_descriptor(dev, dev_len, &index); if (!rv) return -1; int fd = usb_fuzzer_open(); if (fd < 0) return -1; char device[32]; sprintf(&device[0], "dummy_udc.%llu", procid); rv = usb_fuzzer_init(fd, speed, "dummy_udc", &device[0]); if (rv < 0) return -1; rv = usb_fuzzer_run(fd); if (rv < 0) return -1; bool done = false; while (!done) { char* response_data = NULL; uint32_t response_length = 0; unsigned ep; uint8_t str_idx; struct usb_fuzzer_control_event event; event.inner.type = 0; event.inner.length = sizeof(event.ctrl); rv = usb_fuzzer_ep0_read(fd, (struct usb_fuzzer_event*)&event); if (rv < 0) return -1; if (event.inner.type != USB_FUZZER_EVENT_CONTROL) continue; switch (event.ctrl.bRequestType & USB_TYPE_MASK) { case USB_TYPE_STANDARD: switch (event.ctrl.bRequest) { case USB_REQ_GET_DESCRIPTOR: switch (event.ctrl.wValue >> 8) { case USB_DT_DEVICE: response_data = (char*)index.dev; response_length = sizeof(*index.dev); goto reply; case USB_DT_CONFIG: response_data = (char*)index.config; response_length = index.config_length; goto reply; case USB_DT_STRING: str_idx = (uint8_t)event.ctrl.wValue; if (str_idx >= conn_descs->strs_len) goto reply; response_data = conn_descs->strs[str_idx].str; response_length = conn_descs->strs[str_idx].len; goto reply; case USB_DT_BOS: response_data = conn_descs->bos; response_length = conn_descs->bos_len; goto reply; case USB_DT_DEVICE_QUALIFIER: response_data = conn_descs->qual; response_length = conn_descs->qual_len; goto reply; default: exit(1); continue; } break; case USB_REQ_SET_CONFIGURATION: rv = usb_fuzzer_vbus_draw(fd, index.config->bMaxPower); if (rv < 0) return -1; rv = usb_fuzzer_configure(fd); if (rv < 0) return -1; for (ep = 0; ep < index.eps_num; ep++) { rv = usb_fuzzer_ep_enable(fd, index.eps[ep]); if (rv < 0) exit(1); } done = true; goto reply; default: exit(1); continue; } break; default: exit(1); continue; } struct usb_fuzzer_ep_io_data response; reply: response.inner.ep = 0; response.inner.flags = 0; if (response_length > sizeof(response.data)) response_length = 0; response.inner.length = response_length; if (response_data) memcpy(&response.data[0], response_data, response_length); if (event.ctrl.wLength < response.inner.length) response.inner.length = event.ctrl.wLength; usb_fuzzer_ep0_write(fd, (struct usb_fuzzer_ep_io*)&response); } sleep_ms(200); return fd; } int main(void) { syscall(__NR_mmap, 0x20000000, 0x1000000, 3, 0x32, -1, 0); 
*(uint8_t*)0x20000000 = 0x12; *(uint8_t*)0x20000001 = 1; *(uint16_t*)0x20000002 = 0; *(uint8_t*)0x20000004 = 0x6e; *(uint8_t*)0x20000005 = 0x4e; *(uint8_t*)0x20000006 = 0x10; *(uint8_t*)0x20000007 = 8; *(uint16_t*)0x20000008 = 0x1618; *(uint16_t*)0x2000000a = 0x9113; *(uint16_t*)0x2000000c = 0x3221; *(uint8_t*)0x2000000e = 0; *(uint8_t*)0x2000000f = 0; *(uint8_t*)0x20000010 = 0; *(uint8_t*)0x20000011 = 1; *(uint8_t*)0x20000012 = 9; *(uint8_t*)0x20000013 = 2; *(uint16_t*)0x20000014 = 0x40; *(uint8_t*)0x20000016 = 1; *(uint8_t*)0x20000017 = 0xfc; *(uint8_t*)0x20000018 = 0; *(uint8_t*)0x20000019 = 0x60; *(uint8_t*)0x2000001a = 0; *(uint8_t*)0x2000001b = 9; *(uint8_t*)0x2000001c = 4; *(uint8_t*)0x2000001d = 0x73; *(uint8_t*)0x2000001e = 0; *(uint8_t*)0x2000001f = 4; *(uint8_t*)0x20000020 = 0xf8; *(uint8_t*)0x20000021 = 0xfa; *(uint8_t*)0x20000022 = 0xb0; *(uint8_t*)0x20000023 = 0; *(uint8_t*)0x20000024 = 7; *(uint8_t*)0x20000025 = 5; *(uint8_t*)0x20000026 = 3; *(uint8_t*)0x20000027 = 0xa; *(uint16_t*)0x20000028 = 4; *(uint8_t*)0x2000002a = 2; *(uint8_t*)0x2000002b = 0x19; *(uint8_t*)0x2000002c = 1; *(uint8_t*)0x2000002d = 2; *(uint8_t*)0x2000002e = 2; *(uint8_t*)0x2000002f = 7; *(uint8_t*)0x20000030 = 5; *(uint8_t*)0x20000031 = 2; *(uint8_t*)0x20000032 = 3; *(uint16_t*)0x20000033 = 0; *(uint8_t*)0x20000035 = 0x7e; *(uint8_t*)0x20000036 = 3; *(uint8_t*)0x20000037 = 4; *(uint8_t*)0x20000038 = 7; *(uint8_t*)0x20000039 = 5; *(uint8_t*)0x2000003a = 5; *(uint8_t*)0x2000003b = 7; *(uint16_t*)0x2000003c = 0xff; *(uint8_t*)0x2000003e = 5; *(uint8_t*)0x2000003f = 1; *(uint8_t*)0x20000040 = 0xa3; *(uint8_t*)0x20000041 = 2; *(uint8_t*)0x20000042 = 0x23; *(uint8_t*)0x20000043 = 2; *(uint8_t*)0x20000044 = 0x2f; *(uint8_t*)0x20000045 = 7; *(uint8_t*)0x20000046 = 5; *(uint8_t*)0x20000047 = 9; *(uint8_t*)0x20000048 = 0x15; *(uint16_t*)0x20000049 = 0x6d; *(uint8_t*)0x2000004b = 7; *(uint8_t*)0x2000004c = 0xd; *(uint8_t*)0x2000004d = 0x20; *(uint8_t*)0x2000004e = 2; *(uint8_t*)0x2000004f = 0xf; *(uint8_t*)0x20000050 = 2; *(uint8_t*)0x20000051 = 0x13; syz_usb_connect(9, 0x52, 0x20000000, 0); return 0; }
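For readability, the raw byte pokes above can be decoded: the first 18 bytes written at 0x20000000 form a standard USB device descriptor (length 0x12, type 1), and the bytes from offset 0x12 onward are the configuration, interface and endpoint descriptors that syz_usb_connect later parses with parse_usb_descriptor. A minimal decoding sketch, assuming the usual struct usb_device_descriptor layout from linux/usb/ch9.h (the variable name is illustrative and not part of the reproducer):

/* Decoding aid only: same values as the byte writes above, in struct form. */
#include <linux/usb/ch9.h>

static const struct usb_device_descriptor decoded_dev = {
    .bLength = 0x12,                 /* byte at 0x20000000 */
    .bDescriptorType = USB_DT_DEVICE,/* byte at 0x20000001 */
    .bcdUSB = 0x0000,
    .bDeviceClass = 0x6e,
    .bDeviceSubClass = 0x4e,
    .bDeviceProtocol = 0x10,
    .bMaxPacketSize0 = 8,
    .idVendor = 0x1618,              /* *(uint16_t*)0x20000008 */
    .idProduct = 0x9113,             /* *(uint16_t*)0x2000000a */
    .bcdDevice = 0x3221,
    .iManufacturer = 0,
    .iProduct = 0,
    .iSerialNumber = 0,
    .bNumConfigurations = 1,
};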
the_stack_data/98594.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct book {
  int a;
  int b;
};

int main(int argc, char const *argv[]) {
  struct book book;
  book.a = 0;
  book.b = 0;

  int a = 2;
  a = 3 + a - a * 2;
  printf("%d\n", a);

  time_t now = time(NULL);
  printf("%ld\n", (long)now); /* cast: time_t is not guaranteed to be long */

  return 0;
}
the_stack_data/529103.c
void __VERIFIER_assert(int cond) {
  if (!(cond)) {
  ERROR:
    goto ERROR;
  }
  return;
}

extern float __VERIFIER_nondet_float();

int main(void) {
  float a = __VERIFIER_nondet_float(); // interval from -infinity to infinity

  if (a < 0) {
    return 0;
  }

  // interval from 0 to infinity
  __VERIFIER_assert(a >= 1);

  return a;
}
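The inline comments track an interval abstraction: after the `a < 0` guard the analyser only knows a lies in [0, +infinity), which does not entail a >= 1, so the ERROR label is reachable. A minimal standalone sketch, assuming nothing beyond the code above, that exhibits a concrete counterexample:

/* Concrete witness for the assertion failure: any a in [0, 1) passes the
 * guard (a < 0 is false) but violates a >= 1. */
#include <stdio.h>

int main(void) {
  float a = 0.5f; /* in [0, +infinity) but not >= 1 */
  if (!(a < 0)) {
    printf("a = %f reaches the assert; (a >= 1) evaluates to %d\n", a, a >= 1.0f);
  }
  return 0;
}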
the_stack_data/148369.c
//===--- Adi.c ---- Alternating Direction Implicit ---------------*- C -*-===//
//
// This file implements the Alternating Direction Implicit(ADI) method which is
// an iterative method used to solve partial differential equations.
//
//===----------------------------------------------------------------------===//

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX(A, b) ((A) > (b) ? (A) : (b))

#define NX 384
#define NY 384
#define NZ 384

void init(double (*A)[NY][NZ]);
double iter(double (*A)[NY][NZ]);

int main(int Argc, char *Argv[]) {
  double MaxEps, Eps;
  double(*A)[NY][NZ];
  int It, ItMax, I, J, K;

  MaxEps = 0.01;
  ItMax = 100;
  A = (double(*)[NY][NZ])malloc(NX * NY * NZ * sizeof(double));

  init(A);

  for (It = 1; It <= ItMax; It++) {
    Eps = iter(A);
    printf(" IT = %4i EPS = %14.3E\n", It, Eps);
    if (Eps < MaxEps)
      break;
  }

  free(A);

  printf(" ADI Benchmark Completed.\n");
  printf(" Size = %4d x %4d x %4d\n", NX, NY, NZ);
  printf(" Iterations = %12d\n", ItMax);
  printf(" Operation type = double precision\n");
  printf(" Verification = %12s\n",
         (fabs(Eps - 0.07249074) < 1e-6 ? "SUCCESSFUL" : "UNSUCCESSFUL"));
  printf(" END OF ADI Benchmark\n");
  return 0;
}

void init(double (*A)[NY][NZ]) {
  int I, J, K;

  for (I = 0; I < NX; I++)
    for (J = 0; J < NY; J++)
      for (K = 0; K < NZ; K++)
        if (K == 0 || K == NZ - 1 || J == 0 || J == NY - 1 || I == 0 ||
            I == NX - 1)
          A[I][J][K] = 10.0 * I / (NX - 1) + 10.0 * J / (NY - 1) +
                       10.0 * K / (NZ - 1);
        else
          A[I][J][K] = 0;
}

double iter(double (*A)[NY][NZ]) {
  int I, J, K;
  double Eps = 0;

  for (I = 1; I < NX - 1; I++)
    for (J = 1; J < NY - 1; J++)
      for (K = 1; K < NZ - 1; K++)
        A[I][J][K] = (A[I - 1][J][K] + A[I + 1][J][K]) / 2;

  for (I = 1; I < NX - 1; I++)
    for (J = 1; J < NY - 1; J++)
      for (K = 1; K < NZ - 1; K++)
        A[I][J][K] = (A[I][J - 1][K] + A[I][J + 1][K]) / 2;

  for (I = 1; I < NX - 1; I++)
    for (J = 1; J < NY - 1; J++)
      for (K = 1; K < NZ - 1; K++) {
        double Tmp1 = (A[I][J][K - 1] + A[I][J][K + 1]) / 2;
        double Tmp2 = fabs(A[I][J][K] - Tmp1);
        Eps = MAX(Eps, Tmp2);
        A[I][J][K] = Tmp1;
      }

  return Eps;
}
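The benchmark relaxes the grid along each axis in turn and takes the largest per-cell change of the final sweep as the convergence measure Eps. A minimal 1-D reduction of the same half-sum relaxation and Eps bookkeeping (an illustrative sketch, not part of the benchmark):

/* 1-D illustration of the sweep/Eps pattern used in iter(): interior points
 * are replaced by the average of their neighbours and Eps records the
 * largest change per sweep. */
#include <math.h>
#include <stdio.h>

int main(void) {
  double A[6] = {0.0, 0.0, 0.0, 0.0, 0.0, 10.0}; /* fixed boundary values */
  for (int it = 1; it <= 20; it++) {
    double Eps = 0;
    for (int i = 1; i < 5; i++) {
      double Tmp1 = (A[i - 1] + A[i + 1]) / 2;
      double Tmp2 = fabs(A[i] - Tmp1);
      if (Tmp2 > Eps)
        Eps = Tmp2;
      A[i] = Tmp1;
    }
    printf("IT = %2d EPS = %g\n", it, Eps);
    if (Eps < 0.01)
      break;
  }
  return 0;
}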
the_stack_data/647462.c
/* This is a version (aka dlmalloc) of malloc/free/realloc written by Doug Lea and released to the public domain, as explained at http://creativecommons.org/licenses/publicdomain. Send questions, comments, complaints, performance data, etc to [email protected] * Version 2.8.3 Thu Sep 22 11:16:15 2005 Doug Lea (dl at gee) Note: There may be an updated version of this malloc obtainable at ftp://gee.cs.oswego.edu/pub/misc/malloc.c Check before installing! * Quickstart This library is all in one file to simplify the most common usage: ftp it, compile it (-O3), and link it into another program. All of the compile-time options default to reasonable values for use on most platforms. You might later want to step through various compile-time and dynamic tuning options. For convenience, an include file for code using this malloc is at: ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h You don't really need this .h file unless you call functions not defined in your system include files. The .h file contains only the excerpts from this file needed for using this malloc on ANSI C/C++ systems, so long as you haven't changed compile-time options about naming and tuning parameters. If you do, then you can create your own malloc.h that does include all settings by cutting at the point indicated below. Note that you may already by default be using a C library containing a malloc that is based on some version of this malloc (for example in linux). You might still want to use the one in this file to customize settings or to avoid overheads associated with library versions. * Vital statistics: Supported pointer/size_t representation: 4 or 8 bytes size_t MUST be an unsigned type of the same width as pointers. (If you are using an ancient system that declares size_t as a signed type, or need it to be a different width than pointers, you can use a previous release of this malloc (e.g. 2.7.2) supporting these.) Alignment: 8 bytes (default) This suffices for nearly all current machines and C compilers. However, you can define MALLOC_ALIGNMENT to be wider than this if necessary (up to 128bytes), at the expense of using more space. Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes) 8 or 16 bytes (if 8byte sizes) Each malloced chunk has a hidden word of overhead holding size and status information, and additional cross-check word if FOOTERS is defined. Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead) 8-byte ptrs: 32 bytes (including overhead) Even a request for zero bytes (i.e., malloc(0)) returns a pointer to something of the minimum allocatable size. The maximum overhead wastage (i.e., number of extra bytes allocated than were requested in malloc) is less than or equal to the minimum size, except for requests >= mmap_threshold that are serviced via mmap(), where the worst case wastage is about 32 bytes plus the remainder from a system page (the minimal mmap unit); typically 4096 or 8192 bytes. Security: static-safe; optionally more or less The "security" of malloc refers to the ability of malicious code to accentuate the effects of errors (for example, freeing space that is not currently malloc'ed or overwriting past the ends of chunks) in code that calls malloc. This malloc guarantees not to modify any memory locations below the base of heap, i.e., static variables, even in the presence of usage errors. The routines additionally detect most improper frees and reallocs. All this holds as long as the static bookkeeping for malloc itself is not corrupted by some other means. 
This is only one aspect of security -- these checks do not, and cannot, detect all possible programming errors. If FOOTERS is defined nonzero, then each allocated chunk carries an additional check word to verify that it was malloced from its space. These check words are the same within each execution of a program using malloc, but differ across executions, so externally crafted fake chunks cannot be freed. This improves security by rejecting frees/reallocs that could corrupt heap memory, in addition to the checks preventing writes to statics that are always on. This may further improve security at the expense of time and space overhead. (Note that FOOTERS may also be worth using with MSPACES.) By default detected errors cause the program to abort (calling "abort()"). You can override this to instead proceed past errors by defining PROCEED_ON_ERROR. In this case, a bad free has no effect, and a malloc that encounters a bad address caused by user overwrites will ignore the bad address by dropping pointers and indices to all known memory. This may be appropriate for programs that should continue if at all possible in the face of programming errors, although they may run out of memory because dropped memory is never reclaimed. If you don't like either of these options, you can define CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything else. And if if you are sure that your program using malloc has no errors or vulnerabilities, you can define INSECURE to 1, which might (or might not) provide a small performance improvement. Thread-safety: NOT thread-safe unless USE_LOCKS defined When USE_LOCKS is defined, each public call to malloc, free, etc is surrounded with either a pthread mutex or a win32 spinlock (depending on WIN32). This is not especially fast, and can be a major bottleneck. It is designed only to provide minimal protection in concurrent environments, and to provide a basis for extensions. If you are using malloc in a concurrent program, consider instead using ptmalloc, which is derived from a version of this malloc. (See http://www.malloc.de). System requirements: Any combination of MORECORE and/or MMAP/MUNMAP This malloc can use unix sbrk or any emulation (invoked using the CALL_MORECORE macro) and/or mmap/munmap or any emulation (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system memory. On most unix systems, it tends to work best if both MORECORE and MMAP are enabled. On Win32, it uses emulations based on VirtualAlloc. It also uses common C library functions like memset. Compliance: I believe it is compliant with the Single Unix Specification (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably others as well. * Overview of algorithms This is not the fastest, most space-conserving, most portable, or most tunable malloc ever written. However it is among the fastest while also being among the most space-conserving, portable and tunable. Consistent balance across these factors results in a good general-purpose allocator for malloc-intensive programs. In most ways, this malloc is a best-fit allocator. Generally, it chooses the best-fitting existing chunk for a request, with ties broken in approximately least-recently-used order. (This strategy normally maintains low fragmentation.) However, for requests less than 256bytes, it deviates from best-fit when there is not an exactly fitting available chunk by preferring to use space adjacent to that used for the previous small request, as well as by breaking ties in approximately most-recently-used order. 
(These enhance locality of series of small allocations.) And for very large requests (>= 256Kb by default), it relies on system memory mapping facilities, if supported. (This helps avoid carrying around and possibly fragmenting memory used only for large chunks.) All operations (except malloc_stats and mallinfo) have execution times that are bounded by a constant factor of the number of bits in a size_t, not counting any clearing in calloc or copying in realloc, or actions surrounding MORECORE and MMAP that have times proportional to the number of non-contiguous regions returned by system allocation routines, which is often just 1. The implementation is not very modular and seriously overuses macros. Perhaps someday all C compilers will do as good a job inlining modular code as can now be done by brute-force expansion, but now, enough of them seem not to. Some compilers issue a lot of warnings about code that is dead/unreachable only on some platforms, and also about intentional uses of negation on unsigned types. All known cases of each can be ignored. For a longer but out of date high-level description, see http://gee.cs.oswego.edu/dl/html/malloc.html * MSPACES If MSPACES is defined, then in addition to malloc, free, etc., this file also defines mspace_malloc, mspace_free, etc. These are versions of malloc routines that take an "mspace" argument obtained using create_mspace, to control all internal bookkeeping. If ONLY_MSPACES is defined, only these versions are compiled. So if you would like to use this allocator for only some allocations, and your system malloc for others, you can compile with ONLY_MSPACES and then do something like... static mspace mymspace = create_mspace(0,0); // for example #define mymalloc(bytes) mspace_malloc(mymspace, bytes) (Note: If you only need one instance of an mspace, you can instead use "USE_DL_PREFIX" to relabel the global malloc.) You can similarly create thread-local allocators by storing mspaces as thread-locals. For example: static __thread mspace tlms = 0; void* tlmalloc(size_t bytes) { if (tlms == 0) tlms = create_mspace(0, 0); return mspace_malloc(tlms, bytes); } void tlfree(void* mem) { mspace_free(tlms, mem); } Unless FOOTERS is defined, each mspace is completely independent. You cannot allocate from one and free to another (although conformance is only weakly checked, so usage errors are not always caught). If FOOTERS is defined, then each chunk carries around a tag indicating its originating mspace, and frees are directed to their originating spaces. ------------------------- Compile-time options --------------------------- Be careful in setting #define values for numerical constants of type size_t. On some systems, literal values are not automatically extended to size_t precision unless they are explicitly casted. WIN32 default: defined if _WIN32 defined Defining WIN32 sets up defaults for MS environment and compilers. Otherwise defaults are for unix. MALLOC_ALIGNMENT default: (size_t)8 Controls the minimum alignment for malloc'ed chunks. It must be a power of two and at least 8, even on machines for which smaller alignments would suffice. It may be defined as larger than this though. Note however that code and data structures are optimized for the case of 8-byte alignment. MSPACES default: 0 (false) If true, compile in support for independent allocation spaces. This is only supported if HAVE_MMAP is true. ONLY_MSPACES default: 0 (false) If true, only compile in mspace versions, not regular versions. 
USE_LOCKS default: 0 (false) Causes each call to each public routine to be surrounded with pthread or WIN32 mutex lock/unlock. (If set true, this can be overridden on a per-mspace basis for mspace versions.) FOOTERS default: 0 If true, provide extra checking and dispatching by placing information in the footers of allocated chunks. This adds space and time overhead. INSECURE default: 0 If true, omit checks for usage errors and heap space overwrites. USE_DL_PREFIX default: NOT defined Causes compiler to prefix all public routines with the string 'dl'. This can be useful when you only want to use this malloc in one part of a program, using your regular system malloc elsewhere. ABORT default: defined as abort() Defines how to abort on failed checks. On most systems, a failed check cannot die with an "assert" or even print an informative message, because the underlying print routines in turn call malloc, which will fail again. Generally, the best policy is to simply call abort(). It's not very useful to do more than this because many errors due to overwriting will show up as address faults (null, odd addresses etc) rather than malloc-triggered checks, so will also abort. Also, most compilers know that abort() does not return, so can better optimize code conditionally calling it. PROCEED_ON_ERROR default: defined as 0 (false) Controls whether detected bad addresses cause them to bypassed rather than aborting. If set, detected bad arguments to free and realloc are ignored. And all bookkeeping information is zeroed out upon a detected overwrite of freed heap space, thus losing the ability to ever return it from malloc again, but enabling the application to proceed. If PROCEED_ON_ERROR is defined, the static variable malloc_corruption_error_count is compiled in and can be examined to see if errors have occurred. This option generates slower code than the default abort policy. DEBUG default: NOT defined The DEBUG setting is mainly intended for people trying to modify this code or diagnose problems when porting to new platforms. However, it may also be able to better isolate user errors than just using runtime checks. The assertions in the check routines spell out in more detail the assumptions and invariants underlying the algorithms. The checking is fairly extensive, and will slow down execution noticeably. Calling malloc_stats or mallinfo with DEBUG set will attempt to check every non-mmapped allocated and free chunk in the course of computing the summaries. ABORT_ON_ASSERT_FAILURE default: defined as 1 (true) Debugging assertion failures can be nearly impossible if your version of the assert macro causes malloc to be called, which will lead to a cascade of further failures, blowing the runtime stack. ABORT_ON_ASSERT_FAILURE cause assertions failures to call abort(), which will usually make debugging easier. MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32 The action to take before "return 0" when malloc fails to be able to return memory because there is none available. HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES True if this system supports sbrk or an emulation of it. MORECORE default: sbrk The name of the sbrk-style system routine to call to obtain more memory. See below for guidance on writing custom MORECORE functions. The type of the argument to sbrk/MORECORE varies across systems. It cannot be size_t, because it supports negative arguments, so it is normally the signed type of the same width as size_t (sometimes declared as "intptr_t"). 
It doesn't much matter though. Internally, we only call it with arguments less than half the max value of a size_t, which should work across all reasonable possibilities, although sometimes generating compiler warnings. See near the end of this file for guidelines for creating a custom version of MORECORE. MORECORE_CONTIGUOUS default: 1 (true) If true, take advantage of fact that consecutive calls to MORECORE with positive arguments always return contiguous increasing addresses. This is true of unix sbrk. It does not hurt too much to set it true anyway, since malloc copes with non-contiguities. Setting it false when definitely non-contiguous saves time and possibly wasted space it would take to discover this though. MORECORE_CANNOT_TRIM default: NOT defined True if MORECORE cannot release space back to the system when given negative arguments. This is generally necessary only if you are using a hand-crafted MORECORE function that cannot handle negative arguments. HAVE_MMAP default: 1 (true) True if this system supports mmap or an emulation of it. If so, and HAVE_MORECORE is not true, MMAP is used for all system allocation. If set and HAVE_MORECORE is true as well, MMAP is primarily used to directly allocate very large blocks. It is also used as a backup strategy in cases where MORECORE fails to provide space from system. Note: A single call to MUNMAP is assumed to be able to unmap memory that may have be allocated using multiple calls to MMAP, so long as they are adjacent. HAVE_MREMAP default: 1 on linux, else 0 If true realloc() uses mremap() to re-allocate large blocks and extend or shrink allocation spaces. MMAP_CLEARS default: 1 on unix True if mmap clears memory so calloc doesn't need to. This is true for standard unix mmap using /dev/zero. USE_BUILTIN_FFS default: 0 (i.e., not used) Causes malloc to use the builtin ffs() function to compute indices. Some compilers may recognize and intrinsify ffs to be faster than the supplied C version. Also, the case of x86 using gcc is special-cased to an asm instruction, so is already as fast as it can be, and so this setting has no effect. (On most x86s, the asm version is only slightly faster than the C version.) malloc_getpagesize default: derive from system includes, or 4096. The system page size. To the extent possible, this malloc manages memory from the system in page-size units. This may be (and usually is) a function rather than a constant. This is ignored if WIN32, where page size is determined using getSystemInfo during initialization. USE_DEV_RANDOM default: 0 (i.e., not used) Causes malloc to use /dev/random to initialize secure magic seed for stamping footers. Otherwise, the current time is used. NO_MALLINFO default: 0 If defined, don't compile "mallinfo". This can be a simple way of dealing with mismatches between system declarations and those in this file. MALLINFO_FIELD_TYPE default: size_t The type of the fields in the mallinfo struct. This was originally defined as "int" in SVID etc, but is more usefully defined as size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set REALLOC_ZERO_BYTES_FREES default: not defined This should be set if a call to realloc with zero bytes should be the same as a call to free. Some people think it should. Otherwise, since this malloc returns a unique pointer for malloc(0), so does realloc(p, 0). 
LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H LACKS_STDLIB_H default: NOT defined unless on WIN32 Define these if your system does not have these header files. You might need to manually insert some of the declarations they provide. DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS, system_info.dwAllocationGranularity in WIN32, otherwise 64K. Also settable using mallopt(M_GRANULARITY, x) The unit for allocating and deallocating memory from the system. On most systems with contiguous MORECORE, there is no reason to make this more than a page. However, systems with MMAP tend to either require or encourage larger granularities. You can increase this value to prevent system allocation functions to be called so often, especially if they are slow. The value must be at least one page and must be a power of two. Setting to 0 causes initialization to either page size or win32 region size. (Note: In previous versions of malloc, the equivalent of this option was called "TOP_PAD") DEFAULT_TRIM_THRESHOLD default: 2MB Also settable using mallopt(M_TRIM_THRESHOLD, x) The maximum amount of unused top-most memory to keep before releasing via malloc_trim in free(). Automatic trimming is mainly useful in long-lived programs using contiguous MORECORE. Because trimming via sbrk can be slow on some systems, and can sometimes be wasteful (in cases where programs immediately afterward allocate more large chunks) the value should be high enough so that your overall system performance would improve by releasing this much memory. As a rough guide, you might set to a value close to the average size of a process (program) running on your system. Releasing this much memory would allow such a process to run in memory. Generally, it is worth tuning trim thresholds when a program undergoes phases where several large chunks are allocated and released in ways that can reuse each other's storage, perhaps mixed with phases where there are no such chunks at all. The trim value must be greater than page size to have any useful effect. To disable trimming completely, you can set to MAX_SIZE_T. Note that the trick some people use of mallocing a huge space and then freeing it at program startup, in an attempt to reserve system memory, doesn't have the intended effect under automatic trimming, since that memory will immediately be returned to the system. DEFAULT_MMAP_THRESHOLD default: 256K Also settable using mallopt(M_MMAP_THRESHOLD, x) The request size threshold for using MMAP to directly service a request. Requests of at least this size that cannot be allocated using already-existing space will be serviced via mmap. (If enough normal freed space already exists it is used instead.) Using mmap segregates relatively large chunks of memory so that they can be individually obtained and released from the host system. A request serviced through mmap is never reused by any other request (at least not directly; the system may just so happen to remap successive requests to the same locations). Segregating space in this way has the benefits that: Mmapped space can always be individually released back to the system, which helps keep the system level memory demands of a long-lived program low. Also, mapped memory doesn't become `locked' between other chunks, as can happen with normally allocated chunks, which means that even trimming via malloc_trim would not release them. 
However, it has the disadvantage that the space cannot be reclaimed, consolidated, and then used to service later requests, as happens with normal chunks. The advantages of mmap nearly always outweigh disadvantages for "large" chunks, but the value of "large" may vary across systems. The default is an empirically derived value that works well in most systems. You can disable mmap by setting to MAX_SIZE_T. */ #ifndef WIN32 #ifdef _WIN32 #define WIN32 1 #endif /* _WIN32 */ #endif /* WIN32 */ #ifdef WIN32 #define WIN32_LEAN_AND_MEAN #include <windows.h> #define HAVE_MMAP 1 #define HAVE_MORECORE 0 #define LACKS_UNISTD_H #define LACKS_SYS_PARAM_H #define LACKS_SYS_MMAN_H #define LACKS_STRING_H #define LACKS_STRINGS_H #define LACKS_SYS_TYPES_H #define LACKS_ERRNO_H #define MALLOC_FAILURE_ACTION #define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */ #endif /* WIN32 */ #if defined(DARWIN) || defined(_DARWIN) /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ #ifndef HAVE_MORECORE #define HAVE_MORECORE 0 #define HAVE_MMAP 1 #endif /* HAVE_MORECORE */ #endif /* DARWIN */ #ifndef LACKS_SYS_TYPES_H #include <sys/types.h> /* For size_t */ #endif /* LACKS_SYS_TYPES_H */ /* The maximum possible size_t value has all bits set */ #define MAX_SIZE_T (~(size_t)0) #ifndef ONLY_MSPACES #define ONLY_MSPACES 0 #endif /* ONLY_MSPACES */ #ifndef MSPACES #if ONLY_MSPACES #define MSPACES 1 #else /* ONLY_MSPACES */ #define MSPACES 0 #endif /* ONLY_MSPACES */ #endif /* MSPACES */ #ifndef MALLOC_ALIGNMENT #define MALLOC_ALIGNMENT ((size_t)8U) #endif /* MALLOC_ALIGNMENT */ #ifndef FOOTERS #define FOOTERS 0 #endif /* FOOTERS */ #ifndef ABORT #define ABORT abort() #endif /* ABORT */ #ifndef ABORT_ON_ASSERT_FAILURE #define ABORT_ON_ASSERT_FAILURE 1 #endif /* ABORT_ON_ASSERT_FAILURE */ #ifndef PROCEED_ON_ERROR #define PROCEED_ON_ERROR 0 #endif /* PROCEED_ON_ERROR */ #ifndef USE_LOCKS #define USE_LOCKS 0 #endif /* USE_LOCKS */ #ifndef INSECURE #define INSECURE 0 #endif /* INSECURE */ #ifndef HAVE_MMAP #define HAVE_MMAP 1 #endif /* HAVE_MMAP */ #ifndef MMAP_CLEARS #define MMAP_CLEARS 1 #endif /* MMAP_CLEARS */ #ifndef HAVE_MREMAP #ifdef linux #define HAVE_MREMAP 1 #else /* linux */ #define HAVE_MREMAP 0 #endif /* linux */ #endif /* HAVE_MREMAP */ #ifndef MALLOC_FAILURE_ACTION #define MALLOC_FAILURE_ACTION errno = ENOMEM; #endif /* MALLOC_FAILURE_ACTION */ #ifndef HAVE_MORECORE #if ONLY_MSPACES #define HAVE_MORECORE 0 #else /* ONLY_MSPACES */ #define HAVE_MORECORE 1 #endif /* ONLY_MSPACES */ #endif /* HAVE_MORECORE */ #if !HAVE_MORECORE #define MORECORE_CONTIGUOUS 0 #else /* !HAVE_MORECORE */ #ifndef MORECORE #define MORECORE sbrk #endif /* MORECORE */ #ifndef MORECORE_CONTIGUOUS #define MORECORE_CONTIGUOUS 1 #endif /* MORECORE_CONTIGUOUS */ #endif /* HAVE_MORECORE */ #ifndef DEFAULT_GRANULARITY #if MORECORE_CONTIGUOUS #define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ #else /* MORECORE_CONTIGUOUS */ #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) #endif /* MORECORE_CONTIGUOUS */ #endif /* DEFAULT_GRANULARITY */ #ifndef DEFAULT_TRIM_THRESHOLD #ifndef MORECORE_CANNOT_TRIM #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) #else /* MORECORE_CANNOT_TRIM */ #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T #endif /* MORECORE_CANNOT_TRIM */ #endif /* DEFAULT_TRIM_THRESHOLD */ #ifndef DEFAULT_MMAP_THRESHOLD #if HAVE_MMAP #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) #else /* HAVE_MMAP */ #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T 
#endif /* HAVE_MMAP */ #endif /* DEFAULT_MMAP_THRESHOLD */ #ifndef USE_BUILTIN_FFS #define USE_BUILTIN_FFS 0 #endif /* USE_BUILTIN_FFS */ #ifndef USE_DEV_RANDOM #define USE_DEV_RANDOM 0 #endif /* USE_DEV_RANDOM */ #ifndef NO_MALLINFO #define NO_MALLINFO 0 #endif /* NO_MALLINFO */ #ifndef MALLINFO_FIELD_TYPE #define MALLINFO_FIELD_TYPE size_t #endif /* MALLINFO_FIELD_TYPE */ /* mallopt tuning options. SVID/XPG defines four standard parameter numbers for mallopt, normally defined in malloc.h. None of these are used in this malloc, so setting them has no effect. But this malloc does support the following options. */ #define M_TRIM_THRESHOLD (-1) #define M_GRANULARITY (-2) #define M_MMAP_THRESHOLD (-3) /* ------------------------ Mallinfo declarations ------------------------ */ #if !NO_MALLINFO /* This version of malloc supports the standard SVID/XPG mallinfo routine that returns a struct containing usage properties and statistics. It should work on any system that has a /usr/include/malloc.h defining struct mallinfo. The main declaration needed is the mallinfo struct that is returned (by-copy) by mallinfo(). The malloinfo struct contains a bunch of fields that are not even meaningful in this version of malloc. These fields are are instead filled by mallinfo() with other numbers that might be of interest. HAVE_USR_INCLUDE_MALLOC_H should be set if you have a /usr/include/malloc.h file that includes a declaration of struct mallinfo. If so, it is included; else a compliant version is declared below. These must be precisely the same for mallinfo() to work. The original SVID version of this struct, defined on most systems with mallinfo, declares all fields as ints. But some others define as unsigned long. If your system defines the fields using a type of different width than listed here, you MUST #include your system version and #define HAVE_USR_INCLUDE_MALLOC_H. 
*/ /* #define HAVE_USR_INCLUDE_MALLOC_H */ #ifdef HAVE_USR_INCLUDE_MALLOC_H #include "/usr/include/malloc.h" #else /* HAVE_USR_INCLUDE_MALLOC_H */ struct mallinfo { MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ MALLINFO_FIELD_TYPE smblks; /* always 0 */ MALLINFO_FIELD_TYPE hblks; /* always 0 */ MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ MALLINFO_FIELD_TYPE fordblks; /* total free space */ MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ }; #endif /* HAVE_USR_INCLUDE_MALLOC_H */ #endif /* NO_MALLINFO */ #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ #if !ONLY_MSPACES /* ------------------- Declarations of public routines ------------------- */ #ifndef USE_DL_PREFIX #define dlcalloc calloc #define dlfree free #define dlmalloc malloc #define dlmemalign memalign #define dlrealloc realloc #define dlvalloc valloc #define dlpvalloc pvalloc #define dlmallinfo mallinfo #define dlmallopt mallopt #define dlmalloc_trim malloc_trim #define dlmalloc_stats malloc_stats #define dlmalloc_usable_size malloc_usable_size #define dlmalloc_footprint malloc_footprint #define dlmalloc_max_footprint malloc_max_footprint #define dlindependent_calloc independent_calloc #define dlindependent_comalloc independent_comalloc #endif /* USE_DL_PREFIX */ /* malloc(size_t n) Returns a pointer to a newly allocated chunk of at least n bytes, or null if no space is available, in which case errno is set to ENOMEM on ANSI C systems. If n is zero, malloc returns a minimum-sized chunk. (The minimum size is 16 bytes on most 32bit systems, and 32 bytes on 64bit systems.) Note that size_t is an unsigned type, so calls with arguments that would be negative if signed are interpreted as requests for huge amounts of space, which will often fail. The maximum supported value of n differs across systems, but is in all cases less than the maximum representable value of a size_t. */ void* dlmalloc(size_t); /* free(void* p) Releases the chunk of memory pointed to by p, that had been previously allocated using malloc or a related routine such as realloc. It has no effect if p is null. If p was not malloced or already freed, free(p) will by default cause the current program to abort. */ void dlfree(void*); /* calloc(size_t n_elements, size_t element_size); Returns a pointer to n_elements * element_size bytes, with all locations set to zero. */ void* dlcalloc(size_t, size_t); /* realloc(void* p, size_t n) Returns a pointer to a chunk of size n that contains the same data as does chunk p up to the minimum of (n, p's size) bytes, or null if no space is available. The returned pointer may or may not be the same as p. The algorithm prefers extending p in most cases when possible, otherwise it employs the equivalent of a malloc-copy-free sequence. If p is null, realloc is equivalent to malloc. If space is not available, realloc returns null, errno is set (if on ANSI) and p is NOT freed. if n is for fewer bytes than already held by p, the newly unused space is lopped off and freed if possible. realloc with a size argument of zero (re)allocates a minimum-sized chunk. The old unix realloc convention of allowing the last-free'd chunk to be used as an argument to realloc is not supported. 
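  For example, a minimal sketch of the usual safe pattern (grow_buffer is an
  illustrative name; dlrealloc is simply realloc in the default build without
  USE_DL_PREFIX): because a failed call leaves p allocated, assign through a
  temporary instead of overwriting p directly.

    void* grow_buffer(void* p, size_t n) {
      void* q = dlrealloc(p, n);
      if (q == 0)
        return p;   // allocation failed; p is still valid and still owned here
      return q;
    }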
*/ void* dlrealloc(void*, size_t); /* memalign(size_t alignment, size_t n); Returns a pointer to a newly allocated chunk of n bytes, aligned in accord with the alignment argument. The alignment argument should be a power of two. If the argument is not a power of two, the nearest greater power is used. 8-byte alignment is guaranteed by normal malloc calls, so don't bother calling memalign with an argument of 8 or less. Overreliance on memalign is a sure way to fragment space. */ void* dlmemalign(size_t, size_t); /* valloc(size_t n); Equivalent to memalign(pagesize, n), where pagesize is the page size of the system. If the pagesize is unknown, 4096 is used. */ void* dlvalloc(size_t); /* mallopt(int parameter_number, int parameter_value) Sets tunable parameters The format is to provide a (parameter-number, parameter-value) pair. mallopt then sets the corresponding parameter to the argument value if it can (i.e., so long as the value is meaningful), and returns 1 if successful else 0. SVID/XPG/ANSI defines four standard param numbers for mallopt, normally defined in malloc.h. None of these are use in this malloc, so setting them has no effect. But this malloc also supports other options in mallopt. See below for details. Briefly, supported parameters are as follows (listed defaults are for "typical" configurations). Symbol param # default allowed param values M_TRIM_THRESHOLD -1 2*1024*1024 any (MAX_SIZE_T disables) M_GRANULARITY -2 page size any power of 2 >= page size M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support) */ int dlmallopt(int, int); /* malloc_footprint(); Returns the number of bytes obtained from the system. The total number of bytes allocated by malloc, realloc etc., is less than this value. Unlike mallinfo, this function returns only a precomputed result, so can be called frequently to monitor memory consumption. Even if locks are otherwise defined, this function does not use them, so results might not be up to date. */ size_t dlmalloc_footprint(void); /* malloc_max_footprint(); Returns the maximum number of bytes obtained from the system. This value will be greater than current footprint if deallocated space has been reclaimed by the system. The peak number of bytes allocated by malloc, realloc etc., is less than this value. Unlike mallinfo, this function returns only a precomputed result, so can be called frequently to monitor memory consumption. Even if locks are otherwise defined, this function does not use them, so results might not be up to date. */ size_t dlmalloc_max_footprint(void); #if !NO_MALLINFO /* mallinfo() Returns (by copy) a struct containing various summary statistics: arena: current total non-mmapped bytes allocated from system ordblks: the number of free chunks smblks: always zero. hblks: current number of mmapped regions hblkhd: total bytes held in mmapped regions usmblks: the maximum total allocated space. This will be greater than current total if trimming has occurred. fsmblks: always zero uordblks: current total allocated space (normal or mmapped) fordblks: total free space keepcost: the maximum number of bytes that could ideally be released back to system via malloc_trim. ("ideally" means that it ignores page restrictions etc.) Because these fields are ints, but internal bookkeeping may be kept as longs, the reported values may wrap around zero and thus be inaccurate. 
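  A minimal usage sketch combining mallinfo with the mallopt parameters listed
  above (the threshold values are arbitrary illustrative choices):

    struct mallinfo mi = dlmallinfo();
    printf("arena=%lu in-use=%lu free=%lu\n",
           (unsigned long)mi.arena,
           (unsigned long)mi.uordblks,
           (unsigned long)mi.fordblks);
    dlmallopt(M_TRIM_THRESHOLD, 1024 * 1024); // trim once 1MB of top space is unused
    dlmallopt(M_MMAP_THRESHOLD, 512 * 1024);  // mmap requests of 512K and larger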
*/ struct mallinfo dlmallinfo(void); #endif /* NO_MALLINFO */ /* independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); independent_calloc is similar to calloc, but instead of returning a single cleared space, it returns an array of pointers to n_elements independent elements that can hold contents of size elem_size, each of which starts out cleared, and can be independently freed, realloc'ed etc. The elements are guaranteed to be adjacently allocated (this is not guaranteed to occur with multiple callocs or mallocs), which may also improve cache locality in some applications. The "chunks" argument is optional (i.e., may be null, which is probably the most typical usage). If it is null, the returned array is itself dynamically allocated and should also be freed when it is no longer needed. Otherwise, the chunks array must be of at least n_elements in length. It is filled in with the pointers to the chunks. In either case, independent_calloc returns this pointer array, or null if the allocation failed. If n_elements is zero and "chunks" is null, it returns a chunk representing an array with zero elements (which should be freed if not wanted). Each element must be individually freed when it is no longer needed. If you'd like to instead be able to free all at once, you should instead use regular calloc and assign pointers into this space to represent elements. (In this case though, you cannot independently free elements.) independent_calloc simplifies and speeds up implementations of many kinds of pools. It may also be useful when constructing large data structures that initially have a fixed number of fixed-sized nodes, but the number is not known at compile time, and some of the nodes may later need to be freed. For example: struct Node { int item; struct Node* next; }; struct Node* build_list() { struct Node** pool; int n = read_number_of_nodes_needed(); if (n <= 0) return 0; pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0); if (pool == 0) die(); // organize into a linked list... struct Node* first = pool[0]; for (i = 0; i < n-1; ++i) pool[i]->next = pool[i+1]; free(pool); // Can now free the array (or not, if it is needed later) return first; } */ void** dlindependent_calloc(size_t, size_t, void**); /* independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); independent_comalloc allocates, all at once, a set of n_elements chunks with sizes indicated in the "sizes" array. It returns an array of pointers to these elements, each of which can be independently freed, realloc'ed etc. The elements are guaranteed to be adjacently allocated (this is not guaranteed to occur with multiple callocs or mallocs), which may also improve cache locality in some applications. The "chunks" argument is optional (i.e., may be null). If it is null the returned array is itself dynamically allocated and should also be freed when it is no longer needed. Otherwise, the chunks array must be of at least n_elements in length. It is filled in with the pointers to the chunks. In either case, independent_comalloc returns this pointer array, or null if the allocation failed. If n_elements is zero and chunks is null, it returns a chunk representing an array with zero elements (which should be freed if not wanted). Each element must be individually freed when it is no longer needed. If you'd like to instead be able to free all at once, you should instead use a single regular malloc, and assign pointers at particular offsets in the aggregate space. 
(In this case though, you cannot independently free elements.) independent_comallac differs from independent_calloc in that each element may have a different size, and also that it does not automatically clear elements. independent_comalloc can be used to speed up allocation in cases where several structs or objects must always be allocated at the same time. For example: struct Head { ... } struct Foot { ... } void send_message(char* msg) { int msglen = strlen(msg); size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; void* chunks[3]; if (independent_comalloc(3, sizes, chunks) == 0) die(); struct Head* head = (struct Head*)(chunks[0]); char* body = (char*)(chunks[1]); struct Foot* foot = (struct Foot*)(chunks[2]); // ... } In general though, independent_comalloc is worth using only for larger values of n_elements. For small values, you probably won't detect enough difference from series of malloc calls to bother. Overuse of independent_comalloc can increase overall memory usage, since it cannot reuse existing noncontiguous small chunks that might be available for some of the elements. */ void** dlindependent_comalloc(size_t, size_t*, void**); /* pvalloc(size_t n); Equivalent to valloc(minimum-page-that-holds(n)), that is, round up n to nearest pagesize. */ void* dlpvalloc(size_t); /* malloc_trim(size_t pad); If possible, gives memory back to the system (via negative arguments to sbrk) if there is unused memory at the `high' end of the malloc pool or in unused MMAP segments. You can call this after freeing large blocks of memory to potentially reduce the system-level memory requirements of a program. However, it cannot guarantee to reduce memory. Under some allocation patterns, some large free blocks of memory will be locked between two used chunks, so they cannot be given back to the system. The `pad' argument to malloc_trim represents the amount of free trailing space to leave untrimmed. If this argument is zero, only the minimum amount of memory to maintain internal data structures will be left. Non-zero arguments can be supplied to maintain enough trailing space to service future expected allocations without having to re-obtain memory from the system. Malloc_trim returns 1 if it actually released any memory, else 0. */ int dlmalloc_trim(size_t); /* malloc_usable_size(void* p); Returns the number of bytes you can actually use in an allocated chunk, which may be more than you requested (although often not) due to alignment and minimum size constraints. You can use this many bytes without worrying about overwriting other allocated objects. This is not a particularly great programming practice. malloc_usable_size can be more useful in debugging and assertions, for example: p = malloc(n); assert(malloc_usable_size(p) >= 256); */ size_t dlmalloc_usable_size(void*); /* malloc_stats(); Prints on stderr the amount of space obtained from the system (both via sbrk and mmap), the maximum amount (which may be more than current if malloc_trim and/or munmap got called), and the current number of bytes allocated via malloc (or realloc, etc) but not yet freed. Note that this is the number of bytes allocated, not the number requested. It will be larger than the number requested because of alignment and bookkeeping overhead. Because it includes alignment wastage as being in use, this figure may be greater than zero even when no user-level chunks are allocated. 
The reported current and maximum system memory can be inaccurate if a program makes other calls to system memory allocation functions (normally sbrk) outside of malloc. malloc_stats prints only the most commonly interesting statistics. More information can be obtained by calling mallinfo. */ void dlmalloc_stats(void); #endif /* ONLY_MSPACES */ #if MSPACES /* mspace is an opaque type representing an independent region of space that supports mspace_malloc, etc. */ typedef void* mspace; /* create_mspace creates and returns a new independent space with the given initial capacity, or, if 0, the default granularity size. It returns null if there is no system memory available to create the space. If argument locked is non-zero, the space uses a separate lock to control access. The capacity of the space will grow dynamically as needed to service mspace_malloc requests. You can control the sizes of incremental increases of this space by compiling with a different DEFAULT_GRANULARITY or dynamically setting with mallopt(M_GRANULARITY, value). */ mspace create_mspace(size_t capacity, int locked); /* destroy_mspace destroys the given space, and attempts to return all of its memory back to the system, returning the total number of bytes freed. After destruction, the results of access to all memory used by the space become undefined. */ size_t destroy_mspace(mspace msp); /* create_mspace_with_base uses the memory supplied as the initial base of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this space is used for bookkeeping, so the capacity must be at least this large. (Otherwise 0 is returned.) When this initial space is exhausted, additional memory will be obtained from the system. Destroying this space will deallocate all additionally allocated space (if possible) but not the initial base. */ mspace create_mspace_with_base(void* base, size_t capacity, int locked); /* mspace_malloc behaves as malloc, but operates within the given space. */ void* mspace_malloc(mspace msp, size_t bytes); /* mspace_free behaves as free, but operates within the given space. If compiled with FOOTERS==1, mspace_free is not actually needed. free may be called instead of mspace_free because freed chunks from any space are handled by their originating spaces. */ void mspace_free(mspace msp, void* mem); /* mspace_realloc behaves as realloc, but operates within the given space. If compiled with FOOTERS==1, mspace_realloc is not actually needed. realloc may be called instead of mspace_realloc because realloced chunks from any space are handled by their originating spaces. */ void* mspace_realloc(mspace msp, void* mem, size_t newsize); /* mspace_calloc behaves as calloc, but operates within the given space. */ void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); /* mspace_memalign behaves as memalign, but operates within the given space. */ void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); /* mspace_independent_calloc behaves as independent_calloc, but operates within the given space. */ void** mspace_independent_calloc(mspace msp, size_t n_elements, size_t elem_size, void* chunks[]); /* mspace_independent_comalloc behaves as independent_comalloc, but operates within the given space. */ void** mspace_independent_comalloc(mspace msp, size_t n_elements, size_t sizes[], void* chunks[]); /* mspace_footprint() returns the number of bytes obtained from the system for this space. 
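  A minimal lifecycle sketch tying these mspace calls together (the zero
  capacity and the 128-byte request are arbitrary illustrative choices):

    mspace ms = create_mspace(0, 0);        // default initial capacity, unlocked
    if (ms != 0) {
      void* p = mspace_malloc(ms, 128);
      mspace_free(ms, p);
      size_t foot = mspace_footprint(ms);   // bytes this space obtained from the system
      destroy_mspace(ms);                   // returns all of the space's memory at once
    }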
*/ size_t mspace_footprint(mspace msp); /* mspace_max_footprint() returns the peak number of bytes obtained from the system for this space. */ size_t mspace_max_footprint(mspace msp); #if !NO_MALLINFO /* mspace_mallinfo behaves as mallinfo, but reports properties of the given space. */ struct mallinfo mspace_mallinfo(mspace msp); #endif /* NO_MALLINFO */ /* mspace_malloc_stats behaves as malloc_stats, but reports properties of the given space. */ void mspace_malloc_stats(mspace msp); /* mspace_trim behaves as malloc_trim, but operates within the given space. */ int mspace_trim(mspace msp, size_t pad); /* An alias for mallopt. */ int mspace_mallopt(int, int); #endif /* MSPACES */ #ifdef __cplusplus }; /* end of extern "C" */ #endif /* __cplusplus */ /* ======================================================================== To make a fully customizable malloc.h header file, cut everything above this line, put into file malloc.h, edit to suit, and #include it on the next line, as well as in programs that use this malloc. ======================================================================== */ /* #include "malloc.h" */ /*------------------------------ internal #includes ---------------------- */ #ifdef WIN32 #pragma warning( disable : 4146 ) /* no "unsigned" warnings */ #endif /* WIN32 */ #include <stdio.h> /* for printing in malloc_stats */ #ifndef LACKS_ERRNO_H #include <errno.h> /* for MALLOC_FAILURE_ACTION */ #endif /* LACKS_ERRNO_H */ #if FOOTERS #include <time.h> /* for magic initialization */ #endif /* FOOTERS */ #ifndef LACKS_STDLIB_H #include <stdlib.h> /* for abort() */ #endif /* LACKS_STDLIB_H */ #ifdef DEBUG #if ABORT_ON_ASSERT_FAILURE #define assert(x) if(!(x)) ABORT #else /* ABORT_ON_ASSERT_FAILURE */ #include <assert.h> #endif /* ABORT_ON_ASSERT_FAILURE */ #else /* DEBUG */ #define assert(x) #endif /* DEBUG */ #ifndef LACKS_STRING_H #include <string.h> /* for memset etc */ #endif /* LACKS_STRING_H */ #if USE_BUILTIN_FFS #ifndef LACKS_STRINGS_H #include <strings.h> /* for ffs */ #endif /* LACKS_STRINGS_H */ #endif /* USE_BUILTIN_FFS */ #if HAVE_MMAP #ifndef LACKS_SYS_MMAN_H #include <sys/mman.h> /* for mmap */ #endif /* LACKS_SYS_MMAN_H */ #ifndef LACKS_FCNTL_H #include <fcntl.h> #endif /* LACKS_FCNTL_H */ #endif /* HAVE_MMAP */ #if HAVE_MORECORE #ifndef LACKS_UNISTD_H #include <unistd.h> /* for sbrk */ #else /* LACKS_UNISTD_H */ #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) extern void* sbrk(ptrdiff_t); #endif /* FreeBSD etc */ #endif /* LACKS_UNISTD_H */ #endif /* HAVE_MMAP */ #ifndef WIN32 #ifndef malloc_getpagesize # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ # ifndef _SC_PAGE_SIZE # define _SC_PAGE_SIZE _SC_PAGESIZE # endif # endif # ifdef _SC_PAGE_SIZE # define malloc_getpagesize sysconf(_SC_PAGE_SIZE) # else # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) extern size_t getpagesize(); # define malloc_getpagesize getpagesize() # else # ifdef WIN32 /* use supplied emulation of getpagesize */ # define malloc_getpagesize getpagesize() # else # ifndef LACKS_SYS_PARAM_H # include <sys/param.h> # endif # ifdef EXEC_PAGESIZE # define malloc_getpagesize EXEC_PAGESIZE # else # ifdef NBPG # ifndef CLSIZE # define malloc_getpagesize NBPG # else # define malloc_getpagesize (NBPG * CLSIZE) # endif # else # ifdef NBPC # define malloc_getpagesize NBPC # else # ifdef PAGESIZE # define malloc_getpagesize PAGESIZE # else /* just guess */ # define malloc_getpagesize ((size_t)4096U) # endif # endif # endif # endif # endif # 
endif # endif #endif #endif /* ------------------- size_t and alignment properties -------------------- */ /* The byte and bit size of a size_t */ #define SIZE_T_SIZE (sizeof(size_t)) #define SIZE_T_BITSIZE (sizeof(size_t) << 3) /* Some constants coerced to size_t */ /* Annoying but necessary to avoid errors on some plaftorms */ #define SIZE_T_ZERO ((size_t)0) #define SIZE_T_ONE ((size_t)1) #define SIZE_T_TWO ((size_t)2) #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) /* The bit mask value corresponding to MALLOC_ALIGNMENT */ #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) /* True if address a has acceptable alignment */ #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) /* the number of bytes to offset an address to align it */ #define align_offset(A)\ ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) /* -------------------------- MMAP preliminaries ------------------------- */ /* If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and checks to fail so compiler optimizer can delete code rather than using so many "#if"s. */ /* MORECORE and MMAP must return MFAIL on failure */ #define MFAIL ((void*)(MAX_SIZE_T)) #define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ #if !HAVE_MMAP #define IS_MMAPPED_BIT (SIZE_T_ZERO) #define USE_MMAP_BIT (SIZE_T_ZERO) #define CALL_MMAP(s) MFAIL #define CALL_MUNMAP(a, s) (-1) #define DIRECT_MMAP(s) MFAIL #else /* HAVE_MMAP */ #define IS_MMAPPED_BIT (SIZE_T_ONE) #define USE_MMAP_BIT (SIZE_T_ONE) #ifndef WIN32 #define CALL_MUNMAP(a, s) munmap((a), (s)) #define MMAP_PROT (PROT_READ|PROT_WRITE) #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) #define MAP_ANONYMOUS MAP_ANON #endif /* MAP_ANON */ #ifdef MAP_ANONYMOUS #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) #define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0) #else /* MAP_ANONYMOUS */ /* Nearly all versions of mmap support MAP_ANONYMOUS, so the following is unlikely to be needed, but is supplied just in case. */ #define MMAP_FLAGS (MAP_PRIVATE) static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ #define CALL_MMAP(s) ((dev_zero_fd < 0) ? \ (dev_zero_fd = open("/dev/zero", O_RDWR), \ mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) #endif /* MAP_ANONYMOUS */ #define DIRECT_MMAP(s) CALL_MMAP(s) #else /* WIN32 */ /* Win32 MMAP via VirtualAlloc */ static void* win32mmap(size_t size) { void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); return (ptr != 0)? ptr: MFAIL; } /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ static void* win32direct_mmap(size_t size) { void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, PAGE_READWRITE); return (ptr != 0)? 
ptr: MFAIL; } /* This function supports releasing coalesed segments */ static int win32munmap(void* ptr, size_t size) { MEMORY_BASIC_INFORMATION minfo; char* cptr = ptr; while (size) { if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) return -1; if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || minfo.State != MEM_COMMIT || minfo.RegionSize > size) return -1; if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) return -1; cptr += minfo.RegionSize; size -= minfo.RegionSize; } return 0; } #define CALL_MMAP(s) win32mmap(s) #define CALL_MUNMAP(a, s) win32munmap((a), (s)) #define DIRECT_MMAP(s) win32direct_mmap(s) #endif /* WIN32 */ #endif /* HAVE_MMAP */ #if HAVE_MMAP && HAVE_MREMAP #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv)) #else /* HAVE_MMAP && HAVE_MREMAP */ #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL #endif /* HAVE_MMAP && HAVE_MREMAP */ #if HAVE_MORECORE #define CALL_MORECORE(S) MORECORE(S) #else /* HAVE_MORECORE */ #define CALL_MORECORE(S) MFAIL #endif /* HAVE_MORECORE */ /* mstate bit set if continguous morecore disabled or failed */ #define USE_NONCONTIGUOUS_BIT (4U) /* segment bit set in create_mspace_with_base */ #define EXTERN_BIT (8U) /* --------------------------- Lock preliminaries ------------------------ */ #if USE_LOCKS /* When locks are defined, there are up to two global locks: * If HAVE_MORECORE, morecore_mutex protects sequences of calls to MORECORE. In many cases sys_alloc requires two calls, that should not be interleaved with calls by other threads. This does not protect against direct calls to MORECORE by other threads not using this lock, so there is still code to cope the best we can on interference. * magic_init_mutex ensures that mparams.magic and other unique mparams values are initialized only once. */ #ifndef WIN32 /* By default use posix locks */ #include <pthread.h> #define MLOCK_T pthread_mutex_t #define INITIAL_LOCK(l) pthread_mutex_init(l, NULL) #define ACQUIRE_LOCK(l) pthread_mutex_lock(l) #define RELEASE_LOCK(l) pthread_mutex_unlock(l) #if HAVE_MORECORE static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER; #endif /* HAVE_MORECORE */ static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER; #else /* WIN32 */ /* Because lock-protected regions have bounded times, and there are no recursive lock calls, we can use simple spinlocks. 
*/ #define MLOCK_T long static int win32_acquire_lock (MLOCK_T *sl) { for (;;) { #ifdef InterlockedCompareExchangePointer if (!InterlockedCompareExchange(sl, 1, 0)) return 0; #else /* Use older void* version */ if (!InterlockedCompareExchange((void**)sl, (void*)1, (void*)0)) return 0; #endif /* InterlockedCompareExchangePointer */ Sleep (0); } } static void win32_release_lock (MLOCK_T *sl) { InterlockedExchange (sl, 0); } #define INITIAL_LOCK(l) *(l)=0 #define ACQUIRE_LOCK(l) win32_acquire_lock(l) #define RELEASE_LOCK(l) win32_release_lock(l) #if HAVE_MORECORE static MLOCK_T morecore_mutex; #endif /* HAVE_MORECORE */ static MLOCK_T magic_init_mutex; #endif /* WIN32 */ #define USE_LOCK_BIT (2U) #else /* USE_LOCKS */ #define USE_LOCK_BIT (0U) #define INITIAL_LOCK(l) #endif /* USE_LOCKS */ #if USE_LOCKS && HAVE_MORECORE #define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex); #define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex); #else /* USE_LOCKS && HAVE_MORECORE */ #define ACQUIRE_MORECORE_LOCK() #define RELEASE_MORECORE_LOCK() #endif /* USE_LOCKS && HAVE_MORECORE */ #if USE_LOCKS #define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex); #define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex); #else /* USE_LOCKS */ #define ACQUIRE_MAGIC_INIT_LOCK() #define RELEASE_MAGIC_INIT_LOCK() #endif /* USE_LOCKS */ /* ----------------------- Chunk representations ------------------------ */ /* (The following includes lightly edited explanations by Colin Plumb.) The malloc_chunk declaration below is misleading (but accurate and necessary). It declares a "view" into memory allowing access to necessary fields at known offsets from a given base. Chunks of memory are maintained using a `boundary tag' method as originally described by Knuth. (See the paper by Paul Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such techniques.) Sizes of free chunks are stored both in the front of each chunk and at the end. This makes consolidating fragmented chunks into bigger chunks fast. The head fields also hold bits representing whether chunks are free or in use. Here are some pictures to make it clearer. They are "exploded" to show that the state of a chunk can be thought of as extending from the high 31 bits of the head field of its header through the prev_foot and PINUSE_BIT bit of the following chunk header. A chunk that's in use looks like: chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Size of previous chunk (if P = 1) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| | Size of this chunk 1| +-+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | +- -+ | | +- -+ | : +- size - sizeof(size_t) available payload bytes -+ : | chunk-> +- -+ | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1| | Size of next chunk (may or may not be in use) | +-+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ And if it's free, it looks like this: chunk-> +- -+ | User payload (must be in use, or we would have merged!) 
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| | Size of this chunk 0| +-+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Next pointer | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Prev pointer | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | : +- size - sizeof(struct chunk) unused bytes -+ : | chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Size of this chunk | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0| | Size of next chunk (must be in use, or we would have merged)| +-+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | : +- User payload -+ : | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0| +-+ Note that since we always merge adjacent free chunks, the chunks adjacent to a free chunk must be in use. Given a pointer to a chunk (which can be derived trivially from the payload pointer) we can, in O(1) time, find out whether the adjacent chunks are free, and if so, unlink them from the lists that they are on and merge them with the current chunk. Chunks always begin on even word boundaries, so the mem portion (which is returned to the user) is also on an even word boundary, and thus at least double-word aligned. The P (PINUSE_BIT) bit, stored in the unused low-order bit of the chunk size (which is always a multiple of two words), is an in-use bit for the *previous* chunk. If that bit is *clear*, then the word before the current chunk size contains the previous chunk size, and can be used to find the front of the previous chunk. The very first chunk allocated always has this bit set, preventing access to non-existent (or non-owned) memory. If pinuse is set for any given chunk, then you CANNOT determine the size of the previous chunk, and might even get a memory addressing fault when trying to do so. The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of the chunk size redundantly records whether the current chunk is inuse. This redundancy enables usage checks within free and realloc, and reduces indirection when freeing and consolidating chunks. Each freshly allocated chunk must have both cinuse and pinuse set. That is, each allocated chunk borders either a previously allocated and still in-use chunk, or the base of its memory arena. This is ensured by making all allocations from the the `lowest' part of any found chunk. Further, no free chunk physically borders another one, so each free chunk is known to be preceded and followed by either inuse chunks or the ends of memory. Note that the `foot' of the current chunk is actually represented as the prev_foot of the NEXT chunk. This makes it easier to deal with alignments etc but can be very confusing when trying to extend or adapt this code. The exceptions to all this are 1. The special chunk `top' is the top-most available chunk (i.e., the one bordering the end of available memory). It is treated specially. Top is never included in any bin, is used only if no other chunk is available, and is released back to the system if it is very large (see M_TRIM_THRESHOLD). In effect, the top chunk is treated as larger (and thus less well fitting) than any other available chunk. The top chunk doesn't update its trailing size field since there is no next contiguous chunk that would have to index off it. 
However, space is still allocated for it (TOP_FOOT_SIZE) to enable separation or merging when space is extended. 3. Chunks allocated via mmap, which have the lowest-order bit (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set PINUSE_BIT in their head fields. Because they are allocated one-by-one, each must carry its own prev_foot field, which is also used to hold the offset this chunk has within its mmapped region, which is needed to preserve alignment. Each mmapped chunk is trailed by the first two fields of a fake next-chunk for sake of usage checks. */ struct malloc_chunk { size_t prev_foot; /* Size of previous chunk (if free). */ size_t head; /* Size and inuse bits. */ struct malloc_chunk* fd; /* double links -- used only if free. */ struct malloc_chunk* bk; }; typedef struct malloc_chunk mchunk; typedef struct malloc_chunk* mchunkptr; typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ typedef unsigned int bindex_t; /* Described below */ typedef unsigned int binmap_t; /* Described below */ typedef unsigned int flag_t; /* The type of various bit flag sets */ /* ------------------- Chunks sizes and alignments ----------------------- */ #define MCHUNK_SIZE (sizeof(mchunk)) #if FOOTERS #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) #else /* FOOTERS */ #define CHUNK_OVERHEAD (SIZE_T_SIZE) #endif /* FOOTERS */ /* MMapped chunks need a second word of overhead ... */ #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) /* ... and additional padding for fake next-chunk at foot */ #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) /* The smallest size we can malloc is an aligned minimal chunk */ #define MIN_CHUNK_SIZE\ ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) /* conversion from malloc headers to user pointers, and back */ #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES)) #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES)) /* chunk associated with aligned address A */ #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) /* Bounds on request (not chunk) sizes. */ #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) /* pad request bytes into a usable size */ #define pad_request(req) \ (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) /* pad request, checking for minimum (but not maximum) */ #define request2size(req) \ (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) /* ------------------ Operations on head and foot fields ----------------- */ /* The head field of a chunk is or'ed with PINUSE_BIT when previous adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in use. If the chunk was obtained with mmap, the prev_foot field has IS_MMAPPED_BIT set, otherwise holding the offset of the base of the mmapped region to the base of the chunk. */ #define PINUSE_BIT (SIZE_T_ONE) #define CINUSE_BIT (SIZE_T_TWO) #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) /* Head value for fenceposts */ #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) /* extraction of fields from head words */ #define cinuse(p) ((p)->head & CINUSE_BIT) #define pinuse(p) ((p)->head & PINUSE_BIT) #define chunksize(p) ((p)->head & ~(INUSE_BITS)) #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) #define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT) /* Treat space at ptr +/- offset as a chunk */ #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) /* Ptr to next or previous physical malloc_chunk. 
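
   As a concrete illustration of the boundary-tag navigation described
   earlier (a sketch only, mirroring the next_chunk/prev_chunk macros
   defined just below; p is assumed to be a pointer to a valid chunk):

     mchunkptr p   = ...;                                   // some known chunk
     mchunkptr nxt = (mchunkptr)((char*)p + (p->head & ~INUSE_BITS));
     mchunkptr prv = pinuse(p)? 0                           // prev size unknown
                              : (mchunkptr)((char*)p - p->prev_foot);

   The forward step always works because every head holds the chunk
   size; the backward step is only legal when PINUSE_BIT is clear,
   since only then does prev_foot hold the previous chunk's size.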
*/ #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS))) #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) /* extract next chunk's pinuse bit */ #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) /* Get/set size at footer */ #define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) /* Set size, pinuse bit, and foot */ #define set_size_and_pinuse_of_free_chunk(p, s)\ ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) /* Set size, pinuse bit, foot, and clear next pinuse */ #define set_free_with_pinuse(p, s, n)\ (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) #define is_mmapped(p)\ (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT)) /* Get the internal overhead associated with chunk p */ #define overhead_for(p)\ (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) /* Return true if malloced space is not necessarily cleared */ #if MMAP_CLEARS #define calloc_must_clear(p) (!is_mmapped(p)) #else /* MMAP_CLEARS */ #define calloc_must_clear(p) (1) #endif /* MMAP_CLEARS */ /* ---------------------- Overlaid data structures ----------------------- */ /* When chunks are not in use, they are treated as nodes of either lists or trees. "Small" chunks are stored in circular doubly-linked lists, and look like this: chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Size of previous chunk | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ `head:' | Size of chunk, in bytes |P| mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Forward pointer to next chunk in list | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Back pointer to previous chunk in list | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Unused space (may be 0 bytes long) . . . . | nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ `foot:' | Size of chunk, in bytes | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Larger chunks are kept in a form of bitwise digital trees (aka tries) keyed on chunksizes. Because malloc_tree_chunks are only for free chunks greater than 256 bytes, their size doesn't impose any constraints on user chunk sizes. Each node looks like: chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Size of previous chunk | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ `head:' | Size of chunk, in bytes |P| mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Forward pointer to next chunk of same size | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Back pointer to previous chunk of same size | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Pointer to left child (child[0]) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Pointer to right child (child[1]) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Pointer to parent | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | bin index of this chunk | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Unused space . . | nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ `foot:' | Size of chunk, in bytes | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Each tree holding treenodes is a tree of unique chunk sizes. 
  Chunks of the same size are arranged in a circularly-linked list,
  with only the oldest chunk (the next to be used, in our FIFO ordering)
  actually in the tree.  (Tree members are distinguished by a non-null
  parent pointer.)  If a chunk with the same size as an existing node
  is inserted, it is linked off the existing node using pointers that
  work in the same way as fd/bk pointers of small chunks.

  Each tree contains a power of 2 sized range of chunk sizes (the
  smallest is 0x100 <= x < 0x180), which is divided in half at each
  tree level, with the chunks in the smaller half of the range
  (0x100 <= x < 0x140 for the top node) in the left subtree and the
  larger half (0x140 <= x < 0x180) in the right subtree.  This is, of
  course, done by inspecting individual bits.

  Using these rules, each node's left subtree contains all smaller
  sizes than its right subtree.  However, the node at the root of each
  subtree has no particular ordering relationship to either.  (The
  dividing line between the subtree sizes is based on trie relation.)
  If we remove the last chunk of a given size from the interior of the
  tree, we need to replace it with a leaf node.  The tree ordering
  rules permit a node to be replaced by any leaf below it.

  The smallest chunk in a tree (a common operation in a best-fit
  allocator) can be found by walking a path to the leftmost leaf in
  the tree.  Unlike a usual binary tree, where we follow left child
  pointers until we reach a null, here we follow the right child
  pointer any time the left one is null, until we reach a leaf with
  both child pointers null.  The smallest chunk in the tree will be
  somewhere along that path.

  The worst case number of steps to add, find, or remove a node is
  bounded by the number of bits differentiating chunks within bins.
  Under current bin calculations, this ranges from 6 up to 21 (for
  32 bit sizes) or up to 53 (for 64 bit sizes).  The typical case is
  of course much better.
*/

struct malloc_tree_chunk {
  /* The first four fields must be compatible with malloc_chunk */
  size_t                    prev_foot;
  size_t                    head;
  struct malloc_tree_chunk* fd;
  struct malloc_tree_chunk* bk;

  struct malloc_tree_chunk* child[2];
  struct malloc_tree_chunk* parent;
  bindex_t                  index;
};

typedef struct malloc_tree_chunk  tchunk;
typedef struct malloc_tree_chunk* tchunkptr;
typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */

/* A little helper macro for trees */
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])

/* ----------------------------- Segments -------------------------------- */

/*
  Each malloc space may include non-contiguous segments, held in a
  list headed by an embedded malloc_segment record representing the
  top-most space.  Segments also include flags holding properties of
  the space.  Large chunks that are directly allocated by mmap are not
  included in this list.  They are instead independently created and
  destroyed without otherwise keeping track of them.

  Segment management mainly comes into play for spaces allocated by
  MMAP.  Any call to MMAP might or might not return memory that is
  adjacent to an existing segment.  MORECORE normally contiguously
  extends the current space, so this space is almost always adjacent,
  which is simpler and faster to deal with.  (This is why MORECORE is
  used preferentially to MMAP when both are available -- see
  sys_alloc.)  When allocating using MMAP, we don't use any of the
  hinting mechanisms (inconsistently) supported in various
  implementations of unix mmap, or distinguish reserving from
  committing memory.
Instead, we just ask for space, and exploit contiguity when we get it. It is probably possible to do better than this on some systems, but no general scheme seems to be significantly better. Management entails a simpler variant of the consolidation scheme used for chunks to reduce fragmentation -- new adjacent memory is normally prepended or appended to an existing segment. However, there are limitations compared to chunk consolidation that mostly reflect the fact that segment processing is relatively infrequent (occurring only when getting memory from system) and that we don't expect to have huge numbers of segments: * Segments are not indexed, so traversal requires linear scans. (It would be possible to index these, but is not worth the extra overhead and complexity for most programs on most platforms.) * New segments are only appended to old ones when holding top-most memory; if they cannot be prepended to others, they are held in different segments. Except for the top-most segment of an mstate, each segment record is kept at the tail of its segment. Segments are added by pushing segment records onto the list headed by &mstate.seg for the containing mstate. Segment flags control allocation/merge/deallocation policies: * If EXTERN_BIT set, then we did not allocate this segment, and so should not try to deallocate or merge with others. (This currently holds only for the initial segment passed into create_mspace_with_base.) * If IS_MMAPPED_BIT set, the segment may be merged with other surrounding mmapped segments and trimmed/de-allocated using munmap. * If neither bit is set, then the segment was obtained using MORECORE so can be merged with surrounding MORECORE'd segments and deallocated/trimmed using MORECORE with negative arguments. */ struct malloc_segment { char* base; /* base address */ size_t size; /* allocated size */ struct malloc_segment* next; /* ptr to next segment */ flag_t sflags; /* mmap and extern flag */ }; #define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT) #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) typedef struct malloc_segment msegment; typedef struct malloc_segment* msegmentptr; /* ---------------------------- malloc_state ----------------------------- */ /* A malloc_state holds all of the bookkeeping for a space. The main fields are: Top The topmost chunk of the currently active segment. Its size is cached in topsize. The actual size of topmost space is topsize+TOP_FOOT_SIZE, which includes space reserved for adding fenceposts and segment records if necessary when getting more space from the system. The size at which to autotrim top is cached from mparams in trim_check, except that it is disabled if an autotrim fails. Designated victim (dv) This is the preferred chunk for servicing small requests that don't have exact fits. It is normally the chunk split off most recently to service another small request. Its size is cached in dvsize. The link fields of this chunk are not maintained since it is not kept in a bin. SmallBins An array of bin headers for free chunks. These bins hold chunks with sizes less than MIN_LARGE_SIZE bytes. Each bin contains chunks of all the same size, spaced 8 bytes apart. To simplify use in double-linked lists, each bin header acts as a malloc_chunk pointing to the real first node, if it exists (else pointing to itself). This avoids special-casing for headers. But to avoid waste, we allocate only the fd/bk pointers of bins, and then use repositioning tricks to treat these as the fields of a chunk. 
TreeBins Treebins are pointers to the roots of trees holding a range of sizes. There are 2 equally spaced treebins for each power of two from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything larger. Bin maps There is one bit map for small bins ("smallmap") and one for treebins ("treemap). Each bin sets its bit when non-empty, and clears the bit when empty. Bit operations are then used to avoid bin-by-bin searching -- nearly all "search" is done without ever looking at bins that won't be selected. The bit maps conservatively use 32 bits per map word, even if on 64bit system. For a good description of some of the bit-based techniques used here, see Henry S. Warren Jr's book "Hacker's Delight" (and supplement at http://hackersdelight.org/). Many of these are intended to reduce the branchiness of paths through malloc etc, as well as to reduce the number of memory locations read or written. Segments A list of segments headed by an embedded malloc_segment record representing the initial space. Address check support The least_addr field is the least address ever obtained from MORECORE or MMAP. Attempted frees and reallocs of any address less than this are trapped (unless INSECURE is defined). Magic tag A cross-check field that should always hold same value as mparams.magic. Flags Bits recording whether to use MMAP, locks, or contiguous MORECORE Statistics Each space keeps track of current and maximum system memory obtained via MORECORE or MMAP. Locking If USE_LOCKS is defined, the "mutex" lock is acquired and released around every public call using this mspace. */ /* Bin types, widths and sizes */ #define NSMALLBINS (32U) #define NTREEBINS (32U) #define SMALLBIN_SHIFT (3U) #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) #define TREEBIN_SHIFT (8U) #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) struct malloc_state { binmap_t smallmap; binmap_t treemap; size_t dvsize; size_t topsize; char* least_addr; mchunkptr dv; mchunkptr top; size_t trim_check; size_t magic; mchunkptr smallbins[(NSMALLBINS+1)*2]; tbinptr treebins[NTREEBINS]; size_t footprint; size_t max_footprint; flag_t mflags; #if USE_LOCKS MLOCK_T mutex; /* locate lock among fields that rarely change */ #endif /* USE_LOCKS */ msegment seg; }; typedef struct malloc_state* mstate; /* ------------- Global malloc_state and malloc_params ------------------- */ /* malloc_params holds global properties, including those that can be dynamically set using mallopt. There is a single instance, mparams, initialized in init_mparams. 
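
  For example (illustrative only; whether the public name is mallopt or
  dlmallopt depends on how the public function names are configured for
  this build, and the numeric values are arbitrary), the fields that
  change_mparam() below exposes can be tuned at runtime:

     mallopt(M_MMAP_THRESHOLD, 256*1024);   // direct-mmap requests this large
     mallopt(M_TRIM_THRESHOLD, 1024*1024);  // trim top only above about 1MB
     mallopt(M_GRANULARITY,    64*1024);    // must be a power of 2 >= page size

  M_GRANULARITY values are rejected unless they are a power of two no
  smaller than the page size; the other two parameters accept any value.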
*/ struct malloc_params { size_t magic; size_t page_size; size_t granularity; size_t mmap_threshold; size_t trim_threshold; flag_t default_mflags; }; static struct malloc_params mparams; /* The global malloc_state used for all non-"mspace" calls */ static struct malloc_state _gm_; #define gm (&_gm_) #define is_global(M) ((M) == &_gm_) #define is_initialized(M) ((M)->top != 0) /* -------------------------- system alloc setup ------------------------- */ /* Operations on mflags */ #define use_lock(M) ((M)->mflags & USE_LOCK_BIT) #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) #define set_lock(M,L)\ ((M)->mflags = (L)?\ ((M)->mflags | USE_LOCK_BIT) :\ ((M)->mflags & ~USE_LOCK_BIT)) /* page-align a size */ #define page_align(S)\ (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE)) /* granularity-align a size */ #define granularity_align(S)\ (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE)) #define is_page_aligned(S)\ (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) #define is_granularity_aligned(S)\ (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) /* True if segment S holds address A */ #define segment_holds(S, A)\ ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) /* Return segment holding given address */ static msegmentptr segment_holding(mstate m, char* addr) { msegmentptr sp = &m->seg; for (;;) { if (addr >= sp->base && addr < sp->base + sp->size) return sp; if ((sp = sp->next) == 0) return 0; } } /* Return true if segment contains a segment link */ static int has_segment_link(mstate m, msegmentptr ss) { msegmentptr sp = &m->seg; for (;;) { if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) return 1; if ((sp = sp->next) == 0) return 0; } } #ifndef MORECORE_CANNOT_TRIM #define should_trim(M,s) ((s) > (M)->trim_check) #else /* MORECORE_CANNOT_TRIM */ #define should_trim(M,s) (0) #endif /* MORECORE_CANNOT_TRIM */ /* TOP_FOOT_SIZE is padding at the end of a segment, including space that may be needed to place segment records and fenceposts when new noncontiguous segments are added. */ #define TOP_FOOT_SIZE\ (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) /* ------------------------------- Hooks -------------------------------- */ /* PREACTION should be defined to return 0 on success, and nonzero on failure. If you are not using locking, you can redefine these to do anything you like. */ #if USE_LOCKS /* Ensure locks are initialized */ #define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams()) #define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } #else /* USE_LOCKS */ #ifndef PREACTION #define PREACTION(M) (0) #endif /* PREACTION */ #ifndef POSTACTION #define POSTACTION(M) #endif /* POSTACTION */ #endif /* USE_LOCKS */ /* CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. USAGE_ERROR_ACTION is triggered on detected bad frees and reallocs. The argument p is an address that might have triggered the fault. 
It is ignored by the two predefined actions, but might be useful in custom actions that try to help diagnose errors. */ #if PROCEED_ON_ERROR /* A count of the number of corruption errors causing resets */ int malloc_corruption_error_count; /* default corruption action */ static void reset_on_error(mstate m); #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) #define USAGE_ERROR_ACTION(m, p) #else /* PROCEED_ON_ERROR */ #ifndef CORRUPTION_ERROR_ACTION #define CORRUPTION_ERROR_ACTION(m) ABORT #endif /* CORRUPTION_ERROR_ACTION */ #ifndef USAGE_ERROR_ACTION #define USAGE_ERROR_ACTION(m,p) ABORT #endif /* USAGE_ERROR_ACTION */ #endif /* PROCEED_ON_ERROR */ /* -------------------------- Debugging setup ---------------------------- */ #if ! DEBUG #define check_free_chunk(M,P) #define check_inuse_chunk(M,P) #define check_malloced_chunk(M,P,N) #define check_mmapped_chunk(M,P) #define check_malloc_state(M) #define check_top_chunk(M,P) #else /* DEBUG */ #define check_free_chunk(M,P) do_check_free_chunk(M,P) #define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) #define check_top_chunk(M,P) do_check_top_chunk(M,P) #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) #define check_malloc_state(M) do_check_malloc_state(M) static void do_check_any_chunk(mstate m, mchunkptr p); static void do_check_top_chunk(mstate m, mchunkptr p); static void do_check_mmapped_chunk(mstate m, mchunkptr p); static void do_check_inuse_chunk(mstate m, mchunkptr p); static void do_check_free_chunk(mstate m, mchunkptr p); static void do_check_malloced_chunk(mstate m, void* mem, size_t s); static void do_check_tree(mstate m, tchunkptr t); static void do_check_treebin(mstate m, bindex_t i); static void do_check_smallbin(mstate m, bindex_t i); static void do_check_malloc_state(mstate m); static int bin_find(mstate m, mchunkptr x); static size_t traverse_and_check(mstate m); #endif /* DEBUG */ /* ---------------------------- Indexing Bins ---------------------------- */ #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) #define small_index(s) ((s) >> SMALLBIN_SHIFT) #define small_index2size(i) ((i) << SMALLBIN_SHIFT) #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) /* addressing by index. See above about smallbin repositioning */ #define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) #define treebin_at(M,i) (&((M)->treebins[i])) /* assign tree index for size S to variable I */ #if defined(__GNUC__) && defined(i386) #define compute_tree_index(S, I)\ {\ size_t X = S >> TREEBIN_SHIFT;\ if (X == 0)\ I = 0;\ else if (X > 0xFFFF)\ I = NTREEBINS-1;\ else {\ unsigned int K;\ __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\ I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ }\ } #else /* GNUC */ #define compute_tree_index(S, I)\ {\ size_t X = S >> TREEBIN_SHIFT;\ if (X == 0)\ I = 0;\ else if (X > 0xFFFF)\ I = NTREEBINS-1;\ else {\ unsigned int Y = (unsigned int)X;\ unsigned int N = ((Y - 0x100) >> 16) & 8;\ unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ N += K;\ N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ K = 14 - N + ((Y <<= K) >> 15);\ I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ }\ } #endif /* GNUC */ /* Bit representing maximum resolved size in a treebin at i */ #define bit_for_tree_index(i) \ (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) /* Shift placing maximum resolved bit in a treebin at i as sign bit */ #define leftshift_for_tree_index(i) \ ((i == NTREEBINS-1)? 
0 : \
                    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

/* The size of the smallest chunk held in bin with index i */
#define minsize_for_tree_index(i) \
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))


/* ------------------------ Operations on bin maps ----------------------- */

/* bit corresponding to given index */
#define idx2bit(i)              ((binmap_t)(1) << (i))

/* Mark/Clear bits with given index */
#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))

#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))

/* index corresponding to given bit */

#if defined(__GNUC__) && defined(i386)
#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
  I = (bindex_t)J;\
}

#else /* GNUC */
#if USE_BUILTIN_FFS
#define compute_bit2idx(X, I) I = ffs(X)-1

#else /* USE_BUILTIN_FFS */
#define compute_bit2idx(X, I)\
{\
  unsigned int Y = X - 1;\
  unsigned int K = Y >> (16-4) & 16;\
  unsigned int N = K;        Y >>= K;\
  N += K = Y >> (8-3) &  8;  Y >>= K;\
  N += K = Y >> (4-2) &  4;  Y >>= K;\
  N += K = Y >> (2-1) &  2;  Y >>= K;\
  N += K = Y >> (1-0) &  1;  Y >>= K;\
  I = (bindex_t)(N + Y);\
}
#endif /* USE_BUILTIN_FFS */
#endif /* GNUC */

/* isolate the least set bit of a bitmap */
#define least_bit(x)         ((x) & -(x))

/* mask with all bits to left of least bit of x on */
#define left_bits(x)         ((x<<1) | -(x<<1))

/* mask with all bits to left of or equal to least bit of x on */
#define same_or_left_bits(x) ((x) | -(x))


/* ----------------------- Runtime Check Support ------------------------- */

/*
  For security, the main invariant is that malloc/free/etc never
  writes to a static address other than malloc_state, unless static
  malloc_state itself has been corrupted, which cannot occur via
  malloc (because of these checks).  In essence this means that we
  believe all pointers, sizes, maps etc held in malloc_state, but
  check all of those linked or offsetted from other embedded data
  structures.  These checks are interspersed with main code in a way
  that tends to minimize their run-time cost.

  When FOOTERS is defined, in addition to range checking, we also
  verify footer fields of inuse chunks, which can be used to guarantee
  that the mstate controlling malloc/free is intact.  This is a
  streamlined version of the approach described by William Robertson
  et al in "Run-time Detection of Heap-based Overflows" LISA'03
  http://www.usenix.org/events/lisa03/tech/robertson.html
  The footer of an inuse chunk holds the xor of its mstate and a
  random seed, that is checked upon calls to free() and realloc().
  This is (probabilistically) unguessable from outside the program,
  but can be computed by any code successfully malloc'ing any chunk,
  so does not itself provide protection against code that has already
  broken security through some other means.  Unlike Robertson et al,
  we always dynamically check addresses of all offset chunks
  (previous, next, etc).  This turns out to be cheaper than relying
  on hashes.
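
  To make the footer check concrete (a sketch of the idea only; the
  real steps are the mark_inuse_foot, get_mstate_for and ok_magic
  macros defined below when FOOTERS is set, and "footer" and "m2" are
  just illustrative names):

     footer = (size_t)m ^ mparams.magic;        // stored at the chunk's foot
     m2     = (mstate)(footer ^ mparams.magic); // recovered on free/realloc
     // ok_magic(m2) then requires m2->magic == mparams.magic, so a forged
     // or corrupted footer decodes to a bogus mstate and is rejected

  A corrupted or attacker-supplied footer therefore yields a pointer
  that is very unlikely to carry the right magic value.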
*/ #if !INSECURE /* Check if address a is at least as high as any from MORECORE or MMAP */ #define ok_address(M, a) ((char*)(a) >= (M)->least_addr) /* Check if address of next chunk n is higher than base chunk p */ #define ok_next(p, n) ((char*)(p) < (char*)(n)) /* Check if p has its cinuse bit on */ #define ok_cinuse(p) cinuse(p) /* Check if p has its pinuse bit on */ #define ok_pinuse(p) pinuse(p) #else /* !INSECURE */ #define ok_address(M, a) (1) #define ok_next(b, n) (1) #define ok_cinuse(p) (1) #define ok_pinuse(p) (1) #endif /* !INSECURE */ #if (FOOTERS && !INSECURE) /* Check if (alleged) mstate m has expected magic field */ #define ok_magic(M) ((M)->magic == mparams.magic) #else /* (FOOTERS && !INSECURE) */ #define ok_magic(M) (1) #endif /* (FOOTERS && !INSECURE) */ /* In gcc, use __builtin_expect to minimize impact of checks */ #if !INSECURE #if defined(__GNUC__) && __GNUC__ >= 3 #define RTCHECK(e) __builtin_expect(e, 1) #else /* GNUC */ #define RTCHECK(e) (e) #endif /* GNUC */ #else /* !INSECURE */ #define RTCHECK(e) (1) #endif /* !INSECURE */ /* macros to set up inuse chunks with or without footers */ #if !FOOTERS #define mark_inuse_foot(M,p,s) /* Set cinuse bit and pinuse bit of next chunk */ #define set_inuse(M,p,s)\ ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) /* Set cinuse and pinuse of this chunk and pinuse of next chunk */ #define set_inuse_and_pinuse(M,p,s)\ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) /* Set size, cinuse and pinuse bit of this chunk */ #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) #else /* FOOTERS */ /* Set foot of inuse chunk to be xor of mstate and seed */ #define mark_inuse_foot(M,p,s)\ (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) #define get_mstate_for(p)\ ((mstate)(((mchunkptr)((char*)(p) +\ (chunksize(p))))->prev_foot ^ mparams.magic)) #define set_inuse(M,p,s)\ ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ mark_inuse_foot(M,p,s)) #define set_inuse_and_pinuse(M,p,s)\ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ mark_inuse_foot(M,p,s)) #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ mark_inuse_foot(M, p, s)) #endif /* !FOOTERS */ /* ---------------------------- setting mparams -------------------------- */ /* Initialize mparams */ static int init_mparams(void) { if (mparams.page_size == 0) { size_t s; mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; #if MORECORE_CONTIGUOUS mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; #else /* MORECORE_CONTIGUOUS */ mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; #endif /* MORECORE_CONTIGUOUS */ #if (FOOTERS && !INSECURE) { #if USE_DEV_RANDOM int fd; unsigned char buf[sizeof(size_t)]; /* Try to use /dev/urandom, else fall back on using time */ if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && read(fd, buf, sizeof(buf)) == sizeof(buf)) { s = *((size_t *) buf); close(fd); } else #endif /* USE_DEV_RANDOM */ s = (size_t)(time(0) ^ (size_t)0x55555555U); s |= (size_t)8U; /* ensure nonzero */ s &= ~(size_t)7U; /* improve chances of fault for bad values */ } #else /* (FOOTERS && !INSECURE) */ s = (size_t)0x58585858U; #endif /* (FOOTERS && !INSECURE) */ ACQUIRE_MAGIC_INIT_LOCK(); if 
(mparams.magic == 0) { mparams.magic = s; /* Set up lock for main malloc area */ INITIAL_LOCK(&gm->mutex); gm->mflags = mparams.default_mflags; } RELEASE_MAGIC_INIT_LOCK(); #ifndef WIN32 mparams.page_size = malloc_getpagesize; mparams.granularity = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : mparams.page_size); #else /* WIN32 */ { SYSTEM_INFO system_info; GetSystemInfo(&system_info); mparams.page_size = system_info.dwPageSize; mparams.granularity = system_info.dwAllocationGranularity; } #endif /* WIN32 */ /* Sanity-check configuration: size_t must be unsigned and as wide as pointer type. ints must be at least 4 bytes. alignment must be at least 8. Alignment, min chunk size, and page size must all be powers of 2. */ if ((sizeof(size_t) != sizeof(char*)) || (MAX_SIZE_T < MIN_CHUNK_SIZE) || (sizeof(int) < 4) || (MALLOC_ALIGNMENT < (size_t)8U) || ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) || ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) != 0)) ABORT; } return 0; } /* support for mallopt */ static int change_mparam(int param_number, int value) { size_t val = (size_t)value; init_mparams(); switch(param_number) { case M_TRIM_THRESHOLD: mparams.trim_threshold = val; return 1; case M_GRANULARITY: if (val >= mparams.page_size && ((val & (val-1)) == 0)) { mparams.granularity = val; return 1; } else return 0; case M_MMAP_THRESHOLD: mparams.mmap_threshold = val; return 1; default: return 0; } } #if DEBUG /* ------------------------- Debugging Support --------------------------- */ /* Check properties of any chunk, whether free, inuse, mmapped etc */ static void do_check_any_chunk(mstate m, mchunkptr p) { assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); } /* Check properties of top chunk */ static void do_check_top_chunk(mstate m, mchunkptr p) { msegmentptr sp = segment_holding(m, (char*)p); size_t sz = chunksize(p); assert(sp != 0); assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); assert(sz == m->topsize); assert(sz > 0); assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); assert(pinuse(p)); assert(!next_pinuse(p)); } /* Check properties of (inuse) mmapped chunks */ static void do_check_mmapped_chunk(mstate m, mchunkptr p) { size_t sz = chunksize(p); size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD); assert(is_mmapped(p)); assert(use_mmap(m)); assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); assert(!is_small(sz)); assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); } /* Check properties of inuse chunks */ static void do_check_inuse_chunk(mstate m, mchunkptr p) { do_check_any_chunk(m, p); assert(cinuse(p)); assert(next_pinuse(p)); /* If not pinuse and not mmapped, previous chunk has OK offset */ assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); if (is_mmapped(p)) do_check_mmapped_chunk(m, p); } /* Check properties of free chunks */ static void do_check_free_chunk(mstate m, mchunkptr p) { size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); mchunkptr next = chunk_plus_offset(p, sz); do_check_any_chunk(m, p); assert(!cinuse(p)); assert(!next_pinuse(p)); assert (!is_mmapped(p)); if (p != m->dv && p != m->top) { if (sz >= MIN_CHUNK_SIZE) { assert((sz & CHUNK_ALIGN_MASK) 
== 0); assert(is_aligned(chunk2mem(p))); assert(next->prev_foot == sz); assert(pinuse(p)); assert (next == m->top || cinuse(next)); assert(p->fd->bk == p); assert(p->bk->fd == p); } else /* markers are always of size SIZE_T_SIZE */ assert(sz == SIZE_T_SIZE); } } /* Check properties of malloced chunks at the point they are malloced */ static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { if (mem != 0) { mchunkptr p = mem2chunk(mem); size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); do_check_inuse_chunk(m, p); assert((sz & CHUNK_ALIGN_MASK) == 0); assert(sz >= MIN_CHUNK_SIZE); assert(sz >= s); /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); } } /* Check a tree and its subtrees. */ static void do_check_tree(mstate m, tchunkptr t) { tchunkptr head = 0; tchunkptr u = t; bindex_t tindex = t->index; size_t tsize = chunksize(t); bindex_t idx; compute_tree_index(tsize, idx); assert(tindex == idx); assert(tsize >= MIN_LARGE_SIZE); assert(tsize >= minsize_for_tree_index(idx)); assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); do { /* traverse through chain of same-sized nodes */ do_check_any_chunk(m, ((mchunkptr)u)); assert(u->index == tindex); assert(chunksize(u) == tsize); assert(!cinuse(u)); assert(!next_pinuse(u)); assert(u->fd->bk == u); assert(u->bk->fd == u); if (u->parent == 0) { assert(u->child[0] == 0); assert(u->child[1] == 0); } else { assert(head == 0); /* only one node on chain has parent */ head = u; assert(u->parent != u); assert (u->parent->child[0] == u || u->parent->child[1] == u || *((tbinptr*)(u->parent)) == u); if (u->child[0] != 0) { assert(u->child[0]->parent == u); assert(u->child[0] != u); do_check_tree(m, u->child[0]); } if (u->child[1] != 0) { assert(u->child[1]->parent == u); assert(u->child[1] != u); do_check_tree(m, u->child[1]); } if (u->child[0] != 0 && u->child[1] != 0) { assert(chunksize(u->child[0]) < chunksize(u->child[1])); } } u = u->fd; } while (u != t); assert(head != 0); } /* Check all the chunks in a treebin. */ static void do_check_treebin(mstate m, bindex_t i) { tbinptr* tb = treebin_at(m, i); tchunkptr t = *tb; int empty = (m->treemap & (1U << i)) == 0; if (t == 0) assert(empty); if (!empty) do_check_tree(m, t); } /* Check all the chunks in a smallbin. */ static void do_check_smallbin(mstate m, bindex_t i) { sbinptr b = smallbin_at(m, i); mchunkptr p = b->bk; unsigned int empty = (m->smallmap & (1U << i)) == 0; if (p == b) assert(empty); if (!empty) { for (; p != b; p = p->bk) { size_t size = chunksize(p); mchunkptr q; /* each chunk claims to be free */ do_check_free_chunk(m, p); /* chunk belongs in bin */ assert(small_index(size) == i); assert(p->bk == b || chunksize(p->bk) == chunksize(p)); /* chunk is followed by an inuse chunk */ q = next_chunk(p); if (q->head != FENCEPOST_HEAD) do_check_inuse_chunk(m, q); } } } /* Find x in a bin. Used in other check functions. 
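   For example, do_check_malloc_state below asserts that neither the
   designated victim nor the top chunk appears in any bin, while
   traverse_and_check asserts that every other free chunk does.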
*/ static int bin_find(mstate m, mchunkptr x) { size_t size = chunksize(x); if (is_small(size)) { bindex_t sidx = small_index(size); sbinptr b = smallbin_at(m, sidx); if (smallmap_is_marked(m, sidx)) { mchunkptr p = b; do { if (p == x) return 1; } while ((p = p->fd) != b); } } else { bindex_t tidx; compute_tree_index(size, tidx); if (treemap_is_marked(m, tidx)) { tchunkptr t = *treebin_at(m, tidx); size_t sizebits = size << leftshift_for_tree_index(tidx); while (t != 0 && chunksize(t) != size) { t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; sizebits <<= 1; } if (t != 0) { tchunkptr u = t; do { if (u == (tchunkptr)x) return 1; } while ((u = u->fd) != t); } } } return 0; } /* Traverse each chunk and check it; return total */ static size_t traverse_and_check(mstate m) { size_t sum = 0; if (is_initialized(m)) { msegmentptr s = &m->seg; sum += m->topsize + TOP_FOOT_SIZE; while (s != 0) { mchunkptr q = align_as_chunk(s->base); mchunkptr lastq = 0; assert(pinuse(q)); while (segment_holds(s, q) && q != m->top && q->head != FENCEPOST_HEAD) { sum += chunksize(q); if (cinuse(q)) { assert(!bin_find(m, q)); do_check_inuse_chunk(m, q); } else { assert(q == m->dv || bin_find(m, q)); assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */ do_check_free_chunk(m, q); } lastq = q; q = next_chunk(q); } s = s->next; } } return sum; } /* Check all properties of malloc_state. */ static void do_check_malloc_state(mstate m) { bindex_t i; size_t total; /* check bins */ for (i = 0; i < NSMALLBINS; ++i) do_check_smallbin(m, i); for (i = 0; i < NTREEBINS; ++i) do_check_treebin(m, i); if (m->dvsize != 0) { /* check dv chunk */ do_check_any_chunk(m, m->dv); assert(m->dvsize == chunksize(m->dv)); assert(m->dvsize >= MIN_CHUNK_SIZE); assert(bin_find(m, m->dv) == 0); } if (m->top != 0) { /* check top chunk */ do_check_top_chunk(m, m->top); assert(m->topsize == chunksize(m->top)); assert(m->topsize > 0); assert(bin_find(m, m->top) == 0); } total = traverse_and_check(m); assert(total <= m->footprint); assert(m->footprint <= m->max_footprint); } #endif /* DEBUG */ /* ----------------------------- statistics ------------------------------ */ #if !NO_MALLINFO static struct mallinfo internal_mallinfo(mstate m) { struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; if (!PREACTION(m)) { check_malloc_state(m); if (is_initialized(m)) { size_t nfree = SIZE_T_ONE; /* top always free */ size_t mfree = m->topsize + TOP_FOOT_SIZE; size_t sum = mfree; msegmentptr s = &m->seg; while (s != 0) { mchunkptr q = align_as_chunk(s->base); while (segment_holds(s, q) && q != m->top && q->head != FENCEPOST_HEAD) { size_t sz = chunksize(q); sum += sz; if (!cinuse(q)) { mfree += sz; ++nfree; } q = next_chunk(q); } s = s->next; } nm.arena = sum; nm.ordblks = nfree; nm.hblkhd = m->footprint - sum; nm.usmblks = m->max_footprint; nm.uordblks = m->footprint - mfree; nm.fordblks = mfree; nm.keepcost = m->topsize; } POSTACTION(m); } return nm; } #endif /* !NO_MALLINFO */ static void internal_malloc_stats(mstate m) { if (!PREACTION(m)) { size_t maxfp = 0; size_t fp = 0; size_t used = 0; check_malloc_state(m); if (is_initialized(m)) { msegmentptr s = &m->seg; maxfp = m->max_footprint; fp = m->footprint; used = fp - (m->topsize + TOP_FOOT_SIZE); while (s != 0) { mchunkptr q = align_as_chunk(s->base); while (segment_holds(s, q) && q != m->top && q->head != FENCEPOST_HEAD) { if (!cinuse(q)) used -= chunksize(q); q = next_chunk(q); } s = s->next; } } fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); fprintf(stderr, 
"system bytes = %10lu\n", (unsigned long)(fp)); fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); POSTACTION(m); } } /* ----------------------- Operations on smallbins ----------------------- */ /* Various forms of linking and unlinking are defined as macros. Even the ones for trees, which are very long but have very short typical paths. This is ugly but reduces reliance on inlining support of compilers. */ /* Link a free chunk into a smallbin */ #define insert_small_chunk(M, P, S) {\ bindex_t I = small_index(S);\ mchunkptr B = smallbin_at(M, I);\ mchunkptr F = B;\ assert(S >= MIN_CHUNK_SIZE);\ if (!smallmap_is_marked(M, I))\ mark_smallmap(M, I);\ else if (RTCHECK(ok_address(M, B->fd)))\ F = B->fd;\ else {\ CORRUPTION_ERROR_ACTION(M);\ }\ B->fd = P;\ F->bk = P;\ P->fd = F;\ P->bk = B;\ } /* Unlink a chunk from a smallbin */ #define unlink_small_chunk(M, P, S) {\ mchunkptr F = P->fd;\ mchunkptr B = P->bk;\ bindex_t I = small_index(S);\ assert(P != B);\ assert(P != F);\ assert(chunksize(P) == small_index2size(I));\ if (F == B)\ clear_smallmap(M, I);\ else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\ (B == smallbin_at(M,I) || ok_address(M, B)))) {\ F->bk = B;\ B->fd = F;\ }\ else {\ CORRUPTION_ERROR_ACTION(M);\ }\ } /* Unlink the first chunk from a smallbin */ #define unlink_first_small_chunk(M, B, P, I) {\ mchunkptr F = P->fd;\ assert(P != B);\ assert(P != F);\ assert(chunksize(P) == small_index2size(I));\ if (B == F)\ clear_smallmap(M, I);\ else if (RTCHECK(ok_address(M, F))) {\ B->fd = F;\ F->bk = B;\ }\ else {\ CORRUPTION_ERROR_ACTION(M);\ }\ } /* Replace dv node, binning the old one */ /* Used only when dvsize known to be small */ #define replace_dv(M, P, S) {\ size_t DVS = M->dvsize;\ if (DVS != 0) {\ mchunkptr DV = M->dv;\ assert(is_small(DVS));\ insert_small_chunk(M, DV, DVS);\ }\ M->dvsize = S;\ M->dv = P;\ } /* ------------------------- Operations on trees ------------------------- */ /* Insert chunk into tree */ #define insert_large_chunk(M, X, S) {\ tbinptr* H;\ bindex_t I;\ compute_tree_index(S, I);\ H = treebin_at(M, I);\ X->index = I;\ X->child[0] = X->child[1] = 0;\ if (!treemap_is_marked(M, I)) {\ mark_treemap(M, I);\ *H = X;\ X->parent = (tchunkptr)H;\ X->fd = X->bk = X;\ }\ else {\ tchunkptr T = *H;\ size_t K = S << leftshift_for_tree_index(I);\ for (;;) {\ if (chunksize(T) != S) {\ tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ K <<= 1;\ if (*C != 0)\ T = *C;\ else if (RTCHECK(ok_address(M, C))) {\ *C = X;\ X->parent = T;\ X->fd = X->bk = X;\ break;\ }\ else {\ CORRUPTION_ERROR_ACTION(M);\ break;\ }\ }\ else {\ tchunkptr F = T->fd;\ if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ T->fd = F->bk = X;\ X->fd = F;\ X->bk = T;\ X->parent = 0;\ break;\ }\ else {\ CORRUPTION_ERROR_ACTION(M);\ break;\ }\ }\ }\ }\ } /* Unlink steps: 1. If x is a chained node, unlink it from its same-sized fd/bk links and choose its bk node as its replacement. 2. If x was the last node of its size, but not a leaf node, it must be replaced with a leaf node (not merely one with an open left or right), to make sure that lefts and rights of descendents correspond properly to bit masks. We use the rightmost descendent of x. We could use any other leaf, but this is easy to locate and tends to counteract removal of leftmosts elsewhere, and so keeps paths shorter than minimally guaranteed. This doesn't loop much because on average a node in a tree is near the bottom. 3. 
If x is the base of a chain (i.e., has parent links) relink x's parent and children to x's replacement (or null if none). */ #define unlink_large_chunk(M, X) {\ tchunkptr XP = X->parent;\ tchunkptr R;\ if (X->bk != X) {\ tchunkptr F = X->fd;\ R = X->bk;\ if (RTCHECK(ok_address(M, F))) {\ F->bk = R;\ R->fd = F;\ }\ else {\ CORRUPTION_ERROR_ACTION(M);\ }\ }\ else {\ tchunkptr* RP;\ if (((R = *(RP = &(X->child[1]))) != 0) ||\ ((R = *(RP = &(X->child[0]))) != 0)) {\ tchunkptr* CP;\ while ((*(CP = &(R->child[1])) != 0) ||\ (*(CP = &(R->child[0])) != 0)) {\ R = *(RP = CP);\ }\ if (RTCHECK(ok_address(M, RP)))\ *RP = 0;\ else {\ CORRUPTION_ERROR_ACTION(M);\ }\ }\ }\ if (XP != 0) {\ tbinptr* H = treebin_at(M, X->index);\ if (X == *H) {\ if ((*H = R) == 0) \ clear_treemap(M, X->index);\ }\ else if (RTCHECK(ok_address(M, XP))) {\ if (XP->child[0] == X) \ XP->child[0] = R;\ else \ XP->child[1] = R;\ }\ else\ CORRUPTION_ERROR_ACTION(M);\ if (R != 0) {\ if (RTCHECK(ok_address(M, R))) {\ tchunkptr C0, C1;\ R->parent = XP;\ if ((C0 = X->child[0]) != 0) {\ if (RTCHECK(ok_address(M, C0))) {\ R->child[0] = C0;\ C0->parent = R;\ }\ else\ CORRUPTION_ERROR_ACTION(M);\ }\ if ((C1 = X->child[1]) != 0) {\ if (RTCHECK(ok_address(M, C1))) {\ R->child[1] = C1;\ C1->parent = R;\ }\ else\ CORRUPTION_ERROR_ACTION(M);\ }\ }\ else\ CORRUPTION_ERROR_ACTION(M);\ }\ }\ } /* Relays to large vs small bin operations */ #define insert_chunk(M, P, S)\ if (is_small(S)) insert_small_chunk(M, P, S)\ else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } #define unlink_chunk(M, P, S)\ if (is_small(S)) unlink_small_chunk(M, P, S)\ else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } /* Relays to internal calls to malloc/free from realloc, memalign etc */ #if ONLY_MSPACES #define internal_malloc(m, b) mspace_malloc(m, b) #define internal_free(m, mem) mspace_free(m,mem); #else /* ONLY_MSPACES */ #if MSPACES #define internal_malloc(m, b)\ (m == gm)? dlmalloc(b) : mspace_malloc(m, b) #define internal_free(m, mem)\ if (m == gm) dlfree(mem); else mspace_free(m,mem); #else /* MSPACES */ #define internal_malloc(m, b) dlmalloc(b) #define internal_free(m, mem) dlfree(mem) #endif /* MSPACES */ #endif /* ONLY_MSPACES */ /* ----------------------- Direct-mmapping chunks ----------------------- */ /* Directly mmapped chunks are set up with an offset to the start of the mmapped region stored in the prev_foot field of the chunk. This allows reconstruction of the required argument to MUNMAP when freed, and also allows adjustment of the returned chunk to meet alignment requirements (especially in memalign). There is also enough space allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain the PINUSE bit so frees can be checked. 
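
  As a worked illustration of this layout (a sketch following the
  mmap_alloc code just below; on a 32-bit build SIZE_T_SIZE is 4, so
  MMAP_FOOT_PAD is 16): for a padded request nb, the region size asked
  of the system is

     mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);

  The chunk is placed at mm + offset, with offset = align_offset(chunk2mem(mm)),
  its prev_foot is set to (offset | IS_MMAPPED_BIT), and its usable size is
  psize = mmsize - offset - MMAP_FOOT_PAD.  Two words in the trailing pad
  are written as the fake next chunk: a FENCEPOST_HEAD head followed by a
  zero head, so that the usage checks run at free time still work.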
*/ /* Malloc using mmap */ static void* mmap_alloc(mstate m, size_t nb) { size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); if (mmsize > nb) { /* Check for wrap around 0 */ char* mm = (char*)(DIRECT_MMAP(mmsize)); if (mm != CMFAIL) { size_t offset = align_offset(chunk2mem(mm)); size_t psize = mmsize - offset - MMAP_FOOT_PAD; mchunkptr p = (mchunkptr)(mm + offset); p->prev_foot = offset | IS_MMAPPED_BIT; (p)->head = (psize|CINUSE_BIT); mark_inuse_foot(m, p, psize); chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; if (mm < m->least_addr) m->least_addr = mm; if ((m->footprint += mmsize) > m->max_footprint) m->max_footprint = m->footprint; assert(is_aligned(chunk2mem(p))); check_mmapped_chunk(m, p); return chunk2mem(p); } } return 0; } /* Realloc using mmap */ static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) { size_t oldsize = chunksize(oldp); if (is_small(nb)) /* Can't shrink mmap regions below small size */ return 0; /* Keep old chunk if big enough but not too big */ if (oldsize >= nb + SIZE_T_SIZE && (oldsize - nb) <= (mparams.granularity << 1)) return oldp; else { size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT; size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); char* cp = (char*)CALL_MREMAP((char*)oldp - offset, oldmmsize, newmmsize, 1); if (cp != CMFAIL) { mchunkptr newp = (mchunkptr)(cp + offset); size_t psize = newmmsize - offset - MMAP_FOOT_PAD; newp->head = (psize|CINUSE_BIT); mark_inuse_foot(m, newp, psize); chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; if (cp < m->least_addr) m->least_addr = cp; if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) m->max_footprint = m->footprint; check_mmapped_chunk(m, newp); return newp; } } return 0; } /* -------------------------- mspace management -------------------------- */ /* Initialize top chunk and its size */ static void init_top(mstate m, mchunkptr p, size_t psize) { /* Ensure alignment */ size_t offset = align_offset(chunk2mem(p)); p = (mchunkptr)((char*)p + offset); psize -= offset; m->top = p; m->topsize = psize; p->head = psize | PINUSE_BIT; /* set size of fake trailing chunk holding overhead space only once */ chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; m->trim_check = mparams.trim_threshold; /* reset on each update */ } /* Initialize bins for a new mstate that is otherwise zeroed out */ static void init_bins(mstate m) { /* Establish circular links for smallbins */ bindex_t i; for (i = 0; i < NSMALLBINS; ++i) { sbinptr bin = smallbin_at(m,i); bin->fd = bin->bk = bin; } } #if PROCEED_ON_ERROR /* default corruption action */ static void reset_on_error(mstate m) { int i; ++malloc_corruption_error_count; /* Reinitialize fields to forget about all memory */ m->smallbins = m->treebins = 0; m->dvsize = m->topsize = 0; m->seg.base = 0; m->seg.size = 0; m->seg.next = 0; m->top = m->dv = 0; for (i = 0; i < NTREEBINS; ++i) *treebin_at(m, i) = 0; init_bins(m); } #endif /* PROCEED_ON_ERROR */ /* Allocate chunk and prepend remainder with chunk in successor base. 
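   (In other words: newbase is fresh memory that ends exactly where the
   old segment began.  The request nb is carved from the front of the
   new space at p; the leftover gap q between it and the old first
   chunk is absorbed into top or into the designated victim when
   oldfirst is one of those, and otherwise coalesced with oldfirst if
   that chunk is free and inserted into a bin as an ordinary free
   chunk.)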
*/ static void* prepend_alloc(mstate m, char* newbase, char* oldbase, size_t nb) { mchunkptr p = align_as_chunk(newbase); mchunkptr oldfirst = align_as_chunk(oldbase); size_t psize = (char*)oldfirst - (char*)p; mchunkptr q = chunk_plus_offset(p, nb); size_t qsize = psize - nb; set_size_and_pinuse_of_inuse_chunk(m, p, nb); assert((char*)oldfirst > (char*)q); assert(pinuse(oldfirst)); assert(qsize >= MIN_CHUNK_SIZE); /* consolidate remainder with first chunk of old base */ if (oldfirst == m->top) { size_t tsize = m->topsize += qsize; m->top = q; q->head = tsize | PINUSE_BIT; check_top_chunk(m, q); } else if (oldfirst == m->dv) { size_t dsize = m->dvsize += qsize; m->dv = q; set_size_and_pinuse_of_free_chunk(q, dsize); } else { if (!cinuse(oldfirst)) { size_t nsize = chunksize(oldfirst); unlink_chunk(m, oldfirst, nsize); oldfirst = chunk_plus_offset(oldfirst, nsize); qsize += nsize; } set_free_with_pinuse(q, qsize, oldfirst); insert_chunk(m, q, qsize); check_free_chunk(m, q); } check_malloced_chunk(m, chunk2mem(p), nb); return chunk2mem(p); } /* Add a segment to hold a new noncontiguous region */ static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { /* Determine locations and sizes of segment, fenceposts, old top */ char* old_top = (char*)m->top; msegmentptr oldsp = segment_holding(m, old_top); char* old_end = oldsp->base + oldsp->size; size_t ssize = pad_request(sizeof(struct malloc_segment)); char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); size_t offset = align_offset(chunk2mem(rawsp)); char* asp = rawsp + offset; char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp; mchunkptr sp = (mchunkptr)csp; msegmentptr ss = (msegmentptr)(chunk2mem(sp)); mchunkptr tnext = chunk_plus_offset(sp, ssize); mchunkptr p = tnext; int nfences = 0; /* reset top to new space */ init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); /* Set up segment record */ assert(is_aligned(ss)); set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); *ss = m->seg; /* Push current record */ m->seg.base = tbase; m->seg.size = tsize; m->seg.sflags = mmapped; m->seg.next = ss; /* Insert trailing fenceposts */ for (;;) { mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); p->head = FENCEPOST_HEAD; ++nfences; if ((char*)(&(nextp->head)) < old_end) p = nextp; else break; } assert(nfences >= 2); /* Insert the rest of old top into a bin as an ordinary free chunk */ if (csp != old_top) { mchunkptr q = (mchunkptr)old_top; size_t psize = csp - old_top; mchunkptr tn = chunk_plus_offset(q, psize); set_free_with_pinuse(q, psize, tn); insert_chunk(m, q, psize); } check_top_chunk(m, m->top); } /* -------------------------- System allocation -------------------------- */ /* Get memory from system using MORECORE or MMAP */ static void* sys_alloc(mstate m, size_t nb) { char* tbase = CMFAIL; size_t tsize = 0; flag_t mmap_flag = 0; init_mparams(); /* Directly map large chunks */ if (use_mmap(m) && nb >= mparams.mmap_threshold) { void* mem = mmap_alloc(m, nb); if (mem != 0) return mem; } /* Try getting memory in any of three ways (in most-preferred to least-preferred order): 1. A call to MORECORE that can normally contiguously extend memory. (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or or main space is mmapped or a previous contiguous call failed) 2. A call to MMAP new space (disabled if not HAVE_MMAP). Note that under the default settings, if MORECORE is unable to fulfill a request, and HAVE_MMAP is true, then mmap is used as a noncontiguous system allocator. 
This is a useful backup strategy for systems with holes in address spaces -- in this case sbrk cannot contiguously expand the heap, but mmap may be able to find space. 3. A call to MORECORE that cannot usually contiguously extend memory. (disabled if not HAVE_MORECORE) */ if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { char* br = CMFAIL; msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top); size_t asize = 0; ACQUIRE_MORECORE_LOCK(); if (ss == 0) { /* First time through or recovery */ char* base = (char*)CALL_MORECORE(0); if (base != CMFAIL) { asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE); /* Adjust to end on a page boundary */ if (!is_page_aligned(base)) asize += (page_align((size_t)base) - (size_t)base); /* Can't call MORECORE if size is negative when treated as signed */ if (asize < HALF_MAX_SIZE_T && (br = (char*)(CALL_MORECORE(asize))) == base) { tbase = base; tsize = asize; } } } else { /* Subtract out existing available top space from MORECORE request. */ asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE); /* Use mem here only if it did continuously extend old space */ if (asize < HALF_MAX_SIZE_T && (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) { tbase = br; tsize = asize; } } if (tbase == CMFAIL) { /* Cope with partial failure */ if (br != CMFAIL) { /* Try to use/extend the space we did get */ if (asize < HALF_MAX_SIZE_T && asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) { size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize); if (esize < HALF_MAX_SIZE_T) { char* end = (char*)CALL_MORECORE(esize); if (end != CMFAIL) asize += esize; else { /* Can't use; try to release */ CALL_MORECORE(-asize); br = CMFAIL; } } } } if (br != CMFAIL) { /* Use the space we did get */ tbase = br; tsize = asize; } else disable_contiguous(m); /* Don't try contiguous path in the future */ } RELEASE_MORECORE_LOCK(); } if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE; size_t rsize = granularity_align(req); if (rsize > nb) { /* Fail if wraps around zero */ char* mp = (char*)(CALL_MMAP(rsize)); if (mp != CMFAIL) { tbase = mp; tsize = rsize; mmap_flag = IS_MMAPPED_BIT; } } } if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE); if (asize < HALF_MAX_SIZE_T) { char* br = CMFAIL; char* end = CMFAIL; ACQUIRE_MORECORE_LOCK(); br = (char*)(CALL_MORECORE(asize)); end = (char*)(CALL_MORECORE(0)); RELEASE_MORECORE_LOCK(); if (br != CMFAIL && end != CMFAIL && br < end) { size_t ssize = end - br; if (ssize > nb + TOP_FOOT_SIZE) { tbase = br; tsize = ssize; } } } } if (tbase != CMFAIL) { if ((m->footprint += tsize) > m->max_footprint) m->max_footprint = m->footprint; if (!is_initialized(m)) { /* first-time initialization */ m->seg.base = m->least_addr = tbase; m->seg.size = tsize; m->seg.sflags = mmap_flag; m->magic = mparams.magic; init_bins(m); if (is_global(m)) init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); else { /* Offset top by embedded malloc_state */ mchunkptr mn = next_chunk(mem2chunk(m)); init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); } } else { /* Try to merge with an existing segment */ msegmentptr sp = &m->seg; while (sp != 0 && tbase != sp->base + sp->size) sp = sp->next; if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & IS_MMAPPED_BIT) == mmap_flag && segment_holds(sp, m->top)) { /* append */ sp->size += tsize; init_top(m, m->top, m->topsize + tsize); } else { if 
(tbase < m->least_addr) m->least_addr = tbase; sp = &m->seg; while (sp != 0 && sp->base != tbase + tsize) sp = sp->next; if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) { char* oldbase = sp->base; sp->base = tbase; sp->size += tsize; return prepend_alloc(m, tbase, oldbase, nb); } else add_segment(m, tbase, tsize, mmap_flag); } } if (nb < m->topsize) { /* Allocate from new or extended top space */ size_t rsize = m->topsize -= nb; mchunkptr p = m->top; mchunkptr r = m->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; set_size_and_pinuse_of_inuse_chunk(m, p, nb); check_top_chunk(m, m->top); check_malloced_chunk(m, chunk2mem(p), nb); return chunk2mem(p); } } MALLOC_FAILURE_ACTION; return 0; } /* ----------------------- system deallocation -------------------------- */ /* Unmap and unlink any mmapped segments that don't contain used chunks */ static size_t release_unused_segments(mstate m) { size_t released = 0; msegmentptr pred = &m->seg; msegmentptr sp = pred->next; while (sp != 0) { char* base = sp->base; size_t size = sp->size; msegmentptr next = sp->next; if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { mchunkptr p = align_as_chunk(base); size_t psize = chunksize(p); /* Can unmap if first chunk holds entire segment and not pinned */ if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { tchunkptr tp = (tchunkptr)p; assert(segment_holds(sp, (char*)sp)); if (p == m->dv) { m->dv = 0; m->dvsize = 0; } else { unlink_large_chunk(m, tp); } if (CALL_MUNMAP(base, size) == 0) { released += size; m->footprint -= size; /* unlink obsoleted record */ sp = pred; sp->next = next; } else { /* back out if cannot unmap */ insert_large_chunk(m, tp, psize); } } } pred = sp; sp = next; } return released; } static int sys_trim(mstate m, size_t pad) { size_t released = 0; if (pad < MAX_REQUEST && is_initialized(m)) { pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ if (m->topsize > pad) { /* Shrink top space in granularity-size units, keeping at least one */ size_t unit = mparams.granularity; size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - SIZE_T_ONE) * unit; msegmentptr sp = segment_holding(m, (char*)m->top); if (!is_extern_segment(sp)) { if (is_mmapped_segment(sp)) { if (HAVE_MMAP && sp->size >= extra && !has_segment_link(m, sp)) { /* can't shrink if pinned */ size_t newsize = sp->size - extra; /* Prefer mremap, fall back to munmap */ if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { released = extra; } } } else if (HAVE_MORECORE) { if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; ACQUIRE_MORECORE_LOCK(); { /* Make sure end of memory is where we last set it. */ char* old_br = (char*)(CALL_MORECORE(0)); if (old_br == sp->base + sp->size) { char* rel_br = (char*)(CALL_MORECORE(-extra)); char* new_br = (char*)(CALL_MORECORE(0)); if (rel_br != CMFAIL && new_br < old_br) released = old_br - new_br; } } RELEASE_MORECORE_LOCK(); } } if (released != 0) { sp->size -= released; m->footprint -= released; init_top(m, m->top, m->topsize - released); check_top_chunk(m, m->top); } } /* Unmap any unused mmapped segments */ if (HAVE_MMAP) released += release_unused_segments(m); /* On failure, disable autotrim to avoid repeated failed future calls */ if (released == 0) m->trim_check = MAX_SIZE_T; } return (released != 0)? 
1 : 0; } /* ---------------------------- malloc support --------------------------- */ /* allocate a large request from the best fitting chunk in a treebin */ static void* tmalloc_large(mstate m, size_t nb) { tchunkptr v = 0; size_t rsize = -nb; /* Unsigned negation */ tchunkptr t; bindex_t idx; compute_tree_index(nb, idx); if ((t = *treebin_at(m, idx)) != 0) { /* Traverse tree for this bin looking for node with size == nb */ size_t sizebits = nb << leftshift_for_tree_index(idx); tchunkptr rst = 0; /* The deepest untaken right subtree */ for (;;) { tchunkptr rt; size_t trem = chunksize(t) - nb; if (trem < rsize) { v = t; if ((rsize = trem) == 0) break; } rt = t->child[1]; t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; if (rt != 0 && rt != t) rst = rt; if (t == 0) { t = rst; /* set t to least subtree holding sizes > nb */ break; } sizebits <<= 1; } } if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; if (leftbits != 0) { bindex_t i; binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); t = *treebin_at(m, i); } } while (t != 0) { /* find smallest of tree or subtree */ size_t trem = chunksize(t) - nb; if (trem < rsize) { rsize = trem; v = t; } t = leftmost_child(t); } /* If dv is a better fit, return 0 so malloc will use it */ if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { if (RTCHECK(ok_address(m, v))) { /* split */ mchunkptr r = chunk_plus_offset(v, nb); assert(chunksize(v) == rsize + nb); if (RTCHECK(ok_next(v, r))) { unlink_large_chunk(m, v); if (rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(m, v, (rsize + nb)); else { set_size_and_pinuse_of_inuse_chunk(m, v, nb); set_size_and_pinuse_of_free_chunk(r, rsize); insert_chunk(m, r, rsize); } return chunk2mem(v); } } CORRUPTION_ERROR_ACTION(m); } return 0; } /* allocate a small request from the best fitting chunk in a treebin */ static void* tmalloc_small(mstate m, size_t nb) { tchunkptr t, v; size_t rsize; bindex_t i; binmap_t leastbit = least_bit(m->treemap); compute_bit2idx(leastbit, i); v = t = *treebin_at(m, i); rsize = chunksize(t) - nb; while ((t = leftmost_child(t)) != 0) { size_t trem = chunksize(t) - nb; if (trem < rsize) { rsize = trem; v = t; } } if (RTCHECK(ok_address(m, v))) { mchunkptr r = chunk_plus_offset(v, nb); assert(chunksize(v) == rsize + nb); if (RTCHECK(ok_next(v, r))) { unlink_large_chunk(m, v); if (rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(m, v, (rsize + nb)); else { set_size_and_pinuse_of_inuse_chunk(m, v, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(m, r, rsize); } return chunk2mem(v); } } CORRUPTION_ERROR_ACTION(m); return 0; } /* --------------------------- realloc support --------------------------- */ static void* internal_realloc(mstate m, void* oldmem, size_t bytes) { if (bytes >= MAX_REQUEST) { MALLOC_FAILURE_ACTION; return 0; } if (!PREACTION(m)) { mchunkptr oldp = mem2chunk(oldmem); size_t oldsize = chunksize(oldp); mchunkptr next = chunk_plus_offset(oldp, oldsize); mchunkptr newp = 0; void* extra = 0; /* Try to either shrink or extend into top. 
Else malloc-copy-free */ if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) && ok_next(oldp, next) && ok_pinuse(next))) { size_t nb = request2size(bytes); if (is_mmapped(oldp)) newp = mmap_resize(m, oldp, nb); else if (oldsize >= nb) { /* already big enough */ size_t rsize = oldsize - nb; newp = oldp; if (rsize >= MIN_CHUNK_SIZE) { mchunkptr remainder = chunk_plus_offset(newp, nb); set_inuse(m, newp, nb); set_inuse(m, remainder, rsize); extra = chunk2mem(remainder); } } else if (next == m->top && oldsize + m->topsize > nb) { /* Expand into top */ size_t newsize = oldsize + m->topsize; size_t newtopsize = newsize - nb; mchunkptr newtop = chunk_plus_offset(oldp, nb); set_inuse(m, oldp, nb); newtop->head = newtopsize |PINUSE_BIT; m->top = newtop; m->topsize = newtopsize; newp = oldp; } } else { USAGE_ERROR_ACTION(m, oldmem); POSTACTION(m); return 0; } POSTACTION(m); if (newp != 0) { if (extra != 0) { internal_free(m, extra); } check_inuse_chunk(m, newp); return chunk2mem(newp); } else { void* newmem = internal_malloc(m, bytes); if (newmem != 0) { size_t oc = oldsize - overhead_for(oldp); memcpy(newmem, oldmem, (oc < bytes)? oc : bytes); internal_free(m, oldmem); } return newmem; } } return 0; } /* --------------------------- memalign support -------------------------- */ static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */ return internal_malloc(m, bytes); if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ alignment = MIN_CHUNK_SIZE; if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ size_t a = MALLOC_ALIGNMENT << 1; while (a < alignment) a <<= 1; alignment = a; } if (bytes >= MAX_REQUEST - alignment) { if (m != 0) { /* Test isn't needed but avoids compiler warning */ MALLOC_FAILURE_ACTION; } } else { size_t nb = request2size(bytes); size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; char* mem = (char*)internal_malloc(m, req); if (mem != 0) { void* leader = 0; void* trailer = 0; mchunkptr p = mem2chunk(mem); if (PREACTION(m)) return 0; if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */ /* Find an aligned spot inside chunk. Since we need to give back leading space in a chunk of at least MIN_CHUNK_SIZE, if the first calculation places us at a spot with less than MIN_CHUNK_SIZE leader, we can move to the next aligned spot. We've allocated enough total room so that this is always possible. */ char* br = (char*)mem2chunk((size_t)(((size_t)(mem + alignment - SIZE_T_ONE)) & -alignment)); char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? 
br : br+alignment; mchunkptr newp = (mchunkptr)pos; size_t leadsize = pos - (char*)(p); size_t newsize = chunksize(p) - leadsize; if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ newp->prev_foot = p->prev_foot + leadsize; newp->head = (newsize|CINUSE_BIT); } else { /* Otherwise, give back leader, use the rest */ set_inuse(m, newp, newsize); set_inuse(m, p, leadsize); leader = chunk2mem(p); } p = newp; } /* Give back spare room at the end */ if (!is_mmapped(p)) { size_t size = chunksize(p); if (size > nb + MIN_CHUNK_SIZE) { size_t remainder_size = size - nb; mchunkptr remainder = chunk_plus_offset(p, nb); set_inuse(m, p, nb); set_inuse(m, remainder, remainder_size); trailer = chunk2mem(remainder); } } assert (chunksize(p) >= nb); assert((((size_t)(chunk2mem(p))) % alignment) == 0); check_inuse_chunk(m, p); POSTACTION(m); if (leader != 0) { internal_free(m, leader); } if (trailer != 0) { internal_free(m, trailer); } return chunk2mem(p); } } return 0; } /* ------------------------ comalloc/coalloc support --------------------- */ static void** ialloc(mstate m, size_t n_elements, size_t* sizes, int opts, void* chunks[]) { /* This provides common support for independent_X routines, handling all of the combinations that can result. The opts arg has: bit 0 set if all elements are same size (using sizes[0]) bit 1 set if elements should be zeroed */ size_t element_size; /* chunksize of each element, if all same */ size_t contents_size; /* total size of elements */ size_t array_size; /* request size of pointer array */ void* mem; /* malloced aggregate space */ mchunkptr p; /* corresponding chunk */ size_t remainder_size; /* remaining bytes while splitting */ void** marray; /* either "chunks" or malloced ptr array */ mchunkptr array_chunk; /* chunk for malloced ptr array */ flag_t was_enabled; /* to disable mmap */ size_t size; size_t i; /* compute array length, if needed */ if (chunks != 0) { if (n_elements == 0) return chunks; /* nothing to do */ marray = chunks; array_size = 0; } else { /* if empty req, must still return chunk representing empty array */ if (n_elements == 0) return (void**)internal_malloc(m, 0); marray = 0; array_size = request2size(n_elements * (sizeof(void*))); } /* compute total element size */ if (opts & 0x1) { /* all-same-size */ element_size = request2size(*sizes); contents_size = n_elements * element_size; } else { /* add up all the sizes */ element_size = 0; contents_size = 0; for (i = 0; i != n_elements; ++i) contents_size += request2size(sizes[i]); } size = contents_size + array_size; /* Allocate the aggregate chunk. First disable direct-mmapping so malloc won't use it, since we would not be able to later free/realloc space internal to a segregated mmap region. 
*/ was_enabled = use_mmap(m); disable_mmap(m); mem = internal_malloc(m, size - CHUNK_OVERHEAD); if (was_enabled) enable_mmap(m); if (mem == 0) return 0; if (PREACTION(m)) return 0; p = mem2chunk(mem); remainder_size = chunksize(p); assert(!is_mmapped(p)); if (opts & 0x2) { /* optionally clear the elements */ memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); } /* If not provided, allocate the pointer array as final part of chunk */ if (marray == 0) { size_t array_chunk_size; array_chunk = chunk_plus_offset(p, contents_size); array_chunk_size = remainder_size - contents_size; marray = (void**) (chunk2mem(array_chunk)); set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); remainder_size = contents_size; } /* split out elements */ for (i = 0; ; ++i) { marray[i] = chunk2mem(p); if (i != n_elements-1) { if (element_size != 0) size = element_size; else size = request2size(sizes[i]); remainder_size -= size; set_size_and_pinuse_of_inuse_chunk(m, p, size); p = chunk_plus_offset(p, size); } else { /* the final element absorbs any overallocation slop */ set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); break; } } #if DEBUG if (marray != chunks) { /* final element must have exactly exhausted chunk */ if (element_size != 0) { assert(remainder_size == element_size); } else { assert(remainder_size == request2size(sizes[i])); } check_inuse_chunk(m, mem2chunk(marray)); } for (i = 0; i != n_elements; ++i) check_inuse_chunk(m, mem2chunk(marray[i])); #endif /* DEBUG */ POSTACTION(m); return marray; } /* -------------------------- public routines ---------------------------- */ #if !ONLY_MSPACES void* dlmalloc(size_t bytes) { /* Basic algorithm: If a small request (< 256 bytes minus per-chunk overhead): 1. If one exists, use a remainderless chunk in associated smallbin. (Remainderless means that there are too few excess bytes to represent as a chunk.) 2. If it is big enough, use the dv chunk, which is normally the chunk adjacent to the one used for the most recent small request. 3. If one exists, split the smallest available chunk in a bin, saving remainder in dv. 4. If it is big enough, use the top chunk. 5. If available, get memory from system and use it Otherwise, for a large request: 1. Find the smallest available binned chunk that fits, and use it if it is better fitting than dv chunk, splitting if necessary. 2. If better fitting than any binned chunk, use the dv chunk. 3. If it is big enough, use the top chunk. 4. If request size >= mmap threshold, try to directly mmap this chunk. 5. If available, get memory from system and use it The ugly goto's here ensure that postaction occurs along all paths. */ if (!PREACTION(gm)) { void* mem; size_t nb; if (bytes <= MAX_SMALL_REQUEST) { bindex_t idx; binmap_t smallbits; nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); idx = small_index(nb); smallbits = gm->smallmap >> idx; if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
*/ mchunkptr b, p; idx += ~smallbits & 1; /* Uses next bin if idx empty */ b = smallbin_at(gm, idx); p = b->fd; assert(chunksize(p) == small_index2size(idx)); unlink_first_small_chunk(gm, b, p, idx); set_inuse_and_pinuse(gm, p, small_index2size(idx)); mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; } else if (nb > gm->dvsize) { if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ mchunkptr b, p, r; size_t rsize; bindex_t i; binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); b = smallbin_at(gm, i); p = b->fd; assert(chunksize(p) == small_index2size(i)); unlink_first_small_chunk(gm, b, p, i); rsize = small_index2size(i) - nb; /* Fit here cannot be remainderless if 4byte sizes */ if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(gm, p, small_index2size(i)); else { set_size_and_pinuse_of_inuse_chunk(gm, p, nb); r = chunk_plus_offset(p, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(gm, r, rsize); } mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; } else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { check_malloced_chunk(gm, mem, nb); goto postaction; } } } else if (bytes >= MAX_REQUEST) nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ else { nb = pad_request(bytes); if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { check_malloced_chunk(gm, mem, nb); goto postaction; } } if (nb <= gm->dvsize) { size_t rsize = gm->dvsize - nb; mchunkptr p = gm->dv; if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ mchunkptr r = gm->dv = chunk_plus_offset(p, nb); gm->dvsize = rsize; set_size_and_pinuse_of_free_chunk(r, rsize); set_size_and_pinuse_of_inuse_chunk(gm, p, nb); } else { /* exhaust dv */ size_t dvs = gm->dvsize; gm->dvsize = 0; gm->dv = 0; set_inuse_and_pinuse(gm, p, dvs); } mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; } else if (nb < gm->topsize) { /* Split top */ size_t rsize = gm->topsize -= nb; mchunkptr p = gm->top; mchunkptr r = gm->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; set_size_and_pinuse_of_inuse_chunk(gm, p, nb); mem = chunk2mem(p); check_top_chunk(gm, gm->top); check_malloced_chunk(gm, mem, nb); goto postaction; } mem = sys_alloc(gm, nb); postaction: POSTACTION(gm); return mem; } return 0; } void dlfree(void* mem) { /* Consolidate freed chunks with preceeding or succeeding bordering free chunks, if they exist, and then place in a bin. Intermixed with special cases for top, dv, mmapped chunks, and usage errors. 
*/ if (mem != 0) { mchunkptr p = mem2chunk(mem); #if FOOTERS mstate fm = get_mstate_for(p); if (!ok_magic(fm)) { USAGE_ERROR_ACTION(fm, p); return; } #else /* FOOTERS */ #define fm gm #endif /* FOOTERS */ if (!PREACTION(fm)) { check_inuse_chunk(fm, p); if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { size_t psize = chunksize(p); mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { size_t prevsize = p->prev_foot; if ((prevsize & IS_MMAPPED_BIT) != 0) { prevsize &= ~IS_MMAPPED_BIT; psize += prevsize + MMAP_FOOT_PAD; if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) fm->footprint -= psize; goto postaction; } else { mchunkptr prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ if (p != fm->dv) { unlink_chunk(fm, p, prevsize); } else if ((next->head & INUSE_BITS) == INUSE_BITS) { fm->dvsize = psize; set_free_with_pinuse(p, psize, next); goto postaction; } } else goto erroraction; } } if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { if (!cinuse(next)) { /* consolidate forward */ if (next == fm->top) { size_t tsize = fm->topsize += psize; fm->top = p; p->head = tsize | PINUSE_BIT; if (p == fm->dv) { fm->dv = 0; fm->dvsize = 0; } if (should_trim(fm, tsize)) sys_trim(fm, 0); goto postaction; } else if (next == fm->dv) { size_t dsize = fm->dvsize += psize; fm->dv = p; set_size_and_pinuse_of_free_chunk(p, dsize); goto postaction; } else { size_t nsize = chunksize(next); psize += nsize; unlink_chunk(fm, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == fm->dv) { fm->dvsize = psize; goto postaction; } } } else set_free_with_pinuse(p, psize, next); insert_chunk(fm, p, psize); check_free_chunk(fm, p); goto postaction; } } erroraction: USAGE_ERROR_ACTION(fm, p); postaction: POSTACTION(fm); } } #if !FOOTERS #undef fm #endif /* FOOTERS */ } void* dlcalloc(size_t n_elements, size_t elem_size) { void* mem; size_t req = 0; if (n_elements != 0) { req = n_elements * elem_size; if (((n_elements | elem_size) & ~(size_t)0xffff) && (req / n_elements != elem_size)) req = MAX_SIZE_T; /* force downstream failure on overflow */ } mem = dlmalloc(req); if (mem != 0 && calloc_must_clear(mem2chunk(mem))) memset(mem, 0, req); return mem; } void* dlrealloc(void* oldmem, size_t bytes) { if (oldmem == 0) return dlmalloc(bytes); #ifdef REALLOC_ZERO_BYTES_FREES if (bytes == 0) { dlfree(oldmem); return 0; } #endif /* REALLOC_ZERO_BYTES_FREES */ else { #if ! 
FOOTERS mstate m = gm; #else /* FOOTERS */ mstate m = get_mstate_for(mem2chunk(oldmem)); if (!ok_magic(m)) { USAGE_ERROR_ACTION(m, oldmem); return 0; } #endif /* FOOTERS */ return internal_realloc(m, oldmem, bytes); } } void* dlmemalign(size_t alignment, size_t bytes) { return internal_memalign(gm, alignment, bytes); } void** dlindependent_calloc(size_t n_elements, size_t elem_size, void* chunks[]) { size_t sz = elem_size; /* serves as 1-element array */ return ialloc(gm, n_elements, &sz, 3, chunks); } void** dlindependent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]) { return ialloc(gm, n_elements, sizes, 0, chunks); } void* dlvalloc(size_t bytes) { size_t pagesz; init_mparams(); pagesz = mparams.page_size; return dlmemalign(pagesz, bytes); } void* dlpvalloc(size_t bytes) { size_t pagesz; init_mparams(); pagesz = mparams.page_size; return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); } int dlmalloc_trim(size_t pad) { int result = 0; if (!PREACTION(gm)) { result = sys_trim(gm, pad); POSTACTION(gm); } return result; } size_t dlmalloc_footprint(void) { return gm->footprint; } size_t dlmalloc_max_footprint(void) { return gm->max_footprint; } #if !NO_MALLINFO struct mallinfo dlmallinfo(void) { return internal_mallinfo(gm); } #endif /* NO_MALLINFO */ void dlmalloc_stats() { internal_malloc_stats(gm); } size_t dlmalloc_usable_size(void* mem) { if (mem != 0) { mchunkptr p = mem2chunk(mem); if (cinuse(p)) return chunksize(p) - overhead_for(p); } return 0; } int dlmallopt(int param_number, int value) { return change_mparam(param_number, value); } #endif /* !ONLY_MSPACES */ /* ----------------------------- user mspaces ---------------------------- */ #if MSPACES static mstate init_user_mstate(char* tbase, size_t tsize) { size_t msize = pad_request(sizeof(struct malloc_state)); mchunkptr mn; mchunkptr msp = align_as_chunk(tbase); mstate m = (mstate)(chunk2mem(msp)); memset(m, 0, msize); INITIAL_LOCK(&m->mutex); msp->head = (msize|PINUSE_BIT|CINUSE_BIT); m->seg.base = m->least_addr = tbase; m->seg.size = m->footprint = m->max_footprint = tsize; m->magic = mparams.magic; m->mflags = mparams.default_mflags; disable_contiguous(m); init_bins(m); mn = next_chunk(mem2chunk(m)); init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); check_top_chunk(m, m->top); return m; } mspace create_mspace(size_t capacity, int locked) { mstate m = 0; size_t msize = pad_request(sizeof(struct malloc_state)); init_mparams(); /* Ensure pagesize etc initialized */ if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { size_t rs = ((capacity == 0)? 
mparams.granularity : (capacity + TOP_FOOT_SIZE + msize)); size_t tsize = granularity_align(rs); char* tbase = (char*)(CALL_MMAP(tsize)); if (tbase != CMFAIL) { m = init_user_mstate(tbase, tsize); m->seg.sflags = IS_MMAPPED_BIT; set_lock(m, locked); } } return (mspace)m; } mspace create_mspace_with_base(void* base, size_t capacity, int locked) { mstate m = 0; size_t msize = pad_request(sizeof(struct malloc_state)); init_mparams(); /* Ensure pagesize etc initialized */ if (capacity > msize + TOP_FOOT_SIZE && capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { m = init_user_mstate((char*)base, capacity); m->seg.sflags = EXTERN_BIT; set_lock(m, locked); } return (mspace)m; } size_t destroy_mspace(mspace msp) { size_t freed = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { msegmentptr sp = &ms->seg; while (sp != 0) { char* base = sp->base; size_t size = sp->size; flag_t flag = sp->sflags; sp = sp->next; if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) && CALL_MUNMAP(base, size) == 0) freed += size; } } else { USAGE_ERROR_ACTION(ms,ms); } return freed; } /* mspace versions of routines are near-clones of the global versions. This is not so nice but better than the alternatives. */ void* mspace_malloc(mspace msp, size_t bytes) { mstate ms = (mstate)msp; if (!ok_magic(ms)) { USAGE_ERROR_ACTION(ms,ms); return 0; } if (!PREACTION(ms)) { void* mem; size_t nb; if (bytes <= MAX_SMALL_REQUEST) { bindex_t idx; binmap_t smallbits; nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); idx = small_index(nb); smallbits = ms->smallmap >> idx; if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ mchunkptr b, p; idx += ~smallbits & 1; /* Uses next bin if idx empty */ b = smallbin_at(ms, idx); p = b->fd; assert(chunksize(p) == small_index2size(idx)); unlink_first_small_chunk(ms, b, p, idx); set_inuse_and_pinuse(ms, p, small_index2size(idx)); mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; } else if (nb > ms->dvsize) { if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ mchunkptr b, p, r; size_t rsize; bindex_t i; binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); b = smallbin_at(ms, i); p = b->fd; assert(chunksize(p) == small_index2size(i)); unlink_first_small_chunk(ms, b, p, i); rsize = small_index2size(i) - nb; /* Fit here cannot be remainderless if 4byte sizes */ if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(ms, p, small_index2size(i)); else { set_size_and_pinuse_of_inuse_chunk(ms, p, nb); r = chunk_plus_offset(p, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(ms, r, rsize); } mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { check_malloced_chunk(ms, mem, nb); goto postaction; } } } else if (bytes >= MAX_REQUEST) nb = MAX_SIZE_T; /* Too big to allocate. 
Force failure (in sys alloc) */ else { nb = pad_request(bytes); if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { check_malloced_chunk(ms, mem, nb); goto postaction; } } if (nb <= ms->dvsize) { size_t rsize = ms->dvsize - nb; mchunkptr p = ms->dv; if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ mchunkptr r = ms->dv = chunk_plus_offset(p, nb); ms->dvsize = rsize; set_size_and_pinuse_of_free_chunk(r, rsize); set_size_and_pinuse_of_inuse_chunk(ms, p, nb); } else { /* exhaust dv */ size_t dvs = ms->dvsize; ms->dvsize = 0; ms->dv = 0; set_inuse_and_pinuse(ms, p, dvs); } mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; } else if (nb < ms->topsize) { /* Split top */ size_t rsize = ms->topsize -= nb; mchunkptr p = ms->top; mchunkptr r = ms->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; set_size_and_pinuse_of_inuse_chunk(ms, p, nb); mem = chunk2mem(p); check_top_chunk(ms, ms->top); check_malloced_chunk(ms, mem, nb); goto postaction; } mem = sys_alloc(ms, nb); postaction: POSTACTION(ms); return mem; } return 0; } void mspace_free(mspace msp, void* mem) { if (mem != 0) { mchunkptr p = mem2chunk(mem); #if FOOTERS mstate fm = get_mstate_for(p); #else /* FOOTERS */ mstate fm = (mstate)msp; #endif /* FOOTERS */ if (!ok_magic(fm)) { USAGE_ERROR_ACTION(fm, p); return; } if (!PREACTION(fm)) { check_inuse_chunk(fm, p); if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { size_t psize = chunksize(p); mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { size_t prevsize = p->prev_foot; if ((prevsize & IS_MMAPPED_BIT) != 0) { prevsize &= ~IS_MMAPPED_BIT; psize += prevsize + MMAP_FOOT_PAD; if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) fm->footprint -= psize; goto postaction; } else { mchunkptr prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ if (p != fm->dv) { unlink_chunk(fm, p, prevsize); } else if ((next->head & INUSE_BITS) == INUSE_BITS) { fm->dvsize = psize; set_free_with_pinuse(p, psize, next); goto postaction; } } else goto erroraction; } } if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { if (!cinuse(next)) { /* consolidate forward */ if (next == fm->top) { size_t tsize = fm->topsize += psize; fm->top = p; p->head = tsize | PINUSE_BIT; if (p == fm->dv) { fm->dv = 0; fm->dvsize = 0; } if (should_trim(fm, tsize)) sys_trim(fm, 0); goto postaction; } else if (next == fm->dv) { size_t dsize = fm->dvsize += psize; fm->dv = p; set_size_and_pinuse_of_free_chunk(p, dsize); goto postaction; } else { size_t nsize = chunksize(next); psize += nsize; unlink_chunk(fm, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == fm->dv) { fm->dvsize = psize; goto postaction; } } } else set_free_with_pinuse(p, psize, next); insert_chunk(fm, p, psize); check_free_chunk(fm, p); goto postaction; } } erroraction: USAGE_ERROR_ACTION(fm, p); postaction: POSTACTION(fm); } } } void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { void* mem; size_t req = 0; mstate ms = (mstate)msp; if (!ok_magic(ms)) { USAGE_ERROR_ACTION(ms,ms); return 0; } if (n_elements != 0) { req = n_elements * elem_size; if (((n_elements | elem_size) & ~(size_t)0xffff) && (req / n_elements != elem_size)) req = MAX_SIZE_T; /* force downstream failure on overflow */ } mem = internal_malloc(ms, req); if (mem != 0 && calloc_must_clear(mem2chunk(mem))) memset(mem, 0, req); return mem; } void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { if (oldmem == 0) return mspace_malloc(msp, 
bytes); #ifdef REALLOC_ZERO_BYTES_FREES if (bytes == 0) { mspace_free(msp, oldmem); return 0; } #endif /* REALLOC_ZERO_BYTES_FREES */ else { #if FOOTERS mchunkptr p = mem2chunk(oldmem); mstate ms = get_mstate_for(p); #else /* FOOTERS */ mstate ms = (mstate)msp; #endif /* FOOTERS */ if (!ok_magic(ms)) { USAGE_ERROR_ACTION(ms,ms); return 0; } return internal_realloc(ms, oldmem, bytes); } } void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { mstate ms = (mstate)msp; if (!ok_magic(ms)) { USAGE_ERROR_ACTION(ms,ms); return 0; } return internal_memalign(ms, alignment, bytes); } void** mspace_independent_calloc(mspace msp, size_t n_elements, size_t elem_size, void* chunks[]) { size_t sz = elem_size; /* serves as 1-element array */ mstate ms = (mstate)msp; if (!ok_magic(ms)) { USAGE_ERROR_ACTION(ms,ms); return 0; } return ialloc(ms, n_elements, &sz, 3, chunks); } void** mspace_independent_comalloc(mspace msp, size_t n_elements, size_t sizes[], void* chunks[]) { mstate ms = (mstate)msp; if (!ok_magic(ms)) { USAGE_ERROR_ACTION(ms,ms); return 0; } return ialloc(ms, n_elements, sizes, 0, chunks); } int mspace_trim(mspace msp, size_t pad) { int result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { if (!PREACTION(ms)) { result = sys_trim(ms, pad); POSTACTION(ms); } } else { USAGE_ERROR_ACTION(ms,ms); } return result; } void mspace_malloc_stats(mspace msp) { mstate ms = (mstate)msp; if (ok_magic(ms)) { internal_malloc_stats(ms); } else { USAGE_ERROR_ACTION(ms,ms); } } size_t mspace_footprint(mspace msp) { size_t result; mstate ms = (mstate)msp; if (ok_magic(ms)) { result = ms->footprint; } USAGE_ERROR_ACTION(ms,ms); return result; } size_t mspace_max_footprint(mspace msp) { size_t result; mstate ms = (mstate)msp; if (ok_magic(ms)) { result = ms->max_footprint; } USAGE_ERROR_ACTION(ms,ms); return result; } #if !NO_MALLINFO struct mallinfo mspace_mallinfo(mspace msp) { mstate ms = (mstate)msp; if (!ok_magic(ms)) { USAGE_ERROR_ACTION(ms,ms); } return internal_mallinfo(ms); } #endif /* NO_MALLINFO */ int mspace_mallopt(int param_number, int value) { return change_mparam(param_number, value); } #endif /* MSPACES */ /* -------------------- Alternative MORECORE functions ------------------- */ /* Guidelines for creating a custom version of MORECORE: * For best performance, MORECORE should allocate in multiples of pagesize. * MORECORE may allocate more memory than requested. (Or even less, but this will usually result in a malloc failure.) * MORECORE must not allocate memory when given argument zero, but instead return one past the end address of memory from previous nonzero call. * For best performance, consecutive calls to MORECORE with positive arguments should return increasing addresses, indicating that space has been contiguously extended. * Even though consecutive calls to MORECORE need not return contiguous addresses, it must be OK for malloc'ed chunks to span multiple regions in those cases where they do happen to be contiguous. * MORECORE need not handle negative arguments -- it may instead just return MFAIL when given negative arguments. Negative arguments are always multiples of pagesize. MORECORE must not misinterpret negative args as large positive unsigned args. You can suppress all such calls from even occurring by defining MORECORE_CANNOT_TRIM, As an example alternative MORECORE, here is a custom allocator kindly contributed for pre-OSX macOS. It uses virtually but not necessarily physically contiguous non-paged memory (locked in, present and won't get swapped out). 
You can use it by uncommenting this section, adding some #includes, and setting up the appropriate defines above: #define MORECORE osMoreCore There is also a shutdown routine that should somehow be called for cleanup upon program exit. #define MAX_POOL_ENTRIES 100 #define MINIMUM_MORECORE_SIZE (64 * 1024U) static int next_os_pool; void *our_os_pools[MAX_POOL_ENTRIES]; void *osMoreCore(int size) { void *ptr = 0; static void *sbrk_top = 0; if (size > 0) { if (size < MINIMUM_MORECORE_SIZE) size = MINIMUM_MORECORE_SIZE; if (CurrentExecutionLevel() == kTaskLevel) ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); if (ptr == 0) { return (void *) MFAIL; } // save ptrs so they can be freed during cleanup our_os_pools[next_os_pool] = ptr; next_os_pool++; ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); sbrk_top = (char *) ptr + size; return ptr; } else if (size < 0) { // we don't currently support shrink behavior return (void *) MFAIL; } else { return sbrk_top; } } // cleanup any allocated memory pools // called as last thing before shutting down driver void osCleanupMem(void) { void **ptr; for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) if (*ptr) { PoolDeallocate(*ptr); *ptr = 0; } } */ /* ----------------------------------------------------------------------- History: V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee) * Add max_footprint functions * Ensure all appropriate literals are size_t * Fix conditional compilation problem for some #define settings * Avoid concatenating segments with the one provided in create_mspace_with_base * Rename some variables to avoid compiler shadowing warnings * Use explicit lock initialization. * Better handling of sbrk interference. * Simplify and fix segment insertion, trimming and mspace_destroy * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x * Thanks especially to Dennis Flanagan for help on these. V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee) * Fix memalign brace error. V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee) * Fix improper #endif nesting in C++ * Add explicit casts needed for C++ V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee) * Use trees for large bins * Support mspaces * Use segments to unify sbrk-based and mmap-based system allocation, removing need for emulation on most platforms without sbrk. * Default safety checks * Optional footer checks. Thanks to William Robertson for the idea. * Internal code refactoring * Incorporate suggestions and platform-specific changes. Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas, Aaron Bachmann, Emery Berger, and others. * Speed up non-fastbin processing enough to remove fastbins. * Remove useless cfree() to avoid conflicts with other apps. * Remove internal memcpy, memset. Compilers handle builtins better. * Remove some options that no one ever used and rename others. V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee) * Fix malloc_state bitmap array misdeclaration V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee) * Allow tuning of FIRST_SORTED_BIN_SIZE * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte. * Better detection and support for non-contiguousness of MORECORE. Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger * Bypass most of malloc if no frees. Thanks To Emery Berger. * Fix freeing of old top non-contiguous chunk im sysmalloc. * Raised default trim and map thresholds to 256K. * Fix mmap-related #defines. Thanks to Lubos Lunak. * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield. 
* Branch-free bin calculation * Default trim and mmap thresholds now 256K. V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee) * Introduce independent_comalloc and independent_calloc. Thanks to Michael Pachos for motivation and help. * Make optional .h file available * Allow > 2GB requests on 32bit systems. * new WIN32 sbrk, mmap, munmap, lock code from <[email protected]>. Thanks also to Andreas Mueller <a.mueller at paradatec.de>, and Anonymous. * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for helping test this.) * memalign: check alignment arg * realloc: don't try to shift chunks backwards, since this leads to more fragmentation in some programs and doesn't seem to help in any others. * Collect all cases in malloc requiring system memory into sysmalloc * Use mmap as backup to sbrk * Place all internal state in malloc_state * Introduce fastbins (although similar to 2.5.1) * Many minor tunings and cosmetic improvements * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS Thanks to Tony E. Bennett <[email protected]> and others. * Include errno.h to support default failure action. V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee) * return null for negative arguments * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com> * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h' (e.g. WIN32 platforms) * Cleanup header file inclusion for WIN32 platforms * Cleanup code to avoid Microsoft Visual C++ compiler complaints * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing memory allocation routines * Set 'malloc_getpagesize' for WIN32 platforms (needs more work) * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to usage of 'assert' in non-WIN32 code * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to avoid infinite loop * Always call 'fREe()' rather than 'free()' V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee) * Fixed ordering problem with boundary-stamping V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee) * Added pvalloc, as recommended by H.J. Liu * Added 64bit pointer support mainly from Wolfram Gloger * Added anonymously donated WIN32 sbrk emulation * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen * malloc_extend_top: fix mask error that caused wastage after foreign sbrks * Add linux mremap support code from HJ Liu V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee) * Integrated most documentation with the code. * Add support for mmap, with help from Wolfram Gloger ([email protected]). * Use last_remainder in more cases. * Pack bins using idea from [email protected] * Use ordered bins instead of best-fit threshhold * Eliminate block-local decls to simplify tracing and debugging. * Support another case of realloc via move into top * Fix error occuring when initial sbrk_base not word-aligned. * Rely on page size for units instead of SBRK_UNIT to avoid surprises about sbrk alignment conventions. * Add mallinfo, mallopt. Thanks to Raymond Nijssen ([email protected]) for the suggestion. * Add `pad' argument to malloc_trim and top_pad mallopt parameter. * More precautions for cases where other routines call sbrk, courtesy of Wolfram Gloger ([email protected]). * Added macros etc., allowing use in linux libc from H.J. Lu ([email protected]) * Inverted this history list V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee) * Re-tuned and fixed to behave more nicely with V2.6.0 changes. 
* Removed all preallocation code since under current scheme the work required to undo bad preallocations exceeds the work saved in good cases for most test programs. * No longer use return list or unconsolidated bins since no scheme using them consistently outperforms those that don't given above changes. * Use best fit for very large chunks to prevent some worst-cases. * Added some support for debugging V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) * Removed footers when chunks are in use. Thanks to Paul Wilson ([email protected]) for the suggestion. V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) * Added malloc_trim, with help from Wolfram Gloger ([email protected]). V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) * realloc: try to expand in both directions * malloc: swap order of clean-bin strategy; * realloc: only conditionally expand backwards * Try not to scavenge used bins * Use bin counts as a guide to preallocation * Occasionally bin return list chunks in first scan * Add a few optimizations from [email protected] V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) * faster bin computation & slightly different binning * merged all consolidations to one part of malloc proper (eliminating old malloc_find_space & malloc_clean_bin) * Scan 2 returns chunks (not just 1) * Propagate failure in realloc if malloc returns 0 * Add stuff to allow compilation on non-ANSI compilers from [email protected] V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) * removed potential for odd address access in prev_chunk * removed dependency on getpagesize.h * misc cosmetics and a bit more internal documentation * anticosmetics: mangled names in macros to evade debugger strangeness * tested on sparc, hp-700, dec-mips, rs6000 with gcc & native cc (hp, dec only) allowing Detlefs & Zorn comparison study (in SIGPLAN Notices.) Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) * Based loosely on libg++-1.2X malloc. (It retains some of the overall structure of old version, but most details differ.) */
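The mspace routines defined above (create_mspace, mspace_malloc, mspace_free, mspace_footprint, destroy_mspace) are only compiled when MSPACES is enabled. The sketch below is a minimal, hypothetical usage example under that assumption; the arena sizes, the function name, and the printf are illustrative and not part of the original source.

/* Hypothetical usage sketch of the mspace API defined above; assumes MSPACES
   is enabled and the matching prototypes are in scope. */
#include <stdio.h>

void mspace_demo(void)
{
    /* capacity 0: let create_mspace pick its default granularity;
       locked 0:   no internal locking, single-threaded use assumed. */
    mspace arena = create_mspace(0, 0);
    if (arena != 0) {
        void* a = mspace_malloc(arena, 128);
        void* b = mspace_malloc(arena, 4096);
        mspace_free(arena, a);
        printf("arena footprint: %lu bytes\n",
               (unsigned long)mspace_footprint(arena));
        mspace_free(arena, b);
        /* destroy_mspace unmaps every segment of the arena at once, so the
           individual frees above are optional. */
        destroy_mspace(arena);
    }
}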
the_stack_data/86750.c
#include <stdlib.h>

int main()
{
    unsigned int a = 1, b = 1, i = 0, temp = 0;

    /* Iteratively advance a Fibonacci pair 20 times; the result is unused. */
    for (i = 0; i < 20; i++) {
        temp = a;
        a = b;
        b += temp;
    }
    return 0;
}
the_stack_data/1056907.c
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    if (argc < 2)
        return 0;

    FILE *fp;
    char buf[255] = {0};

    fp = fopen(argv[1], "rb");
    if (!fp) {
        return 0;
    }

    /* Read at most len-1 characters; fgets null-terminates the buffer. */
    int len = 20;
    fgets(buf, len, fp);
    buf[len] = '\0';

    int num = atoi(buf);
    if (num == 0x7eadbeef) {
        printf("hey, you hit it \n");
        abort();
    }

    fclose(fp);
    return 0;
}
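The reader above hits its abort() branch only when the input file starts with the decimal rendering of 0x7eadbeef (2125315823), since atoi parses base-10 text. The standalone helper below is a hypothetical sketch, not part of the original file; the output file name is made up.

/* Hypothetical helper: writes an input file that drives the reader above
   into its abort() branch.  0x7eadbeef printed with %d is 2125315823. */
#include <stdio.h>

int main(void)
{
    FILE *out = fopen("crash_input.txt", "w");  /* file name is illustrative */
    if (!out)
        return 1;
    fprintf(out, "%d\n", 0x7eadbeef);
    fclose(out);
    return 0;
}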
the_stack_data/168893316.c
#include <stdlib.h>

extern void __VERIFIER_error() __attribute__ ((__noreturn__));
extern int __VERIFIER_nondet_int();

void foo() {
    __VERIFIER_error();
}

int main() {
    void *p = 0;
    int x = __VERIFIER_nondet_int() % 2; // TODO: Goblint can't handle (unsigned) __VERIFIER_nondet_uint() % 2
    if (x == 0 || x == 1 || x == -1)
        p = malloc(1);
    else
        p = &foo; // actually dead
    free(p); // TODO: free shouldn't spawn
    return 0;
}
the_stack_data/95449923.c
// Enqueue: when rear catches up with front, the queue is full.
// Dequeue: when front catches up with rear, the queue is empty.
#include <stdio.h>
#include <stdlib.h>
#define SIZE 10

void func_Select(int data[], int front, int rear);
int pop(int data[], int front, int rear);
int push(int data[], int front, int rear);
void show(int data[], int front, int rear);

/* new functions */
int adjust(int data[], int front, int rear, int moveNumber);    /* unfinished */
int cutInLine(int data[], int front, int rear, int cutNumber);  /* unfinished */
int goAway(int data[], int front, int rear, int leaveNumber);   /* unfinished */

int main() {
    int data[SIZE];
    int front = 0;
    int rear = 0;

    puts("\n\nProgram start\n");
    func_Select(data, front, rear);
    return 0;
}

int adjust(int data[], int front, int rear, int moveNumber) { /* unfinished */
    return 0;
}

int pop(int data[], int front, int rear) {
    if (front != rear) {
        front++;
        if (front <= SIZE - 1) {
            if (front == rear) {
                puts("list is already Empty!\n");
            }
        } else {
            front = 0;
            if (front == rear) {
                puts("list is already Empty!\n");
            }
        }
    } else {
        puts("list is already Empty!\n");
    }
    show(data, front, rear);
    return front;
}

int push(int data[], int front, int rear) {
    rear++;
    if (rear <= SIZE - 1) {
        if (rear != front) {
            printf("Plz input the data\n"
                   "---------------------------\n");
            scanf("%d", &data[rear]);
        } else {
            rear--;
            puts("list is already full!\n");
        }
    } else {
        rear = 0;
        if (rear != front) {
            printf("Plz input the data\n"
                   "---------------------------\n");
            scanf("%d", &data[rear]);
        } else {
            rear = SIZE - 1;
            puts("list is already full!\n");
        }
    }
    show(data, front, rear);
    return rear;
}

void show(int data[], int front, int rear) {
    int i;
    printf("show Data:\n");
    if (rear < front) {
        printf(" [ ");
        /* first segment: from front+1 to the end of the array */
        for (i = front + 1; i < SIZE; i++) {
            printf("\t%d ", data[i]);
        }
        /* second segment: wrapped-around part from index 0 to rear */
        for (i = 0; i <= rear; i++) {
            printf("\t%d ", data[i]);
        }
        printf("\t] ");
    } else {
        printf(" [ ");
        for (i = front + 1; i <= rear; i++) {
            printf("\t%d ", data[i]);
        }
        printf("\t] ");
    }
    puts("\n");
}

void func_Select(int data[], int front, int rear) {
    int request;

    printf("Explain\n"
           "======================\n"
           "- 1 - push\n"
           "- 2 - pop\n"
           "- 3 - show\n"
           "- 4 - exit\n"
           "======================\n"
           "-Plz input the request: ");
    while (1) {
        scanf("%d", &request);
        switch (request) {
            case 1:
                rear = push(data, front, rear);
                break;
            case 2:
                front = pop(data, front, rear);
                break;
            case 3:
                show(data, front, rear);
                break;
            case 4:
                puts("\n\nEnd of run....\n");
                exit(1);
                break;
            default:
                puts("Error input\n");
                break;
        }
    }
}
the_stack_data/111077597.c
#ifdef IEEEFP
#define IEEELO 1
#define IEEEHI 0
#define UINT32 unsigned long
#define ieeehi(x) ((UINT32 *)(&(x)))[IEEEHI]
#define ieeelo(x) ((UINT32 *)(&(x)))[IEEELO]

/* A double is NaN when its exponent field is all ones and its mantissa is
   nonzero; IEEEHI/IEEELO select the 32-bit words holding the sign/exponent
   and the low mantissa bits respectively. */
int isnan(x)
double x;
{
    return (((ieeehi(x) & 0x7FF00000L) == 0x7FF00000L) &&
            ((ieeehi(x) & 0xFFFFFL) != 0 || ieeelo(x) != 0));
}
#endif /* IEEEFP */
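A small sanity-check sketch, not part of the original file: it assumes IEEEFP is defined and that IEEEHI/IEEELO match the platform's word order for doubles; the driver simply exercises the test with a NaN produced at run time.

/* Hypothetical test driver for the isnan() above. */
#include <stdio.h>

int isnan(double);  /* the definition above */

int main(void)
{
    volatile double zero = 0.0;      /* volatile blocks constant folding */
    double nan_value = zero / zero;  /* produces a quiet NaN on IEEE hardware */
    printf("isnan(NaN) = %d, isnan(1.5) = %d\n",
           isnan(nan_value), isnan(1.5));
    return 0;
}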
the_stack_data/41550.c
/* Test diagnostics for options used on their own without -Wformat.
   -Wformat-zero-length.  */
/* Origin: Joseph Myers <[email protected]> */
/* { dg-do compile } */
/* { dg-options "-Wformat-zero-length" } */
/* { dg-warning "-Wformat-zero-length ignored without -Wformat" "ignored" { target *-*-* } 0 } */
the_stack_data/187644077.c
// buy ship and die
// peer0_0 = client > server
char peer0_0[] = {
    0x00, 0x00, 0x00, 0x22, 0x01, 0x00, 0x00, 0x00
};
char peer0_1[] = {
    0x34, 0x72, 0x01, 0x98, 0x79, 0x31, 0x5f, 0xdd, 0x00, 0x00, 0x00, 0x12,
    0x00, 0x00, 0x00, 0x00, 0x21, 0xbc, 0x00, 0x0c, 0x48, 0x79, 0x65, 0x6e,
    0x61, 0x2e, 0x68, 0x75, 0x6e, 0x74, 0x65, 0x72, 0xbb, 0x01
};
char peer1_0[] = {
    0x00, 0x00, 0x00, 0x10, 0x01, 0x00, 0xd6, 0x08
};
char peer1_1[] = {
    0x2c, 0x54, 0x01, 0x98, 0x79, 0x31, 0x60, 0x39, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01, 0x00, 0xd6, 0x08,
    0x1b, 0x54, 0x01, 0x98, 0x79, 0x31, 0x60, 0x39, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x65, 0x6d
};