file
stringlengths
18
26
data
stringlengths
3
1.04M
the_stack_data/79242.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifdef WIN32
#define DLLEXPORT __declspec(dllexport)
#else
#define DLLEXPORT extern
#endif

/*
 * Tiny FFI test library: each Return* function heap-allocates a value,
 * fills it with a fixed constant, and returns the pointer. The caller
 * owns the allocation and must free() it — except ReturnString, which
 * returns a string literal that must NOT be modified or freed.
 *
 * Fix over the original: every malloc() result is checked before being
 * dereferenced; on allocation failure the functions return NULL instead
 * of invoking undefined behavior.
 */

typedef struct cute_struct {
    int i;
} CUTE;

/* Returns a CUTE with i == 100, or NULL on allocation failure. */
DLLEXPORT CUTE *ReturnStruct(void)
{
    CUTE *p = malloc(sizeof *p);
    if (p != NULL) p->i = 100;
    return p;
}

/* Returns a 3-element int array {1, 2, 3}, or NULL on allocation failure. */
DLLEXPORT int *ReturnArray(void)
{
    int *a = malloc(3 * sizeof *a);
    if (a != NULL) {
        a[0] = 1;
        a[1] = 2;
        a[2] = 3;
    }
    return a;
}

/* Returns a pointer to the int 101, or NULL on allocation failure. */
DLLEXPORT int *ReturnInt(void)
{
    int *p = malloc(sizeof *p);
    if (p != NULL) *p = 101;
    return p;
}

/* Returns a pointer to the short 102, or NULL on allocation failure. */
DLLEXPORT short *ReturnShort(void)
{
    short *p = malloc(sizeof *p);
    if (p != NULL) *p = 102;
    return p;
}

/* Returns a pointer to the char value -103, or NULL on allocation failure.
 * NOTE: plain char signedness is implementation-defined; on an unsigned-char
 * platform the stored value wraps to 153 (same bit pattern). */
DLLEXPORT char *ReturnByte(void)
{
    char *p = malloc(sizeof *p);
    if (p != NULL) *p = -103;
    return p;
}

/* Returns a pointer to the double 99.9, or NULL on allocation failure. */
DLLEXPORT double *ReturnDouble(void)
{
    double *p = malloc(sizeof *p);
    if (p != NULL) *p = 99.9;
    return p;
}

/* Returns a pointer to the float -4.5, or NULL on allocation failure. */
DLLEXPORT float *ReturnFloat(void)
{
    float *p = malloc(sizeof *p);
    if (p != NULL) *p = -4.5;
    return p;
}

/* Returns a static string literal; caller must not modify or free it. */
DLLEXPORT char *ReturnString(void)
{
    return "epic cuteness";
}

/* Always returns NULL (exercises NULL-string handling on the FFI side). */
DLLEXPORT char *ReturnNullString(void)
{
    return NULL;
}
the_stack_data/843069.c
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * xe-statsgrp3 - calculate stats on a data-column using three grouping-columns.
 * v 9: 29.April.2019 [JRH]   <TAGS>math stats</TAGS>
 *
 * NOTE: the original source #defined thisprog three times (xe-statsgrp1/2/3)
 * so the 1- and 2-group variants could be generated with
 *     grep -vE 'grp3|cg3' xe-statsgrp3.c > xe-statsgrp2.c
 *     grep -vE 'grp2|cg2' xe-statsgrp2.c > xe-statsgrp1.c
 * Redefining a macro with a different replacement list is a constraint
 * violation (C11 6.10.3p2), so only the definition for THIS program is kept.
 * The same applies to the triple assignment of setcoldata below.
 */
#define thisprog "xe-statsgrp3"
#define TITLE_STRING thisprog" v 9: 29.April.2019 [JRH]"

/* external functions start */
char *xf_lineread1(char *line, long *maxlinelen, FILE *fpin);
long *xf_lineparse2(char *line, char *delimiters, long *nwords);
void xf_stats2_d(double *data, long n, int varcalc, double *result_d);
int xf_compare1_d(const void *a, const void *b);
/* external functions end */

int main (int argc, char *argv[]) {

	/* general variables */
	char *line=NULL;
	long int ii,jj,kk,mm,nn,maxlinelen=0;
	double aa,dd,result_d[64];
	FILE *fpin;
	/* program-specific variables */
	double *grp1=NULL,*listgrp1=NULL; long nlistgrp1=0; int sizeofgrp1=sizeof(*grp1); double tempgrp1;
	double *grp2=NULL,*listgrp2=NULL; long nlistgrp2=0; int sizeofgrp2=sizeof(*grp2); double tempgrp2;
	double *grp3=NULL,*listgrp3=NULL; long nlistgrp3=0; int sizeofgrp3=sizeof(*grp3); double tempgrp3;
	long nwords=0,*iword=NULL;
	long ntempdata=0,ngrptot=0;
	double *data=NULL,*tempdata=NULL; int sizeofdata=sizeof(*data);
	/* arguments */
	char *infile=NULL;
	int setgint=0;
	long setcolgrp1=1;
	long setcolgrp2=2;
	long setcolgrp3=3;
	/* default data-column follows the last grouping-column: the original
	 * assigned this three times (once per grpN, for the grep-based code
	 * generation) - only the grp3 assignment is live in this program */
	long setcoldata= setcolgrp3+1;

	/* PRINT INSTRUCTIONS IF THERE IS NO FILENAME SPECIFIED */
	if(argc<2) {
		fprintf(stderr,"\n");
		fprintf(stderr,"----------------------------------------------------------------------\n");
		fprintf(stderr,"%s\n",TITLE_STRING);
		fprintf(stderr,"----------------------------------------------------------------------\n");
		fprintf(stderr,"Calculate stats on a data-column using grouping-columns\n");
		fprintf(stderr,"- input must be tab-delimited\n");
		fprintf(stderr,"- grouping-variables must be numeric (can be floating-point)\n");
		fprintf(stderr,"- non-numeric data-values will be ignored for stats calculations\n");
		fprintf(stderr,"\n");
		fprintf(stderr,"USAGE: %s [input] [options]\n",thisprog);
		fprintf(stderr," [input]: file name or \"stdin\"\n");
		fprintf(stderr,"VALID OPTIONS:\n");
		fprintf(stderr," -cg1: column defining grouping-variable 1 [%ld]\n",setcolgrp1);
		fprintf(stderr," -cg2: column defining grouping-variable 2 [%ld]\n",setcolgrp2);
		fprintf(stderr," -cg3: column defining grouping-variable 3 [%ld]\n",setcolgrp3);
		fprintf(stderr," -cy: column containing dependent variable [%ld]\n",setcoldata);
		fprintf(stderr," -gint: output groups as integers? (0=NO 1=YES) [%d]\n",setgint);
		fprintf(stderr,"EXAMPLES:\n");
		fprintf(stderr," %s data.txt",thisprog);
		fprintf(stderr," -cg1 5");
		fprintf(stderr," -cg2 7");
		fprintf(stderr," -cg3 9");
		fprintf(stderr,"\n");
		fprintf(stderr," cat temp.txt | %s stdin -gint 1\n",thisprog);
		fprintf(stderr,"OUTPUT:\n");
		fprintf(stderr,"\tgrp1");
		fprintf(stderr,"\tgrp2");
		fprintf(stderr,"\tgrp3");
		fprintf(stderr,"\tn mean sd sem ntot\n");
		fprintf(stderr,"\n");
		fprintf(stderr," NOTE:\n");
		fprintf(stderr," ntot= total datapoints for a given group-combination\n");
		fprintf(stderr," n= valid numbers contributing to statistical result\n");
		fprintf(stderr,"----------------------------------------------------------------------\n");
		fprintf(stderr,"\n");
		exit(0);
	}

	/* READ THE FILENAME AND OPTIONAL ARGUMENTS */
	infile= argv[1];
	for(ii=2;ii<argc;ii++) {
		if( *(argv[ii]+0) == '-') {
			if((ii+1)>=argc) {fprintf(stderr,"\n--- Error [%s]: missing value for argument \"%s\"\n\n",thisprog,argv[ii]); exit(1);}
			else if(strcmp(argv[ii],"-cg1")==0) setcolgrp1= atol(argv[++ii]);
			else if(strcmp(argv[ii],"-cg2")==0) setcolgrp2= atol(argv[++ii]);
			else if(strcmp(argv[ii],"-cg3")==0) setcolgrp3= atol(argv[++ii]);
			else if(strcmp(argv[ii],"-cy")==0)  setcoldata= atol(argv[++ii]);
			else if(strcmp(argv[ii],"-gint")==0) setgint= atoi(argv[++ii]);
			else {fprintf(stderr,"\n--- Error [%s]: invalid command line argument \"%s\"\n",thisprog,argv[ii]); exit(1);}
	}}
	if(setcolgrp1<1) {fprintf(stderr,"\n--- Error [%s]: invalid group column (-cg1 %ld) - must be >0\n",thisprog,setcolgrp1); exit(1);}
	if(setcolgrp2<1) {fprintf(stderr,"\n--- Error [%s]: invalid group column (-cg2 %ld) - must be >0\n",thisprog,setcolgrp2); exit(1);}
	if(setcolgrp3<1) {fprintf(stderr,"\n--- Error [%s]: invalid group column (-cg3 %ld) - must be >0\n",thisprog,setcolgrp3); exit(1);}
	if(setcoldata<1) {fprintf(stderr,"\n--- Error [%s]: invalid data column (-cy %ld) - must be >0\n",thisprog,setcoldata); exit(1);}

	/* DECREMENT COLUMN-NUMBERS SO THEY'RE ZERO-OFFSET */
	setcolgrp1--;
	setcolgrp2--;
	setcolgrp3--;
	setcoldata--;

	/* STORE DATA */
	if(strcmp(infile,"stdin")==0) fpin=stdin;
	else if((fpin=fopen(infile,"r"))==0) {fprintf(stderr,"\n--- Error [%s]: file \"%s\" not found\n\n",thisprog,infile);exit(1);}
	nn=0;
	dd=NAN; tempgrp1=NAN; tempgrp2=NAN; tempgrp3=NAN;
	while((line=xf_lineread1(line,&maxlinelen,fpin))!=NULL) {
		if(maxlinelen==-1) {fprintf(stderr,"\n--- Error [%s]: readline function encountered insufficient memory\n\n",thisprog);exit(1);}
		if(line[0]=='#') continue;
		/* parse the line & make sure all required columns are present */
		iword= xf_lineparse2(line,"\t",&nwords);
		if(nwords<0) {fprintf(stderr,"\n--- Error [%s]: lineparse function encountered insufficient memory\n\n",thisprog);exit(1);};
		/* BUGFIX: columns are zero-offset here, so a line with exactly
		 * setcoldata words has valid indices 0..setcoldata-1 only; the
		 * original "<" test allowed an out-of-bounds read of iword[] */
		if( nwords<=setcoldata || nwords<=setcolgrp1 || nwords<=setcolgrp2 || nwords<=setcolgrp3 ) continue;
		/* make sure each group-column is numeric & finite, and convert non-numeric data to NAN */
		if(sscanf(line+iword[setcolgrp1],"%lf",&tempgrp1)!=1 || !isfinite(tempgrp1)) continue;
		if(sscanf(line+iword[setcolgrp2],"%lf",&tempgrp2)!=1 || !isfinite(tempgrp2)) continue;
		if(sscanf(line+iword[setcolgrp3],"%lf",&tempgrp3)!=1 || !isfinite(tempgrp3)) continue;
		if(sscanf(line+iword[setcoldata],"%lf",&dd)!=1) dd=NAN;
		else if(!isfinite(dd)) dd=NAN;
		/* reallocate memory (program exits on failure, so the
		 * pointer-overwrite realloc idiom cannot leak here) */
		data= realloc(data,(nn+1)*sizeofdata);
		grp1= realloc(grp1,(nn+1)*sizeofgrp1);
		grp2= realloc(grp2,(nn+1)*sizeofgrp2);
		grp3= realloc(grp3,(nn+1)*sizeofgrp3);
		if( data==NULL || grp1==NULL || grp2==NULL || grp3==NULL ) {fprintf(stderr,"\n--- Error [%s]: insufficient memory\n\n",thisprog);exit(1);};
		/* assign values */
		data[nn]= dd;
		grp1[nn]= tempgrp1;
		grp2[nn]= tempgrp2;
		grp3[nn]= tempgrp3;
		nn++;
	}
	if(strcmp(infile,"stdin")!=0) fclose(fpin);
	//TEST: for(ii=0;ii<nn;ii++) printf("%g\t%g\t%g\t%g\n",grp1[ii],grp2[ii],grp3[ii],data[ii]); exit(0);

	/* ALLOCATE MEMORY FOR LISTS AND TEMPDATA */
	listgrp1= realloc(listgrp1,(nn+1)*sizeofgrp1);
	listgrp2= realloc(listgrp2,(nn+1)*sizeofgrp2);
	listgrp3= realloc(listgrp3,(nn+1)*sizeofgrp3);
	tempdata= realloc(tempdata,(nn+1)*sizeofdata);
	/* BUGFIX: tempdata was missing from the original NULL check */
	if( listgrp1==NULL || listgrp2==NULL || listgrp3==NULL || tempdata==NULL ) {fprintf(stderr,"\n--- Error [%s]: insufficient memory\n\n",thisprog);exit(1);};

	/* BUGFIX: guard against empty input - the original read listgrp*[0]
	 * uninitialized when nn==0; with nn<1 the nlistgrp counters stay 0,
	 * so only the header is printed below */
	if(nn>0) {
		/* CREATE A SORTED LIST OF THE ELEMENTS IN grp1 */
		for(ii=0;ii<nn;ii++) listgrp1[ii]= grp1[ii];
		qsort(listgrp1,nn,sizeof(double),xf_compare1_d);
		/* copy only unique items to new version of listgrp1 */
		aa=listgrp1[0];
		for(ii=nlistgrp1=1;ii<nn;ii++) { if(listgrp1[ii]!=aa) listgrp1[nlistgrp1++]=listgrp1[ii]; aa=listgrp1[ii]; }
		/* CREATE A SORTED LIST OF THE ELEMENTS IN grp2 */
		for(ii=0;ii<nn;ii++) listgrp2[ii]= grp2[ii];
		qsort(listgrp2,nn,sizeof(double),xf_compare1_d);
		/* copy only unique items to new version of listgrp2 */
		aa=listgrp2[0];
		for(ii=nlistgrp2=1;ii<nn;ii++) { if(listgrp2[ii]!=aa) listgrp2[nlistgrp2++]=listgrp2[ii]; aa=listgrp2[ii]; }
		/* CREATE A SORTED LIST OF THE ELEMENTS IN grp3 */
		for(ii=0;ii<nn;ii++) listgrp3[ii]= grp3[ii];
		qsort(listgrp3,nn,sizeof(double),xf_compare1_d);
		/* copy only unique items to new version of listgrp3 */
		aa=listgrp3[0];
		for(ii=nlistgrp3=1;ii<nn;ii++) { if(listgrp3[ii]!=aa) listgrp3[nlistgrp3++]=listgrp3[ii]; aa=listgrp3[ii]; }
	}
	//TEST: printf("1:%ld\n2:%ld\n3:%ld\n",nlistgrp1,nlistgrp2,nlistgrp3);exit(0);

	/* CALCULATE STATS ON DATA IN EACH COMBINATION OF GROUP-CATEGORIES */
	printf("grp1\t");
	printf("grp2\t");
	printf("grp3\t");
	printf("n\tmean\tsd\tsem\tntot\n");
	for(ii=0;ii<nlistgrp1;ii++) {
		for(jj=0;jj<nlistgrp2;jj++) {
			for(kk=0;kk<nlistgrp3;kk++) {
				/* NOTE: a stray debug "continue;" at the top of this loop in
				 * the collapsed original suppressed all output - removed */
				ngrptot= 0;
				result_d[0]=result_d[2]=result_d[3]= NAN;
				/* copy the good data for this category to a temp array */
				ntempdata=0;
				for(mm=0;mm<nn;mm++) {
					if( grp1[mm]==listgrp1[ii] && grp2[mm]==listgrp2[jj] && grp3[mm]==listgrp3[kk] ) {
						ngrptot++;
						if(isfinite(data[mm])) tempdata[ntempdata++]= data[mm];
				}}
				if(ngrptot<=0) continue;
				/* get stats on the temp array */
				if(ntempdata>0) xf_stats2_d(tempdata,ntempdata,2,result_d);
				/* output the results */
				if(setgint==0) {
					printf("%g\t",listgrp1[ii]);
					printf("%g\t",listgrp2[jj]);
					printf("%g\t",listgrp3[kk]);
					printf("%ld\t%g\t%g\t%g\t%ld\n",ntempdata,result_d[0],result_d[2],result_d[3],ngrptot);
				}
				else {
					printf("%ld\t",(long)listgrp1[ii]);
					printf("%ld\t",(long)listgrp2[jj]);
					printf("%ld\t",(long)listgrp3[kk]);
					printf("%ld\t%g\t%g\t%g\t%ld\n",ntempdata,result_d[0],result_d[2],result_d[3],ngrptot);
				}
			}
		}
	}

	/* free(NULL) is a no-op, so no NULL guards are needed */
	free(line);
	free(iword);
	free(grp1);
	free(grp2);
	free(grp3);
	free(listgrp1);
	free(listgrp2);
	free(listgrp3);
	free(data);
	free(tempdata);
	exit(0);
}
the_stack_data/243892030.c
#include <stdio.h>

/* Prints "<name> is <age> years old." for each of the count pairs. */
void print_name_age(char **names, int *ages, int count)
{
    int i = 0;
    while (i < count) {
        printf("%s is %d years old.\n", names[i], ages[i]);
        i++;
    }
}

/*
 * Demonstrates four equivalent ways of walking two parallel arrays
 * (array indexing, pointer indexing, pointer arithmetic, and pointer
 * increments), then prints the addresses of the elements.
 *
 * Fix over the original: arguments printed with "%p" are now cast to
 * (void *) - passing int* / char** to %p without the cast is undefined
 * behavior per the C standard (fprintf conversion specifications).
 */
int main(int argc, char *argv[])
{
    // create two arrays we care about
    int ages[] = {23, 43, 12, 89, 2};
    char *names[] = { "Alan", "Frank", "Mary", "John", "Lisa" };

    // safely get the size of ages (sizeof works here because ages is a
    // true array in this scope, not a decayed pointer)
    int count = sizeof(ages) / sizeof(int);
    int i = count - 1;

    // first way using indexing
    while (i >= 0) {
        printf("%s has %d years alive.\n", *(names + i), *(ages + i));
        i--;
    }
    printf("---\n");

    // setup the pointers to the start of the arrays
    int *cur_age = ages;
    char **cur_name = names;

    // second way using pointers
    for (i = 0; i < count; i++) {
        printf("%s is %d years old.\n", cur_name[i], cur_age[i]);
    }
    printf("---\n");

    // third way, pointers are just arrays
    for (i = count - 1; i >= 0; i--) {
        printf("%s is %d years old again.\n", *(cur_name + i), *(cur_age + i));
    }
    printf("---\n");

    // fourth way with pointers in a stupid complex way
    for (cur_name = names, cur_age = ages;
         (cur_age - ages) < count;
         cur_name++, cur_age++) {
        printf("%s lived %d years so far.\n", *cur_name, *cur_age);
    }
    printf("---\n");

    // print element addresses: %p requires void * arguments
    for (i = 0; i < count; i++) {
        printf("%s is stored at address %p==%p.\n",
               names[i], (void *)&(names[i]), (void *)(names + i));
        printf("%d is stored at address %p==%p.\n",
               *(ages + i), (void *)(ages + i), (void *)&(ages[i]));
    }
    printf("---\n");

    print_name_age(names, ages, count);

    return 0;
}
the_stack_data/510599.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <float.h> /*********************************************************************************************************/ /********************************************** PROTOTYPES ***********************************************/ /*********************************************************************************************************/ /* This function initializes episodes */ unsigned int InitializeEpisode(unsigned int number_of_non_terminal_states); /* This function selects a policy with using epsilon-greedy from the state-action-value function */ void EpsilonGreedyPolicyFromStateActionFunction(unsigned int* number_of_actions_per_non_terminal_state, unsigned int number_of_state_action_value_functions, double*** state_action_value_functions, double epsilon, unsigned int state_index, double** policy, double** policy_cumulative_sum); /* This function loops through episodes and updates the policy */ void LoopThroughEpisode(unsigned int number_of_non_terminal_states, unsigned int* number_of_actions_per_non_terminal_state, unsigned int** number_of_state_action_successor_states, unsigned int*** state_action_successor_state_indices, double*** state_action_successor_state_transition_probabilities_cumulative_sum, double*** state_action_successor_state_rewards, unsigned int number_of_state_action_value_functions, double*** state_action_value_functions, unsigned int** state_action_value_function_max_tie_stack, double** policy, double** policy_cumulative_sum, double alpha, double epsilon, double discounting_factor_gamma, unsigned int maximum_episode_length, unsigned int state_index); /* This function updates the state-action-value function */ unsigned int UpdateStateActionValueFunction(unsigned int number_of_non_terminal_states, unsigned int* number_of_actions_per_non_terminal_state, unsigned int** state_action_value_function_max_tie_stack, double alpha, double discounting_factor_gamma, unsigned int state_index, 
unsigned int action_index, double reward, unsigned int next_state_index, unsigned int updating_state_action_value_function_index, unsigned int number_of_state_action_value_functions, double*** state_action_value_functions); /* This function returns a random uniform number within range [0,1] */ double UnifRand(void); /*********************************************************************************************************/ /************************************************* MAIN **************************************************/ /*********************************************************************************************************/ int main(int argc, char* argv[]) { unsigned int i, j, k; int system_return; /*********************************************************************************************************/ /**************************************** READ IN THE ENVIRONMENT ****************************************/ /*********************************************************************************************************/ /* Get the number of states */ unsigned int number_of_states = 0; FILE* infile_number_of_states = fopen("inputs/number_of_states.txt", "r"); system_return = fscanf(infile_number_of_states, "%u", &number_of_states); if (system_return == -1) { printf("Failed reading file inputs/number_of_states.txt\n"); } fclose(infile_number_of_states); /* Get number of terminal states */ unsigned int number_of_terminal_states = 0; FILE* infile_number_of_terminal_states = fopen("inputs/number_of_terminal_states.txt", "r"); system_return = fscanf(infile_number_of_terminal_states, "%u", &number_of_terminal_states); if (system_return == -1) { printf("Failed reading file inputs/number_of_terminal_states.txt\n"); } fclose(infile_number_of_terminal_states); /* Get number of non-terminal states */ unsigned int number_of_non_terminal_states = number_of_states - number_of_terminal_states; /* Get the number of actions per non-terminal state */ unsigned int* 
number_of_actions_per_non_terminal_state; FILE* infile_number_of_actions_per_non_terminal_state = fopen("inputs/number_of_actions_per_non_terminal_state.txt", "r"); number_of_actions_per_non_terminal_state = malloc(sizeof(int) * number_of_non_terminal_states); for (i = 0; i < number_of_non_terminal_states; i++) { system_return = fscanf(infile_number_of_actions_per_non_terminal_state, "%u", &number_of_actions_per_non_terminal_state[i]); if (system_return == -1) { printf("Failed reading file inputs/number_of_actions_per_non_terminal_state.txt\n"); } } // end of i loop fclose(infile_number_of_actions_per_non_terminal_state); /* Get the number of actions per all states */ unsigned int* number_of_actions_per_state; number_of_actions_per_state = malloc(sizeof(int) * number_of_states); for (i = 0; i < number_of_non_terminal_states; i++) { number_of_actions_per_state[i] = number_of_actions_per_non_terminal_state[i]; } // end of i loop for (i = 0; i < number_of_terminal_states; i++) { number_of_actions_per_state[i + number_of_non_terminal_states] = 0; } // end of i loop /* Get the number of state-action successor states */ unsigned int** number_of_state_action_successor_states; FILE* infile_number_of_state_action_successor_states = fopen("inputs/number_of_state_action_successor_states.txt", "r"); number_of_state_action_successor_states = malloc(sizeof(int*) * number_of_non_terminal_states); for (i = 0; i < number_of_non_terminal_states; i++) { number_of_state_action_successor_states[i] = malloc(sizeof(int) * number_of_actions_per_non_terminal_state[i]); for (j = 0; j < number_of_actions_per_non_terminal_state[i]; j++) { system_return = fscanf(infile_number_of_state_action_successor_states, "%u\t", &number_of_state_action_successor_states[i][j]); if (system_return == -1) { printf("Failed reading file inputs/number_of_state_action_successor_states.txt\n"); } } // end of j loop } // end of i loop fclose(infile_number_of_state_action_successor_states); /* Get the 
state-action-successor state indices */ unsigned int*** state_action_successor_state_indices; FILE* infile_state_action_successor_state_indices = fopen("inputs/state_action_successor_state_indices.txt", "r"); state_action_successor_state_indices = malloc(sizeof(unsigned int**) * number_of_non_terminal_states); for (i = 0; i < number_of_non_terminal_states; i++) { state_action_successor_state_indices[i] = malloc(sizeof(unsigned int*) * number_of_actions_per_non_terminal_state[i]); for (j = 0; j < number_of_actions_per_non_terminal_state[i]; j++) { state_action_successor_state_indices[i][j] = malloc(sizeof(unsigned int*) * number_of_state_action_successor_states[i][j]); for (k = 0; k < number_of_state_action_successor_states[i][j]; k++) { system_return = fscanf(infile_state_action_successor_state_indices, "%u\t", &state_action_successor_state_indices[i][j][k]); if (system_return == -1) { printf("Failed reading file inputs/state_action_successor_state_indices.txt\n"); } } // end of k loop system_return = fscanf(infile_state_action_successor_state_indices, "\n"); if (system_return == -1) { printf("Failed reading file inputs/state_action_successor_state_indices.txt\n"); } } // end of j loop } // end of i loop fclose(infile_state_action_successor_state_indices); /* Get the state-action-successor state transition probabilities */ double*** state_action_successor_state_transition_probabilities; FILE* infile_state_action_successor_state_transition_probabilities = fopen("inputs/state_action_successor_state_transition_probabilities.txt", "r"); state_action_successor_state_transition_probabilities = malloc(sizeof(double**) * number_of_non_terminal_states); for (i = 0; i < number_of_non_terminal_states; i++) { state_action_successor_state_transition_probabilities[i] = malloc(sizeof(double*) * number_of_actions_per_non_terminal_state[i]); for (j = 0; j < number_of_actions_per_non_terminal_state[i]; j++) { state_action_successor_state_transition_probabilities[i][j] = 
malloc(sizeof(double*) * number_of_state_action_successor_states[i][j]); for (k = 0; k < number_of_state_action_successor_states[i][j]; k++) { system_return = fscanf(infile_state_action_successor_state_transition_probabilities, "%lf\t", &state_action_successor_state_transition_probabilities[i][j][k]); if (system_return == -1) { printf("Failed reading file inputs/state_action_successor_state_transition_probabilities.txt\n"); } } // end of k loop system_return = fscanf(infile_state_action_successor_state_transition_probabilities, "\n"); if (system_return == -1) { printf("Failed reading file inputs/state_action_successor_state_transition_probabilities.txt\n"); } } // end of j loop } // end of i loop fclose(infile_state_action_successor_state_transition_probabilities); /* Create the state-action-successor state transition probability cumulative sum array */ double*** state_action_successor_state_transition_probabilities_cumulative_sum; state_action_successor_state_transition_probabilities_cumulative_sum = malloc(sizeof(double**) * number_of_non_terminal_states); for (i = 0; i < number_of_non_terminal_states; i++) { state_action_successor_state_transition_probabilities_cumulative_sum[i] = malloc(sizeof(double*) * number_of_actions_per_non_terminal_state[i]); for (j = 0; j < number_of_actions_per_non_terminal_state[i]; j++) { state_action_successor_state_transition_probabilities_cumulative_sum[i][j] = malloc(sizeof(double*) * number_of_state_action_successor_states[i][j]); if (number_of_state_action_successor_states[i][j] > 0) { state_action_successor_state_transition_probabilities_cumulative_sum[i][j][0] = state_action_successor_state_transition_probabilities[i][j][0]; for (k = 1; k < number_of_state_action_successor_states[i][j]; k++) { state_action_successor_state_transition_probabilities_cumulative_sum[i][j][k] = state_action_successor_state_transition_probabilities_cumulative_sum[i][j][k - 1] + state_action_successor_state_transition_probabilities[i][j][k]; } // end 
of k loop } } // end of j loop } // end of i loop /* Get the state-action-successor state rewards */ double*** state_action_successor_state_rewards; FILE* infile_state_action_successor_state_rewards = fopen("inputs/state_action_successor_state_rewards.txt", "r"); state_action_successor_state_rewards = malloc(sizeof(double**) * number_of_non_terminal_states); for (i = 0; i < number_of_non_terminal_states; i++) { state_action_successor_state_rewards[i] = malloc(sizeof(double*) * number_of_actions_per_non_terminal_state[i]); for (j = 0; j < number_of_actions_per_non_terminal_state[i]; j++) { state_action_successor_state_rewards[i][j] = malloc(sizeof(double) * number_of_state_action_successor_states[i][j]); for (k = 0; k < number_of_state_action_successor_states[i][j]; k++) { system_return = fscanf(infile_state_action_successor_state_rewards, "%lf\t", &state_action_successor_state_rewards[i][j][k]); if (system_return == -1) { printf("Failed reading file inputs/state_action_successor_state_rewards.txt\n"); } } // end of k loop system_return = fscanf(infile_state_action_successor_state_rewards, "\n"); if (system_return == -1) { printf("Failed reading file inputs/state_action_successor_state_rewards.txt\n"); } } // end of j loop } // end of i loop fclose(infile_state_action_successor_state_rewards); /*********************************************************************************************************/ /**************************************** SETUP POLICY ITERATION *****************************************/ /*********************************************************************************************************/ /* Set the number of episodes */ unsigned int number_of_episodes = 10000; /* Set the maximum episode length */ unsigned int maximum_episode_length = 200; /* Set the number of state-action-value functions */ unsigned int number_of_state_action_value_functions = 8; /* Create state-action-value function arrays */ double*** state_action_value_functions; 
state_action_value_functions = malloc(sizeof(double**) * number_of_state_action_value_functions); for (i = 0; i < number_of_state_action_value_functions; i++) { state_action_value_functions[i] = malloc(sizeof(double*) * number_of_states); for (j = 0; j < number_of_states; j++) { state_action_value_functions[i][j] = malloc(sizeof(double) * number_of_actions_per_state[j]); for (k = 0; k < number_of_actions_per_state[j]; k++) { state_action_value_functions[i][j][k] = 0.0; } // end of k loop } // end of j loop } // end of i loop unsigned int** state_action_value_function_max_tie_stack; state_action_value_function_max_tie_stack = malloc(sizeof(unsigned int*) * number_of_states); for (i = 0; i < number_of_states; i++) { state_action_value_function_max_tie_stack[i] = malloc(sizeof(unsigned int) * number_of_actions_per_state[i]); for (j = 0; j < number_of_actions_per_state[i]; j++) { state_action_value_function_max_tie_stack[i][j] = 0; } // end of j loop } // end of i loop /* Create policy array */ double** policy; policy = malloc(sizeof(double*) * number_of_non_terminal_states); for (i = 0; i < number_of_non_terminal_states; i++) { policy[i] = malloc(sizeof(double) * number_of_actions_per_non_terminal_state[i]); for (j = 0; j < number_of_actions_per_non_terminal_state[i]; j++) { policy[i][j] = 1.0 / number_of_actions_per_non_terminal_state[i]; } // end of j loop } // end of i loop /* Create policy cumulative sum array */ double** policy_cumulative_sum; policy_cumulative_sum = malloc(sizeof(double*) * number_of_non_terminal_states); for (i = 0; i < number_of_non_terminal_states; i++) { policy_cumulative_sum[i] = malloc(sizeof(double) * number_of_actions_per_non_terminal_state[i]); policy_cumulative_sum[i][0] = policy[i][0]; for (j = 1; j < number_of_actions_per_non_terminal_state[i]; j++) { policy_cumulative_sum[i][j] = policy_cumulative_sum[i][j - 1] + policy[i][j]; } // end of j loop } // end of i loop /* Set learning rate alpha */ double alpha = 0.1; /* Set epsilon for 
our epsilon level of exploration */ double epsilon = 0.1; /* Set discounting factor gamma */ double discounting_factor_gamma = 1.0; /* Set random seed */ srand(0); /*********************************************************************************************************/ /******************************************* RUN POLICY CONTROL ******************************************/ /*********************************************************************************************************/ printf("\nInitial state-action value functions:\n"); for (i = 0; i < number_of_state_action_value_functions; i++) { printf("Q_%u\n", i); for (j = 0; j < number_of_non_terminal_states; j++) { printf("%u", j); for (k = 0; k < number_of_actions_per_non_terminal_state[j]; k++) { printf("\t%lf", state_action_value_functions[i][j][k]); } // end of k loop printf("\n"); } // end of j loop printf("\n"); } // end of i loop printf("\nInitial policy:\n"); for (i = 0; i < number_of_non_terminal_states; i++) { printf("%u", i); for (j = 0; j < number_of_actions_per_non_terminal_state[i]; j++) { printf("\t%lf", policy[i][j]); } // end of j loop printf("\n"); } // end of i loop unsigned int initial_state_index = 0; /* Loop over episodes */ for (i = 0; i < number_of_episodes; i++) { /* Initialize episode to get initial state and action */ initial_state_index = InitializeEpisode(number_of_non_terminal_states); /* Loop through episode and update the policy */ LoopThroughEpisode(number_of_non_terminal_states, number_of_actions_per_non_terminal_state, number_of_state_action_successor_states, state_action_successor_state_indices, state_action_successor_state_transition_probabilities_cumulative_sum, state_action_successor_state_rewards, number_of_state_action_value_functions, state_action_value_functions, state_action_value_function_max_tie_stack, policy, policy_cumulative_sum, alpha, epsilon, discounting_factor_gamma, maximum_episode_length, initial_state_index); } // end of i loop 
/*********************************************************************************************************/ /*************************************** PRINT VALUES AND POLICIES ***************************************/ /*********************************************************************************************************/ printf("\nFinal state-action value functions:\n"); for (i = 0; i < number_of_state_action_value_functions; i++) { printf("Q_%u\n", i); for (j = 0; j < number_of_non_terminal_states; j++) { printf("%u", j); for (k = 0; k < number_of_actions_per_non_terminal_state[j]; k++) { printf("\t%lf", state_action_value_functions[i][j][k]); } // end of k loop printf("\n"); } // end of j loop printf("\n"); } // end of i loop printf("\nFinal policy:\n"); for (i = 0; i < number_of_non_terminal_states; i++) { printf("%u", i); for (j = 0; j < number_of_actions_per_non_terminal_state[i]; j++) { printf("\t%lf", policy[i][j]); } // end of j loop printf("\n"); } // end of i loop /*********************************************************************************************************/ /****************************************** FREE DYNAMIC MEMORY ******************************************/ /*********************************************************************************************************/ for (i = 0; i < number_of_non_terminal_states; i++) { free(policy_cumulative_sum[i]); free(policy[i]); } // end of i loop free(policy_cumulative_sum); free(policy); for (i = 0; i < number_of_states; i++) { free(state_action_value_function_max_tie_stack[i]); } // end of i loop free(state_action_value_function_max_tie_stack); for (i = 0; i < number_of_state_action_value_functions; i++) { for (j = 0; j < number_of_states; j++) { free(state_action_value_functions[i][j]); } // end of j loop free(state_action_value_functions[i]); } // end of i loop free(state_action_value_functions); for (i = 0; i < number_of_non_terminal_states; i++) { for (j = 0; j < 
number_of_actions_per_non_terminal_state[i]; j++) { free(state_action_successor_state_rewards[i][j]); free(state_action_successor_state_transition_probabilities_cumulative_sum[i][j]); free(state_action_successor_state_transition_probabilities[i][j]); free(state_action_successor_state_indices[i][j]); } // end of j loop free(state_action_successor_state_rewards[i]); free(state_action_successor_state_transition_probabilities_cumulative_sum[i]); free(state_action_successor_state_transition_probabilities[i]); free(state_action_successor_state_indices[i]); free(number_of_state_action_successor_states[i]); } // end of i loop free(state_action_successor_state_rewards); free(state_action_successor_state_transition_probabilities_cumulative_sum); free(state_action_successor_state_transition_probabilities); free(state_action_successor_state_indices); free(number_of_state_action_successor_states); free(number_of_actions_per_state); free(number_of_actions_per_non_terminal_state); return 0; } // end of main /*********************************************************************************************************/ /*********************************************** FUNCTIONS ***********************************************/ /*********************************************************************************************************/ /* This function initializes episodes */ unsigned int InitializeEpisode(unsigned int number_of_non_terminal_states) { unsigned int initial_state_index; /* Initial state */ initial_state_index = rand() % number_of_non_terminal_states; // randomly choose an initial state from all non-terminal states return initial_state_index; } // end of InitializeEpisode function /* This function selects a policy with using epsilon-greedy from the state-action-value function */ void EpsilonGreedyPolicyFromStateActionFunction(unsigned int* number_of_actions_per_non_terminal_state, unsigned int number_of_state_action_value_functions, double*** state_action_value_functions, 
double epsilon, unsigned int state_index, double** policy, double** policy_cumulative_sum) { unsigned int i, j, max_action_count = 1; double state_action_value = 0.0, max_state_action_value = -DBL_MAX, max_policy_apportioned_probability_per_action = 1.0, remaining_apportioned_probability_per_action = 0.0; /* Update policy greedily from state-value function */ for (i = 0; i < number_of_actions_per_non_terminal_state[state_index]; i++) { /* Save max state action value and find the number of actions that have the same max state action value */ state_action_value = 0.0; for (j = 0; j < number_of_state_action_value_functions; j++) { state_action_value += state_action_value_functions[j][state_index][i]; } // end of j loop if (state_action_value > max_state_action_value) { max_state_action_value = state_action_value; max_action_count = 1; } else if (state_action_value == max_state_action_value) { max_action_count++; } } // end of i loop /* Apportion policy probability across ties equally for state-action pairs that have the same value and spread out epsilon otherwise */ if (max_action_count == number_of_actions_per_non_terminal_state[state_index]) { max_policy_apportioned_probability_per_action = 1.0 / max_action_count; remaining_apportioned_probability_per_action = 0.0; } else { max_policy_apportioned_probability_per_action = (1.0 - epsilon) / max_action_count; remaining_apportioned_probability_per_action = epsilon / (number_of_actions_per_non_terminal_state[state_index] - max_action_count); } /* Update policy with our apportioned probabilities */ for (i = 0; i < number_of_actions_per_non_terminal_state[state_index]; i++) { state_action_value = 0.0; for (j = 0; j < number_of_state_action_value_functions; j++) { state_action_value += state_action_value_functions[j][state_index][i]; } // end of j loop if (state_action_value == max_state_action_value) { policy[state_index][i] = max_policy_apportioned_probability_per_action; } else { policy[state_index][i] = 
remaining_apportioned_probability_per_action; } } // end of i loop /* Update policy cumulative sum */ policy_cumulative_sum[state_index][0] = policy[state_index][0]; for (i = 1; i < number_of_actions_per_non_terminal_state[state_index]; i++) { policy_cumulative_sum[state_index][i] = policy_cumulative_sum[state_index][i - 1] + policy[state_index][i]; } // end of i loop return; } // end of EpsilonGreedyPolicyFromStateActionFunction function /* This function loops through episodes and updates the policy */ void LoopThroughEpisode(unsigned int number_of_non_terminal_states, unsigned int* number_of_actions_per_non_terminal_state, unsigned int** number_of_state_action_successor_states, unsigned int*** state_action_successor_state_indices, double*** state_action_successor_state_transition_probabilities_cumulative_sum, double*** state_action_successor_state_rewards, unsigned int number_of_state_action_value_functions, double*** state_action_value_functions, unsigned int** state_action_value_function_max_tie_stack, double** policy, double** policy_cumulative_sum, double alpha, double epsilon, double discounting_factor_gamma, unsigned int maximum_episode_length, unsigned int state_index) { unsigned int t, i; unsigned int action_index, successor_state_transition_index, next_state_index, updating_state_action_value_function_index; double probability, reward; /* Loop through episode steps until termination */ for (t = 0; t < maximum_episode_length; t++) { /* Get epsilon-greedy action */ probability = UnifRand(); /* Choose policy for chosen state by epsilon-greedy choosing from the state-action-value function */ EpsilonGreedyPolicyFromStateActionFunction(number_of_actions_per_non_terminal_state, number_of_state_action_value_functions, state_action_value_functions, epsilon, state_index, policy, policy_cumulative_sum); /* Find which action using probability */ for (i = 0; i < number_of_actions_per_non_terminal_state[state_index]; i++) { if (probability <= 
policy_cumulative_sum[state_index][i]) { action_index = i; break; // break i loop since we found our index } } // end of i loop /* Get reward */ probability = UnifRand(); for (i = 0; i < number_of_state_action_successor_states[state_index][action_index]; i++) { if (probability <= state_action_successor_state_transition_probabilities_cumulative_sum[state_index][action_index][i]) { successor_state_transition_index = i; break; // break i loop since we found our index } } // end of i loop /* Get reward from state and action */ reward = state_action_successor_state_rewards[state_index][action_index][successor_state_transition_index]; /* Get next state */ next_state_index = state_action_successor_state_indices[state_index][action_index][successor_state_transition_index]; /* Update state action value equally randomly selecting from the state-action-value functions */ updating_state_action_value_function_index = rand() % number_of_state_action_value_functions; state_index = UpdateStateActionValueFunction(number_of_non_terminal_states, number_of_actions_per_non_terminal_state, state_action_value_function_max_tie_stack, alpha, discounting_factor_gamma, state_index, action_index, reward, next_state_index, updating_state_action_value_function_index, number_of_state_action_value_functions, state_action_value_functions); /* Check to see if we actioned into a terminal state */ if (state_index >= number_of_non_terminal_states) { break; // episode terminated since we ended up in a terminal state } } // end of t loop return; } // end of LoopThroughEpisode function /* This function updates the state-action-value function */ unsigned int UpdateStateActionValueFunction(unsigned int number_of_non_terminal_states, unsigned int* number_of_actions_per_non_terminal_state, unsigned int** state_action_value_function_max_tie_stack, double alpha, double discounting_factor_gamma, unsigned int state_index, unsigned int action_index, double reward, unsigned int next_state_index, unsigned int 
updating_state_action_value_function_index, unsigned int number_of_state_action_value_functions, double***state_action_value_functions) { unsigned int i; unsigned int next_action_index, max_action_count, not_updating_state_action_value_function_index = 0; double max_action_value; /* Check to see if we actioned into a terminal state */ if (next_state_index >= number_of_non_terminal_states) { state_action_value_functions[updating_state_action_value_function_index][state_index][action_index] += alpha * (reward - state_action_value_functions[updating_state_action_value_function_index][state_index][action_index]); } else { /* Get next action, max action of next state */ max_action_value = -DBL_MAX; max_action_count = 0; for (i = 0; i < number_of_actions_per_non_terminal_state[next_state_index]; i++) { if (max_action_value < state_action_value_functions[updating_state_action_value_function_index][next_state_index][i]) { max_action_value = state_action_value_functions[updating_state_action_value_function_index][next_state_index][i]; state_action_value_function_max_tie_stack[next_state_index][0] = i; max_action_count = 1; } else if (max_action_value == state_action_value_functions[updating_state_action_value_function_index][next_state_index][i]) { state_action_value_function_max_tie_stack[next_state_index][max_action_count]; max_action_count++; } } // end of i loop next_action_index = state_action_value_function_max_tie_stack[next_state_index][rand() % max_action_count]; /* Calculate state-action-function using quintuple SARSA */ if (number_of_state_action_value_functions > 1) { do { not_updating_state_action_value_function_index = rand() % number_of_state_action_value_functions; } while (not_updating_state_action_value_function_index == updating_state_action_value_function_index); } state_action_value_functions[updating_state_action_value_function_index][state_index][action_index] += alpha * (reward + discounting_factor_gamma * 
state_action_value_functions[not_updating_state_action_value_function_index][next_state_index][next_action_index] - state_action_value_functions[updating_state_action_value_function_index][state_index][action_index]); /* Update state and action to next state and action */ state_index = next_state_index; } return state_index; } // end of UpdateStateActionValueFunction function /* This function returns a random uniform number within range [0,1] */ double UnifRand(void) { return (double)rand() / (double)RAND_MAX; } // end of UnifRand function
the_stack_data/148271.c
/* $Id$ */

/* Looks up `name` in the process environment and returns a pointer to the
 * value part of the matching "NAME=value" entry, or NULL when `name` is
 * NULL, the environment is absent, or no entry matches.
 *
 * BUGFIX: the original comparison loop `while (*q && *q++ == *p++)`
 * advanced BOTH pointers on a mismatch, so a mismatch on the last
 * character of `name` could land `p` exactly on the '=' and the entry was
 * wrongly accepted (e.g. getenv("FOX") returned "bar" from "FOO=bar").
 * The loop below only advances on a confirmed match.
 */
char *getenv(char *name)
{
	extern char **environ;
	char **env, *entry, *n;

	if (environ == 0 || name == 0)
		return 0;
	for (env = environ; (entry = *env) != 0; env++) {
		n = name;
		while (*n != '\0' && *n == *entry) {
			n++;
			entry++;
		}
		/* full name consumed AND the entry continues with '=' */
		if (*n == '\0' && *entry == '=')
			return entry + 1;
	}
	return 0;
}
the_stack_data/67086.c
#include <stdio.h>

/* Reads an integer and reports whether it is even ("paros") or odd
 * ("paratlan").  Returns 1 on invalid input. */
int main()
{
    int szam;

    printf("szam: ");
    if (scanf("%d", &szam) != 1) {
        /* BUGFIX: the original ignored scanf's result and then read
         * szam uninitialized (undefined behavior) on non-numeric input */
        return 1;
    }
    if (szam % 2 == 0) {
        printf("paros\n");
    } else {
        printf("paratlan\n");
    }
    return 0;
}
the_stack_data/86648.c
// This program combine three different uses of b: identity, reset
// (involutive) and increment, but in the loop b is always positive
// $Id$

#include <stdlib.h>

/* Endlessly mutates b with one of three randomly chosen operations:
 * leave it alone (identity), reset it to zero, or increment it.
 * Since b starts at 0 and is only reset or incremented, it stays
 * non-negative forever.  This function never returns. */
void run(void)
{
    int b = 0;

    for (;;) {
        if (rand() % 2) {
            /* identity: leave b unchanged */
        } else if (rand() % 2) {
            b = 0;      /* reset */
        } else {
            b++;        /* increment */
        }
    }
}

int main(void)
{
    run();
    return 0;
}
the_stack_data/111076707.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/*
 * 4x4 tic-tac-toe against the computer.  The board is a 4x4 char grid;
 * the move helpers view it as a flat array of 16 cells (row-major), so
 * flat cell i corresponds to z[i / 4][i % 4].
 *
 * Fixes over the original version:
 *  - elnikiti() indexed past the board (z[a+3][b] with a up to 3, and
 *    z[a][b+3] wrapping into the next row, producing bogus "row" wins);
 *    it now checks only the ten real winning lines.
 *  - main() initialized the board with {" "}, which left 15 of the 16
 *    cells '\0' instead of ' '; the board is now filled with spaces.
 *  - main() left the player marks uninitialized when the first-move
 *    answer was neither 'y' nor 'n'; anything other than 'n' now
 *    behaves like 'y'.
 *  - spexti() accepted any integer (out-of-range values indexed the
 *    board out of bounds) and read an uninitialized value on
 *    non-numeric input; it now validates the cell number.
 */

/* The ten winning lines of the 4x4 board as flat-cell indices:
 * four rows, four columns, two diagonals. */
static const int grammes[10][4] = {
    { 0,  1,  2,  3}, { 4,  5,  6,  7}, { 8,  9, 10, 11}, {12, 13, 14, 15},
    { 0,  4,  8, 12}, { 1,  5,  9, 13}, { 2,  6, 10, 14}, { 3,  7, 11, 15},
    { 0,  5, 10, 15}, { 3,  6,  9, 12}
};

/* Nonzero if flat cell i holds neither player's mark. */
static int eleftero(char f[16], int i)
{
    return f[i] != 'X' && f[i] != 'O';
}

/* If the three cells of line g other than g[miss] hold equal values and
 * g[miss] is free, return g[miss] (the completing/blocking move);
 * otherwise return -1.  As in the original heuristic, the three equal
 * cells are not required to be actual marks. */
static int kinisi_grammis(char f[16], const int g[4], int miss)
{
    int a = g[(miss + 1) % 4];
    int b = g[(miss + 2) % 4];
    int c = g[(miss + 3) % 4];

    if (f[a] == f[b] && f[a] == f[c] && eleftero(f, g[miss]))
        return g[miss];
    return -1;
}

/* Prints the result message and returns the "game over" code 3.
 * x == 'p' means the winning line belongs to the human player. */
static int anakoinosi(char x)
{
    if (x == 'p')
        printf("Sixaritiria molis kataferate na nikisete!!!!\n");
    else
        printf("Distixos molis xasate :(\n");
    return 3;
}

/* Human move: prompts until a legal free cell (0-15) is entered. */
int spexti(char f[16])
{
    int i, r;

    for (;;) {
        printf("Einai i seira sou dose mou to tetragono pou thes na bi to stixio sou ksekinodas apo to 0 mexrti to 15\n");
        r = scanf("%d", &i);
        if (r == EOF)
            exit(1);        /* input stream closed: nothing sensible left to do */
        if (r != 1) {
            scanf("%*s");   /* discard the non-numeric token and re-prompt */
            continue;
        }
        if (i >= 0 && i < 16 && eleftero(f, i))
            return i;
    }
}

/* Computer move: complete (or block) any line that has three equal
 * cells and one free cell; otherwise play a random free cell.
 * The scan order mirrors the original hand-written checks: rows are
 * tried pattern-first (all rows missing cell 3, then missing cell 2,
 * ...), columns and diagonals line-first. */
int smix(char f[16])
{
    int g, miss, kinisi;

    srand(time(NULL));
    printf("\nEinai i seira tou ipologisti. Perimenete!\n\n");

    /* rows, scanned pattern-major */
    for (miss = 3; miss >= 0; miss--) {
        for (g = 0; g < 4; g++) {
            kinisi = kinisi_grammis(f, grammes[g], miss);
            if (kinisi >= 0)
                return kinisi;
        }
    }
    /* columns and the two diagonals, scanned line-major */
    for (g = 4; g < 10; g++) {
        for (miss = 3; miss >= 0; miss--) {
            kinisi = kinisi_grammis(f, grammes[g], miss);
            if (kinisi >= 0)
                return kinisi;
        }
    }

    /* no three-in-a-line found: play a random free cell */
    do {
        kinisi = rand() % 16;
    } while (!eleftero(f, kinisi));
    return kinisi;
}

/* Draws the board. */
void emfanisi(char z[4][4])
{
    int a, b;

    for (a = 0; a < 4; a++) {
        for (b = 0; b < 4; b++)
            printf(" %c|", z[a][b]);
        printf("\n");
        printf(" - - - -\n");
    }
}

/* Returns 3 (after announcing the result, see anakoinosi) if the board
 * holds four equal marks in any row, column or diagonal, else 0. */
int elnikiti(char z[4][4], char x)
{
    char *f = &z[0][0];     /* flat row-major view of the board */
    int g;

    for (g = 0; g < 10; g++) {
        const int *gr = grammes[g];

        if ((f[gr[0]] == 'X' || f[gr[0]] == 'O') &&
            f[gr[0]] == f[gr[1]] &&
            f[gr[0]] == f[gr[2]] &&
            f[gr[0]] == f[gr[3]])
            return anakoinosi(x);
    }
    return 0;
}

/* Counts occupied cells; 16 means the board is full (a draw). */
int gemp(char z[4][4])
{
    int a, b, count = 0;

    for (a = 0; a < 4; a++)
        for (b = 0; b < 4; b++)
            if (z[a][b] == 'X' || z[a][b] == 'O')
                count++;
    return count;
}

int main(int argc, char *argv[])
{
    char z[4][4];
    char *f = &z[0][0];     /* shared flat view: no copying between moves */
    char p, u;              /* p: human's mark, u: computer's mark */
    int gyros, kinisi;

    (void)argc;
    (void)argv;

    memset(z, ' ', sizeof z);
    srand(time(NULL));
    emfanisi(z);

    printf("Ean thes na ksekinisis protos pata y ean oxi n\n");
    if (getchar() == 'n') {
        /* computer starts: it is X and opens on a random cell */
        u = 'X';
        p = 'O';
        z[rand() % 4][rand() % 4] = u;
        emfanisi(z);
    } else {
        /* 'y' or anything else: the human starts as X */
        p = 'X';
        u = 'O';
    }

    for (gyros = 0; gyros < 9; gyros++) {
        if (gemp(z) == 16) {
            printf("Distixos molis gemise o pinakas etsi to pexnidi ginete isopalia\n");
            system("PAUSE");
            return 0;
        }
        kinisi = spexti(f);
        f[kinisi] = p;
        emfanisi(z);
        if (elnikiti(z, 'p') == 3) {
            system("PAUSE");
            return 0;
        }

        if (gemp(z) == 16) {
            printf("Distixos molis gemise o pinakas etsi to pexnidi ginete isopalia\n");
            system("PAUSE");
            return 0;
        }
        kinisi = smix(f);
        f[kinisi] = u;
        emfanisi(z);
        if (elnikiti(z, 'u') == 3) {
            system("PAUSE");
            return 0;
        }
    }

    system("PAUSE");
    return 0;
}
the_stack_data/9513174.c
/* Exits with status 153 (65 + 88), exactly as the original expression. */
int main()
{
    int exit_status = 65 + 88;
    return exit_status;
}
the_stack_data/168892186.c
/* Copyright © 2011 MLstate This file is part of Opa. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /// whoops !
the_stack_data/120833.c
// test-touch.c
//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifdef __cplusplus
extern "C" {
#endif
extern double omp_get_wtime();
extern int omp_get_num_threads();
extern int omp_get_max_threads();
#ifdef __cplusplus
}
#endif

/* Touch a few core OpenMP runtime entry points; the return values are
 * deliberately discarded.  The program only verifies that the symbols
 * resolve and can be called. */
int main() {
  (void)omp_get_wtime();
  (void)omp_get_num_threads();
  (void)omp_get_max_threads();
  return 0;
}

// end of file //
the_stack_data/41448.c
/* SV-COMP-style verification harness: proves that the assertion
 * a[i] == N + (i+1)^2 holds for every i after the two recurrence loops. */
extern void __VERIFIER_error() __attribute__ ((__noreturn__));
extern void __VERIFIER_assume(int);

/* Reaching the ERROR label signals a property violation to the verifier. */
void __VERIFIER_assert(int cond) {
    if(!(cond)) {
        ERROR: __VERIFIER_error();
    }
}

extern int __VERIFIER_nondet_int(void);

int N;  /* array length, chosen nondeterministically by the verifier */

int main() {
    N = __VERIFIER_nondet_int();
    if(N <= 0) return 1;
    int i;
    int a[N];  /* VLAs sized by the nondeterministic N */
    int b[N];
    b[0] = 1;       /* b satisfies b[i] = 2*i + 1 (the odd numbers) */
    a[0] = N+1;     /* base case of a[i] = N + (i+1)^2 */
    for(i=1; i<N; i++) {
        b[i] = b[i-1] + 2;
    }
    for(i=1; i<N; i++) {
        a[i] = a[i-1] + b[i-1] + 2;
    }
    /* a[i] - a[i-1] = b[i-1] + 2 = 2*i + 1, so by induction a[i] = N + (i+1)^2 */
    for(i=0; i<N; i++) {
        __VERIFIER_assert(a[i] == N + (i+1)*(i+1));
    }
    return 1;
}
the_stack_data/165764223.c
#include <stdio.h>

/* Reads one character; if it is a lowercase letter, prints the letter
 * from the opposite end of the alphabet (97 and 122 are the ASCII
 * codes of 'a' and 'z', so 97 + (122 - c) mirrors a<->z, b<->y, ...).
 * Any other input produces no output. */
int main()
{
    char ch;

    ch = getchar();
    if (ch < 'a' || ch > 'z') {
        return 0;
    }
    ch = 97 + (122 - ch);
    printf("\n%c\n", ch);
    return 0;
}
the_stack_data/918107.c
char hankaku[4096] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x44, 0x82, 0xaa, 0xaa, 0x82, 0x82, 0xaa, 0x92, 0x44, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x7c, 0xfe, 0xd6, 0xd6, 0xfe, 0xfe, 0xd6, 0xee, 0x7c, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe, 0x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x54, 0xfe, 0x54, 0x10, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe, 0xd6, 0x10, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd, 0xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x10, 0x38, 0x54, 0x92, 0x10, 0x10, 0x38, 0x44, 0x82, 0x82, 0x82, 0x44, 0x38, 0x00, 0x00, 0x00, 0x38, 0x44, 0x82, 0x82, 0x82, 0x44, 0x38, 0x10, 0x10, 0xfe, 0x10, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x0e, 0x0b, 0x0b, 0x0a, 0x08, 0x08, 0x18, 0x78, 0xf8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x1f, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x77, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x54, 0x38, 0x28, 0x38, 0x54, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xfc, 0xf8, 0xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e, 0x1e, 0x3e, 0x7e, 0xfe, 0x7e, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x54, 0x92, 0x10, 0x10, 0x10, 0x92, 0x54, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x00, 0x00, 0x44, 0x44, 0x00, 0x00, 0x00, 0x3e, 0x4a, 0x8a, 0x8a, 0x8a, 0x8a, 0x4a, 0x3a, 
0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00, 0x7c, 0x82, 0x40, 0x20, 0x38, 0x44, 0x82, 0x82, 0x82, 0x44, 0x38, 0x08, 0x04, 0x82, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x54, 0x92, 0x10, 0x10, 0x10, 0x92, 0x54, 0x38, 0x10, 0x7c, 0x00, 0x00, 0x00, 0x10, 0x38, 0x54, 0x92, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x92, 0x54, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x08, 0x04, 0xfe, 0x04, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x20, 0x40, 0xfe, 0x40, 0x20, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x80, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28, 0x44, 0xfe, 0x44, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c, 0x38, 0x38, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00, 0x28, 0x28, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x44, 0x44, 0xfe, 0x44, 0x44, 0x44, 0x44, 0x44, 0xfe, 0x44, 0x44, 0x44, 0x00, 0x00, 0x10, 0x3a, 0x56, 0x92, 0x92, 0x90, 0x50, 0x38, 0x14, 0x12, 0x92, 0x92, 0xd4, 0xb8, 0x10, 0x10, 0x62, 0x92, 0x94, 0x94, 0x68, 0x08, 0x10, 0x10, 0x20, 0x2c, 0x52, 0x52, 0x92, 0x8c, 0x00, 0x00, 0x00, 0x70, 0x88, 0x88, 0x88, 0x90, 0x60, 0x47, 0xa2, 0x92, 0x8a, 0x84, 0x46, 0x39, 0x00, 0x00, 0x04, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x04, 0x08, 0x08, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x08, 0x08, 0x04, 0x02, 0x00, 0x80, 0x40, 0x20, 0x20, 0x10, 0x10, 
0x10, 0x10, 0x10, 0x10, 0x10, 0x20, 0x20, 0x40, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x92, 0x54, 0x38, 0x54, 0x92, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0xfe, 0x10, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x02, 0x02, 0x04, 0x04, 0x08, 0x08, 0x08, 0x10, 0x10, 0x20, 0x20, 0x40, 0x40, 0x40, 0x80, 0x80, 0x00, 0x18, 0x24, 0x24, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x24, 0x24, 0x18, 0x00, 0x00, 0x00, 0x08, 0x18, 0x28, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x3e, 0x00, 0x00, 0x00, 0x18, 0x24, 0x42, 0x42, 0x02, 0x04, 0x08, 0x10, 0x20, 0x20, 0x40, 0x40, 0x7e, 0x00, 0x00, 0x00, 0x18, 0x24, 0x42, 0x02, 0x02, 0x04, 0x18, 0x04, 0x02, 0x02, 0x42, 0x24, 0x18, 0x00, 0x00, 0x00, 0x0c, 0x0c, 0x0c, 0x14, 0x14, 0x14, 0x24, 0x24, 0x44, 0x7e, 0x04, 0x04, 0x1e, 0x00, 0x00, 0x00, 0x7c, 0x40, 0x40, 0x40, 0x58, 0x64, 0x02, 0x02, 0x02, 0x02, 0x42, 0x24, 0x18, 0x00, 0x00, 0x00, 0x18, 0x24, 0x42, 0x40, 0x58, 0x64, 0x42, 0x42, 0x42, 0x42, 0x42, 0x24, 0x18, 0x00, 0x00, 0x00, 0x7e, 0x42, 0x42, 0x04, 0x04, 0x08, 0x08, 0x08, 0x10, 0x10, 0x10, 0x10, 0x38, 0x00, 0x00, 0x00, 0x18, 0x24, 0x42, 0x42, 0x42, 0x24, 0x18, 0x24, 0x42, 0x42, 0x42, 0x24, 0x18, 0x00, 0x00, 0x00, 0x18, 0x24, 0x42, 0x42, 0x42, 0x42, 0x42, 0x26, 0x1a, 0x02, 0x42, 0x24, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x08, 0x08, 0x10, 0x00, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x40, 
0x20, 0x10, 0x08, 0x04, 0x02, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x00, 0x00, 0x38, 0x44, 0x82, 0x82, 0x82, 0x04, 0x08, 0x10, 0x10, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x38, 0x44, 0x82, 0x9a, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0x9c, 0x80, 0x46, 0x38, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x24, 0x24, 0x24, 0x24, 0x7e, 0x42, 0x42, 0x42, 0xe7, 0x00, 0x00, 0x00, 0xf0, 0x48, 0x44, 0x44, 0x44, 0x48, 0x78, 0x44, 0x42, 0x42, 0x42, 0x44, 0xf8, 0x00, 0x00, 0x00, 0x3a, 0x46, 0x42, 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x82, 0x42, 0x44, 0x38, 0x00, 0x00, 0x00, 0xf8, 0x44, 0x44, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x44, 0x44, 0xf8, 0x00, 0x00, 0x00, 0xfe, 0x42, 0x42, 0x40, 0x40, 0x44, 0x7c, 0x44, 0x40, 0x40, 0x42, 0x42, 0xfe, 0x00, 0x00, 0x00, 0xfe, 0x42, 0x42, 0x40, 0x40, 0x44, 0x7c, 0x44, 0x44, 0x40, 0x40, 0x40, 0xf0, 0x00, 0x00, 0x00, 0x3a, 0x46, 0x42, 0x82, 0x80, 0x80, 0x9e, 0x82, 0x82, 0x82, 0x42, 0x46, 0x38, 0x00, 0x00, 0x00, 0xe7, 0x42, 0x42, 0x42, 0x42, 0x42, 0x7e, 0x42, 0x42, 0x42, 0x42, 0x42, 0xe7, 0x00, 0x00, 0x00, 0x7c, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x7c, 0x00, 0x00, 0x00, 0x1f, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x84, 0x48, 0x30, 0x00, 0x00, 0xe7, 0x42, 0x44, 0x48, 0x50, 0x50, 0x60, 0x50, 0x50, 0x48, 0x44, 0x42, 0xe7, 0x00, 0x00, 0x00, 0xf0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x42, 0x42, 0xfe, 0x00, 0x00, 0x00, 0xc3, 0x42, 0x66, 0x66, 0x66, 0x5a, 0x5a, 0x5a, 0x42, 0x42, 0x42, 0x42, 0xe7, 0x00, 0x00, 0x00, 0xc7, 0x42, 0x62, 0x62, 0x52, 0x52, 0x52, 0x4a, 0x4a, 0x4a, 0x46, 0x46, 0xe2, 0x00, 0x00, 0x00, 0x38, 0x44, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x44, 0x38, 0x00, 0x00, 0x00, 0xf8, 0x44, 0x42, 0x42, 0x42, 0x44, 0x78, 0x40, 0x40, 0x40, 0x40, 0x40, 0xf0, 0x00, 0x00, 0x00, 0x38, 0x44, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x92, 0x8a, 0x44, 0x3a, 0x00, 0x00, 0x00, 0xfc, 0x42, 0x42, 0x42, 0x42, 0x7c, 0x44, 0x42, 0x42, 0x42, 0x42, 0x42, 0xe7, 0x00, 0x00, 
0x00, 0x3a, 0x46, 0x82, 0x82, 0x80, 0x40, 0x38, 0x04, 0x02, 0x82, 0x82, 0xc4, 0xb8, 0x00, 0x00, 0x00, 0xfe, 0x92, 0x92, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x7c, 0x00, 0x00, 0x00, 0xe7, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x24, 0x3c, 0x00, 0x00, 0x00, 0xe7, 0x42, 0x42, 0x42, 0x42, 0x24, 0x24, 0x24, 0x24, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0xe7, 0x42, 0x42, 0x42, 0x5a, 0x5a, 0x5a, 0x5a, 0x24, 0x24, 0x24, 0x24, 0x24, 0x00, 0x00, 0x00, 0xe7, 0x42, 0x42, 0x24, 0x24, 0x24, 0x18, 0x24, 0x24, 0x24, 0x42, 0x42, 0xe7, 0x00, 0x00, 0x00, 0xee, 0x44, 0x44, 0x44, 0x28, 0x28, 0x28, 0x10, 0x10, 0x10, 0x10, 0x10, 0x7c, 0x00, 0x00, 0x00, 0xfe, 0x84, 0x84, 0x08, 0x08, 0x10, 0x10, 0x20, 0x20, 0x40, 0x42, 0x82, 0xfe, 0x00, 0x00, 0x00, 0x3e, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3e, 0x00, 0x80, 0x80, 0x40, 0x40, 0x20, 0x20, 0x20, 0x10, 0x10, 0x08, 0x08, 0x04, 0x04, 0x04, 0x02, 0x02, 0x00, 0x7c, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x7c, 0x00, 0x00, 0x10, 0x28, 0x44, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x10, 0x08, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x08, 0x04, 0x3c, 0x44, 0x84, 0x84, 0x8c, 0x76, 0x00, 0x00, 0xc0, 0x40, 0x40, 0x40, 0x40, 0x58, 0x64, 0x42, 0x42, 0x42, 0x42, 0x42, 0x64, 0x58, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x4c, 0x84, 0x84, 0x80, 0x80, 0x82, 0x44, 0x38, 0x00, 0x00, 0x0c, 0x04, 0x04, 0x04, 0x04, 0x34, 0x4c, 0x84, 0x84, 0x84, 0x84, 0x84, 0x4c, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x44, 0x82, 0x82, 0xfc, 0x80, 0x82, 0x42, 0x3c, 0x00, 0x00, 0x0e, 0x10, 0x10, 0x10, 0x10, 0x7c, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x4c, 0x84, 0x84, 0x84, 0x84, 0x4c, 0x34, 
0x04, 0x04, 0x78, 0xc0, 0x40, 0x40, 0x40, 0x40, 0x58, 0x64, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0xe3, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00, 0x30, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x38, 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x0c, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x30, 0xc0, 0x40, 0x40, 0x40, 0x40, 0x4e, 0x44, 0x48, 0x50, 0x60, 0x50, 0x48, 0x44, 0xe6, 0x00, 0x00, 0x30, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0x49, 0x49, 0x49, 0x49, 0x49, 0x49, 0x49, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x64, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0xe3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x44, 0x82, 0x82, 0x82, 0x82, 0x82, 0x44, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x64, 0x42, 0x42, 0x42, 0x42, 0x42, 0x64, 0x58, 0x40, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x4c, 0x84, 0x84, 0x84, 0x84, 0x84, 0x4c, 0x34, 0x04, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x62, 0x42, 0x40, 0x40, 0x40, 0x40, 0x40, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7a, 0x86, 0x82, 0xc0, 0x38, 0x06, 0x82, 0xc2, 0xbc, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x7c, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x46, 0x3b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe7, 0x42, 0x42, 0x42, 0x24, 0x24, 0x24, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe7, 0x42, 0x42, 0x5a, 0x5a, 0x5a, 0x24, 0x24, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x44, 0x28, 0x28, 0x10, 0x28, 0x28, 0x44, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe7, 0x42, 0x42, 0x24, 0x24, 0x24, 0x18, 0x18, 0x10, 0x10, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x82, 0x84, 0x08, 0x10, 0x20, 0x42, 0x82, 0xfe, 0x00, 0x00, 0x00, 0x06, 0x08, 0x10, 0x10, 0x10, 0x10, 0x60, 0x10, 0x10, 0x10, 0x10, 0x08, 0x06, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 
0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x00, 0x60, 0x10, 0x08, 0x08, 0x08, 0x08, 0x06, 0x08, 0x08, 0x08, 0x08, 0x10, 0x60, 0x00, 0x00, 0x00, 0x72, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x28, 0x44, 0x82, 0xfe, 0x82, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x44, 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x82, 0x44, 0x38, 0x10, 0x20, 0x00, 0x00, 0x24, 0x24, 0x00, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x42, 0x3e, 0x00, 0x00, 0x0c, 0x08, 0x10, 0x00, 0x00, 0x38, 0x44, 0x82, 0x82, 0xfe, 0x80, 0x82, 0x44, 0x38, 0x00, 0x00, 0x00, 0x10, 0x28, 0x44, 0x00, 0x78, 0x04, 0x04, 0x3c, 0x44, 0x84, 0x84, 0x44, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x24, 0x24, 0x00, 0x78, 0x04, 0x04, 0x3c, 0x44, 0x84, 0x84, 0x44, 0x3e, 0x00, 0x00, 0x10, 0x08, 0x04, 0x00, 0x00, 0x78, 0x04, 0x04, 0x3c, 0x44, 0x84, 0x84, 0x44, 0x3e, 0x00, 0x00, 0x00, 0x18, 0x24, 0x18, 0x00, 0x78, 0x04, 0x04, 0x3c, 0x44, 0x84, 0x84, 0x44, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x42, 0x80, 0x80, 0x80, 0x80, 0x80, 0x42, 0x3c, 0x08, 0x10, 0x00, 0x10, 0x28, 0x44, 0x00, 0x38, 0x44, 0x82, 0x82, 0xfe, 0x80, 0x82, 0x44, 0x38, 0x00, 0x00, 0x00, 0x00, 0x24, 0x24, 0x00, 0x38, 0x44, 0x82, 0x82, 0xfe, 0x80, 0x82, 0x44, 0x38, 0x00, 0x00, 0x10, 0x08, 0x04, 0x00, 0x00, 0x38, 0x44, 0x82, 0x82, 0xfe, 0x80, 0x82, 0x44, 0x38, 0x00, 0x00, 0x00, 0x00, 0x24, 0x24, 0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, 0x00, 0x10, 0x28, 0x44, 0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, 0x10, 0x08, 0x04, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, 0x24, 0x24, 0x00, 0x38, 0x44, 0x82, 0x82, 0x82, 0x82, 0xfe, 0x82, 0x82, 0x82, 0x82, 0x00, 0x00, 0x00, 0x38, 0x44, 0x38, 0x44, 0x82, 0x82, 0x82, 0x82, 0xfe, 0x82, 0x82, 0x82, 0x82, 0x00, 0x00, 0x0c, 0x08, 0x10, 0xfe, 0x80, 0x80, 0x80, 0x80, 0xf8, 0x80, 0x80, 0x80, 0x80, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x1c, 
0x12, 0x72, 0x9e, 0x90, 0x90, 0x92, 0x6c, 0x00, 0x00, 0x0c, 0x10, 0x20, 0x28, 0x28, 0x28, 0xfe, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x00, 0x00, 0x00, 0x10, 0x28, 0x44, 0x00, 0x38, 0x44, 0x82, 0x82, 0x82, 0x82, 0x82, 0x44, 0x38, 0x00, 0x00, 0x00, 0x00, 0x24, 0x24, 0x00, 0x38, 0x44, 0x82, 0x82, 0x82, 0x82, 0x82, 0x44, 0x38, 0x00, 0x00, 0x10, 0x08, 0x04, 0x00, 0x00, 0x38, 0x44, 0x82, 0x82, 0x82, 0x82, 0x82, 0x44, 0x38, 0x00, 0x00, 0x00, 0x10, 0x28, 0x44, 0x00, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x42, 0x3e, 0x00, 0x00, 0x10, 0x08, 0x04, 0x00, 0x00, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x42, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x24, 0x24, 0x00, 0x82, 0x82, 0x44, 0x44, 0x28, 0x28, 0x10, 0x10, 0x20, 0x20, 0x40, 0x24, 0x24, 0x00, 0x38, 0x44, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x44, 0x38, 0x00, 0x00, 0x24, 0x24, 0x00, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x44, 0x38, 0x00, 0x00, 0x00, 0x28, 0x28, 0x28, 0x3c, 0x6a, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0x6a, 0x3c, 0x28, 0x28, 0x28, 0x00, 0x0c, 0x12, 0x20, 0x20, 0x20, 0xfc, 0x20, 0x20, 0x20, 0x60, 0xa0, 0xb2, 0x4c, 0x00, 0x00, 0x00, 0x82, 0x82, 0x44, 0x28, 0x10, 0xfe, 0x10, 0x10, 0xfe, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, 0x00, 0xe0, 0x90, 0x88, 0x88, 0x88, 0x94, 0xe4, 0x9f, 0x84, 0x84, 0x84, 0x84, 0x84, 0x00, 0x00, 0x00, 0x0c, 0x12, 0x10, 0x10, 0x10, 0xfe, 0x10, 0x10, 0x10, 0x10, 0x10, 0x90, 0x60, 0x00, 0x00, 0x0c, 0x08, 0x10, 0x00, 0x00, 0x78, 0x04, 0x04, 0x3c, 0x44, 0x84, 0x84, 0x44, 0x3e, 0x00, 0x00, 0x0c, 0x08, 0x10, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, 0x0c, 0x08, 0x10, 0x00, 0x00, 0x38, 0x44, 0x82, 0x82, 0x82, 0x82, 0x82, 0x44, 0x38, 0x00, 0x00, 0x0c, 0x08, 0x10, 0x00, 0x00, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x42, 0x3e, 0x00, 0x00, 0x00, 0x12, 0x2a, 0x24, 0x00, 0xf8, 0x84, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x00, 0x00, 0x12, 0x2a, 0x24, 0x00, 0x82, 0xc2, 0xc2, 0xa2, 0x92, 0x92, 0x8a, 0x86, 0x86, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 
0x04, 0x04, 0x3c, 0x44, 0x84, 0x84, 0x44, 0x3e, 0x00, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x44, 0x82, 0x82, 0x82, 0x82, 0x82, 0x44, 0x38, 0x00, 0xfe, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00, 0x10, 0x10, 0x20, 0x44, 0x82, 0x82, 0x82, 0x44, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x02, 0x02, 0x02, 0x00, 0x00, 0x00, 0x10, 0x30, 0x10, 0x10, 0x10, 0x00, 0xfe, 0x00, 0x78, 0x04, 0x38, 0x40, 0x7c, 0x00, 0x00, 0x00, 0x10, 0x30, 0x10, 0x10, 0x10, 0x00, 0xfe, 0x00, 0x18, 0x28, 0x48, 0x7c, 0x08, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x24, 0x48, 0x90, 0x90, 0x48, 0x24, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x48, 0x24, 0x12, 0x12, 0x24, 0x48, 0x90, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xf0, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xf0, 0x10, 0xf0, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0xf4, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x10, 0xf0, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0xf4, 0x04, 0xf4, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x04, 0xf4, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0xf4, 0x04, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xf0, 0x10, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x1f, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xff, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x1f, 0x10, 0x1f, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x17, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x17, 0x10, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x10, 0x17, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0xf7, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xf7, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x17, 0x10, 0x17, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0xf7, 0x00, 0xf7, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x1f, 0x10, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x10, 0x1f, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0xf7, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xff, 0x10, 0xff, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, };
the_stack_data/231392036.c
/* Test: initialization of a struct containing a C11 anonymous union.
   The union contributes one initializable member slot, so the
   positional initializer {1, 2} fills x and then the union's first
   member (y).  y and z name the same storage. */
typedef struct _s {
    int x;
    union {
        int y;   /* shares storage with z */
        int z;
    };
} s;

s s1 = {1, 2};               /* positional: x = 1, y = 2 */
s s2 = { .x = 1, .y = 2 };   /* designated, through y */
s s3 = { .x = 1, .z = 2 };   /* designated, through z (same storage) */
the_stack_data/57949933.c
// This file contains stubs matching the sybols previously exported by libobjc // when i386 Mac was actually supported. These stubs allow us to tease apart the // dependencies to prepare for removing i386 Mac libobjc entirely. // // This file is not built when building for any other arch/OS combination. When // building for i386 Mac, no other source files are built, just this one. This // is handled using the Included/Excluded Source File Names settings in Xcode, // with arch/OS-specific overrides. // // rdar://problem/58541885 #pragma GCC visibility push(default) const char ___ld_hide_os10_5__objc_class_name_NSObject __asm__("$ld$hide$os10.5$.objc_class_name_NSObject"); const char ___ld_hide_os10_6__objc_class_name_NSObject __asm__("$ld$hide$os10.6$.objc_class_name_NSObject"); const char ___ld_hide_os10_7__objc_class_name_NSObject __asm__("$ld$hide$os10.7$.objc_class_name_NSObject"); const char ___objc_class_name_List __asm__(".objc_class_name_List"); const char ___objc_class_name_NSObject __asm__(".objc_class_name_NSObject"); const char ___objc_class_name_Object __asm__(".objc_class_name_Object"); const char ___objc_class_name_Protocol __asm__(".objc_class_name_Protocol"); void NXCompareHashTables(void) {} void NXCompareMapTables(void) {} void NXCopyHashTable(void) {} void NXCopyStringBuffer(void) {} void NXCopyStringBufferFromZone(void) {} void NXCountHashTable(void) {} void NXCountMapTable(void) {} void NXCreateHashTable(void) {} void NXCreateHashTableFromZone(void) {} void NXCreateMapTable(void) {} void NXCreateMapTableFromZone(void) {} void NXEmptyHashTable(void) {} void NXFreeHashTable(void) {} void NXFreeMapTable(void) {} void NXHashGet(void) {} void NXHashInsert(void) {} void NXHashInsertIfAbsent(void) {} void NXHashMember(void) {} void NXHashRemove(void) {} void NXInitHashState(void) {} void NXInitMapState(void) {} void NXMapGet(void) {} void NXMapInsert(void) {} void NXMapMember(void) {} void NXMapRemove(void) {} void NXNextHashState(void) {} void 
NXNextMapState(void) {} void NXNoEffectFree(void) {} const char NXObjectMapPrototype; void NXPtrHash(void) {} void NXPtrIsEqual(void) {} const char NXPtrPrototype; const char NXPtrStructKeyPrototype; const char NXPtrValueMapPrototype; void NXReallyFree(void) {} void NXResetHashTable(void) {} void NXResetMapTable(void) {} void NXStrHash(void) {} void NXStrIsEqual(void) {} const char NXStrPrototype; const char NXStrStructKeyPrototype; const char NXStrValueMapPrototype; void NXUniqueString(void) {} void NXUniqueStringNoCopy(void) {} void NXUniqueStringWithLength(void) {} char _alloc; void _class_getIvarMemoryManagement(void) {} void _class_isFutureClass(void) {} void _class_isSwift(void) {} char _copy; char _dealloc; char _error; void _objcInit(void) {} void _objc_addWillInitializeClassFunc(void) {} void _objc_atfork_child(void) {} void _objc_atfork_parent(void) {} void _objc_atfork_prepare(void) {} void _objc_autoreleasePoolPop(void) {} void _objc_autoreleasePoolPrint(void) {} void _objc_autoreleasePoolPush(void) {} void _objc_deallocOnMainThreadHelper(void) {} const char _objc_debug_class_hash; const char _objc_empty_cache; void _objc_error(void) {} void _objc_flush_caches(void) {} void _objc_getFreedObjectClass(void) {} void _objc_init(void) {} void _objc_msgForward(void) {} void _objc_msgForward_stret(void) {} void _objc_resolve_categories_for_class(void) {} void _objc_rootAlloc(void) {} void _objc_rootAllocWithZone(void) {} void _objc_rootAutorelease(void) {} void _objc_rootDealloc(void) {} void _objc_rootFinalize(void) {} void _objc_rootHash(void) {} void _objc_rootInit(void) {} void _objc_rootIsDeallocating(void) {} void _objc_rootRelease(void) {} void _objc_rootReleaseWasZero(void) {} void _objc_rootRetain(void) {} void _objc_rootRetainCount(void) {} void _objc_rootTryRetain(void) {} void _objc_rootZone(void) {} void _objc_setBadAllocHandler(void) {} void _objc_setClassLoader(void) {} void _protocol_getMethodTypeEncoding(void) {} char _realloc; char 
_zoneAlloc; char _zoneCopy; char _zoneRealloc; void class_addIvar(void) {} void class_addMethod(void) {} void class_addMethods(void) {} void class_addProperty(void) {} void class_addProtocol(void) {} void class_conformsToProtocol(void) {} void class_copyIvarList(void) {} void class_copyMethodList(void) {} void class_copyPropertyList(void) {} void class_copyProtocolList(void) {} void class_createInstance(void) {} void class_createInstanceFromZone(void) {} void class_createInstances(void) {} void class_getClassMethod(void) {} void class_getClassVariable(void) {} void class_getImageName(void) {} void class_getInstanceMethod(void) {} void class_getInstanceSize(void) {} void class_getInstanceVariable(void) {} void class_getIvarLayout(void) {} void class_getMethodImplementation(void) {} void class_getMethodImplementation_stret(void) {} void class_getName(void) {} void class_getProperty(void) {} void class_getSuperclass(void) {} void class_getVersion(void) {} void class_getWeakIvarLayout(void) {} void class_isMetaClass(void) {} void class_lookupMethod(void) {} void class_nextMethodList(void) {} void class_poseAs(void) {} void class_removeMethods(void) {} void class_replaceMethod(void) {} void class_replaceProperty(void) {} void class_respondsToMethod(void) {} void class_respondsToSelector(void) {} void class_setIvarLayout(void) {} void class_setSuperclass(void) {} void class_setVersion(void) {} void class_setWeakIvarLayout(void) {} void gdb_class_getClass(void) {} void gdb_object_getClass(void) {} void imp_getBlock(void) {} void imp_implementationWithBlock(void) {} void imp_removeBlock(void) {} void instrumentObjcMessageSends(void) {} void ivar_getName(void) {} void ivar_getOffset(void) {} void ivar_getTypeEncoding(void) {} void method_copyArgumentType(void) {} void method_copyReturnType(void) {} void method_exchangeImplementations(void) {} void method_getArgumentType(void) {} void method_getDescription(void) {} void method_getImplementation(void) {} void 
method_getName(void) {} void method_getNumberOfArguments(void) {} void method_getReturnType(void) {} void method_getSizeOfArguments(void) {} void method_getTypeEncoding(void) {} void method_invoke(void) {} void method_invoke_stret(void) {} void method_setImplementation(void) {} void objc_addClass(void) {} void objc_addLoadImageFunc(void) {} void objc_alloc(void) {} void objc_allocWithZone(void) {} void objc_alloc_init(void) {} void objc_allocateClassPair(void) {} void objc_allocateProtocol(void) {} void objc_allocate_object(void) {} void objc_appRequiresGC(void) {} void objc_assertRegisteredThreadWithCollector(void) {} void objc_assign_global(void) {} void objc_assign_ivar(void) {} void objc_assign_strongCast(void) {} void objc_assign_threadlocal(void) {} void objc_assign_weak(void) {} void objc_atomicCompareAndSwapGlobal(void) {} void objc_atomicCompareAndSwapGlobalBarrier(void) {} void objc_atomicCompareAndSwapInstanceVariable(void) {} void objc_atomicCompareAndSwapInstanceVariableBarrier(void) {} void objc_atomicCompareAndSwapPtr(void) {} void objc_atomicCompareAndSwapPtrBarrier(void) {} void objc_autorelease(void) {} void objc_autoreleasePoolPop(void) {} void objc_autoreleasePoolPush(void) {} void objc_autoreleaseReturnValue(void) {} void objc_clear_deallocating(void) {} void objc_clear_stack(void) {} void objc_collect(void) {} void objc_collect_init(void) {} void objc_collectableZone(void) {} void objc_collectingEnabled(void) {} void objc_collecting_enabled(void) {} void objc_constructInstance(void) {} void objc_copyClassList(void) {} void objc_copyClassNamesForImage(void) {} void objc_copyClassNamesForImageHeader(void) {} void objc_copyCppObjectAtomic(void) {} void objc_copyImageNames(void) {} void objc_copyProtocolList(void) {} void objc_copyStruct(void) {} void objc_copyWeak(void) {} const char objc_debug_autoreleasepoolpage_child_offset; const char objc_debug_autoreleasepoolpage_depth_offset; const char objc_debug_autoreleasepoolpage_hiwat_offset; const 
char objc_debug_autoreleasepoolpage_magic_offset; const char objc_debug_autoreleasepoolpage_next_offset; const char objc_debug_autoreleasepoolpage_parent_offset; const char objc_debug_autoreleasepoolpage_thread_offset; void objc_destroyWeak(void) {} void objc_destructInstance(void) {} void objc_disposeClassPair(void) {} void objc_dumpHeap(void) {} void objc_duplicateClass(void) {} void objc_enumerationMutation(void) {} void objc_exception_extract(void) {} void objc_exception_get_functions(void) {} void objc_exception_match(void) {} void objc_exception_set_functions(void) {} void objc_exception_throw(void) {} void objc_exception_try_enter(void) {} void objc_exception_try_exit(void) {} void objc_finalizeOnMainThread(void) {} void objc_getAssociatedObject(void) {} void objc_getClass(void) {} void objc_getClassList(void) {} void objc_getClasses(void) {} void objc_getFutureClass(void) {} void objc_getMetaClass(void) {} void objc_getOrigClass(void) {} void objc_getProperty(void) {} void objc_getProtocol(void) {} void objc_getRequiredClass(void) {} void objc_initWeak(void) {} void objc_initWeakOrNil(void) {} void objc_initializeClassPair(void) {} void objc_isAuto(void) {} void objc_is_finalized(void) {} void objc_loadModule(void) {} void objc_loadModules(void) {} void objc_loadWeak(void) {} void objc_loadWeakRetained(void) {} void objc_lookUpClass(void) {} void objc_memmove_collectable(void) {} void objc_moveWeak(void) {} void objc_msgSend(void) {} void objc_msgSendSuper(void) {} void objc_msgSendSuper_stret(void) {} void objc_msgSend_fpret(void) {} void objc_msgSend_stret(void) {} void objc_msgSendv(void) {} void objc_msgSendv_fpret(void) {} void objc_msgSendv_stret(void) {} void objc_opt_class(void) {} void objc_opt_isKindOfClass(void) {} void objc_opt_new(void) {} void objc_opt_respondsToSelector(void) {} void objc_opt_self(void) {} void objc_read_weak(void) {} void objc_registerClassPair(void) {} void objc_registerProtocol(void) {} void 
objc_registerThreadWithCollector(void) {} void objc_release(void) {} void objc_removeAssociatedObjects(void) {} void objc_retain(void) {} void objc_retainAutorelease(void) {} void objc_retainAutoreleaseReturnValue(void) {} void objc_retainAutoreleasedReturnValue(void) {} void objc_retainBlock(void) {} void objc_retain_autorelease(void) {} void objc_retainedObject(void) {} void objc_setAssociatedObject(void) {} void objc_setClassHandler(void) {} void objc_setCollectionRatio(void) {} void objc_setCollectionThreshold(void) {} void objc_setEnumerationMutationHandler(void) {} void objc_setForwardHandler(void) {} void objc_setHook_getImageName(void) {} void objc_setMultithreaded(void) {} void objc_setProperty(void) {} void objc_setProperty_atomic(void) {} void objc_setProperty_atomic_copy(void) {} void objc_setProperty_nonatomic(void) {} void objc_setProperty_nonatomic_copy(void) {} void objc_set_collection_ratio(void) {} void objc_set_collection_threshold(void) {} void objc_should_deallocate(void) {} void objc_startCollectorThread(void) {} void objc_start_collector_thread(void) {} void objc_storeStrong(void) {} void objc_storeWeak(void) {} void objc_storeWeakOrNil(void) {} void objc_sync_enter(void) {} void objc_sync_exit(void) {} void objc_sync_try_enter(void) {} void objc_unloadModules(void) {} void objc_unregisterThreadWithCollector(void) {} void objc_unretainedObject(void) {} void objc_unretainedPointer(void) {} void objc_unsafeClaimAutoreleasedReturnValue(void) {} void object_copy(void) {} void object_copyFromZone(void) {} void object_dispose(void) {} void object_getClass(void) {} void object_getClassName(void) {} void object_getIndexedIvars(void) {} void object_getInstanceVariable(void) {} void object_getIvar(void) {} void object_getMethodImplementation(void) {} void object_getMethodImplementation_stret(void) {} void object_isClass(void) {} void object_realloc(void) {} void object_reallocFromZone(void) {} void object_setClass(void) {} void 
object_setInstanceVariable(void) {} void object_setInstanceVariableWithStrongDefault(void) {} void object_setIvar(void) {} void object_setIvarWithStrongDefault(void) {} void property_copyAttributeList(void) {} void property_copyAttributeValue(void) {} void property_getAttributes(void) {} void property_getName(void) {} void protocol_addMethodDescription(void) {} void protocol_addProperty(void) {} void protocol_addProtocol(void) {} void protocol_conformsToProtocol(void) {} void protocol_copyMethodDescriptionList(void) {} void protocol_copyPropertyList(void) {} void protocol_copyPropertyList2(void) {} void protocol_copyProtocolList(void) {} void protocol_getMethodDescription(void) {} void protocol_getName(void) {} void protocol_getProperty(void) {} void protocol_isEqual(void) {} void sel_getName(void) {} void sel_getUid(void) {} void sel_isEqual(void) {} void sel_isMapped(void) {} void sel_registerName(void) {} void objc_cache_buckets(void) {} void objc_cache_bytesForCapacity(void) {} void objc_cache_capacity(void) {} void objc_cache_occupied(void) {} void objc_copyClassesForImage(void) {}
the_stack_data/122560.c
#include <stdio.h>

/* Maximum contiguous subsequence sum, four ways (Weiss, "Data
 * Structures and Algorithm Analysis in C", figures 2.5-2.8).
 * Define exactly one of CubicAlgorithm, QuadraticAlgorithm,
 * NlogNAlgorithm or LinearAlgorithm to choose the implementation. */
#define NlogNAlgorithm

#ifdef CubicAlgorithm
/* START: fig2_5.txt */
/* O(N^3): enumerate every (i, j) range and re-sum it from scratch. */
int MaxSubsequenceSum( const int A[ ], int N )
{
    int ThisSum, MaxSum, i, j, k;

    MaxSum = 0;
    for( i = 0; i < N; i++ )
        for( j = i; j < N; j++ )
        {
            ThisSum = 0;
            for( k = i; k <= j; k++ )
                ThisSum += A[ k ];

            if( ThisSum > MaxSum )
                MaxSum = ThisSum;
        }
    return MaxSum;
}
/* END */
#endif

#ifdef QuadraticAlgorithm
/* START: fig2_6.txt */
/* O(N^2): extend each start index i, reusing the running sum. */
int MaxSubsequenceSum( const int A[ ], int N )
{
    int ThisSum, MaxSum, i, j;

    MaxSum = 0;
    for( i = 0; i < N; i++ )
    {
        ThisSum = 0;
        for( j = i; j < N; j++ )
        {
            ThisSum += A[ j ];

            if( ThisSum > MaxSum )
                MaxSum = ThisSum;
        }
    }
    return MaxSum;
}
/* END */
#endif

#ifdef NlogNAlgorithm
/* Largest of three ints. */
static int Max3( int A, int B, int C )
{
    return A > B ? A > C ? A : C : B > C ? B : C;
}

/* START: fig2_7.txt */
/* O(N log N) divide and conquer: the best sum lies entirely in the
 * left half, entirely in the right half, or spans the center (best
 * left-border sum + best right-border sum). */
static int MaxSubSum( const int A[ ], int Left, int Right )
{
    int MaxLeftSum, MaxRightSum;
    int MaxLeftBorderSum, MaxRightBorderSum;
    int LeftBorderSum, RightBorderSum;
    int Center, i;

    if( Left == Right )  /* Base case */
    {
        if( A[ Left ] > 0 )
            return A[ Left ];
        else
            return 0;
    }

    Center = ( Left + Right ) / 2;
    MaxLeftSum = MaxSubSum( A, Left, Center );
    MaxRightSum = MaxSubSum( A, Center + 1, Right );

    MaxLeftBorderSum = 0; LeftBorderSum = 0;
    for( i = Center; i >= Left; i-- )
    {
        LeftBorderSum += A[ i ];
        if( LeftBorderSum > MaxLeftBorderSum )
            MaxLeftBorderSum = LeftBorderSum;
    }

    MaxRightBorderSum = 0; RightBorderSum = 0;
    for( i = Center + 1; i <= Right; i++ )
    {
        RightBorderSum += A[ i ];
        if( RightBorderSum > MaxRightBorderSum )
            MaxRightBorderSum = RightBorderSum;
    }

    return Max3( MaxLeftSum, MaxRightSum,
                 MaxLeftBorderSum + MaxRightBorderSum );
}

int MaxSubsequenceSum( const int A[ ], int N )
{
    return MaxSubSum( A, 0, N - 1 );
}
/* END */
#endif

#ifdef LinearAlgorithm
/* START: fig2_8.txt */
/* O(N) (Kadane): drop any running prefix whose sum goes negative. */
int MaxSubsequenceSum( const int A[ ], int N )
{
    int ThisSum, MaxSum, j;

    ThisSum = MaxSum = 0;
    for( j = 0; j < N; j++ )
    {
        ThisSum += A[ j ];

        if( ThisSum > MaxSum )
            MaxSum = ThisSum;
        else if( ThisSum < 0 )
            ThisSum = 0;
    }
    return MaxSum;
}
/* END */
#endif

/* BUG FIX: `main( )` relied on implicit int, invalid since C99. */
int main( void )
{
    static int A[ ] = { 4, -3, 5, -2, -1, 2, 6, -2 };

    printf( "Maxsum = %d\n",
            MaxSubsequenceSum( A, sizeof( A ) / sizeof( A[ 0 ] ) ) );
    return 0;
}
the_stack_data/105226.c
/* Author : Arnob Mahmud
   mail   : [email protected]

   Reads a year from stdin and reports whether it is a Gregorian leap
   year: divisible by 4, except century years not divisible by 400. */
#include <stdio.h>

int main(int argc, char const *argv[])
{
    int year;
    int is_leap;

    printf("Enter a year :\n");
    scanf("%d", &year);

    /* Same predicate as the textbook one-liner, named for clarity. */
    is_leap = (year % 4 == 0) && (year % 100 != 0 || year % 400 == 0);

    if (is_leap) {
        printf("%d is a leap year.", year);
    } else {
        printf("%d isn't a leap year.", year);
    }
    return 0;
}
the_stack_data/162644560.c
/* Reads one integer from stdin; exits immediately when it is zero,
 * otherwise reports that it was not zero.
 *
 * BUG FIX: the original hand-declared printf/scanf/exit with wrong,
 * non-variadic prototypes (e.g. `extern int printf(char *)`), which is
 * undefined behavior — use the real headers.  Also check scanf so a
 * failed read no longer uses the uninitialized x (UB), and give main
 * its required return type (implicit int is invalid since C99). */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    int x;

    if (scanf("%d", &x) != 1) {
        exit(1);  /* no number could be read */
    }
    if (x == 0) {
        printf("Immediate exit.\n");
        exit(0);
    }
    printf("The number was not zero.\n");
    return 0;
}
the_stack_data/85893.c
/*************************************************************************** * Copyright (C) 2020 Qorvo Inc. * * All rights reserved. * * * * Contributed originally by Koen Martens, [email protected] * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted provided that the following conditions * * are met: * * * * Redistributions of source code must retain the above copyright * * notice, this list of conditions and the following disclaimer. * * * * Redistributions in binary form must reproduce the above copyright * * notice, this list of conditions and the following disclaimer in the * * documentation and/or other materials provided with the distribution. * * * * Neither the name of Qorvo Inc. nor the names of its * * contributors may be used to endorse or promote products derived from * * this software without specific prior written permission. * * * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,* * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * * DAMAGE. 
* * * ***************************************************************************/ #include <string.h> /*!\ingroup string_util *\file string_util.c * This file implements string utilities */ size_t count_lines(char const *text) { size_t count = 0; while(text != NULL) { text = strchr(text, '\n'); if (text!=NULL) { count++; text += 1; } } return count; } /*@}*/ /* EOF */
the_stack_data/34215.c
/* BUG FIX: printf/scanf were used with no declaration (implicit
 * declarations are invalid in modern C) — include <stdio.h>. */
#include <stdio.h>

/* Macros parenthesized to be expansion-safe (the original `a%b` would
 * mis-expand for compound arguments). */
#define GCD(a,b) gcd((a),(b))
#define MOD(a,b) ((a)%(b))

/* Euclid's algorithm; requires b != 0. */
int gcd(int a,int b){
    if(MOD(a,b)==0)return b;
    return gcd(b,MOD(a,b));
}

/* Reads pairs of integers until EOF and prints each pair's gcd.
 * BUG FIX: the loop condition was `scanf(...)!=-1`, which also
 * accepted partial conversions and then used stale operands; require
 * exactly two successful conversions.  main now has an explicit
 * return type (implicit int is invalid since C99). */
int main(void){
    int a,b;
    while(scanf("%d%d",&a,&b)==2)printf("%d\n",GCD(a,b));
    return 0;
}
the_stack_data/13553.c
#include <fcntl.h> #include <signal.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> /* unistd for sleep */ void sigint_handler(int sig) { fprintf(stderr, "Caught signal %d.\n", sig); unlink("/tmp/MyUniqueName"); /* exit() is not safe in a signal handler, use _exit() */ _exit(1); } int main() { struct sigaction act; int myfd; myfd = open("/tmp/MyUniqueName", O_CREAT|O_EXCL); if ( myfd < 0 ) { fprintf(stderr, "I am already running!\n"); exit(1); } act.sa_handler = sigint_handler; sigemptyset(&act.sa_mask); act.sa_flags = 0; sigaction(SIGINT, &act, NULL); /* here the real code of the app*/ sleep(20); /* end of the app */ unlink("/tmp/MyUniqueName"); close(myfd); return 0; }
the_stack_data/492433.c
#include <stdio.h>

/* Prints every arrangement of the digits 1, 2, 3 in which no digit
 * repeats (the six permutations), one per line. */
int main(void)
{
    int a, b, c;

    for (a = 1; a <= 3; a++) {
        for (b = 1; b <= 3; b++) {
            for (c = 1; c <= 3; c++) {
                /* Skip any arrangement with a repeated digit. */
                if (a == b || b == c || a == c) {
                    continue;
                }
                printf("%d%d%d\n", a, b, c);
            }
        }
    }
}
the_stack_data/6387788.c
// LANGUAGE: c
// ENV: gcc
// AUTHOR: Nitin Sultania
// GITHUB: https://github.com/nitinsultania
#include<stdio.h>
#include<stdlib.h>

/* Returns a freshly malloc'd coefficient array (indexed by exponent)
 * holding A + B, where A has degree m and B degree n.  The result has
 * max(m, n) + 1 entries; the caller owns and frees it. */
int *sum(int *A,int *B,int m,int n)
{
    int *C;
    if(m>n)
    {
        C=(int *)malloc((m+1)*sizeof(int));
        for(int i=0;i<=m;i++)
            C[i]=A[i];
        for(int i=0;i<=n;i++)
            C[i]+=B[i];
    }
    else
    {
        C=(int *)malloc((n+1)*sizeof(int));
        for(int i=0;i<=n;i++)
            C[i]=B[i];
        for(int i=0;i<=m;i++)
            C[i]+=A[i];
    }
    return C;
}

/* Returns A * B as a freshly malloc'd array of m+n+1 coefficients
 * (the product of degree-m and degree-n polynomials has degree m+n).
 * BUG FIX: the original zero-filled indices 0..m+n+1, writing one int
 * past the end of the allocation (heap overflow).  Caller frees. */
int *multiply(int *A,int *B,int m,int n)
{
    int *C=(int *)malloc((m+n+1)*sizeof(int));
    for(int i=0;i<=m+n;i++)
        C[i]=0;
    for(int i=0;i<=m;i++)
        for(int j=0;j<=n;j++)
            C[i+j]+=(A[i]*B[j]);
    return C;
}

/* Reads two sparse polynomials interactively, then prints their sum
 * and product as "coeff^exponent" terms (zero terms skipped). */
int main()
{
    int m,n,k,coff,exp;

    printf("Enter the max power in poly 1: ");
    scanf("%d",&m);
    printf("Enter the max power in poly 2: ");
    scanf("%d",&n);

    int *A=(int *)malloc((m+1)*sizeof(int));
    for(int i=0;i<=m;i++)
        A[i]=0;
    printf("Enter number of terms in poly 1: ");
    scanf("%d",&k);
    for(int i=0;i<k;i++)
    {
        printf("Enter the coff and power");
        scanf("%d%d",&coff,&exp);
        A[exp]=coff;
    }

    int *B=(int *)malloc((n+1)*sizeof(int));
    for(int i=0;i<=n;i++)
        B[i]=0;
    printf("Enter number of terms in poly 2: ");
    scanf("%d",&k);
    for(int i=0;i<k;i++)
    {
        printf("Enter the coff and power");
        scanf("%d%d",&coff,&exp);
        B[exp]=coff;
    }

    int *C=sum(A,B,m,n);
    int size=(m>n?m:n);
    for(int i=0;i<=size;i++)
    {
        if(C[i]==0) continue;
        printf("%d^%d ",C[i],i);
    }

    int *D=multiply(A,B,m,n);
    /* BUG FIX: print only valid indices 0..m+n (the original read one
     * element past the end of D). */
    for(int i=0;i<=m+n;i++)
    {
        if(D[i]==0) continue;
        printf("%d^%d ",D[i],i);
    }

    /* Release all four coefficient arrays (previously leaked). */
    free(A);
    free(B);
    free(C);
    free(D);
    return 0;
}
the_stack_data/72013804.c
#include<stdio.h>

/* Swaps the two ints pointed to. */
void tukar(int *x, int *y)
{
    int z = *x;
    *x = *y;
    *y = z;
}

/* Classic bubble sort, ascending in place over n elements. */
void bubble(int array[], int n)
{
    int i, j;
    for (i = 0; i < n-1; i++)
        for (j = 0; j < n-i-1; j++)
            if (array[j] > array[j+1])
                tukar(&array[j], &array[j+1]);
}

/* Prints "DATA k: value" for each element with a 1-based index.
 * BUG FIX: the original contained a stray-semicolon loop
 * (`for(j=1;j<=i;j++);{ ... }`) that only produced the right labels by
 * accident; this is the direct equivalent with identical output. */
void cetak(int array[], int besar)
{
    int i;
    for (i = 0; i < besar; i++) {
        printf("DATA %d: %d \n", i + 1, array[i]);
    }
    printf("\n");
}

int main()
{
    /* (Unused variable `b` from the original removed.) */
    int array[] = {64, 34, 25, 12, 22, 11, 90, 1, 128, 222};
    int a = sizeof(array)/sizeof(array[0]);

    bubble(array, a);
    printf("Hasil: \n");
    cetak(array, a);
    return 0;
}
the_stack_data/455633.c
#define _BSD_SOURCE #include <curses.h> #include <stdlib.h> #include <time.h> #define FILENAME "windump" int main() { char word[7]; int x,w,r; srandom((unsigned)time(NULL)); /* seed randomizer */ word[6] = '\0'; initscr(); /* Fill most of the screen with random 6-char words */ for(x=0;x<200;x++) { for(w=0;w<6;w++) word[w] = (random() % 26) + 'a'; printw("%s\t",word); } addch('\n'); addstr("Press Enter to write this screen to disk\n"); refresh(); getch(); /* write the window to disk */ r = scr_dump(FILENAME); if( r == ERR) addstr("Error writing window to disk\n"); else addstr("File written; press Enter to quit\n"); refresh(); getch(); endwin(); return 0; }
the_stack_data/4950.c
/* display byte size of basic data type */
#include <stdio.h>

int main()
{
    /* BUG FIX: every message claimed "data type int" even though
     * different types were measured; name the actual type.  Also use
     * %zu: sizeof yields size_t, whose conversion is z + u. */
    printf("Variables of data type int occupy %zu bytes\n", sizeof(int));
    printf("Variables of data type char occupy %zu bytes\n", sizeof(char));
    printf("Variables of data type long occupy %zu bytes\n", sizeof(long));
    printf("Variables of data type long long occupy %zu bytes\n", sizeof(long long));
    printf("Variables of data type double occupy %zu bytes\n", sizeof(double));
    printf("Variables of data type long double occupy %zu bytes\n", sizeof(long double));
    return 0;
}
the_stack_data/615461.c
/* * Partial Secondary Implementation */
the_stack_data/132952303.c
/*
ID: fx.yoyo1
LANG: C
TASK: gift1
*/
/* USACO "Greedy Gift Givers": read a round of gift giving from
 * gift1.in and write each person's net gain/loss to gift1.out in the
 * original name order. */
#include<stdio.h>

int np=0;               /* number of people (read from input, <= 10) */
char name[10][15]={0};  /* names, in input order */
int money[10]={0};      /* net balance per person, same indexing */

/* Linear search for `named` among the known names; returns its index.
 * NOTE(review): returns 10 when the name is not found, and callers
 * index money[10] with that — out of bounds.  USACO input guarantees
 * every name is known, so it never fires here; confirm before reuse.
 * Also note the comparison accepts `named` being a prefix of a stored
 * name. */
int namecheck(char named[])
{
    int i,j;
    for(i=0;i<np;i++)
    {
        for(j=0;named[j]!=0;j++)
            if(name[i][j]!=named[j])
                break;
        if(named[j]==0)      /* reached the end of `named` without a mismatch */
            return i;
    }
    return 10;
}

int main()
{
    FILE *in,*out;
    in=fopen("gift1.in","r");
    out=fopen("gift1.out","w");
    int i,j;
    int pay,n;           /* total given away; number of recipients */
    int re,tr;           /* remainder kept by giver; share per recipient */
    char named[15]={0};

    fscanf(in,"%d",&np);
    for(i=0;i<np;i++)
        fscanf(in,"%s",name[i]);

    /* One record per giver: giver's name, amount, recipient count,
     * then the recipients' names. */
    for(i=0;i<np;i++)
    {
        fscanf(in,"%s",named);
        fscanf(in,"%d%d",&pay,&n);
        if(n==0)
        {
            re=pay;      /* nobody to give to: the giver keeps it all */
            tr=0;
        }
        else
        {
            re=pay%n;    /* indivisible remainder stays with the giver */
            tr=(pay-re)/n;
        }
        money[namecheck(named)]-=pay;   /* giver pays out... */
        money[namecheck(named)]+=re;    /* ...but keeps the remainder */
        for(j=0;j<n;j++)
        {
            fscanf(in,"%s",named);
            money[namecheck(named)]+=tr;
        }
    }

    for(i=0;i<np;i++)
        fprintf(out,"%s %d\n",name[i],money[i]);
    fclose(in);
    fclose(out);
    return 0;
}
the_stack_data/64201260.c
/// https://bugs.llvm.org/show_bug.cgi?id=38067 /// An abnormal exit does not clear execution counts of subsequent instructions. // RUN: mkdir -p %t.dir && cd %t.dir // RUN: %clang --coverage %s -o %t // RUN: test -f gcov-__gcov_flush-terminate.gcno // RUN: rm -f gcov-__gcov_flush-terminate.gcda && %expect_crash %run %t // RUN: llvm-cov gcov -t gcov-__gcov_flush-terminate.gcda | FileCheck %s // CHECK: -: 0:Runs:1 // CHECK-NEXT: -: 0:Programs:1 void __gcov_dump(void); void __gcov_reset(void); int main(void) { // CHECK: 1: [[#@LINE]]:int main(void) int i = 22; // CHECK-NEXT: 1: [[#@LINE]]: __gcov_dump(); // CHECK-NEXT: 1: [[#@LINE]]: __gcov_reset(); // CHECK-NEXT: 1: [[#@LINE]]: i = 42; // CHECK-NEXT: 1: [[#@LINE]]: __builtin_trap(); // CHECK-NEXT: 1: [[#@LINE]]: i = 84; // CHECK-NEXT: 1: [[#@LINE]]: return 0; // CHECK-NEXT: 1: [[#@LINE]]: }
the_stack_data/23576581.c
/* Copyright (C) 2002, 2005 by Red Hat, Incorporated. All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software
 * is freely granted, provided that this notice is preserved.
 */

#include <errno.h>
#include <sys/types.h>
#include <string.h>
#include <stdlib.h>
#include <argz.h>

#define __need_ptrdiff_t
#include <stddef.h>

/* Insert `entry` into the argz vector (*argz, *argz_len) immediately before
 * the element that `before` points into; `before` may point anywhere inside
 * that element.  A NULL `before` appends the entry at the end.
 *
 * Returns 0 on success, EINVAL if `before` does not point into the vector,
 * or ENOMEM if the vector cannot be grown.  On failure the original vector
 * is left intact.
 *
 * Fix from review: the original wrote `*argz = realloc(*argz, ...)` directly,
 * which on allocation failure both leaked the old block and clobbered the
 * caller's pointer with NULL.  The reallocation now goes through a temporary
 * so failure leaves *argz/*argz_len untouched. */
error_t
argz_insert (char **argz, size_t *argz_len, char *before, const char *entry)
{
  int len = 0;
  ptrdiff_t delta;
  char *new_argz;

  if (before == NULL)
    return argz_add(argz, argz_len, entry);

  if (before < *argz || before >= *argz + *argz_len)
    return EINVAL;

  /* Snap `before` back to the start of its element. */
  while (before != *argz && before[-1])
    before--;

  /* delta will always be non-negative, and < *argz_len.  It must be
     computed before realloc, while `before` still points into *argz. */
  delta = before - *argz;

  len = strlen(entry) + 1;

  new_argz = (char *) realloc(*argz, *argz_len + len);
  if (!new_argz)
    return ENOMEM;
  *argz = new_argz;

  /* Open a gap of `len` bytes at the insertion point, then copy the
     entry (including its NUL terminator) into it. */
  memmove(*argz + delta + len, *argz + delta, *argz_len - delta);
  memcpy(*argz + delta, entry, len);

  *argz_len += len;

  return 0;
}
the_stack_data/150140729.c
/* Minimal program declaring and touching a small zero-initialized array.
 * Fix from review: the original declared the array `register`; subscripting
 * a register-class array requires array-to-pointer conversion, which is
 * undefined behavior (C11 6.3.2.1p3), so the qualifier is dropped. */
int main(void){
    int a[5] = {0};   /* five ints, all zero-initialized */
    (void)a[0];       /* touch the first element; value intentionally unused */
    return 0;
}
the_stack_data/87636666.c
#include<stdio.h>

/* Read integers n from stdin until a terminating 0; for each n print "Y"
 * when (5n+5)*n -- i.e. 5*n*(n+1), twice the sum 5+10+...+5n -- is
 * divisible by 4, and "N" otherwise.
 *
 * Fixes from review:
 *  - scanf returns EOF (-1, truthy) at end of input, so the original
 *    `while(scanf(...) && n)` spun forever if the stream ended without a 0;
 *    compare against 1 instead.
 *  - the product is formed in 64-bit arithmetic so large n cannot trigger
 *    signed-int overflow (undefined behavior). */
int main()
{
    int n;
    while(scanf("%d", &n) == 1 && n)
    {
        long long product = (5LL * n + 5) * n;
        if(product % 4)
            printf("N\n");
        else
            printf("Y\n");
    }
    return 0;
}
the_stack_data/53841.c
/*******************************************************************************
 *
 * "for" problem. Count from 1 to 32 and list the range of unsigned integer
 * numbers that can be stored in each collection of bits. I.E. 8 bits can hold
 * the range 0 to 255.
 *
 ******************************************************************************/

#include <stdio.h>
#include <math.h>

int main(void)
{
    printf("#bits | [Min, Max]\n");

    for (int i = 1; i <= 32; i++)
    {
        /* Fix from review: the original computed (unsigned int)pow(2,i) - 1,
         * but converting pow(2,32) == 4294967296.0 to unsigned int is
         * undefined behavior (value not representable).  Shift a 64-bit 1
         * and subtract before narrowing; the result always fits. */
        unsigned int max = (unsigned int)((1ULL << i) - 1ULL);
        printf(" %2d | [0,%u]\n", i, max);
    }

    return 0;
}
the_stack_data/59513062.c
/*Exercise 4 - Functions
Implement the three functions minimum(), maximum() and multiply() below the
main() function.
Do not change the code given in the main() function when you are implementing
your solution.*/

#include <stdio.h>

int multiply(int n1, int n2);
int minimum(int n1, int n2);
int maximum(int n1, int n2);

int main()
{
    int no1, no2;

    printf("Enter a value for no 1 : ");
    scanf("%d", &no1);
    printf("Enter a value for no 2 : ");
    scanf("%d", &no2);

    printf("%d ", minimum(no1, no2));
    printf("%d ", maximum(no1, no2));
    printf("%d ", multiply(no1, no2));

    return 0;
}

/* Product of the two arguments. */
int multiply(int n1, int n2)
{
    return n1 * n2;
}

/* Smaller of the two arguments (either when equal). */
int minimum(int n1, int n2)
{
    return (n1 < n2) ? n1 : n2;
}

/* Larger of the two arguments (either when equal). */
int maximum(int n1, int n2)
{
    return (n1 > n2) ? n1 : n2;
}
the_stack_data/45451519.c
#ifdef MALLOC_PROVIDED int _dummy_mallocr = 1; #else /* ---------- To make a malloc.h, start cutting here ------------ */ /* A version of malloc/free/realloc written by Doug Lea and released to the public domain. Send questions/comments/complaints/performance data to [email protected] * VERSION 2.6.5 Wed Jun 17 15:55:16 1998 Doug Lea (dl at gee) Note: There may be an updated version of this malloc obtainable at ftp://g.oswego.edu/pub/misc/malloc.c Check before installing! Note: This version differs from 2.6.4 only by correcting a statement ordering error that could cause failures only when calls to this malloc are interposed with calls to other memory allocators. * Why use this malloc? This is not the fastest, most space-conserving, most portable, or most tunable malloc ever written. However it is among the fastest while also being among the most space-conserving, portable and tunable. Consistent balance across these factors results in a good general-purpose allocator. For a high-level description, see http://g.oswego.edu/dl/html/malloc.html * Synopsis of public routines (Much fuller descriptions are contained in the program documentation below.) malloc(size_t n); Return a pointer to a newly allocated chunk of at least n bytes, or null if no space is available. free(Void_t* p); Release the chunk of memory pointed to by p, or no effect if p is null. realloc(Void_t* p, size_t n); Return a pointer to a chunk of size n that contains the same data as does chunk p up to the minimum of (n, p's size) bytes, or null if no space is available. The returned pointer may or may not be the same as p. If p is null, equivalent to malloc. Unless the #define REALLOC_ZERO_BYTES_FREES below is set, realloc with a size argument of zero (re)allocates a minimum-sized chunk. memalign(size_t alignment, size_t n); Return a pointer to a newly allocated chunk of n bytes, aligned in accord with the alignment argument, which must be a power of two. 
valloc(size_t n); Equivalent to memalign(pagesize, n), where pagesize is the page size of the system (or as near to this as can be figured out from all the includes/defines below.) pvalloc(size_t n); Equivalent to valloc(minimum-page-that-holds(n)), that is, round up n to nearest pagesize. calloc(size_t unit, size_t quantity); Returns a pointer to quantity * unit bytes, with all locations set to zero. cfree(Void_t* p); Equivalent to free(p). malloc_trim(size_t pad); Release all but pad bytes of freed top-most memory back to the system. Return 1 if successful, else 0. malloc_usable_size(Void_t* p); Report the number usable allocated bytes associated with allocated chunk p. This may or may not report more bytes than were requested, due to alignment and minimum size constraints. malloc_stats(); Prints brief summary statistics on stderr. mallinfo() Returns (by copy) a struct containing various summary statistics. mallopt(int parameter_number, int parameter_value) Changes one of the tunable parameters described below. Returns 1 if successful in changing the parameter, else 0. * Vital statistics: Alignment: 8-byte 8 byte alignment is currently hardwired into the design. This seems to suffice for all current machines and C compilers. Assumed pointer representation: 4 or 8 bytes Code for 8-byte pointers is untested by me but has worked reliably by Wolfram Gloger, who contributed most of the changes supporting this. Assumed size_t representation: 4 or 8 bytes Note that size_t is allowed to be 4 bytes even if pointers are 8. Minimum overhead per allocated chunk: 4 or 8 bytes Each malloced chunk has a hidden overhead of 4 bytes holding size and status information. 
Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead) 8-byte ptrs: 24/32 bytes (including, 4/8 overhead) When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte ptrs but 4 byte size) or 24 (for 8/8) additional bytes are needed; 4 (8) for a trailing size field and 8 (16) bytes for free list pointers. Thus, the minimum allocatable size is 16/24/32 bytes. Even a request for zero bytes (i.e., malloc(0)) returns a pointer to something of the minimum allocatable size. Maximum allocated size: 4-byte size_t: 2^31 - 8 bytes 8-byte size_t: 2^63 - 16 bytes It is assumed that (possibly signed) size_t bit values suffice to represent chunk sizes. `Possibly signed' is due to the fact that `size_t' may be defined on a system as either a signed or an unsigned type. To be conservative, values that would appear as negative numbers are avoided. Requests for sizes with a negative sign bit will return a minimum-sized chunk. Maximum overhead wastage per allocated chunk: normally 15 bytes Alignnment demands, plus the minimum allocatable size restriction make the normal worst-case wastage 15 bytes (i.e., up to 15 more bytes will be allocated than were requested in malloc), with two exceptions: 1. Because requests for zero bytes allocate non-zero space, the worst case wastage for a request of zero bytes is 24 bytes. 2. For requests >= mmap_threshold that are serviced via mmap(), the worst case wastage is 8 bytes plus the remainder from a system page (the minimal mmap unit); typically 4096 bytes. * Limitations Here are some features that are NOT currently supported * No user-definable hooks for callbacks and the like. * No automated mechanism for fully checking that all accesses to malloced memory stay within their bounds. * No support for compaction. * Synopsis of compile-time options: People have reported using previous versions of this malloc on all versions of Unix, sometimes by tweaking some of the defines below. 
It has been tested most extensively on Solaris and Linux. It is also reported to work on WIN32 platforms. People have also reported adapting this malloc for use in stand-alone embedded systems. The implementation is in straight, hand-tuned ANSI C. Among other consequences, it uses a lot of macros. Because of this, to be at all usable, this code should be compiled using an optimizing compiler (for example gcc -O2) that can simplify expressions and control paths. __STD_C (default: derived from C compiler defines) Nonzero if using ANSI-standard C compiler, a C++ compiler, or a C compiler sufficiently close to ANSI to get away with it. DEBUG (default: NOT defined) Define to enable debugging. Adds fairly extensive assertion-based checking to help track down memory errors, but noticeably slows down execution. SEPARATE_OBJECTS (default: NOT defined) Define this to compile into separate .o files. You must then compile malloc.c several times, defining a DEFINE_* macro each time. The list of DEFINE_* macros appears below. MALLOC_LOCK (default: NOT defined) MALLOC_UNLOCK (default: NOT defined) Define these to C expressions which are run to lock and unlock the malloc data structures. Calls may be nested; that is, MALLOC_LOCK may be called more than once before the corresponding MALLOC_UNLOCK calls. MALLOC_LOCK must avoid waiting for a lock that it already holds. MALLOC_ALIGNMENT (default: NOT defined) Define this to 16 if you need 16 byte alignment instead of 8 byte alignment which is the normal default. REALLOC_ZERO_BYTES_FREES (default: NOT defined) Define this if you think that realloc(p, 0) should be equivalent to free(p). Otherwise, since malloc returns a unique pointer for malloc(0), so does realloc(p, 0). HAVE_MEMCPY (default: defined) Define if you are not otherwise using ANSI STD C, but still have memcpy and memset in your C library and want to use them. Otherwise, simple internal versions are supplied. 
USE_MEMCPY (default: 1 if HAVE_MEMCPY is defined, 0 otherwise) Define as 1 if you want the C library versions of memset and memcpy called in realloc and calloc (otherwise macro versions are used). At least on some platforms, the simple macro versions usually outperform libc versions. HAVE_MMAP (default: defined as 1) Define to non-zero to optionally make malloc() use mmap() to allocate very large blocks. HAVE_MREMAP (default: defined as 0 unless Linux libc set) Define to non-zero to optionally make realloc() use mremap() to reallocate very large blocks. malloc_getpagesize (default: derived from system #includes) Either a constant or routine call returning the system page size. HAVE_USR_INCLUDE_MALLOC_H (default: NOT defined) Optionally define if you are on a system with a /usr/include/malloc.h that declares struct mallinfo. It is not at all necessary to define this even if you do, but will ensure consistency. INTERNAL_SIZE_T (default: size_t) Define to a 32-bit type (probably `unsigned int') if you are on a 64-bit machine, yet do not want or need to allow malloc requests of greater than 2^31 to be handled. This saves space, especially for very small chunks. INTERNAL_LINUX_C_LIB (default: NOT defined) Defined only when compiled as part of Linux libc. Also note that there is some odd internal name-mangling via defines (for example, internally, `malloc' is named `mALLOc') needed when compiling in this case. These look funny but don't otherwise affect anything. INTERNAL_NEWLIB (default: NOT defined) Defined only when compiled as part of the Cygnus newlib distribution. WIN32 (default: undefined) Define this on MS win (95, nt) platforms to compile in sbrk emulation. LACKS_UNISTD_H (default: undefined) Define this if your system does not have a <unistd.h>. MORECORE (default: sbrk) The name of the routine to call to obtain more memory from the system. MORECORE_FAILURE (default: -1) The value returned upon failure of MORECORE. 
MORECORE_CLEARS (default 1) True (1) if the routine mapped to MORECORE zeroes out memory (which holds for sbrk). DEFAULT_TRIM_THRESHOLD DEFAULT_TOP_PAD DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_MAX Default values of tunable parameters (described in detail below) controlling interaction with host system routines (sbrk, mmap, etc). These values may also be changed dynamically via mallopt(). The preset defaults are those that give best performance for typical programs/systems. */ /* Preliminaries */ #ifndef __STD_C #ifdef __STDC__ #define __STD_C 1 #else #if __cplusplus #define __STD_C 1 #else #define __STD_C 0 #endif /*__cplusplus*/ #endif /*__STDC__*/ #endif /*__STD_C*/ #ifndef Void_t #if __STD_C #define Void_t void #else #define Void_t char #endif #endif /*Void_t*/ #if __STD_C #include <stddef.h> /* for size_t */ #else #include <sys/types.h> #endif #ifdef __cplusplus extern "C" { #endif #include <stdio.h> /* needed for malloc_stats */ #include <limits.h> /* needed for overflow checks */ #include <errno.h> /* needed to set errno to ENOMEM */ #ifdef WIN32 #define WIN32_LEAN_AND_MEAN #include <windows.h> #endif /* Compile-time options */ /* Special defines for Cygnus newlib distribution. */ #ifdef INTERNAL_NEWLIB #include <sys/config.h> /* In newlib, all the publically visible routines take a reentrancy pointer. We don't currently do anything much with it, but we do pass it to the lock routine. 
*/ #include <reent.h> #define POINTER_UINT unsigned _POINTER_INT #define SEPARATE_OBJECTS #define HAVE_MMAP 0 #define MORECORE(size) _sbrk_r(reent_ptr, (size)) #define MORECORE_CLEARS 0 #define MALLOC_LOCK __malloc_lock(reent_ptr) #define MALLOC_UNLOCK __malloc_unlock(reent_ptr) #ifdef __CYGWIN__ # undef _WIN32 # undef WIN32 #endif #ifndef _WIN32 #ifdef SMALL_MEMORY #define malloc_getpagesize (128) #else #define malloc_getpagesize (4096) #endif #endif #if __STD_C extern void __malloc_lock(struct _reent *); extern void __malloc_unlock(struct _reent *); #else extern void __malloc_lock(); extern void __malloc_unlock(); #endif #if __STD_C #define RARG struct _reent *reent_ptr, #define RONEARG struct _reent *reent_ptr #else #define RARG reent_ptr #define RONEARG reent_ptr #define RDECL struct _reent *reent_ptr; #endif #define RERRNO reent_ptr->_errno #define RCALL reent_ptr, #define RONECALL reent_ptr #else /* ! INTERNAL_NEWLIB */ #define POINTER_UINT unsigned long #define RARG #define RONEARG #define RDECL #define RERRNO errno #define RCALL #define RONECALL #endif /* ! INTERNAL_NEWLIB */ /* Debugging: Because freed chunks may be overwritten with link fields, this malloc will often die when freed memory is overwritten by user programs. This can be very effective (albeit in an annoying way) in helping track down dangling pointers. If you compile with -DDEBUG, a number of assertion checks are enabled that will catch more memory errors. You probably won't be able to make much sense of the actual assertion errors, but they should help you locate incorrectly overwritten memory. The checking is fairly extensive, and will slow down execution noticeably. Calling malloc_stats or mallinfo with DEBUG set will attempt to check every non-mmapped allocated and free chunk in the course of computing the summmaries. (By nature, mmapped regions cannot be checked very much automatically.) Setting DEBUG may also be helpful if you are trying to modify this code. 
The assertions in the check routines spell out in more detail the assumptions and invariants underlying the algorithms. */ #if DEBUG #include <assert.h> #else #define assert(x) ((void)0) #endif /* SEPARATE_OBJECTS should be defined if you want each function to go into a separate .o file. You must then compile malloc.c once per function, defining the appropriate DEFINE_ macro. See below for the list of macros. */ #ifndef SEPARATE_OBJECTS #define DEFINE_MALLOC #define DEFINE_FREE #define DEFINE_REALLOC #define DEFINE_CALLOC #define DEFINE_CFREE #define DEFINE_MEMALIGN #define DEFINE_VALLOC #define DEFINE_PVALLOC #define DEFINE_MALLINFO #define DEFINE_MALLOC_STATS #define DEFINE_MALLOC_USABLE_SIZE #define DEFINE_MALLOPT #define STATIC static #else #define STATIC #endif /* Define MALLOC_LOCK and MALLOC_UNLOCK to C expressions to run to lock and unlock the malloc data structures. MALLOC_LOCK may be called recursively. */ #ifndef MALLOC_LOCK #define MALLOC_LOCK #endif #ifndef MALLOC_UNLOCK #define MALLOC_UNLOCK #endif /* INTERNAL_SIZE_T is the word-size used for internal bookkeeping of chunk sizes. On a 64-bit machine, you can reduce malloc overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the expense of not being able to handle requests greater than 2^31. This limitation is hardly ever a concern; you are encouraged to set this. However, the default version is the same as size_t. */ #ifndef INTERNAL_SIZE_T #define INTERNAL_SIZE_T size_t #endif /* Following is needed on implementations whereby long > size_t. The problem is caused because the code performs subtractions of size_t values and stores the result in long values. In the case where long > size_t and the first value is actually less than the second value, the resultant value is positive. For example, (long)(x - y) where x = 0 and y is 1 ends up being 0x00000000FFFFFFFF which is 2*31 - 1 instead of 0xFFFFFFFFFFFFFFFF. 
This is due to the fact that assignment from unsigned to signed won't sign extend. */ #define long_sub_size_t(x, y) \ (sizeof (long) > sizeof (INTERNAL_SIZE_T) && x < y \ ? -(long) (y - x) \ : (long) (x - y)) /* REALLOC_ZERO_BYTES_FREES should be set if a call to realloc with zero bytes should be the same as a call to free. Some people think it should. Otherwise, since this malloc returns a unique pointer for malloc(0), so does realloc(p, 0). */ /* #define REALLOC_ZERO_BYTES_FREES */ /* WIN32 causes an emulation of sbrk to be compiled in mmap-based options are not currently supported in WIN32. */ /* #define WIN32 */ #ifdef WIN32 #define MORECORE wsbrk #define HAVE_MMAP 0 #endif /* HAVE_MEMCPY should be defined if you are not otherwise using ANSI STD C, but still have memcpy and memset in your C library and want to use them in calloc and realloc. Otherwise simple macro versions are defined here. USE_MEMCPY should be defined as 1 if you actually want to have memset and memcpy called. People report that the macro versions are often enough faster than libc versions on many systems that it is better to use them. */ #define HAVE_MEMCPY /* Although the original macro is called USE_MEMCPY, newlib actually uses memmove to handle cases whereby a platform's memcpy implementation copies backwards and thus destructive overlap may occur in realloc whereby we are reclaiming free memory prior to the old allocation. */ #ifndef USE_MEMCPY #ifdef HAVE_MEMCPY #define USE_MEMCPY 1 #else #define USE_MEMCPY 0 #endif #endif #if (__STD_C || defined(HAVE_MEMCPY)) #if __STD_C void* memset(void*, int, size_t); void* memcpy(void*, const void*, size_t); void* memmove(void*, const void*, size_t); #else Void_t* memset(); Void_t* memcpy(); Void_t* memmove(); #endif #endif #if USE_MEMCPY /* The following macros are only invoked with (2n+1)-multiples of INTERNAL_SIZE_T units, with a positive integer n. This is exploited for fast inline execution when n is small. 
*/ #define MALLOC_ZERO(charp, nbytes) \ do { \ INTERNAL_SIZE_T mzsz = (nbytes); \ if(mzsz <= 9*sizeof(mzsz)) { \ INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp); \ if(mzsz >= 5*sizeof(mzsz)) { *mz++ = 0; \ *mz++ = 0; \ if(mzsz >= 7*sizeof(mzsz)) { *mz++ = 0; \ *mz++ = 0; \ if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0; \ *mz++ = 0; }}} \ *mz++ = 0; \ *mz++ = 0; \ *mz = 0; \ } else memset((charp), 0, mzsz); \ } while(0) #define MALLOC_COPY(dest,src,nbytes) \ do { \ INTERNAL_SIZE_T mcsz = (nbytes); \ if(mcsz <= 9*sizeof(mcsz)) { \ INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src); \ INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest); \ if(mcsz >= 5*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \ *mcdst++ = *mcsrc++; \ if(mcsz >= 7*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \ *mcdst++ = *mcsrc++; \ if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \ *mcdst++ = *mcsrc++; }}} \ *mcdst++ = *mcsrc++; \ *mcdst++ = *mcsrc++; \ *mcdst = *mcsrc ; \ } else memmove(dest, src, mcsz); \ } while(0) #else /* !USE_MEMCPY */ /* Use Duff's device for good zeroing/copying performance. 
*/ #define MALLOC_ZERO(charp, nbytes) \ do { \ INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \ long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn; \ if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \ switch (mctmp) { \ case 0: for(;;) { *mzp++ = 0; \ case 7: *mzp++ = 0; \ case 6: *mzp++ = 0; \ case 5: *mzp++ = 0; \ case 4: *mzp++ = 0; \ case 3: *mzp++ = 0; \ case 2: *mzp++ = 0; \ case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \ } \ } while(0) #define MALLOC_COPY(dest,src,nbytes) \ do { \ INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \ INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \ long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn; \ if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \ switch (mctmp) { \ case 0: for(;;) { *mcdst++ = *mcsrc++; \ case 7: *mcdst++ = *mcsrc++; \ case 6: *mcdst++ = *mcsrc++; \ case 5: *mcdst++ = *mcsrc++; \ case 4: *mcdst++ = *mcsrc++; \ case 3: *mcdst++ = *mcsrc++; \ case 2: *mcdst++ = *mcsrc++; \ case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \ } \ } while(0) #endif /* Define HAVE_MMAP to optionally make malloc() use mmap() to allocate very large blocks. These will be returned to the operating system immediately after a free(). */ #ifndef HAVE_MMAP #define HAVE_MMAP 1 #endif /* Define HAVE_MREMAP to make realloc() use mremap() to re-allocate large blocks. This is currently only possible on Linux with kernel versions newer than 1.3.77. */ #ifndef HAVE_MREMAP #ifdef INTERNAL_LINUX_C_LIB #define HAVE_MREMAP 1 #else #define HAVE_MREMAP 0 #endif #endif #if HAVE_MMAP #include <unistd.h> #include <fcntl.h> #include <sys/mman.h> #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) #define MAP_ANONYMOUS MAP_ANON #endif #endif /* HAVE_MMAP */ /* Access to system page size. To the extent possible, this malloc manages memory from the system in page-size units. 
The following mechanics for getpagesize were adapted from bsd/gnu getpagesize.h */ #ifndef LACKS_UNISTD_H # include <unistd.h> #endif #ifndef malloc_getpagesize # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ # ifndef _SC_PAGE_SIZE # define _SC_PAGE_SIZE _SC_PAGESIZE # endif # endif # ifdef _SC_PAGE_SIZE # define malloc_getpagesize sysconf(_SC_PAGE_SIZE) # else # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) extern size_t getpagesize(); # define malloc_getpagesize getpagesize() # else # include <sys/param.h> # ifdef EXEC_PAGESIZE # define malloc_getpagesize EXEC_PAGESIZE # else # ifdef NBPG # ifndef CLSIZE # define malloc_getpagesize NBPG # else # define malloc_getpagesize (NBPG * CLSIZE) # endif # else # ifdef NBPC # define malloc_getpagesize NBPC # else # ifdef PAGESIZE # define malloc_getpagesize PAGESIZE # else # define malloc_getpagesize (4096) /* just guess */ # endif # endif # endif # endif # endif # endif #endif /* This version of malloc supports the standard SVID/XPG mallinfo routine that returns a struct containing the same kind of information you can get from malloc_stats. It should work on any SVID/XPG compliant system that has a /usr/include/malloc.h defining struct mallinfo. (If you'd like to install such a thing yourself, cut out the preliminary declarations as described above and below and save them in a malloc.h file. But there's no compelling reason to bother to do this.) The main declaration needed is the mallinfo struct that is returned (by-copy) by mallinfo(). The SVID/XPG malloinfo struct contains a bunch of fields, most of which are not even meaningful in this version of malloc. Some of these fields are are instead filled by mallinfo() with other numbers that might possibly be of interest. HAVE_USR_INCLUDE_MALLOC_H should be set if you have a /usr/include/malloc.h file that includes a declaration of struct mallinfo. If so, it is included; else an SVID2/XPG2 compliant version is declared below. 
These must be precisely the same for mallinfo() to work. */ /* #define HAVE_USR_INCLUDE_MALLOC_H */ #if HAVE_USR_INCLUDE_MALLOC_H #include "/usr/include/malloc.h" #else /* SVID2/XPG mallinfo structure */ struct mallinfo { int arena; /* total space allocated from system */ int ordblks; /* number of non-inuse chunks */ int smblks; /* unused -- always zero */ int hblks; /* number of mmapped regions */ int hblkhd; /* total space in mmapped regions */ int usmblks; /* unused -- always zero */ int fsmblks; /* unused -- always zero */ int uordblks; /* total allocated space */ int fordblks; /* total non-inuse space */ int keepcost; /* top-most, releasable (via malloc_trim) space */ }; /* SVID2/XPG mallopt options */ #define M_MXFAST 1 /* UNUSED in this malloc */ #define M_NLBLKS 2 /* UNUSED in this malloc */ #define M_GRAIN 3 /* UNUSED in this malloc */ #define M_KEEP 4 /* UNUSED in this malloc */ #endif /* mallopt options that actually do something */ #define M_TRIM_THRESHOLD -1 #define M_TOP_PAD -2 #define M_MMAP_THRESHOLD -3 #define M_MMAP_MAX -4 #ifndef DEFAULT_TRIM_THRESHOLD #define DEFAULT_TRIM_THRESHOLD (128L * 1024L) #endif /* M_TRIM_THRESHOLD is the maximum amount of unused top-most memory to keep before releasing via malloc_trim in free(). Automatic trimming is mainly useful in long-lived programs. Because trimming via sbrk can be slow on some systems, and can sometimes be wasteful (in cases where programs immediately afterward allocate more large chunks) the value should be high enough so that your overall system performance would improve by releasing. The trim threshold and the mmap control parameters (see below) can be traded off with one another. Trimming and mmapping are two different ways of releasing unused memory back to the system. Between these two, it is often possible to keep system-level demands of a long-lived program down to a bare minimum. 
For example, in one test suite of sessions measuring the XF86 X server on Linux, using a trim threshold of 128K and a mmap threshold of 192K led to near-minimal long term resource consumption. If you are using this malloc in a long-lived program, it should pay to experiment with these values. As a rough guide, you might set to a value close to the average size of a process (program) running on your system. Releasing this much memory would allow such a process to run in memory. Generally, it's worth it to tune for trimming rather tham memory mapping when a program undergoes phases where several large chunks are allocated and released in ways that can reuse each other's storage, perhaps mixed with phases where there are no such chunks at all. And in well-behaved long-lived programs, controlling release of large blocks via trimming versus mapping is usually faster. However, in most programs, these parameters serve mainly as protection against the system-level effects of carrying around massive amounts of unneeded memory. Since frequent calls to sbrk, mmap, and munmap otherwise degrade performance, the default parameters are set to relatively high values that serve only as safeguards. The default trim value is high enough to cause trimming only in fairly extreme (by current memory consumption standards) cases. It must be greater than page size to have any useful effect. To disable trimming completely, you can set to (unsigned long)(-1); */ #ifndef DEFAULT_TOP_PAD #define DEFAULT_TOP_PAD (0) #endif /* M_TOP_PAD is the amount of extra `padding' space to allocate or retain whenever sbrk is called. It is used in two ways internally: * When sbrk is called to extend the top of the arena to satisfy a new malloc request, this much padding is added to the sbrk request. * When malloc_trim is called automatically from free(), it is used as the `pad' argument. In both cases, the actual amount of padding is rounded so that the end of the arena is always a system page boundary. 
The main reason for using padding is to avoid calling sbrk so often. Having even a small pad greatly reduces the likelihood that nearly every malloc request during program start-up (or after trimming) will invoke sbrk, which needlessly wastes time. Automatic rounding-up to page-size units is normally sufficient to avoid measurable overhead, so the default is 0. However, in systems where sbrk is relatively slow, it can pay to increase this value, at the expense of carrying around more memory than the program needs. */ #ifndef DEFAULT_MMAP_THRESHOLD #define DEFAULT_MMAP_THRESHOLD (128 * 1024) #endif /* M_MMAP_THRESHOLD is the request size threshold for using mmap() to service a request. Requests of at least this size that cannot be allocated using already-existing space will be serviced via mmap. (If enough normal freed space already exists it is used instead.) Using mmap segregates relatively large chunks of memory so that they can be individually obtained and released from the host system. A request serviced through mmap is never reused by any other request (at least not directly; the system may just so happen to remap successive requests to the same locations). Segregating space in this way has the benefit that mmapped space can ALWAYS be individually released back to the system, which helps keep the system level memory demands of a long-lived program low. Mapped memory can never become `locked' between other chunks, as can happen with normally allocated chunks, which menas that even trimming via malloc_trim would not release them. However, it has the disadvantages that: 1. The space cannot be reclaimed, consolidated, and then used to service later requests, as happens with normal chunks. 2. It can lead to more wastage because of mmap page alignment requirements 3. It causes malloc performance to be more dependent on host system memory management support routines which may vary in implementation quality and may impose arbitrary limitations. 
Generally, servicing a request via normal malloc steps is faster than going through a system's mmap. All together, these considerations should lead you to use mmap only for relatively large requests. */ #ifndef DEFAULT_MMAP_MAX #if HAVE_MMAP #define DEFAULT_MMAP_MAX (64) #else #define DEFAULT_MMAP_MAX (0) #endif #endif /* M_MMAP_MAX is the maximum number of requests to simultaneously service using mmap. This parameter exists because: 1. Some systems have a limited number of internal tables for use by mmap. 2. In most systems, overreliance on mmap can degrade overall performance. 3. If a program allocates many large regions, it is probably better off using normal sbrk-based allocation routines that can reclaim and reallocate normal heap memory. Using a small value allows transition into this mode after the first few allocations. Setting to 0 disables all use of mmap. If HAVE_MMAP is not set, the default value is 0, and attempts to set it to non-zero values in mallopt will fail. */ /* Special defines for linux libc Except when compiled using these special defines for Linux libc using weak aliases, this malloc is NOT designed to work in multithreaded applications. No semaphores or other concurrency control are provided to ensure that multiple malloc or free calls don't run at the same time, which could be disasterous. A single semaphore could be used across malloc, realloc, and free (which is essentially the effect of the linux weak alias approach). It would be hard to obtain finer granularity. 
*/ #ifdef INTERNAL_LINUX_C_LIB #if __STD_C Void_t * __default_morecore_init (ptrdiff_t); Void_t *(*__morecore)(ptrdiff_t) = __default_morecore_init; #else Void_t * __default_morecore_init (); Void_t *(*__morecore)() = __default_morecore_init; #endif #define MORECORE (*__morecore) #define MORECORE_FAILURE 0 #define MORECORE_CLEARS 1 #else /* INTERNAL_LINUX_C_LIB */ #ifndef INTERNAL_NEWLIB #if __STD_C extern Void_t* sbrk(ptrdiff_t); #else extern Void_t* sbrk(); #endif #endif #ifndef MORECORE #define MORECORE sbrk #endif #ifndef MORECORE_FAILURE #define MORECORE_FAILURE -1 #endif #ifndef MORECORE_CLEARS #define MORECORE_CLEARS 1 #endif #endif /* INTERNAL_LINUX_C_LIB */ #if defined(INTERNAL_LINUX_C_LIB) && defined(__ELF__) #define cALLOc __libc_calloc #define fREe __libc_free #define mALLOc __libc_malloc #define mEMALIGn __libc_memalign #define rEALLOc __libc_realloc #define vALLOc __libc_valloc #define pvALLOc __libc_pvalloc #define mALLINFo __libc_mallinfo #define mALLOPt __libc_mallopt #pragma weak calloc = __libc_calloc #pragma weak free = __libc_free #pragma weak cfree = __libc_free #pragma weak malloc = __libc_malloc #pragma weak memalign = __libc_memalign #pragma weak realloc = __libc_realloc #pragma weak valloc = __libc_valloc #pragma weak pvalloc = __libc_pvalloc #pragma weak mallinfo = __libc_mallinfo #pragma weak mallopt = __libc_mallopt #else #ifdef INTERNAL_NEWLIB #define cALLOc _calloc_r #define fREe _free_r #define mALLOc _malloc_r #define mEMALIGn _memalign_r #define rEALLOc _realloc_r #define vALLOc _valloc_r #define pvALLOc _pvalloc_r #define mALLINFo _mallinfo_r #define mALLOPt _mallopt_r #define malloc_stats _malloc_stats_r #define malloc_trim _malloc_trim_r #define malloc_usable_size _malloc_usable_size_r #define malloc_update_mallinfo __malloc_update_mallinfo #define malloc_av_ __malloc_av_ #define malloc_current_mallinfo __malloc_current_mallinfo #define malloc_max_sbrked_mem __malloc_max_sbrked_mem #define malloc_max_total_mem 
__malloc_max_total_mem #define malloc_sbrk_base __malloc_sbrk_base #define malloc_top_pad __malloc_top_pad #define malloc_trim_threshold __malloc_trim_threshold #else /* ! INTERNAL_NEWLIB */ #define cALLOc calloc #define fREe free #define mALLOc malloc #define mEMALIGn memalign #define rEALLOc realloc #define vALLOc valloc #define pvALLOc pvalloc #define mALLINFo mallinfo #define mALLOPt mallopt #endif /* ! INTERNAL_NEWLIB */ #endif /* Public routines */ #if __STD_C Void_t* mALLOc(RARG size_t); void fREe(RARG Void_t*); Void_t* rEALLOc(RARG Void_t*, size_t); Void_t* mEMALIGn(RARG size_t, size_t); Void_t* vALLOc(RARG size_t); Void_t* pvALLOc(RARG size_t); Void_t* cALLOc(RARG size_t, size_t); void cfree(Void_t*); int malloc_trim(RARG size_t); size_t malloc_usable_size(RARG Void_t*); void malloc_stats(RONEARG); int mALLOPt(RARG int, int); struct mallinfo mALLINFo(RONEARG); #else Void_t* mALLOc(); void fREe(); Void_t* rEALLOc(); Void_t* mEMALIGn(); Void_t* vALLOc(); Void_t* pvALLOc(); Void_t* cALLOc(); void cfree(); int malloc_trim(); size_t malloc_usable_size(); void malloc_stats(); int mALLOPt(); struct mallinfo mALLINFo(); #endif #ifdef __cplusplus }; /* end of extern "C" */ #endif /* ---------- To make a malloc.h, end cutting here ------------ */ /* Emulation of sbrk for WIN32 All code within the ifdef WIN32 is untested by me. 
*/

#ifdef WIN32

/* Round an address up to the next page boundary. */
#define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \
~(malloc_getpagesize-1))

/* reserve 64MB to ensure large contiguous space */
#define RESERVED_SIZE (1024*1024*64)
#define NEXT_SIZE (2048*1024)
#define TOP_MEMORY ((unsigned long)2*1024*1024*1024)

struct GmListElement;
typedef struct GmListElement GmListElement;

/* Node in a singly-linked list of reserved address-space regions, kept so
   that gcleanup() can hand every region back with VirtualFree later. */
struct GmListElement
{
	GmListElement* next;
	void* base;             /* base address passed to VirtualAlloc(MEM_RESERVE) */
};

static GmListElement* head = 0;         /* list of extra reserved regions */
static unsigned int gNextAddress = 0;   /* current "break": first unallocated byte */
static unsigned int gAddressBase = 0;   /* base of the current reserved region */
static unsigned int gAllocatedSize = 0; /* size of the current reserved region */

/* Record a freshly reserved region on the global list.
   Returns the new node, or NULL if LocalAlloc failed (also asserted). */
static
GmListElement* makeGmListElement (void* bas)
{
	GmListElement* this;
	this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement));
	ASSERT (this);
	if (this)
	{
		this->base = bas;
		this->next = head;
		head = this;
	}
	return this;
}

/* Release everything obtained through wsbrk(): decommit the pages of the
   current region, then MEM_RELEASE every region on the bookkeeping list. */
void gcleanup ()
{
	BOOL rval;
	ASSERT ( (head == NULL) || (head->base == (void*)gAddressBase));
	if (gAddressBase && (gNextAddress - gAddressBase))
	{
		rval = VirtualFree ((void*)gAddressBase,
							gNextAddress - gAddressBase,
							MEM_DECOMMIT);
		ASSERT (rval);
	}
	while (head)
	{
		GmListElement* next = head->next;
		rval = VirtualFree (head->base, 0, MEM_RELEASE);
		ASSERT (rval);
		LocalFree (head);
		head = next;
	}
}

/* Walk the address space from start_address upward (below TOP_MEMORY) using
   VirtualQuery, looking for a free region of at least `size' bytes.
   Returns the start of such a region, or NULL if none exists. */
static void* findRegion (void* start_address, unsigned long size)
{
	MEMORY_BASIC_INFORMATION info;
	while ((unsigned long)start_address < TOP_MEMORY)
	{
		VirtualQuery (start_address, &info, sizeof (info));
		if (info.State != MEM_FREE)
			start_address = (char*)info.BaseAddress + info.RegionSize;
		else if (info.RegionSize >= size)
			return start_address;
		else
			/* free but too small: skip past it and keep searching */
			start_address = (char*)info.BaseAddress + info.RegionSize;
	}
	return NULL;
}

/* sbrk() emulation on top of VirtualAlloc/VirtualFree.
   size > 0: grow the break, reserving a new region if the current one is
             exhausted and committing pages as needed; returns the old break.
   size < 0: shrink the break, decommitting whole pages above the new goal.
   size == 0: return the current break.
   Returns (void*)-1 on failure, matching sbrk's convention. */
void* wsbrk (long size)
{
	void* tmp;
	if (size > 0)
	{
		if (gAddressBase == 0)
		{
			/* First call: reserve a large initial region. */
			gAllocatedSize = max (RESERVED_SIZE, AlignPage (size));
			gNextAddress = gAddressBase =
				(unsigned int)VirtualAlloc (NULL, gAllocatedSize,
											MEM_RESERVE, PAGE_NOACCESS);
		} else if (AlignPage (gNextAddress + size) > (gAddressBase +
gAllocatedSize))
		{
			/* Current reservation exhausted: reserve a fresh region. */
			long new_size = max (NEXT_SIZE, AlignPage (size));
			void* new_address = (void*)(gAddressBase+gAllocatedSize);
			do
			{
				new_address = findRegion (new_address, new_size);

				if (new_address == 0)
					return (void*)-1;

				gAddressBase = gNextAddress =
					(unsigned int)VirtualAlloc (new_address, new_size,
												MEM_RESERVE, PAGE_NOACCESS);
				// repeat in case of race condition
				// The region that we found has been snagged
				// by another thread
			}
			while (gAddressBase == 0);

			ASSERT (new_address == (void*)gAddressBase);

			gAllocatedSize = new_size;

			if (!makeGmListElement ((void*)gAddressBase))
				return (void*)-1;
		}
		/* Commit any pages between the old and new break. */
		if ((size + gNextAddress) > AlignPage (gNextAddress))
		{
			void* res;
			res = VirtualAlloc ((void*)AlignPage (gNextAddress),
								(size + gNextAddress -
								 AlignPage (gNextAddress)),
								MEM_COMMIT, PAGE_READWRITE);
			if (res == 0)
				return (void*)-1;
		}
		tmp = (void*)gNextAddress;
		gNextAddress = (unsigned int)tmp + size;
		return tmp;
	}
	else if (size < 0)
	{
		unsigned int alignedGoal = AlignPage (gNextAddress + size);
		/* Trim by releasing the virtual memory */
		if (alignedGoal >= gAddressBase)
		{
			VirtualFree ((void*)alignedGoal,
						 gNextAddress - alignedGoal,
						 MEM_DECOMMIT);
			gNextAddress = gNextAddress + size;
			return (void*)gNextAddress;
		}
		else
		{
			/* Goal falls below the current region's base: decommit the
			   whole region and report failure. */
			VirtualFree ((void*)gAddressBase,
						 gNextAddress - gAddressBase,
						 MEM_DECOMMIT);
			gNextAddress = gAddressBase;
			return (void*)-1;
		}
	}
	else
	{
		return (void*)gNextAddress;
	}
}

#endif



/*
  Type declarations
*/

/* Header of every chunk managed by this allocator; fd/bk overlay the user
   data area and are meaningful only while the chunk is on a free list. */
struct malloc_chunk
{
  INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
  INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
  struct malloc_chunk* fd;   /* double links -- used only if free. */
  struct malloc_chunk* bk;   /* back pointer of the free-list double link. */
};

typedef struct malloc_chunk* mchunkptr;

/*

   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)
Sizes of free chunks are stored both in the front of each chunk and at the end. This makes consolidating fragmented chunks into bigger chunks very fast. The size fields also hold bits representing whether chunks are free or in use. An allocated chunk looks like this: chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Size of previous chunk, if allocated | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Size of chunk, in bytes |P| mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | User data starts here... . . . . (malloc_usable_space() bytes) . . | nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Size of chunk | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Where "chunk" is the front of the chunk for the purpose of most of the malloc code, but "mem" is the pointer that is returned to the user. "Nextchunk" is the beginning of the next contiguous chunk. Chunks always begin on even word boundries, so the mem portion (which is returned to the user) is also on an even word boundary, and thus double-word aligned. Free chunks are stored in circular doubly-linked lists, and look like this: chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Size of previous chunk | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ `head:' | Size of chunk, in bytes |P| mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Forward pointer to next chunk in list | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Back pointer to previous chunk in list | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Unused space (may be 0 bytes long) . . . . 
| nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ `foot:' | Size of chunk, in bytes | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ The P (PREV_INUSE) bit, stored in the unused low-order bit of the chunk size (which is always a multiple of two words), is an in-use bit for the *previous* chunk. If that bit is *clear*, then the word before the current chunk size contains the previous chunk size, and can be used to find the front of the previous chunk. (The very first chunk allocated always has this bit set, preventing access to non-existent (or non-owned) memory.) Note that the `foot' of the current chunk is actually represented as the prev_size of the NEXT chunk. (This makes it easier to deal with alignments etc). The two exceptions to all this are 1. The special chunk `top', which doesn't bother using the trailing size field since there is no next contiguous chunk that would have to index off it. (After initialization, `top' is forced to always exist. If it would become less than MINSIZE bytes long, it is replenished via malloc_extend_top.) 2. Chunks allocated via mmap, which have the second-lowest-order bit (IS_MMAPPED) set in their size fields. Because they are never merged or traversed from any other chunk, they have no foot size or inuse information. Available chunks are kept in any of several places (all declared below): * `av': An array of chunks serving as bin headers for consolidated chunks. Each bin is doubly linked. The bins are approximately proportionally (log) spaced. There are a lot of these bins (128). This may look excessive, but works very well in practice. All procedures maintain the invariant that no consolidated chunk physically borders another one. Chunks in bins are kept in size order, with ties going to the approximately least recently used chunk. The chunks in each bin are maintained in decreasing sorted order by size. 
This is irrelevant for the small bins, which all contain the same-sized chunks, but facilitates best-fit allocation for larger chunks. (These lists are just sequential. Keeping them in order almost never requires enough traversal to warrant using fancier ordered data structures.) Chunks of the same size are linked with the most recently freed at the front, and allocations are taken from the back. This results in LRU or FIFO allocation order, which tends to give each chunk an equal opportunity to be consolidated with adjacent freed chunks, resulting in larger free chunks and less fragmentation. * `top': The top-most available chunk (i.e., the one bordering the end of available memory) is treated specially. It is never included in any bin, is used only if no other chunk is available, and is released back to the system if it is very large (see M_TRIM_THRESHOLD). * `last_remainder': A bin holding only the remainder of the most recently split (non-top) chunk. This bin is checked before other non-fitting chunks, so as to provide better locality for runs of sequentially allocated chunks. * Implicitly, through the host system's memory mapping tables. If supported, requests greater than a threshold are usually serviced via calls to mmap, and then later released via munmap. */ /* sizes, alignments */ #define SIZE_SZ (sizeof(INTERNAL_SIZE_T)) #ifndef MALLOC_ALIGNMENT #define MALLOC_ALIGN 8 #define MALLOC_ALIGNMENT (SIZE_SZ < 4 ? 8 : (SIZE_SZ + SIZE_SZ)) #else #define MALLOC_ALIGN MALLOC_ALIGNMENT #endif #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1) #define MINSIZE (sizeof(struct malloc_chunk)) /* conversion from malloc headers to user pointers, and back */ #define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ)) #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ)) /* pad request bytes into a usable size */ #define request2size(req) \ (((unsigned long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \ (unsigned long)(MINSIZE + MALLOC_ALIGN_MASK)) ? 
((MINSIZE + MALLOC_ALIGN_MASK) & ~(MALLOC_ALIGN_MASK)) : \ (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK))) /* Check if m has acceptable alignment */ #define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0) /* Physical chunk operations */ /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */ #define PREV_INUSE 0x1 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */ #define IS_MMAPPED 0x2 /* Bits to mask off when extracting size */ #define SIZE_BITS (PREV_INUSE|IS_MMAPPED) /* Ptr to next physical malloc_chunk. */ #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) )) /* Ptr to previous physical malloc_chunk */ #define prev_chunk(p)\ ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) )) /* Treat space at ptr + offset as a chunk */ #define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) /* Dealing with use bits */ /* extract p's inuse bit */ #define inuse(p)\ ((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE) /* extract inuse bit of previous chunk */ #define prev_inuse(p) ((p)->size & PREV_INUSE) /* check for mmap()'ed chunk */ #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED) /* set/clear chunk as in use without otherwise disturbing */ #define set_inuse(p)\ ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE #define clear_inuse(p)\ ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE) /* check/set/clear inuse bits in known places */ #define inuse_bit_at_offset(p, s)\ (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE) #define set_inuse_bit_at_offset(p, s)\ (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE) #define clear_inuse_bit_at_offset(p, s)\ (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE)) /* Dealing with size fields */ /* Get size, ignoring use bits */ #define chunksize(p) ((p)->size & ~(SIZE_BITS)) /* Set size at head, without disturbing its use bit */ 
#define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s))) /* Set size/use ignoring previous bits in header */ #define set_head(p, s) ((p)->size = (s)) /* Set size at footer (only when chunk is not in use) */ #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s)) /* Bins The bins, `av_' are an array of pairs of pointers serving as the heads of (initially empty) doubly-linked lists of chunks, laid out in a way so that each pair can be treated as if it were in a malloc_chunk. (This way, the fd/bk offsets for linking bin heads and chunks are the same). Bins for sizes < 512 bytes contain chunks of all the same size, spaced 8 bytes apart. Larger bins are approximately logarithmically spaced. (See the table below.) The `av_' array is never mentioned directly in the code, but instead via bin access macros. Bin layout: 64 bins of size 8 32 bins of size 64 16 bins of size 512 8 bins of size 4096 4 bins of size 32768 2 bins of size 262144 1 bin of size what's left There is actually a little bit of slop in the numbers in bin_index for the sake of speed. This makes no difference elsewhere. The special chunks `top' and `last_remainder' get their own bins, (this is implemented via yet more trickery with the av_ array), although `top' is never properly linked to its bin since it is always handled specially. */ #ifdef SEPARATE_OBJECTS #define av_ malloc_av_ #endif #define NAV 128 /* number of bins */ typedef struct malloc_chunk* mbinptr; /* access macros */ #define bin_at(i) ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ)) #define next_bin(b) ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr))) #define prev_bin(b) ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr))) /* The first 2 bins are never indexed. The corresponding av_ cells are instead used for bookkeeping. This is not to save space, but to simplify indexing, maintain locality, and avoid some initialization tests. 
*/ #define top (bin_at(0)->fd) /* The topmost chunk */ #define last_remainder (bin_at(1)) /* remainder from last split */ /* Because top initially points to its own bin with initial zero size, thus forcing extension on the first malloc request, we avoid having any special code in malloc to check whether it even exists yet. But we still need to in malloc_extend_top. */ #define initial_top ((mchunkptr)(bin_at(0))) /* Helper macro to initialize bins */ #define IAV(i) bin_at(i), bin_at(i) #ifdef DEFINE_MALLOC STATIC mbinptr av_[NAV * 2 + 2] = { 0, 0, IAV(0), IAV(1), IAV(2), IAV(3), IAV(4), IAV(5), IAV(6), IAV(7), IAV(8), IAV(9), IAV(10), IAV(11), IAV(12), IAV(13), IAV(14), IAV(15), IAV(16), IAV(17), IAV(18), IAV(19), IAV(20), IAV(21), IAV(22), IAV(23), IAV(24), IAV(25), IAV(26), IAV(27), IAV(28), IAV(29), IAV(30), IAV(31), IAV(32), IAV(33), IAV(34), IAV(35), IAV(36), IAV(37), IAV(38), IAV(39), IAV(40), IAV(41), IAV(42), IAV(43), IAV(44), IAV(45), IAV(46), IAV(47), IAV(48), IAV(49), IAV(50), IAV(51), IAV(52), IAV(53), IAV(54), IAV(55), IAV(56), IAV(57), IAV(58), IAV(59), IAV(60), IAV(61), IAV(62), IAV(63), IAV(64), IAV(65), IAV(66), IAV(67), IAV(68), IAV(69), IAV(70), IAV(71), IAV(72), IAV(73), IAV(74), IAV(75), IAV(76), IAV(77), IAV(78), IAV(79), IAV(80), IAV(81), IAV(82), IAV(83), IAV(84), IAV(85), IAV(86), IAV(87), IAV(88), IAV(89), IAV(90), IAV(91), IAV(92), IAV(93), IAV(94), IAV(95), IAV(96), IAV(97), IAV(98), IAV(99), IAV(100), IAV(101), IAV(102), IAV(103), IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111), IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119), IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127) }; #else extern mbinptr av_[NAV * 2 + 2]; #endif /* field-extraction macros */ #define first(b) ((b)->fd) #define last(b) ((b)->bk) /* Indexing into bins */ #define bin_index(sz) \ (((((unsigned long)(sz)) >> 9) == 0) ? 
(((unsigned long)(sz)) >> 3): \ ((((unsigned long)(sz)) >> 9) <= 4) ? 56 + (((unsigned long)(sz)) >> 6): \ ((((unsigned long)(sz)) >> 9) <= 20) ? 91 + (((unsigned long)(sz)) >> 9): \ ((((unsigned long)(sz)) >> 9) <= 84) ? 110 + (((unsigned long)(sz)) >> 12): \ ((((unsigned long)(sz)) >> 9) <= 340) ? 119 + (((unsigned long)(sz)) >> 15): \ ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \ 126) /* bins for chunks < 512 are all spaced SMALLBIN_WIDTH bytes apart, and hold identically sized chunks. This is exploited in malloc. */ #define MAX_SMALLBIN_SIZE 512 #define SMALLBIN_WIDTH 8 #define SMALLBIN_WIDTH_BITS 3 #define MAX_SMALLBIN (MAX_SMALLBIN_SIZE / SMALLBIN_WIDTH) - 1 #define smallbin_index(sz) (((unsigned long)(sz)) >> SMALLBIN_WIDTH_BITS) /* Requests are `small' if both the corresponding and the next bin are small */ #define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH) /* To help compensate for the large number of bins, a one-level index structure is used for bin-by-bin searching. `binblocks' is a one-word bitvector recording whether groups of BINBLOCKWIDTH bins have any (possibly) non-empty bins, so they can be skipped over all at once during during traversals. The bits are NOT always cleared as soon as all bins in a block are empty, but instead only when all are noticed to be empty during traversal in malloc. 
*/ #define BINBLOCKWIDTH 4 /* bins per block */ #define binblocks (bin_at(0)->size) /* bitvector of nonempty blocks */ /* bin<->block macros */ #define idx2binblock(ix) ((unsigned long)1 << (ix / BINBLOCKWIDTH)) #define mark_binblock(ii) (binblocks |= idx2binblock(ii)) #define clear_binblock(ii) (binblocks &= ~(idx2binblock(ii))) /* Other static bookkeeping data */ #ifdef SEPARATE_OBJECTS #define trim_threshold malloc_trim_threshold #define top_pad malloc_top_pad #define n_mmaps_max malloc_n_mmaps_max #define mmap_threshold malloc_mmap_threshold #define sbrk_base malloc_sbrk_base #define max_sbrked_mem malloc_max_sbrked_mem #define max_total_mem malloc_max_total_mem #define current_mallinfo malloc_current_mallinfo #define n_mmaps malloc_n_mmaps #define max_n_mmaps malloc_max_n_mmaps #define mmapped_mem malloc_mmapped_mem #define max_mmapped_mem malloc_max_mmapped_mem #endif /* variables holding tunable values */ #ifdef DEFINE_MALLOC STATIC unsigned long trim_threshold = DEFAULT_TRIM_THRESHOLD; STATIC unsigned long top_pad = DEFAULT_TOP_PAD; #if HAVE_MMAP STATIC unsigned int n_mmaps_max = DEFAULT_MMAP_MAX; STATIC unsigned long mmap_threshold = DEFAULT_MMAP_THRESHOLD; #endif /* The first value returned from sbrk */ STATIC char* sbrk_base = (char*)(-1); /* The maximum memory obtained from system via sbrk */ STATIC unsigned long max_sbrked_mem = 0; /* The maximum via either sbrk or mmap */ STATIC unsigned long max_total_mem = 0; /* internal working copy of mallinfo */ STATIC struct mallinfo current_mallinfo = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; #if HAVE_MMAP /* Tracking mmaps */ STATIC unsigned int n_mmaps = 0; STATIC unsigned int max_n_mmaps = 0; STATIC unsigned long mmapped_mem = 0; STATIC unsigned long max_mmapped_mem = 0; #endif #else /* ! 
DEFINE_MALLOC */

extern unsigned long trim_threshold;
extern unsigned long top_pad;
#if HAVE_MMAP
extern unsigned int  n_mmaps_max;
extern unsigned long mmap_threshold;
#endif

extern char* sbrk_base;
extern unsigned long max_sbrked_mem;
extern unsigned long max_total_mem;
extern struct mallinfo current_mallinfo;
#if HAVE_MMAP
extern unsigned int n_mmaps;
extern unsigned int max_n_mmaps;
extern unsigned long mmapped_mem;
extern unsigned long max_mmapped_mem;
#endif

#endif /* ! DEFINE_MALLOC */

/* The total memory obtained from system via sbrk */
#define sbrked_mem  (current_mallinfo.arena)

/*
  Debugging support
*/

#if DEBUG

/*
  These routines make a number of assertions about the states of data
  structures that should be true at all times. If any are not true, it's
  very likely that a user program has somehow trashed memory. (It's also
  possible that there is a coding error in malloc. In which case,
  please report it!)
*/

/* Invariants common to every non-mmapped chunk: it lies within the sbrk'd
   arena and does not extend past `top' (or past the arena end, for top
   itself). */
#if __STD_C
static void do_check_chunk(mchunkptr p)
#else
static void do_check_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;

  /* No checkable chunk is mmapped */
  assert(!chunk_is_mmapped(p));

  /* Check for legal address ... */
  assert((char*)p >= sbrk_base);
  if (p != top)
    assert((char*)p + sz <= (char*)top);
  else
    assert((char*)p + sz <= sbrk_base + sbrked_mem);
}

/* Invariants of a chunk on a free list: marked free, properly aligned,
   footer matches header, fully consolidated with its neighbors, and its
   list links are mutually consistent.  Chunks below MINSIZE are fencepost
   markers of exactly SIZE_SZ bytes. */
#if __STD_C
static void do_check_free_chunk(mchunkptr p)
#else
static void do_check_free_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  mchunkptr next = chunk_at_offset(p, sz);

  do_check_chunk(p);

  /* Check whether it claims to be free ... */
  assert(!inuse(p));

  /* Unless a special marker, must have OK fields */
  if ((long)sz >= (long)MINSIZE)
  {
    assert((sz & MALLOC_ALIGN_MASK) == 0);
    assert(aligned_OK(chunk2mem(p)));
    /* ... matching footer field */
    assert(next->prev_size == sz);
    /* ... and is fully consolidated */
    assert(prev_inuse(p));
    assert (next == top || inuse(next));
    /* ... and has minimally sane links */
    assert(p->fd->bk == p);
    assert(p->bk->fd == p);
  }
  else /* markers are always of size SIZE_SZ */
    assert(sz == SIZE_SZ);
}

/* Invariants of an allocated chunk, plus opportunistic checks of any free
   neighbors (free chunks expose more checkable state than in-use ones). */
#if __STD_C
static void do_check_inuse_chunk(mchunkptr p)
#else
static void do_check_inuse_chunk(p) mchunkptr p;
#endif
{
  mchunkptr next = next_chunk(p);
  do_check_chunk(p);

  /* Check whether it claims to be in use ... */
  assert(inuse(p));

  /* ... and is surrounded by OK chunks.
    Since more things can be checked with free chunks than inuse ones,
    if an inuse chunk borders them and debug is on, it's worth doing them.
  */
  if (!prev_inuse(p))
  {
    mchunkptr prv = prev_chunk(p);
    assert(next_chunk(prv) == p);
    do_check_free_chunk(prv);
  }
  if (next == top)
  {
    assert(prev_inuse(next));
    assert(chunksize(next) >= MINSIZE);
  }
  else if (!inuse(next))
    do_check_free_chunk(next);

}

/* Checks on a chunk about to be returned from malloc for a request of `s'
   usable bytes: in-use invariants hold, the size is legal and aligned, and
   the slack (room) is non-negative but below MINSIZE (i.e. the fit was
   tight enough that no split was warranted). */
#if __STD_C
static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
#else
static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  long room = long_sub_size_t(sz, s);

  do_check_inuse_chunk(p);

  /* Legal size ... */
  assert((long)sz >= (long)MINSIZE);
  assert((sz & MALLOC_ALIGN_MASK) == 0);
  assert(room >= 0);
  assert(room < (long)MINSIZE);

  /* ... and alignment */
  assert(aligned_OK(chunk2mem(p)));

  /* ... and was allocated at front of an available chunk */
  assert(prev_inuse(p));

}

/* With DEBUG the check_* macros invoke the routines above; otherwise they
   compile to nothing. */
#define check_free_chunk(P)  do_check_free_chunk(P)
#define check_inuse_chunk(P) do_check_inuse_chunk(P)
#define check_chunk(P) do_check_chunk(P)
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
#else
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_chunk(P)
#define check_malloced_chunk(P,N)
#endif

/*
  Macro-based internal utilities
*/

/*
  Linking chunks in bin lists.
  Call these only with variables, not arbitrary expressions, as arguments.
*/

/*
  Place chunk p of size s in its bin, in size order,
  putting it ahead of others of same size.
*/

/* frontlink(P, S, IDX, BK, FD): insert chunk P of size S into its bin.
   IDX, BK and FD are scratch variables written by the macro.  Small bins
   hold a single size so P goes at the front; larger bins are kept sorted
   in decreasing size, with P placed ahead of equal-sized chunks. */
#define frontlink(P, S, IDX, BK, FD)                                          \
{                                                                             \
  if (S < MAX_SMALLBIN_SIZE)                                                  \
  {                                                                           \
    IDX = smallbin_index(S);                                                  \
    mark_binblock(IDX);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
  else                                                                        \
  {                                                                           \
    IDX = bin_index(S);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    if (FD == BK) mark_binblock(IDX);                                         \
    else                                                                      \
    {                                                                         \
      while (FD != BK && S < chunksize(FD)) FD = FD->fd;                      \
      BK = FD->bk;                                                            \
    }                                                                         \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
}

/* take a chunk off a list */

#define unlink(P, BK, FD)                                                     \
{                                                                             \
  BK = P->bk;                                                                 \
  FD = P->fd;                                                                 \
  FD->bk = BK;                                                                \
  BK->fd = FD;                                                                \
}                                                                             \

/* Place p as the last remainder */

#define link_last_remainder(P)                                                \
{                                                                             \
  last_remainder->fd = last_remainder->bk = P;                                \
  P->fd = P->bk = last_remainder;                                            \
}

/* Clear the last_remainder bin */

#define clear_last_remainder \
  (last_remainder->fd = last_remainder->bk = last_remainder)

/*
  Routines dealing with mmap().
*/

#if HAVE_MMAP

#ifdef DEFINE_MALLOC

/* Obtain a chunk directly from the system via mmap.  Returns the chunk
   (marked IS_MMAPPED) or 0 if the region limit is reached or mmap fails. */
#if __STD_C
static mchunkptr mmap_chunk(size_t size)
#else
static mchunkptr mmap_chunk(size) size_t size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  mchunkptr p;

#ifndef MAP_ANONYMOUS
  /* /dev/zero descriptor, opened lazily on first use and kept open. */
  static int fd = -1;
#endif

  if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */

  /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
   * there is no following chunk whose prev_size field could be used.
   */
  size = (size + SIZE_SZ + page_mask) & ~page_mask;

#ifdef MAP_ANONYMOUS
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE,
		      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
#else /* !MAP_ANONYMOUS */
  if (fd < 0)
  {
    fd = open("/dev/zero", O_RDWR);
    if(fd < 0) return 0;
  }
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif

  if(p == (mchunkptr)-1) return 0;

  n_mmaps++;
  if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;

  /* We demand that eight bytes into a page must be 8-byte aligned.
   */
  assert(aligned_OK(chunk2mem(p)));

  /* The offset to the start of the mmapped region is stored
   * in the prev_size field of the chunk; normally it is zero,
   * but that can be changed in memalign().
   */
  p->prev_size = 0;
  set_head(p, size|IS_MMAPPED);

  /* Update the mmap accounting and high-water marks. */
  mmapped_mem += size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
  return p;
}

#endif /* DEFINE_MALLOC */

#ifdef SEPARATE_OBJECTS
#define munmap_chunk malloc_munmap_chunk
#endif

#ifdef DEFINE_FREE

/* Return an mmapped chunk to the system.  The region unmapped starts
   p->prev_size bytes before the chunk (the memalign offset). */
#if __STD_C
STATIC void munmap_chunk(mchunkptr p)
#else
STATIC void munmap_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T size = chunksize(p);
  int ret;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);

  n_mmaps--;
  mmapped_mem -= (size + p->prev_size);

  ret = munmap((char *)p - p->prev_size, size + p->prev_size);

  /* munmap returns non-zero on failure */
  assert(ret == 0);
}

#else /* ! DEFINE_FREE */

#if __STD_C
extern void munmap_chunk(mchunkptr);
#else
extern void munmap_chunk();
#endif

#endif /* ! DEFINE_FREE */

#if HAVE_MREMAP

#ifdef DEFINE_REALLOC

/* Resize an mmapped chunk in place via mremap (may move the mapping).
   Returns the possibly-relocated chunk, or 0 if mremap fails. */
#if __STD_C
static mchunkptr mremap_chunk(mchunkptr p, size_t new_size)
#else
static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  INTERNAL_SIZE_T offset = p->prev_size;
  INTERNAL_SIZE_T size = chunksize(p);
  char *cp;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((size + offset) & (malloc_getpagesize-1)) == 0);

  /* Note the extra SIZE_SZ overhead as in mmap_chunk().
   */
  new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;

  /* Last arg 1 = MREMAP_MAYMOVE: allow the kernel to relocate the mapping. */
  cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1);

  if (cp == (char *)-1) return 0;

  p = (mchunkptr)(cp + offset);

  assert(aligned_OK(chunk2mem(p)));

  assert((p->prev_size == offset));
  set_head(p, (new_size - offset)|IS_MMAPPED);

  /* Update the mmap accounting and high-water marks. */
  mmapped_mem -= size + offset;
  mmapped_mem += new_size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
  return p;
}

#endif /* DEFINE_REALLOC */

#endif /* HAVE_MREMAP */

#endif /* HAVE_MMAP */

#ifdef DEFINE_MALLOC

/*
  Extend the top-most chunk by obtaining memory from system.
  Main interface to sbrk (but see also malloc_trim).
*/

#if __STD_C
static void malloc_extend_top(RARG INTERNAL_SIZE_T nb)
#else
static void malloc_extend_top(RARG nb) RDECL INTERNAL_SIZE_T nb;
#endif
{
  char*     brk;                  /* return value from sbrk */
  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
  INTERNAL_SIZE_T correction;     /* bytes for 2nd sbrk call */
  int correction_failed = 0;      /* whether we should relax the assertion */
  char*     new_brk;              /* return of 2nd sbrk call */
  INTERNAL_SIZE_T top_size;       /* new size of top chunk */

  mchunkptr old_top = top;  /* Record state of old top */
  INTERNAL_SIZE_T old_top_size = chunksize(old_top);
  char*     old_end      = (char*)(chunk_at_offset(old_top, old_top_size));

  /* Pad request with top_pad plus minimal overhead */

  INTERNAL_SIZE_T    sbrk_size     = nb + top_pad + MINSIZE;
  unsigned long pagesz    = malloc_getpagesize;

  /* If not the first time through, round to preserve page boundary */
  /* Otherwise, we need to correct to a page size below anyway. */
  /* (We also correct below if an intervening foreign sbrk call.)
   */

  if (sbrk_base != (char*)(-1))
    sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);

  brk = (char*)(MORECORE (sbrk_size));

  /* Fail if sbrk failed or if a foreign sbrk call killed our space */
  if (brk == (char*)(MORECORE_FAILURE) ||
      (brk < old_end && old_top != initial_top))
    return;

  sbrked_mem += sbrk_size;

  if (brk == old_end /* can just add bytes to current top, unless
			previous correction failed */
      && ((POINTER_UINT)old_end & (pagesz - 1)) == 0)
  {
    /* New space is contiguous and page-aligned: simply grow `top'. */
    top_size = sbrk_size + old_top_size;
    set_head(top, top_size | PREV_INUSE);
  }
  else
  {
    if (sbrk_base == (char*)(-1))  /* First time through. Record base */
      sbrk_base = brk;
    else  /* Someone else called sbrk().  Count those bytes as sbrked_mem. */
      sbrked_mem += brk - (char*)old_end;

    /* Guarantee alignment of first new chunk made from this space */
    front_misalign = (POINTER_UINT)chunk2mem(brk) & MALLOC_ALIGN_MASK;
    if (front_misalign > 0)
    {
      correction = (MALLOC_ALIGNMENT) - front_misalign;
      brk += correction;
    }
    else
      correction = 0;

    /* Guarantee the next brk will be at a page boundary */
    correction += pagesz - ((POINTER_UINT)(brk + sbrk_size) & (pagesz - 1));

    /* To guarantee page boundary, correction should be less than pagesz */
    correction &= (pagesz - 1);

    /* Allocate correction */
    new_brk = (char*)(MORECORE (correction));
    if (new_brk == (char*)(MORECORE_FAILURE))
      {
	/* Second sbrk failed: fall back to the uncorrected break and
	   remember to relax the final page-alignment assertion. */
	correction = 0;
	correction_failed = 1;
	new_brk = brk + sbrk_size;
	if (front_misalign > 0)
	  new_brk -= (MALLOC_ALIGNMENT) - front_misalign;
      }

    sbrked_mem += correction;

    top = (mchunkptr)brk;
    top_size = new_brk - brk + correction;
    set_head(top, top_size | PREV_INUSE);

    if (old_top != initial_top)
    {

      /* There must have been an intervening foreign sbrk call. */
      /* A double fencepost is necessary to prevent consolidation */

      /* If not enough space to do this, then user did something very wrong */
      if (old_top_size < MINSIZE)
      {
	set_head(top, PREV_INUSE); /* will force null return from malloc */
	return;
      }

      /* Also keep size a multiple of MALLOC_ALIGNMENT */
      old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
      set_head_size(old_top, old_top_size);
      chunk_at_offset(old_top, old_top_size          )->size =
	SIZE_SZ|PREV_INUSE;
      chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size =
	SIZE_SZ|PREV_INUSE;
      /* If possible, release the rest. */
      if (old_top_size >= MINSIZE)
	fREe(RCALL chunk2mem(old_top));
    }
  }

  /* Track high-water marks for mallinfo/statistics. */
  if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem)
    max_sbrked_mem = sbrked_mem;
#if HAVE_MMAP
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
#else
  if ((unsigned long)(sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = sbrked_mem;
#endif

  /* We always land on a page boundary */
  assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0
	 || correction_failed);
}

#endif /* DEFINE_MALLOC */

/* Main public routines */

#ifdef DEFINE_MALLOC

/*
  Malloc Algorithm:

    The requested size is first converted into a usable form, `nb'.
    This currently means to add 4 bytes overhead plus possibly more to
    obtain 8-byte alignment and/or to obtain a size of at least
    MINSIZE (currently 16 bytes), the smallest allocatable size.
    (All fits are considered `exact' if they are within MINSIZE bytes.)

    From there, the first successful of the following steps is taken:

      1. The bin corresponding to the request size is scanned, and if
         a chunk of exactly the right size is found, it is taken.

      2. The most recently remaindered chunk is used if it is big
         enough.  This is a form of (roving) first fit, used only in
         the absence of exact fits. Runs of consecutive requests use
         the remainder of the chunk used for the previous such request
         whenever possible.
This limited use of a first-fit style allocation strategy tends to give contiguous chunks coextensive lifetimes, which improves locality and can reduce fragmentation in the long run. 3. Other bins are scanned in increasing size order, using a chunk big enough to fulfill the request, and splitting off any remainder. This search is strictly by best-fit; i.e., the smallest (with ties going to approximately the least recently used) chunk that fits is selected. 4. If large enough, the chunk bordering the end of memory (`top') is split off. (This use of `top' is in accord with the best-fit search rule. In effect, `top' is treated as larger (and thus less well fitting) than any other available chunk since it can be extended to be as large as necessary (up to system limitations). 5. If the request size meets the mmap threshold and the system supports mmap, and there are few enough currently allocated mmapped regions, and a call to mmap succeeds, the request is allocated via direct memory mapping. 6. Otherwise, the top of memory is extended by obtaining more space from the system (normally using sbrk, but definable to anything else via the MORECORE macro). Memory is gathered from the system (in system page-sized units) in a way that allows chunks obtained across different sbrk calls to be consolidated, but does not require contiguous memory. Thus, it should be safe to intersperse mallocs with other sbrk calls. All allocations are made from the the `lowest' part of any found chunk. (The implementation invariant is that prev_inuse is always true of any allocated chunk; i.e., that each allocated chunk borders either a previously allocated and still in-use chunk, or the base of its memory arena.) */ #if __STD_C Void_t* mALLOc(RARG size_t bytes) #else Void_t* mALLOc(RARG bytes) RDECL size_t bytes; #endif { #ifdef MALLOC_PROVIDED return malloc (bytes); // Make sure that the pointer returned by malloc is returned back. 
#else mchunkptr victim; /* inspected/selected chunk */ INTERNAL_SIZE_T victim_size; /* its size */ int idx; /* index for bin traversal */ mbinptr bin; /* associated bin */ mchunkptr remainder; /* remainder from a split */ long remainder_size; /* its size */ int remainder_index; /* its bin index */ unsigned long block; /* block traverser bit */ int startidx; /* first bin of a traversed block */ mchunkptr fwd; /* misc temp for linking */ mchunkptr bck; /* misc temp for linking */ mbinptr q; /* misc temp */ INTERNAL_SIZE_T nb = request2size(bytes); /* padded request size; */ /* Check for overflow and just fail, if so. */ if (nb > INT_MAX || nb < bytes) { RERRNO = ENOMEM; return 0; } MALLOC_LOCK; /* Check for exact match in a bin */ if (is_small_request(nb)) /* Faster version for small requests */ { idx = smallbin_index(nb); /* No traversal or size check necessary for small bins. */ q = bin_at(idx); victim = last(q); #if MALLOC_ALIGN != 16 /* Also scan the next one, since it would have a remainder < MINSIZE */ if (victim == q) { q = next_bin(q); victim = last(q); } #endif if (victim != q) { victim_size = chunksize(victim); unlink(victim, bck, fwd); set_inuse_bit_at_offset(victim, victim_size); check_malloced_chunk(victim, nb); MALLOC_UNLOCK; return chunk2mem(victim); } idx += 2; /* Set for bin scan below. We've already scanned 2 bins. 
*/ } else { idx = bin_index(nb); bin = bin_at(idx); for (victim = last(bin); victim != bin; victim = victim->bk) { victim_size = chunksize(victim); remainder_size = long_sub_size_t(victim_size, nb); if (remainder_size >= (long)MINSIZE) /* too big */ { --idx; /* adjust to rescan below after checking last remainder */ break; } else if (remainder_size >= 0) /* exact fit */ { unlink(victim, bck, fwd); set_inuse_bit_at_offset(victim, victim_size); check_malloced_chunk(victim, nb); MALLOC_UNLOCK; return chunk2mem(victim); } } ++idx; } /* Try to use the last split-off remainder */ if ( (victim = last_remainder->fd) != last_remainder) { victim_size = chunksize(victim); remainder_size = long_sub_size_t(victim_size, nb); if (remainder_size >= (long)MINSIZE) /* re-split */ { remainder = chunk_at_offset(victim, nb); set_head(victim, nb | PREV_INUSE); link_last_remainder(remainder); set_head(remainder, remainder_size | PREV_INUSE); set_foot(remainder, remainder_size); check_malloced_chunk(victim, nb); MALLOC_UNLOCK; return chunk2mem(victim); } clear_last_remainder; if (remainder_size >= 0) /* exhaust */ { set_inuse_bit_at_offset(victim, victim_size); check_malloced_chunk(victim, nb); MALLOC_UNLOCK; return chunk2mem(victim); } /* Else place in bin */ frontlink(victim, victim_size, remainder_index, bck, fwd); } /* If there are any possibly nonempty big-enough blocks, search for best fitting chunk by scanning bins in blockwidth units. */ if ( (block = idx2binblock(idx)) <= binblocks) { /* Get to the first marked block */ if ( (block & binblocks) == 0) { /* force to an even block boundary */ idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH; block <<= 1; while ((block & binblocks) == 0) { idx += BINBLOCKWIDTH; block <<= 1; } } /* For each possibly nonempty block ... */ for (;;) { startidx = idx; /* (track incomplete blocks) */ q = bin = bin_at(idx); /* For each bin in this block ... */ do { /* Find and use first big enough chunk ... 
*/ for (victim = last(bin); victim != bin; victim = victim->bk) { victim_size = chunksize(victim); remainder_size = long_sub_size_t(victim_size, nb); if (remainder_size >= (long)MINSIZE) /* split */ { remainder = chunk_at_offset(victim, nb); set_head(victim, nb | PREV_INUSE); unlink(victim, bck, fwd); link_last_remainder(remainder); set_head(remainder, remainder_size | PREV_INUSE); set_foot(remainder, remainder_size); check_malloced_chunk(victim, nb); MALLOC_UNLOCK; return chunk2mem(victim); } else if (remainder_size >= 0) /* take */ { set_inuse_bit_at_offset(victim, victim_size); unlink(victim, bck, fwd); check_malloced_chunk(victim, nb); MALLOC_UNLOCK; return chunk2mem(victim); } } bin = next_bin(bin); #if MALLOC_ALIGN == 16 if (idx < MAX_SMALLBIN) { bin = next_bin(bin); ++idx; } #endif } while ((++idx & (BINBLOCKWIDTH - 1)) != 0); /* Clear out the block bit. */ do /* Possibly backtrack to try to clear a partial block */ { if ((startidx & (BINBLOCKWIDTH - 1)) == 0) { binblocks &= ~block; break; } --startidx; q = prev_bin(q); } while (first(q) == q); /* Get to the next possibly nonempty block */ if ( (block <<= 1) <= binblocks && (block != 0) ) { while ((block & binblocks) == 0) { idx += BINBLOCKWIDTH; block <<= 1; } } else break; } } /* Try to use top chunk */ /* Require that there be a remainder, ensuring top always exists */ remainder_size = long_sub_size_t(chunksize(top), nb); if (chunksize(top) < nb || remainder_size < (long)MINSIZE) { #if HAVE_MMAP /* If big and would otherwise need to extend, try to use mmap instead */ if ((unsigned long)nb >= (unsigned long)mmap_threshold && (victim = mmap_chunk(nb)) != 0) { MALLOC_UNLOCK; return chunk2mem(victim); } #endif /* Try to extend */ malloc_extend_top(RCALL nb); remainder_size = long_sub_size_t(chunksize(top), nb); if (chunksize(top) < nb || remainder_size < (long)MINSIZE) { MALLOC_UNLOCK; return 0; /* propagate failure */ } } victim = top; set_head(victim, nb | PREV_INUSE); top = chunk_at_offset(victim, nb); 
  set_head(top, remainder_size | PREV_INUSE);

  check_malloced_chunk(victim, nb);
  MALLOC_UNLOCK;
  return chunk2mem(victim);

#endif /* MALLOC_PROVIDED */
}

#endif /* DEFINE_MALLOC */

#ifdef DEFINE_FREE

/*

  free() algorithm :

    cases:

       1. free(0) has no effect.

       2. If the chunk was allocated via mmap, it is released via munmap().

       3. If a returned chunk borders the current high end of memory,
          it is consolidated into the top, and if the total unused
          topmost memory exceeds the trim threshold, malloc_trim is
          called.

       4. Other chunks are consolidated as they arrive, and
          placed in corresponding bins.  (This includes the case of
          consolidating with the current `last_remainder').

*/

#if __STD_C
void fREe(RARG Void_t* mem)
#else
void fREe(RARG mem) RDECL Void_t* mem;
#endif
{
#ifdef MALLOC_PROVIDED

  free (mem);

#else

  mchunkptr p;            /* chunk corresponding to mem */
  INTERNAL_SIZE_T hd;     /* its head field */
  INTERNAL_SIZE_T sz;     /* its size */
  int idx;                /* its bin index */
  mchunkptr next;         /* next contiguous chunk */
  INTERNAL_SIZE_T nextsz; /* its size */
  INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
  mchunkptr bck;          /* misc temp for linking */
  mchunkptr fwd;          /* misc temp for linking */
  int islr;               /* track whether merging with last_remainder */

  if (mem == 0)           /* free(0) has no effect */
    return;

  MALLOC_LOCK;

  p = mem2chunk(mem);
  hd = p->size;           /* head word: size plus PREV_INUSE/IS_MMAPPED flag bits */

#if HAVE_MMAP
  if (hd & IS_MMAPPED)    /* release mmapped memory. */
  {
    munmap_chunk(p);
    MALLOC_UNLOCK;
    return;
  }
#endif

  check_inuse_chunk(p);

  sz = hd & ~PREV_INUSE;  /* strip the flag bit to get the chunk size */
  next = chunk_at_offset(p, sz);
  nextsz = chunksize(next);

  if (next == top)        /* merge with top */
  {
    sz += nextsz;

    if (!(hd & PREV_INUSE)) /* consolidate backward */
    {
      prevsz = p->prev_size;
      p = chunk_at_offset(p, -prevsz);
      sz += prevsz;
      unlink(p, bck, fwd);  /* unlink is a macro; bck/fwd are scratch outputs */
    }

    set_head(p, sz | PREV_INUSE);
    top = p;
    /* If the (now larger) top chunk exceeds the trim threshold, give
       memory back to the system. */
    if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
      malloc_trim(RCALL top_pad);
    MALLOC_UNLOCK;
    return;
  }

  set_head(next, nextsz); /* clear inuse bit */

  islr = 0;

  if (!(hd & PREV_INUSE)) /* consolidate backward */
  {
    prevsz = p->prev_size;
    p = chunk_at_offset(p, -prevsz);
    sz += prevsz;

    if (p->fd == last_remainder)  /* keep as last_remainder */
      islr = 1;
    else
      unlink(p, bck, fwd);
  }

  if (!(inuse_bit_at_offset(next, nextsz))) /* consolidate forward */
  {
    sz += nextsz;

    if (!islr && next->fd == last_remainder)  /* re-insert last_remainder */
    {
      islr = 1;
      link_last_remainder(p);
    }
    else
      unlink(next, bck, fwd);
  }

  set_head(p, sz | PREV_INUSE);
  set_foot(p, sz);
  if (!islr)
    /* NOTE(review): frontlink appears to be a macro that computes the bin
       index into idx and links p into that bin; idx/bck/fwd are scratch
       outputs, not inputs — confirm against the macro definitions above. */
    frontlink(p, sz, idx, bck, fwd);

  MALLOC_UNLOCK;

#endif /* MALLOC_PROVIDED */
}

#endif /* DEFINE_FREE */

#ifdef DEFINE_REALLOC

/*

  Realloc algorithm:

    Chunks that were obtained via mmap cannot be extended or shrunk
    unless HAVE_MREMAP is defined, in which case mremap is used.
    Otherwise, if their reallocation is for additional space, they are
    copied.  If for less, they are just left alone.

    Otherwise, if the reallocation is for additional space, and the
    chunk can be extended, it is, else a malloc-copy-free sequence is
    taken.  There are several different ways that a chunk could be
    extended. All are tried:

       * Extending forward into following adjacent free chunk.
       * Shifting backwards, joining preceding adjacent space
       * Both shifting backwards and extending forward.
       * Extending into newly sbrked space

    Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a
    size argument of zero (re)allocates a minimum-sized chunk.
If the reallocation is for less space, and the new request is for a `small' (<512 bytes) size, then the newly unused space is lopped off and freed. The old unix realloc convention of allowing the last-free'd chunk to be used as an argument to realloc is no longer supported. I don't know of any programs still relying on this feature, and allowing it would also allow too many other incorrect usages of realloc to be sensible. */ #if __STD_C Void_t* rEALLOc(RARG Void_t* oldmem, size_t bytes) #else Void_t* rEALLOc(RARG oldmem, bytes) RDECL Void_t* oldmem; size_t bytes; #endif { #ifdef MALLOC_PROVIDED realloc (oldmem, bytes); #else INTERNAL_SIZE_T nb; /* padded request size */ mchunkptr oldp; /* chunk corresponding to oldmem */ INTERNAL_SIZE_T oldsize; /* its size */ mchunkptr newp; /* chunk to return */ INTERNAL_SIZE_T newsize; /* its size */ Void_t* newmem; /* corresponding user mem */ mchunkptr next; /* next contiguous chunk after oldp */ INTERNAL_SIZE_T nextsize; /* its size */ mchunkptr prev; /* previous contiguous chunk before oldp */ INTERNAL_SIZE_T prevsize; /* its size */ mchunkptr remainder; /* holds split off extra space from newp */ INTERNAL_SIZE_T remainder_size; /* its size */ mchunkptr bck; /* misc temp for linking */ mchunkptr fwd; /* misc temp for linking */ #ifdef REALLOC_ZERO_BYTES_FREES if (bytes == 0) { fREe(RCALL oldmem); return 0; } #endif /* realloc of null is supposed to be same as malloc */ if (oldmem == 0) return mALLOc(RCALL bytes); MALLOC_LOCK; newp = oldp = mem2chunk(oldmem); newsize = oldsize = chunksize(oldp); nb = request2size(bytes); /* Check for overflow and just fail, if so. */ if (nb > INT_MAX || nb < bytes) { RERRNO = ENOMEM; return 0; } #if HAVE_MMAP if (chunk_is_mmapped(oldp)) { #if HAVE_MREMAP newp = mremap_chunk(oldp, nb); if(newp) { MALLOC_UNLOCK; return chunk2mem(newp); } #endif /* Note the extra SIZE_SZ overhead. */ if(oldsize - SIZE_SZ >= nb) { MALLOC_UNLOCK; return oldmem; /* do nothing */ } /* Must alloc, copy, free. 
*/ newmem = mALLOc(RCALL bytes); if (newmem == 0) { MALLOC_UNLOCK; return 0; /* propagate failure */ } MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ); munmap_chunk(oldp); MALLOC_UNLOCK; return newmem; } #endif check_inuse_chunk(oldp); if ((long)(oldsize) < (long)(nb)) { /* Try expanding forward */ next = chunk_at_offset(oldp, oldsize); if (next == top || !inuse(next)) { nextsize = chunksize(next); /* Forward into top only if a remainder */ if (next == top) { if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE)) { newsize += nextsize; top = chunk_at_offset(oldp, nb); set_head(top, (newsize - nb) | PREV_INUSE); set_head_size(oldp, nb); MALLOC_UNLOCK; return chunk2mem(oldp); } } /* Forward into next chunk */ else if (((long)(nextsize + newsize) >= (long)(nb))) { unlink(next, bck, fwd); newsize += nextsize; goto split; } } else { next = 0; nextsize = 0; } /* Try shifting backwards. */ if (!prev_inuse(oldp)) { prev = prev_chunk(oldp); prevsize = chunksize(prev); /* try forward + backward first to save a later consolidation */ if (next != 0) { /* into top */ if (next == top) { if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE)) { unlink(prev, bck, fwd); newp = prev; newsize += prevsize + nextsize; newmem = chunk2mem(newp); MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); top = chunk_at_offset(newp, nb); set_head(top, (newsize - nb) | PREV_INUSE); set_head_size(newp, nb); MALLOC_UNLOCK; return newmem; } } /* into next chunk */ else if (((long)(nextsize + prevsize + newsize) >= (long)(nb))) { unlink(next, bck, fwd); unlink(prev, bck, fwd); newp = prev; newsize += nextsize + prevsize; newmem = chunk2mem(newp); MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); goto split; } } /* backward only */ if (prev != 0 && (long)(prevsize + newsize) >= (long)nb) { unlink(prev, bck, fwd); newp = prev; newsize += prevsize; newmem = chunk2mem(newp); MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); goto split; } } /* Must allocate */ newmem = mALLOc (RCALL bytes); if (newmem 
== 0) /* propagate failure */ { MALLOC_UNLOCK; return 0; } /* Avoid copy if newp is next chunk after oldp. */ /* (This can only happen when new chunk is sbrk'ed.) */ if ( (newp = mem2chunk(newmem)) == next_chunk(oldp)) { newsize += chunksize(newp); newp = oldp; goto split; } /* Otherwise copy, free, and exit */ MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); fREe(RCALL oldmem); MALLOC_UNLOCK; return newmem; } split: /* split off extra room in old or expanded chunk */ remainder_size = long_sub_size_t(newsize, nb); if (remainder_size >= (long)MINSIZE) /* split off remainder */ { remainder = chunk_at_offset(newp, nb); set_head_size(newp, nb); set_head(remainder, remainder_size | PREV_INUSE); set_inuse_bit_at_offset(remainder, remainder_size); fREe(RCALL chunk2mem(remainder)); /* let free() deal with it */ } else { set_head_size(newp, newsize); set_inuse_bit_at_offset(newp, newsize); } check_inuse_chunk(newp); MALLOC_UNLOCK; return chunk2mem(newp); #endif /* MALLOC_PROVIDED */ } #endif /* DEFINE_REALLOC */ #ifdef DEFINE_MEMALIGN /* memalign algorithm: memalign requests more than enough space from malloc, finds a spot within that chunk that meets the alignment request, and then possibly frees the leading and trailing space. The alignment argument must be a power of two. This property is not checked by memalign, so misuse may result in random runtime errors. 8-byte alignment is guaranteed by normal malloc calls, so don't bother calling memalign with an argument of 8 or less. Overreliance on memalign is a sure way to fragment space. 
*/ #if __STD_C Void_t* mEMALIGn(RARG size_t alignment, size_t bytes) #else Void_t* mEMALIGn(RARG alignment, bytes) RDECL size_t alignment; size_t bytes; #endif { INTERNAL_SIZE_T nb; /* padded request size */ char* m; /* memory returned by malloc call */ mchunkptr p; /* corresponding chunk */ char* brk; /* alignment point within p */ mchunkptr newp; /* chunk to return */ INTERNAL_SIZE_T newsize; /* its size */ INTERNAL_SIZE_T leadsize; /* leading space befor alignment point */ mchunkptr remainder; /* spare room at end to split off */ long remainder_size; /* its size */ /* If need less alignment than we give anyway, just relay to malloc */ if (alignment <= MALLOC_ALIGNMENT) return mALLOc(RCALL bytes); /* Otherwise, ensure that it is at least a minimum chunk size */ if (alignment < MINSIZE) alignment = MINSIZE; /* Call malloc with worst case padding to hit alignment. */ nb = request2size(bytes); /* Check for overflow. */ if (nb > INT_MAX || nb < bytes) { RERRNO = ENOMEM; return 0; } m = (char*)(mALLOc(RCALL nb + alignment + MINSIZE)); if (m == 0) return 0; /* propagate failure */ MALLOC_LOCK; p = mem2chunk(m); if ((((unsigned long)(m)) % alignment) == 0) /* aligned */ { #if HAVE_MMAP if(chunk_is_mmapped(p)) { MALLOC_UNLOCK; return chunk2mem(p); /* nothing more to do */ } #endif } else /* misaligned */ { /* Find an aligned spot inside chunk. Since we need to give back leading space in a chunk of at least MINSIZE, if the first calculation places us at a spot with less than MINSIZE leader, we can move to the next aligned spot -- we've allocated enough total room so that this is always possible. 
*/ brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -alignment); if ((long)(brk - (char*)(p)) < (long)MINSIZE) brk = brk + alignment; newp = (mchunkptr)brk; leadsize = brk - (char*)(p); newsize = chunksize(p) - leadsize; #if HAVE_MMAP if(chunk_is_mmapped(p)) { newp->prev_size = p->prev_size + leadsize; set_head(newp, newsize|IS_MMAPPED); MALLOC_UNLOCK; return chunk2mem(newp); } #endif /* give back leader, use the rest */ set_head(newp, newsize | PREV_INUSE); set_inuse_bit_at_offset(newp, newsize); set_head_size(p, leadsize); fREe(RCALL chunk2mem(p)); p = newp; assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0); } /* Also give back spare room at the end */ remainder_size = long_sub_size_t(chunksize(p), nb); if (remainder_size >= (long)MINSIZE) { remainder = chunk_at_offset(p, nb); set_head(remainder, remainder_size | PREV_INUSE); set_head_size(p, nb); fREe(RCALL chunk2mem(remainder)); } check_inuse_chunk(p); MALLOC_UNLOCK; return chunk2mem(p); } #endif /* DEFINE_MEMALIGN */ #ifdef DEFINE_VALLOC /* valloc just invokes memalign with alignment argument equal to the page size of the system (or as near to this as can be figured out from all the includes/defines above.) */ #if __STD_C Void_t* vALLOc(RARG size_t bytes) #else Void_t* vALLOc(RARG bytes) RDECL size_t bytes; #endif { return mEMALIGn (RCALL malloc_getpagesize, bytes); } #endif /* DEFINE_VALLOC */ #ifdef DEFINE_PVALLOC /* pvalloc just invokes valloc for the nearest pagesize that will accommodate request */ #if __STD_C Void_t* pvALLOc(RARG size_t bytes) #else Void_t* pvALLOc(RARG bytes) RDECL size_t bytes; #endif { size_t pagesize = malloc_getpagesize; return mEMALIGn (RCALL pagesize, (bytes + pagesize - 1) & ~(pagesize - 1)); } #endif /* DEFINE_PVALLOC */ #ifdef DEFINE_CALLOC /* calloc calls malloc, then zeroes out the allocated chunk. 
*/ #if __STD_C Void_t* cALLOc(RARG size_t n, size_t elem_size) #else Void_t* cALLOc(RARG n, elem_size) RDECL size_t n; size_t elem_size; #endif { mchunkptr p; INTERNAL_SIZE_T csz; INTERNAL_SIZE_T sz = n * elem_size; #if MORECORE_CLEARS mchunkptr oldtop; INTERNAL_SIZE_T oldtopsize; #endif Void_t* mem; /* check if expand_top called, in which case don't need to clear */ #if MORECORE_CLEARS MALLOC_LOCK; oldtop = top; oldtopsize = chunksize(top); #endif mem = mALLOc (RCALL sz); if (mem == 0) { #if MORECORE_CLEARS MALLOC_UNLOCK; #endif return 0; } else { p = mem2chunk(mem); /* Two optional cases in which clearing not necessary */ #if HAVE_MMAP if (chunk_is_mmapped(p)) { #if MORECORE_CLEARS MALLOC_UNLOCK; #endif return mem; } #endif csz = chunksize(p); #if MORECORE_CLEARS if (p == oldtop && csz > oldtopsize) { /* clear only the bytes from non-freshly-sbrked memory */ csz = oldtopsize; } MALLOC_UNLOCK; #endif MALLOC_ZERO(mem, csz - SIZE_SZ); return mem; } } #endif /* DEFINE_CALLOC */ #if defined(DEFINE_CFREE) && !defined(__CYGWIN__) /* cfree just calls free. It is needed/defined on some systems that pair it with calloc, presumably for odd historical reasons. */ #if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__) #if !defined(INTERNAL_NEWLIB) || !defined(_REENT_ONLY) #if __STD_C void cfree(Void_t *mem) #else void cfree(mem) Void_t *mem; #endif { #ifdef INTERNAL_NEWLIB fREe(_REENT, mem); #else fREe(mem); #endif } #endif #endif #endif /* DEFINE_CFREE */ #ifdef DEFINE_FREE /* Malloc_trim gives memory back to the system (via negative arguments to sbrk) if there is unused memory at the `high' end of the malloc pool. You can call this after freeing large blocks of memory to potentially reduce the system-level memory requirements of a program. However, it cannot guarantee to reduce memory. Under some allocation patterns, some large free blocks of memory will be locked between two used chunks, so they cannot be given back to the system. 
The `pad' argument to malloc_trim represents the amount of free trailing space to leave untrimmed. If this argument is zero, only the minimum amount of memory to maintain internal data structures will be left (one page or less). Non-zero arguments can be supplied to maintain enough trailing space to service future expected allocations without having to re-obtain memory from the system. Malloc_trim returns 1 if it actually released any memory, else 0. */ #if __STD_C int malloc_trim(RARG size_t pad) #else int malloc_trim(RARG pad) RDECL size_t pad; #endif { long top_size; /* Amount of top-most memory */ long extra; /* Amount to release */ char* current_brk; /* address returned by pre-check sbrk call */ char* new_brk; /* address returned by negative sbrk call */ unsigned long pagesz = malloc_getpagesize; MALLOC_LOCK; top_size = chunksize(top); extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz; if (extra < (long)pagesz) /* Not enough memory to release */ { MALLOC_UNLOCK; return 0; } else { /* Test to make sure no one else called sbrk */ current_brk = (char*)(MORECORE (0)); if (current_brk != (char*)(top) + top_size) { MALLOC_UNLOCK; return 0; /* Apparently we don't own memory; must fail */ } else { new_brk = (char*)(MORECORE (-extra)); if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */ { /* Try to figure out what we have */ current_brk = (char*)(MORECORE (0)); top_size = current_brk - (char*)top; if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */ { sbrked_mem = current_brk - sbrk_base; set_head(top, top_size | PREV_INUSE); } check_chunk(top); MALLOC_UNLOCK; return 0; } else { /* Success. Adjust top accordingly. 
*/ set_head(top, (top_size - extra) | PREV_INUSE); sbrked_mem -= extra; check_chunk(top); MALLOC_UNLOCK; return 1; } } } } #endif /* DEFINE_FREE */ #ifdef DEFINE_MALLOC_USABLE_SIZE /* malloc_usable_size: This routine tells you how many bytes you can actually use in an allocated chunk, which may be more than you requested (although often not). You can use this many bytes without worrying about overwriting other allocated objects. Not a particularly great programming practice, but still sometimes useful. */ #if __STD_C size_t malloc_usable_size(RARG Void_t* mem) #else size_t malloc_usable_size(RARG mem) RDECL Void_t* mem; #endif { mchunkptr p; if (mem == 0) return 0; else { p = mem2chunk(mem); if(!chunk_is_mmapped(p)) { if (!inuse(p)) return 0; #if DEBUG MALLOC_LOCK; check_inuse_chunk(p); MALLOC_UNLOCK; #endif return chunksize(p) - SIZE_SZ; } return chunksize(p) - 2*SIZE_SZ; } } #endif /* DEFINE_MALLOC_USABLE_SIZE */ #ifdef DEFINE_MALLINFO /* Utility to update current_mallinfo for malloc_stats and mallinfo() */ STATIC void malloc_update_mallinfo() { int i; mbinptr b; mchunkptr p; #if DEBUG mchunkptr q; #endif INTERNAL_SIZE_T avail = chunksize(top); int navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0; for (i = 1; i < NAV; ++i) { b = bin_at(i); for (p = last(b); p != b; p = p->bk) { #if DEBUG check_free_chunk(p); for (q = next_chunk(p); q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE; q = next_chunk(q)) check_inuse_chunk(q); #endif avail += chunksize(p); navail++; } } current_mallinfo.ordblks = navail; current_mallinfo.uordblks = sbrked_mem - avail; current_mallinfo.fordblks = avail; #if HAVE_MMAP current_mallinfo.hblks = n_mmaps; current_mallinfo.hblkhd = mmapped_mem; #endif current_mallinfo.keepcost = chunksize(top); } #else /* ! DEFINE_MALLINFO */ #if __STD_C extern void malloc_update_mallinfo(void); #else extern void malloc_update_mallinfo(); #endif #endif /* ! 
DEFINE_MALLINFO */ #ifdef DEFINE_MALLOC_STATS /* malloc_stats: Prints on stderr the amount of space obtain from the system (both via sbrk and mmap), the maximum amount (which may be more than current if malloc_trim and/or munmap got called), the maximum number of simultaneous mmap regions used, and the current number of bytes allocated via malloc (or realloc, etc) but not yet freed. (Note that this is the number of bytes allocated, not the number requested. It will be larger than the number requested because of alignment and bookkeeping overhead.) */ #if __STD_C void malloc_stats(RONEARG) #else void malloc_stats(RONEARG) RDECL #endif { unsigned long local_max_total_mem; int local_sbrked_mem; struct mallinfo local_mallinfo; #if HAVE_MMAP unsigned long local_mmapped_mem, local_max_n_mmaps; #endif FILE *fp; MALLOC_LOCK; malloc_update_mallinfo(); local_max_total_mem = max_total_mem; local_sbrked_mem = sbrked_mem; local_mallinfo = current_mallinfo; #if HAVE_MMAP local_mmapped_mem = mmapped_mem; local_max_n_mmaps = max_n_mmaps; #endif MALLOC_UNLOCK; #ifdef INTERNAL_NEWLIB _REENT_SMALL_CHECK_INIT(reent_ptr); fp = _stderr_r(reent_ptr); #define fprintf fiprintf #else fp = stderr; #endif fprintf(fp, "max system bytes = %10u\n", (unsigned int)(local_max_total_mem)); #if HAVE_MMAP fprintf(fp, "system bytes = %10u\n", (unsigned int)(local_sbrked_mem + local_mmapped_mem)); fprintf(fp, "in use bytes = %10u\n", (unsigned int)(local_mallinfo.uordblks + local_mmapped_mem)); #else fprintf(fp, "system bytes = %10u\n", (unsigned int)local_sbrked_mem); fprintf(fp, "in use bytes = %10u\n", (unsigned int)local_mallinfo.uordblks); #endif #if HAVE_MMAP fprintf(fp, "max mmap regions = %10u\n", (unsigned int)local_max_n_mmaps); #endif } #endif /* DEFINE_MALLOC_STATS */ #ifdef DEFINE_MALLINFO /* mallinfo returns a copy of updated current mallinfo. 
*/ #if __STD_C struct mallinfo mALLINFo(RONEARG) #else struct mallinfo mALLINFo(RONEARG) RDECL #endif { struct mallinfo ret; MALLOC_LOCK; malloc_update_mallinfo(); ret = current_mallinfo; MALLOC_UNLOCK; return ret; } #endif /* DEFINE_MALLINFO */ #ifdef DEFINE_MALLOPT /* mallopt: mallopt is the general SVID/XPG interface to tunable parameters. The format is to provide a (parameter-number, parameter-value) pair. mallopt then sets the corresponding parameter to the argument value if it can (i.e., so long as the value is meaningful), and returns 1 if successful else 0. See descriptions of tunable parameters above. */ #if __STD_C int mALLOPt(RARG int param_number, int value) #else int mALLOPt(RARG param_number, value) RDECL int param_number; int value; #endif { MALLOC_LOCK; switch(param_number) { case M_TRIM_THRESHOLD: trim_threshold = value; MALLOC_UNLOCK; return 1; case M_TOP_PAD: top_pad = value; MALLOC_UNLOCK; return 1; case M_MMAP_THRESHOLD: #if HAVE_MMAP mmap_threshold = value; #endif MALLOC_UNLOCK; return 1; case M_MMAP_MAX: #if HAVE_MMAP n_mmaps_max = value; MALLOC_UNLOCK; return 1; #else MALLOC_UNLOCK; return value == 0; #endif default: MALLOC_UNLOCK; return 0; } } #endif /* DEFINE_MALLOPT */ /* History: V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee) * Fixed ordering problem with boundary-stamping V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee) * Added pvalloc, as recommended by H.J. Liu * Added 64bit pointer support mainly from Wolfram Gloger * Added anonymously donated WIN32 sbrk emulation * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen * malloc_extend_top: fix mask error that caused wastage after foreign sbrks * Add linux mremap support code from HJ Liu V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee) * Integrated most documentation with the code. * Add support for mmap, with help from Wolfram Gloger ([email protected]). * Use last_remainder in more cases. 
* Pack bins using idea from [email protected] * Use ordered bins instead of best-fit threshold * Eliminate block-local decls to simplify tracing and debugging. * Support another case of realloc via move into top * Fix error occurring when initial sbrk_base not word-aligned. * Rely on page size for units instead of SBRK_UNIT to avoid surprises about sbrk alignment conventions. * Add mallinfo, mallopt. Thanks to Raymond Nijssen ([email protected]) for the suggestion. * Add `pad' argument to malloc_trim and top_pad mallopt parameter. * More precautions for cases where other routines call sbrk, courtesy of Wolfram Gloger ([email protected]). * Added macros etc., allowing use in linux libc from H.J. Lu ([email protected]) * Inverted this history list V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee) * Re-tuned and fixed to behave more nicely with V2.6.0 changes. * Removed all preallocation code since under current scheme the work required to undo bad preallocations exceeds the work saved in good cases for most test programs. * No longer use return list or unconsolidated bins since no scheme using them consistently outperforms those that don't given above changes. * Use best fit for very large chunks to prevent some worst-cases. * Added some support for debugging V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) * Removed footers when chunks are in use. Thanks to Paul Wilson ([email protected]) for the suggestion. V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) * Added malloc_trim, with help from Wolfram Gloger ([email protected]). 
V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) * realloc: try to expand in both directions * malloc: swap order of clean-bin strategy; * realloc: only conditionally expand backwards * Try not to scavenge used bins * Use bin counts as a guide to preallocation * Occasionally bin return list chunks in first scan * Add a few optimizations from [email protected] V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) * faster bin computation & slightly different binning * merged all consolidations to one part of malloc proper (eliminating old malloc_find_space & malloc_clean_bin) * Scan 2 returns chunks (not just 1) * Propagate failure in realloc if malloc returns 0 * Add stuff to allow compilation on non-ANSI compilers from [email protected] V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) * removed potential for odd address access in prev_chunk * removed dependency on getpagesize.h * misc cosmetics and a bit more internal documentation * anticosmetics: mangled names in macros to evade debugger strangeness * tested on sparc, hp-700, dec-mips, rs6000 with gcc & native cc (hp, dec only) allowing Detlefs & Zorn comparison study (in SIGPLAN Notices.) Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) * Based loosely on libg++-1.2X malloc. (It retains some of the overall structure of old version, but most details differ.) */ #endif
the_stack_data/45450691.c
#include <stdio.h>
#include <getopt.h>

/*
 * echo: prints its arguments to stdout separated by single spaces,
 * followed by a newline.  Part of snekutils.
 *
 * Options:
 *   -h  show usage and exit
 *   -v  show version and exit
 *
 * Returns 0 on success, 1 when no arguments are supplied.
 */
int main(int argc, char **argv) {
    /* Require at least one argument (option or text). */
    if (argc < 2) {
        printf("Invalid syntax!\n");
        printf("use 'echo -h' for help!\n");
        return 1;
    }

    int c;
    while ((c = getopt(argc, argv, "hv")) != -1) {
        switch (c) {
        case 'h':
            printf("echo: Prints out text\n");
            printf("Syntax:\n");
            printf("echo [text]\n");
            return 0;
        case 'v':
            printf("echo command, part of snekutils\n");
            printf("version 1.0\n");
            return 0;
        default:
            /* getopt already printed a diagnostic for unknown options. */
            return 0;
        }
    }

    /* Print every argument.  The separator goes BETWEEN arguments so no
     * trailing space is emitted before the newline (the original printed
     * "arg " for every argument, leaving a stray space at the end). */
    for (int i = 1; i < argc; i++) {
        if (i > 1)
            printf(" ");
        fputs(argv[i], stdout);
    }
    printf("\n");
    return 0;
}
the_stack_data/237641849.c
#include <time.h> int timer_create(clockid_t clockid, struct sigevent *restrict evp, timer_t *restrict timerid) { return 0; } /* POSIX(199309) LINK(rt) */
the_stack_data/69318.c
#include <stdio.h>
#include <stdlib.h>

/* Singly linked list node. */
typedef struct node {
    int value;          /* payload */
    struct node *next;  /* next node in the ring */
} node_t;

/*
 * Build a circular list of three nodes holding 1 -> 2 -> 3 -> (back to 1)
 * and return a pointer to the first node, or NULL on allocation failure.
 * The caller owns all three nodes and must free them.
 *
 * Fix: the original allocated malloc(sizeof(node_t *) * 2) per node --
 * the size of two POINTERS, not the size of a node.  That only worked by
 * coincidence of struct layout on 64-bit platforms; allocate sizeof *ptr
 * instead (and drop the unnecessary malloc casts).
 */
node_t *construct_3() {
    node_t *x = malloc(sizeof *x);
    node_t *y = malloc(sizeof *y);
    node_t *z = malloc(sizeof *z);
    if (x == NULL || y == NULL || z == NULL) {
        /* free(NULL) is a no-op, so no guards are needed. */
        free(x);
        free(y);
        free(z);
        return NULL;
    }
    x->value = 1;
    y->value = 2;
    z->value = 3;
    x->next = y;
    y->next = z;
    z->next = x;
    return x;
}

/* Test driver below. */
int dump_all(node_t *);

int main(int argc, char **argv) {
    node_t *x = construct_3();
    if (x == NULL)
        return -1;
    return dump_all(x);
}

/* Walk the three-node ring, print each link, verify it closes back on
 * the first node, and free all nodes.  Returns 0 on success, -1 if the
 * ring is malformed. */
int dump_all(node_t *x) {
    printf("x -> %d\n", x->value);
    node_t *y = x->next;
    printf("%d -> %d\n", x->value, y->value);
    node_t *z = y->next;
    printf("%d -> %d\n", y->value, z->value);
    if (z->next != x) {
        free(x);
        free(y);
        free(z);
        printf("failed");
        return -1;
    }
    else {
        printf("%d -> %d\n", z->value, x->value);
        free(x);
        free(y);
        free(z);
        return 0;
    }
}
the_stack_data/111078499.c
#include <stdio.h>
#include <stdlib.h>

/* Growable int array: doubles capacity when full, shrinks to a quarter
 * of capacity once only a quarter of the slots remain in use. */
typedef struct {
    int* data;          /* heap-allocated element storage */
    int size;           /* current capacity (allocated slots) */
    int totalElements;  /* number of slots actually in use */
} Dynamic_Array;

/* Helper: realloc (or initial malloc when p == NULL) that aborts the
 * program on out-of-memory instead of dereferencing NULL later.  Uses a
 * temporary so the old pointer is never lost (realloc antipattern fix). */
static int* xrealloc_int(int* p, int count) {
    int* tmp = realloc(p, sizeof(int) * (size_t) count);
    if (tmp == NULL) {
        fprintf(stderr, "out of memory\n");
        exit(EXIT_FAILURE);
    }
    return tmp;
}

/* Initialize the array with the given starting capacity and no elements. */
void initArray(Dynamic_Array* dynamicArray, int size) {
    dynamicArray->size = size;
    dynamicArray->totalElements = 0;
    dynamicArray->data = xrealloc_int(NULL, size);
}

/* Append one element, doubling the capacity first when full. */
void addElement(int data, Dynamic_Array* dynamicArray) {
    if (dynamicArray->size == dynamicArray->totalElements) {
        dynamicArray->size *= 2;
        dynamicArray->data = xrealloc_int(dynamicArray->data, dynamicArray->size);
    }
    dynamicArray->data[(dynamicArray->totalElements)++] = data;
}

/* Shrink capacity to a quarter of its current value (minimum 1, so we
 * never request a zero-byte allocation). */
void resizeArray(Dynamic_Array* dynamicArray) {
    dynamicArray->size /= 4;
    if (dynamicArray->size < 1)
        dynamicArray->size = 1;
    dynamicArray->data = xrealloc_int(dynamicArray->data, dynamicArray->size);
}

/* Remove the last element, shrinking when occupancy drops to 1/4.
 *
 * Fix: the original used data[(totalElements)--] = 0, which writes at
 * index totalElements -- ONE PAST the last live element (out of bounds
 * when the array is full) -- and never cleared the removed slot.
 * Pre-decrement zeroes the element actually being removed. */
void removeLastElement(Dynamic_Array* dynamicArray) {
    if (dynamicArray->totalElements == 0)
        return;  /* nothing to remove */
    dynamicArray->data[--(dynamicArray->totalElements)] = 0;
    if (dynamicArray->totalElements == dynamicArray->size / 4) {
        resizeArray(dynamicArray);
    }
}

/* Release the storage and reset the struct to an empty state. */
void freeArray(Dynamic_Array* dynamicArray) {
    free(dynamicArray->data);
    dynamicArray->data = NULL;
    dynamicArray->size = dynamicArray->totalElements = 0;
}

/* Demo driver: grow 5 -> 10 -> 20 while adding 12 elements, print them,
 * remove 10 (triggering one shrink to capacity 5), then print the
 * capacity, count, and remaining elements. */
int main() {
    Dynamic_Array dynamicArray;
    initArray(&dynamicArray, 5);
    for (int i = 0; i < 12; i++) {
        addElement(i, &dynamicArray);
    }
    for (int i = 0; i < dynamicArray.totalElements; i++) {
        printf("%d ", dynamicArray.data[i]);
    }
    printf("\n");
    for (int i = 0; i < 10; i++) {
        removeLastElement(&dynamicArray);
    }
    printf("%d %d\n", dynamicArray.size, dynamicArray.totalElements);
    for (int i = 0; i < dynamicArray.totalElements; i++) {
        printf("%d ", dynamicArray.data[i]);
    }
    freeArray(&dynamicArray);
    return 0;
}
the_stack_data/61075590.c
#include <stdio.h>

/*
 * Print a right-aligned staircase of '#' characters of height n to
 * stdout.  Row i (1-based) contains n-i leading spaces followed by i
 * '#' characters, ending with a newline.
 *
 * Fix: the original called printf() without including <stdio.h>,
 * relying on an implicit function declaration -- invalid since C99 and
 * a hard error in C23.
 */
void staircase(int n) {
    for (int row = 1; row <= n; row++) {   /* one row per step */
        for (int pad = n - row; pad > 0; pad--)
            printf(" ");                   /* right-align the step */
        for (int col = 0; col < row; col++)
            printf("#");
        printf("\n");
    }
}
the_stack_data/237642692.c
//@ ltl invariant negative: ( ([] ( (<> ( ( AP((gate_l1 != 0)) && (! AP((gate_l0 != 0)))) && ( (X AP((gate_l1 != 0))) && (X AP((gate_l0 != 0)))))) || (! ( ( (! AP((gate_l0 != 0))) && (! AP((gate_l1 != 0)))) && ( (! (X AP((gate_l1 != 0)))) && (X AP((gate_l0 != 0)))))))) || (! ([] (<> AP((1.0 <= _diverge_delta)))))); extern float __VERIFIER_nondet_float(void); extern int __VERIFIER_nondet_int(void); char __VERIFIER_nondet_bool(void) { return __VERIFIER_nondet_int() != 0; } char t14_l1, _x_t14_l1; char t14_l0, _x_t14_l0; char t14_evt1, _x_t14_evt1; char t14_evt0, _x_t14_evt0; float t14_x, _x_t14_x; char t13_l1, _x_t13_l1; char t13_l0, _x_t13_l0; char t13_evt1, _x_t13_evt1; char t13_evt0, _x_t13_evt0; float t13_x, _x_t13_x; char t12_l1, _x_t12_l1; char t12_l0, _x_t12_l0; char t12_evt1, _x_t12_evt1; char t12_evt0, _x_t12_evt0; float t12_x, _x_t12_x; float t11_x, _x_t11_x; char t10_l1, _x_t10_l1; char t10_l0, _x_t10_l0; float _diverge_delta, _x__diverge_delta; char t10_evt1, _x_t10_evt1; char t10_evt0, _x_t10_evt0; float t10_x, _x_t10_x; char t9_evt1, _x_t9_evt1; char t9_evt0, _x_t9_evt0; char t2_l0, _x_t2_l0; char t9_l1, _x_t9_l1; int controller_cnt, _x_controller_cnt; char t2_evt0, _x_t2_evt0; char t1_l1, _x_t1_l1; char t1_l0, _x_t1_l0; char gate_l1, _x_gate_l1; char t1_evt1, _x_t1_evt1; float t2_x, _x_t2_x; char t1_evt0, _x_t1_evt0; char t0_l1, _x_t0_l1; char gate_evt1, _x_gate_evt1; float t1_x, _x_t1_x; char t0_evt1, _x_t0_evt1; char t0_l0, _x_t0_l0; char gate_evt0, _x_gate_evt0; char t0_evt0, _x_t0_evt0; float gate_y, _x_gate_y; float t0_x, _x_t0_x; float delta, _x_delta; char t7_evt1, _x_t7_evt1; float t8_x, _x_t8_x; char t2_l1, _x_t2_l1; char t3_evt0, _x_t3_evt0; char gate_l0, _x_gate_l0; char t11_l0, _x_t11_l0; char controller_l0, _x_controller_l0; char t11_l1, _x_t11_l1; char controller_l1, _x_controller_l1; char t2_evt1, _x_t2_evt1; float t3_x, _x_t3_x; char t11_evt0, _x_t11_evt0; char controller_evt0, _x_controller_evt0; char t11_evt1, _x_t11_evt1; char 
controller_evt1, _x_controller_evt1; char t9_l0, _x_t9_l0; float controller_z, _x_controller_z; char t4_l0, _x_t4_l0; char t3_evt1, _x_t3_evt1; float t4_x, _x_t4_x; char t3_l0, _x_t3_l0; char t3_l1, _x_t3_l1; char t4_evt0, _x_t4_evt0; char t4_evt1, _x_t4_evt1; float t5_x, _x_t5_x; char t4_l1, _x_t4_l1; char t5_evt0, _x_t5_evt0; char t5_evt1, _x_t5_evt1; char controller_evt2, _x_controller_evt2; float t6_x, _x_t6_x; char t5_l0, _x_t5_l0; char t5_l1, _x_t5_l1; char t6_evt0, _x_t6_evt0; char t6_evt1, _x_t6_evt1; float t7_x, _x_t7_x; char t6_l0, _x_t6_l0; char t6_l1, _x_t6_l1; char t7_evt0, _x_t7_evt0; char t7_l0, _x_t7_l0; char t7_l1, _x_t7_l1; char t8_evt0, _x_t8_evt0; char t8_evt1, _x_t8_evt1; float t9_x, _x_t9_x; char t8_l0, _x_t8_l0; char t8_l1, _x_t8_l1; int main() { t14_l1 = __VERIFIER_nondet_bool(); t14_l0 = __VERIFIER_nondet_bool(); t14_evt1 = __VERIFIER_nondet_bool(); t14_evt0 = __VERIFIER_nondet_bool(); t14_x = __VERIFIER_nondet_float(); t13_l1 = __VERIFIER_nondet_bool(); t13_l0 = __VERIFIER_nondet_bool(); t13_evt1 = __VERIFIER_nondet_bool(); t13_evt0 = __VERIFIER_nondet_bool(); t13_x = __VERIFIER_nondet_float(); t12_l1 = __VERIFIER_nondet_bool(); t12_l0 = __VERIFIER_nondet_bool(); t12_evt1 = __VERIFIER_nondet_bool(); t12_evt0 = __VERIFIER_nondet_bool(); t12_x = __VERIFIER_nondet_float(); t11_x = __VERIFIER_nondet_float(); t10_l1 = __VERIFIER_nondet_bool(); t10_l0 = __VERIFIER_nondet_bool(); _diverge_delta = __VERIFIER_nondet_float(); t10_evt1 = __VERIFIER_nondet_bool(); t10_evt0 = __VERIFIER_nondet_bool(); t10_x = __VERIFIER_nondet_float(); t9_evt1 = __VERIFIER_nondet_bool(); t9_evt0 = __VERIFIER_nondet_bool(); t2_l0 = __VERIFIER_nondet_bool(); t9_l1 = __VERIFIER_nondet_bool(); controller_cnt = __VERIFIER_nondet_int(); t2_evt0 = __VERIFIER_nondet_bool(); t1_l1 = __VERIFIER_nondet_bool(); t1_l0 = __VERIFIER_nondet_bool(); gate_l1 = __VERIFIER_nondet_bool(); t1_evt1 = __VERIFIER_nondet_bool(); t2_x = __VERIFIER_nondet_float(); t1_evt0 = 
__VERIFIER_nondet_bool(); t0_l1 = __VERIFIER_nondet_bool(); gate_evt1 = __VERIFIER_nondet_bool(); t1_x = __VERIFIER_nondet_float(); t0_evt1 = __VERIFIER_nondet_bool(); t0_l0 = __VERIFIER_nondet_bool(); gate_evt0 = __VERIFIER_nondet_bool(); t0_evt0 = __VERIFIER_nondet_bool(); gate_y = __VERIFIER_nondet_float(); t0_x = __VERIFIER_nondet_float(); delta = __VERIFIER_nondet_float(); t7_evt1 = __VERIFIER_nondet_bool(); t8_x = __VERIFIER_nondet_float(); t2_l1 = __VERIFIER_nondet_bool(); t3_evt0 = __VERIFIER_nondet_bool(); gate_l0 = __VERIFIER_nondet_bool(); t11_l0 = __VERIFIER_nondet_bool(); controller_l0 = __VERIFIER_nondet_bool(); t11_l1 = __VERIFIER_nondet_bool(); controller_l1 = __VERIFIER_nondet_bool(); t2_evt1 = __VERIFIER_nondet_bool(); t3_x = __VERIFIER_nondet_float(); t11_evt0 = __VERIFIER_nondet_bool(); controller_evt0 = __VERIFIER_nondet_bool(); t11_evt1 = __VERIFIER_nondet_bool(); controller_evt1 = __VERIFIER_nondet_bool(); t9_l0 = __VERIFIER_nondet_bool(); controller_z = __VERIFIER_nondet_float(); t4_l0 = __VERIFIER_nondet_bool(); t3_evt1 = __VERIFIER_nondet_bool(); t4_x = __VERIFIER_nondet_float(); t3_l0 = __VERIFIER_nondet_bool(); t3_l1 = __VERIFIER_nondet_bool(); t4_evt0 = __VERIFIER_nondet_bool(); t4_evt1 = __VERIFIER_nondet_bool(); t5_x = __VERIFIER_nondet_float(); t4_l1 = __VERIFIER_nondet_bool(); t5_evt0 = __VERIFIER_nondet_bool(); t5_evt1 = __VERIFIER_nondet_bool(); controller_evt2 = __VERIFIER_nondet_bool(); t6_x = __VERIFIER_nondet_float(); t5_l0 = __VERIFIER_nondet_bool(); t5_l1 = __VERIFIER_nondet_bool(); t6_evt0 = __VERIFIER_nondet_bool(); t6_evt1 = __VERIFIER_nondet_bool(); t7_x = __VERIFIER_nondet_float(); t6_l0 = __VERIFIER_nondet_bool(); t6_l1 = __VERIFIER_nondet_bool(); t7_evt0 = __VERIFIER_nondet_bool(); t7_l0 = __VERIFIER_nondet_bool(); t7_l1 = __VERIFIER_nondet_bool(); t8_evt0 = __VERIFIER_nondet_bool(); t8_evt1 = __VERIFIER_nondet_bool(); t9_x = __VERIFIER_nondet_float(); t8_l0 = __VERIFIER_nondet_bool(); t8_l1 = 
__VERIFIER_nondet_bool(); int __ok = (((((((( !(t14_l0 != 0)) && ( !(t14_l1 != 0))) && (t14_x == 0.0)) && (((( !(t14_l0 != 0)) && ( !(t14_l1 != 0))) || ((t14_l0 != 0) && ( !(t14_l1 != 0)))) || (((t14_l1 != 0) && ( !(t14_l0 != 0))) || ((t14_l0 != 0) && (t14_l1 != 0))))) && (((( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0))) || ((t14_evt0 != 0) && ( !(t14_evt1 != 0)))) || (((t14_evt1 != 0) && ( !(t14_evt0 != 0))) || ((t14_evt0 != 0) && (t14_evt1 != 0))))) && ((( !(t14_l0 != 0)) && ( !(t14_l1 != 0))) || (t14_x <= 5.0))) && ((((((( !(t13_l0 != 0)) && ( !(t13_l1 != 0))) && (t13_x == 0.0)) && (((( !(t13_l0 != 0)) && ( !(t13_l1 != 0))) || ((t13_l0 != 0) && ( !(t13_l1 != 0)))) || (((t13_l1 != 0) && ( !(t13_l0 != 0))) || ((t13_l0 != 0) && (t13_l1 != 0))))) && (((( !(t13_evt0 != 0)) && ( !(t13_evt1 != 0))) || ((t13_evt0 != 0) && ( !(t13_evt1 != 0)))) || (((t13_evt1 != 0) && ( !(t13_evt0 != 0))) || ((t13_evt0 != 0) && (t13_evt1 != 0))))) && ((( !(t13_l0 != 0)) && ( !(t13_l1 != 0))) || (t13_x <= 5.0))) && ((((((( !(t12_l0 != 0)) && ( !(t12_l1 != 0))) && (t12_x == 0.0)) && (((( !(t12_l0 != 0)) && ( !(t12_l1 != 0))) || ((t12_l0 != 0) && ( !(t12_l1 != 0)))) || (((t12_l1 != 0) && ( !(t12_l0 != 0))) || ((t12_l0 != 0) && (t12_l1 != 0))))) && (((( !(t12_evt0 != 0)) && ( !(t12_evt1 != 0))) || ((t12_evt0 != 0) && ( !(t12_evt1 != 0)))) || (((t12_evt1 != 0) && ( !(t12_evt0 != 0))) || ((t12_evt0 != 0) && (t12_evt1 != 0))))) && ((( !(t12_l0 != 0)) && ( !(t12_l1 != 0))) || (t12_x <= 5.0))) && ((((((( !(t11_l0 != 0)) && ( !(t11_l1 != 0))) && (t11_x == 0.0)) && (((( !(t11_l0 != 0)) && ( !(t11_l1 != 0))) || ((t11_l0 != 0) && ( !(t11_l1 != 0)))) || (((t11_l1 != 0) && ( !(t11_l0 != 0))) || ((t11_l0 != 0) && (t11_l1 != 0))))) && (((( !(t11_evt0 != 0)) && ( !(t11_evt1 != 0))) || ((t11_evt0 != 0) && ( !(t11_evt1 != 0)))) || (((t11_evt1 != 0) && ( !(t11_evt0 != 0))) || ((t11_evt0 != 0) && (t11_evt1 != 0))))) && ((( !(t11_l0 != 0)) && ( !(t11_l1 != 0))) || (t11_x <= 5.0))) && ((((((( !(t10_l0 != 0)) && ( 
!(t10_l1 != 0))) && (t10_x == 0.0)) && (((( !(t10_l0 != 0)) && ( !(t10_l1 != 0))) || ((t10_l0 != 0) && ( !(t10_l1 != 0)))) || (((t10_l1 != 0) && ( !(t10_l0 != 0))) || ((t10_l0 != 0) && (t10_l1 != 0))))) && (((( !(t10_evt0 != 0)) && ( !(t10_evt1 != 0))) || ((t10_evt0 != 0) && ( !(t10_evt1 != 0)))) || (((t10_evt1 != 0) && ( !(t10_evt0 != 0))) || ((t10_evt0 != 0) && (t10_evt1 != 0))))) && ((( !(t10_l0 != 0)) && ( !(t10_l1 != 0))) || (t10_x <= 5.0))) && ((((((( !(t9_l0 != 0)) && ( !(t9_l1 != 0))) && (t9_x == 0.0)) && (((( !(t9_l0 != 0)) && ( !(t9_l1 != 0))) || ((t9_l0 != 0) && ( !(t9_l1 != 0)))) || (((t9_l1 != 0) && ( !(t9_l0 != 0))) || ((t9_l0 != 0) && (t9_l1 != 0))))) && (((( !(t9_evt0 != 0)) && ( !(t9_evt1 != 0))) || ((t9_evt0 != 0) && ( !(t9_evt1 != 0)))) || (((t9_evt1 != 0) && ( !(t9_evt0 != 0))) || ((t9_evt0 != 0) && (t9_evt1 != 0))))) && ((( !(t9_l0 != 0)) && ( !(t9_l1 != 0))) || (t9_x <= 5.0))) && ((((((( !(t8_l0 != 0)) && ( !(t8_l1 != 0))) && (t8_x == 0.0)) && (((( !(t8_l0 != 0)) && ( !(t8_l1 != 0))) || ((t8_l0 != 0) && ( !(t8_l1 != 0)))) || (((t8_l1 != 0) && ( !(t8_l0 != 0))) || ((t8_l0 != 0) && (t8_l1 != 0))))) && (((( !(t8_evt0 != 0)) && ( !(t8_evt1 != 0))) || ((t8_evt0 != 0) && ( !(t8_evt1 != 0)))) || (((t8_evt1 != 0) && ( !(t8_evt0 != 0))) || ((t8_evt0 != 0) && (t8_evt1 != 0))))) && ((( !(t8_l0 != 0)) && ( !(t8_l1 != 0))) || (t8_x <= 5.0))) && ((((((( !(t7_l0 != 0)) && ( !(t7_l1 != 0))) && (t7_x == 0.0)) && (((( !(t7_l0 != 0)) && ( !(t7_l1 != 0))) || ((t7_l0 != 0) && ( !(t7_l1 != 0)))) || (((t7_l1 != 0) && ( !(t7_l0 != 0))) || ((t7_l0 != 0) && (t7_l1 != 0))))) && (((( !(t7_evt0 != 0)) && ( !(t7_evt1 != 0))) || ((t7_evt0 != 0) && ( !(t7_evt1 != 0)))) || (((t7_evt1 != 0) && ( !(t7_evt0 != 0))) || ((t7_evt0 != 0) && (t7_evt1 != 0))))) && ((( !(t7_l0 != 0)) && ( !(t7_l1 != 0))) || (t7_x <= 5.0))) && ((((((( !(t6_l0 != 0)) && ( !(t6_l1 != 0))) && (t6_x == 0.0)) && (((( !(t6_l0 != 0)) && ( !(t6_l1 != 0))) || ((t6_l0 != 0) && ( !(t6_l1 != 0)))) || (((t6_l1 != 0) 
&& ( !(t6_l0 != 0))) || ((t6_l0 != 0) && (t6_l1 != 0))))) && (((( !(t6_evt0 != 0)) && ( !(t6_evt1 != 0))) || ((t6_evt0 != 0) && ( !(t6_evt1 != 0)))) || (((t6_evt1 != 0) && ( !(t6_evt0 != 0))) || ((t6_evt0 != 0) && (t6_evt1 != 0))))) && ((( !(t6_l0 != 0)) && ( !(t6_l1 != 0))) || (t6_x <= 5.0))) && ((((((( !(t5_l0 != 0)) && ( !(t5_l1 != 0))) && (t5_x == 0.0)) && (((( !(t5_l0 != 0)) && ( !(t5_l1 != 0))) || ((t5_l0 != 0) && ( !(t5_l1 != 0)))) || (((t5_l1 != 0) && ( !(t5_l0 != 0))) || ((t5_l0 != 0) && (t5_l1 != 0))))) && (((( !(t5_evt0 != 0)) && ( !(t5_evt1 != 0))) || ((t5_evt0 != 0) && ( !(t5_evt1 != 0)))) || (((t5_evt1 != 0) && ( !(t5_evt0 != 0))) || ((t5_evt0 != 0) && (t5_evt1 != 0))))) && ((( !(t5_l0 != 0)) && ( !(t5_l1 != 0))) || (t5_x <= 5.0))) && ((((((( !(t4_l0 != 0)) && ( !(t4_l1 != 0))) && (t4_x == 0.0)) && (((( !(t4_l0 != 0)) && ( !(t4_l1 != 0))) || ((t4_l0 != 0) && ( !(t4_l1 != 0)))) || (((t4_l1 != 0) && ( !(t4_l0 != 0))) || ((t4_l0 != 0) && (t4_l1 != 0))))) && (((( !(t4_evt0 != 0)) && ( !(t4_evt1 != 0))) || ((t4_evt0 != 0) && ( !(t4_evt1 != 0)))) || (((t4_evt1 != 0) && ( !(t4_evt0 != 0))) || ((t4_evt0 != 0) && (t4_evt1 != 0))))) && ((( !(t4_l0 != 0)) && ( !(t4_l1 != 0))) || (t4_x <= 5.0))) && ((((((( !(t3_l0 != 0)) && ( !(t3_l1 != 0))) && (t3_x == 0.0)) && (((( !(t3_l0 != 0)) && ( !(t3_l1 != 0))) || ((t3_l0 != 0) && ( !(t3_l1 != 0)))) || (((t3_l1 != 0) && ( !(t3_l0 != 0))) || ((t3_l0 != 0) && (t3_l1 != 0))))) && (((( !(t3_evt0 != 0)) && ( !(t3_evt1 != 0))) || ((t3_evt0 != 0) && ( !(t3_evt1 != 0)))) || (((t3_evt1 != 0) && ( !(t3_evt0 != 0))) || ((t3_evt0 != 0) && (t3_evt1 != 0))))) && ((( !(t3_l0 != 0)) && ( !(t3_l1 != 0))) || (t3_x <= 5.0))) && ((((((( !(t2_l0 != 0)) && ( !(t2_l1 != 0))) && (t2_x == 0.0)) && (((( !(t2_l0 != 0)) && ( !(t2_l1 != 0))) || ((t2_l0 != 0) && ( !(t2_l1 != 0)))) || (((t2_l1 != 0) && ( !(t2_l0 != 0))) || ((t2_l0 != 0) && (t2_l1 != 0))))) && (((( !(t2_evt0 != 0)) && ( !(t2_evt1 != 0))) || ((t2_evt0 != 0) && ( !(t2_evt1 != 0)))) || 
(((t2_evt1 != 0) && ( !(t2_evt0 != 0))) || ((t2_evt0 != 0) && (t2_evt1 != 0))))) && ((( !(t2_l0 != 0)) && ( !(t2_l1 != 0))) || (t2_x <= 5.0))) && ((((((( !(t1_l0 != 0)) && ( !(t1_l1 != 0))) && (t1_x == 0.0)) && (((( !(t1_l0 != 0)) && ( !(t1_l1 != 0))) || ((t1_l0 != 0) && ( !(t1_l1 != 0)))) || (((t1_l1 != 0) && ( !(t1_l0 != 0))) || ((t1_l0 != 0) && (t1_l1 != 0))))) && (((( !(t1_evt0 != 0)) && ( !(t1_evt1 != 0))) || ((t1_evt0 != 0) && ( !(t1_evt1 != 0)))) || (((t1_evt1 != 0) && ( !(t1_evt0 != 0))) || ((t1_evt0 != 0) && (t1_evt1 != 0))))) && ((( !(t1_l0 != 0)) && ( !(t1_l1 != 0))) || (t1_x <= 5.0))) && ((((((( !(t0_l0 != 0)) && ( !(t0_l1 != 0))) && (t0_x == 0.0)) && (((( !(t0_l0 != 0)) && ( !(t0_l1 != 0))) || ((t0_l0 != 0) && ( !(t0_l1 != 0)))) || (((t0_l1 != 0) && ( !(t0_l0 != 0))) || ((t0_l0 != 0) && (t0_l1 != 0))))) && (((( !(t0_evt0 != 0)) && ( !(t0_evt1 != 0))) || ((t0_evt0 != 0) && ( !(t0_evt1 != 0)))) || (((t0_evt1 != 0) && ( !(t0_evt0 != 0))) || ((t0_evt0 != 0) && (t0_evt1 != 0))))) && ((( !(t0_l0 != 0)) && ( !(t0_l1 != 0))) || (t0_x <= 5.0))) && (((((((( !(controller_l0 != 0)) && ( !(controller_l1 != 0))) && (controller_z == 0.0)) && (((( !(controller_l0 != 0)) && ( !(controller_l1 != 0))) || ((controller_l0 != 0) && ( !(controller_l1 != 0)))) || (((controller_l1 != 0) && ( !(controller_l0 != 0))) || ((controller_l0 != 0) && (controller_l1 != 0))))) && (((( !(controller_evt2 != 0)) && (( !(controller_evt0 != 0)) && ( !(controller_evt1 != 0)))) || (( !(controller_evt2 != 0)) && ((controller_evt0 != 0) && ( !(controller_evt1 != 0))))) || ((( !(controller_evt2 != 0)) && ((controller_evt1 != 0) && ( !(controller_evt0 != 0)))) || ((( !(controller_evt2 != 0)) && ((controller_evt0 != 0) && (controller_evt1 != 0))) || ((controller_evt2 != 0) && (( !(controller_evt0 != 0)) && ( !(controller_evt1 != 0)))))))) && (((((((((((((((((controller_cnt == 0) || (controller_cnt == 1)) || (controller_cnt == 2)) || (controller_cnt == 3)) || (controller_cnt == 4)) || 
(controller_cnt == 5)) || (controller_cnt == 6)) || (controller_cnt == 7)) || (controller_cnt == 8)) || (controller_cnt == 9)) || (controller_cnt == 10)) || (controller_cnt == 11)) || (controller_cnt == 12)) || (controller_cnt == 13)) || (controller_cnt == 14)) || (controller_cnt == 15)) || (controller_cnt == 16))) && ((controller_z <= 1.0) || ( !(((controller_l0 != 0) && ( !(controller_l1 != 0))) || ((controller_l0 != 0) && (controller_l1 != 0)))))) && (((((((( !(gate_l0 != 0)) && ( !(gate_l1 != 0))) && (gate_y == 0.0)) && (((( !(gate_l0 != 0)) && ( !(gate_l1 != 0))) || ((gate_l0 != 0) && ( !(gate_l1 != 0)))) || (((gate_l1 != 0) && ( !(gate_l0 != 0))) || ((gate_l0 != 0) && (gate_l1 != 0))))) && (((( !(gate_evt0 != 0)) && ( !(gate_evt1 != 0))) || ((gate_evt0 != 0) && ( !(gate_evt1 != 0)))) || (((gate_evt1 != 0) && ( !(gate_evt0 != 0))) || ((gate_evt0 != 0) && (gate_evt1 != 0))))) && ((gate_y <= 1.0) || ( !((gate_l0 != 0) && ( !(gate_l1 != 0)))))) && ((gate_y <= 2.0) || ( !((gate_l0 != 0) && (gate_l1 != 0))))) && (0.0 <= delta)))))))))))))))))) && (delta == _diverge_delta)); while (__ok) { _x_t14_l1 = __VERIFIER_nondet_bool(); _x_t14_l0 = __VERIFIER_nondet_bool(); _x_t14_evt1 = __VERIFIER_nondet_bool(); _x_t14_evt0 = __VERIFIER_nondet_bool(); _x_t14_x = __VERIFIER_nondet_float(); _x_t13_l1 = __VERIFIER_nondet_bool(); _x_t13_l0 = __VERIFIER_nondet_bool(); _x_t13_evt1 = __VERIFIER_nondet_bool(); _x_t13_evt0 = __VERIFIER_nondet_bool(); _x_t13_x = __VERIFIER_nondet_float(); _x_t12_l1 = __VERIFIER_nondet_bool(); _x_t12_l0 = __VERIFIER_nondet_bool(); _x_t12_evt1 = __VERIFIER_nondet_bool(); _x_t12_evt0 = __VERIFIER_nondet_bool(); _x_t12_x = __VERIFIER_nondet_float(); _x_t11_x = __VERIFIER_nondet_float(); _x_t10_l1 = __VERIFIER_nondet_bool(); _x_t10_l0 = __VERIFIER_nondet_bool(); _x__diverge_delta = __VERIFIER_nondet_float(); _x_t10_evt1 = __VERIFIER_nondet_bool(); _x_t10_evt0 = __VERIFIER_nondet_bool(); _x_t10_x = __VERIFIER_nondet_float(); _x_t9_evt1 = 
__VERIFIER_nondet_bool(); _x_t9_evt0 = __VERIFIER_nondet_bool(); _x_t2_l0 = __VERIFIER_nondet_bool(); _x_t9_l1 = __VERIFIER_nondet_bool(); _x_controller_cnt = __VERIFIER_nondet_int(); _x_t2_evt0 = __VERIFIER_nondet_bool(); _x_t1_l1 = __VERIFIER_nondet_bool(); _x_t1_l0 = __VERIFIER_nondet_bool(); _x_gate_l1 = __VERIFIER_nondet_bool(); _x_t1_evt1 = __VERIFIER_nondet_bool(); _x_t2_x = __VERIFIER_nondet_float(); _x_t1_evt0 = __VERIFIER_nondet_bool(); _x_t0_l1 = __VERIFIER_nondet_bool(); _x_gate_evt1 = __VERIFIER_nondet_bool(); _x_t1_x = __VERIFIER_nondet_float(); _x_t0_evt1 = __VERIFIER_nondet_bool(); _x_t0_l0 = __VERIFIER_nondet_bool(); _x_gate_evt0 = __VERIFIER_nondet_bool(); _x_t0_evt0 = __VERIFIER_nondet_bool(); _x_gate_y = __VERIFIER_nondet_float(); _x_t0_x = __VERIFIER_nondet_float(); _x_delta = __VERIFIER_nondet_float(); _x_t7_evt1 = __VERIFIER_nondet_bool(); _x_t8_x = __VERIFIER_nondet_float(); _x_t2_l1 = __VERIFIER_nondet_bool(); _x_t3_evt0 = __VERIFIER_nondet_bool(); _x_gate_l0 = __VERIFIER_nondet_bool(); _x_t11_l0 = __VERIFIER_nondet_bool(); _x_controller_l0 = __VERIFIER_nondet_bool(); _x_t11_l1 = __VERIFIER_nondet_bool(); _x_controller_l1 = __VERIFIER_nondet_bool(); _x_t2_evt1 = __VERIFIER_nondet_bool(); _x_t3_x = __VERIFIER_nondet_float(); _x_t11_evt0 = __VERIFIER_nondet_bool(); _x_controller_evt0 = __VERIFIER_nondet_bool(); _x_t11_evt1 = __VERIFIER_nondet_bool(); _x_controller_evt1 = __VERIFIER_nondet_bool(); _x_t9_l0 = __VERIFIER_nondet_bool(); _x_controller_z = __VERIFIER_nondet_float(); _x_t4_l0 = __VERIFIER_nondet_bool(); _x_t3_evt1 = __VERIFIER_nondet_bool(); _x_t4_x = __VERIFIER_nondet_float(); _x_t3_l0 = __VERIFIER_nondet_bool(); _x_t3_l1 = __VERIFIER_nondet_bool(); _x_t4_evt0 = __VERIFIER_nondet_bool(); _x_t4_evt1 = __VERIFIER_nondet_bool(); _x_t5_x = __VERIFIER_nondet_float(); _x_t4_l1 = __VERIFIER_nondet_bool(); _x_t5_evt0 = __VERIFIER_nondet_bool(); _x_t5_evt1 = __VERIFIER_nondet_bool(); _x_controller_evt2 = __VERIFIER_nondet_bool(); _x_t6_x = 
__VERIFIER_nondet_float(); _x_t5_l0 = __VERIFIER_nondet_bool(); _x_t5_l1 = __VERIFIER_nondet_bool(); _x_t6_evt0 = __VERIFIER_nondet_bool(); _x_t6_evt1 = __VERIFIER_nondet_bool(); _x_t7_x = __VERIFIER_nondet_float(); _x_t6_l0 = __VERIFIER_nondet_bool(); _x_t6_l1 = __VERIFIER_nondet_bool(); _x_t7_evt0 = __VERIFIER_nondet_bool(); _x_t7_l0 = __VERIFIER_nondet_bool(); _x_t7_l1 = __VERIFIER_nondet_bool(); _x_t8_evt0 = __VERIFIER_nondet_bool(); _x_t8_evt1 = __VERIFIER_nondet_bool(); _x_t9_x = __VERIFIER_nondet_float(); _x_t8_l0 = __VERIFIER_nondet_bool(); _x_t8_l1 = __VERIFIER_nondet_bool(); __ok = (((((((((((((((((((((((((((((((( !(_x_t14_l0 != 0)) && ( !(_x_t14_l1 != 0))) || ((_x_t14_l0 != 0) && ( !(_x_t14_l1 != 0)))) || (((_x_t14_l1 != 0) && ( !(_x_t14_l0 != 0))) || ((_x_t14_l0 != 0) && (_x_t14_l1 != 0)))) && (((( !(_x_t14_evt0 != 0)) && ( !(_x_t14_evt1 != 0))) || ((_x_t14_evt0 != 0) && ( !(_x_t14_evt1 != 0)))) || (((_x_t14_evt1 != 0) && ( !(_x_t14_evt0 != 0))) || ((_x_t14_evt0 != 0) && (_x_t14_evt1 != 0))))) && ((( !(_x_t14_l0 != 0)) && ( !(_x_t14_l1 != 0))) || (_x_t14_x <= 5.0))) && (((((t14_l0 != 0) == (_x_t14_l0 != 0)) && ((t14_l1 != 0) == (_x_t14_l1 != 0))) && ((delta + (t14_x + (-1.0 * _x_t14_x))) == 0.0)) || ( !(( !(delta <= 0.0)) || (( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0))))))) && (((((_x_t14_l0 != 0) && ( !(_x_t14_l1 != 0))) && ((t14_evt0 != 0) && ( !(t14_evt1 != 0)))) && (_x_t14_x == 0.0)) || ( !((( !(t14_l0 != 0)) && ( !(t14_l1 != 0))) && ((delta == 0.0) && ( !(( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0))))))))) && (((((_x_t14_l1 != 0) && ( !(_x_t14_l0 != 0))) && ( !(t14_x <= 2.0))) && (((t14_evt0 != 0) && (t14_evt1 != 0)) && (t14_x == _x_t14_x))) || ( !(((t14_l0 != 0) && ( !(t14_l1 != 0))) && ((delta == 0.0) && ( !(( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0))))))))) && (((t14_x == _x_t14_x) && (((_x_t14_l0 != 0) && (_x_t14_l1 != 0)) && ((t14_evt0 != 0) && (t14_evt1 != 0)))) || ( !(((t14_l1 != 0) && ( !(t14_l0 != 0))) && ((delta == 0.0) && ( !(( !(t14_evt0 != 
0)) && ( !(t14_evt1 != 0))))))))) && ((((( !(_x_t14_l0 != 0)) && ( !(_x_t14_l1 != 0))) && (t14_x <= 5.0)) && (((t14_evt1 != 0) && ( !(t14_evt0 != 0))) && (t14_x == _x_t14_x))) || ( !(((t14_l0 != 0) && (t14_l1 != 0)) && ((delta == 0.0) && ( !(( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0))))))))) && (((((((((((( !(_x_t13_l0 != 0)) && ( !(_x_t13_l1 != 0))) || ((_x_t13_l0 != 0) && ( !(_x_t13_l1 != 0)))) || (((_x_t13_l1 != 0) && ( !(_x_t13_l0 != 0))) || ((_x_t13_l0 != 0) && (_x_t13_l1 != 0)))) && (((( !(_x_t13_evt0 != 0)) && ( !(_x_t13_evt1 != 0))) || ((_x_t13_evt0 != 0) && ( !(_x_t13_evt1 != 0)))) || (((_x_t13_evt1 != 0) && ( !(_x_t13_evt0 != 0))) || ((_x_t13_evt0 != 0) && (_x_t13_evt1 != 0))))) && ((( !(_x_t13_l0 != 0)) && ( !(_x_t13_l1 != 0))) || (_x_t13_x <= 5.0))) && (((((t13_l0 != 0) == (_x_t13_l0 != 0)) && ((t13_l1 != 0) == (_x_t13_l1 != 0))) && ((delta + (t13_x + (-1.0 * _x_t13_x))) == 0.0)) || ( !(( !(delta <= 0.0)) || (( !(t13_evt0 != 0)) && ( !(t13_evt1 != 0))))))) && (((((_x_t13_l0 != 0) && ( !(_x_t13_l1 != 0))) && ((t13_evt0 != 0) && ( !(t13_evt1 != 0)))) && (_x_t13_x == 0.0)) || ( !((( !(t13_l0 != 0)) && ( !(t13_l1 != 0))) && ((delta == 0.0) && ( !(( !(t13_evt0 != 0)) && ( !(t13_evt1 != 0))))))))) && (((((_x_t13_l1 != 0) && ( !(_x_t13_l0 != 0))) && ( !(t13_x <= 2.0))) && (((t13_evt0 != 0) && (t13_evt1 != 0)) && (t13_x == _x_t13_x))) || ( !(((t13_l0 != 0) && ( !(t13_l1 != 0))) && ((delta == 0.0) && ( !(( !(t13_evt0 != 0)) && ( !(t13_evt1 != 0))))))))) && (((t13_x == _x_t13_x) && (((_x_t13_l0 != 0) && (_x_t13_l1 != 0)) && ((t13_evt0 != 0) && (t13_evt1 != 0)))) || ( !(((t13_l1 != 0) && ( !(t13_l0 != 0))) && ((delta == 0.0) && ( !(( !(t13_evt0 != 0)) && ( !(t13_evt1 != 0))))))))) && ((((( !(_x_t13_l0 != 0)) && ( !(_x_t13_l1 != 0))) && (t13_x <= 5.0)) && (((t13_evt1 != 0) && ( !(t13_evt0 != 0))) && (t13_x == _x_t13_x))) || ( !(((t13_l0 != 0) && (t13_l1 != 0)) && ((delta == 0.0) && ( !(( !(t13_evt0 != 0)) && ( !(t13_evt1 != 0))))))))) && (((((((((((( !(_x_t12_l0 != 
0)) && ( !(_x_t12_l1 != 0))) || ((_x_t12_l0 != 0) && ( !(_x_t12_l1 != 0)))) || (((_x_t12_l1 != 0) && ( !(_x_t12_l0 != 0))) || ((_x_t12_l0 != 0) && (_x_t12_l1 != 0)))) && (((( !(_x_t12_evt0 != 0)) && ( !(_x_t12_evt1 != 0))) || ((_x_t12_evt0 != 0) && ( !(_x_t12_evt1 != 0)))) || (((_x_t12_evt1 != 0) && ( !(_x_t12_evt0 != 0))) || ((_x_t12_evt0 != 0) && (_x_t12_evt1 != 0))))) && ((( !(_x_t12_l0 != 0)) && ( !(_x_t12_l1 != 0))) || (_x_t12_x <= 5.0))) && (((((t12_l0 != 0) == (_x_t12_l0 != 0)) && ((t12_l1 != 0) == (_x_t12_l1 != 0))) && ((delta + (t12_x + (-1.0 * _x_t12_x))) == 0.0)) || ( !(( !(delta <= 0.0)) || (( !(t12_evt0 != 0)) && ( !(t12_evt1 != 0))))))) && (((((_x_t12_l0 != 0) && ( !(_x_t12_l1 != 0))) && ((t12_evt0 != 0) && ( !(t12_evt1 != 0)))) && (_x_t12_x == 0.0)) || ( !((( !(t12_l0 != 0)) && ( !(t12_l1 != 0))) && ((delta == 0.0) && ( !(( !(t12_evt0 != 0)) && ( !(t12_evt1 != 0))))))))) && (((((_x_t12_l1 != 0) && ( !(_x_t12_l0 != 0))) && ( !(t12_x <= 2.0))) && (((t12_evt0 != 0) && (t12_evt1 != 0)) && (t12_x == _x_t12_x))) || ( !(((t12_l0 != 0) && ( !(t12_l1 != 0))) && ((delta == 0.0) && ( !(( !(t12_evt0 != 0)) && ( !(t12_evt1 != 0))))))))) && (((t12_x == _x_t12_x) && (((_x_t12_l0 != 0) && (_x_t12_l1 != 0)) && ((t12_evt0 != 0) && (t12_evt1 != 0)))) || ( !(((t12_l1 != 0) && ( !(t12_l0 != 0))) && ((delta == 0.0) && ( !(( !(t12_evt0 != 0)) && ( !(t12_evt1 != 0))))))))) && ((((( !(_x_t12_l0 != 0)) && ( !(_x_t12_l1 != 0))) && (t12_x <= 5.0)) && (((t12_evt1 != 0) && ( !(t12_evt0 != 0))) && (t12_x == _x_t12_x))) || ( !(((t12_l0 != 0) && (t12_l1 != 0)) && ((delta == 0.0) && ( !(( !(t12_evt0 != 0)) && ( !(t12_evt1 != 0))))))))) && (((((((((((( !(_x_t11_l0 != 0)) && ( !(_x_t11_l1 != 0))) || ((_x_t11_l0 != 0) && ( !(_x_t11_l1 != 0)))) || (((_x_t11_l1 != 0) && ( !(_x_t11_l0 != 0))) || ((_x_t11_l0 != 0) && (_x_t11_l1 != 0)))) && (((( !(_x_t11_evt0 != 0)) && ( !(_x_t11_evt1 != 0))) || ((_x_t11_evt0 != 0) && ( !(_x_t11_evt1 != 0)))) || (((_x_t11_evt1 != 0) && ( !(_x_t11_evt0 != 
0))) || ((_x_t11_evt0 != 0) && (_x_t11_evt1 != 0))))) && ((( !(_x_t11_l0 != 0)) && ( !(_x_t11_l1 != 0))) || (_x_t11_x <= 5.0))) && (((((t11_l0 != 0) == (_x_t11_l0 != 0)) && ((t11_l1 != 0) == (_x_t11_l1 != 0))) && ((delta + (t11_x + (-1.0 * _x_t11_x))) == 0.0)) || ( !(( !(delta <= 0.0)) || (( !(t11_evt0 != 0)) && ( !(t11_evt1 != 0))))))) && (((((_x_t11_l0 != 0) && ( !(_x_t11_l1 != 0))) && ((t11_evt0 != 0) && ( !(t11_evt1 != 0)))) && (_x_t11_x == 0.0)) || ( !((( !(t11_l0 != 0)) && ( !(t11_l1 != 0))) && ((delta == 0.0) && ( !(( !(t11_evt0 != 0)) && ( !(t11_evt1 != 0))))))))) && (((((_x_t11_l1 != 0) && ( !(_x_t11_l0 != 0))) && ( !(t11_x <= 2.0))) && (((t11_evt0 != 0) && (t11_evt1 != 0)) && (t11_x == _x_t11_x))) || ( !(((t11_l0 != 0) && ( !(t11_l1 != 0))) && ((delta == 0.0) && ( !(( !(t11_evt0 != 0)) && ( !(t11_evt1 != 0))))))))) && (((t11_x == _x_t11_x) && (((_x_t11_l0 != 0) && (_x_t11_l1 != 0)) && ((t11_evt0 != 0) && (t11_evt1 != 0)))) || ( !(((t11_l1 != 0) && ( !(t11_l0 != 0))) && ((delta == 0.0) && ( !(( !(t11_evt0 != 0)) && ( !(t11_evt1 != 0))))))))) && ((((( !(_x_t11_l0 != 0)) && ( !(_x_t11_l1 != 0))) && (t11_x <= 5.0)) && (((t11_evt1 != 0) && ( !(t11_evt0 != 0))) && (t11_x == _x_t11_x))) || ( !(((t11_l0 != 0) && (t11_l1 != 0)) && ((delta == 0.0) && ( !(( !(t11_evt0 != 0)) && ( !(t11_evt1 != 0))))))))) && (((((((((((( !(_x_t10_l0 != 0)) && ( !(_x_t10_l1 != 0))) || ((_x_t10_l0 != 0) && ( !(_x_t10_l1 != 0)))) || (((_x_t10_l1 != 0) && ( !(_x_t10_l0 != 0))) || ((_x_t10_l0 != 0) && (_x_t10_l1 != 0)))) && (((( !(_x_t10_evt0 != 0)) && ( !(_x_t10_evt1 != 0))) || ((_x_t10_evt0 != 0) && ( !(_x_t10_evt1 != 0)))) || (((_x_t10_evt1 != 0) && ( !(_x_t10_evt0 != 0))) || ((_x_t10_evt0 != 0) && (_x_t10_evt1 != 0))))) && ((( !(_x_t10_l0 != 0)) && ( !(_x_t10_l1 != 0))) || (_x_t10_x <= 5.0))) && (((((t10_l0 != 0) == (_x_t10_l0 != 0)) && ((t10_l1 != 0) == (_x_t10_l1 != 0))) && ((delta + (t10_x + (-1.0 * _x_t10_x))) == 0.0)) || ( !(( !(delta <= 0.0)) || (( !(t10_evt0 != 0)) && ( 
!(t10_evt1 != 0))))))) && (((((_x_t10_l0 != 0) && ( !(_x_t10_l1 != 0))) && ((t10_evt0 != 0) && ( !(t10_evt1 != 0)))) && (_x_t10_x == 0.0)) || ( !((( !(t10_l0 != 0)) && ( !(t10_l1 != 0))) && ((delta == 0.0) && ( !(( !(t10_evt0 != 0)) && ( !(t10_evt1 != 0))))))))) && (((((_x_t10_l1 != 0) && ( !(_x_t10_l0 != 0))) && ( !(t10_x <= 2.0))) && (((t10_evt0 != 0) && (t10_evt1 != 0)) && (t10_x == _x_t10_x))) || ( !(((t10_l0 != 0) && ( !(t10_l1 != 0))) && ((delta == 0.0) && ( !(( !(t10_evt0 != 0)) && ( !(t10_evt1 != 0))))))))) && (((t10_x == _x_t10_x) && (((_x_t10_l0 != 0) && (_x_t10_l1 != 0)) && ((t10_evt0 != 0) && (t10_evt1 != 0)))) || ( !(((t10_l1 != 0) && ( !(t10_l0 != 0))) && ((delta == 0.0) && ( !(( !(t10_evt0 != 0)) && ( !(t10_evt1 != 0))))))))) && ((((( !(_x_t10_l0 != 0)) && ( !(_x_t10_l1 != 0))) && (t10_x <= 5.0)) && (((t10_evt1 != 0) && ( !(t10_evt0 != 0))) && (t10_x == _x_t10_x))) || ( !(((t10_l0 != 0) && (t10_l1 != 0)) && ((delta == 0.0) && ( !(( !(t10_evt0 != 0)) && ( !(t10_evt1 != 0))))))))) && (((((((((((( !(_x_t9_l0 != 0)) && ( !(_x_t9_l1 != 0))) || ((_x_t9_l0 != 0) && ( !(_x_t9_l1 != 0)))) || (((_x_t9_l1 != 0) && ( !(_x_t9_l0 != 0))) || ((_x_t9_l0 != 0) && (_x_t9_l1 != 0)))) && (((( !(_x_t9_evt0 != 0)) && ( !(_x_t9_evt1 != 0))) || ((_x_t9_evt0 != 0) && ( !(_x_t9_evt1 != 0)))) || (((_x_t9_evt1 != 0) && ( !(_x_t9_evt0 != 0))) || ((_x_t9_evt0 != 0) && (_x_t9_evt1 != 0))))) && ((( !(_x_t9_l0 != 0)) && ( !(_x_t9_l1 != 0))) || (_x_t9_x <= 5.0))) && (((((t9_l0 != 0) == (_x_t9_l0 != 0)) && ((t9_l1 != 0) == (_x_t9_l1 != 0))) && ((delta + (t9_x + (-1.0 * _x_t9_x))) == 0.0)) || ( !(( !(delta <= 0.0)) || (( !(t9_evt0 != 0)) && ( !(t9_evt1 != 0))))))) && (((((_x_t9_l0 != 0) && ( !(_x_t9_l1 != 0))) && ((t9_evt0 != 0) && ( !(t9_evt1 != 0)))) && (_x_t9_x == 0.0)) || ( !((( !(t9_l0 != 0)) && ( !(t9_l1 != 0))) && ((delta == 0.0) && ( !(( !(t9_evt0 != 0)) && ( !(t9_evt1 != 0))))))))) && (((((_x_t9_l1 != 0) && ( !(_x_t9_l0 != 0))) && ( !(t9_x <= 2.0))) && (((t9_evt0 != 0) && 
(t9_evt1 != 0)) && (t9_x == _x_t9_x))) || ( !(((t9_l0 != 0) && ( !(t9_l1 != 0))) && ((delta == 0.0) && ( !(( !(t9_evt0 != 0)) && ( !(t9_evt1 != 0))))))))) && (((t9_x == _x_t9_x) && (((_x_t9_l0 != 0) && (_x_t9_l1 != 0)) && ((t9_evt0 != 0) && (t9_evt1 != 0)))) || ( !(((t9_l1 != 0) && ( !(t9_l0 != 0))) && ((delta == 0.0) && ( !(( !(t9_evt0 != 0)) && ( !(t9_evt1 != 0))))))))) && ((((( !(_x_t9_l0 != 0)) && ( !(_x_t9_l1 != 0))) && (t9_x <= 5.0)) && (((t9_evt1 != 0) && ( !(t9_evt0 != 0))) && (t9_x == _x_t9_x))) || ( !(((t9_l0 != 0) && (t9_l1 != 0)) && ((delta == 0.0) && ( !(( !(t9_evt0 != 0)) && ( !(t9_evt1 != 0))))))))) && (((((((((((( !(_x_t8_l0 != 0)) && ( !(_x_t8_l1 != 0))) || ((_x_t8_l0 != 0) && ( !(_x_t8_l1 != 0)))) || (((_x_t8_l1 != 0) && ( !(_x_t8_l0 != 0))) || ((_x_t8_l0 != 0) && (_x_t8_l1 != 0)))) && (((( !(_x_t8_evt0 != 0)) && ( !(_x_t8_evt1 != 0))) || ((_x_t8_evt0 != 0) && ( !(_x_t8_evt1 != 0)))) || (((_x_t8_evt1 != 0) && ( !(_x_t8_evt0 != 0))) || ((_x_t8_evt0 != 0) && (_x_t8_evt1 != 0))))) && ((( !(_x_t8_l0 != 0)) && ( !(_x_t8_l1 != 0))) || (_x_t8_x <= 5.0))) && (((((t8_l0 != 0) == (_x_t8_l0 != 0)) && ((t8_l1 != 0) == (_x_t8_l1 != 0))) && ((delta + (t8_x + (-1.0 * _x_t8_x))) == 0.0)) || ( !(( !(delta <= 0.0)) || (( !(t8_evt0 != 0)) && ( !(t8_evt1 != 0))))))) && (((((_x_t8_l0 != 0) && ( !(_x_t8_l1 != 0))) && ((t8_evt0 != 0) && ( !(t8_evt1 != 0)))) && (_x_t8_x == 0.0)) || ( !((( !(t8_l0 != 0)) && ( !(t8_l1 != 0))) && ((delta == 0.0) && ( !(( !(t8_evt0 != 0)) && ( !(t8_evt1 != 0))))))))) && (((((_x_t8_l1 != 0) && ( !(_x_t8_l0 != 0))) && ( !(t8_x <= 2.0))) && (((t8_evt0 != 0) && (t8_evt1 != 0)) && (t8_x == _x_t8_x))) || ( !(((t8_l0 != 0) && ( !(t8_l1 != 0))) && ((delta == 0.0) && ( !(( !(t8_evt0 != 0)) && ( !(t8_evt1 != 0))))))))) && (((t8_x == _x_t8_x) && (((_x_t8_l0 != 0) && (_x_t8_l1 != 0)) && ((t8_evt0 != 0) && (t8_evt1 != 0)))) || ( !(((t8_l1 != 0) && ( !(t8_l0 != 0))) && ((delta == 0.0) && ( !(( !(t8_evt0 != 0)) && ( !(t8_evt1 != 0))))))))) && ((((( 
!(_x_t8_l0 != 0)) && ( !(_x_t8_l1 != 0))) && (t8_x <= 5.0)) && (((t8_evt1 != 0) && ( !(t8_evt0 != 0))) && (t8_x == _x_t8_x))) || ( !(((t8_l0 != 0) && (t8_l1 != 0)) && ((delta == 0.0) && ( !(( !(t8_evt0 != 0)) && ( !(t8_evt1 != 0))))))))) && (((((((((((( !(_x_t7_l0 != 0)) && ( !(_x_t7_l1 != 0))) || ((_x_t7_l0 != 0) && ( !(_x_t7_l1 != 0)))) || (((_x_t7_l1 != 0) && ( !(_x_t7_l0 != 0))) || ((_x_t7_l0 != 0) && (_x_t7_l1 != 0)))) && (((( !(_x_t7_evt0 != 0)) && ( !(_x_t7_evt1 != 0))) || ((_x_t7_evt0 != 0) && ( !(_x_t7_evt1 != 0)))) || (((_x_t7_evt1 != 0) && ( !(_x_t7_evt0 != 0))) || ((_x_t7_evt0 != 0) && (_x_t7_evt1 != 0))))) && ((( !(_x_t7_l0 != 0)) && ( !(_x_t7_l1 != 0))) || (_x_t7_x <= 5.0))) && (((((t7_l0 != 0) == (_x_t7_l0 != 0)) && ((t7_l1 != 0) == (_x_t7_l1 != 0))) && ((delta + (t7_x + (-1.0 * _x_t7_x))) == 0.0)) || ( !(( !(delta <= 0.0)) || (( !(t7_evt0 != 0)) && ( !(t7_evt1 != 0))))))) && (((((_x_t7_l0 != 0) && ( !(_x_t7_l1 != 0))) && ((t7_evt0 != 0) && ( !(t7_evt1 != 0)))) && (_x_t7_x == 0.0)) || ( !((( !(t7_l0 != 0)) && ( !(t7_l1 != 0))) && ((delta == 0.0) && ( !(( !(t7_evt0 != 0)) && ( !(t7_evt1 != 0))))))))) && (((((_x_t7_l1 != 0) && ( !(_x_t7_l0 != 0))) && ( !(t7_x <= 2.0))) && (((t7_evt0 != 0) && (t7_evt1 != 0)) && (t7_x == _x_t7_x))) || ( !(((t7_l0 != 0) && ( !(t7_l1 != 0))) && ((delta == 0.0) && ( !(( !(t7_evt0 != 0)) && ( !(t7_evt1 != 0))))))))) && (((t7_x == _x_t7_x) && (((_x_t7_l0 != 0) && (_x_t7_l1 != 0)) && ((t7_evt0 != 0) && (t7_evt1 != 0)))) || ( !(((t7_l1 != 0) && ( !(t7_l0 != 0))) && ((delta == 0.0) && ( !(( !(t7_evt0 != 0)) && ( !(t7_evt1 != 0))))))))) && ((((( !(_x_t7_l0 != 0)) && ( !(_x_t7_l1 != 0))) && (t7_x <= 5.0)) && (((t7_evt1 != 0) && ( !(t7_evt0 != 0))) && (t7_x == _x_t7_x))) || ( !(((t7_l0 != 0) && (t7_l1 != 0)) && ((delta == 0.0) && ( !(( !(t7_evt0 != 0)) && ( !(t7_evt1 != 0))))))))) && (((((((((((( !(_x_t6_l0 != 0)) && ( !(_x_t6_l1 != 0))) || ((_x_t6_l0 != 0) && ( !(_x_t6_l1 != 0)))) || (((_x_t6_l1 != 0) && ( !(_x_t6_l0 != 0))) || 
((_x_t6_l0 != 0) && (_x_t6_l1 != 0)))) && (((( !(_x_t6_evt0 != 0)) && ( !(_x_t6_evt1 != 0))) || ((_x_t6_evt0 != 0) && ( !(_x_t6_evt1 != 0)))) || (((_x_t6_evt1 != 0) && ( !(_x_t6_evt0 != 0))) || ((_x_t6_evt0 != 0) && (_x_t6_evt1 != 0))))) && ((( !(_x_t6_l0 != 0)) && ( !(_x_t6_l1 != 0))) || (_x_t6_x <= 5.0))) && (((((t6_l0 != 0) == (_x_t6_l0 != 0)) && ((t6_l1 != 0) == (_x_t6_l1 != 0))) && ((delta + (t6_x + (-1.0 * _x_t6_x))) == 0.0)) || ( !(( !(delta <= 0.0)) || (( !(t6_evt0 != 0)) && ( !(t6_evt1 != 0))))))) && (((((_x_t6_l0 != 0) && ( !(_x_t6_l1 != 0))) && ((t6_evt0 != 0) && ( !(t6_evt1 != 0)))) && (_x_t6_x == 0.0)) || ( !((( !(t6_l0 != 0)) && ( !(t6_l1 != 0))) && ((delta == 0.0) && ( !(( !(t6_evt0 != 0)) && ( !(t6_evt1 != 0))))))))) && (((((_x_t6_l1 != 0) && ( !(_x_t6_l0 != 0))) && ( !(t6_x <= 2.0))) && (((t6_evt0 != 0) && (t6_evt1 != 0)) && (t6_x == _x_t6_x))) || ( !(((t6_l0 != 0) && ( !(t6_l1 != 0))) && ((delta == 0.0) && ( !(( !(t6_evt0 != 0)) && ( !(t6_evt1 != 0))))))))) && (((t6_x == _x_t6_x) && (((_x_t6_l0 != 0) && (_x_t6_l1 != 0)) && ((t6_evt0 != 0) && (t6_evt1 != 0)))) || ( !(((t6_l1 != 0) && ( !(t6_l0 != 0))) && ((delta == 0.0) && ( !(( !(t6_evt0 != 0)) && ( !(t6_evt1 != 0))))))))) && ((((( !(_x_t6_l0 != 0)) && ( !(_x_t6_l1 != 0))) && (t6_x <= 5.0)) && (((t6_evt1 != 0) && ( !(t6_evt0 != 0))) && (t6_x == _x_t6_x))) || ( !(((t6_l0 != 0) && (t6_l1 != 0)) && ((delta == 0.0) && ( !(( !(t6_evt0 != 0)) && ( !(t6_evt1 != 0))))))))) && (((((((((((( !(_x_t5_l0 != 0)) && ( !(_x_t5_l1 != 0))) || ((_x_t5_l0 != 0) && ( !(_x_t5_l1 != 0)))) || (((_x_t5_l1 != 0) && ( !(_x_t5_l0 != 0))) || ((_x_t5_l0 != 0) && (_x_t5_l1 != 0)))) && (((( !(_x_t5_evt0 != 0)) && ( !(_x_t5_evt1 != 0))) || ((_x_t5_evt0 != 0) && ( !(_x_t5_evt1 != 0)))) || (((_x_t5_evt1 != 0) && ( !(_x_t5_evt0 != 0))) || ((_x_t5_evt0 != 0) && (_x_t5_evt1 != 0))))) && ((( !(_x_t5_l0 != 0)) && ( !(_x_t5_l1 != 0))) || (_x_t5_x <= 5.0))) && (((((t5_l0 != 0) == (_x_t5_l0 != 0)) && ((t5_l1 != 0) == (_x_t5_l1 != 0))) && 
((delta + (t5_x + (-1.0 * _x_t5_x))) == 0.0)) || ( !(( !(delta <= 0.0)) || (( !(t5_evt0 != 0)) && ( !(t5_evt1 != 0))))))) && (((((_x_t5_l0 != 0) && ( !(_x_t5_l1 != 0))) && ((t5_evt0 != 0) && ( !(t5_evt1 != 0)))) && (_x_t5_x == 0.0)) || ( !((( !(t5_l0 != 0)) && ( !(t5_l1 != 0))) && ((delta == 0.0) && ( !(( !(t5_evt0 != 0)) && ( !(t5_evt1 != 0))))))))) && (((((_x_t5_l1 != 0) && ( !(_x_t5_l0 != 0))) && ( !(t5_x <= 2.0))) && (((t5_evt0 != 0) && (t5_evt1 != 0)) && (t5_x == _x_t5_x))) || ( !(((t5_l0 != 0) && ( !(t5_l1 != 0))) && ((delta == 0.0) && ( !(( !(t5_evt0 != 0)) && ( !(t5_evt1 != 0))))))))) && (((t5_x == _x_t5_x) && (((_x_t5_l0 != 0) && (_x_t5_l1 != 0)) && ((t5_evt0 != 0) && (t5_evt1 != 0)))) || ( !(((t5_l1 != 0) && ( !(t5_l0 != 0))) && ((delta == 0.0) && ( !(( !(t5_evt0 != 0)) && ( !(t5_evt1 != 0))))))))) && ((((( !(_x_t5_l0 != 0)) && ( !(_x_t5_l1 != 0))) && (t5_x <= 5.0)) && (((t5_evt1 != 0) && ( !(t5_evt0 != 0))) && (t5_x == _x_t5_x))) || ( !(((t5_l0 != 0) && (t5_l1 != 0)) && ((delta == 0.0) && ( !(( !(t5_evt0 != 0)) && ( !(t5_evt1 != 0))))))))) && (((((((((((( !(_x_t4_l0 != 0)) && ( !(_x_t4_l1 != 0))) || ((_x_t4_l0 != 0) && ( !(_x_t4_l1 != 0)))) || (((_x_t4_l1 != 0) && ( !(_x_t4_l0 != 0))) || ((_x_t4_l0 != 0) && (_x_t4_l1 != 0)))) && (((( !(_x_t4_evt0 != 0)) && ( !(_x_t4_evt1 != 0))) || ((_x_t4_evt0 != 0) && ( !(_x_t4_evt1 != 0)))) || (((_x_t4_evt1 != 0) && ( !(_x_t4_evt0 != 0))) || ((_x_t4_evt0 != 0) && (_x_t4_evt1 != 0))))) && ((( !(_x_t4_l0 != 0)) && ( !(_x_t4_l1 != 0))) || (_x_t4_x <= 5.0))) && (((((t4_l0 != 0) == (_x_t4_l0 != 0)) && ((t4_l1 != 0) == (_x_t4_l1 != 0))) && ((delta + (t4_x + (-1.0 * _x_t4_x))) == 0.0)) || ( !(( !(delta <= 0.0)) || (( !(t4_evt0 != 0)) && ( !(t4_evt1 != 0))))))) && (((((_x_t4_l0 != 0) && ( !(_x_t4_l1 != 0))) && ((t4_evt0 != 0) && ( !(t4_evt1 != 0)))) && (_x_t4_x == 0.0)) || ( !((( !(t4_l0 != 0)) && ( !(t4_l1 != 0))) && ((delta == 0.0) && ( !(( !(t4_evt0 != 0)) && ( !(t4_evt1 != 0))))))))) && (((((_x_t4_l1 != 0) && ( !(_x_t4_l0 
!= 0))) && ( !(t4_x <= 2.0))) && (((t4_evt0 != 0) && (t4_evt1 != 0)) && (t4_x == _x_t4_x))) || ( !(((t4_l0 != 0) && ( !(t4_l1 != 0))) && ((delta == 0.0) && ( !(( !(t4_evt0 != 0)) && ( !(t4_evt1 != 0))))))))) && (((t4_x == _x_t4_x) && (((_x_t4_l0 != 0) && (_x_t4_l1 != 0)) && ((t4_evt0 != 0) && (t4_evt1 != 0)))) || ( !(((t4_l1 != 0) && ( !(t4_l0 != 0))) && ((delta == 0.0) && ( !(( !(t4_evt0 != 0)) && ( !(t4_evt1 != 0))))))))) && ((((( !(_x_t4_l0 != 0)) && ( !(_x_t4_l1 != 0))) && (t4_x <= 5.0)) && (((t4_evt1 != 0) && ( !(t4_evt0 != 0))) && (t4_x == _x_t4_x))) || ( !(((t4_l0 != 0) && (t4_l1 != 0)) && ((delta == 0.0) && ( !(( !(t4_evt0 != 0)) && ( !(t4_evt1 != 0))))))))) && (((((((((((( !(_x_t3_l0 != 0)) && ( !(_x_t3_l1 != 0))) || ((_x_t3_l0 != 0) && ( !(_x_t3_l1 != 0)))) || (((_x_t3_l1 != 0) && ( !(_x_t3_l0 != 0))) || ((_x_t3_l0 != 0) && (_x_t3_l1 != 0)))) && (((( !(_x_t3_evt0 != 0)) && ( !(_x_t3_evt1 != 0))) || ((_x_t3_evt0 != 0) && ( !(_x_t3_evt1 != 0)))) || (((_x_t3_evt1 != 0) && ( !(_x_t3_evt0 != 0))) || ((_x_t3_evt0 != 0) && (_x_t3_evt1 != 0))))) && ((( !(_x_t3_l0 != 0)) && ( !(_x_t3_l1 != 0))) || (_x_t3_x <= 5.0))) && (((((t3_l0 != 0) == (_x_t3_l0 != 0)) && ((t3_l1 != 0) == (_x_t3_l1 != 0))) && ((delta + (t3_x + (-1.0 * _x_t3_x))) == 0.0)) || ( !(( !(delta <= 0.0)) || (( !(t3_evt0 != 0)) && ( !(t3_evt1 != 0))))))) && (((((_x_t3_l0 != 0) && ( !(_x_t3_l1 != 0))) && ((t3_evt0 != 0) && ( !(t3_evt1 != 0)))) && (_x_t3_x == 0.0)) || ( !((( !(t3_l0 != 0)) && ( !(t3_l1 != 0))) && ((delta == 0.0) && ( !(( !(t3_evt0 != 0)) && ( !(t3_evt1 != 0))))))))) && (((((_x_t3_l1 != 0) && ( !(_x_t3_l0 != 0))) && ( !(t3_x <= 2.0))) && (((t3_evt0 != 0) && (t3_evt1 != 0)) && (t3_x == _x_t3_x))) || ( !(((t3_l0 != 0) && ( !(t3_l1 != 0))) && ((delta == 0.0) && ( !(( !(t3_evt0 != 0)) && ( !(t3_evt1 != 0))))))))) && (((t3_x == _x_t3_x) && (((_x_t3_l0 != 0) && (_x_t3_l1 != 0)) && ((t3_evt0 != 0) && (t3_evt1 != 0)))) || ( !(((t3_l1 != 0) && ( !(t3_l0 != 0))) && ((delta == 0.0) && ( !(( !(t3_evt0 
!= 0)) && ( !(t3_evt1 != 0))))))))) && ((((( !(_x_t3_l0 != 0)) && ( !(_x_t3_l1 != 0))) && (t3_x <= 5.0)) && (((t3_evt1 != 0) && ( !(t3_evt0 != 0))) && (t3_x == _x_t3_x))) || ( !(((t3_l0 != 0) && (t3_l1 != 0)) && ((delta == 0.0) && ( !(( !(t3_evt0 != 0)) && ( !(t3_evt1 != 0))))))))) && (((((((((((( !(_x_t2_l0 != 0)) && ( !(_x_t2_l1 != 0))) || ((_x_t2_l0 != 0) && ( !(_x_t2_l1 != 0)))) || (((_x_t2_l1 != 0) && ( !(_x_t2_l0 != 0))) || ((_x_t2_l0 != 0) && (_x_t2_l1 != 0)))) && (((( !(_x_t2_evt0 != 0)) && ( !(_x_t2_evt1 != 0))) || ((_x_t2_evt0 != 0) && ( !(_x_t2_evt1 != 0)))) || (((_x_t2_evt1 != 0) && ( !(_x_t2_evt0 != 0))) || ((_x_t2_evt0 != 0) && (_x_t2_evt1 != 0))))) && ((( !(_x_t2_l0 != 0)) && ( !(_x_t2_l1 != 0))) || (_x_t2_x <= 5.0))) && (((((t2_l0 != 0) == (_x_t2_l0 != 0)) && ((t2_l1 != 0) == (_x_t2_l1 != 0))) && ((delta + (t2_x + (-1.0 * _x_t2_x))) == 0.0)) || ( !(( !(delta <= 0.0)) || (( !(t2_evt0 != 0)) && ( !(t2_evt1 != 0))))))) && (((((_x_t2_l0 != 0) && ( !(_x_t2_l1 != 0))) && ((t2_evt0 != 0) && ( !(t2_evt1 != 0)))) && (_x_t2_x == 0.0)) || ( !((( !(t2_l0 != 0)) && ( !(t2_l1 != 0))) && ((delta == 0.0) && ( !(( !(t2_evt0 != 0)) && ( !(t2_evt1 != 0))))))))) && (((((_x_t2_l1 != 0) && ( !(_x_t2_l0 != 0))) && ( !(t2_x <= 2.0))) && (((t2_evt0 != 0) && (t2_evt1 != 0)) && (t2_x == _x_t2_x))) || ( !(((t2_l0 != 0) && ( !(t2_l1 != 0))) && ((delta == 0.0) && ( !(( !(t2_evt0 != 0)) && ( !(t2_evt1 != 0))))))))) && (((t2_x == _x_t2_x) && (((_x_t2_l0 != 0) && (_x_t2_l1 != 0)) && ((t2_evt0 != 0) && (t2_evt1 != 0)))) || ( !(((t2_l1 != 0) && ( !(t2_l0 != 0))) && ((delta == 0.0) && ( !(( !(t2_evt0 != 0)) && ( !(t2_evt1 != 0))))))))) && ((((( !(_x_t2_l0 != 0)) && ( !(_x_t2_l1 != 0))) && (t2_x <= 5.0)) && (((t2_evt1 != 0) && ( !(t2_evt0 != 0))) && (t2_x == _x_t2_x))) || ( !(((t2_l0 != 0) && (t2_l1 != 0)) && ((delta == 0.0) && ( !(( !(t2_evt0 != 0)) && ( !(t2_evt1 != 0))))))))) && (((((((((((( !(_x_t1_l0 != 0)) && ( !(_x_t1_l1 != 0))) || ((_x_t1_l0 != 0) && ( !(_x_t1_l1 != 0)))) || 
(((_x_t1_l1 != 0) && ( !(_x_t1_l0 != 0))) || ((_x_t1_l0 != 0) && (_x_t1_l1 != 0)))) && (((( !(_x_t1_evt0 != 0)) && ( !(_x_t1_evt1 != 0))) || ((_x_t1_evt0 != 0) && ( !(_x_t1_evt1 != 0)))) || (((_x_t1_evt1 != 0) && ( !(_x_t1_evt0 != 0))) || ((_x_t1_evt0 != 0) && (_x_t1_evt1 != 0))))) && ((( !(_x_t1_l0 != 0)) && ( !(_x_t1_l1 != 0))) || (_x_t1_x <= 5.0))) && (((((t1_l0 != 0) == (_x_t1_l0 != 0)) && ((t1_l1 != 0) == (_x_t1_l1 != 0))) && ((delta + (t1_x + (-1.0 * _x_t1_x))) == 0.0)) || ( !(( !(delta <= 0.0)) || (( !(t1_evt0 != 0)) && ( !(t1_evt1 != 0))))))) && (((((_x_t1_l0 != 0) && ( !(_x_t1_l1 != 0))) && ((t1_evt0 != 0) && ( !(t1_evt1 != 0)))) && (_x_t1_x == 0.0)) || ( !((( !(t1_l0 != 0)) && ( !(t1_l1 != 0))) && ((delta == 0.0) && ( !(( !(t1_evt0 != 0)) && ( !(t1_evt1 != 0))))))))) && (((((_x_t1_l1 != 0) && ( !(_x_t1_l0 != 0))) && ( !(t1_x <= 2.0))) && (((t1_evt0 != 0) && (t1_evt1 != 0)) && (t1_x == _x_t1_x))) || ( !(((t1_l0 != 0) && ( !(t1_l1 != 0))) && ((delta == 0.0) && ( !(( !(t1_evt0 != 0)) && ( !(t1_evt1 != 0))))))))) && (((t1_x == _x_t1_x) && (((_x_t1_l0 != 0) && (_x_t1_l1 != 0)) && ((t1_evt0 != 0) && (t1_evt1 != 0)))) || ( !(((t1_l1 != 0) && ( !(t1_l0 != 0))) && ((delta == 0.0) && ( !(( !(t1_evt0 != 0)) && ( !(t1_evt1 != 0))))))))) && ((((( !(_x_t1_l0 != 0)) && ( !(_x_t1_l1 != 0))) && (t1_x <= 5.0)) && (((t1_evt1 != 0) && ( !(t1_evt0 != 0))) && (t1_x == _x_t1_x))) || ( !(((t1_l0 != 0) && (t1_l1 != 0)) && ((delta == 0.0) && ( !(( !(t1_evt0 != 0)) && ( !(t1_evt1 != 0))))))))) && (((((((((((( !(_x_t0_l0 != 0)) && ( !(_x_t0_l1 != 0))) || ((_x_t0_l0 != 0) && ( !(_x_t0_l1 != 0)))) || (((_x_t0_l1 != 0) && ( !(_x_t0_l0 != 0))) || ((_x_t0_l0 != 0) && (_x_t0_l1 != 0)))) && (((( !(_x_t0_evt0 != 0)) && ( !(_x_t0_evt1 != 0))) || ((_x_t0_evt0 != 0) && ( !(_x_t0_evt1 != 0)))) || (((_x_t0_evt1 != 0) && ( !(_x_t0_evt0 != 0))) || ((_x_t0_evt0 != 0) && (_x_t0_evt1 != 0))))) && ((( !(_x_t0_l0 != 0)) && ( !(_x_t0_l1 != 0))) || (_x_t0_x <= 5.0))) && (((((t0_l0 != 0) == (_x_t0_l0 != 
0)) && ((t0_l1 != 0) == (_x_t0_l1 != 0))) && ((delta + (t0_x + (-1.0 * _x_t0_x))) == 0.0)) || ( !(( !(delta <= 0.0)) || (( !(t0_evt0 != 0)) && ( !(t0_evt1 != 0))))))) && (((((_x_t0_l0 != 0) && ( !(_x_t0_l1 != 0))) && ((t0_evt0 != 0) && ( !(t0_evt1 != 0)))) && (_x_t0_x == 0.0)) || ( !((( !(t0_l0 != 0)) && ( !(t0_l1 != 0))) && ((delta == 0.0) && ( !(( !(t0_evt0 != 0)) && ( !(t0_evt1 != 0))))))))) && (((((_x_t0_l1 != 0) && ( !(_x_t0_l0 != 0))) && ( !(t0_x <= 2.0))) && (((t0_evt0 != 0) && (t0_evt1 != 0)) && (t0_x == _x_t0_x))) || ( !(((t0_l0 != 0) && ( !(t0_l1 != 0))) && ((delta == 0.0) && ( !(( !(t0_evt0 != 0)) && ( !(t0_evt1 != 0))))))))) && (((t0_x == _x_t0_x) && (((_x_t0_l0 != 0) && (_x_t0_l1 != 0)) && ((t0_evt0 != 0) && (t0_evt1 != 0)))) || ( !(((t0_l1 != 0) && ( !(t0_l0 != 0))) && ((delta == 0.0) && ( !(( !(t0_evt0 != 0)) && ( !(t0_evt1 != 0))))))))) && ((((( !(_x_t0_l0 != 0)) && ( !(_x_t0_l1 != 0))) && (t0_x <= 5.0)) && (((t0_evt1 != 0) && ( !(t0_evt0 != 0))) && (t0_x == _x_t0_x))) || ( !(((t0_l0 != 0) && (t0_l1 != 0)) && ((delta == 0.0) && ( !(( !(t0_evt0 != 0)) && ( !(t0_evt1 != 0))))))))) && (((((((((((((((((( !(_x_controller_l0 != 0)) && ( !(_x_controller_l1 != 0))) || ((_x_controller_l0 != 0) && ( !(_x_controller_l1 != 0)))) || (((_x_controller_l1 != 0) && ( !(_x_controller_l0 != 0))) || ((_x_controller_l0 != 0) && (_x_controller_l1 != 0)))) && (((( !(_x_controller_evt2 != 0)) && (( !(_x_controller_evt0 != 0)) && ( !(_x_controller_evt1 != 0)))) || (( !(_x_controller_evt2 != 0)) && ((_x_controller_evt0 != 0) && ( !(_x_controller_evt1 != 0))))) || ((( !(_x_controller_evt2 != 0)) && ((_x_controller_evt1 != 0) && ( !(_x_controller_evt0 != 0)))) || ((( !(_x_controller_evt2 != 0)) && ((_x_controller_evt0 != 0) && (_x_controller_evt1 != 0))) || ((_x_controller_evt2 != 0) && (( !(_x_controller_evt0 != 0)) && ( !(_x_controller_evt1 != 0)))))))) && (((((((((((((((((_x_controller_cnt == 0) || (_x_controller_cnt == 1)) || (_x_controller_cnt == 2)) || (_x_controller_cnt 
== 3)) || (_x_controller_cnt == 4)) || (_x_controller_cnt == 5)) || (_x_controller_cnt == 6)) || (_x_controller_cnt == 7)) || (_x_controller_cnt == 8)) || (_x_controller_cnt == 9)) || (_x_controller_cnt == 10)) || (_x_controller_cnt == 11)) || (_x_controller_cnt == 12)) || (_x_controller_cnt == 13)) || (_x_controller_cnt == 14)) || (_x_controller_cnt == 15)) || (_x_controller_cnt == 16))) && ((_x_controller_z <= 1.0) || ( !(((_x_controller_l0 != 0) && ( !(_x_controller_l1 != 0))) || ((_x_controller_l0 != 0) && (_x_controller_l1 != 0)))))) && ((((((controller_l0 != 0) == (_x_controller_l0 != 0)) && ((controller_l1 != 0) == (_x_controller_l1 != 0))) && ((delta + (controller_z + (-1.0 * _x_controller_z))) == 0.0)) && (controller_cnt == _x_controller_cnt)) || ( !(( !(delta <= 0.0)) || (( !(controller_evt2 != 0)) && (( !(controller_evt0 != 0)) && ( !(controller_evt1 != 0)))))))) && (((((_x_controller_l0 != 0) && ( !(_x_controller_l1 != 0))) && (( !(controller_evt2 != 0)) && ((controller_evt0 != 0) && ( !(controller_evt1 != 0))))) && ((_x_controller_cnt == 1) && (_x_controller_z == 0.0))) || ( !((( !(controller_l0 != 0)) && ( !(controller_l1 != 0))) && ((delta == 0.0) && ( !(( !(controller_evt2 != 0)) && (( !(controller_evt0 != 0)) && ( !(controller_evt1 != 0)))))))))) && (((controller_z == _x_controller_z) && (((_x_controller_l0 != 0) && ( !(_x_controller_l1 != 0))) || ((_x_controller_l1 != 0) && ( !(_x_controller_l0 != 0))))) || ( !(((controller_l0 != 0) && ( !(controller_l1 != 0))) && ((delta == 0.0) && ( !(( !(controller_evt2 != 0)) && (( !(controller_evt0 != 0)) && ( !(controller_evt1 != 0)))))))))) && ((((( !(controller_evt2 != 0)) && ((controller_evt0 != 0) && ( !(controller_evt1 != 0)))) && ((controller_cnt + (-1 * _x_controller_cnt)) == -1)) || ((( !(controller_evt2 != 0)) && ((controller_evt1 != 0) && ( !(controller_evt0 != 0)))) && ((controller_cnt + (-1 * _x_controller_cnt)) == 1))) || ( !(((delta == 0.0) && ( !(( !(controller_evt2 != 0)) && (( 
!(controller_evt0 != 0)) && ( !(controller_evt1 != 0)))))) && (((controller_l0 != 0) && ( !(controller_l1 != 0))) && ((_x_controller_l0 != 0) && ( !(_x_controller_l1 != 0)))))))) && (((( !(controller_evt2 != 0)) && ((controller_evt0 != 0) && (controller_evt1 != 0))) && ((controller_cnt == _x_controller_cnt) && (controller_z == 1.0))) || ( !(((delta == 0.0) && ( !(( !(controller_evt2 != 0)) && (( !(controller_evt0 != 0)) && ( !(controller_evt1 != 0)))))) && (((controller_l0 != 0) && ( !(controller_l1 != 0))) && ((_x_controller_l1 != 0) && ( !(_x_controller_l0 != 0)))))))) && ((((_x_controller_l1 != 0) && ( !(_x_controller_l0 != 0))) || ((_x_controller_l0 != 0) && (_x_controller_l1 != 0))) || ( !(((controller_l1 != 0) && ( !(controller_l0 != 0))) && ((delta == 0.0) && ( !(( !(controller_evt2 != 0)) && (( !(controller_evt0 != 0)) && ( !(controller_evt1 != 0)))))))))) && (((controller_z == _x_controller_z) && (((( !(controller_evt2 != 0)) && ((controller_evt0 != 0) && ( !(controller_evt1 != 0)))) && ((controller_cnt + (-1 * _x_controller_cnt)) == -1)) || (((( !(controller_evt2 != 0)) && ((controller_evt1 != 0) && ( !(controller_evt0 != 0)))) && ((controller_cnt + (-1 * _x_controller_cnt)) == 1)) && ( !(controller_cnt <= 1))))) || ( !(((delta == 0.0) && ( !(( !(controller_evt2 != 0)) && (( !(controller_evt0 != 0)) && ( !(controller_evt1 != 0)))))) && (((controller_l1 != 0) && ( !(controller_l0 != 0))) && ((_x_controller_l1 != 0) && ( !(_x_controller_l0 != 0)))))))) && ((((( !(controller_evt2 != 0)) && ((controller_evt1 != 0) && ( !(controller_evt0 != 0)))) && (controller_cnt == 1)) && ((_x_controller_cnt == 0) && (_x_controller_z == 0.0))) || ( !(((delta == 0.0) && ( !(( !(controller_evt2 != 0)) && (( !(controller_evt0 != 0)) && ( !(controller_evt1 != 0)))))) && (((controller_l1 != 0) && ( !(controller_l0 != 0))) && ((_x_controller_l0 != 0) && (_x_controller_l1 != 0))))))) && (((controller_z == _x_controller_z) && ((( !(_x_controller_l0 != 0)) && ( !(_x_controller_l1 != 
0))) || ((_x_controller_l1 != 0) && ( !(_x_controller_l0 != 0))))) || ( !(((controller_l0 != 0) && (controller_l1 != 0)) && ((delta == 0.0) && ( !(( !(controller_evt2 != 0)) && (( !(controller_evt0 != 0)) && ( !(controller_evt1 != 0)))))))))) && ((((controller_cnt + (-1 * _x_controller_cnt)) == -1) && ((( !(controller_evt2 != 0)) && ((controller_evt0 != 0) && ( !(controller_evt1 != 0)))) && (controller_z <= 1.0))) || ( !(((delta == 0.0) && ( !(( !(controller_evt2 != 0)) && (( !(controller_evt0 != 0)) && ( !(controller_evt1 != 0)))))) && (((_x_controller_l1 != 0) && ( !(_x_controller_l0 != 0))) && ((controller_l0 != 0) && (controller_l1 != 0))))))) && ((((((((((((( !(_x_gate_l0 != 0)) && ( !(_x_gate_l1 != 0))) || ((_x_gate_l0 != 0) && ( !(_x_gate_l1 != 0)))) || (((_x_gate_l1 != 0) && ( !(_x_gate_l0 != 0))) || ((_x_gate_l0 != 0) && (_x_gate_l1 != 0)))) && (((( !(_x_gate_evt0 != 0)) && ( !(_x_gate_evt1 != 0))) || ((_x_gate_evt0 != 0) && ( !(_x_gate_evt1 != 0)))) || (((_x_gate_evt1 != 0) && ( !(_x_gate_evt0 != 0))) || ((_x_gate_evt0 != 0) && (_x_gate_evt1 != 0))))) && ((_x_gate_y <= 1.0) || ( !((_x_gate_l0 != 0) && ( !(_x_gate_l1 != 0)))))) && ((_x_gate_y <= 2.0) || ( !((_x_gate_l0 != 0) && (_x_gate_l1 != 0))))) && (((((gate_l0 != 0) == (_x_gate_l0 != 0)) && ((gate_l1 != 0) == (_x_gate_l1 != 0))) && ((delta + (gate_y + (-1.0 * _x_gate_y))) == 0.0)) || ( !((( !(gate_evt0 != 0)) && ( !(gate_evt1 != 0))) || ( !(delta <= 0.0)))))) && (((((_x_gate_l0 != 0) && ( !(_x_gate_l1 != 0))) && ((gate_evt0 != 0) && ( !(gate_evt1 != 0)))) && (_x_gate_y == 0.0)) || ( !((( !(gate_l0 != 0)) && ( !(gate_l1 != 0))) && ((delta == 0.0) && ( !(( !(gate_evt0 != 0)) && ( !(gate_evt1 != 0))))))))) && (((((_x_gate_l1 != 0) && ( !(_x_gate_l0 != 0))) && ((gate_evt0 != 0) && (gate_evt1 != 0))) && ((gate_y <= 1.0) && (gate_y == _x_gate_y))) || ( !(((gate_l0 != 0) && ( !(gate_l1 != 0))) && ((delta == 0.0) && ( !(( !(gate_evt0 != 0)) && ( !(gate_evt1 != 0))))))))) && (((_x_gate_y == 0.0) && 
(((_x_gate_l0 != 0) && (_x_gate_l1 != 0)) && ((gate_evt1 != 0) && ( !(gate_evt0 != 0))))) || ( !(((gate_l1 != 0) && ( !(gate_l0 != 0))) && ((delta == 0.0) && ( !(( !(gate_evt0 != 0)) && ( !(gate_evt1 != 0))))))))) && (((gate_y == _x_gate_y) && (((( !(_x_gate_l0 != 0)) && ( !(_x_gate_l1 != 0))) && (1.0 <= gate_y)) && (((gate_evt0 != 0) && (gate_evt1 != 0)) && (gate_y <= 2.0)))) || ( !(((gate_l0 != 0) && (gate_l1 != 0)) && ((delta == 0.0) && ( !(( !(gate_evt0 != 0)) && ( !(gate_evt1 != 0))))))))) && (0.0 <= _x_delta)))))))))))))))))) && ((( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0))) || ( !((delta == 0.0) && ( !(( !(t0_evt0 != 0)) && ( !(t0_evt1 != 0)))))))) && ((( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0))) || ( !((delta == 0.0) && ( !(( !(t1_evt0 != 0)) && ( !(t1_evt1 != 0)))))))) && ((( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0))) || ( !((delta == 0.0) && ( !(( !(t2_evt0 != 0)) && ( !(t2_evt1 != 0)))))))) && ((( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0))) || ( !((delta == 0.0) && ( !(( !(t3_evt0 != 0)) && ( !(t3_evt1 != 0)))))))) && ((( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0))) || ( !((delta == 0.0) && ( !(( !(t4_evt0 != 0)) && ( !(t4_evt1 != 0)))))))) && ((( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0))) || ( !((delta == 0.0) && ( !(( !(t5_evt0 != 0)) && ( !(t5_evt1 != 0)))))))) && ((( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0))) || ( !((delta == 0.0) && ( !(( !(t6_evt0 != 0)) && ( !(t6_evt1 != 0)))))))) && ((( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0))) || ( !((delta == 0.0) && ( !(( !(t7_evt0 != 0)) && ( !(t7_evt1 != 0)))))))) && ((( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0))) || ( !((delta == 0.0) && ( !(( !(t8_evt0 != 0)) && ( !(t8_evt1 != 0)))))))) && ((( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0))) || ( !((delta == 0.0) && ( !(( !(t9_evt0 != 0)) && ( !(t9_evt1 != 0)))))))) && ((( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0))) || ( !((delta == 0.0) && ( !(( !(t10_evt0 != 0)) && ( !(t10_evt1 != 0)))))))) && ((( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0))) || ( !((delta == 0.0) && ( !(( 
!(t11_evt0 != 0)) && ( !(t11_evt1 != 0)))))))) && ((( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0))) || ( !((delta == 0.0) && ( !(( !(t12_evt0 != 0)) && ( !(t12_evt1 != 0)))))))) && ((( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0))) || ( !((delta == 0.0) && ( !(( !(t13_evt0 != 0)) && ( !(t13_evt1 != 0)))))))) && ((( !(t13_evt0 != 0)) && ( !(t13_evt1 != 0))) || ( !((delta == 0.0) && ( !(( !(t14_evt0 != 0)) && ( !(t14_evt1 != 0)))))))) && ((((gate_evt0 != 0) && ( !(gate_evt1 != 0))) == (( !(controller_evt2 != 0)) && ((controller_evt0 != 0) && (controller_evt1 != 0)))) || ( !(delta == 0.0)))) && (( !(delta == 0.0)) || (((gate_evt1 != 0) && ( !(gate_evt0 != 0))) == ((controller_evt2 != 0) && (( !(controller_evt0 != 0)) && ( !(controller_evt1 != 0))))))) && (( !(delta == 0.0)) || ((( !(controller_evt2 != 0)) && ((controller_evt0 != 0) && ( !(controller_evt1 != 0)))) == (((t14_evt0 != 0) && ( !(t14_evt1 != 0))) || (((t13_evt0 != 0) && ( !(t13_evt1 != 0))) || (((t12_evt0 != 0) && ( !(t12_evt1 != 0))) || (((t11_evt0 != 0) && ( !(t11_evt1 != 0))) || (((t10_evt0 != 0) && ( !(t10_evt1 != 0))) || (((t9_evt0 != 0) && ( !(t9_evt1 != 0))) || (((t8_evt0 != 0) && ( !(t8_evt1 != 0))) || (((t7_evt0 != 0) && ( !(t7_evt1 != 0))) || (((t6_evt0 != 0) && ( !(t6_evt1 != 0))) || (((t5_evt0 != 0) && ( !(t5_evt1 != 0))) || (((t4_evt0 != 0) && ( !(t4_evt1 != 0))) || (((t3_evt0 != 0) && ( !(t3_evt1 != 0))) || (((t2_evt0 != 0) && ( !(t2_evt1 != 0))) || (((t0_evt0 != 0) && ( !(t0_evt1 != 0))) || ((t1_evt0 != 0) && ( !(t1_evt1 != 0)))))))))))))))))))) && (( !(delta == 0.0)) || ((( !(controller_evt2 != 0)) && ((controller_evt1 != 0) && ( !(controller_evt0 != 0)))) == (((t14_evt1 != 0) && ( !(t14_evt0 != 0))) || (((t13_evt1 != 0) && ( !(t13_evt0 != 0))) || (((t12_evt1 != 0) && ( !(t12_evt0 != 0))) || (((t11_evt1 != 0) && ( !(t11_evt0 != 0))) || (((t10_evt1 != 0) && ( !(t10_evt0 != 0))) || (((t9_evt1 != 0) && ( !(t9_evt0 != 0))) || (((t8_evt1 != 0) && ( !(t8_evt0 != 0))) || (((t7_evt1 != 0) && ( !(t7_evt0 != 
0))) || (((t6_evt1 != 0) && ( !(t6_evt0 != 0))) || (((t5_evt1 != 0) && ( !(t5_evt0 != 0))) || (((t4_evt1 != 0) && ( !(t4_evt0 != 0))) || (((t3_evt1 != 0) && ( !(t3_evt0 != 0))) || (((t2_evt1 != 0) && ( !(t2_evt0 != 0))) || (((t0_evt1 != 0) && ( !(t0_evt0 != 0))) || ((t1_evt1 != 0) && ( !(t1_evt0 != 0)))))))))))))))))))) && (((delta == _x__diverge_delta) || ( !(1.0 <= _diverge_delta))) && ((1.0 <= _diverge_delta) || ((delta + (_diverge_delta + (-1.0 * _x__diverge_delta))) == 0.0)))); t14_l1 = _x_t14_l1; t14_l0 = _x_t14_l0; t14_evt1 = _x_t14_evt1; t14_evt0 = _x_t14_evt0; t14_x = _x_t14_x; t13_l1 = _x_t13_l1; t13_l0 = _x_t13_l0; t13_evt1 = _x_t13_evt1; t13_evt0 = _x_t13_evt0; t13_x = _x_t13_x; t12_l1 = _x_t12_l1; t12_l0 = _x_t12_l0; t12_evt1 = _x_t12_evt1; t12_evt0 = _x_t12_evt0; t12_x = _x_t12_x; t11_x = _x_t11_x; t10_l1 = _x_t10_l1; t10_l0 = _x_t10_l0; _diverge_delta = _x__diverge_delta; t10_evt1 = _x_t10_evt1; t10_evt0 = _x_t10_evt0; t10_x = _x_t10_x; t9_evt1 = _x_t9_evt1; t9_evt0 = _x_t9_evt0; t2_l0 = _x_t2_l0; t9_l1 = _x_t9_l1; controller_cnt = _x_controller_cnt; t2_evt0 = _x_t2_evt0; t1_l1 = _x_t1_l1; t1_l0 = _x_t1_l0; gate_l1 = _x_gate_l1; t1_evt1 = _x_t1_evt1; t2_x = _x_t2_x; t1_evt0 = _x_t1_evt0; t0_l1 = _x_t0_l1; gate_evt1 = _x_gate_evt1; t1_x = _x_t1_x; t0_evt1 = _x_t0_evt1; t0_l0 = _x_t0_l0; gate_evt0 = _x_gate_evt0; t0_evt0 = _x_t0_evt0; gate_y = _x_gate_y; t0_x = _x_t0_x; delta = _x_delta; t7_evt1 = _x_t7_evt1; t8_x = _x_t8_x; t2_l1 = _x_t2_l1; t3_evt0 = _x_t3_evt0; gate_l0 = _x_gate_l0; t11_l0 = _x_t11_l0; controller_l0 = _x_controller_l0; t11_l1 = _x_t11_l1; controller_l1 = _x_controller_l1; t2_evt1 = _x_t2_evt1; t3_x = _x_t3_x; t11_evt0 = _x_t11_evt0; controller_evt0 = _x_controller_evt0; t11_evt1 = _x_t11_evt1; controller_evt1 = _x_controller_evt1; t9_l0 = _x_t9_l0; controller_z = _x_controller_z; t4_l0 = _x_t4_l0; t3_evt1 = _x_t3_evt1; t4_x = _x_t4_x; t3_l0 = _x_t3_l0; t3_l1 = _x_t3_l1; t4_evt0 = _x_t4_evt0; t4_evt1 = _x_t4_evt1; t5_x = _x_t5_x; 
t4_l1 = _x_t4_l1; t5_evt0 = _x_t5_evt0; t5_evt1 = _x_t5_evt1; controller_evt2 = _x_controller_evt2; t6_x = _x_t6_x; t5_l0 = _x_t5_l0; t5_l1 = _x_t5_l1; t6_evt0 = _x_t6_evt0; t6_evt1 = _x_t6_evt1; t7_x = _x_t7_x; t6_l0 = _x_t6_l0; t6_l1 = _x_t6_l1; t7_evt0 = _x_t7_evt0; t7_l0 = _x_t7_l0; t7_l1 = _x_t7_l1; t8_evt0 = _x_t8_evt0; t8_evt1 = _x_t8_evt1; t9_x = _x_t9_x; t8_l0 = _x_t8_l0; t8_l1 = _x_t8_l1; } }
the_stack_data/175142695.c
/* -*- mode: C; c-basic-offset: 3; -*- */ /*--------------------------------------------------------------------*/ /*--- Wrappers for generic Unix system calls ---*/ /*--- syswrap-generic.c ---*/ /*--------------------------------------------------------------------*/ /* This file is part of Valgrind, a dynamic binary instrumentation framework. Copyright (C) 2000-2015 Julian Seward [email protected] This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. The GNU General Public License is contained in the file COPYING. 
*/

#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)

#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_threadstate.h"
#include "pub_core_debuginfo.h"    // VG_(di_notify_*)
#include "pub_core_aspacemgr.h"
#include "pub_core_transtab.h"     // VG_(discard_translations)
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"  // VG_(brk_base), VG_(brk_limit)
#include "pub_core_debuglog.h"
#include "pub_core_errormgr.h"
#include "pub_core_gdbserver.h"    // VG_(gdbserver)
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h"      // VG_(get_SP)
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_signals.h"
#include "pub_core_stacktrace.h"   // For VG_(get_and_pp_StackTrace)()
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"
#include "pub_core_ume.h"
#include "pub_core_stacks.h"
#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"
#include "config.h"

/* Guess the extent of the client stack containing 'sp' and record it in
   'tst' (client_stack_highest_byte / client_stack_szB), registering the
   range with the stacks module.  Falls back to zeroed fields (with a
   user-visible warning) when 'sp' is not in a client-accessible segment. */
void ML_(guess_and_register_stack) (Addr sp, ThreadState* tst)
{
   Bool debug = False;
   NSegment const* seg;

   /* We don't really know where the client stack is, because its
      allocated by the client.  The best we can do is look at the
      memory mappings and try to derive some useful information.  We
      assume that sp starts near its highest possible value, and can
      only go down to the start of the mmaped segment.
*/
   seg = VG_(am_find_nsegment)(sp);
   if (seg &&
       VG_(am_is_valid_for_client)(sp, 1, VKI_PROT_READ | VKI_PROT_WRITE)) {
      /* Highest byte: top of the page containing sp; the stack is taken
         to reach down to the start of the containing segment. */
      tst->client_stack_highest_byte = (Addr)VG_PGROUNDUP(sp)-1;
      tst->client_stack_szB =
         tst->client_stack_highest_byte - seg->start + 1;

      VG_(register_stack)(seg->start, tst->client_stack_highest_byte);

      if (debug)
         VG_(printf)("tid %u: guessed client stack range [%#lx-%#lx]\n",
                     tst->tid, seg->start, tst->client_stack_highest_byte);
   } else {
      VG_(message)(Vg_UserMsg,
                   "!? New thread %u starts with SP(%#lx) unmapped\n",
                   tst->tid, sp);
      tst->client_stack_highest_byte = 0;
      tst->client_stack_szB = 0;
   }
}

/* Returns True iff address range is something the client can plausibly
   mess with: all of it is either already belongs to the client or is
   free or a reservation.  On failure (unless 'syscallname' is NULL) a
   warning naming the offending syscall is emitted, plus a backtrace at
   higher verbosity levels. */
Bool ML_(valid_client_addr)(Addr start, SizeT size, ThreadId tid,
                            const HChar *syscallname)
{
   Bool ret;

   /* Empty ranges are trivially OK. */
   if (size == 0)
      return True;

   ret = VG_(am_is_valid_for_client_or_free_or_resvn)
            (start,size,VKI_PROT_NONE);

   if (0)
      VG_(printf)("%s: test=%#lx-%#lx ret=%d\n",
                  syscallname, start, start+size-1, (Int)ret);

   if (!ret && syscallname != NULL) {
      VG_(message)(Vg_UserMsg, "Warning: client syscall %s tried "
                               "to modify addresses %#lx-%#lx\n",
                               syscallname, start, start+size-1);
      if (VG_(clo_verbosity) > 1) {
         VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
      }
   }

   return ret;
}

/* Returns True iff 'sigNo' is a signal number the client may legitimately
   pass to kill()-style syscalls (0 .. VG_SIGVGRTUSERMAX inclusive). */
Bool ML_(client_signal_OK)(Int sigNo)
{
   /* signal 0 is OK for kill */
   Bool ret = sigNo >= 0 && sigNo <= VG_SIGVGRTUSERMAX;

   //VG_(printf)("client_signal_OK(%d) -> %d\n", sigNo, ret);

   return ret;
}

/* Handy small function to help stop wrappers from segfaulting when
   presented with bogus client addresses.  Is not used for generating
   user-visible errors.
*/
Bool ML_(safe_to_deref) ( const void *start, SizeT size )
{
   /* Readable by the client => safe for the wrapper to dereference. */
   return VG_(am_is_valid_for_client)( (Addr)start, size, VKI_PROT_READ );
}


/* ---------------------------------------------------------------------
   Doing mmap, mremap
   ------------------------------------------------------------------ */

/* AFAICT from kernel sources (mm/mprotect.c) and general
   experimentation, munmap, mprotect (and mremap??) work at the page
   level.  So addresses and lengths must be adjusted for this. */

/* Mash around start and length so that the area exactly covers an
   integral number of pages.  If we don't do that, memcheck's idea of
   addressible memory diverges from that of the kernel's, which causes
   the leak detector to crash. */
static void page_align_addr_and_len( Addr* a, SizeT* len)
{
   Addr ra;

   /* Round the start down; grow the length so the rounded range still
      covers all of the original [a, a+len). */
   ra = VG_PGROUNDDN(*a);
   *len = VG_PGROUNDUP(*a + *len) - ra;
   *a = ra;
}

/* Inform the address-space manager of a successful client mmap, and
   discard any cached translations overlapping the mapped range. */
static void notify_core_of_mmap(Addr a, SizeT len, UInt prot,
                                UInt flags, Int fd, Off64T offset)
{
   Bool d;

   /* 'a' is the return value from a real kernel mmap, hence: */
   vg_assert(VG_IS_PAGE_ALIGNED(a));
   /* whereas len is whatever the syscall supplied.  So: */
   len = VG_PGROUNDUP(len);

   d = VG_(am_notify_client_mmap)( a, len, prot, flags, fd, offset );

   if (d)
      VG_(discard_translations)( a, (ULong)len, "notify_core_of_mmap" );
}

/* Inform the tool of a successful client mmap, with 'prot' broken out
   into separate read/write/exec booleans for the VG_TRACK event. */
static void notify_tool_of_mmap(Addr a, SizeT len, UInt prot, ULong di_handle)
{
   Bool rr, ww, xx;

   /* 'a' is the return value from a real kernel mmap, hence: */
   vg_assert(VG_IS_PAGE_ALIGNED(a));
   /* whereas len is whatever the syscall supplied.  So: */
   len = VG_PGROUNDUP(len);

   rr = toBool(prot & VKI_PROT_READ);
   ww = toBool(prot & VKI_PROT_WRITE);
   xx = toBool(prot & VKI_PROT_EXEC);

   VG_TRACK( new_mem_mmap, a, len, rr, ww, xx, di_handle );
}


/* When a client mmap has been successfully done, this function must
   be called.  It notifies both aspacem and the tool of the new
   mapping.

   JRS 2008-Aug-14: But notice this is *very* obscure.  The only place
   it is called from is POST(sys_io_setup).
In particular, ML_(generic_PRE_sys_mmap), in m_syswrap, is the "normal case" handler for client mmap. But it doesn't call this function; instead it does the relevant notifications itself. Here, we just pass di_handle=0 to notify_tool_of_mmap as we have no better information. But really this function should be done away with; problem is I don't understand what POST(sys_io_setup) does or how it works. [However, this function is used lots for Darwin, because ML_(generic_PRE_sys_mmap) cannot be used for Darwin.] */ void ML_(notify_core_and_tool_of_mmap) ( Addr a, SizeT len, UInt prot, UInt flags, Int fd, Off64T offset ) { // XXX: unlike the other notify_core_and_tool* functions, this one doesn't // do anything with debug info (ie. it doesn't call VG_(di_notify_mmap)). // Should it? --njn notify_core_of_mmap(a, len, prot, flags, fd, offset); notify_tool_of_mmap(a, len, prot, 0/*di_handle*/); } void ML_(notify_core_and_tool_of_munmap) ( Addr a, SizeT len ) { Bool d; page_align_addr_and_len(&a, &len); d = VG_(am_notify_munmap)(a, len); VG_TRACK( die_mem_munmap, a, len ); VG_(di_notify_munmap)( a, len ); if (d) VG_(discard_translations)( a, (ULong)len, "ML_(notify_core_and_tool_of_munmap)" ); } void ML_(notify_core_and_tool_of_mprotect) ( Addr a, SizeT len, Int prot ) { Bool rr = toBool(prot & VKI_PROT_READ); Bool ww = toBool(prot & VKI_PROT_WRITE); Bool xx = toBool(prot & VKI_PROT_EXEC); Bool d; page_align_addr_and_len(&a, &len); d = VG_(am_notify_mprotect)(a, len, prot); VG_TRACK( change_mem_mprotect, a, len, rr, ww, xx ); VG_(di_notify_mprotect)( a, len, prot ); if (d) VG_(discard_translations)( a, (ULong)len, "ML_(notify_core_and_tool_of_mprotect)" ); } #if HAVE_MREMAP /* Expand (or shrink) an existing mapping, potentially moving it at the same time (controlled by the MREMAP_MAYMOVE flag). Nightmare. */ static SysRes do_mremap( Addr old_addr, SizeT old_len, Addr new_addr, SizeT new_len, UWord flags, ThreadId tid ) { # define MIN_SIZET(_aa,_bb) (_aa) < (_bb) ? 
(_aa) : (_bb) Bool ok, d; NSegment const* old_seg; Addr advised; Bool f_fixed = toBool(flags & VKI_MREMAP_FIXED); Bool f_maymove = toBool(flags & VKI_MREMAP_MAYMOVE); if (0) VG_(printf)("do_remap (old %#lx %lu) (new %#lx %lu) %s %s\n", old_addr,old_len,new_addr,new_len, flags & VKI_MREMAP_MAYMOVE ? "MAYMOVE" : "", flags & VKI_MREMAP_FIXED ? "FIXED" : ""); if (0) VG_(am_show_nsegments)(0, "do_remap: before"); if (flags & ~(VKI_MREMAP_FIXED | VKI_MREMAP_MAYMOVE)) goto eINVAL; if (!VG_IS_PAGE_ALIGNED(old_addr)) goto eINVAL; old_len = VG_PGROUNDUP(old_len); new_len = VG_PGROUNDUP(new_len); if (new_len == 0) goto eINVAL; /* kernel doesn't reject this, but we do. */ if (old_len == 0) goto eINVAL; /* reject wraparounds */ if (old_addr + old_len < old_addr) goto eINVAL; if (f_fixed == True && new_addr + new_len < new_len) goto eINVAL; /* kernel rejects all fixed, no-move requests (which are meaningless). */ if (f_fixed == True && f_maymove == False) goto eINVAL; /* Stay away from non-client areas. */ if (!ML_(valid_client_addr)(old_addr, old_len, tid, "mremap(old_addr)")) goto eINVAL; /* In all remaining cases, if the old range does not fall within a single segment, fail. 
*/ old_seg = VG_(am_find_nsegment)( old_addr ); if (old_addr < old_seg->start || old_addr+old_len-1 > old_seg->end) goto eINVAL; if (old_seg->kind != SkAnonC && old_seg->kind != SkFileC && old_seg->kind != SkShmC) goto eINVAL; vg_assert(old_len > 0); vg_assert(new_len > 0); vg_assert(VG_IS_PAGE_ALIGNED(old_len)); vg_assert(VG_IS_PAGE_ALIGNED(new_len)); vg_assert(VG_IS_PAGE_ALIGNED(old_addr)); /* There are 3 remaining cases: * maymove == False new space has to be at old address, so: - shrink -> unmap end - same size -> do nothing - grow -> if can grow in-place, do so, else fail * maymove == True, fixed == False new space can be anywhere, so: - shrink -> unmap end - same size -> do nothing - grow -> if can grow in-place, do so, else move to anywhere large enough, else fail * maymove == True, fixed == True new space must be at new address, so: - if new address is not page aligned, fail - if new address range overlaps old one, fail - if new address range cannot be allocated, fail - else move to new address range with new size - else fail */ if (f_maymove == False) { /* new space has to be at old address */ if (new_len < old_len) goto shrink_in_place; if (new_len > old_len) goto grow_in_place_or_fail; goto same_in_place; } if (f_maymove == True && f_fixed == False) { /* new space can be anywhere */ if (new_len < old_len) goto shrink_in_place; if (new_len > old_len) goto grow_in_place_or_move_anywhere_or_fail; goto same_in_place; } if (f_maymove == True && f_fixed == True) { /* new space can only be at the new address */ if (!VG_IS_PAGE_ALIGNED(new_addr)) goto eINVAL; if (new_addr+new_len-1 < old_addr || new_addr > old_addr+old_len-1) { /* no overlap */ } else { goto eINVAL; } if (new_addr == 0) goto eINVAL; /* VG_(am_get_advisory_client_simple) interprets zero to mean non-fixed, which is not what we want */ advised = VG_(am_get_advisory_client_simple)(new_addr, new_len, &ok); if (!ok || advised != new_addr) goto eNOMEM; ok = VG_(am_relocate_nooverlap_client) ( &d, 
old_addr, old_len, new_addr, new_len ); if (ok) { VG_TRACK( copy_mem_remap, old_addr, new_addr, MIN_SIZET(old_len,new_len) ); if (new_len > old_len) VG_TRACK( new_mem_mmap, new_addr+old_len, new_len-old_len, old_seg->hasR, old_seg->hasW, old_seg->hasX, 0/*di_handle*/ ); VG_TRACK(die_mem_munmap, old_addr, old_len); if (d) { VG_(discard_translations)( old_addr, old_len, "do_remap(1)" ); VG_(discard_translations)( new_addr, new_len, "do_remap(2)" ); } return VG_(mk_SysRes_Success)( new_addr ); } goto eNOMEM; } /* end of the 3 cases */ /*NOTREACHED*/ vg_assert(0); grow_in_place_or_move_anywhere_or_fail: { /* try growing it in-place */ Addr needA = old_addr + old_len; SSizeT needL = new_len - old_len; vg_assert(needL > 0); vg_assert(needA > 0); advised = VG_(am_get_advisory_client_simple)( needA, needL, &ok ); if (ok) { /* Fixes bug #129866. */ ok = VG_(am_covered_by_single_free_segment) ( needA, needL ); } if (ok && advised == needA) { const NSegment *new_seg = VG_(am_extend_map_client)( old_addr, needL ); if (new_seg) { VG_TRACK( new_mem_mmap, needA, needL, new_seg->hasR, new_seg->hasW, new_seg->hasX, 0/*di_handle*/ ); return VG_(mk_SysRes_Success)( old_addr ); } } /* that failed. Look elsewhere. 
*/ advised = VG_(am_get_advisory_client_simple)( 0, new_len, &ok ); if (ok) { Bool oldR = old_seg->hasR; Bool oldW = old_seg->hasW; Bool oldX = old_seg->hasX; /* assert new area does not overlap old */ vg_assert(advised+new_len-1 < old_addr || advised > old_addr+old_len-1); ok = VG_(am_relocate_nooverlap_client) ( &d, old_addr, old_len, advised, new_len ); if (ok) { VG_TRACK( copy_mem_remap, old_addr, advised, MIN_SIZET(old_len,new_len) ); if (new_len > old_len) VG_TRACK( new_mem_mmap, advised+old_len, new_len-old_len, oldR, oldW, oldX, 0/*di_handle*/ ); VG_TRACK(die_mem_munmap, old_addr, old_len); if (d) { VG_(discard_translations)( old_addr, old_len, "do_remap(4)" ); VG_(discard_translations)( advised, new_len, "do_remap(5)" ); } return VG_(mk_SysRes_Success)( advised ); } } goto eNOMEM; } /*NOTREACHED*/ vg_assert(0); grow_in_place_or_fail: { Addr needA = old_addr + old_len; SizeT needL = new_len - old_len; vg_assert(needA > 0); advised = VG_(am_get_advisory_client_simple)( needA, needL, &ok ); if (ok) { /* Fixes bug #129866. 
*/ ok = VG_(am_covered_by_single_free_segment) ( needA, needL ); } if (!ok || advised != needA) goto eNOMEM; const NSegment *new_seg = VG_(am_extend_map_client)( old_addr, needL ); if (!new_seg) goto eNOMEM; VG_TRACK( new_mem_mmap, needA, needL, new_seg->hasR, new_seg->hasW, new_seg->hasX, 0/*di_handle*/ ); return VG_(mk_SysRes_Success)( old_addr ); } /*NOTREACHED*/ vg_assert(0); shrink_in_place: { SysRes sres = VG_(am_munmap_client)( &d, old_addr+new_len, old_len-new_len ); if (sr_isError(sres)) return sres; VG_TRACK( die_mem_munmap, old_addr+new_len, old_len-new_len ); if (d) VG_(discard_translations)( old_addr+new_len, old_len-new_len, "do_remap(7)" ); return VG_(mk_SysRes_Success)( old_addr ); } /*NOTREACHED*/ vg_assert(0); same_in_place: return VG_(mk_SysRes_Success)( old_addr ); /*NOTREACHED*/ vg_assert(0); eINVAL: return VG_(mk_SysRes_Error)( VKI_EINVAL ); eNOMEM: return VG_(mk_SysRes_Error)( VKI_ENOMEM ); # undef MIN_SIZET } #endif /* HAVE_MREMAP */ /* --------------------------------------------------------------------- File-descriptor tracking ------------------------------------------------------------------ */ /* One of these is allocated for each open file descriptor. */ typedef struct OpenFd { Int fd; /* The file descriptor */ HChar *pathname; /* NULL if not a regular file or unknown */ ExeContext *where; /* NULL if inherited from parent */ struct OpenFd *next, *prev; } OpenFd; /* List of allocated file descriptors. */ static OpenFd *allocated_fds = NULL; /* Count of open file descriptors. */ static Int fd_count = 0; /* Note the fact that a file descriptor was just closed. 
*/
/* Remove 'fd' from the allocated_fds list, if present, unlinking its
   node and freeing the node and its pathname copy.  Descriptors at or
   above the hard limit belong to Valgrind itself and are ignored. */
void ML_(record_fd_close)(Int fd)
{
   OpenFd *i = allocated_fds;

   if (fd >= VG_(fd_hard_limit))
      return; /* Valgrind internal */

   while(i) {
      if(i->fd == fd) {
         /* Unlink from the doubly-linked list, fixing up the list head
            when the node is first. */
         if(i->prev)
            i->prev->next = i->next;
         else
            allocated_fds = i->next;
         if(i->next)
            i->next->prev = i->prev;
         if(i->pathname)
            VG_(free) (i->pathname);
         VG_(free) (i);
         fd_count--;
         break;
      }
      i = i->next;
   }
}

/* Note the fact that a file descriptor was just opened.  If the
   tid is -1, this indicates an inherited fd.  If the pathname is NULL,
   this either indicates a non-standard file (i.e. a pipe or socket or
   some such thing) or that we don't know the filename.  If the fd is
   already open, then we're probably doing a dup2() to an existing fd,
   so just overwrite the existing one. */
void ML_(record_fd_open_with_given_name)(ThreadId tid, Int fd,
                                         const HChar *pathname)
{
   OpenFd *i;

   if (fd >= VG_(fd_hard_limit))
      return; /* Valgrind internal */

   /* Check to see if this fd is already open (dup2 case): reuse the
      node, dropping its old pathname so it can be replaced below. */
   i = allocated_fds;

   while (i) {
      if (i->fd == fd) {
         if (i->pathname) VG_(free)(i->pathname);
         break;
      }
      i = i->next;
   }

   /* Not already one: allocate an OpenFd and push it on the list head. */
   if (i == NULL) {
      i = VG_(malloc)("syswrap.rfdowgn.1", sizeof(OpenFd));

      i->prev = NULL;
      i->next = allocated_fds;
      if(allocated_fds) allocated_fds->prev = i;
      allocated_fds = i;
      fd_count++;
   }

   /* The pathname is copied; a NULL tid means "inherited", recorded as
      a NULL ExeContext. */
   i->fd = fd;
   i->pathname = VG_(strdup)("syswrap.rfdowgn.2", pathname);
   i->where = (tid == -1) ? NULL
                          : VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
}

// Record opening of an fd, and find its name.
void ML_(record_fd_open_named)(ThreadId tid, Int fd)
{
   const HChar* buf;
   const HChar* name;
   if (VG_(resolve_filename)(fd, &buf))
      name = buf;
   else
      name = NULL;
   ML_(record_fd_open_with_given_name)(tid, fd, name);
}

// Record opening of a nameless fd.
void ML_(record_fd_open_nameless)(ThreadId tid, Int fd)
{
   ML_(record_fd_open_with_given_name)(tid, fd, NULL);
}

// Return if a given file descriptor is already recorded.
Bool ML_(fd_recorded)(Int fd)
{
   /* Linear scan of the tracked-fd list. */
   OpenFd *i = allocated_fds;

   while (i) {
      if (i->fd == fd)
         return True;
      i = i->next;
   }
   return False;
}

/* Returned string must not be modified nor free'd.  Returns NULL when
   the fd is not recorded, or when it was recorded without a pathname. */
const HChar *ML_(find_fd_recorded_by_fd)(Int fd)
{
   OpenFd *i = allocated_fds;

   while (i) {
      if (i->fd == fd)
         return i->pathname;
      i = i->next;
   }

   return NULL;
}

/* Format an AF_UNIX address into the caller-supplied buffer 'name'
   (unbounded sprintf - caller must provide a large-enough buffer)
   and return 'name'. */
static HChar *unix_to_name(struct vki_sockaddr_un *sa, UInt len, HChar *name)
{
   if (sa == NULL || len == 0 || sa->sun_path[0] == '\0') {
      VG_(sprintf)(name, "<unknown>");
   } else {
      VG_(sprintf)(name, "%s", sa->sun_path);
   }

   return name;
}

/* Format an AF_INET address as "a.b.c.d:port" into 'name' and return
   it; "<unknown>" / "<unbound>" for missing or portless addresses. */
static HChar *inet_to_name(struct vki_sockaddr_in *sa, UInt len, HChar *name)
{
   if (sa == NULL || len == 0) {
      VG_(sprintf)(name, "<unknown>");
   } else if (sa->sin_port == 0) {
      VG_(sprintf)(name, "<unbound>");
   } else {
      UInt addr = VG_(ntohl)(sa->sin_addr.s_addr);
      VG_(sprintf)(name, "%u.%u.%u.%u:%u",
                   (addr>>24) & 0xFF, (addr>>16) & 0xFF,
                   (addr>>8) & 0xFF, addr & 0xFF,
                   VG_(ntohs)(sa->sin_port));
   }

   return name;
}

/* Render a 16-byte IPv6 address as text into 's': v4-mapped addresses
   as "::ffff:a.b.c.d", otherwise hex 16-bit groups separated by ':',
   with only the first run of zero groups compressed to "::". */
static void inet6_format(HChar *s, const UChar ip[16])
{
   static const unsigned char V4mappedprefix[12]
      = {0,0,0,0,0,0,0,0,0,0,0xff,0xff};

   if (!VG_(memcmp)(ip, V4mappedprefix, 12)) {
      const struct vki_in_addr *sin_addr =
         (const struct vki_in_addr *)(ip + 12);
      UInt addr = VG_(ntohl)(sin_addr->s_addr);

      VG_(sprintf)(s, "::ffff:%u.%u.%u.%u",
                   (addr>>24) & 0xFF, (addr>>16) & 0xFF,
                   (addr>>8) & 0xFF, addr & 0xFF);
   } else {
      Bool compressing = False;   /* inside a run of zero groups */
      Bool compressed = False;    /* a "::" has already been emitted */
      Int len = 0;
      Int i;

      for (i = 0; i < 16; i += 2) {
         UInt word = ((UInt)ip[i] << 8) | (UInt)ip[i+1];
         if (word == 0 && !compressed) {
            compressing = True;
         } else {
            if (compressing) {
               compressing = False;
               compressed = True;
               s[len++] = ':';
            }
            if (i > 0) {
               s[len++] = ':';
            }
            len += VG_(sprintf)(s + len, "%x", word);
         }
      }

      /* Address ended inside a zero run: close with "::". */
      if (compressing) {
         s[len++] = ':';
         s[len++] = ':';
      }

      s[len++] = 0;
   }

   return;
}

/* Format an AF_INET6 address as "[addr]:port" into 'name' and return
   it; "<unknown>" / "<unbound>" for missing or portless addresses. */
static HChar *inet6_to_name(struct vki_sockaddr_in6 *sa, UInt len, HChar *name)
{
   if (sa == NULL || len == 0) {
      VG_(sprintf)(name, "<unknown>");
   }
   else if (sa->sin6_port == 0) {
      VG_(sprintf)(name, "<unbound>");
   } else {
      HChar addr[100];    // large enough
      inet6_format(addr, (void *)&(sa->sin6_addr));
      VG_(sprintf)(name, "[%s]:%u", addr, VG_(ntohs)(sa->sin6_port));
   }

   return name;
}

/*
 * Try get some details about a socket.  Reports, via VG_(message), the
 * socket's address family plus its local and (where getpeername
 * succeeds) peer addresses.
 */
static void getsockdetails(Int fd)
{
   union u {
      struct vki_sockaddr a;
      struct vki_sockaddr_in in;
      struct vki_sockaddr_in6 in6;
      struct vki_sockaddr_un un;
   } laddr;
   Int llen;

   llen = sizeof(laddr);
   VG_(memset)(&laddr, 0, llen);

   if(VG_(getsockname)(fd, (struct vki_sockaddr *)&(laddr.a), &llen) != -1) {
      switch(laddr.a.sa_family) {
      case VKI_AF_INET: {
         HChar lname[32];   // large enough
         HChar pname[32];   // large enough
         struct vki_sockaddr_in paddr;
         Int plen = sizeof(struct vki_sockaddr_in);

         if (VG_(getpeername)(fd, (struct vki_sockaddr *)&paddr, &plen)
             != -1) {
            VG_(message)(Vg_UserMsg, "Open AF_INET socket %d: %s <-> %s\n", fd,
                         inet_to_name(&(laddr.in), llen, lname),
                         inet_to_name(&paddr, plen, pname));
         } else {
            VG_(message)(Vg_UserMsg, "Open AF_INET socket %d: %s <-> unbound\n",
                         fd, inet_to_name(&(laddr.in), llen, lname));
         }
         return;
         }
      case VKI_AF_INET6: {
         HChar lname[128];  // large enough
         HChar pname[128];  // large enough
         struct vki_sockaddr_in6 paddr;
         Int plen = sizeof(struct vki_sockaddr_in6);

         if (VG_(getpeername)(fd, (struct vki_sockaddr *)&paddr, &plen)
             != -1) {
            VG_(message)(Vg_UserMsg, "Open AF_INET6 socket %d: %s <-> %s\n", fd,
                         inet6_to_name(&(laddr.in6), llen, lname),
                         inet6_to_name(&paddr, plen, pname));
         } else {
            VG_(message)(Vg_UserMsg,
                         "Open AF_INET6 socket %d: %s <-> unbound\n",
                         fd, inet6_to_name(&(laddr.in6), llen, lname));
         }
         return;
         }
      case VKI_AF_UNIX: {
         static char lname[256];
         VG_(message)(Vg_UserMsg, "Open AF_UNIX socket %d: %s\n", fd,
                      unix_to_name(&(laddr.un), llen, lname));
         return;
         }
      default:
         VG_(message)(Vg_UserMsg, "Open pf-%d socket %d:\n",
                      laddr.a.sa_family, fd);
         return;
      }
   }

   VG_(message)(Vg_UserMsg, "Open socket %d:\n", fd);
}


/* Dump out a summary, and a more detailed list, of open
file descriptors. */ void VG_(show_open_fds) (const HChar* when) { OpenFd *i = allocated_fds; VG_(message)(Vg_UserMsg, "FILE DESCRIPTORS: %d open %s.\n", fd_count, when); while (i) { if (i->pathname) { VG_(message)(Vg_UserMsg, "Open file descriptor %d: %s\n", i->fd, i->pathname); } else { Int val; Int len = sizeof(val); if (VG_(getsockopt)(i->fd, VKI_SOL_SOCKET, VKI_SO_TYPE, &val, &len) == -1) { VG_(message)(Vg_UserMsg, "Open file descriptor %d:\n", i->fd); } else { getsockdetails(i->fd); } } if(i->where) { VG_(pp_ExeContext)(i->where); VG_(message)(Vg_UserMsg, "\n"); } else { VG_(message)(Vg_UserMsg, " <inherited from parent>\n"); VG_(message)(Vg_UserMsg, "\n"); } i = i->next; } VG_(message)(Vg_UserMsg, "\n"); } /* If /proc/self/fd doesn't exist (e.g. you've got a Linux kernel that doesn't have /proc support compiled in, or a non-Linux kernel), then we need to find out what file descriptors we inherited from our parent process the hard way - by checking each fd in turn. */ static void init_preopened_fds_without_proc_self_fd(void) { struct vki_rlimit lim; UInt count; Int i; if (VG_(getrlimit) (VKI_RLIMIT_NOFILE, &lim) == -1) { /* Hmm. getrlimit() failed. Now we're screwed, so just choose an arbitrarily high number. 1024 happens to be the limit in the 2.4 Linux kernels. */ count = 1024; } else { count = lim.rlim_cur; } for (i = 0; i < count; i++) if (VG_(fcntl)(i, VKI_F_GETFL, 0) != -1) ML_(record_fd_open_named)(-1, i); } /* Initialize the list of open file descriptors with the file descriptors we inherited from out parent process. */ void VG_(init_preopened_fds)(void) { // DDD: should probably use HAVE_PROC here or similar, instead. 
#if defined(VGO_linux) Int ret; struct vki_dirent64 d; SysRes f; f = VG_(open)("/proc/self/fd", VKI_O_RDONLY, 0); if (sr_isError(f)) { init_preopened_fds_without_proc_self_fd(); return; } while ((ret = VG_(getdents64)(sr_Res(f), &d, sizeof(d))) != 0) { if (ret == -1) goto out; if (VG_(strcmp)(d.d_name, ".") && VG_(strcmp)(d.d_name, "..")) { HChar* s; Int fno = VG_(strtoll10)(d.d_name, &s); if (*s == '\0') { if (fno != sr_Res(f)) if (VG_(clo_track_fds)) ML_(record_fd_open_named)(-1, fno); } else { VG_(message)(Vg_DebugMsg, "Warning: invalid file name in /proc/self/fd: %s\n", d.d_name); } } VG_(lseek)(sr_Res(f), d.d_off, VKI_SEEK_SET); } out: VG_(close)(sr_Res(f)); #elif defined(VGO_darwin) init_preopened_fds_without_proc_self_fd(); #elif defined(VGO_solaris) Int ret; Char buf[VKI_MAXGETDENTS_SIZE]; SysRes f; f = VG_(open)("/proc/self/fd", VKI_O_RDONLY, 0); if (sr_isError(f)) { init_preopened_fds_without_proc_self_fd(); return; } while ((ret = VG_(getdents64)(sr_Res(f), (struct vki_dirent64 *) buf, sizeof(buf))) > 0) { Int i = 0; while (i < ret) { /* Proceed one entry. */ struct vki_dirent64 *d = (struct vki_dirent64 *) (buf + i); if (VG_(strcmp)(d->d_name, ".") && VG_(strcmp)(d->d_name, "..")) { HChar *s; Int fno = VG_(strtoll10)(d->d_name, &s); if (*s == '\0') { if (fno != sr_Res(f)) if (VG_(clo_track_fds)) ML_(record_fd_open_named)(-1, fno); } else { VG_(message)(Vg_DebugMsg, "Warning: invalid file name in /proc/self/fd: %s\n", d->d_name); } } /* Move on the next entry. 
*/ i += d->d_reclen; } } VG_(close)(sr_Res(f)); #else # error Unknown OS #endif } static HChar *strdupcat ( const HChar* cc, const HChar *s1, const HChar *s2, ArenaId aid ) { UInt len = VG_(strlen) ( s1 ) + VG_(strlen) ( s2 ) + 1; HChar *result = VG_(arena_malloc) ( aid, cc, len ); VG_(strcpy) ( result, s1 ); VG_(strcat) ( result, s2 ); return result; } static void pre_mem_read_sendmsg ( ThreadId tid, Bool read, const HChar *msg, Addr base, SizeT size ) { HChar *outmsg = strdupcat ( "di.syswrap.pmrs.1", "sendmsg", msg, VG_AR_CORE ); PRE_MEM_READ( outmsg, base, size ); VG_(free) ( outmsg ); } static void pre_mem_write_recvmsg ( ThreadId tid, Bool read, const HChar *msg, Addr base, SizeT size ) { HChar *outmsg = strdupcat ( "di.syswrap.pmwr.1", "recvmsg", msg, VG_AR_CORE ); if ( read ) PRE_MEM_READ( outmsg, base, size ); else PRE_MEM_WRITE( outmsg, base, size ); VG_(free) ( outmsg ); } static void post_mem_write_recvmsg ( ThreadId tid, Bool read, const HChar *fieldName, Addr base, SizeT size ) { if ( !read ) POST_MEM_WRITE( base, size ); } static void msghdr_foreachfield ( ThreadId tid, const HChar *name, struct vki_msghdr *msg, UInt length, void (*foreach_func)( ThreadId, Bool, const HChar *, Addr, SizeT ), Bool rekv /* "recv" apparently shadows some header decl on OSX108 */ ) { HChar *fieldName; if ( !msg ) return; fieldName = VG_(malloc) ( "di.syswrap.mfef", VG_(strlen)(name) + 32 ); VG_(sprintf) ( fieldName, "(%s)", name ); foreach_func ( tid, True, fieldName, (Addr)&msg->msg_name, sizeof( msg->msg_name ) ); foreach_func ( tid, True, fieldName, (Addr)&msg->msg_namelen, sizeof( msg->msg_namelen ) ); foreach_func ( tid, True, fieldName, (Addr)&msg->msg_iov, sizeof( msg->msg_iov ) ); foreach_func ( tid, True, fieldName, (Addr)&msg->msg_iovlen, sizeof( msg->msg_iovlen ) ); foreach_func ( tid, True, fieldName, (Addr)&msg->msg_control, sizeof( msg->msg_control ) ); foreach_func ( tid, True, fieldName, (Addr)&msg->msg_controllen, sizeof( msg->msg_controllen ) ); /* 
msg_flags is completely ignored for send_mesg, recv_mesg doesn't read the field, but does write to it. */ if ( rekv ) foreach_func ( tid, False, fieldName, (Addr)&msg->msg_flags, sizeof( msg->msg_flags ) ); if ( ML_(safe_to_deref)(&msg->msg_name, sizeof (void *)) && msg->msg_name ) { VG_(sprintf) ( fieldName, "(%s.msg_name)", name ); foreach_func ( tid, False, fieldName, (Addr)msg->msg_name, msg->msg_namelen ); } if ( ML_(safe_to_deref)(&msg->msg_iov, sizeof (void *)) && msg->msg_iov ) { struct vki_iovec *iov = msg->msg_iov; UInt i; VG_(sprintf) ( fieldName, "(%s.msg_iov)", name ); foreach_func ( tid, True, fieldName, (Addr)iov, msg->msg_iovlen * sizeof( struct vki_iovec ) ); for ( i = 0; i < msg->msg_iovlen; ++i, ++iov ) { UInt iov_len = iov->iov_len <= length ? iov->iov_len : length; VG_(sprintf) ( fieldName, "(%s.msg_iov[%u])", name, i ); foreach_func ( tid, False, fieldName, (Addr)iov->iov_base, iov_len ); length = length - iov_len; } } if ( ML_(safe_to_deref) (&msg->msg_control, sizeof (void *)) && msg->msg_control ) { VG_(sprintf) ( fieldName, "(%s.msg_control)", name ); foreach_func ( tid, False, fieldName, (Addr)msg->msg_control, msg->msg_controllen ); } VG_(free) ( fieldName ); } static void check_cmsg_for_fds(ThreadId tid, struct vki_msghdr *msg) { struct vki_cmsghdr *cm = VKI_CMSG_FIRSTHDR(msg); while (cm) { if (cm->cmsg_level == VKI_SOL_SOCKET && cm->cmsg_type == VKI_SCM_RIGHTS ) { Int *fds = (Int *) VKI_CMSG_DATA(cm); Int fdc = (cm->cmsg_len - VKI_CMSG_ALIGN(sizeof(struct vki_cmsghdr))) / sizeof(int); Int i; for (i = 0; i < fdc; i++) if(VG_(clo_track_fds)) // XXX: must we check the range on these fds with // ML_(fd_allowed)()? 
ML_(record_fd_open_named)(tid, fds[i]); } cm = VKI_CMSG_NXTHDR(msg, cm); } } /* GrP kernel ignores sa_len (at least on Darwin); this checks the rest */ static void pre_mem_read_sockaddr ( ThreadId tid, const HChar *description, struct vki_sockaddr *sa, UInt salen ) { HChar *outmsg; struct vki_sockaddr_un* saun = (struct vki_sockaddr_un *)sa; struct vki_sockaddr_in* sin = (struct vki_sockaddr_in *)sa; struct vki_sockaddr_in6* sin6 = (struct vki_sockaddr_in6 *)sa; # ifdef VKI_AF_BLUETOOTH struct vki_sockaddr_rc* rc = (struct vki_sockaddr_rc *)sa; # endif # ifdef VKI_AF_NETLINK struct vki_sockaddr_nl* nl = (struct vki_sockaddr_nl *)sa; # endif /* NULL/zero-length sockaddrs are legal */ if ( sa == NULL || salen == 0 ) return; outmsg = VG_(malloc) ( "di.syswrap.pmr_sockaddr.1", VG_(strlen)( description ) + 30 ); VG_(sprintf) ( outmsg, description, "sa_family" ); PRE_MEM_READ( outmsg, (Addr) &sa->sa_family, sizeof(vki_sa_family_t)); switch (sa->sa_family) { case VKI_AF_UNIX: VG_(sprintf) ( outmsg, description, "sun_path" ); PRE_MEM_RASCIIZ( outmsg, (Addr) saun->sun_path ); // GrP fixme max of sun_len-2? what about nul char? 
break; case VKI_AF_INET: VG_(sprintf) ( outmsg, description, "sin_port" ); PRE_MEM_READ( outmsg, (Addr) &sin->sin_port, sizeof (sin->sin_port) ); VG_(sprintf) ( outmsg, description, "sin_addr" ); PRE_MEM_READ( outmsg, (Addr) &sin->sin_addr, sizeof (sin->sin_addr) ); break; case VKI_AF_INET6: VG_(sprintf) ( outmsg, description, "sin6_port" ); PRE_MEM_READ( outmsg, (Addr) &sin6->sin6_port, sizeof (sin6->sin6_port) ); VG_(sprintf) ( outmsg, description, "sin6_flowinfo" ); PRE_MEM_READ( outmsg, (Addr) &sin6->sin6_flowinfo, sizeof (sin6->sin6_flowinfo) ); VG_(sprintf) ( outmsg, description, "sin6_addr" ); PRE_MEM_READ( outmsg, (Addr) &sin6->sin6_addr, sizeof (sin6->sin6_addr) ); VG_(sprintf) ( outmsg, description, "sin6_scope_id" ); PRE_MEM_READ( outmsg, (Addr) &sin6->sin6_scope_id, sizeof (sin6->sin6_scope_id) ); break; # ifdef VKI_AF_BLUETOOTH case VKI_AF_BLUETOOTH: VG_(sprintf) ( outmsg, description, "rc_bdaddr" ); PRE_MEM_READ( outmsg, (Addr) &rc->rc_bdaddr, sizeof (rc->rc_bdaddr) ); VG_(sprintf) ( outmsg, description, "rc_channel" ); PRE_MEM_READ( outmsg, (Addr) &rc->rc_channel, sizeof (rc->rc_channel) ); break; # endif # ifdef VKI_AF_NETLINK case VKI_AF_NETLINK: VG_(sprintf)(outmsg, description, "nl_pid"); PRE_MEM_READ(outmsg, (Addr)&nl->nl_pid, sizeof(nl->nl_pid)); VG_(sprintf)(outmsg, description, "nl_groups"); PRE_MEM_READ(outmsg, (Addr)&nl->nl_groups, sizeof(nl->nl_groups)); break; # endif # ifdef VKI_AF_UNSPEC case VKI_AF_UNSPEC: break; # endif default: /* No specific information about this address family. Let's just check the full data following the family. Note that this can give false positive if this (unknown) struct sockaddr_???? has padding bytes between its elements. */ VG_(sprintf) ( outmsg, description, "sa_data" ); PRE_MEM_READ( outmsg, (Addr)&sa->sa_family + sizeof(sa->sa_family), salen - sizeof(sa->sa_family)); break; } VG_(free) ( outmsg ); } /* Dereference a pointer to a UInt. 
*/ static UInt deref_UInt ( ThreadId tid, Addr a, const HChar* s ) { UInt* a_p = (UInt*)a; PRE_MEM_READ( s, (Addr)a_p, sizeof(UInt) ); if (a_p == NULL) return 0; else return *a_p; } void ML_(buf_and_len_pre_check) ( ThreadId tid, Addr buf_p, Addr buflen_p, const HChar* buf_s, const HChar* buflen_s ) { if (VG_(tdict).track_pre_mem_write) { UInt buflen_in = deref_UInt( tid, buflen_p, buflen_s); if (buflen_in > 0) { VG_(tdict).track_pre_mem_write( Vg_CoreSysCall, tid, buf_s, buf_p, buflen_in ); } } } void ML_(buf_and_len_post_check) ( ThreadId tid, SysRes res, Addr buf_p, Addr buflen_p, const HChar* s ) { if (!sr_isError(res) && VG_(tdict).track_post_mem_write) { UInt buflen_out = deref_UInt( tid, buflen_p, s); if (buflen_out > 0 && buf_p != (Addr)NULL) { VG_(tdict).track_post_mem_write( Vg_CoreSysCall, tid, buf_p, buflen_out ); } } } /* --------------------------------------------------------------------- Data seg end, for brk() ------------------------------------------------------------------ */ /* +--------+------------+ | anon | resvn | +--------+------------+ ^ ^ ^ | | boundary is page aligned | VG_(brk_limit) -- no alignment constraint VG_(brk_base) -- page aligned -- does not move Both the anon part and the reservation part are always at least one page. */ /* Set the new data segment end to NEWBRK. If this succeeds, return NEWBRK, else return the current data segment end. */ static Addr do_brk ( Addr newbrk, ThreadId tid ) { NSegment const* aseg; Addr newbrkP; SizeT delta; Bool debug = False; if (debug) VG_(printf)("\ndo_brk: brk_base=%#lx brk_limit=%#lx newbrk=%#lx\n", VG_(brk_base), VG_(brk_limit), newbrk); if (0) VG_(am_show_nsegments)(0, "in_brk"); if (newbrk < VG_(brk_base)) /* Clearly impossible. */ goto bad; if (newbrk < VG_(brk_limit)) { /* shrinking the data segment. Be lazy and don't munmap the excess area. 
*/ NSegment const * seg = VG_(am_find_nsegment)(newbrk); vg_assert(seg); if (seg->hasT) VG_(discard_translations)( newbrk, VG_(brk_limit) - newbrk, "do_brk(shrink)" ); /* Since we're being lazy and not unmapping pages, we have to zero out the area, so that if the area later comes back into circulation, it will be filled with zeroes, as if it really had been unmapped and later remapped. Be a bit paranoid and try hard to ensure we're not going to segfault by doing the write - check both ends of the range are in the same segment and that segment is writable. */ NSegment const * seg2; seg2 = VG_(am_find_nsegment)( VG_(brk_limit) - 1 ); vg_assert(seg2); if (seg == seg2 && seg->hasW) VG_(memset)( (void*)newbrk, 0, VG_(brk_limit) - newbrk ); VG_(brk_limit) = newbrk; return newbrk; } /* otherwise we're expanding the brk segment. */ if (VG_(brk_limit) > VG_(brk_base)) aseg = VG_(am_find_nsegment)( VG_(brk_limit)-1 ); else aseg = VG_(am_find_nsegment)( VG_(brk_limit) ); /* These should be assured by setup_client_dataseg in m_main. */ vg_assert(aseg); vg_assert(aseg->kind == SkAnonC); if (newbrk <= aseg->end + 1) { /* still fits within the anon segment. */ VG_(brk_limit) = newbrk; return newbrk; } newbrkP = VG_PGROUNDUP(newbrk); delta = newbrkP - (aseg->end + 1); vg_assert(delta > 0); vg_assert(VG_IS_PAGE_ALIGNED(delta)); Bool overflow; if (! VG_(am_extend_into_adjacent_reservation_client)( aseg->start, delta, &overflow)) { if (overflow) VG_(umsg)("brk segment overflow in thread #%u: can't grow to %#lx\n", tid, newbrkP); else VG_(umsg)("Cannot map memory to grow brk segment in thread #%u " "to %#lx\n", tid, newbrkP); goto bad; } VG_(brk_limit) = newbrk; return newbrk; bad: return VG_(brk_limit); } /* --------------------------------------------------------------------- Vet file descriptors for sanity ------------------------------------------------------------------ */ /* > - what does the "Bool soft" parameter mean? 
(Tom Hughes, 3 Oct 05): Whether or not to consider a file descriptor invalid if it is above the current soft limit. Basically if we are testing whether a newly created file descriptor is valid (in a post handler) then we set soft to true, and if we are testing whether a file descriptor that is about to be used (in a pre handler) is valid [viz, an already-existing fd] then we set it to false. The point is that if the (virtual) soft limit is lowered then any existing descriptors can still be read/written/closed etc (so long as they are below the valgrind reserved descriptors) but no new descriptors can be created above the new soft limit. (jrs 4 Oct 05: in which case, I've renamed it "isNewFd") */ /* Return true if we're allowed to use or create this fd */ Bool ML_(fd_allowed)(Int fd, const HChar *syscallname, ThreadId tid, Bool isNewFd) { Bool allowed = True; /* hard limits always apply */ if (fd < 0 || fd >= VG_(fd_hard_limit)) allowed = False; /* hijacking the output fds is never allowed */ if (fd == VG_(log_output_sink).fd || fd == VG_(xml_output_sink).fd) allowed = False; /* if creating a new fd (rather than using an existing one), the soft limit must also be observed */ if (isNewFd && fd >= VG_(fd_soft_limit)) allowed = False; /* this looks like it ought to be included, but causes problems: */ /* if (fd == 2 && VG_(debugLog_getLevel)() > 0) allowed = False; */ /* The difficulty is as follows: consider a program P which expects to be able to mess with (redirect) its own stderr (fd 2). Usually to deal with P we would issue command line flags to send logging somewhere other than stderr, so as not to disrupt P. The problem is that -d unilaterally hijacks stderr with no consultation with P. And so, if this check is enabled, P will work OK normally but fail if -d is issued. Basically -d is a hack and you take your chances when using it. 
It's very useful for low level debugging -- particularly at startup -- and having its presence change the behaviour of the client is exactly what we don't want. */ /* croak? */ if ((!allowed) && VG_(showing_core_errors)() ) { VG_(message)(Vg_UserMsg, "Warning: invalid file descriptor %d in syscall %s()\n", fd, syscallname); if (fd == VG_(log_output_sink).fd && VG_(log_output_sink).fd >= 0) VG_(message)(Vg_UserMsg, " Use --log-fd=<number> to select an alternative log fd.\n"); if (fd == VG_(xml_output_sink).fd && VG_(xml_output_sink).fd >= 0) VG_(message)(Vg_UserMsg, " Use --xml-fd=<number> to select an alternative XML " "output fd.\n"); // DDD: consider always printing this stack trace, it's useful. // Also consider also making this a proper core error, ie. // suppressible and all that. if (VG_(clo_verbosity) > 1) { VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size)); } } return allowed; } /* --------------------------------------------------------------------- Deal with a bunch of socket-related syscalls ------------------------------------------------------------------ */ /* ------ */ void ML_(generic_PRE_sys_socketpair) ( ThreadId tid, UWord arg0, UWord arg1, UWord arg2, UWord arg3 ) { /* int socketpair(int d, int type, int protocol, int sv[2]); */ PRE_MEM_WRITE( "socketcall.socketpair(sv)", arg3, 2*sizeof(int) ); } SysRes ML_(generic_POST_sys_socketpair) ( ThreadId tid, SysRes res, UWord arg0, UWord arg1, UWord arg2, UWord arg3 ) { SysRes r = res; Int fd1 = ((Int*)arg3)[0]; Int fd2 = ((Int*)arg3)[1]; vg_assert(!sr_isError(res)); /* guaranteed by caller */ POST_MEM_WRITE( arg3, 2*sizeof(int) ); if (!ML_(fd_allowed)(fd1, "socketcall.socketpair", tid, True) || !ML_(fd_allowed)(fd2, "socketcall.socketpair", tid, True)) { VG_(close)(fd1); VG_(close)(fd2); r = VG_(mk_SysRes_Error)( VKI_EMFILE ); } else { POST_MEM_WRITE( arg3, 2*sizeof(int) ); if (VG_(clo_track_fds)) { ML_(record_fd_open_nameless)(tid, fd1); ML_(record_fd_open_nameless)(tid, fd2); } } return r; } 
/* ------ */

/* POST handler for socket(2): reject the new fd if it violates the fd
   policy (close it and report EMFILE); otherwise optionally record it
   for --track-fds. */
SysRes
ML_(generic_POST_sys_socket) ( ThreadId tid, SysRes res )
{
   SysRes r = res;
   vg_assert(!sr_isError(res)); /* guaranteed by caller */
   if (!ML_(fd_allowed)(sr_Res(res), "socket", tid, True)) {
      VG_(close)(sr_Res(res));
      r = VG_(mk_SysRes_Error)( VKI_EMFILE );
   } else {
      if (VG_(clo_track_fds))
         ML_(record_fd_open_nameless)(tid, sr_Res(res));
   }
   return r;
}

/* ------ */

/* PRE handler for bind(2): check readability of the sockaddr,
   field-by-field (handled by pre_mem_read_sockaddr). */
void
ML_(generic_PRE_sys_bind) ( ThreadId tid,
                            UWord arg0, UWord arg1, UWord arg2 )
{
   /* int bind(int sockfd, struct sockaddr *my_addr, int addrlen); */
   pre_mem_read_sockaddr(
      tid, "socketcall.bind(my_addr.%s)",
      (struct vki_sockaddr *) arg1, arg2 );
}

/* ------ */

/* PRE handler for accept(2): addr/addrlen are optional out-parameters,
   so only check them when addr is non-NULL. */
void
ML_(generic_PRE_sys_accept) ( ThreadId tid,
                              UWord arg0, UWord arg1, UWord arg2 )
{
   /* int accept(int s, struct sockaddr *addr, int *addrlen); */
   Addr addr_p    = arg1;
   Addr addrlen_p = arg2;
   if (addr_p != (Addr)NULL)
      ML_(buf_and_len_pre_check) ( tid, addr_p, addrlen_p,
                                   "socketcall.accept(addr)",
                                   "socketcall.accept(addrlen_in)" );
}

/* POST handler for accept(2): apply the fd policy to the accepted fd
   (EMFILE on rejection), then mark the returned address buffer as
   written. */
SysRes
ML_(generic_POST_sys_accept) ( ThreadId tid, SysRes res,
                               UWord arg0, UWord arg1, UWord arg2 )
{
   SysRes r = res;
   vg_assert(!sr_isError(res)); /* guaranteed by caller */
   if (!ML_(fd_allowed)(sr_Res(res), "accept", tid, True)) {
      VG_(close)(sr_Res(res));
      r = VG_(mk_SysRes_Error)( VKI_EMFILE );
   } else {
      Addr addr_p    = arg1;
      Addr addrlen_p = arg2;
      if (addr_p != (Addr)NULL)
         ML_(buf_and_len_post_check) ( tid, res, addr_p, addrlen_p,
                                       "socketcall.accept(addrlen_out)" );
      if (VG_(clo_track_fds))
          ML_(record_fd_open_nameless)(tid, sr_Res(res));
   }
   return r;
}

/* ------ */

/* PRE handler for sendto(2): the message buffer and the destination
   sockaddr are both read by the kernel. */
void
ML_(generic_PRE_sys_sendto) ( ThreadId tid,
                              UWord arg0, UWord arg1, UWord arg2,
                              UWord arg3, UWord arg4, UWord arg5 )
{
   /* int sendto(int s, const void *msg, int len,
                 unsigned int flags,
                 const struct sockaddr *to, int tolen); */
   PRE_MEM_READ( "socketcall.sendto(msg)",
                 arg1, /* msg */
                 arg2  /* len */ );
   pre_mem_read_sockaddr(
      tid, "socketcall.sendto(to.%s)",
      (struct vki_sockaddr *) arg4, arg5 );
}

/* ------ */

void
/* PRE handler for send(2): only the outgoing buffer needs checking. */
ML_(generic_PRE_sys_send) ( ThreadId tid,
                            UWord arg0, UWord arg1, UWord arg2 )
{
   /* int send(int s, const void *msg, size_t len, int flags); */
   PRE_MEM_READ( "socketcall.send(msg)",
                 arg1, /* msg */
                 arg2  /* len */ );

}

/* ------ */

/* PRE handler for recvfrom(2): buf is written by the kernel; from /
   fromlen are optional out-parameters, checked only when from is
   non-NULL. */
void
ML_(generic_PRE_sys_recvfrom) ( ThreadId tid,
                                UWord arg0, UWord arg1, UWord arg2,
                                UWord arg3, UWord arg4, UWord arg5 )
{
   /* int recvfrom(int s, void *buf, int len, unsigned int flags,
                   struct sockaddr *from, int *fromlen); */
   Addr buf_p     = arg1;
   Int  len       = arg2;
   Addr from_p    = arg4;
   Addr fromlen_p = arg5;
   PRE_MEM_WRITE( "socketcall.recvfrom(buf)", buf_p, len );
   if (from_p != (Addr)NULL)
      ML_(buf_and_len_pre_check) ( tid, from_p, fromlen_p,
                                   "socketcall.recvfrom(from)",
                                   "socketcall.recvfrom(fromlen_in)" );
}

/* POST handler for recvfrom(2): mark the source address (if requested)
   and the whole receive buffer as written.
   NOTE(review): the full 'len' is marked written rather than just the
   number of bytes actually received -- confirm this is intentional
   against upstream. */
void
ML_(generic_POST_sys_recvfrom) ( ThreadId tid, SysRes res,
                                 UWord arg0, UWord arg1, UWord arg2,
                                 UWord arg3, UWord arg4, UWord arg5 )
{
   Addr buf_p     = arg1;
   Int  len       = arg2;
   Addr from_p    = arg4;
   Addr fromlen_p = arg5;

   vg_assert(!sr_isError(res)); /* guaranteed by caller */
   if (from_p != (Addr)NULL)
      ML_(buf_and_len_post_check) ( tid, res, from_p, fromlen_p,
                                    "socketcall.recvfrom(fromlen_out)" );
   POST_MEM_WRITE( buf_p, len );
}

/* ------ */

/* PRE handler for recv(2): like recvfrom with a NULL source address. */
void
ML_(generic_PRE_sys_recv) ( ThreadId tid,
                            UWord arg0, UWord arg1, UWord arg2 )
{
   /* int recv(int s, void *buf, int len, unsigned int flags); */
   /* man 2 recv says:
      The recv call is normally used only on a connected socket
      (see connect(2)) and is identical to recvfrom with a NULL
      from parameter.
   */
   PRE_MEM_WRITE( "socketcall.recv(buf)",
                  arg1, /* buf */
                  arg2  /* len */ );
}

/* POST handler for recv(2): mark the receive buffer as written.
   NOTE(review): res is a UWord (unsigned), so 'res >= 0' is always
   true; the effective guard is just 'arg1 != 0'.  Verify intent. */
void
ML_(generic_POST_sys_recv) ( ThreadId tid,
                             UWord res,
                             UWord arg0, UWord arg1, UWord arg2 )
{
   if (res >= 0 && arg1 != 0) {
      POST_MEM_WRITE( arg1, /* buf */
                      arg2  /* len */ );
   }
}

/* ------ */

/* PRE handler for connect(2): the server address is read by the
   kernel. */
void
ML_(generic_PRE_sys_connect) ( ThreadId tid,
                               UWord arg0, UWord arg1, UWord arg2 )
{
   /* int connect(int sockfd,
                  struct sockaddr *serv_addr, int addrlen ); */
   pre_mem_read_sockaddr( tid,
                          "socketcall.connect(serv_addr.%s)",
                          (struct vki_sockaddr *) arg1, arg2);
}

/* ------ */

/* PRE handler for setsockopt(2): the option value buffer is read. */
void
ML_(generic_PRE_sys_setsockopt) ( ThreadId tid,
                                  UWord arg0, UWord arg1, UWord arg2,
                                  UWord arg3, UWord arg4 )
{
   /* int setsockopt(int s, int level, int optname,
                     const void *optval, int optlen); */
   PRE_MEM_READ( "socketcall.setsockopt(optval)",
                 arg3, /* optval */
                 arg4  /* optlen */ );
}

/* ------ */

/* PRE handler for getsockname(2): name/namelen are mandatory
   out-parameters (name may not be NULL for this call). */
void
ML_(generic_PRE_sys_getsockname) ( ThreadId tid,
                                   UWord arg0, UWord arg1, UWord arg2 )
{
   /* int getsockname(int s, struct sockaddr* name, int* namelen) */
   Addr name_p    = arg1;
   Addr namelen_p = arg2;
   /* Nb: name_p cannot be NULL */
   ML_(buf_and_len_pre_check) ( tid, name_p, namelen_p,
                                "socketcall.getsockname(name)",
                                "socketcall.getsockname(namelen_in)" );
}

/* POST handler for getsockname(2). */
void
ML_(generic_POST_sys_getsockname) ( ThreadId tid, SysRes res,
                                    UWord arg0, UWord arg1, UWord arg2 )
{
   Addr name_p    = arg1;
   Addr namelen_p = arg2;
   vg_assert(!sr_isError(res)); /* guaranteed by caller */
   ML_(buf_and_len_post_check) ( tid, res, name_p, namelen_p,
                                 "socketcall.getsockname(namelen_out)" );
}

/* ------ */

/* PRE handler for getpeername(2): same contract as getsockname. */
void
ML_(generic_PRE_sys_getpeername) ( ThreadId tid,
                                   UWord arg0, UWord arg1, UWord arg2 )
{
   /* int getpeername(int s, struct sockaddr* name, int* namelen) */
   Addr name_p    = arg1;
   Addr namelen_p = arg2;
   /* Nb: name_p cannot be NULL */
   ML_(buf_and_len_pre_check) ( tid, name_p, namelen_p,
                                "socketcall.getpeername(name)",
                                "socketcall.getpeername(namelen_in)" );
}

/* POST handler for getpeername(2). */
void
ML_(generic_POST_sys_getpeername) ( ThreadId tid, SysRes res,
                                    UWord arg0, UWord arg1, UWord arg2 )
{
   Addr name_p    = arg1;
   Addr namelen_p = arg2;
   vg_assert(!sr_isError(res)); /* guaranteed by caller */
   ML_(buf_and_len_post_check) ( tid, res, name_p, namelen_p,
                                 "socketcall.getpeername(namelen_out)" );
}

/* ------ */

/* PRE handler for sendmsg(2): walk every field of the msghdr and mark
   the kernel-read parts (length ~0 == unlimited for PRE). */
void
ML_(generic_PRE_sys_sendmsg) ( ThreadId tid, const HChar *name,
                               struct vki_msghdr *msg )
{
   msghdr_foreachfield ( tid, name, msg, ~0, pre_mem_read_sendmsg, False );
}

/* ------ */

/* PRE handler for recvmsg(2): walk the msghdr and mark the
   kernel-written parts. */
void
ML_(generic_PRE_sys_recvmsg) ( ThreadId tid, const HChar *name,
                               struct vki_msghdr *msg )
{
   msghdr_foreachfield ( tid, name, msg, ~0, pre_mem_write_recvmsg, True );
}

/* POST handler for recvmsg(2): mark received data written, then scan
   the control (cmsg) area for SCM-passed file descriptors. */
void
ML_(generic_POST_sys_recvmsg) ( ThreadId tid, const HChar *name,
                                struct vki_msghdr *msg, UInt length )
{
   msghdr_foreachfield( tid, name, msg, length, post_mem_write_recvmsg, True );
   check_cmsg_for_fds( tid, msg );
}


/* ---------------------------------------------------------------------
   Deal with a bunch of IPC related syscalls
   ------------------------------------------------------------------ */

/* ------ */

/* PRE handler for semop(2): the sops array (arg2 entries) is read. */
void
ML_(generic_PRE_sys_semop) ( ThreadId tid,
                             UWord arg0, UWord arg1, UWord arg2 )
{
   /* int semop(int semid, struct sembuf *sops, unsigned nsops); */
   PRE_MEM_READ( "semop(sops)", arg1, arg2 * sizeof(struct vki_sembuf) );
}

/* ------ */

/* PRE handler for semtimedop(2): as semop, plus an optional timeout
   struct which is read when non-NULL. */
void
ML_(generic_PRE_sys_semtimedop) ( ThreadId tid,
                                  UWord arg0, UWord arg1,
                                  UWord arg2, UWord arg3 )
{
   /* int semtimedop(int semid, struct sembuf *sops, unsigned nsops,
                     struct timespec *timeout); */
   PRE_MEM_READ( "semtimedop(sops)", arg1, arg2 * sizeof(struct vki_sembuf) );
   if (arg3 != 0)
      PRE_MEM_READ( "semtimedop(timeout)", arg3, sizeof(struct vki_timespec) );
}

/* ------ */

/* Query the kernel for the number of semaphores in set 'semid' via
   IPC_STAT; returns 0 on failure.  Used to size GETALL/SETALL
   checks. */
static
UInt get_sem_count( Int semid )
{
   struct vki_semid_ds buf;
   union vki_semun arg;
   SysRes res;

   /* Doesn't actually seem to be necessary, but gcc-4.4.0 20081017
      (experimental) otherwise complains that the use in the return
      statement below is uninitialised.
   */
   buf.sem_nsems = 0;

   arg.buf = &buf;

   /* Pick whichever semctl entry point this platform exposes. */
#  if defined(__NR_semctl)
   res = VG_(do_syscall4)(__NR_semctl, semid, 0, VKI_IPC_STAT, *(UWord *)&arg);
#  elif defined(__NR_semsys)  /* Solaris */
   res = VG_(do_syscall5)(__NR_semsys, VKI_SEMCTL, semid, 0, VKI_IPC_STAT,
                          *(UWord *)&arg);
#  else
   res = VG_(do_syscall5)(__NR_ipc, 3 /* IPCOP_semctl */, semid, 0,
                          VKI_IPC_STAT, (UWord)&arg);
#  endif
   if (sr_isError(res))
      return 0;

   return buf.sem_nsems;
}

/* PRE handler for semctl(2): what the kernel reads/writes through the
   semun argument depends entirely on 'cmd', hence the big switch. */
void
ML_(generic_PRE_sys_semctl) ( ThreadId tid,
                              UWord arg0, UWord arg1,
                              UWord arg2, UWord arg3 )
{
   /* int semctl(int semid, int semnum, int cmd, ...); */
   union vki_semun arg = *(union vki_semun *)&arg3;
   UInt nsems;
   switch (arg2 /* cmd */) {
#if defined(VKI_IPC_INFO)
   case VKI_IPC_INFO:
   case VKI_SEM_INFO:
   case VKI_IPC_INFO|VKI_IPC_64:
   case VKI_SEM_INFO|VKI_IPC_64:
      PRE_MEM_WRITE( "semctl(IPC_INFO, arg.buf)",
                     (Addr)arg.buf, sizeof(struct vki_seminfo) );
      break;
#endif

   case VKI_IPC_STAT:
#if defined(VKI_SEM_STAT)
   case VKI_SEM_STAT:
#endif
      PRE_MEM_WRITE( "semctl(IPC_STAT, arg.buf)",
                     (Addr)arg.buf, sizeof(struct vki_semid_ds) );
      break;

#if defined(VKI_IPC_64)
   case VKI_IPC_STAT|VKI_IPC_64:
#if defined(VKI_SEM_STAT)
   case VKI_SEM_STAT|VKI_IPC_64:
#endif
#endif
#if defined(VKI_IPC_STAT64)
   case VKI_IPC_STAT64:
#endif
#if defined(VKI_IPC_64) || defined(VKI_IPC_STAT64)
      PRE_MEM_WRITE( "semctl(IPC_STAT, arg.buf)",
                     (Addr)arg.buf, sizeof(struct vki_semid64_ds) );
      break;
#endif

   case VKI_IPC_SET:
      PRE_MEM_READ( "semctl(IPC_SET, arg.buf)",
                    (Addr)arg.buf, sizeof(struct vki_semid_ds) );
      break;

#if defined(VKI_IPC_64)
   case VKI_IPC_SET|VKI_IPC_64:
#endif
#if defined(VKI_IPC_SET64)
   case VKI_IPC_SET64:
#endif
   /* NOTE(review): 'VKI_IPC64' below differs from 'VKI_IPC_64' used in
      every other guard in this function -- looks like a typo that could
      silently compile this branch out; verify against upstream. */
#if defined(VKI_IPC64) || defined(VKI_IPC_SET64)
      PRE_MEM_READ( "semctl(IPC_SET, arg.buf)",
                    (Addr)arg.buf, sizeof(struct vki_semid64_ds) );
      break;
#endif

   case VKI_GETALL:
#if defined(VKI_IPC_64)
   case VKI_GETALL|VKI_IPC_64:
#endif
      nsems = get_sem_count( arg0 );
      PRE_MEM_WRITE( "semctl(IPC_GETALL, arg.array)",
                     (Addr)arg.array, sizeof(unsigned short) * nsems );
      break;

   case VKI_SETALL:
#if defined(VKI_IPC_64)
   case VKI_SETALL|VKI_IPC_64:
#endif
      nsems = get_sem_count( arg0 );
      PRE_MEM_READ( "semctl(IPC_SETALL, arg.array)",
                    (Addr)arg.array, sizeof(unsigned short) * nsems );
      break;
   }
}

/* POST handler for semctl(2): mark the buffers the kernel filled in,
   mirroring the WRITE cases of the PRE handler.
   NOTE(review): unlike the PRE handler, VKI_SEM_STAT|VKI_IPC_64 below
   is not wrapped in #if defined(VKI_SEM_STAT) -- confirm all supported
   platforms define it whenever VKI_IPC_64 is defined. */
void
ML_(generic_POST_sys_semctl) ( ThreadId tid,
                               UWord res,
                               UWord arg0, UWord arg1,
                               UWord arg2, UWord arg3 )
{
   union vki_semun arg = *(union vki_semun *)&arg3;
   UInt nsems;
   switch (arg2 /* cmd */) {
#if defined(VKI_IPC_INFO)
   case VKI_IPC_INFO:
   case VKI_SEM_INFO:
   case VKI_IPC_INFO|VKI_IPC_64:
   case VKI_SEM_INFO|VKI_IPC_64:
      POST_MEM_WRITE( (Addr)arg.buf, sizeof(struct vki_seminfo) );
      break;
#endif

   case VKI_IPC_STAT:
#if defined(VKI_SEM_STAT)
   case VKI_SEM_STAT:
#endif
      POST_MEM_WRITE( (Addr)arg.buf, sizeof(struct vki_semid_ds) );
      break;

#if defined(VKI_IPC_64)
   case VKI_IPC_STAT|VKI_IPC_64:
   case VKI_SEM_STAT|VKI_IPC_64:
#endif
#if defined(VKI_IPC_STAT64)
   case VKI_IPC_STAT64:
#endif
#if defined(VKI_IPC_64) || defined(VKI_IPC_STAT64)
      POST_MEM_WRITE( (Addr)arg.buf, sizeof(struct vki_semid64_ds) );
      break;
#endif

   case VKI_GETALL:
#if defined(VKI_IPC_64)
   case VKI_GETALL|VKI_IPC_64:
#endif
      nsems = get_sem_count( arg0 );
      POST_MEM_WRITE( (Addr)arg.array, sizeof(unsigned short) * nsems );
      break;
   }
}

/* ------ */

/* ------ */

/* Query the kernel for the size of shared-memory segment 'shmid' via
   IPC_STAT; returns 0 on failure.  Used to size shmat mappings. */
static
SizeT get_shm_size ( Int shmid )
{
#if defined(__NR_shmctl)
#  ifdef VKI_IPC_64
   struct vki_shmid64_ds buf;
#    if defined(VGP_amd64_linux) || defined(VGP_arm64_linux)
     /* See bug 222545 comment 7 */
     SysRes __res = VG_(do_syscall3)(__NR_shmctl, shmid,
                                     VKI_IPC_STAT, (UWord)&buf);
#    else
     SysRes __res = VG_(do_syscall3)(__NR_shmctl, shmid,
                                     VKI_IPC_STAT|VKI_IPC_64, (UWord)&buf);
#    endif
#  else /* !def VKI_IPC_64 */
   struct vki_shmid_ds buf;
   SysRes __res = VG_(do_syscall3)(__NR_shmctl, shmid, VKI_IPC_STAT, (UWord)&buf);
#  endif /* def VKI_IPC_64 */
#elif defined(__NR_shmsys) /* Solaris */
   struct vki_shmid_ds buf;
   SysRes __res = VG_(do_syscall4)(__NR_shmsys, VKI_SHMCTL, shmid, VKI_IPC_STAT,
                                   (UWord)&buf);
#else
   struct vki_shmid_ds buf;
   SysRes
          __res = VG_(do_syscall5)(__NR_ipc, 24 /* IPCOP_shmctl */, shmid,
                                   VKI_IPC_STAT, 0, (UWord)&buf);
#endif
   if (sr_isError(__res))
      return 0;

   return (SizeT) buf.shm_segsz;
}

/* PRE handler for shmat(2): choose (or validate) the attach address on
   the client's behalf.  Returns the address to actually use, or 0 to
   signal failure. */
UWord
ML_(generic_PRE_sys_shmat) ( ThreadId tid,
                             UWord arg0, UWord arg1, UWord arg2 )
{
   /* void *shmat(int shmid, const void *shmaddr, int shmflg); */
   SizeT  segmentSize = get_shm_size ( arg0 );
   UWord tmp;
   Bool  ok;
   if (arg1 == 0) {
      /* arm-linux only: work around the fact that
         VG_(am_get_advisory_client_simple) produces something that is
         VKI_PAGE_SIZE aligned, whereas what we want is something
         VKI_SHMLBA aligned, and VKI_SHMLBA >= VKI_PAGE_SIZE.  Hence
         increase the request size by VKI_SHMLBA - VKI_PAGE_SIZE and then
         round the result up to the next VKI_SHMLBA boundary.  See bug
         222545 comment 15.  So far, arm-linux is the only platform where
         this is known to be necessary. */
      vg_assert(VKI_SHMLBA >= VKI_PAGE_SIZE);
      if (VKI_SHMLBA > VKI_PAGE_SIZE) {
         segmentSize += VKI_SHMLBA - VKI_PAGE_SIZE;
      }
      tmp = VG_(am_get_advisory_client_simple)(0, segmentSize, &ok);
      if (ok) {
         if (VKI_SHMLBA > VKI_PAGE_SIZE) {
            arg1 = VG_ROUNDUP(tmp, VKI_SHMLBA);
         } else {
            arg1 = tmp;
         }
      }
   }
   else if (!ML_(valid_client_addr)(arg1, segmentSize, tid, "shmat"))
      /* Client-supplied address is not acceptable: force failure. */
      arg1 = 0;
   return arg1;
}

/* POST handler for shmat(2): tell aspacem and the tool about the new
   client mapping. */
void
ML_(generic_POST_sys_shmat) ( ThreadId tid,
                              UWord res,
                              UWord arg0, UWord arg1, UWord arg2 )
{
   SizeT segmentSize = VG_PGROUNDUP(get_shm_size(arg0));
   if ( segmentSize > 0 ) {
      UInt prot = VKI_PROT_READ|VKI_PROT_WRITE;
      Bool d;

      if (arg2 & VKI_SHM_RDONLY)
         prot &= ~VKI_PROT_WRITE;
      /* It isn't exactly correct to pass 0 for the fd and offset
         here.  The kernel seems to think the corresponding section
         does have dev/ino numbers:

         04e52000-04ec8000 rw-s 00000000 00:06 1966090  /SYSV00000000 (deleted)

         However there is no obvious way to find them.  In order to
         cope with the discrepancy, aspacem's sync checker omits the
         dev/ino correspondence check in cases where V does not know
         the dev/ino.
      */
      d = VG_(am_notify_client_shmat)( res, segmentSize, prot );

      /* we don't distinguish whether it's read-only or
       * read-write -- it doesn't matter really. */
      VG_TRACK( new_mem_mmap, res, segmentSize, True, True, False,
                0/*di_handle*/ );
      if (d)
         /* The mapping displaced existing translations: drop them. */
         VG_(discard_translations)( (Addr)res,
                                    (ULong)VG_PGROUNDUP(segmentSize),
                                    "ML_(generic_POST_sys_shmat)" );
   }
}

/* ------ */

/* PRE handler for shmdt(2): only sanity-check that the address lies in
   client space; returns False to force failure otherwise. */
Bool
ML_(generic_PRE_sys_shmdt) ( ThreadId tid, UWord arg0 )
{
   /* int shmdt(const void *shmaddr); */
   return ML_(valid_client_addr)(arg0, 1, tid, "shmdt");
}

/* POST handler for shmdt(2): find the segment that was detached and
   notify aspacem / the tool that it is gone. */
void
ML_(generic_POST_sys_shmdt) ( ThreadId tid, UWord res, UWord arg0 )
{
   NSegment const* s = VG_(am_find_nsegment)(arg0);

   if (s != NULL) {
      Addr  s_start = s->start;
      SizeT s_len   = s->end+1 - s->start;
      Bool  d;

      vg_assert(s->kind == SkShmC);
      vg_assert(s->start == arg0);

      d = VG_(am_notify_munmap)(s_start, s_len);
      s = NULL; /* s is now invalid */
      VG_TRACK( die_mem_munmap, s_start, s_len );
      if (d)
         VG_(discard_translations)( s_start,
                                    (ULong)s_len,
                                    "ML_(generic_POST_sys_shmdt)" );
   }
}

/* ------ */

/* PRE handler for shmctl(2): per-command read/write marking of the
   shmid_ds (or info) buffer, mirroring semctl above. */
void
ML_(generic_PRE_sys_shmctl) ( ThreadId tid,
                              UWord arg0, UWord arg1, UWord arg2 )
{
   /* int shmctl(int shmid, int cmd, struct shmid_ds *buf); */
   switch (arg1 /* cmd */) {
#if defined(VKI_IPC_INFO)
   case VKI_IPC_INFO:
      PRE_MEM_WRITE( "shmctl(IPC_INFO, buf)",
                     arg2, sizeof(struct vki_shminfo) );
      break;
#if defined(VKI_IPC_64)
   case VKI_IPC_INFO|VKI_IPC_64:
      PRE_MEM_WRITE( "shmctl(IPC_INFO, buf)",
                     arg2, sizeof(struct vki_shminfo64) );
      break;
#endif
#endif

#if defined(VKI_SHM_INFO)
   case VKI_SHM_INFO:
#if defined(VKI_IPC_64)
   case VKI_SHM_INFO|VKI_IPC_64:
#endif
      PRE_MEM_WRITE( "shmctl(SHM_INFO, buf)",
                     arg2, sizeof(struct vki_shm_info) );
      break;
#endif

   case VKI_IPC_STAT:
#if defined(VKI_SHM_STAT)
   case VKI_SHM_STAT:
#endif
      PRE_MEM_WRITE( "shmctl(IPC_STAT, buf)",
                     arg2, sizeof(struct vki_shmid_ds) );
      break;

#if defined(VKI_IPC_64)
   case VKI_IPC_STAT|VKI_IPC_64:
   case VKI_SHM_STAT|VKI_IPC_64:
      PRE_MEM_WRITE( "shmctl(IPC_STAT, arg.buf)",
                     arg2, sizeof(struct vki_shmid64_ds) );
      break;
#endif

   case VKI_IPC_SET:
      PRE_MEM_READ( "shmctl(IPC_SET, arg.buf)",
                    arg2, sizeof(struct vki_shmid_ds) );
      break;

#if defined(VKI_IPC_64)
   case VKI_IPC_SET|VKI_IPC_64:
      PRE_MEM_READ( "shmctl(IPC_SET, arg.buf)",
                    arg2, sizeof(struct vki_shmid64_ds) );
      break;
#endif
   }
}

/* POST handler for shmctl(2): mark the kernel-filled buffers,
   mirroring the WRITE cases of the PRE handler.
   NOTE(review): the |VKI_IPC_64 cases here are not guarded by
   #if defined(VKI_IPC_64) in the first group -- the enclosing
   #if defined(VKI_IPC_INFO) covers them; verify that matches
   upstream. */
void
ML_(generic_POST_sys_shmctl) ( ThreadId tid,
                               UWord res,
                               UWord arg0, UWord arg1, UWord arg2 )
{
   switch (arg1 /* cmd */) {
#if defined(VKI_IPC_INFO)
   case VKI_IPC_INFO:
      POST_MEM_WRITE( arg2, sizeof(struct vki_shminfo) );
      break;
   case VKI_IPC_INFO|VKI_IPC_64:
      POST_MEM_WRITE( arg2, sizeof(struct vki_shminfo64) );
      break;
#endif

#if defined(VKI_SHM_INFO)
   case VKI_SHM_INFO:
   case VKI_SHM_INFO|VKI_IPC_64:
      POST_MEM_WRITE( arg2, sizeof(struct vki_shm_info) );
      break;
#endif

   case VKI_IPC_STAT:
#if defined(VKI_SHM_STAT)
   case VKI_SHM_STAT:
#endif
      POST_MEM_WRITE( arg2, sizeof(struct vki_shmid_ds) );
      break;

#if defined(VKI_IPC_64)
   case VKI_IPC_STAT|VKI_IPC_64:
   case VKI_SHM_STAT|VKI_IPC_64:
      POST_MEM_WRITE( arg2, sizeof(struct vki_shmid64_ds) );
      break;
#endif
   }
}

/* ---------------------------------------------------------------------
   Generic handler for mmap
   ------------------------------------------------------------------ */

/*
 * Although mmap is specified by POSIX and the argument are generally
 * consistent across platforms the precise details of the low level
 * argument passing conventions differ. For example:
 *
 * - On x86-linux there is mmap (aka old_mmap) which takes the
 *   arguments in a memory block and the offset in bytes; and
 *   mmap2 (aka sys_mmap2) which takes the arguments in the normal
 *   way and the offset in pages.
 *
 * - On ppc32-linux there is mmap (aka sys_mmap) which takes the
 *   arguments in the normal way and the offset in bytes; and
 *   mmap2 (aka sys_mmap2) which takes the arguments in the normal
 *   way and the offset in pages.
 *
 * - On amd64-linux everything is simple and there is just the one
 *   call, mmap (aka sys_mmap)  which takes the arguments in the
 *   normal way and the offset in bytes.
 *
 * - On s390x-linux there is mmap (aka old_mmap) which takes the
 *   arguments in a memory block and the offset in bytes. mmap2
 *   is also available (but not exported via unistd.h) with
 *   arguments in a memory block and the offset in pages.
 *
 * To cope with all this we provide a generic handler function here
 * and then each platform implements one or more system call handlers
 * which call this generic routine after extracting and normalising
 * the arguments.
 */

/* Generic mmap PRE handler: validate args, ask aspacem for an address,
   perform the mmap on the client's behalf (always with MAP_FIXED at
   the advised address), with fallback retries for hinted mappings and
   kernel-only constraints (e.g. huge pages), then notify the core,
   debuginfo reader and the tool.  Returns the final SysRes. */
SysRes
ML_(generic_PRE_sys_mmap) ( ThreadId tid,
                            UWord arg1, UWord arg2, UWord arg3,
                            UWord arg4, UWord arg5, Off64T arg6 )
{
   Addr       advised;
   SysRes     sres;
   MapRequest mreq;
   Bool       mreq_ok;

#  if defined(VGO_darwin)
   // Nb: we can't use this on Darwin, it has races:
   // * needs to RETRY if advisory succeeds but map fails
   //   (could have been some other thread in a nonblocking call)
   // * needs to not use fixed-position mmap() on Darwin
   //   (mmap will cheerfully smash whatever's already there, which might
   //   be a new mapping from some other thread in a nonblocking call)
   VG_(core_panic)("can't use ML_(generic_PRE_sys_mmap) on Darwin");
#  endif

   if (arg2 == 0) {
      /* SuSV3 says: If len is zero, mmap() shall fail and no mapping
         shall be established. */
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   if (!VG_IS_PAGE_ALIGNED(arg1)) {
      /* zap any misaligned addresses. */
      /* SuSV3 says misaligned addresses only cause the MAP_FIXED case
         to fail.   Here, we catch them all. */
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   if (!VG_IS_PAGE_ALIGNED(arg6)) {
      /* zap any misaligned offsets. */
      /* SuSV3 says: The off argument is constrained to be aligned and
         sized according to the value returned by sysconf() when
         passed _SC_PAGESIZE or _SC_PAGE_SIZE. */
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   /* Figure out what kind of allocation constraints there are
      (fixed/hint/any), and ask aspacem what we should do.
   */
   mreq.start = arg1;
   mreq.len   = arg2;
   if (arg4 & VKI_MAP_FIXED) {
      mreq.rkind = MFixed;
   } else
#if defined(VKI_MAP_ALIGN) /* Solaris specific */
   if (arg4 & VKI_MAP_ALIGN) {
      mreq.rkind = MAlign;
      if (mreq.start == 0) {
         mreq.start = VKI_PAGE_SIZE;
      }
      /* VKI_MAP_FIXED and VKI_MAP_ALIGN don't like each other. */
      arg4 &= ~VKI_MAP_ALIGN;
   } else
#endif
   if (arg1 != 0) {
      mreq.rkind = MHint;
   } else {
      mreq.rkind = MAny;
   }

   /* Enquire ... */
   advised = VG_(am_get_advisory)( &mreq, True/*client*/, &mreq_ok );
   if (!mreq_ok) {
      /* Our request was bounced, so we'd better fail. */
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

#  if defined(VKI_MAP_32BIT)
   /* MAP_32BIT is royally unportable, so if the client asks for it, try our
      best to make it work (but without complexifying aspacemgr).
      If the user requested MAP_32BIT, the mmap-ed space must be in the
      first 2GB of the address space. So, return ENOMEM if aspacemgr
      advisory is above the first 2GB. If MAP_FIXED is also requested,
      MAP_32BIT has to be ignored.
      Assumption about aspacemgr behaviour: aspacemgr scans the address space
      from low addresses to find a free segment. No special effort is done
      to keep the first 2GB 'free' for this MAP_32BIT. So, this will often
      fail once the program has already allocated significant memory. */
   if ((arg4 & VKI_MAP_32BIT) && !(arg4 & VKI_MAP_FIXED)) {
      if (advised + arg2 >= 0x80000000)
         return VG_(mk_SysRes_Error)( VKI_ENOMEM );
   }
#  endif

   /* Otherwise we're OK (so far).  Install aspacem's choice of
      address, and let the mmap go through.  */
   sres = VG_(am_do_mmap_NO_NOTIFY)(advised, arg2, arg3,
                                    arg4 | VKI_MAP_FIXED,
                                    arg5, arg6);

#  if defined(VKI_MAP_32BIT)
   /* No recovery trial if the advisory was not accepted. */
   if ((arg4 & VKI_MAP_32BIT) && !(arg4 & VKI_MAP_FIXED)
       && sr_isError(sres)) {
      return VG_(mk_SysRes_Error)( VKI_ENOMEM );
   }
#  endif

   /* A refinement: it may be that the kernel refused aspacem's choice
      of address.  If we were originally asked for a hinted mapping,
      there is still a last chance: try again at any address.
      Hence:
   */
   if (mreq.rkind == MHint && sr_isError(sres)) {
      mreq.start = 0;
      mreq.len   = arg2;
      mreq.rkind = MAny;
      advised = VG_(am_get_advisory)( &mreq, True/*client*/, &mreq_ok );
      if (!mreq_ok) {
         /* Our request was bounced, so we'd better fail. */
         return VG_(mk_SysRes_Error)( VKI_EINVAL );
      }
      /* and try again with the kernel */
      sres = VG_(am_do_mmap_NO_NOTIFY)(advised, arg2, arg3,
                                       arg4 | VKI_MAP_FIXED,
                                       arg5, arg6);
   }

   /* Yet another refinement : sometimes valgrind chooses an address
      which is not acceptable by the kernel. This at least happens
      when mmap-ing huge pages, using the flag MAP_HUGETLB.
      valgrind aspacem does not know about huge pages, and modifying
      it to handle huge pages is not straightforward (e.g. need
      to understand special file system mount options).
      So, let's just redo an mmap, without giving any constraint to
      the kernel. If that succeeds, check with aspacem that the returned
      address is acceptable.
      This will give a similar effect as if the user would have
      hinted that address.
      The aspacem state will be correctly updated afterwards.
      We however cannot do this last refinement when the user asked
      for a fixed mapping, as the user asked a specific address. */
   if (sr_isError(sres) && !(arg4 & VKI_MAP_FIXED)) {
      advised = 0;
      /* try mmap with NULL address and without VKI_MAP_FIXED
         to let the kernel decide. */
      sres = VG_(am_do_mmap_NO_NOTIFY)(advised, arg2, arg3,
                                       arg4,
                                       arg5, arg6);
      if (!sr_isError(sres)) {
         /* The kernel is supposed to know what it is doing, but let's
            do a last sanity check anyway, as if the chosen address had
            been initially hinted by the client. The whole point of this
            last try was to allow mmap of huge pages to succeed without
            making aspacem understand them, on the other hand the kernel
            does not know about valgrind reservations, so this mapping
            can end up in free space and reservations. */
         mreq.start = (Addr)sr_Res(sres);
         mreq.len   = arg2;
         mreq.rkind = MHint;
         advised = VG_(am_get_advisory)( &mreq, True/*client*/, &mreq_ok );
         vg_assert(mreq_ok && advised == mreq.start);
      }
   }

   if (!sr_isError(sres)) {
      ULong di_handle;
      /* Notify aspacem. */
      notify_core_of_mmap(
         (Addr)sr_Res(sres), /* addr kernel actually assigned */
         arg2, /* length */
         arg3, /* prot */
         arg4, /* the original flags value */
         arg5, /* fd */
         arg6  /* offset */
      );
      /* Load symbols? */
      di_handle = VG_(di_notify_mmap)( (Addr)sr_Res(sres),
                                       False/*allow_SkFileV*/, (Int)arg5 );
      /* Notify the tool. */
      notify_tool_of_mmap(
         (Addr)sr_Res(sres), /* addr kernel actually assigned */
         arg2, /* length */
         arg3, /* prot */
         di_handle /* so the tool can refer to the read debuginfo later,
                      if it wants. */
      );
   }

   /* Stay sane */
   if (!sr_isError(sres) && (arg4 & VKI_MAP_FIXED))
      vg_assert(sr_Res(sres) == arg1);

   return sres;
}


/* ---------------------------------------------------------------------
   The Main Entertainment ... syscall wrappers
   ------------------------------------------------------------------ */

/* Note: the PRE() and POST() wrappers are for the actual functions
   implementing the system calls in the OS kernel.  These mostly have
   names like sys_write();  a few have names like old_mmap().  See the
   comment for ML_(syscall_table)[] for important info about the __NR_foo
   constants and their relationship to the sys_foo() functions.

   Some notes about names used for syscalls and args:
   - For the --trace-syscalls=yes output, we use the sys_foo() name to
     avoid ambiguity.

   - For error messages, we generally use a somewhat generic name
     for the syscall (eg. "write" rather than "sys_write").  This should be
     good enough for the average user to understand what is happening,
     without confusing them with names like "sys_write".
   - Also, for error messages the arg names are mostly taken from the man
     pages (even though many of those man pages are really for glibc
     functions of the same name), rather than from the OS kernel source,
     for the same reason -- a user presented with a "bogus foo(bar)" arg
     will most likely look at the "foo" man page to see which is the "bar"
     arg.

   Note that we use our own vki_* types.  The one exception is in
   PRE_REG_READn calls, where pointer types haven't been changed, because
   they don't need to be -- eg. for "foo*" to be used, the type foo need not
   be visible.

   XXX: some of these are arch-specific, and should be factored out.
*/

#define PRE(name)      DEFN_PRE_TEMPLATE(generic, name)
#define POST(name)     DEFN_POST_TEMPLATE(generic, name)

// Macros to support 64-bit syscall args split into two 32 bit values
#if defined(VG_LITTLEENDIAN)
#define MERGE64(lo,hi)   ( ((ULong)(lo)) | (((ULong)(hi)) << 32) )
#define MERGE64_FIRST(name) name##_low
#define MERGE64_SECOND(name) name##_high
#elif defined(VG_BIGENDIAN)
#define MERGE64(hi,lo)   ( ((ULong)(lo)) | (((ULong)(hi)) << 32) )
#define MERGE64_FIRST(name) name##_high
#define MERGE64_SECOND(name) name##_low
#else
#error Unknown endianness
#endif

/* exit(2): never reaches the kernel here; Valgrind marks the thread as
   exiting and fakes a successful return. */
PRE(sys_exit)
{
   ThreadState* tst;
   /* simple; just make this thread exit */
   PRINT("exit( %ld )", SARG1);
   PRE_REG_READ1(void, "exit", int, status);
   tst = VG_(get_ThreadState)(tid);
   /* Set the thread's status to be exiting, then claim that the
      syscall succeeded. */
   tst->exitreason = VgSrc_ExitThread;
   tst->os_state.exitcode = ARG1;
   SET_STATUS_Success(0);
}

/* Catch-all for syscalls the kernel does not implement: warn and fail
   with ENOSYS. */
PRE(sys_ni_syscall)
{
   PRINT("unimplemented (by the kernel) syscall: %s! (ni_syscall)\n",
         VG_SYSNUM_STRING(SYSNO));
   PRE_REG_READ0(long, "ni_syscall");
   SET_STATUS_Failure( VKI_ENOSYS );
}

PRE(sys_iopl)
{
   PRINT("sys_iopl ( %lu )", ARG1);
   PRE_REG_READ1(long, "iopl", unsigned long, level);
}

PRE(sys_fsync)
{
   *flags |= SfMayBlock;
   PRINT("sys_fsync ( %lu )", ARG1);
   PRE_REG_READ1(long, "fsync", unsigned int, fd);
}

PRE(sys_fdatasync)
{
   *flags |= SfMayBlock;
   PRINT("sys_fdatasync ( %lu )", ARG1);
   PRE_REG_READ1(long, "fdatasync", unsigned int, fd);
}

PRE(sys_msync)
{
   *flags |= SfMayBlock;
   PRINT("sys_msync ( %#lx, %lu, %#lx )", ARG1, ARG2, ARG3);
   PRE_REG_READ3(long, "msync",
                 unsigned long, start, vki_size_t, length, int, flags);
   PRE_MEM_READ( "msync(start)", ARG1, ARG2 );
}

// Nb: getpmsg() and putpmsg() are special additional syscalls used in early
// versions of LiS (Linux Streams).  They are not part of the kernel.
// Therefore, we have to provide this type ourself, rather than getting it
// from the kernel sources.
struct vki_pmsg_strbuf {
   int     maxlen;         /* no. of bytes in buffer */
   int     len;            /* no.
                              of bytes returned */
   vki_caddr_t buf;        /* pointer to data */
};

/* getpmsg(2): LiS STREAMS read; ctrl/data/bandp/flagsp are all
   optional out-parameters. */
PRE(sys_getpmsg)
{
   /* LiS getpmsg from http://www.gcom.com/home/linux/lis/ */
   struct vki_pmsg_strbuf *ctrl;
   struct vki_pmsg_strbuf *data;
   *flags |= SfMayBlock;
   PRINT("sys_getpmsg ( %ld, %#lx, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3,
         ARG4, ARG5);
   PRE_REG_READ5(int, "getpmsg",
                 int, fd, struct strbuf *, ctrl, struct strbuf *, data,
                 int *, bandp, int *, flagsp);
   ctrl = (struct vki_pmsg_strbuf *)ARG2;
   data = (struct vki_pmsg_strbuf *)ARG3;
   if (ctrl && ctrl->maxlen > 0)
      PRE_MEM_WRITE( "getpmsg(ctrl)", (Addr)ctrl->buf, ctrl->maxlen);
   if (data && data->maxlen > 0)
      PRE_MEM_WRITE( "getpmsg(data)", (Addr)data->buf, data->maxlen);
   if (ARG4)
      PRE_MEM_WRITE( "getpmsg(bandp)", (Addr)ARG4, sizeof(int));
   if (ARG5)
      PRE_MEM_WRITE( "getpmsg(flagsp)", (Addr)ARG5, sizeof(int));
}

POST(sys_getpmsg)
{
   struct vki_pmsg_strbuf *ctrl;
   struct vki_pmsg_strbuf *data;
   vg_assert(SUCCESS);
   ctrl = (struct vki_pmsg_strbuf *)ARG2;
   data = (struct vki_pmsg_strbuf *)ARG3;
   /* RES == 0 means a normal (non-priority) message was read. */
   if (RES == 0 && ctrl && ctrl->len > 0) {
      POST_MEM_WRITE( (Addr)ctrl->buf, ctrl->len);
   }
   if (RES == 0 && data && data->len > 0) {
      POST_MEM_WRITE( (Addr)data->buf, data->len);
   }
}

/* putpmsg(2): LiS STREAMS write; ctrl/data are read when present. */
PRE(sys_putpmsg)
{
   /* LiS putpmsg from http://www.gcom.com/home/linux/lis/ */
   struct vki_pmsg_strbuf *ctrl;
   struct vki_pmsg_strbuf *data;
   *flags |= SfMayBlock;
   PRINT("sys_putpmsg ( %ld, %#lx, %#lx, %ld, %ld )", SARG1, ARG2, ARG3,
         SARG4, SARG5);
   PRE_REG_READ5(int, "putpmsg",
                 int, fd, struct strbuf *, ctrl, struct strbuf *, data,
                 int, band, int, flags);
   ctrl = (struct vki_pmsg_strbuf *)ARG2;
   data = (struct vki_pmsg_strbuf *)ARG3;
   if (ctrl && ctrl->len > 0)
      PRE_MEM_READ( "putpmsg(ctrl)", (Addr)ctrl->buf, ctrl->len);
   if (data && data->len > 0)
      PRE_MEM_READ( "putpmsg(data)", (Addr)data->buf, data->len);
}

/* getitimer(2): the kernel fills in *value.
   NOTE(review): value is dereferenced here without a NULL check,
   unlike the POST handler which guards on ARG2 != NULL -- verify
   against upstream. */
PRE(sys_getitimer)
{
   struct vki_itimerval *value = (struct vki_itimerval*)ARG2;
   PRINT("sys_getitimer ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "getitimer", int, which, struct itimerval *, value);
   PRE_timeval_WRITE( "getitimer(&value->it_interval)",
                      &(value->it_interval));
   PRE_timeval_WRITE( "getitimer(&value->it_value)", &(value->it_value));
}

POST(sys_getitimer)
{
   if (ARG2 != (Addr)NULL) {
      struct vki_itimerval *value = (struct vki_itimerval*)ARG2;
      POST_timeval_WRITE( &(value->it_interval) );
      POST_timeval_WRITE( &(value->it_value) );
   }
}

/* setitimer(2): *value is read, *ovalue (old value) is written; both
   are optional. */
PRE(sys_setitimer)
{
   PRINT("sys_setitimer ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
   PRE_REG_READ3(long, "setitimer",
                 int, which,
                 struct itimerval *, value, struct itimerval *, ovalue);
   if (ARG2 != (Addr)NULL) {
      struct vki_itimerval *value = (struct vki_itimerval*)ARG2;
      PRE_timeval_READ( "setitimer(&value->it_interval)",
                        &(value->it_interval));
      PRE_timeval_READ( "setitimer(&value->it_value)",
                        &(value->it_value));
   }
   if (ARG3 != (Addr)NULL) {
      struct vki_itimerval *ovalue = (struct vki_itimerval*)ARG3;
      PRE_timeval_WRITE( "setitimer(&ovalue->it_interval)",
                         &(ovalue->it_interval));
      PRE_timeval_WRITE( "setitimer(&ovalue->it_value)",
                         &(ovalue->it_value));
   }
}

POST(sys_setitimer)
{
   if (ARG3 != (Addr)NULL) {
      struct vki_itimerval *ovalue = (struct vki_itimerval*)ARG3;
      POST_timeval_WRITE( &(ovalue->it_interval) );
      POST_timeval_WRITE( &(ovalue->it_value) );
   }
}

PRE(sys_chroot)
{
   PRINT("sys_chroot ( %#lx )", ARG1);
   PRE_REG_READ1(long, "chroot", const char *, path);
   PRE_MEM_RASCIIZ( "chroot(path)", ARG1 );
}

PRE(sys_madvise)
{
   *flags |= SfMayBlock;
   PRINT("sys_madvise ( %#lx, %lu, %ld )", ARG1, ARG2, SARG3);
   PRE_REG_READ3(long, "madvise",
                 unsigned long, start, vki_size_t, length, int, advice);
}

#if HAVE_MREMAP
PRE(sys_mremap)
{
   // Nb: this is different to the glibc version described in the man pages,
   // which lacks the fifth 'new_address' argument.
   /* Register-read markup depends on whether the optional fifth
      argument (new_address, only with MREMAP_FIXED) is present. */
   if (ARG4 & VKI_MREMAP_FIXED) {
      PRINT("sys_mremap ( %#lx, %lu, %lu, %#lx, %#lx )",
            ARG1, ARG2, ARG3, ARG4, ARG5);
      PRE_REG_READ5(unsigned long, "mremap",
                    unsigned long, old_addr, unsigned long, old_size,
                    unsigned long, new_size, unsigned long, flags,
                    unsigned long, new_addr);
   } else {
      PRINT("sys_mremap ( %#lx, %lu, %lu, 0x%lx )",
            ARG1, ARG2, ARG3, ARG4);
      PRE_REG_READ4(unsigned long, "mremap",
                    unsigned long, old_addr, unsigned long, old_size,
                    unsigned long, new_size, unsigned long, flags);
   }
   /* All the real work happens in do_mremap (defined elsewhere). */
   SET_STATUS_from_SysRes(
      do_mremap((Addr)ARG1, ARG2, (Addr)ARG5, ARG3, ARG4, tid)
   );
}
#endif /* HAVE_MREMAP */

PRE(sys_nice)
{
   PRINT("sys_nice ( %ld )", SARG1);
   PRE_REG_READ1(long, "nice", int, inc);
}

PRE(sys_mlock)
{
   *flags |= SfMayBlock;
   PRINT("sys_mlock ( %#lx, %lu )", ARG1, ARG2);
   PRE_REG_READ2(long, "mlock", unsigned long, addr, vki_size_t, len);
}

PRE(sys_munlock)
{
   *flags |= SfMayBlock;
   PRINT("sys_munlock ( %#lx, %lu )", ARG1, ARG2);
   PRE_REG_READ2(long, "munlock", unsigned long, addr, vki_size_t, len);
}

PRE(sys_mlockall)
{
   *flags |= SfMayBlock;
   PRINT("sys_mlockall ( %lx )", ARG1);
   PRE_REG_READ1(long, "mlockall", int, flags);
}

PRE(sys_setpriority)
{
   PRINT("sys_setpriority ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
   PRE_REG_READ3(long, "setpriority", int, which, int, who, int, prio);
}

PRE(sys_getpriority)
{
   PRINT("sys_getpriority ( %ld, %ld )", SARG1, SARG2);
   PRE_REG_READ2(long, "getpriority", int, which, int, who);
}

/* pwrite64(2): the 64-bit offset arrives as one or two registers
   depending on word size (see MERGE64 above). */
PRE(sys_pwrite64)
{
   *flags |= SfMayBlock;
#if VG_WORDSIZE == 4
   PRINT("sys_pwrite64 ( %lu, %#lx, %lu, %lld )",
         ARG1, ARG2, ARG3, (Long)MERGE64(ARG4,ARG5));
   PRE_REG_READ5(ssize_t, "pwrite64",
                 unsigned int, fd, const char *, buf, vki_size_t, count,
                 vki_u32, MERGE64_FIRST(offset), vki_u32, MERGE64_SECOND(offset));
#elif VG_WORDSIZE == 8
   PRINT("sys_pwrite64 ( %lu, %#lx, %lu, %ld )",
         ARG1, ARG2, ARG3, SARG4);
   PRE_REG_READ4(ssize_t, "pwrite64",
                 unsigned int, fd, const char *, buf, vki_size_t, count,
                 Word, offset);
#else
#  error Unexpected word size
#endif
   PRE_MEM_READ( "pwrite64(buf)", ARG2, ARG3 );
}

PRE(sys_sync)
{
   *flags |= SfMayBlock;
   PRINT("sys_sync ( )");
   PRE_REG_READ0(long, "sync");
}

PRE(sys_fstatfs)
{
   FUSE_COMPATIBLE_MAY_BLOCK();
   PRINT("sys_fstatfs ( %lu, %#lx )", ARG1, ARG2);
   PRE_REG_READ2(long, "fstatfs",
                 unsigned int, fd, struct statfs *, buf);
   PRE_MEM_WRITE( "fstatfs(buf)", ARG2, sizeof(struct vki_statfs) );
}

POST(sys_fstatfs)
{
   POST_MEM_WRITE( ARG2, sizeof(struct vki_statfs) );
}

/* fstatfs64(2): caller supplies the buffer size explicitly (ARG2). */
PRE(sys_fstatfs64)
{
   FUSE_COMPATIBLE_MAY_BLOCK();
   PRINT("sys_fstatfs64 ( %lu, %lu, %#lx )", ARG1, ARG2, ARG3);
   PRE_REG_READ3(long, "fstatfs64",
                 unsigned int, fd, vki_size_t, size, struct statfs64 *, buf);
   PRE_MEM_WRITE( "fstatfs64(buf)", ARG3, ARG2 );
}

POST(sys_fstatfs64)
{
   POST_MEM_WRITE( ARG3, ARG2 );
}

PRE(sys_getsid)
{
   PRINT("sys_getsid ( %ld )", SARG1);
   PRE_REG_READ1(long, "getsid", vki_pid_t, pid);
}

/* pread64(2): same two-register offset scheme as pwrite64. */
PRE(sys_pread64)
{
   *flags |= SfMayBlock;
#if VG_WORDSIZE == 4
   PRINT("sys_pread64 ( %lu, %#lx, %lu, %lld )",
         ARG1, ARG2, ARG3, (Long)MERGE64(ARG4,ARG5));
   PRE_REG_READ5(ssize_t, "pread64",
                 unsigned int, fd, char *, buf, vki_size_t, count,
                 vki_u32, MERGE64_FIRST(offset), vki_u32, MERGE64_SECOND(offset));
#elif VG_WORDSIZE == 8
   PRINT("sys_pread64 ( %lu, %#lx, %lu, %ld )",
         ARG1, ARG2, ARG3, SARG4);
   PRE_REG_READ4(ssize_t, "pread64",
                 unsigned int, fd, char *, buf, vki_size_t, count,
                 Word, offset);
#else
#  error Unexpected word size
#endif
   PRE_MEM_WRITE( "pread64(buf)", ARG2, ARG3 );
}

POST(sys_pread64)
{
   vg_assert(SUCCESS);
   if (RES > 0) {
      POST_MEM_WRITE( ARG2, RES );
   }
}

PRE(sys_mknod)
{
   FUSE_COMPATIBLE_MAY_BLOCK();
   PRINT("sys_mknod ( %#lx(%s), %#lx, %#lx )", ARG1, (HChar*)ARG1, ARG2, ARG3 );
   PRE_REG_READ3(long, "mknod",
                 const char *, pathname, int, mode, unsigned, dev);
   PRE_MEM_RASCIIZ( "mknod(pathname)", ARG1 );
}

PRE(sys_flock)
{
   *flags |= SfMayBlock;
   PRINT("sys_flock ( %lu, %lu )", ARG1, ARG2 );
   PRE_REG_READ2(long, "flock", unsigned int, fd, unsigned int, operation);
}

// Pre_read a char** argument.
/* Walk a NULL-terminated pointer vector (argv/envp shape): each slot must be
   a readable Addr, and each non-NULL slot must point at a readable ASCIIZ
   string.  s1/s2 name the vector and element for error reports. */
void ML_(pre_argv_envp)(Addr a, ThreadId tid, const HChar *s1, const HChar *s2)
{
   while (True) {
      Addr a_deref;
      Addr* a_p = (Addr*)a;
      PRE_MEM_READ( s1, (Addr)a_p, sizeof(Addr) );
      a_deref = *a_p;
      if (0 == a_deref)
         break;
      PRE_MEM_RASCIIZ( s2, a_deref );
      a += sizeof(char*);
   }
}

/* True iff this is the last living thread in the process. */
static Bool i_am_the_only_thread ( void )
{
   Int c = VG_(count_living_threads)();
   vg_assert(c >= 1); /* stay sane */
   return c == 1;
}

/* Wait until all other threads disappear. */
void VG_(reap_threads)(ThreadId self)
{
   while (!i_am_the_only_thread()) {
      /* Let other thread(s) run */
      VG_(vg_yield)();
      VG_(poll_signals)(self);
   }
   vg_assert(i_am_the_only_thread());
}

// XXX: prototype here seemingly doesn't match the prototype for i386-linux,
// but it seems to work nonetheless...
PRE(sys_execve)
{
   HChar*       path = NULL;       /* path to executable */
   HChar**      envp = NULL;
   HChar**      argv = NULL;
   HChar**      arg2copy;
   HChar*       launcher_basename = NULL;
   ThreadState* tst;
   Int          i, j, tot_args;
   SysRes       res;
   Bool         setuid_allowed, trace_this_child;

   PRINT("sys_execve ( %#lx(%s), %#lx, %#lx )", ARG1, (char*)ARG1, ARG2, ARG3);
   PRE_REG_READ3(vki_off_t, "execve",
                 char *, filename, char **, argv, char **, envp);
   PRE_MEM_RASCIIZ( "execve(filename)", ARG1 );
   if (ARG2 != 0)
      ML_(pre_argv_envp)( ARG2, tid, "execve(argv)", "execve(argv[i])" );
   if (ARG3 != 0)
      ML_(pre_argv_envp)( ARG3, tid, "execve(envp)", "execve(envp[i])" );

   vg_assert(VG_(is_valid_tid)(tid));
   tst = VG_(get_ThreadState)(tid);

   /* Erk. If the exec fails, then the following will have made a mess of
      things which makes it hard for us to continue. The right thing to do
      is piece everything together again in POST(execve), but that's close
      to impossible. Instead, we make an effort to check that the execve
      will work before actually doing it. */

   /* Check that the name at least begins in client-accessible storage. */
   if (ARG1 == 0 /* obviously bogus */
       || !VG_(am_is_valid_for_client)( ARG1, 1, VKI_PROT_READ )) {
      SET_STATUS_Failure( VKI_EFAULT );
      return;
   }

   // debug-only printing
   if (0) {
      VG_(printf)("ARG1 = %p(%s)\n", (void*)ARG1, (HChar*)ARG1);
      if (ARG2) {
         VG_(printf)("ARG2 = ");
         Int q;
         HChar** vec = (HChar**)ARG2;
         for (q = 0; vec[q]; q++)
            VG_(printf)("%p(%s) ", vec[q], vec[q]);
         VG_(printf)("\n");
      } else {
         VG_(printf)("ARG2 = null\n");
      }
   }

   // Decide whether or not we want to follow along
   {
      // Make 'child_argv' be a pointer to the child's arg vector
      // (skipping the exe name)
      const HChar** child_argv = (const HChar**)ARG2;
      if (child_argv && child_argv[0] == NULL)
         child_argv = NULL;
      trace_this_child = VG_(should_we_trace_this_child)( (HChar*)ARG1, child_argv );
   }

   // Do the important checks: it is a file, is executable, permissions are
   // ok, etc. We allow setuid executables to run only in the case when
   // we are not simulating them, that is, they to be run natively.
   setuid_allowed = trace_this_child ? False : True;
   res = VG_(pre_exec_check)((const HChar *)ARG1, NULL, setuid_allowed);
   if (sr_isError(res)) {
      SET_STATUS_Failure( sr_Err(res) );
      return;
   }

   /* If we're tracing the child, and the launcher name looks bogus
      (possibly because launcher.c couldn't figure it out, see comments
      therein) then we have no option but to fail. */
   if (trace_this_child
       && (VG_(name_of_launcher) == NULL
           || VG_(name_of_launcher)[0] != '/')) {
      SET_STATUS_Failure( VKI_ECHILD ); /* "No child processes" */
      return;
   }

   /* After this point, we can't recover if the execve fails. */
   VG_(debugLog)(1, "syswrap", "Exec of %s\n", (HChar*)ARG1);

   // Terminate gdbserver if it is active.
   if (VG_(clo_vgdb) != Vg_VgdbNo) {
      // If the child will not be traced, we need to terminate gdbserver
      // to cleanup the gdbserver resources (e.g. the FIFO files).
      // If child will be traced, we also terminate gdbserver: the new
      // Valgrind will start a fresh gdbserver after exec.
      VG_(gdbserver) (0);
   }

   /* Resistance is futile. Nuke all other threads. POSIX mandates this.
      (Really, nuke them all, since the new process will make its own new
      thread.) */
   VG_(nuke_all_threads_except)( tid, VgSrc_ExitThread );
   VG_(reap_threads)(tid);

   // Set up the child's exe path.
   //
   if (trace_this_child) {
      // We want to exec the launcher. Get its pre-remembered path.
      path = VG_(name_of_launcher);
      // VG_(name_of_launcher) should have been acquired by m_main at
      // startup.
      vg_assert(path);

      launcher_basename = VG_(strrchr)(path, '/');
      if (launcher_basename == NULL || launcher_basename[1] == 0) {
         launcher_basename = path; // hmm, tres dubious
      } else {
         launcher_basename++;
      }
   } else {
      path = (HChar*)ARG1;
   }

   // Set up the child's environment.
   //
   // Remove the valgrind-specific stuff from the environment so the
   // child doesn't get vgpreload_core.so, vgpreload_<tool>.so, etc.
   // This is done unconditionally, since if we are tracing the child,
   // the child valgrind will set up the appropriate client environment.
   // Nb: we make a copy of the environment before trying to mangle it
   // as it might be in read-only memory (this was bug #101881).
   //
   // Then, if tracing the child, set VALGRIND_LIB for it.
   //
   if (ARG3 == 0) {
      envp = NULL;
   } else {
      envp = VG_(env_clone)( (HChar**)ARG3 );
      if (envp == NULL)
         goto hosed;
      VG_(env_remove_valgrind_env_stuff)( envp, True /*ro_strings*/, NULL );
   }

   if (trace_this_child) {
      // Set VALGRIND_LIB in ARG3 (the environment)
      VG_(env_setenv)( &envp, VALGRIND_LIB, VG_(libdir));
   }

   // Set up the child's args. If not tracing it, they are
   // simply ARG2. Otherwise, they are
   //
   // [launcher_basename] ++ VG_(args_for_valgrind) ++ [ARG1] ++ ARG2[1..]
   //
   // except that the first VG_(args_for_valgrind_noexecpass) args
   // are omitted.
   //
   if (!trace_this_child) {
      argv = (HChar**)ARG2;
   } else {
      vg_assert( VG_(args_for_valgrind) );
      vg_assert( VG_(args_for_valgrind_noexecpass) >= 0 );
      vg_assert( VG_(args_for_valgrind_noexecpass)
                 <= VG_(sizeXA)( VG_(args_for_valgrind) ) );
      /* how many args in total will there be? */
      // launcher basename
      tot_args = 1;
      // V's args
      tot_args += VG_(sizeXA)( VG_(args_for_valgrind) );
      tot_args -= VG_(args_for_valgrind_noexecpass);
      // name of client exe
      tot_args++;
      // args for client exe, skipping [0]
      arg2copy = (HChar**)ARG2;
      if (arg2copy && arg2copy[0]) {
         for (i = 1; arg2copy[i]; i++)
            tot_args++;
      }
      // allocate
      argv = VG_(malloc)( "di.syswrap.pre_sys_execve.1",
                          (tot_args+1) * sizeof(HChar*) );
      // copy
      j = 0;
      argv[j++] = launcher_basename;
      for (i = 0; i < VG_(sizeXA)( VG_(args_for_valgrind) ); i++) {
         if (i < VG_(args_for_valgrind_noexecpass))
            continue;
         argv[j++] = * (HChar**) VG_(indexXA)( VG_(args_for_valgrind), i );
      }
      argv[j++] = (HChar*)ARG1;
      if (arg2copy && arg2copy[0])
         for (i = 1; arg2copy[i]; i++)
            argv[j++] = arg2copy[i];
      argv[j++] = NULL;
      // check
      vg_assert(j == tot_args+1);
   }

   /* restore the DATA rlimit for the child */
   VG_(setrlimit)(VKI_RLIMIT_DATA, &VG_(client_rlimit_data));

   /* Set the signal state up for exec. We need to set the real signal
      state to make sure the exec'd process gets SIG_IGN properly. Also set
      our real sigmask to match the client's sigmask so that the exec'd
      child will get the right mask. First we need to clear out any pending
      signals so they they don't get delivered, which would confuse things.
      XXX This is a bug - the signals should remain pending, and be
      delivered to the new process after exec. There's also a
      race-condition, since if someone delivers us a signal between the
      sigprocmask and the execve, we'll still get the signal. Oh well. */
   {
      vki_sigset_t allsigs;
      vki_siginfo_t info;

      /* What this loop does: it queries SCSS (the signal state that the
         client _thinks_ the kernel is in) by calling
         VG_(do_sys_sigaction), and modifies the real kernel signal state
         accordingly. */
      for (i = 1; i < VG_(max_signal); i++) {
         vki_sigaction_fromK_t sa_f;
         vki_sigaction_toK_t sa_t;
         VG_(do_sys_sigaction)(i, NULL, &sa_f);
         VG_(convert_sigaction_fromK_to_toK)(&sa_f, &sa_t);
         if (sa_t.ksa_handler == VKI_SIG_IGN)
            VG_(sigaction)(i, &sa_t, NULL);
         else {
            sa_t.ksa_handler = VKI_SIG_DFL;
            VG_(sigaction)(i, &sa_t, NULL);
         }
      }

      /* drain any pending signals before handing over to the exec'd image */
      VG_(sigfillset)(&allsigs);
      while(VG_(sigtimedwait_zero)(&allsigs, &info) > 0)
         ;

      VG_(sigprocmask)(VKI_SIG_SETMASK, &tst->sig_mask, NULL);
   }

   // debug-only printing of the final exec tuple
   if (0) {
      HChar **cpp;
      VG_(printf)("exec: %s\n", path);
      for (cpp = argv; cpp && *cpp; cpp++)
         VG_(printf)("argv: %s\n", *cpp);
      if (0)
         for (cpp = envp; cpp && *cpp; cpp++)
            VG_(printf)("env: %s\n", *cpp);
   }

   SET_STATUS_from_SysRes(
      VG_(do_syscall3)(__NR_execve, (UWord)path, (UWord)argv, (UWord)envp) );

   /* If we got here, then the execve failed. We've already made way too
      much of a mess to continue, so we have to abort. */
  hosed:
   vg_assert(FAILURE);
   VG_(message)(Vg_UserMsg, "execve(%#lx(%s), %#lx, %#lx) failed, errno %lu\n",
                ARG1, (HChar*)ARG1, ARG2, ARG3, ERR);
   VG_(message)(Vg_UserMsg, "EXEC FAILED: I can't recover from "
                            "execve() failing, so I'm dying.\n");
   VG_(message)(Vg_UserMsg, "Add more stringent tests in PRE(sys_execve), "
                            "or work out how to recover.\n");
   VG_(exit)(101);
}

PRE(sys_access)
{
   PRINT("sys_access ( %#lx(%s), %ld )", ARG1, (HChar*)ARG1, SARG2);
   PRE_REG_READ2(long, "access", const char *, pathname, int, mode);
   PRE_MEM_RASCIIZ( "access(pathname)", ARG1 );
}

PRE(sys_alarm)
{
   PRINT("sys_alarm ( %lu )", ARG1);
   PRE_REG_READ1(unsigned long, "alarm", unsigned int, seconds);
}

PRE(sys_brk)
{
   Addr brk_limit = VG_(brk_limit);
   Addr brk_new;

   /* libc says: int brk(void *end_data_segment); kernel says: void*
      brk(void* end_data_segment); (more or less)

      libc returns 0 on success, and -1 (and sets errno) on failure.
      Nb: if you ask to shrink the dataseg end below what it currently is,
      that always succeeds, even if the dataseg end doesn't actually change
      (eg. brk(0)). Unless it seg faults.

      Kernel returns the new dataseg end. If the brk() failed, this will
      be unchanged from the old one. That's why calling (kernel) brk(0)
      gives the current dataseg end (libc brk() just returns zero in that
      case).

      Both will seg fault if you shrink it back into a text segment. */
   PRINT("sys_brk ( %#lx )", ARG1);
   PRE_REG_READ1(unsigned long, "brk", unsigned long, end_data_segment);

   brk_new = do_brk(ARG1, tid);
   SET_STATUS_Success( brk_new );

   if (brk_new == ARG1) {
      /* brk() succeeded */
      if (brk_new < brk_limit) {
         /* successfully shrunk the data segment.
*/
         VG_TRACK( die_mem_brk, (Addr)ARG1, brk_limit-ARG1 );
      } else
      if (brk_new > brk_limit) {
         /* successfully grew the data segment */
         VG_TRACK( new_mem_brk, brk_limit, ARG1-brk_limit, tid );
      }
   } else {
      /* brk() failed */
      vg_assert(brk_limit == brk_new);
   }
}

PRE(sys_chdir)
{
   FUSE_COMPATIBLE_MAY_BLOCK();
   PRINT("sys_chdir ( %#lx(%s) )", ARG1,(char*)ARG1);
   PRE_REG_READ1(long, "chdir", const char *, path);
   PRE_MEM_RASCIIZ( "chdir(path)", ARG1 );
}

PRE(sys_chmod)
{
   FUSE_COMPATIBLE_MAY_BLOCK();
   PRINT("sys_chmod ( %#lx(%s), %lu )", ARG1, (HChar*)ARG1, ARG2);
   PRE_REG_READ2(long, "chmod", const char *, path, vki_mode_t, mode);
   PRE_MEM_RASCIIZ( "chmod(path)", ARG1 );
}

PRE(sys_chown)
{
   FUSE_COMPATIBLE_MAY_BLOCK();
   PRINT("sys_chown ( %#lx(%s), 0x%lx, 0x%lx )", ARG1,(char*)ARG1,ARG2,ARG3);
   PRE_REG_READ3(long, "chown",
                 const char *, path, vki_uid_t, owner, vki_gid_t, group);
   PRE_MEM_RASCIIZ( "chown(path)", ARG1 );
}

PRE(sys_lchown)
{
   FUSE_COMPATIBLE_MAY_BLOCK();
   PRINT("sys_lchown ( %#lx(%s), 0x%lx, 0x%lx )", ARG1,(char*)ARG1,ARG2,ARG3);
   PRE_REG_READ3(long, "lchown",
                 const char *, path, vki_uid_t, owner, vki_gid_t, group);
   PRE_MEM_RASCIIZ( "lchown(path)", ARG1 );
}

PRE(sys_close)
{
   FUSE_COMPATIBLE_MAY_BLOCK();
   PRINT("sys_close ( %lu )", ARG1);
   PRE_REG_READ1(long, "close", unsigned int, fd);

   /* Detect and negate attempts by the client to close Valgrind's log fd */
   if ( (!ML_(fd_allowed)(ARG1, "close", tid, False))
        /* If doing -d style logging (which is to fd=2), don't allow that
           to be closed either. */
        || (ARG1 == 2/*stderr*/ && VG_(debugLog_getLevel)() > 0) )
      SET_STATUS_Failure( VKI_EBADF );
}

POST(sys_close)
{
   if (VG_(clo_track_fds)) ML_(record_fd_close)(ARG1);
}

PRE(sys_dup)
{
   PRINT("sys_dup ( %lu )", ARG1);
   PRE_REG_READ1(long, "dup", unsigned int, oldfd);
}

POST(sys_dup)
{
   vg_assert(SUCCESS);
   /* refuse fds that would collide with Valgrind's own reserved range */
   if (!ML_(fd_allowed)(RES, "dup", tid, True)) {
      VG_(close)(RES);
      SET_STATUS_Failure( VKI_EMFILE );
   } else {
      if (VG_(clo_track_fds))
         ML_(record_fd_open_named)(tid, RES);
   }
}

PRE(sys_dup2)
{
   PRINT("sys_dup2 ( %lu, %lu )", ARG1, ARG2);
   PRE_REG_READ2(long, "dup2", unsigned int, oldfd, unsigned int, newfd);
   if (!ML_(fd_allowed)(ARG2, "dup2", tid, True))
      SET_STATUS_Failure( VKI_EBADF );
}

POST(sys_dup2)
{
   vg_assert(SUCCESS);
   if (VG_(clo_track_fds))
      ML_(record_fd_open_named)(tid, RES);
}

PRE(sys_fchdir)
{
   FUSE_COMPATIBLE_MAY_BLOCK();
   PRINT("sys_fchdir ( %lu )", ARG1);
   PRE_REG_READ1(long, "fchdir", unsigned int, fd);
}

PRE(sys_fchown)
{
   FUSE_COMPATIBLE_MAY_BLOCK();
   PRINT("sys_fchown ( %lu, %lu, %lu )", ARG1, ARG2, ARG3);
   PRE_REG_READ3(long, "fchown",
                 unsigned int, fd, vki_uid_t, owner, vki_gid_t, group);
}

PRE(sys_fchmod)
{
   FUSE_COMPATIBLE_MAY_BLOCK();
   PRINT("sys_fchmod ( %lu, %lu )", ARG1, ARG2);
   PRE_REG_READ2(long, "fchmod", unsigned int, fildes, vki_mode_t, mode);
}

PRE(sys_newfstat)
{
   FUSE_COMPATIBLE_MAY_BLOCK();
   PRINT("sys_newfstat ( %lu, %#lx )", ARG1, ARG2);
   PRE_REG_READ2(long, "fstat", unsigned int, fd, struct stat *, buf);
   PRE_MEM_WRITE( "fstat(buf)", ARG2, sizeof(struct vki_stat) );
}

POST(sys_newfstat)
{
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat) );
}

#if !defined(VGO_solaris)
/* signal mask saved across fork so both sides can restore it */
static vki_sigset_t fork_saved_mask;

// In Linux, the sys_fork() function varies across architectures, but we
// ignore the various args it gets, and so it looks arch-neutral. Hmm.
PRE(sys_fork)
{
   Bool is_child;
   Int child_pid;
   vki_sigset_t mask;

   PRINT("sys_fork ( )");
   PRE_REG_READ0(long, "fork");

   /* Block all signals during fork, so that we can fix things up in the
      child without being interrupted. */
   VG_(sigfillset)(&mask);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &mask, &fork_saved_mask);

   VG_(do_atfork_pre)(tid);

   SET_STATUS_from_SysRes( VG_(do_syscall0)(__NR_fork) );

   if (!SUCCESS) return;

#if defined(VGO_linux)
   // RES is 0 for child, non-0 (the child's PID) for parent.
   is_child = ( RES == 0 ? True : False );
   child_pid = ( is_child ? -1 : RES );
#elif defined(VGO_darwin)
   // RES is the child's pid. RESHI is 1 for child, 0 for parent.
   is_child = RESHI;
   child_pid = RES;
#else
# error Unknown OS
#endif

   if (is_child) {
      VG_(do_atfork_child)(tid);

      /* restore signal mask */
      VG_(sigprocmask)(VKI_SIG_SETMASK, &fork_saved_mask, NULL);

      /* If --child-silent-after-fork=yes was specified, set the output
         file descriptors to 'impossible' values. This is noticed by
         send_bytes_to_logging_sink in m_libcprint.c, which duly stops
         writing any further output. */
      if (VG_(clo_child_silent_after_fork)) {
         if (!VG_(log_output_sink).is_socket)
            VG_(log_output_sink).fd = -1;
         if (!VG_(xml_output_sink).is_socket)
            VG_(xml_output_sink).fd = -1;
      }
   } else {
      VG_(do_atfork_parent)(tid);

      PRINT(" fork: process %d created child %d\n", VG_(getpid)(), child_pid);

      /* restore signal mask */
      VG_(sigprocmask)(VKI_SIG_SETMASK, &fork_saved_mask, NULL);
   }
}
#endif // !defined(VGO_solaris)

PRE(sys_ftruncate)
{
   *flags |= SfMayBlock;
   PRINT("sys_ftruncate ( %lu, %lu )", ARG1, ARG2);
   PRE_REG_READ2(long, "ftruncate", unsigned int, fd, unsigned long, length);
}

PRE(sys_truncate)
{
   *flags |= SfMayBlock;
   PRINT("sys_truncate ( %#lx(%s), %lu )", ARG1, (HChar*)ARG1, ARG2);
   PRE_REG_READ2(long, "truncate",
                 const char *, path, unsigned long, length);
   PRE_MEM_RASCIIZ( "truncate(path)", ARG1 );
}

/* ftruncate64(2): 64-bit length is split over two registers on 32-bit
   targets, single word otherwise (cf. pread64/pwrite64). */
PRE(sys_ftruncate64)
{
   *flags |= SfMayBlock;
#if VG_WORDSIZE == 4
   PRINT("sys_ftruncate64 ( %lu, %llu )", ARG1, MERGE64(ARG2,ARG3));
   PRE_REG_READ3(long, "ftruncate64",
                 unsigned int, fd,
                 UWord, MERGE64_FIRST(length), UWord, MERGE64_SECOND(length));
#else
   PRINT("sys_ftruncate64 ( %lu, %lu )", ARG1, ARG2);
   PRE_REG_READ2(long, "ftruncate64",
                 unsigned int,fd, UWord,length);
#endif
}

PRE(sys_truncate64)
{
   *flags |= SfMayBlock;
#if VG_WORDSIZE == 4
   PRINT("sys_truncate64 ( %#lx, %lld )", ARG1, (Long)MERGE64(ARG2, ARG3));
   PRE_REG_READ3(long, "truncate64",
                 const char *, path,
                 UWord, MERGE64_FIRST(length), UWord, MERGE64_SECOND(length));
#else
   PRINT("sys_truncate64 ( %#lx, %lld )", ARG1, (Long)ARG2);
   PRE_REG_READ2(long, "truncate64",
                 const char *,path, UWord,length);
#endif
   PRE_MEM_RASCIIZ( "truncate64(path)", ARG1 );
}

PRE(sys_getdents)
{
   *flags |= SfMayBlock;
   PRINT("sys_getdents ( %lu, %#lx, %lu )", ARG1, ARG2, ARG3);
   PRE_REG_READ3(long, "getdents",
                 unsigned int, fd, struct vki_dirent *, dirp,
                 unsigned int, count);
   PRE_MEM_WRITE( "getdents(dirp)", ARG2, ARG3 );
}

POST(sys_getdents)
{
   vg_assert(SUCCESS);
   if (RES > 0)
      POST_MEM_WRITE( ARG2, RES );
}

PRE(sys_getdents64)
{
   *flags |=
SfMayBlock;
   PRINT("sys_getdents64 ( %lu, %#lx, %lu )",ARG1, ARG2, ARG3);
   PRE_REG_READ3(long, "getdents64",
                 unsigned int, fd, struct vki_dirent64 *, dirp,
                 unsigned int, count);
   PRE_MEM_WRITE( "getdents64(dirp)", ARG2, ARG3 );
}

POST(sys_getdents64)
{
   vg_assert(SUCCESS);
   if (RES > 0)
      POST_MEM_WRITE( ARG2, RES );
}

PRE(sys_getgroups)
{
   PRINT("sys_getgroups ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "getgroups", int, size, vki_gid_t *, list);
   if (ARG1 > 0)
      PRE_MEM_WRITE( "getgroups(list)", ARG2, ARG1 * sizeof(vki_gid_t) );
}

POST(sys_getgroups)
{
   vg_assert(SUCCESS);
   /* kernel wrote RES group ids into the list */
   if (ARG1 > 0 && RES > 0)
      POST_MEM_WRITE( ARG2, RES * sizeof(vki_gid_t) );
}

PRE(sys_getcwd)
{
   // Comment from linux/fs/dcache.c:
   // NOTE! The user-level library version returns a character pointer.
   // The kernel system call just returns the length of the buffer filled
   // (which includes the ending '\0' character), or a negative error
   // value.
   // Is this Linux-specific? If so it should be moved to syswrap-linux.c.
   PRINT("sys_getcwd ( %#lx, %llu )", ARG1,(ULong)ARG2);
   PRE_REG_READ2(long, "getcwd", char *, buf, unsigned long, size);
   PRE_MEM_WRITE( "getcwd(buf)", ARG1, ARG2 );
}

POST(sys_getcwd)
{
   vg_assert(SUCCESS);
   /* RES is the number of bytes the kernel filled in (see comment above) */
   if (RES != (Addr)NULL)
      POST_MEM_WRITE( ARG1, RES );
}

PRE(sys_geteuid)
{
   PRINT("sys_geteuid ( )");
   PRE_REG_READ0(long, "geteuid");
}

PRE(sys_getegid)
{
   PRINT("sys_getegid ( )");
   PRE_REG_READ0(long, "getegid");
}

PRE(sys_getgid)
{
   PRINT("sys_getgid ( )");
   PRE_REG_READ0(long, "getgid");
}

PRE(sys_getpid)
{
   PRINT("sys_getpid ()");
   PRE_REG_READ0(long, "getpid");
}

PRE(sys_getpgid)
{
   PRINT("sys_getpgid ( %ld )", SARG1);
   PRE_REG_READ1(long, "getpgid", vki_pid_t, pid);
}

PRE(sys_getpgrp)
{
   PRINT("sys_getpgrp ()");
   PRE_REG_READ0(long, "getpgrp");
}

PRE(sys_getppid)
{
   PRINT("sys_getppid ()");
   PRE_REG_READ0(long, "getppid");
}

/* Shared POST logic for getrlimit variants: mark the struct written, then
   overlay Valgrind's own view of the fd/data/stack limits so the client
   sees the limits it is actually being run under. */
static void common_post_getrlimit(ThreadId tid, UWord a1, UWord a2)
{
   POST_MEM_WRITE( a2, sizeof(struct vki_rlimit) );

#ifdef _RLIMIT_POSIX_FLAG
   // Darwin will sometimes set _RLIMIT_POSIX_FLAG on getrlimit calls.
   // Unset it here to make the switch case below work correctly.
   a1 &= ~_RLIMIT_POSIX_FLAG;
#endif

   switch (a1) {
   case VKI_RLIMIT_NOFILE:
      ((struct vki_rlimit *)a2)->rlim_cur = VG_(fd_soft_limit);
      ((struct vki_rlimit *)a2)->rlim_max = VG_(fd_hard_limit);
      break;

   case VKI_RLIMIT_DATA:
      *((struct vki_rlimit *)a2) = VG_(client_rlimit_data);
      break;

   case VKI_RLIMIT_STACK:
      *((struct vki_rlimit *)a2) = VG_(client_rlimit_stack);
      break;
   }
}

PRE(sys_old_getrlimit)
{
   PRINT("sys_old_getrlimit ( %lu, %#lx )", ARG1, ARG2);
   PRE_REG_READ2(long, "old_getrlimit",
                 unsigned int, resource, struct rlimit *, rlim);
   PRE_MEM_WRITE( "old_getrlimit(rlim)", ARG2, sizeof(struct vki_rlimit) );
}

POST(sys_old_getrlimit)
{
   common_post_getrlimit(tid, ARG1, ARG2);
}

PRE(sys_getrlimit)
{
   PRINT("sys_getrlimit ( %lu, %#lx )", ARG1, ARG2);
   PRE_REG_READ2(long, "getrlimit",
                 unsigned int, resource, struct rlimit *, rlim);
   PRE_MEM_WRITE( "getrlimit(rlim)", ARG2, sizeof(struct vki_rlimit) );
}

POST(sys_getrlimit)
{
   common_post_getrlimit(tid, ARG1, ARG2);
}

PRE(sys_getrusage)
{
   PRINT("sys_getrusage ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "getrusage", int, who, struct rusage *, usage);
   PRE_MEM_WRITE( "getrusage(usage)", ARG2, sizeof(struct vki_rusage) );
}

POST(sys_getrusage)
{
   vg_assert(SUCCESS);
   if (RES == 0)
      POST_MEM_WRITE( ARG2, sizeof(struct vki_rusage) );
}

PRE(sys_gettimeofday)
{
   PRINT("sys_gettimeofday ( %#lx, %#lx )", ARG1,ARG2);
   PRE_REG_READ2(long, "gettimeofday",
                 struct timeval *, tv, struct timezone *, tz);
   // GrP fixme does darwin write to *tz anymore?
   if (ARG1 != 0)
      PRE_timeval_WRITE( "gettimeofday(tv)", ARG1 );
   if (ARG2 != 0)
      PRE_MEM_WRITE( "gettimeofday(tz)", ARG2, sizeof(struct vki_timezone) );
}

POST(sys_gettimeofday)
{
   vg_assert(SUCCESS);
   if (RES == 0) {
      if (ARG1 != 0)
         POST_timeval_WRITE( ARG1 );
      if (ARG2 != 0)
         POST_MEM_WRITE( ARG2, sizeof(struct vki_timezone) );
   }
}

PRE(sys_settimeofday)
{
   PRINT("sys_settimeofday ( %#lx, %#lx )", ARG1,ARG2);
   PRE_REG_READ2(long, "settimeofday",
                 struct timeval *, tv, struct timezone *, tz);
   if (ARG1 != 0)
      PRE_timeval_READ( "settimeofday(tv)", ARG1 );
   if (ARG2 != 0) {
      PRE_MEM_READ( "settimeofday(tz)", ARG2, sizeof(struct vki_timezone) );
      /* maybe should warn if tz->tz_dsttime is non-zero? */
   }
}

PRE(sys_getuid)
{
   PRINT("sys_getuid ( )");
   PRE_REG_READ0(long, "getuid");
}

void ML_(PRE_unknown_ioctl)(ThreadId tid, UWord request, UWord arg)
{
   /* We don't have any specific information on it, so try to do something
      reasonable based on direction and size bits. The encoding scheme is
      described in /usr/include/asm/ioctl.h or /usr/include/sys/ioccom.h .
      According to Simon Hausmann, _IOC_READ means the kernel writes a
      value to the ioctl value passed from the user space and the other
      way around with _IOC_WRITE. */

#if defined(VGO_solaris)
   /* Majority of Solaris ioctl requests does not honour direction hints. */
   UInt dir = _VKI_IOC_NONE;
#else
   UInt dir = _VKI_IOC_DIR(request);
#endif
   UInt size = _VKI_IOC_SIZE(request);

   if (SimHintiS(SimHint_lax_ioctls, VG_(clo_sim_hints))) {
      /* Be very lax about ioctl handling; the only assumption is that the
         size is correct. Doesn't require the full buffer to be initialized
         when writing. Without this, using some device drivers with a large
         number of strange ioctl commands becomes very tiresome. */
   } else if (/* size == 0 || */ dir == _VKI_IOC_NONE) {
      /* remember which direction-less requests we've already moaned about */
      static UWord unknown_ioctl[10];
      static Int moans = sizeof(unknown_ioctl) / sizeof(unknown_ioctl[0]);

      if (moans > 0 && !VG_(clo_xml)) {
         /* Check if have not already moaned for this request. */
         UInt i;
         for (i = 0; i < sizeof(unknown_ioctl)/sizeof(unknown_ioctl[0]); i++) {
            if (unknown_ioctl[i] == request)
               break;
            if (unknown_ioctl[i] == 0) {
               unknown_ioctl[i] = request;
               moans--;
               VG_(umsg)("Warning: noted but unhandled ioctl 0x%lx"
                         " with no size/direction hints.\n", request);
               VG_(umsg)(" This could cause spurious value errors to appear.\n");
               VG_(umsg)(" See README_MISSING_SYSCALL_OR_IOCTL for "
                         "guidance on writing a proper wrapper.\n" );
               //VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
               return;
            }
         }
      }
   } else {
      //VG_(message)(Vg_UserMsg, "UNKNOWN ioctl %#lx\n", request);
      //VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
      if ((dir & _VKI_IOC_WRITE) && size > 0)
         PRE_MEM_READ( "ioctl(generic)", arg, size);
      if ((dir & _VKI_IOC_READ) && size > 0)
         PRE_MEM_WRITE( "ioctl(generic)", arg, size);
   }
}

void ML_(POST_unknown_ioctl)(ThreadId tid, UInt res, UWord request, UWord arg)
{
   /* We don't have any specific information on it, so try to do something
      reasonable based on direction and size bits. The encoding scheme is
      described in /usr/include/asm/ioctl.h or /usr/include/sys/ioccom.h .
      According to Simon Hausmann, _IOC_READ means the kernel writes a
      value to the ioctl value passed from the user space and the other
      way around with _IOC_WRITE. */

   UInt dir = _VKI_IOC_DIR(request);
   UInt size = _VKI_IOC_SIZE(request);
   if (size > 0 && (dir & _VKI_IOC_READ)
       && res == 0
       && arg != (Addr)NULL) {
      POST_MEM_WRITE(arg, size);
   }
}

/* If we're sending a SIGKILL to one of our own threads, then simulate it
   rather than really sending the signal, so that the target thread gets a
   chance to clean up. Returns True if we did the killing (or no killing is
   necessary), and False if the caller should use the normal kill syscall.

   "pid" is any pid argument which can be passed to kill; group kills
   (< -1, 0), and owner kills (-1) are ignored, on the grounds that they'll
   most likely hit all the threads and we won't need to worry about cleanup.
   In truth, we can't fully emulate these multicast kills.

   "tgid" is a thread group id. If it is not -1, then the target thread
   must be in that thread group. */
Bool ML_(do_sigkill)(Int pid, Int tgid)
{
   ThreadState *tst;
   ThreadId tid;

   if (pid <= 0)
      return False;

   tid = VG_(lwpid_to_vgtid)(pid);
   if (tid == VG_INVALID_THREADID)
      return False; /* none of our threads */

   tst = VG_(get_ThreadState)(tid);
   if (tst == NULL || tst->status == VgTs_Empty)
      return False; /* hm, shouldn't happen */

   if (tgid != -1 && tst->os_state.threadgroup != tgid)
      return False; /* not the right thread group */

   /* Check to see that the target isn't already exiting. */
   if (!VG_(is_exiting)(tid)) {
      if (VG_(clo_trace_signals))
         VG_(message)(Vg_DebugMsg, "Thread %u being killed with SIGKILL\n", tst->tid);

      tst->exitreason = VgSrc_FatalSig;
      tst->os_state.fatalsig = VKI_SIGKILL;

      if (!VG_(is_running_thread)(tid))
         VG_(get_thread_out_of_syscall)(tid);
   }

   return True;
}

PRE(sys_kill)
{
   PRINT("sys_kill ( %ld, %ld )", SARG1, SARG2);
   PRE_REG_READ2(long, "kill", int, pid, int, signal);
   if (!ML_(client_signal_OK)(ARG2)) {
      SET_STATUS_Failure( VKI_EINVAL );
      return;
   }

   /* If we're sending SIGKILL, check to see if the target is one of our
      threads and handle it specially. */
   if (ARG2 == VKI_SIGKILL && ML_(do_sigkill)(ARG1, -1))
      SET_STATUS_Success(0);
   else
      /* re syscall3: Darwin has a 3rd arg, which is a flag (boolean)
         affecting how posix-compliant the call is. I guess it is harmless
         to pass the 3rd arg on other platforms; hence pass it on all. */
      SET_STATUS_from_SysRes( VG_(do_syscall3)(SYSNO, ARG1, ARG2, ARG3) );

   if (VG_(clo_trace_signals))
      VG_(message)(Vg_DebugMsg, "kill: sent signal %ld to pid %ld\n", SARG2, SARG1);

   /* This kill might have given us a pending signal. Ask for a check once
      the syscall is done. */
   *flags |= SfPollAfter;
}

PRE(sys_link)
{
   *flags |= SfMayBlock;
   PRINT("sys_link ( %#lx(%s), %#lx(%s) )", ARG1,(char*)ARG1,ARG2,(char*)ARG2);
   PRE_REG_READ2(long, "link", const char *, oldpath, const char *, newpath);
   PRE_MEM_RASCIIZ( "link(oldpath)", ARG1);
   PRE_MEM_RASCIIZ( "link(newpath)", ARG2);
}

PRE(sys_newlstat)
{
   PRINT("sys_newlstat ( %#lx(%s), %#lx )", ARG1,(char*)ARG1,ARG2);
   PRE_REG_READ2(long, "lstat", char *, file_name, struct stat *, buf);
   PRE_MEM_RASCIIZ( "lstat(file_name)", ARG1 );
   PRE_MEM_WRITE( "lstat(buf)", ARG2, sizeof(struct vki_stat) );
}

POST(sys_newlstat)
{
   vg_assert(SUCCESS);
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat) );
}

PRE(sys_mkdir)
{
   *flags |= SfMayBlock;
   PRINT("sys_mkdir ( %#lx(%s), %ld )", ARG1, (HChar*)ARG1, SARG2);
   PRE_REG_READ2(long, "mkdir", const char *, pathname, int, mode);
   PRE_MEM_RASCIIZ( "mkdir(pathname)", ARG1 );
}

PRE(sys_mprotect)
{
   PRINT("sys_mprotect ( %#lx, %lu, %lu )", ARG1, ARG2, ARG3);
   PRE_REG_READ3(long, "mprotect",
                 unsigned long, addr, vki_size_t, len, unsigned long, prot);

   if (!ML_(valid_client_addr)(ARG1, ARG2, tid, "mprotect")) {
      SET_STATUS_Failure( VKI_ENOMEM );
   }
#if defined(VKI_PROT_GROWSDOWN)
   else
   if (ARG3 & (VKI_PROT_GROWSDOWN|VKI_PROT_GROWSUP)) {
      /* Deal with mprotects on growable stack areas.

         The critical files to understand all this are mm/mprotect.c in
         the kernel and sysdeps/unix/sysv/linux/dl-execstack.c in glibc.

         The kernel provides PROT_GROWSDOWN and PROT_GROWSUP which round
         the start/end address of mprotect to the start/end of the
         underlying vma and glibc uses that as an easy way to change the
         protection of the stack by calling mprotect on the last page of
         the stack with PROT_GROWSDOWN set.

         The sanity check provided by the kernel is that the vma must have
         the VM_GROWSDOWN/VM_GROWSUP flag set as appropriate.
*/
      UInt grows = ARG3 & (VKI_PROT_GROWSDOWN|VKI_PROT_GROWSUP);
      NSegment const *aseg = VG_(am_find_nsegment)(ARG1);
      NSegment const *rseg;

      vg_assert(aseg);

      if (grows == VKI_PROT_GROWSDOWN) {
         /* widen the request down to the start of the underlying segment,
            provided it abuts a suitable reservation below it */
         rseg = VG_(am_next_nsegment)( aseg, False/*backwards*/ );
         if (rseg
             && rseg->kind == SkResvn
             && rseg->smode == SmUpper
             && rseg->end+1 == aseg->start) {
            Addr end = ARG1 + ARG2;
            ARG1 = aseg->start;
            ARG2 = end - aseg->start;
            ARG3 &= ~VKI_PROT_GROWSDOWN;
         } else {
            SET_STATUS_Failure( VKI_EINVAL );
         }
      } else if (grows == VKI_PROT_GROWSUP) {
         rseg = VG_(am_next_nsegment)( aseg, True/*forwards*/ );
         if (rseg
             && rseg->kind == SkResvn
             && rseg->smode == SmLower
             && aseg->end+1 == rseg->start) {
            ARG2 = aseg->end - ARG1 + 1;
            ARG3 &= ~VKI_PROT_GROWSUP;
         } else {
            SET_STATUS_Failure( VKI_EINVAL );
         }
      } else {
         /* both GROWSUP and GROWSDOWN */
         SET_STATUS_Failure( VKI_EINVAL );
      }
   }
#endif // defined(VKI_PROT_GROWSDOWN)
}

POST(sys_mprotect)
{
   Addr a = ARG1;
   SizeT len = ARG2;
   Int prot = ARG3;

   ML_(notify_core_and_tool_of_mprotect)(a, len, prot);
}

PRE(sys_munmap)
{
   if (0) VG_(printf)(" munmap( %#lx )\n", ARG1);
   PRINT("sys_munmap ( %#lx, %llu )", ARG1,(ULong)ARG2);
   PRE_REG_READ2(long, "munmap", unsigned long, start, vki_size_t, length);

   if (!ML_(valid_client_addr)(ARG1, ARG2, tid, "munmap"))
      SET_STATUS_Failure( VKI_EINVAL );
}

POST(sys_munmap)
{
   Addr a = ARG1;
   SizeT len = ARG2;

   ML_(notify_core_and_tool_of_munmap)( a, len );
}

PRE(sys_mincore)
{
   PRINT("sys_mincore ( %#lx, %llu, %#lx )", ARG1,(ULong)ARG2,ARG3);
   PRE_REG_READ3(long, "mincore",
                 unsigned long, start, vki_size_t, length,
                 unsigned char *, vec);
   /* vec gets one byte per page covered by [start, start+length) */
   PRE_MEM_WRITE( "mincore(vec)", ARG3, VG_PGROUNDUP(ARG2) / VKI_PAGE_SIZE );
}

POST(sys_mincore)
{
   POST_MEM_WRITE( ARG3, VG_PGROUNDUP(ARG2) / VKI_PAGE_SIZE );
}

PRE(sys_nanosleep)
{
   /* SfPostOnFail: on EINTR the kernel writes the remaining time to *rem,
      so the POST handler must run even on failure */
   *flags |= SfMayBlock|SfPostOnFail;
   PRINT("sys_nanosleep ( %#lx, %#lx )", ARG1,ARG2);
   PRE_REG_READ2(long, "nanosleep",
                 struct timespec *, req, struct timespec *, rem);
   PRE_MEM_READ( "nanosleep(req)", ARG1, sizeof(struct vki_timespec) );
   if (ARG2 != 0)
      PRE_MEM_WRITE( "nanosleep(rem)", ARG2, sizeof(struct vki_timespec) );
}

POST(sys_nanosleep)
{
   vg_assert(SUCCESS || FAILURE);
   if (ARG2 != 0 && FAILURE && ERR == VKI_EINTR)
      POST_MEM_WRITE( ARG2, sizeof(struct vki_timespec) );
}

#if defined(VGO_linux) || defined(VGO_solaris)
/* Handles the case where the open is of /proc/self/auxv or
   /proc/<pid>/auxv, and just gives out a copy of the fd for the fake file
   we cooked up at startup (in m_main). Also, seeks the cloned fd back to
   the start.
   Returns True if auxv open was handled (status is set). */
Bool ML_(handle_auxv_open)(SyscallStatus *status, const HChar *filename,
                           int flags)
{
   HChar name[30];   // large enough

   if (!ML_(safe_to_deref)((const void *) filename, 1))
      return False;

   /* Opening /proc/<pid>/auxv or /proc/self/auxv? */
   VG_(sprintf)(name, "/proc/%d/auxv", VG_(getpid)());
   if (!VG_STREQ(filename, name) && !VG_STREQ(filename, "/proc/self/auxv"))
      return False;

   /* Allow to open the file only for reading. */
   if (flags & (VKI_O_WRONLY | VKI_O_RDWR)) {
      SET_STATUS_Failure(VKI_EACCES);
      return True;
   }

#  if defined(VGO_solaris)
   VG_(sprintf)(name, "/proc/self/fd/%d", VG_(cl_auxv_fd));
   SysRes sres = VG_(open)(name, flags, 0);
   SET_STATUS_from_SysRes(sres);
#  else
   SysRes sres = VG_(dup)(VG_(cl_auxv_fd));
   SET_STATUS_from_SysRes(sres);
   if (!sr_isError(sres)) {
      OffT off = VG_(lseek)(sr_Res(sres), 0, VKI_SEEK_SET);
      if (off < 0)
         SET_STATUS_Failure(VKI_EMFILE);
   }
#  endif

   return True;
}
#endif // defined(VGO_linux) || defined(VGO_solaris)

PRE(sys_open)
{
   if (ARG2 & VKI_O_CREAT) {
      // 3-arg version
      PRINT("sys_open ( %#lx(%s), %ld, %ld )",ARG1, (HChar*)ARG1, SARG2, SARG3);
      PRE_REG_READ3(long, "open",
                    const char *, filename, int, flags, int, mode);
   } else {
      // 2-arg version
      PRINT("sys_open ( %#lx(%s), %ld )",ARG1, (HChar*)ARG1, SARG2);
      PRE_REG_READ2(long, "open",
                    const char *, filename, int, flags);
   }
   PRE_MEM_RASCIIZ( "open(filename)", ARG1 );

#if defined(VGO_linux)
   /* Handle the case where the open is of /proc/self/cmdline
or /proc/<pid>/cmdline, and just give it a copy of the fd for the fake file we cooked up at startup (in m_main). Also, seek the cloned fd back to the start. */ { HChar name[30]; // large enough HChar* arg1s = (HChar*) ARG1; SysRes sres; VG_(sprintf)(name, "/proc/%d/cmdline", VG_(getpid)()); if (ML_(safe_to_deref)( arg1s, 1 ) && (VG_STREQ(arg1s, name) || VG_STREQ(arg1s, "/proc/self/cmdline")) ) { sres = VG_(dup)( VG_(cl_cmdline_fd) ); SET_STATUS_from_SysRes( sres ); if (!sr_isError(sres)) { OffT off = VG_(lseek)( sr_Res(sres), 0, VKI_SEEK_SET ); if (off < 0) SET_STATUS_Failure( VKI_EMFILE ); } return; } } /* Handle also the case of /proc/self/auxv or /proc/<pid>/auxv. */ if (ML_(handle_auxv_open)(status, (const HChar *)ARG1, ARG2)) return; #endif // defined(VGO_linux) /* Otherwise handle normally */ *flags |= SfMayBlock; } POST(sys_open) { vg_assert(SUCCESS); if (!ML_(fd_allowed)(RES, "open", tid, True)) { VG_(close)(RES); SET_STATUS_Failure( VKI_EMFILE ); } else { if (VG_(clo_track_fds)) ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG1); } } PRE(sys_read) { *flags |= SfMayBlock; PRINT("sys_read ( %lu, %#lx, %lu )", ARG1, ARG2, ARG3); PRE_REG_READ3(ssize_t, "read", unsigned int, fd, char *, buf, vki_size_t, count); if (!ML_(fd_allowed)(ARG1, "read", tid, False)) SET_STATUS_Failure( VKI_EBADF ); else PRE_MEM_WRITE( "read(buf)", ARG2, ARG3 ); } POST(sys_read) { vg_assert(SUCCESS); POST_MEM_WRITE( ARG2, RES ); } PRE(sys_write) { Bool ok; *flags |= SfMayBlock; PRINT("sys_write ( %lu, %#lx, %lu )", ARG1, ARG2, ARG3); PRE_REG_READ3(ssize_t, "write", unsigned int, fd, const char *, buf, vki_size_t, count); /* check to see if it is allowed. If not, try for an exemption from --sim-hints=enable-outer (used for self hosting). 
*/ ok = ML_(fd_allowed)(ARG1, "write", tid, False); if (!ok && ARG1 == 2/*stderr*/ && SimHintiS(SimHint_enable_outer, VG_(clo_sim_hints))) ok = True; #if defined(VGO_solaris) if (!ok && VG_(vfork_fildes_addr) != NULL && *VG_(vfork_fildes_addr) >= 0 && *VG_(vfork_fildes_addr) == ARG1) ok = True; #endif if (!ok) SET_STATUS_Failure( VKI_EBADF ); else PRE_MEM_READ( "write(buf)", ARG2, ARG3 ); } PRE(sys_creat) { *flags |= SfMayBlock; PRINT("sys_creat ( %#lx(%s), %ld )", ARG1, (HChar*)ARG1, SARG2); PRE_REG_READ2(long, "creat", const char *, pathname, int, mode); PRE_MEM_RASCIIZ( "creat(pathname)", ARG1 ); } POST(sys_creat) { vg_assert(SUCCESS); if (!ML_(fd_allowed)(RES, "creat", tid, True)) { VG_(close)(RES); SET_STATUS_Failure( VKI_EMFILE ); } else { if (VG_(clo_track_fds)) ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG1); } } PRE(sys_poll) { /* struct pollfd { int fd; -- file descriptor short events; -- requested events short revents; -- returned events }; int poll(struct pollfd *ufds, unsigned int nfds, int timeout) */ UInt i; struct vki_pollfd* ufds = (struct vki_pollfd *)ARG1; *flags |= SfMayBlock; PRINT("sys_poll ( %#lx, %lu, %ld )\n", ARG1, ARG2, SARG3); PRE_REG_READ3(long, "poll", struct vki_pollfd *, ufds, unsigned int, nfds, long, timeout); for (i = 0; i < ARG2; i++) { PRE_MEM_READ( "poll(ufds.fd)", (Addr)(&ufds[i].fd), sizeof(ufds[i].fd) ); PRE_MEM_READ( "poll(ufds.events)", (Addr)(&ufds[i].events), sizeof(ufds[i].events) ); PRE_MEM_WRITE( "poll(ufds.revents)", (Addr)(&ufds[i].revents), sizeof(ufds[i].revents) ); } } POST(sys_poll) { if (RES >= 0) { UInt i; struct vki_pollfd* ufds = (struct vki_pollfd *)ARG1; for (i = 0; i < ARG2; i++) POST_MEM_WRITE( (Addr)(&ufds[i].revents), sizeof(ufds[i].revents) ); } } PRE(sys_readlink) { FUSE_COMPATIBLE_MAY_BLOCK(); Word saved = SYSNO; PRINT("sys_readlink ( %#lx(%s), %#lx, %llu )", ARG1,(char*)ARG1,ARG2,(ULong)ARG3); PRE_REG_READ3(long, "readlink", const char *, path, char *, buf, int, bufsiz); 
PRE_MEM_RASCIIZ( "readlink(path)", ARG1 ); PRE_MEM_WRITE( "readlink(buf)", ARG2,ARG3 ); { #if defined(VGO_linux) /* * Handle the case where readlink is looking at /proc/self/exe or * /proc/<pid>/exe. */ HChar name[30]; // large enough HChar* arg1s = (HChar*) ARG1; VG_(sprintf)(name, "/proc/%d/exe", VG_(getpid)()); if (ML_(safe_to_deref)(arg1s, 1) && (VG_STREQ(arg1s, name) || VG_STREQ(arg1s, "/proc/self/exe")) ) { VG_(sprintf)(name, "/proc/self/fd/%d", VG_(cl_exec_fd)); SET_STATUS_from_SysRes( VG_(do_syscall3)(saved, (UWord)name, ARG2, ARG3)); } else #elif defined(VGO_solaris) /* Same for Solaris, but /proc/self/path/a.out and /proc/<pid>/path/a.out. */ HChar name[30]; // large enough HChar* arg1s = (HChar*) ARG1; VG_(sprintf)(name, "/proc/%d/path/a.out", VG_(getpid)()); if (ML_(safe_to_deref)(arg1s, 1) && (VG_STREQ(arg1s, name) || VG_STREQ(arg1s, "/proc/self/path/a.out")) ) { VG_(sprintf)(name, "/proc/self/path/%d", VG_(cl_exec_fd)); SET_STATUS_from_SysRes( VG_(do_syscall3)(saved, (UWord)name, ARG2, ARG3)); } else #endif { /* Normal case */ SET_STATUS_from_SysRes( VG_(do_syscall3)(saved, ARG1, ARG2, ARG3)); } } if (SUCCESS && RES > 0) POST_MEM_WRITE( ARG2, RES ); } PRE(sys_readv) { Int i; struct vki_iovec * vec; *flags |= SfMayBlock; PRINT("sys_readv ( %lu, %#lx, %lu )", ARG1, ARG2, ARG3); PRE_REG_READ3(ssize_t, "readv", unsigned long, fd, const struct iovec *, vector, unsigned long, count); if (!ML_(fd_allowed)(ARG1, "readv", tid, False)) { SET_STATUS_Failure( VKI_EBADF ); } else { if ((Int)ARG3 >= 0) PRE_MEM_READ( "readv(vector)", ARG2, ARG3 * sizeof(struct vki_iovec) ); if (ARG2 != 0) { /* ToDo: don't do any of the following if the vector is invalid */ vec = (struct vki_iovec *)ARG2; for (i = 0; i < (Int)ARG3; i++) PRE_MEM_WRITE( "readv(vector[...])", (Addr)vec[i].iov_base, vec[i].iov_len ); } } } POST(sys_readv) { vg_assert(SUCCESS); if (RES > 0) { Int i; struct vki_iovec * vec = (struct vki_iovec *)ARG2; Int remains = RES; /* RES holds the number of bytes 
read. */ for (i = 0; i < (Int)ARG3; i++) { Int nReadThisBuf = vec[i].iov_len; if (nReadThisBuf > remains) nReadThisBuf = remains; POST_MEM_WRITE( (Addr)vec[i].iov_base, nReadThisBuf ); remains -= nReadThisBuf; if (remains < 0) VG_(core_panic)("readv: remains < 0"); } } } PRE(sys_rename) { FUSE_COMPATIBLE_MAY_BLOCK(); PRINT("sys_rename ( %#lx(%s), %#lx(%s) )", ARG1,(char*)ARG1,ARG2,(char*)ARG2); PRE_REG_READ2(long, "rename", const char *, oldpath, const char *, newpath); PRE_MEM_RASCIIZ( "rename(oldpath)", ARG1 ); PRE_MEM_RASCIIZ( "rename(newpath)", ARG2 ); } PRE(sys_rmdir) { *flags |= SfMayBlock; PRINT("sys_rmdir ( %#lx(%s) )", ARG1,(char*)ARG1); PRE_REG_READ1(long, "rmdir", const char *, pathname); PRE_MEM_RASCIIZ( "rmdir(pathname)", ARG1 ); } PRE(sys_select) { *flags |= SfMayBlock; PRINT("sys_select ( %ld, %#lx, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3, ARG4, ARG5); PRE_REG_READ5(long, "select", int, n, vki_fd_set *, readfds, vki_fd_set *, writefds, vki_fd_set *, exceptfds, struct vki_timeval *, timeout); // XXX: this possibly understates how much memory is read. 
if (ARG2 != 0) PRE_MEM_READ( "select(readfds)", ARG2, ARG1/8 /* __FD_SETSIZE/8 */ ); if (ARG3 != 0) PRE_MEM_READ( "select(writefds)", ARG3, ARG1/8 /* __FD_SETSIZE/8 */ ); if (ARG4 != 0) PRE_MEM_READ( "select(exceptfds)", ARG4, ARG1/8 /* __FD_SETSIZE/8 */ ); if (ARG5 != 0) PRE_timeval_READ( "select(timeout)", ARG5 ); } PRE(sys_setgid) { PRINT("sys_setgid ( %lu )", ARG1); PRE_REG_READ1(long, "setgid", vki_gid_t, gid); } PRE(sys_setsid) { PRINT("sys_setsid ( )"); PRE_REG_READ0(long, "setsid"); } PRE(sys_setgroups) { PRINT("setgroups ( %llu, %#lx )", (ULong)ARG1, ARG2); PRE_REG_READ2(long, "setgroups", int, size, vki_gid_t *, list); if (ARG1 > 0) PRE_MEM_READ( "setgroups(list)", ARG2, ARG1 * sizeof(vki_gid_t) ); } PRE(sys_setpgid) { PRINT("setpgid ( %ld, %ld )", SARG1, SARG2); PRE_REG_READ2(long, "setpgid", vki_pid_t, pid, vki_pid_t, pgid); } PRE(sys_setregid) { PRINT("sys_setregid ( %lu, %lu )", ARG1, ARG2); PRE_REG_READ2(long, "setregid", vki_gid_t, rgid, vki_gid_t, egid); } PRE(sys_setreuid) { PRINT("sys_setreuid ( 0x%lx, 0x%lx )", ARG1, ARG2); PRE_REG_READ2(long, "setreuid", vki_uid_t, ruid, vki_uid_t, euid); } PRE(sys_setrlimit) { UWord arg1 = ARG1; PRINT("sys_setrlimit ( %lu, %#lx )", ARG1, ARG2); PRE_REG_READ2(long, "setrlimit", unsigned int, resource, struct rlimit *, rlim); PRE_MEM_READ( "setrlimit(rlim)", ARG2, sizeof(struct vki_rlimit) ); #ifdef _RLIMIT_POSIX_FLAG // Darwin will sometimes set _RLIMIT_POSIX_FLAG on setrlimit calls. // Unset it here to make the if statements below work correctly. 
arg1 &= ~_RLIMIT_POSIX_FLAG; #endif if (!VG_(am_is_valid_for_client)(ARG2, sizeof(struct vki_rlimit), VKI_PROT_READ)) { SET_STATUS_Failure( VKI_EFAULT ); } else if (((struct vki_rlimit *)ARG2)->rlim_cur > ((struct vki_rlimit *)ARG2)->rlim_max) { SET_STATUS_Failure( VKI_EINVAL ); } else if (arg1 == VKI_RLIMIT_NOFILE) { if (((struct vki_rlimit *)ARG2)->rlim_cur > VG_(fd_hard_limit) || ((struct vki_rlimit *)ARG2)->rlim_max != VG_(fd_hard_limit)) { SET_STATUS_Failure( VKI_EPERM ); } else { VG_(fd_soft_limit) = ((struct vki_rlimit *)ARG2)->rlim_cur; SET_STATUS_Success( 0 ); } } else if (arg1 == VKI_RLIMIT_DATA) { if (((struct vki_rlimit *)ARG2)->rlim_cur > VG_(client_rlimit_data).rlim_max || ((struct vki_rlimit *)ARG2)->rlim_max > VG_(client_rlimit_data).rlim_max) { SET_STATUS_Failure( VKI_EPERM ); } else { VG_(client_rlimit_data) = *(struct vki_rlimit *)ARG2; SET_STATUS_Success( 0 ); } } else if (arg1 == VKI_RLIMIT_STACK && tid == 1) { if (((struct vki_rlimit *)ARG2)->rlim_cur > VG_(client_rlimit_stack).rlim_max || ((struct vki_rlimit *)ARG2)->rlim_max > VG_(client_rlimit_stack).rlim_max) { SET_STATUS_Failure( VKI_EPERM ); } else { /* Change the value of client_stack_szB to the rlim_cur value but only if it is smaller than the size of the allocated stack for the client. TODO: All platforms should set VG_(clstk_max_size) as part of their setup_client_stack(). 
*/ if ((VG_(clstk_max_size) == 0) || (((struct vki_rlimit *) ARG2)->rlim_cur <= VG_(clstk_max_size))) VG_(threads)[tid].client_stack_szB = ((struct vki_rlimit *)ARG2)->rlim_cur; VG_(client_rlimit_stack) = *(struct vki_rlimit *)ARG2; SET_STATUS_Success( 0 ); } } } PRE(sys_setuid) { PRINT("sys_setuid ( %lu )", ARG1); PRE_REG_READ1(long, "setuid", vki_uid_t, uid); } PRE(sys_newstat) { FUSE_COMPATIBLE_MAY_BLOCK(); PRINT("sys_newstat ( %#lx(%s), %#lx )", ARG1,(char*)ARG1,ARG2); PRE_REG_READ2(long, "stat", char *, file_name, struct stat *, buf); PRE_MEM_RASCIIZ( "stat(file_name)", ARG1 ); PRE_MEM_WRITE( "stat(buf)", ARG2, sizeof(struct vki_stat) ); } POST(sys_newstat) { POST_MEM_WRITE( ARG2, sizeof(struct vki_stat) ); } PRE(sys_statfs) { FUSE_COMPATIBLE_MAY_BLOCK(); PRINT("sys_statfs ( %#lx(%s), %#lx )",ARG1,(char*)ARG1,ARG2); PRE_REG_READ2(long, "statfs", const char *, path, struct statfs *, buf); PRE_MEM_RASCIIZ( "statfs(path)", ARG1 ); PRE_MEM_WRITE( "statfs(buf)", ARG2, sizeof(struct vki_statfs) ); } POST(sys_statfs) { POST_MEM_WRITE( ARG2, sizeof(struct vki_statfs) ); } PRE(sys_statfs64) { PRINT("sys_statfs64 ( %#lx(%s), %llu, %#lx )",ARG1,(char*)ARG1,(ULong)ARG2,ARG3); PRE_REG_READ3(long, "statfs64", const char *, path, vki_size_t, size, struct statfs64 *, buf); PRE_MEM_RASCIIZ( "statfs64(path)", ARG1 ); PRE_MEM_WRITE( "statfs64(buf)", ARG3, ARG2 ); } POST(sys_statfs64) { POST_MEM_WRITE( ARG3, ARG2 ); } PRE(sys_symlink) { *flags |= SfMayBlock; PRINT("sys_symlink ( %#lx(%s), %#lx(%s) )",ARG1,(char*)ARG1,ARG2,(char*)ARG2); PRE_REG_READ2(long, "symlink", const char *, oldpath, const char *, newpath); PRE_MEM_RASCIIZ( "symlink(oldpath)", ARG1 ); PRE_MEM_RASCIIZ( "symlink(newpath)", ARG2 ); } PRE(sys_time) { /* time_t time(time_t *t); */ PRINT("sys_time ( %#lx )",ARG1); PRE_REG_READ1(long, "time", int *, t); if (ARG1 != 0) { PRE_MEM_WRITE( "time(t)", ARG1, sizeof(vki_time_t) ); } } POST(sys_time) { if (ARG1 != 0) { POST_MEM_WRITE( ARG1, sizeof(vki_time_t) ); } } 
/* ---- Simple wrappers: times / umask / unlink / uname / wait* -------- */
/* NOTE(review): these handlers run inside Valgrind's syscall-wrapping
   framework.  ARGn/SARGn, RES, tid, status and flags are supplied by the
   PRE()/POST() macros defined in the syswrap headers (not visible here). */

/* times(2): the kernel writes a struct tms back to userspace. */
PRE(sys_times)
{
   PRINT("sys_times ( %#lx )", ARG1);
   PRE_REG_READ1(long, "times", struct tms *, buf);
   if (ARG1 != 0) {
      PRE_MEM_WRITE( "times(buf)", ARG1, sizeof(struct vki_tms) );
   }
}

POST(sys_times)
{
   if (ARG1 != 0) {
      POST_MEM_WRITE( ARG1, sizeof(struct vki_tms) );
   }
}

PRE(sys_umask)
{
   PRINT("sys_umask ( %ld )", SARG1);
   PRE_REG_READ1(long, "umask", int, mask);
}

PRE(sys_unlink)
{
   *flags |= SfMayBlock;
   PRINT("sys_unlink ( %#lx(%s) )", ARG1,(char*)ARG1);
   PRE_REG_READ1(long, "unlink", const char *, pathname);
   PRE_MEM_RASCIIZ( "unlink(pathname)", ARG1 );
}

PRE(sys_newuname)
{
   PRINT("sys_newuname ( %#lx )", ARG1);
   PRE_REG_READ1(long, "uname", struct new_utsname *, buf);
   PRE_MEM_WRITE( "uname(buf)", ARG1, sizeof(struct vki_new_utsname) );
}

POST(sys_newuname)
{
   if (ARG1 != 0) {
      POST_MEM_WRITE( ARG1, sizeof(struct vki_new_utsname) );
   }
}

/* waitpid(2): status is an optional out-parameter. */
PRE(sys_waitpid)
{
   *flags |= SfMayBlock;
   PRINT("sys_waitpid ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
   PRE_REG_READ3(long, "waitpid",
                 vki_pid_t, pid, unsigned int *, status, int, options);
   if (ARG2 != (Addr)NULL)
      PRE_MEM_WRITE( "waitpid(status)", ARG2, sizeof(int) );
}

POST(sys_waitpid)
{
   if (ARG2 != (Addr)NULL)
      POST_MEM_WRITE( ARG2, sizeof(int) );
}

/* wait4(2): like waitpid, with an extra optional rusage out-parameter. */
PRE(sys_wait4)
{
   *flags |= SfMayBlock;
   PRINT("sys_wait4 ( %ld, %#lx, %ld, %#lx )", SARG1, ARG2, SARG3, ARG4);
   PRE_REG_READ4(long, "wait4",
                 vki_pid_t, pid, unsigned int *, status, int, options,
                 struct rusage *, rusage);
   if (ARG2 != (Addr)NULL)
      PRE_MEM_WRITE( "wait4(status)", ARG2, sizeof(int) );
   if (ARG4 != (Addr)NULL)
      PRE_MEM_WRITE( "wait4(rusage)", ARG4, sizeof(struct vki_rusage) );
}

POST(sys_wait4)
{
   if (ARG2 != (Addr)NULL)
      POST_MEM_WRITE( ARG2, sizeof(int) );
   if (ARG4 != (Addr)NULL)
      POST_MEM_WRITE( ARG4, sizeof(struct vki_rusage) );
}

/* writev(2): validate the fd, then mark the iovec array and every
   iov_base buffer as read. */
PRE(sys_writev)
{
   Int i;
   struct vki_iovec * vec;
   *flags |= SfMayBlock;
   PRINT("sys_writev ( %lu, %#lx, %lu )", ARG1, ARG2, ARG3);
   PRE_REG_READ3(ssize_t, "writev",
                 unsigned long, fd, const struct iovec *, vector,
                 unsigned long, count);
   if (!ML_(fd_allowed)(ARG1, "writev", tid, False)) {
      SET_STATUS_Failure( VKI_EBADF );
   } else {
      if ((Int)ARG3 >= 0)
         PRE_MEM_READ( "writev(vector)", ARG2, ARG3 * sizeof(struct vki_iovec) );
      if (ARG2 != 0) {
         /* ToDo: don't do any of the following if the vector is invalid */
         vec = (struct vki_iovec *)ARG2;
         for (i = 0; i < (Int)ARG3; i++)
            PRE_MEM_READ( "writev(vector[...])",
                          (Addr)vec[i].iov_base, vec[i].iov_len );
      }
   }
}

/* utimes(2): tvp, when non-NULL, points at two struct timevals. */
PRE(sys_utimes)
{
   FUSE_COMPATIBLE_MAY_BLOCK();
   PRINT("sys_utimes ( %#lx(%s), %#lx )", ARG1,(char*)ARG1,ARG2);
   PRE_REG_READ2(long, "utimes",
                 char *, filename, struct timeval *, tvp);
   PRE_MEM_RASCIIZ( "utimes(filename)", ARG1 );
   if (ARG2 != 0) {
      PRE_timeval_READ( "utimes(tvp[0])", ARG2 );
      PRE_timeval_READ( "utimes(tvp[1])", ARG2+sizeof(struct vki_timeval) );
   }
}

PRE(sys_acct)
{
   PRINT("sys_acct ( %#lx(%s) )", ARG1,(char*)ARG1);
   PRE_REG_READ1(long, "acct", const char *, filename);
   PRE_MEM_RASCIIZ( "acct(filename)", ARG1 );
}

PRE(sys_pause)
{
   *flags |= SfMayBlock;
   PRINT("sys_pause ( )");
   PRE_REG_READ0(long, "pause");
}

/* sigaltstack(2): handled internally by Valgrind (the client's signal
   stack is virtualised), so this wrapper performs the syscall itself
   via VG_(do_sys_sigaltstack) rather than letting the kernel see it. */
PRE(sys_sigaltstack)
{
   PRINT("sigaltstack ( %#lx, %#lx )",ARG1,ARG2);
   PRE_REG_READ2(int, "sigaltstack",
                 const vki_stack_t *, ss, vki_stack_t *, oss);
   if (ARG1 != 0) {
      const vki_stack_t *ss = (vki_stack_t *)ARG1;
      PRE_MEM_READ( "sigaltstack(ss)", (Addr)&ss->ss_sp, sizeof(ss->ss_sp) );
      PRE_MEM_READ( "sigaltstack(ss)", (Addr)&ss->ss_flags, sizeof(ss->ss_flags) );
      PRE_MEM_READ( "sigaltstack(ss)", (Addr)&ss->ss_size, sizeof(ss->ss_size) );
   }
   if (ARG2 != 0) {
      PRE_MEM_WRITE( "sigaltstack(oss)", ARG2, sizeof(vki_stack_t) );
   }

   /* Be safe: reject pointers we cannot dereference before touching them. */
   if (ARG1 && !ML_(safe_to_deref((void*)ARG1, sizeof(vki_stack_t)))) {
      SET_STATUS_Failure(VKI_EFAULT);
      return;
   }
   if (ARG2 && !ML_(safe_to_deref((void*)ARG2, sizeof(vki_stack_t)))) {
      SET_STATUS_Failure(VKI_EFAULT);
      return;
   }

   SET_STATUS_from_SysRes(
      VG_(do_sys_sigaltstack) (tid, (vki_stack_t*)ARG1,
                               (vki_stack_t*)ARG2)
   );
}

POST(sys_sigaltstack)
{
   vg_assert(SUCCESS);
   if (RES == 0 && ARG2 != 0)
      POST_MEM_WRITE( ARG2, sizeof(vki_stack_t));
}

PRE(sys_sethostname)
{
   PRINT("sys_sethostname ( %#lx, %ld )", ARG1, SARG2);
   PRE_REG_READ2(long, "sethostname", char *, name, int, len);
   PRE_MEM_READ( "sethostname(name)", ARG1, ARG2 );
}

#undef PRE
#undef POST

#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/
the_stack_data/874675.c
#include <stdio.h>
#include <stdlib.h>

/*
 * Demonstrates dynamic allocation: reads an element count n from stdin,
 * allocates an array of n ints with malloc(), then releases it.
 */
int main() {
    int *ptr;
    int n;

    /* BUG FIX: scanf can fail (EOF / non-numeric input), in which case n
       would be read uninitialized; also reject non-positive counts. */
    if (scanf("%d", &n) != 1 || n <= 0) {
        fprintf(stderr, "expected a positive integer\n");
        return 1;
    }

    /* BUG FIX: sizeof yields size_t, so the matching specifier is %zu,
       not %ld (a mismatch is undefined behavior on some ABIs). */
    printf("sizeof(int) = %zu\n", sizeof(int));

    /* malloc takes a byte count; `sizeof *ptr` keeps the element size in
       sync with the pointer's type.  No cast is needed in C. */
    ptr = malloc(n * sizeof *ptr);
    if (ptr == NULL) {                 /* the original never checked malloc */
        fprintf(stderr, "allocation failed\n");
        return 1;
    }
    printf("Success!\n");

    /* Tell the allocator this memory can be reused. */
    free(ptr);
    return 0;
}
the_stack_data/320773.c
// This file is part of CPAchecker, // a tool for configurable software verification: // https://cpachecker.sosy-lab.org // // SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org> // // SPDX-License-Identifier: Apache-2.0 extern int __VERIFIER_nondet_int(); int isPrime(int n){ for(int i = 2; i < n/2 + 1; i++){ if(n % i == 0) return 0; } return 1; } /** Calculate all prime factors of a given number. Example: prime factors of 420 are {2, 2, 3, 5, 7} because 2 * 2 * 3 * 5 * 7 = 420 and 2,3,5,7 are prime. */ int main(){ // Calculate prime factors of number; int number = __VERIFIER_nondet_int(); int copyForCheck = number; if (number <= 0) { // Tell user that a positive number is required. // This is not consider to be an error. goto EXIT; } int test = 1; for(int i = 2; i <= number; i++){ if (number % i == 0 && isPrime(i)) { // Multiply all prime factors to test test *= i; // Reset i to restart computation with new number number = number / i; i = 2; // FIX: i = 1 } } // POST-CONDITION check if test equals number // (test should equal the product of all found prime factors) if(test != copyForCheck) { goto ERROR; } EXIT: return 0; ERROR: return 1; }
the_stack_data/61074618.c
#include<stdio.h>
#include<stdlib.h>

/* Function prototypes */
long an_term(long, long, long);
double sn_sum(long, long, long);

/*
 * Reads "a1 d n" from stdin, then prints the n-th term and the sum of
 * the first n terms of the arithmetic progression a_k = a1 + (k-1)*d.
 */
int main(void){
    long a1, common_difference, n, a_n;

    /* BUG FIX: the original never checked scanf, so malformed input left
       all three variables uninitialized (undefined behavior). */
    if (scanf("%ld %ld %ld", &a1, &common_difference, &n) != 3) {
        fprintf(stderr, "expected three integers: a1 d n\n");
        return 1;
    }

    a_n = an_term(a1, common_difference, n);

    /* Print the n-th term, then the sum of the first n terms. */
    printf("%ld\n", a_n);
    printf("%.0lf\n", sn_sum(a1, a_n, n));
    return 0;
}

/* n-th term of the progression: a_n = a1 + d*(n-1). */
long an_term(long a1, long common_diff, long n){
    return a1 + common_diff * (n - 1);
}

/* Sum of the first n terms: S_n = n*(a1 + a_n)/2.
   (a1 + an)*n = 2*a1*n + n*(n-1)*d is always even for an integer
   progression, so the integer division is exact before the implicit
   conversion to double. */
double sn_sum(long a1, long an, long n){
    return ((a1 + an) * n) / 2;
}
the_stack_data/68090.c
#include <stdio.h>

/* Set all len counters to zero. */
void reset(int a[], int len){
    int i;
    for (i = 0; i < len; i++)
        a[i] = 0;
}

/* Increment counter a[val] iff val is a valid index (0..len-1).
   Behaviourally identical to the original linear scan (which only
   incremented a[i] when val == i), but O(1) instead of O(len). */
void add(int a[], int len, int val){
    if (val >= 0 && val < len)
        a[val]++;
}

/* Read integers until -1 (or EOF / bad input), tally how often each
   value in 0..9 occurs, then print the ten counters. */
int main(void){
    int x = 0, i;
    int a[10];

    reset(a, 10);
    while (x != -1) {
        /* BUG FIX: an unchecked scanf loops forever on non-numeric
           input; bail out on EOF or a conversion failure. */
        if (scanf("%d", &x) != 1)
            break;
        /* BUG FIX: the original wrote `if(0<=x<=10)`, which parses as
           (0<=x)<=10 and is therefore always true.  The intended range
           check is 0 <= x < 10 (valid indices of a[10]). */
        if (x >= 0 && x < 10)
            add(a, 10, x);
    }
    for (i = 0; i < 10; i++)
        printf("%d\n", a[i]);
    return 0;
}
the_stack_data/147267.c
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

typedef struct pessoa *Pessoa;

struct pessoa {
    int idade;       /* age */
    char nome[25];   /* NUL-terminated name */
};

/* Allocate a zero-initialized pessoa record.  Exits on allocation
   failure (the original dereferenced an unchecked malloc result). */
Pessoa initPessoa(){
    Pessoa p = malloc(sizeof(struct pessoa));
    if (p == NULL) {
        perror("malloc");
        exit(EXIT_FAILURE);
    }
    p->idade = 0;
    p->nome[0] = '\0';
    return p;
}

/* Fill in age and name.  BUG FIX: the original strcpy overflowed
   nome[25] for names of 25+ characters; snprintf bounds the copy and
   always NUL-terminates (long names are truncated). */
Pessoa setPessoa(Pessoa p, int age, char *name){
    p->idade = age;
    snprintf(p->nome, sizeof p->nome, "%s", name);
    return p;
}

/* Usage: prog <ignored> -i <name> <age>
   Appends one binary struct pessoa record to pessoas.txt. */
int main(int argc, char* argv[]){
    int fd;
    if (argc != 5) {
        printf("erro\n");
        return -1;
    }
    if (!strcmp(argv[2], "-i")) {
        fd = open("pessoas.txt", O_CREAT | O_RDWR | O_APPEND, 0600);
        if (fd < 0) {                      /* the original never checked open() */
            perror("open");
            return -1;
        }
        Pessoa p = initPessoa();
        p = setPessoa(p, atoi(argv[4]), argv[3]);
        printf("%s\n", p->nome);
        if (write(fd, p, sizeof(struct pessoa)) != (ssize_t)sizeof(struct pessoa))
            perror("write");               /* short or failed write */
        close(fd);
        free(p);                           /* plug the original leak */
    } else {
        /* NOTE(review): perror here reports whatever stale errno happens
           to be set; kept for output compatibility, but fprintf(stderr,
           ...) would be clearer. */
        perror("erro");
    }
    return 0;
}
the_stack_data/173576847.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: [email protected], [email protected], [email protected], [email protected], [email protected]) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* Only the outmost loop can be parallelized. The inner loop has loop
   carried anti data dependence. However, the loop is not parallelized
   so no race condition. Source: based on AutoPar's regression test. */

int n=100, m=100;     /* loop bounds (rows / columns actually visited) */
double b[100][100];   /* work matrix shared by all threads */

/* Propagates each row's elements left-to-right (b[i][j] = b[i][j-1]).
   Rows are independent, so the i-loop is parallelized; j must stay
   sequential and thread-private because iteration j reads what
   iteration j-1 wrote. */
void foo()
{
  int i,j;
#pragma omp parallel for private(j)
  for (i=0;i<n;i++)
    for (j=1;j<m;j++)     // Be careful about bounds of j
      b[i][j]=b[i][j-1];
}

int main()
{
  foo();
  return 0;
}
the_stack_data/741519.c
#include <stdio.h>

/*
 * Runtime stub for Scilab's barh(matrix, scalar, scalar) call: consumes
 * its arguments by printing the matrix element sum followed by the two
 * scalars, each with "%f" and no separators.
 */
void scilab_rt_barh_d2d0d0_(int in00, int in01, double matrixin0[in00][in01], double scalarin0, double scalarin1)
{
  int row, col;
  double total = 0;

  /* Accumulate every element of the in00 x in01 matrix. */
  for (row = 0; row < in00; ++row)
    for (col = 0; col < in01; ++col)
      total += matrixin0[row][col];

  printf("%f", total);
  printf("%f", scalarin0);
  printf("%f", scalarin1);
}
the_stack_data/20450262.c
#include <stdio.h>
#include <stdlib.h>

/* Basic binary search tree with the three depth-first traversals
   (pre-order, in-order, post-order). */

typedef struct node{
    int data;
    struct node* left;
    struct node* right;
} node;

/* Root of the single, global tree (set by the first insert). */
node* head;

/* Allocate a leaf node holding `data`.  BUG FIX: the original never
   checked malloc and would have dereferenced NULL on failure. */
node* create(int data){
    node* temp = malloc(sizeof(node));
    if (temp == NULL) {
        fprintf(stderr, "out of memory\n");
        exit(EXIT_FAILURE);
    }
    temp->data = data;
    temp->left = NULL;
    temp->right = NULL;
    return temp;
}

/* Insert `data` under `current` and return the (possibly new) subtree
   root.  Values <= the node go left, larger values go right.  When the
   global tree is empty, the new node also becomes the global head. */
node* insert(node* current, int data){
    if (head == NULL) {
        node* temp = create(data);
        head = temp;
        return temp;
    }
    if (current == NULL)
        return create(data);
    if (data <= current->data)
        current->left = insert(current->left, data);
    else
        current->right = insert(current->right, data);
    return current;
}

/* Pre-order: node, left subtree, right subtree. */
void print_preorder(node* temp){
    if (temp != NULL) {
        printf("%d ", temp->data);
        print_preorder(temp->left);
        print_preorder(temp->right);
    }
}

/* In-order: left subtree, node, right subtree (sorted output for a BST). */
void print_inorder(node* temp){
    if (temp != NULL) {
        print_inorder(temp->left);
        printf("%d ", temp->data);
        print_inorder(temp->right);
    }
}

/* Post-order: left subtree, right subtree, node. */
void print_postorder(node* temp){
    if (temp != NULL) {
        print_postorder(temp->left);
        print_postorder(temp->right);
        printf("%d ", temp->data);
    }
}

/* Pre-order dump of every node together with its children (or NULL). */
void print_preorder_all(node* temp){
    if (temp != NULL) {
        printf("data %d ", temp->data);
        if (temp->left != NULL)
            printf("left child %d ", temp->left->data);
        else
            printf("left child NULL ");
        if (temp->right != NULL)
            printf("right child %d \n", temp->right->data);
        else
            printf("right child NULL \n");
        print_preorder_all(temp->left);
        print_preorder_all(temp->right);
    }
}

/* Release a subtree post-order (children before parent). */
static void free_tree(node* temp){
    if (temp != NULL) {
        free_tree(temp->left);
        free_tree(temp->right);
        free(temp);
    }
}

int main(){
    head = NULL;
    int A[7] = {9,4,15,2,6,12,17};
    int i;

    for (i = 0; i < 7; i++)
        insert(head, A[i]);   /* first call seeds the global head */

    printf("tree in print_preorder : \n");
    print_preorder(head);
    printf("\n");
    printf("tree in print_inorder : \n");
    print_inorder(head);
    printf("\n");
    printf("tree in print_postorder : \n");
    print_postorder(head);
    printf("\n");
    printf("all info\n");
    print_preorder_all(head);

    free_tree(head);   /* BUG FIX: the original leaked all 7 nodes */
    head = NULL;
    return 0;
}
the_stack_data/248581963.c
/* { dg-do run { target ia64-*-* } } */ /* { dg-options } */ /* Test that "=S" properly avoids the post-increment on the memory address. */ static void foo(int *x) { long i; for (i = 0; i < 100; ++i) __asm__("st4 %0 = r0" : "=S"(x[i])); } int main() { int array[100]; long i; for (i = 0; i < 100; ++i) array[i] = -1; foo(array); for (i = 0; i < 100; ++i) if (array[i]) abort (); return 0; }
the_stack_data/51512.c
#include <string.h>
#include <stdlib.h>
#include <stdint.h>

/* Spectre variant-1 style victim gadget: a bounds check followed by a
   data-dependent access.  NOTE(review): the "_v10" name suggests this is
   taken from Kocher's published Spectre example set -- confirm against
   the upstream source before modifying. */

unsigned int array1_size = 16;
uint8_t array1[16] = { 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16 };
uint8_t array2[256 * 512];

uint8_t temp = 0; /* Used so compiler won't optimize out victim_function() */

/* array1[x] is read only after the bounds check; the comparison against
   k then conditions an access to array2.  Under speculative execution
   the bounds check can presumably be bypassed, which is the scenario
   this benchmark exists to exercise. */
void victim_function_v10(size_t x, uint8_t k) {
    if (x < array1_size) {
        if (array1[x] == k)
            temp &= array2[0];
    }
}
the_stack_data/7949825.c
/* * pidcheck - simple test of fork and getpid * * relies only on fork, console write, getpid and _exit * * child prints its pid, parent prints childs pid and its own * */ #include <unistd.h> #include <string.h> #include <stdlib.h> #include <stdio.h> #include <err.h> /* declare this volatile to discourage the compiler from optimizing away the parent's delay loop */ volatile int tot; int main(int argc, char *argv[]) { (void)argc; (void)argv; pid_t pid,pid2; int i; pid = fork(); if (pid < 0) { warn("fork"); } else if (pid == 0) { /* child */ pid2 = getpid(); /* print the child's PID, as seen by the child */ printf("C: %d\n",pid2); } else { /* parent */ /* try to delay long enough for the child to finish printing */ tot = 0; for(i=0;i<1000000;i++) { tot++; } /* print the child's PID, as seen by the parent */ printf("PC: %d\n",pid); /* print the parent's PID */ pid2 = getpid(); printf("PP: %d\n",pid2); } return(0); }
the_stack_data/76254.c
/***************************************************************************
 * FILE IDENTIFICATION
 *
 *  Name:     c-test-fns.c
 *  Purpose:  Test functions in C for UFFI library
 *
 *  This file, part of UFFI, is Copyright (c) 2002-2010 by Kevin M. Rosenberg
 *
 *  DLLEXPORT is __declspec(dllexport) on Windows and empty elsewhere.
 ***************************************************************************/

#if defined(WIN32)||defined(WIN64)
#include <windows.h>

/* Minimal DLL entry point: nothing to initialize, always succeed. */
BOOL WINAPI DllEntryPoint(HINSTANCE hinstdll, DWORD fdwReason,
                          LPVOID lpvReserved)
{
  return 1;
}

#define DLLEXPORT __declspec(dllexport)
#else
#define DLLEXPORT
#endif

#include <ctype.h>
#include <stdlib.h>
#include <math.h>

/* Count the upper-case characters in a C string.
   Returns -1 when given a NULL pointer. */
DLLEXPORT
int
cs_count_upper (char* psz)
{
  int total;
  char* cursor;

  if (psz == NULL)
    return -1;

  total = 0;
  for (cursor = psz; *cursor != '\0'; ++cursor) {
    if (isupper (*cursor))
      ++total;
  }
  return total;
}

/* Upper-case a string in place; a NULL pointer is a no-op. */
DLLEXPORT
void
cs_to_upper (char* psz)
{
  char* cursor;

  if (psz == NULL)
    return;
  for (cursor = psz; *cursor != '\0'; ++cursor)
    *cursor = toupper (*cursor);
}

/* Fill buffer with `size` random letters in 'A'..'Z'.
   No terminating NUL is written -- the caller owns the length. */
DLLEXPORT
void
cs_make_random (int size, char* buffer)
{
  int idx = 0;

  while (idx < size) {
    buffer[idx] = 'A' + (rand() % 26);
    ++idx;
  }
}

/* Halve each of the `size` doubles in vec, in place. */
DLLEXPORT
void
half_double_vector (int size, double* vec)
{
  double* end = vec + size;

  for (; vec < end; ++vec)
    *vec /= 2.;
}
the_stack_data/9415.c
/*
 * Copyright (C) 2017-2019 Alibaba Group Holding Limited
 */

/* Declare `aliasname` as a weak alias for `name`: the symbol resolves to
   `name` unless a strong definition elsewhere overrides it. */
# define _weak_alias(name, aliasname) \
  extern __typeof (name) aliasname __attribute__ ((weak, alias (#name)));
# define weak_alias(name, aliasname) _weak_alias (name, aliasname)

/* Empty default implementation shared by every alias below. */
static void weak_func(void)
{
}

/* Weak no-op defaults for the CSI spiflash/eflash driver entry points,
   so links succeed even when no real driver provides them.  A strong
   definition in an actual driver takes precedence at link time. */
weak_alias(weak_func,csi_spiflash_qspi_init)
weak_alias(weak_func,csi_spiflash_get_flash_info)
weak_alias(weak_func,csi_spiflash_erase)
weak_alias(weak_func,csi_spiflash_program)
weak_alias(weak_func,csi_eflash_read)
weak_alias(weak_func,csi_eflash_erase_sector)
weak_alias(weak_func,csi_eflash_program)
weak_alias(weak_func,csi_eflash_init)
weak_alias(weak_func,csi_eflash_erase)
the_stack_data/130969.c
/* $OpenBSD: kern_ktrace.c,v 1.50 2011/04/02 17:04:35 guenther Exp $ */ /* $NetBSD: kern_ktrace.c,v 1.23 1996/02/09 18:59:36 christos Exp $ */ /* * Copyright (c) 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)kern_ktrace.c 8.2 (Berkeley) 9/23/93 */ #ifdef KTRACE #include <sys/param.h> #include <sys/systm.h> #include <sys/proc.h> #include <sys/sched.h> #include <sys/file.h> #include <sys/namei.h> #include <sys/vnode.h> #include <sys/ktrace.h> #include <sys/malloc.h> #include <sys/syslog.h> #include <sys/sysctl.h> #include <sys/mount.h> #include <sys/syscall.h> #include <sys/syscallargs.h> #include <uvm/uvm_extern.h> void ktrinitheader(struct ktr_header *, struct proc *, int); int ktrops(struct proc *, struct proc *, int, int, struct vnode *); int ktrsetchildren(struct proc *, struct process *, int, int, struct vnode *); int ktrwrite(struct proc *, struct ktr_header *); int ktrcanset(struct proc *, struct proc *); /* * Change the trace vnode in a correct way (to avoid races). */ void ktrsettracevnode(struct proc *p, struct vnode *newvp) { struct vnode *vp; if (p->p_tracep == newvp) /* avoid work */ return; if (newvp != NULL) vref(newvp); vp = p->p_tracep; p->p_tracep = newvp; if (vp != NULL) vrele(vp); } void ktrinitheader(struct ktr_header *kth, struct proc *p, int type) { bzero(kth, sizeof (struct ktr_header)); kth->ktr_type = type; microtime(&kth->ktr_time); kth->ktr_pid = p->p_pid; bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN); } void ktrsyscall(struct proc *p, register_t code, size_t argsize, register_t args[]) { struct ktr_header kth; struct ktr_syscall *ktp; size_t len = sizeof(struct ktr_syscall) + argsize; register_t *argp; u_int nargs = 0; int i; if (code == SYS___sysctl && (p->p_emul->e_flags & EMUL_NATIVE)) { /* * The native sysctl encoding stores the mib[] * array because it is interesting. 
 */
		if (args[1] > 0)
			nargs = min(args[1], CTL_MAXNAME);
		len += nargs * sizeof(int);
	}
	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_SYSCALL);
	ktp = malloc(len, M_TEMP, M_WAITOK);
	ktp->ktr_code = code;
	ktp->ktr_argsize = argsize;
	argp = (register_t *)((char *)ktp + sizeof(struct ktr_syscall));
	for (i = 0; i < (argsize / sizeof *argp); i++)
		*argp++ = args[i];
	/*
	 * For native sysctl(2) calls, also record the MIB name copied in
	 * from user space; if the copyin fails, zero the name slots rather
	 * than leaving them uninitialized in the trace record.
	 */
	if (code == SYS___sysctl && (p->p_emul->e_flags & EMUL_NATIVE) &&
	    nargs &&
	    copyin((void *)args[0], argp, nargs * sizeof(int)))
		bzero(argp, nargs * sizeof(int));
	kth.ktr_buf = (caddr_t)ktp;
	kth.ktr_len = len;
	ktrwrite(p, &kth);
	free(ktp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/*
 * Record a system-call-return trace event: code, error and (first)
 * return value.  The second return value is not recorded (see XXX
 * comment below).
 */
void
ktrsysret(struct proc *p, register_t code, int error, register_t retval)
{
	struct ktr_header kth;
	struct ktr_sysret ktp;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_SYSRET);
	ktp.ktr_code = code;
	ktp.ktr_error = error;
	ktp.ktr_retval = retval;	/* what about val2 ? */

	kth.ktr_buf = (caddr_t)&ktp;
	kth.ktr_len = sizeof(struct ktr_sysret);

	ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/*
 * Record a name-lookup trace event; the pathname is written directly
 * from the caller's buffer (not NUL-terminated in the record -- the
 * record length carries the string length).
 */
void
ktrnamei(struct proc *p, char *path)
{
	struct ktr_header kth;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_NAMEI);
	kth.ktr_len = strlen(path);
	kth.ktr_buf = path;

	ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/*
 * Record the emulation name currently in effect for the process.
 */
void
ktremul(struct proc *p, char *emul)
{
	struct ktr_header kth;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_EMUL);
	kth.ktr_len = strlen(emul);
	kth.ktr_buf = emul;

	ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/*
 * Record the data transferred by a read/write style operation.  The
 * user iovecs are copied in and written out in pieces of at most
 * PAGE_SIZE (including the ktr_genio header); tracing of the transfer
 * stops early if a copyin or a trace write fails.  Nothing is recorded
 * if the I/O itself failed.
 */
void
ktrgenio(struct proc *p, int fd, enum uio_rw rw, struct iovec *iov, int len,
    int error)
{
	struct ktr_header kth;
	struct ktr_genio *ktp;
	caddr_t cp;
	int resid = len, count;
	int buflen;

	if (error)
		return;

	p->p_traceflag |= KTRFAC_ACTIVE;

	buflen = min(PAGE_SIZE, len + sizeof(struct ktr_genio));

	ktrinitheader(&kth, p, KTR_GENIO);
	ktp = malloc(buflen, M_TEMP, M_WAITOK);
	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;

	kth.ktr_buf = (caddr_t)ktp;

	cp = (caddr_t)((char *)ktp + sizeof (struct ktr_genio));
	buflen -= sizeof(struct ktr_genio);

	while (resid > 0) {
		/*
		 * Don't allow this process to hog the cpu when doing
		 * huge I/O.
		 */
		if (curcpu()->ci_schedstate.spc_schedflags & SPCF_SHOULDYIELD)
			preempt(NULL);

		count = min(iov->iov_len, buflen);
		if (count > resid)
			count = resid;
		if (copyin(iov->iov_base, cp, count))
			break;

		kth.ktr_len = count + sizeof(struct ktr_genio);

		if (ktrwrite(p, &kth) != 0)
			break;

		iov->iov_len -= count;
		iov->iov_base = (caddr_t)iov->iov_base + count;

		if (iov->iov_len == 0)
			iov++;

		resid -= count;
	}

	free(ktp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/*
 * Record delivery of a signal: number, handler, mask, code and the
 * full siginfo.
 */
void
ktrpsig(struct proc *p, int sig, sig_t action, int mask, int code,
    siginfo_t *si)
{
	struct ktr_header kth;
	struct ktr_psig kp;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_PSIG);
	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = mask;
	kp.code = code;
	kp.si = *si;
	kth.ktr_buf = (caddr_t)&kp;
	kth.ktr_len = sizeof(struct ktr_psig);

	ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/*
 * Record a context-switch event (in or out, user or kernel).
 */
void
ktrcsw(struct proc *p, int out, int user)
{
	struct ktr_header kth;
	struct ktr_csw kc;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_CSW);
	kc.out = out;
	kc.user = user;
	kth.ktr_buf = (caddr_t)&kc;
	kth.ktr_len = sizeof(struct ktr_csw);

	ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/* Interface and common routines */

/*
 * ktrace system call
 */
/* ARGSUSED */
int
sys_ktrace(struct proc *curp, void *v, register_t *retval)
{
	struct sys_ktrace_args /* {
		syscallarg(const char *) fname;
		syscallarg(int) ops;
		syscallarg(int) facs;
		syscallarg(pid_t) pid;
	} */ *uap = v;
	struct vnode *vp = NULL;
	struct proc *p = NULL;
	struct process *pr = NULL;
	struct pgrp *pg;
	int facs = SCARG(uap, facs) & ~((unsigned) KTRFAC_ROOT);
	int ops = KTROP(SCARG(uap, ops));
	int descend = SCARG(uap, ops) & KTRFLAG_DESCEND;
	int ret = 0;
	int error = 0;
	struct nameidata nd;

	curp->p_traceflag |= KTRFAC_ACTIVE;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE,
		    SCARG(uap, fname), curp);
		if ((error = vn_open(&nd, FREAD|FWRITE|O_NOFOLLOW, 0)) != 0) {
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (error);
		}
		vp = nd.ni_vp;
		VOP_UNLOCK(vp, 0, curp);
		if (vp->v_type != VREG) {
			/* refuse to trace to anything but a regular file */
			(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (EACCES);
		}
	}
	/*
	 * Clear all uses of the tracefile
	 */
	if (ops == KTROP_CLEARFILE) {
		LIST_FOREACH(p, &allproc, p_list) {
			if (p->p_tracep == vp) {
				if (ktrcanset(curp, p)) {
					p->p_traceflag = 0;
					ktrsettracevnode(p, NULL);
				} else
					error = EPERM;
			}
		}
		goto done;
	}
	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}
	/*
	 * do it
	 */
	if (SCARG(uap, pid) < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-SCARG(uap, pid));
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		LIST_FOREACH(pr, &pg->pg_members, ps_pglist) {
			if (descend)
				ret |= ktrsetchildren(curp, pr, ops, facs, vp);
			else
				TAILQ_FOREACH(p, &pr->ps_threads, p_thr_link)
					ret |= ktrops(curp, p, ops, facs, vp);
		}
	} else {
		/*
		 * by pid
		 */
		pr = prfind(SCARG(uap, pid));
		if (pr == NULL) {
			error = ESRCH;
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(curp, pr, ops, facs, vp);
		else
			TAILQ_FOREACH(p, &pr->ps_threads, p_thr_link) {
				ret |= ktrops(curp, p, ops, facs, vp);
			}
	}
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL)
		(void) vn_close(vp, FWRITE, curp->p_ucred, curp);
	curp->p_traceflag &= ~KTRFAC_ACTIVE;
	return (error);
}

/*
 * Apply a single set/clear tracing operation to one thread, after a
 * permission check.  Returns 1 if the operation was permitted and
 * applied, 0 if the caller may not change tracing on p.
 */
int
ktrops(struct proc *curp, struct proc *p, int ops, int facs, struct vnode *vp)
{
	if (!ktrcanset(curp, p))
		return (0);
	if (ops == KTROP_SET) {
		ktrsettracevnode(p, vp);
		p->p_traceflag |= facs;
		if (suser(curp, 0) == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			ktrsettracevnode(p, NULL);
		}
	}

	/*
	 * Emit an emulation record, every time there is a ktrace
	 * change/attach request.
	 */
	if (KTRPOINT(p, KTR_EMUL))
		ktremul(p, p->p_emul->e_name);

	return (1);
}

/*
 * Apply a tracing operation to a process and all of its descendants
 * (non-recursive depth-first walk of the process tree rooted at top).
 * Returns the OR of the per-thread ktrops() results.
 */
int
ktrsetchildren(struct proc *curp, struct process *top, int ops, int facs,
    struct vnode *vp)
{
	struct process *pr;
	struct proc *p;
	int ret = 0;

	pr = top;
	for (;;) {
		TAILQ_FOREACH(p, &pr->ps_threads, p_thr_link)
			ret |= ktrops(curp, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&pr->ps_children))
			pr = LIST_FIRST(&pr->ps_children);
		else for (;;) {
			if (pr == top)
				return (ret);
			if (LIST_NEXT(pr, ps_sibling) != NULL) {
				pr = LIST_NEXT(pr, ps_sibling);
				break;
			}
			pr = pr->ps_pptr;
		}
	}
	/*NOTREACHED*/
}

/*
 * Append one trace record (header plus optional payload) to the
 * process's trace vnode.  On a write error, tracing is disabled for
 * every process using this vnode.
 *
 * NOTE(review): the LIST_FOREACH in the error path reuses the
 * parameter `p` as the iteration variable, so the original argument
 * is lost past that point -- intentional here since p is not used
 * afterwards, but worth confirming if this function is modified.
 */
int
ktrwrite(struct proc *p, struct ktr_header *kth)
{
	struct uio auio;
	struct iovec aiov[2];
	int error;
	struct vnode *vp = p->p_tracep;

	if (vp == NULL)
		return 0;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_procp = p;
	if (kth->ktr_len > 0) {
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
	}
	vget(vp, LK_EXCLUSIVE | LK_RETRY, p);
	error = VOP_WRITE(vp, &auio, IO_UNIT|IO_APPEND, p->p_ucred);
	if (!error) {
		vput(vp);
		return (0);
	}
	/*
	 * If error encountered, give up tracing on this vnode.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	LIST_FOREACH(p, &allproc, p_list) {
		if (p->p_tracep == vp) {
			p->p_traceflag = 0;
			ktrsettracevnode(p, NULL);
		}
	}
	vput(vp);
	return (error);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * TODO: check groups.  use caller effective gid.
 */
int
ktrcanset(struct proc *callp, struct proc *targetp)
{
	struct pcred *caller = callp->p_cred;
	struct pcred *target = targetp->p_cred;

	if ((caller->pc_ucred->cr_uid == target->p_ruid &&
	    target->p_ruid == target->p_svuid &&
	    caller->p_rgid == target->p_rgid &&	/* XXX */
	    target->p_rgid == target->p_svgid &&
	    (targetp->p_traceflag & KTRFAC_ROOT) == 0 &&
	    !ISSET(targetp->p_p->ps_flags, PS_SUGID)) ||
	    caller->pc_ucred->cr_uid == 0)
		return (1);

	return (0);
}

#endif
the_stack_data/711951.c
/*++ Copyright (c) 1991 Microsoft Corporation Module Name: RegeCls.c Abstract: This module contains helper functions for enumerating class registrations via the win32 RegEnumKeyEx api Author: Adam Edwards (adamed) 06-May-1998 Key Functions: EnumTableGetNextEnum EnumTableRemoveKey InitializeClassesEnumTable ClassKeyCountSubKeys Notes: Starting with NT5, the HKEY_CLASSES_ROOT key is per-user instead of per-machine -- previously, HKCR was an alias for HKLM\Software\Classes. Please see regclass.c for more information on this functionality. This feature complicates registry key enumeration because certain keys, such as CLSID, can have some subkeys that come from HKLM\Software\Classes, and other subkeys that come from HKCU\Software\Classes. Since the feature is implemented in user mode, the kernel mode apis know nothing of this. When it's time to enumerate keys, the kernel doesn't know that it should enumerate keys from two different parent keys. The key problem is that keys with the same name can exist in the user and machine portions. When this happens, we choose the user portion is belonging to HKCR -- the other one does not exist -- it is "overridden" by the user version. This means that we cannot simply enumerate from both places and return the results -- we would get duplicates in this case. Thus, we have to do work in user mode to make sure duplicates are not returned. This module provides the user mode implementation for enumerating class registration keys in HKEY_CLASSES_ROOT. The general method is to maintain state between each call to RegEnumKeyEx. The state is kept in a global table indexed by registry key handle and thread id. The state allows the api to remember where it is in the enumeration. The rest of the code handles finding the next key, which is accomplished by retrieving keys from both user and machine locations. 
Since the kernel returns keys from either of these locations in sorted order, we can compare the key names and return whichever one is less or greater, depending on if we're enumerating upward or downward. We keep track of where we are for both user and machine locations, so we know which key to enumerate next and when to stop. ************************** IMPORTANT ASSUMPTIONS: ************************** This code assumes that the caller has both query permission and enumerate subkey permission in the registry key's acl -- some calls may fail with access denied if the acl denies access to the caller. --*/ #ifdef LOCAL #include <rpc.h> #include "regrpc.h" #include "localreg.h" #include "regclass.h" #include "regecls.h" #include <malloc.h> NTSTATUS QueryKeyInfo( HKEY hKey, KEY_INFORMATION_CLASS KeyInformationClass, PVOID *ppKeyInfo, ULONG BufferLength, BOOL fClass, USHORT MaxClassLength); // // Global table of registry key enumeration state. This is initialized // at dll initialize time. // EnumTable gClassesEnumTable; // // Global indicating need for calling thread detach routines // BOOL gbDllHasThreadState = FALSE; BOOL InitializeClassesEnumTable() /*++ Routine Description: Initializes the global classes enumeration table when advapi32.dll is initialized. Arguments: Return Value: Returns TRUE for success, FALSE for failure Notes: This recordset merging is all in user mode -- should be moved to the kernel for perf and other reasons ?? --*/ { NTSTATUS Status; // // Init the classes enumeration table // Status = EnumTableInit(&gClassesEnumTable); return NT_SUCCESS(Status); } BOOL CleanupClassesEnumTable(BOOL fThisThreadOnly) /*++ Routine Description: Uninitializes the global classes enumeration table when advapi32.dll is unloaded -- this frees all heap associated with the enumeration table, including that for keys which have not been closed. Other resources required for the table are also freed. 
Arguments: dwCriteria - if this is ENUM_TABLE_REMOVEKEY_CRITERIA_THISTHREAD, then only the table entries concerning this thread are cleaned up. If it is ENUM_TABLE_REMOVEKEY_CRITERIA_ANYTHREAD, the table entries for all threads in the process are cleaned up. Return Value: TRUE for success, FALSE otherwise. Notes: --*/ { NTSTATUS Status; DWORD dwCriteria; dwCriteria = fThisThreadOnly ? ENUM_TABLE_REMOVEKEY_CRITERIA_THISTHREAD : ENUM_TABLE_REMOVEKEY_CRITERIA_ANYTHREAD; // // Clear our enumeration table // Status = EnumTableClear(&gClassesEnumTable, dwCriteria); return NT_SUCCESS(Status); } NTSTATUS EnumTableInit(EnumTable* pEnumTable) /*++ Routine Description: Initializes an enumeration state table Arguments: pEnumTable - table to initialize Return Value: Returns NT_SUCCESS (0) for success; error-code for failure. Notes: --*/ { NTSTATUS Status; EnumState* rgNewState; #if defined(_REGCLASS_ENUMTABLE_INSTRUMENTED_) DbgPrint("WINREG: Instrumented enum table data for process id 0x%x\n", NtCurrentTeb()->ClientId.UniqueProcess); DbgPrint("WINREG: EnumTableInit subtree state size %d\n", sizeof(rgNewState->UserState)); DbgPrint("WINREG: EnumTableInit state size %d\n", sizeof(*rgNewState)); DbgPrint("WINREG: EnumTableInit initial table size %d\n", sizeof(*pEnumTable)); #endif // _REGCLASS_ENUMTABLE_INSTRUMENTED_ // // Initialize the thread list // StateObjectListInit( &(pEnumTable->ThreadEnumList), 0); // // We have not initialized the critical section // for this table yet -- remember this. // pEnumTable->bCriticalSectionInitialized = FALSE; // // Initialize the critical section that will be used to // synchronize access to this table // Status = RtlInitializeCriticalSection( &(pEnumTable->CriticalSection)); // // Remember that we have initialized this critical section // so we can remember to delete it. 
// pEnumTable->bCriticalSectionInitialized = NT_SUCCESS(Status); return Status; } NTSTATUS EnumTableClear(EnumTable* pEnumTable, DWORD dwCriteria) /*++ Routine Description: Clears all state in an enumeration table -- frees all state (memory, resources) memory associated with the enumeration table. Arguments: pEnumTable - table to clear dwCriteria - if this is ENUM_TABLE_REMOVEKEY_CRITERIA_THISTHREAD, enumeration states are removed for this thread only. If it is ENUM_TABLE_REMOVEKEY_CRITERIA_ANYTHREAD, enumeration states are removed for all threads in the process. Return Value: none Notes: --*/ { NTSTATUS Status; BOOL fThisThreadOnly; DWORD dwThreadId; #if defined(_REGCLASS_ENUMTABLE_INSTRUMENTED_) DWORD cOrphanedStates = 0; #endif // _REGCLASS_ENUMTABLE_INSTRUMENTED_ ASSERT((ENUM_TABLE_REMOVEKEY_CRITERIA_THISTHREAD == dwCriteria) || (ENUM_TABLE_REMOVEKEY_CRITERIA_ANYTHREAD == dwCriteria)); Status = STATUS_SUCCESS; // // we assume that if we are called with ENUM_TABLE_REMOVEKEY_CRITERIA_ANYTHREAD // that we are being called at process detach to remove all keys from the // table and free the table itself -- this means that we are the only // thread executing this code. // // // Protect ourselves while modifying the table // if (dwCriteria != ENUM_TABLE_REMOVEKEY_CRITERIA_ANYTHREAD) { Status = RtlEnterCriticalSection(&(pEnumTable->CriticalSection)); ASSERT( NT_SUCCESS( Status ) ); if ( !NT_SUCCESS( Status ) ) { #if DBG DbgPrint( "WINREG: RtlEnterCriticalSection() in EnumTableRemoveKey() failed. 
Status = %lx \n", Status ); #endif return Status; } } fThisThreadOnly = (ENUM_TABLE_REMOVEKEY_CRITERIA_THISTHREAD == dwCriteria); // // Find our thread id if the caller wants to remove // state for just this thread // if (fThisThreadOnly) { KeyStateList* pStateList; dwThreadId = GetCurrentThreadId(); pStateList = (KeyStateList*) StateObjectListRemove( &(pEnumTable->ThreadEnumList), ULongToPtr((const unsigned long)dwThreadId)); // // Announce that this dll no longer stores state for any // threads -- used to avoid calls to dll thread // detach routines when there's no state to clean up. // if (StateObjectListIsEmpty(&(pEnumTable->ThreadEnumList))) { gbDllHasThreadState = FALSE; } if (pStateList) { KeyStateListDestroy((StateObject*) pStateList); } } else { // // If we're clearing all threads, just destroy this list // StateObjectListClear(&(pEnumTable->ThreadEnumList), KeyStateListDestroy); gbDllHasThreadState = FALSE; } // // It's safe to unlock the table // if (dwCriteria != ENUM_TABLE_REMOVEKEY_CRITERIA_ANYTHREAD) { Status = RtlLeaveCriticalSection(&(pEnumTable->CriticalSection)); ASSERT( NT_SUCCESS( Status ) ); #if DBG if ( !NT_SUCCESS( Status ) ) { DbgPrint( "WINREG: RtlLeaveCriticalSection() in EnumTableClear() failed. Status = %lx \n", Status ); } #endif } if (pEnumTable->bCriticalSectionInitialized && !fThisThreadOnly) { Status = RtlDeleteCriticalSection(&(pEnumTable->CriticalSection)); ASSERT(NT_SUCCESS(Status)); #if DBG if ( !NT_SUCCESS( Status ) ) { DbgPrint( "WINREG: RtlDeleteCriticalSection() in EnumTableClear() failed. 
Status = %lx \n", Status ); } #endif } #if defined(_REGCLASS_ENUMTABLE_INSTRUMENTED_) if (!fThisThreadOnly) { DbgPrint("WINREG: EnumTableClear() deleted %d unfreed states.\n", cOrphanedStates); DbgPrint("WINREG: If the number of unfreed states is > 1, either the\n" "WINREG: process terminated a thread with TerminateThread, the process\n" "WINREG: didn't close all registry handles before exiting,\n" "WINREG: or there's a winreg bug in the classes enumeration code\n"); } #endif // _REGCLASS_ENUMTABLE_INSTRUMENTED_ return Status; } NTSTATUS EnumTableFindKeyState( EnumTable* pEnumTable, HKEY hKey, EnumState** ppEnumState) /*++ Routine Description: Searches for the state for a registry key in an enumeration table Arguments: pEnumTable - table in which to search hKey - key for whose state we're searching ppEnumState - out param for result of search Return Value: Returns NT_SUCCESS (0) for success; error-code for failure. Notes: --*/ { KeyStateList* pStateList; pStateList = (KeyStateList*) StateObjectListFind( &(pEnumTable->ThreadEnumList), ULongToPtr((const unsigned long)GetCurrentThreadId())); if (!pStateList) { return STATUS_OBJECT_NAME_NOT_FOUND; } else { *ppEnumState = (EnumState*) StateObjectListFind( (StateObjectList*) pStateList, hKey); if (!*ppEnumState) { return STATUS_OBJECT_NAME_NOT_FOUND; } } return STATUS_SUCCESS; } NTSTATUS EnumTableAddKey( EnumTable* pEnumTable, HKEY hKey, DWORD dwFirstSubKey, EnumState** ppEnumState, EnumState** ppRootState) /*++ Routine Description: Adds an enumeration state to an enumeration table for a given key. Arguments: pEnumTable - table in which to add state hKey - key for whom we want to add state dwFirstSubKey - index of first subkey requested by caller for enumeration ppEnumState - out param for result of search or add Return Value: Returns NT_SUCCESS (0) for success; error-code for failure. 
Notes: --*/ { EnumState* pEnumState; KeyStateList* pStateList; NTSTATUS Status; pEnumState = NULL; // // Announce that this dll has thread state so it will // be properly cleaned up by dll thread detach routines // gbDllHasThreadState = TRUE; pStateList = (KeyStateList*) StateObjectListFind( (StateObjectList*) &(pEnumTable->ThreadEnumList), ULongToPtr((const unsigned long)GetCurrentThreadId())); if (!pStateList) { pStateList = RegClassHeapAlloc(sizeof(*pStateList)); if (!pStateList) { return STATUS_NO_MEMORY; } KeyStateListInit(pStateList); StateObjectListAdd( &(pEnumTable->ThreadEnumList), (StateObject*) pStateList); } pEnumState = RegClassHeapAlloc(sizeof(*pEnumState)); if (!pEnumState) { return STATUS_NO_MEMORY; } RtlZeroMemory(pEnumState, sizeof(*pEnumState)); { SKeySemantics keyinfo; UNICODE_STRING EmptyString = {0, 0, 0}; BYTE rgNameBuf[REG_MAX_CLASSKEY_LEN + REG_CHAR_SIZE + sizeof(KEY_NAME_INFORMATION)]; // // Set buffer to store info about this key // RtlZeroMemory(&keyinfo, sizeof(keyinfo)); keyinfo._pFullPath = (PKEY_NAME_INFORMATION) rgNameBuf; keyinfo._cbFullPath = sizeof(rgNameBuf); keyinfo._fAllocedNameBuf = FALSE; // // get information about this key // Status = BaseRegGetKeySemantics(hKey, &EmptyString, &keyinfo); if (!NT_SUCCESS(Status)) { goto error_exit; } // // initialize the empty spot // Status = EnumStateInit( pEnumState, hKey, dwFirstSubKey, dwFirstSubKey ? ENUM_DIRECTION_BACKWARD : ENUM_DIRECTION_FORWARD, &keyinfo); BaseRegReleaseKeySemantics(&keyinfo); if (!NT_SUCCESS(Status)) { goto error_exit; } if (IsRootKey(&keyinfo)) { NTSTATUS RootStatus; // // If this fails, it is not fatal -- it just means // we may miss out on an optimization. This can // fail due to out of memory, so it is possible // that it may fail and we would still want to continue // RootStatus = EnumTableGetRootState(pEnumTable, ppRootState); #if DBG if (!NT_SUCCESS(RootStatus)) { DbgPrint( "WINREG: EnumTableAddKey failed to get classes root state. 
Status = %lx \n", RootStatus ); } #endif // DBG if (NT_SUCCESS(RootStatus)) { RootStatus = EnumStateCopy( pEnumState, *ppRootState); #if DBG if (!NT_SUCCESS(RootStatus)) { DbgPrint( "WINREG: EnumTableAddKey failed to copy key state. Status = %lx \n", RootStatus ); } #endif // DBG } } } // // set the out parameter for the caller // *ppEnumState = pEnumState; StateObjectListAdd( (StateObjectList*) pStateList, (StateObject*) pEnumState); Status = STATUS_SUCCESS; error_exit: if (!NT_SUCCESS(Status) && pEnumState) { RegClassHeapFree(pEnumState); } return Status; } NTSTATUS EnumTableRemoveKey( EnumTable* pEnumTable, HKEY hKey, DWORD dwCriteria) /*++ Routine Description: remove an enumeration state from an enumeration table for a given key. Arguments: pEnumTable - table in which to remove state hKey - key whose state we wish to remove dwCriteria - if this is ENUM_TABLE_REMOVEKEY_CRITERIA_THISTHREAD, the enumeration state for hkey is removed for this thread only. If it is ENUM_TABLE_REMOVEKEY_CRITERIA_ANYTHREAD, the enumeration state for hkey is removed for all threads in the process. Return Value: Returns NT_SUCCESS (0) for success; error-code for failure. Notes: --*/ { KeyStateList* pStateList; EnumState* pEnumState; BOOL fThisThreadOnly; NTSTATUS Status; // // Protect ourselves while modifying the table // Status = RtlEnterCriticalSection(&(pEnumTable->CriticalSection)); ASSERT( NT_SUCCESS( Status ) ); if ( !NT_SUCCESS( Status ) ) { #if DBG DbgPrint( "WINREG: RtlEnterCriticalSection() in EnumTableRemoveKey() failed. 
Status = %lx \n", Status ); #endif return Status; } Status = STATUS_OBJECT_NAME_NOT_FOUND; fThisThreadOnly = (ENUM_TABLE_REMOVEKEY_CRITERIA_THISTHREAD == dwCriteria); { KeyStateList* pNext; pNext = NULL; for (pStateList = (KeyStateList*) (pEnumTable->ThreadEnumList.pHead); pStateList != NULL; pStateList = NULL) { EnumState* pEnumState; if (fThisThreadOnly) { pStateList = (KeyStateList*) StateObjectListFind( (StateObjectList*) &(pEnumTable->ThreadEnumList), ULongToPtr((const unsigned long)GetCurrentThreadId())); if (!pStateList) { break; } } else { pNext = (KeyStateList*) (pStateList->Object.Links.Flink); } pEnumState = (EnumState*) StateObjectListRemove( (StateObjectList*) pStateList, hKey); if (pEnumState) { Status = STATUS_SUCCESS; EnumStateDestroy((StateObject*) pEnumState); // // Note the state list might be empty for a given thread, // but we will not destroy this list in order to avoid // excessive heap calls // } } } // // It's safe to unlock the table // Status = RtlLeaveCriticalSection(&(pEnumTable->CriticalSection)); ASSERT( NT_SUCCESS( Status ) ); #if DBG if ( !NT_SUCCESS( Status ) ) { DbgPrint( "WINREG: RtlLeaveCriticalSection() in EnumTableRemoveKey() failed. 
Status = %lx \n", Status ); } #endif return Status; } NTSTATUS EnumTableGetNextEnum( EnumTable* pEnumTable, HKEY hKey, DWORD dwSubkey, KEY_INFORMATION_CLASS KeyInformationClass, PVOID pKeyInfo, DWORD cbKeyInfo, LPDWORD pcbKeyInfo) /*++ Routine Description: Gets the next enumerated subkey for a particular subkey Arguments: pEnumTable - table that holds state of registry key enumerations hKey - key for whom we want to add state dwSubKey - index of subkey requested by caller for enumeration KeyInformationClass - the type of key information data requested by caller pKeyInfo - out param -- buffer for key information data for caller cbKeyInfo - size of pKeyInfo buffer pcbKeyInfo - out param -- size of key information returned to caller Return Value: Returns NT_SUCCESS (0) for success; error-code for failure. Notes: --*/ { EnumState* pEnumState; EnumState* pRootState; NTSTATUS Status; BOOL fFreeState; // // Protect ourselves while we enumerate // Status = RtlEnterCriticalSection(&(pEnumTable->CriticalSection)); // // Very big -- unlikely to happen unless there's a runaway enumeration // due to a bug in this module. // // ASSERT(dwSubkey < 16383); ASSERT( NT_SUCCESS( Status ) ); if ( !NT_SUCCESS( Status ) ) { #if DBG DbgPrint( "WINREG: RtlEnterCriticalSection() in EnumTableGetNextENUm() failed. Status = %lx \n", Status ); #endif return Status; } // // Find the enumeration state for the requested key. 
Note that even if this // function fails to find an existing state, which case it returns a failure code // it can still return an empty pEnumState for that hKey so it can be added later // Status = EnumTableGetKeyState(pEnumTable, hKey, dwSubkey, &pEnumState, &pRootState, pcbKeyInfo); if (!NT_SUCCESS(Status) || !pEnumState) { goto cleanup; } // // We have a state for this key, now we can use it to enumerate the next key // Status = EnumStateGetNextEnum(pEnumState, dwSubkey, KeyInformationClass, pKeyInfo, cbKeyInfo, pcbKeyInfo, &fFreeState); // // Below is an optimization for apps that enumerate HKEY_CLASSES_ROOT but close the handle and reopen it each // time before they call the registry enumeration api. This is a very bad way to use the api (that's two extra // kernel calls for the open and close per enumeration), but existing applications do this and // without the optimization, their enumeration times can go from 3 seconds to 1 or more minutes. With this optimization, // the time gets back down to a few seconds. This happened because we lost state after the close -- when the new // key was opened, we had to call the kernel to enumerate all the keys up to the requested index since we had no // previous state to go by -- this ends up making the entire enumeration an O(n^2) operation instead of O(n) as it // had been when callers didn't close the key during the enumeration. Here, n is a kernel trap to enumerate a key. // // // Above, we retrieved an enumeration state for the root of classes -- this state reflects the enumeration state // of the last handle that was used to enumerate the root on this thread. This way, when a new handle is opened // to enumerate the root, we start with this state which will most likely be right at the index before the requested // index. Instead of making i calls to NtEnumerateKey where i is the index of enumeration requested by the caller, // we make 1 or at most 2 calls. 
// // // Here, we update the root state to match the recently enumerated state. Note that this only happens // if the key being enumerated refers to HKEY_CLASSES_ROOT since pRootState is only non-NULL in this // case. // if (pRootState) { EnumTableUpdateRootState(pEnumTable, pRootState, pEnumState, fFreeState); } if (fFreeState) { NTSTATUS RemoveStatus; // // For whatever reason, we've been told to free the enumeration state for this key. // This could be due to an error, or it could be a normal situation such as reaching // the end of an enumeration. // RemoveStatus = EnumTableRemoveKey(pEnumTable, hKey, ENUM_TABLE_REMOVEKEY_CRITERIA_THISTHREAD); ASSERT(NT_SUCCESS(RemoveStatus)); } cleanup: // // It's safe to unlock the table now. // { NTSTATUS CriticalSectionStatus; CriticalSectionStatus = RtlLeaveCriticalSection(&(pEnumTable->CriticalSection)); ASSERT( NT_SUCCESS( CriticalSectionStatus ) ); #if DBG if ( !NT_SUCCESS( CriticalSectionStatus ) ) { DbgPrint( "WINREG: RtlLeaveCriticalSection() in EnumTableGetNextEnum() failed. 
Status = %lx \n", CriticalSectionStatus );
        }
#endif
    }

    return Status;
}


NTSTATUS EnumTableGetKeyState(
    EnumTable* pEnumTable,
    HKEY hKey,
    DWORD dwSubkey,
    EnumState** ppEnumState,
    EnumState** ppRootState,
    LPDWORD pcbKeyInfo)
/*++

Routine Description:

    Finds a key state for hKey -- creates a new state for hkey if there is no
    existing state

Arguments:

    pEnumTable - enumeration table in which to find key's state

    hKey - handle to registry key for which to find state

    dwSubkey - subkey that we're trying to enumerate -- needed in case we need
        to create a new state

    ppEnumState - pointer to where we should return address of the retrieved state,

    ppRootState - if the retrieved state is the root of the classes tree, this
        address will point to a known state for the root that's good across all
        hkey's enumerated on this thread

    pcbKeyInfo - stores size of key information on return.  Note that this is
        only written (set to 0) when the key turns out not to be a "special"
        class key -- see below.

Return Value:

    STATUS_SUCCESS for success, other error code on error

Notes:

--*/
{
    NTSTATUS Status;

    if (ppRootState) {
        *ppRootState = NULL;
    }

    //
    // Find the enumeration state for the requested key. Note that even if this
    // function fails to find an existing state, in which case it returns a failure code
    // it can still return an empty pEnumState for that hKey so it can be added later
    //
    Status = EnumTableFindKeyState(pEnumTable, hKey, ppEnumState);

    if (!NT_SUCCESS(Status)) {

        if (STATUS_OBJECT_NAME_NOT_FOUND == Status) {

            //
            // This means the key didn't exist, already, so we'll add it
            //
            Status = EnumTableAddKey(pEnumTable, hKey, dwSubkey, ppEnumState, ppRootState);

            if (!NT_SUCCESS(Status)) {
                return Status;
            }

            //
            // The above function can succeed but return a NULL pEnumState -- this
            // happens if it turns out this key is not a "special key" -- i.e. this key's
            // parents exist in only one hive, not two, so we don't need to do anything here
            // and regular enumeration will suffice.
            //
            if (!(*ppEnumState)) {

                //
                // We set this value to let our caller know that this isn't a class key
                //
                *pcbKeyInfo = 0;
            }
        }

    } else {

        //
        // An existing state was found -- if it refers to HKEY_CLASSES_ROOT,
        // also hand back the per-thread root state.
        //
        if ((*ppEnumState)->fClassesRoot) {
            Status = EnumTableGetRootState(pEnumTable, ppRootState);
        }
    }

    return Status;
}


NTSTATUS EnumTableGetRootState(
    EnumTable* pEnumTable,
    EnumState** ppRootState)
/*++

Routine Description:

    Retrieves the per-thread enumeration state for the classes root.

Arguments:

    pEnumTable - enumeration table in which to find the root state

    ppRootState - points to address of root state on return

Return Value:

    Returns NT_SUCCESS (0) for success; error-code for failure.

Notes:

--*/
{
    DWORD cbKeyInfo;  // NOTE(review): unused in this routine
    KeyStateList* pStateList;

    //
    // We assume the caller has made sure that a state list
    // for this thread exists -- this should never, ever fail
    //
    pStateList = (KeyStateList*) StateObjectListFind(
        &(pEnumTable->ThreadEnumList),
        ULongToPtr((const unsigned long)GetCurrentThreadId()));

    ASSERT(pStateList);

    *ppRootState = &(pStateList->RootState);

    return STATUS_SUCCESS;
}


void EnumTableUpdateRootState(
    EnumTable* pEnumTable,
    EnumState* pRootState,
    EnumState* pEnumState,
    BOOL fResetState)
/*++

Routine Description:

    Updates the state of the classes root for this thread -- this allows us to
    optimize for apps that close handles when enumerating hkcr -- we use this
    classes root state when no existing state is found for an hkey that refers
    to hkcr, and we update this state after enumerating an hkcr key on this
    thread so that it will be up to date.

Arguments:

    pEnumTable - enumeration table in which the classes root state resides

    pRootState - classes root state that should be updated

    pEnumState - state that contains the data with which pRootState should
        be updated

    fResetState - if TRUE, this flag means we should not update the root state
        with pEnumState's data, just reset it. If FALSE, we update the root
        with pEnumState's data.

Return Value:

    None.
Notes:

--*/
{
    NTSTATUS Status;

    //
    // See if we need to merely reset the root or actually
    // update it with another state
    //
    if (!fResetState) {

        //
        // Don't reset -- copy over the state from pEnumState to the
        // root state -- the root's state will be the same as pEnumState's
        // after this copy
        //
        Status = EnumStateCopy(pRootState, pEnumState);

    } else {

        //
        // Just clear out the state -- caller didn't request that we
        // use pEnumState.
        //
        Status = EnumStateInit(
            pRootState,
            0,
            0,
            ENUM_DIRECTION_FORWARD,
            NULL);
    }

    //
    // If there's a failure, it must be out-of-memory, so we should get rid
    // of this state since we can't make it accurately reflect the true
    // enumeration state
    //
    if (!NT_SUCCESS(Status)) {

#if DBG
        DbgPrint( "WINREG: failure in UpdateRootState. Status = %lx \n", Status );
#endif

        ASSERT(STATUS_NO_MEMORY == Status);

        EnumStateClear(pRootState);
    }
}


VOID KeyStateListInit(KeyStateList* pStateList)
/*++

Routine Description:

    Initializes a state list for the current thread.

Arguments:

    pStateList -- pointer to KeyStateList object to initialize

Return Value:

    None.

Notes:

--*/
{
    //
    // First initialize the base object -- keyed by the current thread id
    //
    StateObjectListInit((StateObjectList*) pStateList, ULongToPtr((const unsigned long)GetCurrentThreadId()));

    //
    // Now do KeyStateList specific init -- give the embedded root state a
    // blank (no-hkey) initialization.  Return value is intentionally ignored:
    // a blank init cannot fail.
    //
    (void) EnumStateInit(
        &(pStateList->RootState),
        NULL,
        0,
        ENUM_DIRECTION_FORWARD,
        NULL);
}


VOID KeyStateListDestroy(StateObject* pObject)
/*++

Routine Description:

    Destroys a KeyStateList, freeing its resources such as memory or kernel
    object handles

Arguments:

    pObject -- pointer to KeyStateList object to destroy

Return Value:

    None.

Notes:

--*/
{
    KeyStateList* pThisList;

    pThisList = (KeyStateList*) pObject;

    //
    // Destroy all states in this list
    //
    StateObjectListClear(
        (StateObjectList*) pObject,
        EnumStateDestroy);

    //
    // Free resources associated with the root state
    //
    EnumStateClear(&(pThisList->RootState));

    //
    // Free the data structure for this object
    //
    RegClassHeapFree(pThisList);
}


NTSTATUS EnumStateInit(
    EnumState* pEnumState,
    HKEY hKey,
    DWORD dwFirstSubKey,
    DWORD dwDirection,
    SKeySemantics* pKeySemantics)
/*++

Routine Description:

    Initializes enumeration state

Arguments:

    pEnumState - enumeration state to initialize

    hKey - registry key to which this state refers

    dwFirstSubKey - index of the first subkey which this state will enumerate

    dwDirection - direction through which we should enumerate -- either
        ENUM_DIRECTION_FORWARD or ENUM_DIRECTION_BACKWARD

    pKeySemantics - structure containing information about hKey

Return Value:

    Returns NT_SUCCESS (0) for success; error-code for failure.

--*/
{
    NTSTATUS Status;
    ULONG cMachineKeys;
    ULONG cUserKeys;
    HKEY hkOther;

    ASSERT((ENUM_DIRECTION_FORWARD == dwDirection) ||
           (ENUM_DIRECTION_BACKWARD == dwDirection) ||
           (ENUM_DIRECTION_IGNORE == dwDirection));

    ASSERT((ENUM_DIRECTION_IGNORE == dwDirection) ?
hKey == NULL : TRUE);

    Status = STATUS_SUCCESS;
    hkOther = NULL;

    //
    // If no hkey is specified, this is an init of a blank enum
    // state, so clear everything
    //
    if (!hKey) {
        memset(pEnumState, 0, sizeof(*pEnumState));
    }

    //
    // Clear each subtree
    //
    EnumSubtreeStateClear(&(pEnumState->UserState));
    EnumSubtreeStateClear(&(pEnumState->MachineState));

    //
    // Reset each subtree
    //
    pEnumState->UserState.Finished = FALSE;
    pEnumState->MachineState.Finished = FALSE;

    pEnumState->UserState.iSubKey = 0;
    pEnumState->MachineState.iSubKey = 0;

    // NOTE(review): these outer counters are shadowed by locals in the
    // backward-enumeration branch below and are otherwise unused.
    cUserKeys = 0;
    cMachineKeys = 0;

    if (pKeySemantics) {
        StateObjectInit((StateObject*) &(pEnumState->Object), hKey);
    }

    if (hKey) {

        if (pKeySemantics) {
            pEnumState->fClassesRoot = IsRootKey(pKeySemantics);
        }

        //
        // open the other key if we have enough info to do so --
        //
        if (pKeySemantics) {

            //
            // Remember, only one of the handles returned below
            // is new -- the other is simply hKey
            //
            Status = BaseRegGetUserAndMachineClass(
                pKeySemantics,
                hKey,
                MAXIMUM_ALLOWED,
                &(pEnumState->hkMachineKey),
                &(pEnumState->hkUserKey));

            if (!NT_SUCCESS(Status)) {
                return Status;
            }
        }

        //
        // for backwards enumerations
        //
        if (ENUM_DIRECTION_BACKWARD == dwDirection) {

            ULONG cMachineKeys;
            ULONG cUserKeys;
            HKEY hkUser;
            HKEY hkMachine;

            cMachineKeys = 0;
            cUserKeys = 0;

            hkMachine = pEnumState->hkMachineKey;
            hkUser = pEnumState->hkUserKey;

            //
            // In order to query for subkey counts, we should
            // to get a new handle since the caller supplied handle
            // may not have enough permissions
            //
            {
                HKEY hkSource;
                HANDLE hCurrentProcess;

                hCurrentProcess = NtCurrentProcess();

                // Duplicate whichever of the two handles is the caller's
                // original hKey, asking only for KEY_QUERY_VALUE access.
                hkSource = (hkMachine == hKey) ?
hkMachine : hkUser;

                Status = NtDuplicateObject(
                    hCurrentProcess,
                    hkSource,
                    hCurrentProcess,
                    &hkOther,
                    KEY_QUERY_VALUE,
                    FALSE,
                    0);

                if (!NT_SUCCESS(Status)) {
                    goto error_exit;
                }

                if (hkSource == hkUser) {
                    hkUser = hkOther;
                } else {
                    hkMachine = hkOther;
                }
            }

            //
            // find new start -- query for index of last subkey in
            // each hive
            //
            if (hkMachine) {

                Status = GetSubKeyCount(hkMachine, &cMachineKeys);

                if (!NT_SUCCESS(Status)) {
                    goto error_exit;
                }
            }

            if (hkUser) {

                Status = GetSubKeyCount(hkUser, &cUserKeys);

                if (!NT_SUCCESS(Status)) {
                    goto error_exit;
                }
            }

            //
            // If either subtree has no subkeys, we're done enumerating that
            // subtree
            //
            if (!cUserKeys) {
                pEnumState->UserState.Finished = TRUE;
            } else {
                pEnumState->UserState.iSubKey = cUserKeys - 1;
            }

            if (!cMachineKeys) {
                pEnumState->MachineState.Finished = TRUE;
            } else {
                pEnumState->MachineState.iSubKey = cMachineKeys - 1;
            }
        }
    }

    //
    // Set members of this structure
    //
    pEnumState->dwThreadId = GetCurrentThreadId();
    pEnumState->Direction = dwDirection;
    pEnumState->dwLastRequest = dwFirstSubKey;
    pEnumState->LastLocation = ENUM_LOCATION_NONE;
    pEnumState->hKey = hKey;

    // NOTE(review): success path intentionally falls through to error_exit --
    // the !NT_SUCCESS check below makes the cleanup a no-op on success, and
    // hkOther (the duplicated query handle) is closed on all paths.
error_exit:

    if (!NT_SUCCESS(Status)) {
        EnumSubtreeStateClear(&(pEnumState->MachineState));
        EnumSubtreeStateClear(&(pEnumState->UserState));
    }

    if (hkOther) {
        NtClose(hkOther);
    }

    return Status;
}


NTSTATUS EnumStateGetNextEnum(
    EnumState* pEnumState,
    DWORD dwSubKey,
    KEY_INFORMATION_CLASS KeyInformationClass,
    PVOID pKeyInfo,
    DWORD cbKeyInfo,
    LPDWORD pcbKeyInfo,
    BOOL* pfFreeState)
/*++

Routine Description:

    Gets the next key in an enumeration based on the current state.
Arguments:

    pEnumState - enumeration state on which to base our search for the next key

    dwSubKey - index of key to enumerate

    KeyInformationClass - enum for what sort of information to retrieve in the
        enumeration -- Basic Information or Node Information

    pKeyInfo - location to store retrieved data for caller

    cbKeyInfo - size of caller's info buffer

    pcbKeyInfo - size of data this function writes to buffer on return.

    pfFreeState - out param -- if set to TRUE, caller should free pEnumState.

Return Value:

    Returns NT_SUCCESS (0) for success; error-code for failure.

Notes:

    This function essentially enumerates from the previous index requested by
    the caller of RegEnumKeyEx to the new one. In most cases, this just means
    one trip to the kernel -- i.e. if a caller goes from index 2 to 3, or from
    3 to 2, this is one trip to the kernel. However, if the caller goes from 2
    to 5, we'll have to do several enumerations on the way from 2 to 5.

    Also, if the caller switches direction (i.e. starts off 0,1,2,3 and then
    requests 1), a large penalty may be incurred. When switching from
    ascending to descending, we have to enumerate all keys to the end and then
    before we can then enumerate down to the caller's requested index.
    Switching from descending to ascending is less expensive -- we know that
    the beginning is at 0 for both user and machine keys, so we can simply set
    our indices to 0 without enumerating anything. However, we must then
    enumerate to the caller's requested index.

    Note that for all descending enumerations, we must enumerate all the way
    to the end first before returning anything to the caller.

--*/
{
    NTSTATUS Status;
    LONG lIncrement;
    DWORD dwStart;
    DWORD dwLimit;
    EnumSubtreeState* pTreeState;

    //
    // If anything bad happens, this state should be freed
    //
    *pfFreeState = TRUE;

    //
    // Find out the limits (start, finish, increment) for
    // our enumeration. The increment is either 1 or -1,
    // depending on whether this is an ascending or descending
    // enumeration. EnumStateSetLimits will take into account
    // any changes in direction and set dwStart and dwLimit
    // accordingly.
    //
    Status = EnumStateSetLimits(
        pEnumState,
        dwSubKey,
        &dwStart,
        &dwLimit,
        &lIncrement);

    if (!NT_SUCCESS(Status)) {
        return Status;
    }

    //
    // Get the next enum to give back to the caller
    //
    Status = EnumStateChooseNext(
        pEnumState,
        dwSubKey,
        dwStart,
        dwLimit,
        lIncrement,
        &pTreeState);

    if (!NT_SUCCESS(Status)) {
        return Status;
    }

    //
    // We have retrieved information, so we should
    // not free this state
    //
    if (!(pEnumState->UserState.Finished && pEnumState->MachineState.Finished)) {
        *pfFreeState = FALSE;
    }

    //
    // Remember the last key we enumerated
    //
    pEnumState->dwLastRequest = dwSubKey;

    //
    // Copy the retrieved information to the user's
    // buffer.
    //
    Status = EnumSubtreeStateCopyKeyInfo(
        pTreeState,
        KeyInformationClass,
        pKeyInfo,
        cbKeyInfo,
        pcbKeyInfo);

    //
    // The copy could fail if the user's buffer isn't big enough --
    // if it succeeds, clear the name information for the subkey from
    // which we retrieved the data so that the next time we're called
    // we'll get the next subkey for that subtree.
    //
    if (NT_SUCCESS(Status)) {
        EnumSubtreeStateClear(pTreeState);
    }

    return Status;
}


NTSTATUS EnumStateSetLimits(
    EnumState* pEnumState,
    DWORD dwSubKey,
    LPDWORD pdwStart,
    LPDWORD pdwLimit,
    PLONG plIncrement)
/*++

Routine Description:

    Gets the limits (start, finish, increment) for enumerating a given subkey
    index

Arguments:

    pEnumState - enumeration state on which to base our limits

    dwSubKey - index of key which caller wants enumerated

    pdwStart - out param -- result is the place at which to start enumerating
        in order to find dwSubKey

    pdwLimit - out param -- result is the place at which to stop enumerating
        when looking for dwSubKey

    plIncrement - out param -- increment to use for enumeration. It will be
        set to 1 if the enumeration is upward (0,1,2...) or -1 if it is
        downard (3,2,1,...).

Return Value:

    Returns NT_SUCCESS (0) for success; error-code for failure.
Notes:

--*/
{
    LONG lNewIncrement;
    NTSTATUS Status;
    BOOL fSameKey;

    //
    // set our increment to the direction which our state remembers
    //
    *plIncrement = pEnumState->Direction == ENUM_DIRECTION_FORWARD ? 1 : -1;

    fSameKey = FALSE;

    //
    // Figure out what the new direction should be
    // This is done by comparing the current request
    // with the last request.
    //
    if (dwSubKey > pEnumState->dwLastRequest) {
        lNewIncrement = 1;
    } else if (dwSubKey < pEnumState->dwLastRequest) {
        lNewIncrement = -1;
    } else {

        //
        // We are enumerating a key that may already
        // have been enumerated
        //
        fSameKey = TRUE;
        lNewIncrement = *plIncrement;
    }

    //
    // See if we've changed direction
    //
    if (lNewIncrement != *plIncrement) {

        //
        // If so, we should throw away all existing state and start from scratch
        //
        Status = EnumStateInit(
            pEnumState,
            pEnumState->hKey,
            (-1 == lNewIncrement) ? dwSubKey : 0,
            (-1 == lNewIncrement) ? ENUM_DIRECTION_BACKWARD : ENUM_DIRECTION_FORWARD,
            NULL);

        if (!NT_SUCCESS(Status)) {
            return Status;
        }
    }

    //
    // By default, we start enumerating where we left off
    //
    *pdwStart = pEnumState->dwLastRequest;

    //
    // for state for which we have previously enumerated a key
    //
    if (ENUM_LOCATION_NONE != pEnumState->LastLocation) {

        //
        // We're going in the same direction as on the
        // previous call. We should start
        // one past our previous position. Note that we
        // only start there if this is a different key --
        // if we've already enumerated it we start at the
        // same spot.
        //
        if (!fSameKey) {
            *pdwStart += *plIncrement;
        } else {

            //
            // If we're being asked for the same index
            // multiple times they're probably deleting
            // keys -- we should reset ourselves to
            // the beginning so their enum will hit
            // all the keys
            //

            //
            // We're starting at zero, so set ourselves
            // to start at the beginning
            //
            Status = EnumStateInit(
                pEnumState,
                pEnumState->hKey,
                0,
                ENUM_DIRECTION_FORWARD,
                NULL);

            if (!NT_SUCCESS(Status)) {
                return Status;
            }

            *plIncrement = 1;
            pEnumState->Direction = ENUM_DIRECTION_FORWARD;
            *pdwStart = 0;
        }

    } else {

        //
        // No previous calls were made for this state
        //
        if (ENUM_DIRECTION_BACKWARD == pEnumState->Direction) {

            //
            // For backwards enumerations, we want to get an
            // accurate count of total subkeys and start there
            //
            Status = ClassKeyCountSubKeys(
                pEnumState->hKey,
                pEnumState->hkUserKey,
                pEnumState->hkMachineKey,
                0,
                pdwStart);

            if (!NT_SUCCESS(Status)) {
                return Status;
            }

            //
            // Make sure we don't go past the end
            //
            if (dwSubKey >= *pdwStart) {
                return STATUS_NO_MORE_ENTRIES;
            }

            //
            // This is a zero-based index, so to
            // put our start at the very end we must
            // be one less than the number of keys
            //
            (*pdwStart)--;

            *plIncrement = -1;

        } else {
            *plIncrement = 1;
        }
    }

    //
    // Set limit to be one past requested subkey
    //
    *pdwLimit = dwSubKey + *plIncrement;

    return STATUS_SUCCESS;
}


NTSTATUS EnumStateChooseNext(
    EnumState* pEnumState,
    DWORD dwSubKey,
    DWORD dwStart,
    DWORD dwLimit,
    LONG lIncrement,
    EnumSubtreeState** ppTreeState)
/*++

Routine Description:

    Iterates through registry keys to get the key requested by the caller

Arguments:

    pEnumState - enumeration state on which to base our search

    dwSubKey - index of key which caller wants enumerated

    dwStart - The place at which to start enumerating in order to find dwSubKey

    dwLimit - The place at which to stop enumerating when looking for dwSubKey

    lIncrement - Increment to use for enumeration. It will be set to 1 if the
        enumeration is upward (0,1,2...) or -1 if it is downard (3,2,1,...).
    ppTreeState - out param -- pointer to address of subtree state in which
        this regkey was found -- each EnumState has two EnumSubtreeState's --
        one for user and one for machine.

Return Value:

    Returns NT_SUCCESS (0) for success; error-code for failure.

Notes:

--*/
{
    DWORD iCurrent;
    NTSTATUS Status;
    BOOL fClearLast;

    Status = STATUS_NO_MORE_ENTRIES;
    fClearLast = FALSE;

    //
    // We will now iterate from dwStart to dwLimit so that we can find the key
    // requested by the caller
    //
    for (iCurrent = dwStart; iCurrent != dwLimit; iCurrent += lIncrement) {

        BOOL fFoundKey;
        BOOL fIgnoreFailure;

        fFoundKey = FALSE;
        fIgnoreFailure = FALSE;

        Status = STATUS_NO_MORE_ENTRIES;

        //
        // Clear last subtree
        //
        if (fClearLast) {
            EnumSubtreeStateClear(*ppTreeState);
        }

        //
        // if key names aren't present, alloc space and get names
        //
        if (pEnumState->hkUserKey) {

            if (pEnumState->UserState.pKeyInfo) {
                fFoundKey = TRUE;
            } else if (!(pEnumState->UserState.Finished)) {

                // get user key info
                Status = EnumClassKey(
                    pEnumState->hkUserKey,
                    &(pEnumState->UserState));

                fFoundKey = NT_SUCCESS(Status);

                //
                // If there are no more subkeys for this subtree,
                // mark it as finished
                //
                if (!NT_SUCCESS(Status)) {

                    if (STATUS_NO_MORE_ENTRIES != Status) {
                        return Status;
                    }

                    if (lIncrement > 0) {
                        pEnumState->UserState.Finished = TRUE;
                    } else {
                        pEnumState->UserState.iSubKey += lIncrement;
                        fIgnoreFailure = TRUE;
                    }
                }
            }
        }

        if (pEnumState->hkMachineKey) {

            if (pEnumState->MachineState.pKeyInfo) {
                fFoundKey = TRUE;
            } else if (!(pEnumState->MachineState.Finished)) {

                // get machine key info
                Status = EnumClassKey(
                    pEnumState->hkMachineKey,
                    &(pEnumState->MachineState));

                //
                // If there are no more subkeys for this subtree,
                // mark it as finished
                //
                // NOTE(review): unlike the user branch above, a hard failure
                // (anything other than STATUS_NO_MORE_ENTRIES) is not returned
                // here -- it falls through to the !fFoundKey handling below.
                // Confirm whether this asymmetry is intentional.
                //
                if (NT_SUCCESS(Status)) {
                    fFoundKey = TRUE;
                } else if (STATUS_NO_MORE_ENTRIES == Status) {

                    if (lIncrement > 0) {
                        pEnumState->MachineState.Finished = TRUE;
                    } else {
                        pEnumState->MachineState.iSubKey += lIncrement;
                        fIgnoreFailure = TRUE;
                    }
                }
            }
        }

        //
        // If we found no keys in either user or machine locations, there are
        // no more keys.
        //
        if (!fFoundKey) {

            //
            // For descending enumerations, we ignore STATUS_NO_MORE_ENTRIES
            // and keep going until we find one.
            //
            if (fIgnoreFailure) {
                continue;
            }

            return Status;
        }

        //
        // If we already hit the bottom, skip to the end
        //
        if ((pEnumState->UserState.iSubKey == 0) &&
            (pEnumState->MachineState.iSubKey == 0) &&
            (lIncrement < 0)) {
            iCurrent = dwLimit - lIncrement;
        }

        //
        // Now we need to choose between keys in the machine hive and user hives --
        // this call will choose which key to use.
        //
        Status = EnumStateCompareSubtrees(pEnumState, lIncrement, ppTreeState);

        if (!NT_SUCCESS(Status)) {
            pEnumState->dwLastRequest = dwSubKey;
            return Status;
        }

        fClearLast = TRUE;
    }

    return Status;
}


NTSTATUS EnumStateCompareSubtrees(
    EnumState* pEnumState,
    LONG lIncrement,
    EnumSubtreeState** ppSubtree)
/*++

Routine Description:

    Compares the user and machine subtrees of an enumeration state to see
    which of the two current keys in each hive should be returned as the next
    key in an enumeration

Arguments:

    pEnumState - enumeration state on which to base our search

    lIncrement - Increment to use for enumeration. It will be set to 1 if the
        enumeration is upward (0,1,2...) or -1 if it is downard (3,2,1,...).

    ppSubtree - out param -- pointer to address of subtree state where key was
        found -- the name of the key can be extracted from it.

Return Value:

    Returns NT_SUCCESS (0) for success; error-code for failure.
Notes:

--*/
{
    //
    // If both subtrees have a current subkey name, we'll need to compare
    // the names
    //
    if (pEnumState->MachineState.pKeyInfo && pEnumState->UserState.pKeyInfo) {

        UNICODE_STRING MachineKeyName;
        UNICODE_STRING UserKeyName;
        LONG lCompareResult;

        MachineKeyName.Buffer = pEnumState->MachineState.pKeyInfo->Name;
        MachineKeyName.Length = (USHORT) pEnumState->MachineState.pKeyInfo->NameLength;

        UserKeyName.Buffer = pEnumState->UserState.pKeyInfo->Name;
        UserKeyName.Length = (USHORT) pEnumState->UserState.pKeyInfo->NameLength;

        //
        // Do the comparison -- multiplying by lIncrement flips the ordering
        // for descending enumerations so "smaller" always means "next".
        //
        lCompareResult = RtlCompareUnicodeString(&UserKeyName, &MachineKeyName, TRUE) * lIncrement;

        //
        // User wins comparison
        //
        if (lCompareResult < 0) {

            // choose user
            *ppSubtree = &(pEnumState->UserState);
            pEnumState->LastLocation = ENUM_LOCATION_USER;

        } else if (lCompareResult > 0) {

            //
            // Machine wins choose machine
            //
            *ppSubtree = &(pEnumState->MachineState);
            pEnumState->LastLocation = ENUM_LOCATION_MACHINE;

        } else {

            //
            // Comparison returned equality -- the keys have the same
            // name. This means the same key name exists in both machine and
            // user, so we need to make a choice about which one we will enumerate.
            // Policy for per-user class registration enumeration is to choose user, just
            // as we do for other api's such as RegOpenKeyEx and RegCreateKeyEx.
            //
            if (!((pEnumState->MachineState.iSubKey == 0) && (lIncrement < 0))) {
                pEnumState->MachineState.iSubKey += lIncrement;
            } else {
                pEnumState->MachineState.Finished = TRUE;
            }

            //
            // Clear the machine state and move it to the next index -- we don't
            // have to clear the user state yet because the state of whichever subtree
            // was selected is cleared down below
            //
            EnumSubtreeStateClear(&(pEnumState->MachineState));

            pEnumState->LastLocation = ENUM_LOCATION_USER;
            *ppSubtree = &(pEnumState->UserState);
        }

    } else if (!(pEnumState->UserState.pKeyInfo) && !(pEnumState->MachineState.pKeyInfo)) {

        //
        // Neither subtree state has a subkey, so there are no subkeys
        //
        return STATUS_NO_MORE_ENTRIES;

    } else if (pEnumState->MachineState.pKeyInfo) {

        //
        // Only machine has a subkey
        //
        *ppSubtree = &(pEnumState->MachineState);
        pEnumState->LastLocation = ENUM_LOCATION_MACHINE;

    } else {

        //
        // only user has a subkey
        //
        *ppSubtree = &(pEnumState->UserState);
        pEnumState->LastLocation = ENUM_LOCATION_USER;
    }

    //
    // change the state of the subtree which we selected -- advance its index,
    // or mark it finished if a descending enumeration has hit index zero
    //
    if (!(((*ppSubtree)->iSubKey == 0) && (lIncrement < 0))) {
        (*ppSubtree)->iSubKey += lIncrement;
    } else {
        (*ppSubtree)->Finished = TRUE;
    }

    return STATUS_SUCCESS;
}


//
// StateObject destructor callback for EnumState objects -- releases the
// state's resources and frees the object itself.
//
void EnumStateDestroy(StateObject* pObject)
{
    EnumStateClear((EnumState*) pObject);

    RegClassHeapFree(pObject);
}


VOID EnumStateClear(EnumState* pEnumState)
/*++

Routine Description:

    Clears the enumeration state

Arguments:

    pEnumState - enumeration state to clear

Return Value:

    None.
Notes: --*/ { // // Close an existing reference to a second key // if (pEnumState->hkMachineKey && (pEnumState->hKey != pEnumState->hkMachineKey)) { NtClose(pEnumState->hkMachineKey); } else if (pEnumState->hkUserKey && (pEnumState->hKey != pEnumState->hkUserKey)) { NtClose(pEnumState->hkUserKey); } // // Free any heap memory held by our subtrees // EnumSubtreeStateClear(&(pEnumState->UserState)); EnumSubtreeStateClear(&(pEnumState->MachineState)); // // reset everything in this state // memset(pEnumState, 0, sizeof(*pEnumState)); } BOOL EnumStateIsEmpty(EnumState* pEnumState) /*++ Routine Description: Returns whether or not an enumeration state is empty. An enumeration state is empty if it is not associated with any particular registry key handle Arguments: pEnumState - enumeration state to clear Return Value: Returns NT_SUCCESS (0) for success; error-code for failure. Notes: --*/ { return pEnumState->hKey == NULL; } NTSTATUS EnumStateCopy( EnumState* pDestState, EnumState* pEnumState) /*++ Routine Description: Copies an enumeration state for one hkey to the state for another hkey -- note that it the does not change the hkey referred to by the destination state, it just makes pDestState->hKey's state the same as pEnumState's Arguments: pDestState - enumeration state which is destination of the copy pEnumState - source enumeration for the copy Return Value: STATUS_SUCCESS for success, other error code on error Notes: --*/ { NTSTATUS Status; PKEY_NODE_INFORMATION pKeyInfoUser; PKEY_NODE_INFORMATION pKeyInfoMachine; Status = STATUS_SUCCESS; // // Copy simple data // pDestState->Direction = pEnumState->Direction; pDestState->LastLocation = pEnumState->LastLocation; pDestState->dwLastRequest = pEnumState->dwLastRequest; pDestState->dwThreadId = pEnumState->dwThreadId; // // Free existing data before we overwrite it -- note that the pKeyInfo can point to a fixed buffer inside the state or // a heap allocated buffer, so we must see which one it points to before we decide 
to free it // if (pDestState->UserState.pKeyInfo && (pDestState->UserState.pKeyInfo != (PKEY_NODE_INFORMATION) pDestState->UserState.KeyInfoBuffer)) { RegClassHeapFree(pDestState->UserState.pKeyInfo); pDestState->UserState.pKeyInfo = NULL; } if (pDestState->MachineState.pKeyInfo && (pDestState->MachineState.pKeyInfo != (PKEY_NODE_INFORMATION) pDestState->MachineState.KeyInfoBuffer)) { RegClassHeapFree(pDestState->MachineState.pKeyInfo); pDestState->MachineState.pKeyInfo = NULL; } // // easy way to copy states -- we'll have to fix up below though since pKeyInfo can be // self-referential. // memcpy(&(pDestState->UserState), &(pEnumState->UserState), sizeof(pEnumState->UserState)); memcpy(&(pDestState->MachineState), &(pEnumState->MachineState), sizeof(pEnumState->MachineState)); pKeyInfoUser = NULL; pKeyInfoMachine = NULL; // // Copy new data -- as above, keep in mind that pKeyInfo can be self-referential, so check // for that before deciding whether to allocate heap or use the internal fixed buffer of the // structure. 
// if (pEnumState->UserState.pKeyInfo && ((pEnumState->UserState.pKeyInfo != (PKEY_NODE_INFORMATION) pEnumState->UserState.KeyInfoBuffer))) { pKeyInfoUser = (PKEY_NODE_INFORMATION) RegClassHeapAlloc(pEnumState->UserState.cbKeyInfo); if (!pKeyInfoUser) { Status = STATUS_NO_MEMORY; } pDestState->UserState.pKeyInfo = pKeyInfoUser; RtlCopyMemory(pDestState->UserState.pKeyInfo, pEnumState->UserState.pKeyInfo, pEnumState->UserState.cbKeyInfo); } else { if (pDestState->UserState.pKeyInfo) { pDestState->UserState.pKeyInfo = (PKEY_NODE_INFORMATION) pDestState->UserState.KeyInfoBuffer; } } if (pEnumState->MachineState.pKeyInfo && ((pEnumState->MachineState.pKeyInfo != (PKEY_NODE_INFORMATION) pEnumState->MachineState.KeyInfoBuffer))) { pKeyInfoMachine = (PKEY_NODE_INFORMATION) RegClassHeapAlloc(pEnumState->MachineState.cbKeyInfo); if (!pKeyInfoMachine) { Status = STATUS_NO_MEMORY; } pDestState->MachineState.pKeyInfo = pKeyInfoMachine; RtlCopyMemory(pDestState->MachineState.pKeyInfo, pEnumState->MachineState.pKeyInfo, pEnumState->MachineState.cbKeyInfo); } else { if (pDestState->MachineState.pKeyInfo) { pDestState->MachineState.pKeyInfo = (PKEY_NODE_INFORMATION) pDestState->MachineState.KeyInfoBuffer; } } // // On error, make sure we clean up. // if (!NT_SUCCESS(Status)) { if (pKeyInfoUser) { RegClassHeapFree(pKeyInfoUser); } if (pKeyInfoMachine) { RegClassHeapFree(pKeyInfoMachine); } } return Status; } void EnumSubtreeStateClear(EnumSubtreeState* pTreeState) /*++ Routine Description: This function frees the key data associated with this subtree state Arguments: pTreeState -- tree state to clear Return Value: None. 
Note: --*/ { // // see if we're using pre-alloced buffer -- if not, free it // if (pTreeState->pKeyInfo && (((LPBYTE) pTreeState->pKeyInfo) != pTreeState->KeyInfoBuffer)) { RegClassHeapFree(pTreeState->pKeyInfo); } pTreeState->pKeyInfo = NULL; } NTSTATUS EnumSubtreeStateCopyKeyInfo( EnumSubtreeState* pTreeState, KEY_INFORMATION_CLASS KeyInformationClass, PVOID pDestKeyInfo, ULONG cbDestKeyInfo, PULONG pcbResult) /*++ Routine Description: Copies information about a key into a buffer supplied by the caller Arguments: pTreeState - subtree tate from which to copy KeyInformationClass - the type of buffer supplied by the caller -- either a KEY_NODE_INFORMATION or KEY_BASIC_INFORMATION structure pDestKeyInfo - caller's buffer for key information cbDestKeyInfo - size of caller's buffer pcbResult - out param -- amount of data to be written to caller's buffer Return Value: Returns NT_SUCCESS (0) for success; error-code for failure. Notes: --*/ { ULONG cbNeeded; ASSERT((KeyInformationClass == KeyNodeInformation) || (KeyInformationClass == KeyBasicInformation)); // // Find out how big the caller's buffer needs to be. 
This // depends on whether the caller specified full or node information // as well as the size of the variable size members of those // structures // if (KeyNodeInformation == KeyInformationClass) { PKEY_NODE_INFORMATION pNodeInformation; // // Copy fixed length pieces first -- caller expects them to // be set even when the variable length members are not large enough // // // Set ourselves to point to caller's buffer // pNodeInformation = (PKEY_NODE_INFORMATION) pDestKeyInfo; // // Copy all fixed-length pieces of structure // pNodeInformation->LastWriteTime = pTreeState->pKeyInfo->LastWriteTime; pNodeInformation->TitleIndex = pTreeState->pKeyInfo->TitleIndex; pNodeInformation->ClassOffset = pTreeState->pKeyInfo->ClassOffset; pNodeInformation->ClassLength = pTreeState->pKeyInfo->ClassLength; pNodeInformation->NameLength = pTreeState->pKeyInfo->NameLength; // // Take care of the size of the node information structure // cbNeeded = sizeof(KEY_NODE_INFORMATION); if (cbDestKeyInfo < cbNeeded) { return STATUS_BUFFER_TOO_SMALL; } // // Add in the size of the variable length members // cbNeeded += pTreeState->pKeyInfo->NameLength; cbNeeded += pTreeState->pKeyInfo->ClassLength; cbNeeded -= sizeof(WCHAR); // the structure's Name member is already set to 1, // so that one has already been accounted for in // the size of the structure } else { PKEY_BASIC_INFORMATION pBasicInformation; // // Copy fixed length pieces first -- caller expects them to // be set even when the variable length members are not large enough // // // Set ourselves to point to caller's buffer // pBasicInformation = (PKEY_BASIC_INFORMATION) pDestKeyInfo; // // Copy all fixed-length pieces of structure // pBasicInformation->LastWriteTime = pTreeState->pKeyInfo->LastWriteTime; pBasicInformation->TitleIndex = pTreeState->pKeyInfo->TitleIndex; pBasicInformation->NameLength = pTreeState->pKeyInfo->NameLength; cbNeeded = sizeof(KEY_BASIC_INFORMATION); // // Take care of the size of the basic information 
structure // if (cbDestKeyInfo < cbNeeded) { return STATUS_BUFFER_TOO_SMALL; } // // Add in the size of the variable length members // cbNeeded += pTreeState->pKeyInfo->NameLength; cbNeeded -= sizeof(WCHAR); // the structure's Name member is already set to 1, // so that one has already been accounted for in // the size of the structure } // // Store the amount needed for the caller // *pcbResult = cbNeeded; // // See if the caller supplied enough buffer -- leave if not // if (cbDestKeyInfo < cbNeeded) { return STATUS_BUFFER_OVERFLOW; } // // We copy variable-length information differently depending // on which type of structure was passsed in // if (KeyNodeInformation == KeyInformationClass) { PBYTE pDestClass; PBYTE pSrcClass; PKEY_NODE_INFORMATION pNodeInformation; pNodeInformation = (PKEY_NODE_INFORMATION) pDestKeyInfo; // // Copy variable length pieces such as name and class // RtlCopyMemory(pNodeInformation->Name, pTreeState->pKeyInfo->Name, pTreeState->pKeyInfo->NameLength); // // Only copy the class if it exists // if (((LONG)pTreeState->pKeyInfo->ClassOffset) >= 0) { pDestClass = ((PBYTE) pNodeInformation) + pTreeState->pKeyInfo->ClassOffset; pSrcClass = ((PBYTE) pTreeState->pKeyInfo) + pTreeState->pKeyInfo->ClassOffset; RtlCopyMemory(pDestClass, pSrcClass, pTreeState->pKeyInfo->ClassLength); } } else { PKEY_BASIC_INFORMATION pBasicInformation; // // Set ourselves to point to caller's buffer // pBasicInformation = (PKEY_BASIC_INFORMATION) pDestKeyInfo; // // Copy variable length pieces -- only name is variable length // RtlCopyMemory(pBasicInformation->Name, pTreeState->pKeyInfo->Name, pTreeState->pKeyInfo->NameLength); } return STATUS_SUCCESS; } NTSTATUS EnumClassKey( HKEY hKey, EnumSubtreeState* pTreeState) /*++ Routine Description: Enumerates a subkey for a subtree state -- calls the kernel Arguments: hKey - key we want the kernel to enumerate pTreeState - subtree state -- either a user or machine subtree Return Value: Returns NT_SUCCESS (0) for success; 
error-code for failure. Notes: --*/ { PKEY_NODE_INFORMATION pCurrentKeyInfo; NTSTATUS Status; ASSERT(!(pTreeState->pKeyInfo)); // // First try to use the buffer built in to the subtree state // pCurrentKeyInfo = (PKEY_NODE_INFORMATION) pTreeState->KeyInfoBuffer; // // Query for the necessary information about the supplied key. // Status = NtEnumerateKey( hKey, pTreeState->iSubKey, KeyNodeInformation, pCurrentKeyInfo, sizeof(pTreeState->KeyInfoBuffer), &(pTreeState->cbKeyInfo)); ASSERT( Status != STATUS_BUFFER_TOO_SMALL ); // // If the subtree state's buffer isn't big enough, we'll have // to ask the heap to give us one. // if (STATUS_BUFFER_OVERFLOW == Status) { pCurrentKeyInfo = RegClassHeapAlloc(pTreeState->cbKeyInfo); // // If the memory allocation fails, return a Registry Status. // if( ! pCurrentKeyInfo ) { return STATUS_NO_MEMORY; } // // Query for the necessary information about the supplied key. // Status = NtEnumerateKey( hKey, pTreeState->iSubKey, KeyNodeInformation, pCurrentKeyInfo, pTreeState->cbKeyInfo, &(pTreeState->cbKeyInfo)); } if (!NT_SUCCESS(Status)) { return Status; } // // set the subtree state's reference to point // to the location of the data // pTreeState->pKeyInfo = pCurrentKeyInfo; return STATUS_SUCCESS; } NTSTATUS GetSubKeyCount( HKEY hkClassKey, LPDWORD pdwUserSubKeys) /*++ Routine Description: Counts the number of subkeys under a key Arguments: hkClassKey - key whose subkeys we wish to count pdwUserSubKeys - out param for number of subkeys Return Value: Returns NT_SUCCESS (0) for success; error-code for failure. 
Notes: --*/ { NTSTATUS Status; PKEY_CACHED_INFORMATION KeyCachedInfo; ULONG BufferLength; BYTE PrivateKeyCachedInfo[ sizeof( KEY_CACHED_INFORMATION ) ]; // // Initialize out params // *pdwUserSubKeys = 0; // // Set up to query kernel for subkey information // KeyCachedInfo = (PKEY_CACHED_INFORMATION) PrivateKeyCachedInfo; BufferLength = sizeof(PrivateKeyCachedInfo); Status = QueryKeyInfo( hkClassKey, KeyCachedInformation, &KeyCachedInfo, BufferLength, FALSE, 0 ); if (NT_SUCCESS(Status)) { // // set the out param with the subkey data from the kernel call // *pdwUserSubKeys = KeyCachedInfo->SubKeys; ASSERT( KeyCachedInfo == ( PKEY_CACHED_INFORMATION )PrivateKeyCachedInfo ); } return Status; } NTSTATUS ClassKeyCountSubKeys( HKEY hKey, HKEY hkUser, HKEY hkMachine, DWORD cMax, LPDWORD pcSubKeys) /*++ Routine Description: Counts the total number of subkeys of a special key -- i.e. the sum of the subkeys in the user and machine portions of that special key minus duplicates. Arguments: hkUser - user part of special key hkMachine - machine part of special key cMax - Maximum number of keys to count -- if zero, this is ignored pcSubKeys - out param -- count of subkeys Return Value: Returns NT_SUCCESS (0) for success; error-code for failure. Notes: This is INCREDIBLY expensive if either hkUser or hkMachine has more than a few subkeys. It essentially merges two sorted lists by enumerating in both the user and machine locations, and viewing them as a merged list by doing comparisons betweens items in each list -- separate user and machine pointers are advanced according to the results of the comparison. This means that if there are N keys under hkUser and M keys under hkMachine, this function will make N+M calls to the kernel to enumerate the keys. This is currently the only way to do this -- before, an approximation was used in which the sum of the number of subkeys in the user and machine versions was returned. 
This method didn't take duplicates into account, and so it overestimated the number of keys. This was not thought to be a problem since there is no guarantee to callers that the number they receive is completely up to date, but it turns out that there are applications that make that assumption (such as regedt32) that do not function properly unless the exact number is returned. --*/ { NTSTATUS Status; BOOL fCheckUser; BOOL fCheckMachine; EnumSubtreeState UserTree; EnumSubtreeState MachineTree; DWORD cMachineKeys; DWORD cUserKeys; OBJECT_ATTRIBUTES Obja; HKEY hkUserCount; HKEY hkMachineCount; HKEY hkNewKey; UNICODE_STRING EmptyString = {0, 0, 0}; Status = STATUS_SUCCESS; hkNewKey = NULL; cMachineKeys = 0; cUserKeys = 0; // // Initialize ourselves to check in both the user // and machine hives for subkeys // fCheckUser = (hkUser != NULL); fCheckMachine = (hkMachine != NULL); memset(&UserTree, 0, sizeof(UserTree)); memset(&MachineTree, 0, sizeof(MachineTree)); // // We can't be sure that the user key was opened // with the right permissions so we'll open // a version that has the correct permissions // if (fCheckUser && (hkUser == hKey)) { InitializeObjectAttributes( &Obja, &EmptyString, OBJ_CASE_INSENSITIVE, hkUser, NULL); Status = NtOpenKey( &hkNewKey, KEY_ENUMERATE_SUB_KEYS | KEY_QUERY_VALUE, &Obja); if (!NT_SUCCESS(Status)) { return Status; } hkUserCount = hkNewKey; } else { hkUserCount = hkUser; } if (fCheckMachine && (hkMachine == hKey)) { InitializeObjectAttributes( &Obja, &EmptyString, OBJ_CASE_INSENSITIVE, hkMachine, NULL); Status = NtOpenKey( &hkNewKey, KEY_ENUMERATE_SUB_KEYS | KEY_QUERY_VALUE, &Obja); if (!NT_SUCCESS(Status)) { return Status; } hkMachineCount = hkNewKey; } else { hkMachineCount = hkMachine; } // // Now check to see how many keys are in the user subtree // if (fCheckUser) { Status = GetSubKeyCount(hkUserCount, &cUserKeys); if (!NT_SUCCESS(Status)) { goto cleanup; } // // We only need to enumerate the user portion if it has subkeys // 
fCheckUser = (cUserKeys != 0); } // // Now check to see how many keys are in the user subtree // if (fCheckMachine) { Status = GetSubKeyCount(hkMachineCount, &cMachineKeys); if (!NT_SUCCESS(Status)) { goto cleanup; } // // We only need to enumerate the machine portion if it has subkeys // fCheckMachine = (cMachineKeys != 0); } if (!fCheckUser) { *pcSubKeys = cMachineKeys; Status = STATUS_SUCCESS; goto cleanup; } if (!fCheckMachine) { *pcSubKeys = cUserKeys; Status = STATUS_SUCCESS; goto cleanup; } ASSERT(fCheckMachine && fCheckUser); *pcSubKeys = 0; // // Keep enumerating subkeys until one of the locations // runs out of keys // for (;;) { NTSTATUS EnumStatus; // // If we can still check in the user hive and we // are missing user key info, query the kernel for it // if (!(UserTree.pKeyInfo)) { EnumStatus = EnumClassKey( hkUserCount, &UserTree); // // If there are no more user subkeys, set our // flag so that we no longer look in the user portion // for subkeys // if (!NT_SUCCESS(EnumStatus)) { if (STATUS_NO_MORE_ENTRIES == EnumStatus) { *pcSubKeys += cMachineKeys; Status = STATUS_SUCCESS; break; } else { Status = EnumStatus; break; } } } // // if we can still check in the machine hive and // we are missing machine info, query for it // if (!(MachineTree.pKeyInfo)) { EnumStatus = EnumClassKey( hkMachineCount, &MachineTree); // // Turn off checking in machine if there are // no more machine keys // if (!NT_SUCCESS(EnumStatus)) { if (STATUS_NO_MORE_ENTRIES == EnumStatus) { *pcSubKeys += cUserKeys; Status = STATUS_SUCCESS; break; } else { Status = EnumStatus; break; } } } // // If we have keys in both user and machine, we need to compare // the key names to see when to advance our subtree pointers // { LONG lCompare; UNICODE_STRING MachineKeyName; UNICODE_STRING UserKeyName; MachineKeyName.Buffer = MachineTree.pKeyInfo->Name; MachineKeyName.Length = (USHORT) MachineTree.pKeyInfo->NameLength; UserKeyName.Buffer = UserTree.pKeyInfo->Name; UserKeyName.Length = (USHORT) 
UserTree.pKeyInfo->NameLength; // // Do the comparison of user and machine keys // lCompare = RtlCompareUnicodeString(&UserKeyName, &MachineKeyName, TRUE); // // User is smaller, so move our user pointer up and clear it // so we'll query for user data next time // if (lCompare <= 0) { EnumSubtreeStateClear(&UserTree); UserTree.iSubKey++; cUserKeys--; } // // Machine is smaller, so move our user pointer up and clear it // so we'll query for machine data next time // if (lCompare >= 0) { EnumSubtreeStateClear(&MachineTree); MachineTree.iSubKey++; cMachineKeys--; } // // Increase the total number of subkeys // (*pcSubKeys)++; } // // Only enumerate up to max -- the caller // doesn't need to go all the way to the end // if (cMax && (*pcSubKeys > cMax)) { break; } } // // Free any buffer held by these subtree states // EnumSubtreeStateClear(&UserTree); EnumSubtreeStateClear(&MachineTree); cleanup: if (hkNewKey) { NtClose(hkNewKey); } return Status; } #endif // LOCAL
the_stack_data/36073964.c
/*
 * Apparently an auto-generated program-analysis fixture (invariant /
 * termination-benchmark style) rather than code intended to run to
 * completion: with d == 1, the empty-bodied `while (d < 100) { }`
 * below never terminates, so everything after it is dead code.
 * NOTE(review): the non-termination looks deliberate for the
 * benchmark -- confirm before "fixing" it.
 */
int main() {
  int a;
  int b;
  int c;
  int d;

  /* Initial state: a=0, b=42, c=90, d=1. */
  a = 0;
  b = 42;
  c = 90;
  d = 1;

  /* d < 100 holds here, so this runs: c=90, b=c, a=b => a=b=c=90. */
  if (d < 100) {
    c = 90;
    b = c;
    a = b;
  }

  /* Empty body and d is never modified inside: this loop never exits. */
  while (d < 100) {
  }

  /* Everything below is unreachable (see the loop above). */
  if (d < 100) {
    a = b;
    b = c;
    c = 90;
    d = d + 1;
  }
  if (d < 100) {
    a = b;
    b = c;
    d = d + 1;
  }
  if (d < 100) {
    a = b;
    d = d + 1;
  }
  while (d < 100) {
    d = d + 1;
  }
}
the_stack_data/210284.c
/* { dg-do compile } */ /* { dg-options "-O2" } */ struct shared_ptr_struct { unsigned long phase : 48; unsigned thread : 16; void *addr; } x; void foo (void) { x.thread = 2; }
the_stack_data/96712.c
// WARNING: locking bug in corrupted
// https://syzkaller.appspot.com/bug?id=31bb980c49ffa58e8dd2d95cc1cec36ec5b9c3a5
// status:fixed

// autogenerated by syzkaller (https://github.com/google/syzkaller)
//
// NOTE(review): this is a machine-generated kernel-bug reproducer.
// The exact syscall sequence, magic constants, and fixed mmap
// addresses are load-bearing for reproducing the report above --
// annotate only, do not restructure.

#define _GNU_SOURCE
#include <dirent.h>
#include <endian.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

/* Millisecond sleep helper. */
static void sleep_ms(uint64_t ms)
{
  usleep(ms * 1000);
}

/* Monotonic wall clock in milliseconds; aborts the process if the
 * clock cannot be read. */
static uint64_t current_time_ms(void)
{
  struct timespec ts;
  if (clock_gettime(CLOCK_MONOTONIC, &ts))
    exit(1);
  return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}

/* Kills the test child (and its process group), polls for ~100ms for
 * it to be reaped, then writes to every
 * /sys/fs/fuse/connections/<id>/abort file -- presumably to unwedge
 * children blocked on FUSE requests -- and finally blocks until the
 * child is reaped. */
static void kill_and_wait(int pid, int* status)
{
  kill(-pid, SIGKILL);
  kill(pid, SIGKILL);
  int i;
  /* Fast path: give the child up to ~100 * 1ms to exit. */
  for (i = 0; i < 100; i++) {
    if (waitpid(-1, status, WNOHANG | __WALL) == pid)
      return;
    usleep(1000);
  }
  DIR* dir = opendir("/sys/fs/fuse/connections");
  if (dir) {
    for (;;) {
      struct dirent* ent = readdir(dir);
      if (!ent)
        break;
      if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0)
        continue;
      char abort[300];
      snprintf(abort, sizeof(abort), "/sys/fs/fuse/connections/%s/abort",
               ent->d_name);
      int fd = open(abort, O_WRONLY);
      if (fd == -1) {
        continue;
      }
      /* Any write aborts the connection; the written bytes are
       * irrelevant, so the error is deliberately ignored. */
      if (write(fd, abort, 1) < 0) {
      }
      close(fd);
    }
    closedir(dir);
  } else {
  }
  /* Slow path: block until the child is finally reaped. */
  while (waitpid(-1, status, __WALL) != pid) {
  }
}

#define SYZ_HAVE_SETUP_TEST 1
/* Child-side setup: die when the parent dies, and become our own
 * process group leader so kill(-pid, ...) reaches all descendants. */
static void setup_test()
{
  prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
  setpgrp();
}

#define SYZ_HAVE_RESET_TEST 1
/* Child-side teardown: close any fds the payload may have opened
 * (fds 3..29 by convention in syzkaller repros). */
static void reset_test()
{
  int fd;
  for (fd = 3; fd < 30; fd++)
    close(fd);
}

static void execute_one(void);

#define WAIT_FLAGS __WALL

/* Repro driver: fork a fresh child per iteration, run the payload in
 * it, and give it 5 seconds before force-killing -- forever. */
static void loop(void)
{
  int iter;
  for (iter = 0;; iter++) {
    int pid = fork();
    if (pid < 0)
      exit(1);
    if (pid == 0) {
      setup_test();
      execute_one();
      reset_test();
      exit(0);
    }
    int status = 0;
    uint64_t start = current_time_ms();
    for (;;) {
      if (waitpid(-1, &status, WNOHANG | WAIT_FLAGS) == pid)
        break;
      sleep_ms(1);
      if (current_time_ms() - start < 5 * 1000)
        continue;
      /* 5s timeout elapsed: child is presumed wedged. */
      kill_and_wait(pid, &status);
      break;
    }
  }
}

/* Result slot for the socket fd; 0xffff.. marks "no result yet". */
uint64_t r[1] = {0xffffffffffffffff};

/* The actual repro payload.  NOTE(review): decoding the constants,
 * this presumably is socket(AF_INET6=0xa, SOCK_DGRAM=2, 0x88) +
 * connect to [::1]:0 (sockaddr_in6 built at the fixed mmap address
 * 0x20000080, length 0x1c) + sendmmsg with vlen 0x4ff over
 * zero-initialized mapped memory -- confirm against the syzkaller
 * report before relying on this reading. */
void execute_one(void)
{
  long res = 0;
  res = syscall(__NR_socket, 0xa, 2, 0x88);
  if (res != -1)
    r[0] = res;
  *(uint16_t*)0x20000080 = 0xa;
  *(uint16_t*)0x20000082 = htobe16(0);
  *(uint32_t*)0x20000084 = htobe32(0);
  *(uint64_t*)0x20000088 = htobe64(0);
  *(uint64_t*)0x20000090 = htobe64(1);
  *(uint32_t*)0x20000098 = 0;
  syscall(__NR_connect, r[0], 0x20000080, 0x1c);
  syscall(__NR_sendmmsg, r[0], 0x200092c0, 0x4ff, 0);
}

int main(void)
{
  /* Map the fixed scratch area all payload pointers refer to
   * (16 MiB at 0x20000000, read/write, anonymous/private/fixed). */
  syscall(__NR_mmap, 0x20000000, 0x1000000, 3, 0x32, -1, 0);
  loop();
  return 0;
}
the_stack_data/220456133.c
/* Recursive descent parsing:
 * rules are translated into equivalent C expressions
 *
 * Translate a rule such as:
 * sum : product { +|- product } ...
 *
 * into:
 *
 * void sum(void) {
 *     product();
 *     for(;;) {
 *         switch(token) {
 *         case '+':
 *         case '-':
 *             scan(0),
 *             product();
 *             continue;
 *         }
 *         return;
 *     }
 * }
 *
 * We create a C function for each grammatical rule -
 * they call each other. The only problem is
 * infinite recursion... (how does this case happen?)
 *
 * token always contains the next input symbol,
 * since scan returns the token value. Remember that
 * scan(0) persists the bp pointer so that it can
 * get the next token from the buf. Only when the
 * buffer is exhausted is scan(buf) called to
 * produce more input.
 *
 */

/* Processor
 * simple arithmetic is mostly easy:
 * compute as soon as possible,
 * and pass back the results.
 *
 * Complicated expressions are not as easy:
 * we need to build a tree that can be
 * processed in traversals.
 *
 * a basic structure can be:
 * struct Node {
 *     enum tokens token;
 *     struct Node * left;
 *     struct Node * right;
 * }
 *
 * but this is inflexible. Say we
 * have the token "number", and now
 * we need to store a number. Then
 * we need a field for it... which
 * doesn't seem like a problem...?
 *
 * Space is wasted in the case of
 * unary operators (where it's
 * a function like 'abs'), because now
 * one of the left and right nodes
 * will be null.
 *
 * It's... probably best to copy
 * over the files and then
 * implement them further instead
 * of doing this, because the
 * book serves as a complement to
 * the source files that you're
 * supposed to have access to.
 * I'm sure that there will be
 * enough exercises to extend
 * an arithmetic expression calc.
 */
the_stack_data/167331605.c
// RUN: %clang_cc1 -triple s390x-linux-gnu \ // RUN: -emit-llvm -o - %s | FileCheck %s // RUN: %clang_cc1 -triple s390x-linux-gnu -target-feature +vector \ // RUN: -emit-llvm -o - %s | FileCheck %s // RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu z13 \ // RUN: -emit-llvm -o - %s | FileCheck %s // Scalar types char pass_char(char arg) { return arg; } // CHECK-LABEL: define signext i8 @pass_char(i8 signext %{{.*}}) short pass_short(short arg) { return arg; } // CHECK-LABEL: define signext i16 @pass_short(i16 signext %{{.*}}) int pass_int(int arg) { return arg; } // CHECK-LABEL: define signext i32 @pass_int(i32 signext %{{.*}}) long pass_long(long arg) { return arg; } // CHECK-LABEL: define i64 @pass_long(i64 %{{.*}}) long long pass_longlong(long long arg) { return arg; } // CHECK-LABEL: define i64 @pass_longlong(i64 %{{.*}}) __int128 pass_int128(__int128 arg) { return arg; } // CHECK-LABEL: define void @pass_int128(i128* noalias sret %{{.*}}, i128*) float pass_float(float arg) { return arg; } // CHECK-LABEL: define float @pass_float(float %{{.*}}) double pass_double(double arg) { return arg; } // CHECK-LABEL: define double @pass_double(double %{{.*}}) long double pass_longdouble(long double arg) { return arg; } // CHECK-LABEL: define void @pass_longdouble(fp128* noalias sret %{{.*}}, fp128*) // Complex types _Complex char pass_complex_char(_Complex char arg) { return arg; } // CHECK-LABEL: define void @pass_complex_char({ i8, i8 }* noalias sret %{{.*}}, { i8, i8 }* %{{.*}}arg) _Complex short pass_complex_short(_Complex short arg) { return arg; } // CHECK-LABEL: define void @pass_complex_short({ i16, i16 }* noalias sret %{{.*}}, { i16, i16 }* %{{.*}}arg) _Complex int pass_complex_int(_Complex int arg) { return arg; } // CHECK-LABEL: define void @pass_complex_int({ i32, i32 }* noalias sret %{{.*}}, { i32, i32 }* %{{.*}}arg) _Complex long pass_complex_long(_Complex long arg) { return arg; } // CHECK-LABEL: define void @pass_complex_long({ i64, i64 }* noalias sret 
%{{.*}}, { i64, i64 }* %{{.*}}arg) _Complex long long pass_complex_longlong(_Complex long long arg) { return arg; } // CHECK-LABEL: define void @pass_complex_longlong({ i64, i64 }* noalias sret %{{.*}}, { i64, i64 }* %{{.*}}arg) _Complex float pass_complex_float(_Complex float arg) { return arg; } // CHECK-LABEL: define void @pass_complex_float({ float, float }* noalias sret %{{.*}}, { float, float }* %{{.*}}arg) _Complex double pass_complex_double(_Complex double arg) { return arg; } // CHECK-LABEL: define void @pass_complex_double({ double, double }* noalias sret %{{.*}}, { double, double }* %{{.*}}arg) _Complex long double pass_complex_longdouble(_Complex long double arg) { return arg; } // CHECK-LABEL: define void @pass_complex_longdouble({ fp128, fp128 }* noalias sret %{{.*}}, { fp128, fp128 }* %{{.*}}arg) // Aggregate types struct agg_1byte { char a[1]; }; struct agg_1byte pass_agg_1byte(struct agg_1byte arg) { return arg; } // CHECK-LABEL: define void @pass_agg_1byte(%struct.agg_1byte* noalias sret %{{.*}}, i8 %{{.*}}) struct agg_2byte { char a[2]; }; struct agg_2byte pass_agg_2byte(struct agg_2byte arg) { return arg; } // CHECK-LABEL: define void @pass_agg_2byte(%struct.agg_2byte* noalias sret %{{.*}}, i16 %{{.*}}) struct agg_3byte { char a[3]; }; struct agg_3byte pass_agg_3byte(struct agg_3byte arg) { return arg; } // CHECK-LABEL: define void @pass_agg_3byte(%struct.agg_3byte* noalias sret %{{.*}}, %struct.agg_3byte* %{{.*}}) struct agg_4byte { char a[4]; }; struct agg_4byte pass_agg_4byte(struct agg_4byte arg) { return arg; } // CHECK-LABEL: define void @pass_agg_4byte(%struct.agg_4byte* noalias sret %{{.*}}, i32 %{{.*}}) struct agg_5byte { char a[5]; }; struct agg_5byte pass_agg_5byte(struct agg_5byte arg) { return arg; } // CHECK-LABEL: define void @pass_agg_5byte(%struct.agg_5byte* noalias sret %{{.*}}, %struct.agg_5byte* %{{.*}}) struct agg_6byte { char a[6]; }; struct agg_6byte pass_agg_6byte(struct agg_6byte arg) { return arg; } // CHECK-LABEL: 
define void @pass_agg_6byte(%struct.agg_6byte* noalias sret %{{.*}}, %struct.agg_6byte* %{{.*}}) struct agg_7byte { char a[7]; }; struct agg_7byte pass_agg_7byte(struct agg_7byte arg) { return arg; } // CHECK-LABEL: define void @pass_agg_7byte(%struct.agg_7byte* noalias sret %{{.*}}, %struct.agg_7byte* %{{.*}}) struct agg_8byte { char a[8]; }; struct agg_8byte pass_agg_8byte(struct agg_8byte arg) { return arg; } // CHECK-LABEL: define void @pass_agg_8byte(%struct.agg_8byte* noalias sret %{{.*}}, i64 %{{.*}}) struct agg_16byte { char a[16]; }; struct agg_16byte pass_agg_16byte(struct agg_16byte arg) { return arg; } // CHECK-LABEL: define void @pass_agg_16byte(%struct.agg_16byte* noalias sret %{{.*}}, %struct.agg_16byte* %{{.*}}) // Float-like aggregate types struct agg_float { float a; }; struct agg_float pass_agg_float(struct agg_float arg) { return arg; } // CHECK-LABEL: define void @pass_agg_float(%struct.agg_float* noalias sret %{{.*}}, float %{{.*}}) struct agg_double { double a; }; struct agg_double pass_agg_double(struct agg_double arg) { return arg; } // CHECK-LABEL: define void @pass_agg_double(%struct.agg_double* noalias sret %{{.*}}, double %{{.*}}) struct agg_longdouble { long double a; }; struct agg_longdouble pass_agg_longdouble(struct agg_longdouble arg) { return arg; } // CHECK-LABEL: define void @pass_agg_longdouble(%struct.agg_longdouble* noalias sret %{{.*}}, %struct.agg_longdouble* %{{.*}}) struct agg_float_a8 { float a __attribute__((aligned (8))); }; struct agg_float_a8 pass_agg_float_a8(struct agg_float_a8 arg) { return arg; } // CHECK-LABEL: define void @pass_agg_float_a8(%struct.agg_float_a8* noalias sret %{{.*}}, double %{{.*}}) struct agg_float_a16 { float a __attribute__((aligned (16))); }; struct agg_float_a16 pass_agg_float_a16(struct agg_float_a16 arg) { return arg; } // CHECK-LABEL: define void @pass_agg_float_a16(%struct.agg_float_a16* noalias sret %{{.*}}, %struct.agg_float_a16* %{{.*}}) // Verify that the following are *not* 
float-like aggregate types struct agg_nofloat1 { float a; float b; }; struct agg_nofloat1 pass_agg_nofloat1(struct agg_nofloat1 arg) { return arg; } // CHECK-LABEL: define void @pass_agg_nofloat1(%struct.agg_nofloat1* noalias sret %{{.*}}, i64 %{{.*}}) struct agg_nofloat2 { float a; int b; }; struct agg_nofloat2 pass_agg_nofloat2(struct agg_nofloat2 arg) { return arg; } // CHECK-LABEL: define void @pass_agg_nofloat2(%struct.agg_nofloat2* noalias sret %{{.*}}, i64 %{{.*}}) struct agg_nofloat3 { float a; int : 0; }; struct agg_nofloat3 pass_agg_nofloat3(struct agg_nofloat3 arg) { return arg; } // CHECK-LABEL: define void @pass_agg_nofloat3(%struct.agg_nofloat3* noalias sret %{{.*}}, i32 %{{.*}}) // Accessing variable argument lists int va_int(__builtin_va_list l) { return __builtin_va_arg(l, int); } // CHECK-LABEL: define signext i32 @va_int(%struct.__va_list_tag* %{{.*}}) // CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0 // CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]] // CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5 // CHECK: br i1 [[FITS_IN_REGS]], // CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8 // CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 20 // CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3 // CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]] // CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]] // CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to i32* // CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1 // CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]] // CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2 // CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** 
[[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 4 // CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to i32* // CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8 // CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi i32* [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ] // CHECK: [[RET:%[^ ]+]] = load i32, i32* [[VA_ARG_ADDR]] // CHECK: ret i32 [[RET]] long va_long(__builtin_va_list l) { return __builtin_va_arg(l, long); } // CHECK-LABEL: define i64 @va_long(%struct.__va_list_tag* %{{.*}}) // CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0 // CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]] // CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5 // CHECK: br i1 [[FITS_IN_REGS]], // CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8 // CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 16 // CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3 // CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]] // CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]] // CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to i64* // CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1 // CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]] // CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2 // CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 0 // CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to i64* // CHECK: 
// NOTE(review): tail of a SystemZ (s390x) va_arg ABI codegen test driven by LLVM FileCheck.
// The directive comments below (the "//" lines carrying [[...]] capture patterns) are FileCheck
// expectations matched against clang's emitted IR — they are test data, not documentation,
// and must not be reworded; each tiny va_* function is the C input being compiled.
[[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8 // CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi i64* [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ] // CHECK: [[RET:%[^ ]+]] = load i64, i64* [[VA_ARG_ADDR]] // CHECK: ret i64 [[RET]] long long va_longlong(__builtin_va_list l) { return __builtin_va_arg(l, long long); } // CHECK-LABEL: define i64 @va_longlong(%struct.__va_list_tag* %{{.*}}) // CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0 // CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]] // CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5 // CHECK: br i1 [[FITS_IN_REGS]], // CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8 // CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 16 // CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3 // CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]] // CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]] // CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to i64* // CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1 // CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]] // CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2 // CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 0 // CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to i64* // CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8 // CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi
i64* [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ] // CHECK: [[RET:%[^ ]+]] = load i64, i64* [[VA_ARG_ADDR]] // CHECK: ret i64 [[RET]] double va_double(__builtin_va_list l) { return __builtin_va_arg(l, double); } // CHECK-LABEL: define double @va_double(%struct.__va_list_tag* %{{.*}}) // CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 1 // CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]] // CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 4 // CHECK: br i1 [[FITS_IN_REGS]], // CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8 // CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 128 // CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3 // CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]] // CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]] // CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to double* // CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1 // CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]] // CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2 // CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 0 // CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to double* // CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8 // CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi double* [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ] // CHECK: [[RET:%[^ ]+]] = load double, double* [[VA_ARG_ADDR]] // CHECK: ret double [[RET]] long double va_longdouble(__builtin_va_list
l) { return __builtin_va_arg(l, long double); } // CHECK-LABEL: define void @va_longdouble(fp128* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}}) // CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0 // CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]] // CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5 // CHECK: br i1 [[FITS_IN_REGS]], // CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8 // CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 16 // CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3 // CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]] // CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]] // CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to fp128** // CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1 // CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]] // CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2 // CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 0 // CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to fp128** // CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8 // CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi fp128** [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ] // CHECK: [[INDIRECT_ARG:%[^ ]+]] = load fp128*, fp128** [[VA_ARG_ADDR]] // CHECK: [[RET:%[^ ]+]] = load fp128, fp128* [[INDIRECT_ARG]] // CHECK: store fp128 [[RET]], fp128* %{{.*}} // CHECK: ret void _Complex char va_complex_char(__builtin_va_list l) { return
__builtin_va_arg(l, _Complex char); } // CHECK-LABEL: define void @va_complex_char({ i8, i8 }* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}} // CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0 // CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]] // CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5 // CHECK: br i1 [[FITS_IN_REGS]], // CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8 // CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 16 // CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3 // CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]] // CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]] // CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to { i8, i8 }** // CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1 // CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]] // CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2 // CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 0 // CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to { i8, i8 }** // CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8 // CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi { i8, i8 }** [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ] // CHECK: [[INDIRECT_ARG:%[^ ]+]] = load { i8, i8 }*, { i8, i8 }** [[VA_ARG_ADDR]] // CHECK: ret void struct agg_1byte va_agg_1byte(__builtin_va_list l) { return __builtin_va_arg(l, struct agg_1byte); } // CHECK-LABEL: define void @va_agg_1byte(%struct.agg_1byte*
noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}} // CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0 // CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]] // CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5 // CHECK: br i1 [[FITS_IN_REGS]], // CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8 // CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 23 // CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3 // CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]] // CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]] // CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to %struct.agg_1byte* // CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1 // CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]] // CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2 // CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 7 // CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_1byte* // CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8 // CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_1byte* [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ] // CHECK: ret void struct agg_2byte va_agg_2byte(__builtin_va_list l) { return __builtin_va_arg(l, struct agg_2byte); } // CHECK-LABEL: define void @va_agg_2byte(%struct.agg_2byte* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}} // CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag*
%{{.*}}, i32 0, i32 0 // CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]] // CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5 // CHECK: br i1 [[FITS_IN_REGS]], // CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8 // CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 22 // CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3 // CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]] // CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]] // CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to %struct.agg_2byte* // CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1 // CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]] // CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2 // CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 6 // CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_2byte* // CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8 // CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_2byte* [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ] // CHECK: ret void struct agg_3byte va_agg_3byte(__builtin_va_list l) { return __builtin_va_arg(l, struct agg_3byte); } // CHECK-LABEL: define void @va_agg_3byte(%struct.agg_3byte* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}} // CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0 // CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]] // CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5 // CHECK:
br i1 [[FITS_IN_REGS]], // CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8 // CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 16 // CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3 // CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]] // CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]] // CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to %struct.agg_3byte** // CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1 // CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]] // CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2 // CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 0 // CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_3byte** // CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8 // CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_3byte** [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ] // CHECK: [[INDIRECT_ARG:%[^ ]+]] = load %struct.agg_3byte*, %struct.agg_3byte** [[VA_ARG_ADDR]] // CHECK: ret void struct agg_4byte va_agg_4byte(__builtin_va_list l) { return __builtin_va_arg(l, struct agg_4byte); } // CHECK-LABEL: define void @va_agg_4byte(%struct.agg_4byte* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}} // CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0 // CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]] // CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5 // CHECK: br i1 [[FITS_IN_REGS]], // CHECK: [[SCALED_REG_COUNT:%[^ ]+]] =
mul i64 [[REG_COUNT]], 8 // CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 20 // CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3 // CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]] // CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]] // CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to %struct.agg_4byte* // CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1 // CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]] // CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2 // CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 4 // CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_4byte* // CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8 // CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_4byte* [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ] // CHECK: ret void struct agg_8byte va_agg_8byte(__builtin_va_list l) { return __builtin_va_arg(l, struct agg_8byte); } // CHECK-LABEL: define void @va_agg_8byte(%struct.agg_8byte* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}} // CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0 // CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]] // CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5 // CHECK: br i1 [[FITS_IN_REGS]], // CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8 // CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 16 // CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds
%struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3 // CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]] // CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]] // CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to %struct.agg_8byte* // CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1 // CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]] // CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2 // CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 0 // CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_8byte* // CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8 // CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_8byte* [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ] // CHECK: ret void struct agg_float va_agg_float(__builtin_va_list l) { return __builtin_va_arg(l, struct agg_float); } // CHECK-LABEL: define void @va_agg_float(%struct.agg_float* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}} // CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 1 // CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]] // CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 4 // CHECK: br i1 [[FITS_IN_REGS]], // CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8 // CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 128 // CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3 // CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]] // CHECK:
[[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]] // CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to %struct.agg_float* // CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1 // CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]] // CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2 // CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 4 // CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_float* // CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8 // CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_float* [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ] // CHECK: ret void struct agg_double va_agg_double(__builtin_va_list l) { return __builtin_va_arg(l, struct agg_double); } // CHECK-LABEL: define void @va_agg_double(%struct.agg_double* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}} // CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 1 // CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]] // CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 4 // CHECK: br i1 [[FITS_IN_REGS]], // CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8 // CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 128 // CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3 // CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]] // CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]] // CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to
%struct.agg_double* // CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1 // CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]] // CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2 // CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 0 // CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_double* // CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8 // CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_double* [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ] // CHECK: ret void struct agg_longdouble va_agg_longdouble(__builtin_va_list l) { return __builtin_va_arg(l, struct agg_longdouble); } // CHECK-LABEL: define void @va_agg_longdouble(%struct.agg_longdouble* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}} // CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0 // CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]] // CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5 // CHECK: br i1 [[FITS_IN_REGS]], // CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8 // CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 16 // CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3 // CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]] // CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]] // CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to %struct.agg_longdouble** // CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1 // CHECK: store i64 [[REG_COUNT1]], i64*
[[REG_COUNT_PTR]] // CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2 // CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 0 // CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_longdouble** // CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8 // CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_longdouble** [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ] // CHECK: [[INDIRECT_ARG:%[^ ]+]] = load %struct.agg_longdouble*, %struct.agg_longdouble** [[VA_ARG_ADDR]] // CHECK: ret void struct agg_float_a8 va_agg_float_a8(__builtin_va_list l) { return __builtin_va_arg(l, struct agg_float_a8); } // CHECK-LABEL: define void @va_agg_float_a8(%struct.agg_float_a8* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}} // CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 1 // CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]] // CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 4 // CHECK: br i1 [[FITS_IN_REGS]], // CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8 // CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 128 // CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3 // CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]] // CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]] // CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to %struct.agg_float_a8* // CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1 // CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]] //
CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2 // CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 0 // CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_float_a8* // CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8 // CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_float_a8* [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ] // CHECK: ret void struct agg_float_a16 va_agg_float_a16(__builtin_va_list l) { return __builtin_va_arg(l, struct agg_float_a16); } // CHECK-LABEL: define void @va_agg_float_a16(%struct.agg_float_a16* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}} // CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0 // CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]] // CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5 // CHECK: br i1 [[FITS_IN_REGS]], // CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8 // CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 16 // CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3 // CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]] // CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]] // CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to %struct.agg_float_a16** // CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1 // CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]] // CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2
// CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 0 // CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_float_a16** // CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8 // CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_float_a16** [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ] // CHECK: [[INDIRECT_ARG:%[^ ]+]] = load %struct.agg_float_a16*, %struct.agg_float_a16** [[VA_ARG_ADDR]] // CHECK: ret void struct agg_nofloat1 va_agg_nofloat1(__builtin_va_list l) { return __builtin_va_arg(l, struct agg_nofloat1); } // CHECK-LABEL: define void @va_agg_nofloat1(%struct.agg_nofloat1* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}} // CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0 // CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]] // CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5 // CHECK: br i1 [[FITS_IN_REGS]], // CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8 // CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 16 // CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3 // CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]] // CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]] // CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to %struct.agg_nofloat1* // CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1 // CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]] // CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2 // CHECK:
[[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 0 // CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_nofloat1* // CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8 // CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_nofloat1* [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ] // CHECK: ret void struct agg_nofloat2 va_agg_nofloat2(__builtin_va_list l) { return __builtin_va_arg(l, struct agg_nofloat2); } // CHECK-LABEL: define void @va_agg_nofloat2(%struct.agg_nofloat2* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}} // CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0 // CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]] // CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5 // CHECK: br i1 [[FITS_IN_REGS]], // CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8 // CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 16 // CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3 // CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]] // CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]] // CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to %struct.agg_nofloat2* // CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1 // CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]] // CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2 // CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8*
[[OVERFLOW_ARG_AREA]], i64 0 // CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_nofloat2* // CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8 // CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_nofloat2* [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ] // CHECK: ret void struct agg_nofloat3 va_agg_nofloat3(__builtin_va_list l) { return __builtin_va_arg(l, struct agg_nofloat3); } // CHECK-LABEL: define void @va_agg_nofloat3(%struct.agg_nofloat3* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}} // CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0 // CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]] // CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5 // CHECK: br i1 [[FITS_IN_REGS]], // CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8 // CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 20 // CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3 // CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]] // CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]] // CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to %struct.agg_nofloat3* // CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1 // CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]] // CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2 // CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 4 // CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_nofloat3* // CHECK:
[[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8 // CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]] // CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_nofloat3* [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ] // CHECK: ret void
the_stack_data/154827280.c
#include <stdio.h>

/* Demo program: print a value, its double, and its square. */
int main(void)
{
    int value = 10;
    int doubled = value * 2;
    int squared = value * value;

    printf("Wartosc: %d\nPodwojona: %d\nPodniesiona do kwadratu: %d\n",
           value, doubled, squared);
    return 0;
}
the_stack_data/59513129.c
/* * dlfcn-win32 * Copyright (c) 2007 Ramiro Polla * * dlfcn-win32 is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * dlfcn-win32 is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with dlfcn-win32; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
#ifdef _DEBUG
#define _CRTDBG_MAP_ALLOC
#include <stdlib.h>
#include <crtdbg.h>
#endif
#include <stdio.h>

/* EXPORT marks the symbol for export from the DLL on Windows builds. */
#if defined(_WIN32)
#define EXPORT __declspec(dllexport)
#else
#define EXPORT
#endif

/* Test export: write a greeting to stdout and report success (0). */
EXPORT int function( void )
{
    static const char msg[] = "Hello, world!\n";

    fputs( msg, stdout );
    return 0;
}
the_stack_data/156393900.c
#include<stdio.h>
/*
 * Minimal hello-world program.
 *
 * Fix: the original declared `main()` with an implicit int return type,
 * which is invalid since C99 (a hard error in C23), and fell off the end
 * without returning a value.  Output is unchanged (no trailing newline,
 * as in the original).
 */
int main(void)
{
    printf("Hello World");
    return 0;
}
the_stack_data/132571.c
#include <stdio.h>

#define QTD_NUMEROS 10

/*
 * Reads QTD_NUMEROS real numbers from the user, then reports how many
 * were negative and the sum of the positive ones.
 *
 * Fixed: the prompt previously displayed the raw 0-based loop index
 * ("Digite o nu'mero real 0 de 10"); it now shows a 1-based position
 * so the user sees 1..QTD_NUMEROS.
 */
int main(int argc, char const *argv[])
{
    float reais[QTD_NUMEROS], soma_positivos = 0;
    int qtd_negativos = 0;

    printf("Ola' por favor preencha a seguinte lista de nu'meros reais que eu te dou algumas info's sobre eles\n");
    for (int i = 0; i < QTD_NUMEROS; i++)
    {
        /* 1-based position for the user (i + 1), not the raw index. */
        printf("Digite o nu'mero real %i de %i\n", i + 1, QTD_NUMEROS);
        scanf("%f", &reais[i]);
        getchar(); /* consume the newline scanf leaves in the buffer */

        if (reais[i] < 0)
            qtd_negativos += 1;
        else if (reais[i] > 0)
            soma_positivos += reais[i];
    }

    printf("Quantidade de nu'meros negativos: %i\n", qtd_negativos);
    printf("Soma nu'meros positivos: %.2f\n", soma_positivos);
    return 0;
}
the_stack_data/95882.c
/**
 * Calling pr_version_get_str() from Proftpd.so
 * and display memory map exported via /proc/
 *
 * endrazine for Defcon 24 // August 2016
 */
#include <stdio.h>
#include <unistd.h>
#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Function pointer that receives the address of the version routine
 * resolved from the shared object at runtime. */
static char* (*getversion)(void) = NULL;

/* Load the shared object `filename` and resolve `symbolname` into the
 * global `getversion` pointer.  Terminates the process on any
 * dlopen()/dlsym() failure.
 * NOTE(review): the dlopen handle is never dlclose()d — presumably
 * intentional, since the mapping must stay alive for the resolved
 * symbol to remain callable from main(). */
int get_symbol(char *filename, char *symbolname){
	void *handle;
	char *error = 0;

	handle = dlopen(filename, RTLD_LAZY);
	if (!handle) {
		fprintf(stderr, "%s\n", dlerror());
		exit(EXIT_FAILURE);
	}

	/* dlsym() may legitimately return NULL, so failure is detected via
	 * dlerror() rather than the return value. */
	getversion = dlsym(handle, symbolname);
	if ((error = dlerror()) != NULL) {
		fprintf(stderr, "%s\n", error);
		exit(EXIT_FAILURE);
	}
	return 0;
}

/* Dump this process's memory map by running `cat /proc/<pid>/maps`
 * through the shell — shows where the dlopen()ed library was mapped.
 * Linux-specific (relies on procfs). */
int print_map(void){
	char cmd[256];

	memset(cmd, 0x00, 256);
	snprintf(cmd, 255, "cat /proc/%u/maps", getpid());
	system(cmd);
	return 0;
}

int main(void){
	/* Resolve pr_version_get_str() from the library, print the version
	 * string it returns, then show the resulting memory map. */
	get_symbol("/tmp/proftpd.so", "pr_version_get_str");
	printf("Using proftpd.so version: %s\n", getversion());
	print_map();
	return 0;
}
the_stack_data/115237.c
/* * --INFO-- * Address: 800E847C * Size: 00008C */ void GXSetTevOp(void) { /* .loc_0x0: cmpwi r3, 0 lis r5, 0x804B subi r0, r5, 0x7888 bne- .loc_0x20 rlwinm r4,r4,2,0,29 add r5, r0, r4 addi r9, r5, 0x28 b .loc_0x34 .loc_0x20: rlwinm r4,r4,2,0,29 add r5, r0, r4 add r9, r0, r4 addi r5, r5, 0x14 addi r9, r9, 0x3C .loc_0x34: lwz r7, -0x6D70(r2) rlwinm r3,r3,2,0,29 lwz r0, 0x0(r5) li r5, 0x61 add r6, r7, r3 lwz r3, 0x130(r6) lis r4, 0xCC01 stb r5, -0x8000(r4) rlwinm r3,r3,0,0,7 rlwimi r3,r0,0,8,31 stw r3, -0x8000(r4) li r0, 0 stw r3, 0x130(r6) lwz r8, 0x170(r6) lwz r3, 0x0(r9) rlwinm r8,r8,0,28,7 stb r5, -0x8000(r4) rlwimi r8,r3,0,8,27 stw r8, -0x8000(r4) stw r8, 0x170(r6) sth r0, 0x2(r7) blr */ } /* * --INFO-- * Address: 800E8508 * Size: 000044 */ void GXSetTevColorIn(void) { /* .loc_0x0: lwz r9, -0x6D70(r2) rlwinm r3,r3,2,0,29 li r0, 0x61 add r8, r9, r3 lwz r10, 0x130(r8) lis r3, 0xCC01 rlwimi r10,r4,12,16,19 stb r0, -0x8000(r3) addi r4, r10, 0 rlwimi r4,r5,8,20,23 rlwimi r4,r6,4,24,27 rlwimi r4,r7,0,28,31 stw r4, -0x8000(r3) li r0, 0 stw r4, 0x130(r8) sth r0, 0x2(r9) blr */ } /* * --INFO-- * Address: 800E854C * Size: 000044 */ void GXSetTevAlphaIn(void) { /* .loc_0x0: lwz r9, -0x6D70(r2) rlwinm r3,r3,2,0,29 li r0, 0x61 add r8, r9, r3 lwz r10, 0x170(r8) lis r3, 0xCC01 rlwimi r10,r4,13,16,18 stb r0, -0x8000(r3) addi r4, r10, 0 rlwimi r4,r5,10,19,21 rlwimi r4,r6,7,22,24 rlwimi r4,r7,4,25,27 stw r4, -0x8000(r3) li r0, 0 stw r4, 0x170(r8) sth r0, 0x2(r9) blr */ } /* * --INFO-- * Address: 800E8590 * Size: 000068 */ void GXSetTevColorOp(void) { /* .loc_0x0: lwz r0, -0x6D70(r2) rlwinm r9,r3,2,0,29 cmpwi r4, 0x1 add r3, r0, r9 lwz r3, 0x130(r3) rlwimi r3,r4,18,13,13 addi r10, r3, 0 bgt- .loc_0x2C rlwimi r10,r6,20,10,11 rlwimi r10,r5,16,14,15 b .loc_0x38 .loc_0x2C: li r0, 0x3 rlwimi r10,r4,19,10,11 rlwimi r10,r0,16,14,15 .loc_0x38: li r0, 0x61 lwz r4, -0x6D70(r2) lis r5, 0xCC01 stb r0, -0x8000(r5) rlwimi r10,r7,19,12,12 rlwimi r10,r8,22,8,9 stw r10, -0x8000(r5) add r3, r4, r9 
li r0, 0 stw r10, 0x130(r3) sth r0, 0x2(r4) blr */ } /* * --INFO-- * Address: 800E85F8 * Size: 000068 */ void GXSetTevAlphaOp(void) { /* .loc_0x0: lwz r0, -0x6D70(r2) rlwinm r9,r3,2,0,29 cmpwi r4, 0x1 add r3, r0, r9 lwz r3, 0x170(r3) rlwimi r3,r4,18,13,13 addi r10, r3, 0 bgt- .loc_0x2C rlwimi r10,r6,20,10,11 rlwimi r10,r5,16,14,15 b .loc_0x38 .loc_0x2C: li r0, 0x3 rlwimi r10,r4,19,10,11 rlwimi r10,r0,16,14,15 .loc_0x38: li r0, 0x61 lwz r4, -0x6D70(r2) lis r5, 0xCC01 stb r0, -0x8000(r5) rlwimi r10,r7,19,12,12 rlwimi r10,r8,22,8,9 stw r10, -0x8000(r5) add r3, r4, r9 li r0, 0 stw r10, 0x170(r3) sth r0, 0x2(r4) blr */ } /* * --INFO-- * Address: 800E8660 * Size: 00007C */ void GXSetTevColor(void) { /* .loc_0x0: lbz r5, 0x0(r4) rlwinm r7,r3,1,0,30 lbz r0, 0x3(r4) li r3, 0 rlwimi r3,r5,0,21,31 addi r8, r3, 0 lbz r6, 0x2(r4) rlwimi r8,r0,12,9,19 lbz r0, 0x1(r4) li r9, 0 lwz r3, -0x6D70(r2) rlwimi r9,r6,0,21,31 addi r6, r9, 0 rlwimi r6,r0,12,9,19 addi r0, r7, 0xE0 rlwimi r8,r0,24,0,7 addi r0, r7, 0xE1 rlwimi r6,r0,24,0,7 li r5, 0x61 lis r4, 0xCC01 stb r5, -0x8000(r4) li r0, 0 stw r8, -0x8000(r4) stb r5, -0x8000(r4) stw r6, -0x8000(r4) stb r5, -0x8000(r4) stw r6, -0x8000(r4) stb r5, -0x8000(r4) stw r6, -0x8000(r4) sth r0, 0x2(r3) blr */ } /* * --INFO-- * Address: 800E86DC * Size: 00007C */ void GXSetTevColorS10(void) { /* .loc_0x0: lha r0, 0x0(r4) rlwinm r8,r3,1,0,30 li r5, 0 lha r3, 0x6(r4) rlwimi r5,r0,0,21,31 addi r9, r5, 0 lha r6, 0x4(r4) lha r0, 0x2(r4) li r5, 0x61 lis r4, 0xCC01 rlwimi r9,r3,12,9,19 stb r5, -0x8000(r4) addi r7, r8, 0xE0 lwz r3, -0x6D70(r2) rlwimi r9,r7,24,0,7 stw r9, -0x8000(r4) li r7, 0 rlwimi r7,r6,0,21,31 addi r6, r7, 0 stb r5, -0x8000(r4) rlwimi r6,r0,12,9,19 addi r0, r8, 0xE1 rlwimi r6,r0,24,0,7 stw r6, -0x8000(r4) li r0, 0 stb r5, -0x8000(r4) stw r6, -0x8000(r4) stb r5, -0x8000(r4) stw r6, -0x8000(r4) sth r0, 0x2(r3) blr */ } /* * --INFO-- * Address: 800E8758 * Size: 000074 */ void GXSetTevKColor(void) { /* .loc_0x0: lbz r5, 0x0(r4) rlwinm 
r6,r3,1,0,30 lbz r0, 0x3(r4) li r3, 0 rlwimi r3,r5,0,24,31 addi r7, r3, 0 lbz r5, 0x2(r4) rlwimi r7,r0,12,12,19 lbz r0, 0x1(r4) li r4, 0x8 lwz r3, -0x6D70(r2) li r8, 0 rlwimi r8,r5,0,24,31 rlwimi r8,r0,12,12,19 rlwimi r7,r4,20,8,11 rlwimi r8,r4,20,8,11 addi r5, r6, 0xE0 rlwimi r7,r5,24,0,7 addi r5, r6, 0xE1 li r0, 0x61 lis r4, 0xCC01 stb r0, -0x8000(r4) rlwimi r8,r5,24,0,7 stw r7, -0x8000(r4) stb r0, -0x8000(r4) li r0, 0 stw r8, -0x8000(r4) sth r0, 0x2(r3) blr */ } /* * --INFO-- * Address: 800E87CC * Size: 00005C */ void GXSetTevKColorSel(void) { /* .loc_0x0: srawi r5, r3, 0x1 lwz r6, -0x6D70(r2) rlwinm. r0,r3,0,31,31 rlwinm r3,r5,2,0,29 addi r7, r3, 0x1B0 add r7, r6, r7 beq- .loc_0x2C lwz r0, 0x0(r7) rlwimi r0,r4,14,13,17 stw r0, 0x0(r7) b .loc_0x38 .loc_0x2C: lwz r0, 0x0(r7) rlwimi r0,r4,4,23,27 stw r0, 0x0(r7) .loc_0x38: li r0, 0x61 lwz r3, -0x6D70(r2) lis r5, 0xCC01 stb r0, -0x8000(r5) li r0, 0 lwz r4, 0x0(r7) stw r4, -0x8000(r5) sth r0, 0x2(r3) blr */ } /* * --INFO-- * Address: 800E8828 * Size: 00005C */ void GXSetTevKAlphaSel(void) { /* .loc_0x0: srawi r5, r3, 0x1 lwz r6, -0x6D70(r2) rlwinm. 
r0,r3,0,31,31 rlwinm r3,r5,2,0,29 addi r7, r3, 0x1B0 add r7, r6, r7 beq- .loc_0x2C lwz r0, 0x0(r7) rlwimi r0,r4,19,8,12 stw r0, 0x0(r7) b .loc_0x38 .loc_0x2C: lwz r0, 0x0(r7) rlwimi r0,r4,9,18,22 stw r0, 0x0(r7) .loc_0x38: li r0, 0x61 lwz r3, -0x6D70(r2) lis r5, 0xCC01 stb r0, -0x8000(r5) li r0, 0 lwz r4, 0x0(r7) stw r4, -0x8000(r5) sth r0, 0x2(r3) blr */ } /* * --INFO-- * Address: 800E8884 * Size: 000048 */ void GXSetTevSwapMode(void) { /* .loc_0x0: rlwinm r3,r3,2,0,29 lwz r7, -0x6D70(r2) addi r8, r3, 0x170 add r8, r7, r8 lwz r0, 0x0(r8) rlwimi r0,r4,0,30,31 li r3, 0x61 stw r0, 0x0(r8) lis r4, 0xCC01 li r0, 0 lwz r6, 0x0(r8) rlwimi r6,r5,2,28,29 stw r6, 0x0(r8) stb r3, -0x8000(r4) lwz r3, 0x0(r8) stw r3, -0x8000(r4) sth r0, 0x2(r7) blr */ } /* * --INFO-- * Address: 800E88CC * Size: 000080 */ void GXSetTevSwapModeTable(void) { /* .loc_0x0: rlwinm r9,r3,1,0,30 lwz r8, -0x6D70(r2) rlwinm r3,r3,3,0,28 addi r10, r3, 0x1B0 add r10, r8, r10 lwz r3, 0x0(r10) rlwimi r3,r4,0,30,31 addi r0, r9, 0x1 stw r3, 0x0(r10) rlwinm r3,r0,2,0,29 addi r9, r3, 0x1B0 lwz r0, 0x0(r10) rlwimi r0,r5,2,28,29 li r5, 0x61 stw r0, 0x0(r10) lis r4, 0xCC01 add r9, r8, r9 stb r5, -0x8000(r4) li r0, 0 lwz r3, 0x0(r10) stw r3, -0x8000(r4) lwz r3, 0x0(r9) rlwimi r3,r6,0,30,31 stw r3, 0x0(r9) lwz r3, 0x0(r9) rlwimi r3,r7,2,28,29 stw r3, 0x0(r9) stb r5, -0x8000(r4) lwz r3, 0x0(r9) stw r3, -0x8000(r4) sth r0, 0x2(r8) blr */ } /* * --INFO-- * Address: ........ 
* Size: 000004 */ void GXSetTevClampMode(void) { // UNUSED FUNCTION } /* * --INFO-- * Address: 800E894C * Size: 000044 */ void GXSetAlphaCompare(void) { /* .loc_0x0: rlwinm r0,r4,0,24,31 lwz r4, -0x6D70(r2) lis r8, 0xF300 rlwimi r8,r0,0,24,31 rlwinm r0,r7,0,24,31 addi r7, r8, 0 rlwimi r7,r0,8,16,23 rlwimi r7,r3,16,13,15 rlwimi r7,r6,19,10,12 li r0, 0x61 lis r3, 0xCC01 stb r0, -0x8000(r3) rlwimi r7,r5,22,8,9 li r0, 0 stw r7, -0x8000(r3) sth r0, 0x2(r4) blr */ } /* * --INFO-- * Address: 800E8990 * Size: 00008C */ void GXSetZTexture(void) { /* .loc_0x0: cmpwi r4, 0x13 li r0, 0xF4 li r6, 0 rlwimi r6,r5,0,8,31 rlwimi r6,r0,24,0,7 li r7, 0 beq- .loc_0x40 bge- .loc_0x2C cmpwi r4, 0x11 beq- .loc_0x38 b .loc_0x50 .loc_0x2C: cmpwi r4, 0x16 beq- .loc_0x48 b .loc_0x50 .loc_0x38: li r8, 0 b .loc_0x54 .loc_0x40: li r8, 0x1 b .loc_0x54 .loc_0x48: li r8, 0x2 b .loc_0x54 .loc_0x50: li r8, 0x2 .loc_0x54: li r0, 0x61 lwz r4, -0x6D70(r2) lis r5, 0xCC01 stb r0, -0x8000(r5) rlwimi r7,r8,0,30,31 rlwimi r7,r3,2,28,29 stw r6, -0x8000(r5) li r3, 0xF5 rlwimi r7,r3,24,0,7 stb r0, -0x8000(r5) li r0, 0 stw r7, -0x8000(r5) sth r0, 0x2(r4) blr */ } /* * --INFO-- * Address: 800E8A1C * Size: 00019C */ void GXSetTevOrder(void) { /* .loc_0x0: srawi r7, r3, 0x1 lwz r9, -0x6D70(r2) rlwinm r10,r5,0,24,22 addze r7, r7 rlwinm r0,r3,2,0,29 add r8, r9, r0 rlwinm r7,r7,2,0,29 stw r5, 0x554(r8) addi r7, r7, 0x100 cmplwi r10, 0x8 addi r0, r10, 0 add r7, r9, r7 blt- .loc_0x3C li r9, 0 b .loc_0x40 .loc_0x3C: mr r9, r0 .loc_0x40: cmpwi r4, 0x8 blt- .loc_0x6C lwz r4, -0x6D70(r2) li r0, 0x1 slw r0, r0, r3 lwz r8, 0x598(r4) addi r10, r4, 0x598 li r4, 0 andc r0, r8, r0 stw r0, 0x0(r10) b .loc_0x88 .loc_0x6C: lwz r8, -0x6D70(r2) li r0, 0x1 slw r0, r0, r3 addi r10, r8, 0x598 lwz r8, 0x598(r8) or r0, r8, r0 stw r0, 0x0(r10) .loc_0x88: rlwinm. 
r0,r3,0,31,31 beq- .loc_0x100 lwz r0, 0x0(r7) rlwimi r0,r9,12,17,19 cmpwi r6, 0xFF stw r0, 0x0(r7) lwz r0, 0x0(r7) rlwimi r0,r4,15,14,16 stw r0, 0x0(r7) bne- .loc_0xB8 li r3, 0x7 b .loc_0xCC .loc_0xB8: lis r3, 0x804B rlwinm r4,r6,2,0,29 subi r0, r3, 0x7838 add r3, r0, r4 lwz r3, 0x0(r3) .loc_0xCC: lwz r0, 0x0(r7) rlwimi r0,r3,19,10,12 cmpwi r5, 0xFF stw r0, 0x0(r7) li r3, 0 beq- .loc_0xF0 rlwinm. r0,r5,0,23,23 bne- .loc_0xF0 li r3, 0x1 .loc_0xF0: lwz r0, 0x0(r7) rlwimi r0,r3,18,13,13 stw r0, 0x0(r7) b .loc_0x16C .loc_0x100: lwz r0, 0x0(r7) rlwimi r0,r9,0,29,31 cmpwi r6, 0xFF stw r0, 0x0(r7) lwz r0, 0x0(r7) rlwimi r0,r4,3,26,28 stw r0, 0x0(r7) bne- .loc_0x128 li r3, 0x7 b .loc_0x13C .loc_0x128: lis r3, 0x804B rlwinm r4,r6,2,0,29 subi r0, r3, 0x7838 add r3, r0, r4 lwz r3, 0x0(r3) .loc_0x13C: lwz r0, 0x0(r7) rlwimi r0,r3,7,22,24 cmpwi r5, 0xFF stw r0, 0x0(r7) li r3, 0 beq- .loc_0x160 rlwinm. r0,r5,0,23,23 bne- .loc_0x160 li r3, 0x1 .loc_0x160: lwz r0, 0x0(r7) rlwimi r0,r3,6,25,25 stw r0, 0x0(r7) .loc_0x16C: li r0, 0x61 lwz r3, -0x6D70(r2) lis r5, 0xCC01 stb r0, -0x8000(r5) li r0, 0 lwz r4, 0x0(r7) stw r4, -0x8000(r5) sth r0, 0x2(r3) lwz r0, 0x5AC(r3) ori r0, r0, 0x1 stw r0, 0x5AC(r3) blr */ } /* * --INFO-- * Address: 800E8BB8 * Size: 000028 */ void GXSetNumTevStages(void) { /* .loc_0x0: lwz r4, -0x6D70(r2) rlwinm r3,r3,0,24,31 subi r0, r3, 0x1 lwz r3, 0x204(r4) rlwimi r3,r0,10,18,21 stw r3, 0x204(r4) lwz r0, 0x5AC(r4) ori r0, r0, 0x4 stw r0, 0x5AC(r4) blr */ }
the_stack_data/132952248.c
/* { dg-options { -nostartfiles below100.o -Tbelow100.ld -O2 } } */ /* { dg-final { scan-assembler "clr1 B100,#7" } } */ typedef struct { unsigned char b0:1; unsigned char b1:1; unsigned char b2:1; unsigned char b3:1; unsigned char b4:1; unsigned char b5:1; unsigned char b6:1; unsigned char b7:1; } BitField; char acDummy[0xf0] __attribute__ ((__BELOW100__)); BitField B100 __attribute__ ((__BELOW100__)) = { 1, 1, 0, 1, 0, 0, 1, 1}; unsigned char *p = (unsigned char *) &B100; void Do (void) { B100.b7 = 0; } int main (void) { Do (); return (*p == 0x4b) ? 0 : 1; }
the_stack_data/150140662.c
/* Empty placeholder routine — note the pre-existing typo in the name
 * ("funtion"); left as-is since external code may reference it. */
void some_funtion()
{
}

/* Freestanding kernel stub: writes the character 'X' into the VGA
 * text-mode framebuffer at physical address 0xB8000 (the top-left
 * screen cell), then calls the placeholder.  `void main` is
 * non-standard, but common in -ffreestanding kernels where there is
 * no hosted C runtime to receive a return value. */
void main()
{
	char* video_memory = (char*) 0xb8000;
	*video_memory = 'X';
	some_funtion();
}
the_stack_data/1223049.c
#include <stdio.h>

/* Prompts for a floating-point number and echoes it back in several
 * printf formats (fixed-point and scientific notation). */
int main(void)
{
    float value;

    printf("Enter float number: \n");
    scanf("%f", &value);

    /* Same number, different precision/notation combinations. */
    printf("The input is %.1f or %.1e\n", value, value);
    printf("The input is %+.3f or %.3E\n", value, value);
    return 0;
}
the_stack_data/24204.c
/* Generated by CIL v. 1.7.3 */ /* print_CIL_Input is true */ #line 213 "/usr/lib/gcc/x86_64-linux-gnu/4.7/include/stddef.h" typedef unsigned long size_t; #line 140 "/usr/include/x86_64-linux-gnu/bits/types.h" typedef long __off_t; #line 141 "/usr/include/x86_64-linux-gnu/bits/types.h" typedef long __off64_t; #line 44 "/usr/include/stdio.h" struct _IO_FILE; #line 48 "/usr/include/stdio.h" typedef struct _IO_FILE FILE; #line 155 "/usr/include/libio.h" typedef void _IO_lock_t; #line 161 "/usr/include/libio.h" struct _IO_marker { struct _IO_marker *_next ; struct _IO_FILE *_sbuf ; int _pos ; }; #line 246 "/usr/include/libio.h" struct _IO_FILE { int _flags ; char *_IO_read_ptr ; char *_IO_read_end ; char *_IO_read_base ; char *_IO_write_base ; char *_IO_write_ptr ; char *_IO_write_end ; char *_IO_buf_base ; char *_IO_buf_end ; char *_IO_save_base ; char *_IO_backup_base ; char *_IO_save_end ; struct _IO_marker *_markers ; struct _IO_FILE *_chain ; int _fileno ; int _flags2 ; __off_t _old_offset ; unsigned short _cur_column ; signed char _vtable_offset ; char _shortbuf[1] ; _IO_lock_t *_lock ; __off64_t _offset ; void *__pad1 ; void *__pad2 ; void *__pad3 ; void *__pad4 ; size_t __pad5 ; int _mode ; char _unused2[(15UL * sizeof(int ) - 4UL * sizeof(void *)) - sizeof(size_t )] ; }; #line 75 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" struct __pthread_internal_list { struct __pthread_internal_list *__prev ; struct __pthread_internal_list *__next ; }; #line 75 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" typedef struct __pthread_internal_list __pthread_list_t; #line 90 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" struct __pthread_mutex_s { int __lock ; unsigned int __count ; int __owner ; unsigned int __nusers ; int __kind ; int __spins ; __pthread_list_t __list ; }; #line 90 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" union __anonunion_pthread_mutex_t_7 { struct __pthread_mutex_s __data ; char __size[40] ; long __align ; }; #line 90 
"/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" typedef union __anonunion_pthread_mutex_t_7 pthread_mutex_t; #line 9 "queue_ts.h" struct _node { void *data ; struct _node *next ; }; #line 13 "queue_ts.h" typedef struct _node node; #line 15 "queue_ts.h" struct _end_q { node *first ; node *last ; }; #line 19 "queue_ts.h" typedef struct _end_q queue; #line 148 "/usr/include/x86_64-linux-gnu/bits/types.h" typedef long __time_t; #line 75 "/usr/include/time.h" typedef __time_t time_t; #line 60 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" typedef unsigned long pthread_t; #line 63 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" union pthread_attr_t { char __size[56] ; long __align ; }; #line 69 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" typedef union pthread_attr_t pthread_attr_t; #line 19 "fifo_test.c" struct _stru { int number ; int thread_no ; queue *q ; }; #line 1 "fifo.o" #pragma merger("0","/tmp/cil-Ub9rMHsE.i","-Wall,-Wstrict-prototypes,-Werror,-g") #line 170 "/usr/include/stdio.h" extern struct _IO_FILE *stderr ; #line 356 extern int fprintf(FILE * __restrict __stream , char const * __restrict __format , ...) 
; #line 753 "/usr/include/pthread.h" extern __attribute__((__nothrow__)) int ( __attribute__((__nonnull__(1))) pthread_mutex_lock)(pthread_mutex_t *__mutex ) ; #line 764 extern __attribute__((__nothrow__)) int ( __attribute__((__nonnull__(1))) pthread_mutex_unlock)(pthread_mutex_t *__mutex ) ; #line 465 "/usr/include/stdlib.h" extern __attribute__((__nothrow__)) void *( __attribute__((__leaf__)) malloc)(size_t __size ) __attribute__((__malloc__)) ; #line 482 extern __attribute__((__nothrow__)) void ( __attribute__((__leaf__)) free)(void *__ptr ) ; #line 23 "queue_ts.h" queue *queue_factory(void) ; #line 26 void queue_destroy(queue *que ) ; #line 31 int enque(queue *que , void *data ) ; #line 36 void *deque(queue *que ) ; #line 32 "fifo.c" pthread_mutex_t mutex = {{0, 0U, 0, 0U, 0, 0, {(struct __pthread_internal_list *)0, (struct __pthread_internal_list *)0}}}; #line 37 "fifo.c" queue *queue_factory(void) { queue *new_queue ; void *tmp ; { #line 39 tmp = malloc(sizeof(queue )); #line 39 new_queue = (queue *)tmp; #line 40 if ((unsigned long )new_queue == (unsigned long )((void *)0)) { #line 41 fprintf((FILE * __restrict )stderr, (char const * __restrict )"Malloc failed creating the que\n"); #line 42 return ((queue *)((void *)0)); } #line 44 new_queue->first = (node *)((void *)0); #line 45 new_queue->last = (node *)((void *)0); #line 49 return (new_queue); } } #line 52 "fifo.c" void queue_destroy(queue *que ) { node *_node ; node *tmp ; { #line 57 if ((unsigned long )que == (unsigned long )((void *)0)) { #line 58 return; } #line 63 pthread_mutex_lock(& mutex); #line 64 if ((unsigned long )que->first == (unsigned long )((void *)0)) { #line 66 free((void *)que); #line 67 pthread_mutex_unlock(& mutex); #line 68 return; } #line 73 _node = que->first; #line 75 while ((unsigned long )_node != (unsigned long )((void *)0)) { #line 79 free(_node->data); #line 80 tmp = _node->next; #line 81 free((void *)_node); #line 82 _node = tmp; } #line 85 free((void *)que); #line 87 
pthread_mutex_unlock(& mutex); #line 88 return; } } #line 94 "fifo.c" int enque(queue *que , void *data ) { node *new_node ; void *tmp ; { #line 96 tmp = malloc(sizeof(node )); #line 96 new_node = (node *)tmp; #line 97 if ((unsigned long )new_node == (unsigned long )((void *)0)) { #line 98 fprintf((FILE * __restrict )stderr, (char const * __restrict )"Malloc failed creating a node\n"); #line 99 return (-1); } #line 102 new_node->data = data; #line 103 new_node->next = (struct _node *)((void *)0); #line 105 pthread_mutex_lock(& mutex); #line 106 if ((unsigned long )que->first == (unsigned long )((void *)0)) { #line 108 que->first = new_node; #line 109 que->last = new_node; } else { #line 111 (que->last)->next = new_node; #line 112 que->last = new_node; } #line 114 pthread_mutex_unlock(& mutex); #line 116 return (0); } } #line 119 "fifo.c" void *deque(queue *que ) { void *data ; node *_node ; { #line 122 if ((unsigned long )que == (unsigned long )((void *)0)) { #line 124 return ((void *)0); } #line 128 pthread_mutex_lock(& mutex); #line 129 if ((unsigned long )que->first == (unsigned long )((void *)0)) { #line 130 pthread_mutex_unlock(& mutex); #line 132 return ((void *)0); } #line 136 _node = que->first; #line 137 if ((unsigned long )que->first == (unsigned long )que->last) { #line 138 que->first = (node *)((void *)0); #line 139 que->last = (node *)((void *)0); } else { #line 141 que->first = _node->next; } #line 144 data = _node->data; #line 147 free((void *)_node); #line 148 pthread_mutex_unlock(& mutex); #line 150 return (data); } } #line 1 "fifo_test.o" #pragma merger("0","/tmp/cil-nlY2RDlj.i","-Wall,-Wstrict-prototypes,-Werror,-g") #line 362 "/usr/include/stdio.h" extern int printf(char const * __restrict __format , ...) ; #line 386 extern __attribute__((__nothrow__)) int ( /* format attribute */ snprintf)(char * __restrict __s , size_t __maxlen , char const * __restrict __format , ...) 
; #line 192 "/usr/include/time.h" extern __attribute__((__nothrow__)) time_t ( __attribute__((__leaf__)) time)(time_t *__timer ) ; #line 232 "/usr/include/pthread.h" extern __attribute__((__nothrow__)) int ( __attribute__((__nonnull__(1,3))) pthread_create)(pthread_t * __restrict __newthread , pthread_attr_t const * __restrict __attr , void *(*__start_routine)(void * ) , void * __restrict __arg ) ; #line 249 extern int pthread_join(pthread_t __th , void **__thread_return ) ; #line 374 "/usr/include/stdlib.h" extern __attribute__((__nothrow__)) int ( __attribute__((__leaf__)) rand)(void) ; #line 376 extern __attribute__((__nothrow__)) void ( __attribute__((__leaf__)) srand)(unsigned int __seed ) ; #line 25 "fifo_test.c" void *func(void *arg ) { struct _stru *args ; int number ; queue *q ; int i ; time_t tmp ; char *message ; void *tmp___0 ; int tmp___1 ; { #line 27 args = (struct _stru *)arg; #line 28 number = args->number; #line 30 q = args->q; #line 33 tmp = time((time_t *)((void *)0)); #line 33 srand((unsigned int )tmp); #line 34 i = 0; #line 34 while (i < number) { #line 35 tmp___0 = malloc((size_t )16); #line 35 message = (char *)tmp___0; #line 36 tmp___1 = rand(); #line 36 snprintf((char * __restrict )message, (size_t )15, (char const * __restrict )"rand: %d", tmp___1); #line 37 enque(q, (void *)message); #line 34 i ++; } #line 40 return ((void *)0); } } #line 44 "fifo_test.c" void *func_d(void *args ) { queue *q ; void *data ; char *string ; { #line 46 printf((char const * __restrict )"Func_D running....\n"); #line 48 q = (queue *)args; #line 51 while (1) { #line 51 data = deque(q); #line 51 if (! 
((unsigned long )data != (unsigned long )((void *)0))) { #line 51 break; } #line 52 string = (char *)data; #line 53 printf((char const * __restrict )"DeQued : %s, @%p\n", string, data); #line 54 free(data); } #line 56 printf((char const * __restrict )"Func_D exiting....\n"); #line 58 return ((void *)0); } } #line 61 "fifo_test.c" int main(void) { queue *q ; queue *tmp ; pthread_t threads[6] ; pthread_t thread_d[5] ; int i ; struct _stru arg[6] ; { #line 63 tmp = queue_factory(); #line 63 q = tmp; #line 70 i = 0; #line 70 while (i < 6) { #line 71 arg[i].number = 1000; #line 72 arg[i].thread_no = i; #line 73 arg[i].q = q; #line 74 pthread_create((pthread_t * __restrict )(threads + i), (pthread_attr_t const * __restrict )((void *)0), & func, (void * __restrict )((void *)(& arg[i]))); #line 70 i ++; } #line 77 i = 0; #line 77 while (i < 5) { #line 78 pthread_create((pthread_t * __restrict )(thread_d + i), (pthread_attr_t const * __restrict )((void *)0), & func_d, (void * __restrict )((void *)q)); #line 77 i ++; } #line 81 i = 0; #line 81 while (i < 6) { #line 82 pthread_join(*(threads + i), (void **)((void *)0)); #line 81 i ++; } #line 85 i = 0; #line 85 while (i < 5) { #line 86 pthread_join(*(thread_d + i), (void **)((void *)0)); #line 85 i ++; } #line 89 printf((char const * __restrict )"Going to run queue_destroy(...) \n"); #line 90 queue_destroy(q); #line 91 printf((char const * __restrict )"que freeed... :) \n"); #line 93 return (0); } }
the_stack_data/237641902.c
//c. To print all prime numbers from 0 to a given number ‘n’
#include <stdio.h>

/* Returns 1 if x is prime, 0 otherwise (simple trial division). */
static int is_prime(int x)
{
    if (x < 2)
        return 0;
    for (int j = 2; j <= x / 2; j++)
    {
        if (x % j == 0)
            return 0;
    }
    return 1;
}

/*
 * Reads a starting and an ending range and prints the primes between
 * them.
 *
 * Fixed: the original read the starting range `s` but then ignored it,
 * always scanning from 2 — the printed header promised "between s and
 * n" while the output did not honour s.  The scan now starts at
 * max(s, 2).
 */
int main()
{
    int s, n;
    printf("Enter a starting range other than 0 and 1:");
    scanf("%d", &s);
    printf("Enter a ending range:");
    scanf("%d", &n);
    printf("prime numbers between %d and %d : ", s, n);

    /* Clamp to 2: there are no primes below it. */
    int start = (s > 2) ? s : 2;
    for (int i = start; i <= n; i++)
    {
        if (is_prime(i))
            printf("%d ", i);
    }
    return 0;
}
the_stack_data/86076084.c
//@ ltl invariant negative: (X (AP(x_2 - x_3 >= -1) && ([] AP(x_3 - x_2 > -10)))); float x_0; float x_1; float x_2; float x_3; int main() { float x_0_; float x_1_; float x_2_; float x_3_; while(1) { x_0_ = ((12.0 + x_1) > (10.0 + x_3)? (12.0 + x_1) : (10.0 + x_3)); x_1_ = ((11.0 + x_1) > (14.0 + x_2)? (11.0 + x_1) : (14.0 + x_2)); x_2_ = ((6.0 + x_0) > (15.0 + x_2)? (6.0 + x_0) : (15.0 + x_2)); x_3_ = ((17.0 + x_0) > (10.0 + x_1)? (17.0 + x_0) : (10.0 + x_1)); x_0 = x_0_; x_1 = x_1_; x_2 = x_2_; x_3 = x_3_; } return 0; }
the_stack_data/175141905.c
/* * This file is part of Espruino, a JavaScript interpreter for Microcontrollers * * Copyright (C) 2017 Gordon Williams <[email protected]> * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * ---------------------------------------------------------------------------- * This file is designed to be parsed during the build process * * Contains WiFi functions * ---------------------------------------------------------------------------- */ /*JSON{ "type": "library", "class": "Wifi" } The Wifi library is designed to control the Wifi interface. It supports functionality such as connecting to wifi networks, getting network information, starting an access point, etc. It is available on these devices: * [Espruino WiFi](http://www.espruino.com/WiFi#using-wifi) * [ESP8266](http://www.espruino.com/EspruinoESP8266) * [ESP32](http://www.espruino.com/ESP32) **Certain features may or may not be implemented on your device** however we have documented what is available and what isn't. If you're not using one of the devices above, a separate WiFi library is provided. For instance: * An [ESP8266 connected to an Espruino board](http://www.espruino.com/ESP8266#software) * An [CC3000 WiFi Module](http://www.espruino.com/CC3000) [Other ways of connecting to the net](http://www.espruino.com/Internet#related-pages) such as GSM, Ethernet and LTE have their own libraries. You can use the WiFi library as follows: ``` var wifi = require("Wifi"); wifi.connect("my-ssid", {password:"my-pwd"}, function(ap){ console.log("connected:", ap); }); ``` On ESP32/ESP8266 if you want the connection to happen automatically at boot, add `wifi.save();`. On other platforms, place `wifi.connect` in a function called `onInit`. 
*/ /*JSON{ "type" : "event", "class" : "Wifi", "name" : "associated", "params" : [ ["details","JsVar","An object with event details"] ] } The 'associated' event is called when an association with an access point has succeeded, i.e., a connection to the AP's network has been established. On ESP32/ESP8266 there is a `details` parameter which includes: * ssid - The SSID of the access point to which the association was established * mac - The BSSID/mac address of the access point * channel - The wifi channel used (an integer, typ 1..14) */ /*JSON{ "type" : "event", "class" : "Wifi", "name" : "disconnected", "params" : [ ["details","JsVar","An object with event details"] ] } The 'disconnected' event is called when an association with an access point has been lost. On ESP32/ESP8266 there is a `details` parameter which includes: * ssid - The SSID of the access point from which the association was lost * mac - The BSSID/mac address of the access point * reason - The reason for the disconnection (string) */ /*JSON{ "type" : "event", "class" : "Wifi", "name" : "auth_change", "#if" : "defined(ESP32) || defined(ESP8266)", "params" : [ ["details","JsVar","An object with event details"] ] } The 'auth_change' event is called when the authentication mode with the associated access point changes. The details include: * oldMode - The old auth mode (string: open, wep, wpa, wpa2, wpa_wpa2) * newMode - The new auth mode (string: open, wep, wpa, wpa2, wpa_wpa2) */ /*JSON{ "type" : "event", "class" : "Wifi", "name" : "dhcp_timeout", "#if" : "defined(ESP32) || defined(ESP8266)" } The 'dhcp_timeout' event is called when a DHCP request to the connected access point fails and thus no IP address could be acquired (or renewed). */ /*JSON{ "type" : "event", "class" : "Wifi", "name" : "connected", "params" : [ ["details","JsVar","An object with event details"] ] } The 'connected' event is called when the connection with an access point is ready for traffic. 
In the case of a dynamic IP address configuration this is when an IP address is obtained, in the case of static IP address allocation this happens when an association is formed (in that case the 'associated' and 'connected' events are fired in rapid succession). On ESP32/ESP8266 there is a `details` parameter which includes: * ip - The IP address obtained as string * netmask - The network's IP range mask as string * gw - The network's default gateway as string */ /*JSON{ "type" : "event", "class" : "Wifi", "name" : "sta_joined", "#if" : "defined(ESP32) || defined(ESP8266)", "params" : [ ["details","JsVar","An object with event details"] ] } The 'sta_joined' event is called when a station establishes an association (i.e. connects) with the esp8266's access point. The details include: * mac - The MAC address of the station in string format (00:00:00:00:00:00) */ /*JSON{ "type" : "event", "class" : "Wifi", "name" : "sta_left", "#if" : "defined(ESP32) || defined(ESP8266)", "params" : [ ["details","JsVar","An object with event details"] ] } The 'sta_left' event is called when a station disconnects from the esp8266's access point (or its association times out?). The details include: * mac - The MAC address of the station in string format (00:00:00:00:00:00) */ /*JSON{ "type" : "event", "class" : "Wifi", "name" : "probe_recv", "#if" : "defined(ESP32) || defined(ESP8266)", "params" : [ ["details","JsVar","An object with event details"] ] } The 'probe_recv' event is called when a probe request is received from some station by the esp8266's access point. The details include: * mac - The MAC address of the station in string format (00:00:00:00:00:00) * rssi - The signal strength in dB of the probe request */ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "disconnect", "generate" : "jswrap_wifi_disconnect", "params" : [ ["callback", "JsVar", "An optional `callback()` function to be called back on disconnection. 
The callback function receives no argument."] ] } Disconnect the wifi station from an access point and disable the station mode. It is OK to call `disconnect` to turn off station mode even if no connection exists (for example, connection attempts may be failing). Station mode can be re-enabled by calling `connect` or `scan`. */ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "stopAP", "generate" : "jswrap_wifi_stopAP", "params" : [ ["callback", "JsVar", "An optional `callback()` function to be called back on successful stop. The callback function receives no argument."] ] } Stop being an access point and disable the AP operation mode. AP mode can be re-enabled by calling `startAP`. */ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "connect", "generate" : "jswrap_wifi_connect", "params" : [ ["ssid", "JsVar", "The access point network id."], ["options", "JsVar", "Connection options (optional)."], ["callback", "JsVar", "A `callback(err)` function to be called back on completion. `err` is null on success, or contains an error string on failure."] ] } Connect to an access point as a station. If there is an existing connection to an AP it is first disconnected if the SSID or password are different from those passed as parameters. Put differently, if the passed SSID and password are identical to the currently connected AP then nothing is changed. When the connection attempt completes the callback function is invoked with one `err` parameter, which is NULL if there is no error and a string message if there is an error. If DHCP is enabled the callback occurs once an IP address has been obtained, if a static IP is set the callback occurs once the AP's network has been joined. The callback is also invoked if a connection already exists and does not need to be changed. The options properties may contain: * `password` - Password string to be used to access the network.
* `dnsServers` (array of String) - An array of up to two DNS servers in dotted decimal format string. * `channel` - Wifi channel of the access point (integer, typ 0..14, 0 means any channel), only on ESP8266. * `bssid` - Mac address of the access point (string, type "00:00:00:00:00:00"), only on ESP8266. Notes: * the options should include the ability to set a static IP and associated netmask and gateway, this is a future enhancement. * the only error reported in the callback is "Bad password", all other errors (such as access point not found or DHCP timeout) just cause connection retries. If the reporting of such temporary errors is desired, the caller must use its own timeout and the `getDetails().status` field. * the `connect` call automatically enabled station mode, it can be disabled again by calling `disconnect`. */ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "scan", "generate" : "jswrap_wifi_scan", "params" : [ ["callback", "JsVar", "A `callback(err, ap_list)` function to be called back on completion. `err==null` and `ap_list` is an array on success, or `err` is an error string and `ap_list` is undefined on failure."] ] } Perform a scan for access points. This will enable the station mode if it is not currently enabled. Once the scan is complete the callback function is called with an array of APs found, each AP is an object with: * `ssid`: SSID string. * `mac`: access point MAC address in 00:00:00:00:00:00 format. * `authMode`: `open`, `wep`, `wpa`, `wpa2`, or `wpa_wpa2`. * `channel`: wifi channel 1..13. * `hidden`: true if the SSID is hidden (ESP32/ESP8266 only) * `rssi`: signal strength in dB in the range -110..0. Notes: * in order to perform the scan the station mode is turned on and remains on, use Wifi.disconnect() to turn it off again, if desired. * only one scan can be in progress at a time. 
*/ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "startAP", "generate" : "jswrap_wifi_startAP", "params" : [ ["ssid", "JsVar", "The network id."], ["options", "JsVar", "Configuration options (optional)."], ["callback", "JsVar", "Optional `callback(err)` function to be called when the AP is successfully started. `err==null` on success, or an error string on failure."] ] } Create a WiFi access point allowing stations to connect. If the password is NULL or an empty string the access point is open, otherwise it is encrypted. The callback function is invoked once the access point is set-up and receives one `err` argument, which is NULL on success and contains an error message string otherwise. The `options` object can contain the following properties. * `authMode` - The authentication mode to use. Can be one of "open", "wpa2", "wpa", "wpa_wpa2". The default is open (but open access points are not recommended). * `password` - The password for connecting stations if authMode is not open. * `channel` - The channel to be used for the access point in the range 1..13. If the device is also connected to an access point as a station then that access point determines the channel. * `hidden` - The flag if visible or not (0:visible, 1:hidden), default is visible. Notes: * the options should include the ability to set the AP IP and associated netmask, this is a future enhancement. * the `startAP` call automatically enables AP mode. It can be disabled again by calling `stopAP`. */ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "getStatus", "generate" : "jswrap_wifi_getStatus", "#if" : "defined(ESP32) || defined(ESP8266)", "return" : ["JsVar", "An object representing the current WiFi status, if available immediately."], "params" : [ ["callback", "JsVar", "Optional `callback(status)` function to be called back with the current Wifi status, i.e. the same object as returned directly."] ] } Retrieve the current overall WiFi configuration. 
This call provides general information that pertains to both station and access point modes. The getDetails and getAPDetails calls provide more in-depth information about the station and access point configurations, respectively. The status object has the following properties: * `station` - Status of the wifi station: `off`, `connecting`, ... * `ap` - Status of the wifi access point: `disabled`, `enabled`. * `mode` - The current operation mode: `off`, `sta`, `ap`, `sta+ap`. * `phy` - Modulation standard configured: `11b`, `11g`, `11n` (the esp8266 docs are not very clear, but it is assumed that 11n means b/g/n). This setting limits the modulations that the radio will use, it does not indicate the current modulation used with a specific access point. * `powersave` - Power saving mode: `none` (radio is on all the time), `ps-poll` (radio is off between beacons as determined by the access point's DTIM setting). Note that in 'ap' and 'sta+ap' modes the radio is always on, i.e., no power saving is possible. * `savedMode` - The saved operation mode which will be applied at boot time: `off`, `sta`, `ap`, `sta+ap`. */ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "setConfig", "generate" : "jswrap_wifi_setConfig", "#if" : "defined(ESP32) || defined(ESP8266)", "params" : [ ["settings", "JsVar", "An object with the configuration settings to change."] ] } Sets a number of global wifi configuration settings. All parameters are optional and which are passed determines which settings are updated. The settings available are: * `phy` - Modulation standard to allow: `11b`, `11g`, `11n` (the esp8266 docs are not very clear, but it is assumed that 11n means b/g/n). * `powersave` - Power saving mode: `none` (radio is on all the time), `ps-poll` (radio is off between beacons as determined by the access point's DTIM setting). Note that in 'ap' and 'sta+ap' modes the radio is always on, i.e., no power saving is possible. 
Note: esp8266 SDK programmers may be missing an "opmode" option to set the sta/ap/sta+ap operation mode. Please use connect/scan/disconnect/startAP/stopAP, which all set the esp8266 opmode indirectly. */ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "getDetails", "generate" : "jswrap_wifi_getDetails", "#if" : "defined(ESP32) || defined(ESP8266)", "return" : ["JsVar", "An object representing the wifi station details, if available immediately."], "params" : [ ["callback", "JsVar", "An optional `callback(details)` function to be called back with the wifi details, i.e. the same object as returned directly."] ] } Retrieve the wifi station configuration and status details. The details object has the following properties: * `status` - Details about the wifi station connection, one of `off`, `connecting`, `wrong_password`, `no_ap_found`, `connect_fail`, or `connected`. The off, bad_password and connected states are stable, the other states are transient. The connecting state will either result in connected or one of the error states (bad_password, no_ap_found, connect_fail) and the no_ap_found and connect_fail states will result in a reconnection attempt after some interval. * `rssi` - signal strength of the connected access point in dB, typically in the range -110 to 0, with anything greater than -30 being an excessively strong signal. * `ssid` - SSID of the access point. * `password` - the password used to connect to the access point. * `authMode` - the authentication used: `open`, `wpa`, `wpa2`, `wpa_wpa2` (not currently supported). * `savedSsid` - the SSID to connect to automatically at boot time, null if none. 
*/ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "getAPDetails", "generate" : "jswrap_wifi_getAPDetails", "#if" : "defined(ESP32) || defined(ESP8266)", "return" : ["JsVar", "An object representing the current access point details, if available immediately."], "params" : [ ["callback", "JsVar", "An optional `callback(details)` function to be called back with the current access point details, i.e. the same object as returned directly."] ] } Retrieve the current access point configuration and status. The details object has the following properties: * `status` - Current access point status: `enabled` or `disabled` * `stations` - an array of the stations connected to the access point. This array may be empty. Each entry in the array is an object describing the station which, at a minimum contains `ip` being the IP address of the station. * `ssid` - SSID to broadcast. * `password` - Password for authentication. * `authMode` - the authentication required of stations: `open`, `wpa`, `wpa2`, `wpa_wpa2`. * `hidden` - True if the SSID is hidden, false otherwise. * `maxConn` - Max number of station connections supported. * `savedSsid` - the SSID to broadcast automatically at boot time, null if the access point is to be disabled at boot. */ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "save", "generate" : "jswrap_wifi_save", "#if" : "defined(ESP32) || defined(ESP8266)", "params" : [ ["what", "JsVar", "An optional parameter to specify what to save, on the esp8266 the two supported values are `clear` and `sta+ap`. The default is `sta+ap`"] ] } On boards where this is not available, just issue the `connect` commands you need to run at startup from an `onInit` function. Save the current wifi configuration (station and access point) to flash and automatically apply this configuration at boot time, unless `what=="clear"`, in which case the saved configuration is cleared such that wifi remains disabled at boot. 
The saved configuration includes: * mode (off/sta/ap/sta+ap) * SSIDs & passwords * phy (11b/g/n) * powersave setting * DHCP hostname */ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "restore", "generate" : "jswrap_wifi_restore", "#if" : "defined(ESP32) || defined(ESP8266)" } Restores the saved Wifi configuration from flash. See `Wifi.save()`. */ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "getIP", "generate" : "jswrap_wifi_getIP", "return" : ["JsVar", "An object representing the station IP information, if available immediately (**ONLY** on ESP8266/ESP32)."], "params" : [ ["callback", "JsVar", "An optional `callback(err, ipinfo)` function to be called back with the IP information."] ] } Return the station IP information in an object as follows: * ip - IP address as string (e.g. "192.168.1.5") * netmask - The interface netmask as string (ESP8266/ESP32 only) * gw - The network gateway as string (ESP8266/ESP32 only) * mac - The MAC address as string of the form 00:00:00:00:00:00 Note that the `ip`, `netmask`, and `gw` fields are omitted if no connection is established: */ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "getAPIP", "generate" : "jswrap_wifi_getAPIP", "return" : ["JsVar", "An object representing the esp8266's Access Point IP information, if available immediately (**ONLY** on ESP8266/ESP32)."], "params" : [ ["callback", "JsVar", "An optional `callback(err, ipinfo)` function to be called back with the the IP information."] ] } Return the access point IP information in an object which contains: * ip - IP address as string (typ "192.168.4.1") * netmask - The interface netmask as string * gw - The network gateway as string * mac - The MAC address as string of the form 00:00:00:00:00:00 */ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "getHostByName", "generate" : "jswrap_wifi_getHostByName", "#if" : "defined(ESP8266) || defined(ESP32)", "params" : [ ["hostname", "JsVar", "The hostname to lookup."], 
["callback", "JsVar", "The `callback(ip)` to invoke when the IP is returned. `ip==null` on failure."] ] } Lookup the hostname and invoke a callback with the IP address as integer argument. If the lookup fails, the callback is invoked with a null argument. **Note:** only a single hostname lookup can be made at a time, concurrent lookups are not supported. */ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "getHostname", "generate" : "jswrap_wifi_getHostname", "#if" : "defined(ESP8266) || defined(ESP32)", "return" : ["JsVar", "The currently configured hostname, if available immediately."], "params" : [ ["callback", "JsVar", "An optional `callback(hostname)` function to be called back with the hostname."] ] } Returns the hostname announced to the DHCP server and broadcast via mDNS when connecting to an access point. */ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "setHostname", "generate" : "jswrap_wifi_setHostname", "#if" : "defined(ESP8266) || defined(ESPRUINOWIFI) || defined(ESP32)", "params" : [ ["hostname", "JsVar", "The new hostname."], ["callback", "JsVar", "An optional `callback()` function to be called back when the hostname is set"] ] } Set the hostname. Depending on implemenation, the hostname is sent with every DHCP request and is broadcast via mDNS. The DHCP hostname may be visible in the access point and may be forwarded into DNS as hostname.local. If a DHCP lease currently exists changing the hostname will cause a disconnect and reconnect in order to transmit the change to the DHCP server. The mDNS announcement also includes an announcement for the "espruino" service. 
*/ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "setSNTP", "generate" : "jswrap_wifi_setSNTP", "#if" : "defined(ESP8266) || defined(ESP32)", "params" : [ ["server", "JsVar", "The NTP server to query, for example, `us.pool.ntp.org`"], ["tz_offset", "JsVar", "Local time zone offset in the range -11..13."] ] } Starts the SNTP (Simple Network Time Protocol) service to keep the clock synchronized with the specified server. Note that the time zone is really just an offset to UTC and doesn't handle daylight savings time. The interval determines how often the time server is queried and Espruino's time is synchronized. The initial synchronization occurs asynchronously after setSNTP returns. */ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "setIP", "generate" : "jswrap_wifi_setIP", "#if" : "defined(ESP8266) || defined(ESPRUINOWIFI)", "params" : [ ["settings", "JsVar", "Configuration settings"], ["callback", "JsVar", "A `callback(err)` function to invoke when ip is set. `err==null` on success, or a string on failure."] ] } The `settings` object must contain the following properties. * `ip` IP address as string (e.g. "192.168.5.100") * `gw` The network gateway as string (e.g. "192.168.5.1") * `netmask` The interface netmask as string (e.g. "255.255.255.0") */ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "setAPIP", "#if" : "defined(ESPRUINOWIFI) || defined(ESP8266)", "generate" : "jswrap_wifi_setAPIP", "params" : [ ["settings", "JsVar", "Configuration settings"], ["callback", "JsVar", "A `callback(err)` function to invoke when ip is set. `err==null` on success, or a string on failure."] ] } The `settings` object must contain the following properties. * `ip` IP address as string (e.g. "192.168.5.100") * `gw` The network gateway as string (e.g. "192.168.5.1") * `netmask` The interface netmask as string (e.g. 
"255.255.255.0") */ //---------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------- /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "ping", "#if" : "defined(ESPRUINOWIFI) || defined(ESP8266) || defined(ESP32)", "generate" : "jswrap_wifi_ping", "params" : [ ["hostname", "JsVar", "The host to ping"], ["callback", "JsVar", "A `callback(time)` function to invoke when a ping is received"] ] } Issues a ping to the given host, and calls a callback with the time when the ping is received. */ /*JSON{ "type" : "staticmethod", "class" : "Wifi", "name" : "turbo", "#if" : "defined(ESPRUINOWIFI)", "generate_full" : "", "params" : [ ["enable", "JsVar", "true (or a baud rate as a number) to enable, false to disable"], ["callback", "JsVar", "A `callback()` function to invoke when turbo mode has been set"] ] } Switch to using a higher communication speed with the WiFi module. * `true` = 921600 baud * `false` = 115200 * `1843200` (or any number) = use a specific baud rate. * eg. `wifi.turbo(true,callback)` or `wifi.turbo(1843200,callback)` */
the_stack_data/117328892.c
/*
 * Compiler regression test for kernel-style device-number handling:
 * main() passes 0x12345678 through sys_mknod -> to_kdev_t and do_mknod
 * checks that the converted value is exactly 0x15800078. On the expected
 * value the program exits with status 0; on a miscompilation it aborts.
 *
 * Fix: abort() was called without any declaration (implicit function
 * declaration — invalid since C99); declare it alongside exit().
 */
extern void exit (int);
extern void abort (void);

typedef unsigned int dev_t;
typedef unsigned int kdev_t;

/* Old 8:8 major:minor device number -> new 10:22-style encoding
   (mirrors the Linux kernel helper this test was derived from). */
static inline kdev_t to_kdev_t(int dev)
{
  int major, minor;
  if (sizeof(kdev_t) == 16)
    return (kdev_t)dev;
  major = (dev >> 8);
  minor = (dev & 0xff);
  return ((( major ) << 22 ) | ( minor )) ;
}

/* Success oracle: exit(0) only for the expected converted device number. */
void do_mknod(const char * filename, int mode, kdev_t dev)
{
  if (dev == 0x15800078)
    exit(0);
  else
    abort();
}

/* Deliberately convoluted pointer arithmetic to stress register allocation;
   the returned value is never dereferenced. */
char * getname(const char * filename)
{
  register unsigned int a1,a2,a3,a4,a5,a6,a7,a8,a9;
  a1 = (unsigned int)(filename) *5 + 1;
  a2 = (unsigned int)(filename) *6 + 2;
  a3 = (unsigned int)(filename) *7 + 3;
  a4 = (unsigned int)(filename) *8 + 4;
  a5 = (unsigned int)(filename) *9 + 5;
  a6 = (unsigned int)(filename) *10 + 5;
  a7 = (unsigned int)(filename) *11 + 5;
  a8 = (unsigned int)(filename) *12 + 5;
  a9 = (unsigned int)(filename) *13 + 5;
  return (char *)(a1*a2+a3*a4+a5*a6+a7*a8+a9);
}

int sys_mknod(const char * filename, int mode, dev_t dev)
{
  int error;
  char * tmp;

  tmp = getname(filename);
  error = ((long)( tmp )) ;
  do_mknod(tmp,mode,to_kdev_t(dev));
  return error;
}

int main(void)
{
  /* The expected constant is only valid for 32-bit int. */
  if (sizeof (int) != 4)
    exit (0);
  return sys_mknod("test",1,0x12345678);
}
the_stack_data/445622.c
/*
 * Stress test for setjmp/longjmp used concurrently from multiple threads.
 * SIZE threads each perform COUNT longjmp round-trips (tst -> tst2 -> tst1
 * -> back to tst's setjmp); main() then verifies every thread completed
 * exactly COUNT jumps. Works with pthreads or, on Windows, raw Win32 threads.
 */
#include <stdio.h>
#include <string.h>
#include <setjmp.h>
#if !defined(_WIN32)
#include <pthread.h>
#else
#include <windows.h>
#endif

#define SIZE 10   /* number of concurrent threads */
#define COUNT 10  /* longjmp round-trips per thread */

/* Scratch work that dirties locals/registers around each setjmp/longjmp,
   to expose clobbered-state bugs in the jump implementation. */
#define TST int i, a[2], b[2]; \
    for (i = 0; i < 2; i++) a[i] = 0; \
    for (i = 0; i < 2; i++) b[i] = 0

/* Per-thread count of completed longjmps (indexed by thread slot). */
static int count[SIZE];

/* Deepest frame: jump straight back to the setjmp in tst(). */
static void tst1 (jmp_buf loc)
{
    TST;
    longjmp(loc, 1);
}

/* Intermediate frame: performs an unrelated local setjmp before the jump,
   so the longjmp has to skip over a frame with its own jmp_buf. */
static void tst2(jmp_buf loc)
{
    jmp_buf jmp;
    setjmp (jmp);
    TST;
    tst1(loc);
}

/* Thread body: loop COUNT times; the direct path calls down the chain,
   the longjmp path increments this thread's counter. `i` is reloaded
   from `index` after each return from setjmp because locals not declared
   volatile may be clobbered by longjmp. */
static void *tst (void * index)
{
    jmp_buf loc;
    int i = *(int *) index;
    static int v[SIZE];   /* static loop counters survive the longjmp */

    for (v[i] = 0; v[i] < COUNT; v[i]++) {
        if (setjmp (loc) == 0) {
            TST;
            tst2(loc);
        } else {
            count[i]++;
        }
        i = *(int *) index;
    }
    return NULL;
}

int main (void)
{
    int i;
#if !defined(_WIN32)
    pthread_t id[SIZE];
#else
    HANDLE id[SIZE];
#endif
    int index[SIZE];

    /* Spawn SIZE threads, each with a stable pointer to its own slot index. */
    for (i = 0; i < SIZE; i++) {
        index[i] = i;
#if !defined(_WIN32)
        pthread_create (&id[i], NULL, tst, (void *) &index[i]);
#else
        id[i] = CreateThread(NULL, 8192, (LPTHREAD_START_ROUTINE) tst, (void *) &index[i], 0, NULL);
#endif
    }
    for (i = 0; i < SIZE; i++) {
#if !defined(_WIN32)
        pthread_join (id[i], NULL);
#else
        WaitForSingleObject(id[i], INFINITE);
#endif
    }

    /* Every thread must have taken the longjmp path exactly COUNT times. */
    for (i = 0; i < SIZE; i++) {
        if (count[i] != COUNT)
            printf ("error: %d %d\n", i, count[i]);
    }
    return 0;
}
the_stack_data/1152464.c
#include <stdio.h>
#include <stdlib.h>

/*
 * qsort comparator: ascending order for ints.
 * Uses comparisons instead of subtraction to avoid signed overflow
 * (e.g. INT_MIN - positive would be undefined behavior).
 */
int comp (const void* a, const void* b)
{
    const int* ai = (const int*) a;
    const int* bi = (const int*) b;
    return (*ai > *bi) - (*ai < *bi);
}

/*
 * Reads a target amount k followed by 12 monthly amounts, then prints the
 * minimum number of months (taking the largest amounts first) whose sum
 * reaches at least k, or -1 if all 12 months together are not enough.
 *
 * Fixes vs. original: scanf results are checked (reading uninitialized
 * ints on malformed input was undefined behavior) and dead commented-out
 * code was removed. Output for well-formed input is unchanged.
 */
int main ()
{
    int A[12];
    int k, i, sum = 0, ans = 0, sum1 = 0;

    if (scanf("%d", &k) != 1)
        return 1;

    for (i = 0; i < 12; i++) {
        if (scanf ("%d", &A[i]) != 1)
            return 1;
        sum1 += A[i];
    }

    if (sum1 < k) {
        puts("-1");
        return 0;
    }

    /* Sort ascending, then take months from the largest end until the
       running total reaches k. */
    qsort((void*) A, 12, sizeof(int), comp);

    for (i = 11; i >= 0; i--) {
        if (sum >= k)
            break;
        sum += A[i];
        ans++;
    }
    printf("%d\n", ans);
    return 0;
}
the_stack_data/178265021.c
/*
 * Benchmarks contributed by Divyesh Unadkat[1,2], Supratik Chakraborty[1], Ashutosh Gupta[1]
 * [1] Indian Institute of Technology Bombay, Mumbai
 * [2] TCS Innovation labs, Pune
 *
 * SV-COMP-style verification benchmark: the __VERIFIER_* primitives model
 * nondeterministic input and reachability of the ERROR label. This program
 * is meant to be analyzed by a software verifier, not executed directly
 * (note that `a` is intentionally left uninitialized, i.e. nondeterministic
 * under the benchmark semantics).
 */
extern void __VERIFIER_error() __attribute__ ((__noreturn__));
extern void __VERIFIER_assume(int);
/* Property check: reaching ERROR means the assertion is violated. */
void __VERIFIER_assert(int cond) { if(!(cond)) { ERROR: __VERIFIER_error(); } }
extern int __VERIFIER_nondet_int(void);
int N;

int main()
{
	N = __VERIFIER_nondet_int();
	if(N <= 0) return 1;
	/* Keep the VLA allocations within addressable range. */
	__VERIFIER_assume(N <= 2147483647/sizeof(int));
	int i, j;
	int sum[1];
	int a[N];   /* nondeterministic contents (never written before being read) */
	int b[N];

	for (i = 0; i < N; i++) {
		b[i] = 1;
	}

	/* sum[0] accumulates each a[i] N times (doubly nested loop). */
	sum[0] = 0;
	for (i = 0; i < N; i++) {
		for (j = 0; j < N; j++) {
			sum[0] = sum[0] + a[i];
		}
	}

	/* Each b[j] receives sum[0] once per i in 0..j, i.e. (j+1) times. */
	for (i = 0; i < N; i++) {
		for (j = i; j < N; j++) {
			b[j] = b[j] + sum[0];
		}
	}

	/* Property under verification (note b[i] was initialized to 1, so this
	   is the benchmark's stated property, to be proved or refuted by the
	   analyzer — not an obvious tautology). */
	for (i = 0; i < N; i++) {
		__VERIFIER_assert(b[i] == (i+1)*sum[0]);
	}
}
the_stack_data/182954438.c
/* Lit-style test input for a dependence analysis pass: foo() carries a
 * loop-carried flow dependence on U across the outer loop (distance 1 in I),
 * while the inner J loop is parallel. The CHECK lines below are the expected
 * analyzer output and must not be edited. */
double U[100][100];

void foo() {
  for (int I = 1; I < 100; ++I)
    for (int J = 0; J < 99; ++J)
      U[I][J] = U[I - 1][J];
}
//CHECK: Printing analysis 'Dependency Analysis (Metadata)' for function 'foo':
//CHECK: loop at depth 1 distance_3.c:4:3
//CHECK: private:
//CHECK: <J:5[5:5], 4>
//CHECK: flow:
//CHECK: <U, 80000>:[1:1,0:0]
//CHECK: induction:
//CHECK: <I:4[4:3], 4>:[Int,1,100,1]
//CHECK: lock:
//CHECK: <I:4[4:3], 4>
//CHECK: header access:
//CHECK: <I:4[4:3], 4>
//CHECK: explicit access:
//CHECK: <I:4[4:3], 4> | <J:5[5:5], 4>
//CHECK: explicit access (separate):
//CHECK: <I:4[4:3], 4> <J:5[5:5], 4>
//CHECK: lock (separate):
//CHECK: <I:4[4:3], 4>
//CHECK: direct access (separate):
//CHECK: <I:4[4:3], 4> <J:5[5:5], 4> <U, 80000>
//CHECK: loop at depth 2 distance_3.c:5:5
//CHECK: shared:
//CHECK: <U, 80000>
//CHECK: induction:
//CHECK: <J:5[5:5], 4>:[Int,0,99,1]
//CHECK: read only:
//CHECK: <I:4[4:3], 4>
//CHECK: lock:
//CHECK: <J:5[5:5], 4>
//CHECK: header access:
//CHECK: <J:5[5:5], 4>
//CHECK: explicit access:
//CHECK: <I:4[4:3], 4> | <J:5[5:5], 4>
//CHECK: explicit access (separate):
//CHECK: <I:4[4:3], 4> <J:5[5:5], 4>
//CHECK: lock (separate):
//CHECK: <J:5[5:5], 4>
//CHECK: direct access (separate):
//CHECK: <I:4[4:3], 4> <J:5[5:5], 4> <U, 80000>
the_stack_data/45451452.c
// RUN: %llvmgcc -S %s -o - | llvm-as -o /dev/null typedef struct { int op; } event_t; event_t test(int X) { event_t foo = { 1 }, bar = { 2 }; return X ? foo : bar; }
the_stack_data/175143456.c
#include <stdio.h>

/*
 * Reads a two-digit number (as two separate single digits) and prints it
 * with the digits reversed.
 *
 * Fix: the original never checked the scanf result, so malformed input led
 * to printing uninitialized ints (undefined behavior). Valid-input output
 * is unchanged.
 */
int main(void)
{
    int digit_1, digit_2;

    printf("Enter a two-digit number:");
    /* %1d limits each conversion to one character, splitting the digits. */
    if (scanf("%1d%1d", &digit_1, &digit_2) != 2)
        return 1;

    printf("The reversal is: %d%d", digit_2, digit_1);

    return 0;
}
the_stack_data/766314.c
// print top 10 lines of file
// Author: Travis Dowd
// Date: 3-22-2020

#include <stdio.h>

/*
 * Usage: head FILE
 * Echoes characters from FILE until ten newlines have been seen, then
 * prints one trailing newline. Errors from fopen are reported via perror;
 * the exit status is always 0 (matching the original tool's behavior).
 */
int main( int argc, char *argv[] )
{
    if ( argc != 2 )
        return 0;

    FILE *fp = fopen( argv[1], "r" );
    if ( fp == NULL ) {
        perror( "head" );
        return 0;
    }

    int newline_count = 0;
    /* Count each newline before deciding whether to echo, so the tenth
       line's characters are printed but its terminating newline is not
       (the final printf supplies it instead). */
    for ( int ch = getc( fp ); ch != EOF; ch = getc( fp ) ) {
        if ( ch == '\n' )
            newline_count++;
        if ( newline_count < 10 )
            putchar( ch );
    }
    printf( "\n" );
    fclose( fp );

    return 0;
}
the_stack_data/237643451.c
/* The classic 23-byte x86 (32-bit) Linux shellcode: builds the string
 * "/bin//sh" on the stack and invokes the execve system call via int 0x80
 * (xor eax,eax; push eax; push "//sh"; push "/bin"; mov ebx,esp; push eax;
 * push ebx; mov ecx,esp; mov al,11; int 0x80). For exploit-development /
 * security-course use only. */
char shellcode[] = "\x31\xc0\x50\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x50\x53\x89\xe1\xb0\x0b\xcd\x80";
the_stack_data/1218898.c
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.

 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.

 * You should have received a copy of the GNU Affero General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/* Compatibility shims: a dn_comp() fallback for old uClibc versions that
 * lack it, and an fflags() helper emulating O_CLOEXEC on platforms without
 * SOCK_CLOEXEC. Both are compiled only when the corresponding feature is
 * missing. */

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <resolv.h>
#include <alloca.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/socket.h>

/* uClibc < 0.9.33 did not provide dn_comp; supply a weak fallback. */
#if defined(__UCLIBC__) \
    && (__UCLIBC_MAJOR__ == 0 \
        && (__UCLIBC_MINOR__ < 9 || (__UCLIBC_MINOR__ == 9 && __UCLIBC_SUBLEVEL__ < 33)))

#define NS_MAXCDNAME 255 /* max compressed domain name length */
#define NS_MAXLABEL 63 /* max label length */

/*
 * Encode the dotted-name `src` into DNS wire format in `dst` (at most
 * `length` bytes): each label is emitted as <len><bytes>, terminated by a
 * zero byte. Returns the encoded length, or -1 on NULL arguments, an empty
 * or over-long label, or if the result would exceed NS_MAXCDNAME/length.
 * Note: this fallback performs no name compression, so the dnptrs/lastdnptr
 * arguments of the real dn_comp are accepted but ignored.
 */
int __attribute__ ((weak)) dn_comp(const char *src, uint8_t *dst, int length,
                                   uint8_t __attribute__((unused)) **dnptrs,
                                   uint8_t __attribute__((unused)) **lastdnptr)
{
    uint8_t *buf, *ptr;
    int len;

    if (src == NULL || dst == NULL)
        return -1;

    /* worst case: every byte of src plus one length byte and the root 0 */
    buf = ptr = alloca(strlen(src) + 2);

    while (src && *src) {
        uint8_t *lenptr = ptr++;   /* reserve the label-length byte */

        for (len = 0; *src && *src != '.'; len++)
            *ptr++ = *src++;
        if (len == 0 || len > NS_MAXLABEL)
            return -1;
        *lenptr = len;

        if (*src)
            src++;   /* skip the dot */
    }
    *ptr++ = 0;   /* root label terminator */

    len = ptr - buf;
    if (len > NS_MAXCDNAME || len > length)
        return -1;

    memcpy(dst, buf, len);
    return len;
}
#endif

#if !defined(SOCK_CLOEXEC)
/*
 * Apply the O_CLOEXEC bit of `flags` to `sock` via fcntl, for systems that
 * cannot request it atomically at socket creation. Returns `sock` on
 * success (or if sock is negative); on fcntl failure the socket is closed
 * and -1 is returned with errno preserved from the failing call.
 */
int fflags(int sock, int flags)
{
    int err;

    if (sock < 0)
        return sock;

    if (flags & O_CLOEXEC) {
        if (fcntl(sock, F_SETFD, FD_CLOEXEC) < 0)
            goto error;
    }

    return sock;

error:
    /* save errno across close(), which may clobber it */
    err = errno;
    close(sock);
    errno = err;
    return -1;
}
#endif
the_stack_data/20450329.c
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/*
 * Returns true iff `sentence` contains every lowercase letter 'a'..'z'
 * at least once (a lowercase pangram check).
 *
 * Fix: the original indexed flag[sentence[i] - 'a'] for EVERY character,
 * so any non-lowercase input (space, uppercase, digits) read/wrote out of
 * bounds — undefined behavior. Non-lowercase characters are now skipped.
 */
bool is_all_lower_case_alphabet(const char *sentence)
{
    bool seen[26] = { false };
    int distinct = 0;

    for (int i = 0; sentence[i] != '\0'; ++i) {
        char c = sentence[i];
        if (c >= 'a' && c <= 'z' && !seen[c - 'a']) {
            seen[c - 'a'] = true;
            ++distinct;
        }
    }
    return distinct == 26;
}

int main()
{
    char sentence[1000];

    /* Fix: gets() (removed in C11, unbounded write) replaced by fgets()
       plus newline stripping; observable behavior is unchanged. */
    if (fgets(sentence, sizeof sentence, stdin) == NULL)
        return 1;
    sentence[strcspn(sentence, "\n")] = '\0';

    printf("%d\n", is_all_lower_case_alphabet(sentence));
    return 0;
}
the_stack_data/248581828.c
#include <stdio.h>

/*
 * Reads three integers and prints them in ascending order by rearranging
 * pointers (the variables themselves are never moved).
 *
 * Fix: the original compared the ORIGINAL variables (a > b, a > c, b > c)
 * instead of the values the pointers currently refer to, so after the
 * first swap the later comparisons used stale operands — e.g. input
 * "3 1 2" printed "2 <= 3 <= 1". Comparing *d, *e, *f implements a
 * correct 3-element sorting network. The scanf result is also checked
 * (reading uninitialized ints on bad input was undefined behavior).
 */
int main(){
    int a,b,c;
    int *d,*e,*f,*g;

    printf("请输入三个数: ");
    if (scanf("%d%d%d",&a,&b,&c) != 3)
        return 1;

    d = &a;
    e = &b;
    f = &c;

    if(*d > *e){          /* order first pair */
        g = e; e = d; d = g;
    }
    if(*d > *f){          /* smallest into d */
        g = f; f = d; d = g;
    }
    if(*e > *f){          /* order remaining pair */
        g = f; f = e; e = g;
    }

    printf("%d <= %d <= %d\n", *d,*e,*f);
    return 0;
}
the_stack_data/89715.c
#include <string.h>
#include <fcntl.h>

#define PATHMAX 1023

/*
 * Usage: prog BASENAME
 * Creates BASENAME.dir and BASENAME.peg (mode 0777). Returns 1 on wrong
 * argument count, over-long path, or creat() failure; 0 on success.
 *
 * Fixes: the original passed sizeof(pathname)-1 as the strncat bound —
 * strncat's size argument is the REMAINING space, not the total buffer
 * size, so that call could overflow if the length guard were ever wrong —
 * and the guard itself only accounted for suffixes[0]. The copy is now
 * done with an exact-length memcpy/strcpy after checking every suffix.
 */
int main(int argc, char **argv)
{
    char pathname[PATHMAX+1];
    char *suffixes[] = { ".dir", ".peg" };
    size_t nsuffixes = sizeof(suffixes) / sizeof(char *);
    size_t base_len;

    if (argc != 2)
        return 1;

    /* Reject any base name that would not fit with every suffix. */
    base_len = strlen(argv[1]);
    for (size_t i = 0; i < nsuffixes; i++)
        if (base_len + strlen(suffixes[i]) > sizeof(pathname)-1)
            return 1;

    for (size_t i = 0; i < nsuffixes; i++) {
        memcpy(pathname, argv[1], base_len);
        strcpy(pathname + base_len, suffixes[i]);  /* bounded by the check above */
        if (creat(pathname, 0777) < 0)
            return 1;
    }
    return 0;
}
the_stack_data/61074753.c
#include <stdio.h>
#include <getopt.h>

/*
 * cp — copies a file to another file (part of snekutils).
 * Usage: cp SOURCE TARGET  |  cp -h  |  cp -v
 *
 * Fixes vs. original:
 *  - the copy loop stored fgetc()'s result in a `char`; fgetc returns int,
 *    so EOF detection was broken (infinite loop where char is unsigned,
 *    premature stop on a 0xFF byte). Now uses int.
 *  - `cp SOURCE` with no target passed argv[2] == NULL to fopen, which is
 *    undefined behavior; the missing argument is now detected explicitly.
 */
int main(int argc, char **argv)
{
    // checks if the arg count is lower than 2 or higher than 3
    if(argc < 2 || argc > 3){
        printf("Invalid syntax!\n");
        printf("use 'cp -h' for help!\n");
        return 1;
    }

    FILE *source, *target;
    int c;

    while ((c = getopt (argc, argv, "hv")) != -1){
        switch (c) {
            case 'h':
                printf("cp: Copies a file to another file\n");
                printf("Syntax:\n");
                printf("cp [source file] [target file]\n");
                return 0;
            case 'v':
                printf("cp command, part of snekutils\n");
                printf("version 1.0\n");
                return 0;
            default:
                return 0;
        }
    }

    // reads the source file
    source = fopen(argv[1], "r");
    if (source == NULL){
        printf("cp: File '%s' does not exist.\n", argv[1]);
        return 1;
    }

    // no target argument given (argc == 2): report invalid syntax instead
    // of handing NULL to fopen
    if (argv[2] == NULL){
        fclose(source);
        printf("cp: Invalid Syntax! Use the --help flag for help.\n");
        return 1;
    }

    // checks if target file already exists
    if ((target = fopen(argv[2], "r"))) {
        fclose(target);
        fclose(source);
        printf("cp: File '%s' already exists!\n", argv[2]);
        return 1;
    }

    // writes the target file
    target = fopen(argv[2], "w");
    if (target == NULL){
        fclose(source);
        printf("cp: Invalid Syntax! Use the --help flag for help.\n");
        return 1;
    }

    // int, not char: fgetc returns an int so EOF is distinguishable from
    // every valid byte value
    int ch;
    while ((ch = fgetc(source)) != EOF){
        fputc(ch, target);
    }

    fclose(source);
    fclose(target);
    return 0;
}
the_stack_data/1059942.c
/**
 * @file xmc_eth_phy_dp83848.c
 * @date 2018-08-06
 *
 * Driver for the TI/National DP83848 10/100 Ethernet PHY. All register
 * access goes through the XMC ETH MAC MDIO helpers
 * (XMC_ETH_MAC_ReadPhy / XMC_ETH_MAC_WritePhy); this file only defines the
 * DP83848 register map and the init/reset/power/link-status operations.
 *
 * @cond
 *********************************************************************************************************************
 * XMClib v2.1.20 - XMC Peripheral Driver Library
 *
 * Copyright (c) 2015-2018, Infineon Technologies AG
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,are permitted provided that the
 * following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided with the distribution.
 *
 * Neither the name of the copyright holders nor the names of its contributors may be used to endorse or promote
 * products derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY,OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * To improve the quality of the software, users are encouraged to share modifications, enhancements or bug fixes with
 * Infineon Technologies AG ([email protected]).
 *********************************************************************************************************************
 *
 * Change History
 * --------------
 *
 * 2015-06-20:
 *     - Initial <br>
 *
 * 2015-12-15:
 *     - Added Reset and exit power down
 *     - Reset function called in Init function
 *
 * 2018-08-06:
 *     - Fixed XMC_ETH_PHY_Init waiting for PHY MDIO being ready
 *
 * @endcond
 */

/*******************************************************************************
 * HEADER FILES
 *******************************************************************************/
#if defined(XMC_ETH_PHY_DP83848C)
#include <xmc_eth_phy.h>

/*******************************************************************************
 * MACROS
 *******************************************************************************/
/* Basic Registers */
#define REG_BMCR (0x00U) /* Basic Mode Control Register */
#define REG_BMSR (0x01U) /* Basic Mode Status Register */
#define REG_PHYIDR1 (0x02U) /* PHY Identifier 1 */
#define REG_PHYIDR2 (0x03U) /* PHY Identifier 2 */
#define REG_ANAR (0x04U) /* Auto-Negotiation Advertisement */
#define REG_ANLPAR (0x05U) /* Auto-Neg. Link Partner Ability */
#define REG_ANER (0x06U) /* Auto-Neg. Expansion Register */
#define REG_ANNPTR (0x07U) /* Auto-Neg. Next Page TX */
#define REG_RBR (0x17U) /* RMII and Bypass Register */

/* Extended Registers */
#define REG_PHYSTS (0x10U) /* Status Register */

/* Basic Mode Control Register */
#define BMCR_RESET (0x8000U) /* Software Reset */
#define BMCR_LOOPBACK (0x4000U) /* Loopback mode */
#define BMCR_SPEED_SEL (0x2000U) /* Speed Select (1=100Mb/s) */
#define BMCR_ANEG_EN (0x1000U) /* Auto Negotiation Enable */
#define BMCR_POWER_DOWN (0x0800U) /* Power Down */
#define BMCR_ISOLATE (0x0400U) /* Isolate Media interface */
#define BMCR_REST_ANEG (0x0200U) /* Restart Auto Negotiation */
#define BMCR_DUPLEX (0x0100U) /* Duplex Mode (1=Full duplex) */
#define BMCR_COL_TEST (0x0080U) /* Collision Test */

/* Basic Mode Status Register */
#define BMSR_100B_T4 (0x8000U) /* 100BASE-T4 Capable */
#define BMSR_100B_TX_FD (0x4000U) /* 100BASE-TX Full Duplex Capable */
#define BMSR_100B_TX_HD (0x2000U) /* 100BASE-TX Half Duplex Capable */
#define BMSR_10B_T_FD (0x1000U) /* 10BASE-T Full Duplex Capable */
#define BMSR_10B_T_HD (0x0800U) /* 10BASE-T Half Duplex Capable */
#define BMSR_MF_PRE_SUP (0x0040U) /* Preamble suppression Capable */
#define BMSR_ANEG_COMPL (0x0020U) /* Auto Negotiation Complete */
#define BMSR_REM_FAULT (0x0010U) /* Remote Fault */
#define BMSR_ANEG_ABIL (0x0008U) /* Auto Negotiation Ability */
#define BMSR_LINK_STAT (0x0004U) /* Link Status (1=established) */
#define BMSR_JABBER_DET (0x0002U) /* Jabber Detect */
#define BMSR_EXT_CAPAB (0x0001U) /* Extended Capability */

/* RMII and Bypass Register */
#define RBR_RMII_MODE (0x0020U) /* Reduced MII Mode */

/* PHY Identifier Registers */
#define PHY_ID1 0x2000 /* DP83848C Device Identifier MSB */
#define PHY_ID2 0x5C90 /* DP83848C Device Identifier LSB */

/* PHY Status Register */
#define PHYSTS_MDI_X 0x4000 /* MDI-X mode enabled by Auto-Negot. */
#define PHYSTS_REC_ERR 0x2000 /* Receive Error Latch */
#define PHYSTS_POL_STAT 0x1000 /* Polarity Status */
#define PHYSTS_FC_SENSE 0x0800 /* False Carrier Sense Latch */
#define PHYSTS_SIG_DET 0x0400 /* 100Base-TX Signal Detect */
#define PHYSTS_DES_LOCK 0x0200 /* 100Base-TX Descrambler Lock */
#define PHYSTS_PAGE_REC 0x0100 /* Link Code Word Page Received */
#define PHYSTS_MII_INT 0x0080 /* MII Interrupt Pending */
#define PHYSTS_REM_FAULT 0x0040 /* Remote Fault */
#define PHYSTS_JABBER_DET 0x0020 /* Jabber Detect */
#define PHYSTS_ANEG_COMPL 0x0010 /* Auto Negotiation Complete */
#define PHYSTS_LOOPBACK 0x0008 /* Loopback Status */
#define PHYSTS_DUPLEX 0x0004 /* Duplex Status (1=Full duplex) */
#define PHYSTS_SPEED 0x0002 /* Speed10 Status (1=10MBit/s) */
#define PHYSTS_LINK_STAT 0x0001 /* Link Status (1=established) */

/*******************************************************************************
 * API IMPLEMENTATION
 *******************************************************************************/

/* Check if the device identifier is valid.
 * Reads both PHYID registers over MDIO and compares them against the
 * DP83848C IDs (the low nibble of PHYIDR2 is the silicon revision and is
 * masked out). Returns XMC_ETH_PHY_STATUS_OK, _ERROR_DEVICE_ID on a
 * mismatch, or _ERROR_TIMEOUT when the MDIO reads themselves fail. */
static int32_t XMC_ETH_PHY_IsDeviceIdValid(XMC_ETH_MAC_t *const eth_mac, uint8_t phy_addr)
{
  uint16_t phy_id1;
  uint16_t phy_id2;
  XMC_ETH_PHY_STATUS_t status;

  /* Check Device Identification. */
  if ((XMC_ETH_MAC_ReadPhy(eth_mac, phy_addr, REG_PHYIDR1, &phy_id1) == XMC_ETH_MAC_STATUS_OK) &&
      (XMC_ETH_MAC_ReadPhy(eth_mac, phy_addr, REG_PHYIDR2, &phy_id2) == XMC_ETH_MAC_STATUS_OK))
  {
    if ((phy_id1 == PHY_ID1) && ((phy_id2 & (uint16_t)0xfff0) == PHY_ID2))
    {
      status = XMC_ETH_PHY_STATUS_OK;
    }
    else
    {
      status = XMC_ETH_PHY_STATUS_ERROR_DEVICE_ID;
    }
  }
  else
  {
    status = XMC_ETH_PHY_STATUS_ERROR_TIMEOUT;
  }

  return (int32_t)status;
}

/* PHY initialize.
 * Waits (with a large retry budget) until the PHY answers on MDIO with the
 * expected device ID, performs a software reset, then programs BMCR from
 * `config` (speed/duplex/autoneg/loopback) and the RBR register for the
 * selected MII/RMII interface mode. Returns the first failing status. */
int32_t XMC_ETH_PHY_Init(XMC_ETH_MAC_t *const eth_mac, uint8_t phy_addr, const XMC_ETH_PHY_CONFIG_t *const config)
{
  uint32_t retries = 0xffffffffUL;
  int32_t status;
  uint16_t reg_val;

  /* Spin until the PHY's MDIO interface is ready (2018-08-06 fix). */
  while (((status = XMC_ETH_PHY_IsDeviceIdValid(eth_mac, phy_addr)) != XMC_ETH_PHY_STATUS_OK) && --retries);

  if (status == (int32_t)XMC_ETH_PHY_STATUS_OK)
  {
    status = XMC_ETH_PHY_Reset(eth_mac, phy_addr);

    if (status == (int32_t)XMC_ETH_PHY_STATUS_OK)
    {
      reg_val = 0U;

      if (config->speed == XMC_ETH_LINK_SPEED_100M)
      {
        reg_val |= BMCR_SPEED_SEL;
      }

      if (config->duplex == XMC_ETH_LINK_DUPLEX_FULL)
      {
        reg_val |= BMCR_DUPLEX;
      }

      if (config->enable_auto_negotiate == true)
      {
        reg_val |= BMCR_ANEG_EN;
      }

      if (config->enable_loop_back == true)
      {
        reg_val |= BMCR_LOOPBACK;
      }

      status = (int32_t)XMC_ETH_MAC_WritePhy(eth_mac, phy_addr, REG_BMCR, reg_val);

      if (status == (int32_t)XMC_ETH_PHY_STATUS_OK)
      {
        /* Configure interface mode */
        switch (config->interface)
        {
          case XMC_ETH_LINK_INTERFACE_MII:
            reg_val = 0x0001;
            break;

          case XMC_ETH_LINK_INTERFACE_RMII:
            reg_val = RBR_RMII_MODE | 0x0001;
            break;
        }

        status = (int32_t)XMC_ETH_MAC_WritePhy(eth_mac, phy_addr, REG_RBR, reg_val);
      }
    }
  }

  return status;
}

/* Reset.
 * Issues a BMCR software reset and busy-waits (unbounded) until the PHY
 * clears the self-clearing BMCR_RESET bit. */
int32_t XMC_ETH_PHY_Reset(XMC_ETH_MAC_t *const eth_mac, uint8_t phy_addr)
{
  int32_t status;
  uint16_t reg_bmcr;

  /* Reset PHY*/
  status = (int32_t)XMC_ETH_MAC_WritePhy(eth_mac, phy_addr, REG_BMCR, BMCR_RESET);

  if (status == (int32_t)XMC_ETH_PHY_STATUS_OK)
  {
    /* Wait for the reset to complete */
    do
    {
      status = XMC_ETH_MAC_ReadPhy(eth_mac, phy_addr, REG_BMCR, &reg_bmcr);
    } while ((reg_bmcr & BMCR_RESET) != 0);
  }

  return status;
}

/* Initiate power down (read-modify-write of the BMCR power-down bit). */
int32_t XMC_ETH_PHY_PowerDown(XMC_ETH_MAC_t *const eth_mac, uint8_t phy_addr)
{
  int32_t status;
  uint16_t reg_bmcr;

  status = XMC_ETH_MAC_ReadPhy(eth_mac, phy_addr, REG_BMCR, &reg_bmcr);

  if (status == (int32_t)XMC_ETH_PHY_STATUS_OK)
  {
    reg_bmcr |= BMCR_POWER_DOWN;
    status = (int32_t)XMC_ETH_MAC_WritePhy(eth_mac, phy_addr, REG_BMCR, reg_bmcr);
  }

  return status;
}

/* Exit power down (clears the BMCR power-down bit). */
int32_t XMC_ETH_PHY_ExitPowerDown(XMC_ETH_MAC_t *const eth_mac, uint8_t phy_addr)
{
  int32_t status;
  uint16_t reg_bmcr;

  status = XMC_ETH_MAC_ReadPhy(eth_mac, phy_addr, REG_BMCR, &reg_bmcr);

  if (status == (int32_t)XMC_ETH_PHY_STATUS_OK)
  {
    reg_bmcr &= ~BMCR_POWER_DOWN;
    status = (int32_t)XMC_ETH_MAC_WritePhy(eth_mac, phy_addr, REG_BMCR, reg_bmcr);
  }

  return status;
}

/* Get link status from the BMSR link bit (MDIO read result is not
 * checked here; on a failed read the reported status is unreliable). */
XMC_ETH_LINK_STATUS_t XMC_ETH_PHY_GetLinkStatus(XMC_ETH_MAC_t *const eth_mac, uint8_t phy_addr)
{
  uint16_t val;

  XMC_ETH_MAC_ReadPhy(eth_mac, phy_addr, REG_BMSR, &val);
  return (XMC_ETH_LINK_STATUS_t)((val & BMSR_LINK_STAT) ? XMC_ETH_LINK_STATUS_UP : XMC_ETH_LINK_STATUS_DOWN);
}

/* Get link speed (PHYSTS speed bit set means 10 MBit/s). */
XMC_ETH_LINK_SPEED_t XMC_ETH_PHY_GetLinkSpeed(XMC_ETH_MAC_t *const eth_mac, uint8_t phy_addr)
{
  uint16_t val;

  XMC_ETH_MAC_ReadPhy(eth_mac, phy_addr, REG_PHYSTS, &val);
  return (XMC_ETH_LINK_SPEED_t)((val & PHYSTS_SPEED) ? XMC_ETH_LINK_SPEED_10M : XMC_ETH_LINK_SPEED_100M);
}

/* Get link duplex settings from the PHYSTS duplex bit. */
XMC_ETH_LINK_DUPLEX_t XMC_ETH_PHY_GetLinkDuplex(XMC_ETH_MAC_t *const eth_mac, uint8_t phy_addr)
{
  uint16_t val;

  XMC_ETH_MAC_ReadPhy(eth_mac, phy_addr, REG_PHYSTS, &val);
  return (XMC_ETH_LINK_DUPLEX_t)((val & PHYSTS_DUPLEX) ? XMC_ETH_LINK_DUPLEX_FULL : XMC_ETH_LINK_DUPLEX_HALF);
}

/* True once auto-negotiation has completed (BMSR bit 5). */
bool XMC_ETH_PHY_IsAutonegotiationCompleted(XMC_ETH_MAC_t *const eth_mac, uint8_t phy_addr)
{
  uint16_t val;

  XMC_ETH_MAC_ReadPhy(eth_mac, phy_addr, REG_BMSR, &val);
  return ((val & BMSR_ANEG_COMPL) == BMSR_ANEG_COMPL);
}

#endif // defined(XMC_ETH_PHY_DP83848C)
the_stack_data/69253.c
#include <stdio.h>

/* Print a greeting to standard output. */
int main(void)
{
    fputs("Hello World\n", stdout);
    return 0;
}
the_stack_data/96659.c
/*
 * Copyright (c) 2017-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/*=== Tuning parameter ===*/
#ifndef MAX_TESTED_LEVEL
#define MAX_TESTED_LEVEL 12
#endif

/*=== Dependencies ===*/
#include <stdio.h>     /* printf */
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"

/*=== functions ===*/

/*! readU32FromChar() :
    @return : unsigned integer value read from input in `char` format.
    Allows and interprets K, KB, KiB, M, MB and MiB suffix.
    Will also modify `*stringPtr`, advancing it to position where it stopped reading.
    Note : function result can overflow if digit string > MAX_UINT */
static unsigned readU32FromChar(const char** stringPtr)
{
    unsigned result = 0;
    /* accumulate decimal digits; comma operator keeps the body a single statement */
    while ((**stringPtr >='0') && (**stringPtr <='9'))
        result *= 10, result += **stringPtr - '0', (*stringPtr)++ ;
    /* optional K/M multiplier, then skip optional "i" and/or "B" (KB, KiB, MiB...) */
    if ((**stringPtr=='K') || (**stringPtr=='M')) {
        result <<= 10;
        if (**stringPtr=='M') result <<= 10;
        (*stringPtr)++ ;
        if (**stringPtr=='i') (*stringPtr)++;
        if (**stringPtr=='B') (*stringPtr)++;
    }
    return result;
}

/* Prints actual and estimated ZSTD_CStream/ZSTD_DStream memory usage for each
** compression level up to MAX_TESTED_LEVEL, or for a single custom window-log
** given as argv[1]. Returns 1 on any zstd error. */
int main(int argc, char const *argv[])
{
    printf("\n Zstandard (v%u) memory usage for streaming contexts : \n\n", ZSTD_versionNumber());

    unsigned wLog = 0;   /* optional window-log override from the command line */
    if (argc > 1) {
        const char* valStr = argv[1];
        wLog = readU32FromChar(&valStr);
    }

    int compressionLevel;
    for (compressionLevel = 1; compressionLevel <= MAX_TESTED_LEVEL; compressionLevel++) {
#define INPUT_SIZE 5
#define COMPRESSED_SIZE 128
        char const dataToCompress[INPUT_SIZE] = "abcde";
        char compressedData[COMPRESSED_SIZE];
        char decompressedData[INPUT_SIZE];
        ZSTD_CStream* const cstream = ZSTD_createCStream();
        if (cstream==NULL) {
            printf("Level %i : ZSTD_CStream Memory allocation failure \n", compressionLevel);
            return 1;
        }

        /* forces compressor to use maximum memory size for given compression level,
         * by not providing any information on input size */
        ZSTD_parameters params = ZSTD_getParams(compressionLevel, 0, 0);

        if (wLog) {  /* special mode : specific wLog */
            printf("Using custom compression parameter : level 1 + wLog=%u \n", wLog);
            params = ZSTD_getParams(1, 1 << wLog, 0);
            size_t const error = ZSTD_initCStream_advanced(cstream, NULL, 0, params, 0);
            if (ZSTD_isError(error)) {
                printf("ZSTD_initCStream_advanced error : %s \n", ZSTD_getErrorName(error));
                return 1;
            }
        } else {
            size_t const error = ZSTD_initCStream(cstream, compressionLevel);
            if (ZSTD_isError(error)) {
                printf("ZSTD_initCStream error : %s \n", ZSTD_getErrorName(error));
                return 1;
            }
        }

        size_t compressedSize;
        {   /* compress the 5-byte sample and flush/close the frame */
            ZSTD_inBuffer inBuff = { dataToCompress, sizeof(dataToCompress), 0 };
            ZSTD_outBuffer outBuff = { compressedData, sizeof(compressedData), 0 };
            size_t const cError = ZSTD_compressStream(cstream, &outBuff, &inBuff);
            if (ZSTD_isError(cError)) {
                printf("ZSTD_compressStream error : %s \n", ZSTD_getErrorName(cError));
                return 1;
            }
            size_t const fError = ZSTD_endStream(cstream, &outBuff);
            if (ZSTD_isError(fError)) {
                printf("ZSTD_endStream error : %s \n", ZSTD_getErrorName(fError));
                return 1;
            }
            compressedSize = outBuff.pos;
        }

        ZSTD_DStream* dstream = ZSTD_createDStream();
        if (dstream==NULL) {
            printf("Level %i : ZSTD_DStream Memory allocation failure \n", compressionLevel);
            return 1;
        }
        {   size_t const error = ZSTD_initDStream(dstream);
            if (ZSTD_isError(error)) {
                printf("ZSTD_initDStream error : %s \n", ZSTD_getErrorName(error));
                return 1;
            }
        }
        /* forces decompressor to use maximum memory size, as decompressed size is not known */
        {   ZSTD_inBuffer inBuff = { compressedData, compressedSize, 0 };
            ZSTD_outBuffer outBuff = { decompressedData, sizeof(decompressedData), 0 };
            size_t const dResult = ZSTD_decompressStream(dstream, &outBuff, &inBuff);
            if (ZSTD_isError(dResult)) {
                printf("ZSTD_decompressStream error : %s \n", ZSTD_getErrorName(dResult));
                return 1;
            }
            /* a return value of 0 means the frame was fully decoded */
            if (dResult != 0) {
                printf("ZSTD_decompressStream error : unfinished decompression \n");
                return 1;
            }
            if (outBuff.pos != sizeof(dataToCompress)) {
                printf("ZSTD_decompressStream error : incorrect decompression \n");
                return 1;
            }
        }

        /* actual vs estimated context sizes, reported in KB */
        size_t const cstreamSize = ZSTD_sizeof_CStream(cstream);
        size_t const cstreamEstimatedSize = wLog ?
                ZSTD_estimateCStreamSize_usingCParams(params.cParams) :
                ZSTD_estimateCStreamSize(compressionLevel);
        size_t const dstreamSize = ZSTD_sizeof_DStream(dstream);

        printf("Level %2i : Compression Mem = %5u KB (estimated : %5u KB) ; Decompression Mem = %4u KB \n",
                compressionLevel,
                (unsigned)(cstreamSize>>10), (unsigned)(cstreamEstimatedSize>>10),
                (unsigned)(dstreamSize>>10));

        ZSTD_freeDStream(dstream);
        ZSTD_freeCStream(cstream);
        if (wLog) break;  /* single test */
    }
    return 0;
}
the_stack_data/417739.c
/*
 * 1- Write a program that reads your name and your age, then prints
 *    name and age separated by a hyphen.  (Translated from Portuguese.)
 *
 * Example:
 *   Digite seu nome >> Fulano
 *   Idade >> 30
 *   Fulano - 30 anos
 */
#include <stdio.h>

int main(int argc, char **argv)  /* was `char** argv[]` — wrong type for main */
{
    char nome[50];
    int idade;

    (void)argc;  /* command-line arguments are not used */
    (void)argv;

    printf("Digite seu nome >> ");
    /* Field width 49 prevents overflowing nome[50]; pass the array itself,
     * not &nome (which has type char(*)[50], a %s type mismatch). */
    if (scanf("%49s", nome) != 1) {
        return 1;  /* bail out rather than print an uninitialized buffer */
    }

    printf("Idade >> ");
    if (scanf("%d", &idade) != 1) {
        return 1;  /* non-numeric input: idade would otherwise be indeterminate */
    }

    printf("\n\n%s - %d anos", nome, idade);
    return 0;
}
the_stack_data/220456078.c
extern void abort(void);
void reach_error(){}

/*
 * Mutually recursive parity check (verification benchmark).
 * Original author: Matthias Heizmann, 2013-07-13.
 */

extern int __VERIFIER_nondet_int(void);

int isOdd(int n);
int isEven(int n);

/* 1 iff n is odd; defers to isEven for n > 1 (non-terminating for n < 0, as before). */
int isOdd(int n)
{
    switch (n) {
    case 0:
        return 0;
    case 1:
        return 1;
    default:
        return isEven(n - 1);
    }
}

/* 1 iff n is even; defers to isOdd for n > 1. */
int isEven(int n)
{
    switch (n) {
    case 0:
        return 1;
    case 1:
        return 0;
    default:
        return isOdd(n - 1);
    }
}

/* Harness: the error location is unreachable iff isEven(n) agrees with n % 2
** for non-negative n and never goes negative. */
int main()
{
    int n = __VERIFIER_nondet_int();
    int result = isEven(n);
    int mod = n % 2;
    if (result < 0 || result == mod) {
        return 0;
    } else {
    ERROR: {
            reach_error();
            abort();
        }
    }
}
the_stack_data/77097.c
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/wait.h>   /* waitpid */
#include <unistd.h>

/*
 * Fork demo: the child prints the sum of two numbers while the parent
 * prints a name, then reaps the child.
 *
 * Usage: cpu <name> <num1> <num2>
 */
int main(int argc, char *argv[])
{
    if (argc != 4) {
        /* was "usage: cpu <string>" — misleading: three arguments are required */
        fprintf(stderr, "usage: cpu <name> <num1> <num2>\n");
        exit(1);
    }
    char *name = argv[1];
    int num1 = atoi(argv[2]);
    int num2 = atoi(argv[3]);

    pid_t rc = fork();  /* was int; fork() returns pid_t */
    if (rc < 0) {
        fprintf(stderr, "fork failed\n");
        exit(1);
    } else if (rc == 0) {
        /* child */
        int sum = num1 + num2;
        printf("Sum of the two numbers is %d\n", sum);
    } else {
        /* parent: reap the child so it does not become a zombie and its
         * output is not left dangling past our exit */
        printf("My name is %s\n", name);
        waitpid(rc, NULL, 0);
    }
    return 0;
}
the_stack_data/170453034.c
#include <stdio.h>
#include <math.h> /* exp */

/*
 * Continuously compounded interest: a = p * e^(r*t).
 * Reads the principal, the annual rate (in percent) and the number of
 * years from stdin, then prints the compounded total.
 */
int main(void)
{
    double a, p, r;
    int t;

    printf("Enter amount deposited: ");
    if (scanf("%lf", &p) != 1) {  /* reject bad input instead of using an
                                     uninitialized value (undefined behavior) */
        fprintf(stderr, "invalid input\n");
        return 1;
    }

    printf("Enter interest rate: ");
    if (scanf("%lf", &r) != 1) {
        fprintf(stderr, "invalid input\n");
        return 1;
    }

    printf("Enter number of years: ");
    if (scanf("%d", &t) != 1) {
        fprintf(stderr, "invalid input\n");
        return 1;
    }

    r /= 100.0;  /* percent -> fraction */
    a = p * exp(r * t);
    printf("Total compounded: $ %.2lf\n", a);
    return 0;
}
the_stack_data/1080986.c
#include <stdio.h>

/* Demonstrates array initialization and element-by-element access. */
int main(void)
{
    int whole[3] = { 12, 34, 56 };
    double real[] = { 23.4, 56.7, 89.0 };

    printf("%d %d %d\n", whole[0], whole[1], whole[2]);
    printf("%f %f %f\n", real[0], real[1], real[2]);

    return 0;
}
the_stack_data/23574999.c
#include <stdio.h>
#include <sys/types.h>

/* Implementation under test (defined elsewhere, e.g. in assembly). */
extern int64_t test_07(int64_t, int64_t);

/* Reference: ten rounds of "x -= y if x > y, else x += y". */
int64_t test_function(int64_t x, int64_t y)
{
    for (int64_t round = 0; round < 10; round++) {
        x = (x > y) ? x - y : x + y;
    }
    return x;
}

/* Compare test_07 against the reference over 22 input pairs and print a
** CSV-style summary of matches and mismatches. */
int main()
{
    int errors = 0;
    int success = 0;

    for (int64_t a = -2, b = 10; a < 20; a++, b--) {
        if (test_07(a, b) == test_function(a, b)) {
            success++;
        } else {
            errors++;
        }
    }

    printf("success,%d\nerrors,%d\ntotal,%d\n", success, errors, success + errors);
    return 0;
}
the_stack_data/51459.c
/*
** 2015-08-18
**
** The author disclaims copyright to this source code.  In place of
** a legal notice, here is a blessing:
**
**    May you do good and not evil.
**    May you find forgiveness for yourself and forgive others.
**    May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file demonstrates how to create a table-valued-function using
** a virtual table.  This demo implements the generate_series() function
** which gives similar results to the eponymous function in PostgreSQL.
** Examples:
**
**      SELECT * FROM generate_series(0,100,5);
**
** The query above returns integers from 0 through 100 counting by steps
** of 5.
**
**      SELECT * FROM generate_series(0,100);
**
** Integers from 0 through 100 with a step size of 1.
**
**      SELECT * FROM generate_series(20) LIMIT 10;
**
** Integers 20 through 29.
**
** HOW IT WORKS
**
** The generate_series "function" is really a virtual table with the
** following schema:
**
**     CREATE FUNCTION generate_series(
**       value,
**       start HIDDEN,
**       stop HIDDEN,
**       step HIDDEN
**     );
**
** Function arguments in queries against this virtual table are translated
** into equality constraints against successive hidden columns.  In other
** words, the following pairs of queries are equivalent to each other:
**
**    SELECT * FROM generate_series(0,100,5);
**    SELECT * FROM generate_series WHERE start=0 AND stop=100 AND step=5;
**
**    SELECT * FROM generate_series(0,100);
**    SELECT * FROM generate_series WHERE start=0 AND stop=100;
**
**    SELECT * FROM generate_series(20) LIMIT 10;
**    SELECT * FROM generate_series WHERE start=20 LIMIT 10;
**
** The generate_series virtual table implementation leaves the xCreate method
** set to NULL.  This means that it is not possible to do a CREATE VIRTUAL
** TABLE command with "generate_series" as the USING argument.  Instead, there
** is a single generate_series virtual table that is always available without
** having to be created first.
**
** The xBestIndex method looks for equality constraints against the hidden
** start, stop, and step columns, and if present, it uses those constraints
** to bound the sequence of generated values.  If the equality constraints
** are missing, it uses 0 for start, 4294967295 for stop, and 1 for step.
** xBestIndex returns a small cost when both start and stop are available,
** and a very large cost if either start or stop are unavailable.  This
** encourages the query planner to order joins such that the bounds of the
** series are well-defined.
*/
#include "sqlite3ext.h"
SQLITE_EXTENSION_INIT1
#include <assert.h>
#include <string.h>

#ifndef SQLITE_OMIT_VIRTUALTABLE


/* series_cursor is a subclass of sqlite3_vtab_cursor which will
** serve as the underlying representation of a cursor that scans
** over rows of the result
*/
typedef struct series_cursor series_cursor;
struct series_cursor {
  sqlite3_vtab_cursor base;  /* Base class - must be first */
  int isDesc;                /* True to count down rather than up */
  sqlite3_int64 iRowid;      /* The rowid */
  sqlite3_int64 iValue;      /* Current value ("value") */
  sqlite3_int64 mnValue;     /* Minimum value ("start") */
  sqlite3_int64 mxValue;     /* Maximum value ("stop") */
  sqlite3_int64 iStep;       /* Increment ("step") */
};

/*
** The seriesConnect() method is invoked to create a new
** series_vtab that describes the generate_series virtual table.
**
** Think of this routine as the constructor for series_vtab objects.
**
** All this routine needs to do is:
**
**    (1) Allocate the series_vtab object and initialize all fields.
**
**    (2) Tell SQLite (via the sqlite3_declare_vtab() interface) what the
**        result set of queries against generate_series will look like.
*/
static int seriesConnect(
  sqlite3 *db,
  void *pAux,
  int argc, const char *const*argv,
  sqlite3_vtab **ppVtab,
  char **pzErr
){
  sqlite3_vtab *pNew;
  int rc;

/* Column numbers */
#define SERIES_COLUMN_VALUE 0
#define SERIES_COLUMN_START 1
#define SERIES_COLUMN_STOP 2
#define SERIES_COLUMN_STEP 3

  rc = sqlite3_declare_vtab(db,
     "CREATE TABLE x(value,start hidden,stop hidden,step hidden)");
  if( rc==SQLITE_OK ){
    pNew = *ppVtab = sqlite3_malloc( sizeof(*pNew) );
    if( pNew==0 ) return SQLITE_NOMEM;
    memset(pNew, 0, sizeof(*pNew));
  }
  return rc;
}

/*
** This method is the destructor for series_vtab objects (it frees the
** sqlite3_vtab allocated by seriesConnect()).
*/
static int seriesDisconnect(sqlite3_vtab *pVtab){
  sqlite3_free(pVtab);
  return SQLITE_OK;
}

/*
** Constructor for a new series_cursor object.
*/
static int seriesOpen(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){
  series_cursor *pCur;
  pCur = sqlite3_malloc( sizeof(*pCur) );
  if( pCur==0 ) return SQLITE_NOMEM;
  memset(pCur, 0, sizeof(*pCur));
  *ppCursor = &pCur->base;
  return SQLITE_OK;
}

/*
** Destructor for a series_cursor.
*/
static int seriesClose(sqlite3_vtab_cursor *cur){
  sqlite3_free(cur);
  return SQLITE_OK;
}

/*
** Advance a series_cursor to its next row of output.
**
** NOTE(review): iValue +/- iStep is signed 64-bit arithmetic with no
** overflow guard; extreme start/stop/step combinations could overflow —
** confirm against the upstream version before hardening.
*/
static int seriesNext(sqlite3_vtab_cursor *cur){
  series_cursor *pCur = (series_cursor*)cur;
  if( pCur->isDesc ){
    pCur->iValue -= pCur->iStep;
  }else{
    pCur->iValue += pCur->iStep;
  }
  pCur->iRowid++;
  return SQLITE_OK;
}

/*
** Return values of columns for the row at which the series_cursor
** is currently pointing.
*/
static int seriesColumn(
  sqlite3_vtab_cursor *cur,   /* The cursor */
  sqlite3_context *ctx,       /* First argument to sqlite3_result_...() */
  int i                       /* Which column to return */
){
  series_cursor *pCur = (series_cursor*)cur;
  sqlite3_int64 x = 0;
  switch( i ){
    case SERIES_COLUMN_START:  x = pCur->mnValue; break;
    case SERIES_COLUMN_STOP:   x = pCur->mxValue; break;
    case SERIES_COLUMN_STEP:   x = pCur->iStep;   break;
    default:                   x = pCur->iValue;  break;
  }
  sqlite3_result_int64(ctx, x);
  return SQLITE_OK;
}

/*
** Return the rowid for the current row.  In this implementation, the
** rowid is the same as the output value.
*/
static int seriesRowid(sqlite3_vtab_cursor *cur, sqlite_int64 *pRowid){
  series_cursor *pCur = (series_cursor*)cur;
  *pRowid = pCur->iRowid;
  return SQLITE_OK;
}

/*
** Return TRUE if the cursor has been moved off of the last
** row of output.
*/
static int seriesEof(sqlite3_vtab_cursor *cur){
  series_cursor *pCur = (series_cursor*)cur;
  if( pCur->isDesc ){
    return pCur->iValue < pCur->mnValue;
  }else{
    return pCur->iValue > pCur->mxValue;
  }
}

/*
** This method is called to "rewind" the series_cursor object back
** to the first row of output.  This method is always called at least
** once prior to any call to seriesColumn() or seriesRowid() or
** seriesEof().
**
** The query plan selected by seriesBestIndex is passed in the idxNum
** parameter.  (idxStr is not used in this implementation.)  idxNum
** is a bitmask showing which constraints are available:
**
**    1:    start=VALUE
**    2:    stop=VALUE
**    4:    step=VALUE
**
** Also, if bit 8 is set, that means that the series should be output
** in descending order rather than in ascending order.
**
** This routine should initialize the cursor and position it so that it
** is pointing at the first row, or pointing off the end of the table
** (so that seriesEof() will return true) if the table is empty.
*/
static int seriesFilter(
  sqlite3_vtab_cursor *pVtabCursor,
  int idxNum, const char *idxStr,
  int argc, sqlite3_value **argv
){
  series_cursor *pCur = (series_cursor *)pVtabCursor;
  int i = 0;
  /* argv[] holds only the constrained values, in the fixed order
  ** start, stop, step — hence the single advancing index i. */
  if( idxNum & 1 ){
    pCur->mnValue = sqlite3_value_int64(argv[i++]);
  }else{
    pCur->mnValue = 0;
  }
  if( idxNum & 2 ){
    pCur->mxValue = sqlite3_value_int64(argv[i++]);
  }else{
    pCur->mxValue = 0xffffffff;
  }
  if( idxNum & 4 ){
    pCur->iStep = sqlite3_value_int64(argv[i++]);
    if( pCur->iStep<1 ) pCur->iStep = 1;
  }else{
    pCur->iStep = 1;
  }
  if( idxNum & 8 ){
    /* Descending output: start from the largest value <= mxValue that is
    ** reachable from mnValue in iStep increments. */
    pCur->isDesc = 1;
    pCur->iValue = pCur->mxValue;
    if( pCur->iStep>0 ){
      pCur->iValue -= (pCur->mxValue - pCur->mnValue)%pCur->iStep;
    }
  }else{
    pCur->isDesc = 0;
    pCur->iValue = pCur->mnValue;
  }
  pCur->iRowid = 1;
  return SQLITE_OK;
}

/*
** SQLite will invoke this method one or more times while planning a query
** that uses the generate_series virtual table.  This routine needs to create
** a query plan for each invocation and compute an estimated cost for that
** plan.
**
** In this implementation idxNum is used to represent the
** query plan.  idxStr is unused.
**
** The query plan is represented by bits in idxNum:
**
**  (1)  start = $value  -- constraint exists
**  (2)  stop = $value   -- constraint exists
**  (4)  step = $value   -- constraint exists
**  (8)  output in descending order
*/
static int seriesBestIndex(
  sqlite3_vtab *tab,
  sqlite3_index_info *pIdxInfo
){
  int i;                 /* Loop over constraints */
  int idxNum = 0;        /* The query plan bitmask */
  int startIdx = -1;     /* Index of the start= constraint, or -1 if none */
  int stopIdx = -1;      /* Index of the stop= constraint, or -1 if none */
  int stepIdx = -1;      /* Index of the step= constraint, or -1 if none */
  int nArg = 0;          /* Number of arguments that seriesFilter() expects */
  const struct sqlite3_index_constraint *pConstraint;
  pConstraint = pIdxInfo->aConstraint;
  for(i=0; i<pIdxInfo->nConstraint; i++, pConstraint++){
    if( pConstraint->usable==0 ) continue;
    if( pConstraint->op!=SQLITE_INDEX_CONSTRAINT_EQ ) continue;
    switch( pConstraint->iColumn ){
      case SERIES_COLUMN_START:
        startIdx = i;
        idxNum |= 1;
        break;
      case SERIES_COLUMN_STOP:
        stopIdx = i;
        idxNum |= 2;
        break;
      case SERIES_COLUMN_STEP:
        stepIdx = i;
        idxNum |= 4;
        break;
    }
  }
  /* Tell SQLite which constraint values to pass to seriesFilter() and that
  ** it need not re-check them itself (omit=1). */
  if( startIdx>=0 ){
    pIdxInfo->aConstraintUsage[startIdx].argvIndex = ++nArg;
    pIdxInfo->aConstraintUsage[startIdx].omit = 1;
  }
  if( stopIdx>=0 ){
    pIdxInfo->aConstraintUsage[stopIdx].argvIndex = ++nArg;
    pIdxInfo->aConstraintUsage[stopIdx].omit = 1;
  }
  if( stepIdx>=0 ){
    pIdxInfo->aConstraintUsage[stepIdx].argvIndex = ++nArg;
    pIdxInfo->aConstraintUsage[stepIdx].omit = 1;
  }
  if( (idxNum & 3)==3 ){
    /* Both start= and stop= boundaries are available.  This is the
    ** preferred case */
    pIdxInfo->estimatedCost = (double)1;
    pIdxInfo->estimatedRows = 1000;
    if( pIdxInfo->nOrderBy==1 ){
      if( pIdxInfo->aOrderBy[0].desc ) idxNum |= 8;
      pIdxInfo->orderByConsumed = 1;
    }
  }else{
    /* If either boundary is missing, we have to generate a huge span
    ** of numbers.  Make this case very expensive so that the query
    ** planner will work hard to avoid it. */
    pIdxInfo->estimatedCost = (double)2147483647;
    pIdxInfo->estimatedRows = 2147483647;
  }
  pIdxInfo->idxNum = idxNum;
  return SQLITE_OK;
}

/*
** This following structure defines all the methods for the
** generate_series virtual table.
*/
static sqlite3_module seriesModule = {
  0,                         /* iVersion */
  0,                         /* xCreate */
  seriesConnect,             /* xConnect */
  seriesBestIndex,           /* xBestIndex */
  seriesDisconnect,          /* xDisconnect */
  0,                         /* xDestroy */
  seriesOpen,                /* xOpen - open a cursor */
  seriesClose,               /* xClose - close a cursor */
  seriesFilter,              /* xFilter - configure scan constraints */
  seriesNext,                /* xNext - advance a cursor */
  seriesEof,                 /* xEof - check for end of scan */
  seriesColumn,              /* xColumn - read data */
  seriesRowid,               /* xRowid - read data */
  0,                         /* xUpdate */
  0,                         /* xBegin */
  0,                         /* xSync */
  0,                         /* xCommit */
  0,                         /* xRollback */
  0,                         /* xFindMethod */
  0,                         /* xRename */
};

#endif /* SQLITE_OMIT_VIRTUALTABLE */

/* Extension entry point: registers the generate_series module.
** Requires SQLite 3.8.12+ for eponymous-virtual-table support. */
#ifdef _WIN32
__declspec(dllexport)
#endif
int sqlite3_series_init(
  sqlite3 *db,
  char **pzErrMsg,
  const sqlite3_api_routines *pApi
){
  int rc = SQLITE_OK;
  SQLITE_EXTENSION_INIT2(pApi);
#ifndef SQLITE_OMIT_VIRTUALTABLE
  if( sqlite3_libversion_number()<3008012 ){
    *pzErrMsg = sqlite3_mprintf(
        "generate_series() requires SQLite 3.8.12 or later");
    return SQLITE_ERROR;
  }
  rc = sqlite3_create_module(db, "generate_series", &seriesModule, 0);
#endif
  return rc;
}