Dataset schema (column name, type, and value range as reported by the viewer):

  func        string   length 0 - 484k   (function source code)
  target      int64    0 - 1
  cwe         list     length 0 - 4      (CWE identifiers)
  project     string   799 distinct values
  commit_id   string   length 40         (commit SHA-1)
  hash        float64  1,215,700,430,453,689,100,000,000 - 340,281,914,521,452,260,000,000,000,000
  size        int64    1 - 24k
  message     string   length 0 - 13.3k  (commit message)

Sample rows follow, one record per block: func first, then the labeled metadata fields, then the commit message.
static int parse_unc(const char *unc_name, struct parsed_mount_info *parsed_info)
{
	int length = strnlen(unc_name, MAX_UNC_LEN);
	const char *host, *share, *prepath;
	size_t hostlen, sharelen, prepathlen;

	if (length > (MAX_UNC_LEN - 1)) {
		fprintf(stderr, "mount error: UNC name too long\n");
		return EX_USAGE;
	}

	if (length < 3) {
		fprintf(stderr, "mount error: UNC name too short\n");
		return EX_USAGE;
	}

	if ((strncasecmp("cifs://", unc_name, 7) == 0) ||
	    (strncasecmp("smb://", unc_name, 6) == 0)) {
		fprintf(stderr, "Mounting cifs URL not implemented yet. Attempt to mount %s\n", unc_name);
		return EX_USAGE;
	}

	/* Set up "host" and "share" pointers based on UNC format. */
	if (strncmp(unc_name, "//", 2) && strncmp(unc_name, "\\\\", 2)) {
		/*
		 * check for nfs syntax (server:/share/prepath)
		 *
		 * FIXME: IPv6 addresses?
		 */
		host = unc_name;
		share = strchr(host, ':');
		if (!share) {
			fprintf(stderr, "mount.cifs: bad UNC (%s)\n", unc_name);
			return EX_USAGE;
		}
		hostlen = share - host;
		share++;
		if (*share == '/')
			++share;
	} else {
		host = unc_name + 2;
		hostlen = strcspn(host, "/\\");
		if (!hostlen) {
			fprintf(stderr, "mount.cifs: bad UNC (%s)\n", unc_name);
			return EX_USAGE;
		}
		share = host + hostlen + 1;
	}

	if (hostlen + 1 > sizeof(parsed_info->host)) {
		fprintf(stderr, "mount.cifs: host portion of UNC too long\n");
		return EX_USAGE;
	}

	sharelen = strcspn(share, "/\\");
	if (sharelen + 1 > sizeof(parsed_info->share)) {
		fprintf(stderr, "mount.cifs: share portion of UNC too long\n");
		return EX_USAGE;
	}

	prepath = share + sharelen;
	if (*prepath != '\0')
		prepath++;
	prepathlen = strlen(prepath);

	if (prepathlen + 1 > sizeof(parsed_info->prefix)) {
		fprintf(stderr, "mount.cifs: UNC prefixpath too long\n");
		return EX_USAGE;
	}

	/* copy pieces into their respective buffers */
	memcpy(parsed_info->host, host, hostlen);
	memcpy(parsed_info->share, share, sharelen);
	memcpy(parsed_info->prefix, prepath, prepathlen);

	return 0;
}
target: 0
cwe: [ "CWE-20" ]
project: cifs-utils
commit_id: f6eae44a3d05b6515a59651e6bed8b6dde689aec
hash: 178,111,741,943,109,670,000,000,000,000,000,000,000
size: 80
message:
mtab: handle ENOSPC/EFBIG condition properly when altering mtab It's possible that when mount.cifs goes to append the mtab that there won't be enough space to do so, and the mntent won't be appended to the file in its entirety. Add a my_endmntent routine that will fflush and then fsync the FILE if that succeeds. If either fails then it will truncate the file back to its provided size. It will then call endmntent unconditionally. Have add_mtab call fstat on the opened mtab file in order to get the size of the file before it has been appended. Assuming that that succeeds, use my_endmntent to ensure that the file is not corrupted before closing it. It's possible that we'll have a small race window where the mtab is incorrect, but it should be quickly corrected. This was reported some time ago as CVE-2011-1678: http://openwall.com/lists/oss-security/2011/03/04/9 ...and it seems to fix the reproducer that I was able to come up with. Signed-off-by: Jeff Layton <[email protected]> Reviewed-by: Suresh Jayaraman <[email protected]>
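Note on this record's fix: the message above specifies the new helper's behavior precisely (fflush, then fsync, truncate back to the pre-append size on any failure, endmntent unconditionally). A minimal sketch under those rules follows; it is illustrative, not the actual cifs-utils code.

#include <stdio.h>
#include <unistd.h>
#include <mntent.h>
#include <sys/types.h>

/* Sketch only: flush and fsync the appended mtab; on failure, truncate
 * back to the size recorded before the append so no partial mntent
 * survives. endmntent() runs unconditionally so the stream is always
 * closed. */
static int my_endmntent(FILE *pmntfile, off_t size)
{
	int rc = 0;
	int fd = fileno(pmntfile);

	if (fd < 0 || fflush(pmntfile) != 0 || fsync(fd) != 0)
		rc = -1;
	if (rc != 0 && fd >= 0)
		(void) ftruncate(fd, size);   /* roll back the partial append */
	endmntent(pmntfile);                  /* always close */
	return rc;
}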
enum_func_status php_mysqlnd_rowp_read_text_protocol_aux(MYSQLND_MEMORY_POOL_CHUNK * row_buffer, zval ** fields, unsigned int field_count, const MYSQLND_FIELD * fields_metadata, zend_bool as_int_or_float, zend_bool copy_data, MYSQLND_STATS * stats TSRMLS_DC) { unsigned int i; zend_bool last_field_was_string = FALSE; zval **current_field, **end_field, **start_field; zend_uchar * p = row_buffer->ptr; size_t data_size = row_buffer->app; zend_uchar * bit_area = (zend_uchar*) row_buffer->ptr + data_size + 1; /* we allocate from here */ const zend_uchar * const packet_end = (zend_uchar*) row_buffer->ptr + data_size; DBG_ENTER("php_mysqlnd_rowp_read_text_protocol_aux"); if (!fields) { DBG_RETURN(FAIL); } end_field = (start_field = fields) + field_count; for (i = 0, current_field = start_field; current_field < end_field; current_field++, i++) { DBG_INF("Directly creating zval"); MAKE_STD_ZVAL(*current_field); if (!*current_field) { DBG_RETURN(FAIL); } } for (i = 0, current_field = start_field; current_field < end_field; current_field++, i++) { /* Don't reverse the order. It is significant!*/ zend_uchar *this_field_len_pos = p; /* php_mysqlnd_net_field_length() call should be after *this_field_len_pos = p; */ const unsigned long len = php_mysqlnd_net_field_length(&p); if (len != MYSQLND_NULL_LENGTH && ((p + len) > packet_end)) { php_error_docref(NULL, E_WARNING, "Malformed server packet. Field length pointing "MYSQLND_SZ_T_SPEC " bytes after end of packet", (p + len) - packet_end - 1); DBG_RETURN(FAIL); } if (copy_data == FALSE && current_field > start_field && last_field_was_string) { /* Normal queries: We have to put \0 now to the end of the previous field, if it was a string. IS_NULL doesn't matter. Because we have already read our length, then we can overwrite it in the row buffer. This statement terminates the previous field, not the current one. NULL_LENGTH is encoded in one byte, so we can stick a \0 there. Any string's length is encoded in at least one byte, so we can stick a \0 there. */ *this_field_len_pos = '\0'; } /* NULL or NOT NULL, this is the question! 
*/ if (len == MYSQLND_NULL_LENGTH) { ZVAL_NULL(*current_field); last_field_was_string = FALSE; } else { #if defined(MYSQLND_STRING_TO_INT_CONVERSION) struct st_mysqlnd_perm_bind perm_bind = mysqlnd_ps_fetch_functions[fields_metadata[i].type]; #endif if (MYSQLND_G(collect_statistics)) { enum_mysqlnd_collected_stats statistic; switch (fields_metadata[i].type) { case MYSQL_TYPE_DECIMAL: statistic = STAT_TEXT_TYPE_FETCHED_DECIMAL; break; case MYSQL_TYPE_TINY: statistic = STAT_TEXT_TYPE_FETCHED_INT8; break; case MYSQL_TYPE_SHORT: statistic = STAT_TEXT_TYPE_FETCHED_INT16; break; case MYSQL_TYPE_LONG: statistic = STAT_TEXT_TYPE_FETCHED_INT32; break; case MYSQL_TYPE_FLOAT: statistic = STAT_TEXT_TYPE_FETCHED_FLOAT; break; case MYSQL_TYPE_DOUBLE: statistic = STAT_TEXT_TYPE_FETCHED_DOUBLE; break; case MYSQL_TYPE_NULL: statistic = STAT_TEXT_TYPE_FETCHED_NULL; break; case MYSQL_TYPE_TIMESTAMP: statistic = STAT_TEXT_TYPE_FETCHED_TIMESTAMP; break; case MYSQL_TYPE_LONGLONG: statistic = STAT_TEXT_TYPE_FETCHED_INT64; break; case MYSQL_TYPE_INT24: statistic = STAT_TEXT_TYPE_FETCHED_INT24; break; case MYSQL_TYPE_DATE: statistic = STAT_TEXT_TYPE_FETCHED_DATE; break; case MYSQL_TYPE_TIME: statistic = STAT_TEXT_TYPE_FETCHED_TIME; break; case MYSQL_TYPE_DATETIME: statistic = STAT_TEXT_TYPE_FETCHED_DATETIME; break; case MYSQL_TYPE_YEAR: statistic = STAT_TEXT_TYPE_FETCHED_YEAR; break; case MYSQL_TYPE_NEWDATE: statistic = STAT_TEXT_TYPE_FETCHED_DATE; break; case MYSQL_TYPE_VARCHAR: statistic = STAT_TEXT_TYPE_FETCHED_STRING; break; case MYSQL_TYPE_BIT: statistic = STAT_TEXT_TYPE_FETCHED_BIT; break; case MYSQL_TYPE_NEWDECIMAL: statistic = STAT_TEXT_TYPE_FETCHED_DECIMAL; break; case MYSQL_TYPE_ENUM: statistic = STAT_TEXT_TYPE_FETCHED_ENUM; break; case MYSQL_TYPE_SET: statistic = STAT_TEXT_TYPE_FETCHED_SET; break; case MYSQL_TYPE_JSON: statistic = STAT_TEXT_TYPE_FETCHED_JSON; break; case MYSQL_TYPE_TINY_BLOB: statistic = STAT_TEXT_TYPE_FETCHED_BLOB; break; case MYSQL_TYPE_MEDIUM_BLOB:statistic = STAT_TEXT_TYPE_FETCHED_BLOB; break; case MYSQL_TYPE_LONG_BLOB: statistic = STAT_TEXT_TYPE_FETCHED_BLOB; break; case MYSQL_TYPE_BLOB: statistic = STAT_TEXT_TYPE_FETCHED_BLOB; break; case MYSQL_TYPE_VAR_STRING: statistic = STAT_TEXT_TYPE_FETCHED_STRING; break; case MYSQL_TYPE_STRING: statistic = STAT_TEXT_TYPE_FETCHED_STRING; break; case MYSQL_TYPE_GEOMETRY: statistic = STAT_TEXT_TYPE_FETCHED_GEOMETRY; break; default: statistic = STAT_TEXT_TYPE_FETCHED_OTHER; break; } MYSQLND_INC_CONN_STATISTIC_W_VALUE2(stats, statistic, 1, STAT_BYTES_RECEIVED_PURE_DATA_TEXT, len); } #ifdef MYSQLND_STRING_TO_INT_CONVERSION if (as_int_or_float && perm_bind.php_type == IS_LONG) { zend_uchar save = *(p + len); /* We have to make it ASCIIZ temporarily */ *(p + len) = '\0'; if (perm_bind.pack_len < SIZEOF_LONG) { /* direct conversion */ int64_t v = #ifndef PHP_WIN32 atoll((char *) p); #else _atoi64((char *) p); #endif ZVAL_LONG(*current_field, (long) v); /* the cast is safe */ } else { uint64_t v = #ifndef PHP_WIN32 (uint64_t) atoll((char *) p); #else (uint64_t) _atoi64((char *) p); #endif zend_bool uns = fields_metadata[i].flags & UNSIGNED_FLAG? 
TRUE:FALSE; /* We have to make it ASCIIZ temporarily */ #if SIZEOF_LONG==8 if (uns == TRUE && v > 9223372036854775807L) #elif SIZEOF_LONG==4 if ((uns == TRUE && v > L64(2147483647)) || (uns == FALSE && (( L64(2147483647) < (int64_t) v) || (L64(-2147483648) > (int64_t) v)))) #else #error Need fix for this architecture #endif /* SIZEOF */ { ZVAL_STRINGL(*current_field, (char *)p, len, 0); } else { ZVAL_LONG(*current_field, (long) v); /* the cast is safe */ } } *(p + len) = save; } else if (as_int_or_float && perm_bind.php_type == IS_DOUBLE) { zend_uchar save = *(p + len); /* We have to make it ASCIIZ temporarily */ *(p + len) = '\0'; ZVAL_DOUBLE(*current_field, atof((char *) p)); *(p + len) = save; } else #endif /* MYSQLND_STRING_TO_INT_CONVERSION */ if (fields_metadata[i].type == MYSQL_TYPE_BIT) { /* BIT fields are specially handled. As they come as bit mask, we have to convert it to human-readable representation. As the bits take less space in the protocol than the numbers they represent, we don't have enough space in the packet buffer to overwrite inside. Thus, a bit more space is pre-allocated at the end of the buffer, see php_mysqlnd_rowp_read(). And we add the strings at the end. Definitely not nice, _hackish_ :(, but works. */ zend_uchar *start = bit_area; ps_fetch_from_1_to_8_bytes(*current_field, &(fields_metadata[i]), 0, &p, len TSRMLS_CC); /* We have advanced in ps_fetch_from_1_to_8_bytes. We should go back because later in this function there will be an advancement. */ p -= len; if (Z_TYPE_PP(current_field) == IS_LONG) { bit_area += 1 + sprintf((char *)start, "%ld", Z_LVAL_PP(current_field)); ZVAL_STRINGL(*current_field, (char *) start, bit_area - start - 1, copy_data); } else if (Z_TYPE_PP(current_field) == IS_STRING){ memcpy(bit_area, Z_STRVAL_PP(current_field), Z_STRLEN_PP(current_field)); bit_area += Z_STRLEN_PP(current_field); *bit_area++ = '\0'; zval_dtor(*current_field); ZVAL_STRINGL(*current_field, (char *) start, bit_area - start - 1, copy_data); } } else { ZVAL_STRINGL(*current_field, (char *)p, len, copy_data); } p += len; last_field_was_string = TRUE; } } if (copy_data == FALSE && last_field_was_string) { /* Normal queries: The buffer has one more byte at the end, because we need it */ row_buffer->ptr[data_size] = '\0'; } DBG_RETURN(PASS);
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: php-src
commit_id: 28f80baf3c53e267c9ce46a2a0fadbb981585132
hash: 325,703,409,906,620,970,000,000,000,000,000,000,000
size: 188
message:
Fix bug #72293 - Heap overflow in mysqlnd related to BIT fields
static Image *ReadRLEImage(const ImageInfo *image_info,ExceptionInfo *exception) { #define SkipLinesOp 0x01 #define SetColorOp 0x02 #define SkipPixelsOp 0x03 #define ByteDataOp 0x05 #define RunDataOp 0x06 #define EOFOp 0x07 #define ThrowRLEException(exception,message) \ { \ if (colormap != (unsigned char *) NULL) \ colormap=(unsigned char *) RelinquishMagickMemory(colormap); \ if (pixel_info != (MemoryInfo *) NULL) \ pixel_info=RelinquishVirtualMemory(pixel_info); \ ThrowReaderException((exception),(message)); \ } char magick[12]; Image *image; int opcode, operand, status; MagickStatusType flags; MagickSizeType number_pixels; MemoryInfo *pixel_info; Quantum index; register ssize_t x; register Quantum *q; register ssize_t i; register unsigned char *p; size_t bits_per_pixel, map_length, number_colormaps, number_planes, number_planes_filled, one, pixel_info_length; ssize_t count, offset, y; unsigned char background_color[256], *colormap, pixel, plane, *pixels; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) return(DestroyImageList(image)); /* Determine if this a RLE file. */ colormap=(unsigned char *) NULL; pixel_info=(MemoryInfo *) NULL; count=ReadBlob(image,2,(unsigned char *) magick); if ((count != 2) || (memcmp(magick,"\122\314",2) != 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); do { /* Read image header. */ image->page.x=(ssize_t) ReadBlobLSBShort(image); image->page.y=(ssize_t) ReadBlobLSBShort(image); image->columns=ReadBlobLSBShort(image); image->rows=ReadBlobLSBShort(image); flags=(MagickStatusType) ReadBlobByte(image); image->alpha_trait=flags & 0x04 ? BlendPixelTrait : UndefinedPixelTrait; number_planes=(size_t) ReadBlobByte(image); bits_per_pixel=(size_t) ReadBlobByte(image); number_colormaps=(size_t) ReadBlobByte(image); map_length=(unsigned char) ReadBlobByte(image); if (map_length >= 22) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (EOFBlob(image) != MagickFalse) ThrowRLEException(CorruptImageError,"UnexpectedEndOfFile"); one=1; map_length=one << map_length; if ((number_planes == 0) || (number_planes == 2) || ((flags & 0x04) && (number_colormaps > 254)) || (bits_per_pixel != 8) || (image->columns == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (flags & 0x02) { /* No background color-- initialize to black. */ for (i=0; i < (ssize_t) number_planes; i++) background_color[i]=0; (void) ReadBlobByte(image); } else { /* Initialize background color. */ p=background_color; for (i=0; i < (ssize_t) number_planes; i++) *p++=(unsigned char) ReadBlobByte(image); } if ((number_planes & 0x01) == 0) (void) ReadBlobByte(image); if (EOFBlob(image) != MagickFalse) ThrowRLEException(CorruptImageError,"UnexpectedEndOfFile"); if (number_colormaps != 0) { /* Read image colormaps. 
*/ colormap=(unsigned char *) AcquireQuantumMemory(number_colormaps, 3*map_length*sizeof(*colormap)); if (colormap == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); p=colormap; for (i=0; i < (ssize_t) number_colormaps; i++) for (x=0; x < (ssize_t) map_length; x++) { *p++=(unsigned char) ScaleQuantumToChar(ScaleShortToQuantum( ReadBlobLSBShort(image))); if (EOFBlob(image) != MagickFalse) ThrowRLEException(CorruptImageError,"UnexpectedEndOfFile"); } } if ((flags & 0x08) != 0) { char *comment; size_t length; /* Read image comment. */ length=ReadBlobLSBShort(image); if (length != 0) { comment=(char *) AcquireQuantumMemory(length,sizeof(*comment)); if (comment == (char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,length-1,(unsigned char *) comment); comment[length-1]='\0'; (void) SetImageProperty(image,"comment",comment,exception); comment=DestroyString(comment); if ((length & 0x01) == 0) (void) ReadBlobByte(image); } } if (EOFBlob(image) != MagickFalse) ThrowRLEException(CorruptImageError,"UnexpectedEndOfFile"); if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); /* Allocate RLE pixels. */ if (image->alpha_trait != UndefinedPixelTrait) number_planes++; number_pixels=(MagickSizeType) image->columns*image->rows; number_planes_filled=(number_planes % 2 == 0) ? number_planes : number_planes+1; if ((number_pixels*number_planes_filled) != (size_t) (number_pixels* number_planes_filled)) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); pixel_info=AcquireVirtualMemory(image->columns,image->rows* MagickMax(number_planes_filled,4)*sizeof(*pixels)); if (pixel_info == (MemoryInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); pixel_info_length=image->columns*image->rows* MagickMax(number_planes_filled,4); pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); (void) ResetMagickMemory(pixels,0,pixel_info_length); if ((flags & 0x01) && !(flags & 0x02)) { ssize_t j; /* Set background color. */ p=pixels; for (i=0; i < (ssize_t) number_pixels; i++) { if (image->alpha_trait == UndefinedPixelTrait) for (j=0; j < (ssize_t) number_planes; j++) *p++=background_color[j]; else { for (j=0; j < (ssize_t) (number_planes-1); j++) *p++=background_color[j]; *p++=0; /* initialize matte channel */ } } } /* Read runlength-encoded image. 
*/ plane=0; x=0; y=0; opcode=ReadBlobByte(image); if (opcode == EOF) ThrowRLEException(CorruptImageError,"UnexpectedEndOfFile"); do { switch (opcode & 0x3f) { case SkipLinesOp: { operand=ReadBlobByte(image); if (opcode == EOF) ThrowRLEException(CorruptImageError,"UnexpectedEndOfFile"); if (opcode & 0x40) { operand=ReadBlobLSBSignedShort(image); if (opcode == EOF) ThrowRLEException(CorruptImageError,"UnexpectedEndOfFile"); } x=0; y+=operand; break; } case SetColorOp: { operand=ReadBlobByte(image); if (opcode == EOF) ThrowRLEException(CorruptImageError,"UnexpectedEndOfFile"); plane=(unsigned char) operand; if (plane == 255) plane=(unsigned char) (number_planes-1); x=0; break; } case SkipPixelsOp: { operand=ReadBlobByte(image); if (opcode == EOF) ThrowRLEException(CorruptImageError,"UnexpectedEndOfFile"); if (opcode & 0x40) { operand=ReadBlobLSBSignedShort(image); if (opcode == EOF) ThrowRLEException(CorruptImageError,"UnexpectedEndOfFile"); } x+=operand; break; } case ByteDataOp: { operand=ReadBlobByte(image); if (opcode == EOF) ThrowRLEException(CorruptImageError,"UnexpectedEndOfFile"); if (opcode & 0x40) { operand=ReadBlobLSBSignedShort(image); if (opcode == EOF) ThrowRLEException(CorruptImageError,"UnexpectedEndOfFile"); } offset=(ssize_t) (((image->rows-y-1)*image->columns*number_planes)+x* number_planes+plane); operand++; if ((offset < 0) || ((offset+operand*number_planes) > (ssize_t) pixel_info_length)) { if (number_colormaps != 0) colormap=(unsigned char *) RelinquishMagickMemory(colormap); pixel_info=RelinquishVirtualMemory(pixel_info); ThrowReaderException(CorruptImageError,"UnableToReadImageData"); } p=pixels+offset; for (i=0; i < (ssize_t) operand; i++) { pixel=(unsigned char) ReadBlobByte(image); if ((y < (ssize_t) image->rows) && ((x+i) < (ssize_t) image->columns)) *p=pixel; p+=number_planes; } if (operand & 0x01) (void) ReadBlobByte(image); x+=operand; break; } case RunDataOp: { operand=ReadBlobByte(image); if (opcode == EOF) ThrowRLEException(CorruptImageError,"UnexpectedEndOfFile"); if (opcode & 0x40) { operand=ReadBlobLSBSignedShort(image); if (opcode == EOF) ThrowRLEException(CorruptImageError,"UnexpectedEndOfFile"); } pixel=(unsigned char) ReadBlobByte(image); (void) ReadBlobByte(image); offset=(ssize_t) (((image->rows-y-1)*image->columns*number_planes)+x* number_planes+plane); operand++; if ((offset < 0) || ((offset+operand*number_planes) > (ssize_t) pixel_info_length)) { if (number_colormaps != 0) colormap=(unsigned char *) RelinquishMagickMemory(colormap); pixel_info=RelinquishVirtualMemory(pixel_info); ThrowReaderException(CorruptImageError,"UnableToReadImageData"); } p=pixels+offset; for (i=0; i < (ssize_t) operand; i++) { if ((y < (ssize_t) image->rows) && ((x+i) < (ssize_t) image->columns)) *p=pixel; p+=number_planes; } x+=operand; break; } default: break; } opcode=ReadBlobByte(image); if (opcode == EOF) ThrowRLEException(CorruptImageError,"UnexpectedEndOfFile"); } while (((opcode & 0x3f) != EOFOp) && (opcode != EOF)); if (number_colormaps != 0) { MagickStatusType mask; /* Apply colormap affineation to image. 
*/ mask=(MagickStatusType) (map_length-1); p=pixels; x=(ssize_t) number_planes; if (number_colormaps == 1) for (i=0; i < (ssize_t) number_pixels; i++) { ValidateColormapValue(image,(ssize_t) (*p & mask),&index,exception); *p=colormap[(ssize_t) index]; p++; } else if ((number_planes >= 3) && (number_colormaps >= 3)) for (i=0; i < (ssize_t) number_pixels; i++) for (x=0; x < (ssize_t) number_planes; x++) { ValidateColormapValue(image,(ssize_t) (x*map_length+ (*p & mask)),&index,exception); *p=colormap[(ssize_t) index]; p++; } if ((i < (ssize_t) number_pixels) || (x < (ssize_t) number_planes)) { colormap=(unsigned char *) RelinquishMagickMemory(colormap); pixel_info=RelinquishVirtualMemory(pixel_info); ThrowReaderException(CorruptImageError,"UnableToReadImageData"); } } /* Initialize image structure. */ if (number_planes >= 3) { /* Convert raster image to DirectClass pixel packets. */ p=pixels; for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(image,ScaleCharToQuantum(*p++),q); SetPixelGreen(image,ScaleCharToQuantum(*p++),q); SetPixelBlue(image,ScaleCharToQuantum(*p++),q); if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,ScaleCharToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } else { /* Create colormap. */ if (number_colormaps == 0) map_length=256; if (AcquireImageColormap(image,map_length,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); p=colormap; if (number_colormaps == 1) for (i=0; i < (ssize_t) image->colors; i++) { /* Pseudocolor. */ image->colormap[i].red=(MagickRealType) ScaleCharToQuantum((unsigned char) i); image->colormap[i].green=(MagickRealType) ScaleCharToQuantum((unsigned char) i); image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum((unsigned char) i); } else if (number_colormaps > 1) for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(*p); image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(*(p+map_length)); image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(*(p+map_length*2)); p++; } p=pixels; if (image->alpha_trait == UndefinedPixelTrait) { /* Convert raster image to PseudoClass pixel packets. */ for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(image,(Quantum) *p++,q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } (void) SyncImage(image,exception); } else { /* Image has a matte channel-- promote to DirectClass. 
*/ for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { ValidateColormapValue(image,(ssize_t) *p++,&index,exception); SetPixelRed(image,ClampToQuantum(image->colormap[(ssize_t) index].red),q); ValidateColormapValue(image,(ssize_t) *p++,&index,exception); SetPixelGreen(image,ClampToQuantum(image->colormap[(ssize_t) index].green),q); ValidateColormapValue(image,(ssize_t) *p++,&index,exception); SetPixelBlue(image,ClampToQuantum(image->colormap[(ssize_t) index].blue),q); SetPixelAlpha(image,ScaleCharToQuantum(*p++),q); q+=GetPixelChannels(image); } if (x < (ssize_t) image->columns) break; if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } image->colormap=(PixelInfo *) RelinquishMagickMemory( image->colormap); image->storage_class=DirectClass; image->colors=0; } } if (number_colormaps != 0) colormap=(unsigned char *) RelinquishMagickMemory(colormap); pixel_info=RelinquishVirtualMemory(pixel_info); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; (void) ReadBlobByte(image); count=ReadBlob(image,2,(unsigned char *) magick); if ((count != 0) && (memcmp(magick,"\122\314",2) == 0)) { /* Allocate next image structure. */ AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } while ((count != 0) && (memcmp(magick,"\122\314",2) == 0)); (void) CloseBlob(image); return(GetFirstImageInList(image)); }
target: 1
cwe: []
project: ImageMagick
commit_id: 86cb33143c5b21912187403860a7c26761a3cd23
hash: 258,602,064,411,101,930,000,000,000,000,000,000,000
size: 578
message:
https://github.com/ImageMagick/ImageMagick/issues/502
schedule_folder_update (CamelEwsStore *ews_store, GHashTable *folder_ids)
{
	struct ScheduleUpdateData *sud;
	CamelSettings *settings;

	g_return_if_fail (ews_store != NULL);
	g_return_if_fail (ews_store->priv != NULL);

	UPDATE_LOCK (ews_store);
	g_hash_table_foreach (folder_ids, get_folder_names_to_update, ews_store);

	if (ews_store->priv->update_folder_names == NULL)
		goto exit;

	sud = g_new0 (struct ScheduleUpdateData, 1);
	sud->ews_store = g_object_ref (ews_store);
	sud->cancellable = g_object_ref (ews_store->priv->updates_cancellable);

	if (ews_store->priv->update_folder_id > 0)
		g_source_remove (ews_store->priv->update_folder_id);

	settings = camel_service_ref_settings (CAMEL_SERVICE (ews_store));

	ews_store->priv->update_folder_id = e_named_timeout_add_seconds_full (
		G_PRIORITY_LOW, 1, folder_update_cb, sud, free_schedule_update_data);
	sud->expected_id = ews_store->priv->update_folder_id;

	g_object_unref (settings);

 exit:
	UPDATE_UNLOCK (ews_store);
}
target: 0
cwe: [ "CWE-295" ]
project: evolution-ews
commit_id: 915226eca9454b8b3e5adb6f2fff9698451778de
hash: 199,149,153,936,077,100,000,000,000,000,000,000,000
size: 37
message:
I#27 - SSL Certificates are not validated This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too. Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
void sctp_generate_proto_unreach_event(unsigned long data)
{
	struct sctp_transport *transport = (struct sctp_transport *) data;
	struct sctp_association *asoc = transport->asoc;
	struct net *net = sock_net(asoc->base.sk);

	sctp_bh_lock_sock(asoc->base.sk);
	if (sock_owned_by_user(asoc->base.sk)) {
		SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->proto_unreach_timer, jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this structure just waiting around for us to actually
	 * get destroyed?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	sctp_do_sm(net, SCTP_EVENT_T_OTHER,
		   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);

out_unlock:
	sctp_bh_unlock_sock(asoc->base.sk);
	sctp_association_put(asoc);
}
target: 0
cwe: []
project: linux
commit_id: 196d67593439b03088913227093e374235596e33
hash: 27,126,248,455,742,747,000,000,000,000,000,000,000
size: 31
message:
sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call The current SCTP stack is lacking a mechanism to have per association statistics. This is an implementation modeled after OpenSolaris' SCTP_GET_ASSOC_STATS. Userspace part will follow on lksctp if/when there is a general ACK on this. V4: - Move ipackets++ before q->immediate.func() for consistency reasons - Move sctp_max_rto() at the end of sctp_transport_update_rto() to avoid returning bogus RTO values - return asoc->rto_min when max_obs_rto value has not changed V3: - Increase ictrlchunks in sctp_assoc_bh_rcv() as well - Move ipackets++ to sctp_inq_push() - return 0 when no rto updates took place since the last call V2: - Implement partial retrieval of stat struct to cope for future expansion - Kill the rtxpackets counter as it cannot be precise anyway - Rename outseqtsns to outofseqtsns to make it clearer that these are out of sequence unexpected TSNs - Move asoc->ipackets++ under a lock to avoid potential miscounts - Fold asoc->opackets++ into the already existing asoc check - Kill unneeded (q->asoc) test when increasing rtxchunks - Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0) - Don't count SHUTDOWNs as SACKs - Move SCTP_GET_ASSOC_STATS to the private space API - Adjust the len check in sctp_getsockopt_assoc_stats() to allow for future struct growth - Move association statistics in their own struct - Update idupchunks when we send a SACK with dup TSNs - return min_rto in max_rto when RTO has not changed. Also return the transport when max_rto last changed. Signed-off: Michele Baldessari <[email protected]> Acked-by: Vlad Yasevich <[email protected]> Signed-off-by: David S. Miller <[email protected]>
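Note on this record: the changelog above describes a getsockopt-based retrieval with partial-struct support for future growth. A hedged userspace sketch of how such a call is typically made; the struct and option names are written from memory of the final API and should be checked against linux/sctp.h.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>   /* lksctp-tools; assumed to define the stats struct */

/* Sketch: fetch per-association stats for 'assoc_id' on socket 'fd'.
 * Passing a shorter 'len' is how the described partial retrieval copes
 * with future growth of the struct. */
static int fetch_assoc_stats(int fd, sctp_assoc_t assoc_id)
{
	struct sctp_assoc_stats stats;
	socklen_t len = sizeof(stats);

	memset(&stats, 0, sizeof(stats));
	stats.sas_assoc_id = assoc_id;
	return getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_STATS, &stats, &len);
}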
int ns_msg_getflag(ns_msg handle, int flag)
{
	return ((handle)._flags & _ns_flagdata[flag].mask) >> _ns_flagdata[flag].shift;
}
target: 0
cwe: [ "CWE-79" ]
project: uclibc-ng
commit_id: 0f822af0445e5348ce7b7bd8ce1204244f31d174
hash: 910,185,736,537,334,700,000,000,000,000,000,000
size: 4
message:
libc/inet/resolv.c: add __hnbad to check DNS entries for validity… … using the same rules glibc does also call __hnbad in some places to check answers
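Note on this record: the message only names the rule source ("the same rules glibc does"). An illustrative sanity check of the same flavor follows; the actual __hnbad character set and label rules may well differ in detail (glibc's res_hnok also validates label structure).

#include <string.h>

/* Illustrative only: flag a hostname as bad if it contains any byte
 * outside a conservative [A-Za-z0-9.-] alphabet. */
static int hostname_bad(const char *name)
{
	static const char ok[] =
		"abcdefghijklmnopqrstuvwxyz"
		"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
		"0123456789.-";

	return name[strspn(name, ok)] != '\0';
}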
static int llc_ui_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int rc = -ENOTCONN;

	lock_sock(sk);
	if (unlikely(sk->sk_state != TCP_ESTABLISHED))
		goto out;
	rc = -EINVAL;
	if (how != 2)
		goto out;
	rc = llc_send_disc(sk);
	if (!rc)
		rc = llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);
out:
	release_sock(sk);
	return rc;
}
target: 0
cwe: [ "CWE-200" ]
project: linux-2.6
commit_id: 28e9fc592cb8c7a43e4d3147b38be6032a0e81bc
hash: 275,405,257,452,466,100,000,000,000,000,000,000,000
size: 20
message:
NET: llc, zero sockaddr_llc struct sllc_arphrd member of sockaddr_llc might not be changed. Zero sllc before copying to the above layer's structure. Signed-off-by: Jiri Slaby <[email protected]> Signed-off-by: David S. Miller <[email protected]>
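Note on this record: the fix described lives in the address-reporting path (llc_ui_getname(), not the shutdown function shown in this row). The pattern is a one-line zeroing of the stack struct before any members are filled in; a sketch with an abbreviated body:

static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddrlen, int peer)
{
	struct sockaddr_llc sllc;

	memset(&sllc, 0, sizeof(sllc));  /* the added line: zero padding and
					  * any members left unset below */
	/* ... fill sllc.sllc_family, sllc.sllc_arphrd, addresses ... */
	memcpy(uaddr, &sllc, sizeof(sllc));
	*uaddrlen = sizeof(sllc);
	return 0;
}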
int kvm_arch_hardware_setup(void)
{
	return kvm_x86_ops->hardware_setup();
}
target: 0
cwe: [ "CWE-476" ]
project: linux-2.6
commit_id: 59839dfff5eabca01cc4e20b45797a60a80af8cb
hash: 37,997,309,324,738,405,000,000,000,000,000,000,000
size: 4
message:
KVM: x86: check for cr3 validity in ioctl_set_sregs Matt T. Yourst notes that kvm_arch_vcpu_ioctl_set_sregs lacks validity checking for the new cr3 value: "Userspace callers of KVM_SET_SREGS can pass a bogus value of cr3 to the kernel. This will trigger a NULL pointer access in gfn_to_rmap() when userspace next tries to call KVM_RUN on the affected VCPU and kvm attempts to activate the new non-existent page table root. This happens since kvm only validates that cr3 points to a valid guest physical memory page when code *inside* the guest sets cr3. However, kvm currently trusts the userspace caller (e.g. QEMU) on the host machine to always supply a valid page table root, rather than properly validating it along with the rest of the reloaded guest state." http://sourceforge.net/tracker/?func=detail&atid=893831&aid=2687641&group_id=180599 Check for a valid cr3 address in kvm_arch_vcpu_ioctl_set_sregs, triple fault in case of failure. Cc: [email protected] Signed-off-by: Marcelo Tosatti <[email protected]> Signed-off-by: Avi Kivity <[email protected]>
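Note on this record: a hedged sketch of the described check inside kvm_arch_vcpu_ioctl_set_sregs(); the helper and request names follow KVM conventions of that era but are written from memory, not copied from the patch.

	/* Only accept the new cr3 if it points at guest memory that
	 * actually exists; otherwise force a triple fault rather than
	 * letting a later KVM_RUN dereference a NULL rmap. */
	if (gfn_to_memslot(vcpu->kvm, sregs->cr3 >> PAGE_SHIFT) == NULL)
		set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
	else
		vcpu->arch.cr3 = sregs->cr3;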
static inline int sniffing_mode(struct airo_info *ai)
{
	return (le16_to_cpu(ai->config.rmode) & le16_to_cpu(RXMODE_MASK)) >=
		le16_to_cpu(RXMODE_RFMON);
}
target: 0
cwe: [ "CWE-703", "CWE-264" ]
project: linux
commit_id: 550fd08c2cebad61c548def135f67aba284c6162
hash: 221,858,116,746,159,880,000,000,000,000,000,000,000
size: 5
message:
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared After the last patch, We are left in a state in which only drivers calling ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real hardware call ether_setup for their net_devices and don't hold any state in their skbs. There are a handful of drivers that violate this assumption of course, and need to be fixed up. This patch identifies those drivers, and marks them as not being able to support the safe transmission of skbs by clearning the IFF_TX_SKB_SHARING flag in priv_flags Signed-off-by: Neil Horman <[email protected]> CC: Karsten Keil <[email protected]> CC: "David S. Miller" <[email protected]> CC: Jay Vosburgh <[email protected]> CC: Andy Gospodarek <[email protected]> CC: Patrick McHardy <[email protected]> CC: Krzysztof Halasa <[email protected]> CC: "John W. Linville" <[email protected]> CC: Greg Kroah-Hartman <[email protected]> CC: Marcel Holtmann <[email protected]> CC: Johannes Berg <[email protected]> Signed-off-by: David S. Miller <[email protected]>
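Note on this record: the pattern the commit describes, as a sketch for an affected driver's setup routine (the driver and function name are illustrative): ether_setup() sets IFF_TX_SKB_SHARING, and a driver that keeps state in its skbs must clear it again.

static void my_driver_setup(struct net_device *dev)   /* name illustrative */
{
	ether_setup(dev);
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;  /* this driver's skbs
						  * cannot be shared */
}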
int key_reject_and_link(struct key *key,
			unsigned timeout,
			unsigned error,
			struct key *keyring,
			struct key *authkey)
{
	struct assoc_array_edit *edit;
	struct timespec now;
	int ret, awaken, link_ret = 0;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	if (keyring) {
		if (keyring->restrict_link)
			return -EPERM;

		link_ret = __key_link_begin(keyring, &key->index_key, &edit);
	}

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		key->reject_error = -error;
		smp_wmb();
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;
		key_schedule_gc(key->expiry + key_gc_delay);

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring && link_ret == 0)
			__key_link(key, &edit);

		/* disable the authorisation key */
		if (authkey)
			key_revoke(authkey);
	}

	mutex_unlock(&key_construction_mutex);

	if (keyring)
		__key_link_end(keyring, &key->index_key, edit);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret == 0 ? link_ret : ret;
}
target: 1
cwe: [ "CWE-703" ]
project: linux
commit_id: 38327424b40bcebe2de92d07312c89360ac9229a
hash: 300,831,704,163,784,120,000,000,000,000,000,000,000
size: 62
message:
KEYS: potential uninitialized variable If __key_link_begin() failed then "edit" would be uninitialized. I've added a check to fix that. This allows a random user to crash the kernel, though it's quite difficult to achieve. There are three ways it can be done as the user would have to cause an error to occur in __key_link(): (1) Cause the kernel to run out of memory. In practice, this is difficult to achieve without ENOMEM cropping up elsewhere and aborting the attempt. (2) Revoke the destination keyring between the keyring ID being looked up and it being tested for revocation. In practice, this is difficult to time correctly because the KEYCTL_REJECT function can only be used from the request-key upcall process. Further, users can only make use of what's in /sbin/request-key.conf, though this does including a rejection debugging test - which means that the destination keyring has to be the caller's session keyring in practice. (3) Have just enough key quota available to create a key, a new session keyring for the upcall and a link in the session keyring, but not then sufficient quota to create a link in the nominated destination keyring so that it fails with EDQUOT. The bug can be triggered using option (3) above using something like the following: echo 80 >/proc/sys/kernel/keys/root_maxbytes keyctl request2 user debug:fred negate @t The above sets the quota to something much lower (80) to make the bug easier to trigger, but this is dependent on the system. Note also that the name of the keyring created contains a random number that may be between 1 and 10 characters in size, so may throw the test off by changing the amount of quota used. Assuming the failure occurs, something like the following will be seen: kfree_debugcheck: out of range ptr 6b6b6b6b6b6b6b68h ------------[ cut here ]------------ kernel BUG at ../mm/slab.c:2821! ... RIP: 0010:[<ffffffff811600f9>] kfree_debugcheck+0x20/0x25 RSP: 0018:ffff8804014a7de8 EFLAGS: 00010092 RAX: 0000000000000034 RBX: 6b6b6b6b6b6b6b68 RCX: 0000000000000000 RDX: 0000000000040001 RSI: 00000000000000f6 RDI: 0000000000000300 RBP: ffff8804014a7df0 R08: 0000000000000001 R09: 0000000000000000 R10: ffff8804014a7e68 R11: 0000000000000054 R12: 0000000000000202 R13: ffffffff81318a66 R14: 0000000000000000 R15: 0000000000000001 ... Call Trace: kfree+0xde/0x1bc assoc_array_cancel_edit+0x1f/0x36 __key_link_end+0x55/0x63 key_reject_and_link+0x124/0x155 keyctl_reject_key+0xb6/0xe0 keyctl_negate_key+0x10/0x12 SyS_keyctl+0x9f/0xe7 do_syscall_64+0x63/0x13a entry_SYSCALL64_slow_path+0x25/0x25 Fixes: f70e2e06196a ('KEYS: Do preallocation for __key_link()') Signed-off-by: Dan Carpenter <[email protected]> Signed-off-by: David Howells <[email protected]> cc: [email protected] Signed-off-by: Linus Torvalds <[email protected]>
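Note on this record: the function shown is the pre-fix version (target is 1); it calls __key_link_end() even when __key_link_begin() failed and 'edit' was never initialized. A sketch of the changed tail as the message describes it:

	/* Only tear down the edit if __key_link_begin() succeeded;
	 * otherwise 'edit' is uninitialized and must not be used. */
	if (keyring) {
		if (link_ret == 0)
			__key_link_end(keyring, &key->index_key, edit);
	}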
static bool only_ipaddrs_in_list(const char **list)
{
	bool only_ip = true;

	if (!list) {
		return true;
	}

	for (; *list ; list++) {
		/* factor out the special strings */
		if (strequal(*list, "ALL") || strequal(*list, "FAIL") ||
		    strequal(*list, "EXCEPT")) {
			continue;
		}

		if (!is_ipaddress(*list)) {
			/*
			 * If we failed, make sure that it was not because
			 * the token was a network/netmask pair. Only
			 * network/netmask pairs have a '/' in them.
			 */
			if ((strchr_m(*list, '/')) == NULL) {
				only_ip = false;
				DEBUG(3,("only_ipaddrs_in_list: list has "
					 "non-ip address (%s)\n", *list));
				break;
			}
		}
	}

	return only_ip;
}
target: 0
cwe: []
project: samba
commit_id: 91f4275873ebeda8f57684f09df67162ae80515a
hash: 116,473,457,006,821,860,000,000,000,000,000,000,000
size: 33
message:
swat: Use additional nonce on XSRF protection If the user had a weak password on the root account of a machine running SWAT, there still was a chance of being targetted by an XSRF on a malicious web site targetting the SWAT setup. Use a random nonce stored in secrets.tdb to close this possible attack window. Thanks to Jann Horn for reporting this issue. Signed-off-by: Kai Blin <[email protected]> Fix bug #9577: CVE-2013-0214: Potential XSRF in SWAT.
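Note on this record: the message describes the mechanism (a random per-installation nonce from secrets.tdb folded into the XSRF token) without showing code. A generic sketch of that construction, with every name hypothetical (hash_hex() and get_stored_nonce() are illustrative stand-ins, not SWAT's actual code):

#include <stdio.h>
#include <string.h>

/* Hypothetical sketch: the token hashes request-identifying data plus
 * a secret nonce, so a malicious page that only knows the public
 * inputs cannot precompute it. */
static void make_xsrf_token(char *out, size_t outlen,
			    const char *user, const char *pass, long issue_time)
{
	char buf[1024];
	const char *nonce = get_stored_nonce();   /* e.g. from secrets.tdb */

	snprintf(buf, sizeof(buf), "%s:%s:%ld:%s", user, pass, issue_time, nonce);
	hash_hex(buf, out, outlen);
}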
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* Device want to do DMA by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}
target: 0
cwe: [ "CWE-416" ]
project: linux
commit_id: f6bbf0010ba004f5e90c7aefdebc0ee4bd3283b9
hash: 9,426,622,213,958,834,000,000,000,000,000,000,000
size: 33
message:
vhost-vdpa: fix use-after-free of v->config_ctx When the 'v->config_ctx' eventfd_ctx reference is released we didn't set it to NULL. So if the same character device (e.g. /dev/vhost-vdpa-0) is re-opened, the 'v->config_ctx' is invalid and calling again vhost_vdpa_config_put() causes use-after-free issues like the following refcount_t underflow: refcount_t: underflow; use-after-free. WARNING: CPU: 2 PID: 872 at lib/refcount.c:28 refcount_warn_saturate+0xae/0xf0 RIP: 0010:refcount_warn_saturate+0xae/0xf0 Call Trace: eventfd_ctx_put+0x5b/0x70 vhost_vdpa_release+0xcd/0x150 [vhost_vdpa] __fput+0x8e/0x240 ____fput+0xe/0x10 task_work_run+0x66/0xa0 exit_to_user_mode_prepare+0x118/0x120 syscall_exit_to_user_mode+0x21/0x50 ? __x64_sys_close+0x12/0x40 do_syscall_64+0x45/0x50 entry_SYSCALL_64_after_hwframe+0x44/0xae Fixes: 776f395004d8 ("vhost_vdpa: Support config interrupt in vdpa") Cc: [email protected] Cc: [email protected] Signed-off-by: Stefano Garzarella <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Michael S. Tsirkin <[email protected]> Reviewed-by: Zhu Lingshan <[email protected]> Acked-by: Jason Wang <[email protected]>
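Note on this record: the message names the fix precisely (reset v->config_ctx after releasing it, in the config path rather than the domain-allocation function shown above). A sketch:

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx) {
		eventfd_ctx_put(v->config_ctx);
		v->config_ctx = NULL;   /* a later re-open of the same
					 * chardev must not put the
					 * stale context again */
	}
}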
int main(int argc, char* argv[]) { char* l_csr_file; REALTYPE* l_a_sp; unsigned int* l_rowptr; unsigned int* l_colidx; unsigned int l_rowcount, l_colcount, l_elements; REALTYPE* l_a_dense; REALTYPE* l_b; REALTYPE* l_c_betaone; REALTYPE* l_c_betazero; REALTYPE* l_c_gold_betaone; REALTYPE* l_c_gold_betazero; REALTYPE* l_c_dense_betaone; REALTYPE* l_c_dense_betazero; REALTYPE l_max_error = 0.0; unsigned int l_m; unsigned int l_n; unsigned int l_k; unsigned int l_i; unsigned int l_j; unsigned int l_z; unsigned int l_elems; unsigned int l_reps; unsigned int l_n_block; struct timeval l_start, l_end; double l_total; double alpha = 1.0; double beta = 1.0; char trans = 'N'; libxsmm_dfsspmdm* gemm_op_betazero = NULL; libxsmm_dfsspmdm* gemm_op_betaone = NULL; if (argc != 4 ) { fprintf( stderr, "need csr-filename N reps!\n" ); exit(-1); } /* read sparse A */ l_csr_file = argv[1]; l_n = atoi(argv[2]); l_reps = atoi(argv[3]); if (my_csr_reader( l_csr_file, &l_rowptr, &l_colidx, &l_a_sp, &l_rowcount, &l_colcount, &l_elements ) != 0 ) { exit(-1); } l_m = l_rowcount; l_k = l_colcount; printf("CSR matrix data structure we just read:\n"); printf("rows: %u, columns: %u, elements: %u\n", l_rowcount, l_colcount, l_elements); /* allocate dense matrices */ l_a_dense = (REALTYPE*)_mm_malloc(l_k * l_m * sizeof(REALTYPE), 64); l_b = (REALTYPE*)_mm_malloc(l_k * l_n * sizeof(REALTYPE), 64); l_c_betazero = (REALTYPE*)_mm_malloc(l_m * l_n * sizeof(REALTYPE), 64); l_c_betaone = (REALTYPE*)_mm_malloc(l_m * l_n * sizeof(REALTYPE), 64); l_c_gold_betazero = (REALTYPE*)_mm_malloc(l_m * l_n * sizeof(REALTYPE), 64); l_c_gold_betaone = (REALTYPE*)_mm_malloc(l_m * l_n * sizeof(REALTYPE), 64); l_c_dense_betazero = (REALTYPE*)_mm_malloc(l_m * l_n * sizeof(REALTYPE), 64); l_c_dense_betaone = (REALTYPE*)_mm_malloc(l_m * l_n * sizeof(REALTYPE), 64); /* touch B */ for ( l_i = 0; l_i < l_k*l_n; l_i++) { l_b[l_i] = (REALTYPE)libxsmm_rand_f64(); } /* touch dense A */ for ( l_i = 0; l_i < l_k*l_m; l_i++) { l_a_dense[l_i] = (REALTYPE)0.0; } /* init dense A using sparse A */ for ( l_i = 0; l_i < l_m; l_i++ ) { l_elems = l_rowptr[l_i+1] - l_rowptr[l_i]; for ( l_z = 0; l_z < l_elems; l_z++ ) { l_a_dense[(l_i*l_k)+l_colidx[l_rowptr[l_i]+l_z]] = l_a_sp[l_rowptr[l_i]+l_z]; } } /* touch C */ for ( l_i = 0; l_i < l_m*l_n; l_i++) { l_c_gold_betaone[l_i] = (REALTYPE)libxsmm_rand_f64(); } for ( l_i = 0; l_i < l_m*l_n; l_i++) { l_c_betaone[l_i] = l_c_gold_betaone[l_i]; } for ( l_i = 0; l_i < l_m*l_n; l_i++) { l_c_dense_betaone[l_i] = l_c_gold_betaone[l_i]; } for ( l_i = 0; l_i < l_m*l_n; l_i++) { l_c_betazero[l_i] = l_c_betaone[l_i]; } for ( l_i = 0; l_i < l_m*l_n; l_i++) { l_c_gold_betazero[l_i] = l_c_gold_betaone[l_i]; } for ( l_i = 0; l_i < l_m*l_n; l_i++) { l_c_dense_betazero[l_i] = l_c_dense_betaone[l_i]; } /* setting up fsspmdm */ l_n_block = 48; beta = 0.0; gemm_op_betazero = libxsmm_dfsspmdm_create( l_m, l_n_block, l_k, l_k, l_n, l_n, 1.0, beta, l_a_dense ); beta = 1.0; gemm_op_betaone = libxsmm_dfsspmdm_create( l_m, l_n_block, l_k, l_k, l_n, l_n, 1.0, beta, l_a_dense ); /* compute golden results */ printf("computing golden solution...\n"); for ( l_j = 0; l_j < l_n; l_j++ ) { for (l_i = 0; l_i < l_m; l_i++ ) { l_elems = l_rowptr[l_i+1] - l_rowptr[l_i]; l_c_gold_betazero[(l_n*l_i) + l_j] = 0.0; for (l_z = 0; l_z < l_elems; l_z++) { l_c_gold_betazero[(l_n*l_i) + l_j] += l_a_sp[l_rowptr[l_i]+l_z] * l_b[(l_n*l_colidx[l_rowptr[l_i]+l_z])+l_j]; } } } for ( l_j = 0; l_j < l_n; l_j++ ) { for (l_i = 0; l_i < l_m; l_i++ ) { l_elems = l_rowptr[l_i+1] - 
l_rowptr[l_i]; for (l_z = 0; l_z < l_elems; l_z++) { l_c_gold_betaone[(l_n*l_i) + l_j] += l_a_sp[l_rowptr[l_i]+l_z] * l_b[(l_n*l_colidx[l_rowptr[l_i]+l_z])+l_j]; } } } printf("...done!\n"); /* libxsmm generated code */ printf("computing libxsmm (A sparse) solution...\n"); #ifdef _OPENMP #pragma omp parallel for private(l_z) #endif for (l_z = 0; l_z < l_n; l_z+=l_n_block) { libxsmm_dfsspmdm_execute( gemm_op_betazero, l_b+l_z, l_c_betazero+l_z ); } #ifdef _OPENMP #pragma omp parallel for private(l_z) #endif for (l_z = 0; l_z < l_n; l_z+=l_n_block) { libxsmm_dfsspmdm_execute( gemm_op_betaone, l_b+l_z, l_c_betaone+l_z ); } printf("...done!\n"); /* BLAS code */ printf("computing BLAS (A dense) solution...\n"); beta = 0.0; dgemm(&trans, &trans, &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betazero, &l_n ); beta = 1.0; dgemm(&trans, &trans, &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betaone, &l_n ); printf("...done!\n"); /* check for errors */ l_max_error = (REALTYPE)0.0; for ( l_i = 0; l_i < l_m*l_n; l_i++) { if (fabs(l_c_betazero[l_i]-l_c_gold_betazero[l_i]) > l_max_error ) { l_max_error = fabs(l_c_betazero[l_i]-l_c_gold_betazero[l_i]); } } printf("max error beta=0 (libxmm vs. gold): %f\n", l_max_error); l_max_error = (REALTYPE)0.0; for ( l_i = 0; l_i < l_m*l_n; l_i++) { if (fabs(l_c_betaone[l_i]-l_c_gold_betaone[l_i]) > l_max_error ) { l_max_error = fabs(l_c_betaone[l_i]-l_c_gold_betaone[l_i]); } } printf("max error beta=1 (libxmm vs. gold): %f\n", l_max_error); l_max_error = (REALTYPE)0.0; for ( l_i = 0; l_i < l_m*l_n; l_i++) { if (fabs(l_c_dense_betazero[l_i]-l_c_gold_betazero[l_i]) > l_max_error ) { l_max_error = fabs(l_c_dense_betazero[l_i]-l_c_gold_betazero[l_i]); } } printf("max error beta=0 (dense vs. gold): %f\n", l_max_error); l_max_error = (REALTYPE)0.0; for ( l_i = 0; l_i < l_m*l_n; l_i++) { if (fabs(l_c_dense_betaone[l_i]-l_c_gold_betaone[l_i]) > l_max_error ) { l_max_error = fabs(l_c_dense_betaone[l_i]-l_c_gold_betaone[l_i]); } } printf("max error beta=1 (dense vs. 
gold): %f\n", l_max_error); /* Let's measure performance */ gettimeofday(&l_start, NULL); for ( l_j = 0; l_j < l_reps; l_j++ ) { #ifdef _OPENMP #pragma omp parallel for private(l_z) #endif for (l_z = 0; l_z < l_n; l_z+=l_n_block) { libxsmm_dfsspmdm_execute( gemm_op_betazero, l_b+l_z, l_c_betazero+l_z ); } } gettimeofday(&l_end, NULL); l_total = sec(l_start, l_end); fprintf(stdout, "time[s] LIBXSMM (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, l_total/(double)l_reps ); fprintf(stdout, "GFLOPS LIBXSMM (RM, M=%i, N=%i, K=%i, beta=0): %f (sparse)\n", l_m, l_n, l_k, (2.0 * (double)l_elements * (double)l_n * (double)l_reps * 1.0e-9) / l_total ); fprintf(stdout, "GFLOPS LIBXSMM (RM, M=%i, N=%i, K=%i, beta=0): %f (dense)\n", l_m, l_n, l_k, (2.0 * (double)l_m * (double)l_n * (double)l_k * (double)l_reps * 1.0e-9) / l_total ); fprintf(stdout, "GB/s LIBXSMM (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, ((double)sizeof(double) * ((2.0*(double)l_m * (double)l_n) + ((double)l_k * (double)l_n)) * (double)l_reps * 1.0e-9) / l_total ); gettimeofday(&l_start, NULL); for ( l_j = 0; l_j < l_reps; l_j++ ) { #ifdef _OPENMP #pragma omp parallel for private(l_z) #endif for (l_z = 0; l_z < l_n; l_z+=l_n_block) { libxsmm_dfsspmdm_execute( gemm_op_betaone, l_b+l_z, l_c_betaone+l_z ); } } gettimeofday(&l_end, NULL); l_total = sec(l_start, l_end); fprintf(stdout, "time[s] LIBXSMM (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, l_total/(double)l_reps ); fprintf(stdout, "GFLOPS LIBXSMM (RM, M=%i, N=%i, K=%i, beta=1): %f (sparse)\n", l_m, l_n, l_k, (2.0 * (double)l_elements * (double)l_n * (double)l_reps * 1.0e-9) / l_total ); fprintf(stdout, "GFLOPS LIBXSMM (RM, M=%i, N=%i, K=%i, beta=1): %f (dense)\n", l_m, l_n, l_k, (2.0 * (double)l_m * (double)l_n * (double)l_k * (double)l_reps * 1.0e-9) / l_total ); fprintf(stdout, "GB/s LIBXSMM (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, ((double)sizeof(double) * ((2.0*(double)l_m * (double)l_n) + ((double)l_k * (double)l_n)) * (double)l_reps * 1.0e-9) / l_total ); gettimeofday(&l_start, NULL); beta = 0.0; for ( l_j = 0; l_j < l_reps; l_j++ ) { dgemm(&trans, &trans, &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betazero, &l_n ); } gettimeofday(&l_end, NULL); l_total = sec(l_start, l_end); fprintf(stdout, "time[s] MKL (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, l_total/(double)l_reps ); fprintf(stdout, "GFLOPS MKL (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, (2.0 * (double)l_m * (double)l_n * (double)l_k * (double)l_reps * 1.0e-9) / l_total ); fprintf(stdout, "GB/s MKL (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, ((double)sizeof(double) * ((2.0*(double)l_m * (double)l_n) + ((double)l_k * (double)l_n)) * (double)l_reps * 1.0e-9) / l_total ); gettimeofday(&l_start, NULL); beta = 1.0; for ( l_j = 0; l_j < l_reps; l_j++ ) { dgemm(&trans, &trans, &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betaone, &l_n ); } gettimeofday(&l_end, NULL); l_total = sec(l_start, l_end); fprintf(stdout, "time[s] MKL (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, l_total/(double)l_reps ); fprintf(stdout, "GFLOPS MKL (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, (2.0 * (double)l_m * (double)l_n * (double)l_k * (double)l_reps * 1.0e-9) / l_total ); fprintf(stdout, "GB/s MKL (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, ((double)sizeof(double) * ((2.0*(double)l_m * (double)l_n) + ((double)l_k * (double)l_n)) * (double)l_reps * 1.0e-9) / l_total ); /* free */ 
libxsmm_dfsspmdm_destroy( gemm_op_betazero ); libxsmm_dfsspmdm_destroy( gemm_op_betaone ); }
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: libxsmm
commit_id: 151481489192e6d1997f8bde52c5c425ea41741d
hash: 320,661,761,140,806,240,000,000,000,000,000,000,000
size: 248
message:
Issue #287: made CSR/CSC readers more robust against invalid input (case #1).
int dvb_unregister_frontend(struct dvb_frontend* fe)
{
	struct dvb_frontend_private *fepriv = fe->frontend_priv;

	dev_dbg(fe->dvb->device, "%s:\n", __func__);

	mutex_lock(&frontend_mutex);
	dvb_frontend_stop(fe);
	dvb_remove_device(fepriv->dvbdev);

	/* fe is invalid now */
	mutex_unlock(&frontend_mutex);
	dvb_frontend_put(fe);
	return 0;
}
target: 0
cwe: [ "CWE-416" ]
project: linux
commit_id: b1cb7372fa822af6c06c8045963571d13ad6348b
hash: 301,279,295,463,313,770,000,000,000,000,000,000,000
size: 14
message:
dvb_frontend: don't use-after-free the frontend struct dvb_frontend_invoke_release() may free the frontend struct. So, the free logic can't update it anymore after calling it. That's OK, as __dvb_frontend_free() is called only when the krefs are zeroed, so nobody is using it anymore. That should fix the following KASAN error: The KASAN report looks like this (running on kernel 3e0cc09a3a2c40ec1ffb6b4e12da86e98feccb11 (4.14-rc5+)): ================================================================== BUG: KASAN: use-after-free in __dvb_frontend_free+0x113/0x120 Write of size 8 at addr ffff880067d45a00 by task kworker/0:1/24 CPU: 0 PID: 24 Comm: kworker/0:1 Not tainted 4.14.0-rc5-43687-g06ab8a23e0e6 #545 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Workqueue: usb_hub_wq hub_event Call Trace: __dump_stack lib/dump_stack.c:16 dump_stack+0x292/0x395 lib/dump_stack.c:52 print_address_description+0x78/0x280 mm/kasan/report.c:252 kasan_report_error mm/kasan/report.c:351 kasan_report+0x23d/0x350 mm/kasan/report.c:409 __asan_report_store8_noabort+0x1c/0x20 mm/kasan/report.c:435 __dvb_frontend_free+0x113/0x120 drivers/media/dvb-core/dvb_frontend.c:156 dvb_frontend_put+0x59/0x70 drivers/media/dvb-core/dvb_frontend.c:176 dvb_frontend_detach+0x120/0x150 drivers/media/dvb-core/dvb_frontend.c:2803 dvb_usb_adapter_frontend_exit+0xd6/0x160 drivers/media/usb/dvb-usb/dvb-usb-dvb.c:340 dvb_usb_adapter_exit drivers/media/usb/dvb-usb/dvb-usb-init.c:116 dvb_usb_exit+0x9b/0x200 drivers/media/usb/dvb-usb/dvb-usb-init.c:132 dvb_usb_device_exit+0xa5/0xf0 drivers/media/usb/dvb-usb/dvb-usb-init.c:295 usb_unbind_interface+0x21c/0xa90 drivers/usb/core/driver.c:423 __device_release_driver drivers/base/dd.c:861 device_release_driver_internal+0x4f1/0x5c0 drivers/base/dd.c:893 device_release_driver+0x1e/0x30 drivers/base/dd.c:918 bus_remove_device+0x2f4/0x4b0 drivers/base/bus.c:565 device_del+0x5c4/0xab0 drivers/base/core.c:1985 usb_disable_device+0x1e9/0x680 drivers/usb/core/message.c:1170 usb_disconnect+0x260/0x7a0 drivers/usb/core/hub.c:2124 hub_port_connect drivers/usb/core/hub.c:4754 hub_port_connect_change drivers/usb/core/hub.c:5009 port_event drivers/usb/core/hub.c:5115 hub_event+0x1318/0x3740 drivers/usb/core/hub.c:5195 process_one_work+0xc73/0x1d90 kernel/workqueue.c:2119 worker_thread+0x221/0x1850 kernel/workqueue.c:2253 kthread+0x363/0x440 kernel/kthread.c:231 ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431 Allocated by task 24: save_stack_trace+0x1b/0x20 arch/x86/kernel/stacktrace.c:59 save_stack+0x43/0xd0 mm/kasan/kasan.c:447 set_track mm/kasan/kasan.c:459 kasan_kmalloc+0xad/0xe0 mm/kasan/kasan.c:551 kmem_cache_alloc_trace+0x11e/0x2d0 mm/slub.c:2772 kmalloc ./include/linux/slab.h:493 kzalloc ./include/linux/slab.h:666 dtt200u_fe_attach+0x4c/0x110 drivers/media/usb/dvb-usb/dtt200u-fe.c:212 dtt200u_frontend_attach+0x35/0x80 drivers/media/usb/dvb-usb/dtt200u.c:136 dvb_usb_adapter_frontend_init+0x32b/0x660 drivers/media/usb/dvb-usb/dvb-usb-dvb.c:286 dvb_usb_adapter_init drivers/media/usb/dvb-usb/dvb-usb-init.c:86 dvb_usb_init drivers/media/usb/dvb-usb/dvb-usb-init.c:162 dvb_usb_device_init+0xf73/0x17f0 drivers/media/usb/dvb-usb/dvb-usb-init.c:277 dtt200u_usb_probe+0xa1/0xe0 drivers/media/usb/dvb-usb/dtt200u.c:155 usb_probe_interface+0x35d/0x8e0 drivers/usb/core/driver.c:361 really_probe drivers/base/dd.c:413 driver_probe_device+0x610/0xa00 drivers/base/dd.c:557 __device_attach_driver+0x230/0x290 drivers/base/dd.c:653 bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463 
__device_attach+0x26b/0x3c0 drivers/base/dd.c:710 device_initial_probe+0x1f/0x30 drivers/base/dd.c:757 bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523 device_add+0xd0b/0x1660 drivers/base/core.c:1835 usb_set_configuration+0x104e/0x1870 drivers/usb/core/message.c:1932 generic_probe+0x73/0xe0 drivers/usb/core/generic.c:174 usb_probe_device+0xaf/0xe0 drivers/usb/core/driver.c:266 really_probe drivers/base/dd.c:413 driver_probe_device+0x610/0xa00 drivers/base/dd.c:557 __device_attach_driver+0x230/0x290 drivers/base/dd.c:653 bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463 __device_attach+0x26b/0x3c0 drivers/base/dd.c:710 device_initial_probe+0x1f/0x30 drivers/base/dd.c:757 bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523 device_add+0xd0b/0x1660 drivers/base/core.c:1835 usb_new_device+0x7b8/0x1020 drivers/usb/core/hub.c:2457 hub_port_connect drivers/usb/core/hub.c:4903 hub_port_connect_change drivers/usb/core/hub.c:5009 port_event drivers/usb/core/hub.c:5115 hub_event+0x194d/0x3740 drivers/usb/core/hub.c:5195 process_one_work+0xc73/0x1d90 kernel/workqueue.c:2119 worker_thread+0x221/0x1850 kernel/workqueue.c:2253 kthread+0x363/0x440 kernel/kthread.c:231 ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431 Freed by task 24: save_stack_trace+0x1b/0x20 arch/x86/kernel/stacktrace.c:59 save_stack+0x43/0xd0 mm/kasan/kasan.c:447 set_track mm/kasan/kasan.c:459 kasan_slab_free+0x72/0xc0 mm/kasan/kasan.c:524 slab_free_hook mm/slub.c:1390 slab_free_freelist_hook mm/slub.c:1412 slab_free mm/slub.c:2988 kfree+0xf6/0x2f0 mm/slub.c:3919 dtt200u_fe_release+0x3c/0x50 drivers/media/usb/dvb-usb/dtt200u-fe.c:202 dvb_frontend_invoke_release.part.13+0x1c/0x30 drivers/media/dvb-core/dvb_frontend.c:2790 dvb_frontend_invoke_release drivers/media/dvb-core/dvb_frontend.c:2789 __dvb_frontend_free+0xad/0x120 drivers/media/dvb-core/dvb_frontend.c:153 dvb_frontend_put+0x59/0x70 drivers/media/dvb-core/dvb_frontend.c:176 dvb_frontend_detach+0x120/0x150 drivers/media/dvb-core/dvb_frontend.c:2803 dvb_usb_adapter_frontend_exit+0xd6/0x160 drivers/media/usb/dvb-usb/dvb-usb-dvb.c:340 dvb_usb_adapter_exit drivers/media/usb/dvb-usb/dvb-usb-init.c:116 dvb_usb_exit+0x9b/0x200 drivers/media/usb/dvb-usb/dvb-usb-init.c:132 dvb_usb_device_exit+0xa5/0xf0 drivers/media/usb/dvb-usb/dvb-usb-init.c:295 usb_unbind_interface+0x21c/0xa90 drivers/usb/core/driver.c:423 __device_release_driver drivers/base/dd.c:861 device_release_driver_internal+0x4f1/0x5c0 drivers/base/dd.c:893 device_release_driver+0x1e/0x30 drivers/base/dd.c:918 bus_remove_device+0x2f4/0x4b0 drivers/base/bus.c:565 device_del+0x5c4/0xab0 drivers/base/core.c:1985 usb_disable_device+0x1e9/0x680 drivers/usb/core/message.c:1170 usb_disconnect+0x260/0x7a0 drivers/usb/core/hub.c:2124 hub_port_connect drivers/usb/core/hub.c:4754 hub_port_connect_change drivers/usb/core/hub.c:5009 port_event drivers/usb/core/hub.c:5115 hub_event+0x1318/0x3740 drivers/usb/core/hub.c:5195 process_one_work+0xc73/0x1d90 kernel/workqueue.c:2119 worker_thread+0x221/0x1850 kernel/workqueue.c:2253 kthread+0x363/0x440 kernel/kthread.c:231 ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431 The buggy address belongs to the object at ffff880067d45500 which belongs to the cache kmalloc-2048 of size 2048 The buggy address is located 1280 bytes inside of 2048-byte region [ffff880067d45500, ffff880067d45d00) The buggy address belongs to the page: page:ffffea00019f5000 count:1 mapcount:0 mapping: (null) index:0x0 compound_mapcount: 0 flags: 0x100000000008100(slab|head) raw: 0100000000008100 0000000000000000 
0000000000000000 00000001000f000f raw: dead000000000100 dead000000000200 ffff88006c002d80 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff880067d45900: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff880067d45980: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff880067d45a00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff880067d45a80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff880067d45b00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ================================================================== Fixes: ead666000a5f ("media: dvb_frontend: only use kref after initialized") Reported-by: Andrey Konovalov <[email protected]> Suggested-by: Matthias Schwarzott <[email protected]> Tested-by: Andrey Konovalov <[email protected]> Signed-off-by: Mauro Carvalho Chehab <[email protected]>
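The fix described above is an ordering rule: once the release callback has run, the object may be freed memory, so every access must happen before that call. A minimal standalone C sketch of the rule, with hypothetical type and field names (not the dvb-core structures):

#include <stdlib.h>

struct frontend {
    void (*release_cb)(struct frontend *fe);  /* may free fe itself */
    void *priv;
};

static void frontend_put_last_ref(struct frontend *fe)
{
    /* Do all bookkeeping that touches fe *before* invoking release. */
    free(fe->priv);
    fe->priv = NULL;

    /* After this call, fe may point at freed memory: no further access. */
    if (fe->release_cb)
        fe->release_cb(fe);
}

The same pattern applies to any refcounted object whose release hook owns the final free.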
gpointer
_g_object_ref (gpointer object)
{
	return (object != NULL) ? g_object_ref (object) : NULL;
}
0
[ "CWE-22" ]
file-roller
b147281293a8307808475e102a14857055f81631
36,226,643,098,851,332,000,000,000,000,000,000,000
4
libarchive: sanitize filenames before extracting
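The one-line message above summarizes a path-traversal fix (CWE-22). A hedged illustration of the kind of check it implies — reject absolute entry names and any ".." component before extraction; this is a sketch, not file-roller's actual sanitizer:

#include <stdbool.h>
#include <string.h>

/* Accept an archive entry name only if it is relative and contains
 * no ".." path component. */
static bool entry_name_is_safe(const char *name)
{
    if (name == NULL || name[0] == '/' || name[0] == '\\')
        return false;
    for (const char *p = name; *p != '\0'; ) {
        size_t len = strcspn(p, "/\\");          /* one path component */
        if (len == 2 && p[0] == '.' && p[1] == '.')
            return false;                        /* traversal attempt */
        p += len;
        if (*p != '\0')
            p++;                                 /* skip the separator */
    }
    return true;
}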
Utility::Singletons Utility::createSingletons(Server::Configuration::FactoryContext& context) { std::shared_ptr<Http::TlsCachingDateProviderImpl> date_provider = context.singletonManager().getTyped<Http::TlsCachingDateProviderImpl>( SINGLETON_MANAGER_REGISTERED_NAME(date_provider), [&context] { return std::make_shared<Http::TlsCachingDateProviderImpl>(context.dispatcher(), context.threadLocal()); }); Router::RouteConfigProviderManagerSharedPtr route_config_provider_manager = context.singletonManager().getTyped<Router::RouteConfigProviderManager>( SINGLETON_MANAGER_REGISTERED_NAME(route_config_provider_manager), [&context] { return std::make_shared<Router::RouteConfigProviderManagerImpl>(context.admin()); }); Router::ScopedRoutesConfigProviderManagerSharedPtr scoped_routes_config_provider_manager = context.singletonManager().getTyped<Router::ScopedRoutesConfigProviderManager>( SINGLETON_MANAGER_REGISTERED_NAME(scoped_routes_config_provider_manager), [&context, route_config_provider_manager] { return std::make_shared<Router::ScopedRoutesConfigProviderManager>( context.admin(), *route_config_provider_manager); }); auto http_tracer_manager = context.singletonManager().getTyped<Tracing::HttpTracerManagerImpl>( SINGLETON_MANAGER_REGISTERED_NAME(http_tracer_manager), [&context] { return std::make_shared<Tracing::HttpTracerManagerImpl>( std::make_unique<Tracing::TracerFactoryContextImpl>( context.getServerFactoryContext(), context.messageValidationVisitor())); }); std::shared_ptr<Filter::Http::FilterConfigProviderManager> filter_config_provider_manager = context.singletonManager().getTyped<Filter::Http::FilterConfigProviderManager>( SINGLETON_MANAGER_REGISTERED_NAME(filter_config_provider_manager), [] { return std::make_shared<Filter::Http::FilterConfigProviderManagerImpl>(); }); return {date_provider, route_config_provider_manager, scoped_routes_config_provider_manager, http_tracer_manager, filter_config_provider_manager}; }
0
[ "CWE-22" ]
envoy
5333b928d8bcffa26ab19bf018369a835f697585
106,279,643,120,997,620,000,000,000,000,000,000,000
37
Implement handling of escaped slash characters in URL path Fixes: CVE-2021-29492 Signed-off-by: Yan Avlasov <[email protected]>
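As a rough model of the vulnerability class this commit addresses: a percent-encoded slash survives route matching and only becomes '/' after decoding. The helper below merely detects %2F/%5C so a caller can reject or normalize the path; names and policy are illustrative, not Envoy's API:

#include <stdbool.h>

static bool path_has_escaped_slash(const char *path)
{
    for (const char *p = path; p[0] && p[1] && p[2]; p++) {
        if (p[0] == '%' &&
            ((p[1] == '2' && (p[2] == 'F' || p[2] == 'f')) ||   /* %2F -> '/' */
             (p[1] == '5' && (p[2] == 'C' || p[2] == 'c'))))    /* %5C -> '\' */
            return true;
    }
    return false;
}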
static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
{
	if (!ctx)
		return;
	if (*locked) {
		io_submit_flush_completions(ctx);
		mutex_unlock(&ctx->uring_lock);
		*locked = false;
	}
	percpu_ref_put(&ctx->refs);
}
0
[ "CWE-416" ]
linux
e677edbcabee849bfdd43f1602bccbecf736a646
28,348,960,428,014,700,000,000,000,000,000,000,000
11
io_uring: fix race between timeout flush and removal io_flush_timeouts() assumes the timeout isn't in progress of triggering or being removed/canceled, so it unconditionally removes it from the timeout list and attempts to cancel it. Leave it on the list and let the normal timeout cancelation take care of it. Cc: [email protected] # 5.5+ Signed-off-by: Jens Axboe <[email protected]>
static VALUE
date_s__valid_jd_p(int argc, VALUE *argv, VALUE klass)
{
    VALUE vjd, vsg;
    VALUE argv2[2];

    rb_scan_args(argc, argv, "11", &vjd, &vsg);

    argv2[0] = vjd;
    if (argc < 2)
	argv2[1] = DBL2NUM(GREGORIAN);
    else
	argv2[1] = vsg;

    return valid_jd_sub(2, argv2, klass, 1);
}
0
[]
date
3959accef8da5c128f8a8e2fd54e932a4fb253b0
171,103,382,558,445,200,000,000,000,000,000,000,000
15
Add length limit option for methods that parses date strings `Date.parse` now raises an ArgumentError when a given date string is longer than 128. You can configure the limit by giving `limit` keyword arguments like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`, the limit is disabled. Not only `Date.parse` but also the following methods are changed. * Date._parse * Date.parse * DateTime.parse * Date._iso8601 * Date.iso8601 * DateTime.iso8601 * Date._rfc3339 * Date.rfc3339 * DateTime.rfc3339 * Date._xmlschema * Date.xmlschema * DateTime.xmlschema * Date._rfc2822 * Date.rfc2822 * DateTime.rfc2822 * Date._rfc822 * Date.rfc822 * DateTime.rfc822 * Date._jisx0301 * Date.jisx0301 * DateTime.jisx0301
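The guard this commit describes is a simple length cap applied before parsing. A standalone sketch with a hypothetical limit constant, using POSIX strnlen() so the check itself costs at most O(limit):

#include <stddef.h>
#include <string.h>

#define DATE_PARSE_DEFAULT_LIMIT 128   /* illustrative default */

/* Returns 1 if the string may be parsed, 0 if the caller should
 * raise ArgumentError instead. limit == 0 disables the check. */
static int check_limit(const char *str, size_t limit)
{
    if (str == NULL)
        return 0;
    if (limit > 0 && strnlen(str, limit + 1) > limit)
        return 0;
    return 1;
}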
void onUnderlyingConnectionBelowWriteBufferLowWatermark() override { onBelowLowWatermark(); }
0
[ "CWE-400" ]
envoy
0e49a495826ea9e29134c1bd54fdeb31a034f40c
12,897,700,834,266,328,000,000,000,000,000,000,000
1
http/2: add stats and stream flush timeout (#139) This commit adds a new stream flush timeout to guard against a remote server that does not open window once an entire stream has been buffered for flushing. Additional stats have also been added to better understand the codecs view of active streams as well as amount of data buffered. Signed-off-by: Matt Klein <[email protected]>
static char *vfswrap_getwd(vfs_handle_struct *handle)
{
	char *result;

	START_PROFILE(syscall_getwd);
	result = sys_getwd();
	END_PROFILE(syscall_getwd);
	return result;
}
0
[ "CWE-665" ]
samba
30e724cbff1ecd90e5a676831902d1e41ec1b347
197,707,404,025,477,200,000,000,000,000,000,000,000
9
FSCTL_GET_SHADOW_COPY_DATA: Initialize output array to zero Otherwise num_volumes and the end marker can return uninitialized data to the client. Signed-off-by: Christof Schmitt <[email protected]> Reviewed-by: Jeremy Allison <[email protected]> Reviewed-by: Simo Sorce <[email protected]>
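The fix pattern here is zero-initializing an output buffer so unwritten fields cannot leak heap contents to the client. A toy model with an illustrative layout (not Samba's structures; calloc stands in for talloc_zero):

#include <stdlib.h>

struct shadow_copy_reply {
    unsigned int num_volumes;
    char labels[8][25];        /* illustrative layout only */
};

static struct shadow_copy_reply *alloc_reply(void)
{
    /* Zero-filled allocation: unfilled slots and the end marker
     * start at zero instead of containing stale heap garbage. */
    return calloc(1, sizeof(struct shadow_copy_reply));
}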
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, unsigned long old_addr, unsigned long old_end, struct vm_area_struct *new_vma, pmd_t *new_pmd, unsigned long new_addr, bool need_rmap_locks) { struct mm_struct *mm = vma->vm_mm; pte_t *old_pte, *new_pte, pte; spinlock_t *old_ptl, *new_ptl; bool force_flush = false; unsigned long len = old_end - old_addr; /* * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma * locks to ensure that rmap will always observe either the old or the * new ptes. This is the easiest way to avoid races with * truncate_pagecache(), page migration, etc... * * When need_rmap_locks is false, we use other ways to avoid * such races: * * - During exec() shift_arg_pages(), we use a specially tagged vma * which rmap call sites look for using is_vma_temporary_stack(). * * - During mremap(), new_vma is often known to be placed after vma * in rmap traversal order. This ensures rmap will always observe * either the old pte, or the new pte, or both (the page table locks * serialize access to individual ptes, but only rmap traversal * order guarantees that we won't miss both the old and new ptes). */ if (need_rmap_locks) take_rmap_locks(vma); /* * We don't have to worry about the ordering of src and dst * pte locks because exclusive mmap_sem prevents deadlock. */ old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl); new_pte = pte_offset_map(new_pmd, new_addr); new_ptl = pte_lockptr(mm, new_pmd); if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); flush_tlb_batched_pending(vma->vm_mm); arch_enter_lazy_mmu_mode(); for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE, new_pte++, new_addr += PAGE_SIZE) { if (pte_none(*old_pte)) continue; pte = ptep_get_and_clear(mm, old_addr, old_pte); /* * If we are remapping a valid PTE, make sure * to flush TLB before we drop the PTL for the * PTE. * * NOTE! Both old and new PTL matter: the old one * for racing with page_mkclean(), the new one to * make sure the physical page stays valid until * the TLB entry for the old mapping has been * flushed. */ if (pte_present(pte)) force_flush = true; pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); pte = move_soft_dirty_pte(pte); set_pte_at(mm, new_addr, new_pte, pte); } arch_leave_lazy_mmu_mode(); if (force_flush) flush_tlb_range(vma, old_end - len, old_end); if (new_ptl != old_ptl) spin_unlock(new_ptl); pte_unmap(new_pte - 1); pte_unmap_unlock(old_pte - 1, old_ptl); if (need_rmap_locks) drop_rmap_locks(vma); }
0
[ "CWE-459" ]
linux
eb66ae030829605d61fbef1909ce310e29f78821
312,575,985,127,842,000,000,000,000,000,000,000,000
78
mremap: properly flush TLB before releasing the page Jann Horn points out that our TLB flushing was subtly wrong for the mremap() case. What makes mremap() special is that we don't follow the usual "add page to list of pages to be freed, then flush tlb, and then free pages". No, mremap() obviously just _moves_ the page from one page table location to another. That matters, because mremap() thus doesn't directly control the lifetime of the moved page with a freelist: instead, the lifetime of the page is controlled by the page table locking, that serializes access to the entry. As a result, we need to flush the TLB not just before releasing the lock for the source location (to avoid any concurrent accesses to the entry), but also before we release the destination page table lock (to avoid the TLB being flushed after somebody else has already done something to that page). This also makes the whole "need_flush" logic unnecessary, since we now always end up flushing the TLB for every valid entry. Reported-and-tested-by: Jann Horn <[email protected]> Acked-by: Will Deacon <[email protected]> Tested-by: Ingo Molnar <[email protected]> Acked-by: Peter Zijlstra (Intel) <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
static gpg_error_t
put_post_cb (void *opaque, http_t http)
{
  struct put_post_parm_s *parm = opaque;
  gpg_error_t err = 0;
  estream_t fp;
  size_t len;

  fp = http_get_write_ptr (http);
  len = strlen (parm->datastring);

  es_fprintf (fp,
              "Content-Type: application/x-www-form-urlencoded\r\n"
              "Content-Length: %zu\r\n", len+8 /* 8 is for "keytext" */);
  http_start_data (http);
  if (es_fputs ("keytext=", fp) || es_write (fp, parm->datastring, len, NULL))
    err = gpg_error_from_syserror ();
  return err;
}
0
[ "CWE-352" ]
gnupg
4a4bb874f63741026bd26264c43bb32b1099f060
218,886,790,296,541,050,000,000,000,000,000,000,000
18
dirmngr: Avoid possible CSRF attacks via http redirects. * dirmngr/http.h (parsed_uri_s): Add fields off_host and off_path. (http_redir_info_t): New. * dirmngr/http.c (do_parse_uri): Set new fields. (same_host_p): New. (http_prepare_redirect): New. * dirmngr/t-http-basic.c: New test. * dirmngr/ks-engine-hkp.c (send_request): Use http_prepare_redirect instead of the open code. * dirmngr/ks-engine-http.c (ks_http_fetch): Ditto. -- With this change a http query will not follow a redirect unless the Location header gives the same host. If the host is different only the host and port is taken from the Location header and the original path and query parts are kept. Signed-off-by: Werner Koch <[email protected]> (cherry picked from commit fa1b1eaa4241ff3f0634c8bdf8591cbc7c464144)
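The core of the mitigation is a same-host test on the redirect target: follow the Location header in full only when the host matches, otherwise keep the original path and query. A simplified model of that policy — host comparison only; real code must also compare scheme and port, and this is not dirmngr's implementation:

#include <stdbool.h>
#include <string.h>
#include <strings.h>

static bool same_host_p(const char *orig_host, const char *new_host)
{
    return orig_host && new_host && strcasecmp(orig_host, new_host) == 0;
}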
static inline bool d_is_whiteout(const struct dentry *dentry)
{
	return __d_entry_type(dentry) == DCACHE_WHITEOUT_TYPE;
}
0
[ "CWE-284" ]
linux
54d5ca871e72f2bb172ec9323497f01cd5091ec7
30,607,013,763,859,807,000,000,000,000,000,000,000
4
vfs: add vfs_select_inode() helper Signed-off-by: Miklos Szeredi <[email protected]> Cc: <[email protected]> # v4.2+
static u32 __init armv8pmu_read_num_pmnc_events(void)
{
	u32 nb_cnt;

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}
0
[ "CWE-284", "CWE-264" ]
linux
8fff105e13041e49b82f92eef034f363a6b1c071
265,843,683,763,926,200,000,000,000,000,000,000,000
10
arm64: perf: reject groups spanning multiple HW PMUs The perf core implicitly rejects events spanning multiple HW PMUs, as in these cases the event->ctx will differ. However this validation is performed after pmu::event_init() is called in perf_init_event(), and thus pmu::event_init() may be called with a group leader from a different HW PMU. The ARM64 PMU driver does not take this fact into account, and when validating groups assumes that it can call to_arm_pmu(event->pmu) for any HW event. When the event in question is from another HW PMU this is wrong, and results in dereferencing garbage. This patch updates the ARM64 PMU driver to first test for and reject events from other PMUs, moving the to_arm_pmu and related logic after this test. Fixes a crash triggered by perf_fuzzer on Linux-4.0-rc2, with a CCI PMU present: Bad mode in Synchronous Abort handler detected, code 0x86000006 -- IABT (current EL) CPU: 0 PID: 1371 Comm: perf_fuzzer Not tainted 3.19.0+ #249 Hardware name: V2F-1XV7 Cortex-A53x2 SMM (DT) task: ffffffc07c73a280 ti: ffffffc07b0a0000 task.ti: ffffffc07b0a0000 PC is at 0x0 LR is at validate_event+0x90/0xa8 pc : [<0000000000000000>] lr : [<ffffffc000090228>] pstate: 00000145 sp : ffffffc07b0a3ba0 [< (null)>] (null) [<ffffffc0000907d8>] armpmu_event_init+0x174/0x3cc [<ffffffc00015d870>] perf_try_init_event+0x34/0x70 [<ffffffc000164094>] perf_init_event+0xe0/0x10c [<ffffffc000164348>] perf_event_alloc+0x288/0x358 [<ffffffc000164c5c>] SyS_perf_event_open+0x464/0x98c Code: bad PC value Also cleans up the code to use the arm_pmu only when we know that we are dealing with an arm pmu event. Cc: Will Deacon <[email protected]> Acked-by: Mark Rutland <[email protected]> Acked-by: Peter Ziljstra (Intel) <[email protected]> Signed-off-by: Suzuki K. Poulose <[email protected]> Signed-off-by: Will Deacon <[email protected]>
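The commit's key change is checking event ownership before any downcast. A standalone model of that validate step, with illustrative types standing in for the kernel's:

#include <stdbool.h>

struct pmu { int type; };
struct perf_event { struct pmu *pmu; };

static bool validate_event(struct pmu *ours, struct perf_event *event)
{
    if (event->pmu != ours)
        return false;   /* never downcast a foreign PMU's event */
    /* ... only now is a container_of-style downcast safe ... */
    return true;
}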
static void ShowMakerNoteGeneric(unsigned char * ValuePtr, int ByteCount)
{
    int a;
    for (a=0;a<ByteCount;a++){
        if (a > 10){
            printf("...");
            break;
        }
        printf(" %02x",ValuePtr[a]);
    }
    printf(" (%d bytes)", ByteCount);
    printf("\n");
}
0
[ "CWE-703" ]
jhead
a50953a266583981b51a181c2fce73dad2ac5d7d
137,483,937,650,086,690,000,000,000,000,000,000,000
13
Make pointer range checks more consistent. Also twiddle the unused floating point print code (not used in real exif files), but fuzz testing hits it. New code is equivalent but doesn't cause bus error (don't understand why, but this is all a very bogus thing anyway, just trying to avoid fuzz testing hits.
static int proc_base_fill_cache(struct file *filp, void *dirent,
	filldir_t filldir, struct task_struct *task, const struct pid_entry *p)
{
	return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
				proc_base_instantiate, task, p);
}
0
[]
linux
0499680a42141d86417a8fbaa8c8db806bea1201
103,489,044,027,711,190,000,000,000,000,000,000,000
6
procfs: add hidepid= and gid= mount options Add support for mount options to restrict access to /proc/PID/ directories. The default backward-compatible "relaxed" behaviour is left untouched. The first mount option is called "hidepid" and its value defines how much info about processes we want to be available for non-owners: hidepid=0 (default) means the old behavior - anybody may read all world-readable /proc/PID/* files. hidepid=1 means users may not access any /proc/<pid>/ directories, but their own. Sensitive files like cmdline, sched*, status are now protected against other users. As permission checking done in proc_pid_permission() and files' permissions are left untouched, programs expecting specific files' modes are not confused. hidepid=2 means hidepid=1 plus all /proc/PID/ will be invisible to other users. It doesn't mean that it hides whether a process exists (it can be learned by other means, e.g. by kill -0 $PID), but it hides process' euid and egid. It compicates intruder's task of gathering info about running processes, whether some daemon runs with elevated privileges, whether another user runs some sensitive program, whether other users run any program at all, etc. gid=XXX defines a group that will be able to gather all processes' info (as in hidepid=0 mode). This group should be used instead of putting nonroot user in sudoers file or something. However, untrusted users (like daemons, etc.) which are not supposed to monitor the tasks in the whole system should not be added to the group. hidepid=1 or higher is designed to restrict access to procfs files, which might reveal some sensitive private information like precise keystrokes timings: http://www.openwall.com/lists/oss-security/2011/11/05/3 hidepid=1/2 doesn't break monitoring userspace tools. ps, top, pgrep, and conky gracefully handle EPERM/ENOENT and behave as if the current user is the only user running processes. pstree shows the process subtree which contains "pstree" process. Note: the patch doesn't deal with setuid/setgid issues of keeping preopened descriptors of procfs files (like https://lkml.org/lkml/2011/2/7/368). We rely on that the leaked information like the scheduling counters of setuid apps doesn't threaten anybody's privacy - only the user started the setuid program may read the counters. Signed-off-by: Vasiliy Kulikov <[email protected]> Cc: Alexey Dobriyan <[email protected]> Cc: Al Viro <[email protected]> Cc: Randy Dunlap <[email protected]> Cc: "H. Peter Anvin" <[email protected]> Cc: Greg KH <[email protected]> Cc: Theodore Tso <[email protected]> Cc: Alan Cox <[email protected]> Cc: James Morris <[email protected]> Cc: Oleg Nesterov <[email protected]> Cc: Hugh Dickins <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
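The three hidepid modes reduce to one access predicate. A condensed, illustrative model — the enum values mirror the documented semantics, but the helper is not the kernel's code:

#include <stdbool.h>

enum { HIDEPID_OFF = 0, HIDEPID_NO_ACCESS = 1, HIDEPID_INVISIBLE = 2 };

static bool may_see_task(int hidepid, bool is_owner, bool in_gid_group)
{
    if (hidepid == HIDEPID_OFF)
        return true;                     /* classic world-readable /proc */
    return is_owner || in_gid_group;     /* hidepid=1/2: owner or gid= only */
}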
static __net_init int proto_init_net(struct net *net)
{
	if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops,
			sizeof(struct seq_net_private)))
		return -ENOMEM;

	return 0;
}
0
[]
net
35306eb23814444bd4021f8a1c3047d3cb0c8b2b
273,153,200,303,323,730,000,000,000,000,000,000,000
8
af_unix: fix races in sk_peer_pid and sk_peer_cred accesses Jann Horn reported that SO_PEERCRED and SO_PEERGROUPS implementations are racy, as af_unix can concurrently change sk_peer_pid and sk_peer_cred. In order to fix this issue, this patch adds a new spinlock that needs to be used whenever these fields are read or written. Jann also pointed out that l2cap_sock_get_peer_pid_cb() is currently reading sk->sk_peer_pid which makes no sense, as this field is only possibly set by AF_UNIX sockets. We will have to clean this in a separate patch. This could be done by reverting b48596d1dc25 "Bluetooth: L2CAP: Add get_peer_pid callback" or implementing what was truly expected. Fixes: 109f6e39fa07 ("af_unix: Allow SO_PEERCRED to work across namespaces.") Signed-off-by: Eric Dumazet <[email protected]> Reported-by: Jann Horn <[email protected]> Cc: Eric W. Biederman <[email protected]> Cc: Luiz Augusto von Dentz <[email protected]> Cc: Marcel Holtmann <[email protected]> Signed-off-by: David S. Miller <[email protected]>
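A userspace sketch of the locking rule the patch adds: every reader and writer of the peer credentials takes one dedicated lock, so getsockopt(SO_PEERCRED) can no longer observe a half-updated pair. Field names are illustrative; assume the mutex is initialized when the socket is created:

#include <pthread.h>

struct sock_model {
    pthread_mutex_t peer_lock;   /* guards both fields below */
    long peer_pid;
    long peer_uid;
};

static long read_peer_pid(struct sock_model *sk)
{
    pthread_mutex_lock(&sk->peer_lock);
    long pid = sk->peer_pid;     /* consistent snapshot */
    pthread_mutex_unlock(&sk->peer_lock);
    return pid;
}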
std::string
QPDF::getFilename() const
{
    return this->file->getName();
}
0
[ "CWE-399", "CWE-835" ]
qpdf
701b518d5c56a1449825a3a37a716c58e05e1c3e
182,496,100,140,855,650,000,000,000,000,000,000,000
4
Detect recursion loops resolving objects (fixes #51) During parsing of an object, sometimes parts of the object have to be resolved. An example is stream lengths. If such an object directly or indirectly points to the object being parsed, it can cause an infinite loop. Guard against all cases of re-entrant resolution of objects.
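The guard described here is a classic re-entrancy check. A minimal standalone shape of it — mark an object id as in-progress and fail fast on re-entry instead of looping forever; qpdf's real code tracks this per object rather than in a fixed array:

#include <stdbool.h>

#define MAX_OBJS 1024
static bool resolving[MAX_OBJS];

static bool resolve(int objid)
{
    if (objid < 0 || objid >= MAX_OBJS || resolving[objid])
        return false;          /* direct or indirect self-reference */
    resolving[objid] = true;
    /* ... parse the object; may recursively call resolve() ... */
    resolving[objid] = false;
    return true;
}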
void
grammar_start_symbol_set (symbol *sym, location loc)
{
  if (start_flag)
    complain (&loc, complaint, _("multiple %s declarations"), "%start");
  else
    {
      start_flag = true;
      startsymbol = sym;
      startsymbol_loc = loc;
    }
}
0
[]
bison
b7aab2dbad43aaf14eebe78d54aafa245a000988
183,532,230,039,409,270,000,000,000,000,000,000,000
11
fix: crash when redefining the EOF token Reported by Agency for Defense Development. https://lists.gnu.org/r/bug-bison/2020-08/msg00008.html On an empty such as %token FOO BAR FOO 0 %% input: %empty we crash because when we find FOO 0, we decrement ntokens (since FOO was discovered to be EOF, which is already known to be a token, so we increment ntokens for it, and need to cancel this). This "works well" when EOF is properly defined in one go, but here it is first defined and later only assign token code 0. In the meanwhile BAR was given the token number that we just decremented. To fix this, assign symbol numbers after parsing, not during parsing, so that we also saw all the explicit token codes. To maintain the current numbers (I'd like to keep no difference in the output, not just equivalence), we need to make sure the symbols are numbered in the same order: that of appearance in the source file. So we need the locations to be correct, which was almost the case, except for nterms that appeared several times as LHS (i.e., several times as "foo: ..."). Fixing the use of location_of_lhs sufficed (it appears it was intended for this use, but its implementation was unfinished: it was always set to "false" only). * src/symtab.c (symbol_location_as_lhs_set): Update location_of_lhs. (symbol_code_set): Remove broken hack that decremented ntokens. (symbol_class_set, dummy_symbol_get): Don't set number, ntokens and nnterms. (symbol_check_defined): Do it. (symbols): Don't count nsyms here. Actually, don't count nsyms at all: let it be done in... * src/reader.c (check_and_convert_grammar): here. Define nsyms from ntokens and nnterms after parsing. * tests/input.at (EOF redeclared): New. * examples/c/bistromathic/bistromathic.test: Adjust the traces: in "%nterm <double> exp %% input: ...", exp used to be numbered before input.
static int pkey_gost2018_decrypt(EVP_PKEY_CTX *pctx, unsigned char *key, size_t *key_len, const unsigned char *in, size_t in_len) { const unsigned char *p = in; struct gost_pmeth_data *data; EVP_PKEY *priv; PSKeyTransport_gost *pst = NULL; int ret = 0; unsigned char expkeys[64]; EVP_PKEY *eph_key = NULL; int pkey_nid; int mac_nid = NID_undef; int iv_len = 0; if (!(data = EVP_PKEY_CTX_get_data(pctx)) || !(priv = EVP_PKEY_CTX_get0_pkey(pctx))) { GOSTerr(GOST_F_PKEY_GOST2018_DECRYPT, GOST_R_ERROR_COMPUTING_EXPORT_KEYS); ret = 0; goto err; } pkey_nid = EVP_PKEY_base_id(priv); switch (data->cipher_nid) { case NID_magma_ctr: mac_nid = NID_magma_mac; iv_len = 4; break; case NID_grasshopper_ctr: mac_nid = NID_grasshopper_mac; iv_len = 8; break; default: GOSTerr(GOST_F_PKEY_GOST2018_DECRYPT, GOST_R_INVALID_CIPHER); return -1; break; } pst = d2i_PSKeyTransport_gost(NULL, (const unsigned char **)&p, in_len); if (!pst) { GOSTerr(GOST_F_PKEY_GOST2018_DECRYPT, GOST_R_ERROR_PARSING_KEY_TRANSPORT_INFO); return -1; } eph_key = X509_PUBKEY_get(pst->ephem_key); /* * TODO beldmit 1. Checks the next three conditions fulfilling and terminates the connection with fatal error if not. o Q_eph is on the same curve as server public key; o Q_eph is not equal to zero point; o q * Q_eph is not equal to zero point. */ if (eph_key == NULL) { GOSTerr(GOST_F_PKEY_GOST2018_DECRYPT, GOST_R_ERROR_COMPUTING_EXPORT_KEYS); ret = 0; goto err; } if (data->shared_ukm_size == 0 && pst->ukm != NULL) { if (EVP_PKEY_CTX_ctrl(pctx, -1, -1, EVP_PKEY_CTRL_SET_IV, ASN1_STRING_length(pst->ukm), (void *)ASN1_STRING_get0_data(pst->ukm)) < 0) { GOSTerr(GOST_F_PKEY_GOST2018_DECRYPT, GOST_R_UKM_NOT_SET); goto err; } } if (gost_keg(data->shared_ukm, pkey_nid, EC_KEY_get0_public_key(EVP_PKEY_get0(eph_key)), EVP_PKEY_get0(priv), expkeys) <= 0) { GOSTerr(GOST_F_PKEY_GOST2018_DECRYPT, GOST_R_ERROR_COMPUTING_EXPORT_KEYS); goto err; } if (gost_kimp15(ASN1_STRING_get0_data(pst->psexp), ASN1_STRING_length(pst->psexp), data->cipher_nid, expkeys + 32, mac_nid, expkeys + 0, data->shared_ukm + 24, iv_len, key) <= 0) { GOSTerr(GOST_F_PKEY_GOST2018_DECRYPT, GOST_R_CANNOT_UNPACK_EPHEMERAL_KEY); goto err; } *key_len = 32; ret = 1; err: OPENSSL_cleanse(expkeys, sizeof(expkeys)); EVP_PKEY_free(eph_key); PSKeyTransport_gost_free(pst); return ret; }
0
[ "CWE-120", "CWE-787" ]
engine
b2b4d629f100eaee9f5942a106b1ccefe85b8808
113,851,282,919,558,530,000,000,000,000,000,000,000
96
On unpacking key blob output buffer size should be fixed Related: CVE-2022-29242
void utf8_snprintf(gchar *dst, gsize byte_len, const gchar *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	gchar *str = g_strdup_vprintf(fmt, ap);
	va_end(ap);

	utf8_strncpy(dst, str, byte_len);

	g_free(str);
}
0
[ "CWE-200" ]
gerbv
319a8af890e4d0a5c38e6d08f510da8eefc42537
96,287,189,463,301,710,000,000,000,000,000,000,000
10
Remove local alias to parameter array Normalizing access to `gerbv_simplified_amacro_t::parameter` as a step to fix CVE-2021-40402
ns_client_error(ns_client_t *client, isc_result_t result) { dns_message_t *message = NULL; dns_rcode_t rcode; bool trunc = false; REQUIRE(NS_CLIENT_VALID(client)); CTRACE("error"); message = client->message; if (client->rcode_override == -1) { rcode = dns_result_torcode(result); } else { rcode = (dns_rcode_t)(client->rcode_override & 0xfff); } if (result == ISC_R_MAXSIZE) { trunc = true; } #if NS_CLIENT_DROPPORT /* * Don't send FORMERR to ports on the drop port list. */ if (rcode == dns_rcode_formerr && ns_client_dropport(isc_sockaddr_getport(&client->peeraddr)) != DROPPORT_NO) { char buf[64]; isc_buffer_t b; isc_buffer_init(&b, buf, sizeof(buf) - 1); if (dns_rcode_totext(rcode, &b) != ISC_R_SUCCESS) { isc_buffer_putstr(&b, "UNKNOWN RCODE"); } ns_client_log(client, DNS_LOGCATEGORY_SECURITY, NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(10), "dropped error (%.*s) response: suspicious port", (int)isc_buffer_usedlength(&b), buf); ns_client_drop(client, ISC_R_SUCCESS); return; } #endif /* if NS_CLIENT_DROPPORT */ /* * Try to rate limit error responses. */ if (client->view != NULL && client->view->rrl != NULL) { bool wouldlog; char log_buf[DNS_RRL_LOG_BUF_LEN]; dns_rrl_result_t rrl_result; int loglevel; if ((client->sctx->options & NS_SERVER_LOGQUERIES) != 0) { loglevel = DNS_RRL_LOG_DROP; } else { loglevel = ISC_LOG_DEBUG(1); } wouldlog = isc_log_wouldlog(ns_lctx, loglevel); rrl_result = dns_rrl( client->view, &client->peeraddr, TCP_CLIENT(client), dns_rdataclass_in, dns_rdatatype_none, NULL, result, client->now, wouldlog, log_buf, sizeof(log_buf)); if (rrl_result != DNS_RRL_RESULT_OK) { /* * Log dropped errors in the query category * so that they are not lost in silence. * Starts of rate-limited bursts are logged in * NS_LOGCATEGORY_RRL. */ if (wouldlog) { ns_client_log(client, NS_LOGCATEGORY_QUERY_ERRORS, NS_LOGMODULE_CLIENT, loglevel, "%s", log_buf); } /* * Some error responses cannot be 'slipped', * so don't try to slip any error responses. */ if (!client->view->rrl->log_only) { ns_stats_increment(client->sctx->nsstats, ns_statscounter_ratedropped); ns_stats_increment(client->sctx->nsstats, ns_statscounter_dropped); ns_client_drop(client, DNS_R_DROP); return; } } } /* * Message may be an in-progress reply that we had trouble * with, in which case QR will be set. We need to clear QR before * calling dns_message_reply() to avoid triggering an assertion. */ message->flags &= ~DNS_MESSAGEFLAG_QR; /* * AA and AD shouldn't be set. */ message->flags &= ~(DNS_MESSAGEFLAG_AA | DNS_MESSAGEFLAG_AD); result = dns_message_reply(message, true); if (result != ISC_R_SUCCESS) { /* * It could be that we've got a query with a good header, * but a bad question section, so we try again with * want_question_section set to false. */ result = dns_message_reply(message, false); if (result != ISC_R_SUCCESS) { ns_client_drop(client, result); return; } } message->rcode = rcode; if (trunc) { message->flags |= DNS_MESSAGEFLAG_TC; } if (rcode == dns_rcode_formerr) { /* * FORMERR loop avoidance: If we sent a FORMERR message * with the same ID to the same client less than two * seconds ago, assume that we are in an infinite error * packet dialog with a server for some protocol whose * error responses look enough like DNS queries to * elicit a FORMERR response. Drop a packet to break * the loop. */ if (isc_sockaddr_equal(&client->peeraddr, &client->formerrcache.addr) && message->id == client->formerrcache.id && (isc_time_seconds(&client->requesttime) - client->formerrcache.time) < 2) { /* Drop packet. 
*/ ns_client_log(client, NS_LOGCATEGORY_CLIENT, NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(1), "possible error packet loop, " "FORMERR dropped"); ns_client_drop(client, result); return; } client->formerrcache.addr = client->peeraddr; client->formerrcache.time = isc_time_seconds(&client->requesttime); client->formerrcache.id = message->id; } else if (rcode == dns_rcode_servfail && client->query.qname != NULL && client->view != NULL && client->view->fail_ttl != 0 && ((client->attributes & NS_CLIENTATTR_NOSETFC) == 0)) { /* * SERVFAIL caching: store qname/qtype of failed queries */ isc_time_t expire; isc_interval_t i; uint32_t flags = 0; if ((message->flags & DNS_MESSAGEFLAG_CD) != 0) { flags = NS_FAILCACHE_CD; } isc_interval_set(&i, client->view->fail_ttl, 0); result = isc_time_nowplusinterval(&expire, &i); if (result == ISC_R_SUCCESS) { dns_badcache_add( client->view->failcache, client->query.qname, client->query.qtype, true, flags, &expire); } } ns_client_send(client); }
0
[ "CWE-617" ]
bind9
15996f0cb15631b95a801e3e88928494a69ad6ee
260,426,348,446,429,800,000,000,000,000,000,000,000
175
ns_client_error() could assert if rcode was overridden to NOERROR The client->rcode_override was originally created to force the server to send SERVFAIL in some cases when it would normally have sent FORMERR. More recently, it was used in a3ba95116ed04594ea59a8124bf781b30367a7a2 commit (part of GL #2790) to force the sending of a TC=1 NOERROR response, triggering a retry via TCP, when a UDP packet could not be sent due to ISC_R_MAXSIZE. This ran afoul of a pre-existing INSIST in ns_client_error() when RRL was in use. the INSIST was based on the assumption that ns_client_error() could never result in a non-error rcode. as that assumption is no longer valid, the INSIST has been removed.
int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port,
		u8 idx, u8 myand, u8 myor)
{
	int ret;
	u8 tmp;

	ret = sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, idx);
	ret |= sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port + 1, &tmp);
	tmp &= myand;
	tmp |= myor;
	ret |= sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port + 1, tmp);
	return ret;
}
0
[ "CWE-476" ]
linux
9a5729f68d3a82786aea110b1bfe610be318f80a
179,637,127,172,165,620,000,000,000,000,000,000,000
13
USB: sisusbvga: fix oops in error path of sisusb_probe The pointer used to log a failure of usb_register_dev() must be set before the error is logged. v2: fix that minor is not available before registration Signed-off-by: oliver Neukum <[email protected]> Reported-by: [email protected] Fixes: 7b5cd5fefbe02 ("USB: SisUSB2VGA: Convert printk to dev_* macros") Cc: stable <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
void Http2Session::Settings(const FunctionCallbackInfo<Value>& args) {
  Http2Session* session;
  ASSIGN_OR_RETURN_UNWRAP(&session, args.Holder());
  CHECK(args[0]->IsFunction());
  args.GetReturnValue().Set(session->AddSettings(args[0].As<Function>()));
}
0
[ "CWE-416" ]
node
a3c33d4ce78f74d1cf1765704af5b427aa3840a6
183,203,151,898,232,780,000,000,000,000,000,000,000
6
http2: update handling of rst_stream with error code NGHTTP2_CANCEL The PR updates the handling of rst_stream frames and adds all streams to the pending list on receiving rst frames with the error code NGHTTP2_CANCEL. The changes will remove dependency on the stream state that may allow bypassing the checks in certain cases. I think a better solution is to delay streams in all cases if rst_stream is received for the cancel events. The rst_stream frames can be received for protocol/connection error as well it should be handled immediately. Adding streams to the pending list in such cases may cause errors. CVE-ID: CVE-2021-22930 Refs: https://nvd.nist.gov/vuln/detail/CVE-2021-22930 PR-URL: https://github.com/nodejs/node/pull/39622 Refs: https://github.com/nodejs/node/pull/39423 Reviewed-By: Matteo Collina <[email protected]> Reviewed-By: James M Snell <[email protected]> Reviewed-By: Beth Griggs <[email protected]>
void md5_final(unsigned char *md, MD5CTX c)
{
	gcry_md_final(c);
	memcpy(md, gcry_md_read(c, 0), MD5_DIGEST_LEN);
	gcry_md_close(c);
}
0
[ "CWE-310" ]
libssh
e99246246b4061f7e71463f8806b9dcad65affa0
123,766,872,588,939,800,000,000,000,000,000,000,000
5
security: fix for vulnerability CVE-2014-0017 When accepting a new connection, a forking server based on libssh forks and the child process handles the request. The RAND_bytes() function of openssl doesn't reset its state after the fork, but simply adds the current process id (getpid) to the PRNG state, which is not guaranteed to be unique. This can cause several children to end up with same PRNG state which is a security issue.
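The underlying issue is that a forked child inherits the parent's PRNG state plus only its pid. One hedged mitigation sketch — explicitly mix fresh per-child entropy after fork(); rand_add() below is an assumed reseed hook standing in for the library's (e.g. OpenSSL's RAND_add, whose real signature also takes an entropy estimate):

#include <unistd.h>
#include <stdio.h>

extern void rand_add(const void *buf, int num);   /* assumed reseed hook */

static pid_t fork_with_reseed(void)
{
    pid_t pid = fork();
    if (pid == 0) {
        unsigned char seed[16];
        FILE *f = fopen("/dev/urandom", "rb");
        if (f && fread(seed, 1, sizeof(seed), f) == sizeof(seed))
            rand_add(seed, sizeof(seed));         /* child gets fresh state */
        if (f)
            fclose(f);
    }
    return pid;
}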
void Convert::generic_convert(const char * in, int size, CharVector & out)
{
  buf_.clear();
  decode_->decode(in, size, buf_);
  FilterChar * start = buf_.pbegin();
  FilterChar * stop = buf_.pend();
  if (!filter.empty())
    filter.process(start, stop);
  encode_->encode(start, stop, out);
}
0
[ "CWE-125" ]
aspell
de29341638833ba7717bd6b5e6850998454b044b
178,888,736,689,418,900,000,000,000,000,000,000,000
10
Don't allow null-terminated UCS-2/4 strings using the original API. Detect if the encoding is UCS-2/4 and the length is -1 in affected API functions and refuse to convert the string. If the string ends up being converted somehow, abort with an error message in DecodeDirect and ConvDirect. To convert a null terminated string in Decode/ConvDirect, a negative number corresponding to the width of the underlying character type for the encoding is expected; for example, if the encoding is "ucs-2" then a the size is expected to be -2. Also fix a 1-3 byte over-read in DecodeDirect when reading UCS-2/4 strings when a size is provided (found by OSS-Fuzz). Also fix a bug in DecodeDirect that caused DocumentChecker to return the wrong offsets when working with UCS-2/4 strings.
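The refusal the commit describes is easy to state: a size of -1 ("null-terminated") is ambiguous for encodings whose unit is wider than one byte, so reject it for UCS-2/4 and require the explicit negative width (-2/-4) instead. An illustrative check, not Aspell's API:

#include <stdbool.h>
#include <string.h>

static bool size_ok_for_encoding(const char *enc, int size)
{
    bool wide = strncmp(enc, "ucs-2", 5) == 0 || strncmp(enc, "ucs-4", 5) == 0;
    if (wide && size == -1)
        return false;   /* ambiguous "null-terminated" request */
    return true;
}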
static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
0
[ "CWE-399" ]
linux
6ec82562ffc6f297d0de36d65776cff8e5704867
178,360,135,170,068,070,000,000,000,000,000,000,000
12
veth: Dont kfree_skb() after dev_forward_skb() In case of congestion, netif_rx() frees the skb, so we must assume dev_forward_skb() also consume skb. Bug introduced by commit 445409602c092 (veth: move loopback logic to common location) We must change dev_forward_skb() to always consume skb, and veth to not double free it. Bug report : http://marc.info/?l=linux-netdev&m=127310770900442&w=3 Reported-by: Martín Ferrari <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
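The bug class is a double free caused by unclear buffer ownership. A toy model of the contract the fix establishes — the forwarding helper consumes the buffer on every path, so the caller never frees it afterwards (plain free() stands in for kfree_skb/netif_rx consumption):

#include <stdlib.h>

struct skb { char *data; };

static int forward(struct skb *skb, int congested)
{
    if (congested) {
        free(skb);          /* consumed on the error path too */
        return -1;
    }
    free(skb);              /* stand-in for handing ownership to the receiver */
    return 0;
}

static void xmit(struct skb *skb, int congested)
{
    forward(skb, congested);
    /* No free(skb) here: that second free was the bug being removed. */
}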
static int ext4_dio_get_block_unwritten_async(struct inode *inode,
		sector_t iblock, struct buffer_head *bh_result, int create)
{
	int ret;

	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	ret = ext4_get_block_trans(inode, iblock, bh_result,
				   EXT4_GET_BLOCKS_IO_CREATE_EXT);

	/*
	 * When doing DIO using unwritten extents, we need io_end to convert
	 * unwritten extents to written on IO completion. We allocate io_end
	 * once we spot unwritten extent and store it in b_private. Generic
	 * DIO code keeps b_private set and furthermore passes the value to
	 * our completion callback in 'private' argument.
	 */
	if (!ret && buffer_unwritten(bh_result)) {
		if (!bh_result->b_private) {
			ext4_io_end_t *io_end;

			io_end = ext4_init_io_end(inode, GFP_KERNEL);
			if (!io_end)
				return -ENOMEM;
			bh_result->b_private = io_end;
			ext4_set_io_unwritten_flag(inode, io_end);
		}
		set_buffer_defer_completion(bh_result);
	}

	return ret;
}
0
[ "CWE-200" ]
linux
06bd3c36a733ac27962fea7d6f47168841376824
303,803,639,371,503,340,000,000,000,000,000,000,000
33
ext4: fix data exposure after a crash Huang has reported that in his powerfail testing he is seeing stale block contents in some of recently allocated blocks although he mounts ext4 in data=ordered mode. After some investigation I have found out that indeed when delayed allocation is used, we don't add inode to transaction's list of inodes needing flushing before commit. Originally we were doing that but commit f3b59291a69d removed the logic with a flawed argument that it is not needed. The problem is that although for delayed allocated blocks we write their contents immediately after allocating them, there is no guarantee that the IO scheduler or device doesn't reorder things and thus transaction allocating blocks and attaching them to inode can reach stable storage before actual block contents. Actually whenever we attach freshly allocated blocks to inode using a written extent, we should add inode to transaction's ordered inode list to make sure we properly wait for block contents to be written before committing the transaction. So that is what we do in this patch. This also handles other cases where stale data exposure was possible - like filling hole via mmap in data=ordered,nodelalloc mode. The only exception to the above rule are extending direct IO writes where blkdev_direct_IO() waits for IO to complete before increasing i_size and thus stale data exposure is not possible. For now we don't complicate the code with optimizing this special case since the overhead is pretty low. In case this is observed to be a performance problem we can always handle it using a special flag to ext4_map_blocks(). CC: [email protected] Fixes: f3b59291a69d0b734be1fc8be489fef2dd846d3d Reported-by: "HUANG Weller (CM/ESW12-CN)" <[email protected]> Tested-by: "HUANG Weller (CM/ESW12-CN)" <[email protected]> Signed-off-by: Jan Kara <[email protected]> Signed-off-by: Theodore Ts'o <[email protected]>
static const char* PE_(bin_pe_get_claimed_authentihash)(RBinPEObj* pe) {
	if (!pe->spcinfo) {
		return NULL;
	}
	RASN1Binary *digest = pe->spcinfo->messageDigest.digest;
	return digest? r_hex_bin2strdup (digest->binary, digest->length): NULL;
}
0
[ "CWE-400", "CWE-703" ]
radare2
634b886e84a5c568d243e744becc6b3223e089cf
215,042,910,054,242,260,000,000,000,000,000,000,000
7
Fix DoS in PE/QNX/DYLDCACHE/PSX parsers ##crash * Reported by lazymio * Reproducer: AAA4AAAAAB4=
int
gx_default_begin_page(gx_device * dev, gs_gstate * pgs)
{
    return 0;
}
0
[]
ghostpdl
c9b362ba908ca4b1d7c72663a33229588012d7d9
170,833,888,943,036,470,000,000,000,000,000,000,000
4
Bug 699670: disallow copying of the epo device The erasepage optimisation (epo) subclass device shouldn't be allowed to be copied because the subclass private data, child and parent pointers end up being shared between the original device and the copy. Add an epo_finish_copydevice which NULLs the three offending pointers, and then communicates to the caller that copying is not allowed. This also exposed a separate issue with the stype for subclasses devices. Devices are, I think, unique in having two stype objects associated with them: the usual one in the memory manager header, and the other stored in the device structere directly. In order for the stype to be correct, we have to use the stype for the incoming device, with the ssize of the original device (ssize should reflect the size of the memory allocation). We correctly did so with the stype in the device structure, but then used the prototype device's stype to patch the memory manager stype - meaning the ssize potentially no longer matched the allocated memory. This caused problems in the garbager where there is an implicit assumption that the size of a single object clump (c_alone == 1) is also the size (+ memory manager overheads) of the single object it contains. The solution is to use the same stype instance to patch the memory manager data as we do in the device structure (with the correct ssize).
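The described fix has two parts; the copy-refusal part is small enough to sketch. Illustrative shape only — the names and the error convention are assumptions, not Ghostscript's device API:

#include <stddef.h>

struct epo_device {
    void *child;
    void *parent;
    void *subclass_data;
};

static int epo_finish_copydevice(struct epo_device *dst)
{
    /* Clear the pointers that must never be shared between the
     * original device and its copy, then refuse the copy. */
    dst->child = NULL;
    dst->parent = NULL;
    dst->subclass_data = NULL;
    return -1;   /* signal "copying not supported" to the caller */
}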
static unsigned long mmap_legacy_base(void)
{
	if (mmap_is_ia32())
		return TASK_UNMAPPED_BASE;
	else
		return TASK_UNMAPPED_BASE + mmap_rnd();
}
0
[ "CWE-284", "CWE-264" ]
linux
4e7c22d447bb6d7e37bfe39ff658486ae78e8d77
102,810,967,728,111,780,000,000,000,000,000,000,000
7
x86, mm/ASLR: Fix stack randomization on 64-bit systems The issue is that the stack for processes is not properly randomized on 64 bit architectures due to an integer overflow. The affected function is randomize_stack_top() in file "fs/binfmt_elf.c": static unsigned long randomize_stack_top(unsigned long stack_top) { unsigned int random_variable = 0; if ((current->flags & PF_RANDOMIZE) && !(current->personality & ADDR_NO_RANDOMIZE)) { random_variable = get_random_int() & STACK_RND_MASK; random_variable <<= PAGE_SHIFT; } return PAGE_ALIGN(stack_top) + random_variable; return PAGE_ALIGN(stack_top) - random_variable; } Note that, it declares the "random_variable" variable as "unsigned int". Since the result of the shifting operation between STACK_RND_MASK (which is 0x3fffff on x86_64, 22 bits) and PAGE_SHIFT (which is 12 on x86_64): random_variable <<= PAGE_SHIFT; then the two leftmost bits are dropped when storing the result in the "random_variable". This variable shall be at least 34 bits long to hold the (22+12) result. These two dropped bits have an impact on the entropy of process stack. Concretely, the total stack entropy is reduced by four: from 2^28 to 2^30 (One fourth of expected entropy). This patch restores back the entropy by correcting the types involved in the operations in the functions randomize_stack_top() and stack_maxrandom_size(). The successful fix can be tested with: $ for i in `seq 1 10`; do cat /proc/self/maps | grep stack; done 7ffeda566000-7ffeda587000 rw-p 00000000 00:00 0 [stack] 7fff5a332000-7fff5a353000 rw-p 00000000 00:00 0 [stack] 7ffcdb7a1000-7ffcdb7c2000 rw-p 00000000 00:00 0 [stack] 7ffd5e2c4000-7ffd5e2e5000 rw-p 00000000 00:00 0 [stack] ... Once corrected, the leading bytes should be between 7ffc and 7fff, rather than always being 7fff. Signed-off-by: Hector Marco-Gisbert <[email protected]> Signed-off-by: Ismael Ripoll <[email protected]> [ Rebased, fixed 80 char bugs, cleaned up commit message, added test example and CVE ] Signed-off-by: Kees Cook <[email protected]> Cc: <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Andrew Morton <[email protected]> Cc: Al Viro <[email protected]> Fixes: CVE-2015-1593 Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Borislav Petkov <[email protected]>
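The corrected function, modeled standalone: widening random_variable to unsigned long keeps all 22 + 12 bits of the shifted value. rand() stands in for get_random_int(), the mask and shift mirror the x86_64 constants quoted above, and the offset is subtracted because the stack grows down there:

#include <stdlib.h>

#define PAGE_SHIFT      12
#define PAGE_ALIGN(x)   (((x) + 4095UL) & ~4095UL)
#define STACK_RND_MASK  0x3fffffUL                 /* 22 bits on x86_64 */

static unsigned long randomize_stack_top_fixed(unsigned long stack_top)
{
    unsigned long random_variable = 0;             /* was: unsigned int */

    random_variable = (unsigned long)rand() & STACK_RND_MASK;
    random_variable <<= PAGE_SHIFT;                /* no bits dropped now */
    return PAGE_ALIGN(stack_top) - random_variable;
}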
void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, const struct ip_options *sopt, __be32 daddr, __be32 saddr, const struct ip_reply_arg *arg, unsigned int len) { struct ip_options_data replyopts; struct ipcm_cookie ipc; struct flowi4 fl4; struct rtable *rt = skb_rtable(skb); struct net *net = sock_net(sk); struct sk_buff *nskb; int err; int oif; if (__ip_options_echo(&replyopts.opt.opt, skb, sopt)) return; ipc.addr = daddr; ipc.opt = NULL; ipc.tx_flags = 0; ipc.ttl = 0; ipc.tos = -1; if (replyopts.opt.opt.optlen) { ipc.opt = &replyopts.opt; if (replyopts.opt.opt.srr) daddr = replyopts.opt.opt.faddr; } oif = arg->bound_dev_if; if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) oif = skb->skb_iif; flowi4_init_output(&fl4, oif, IP4_REPLY_MARK(net, skb->mark), RT_TOS(arg->tos), RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol, ip_reply_arg_flowi_flags(arg), daddr, saddr, tcp_hdr(skb)->source, tcp_hdr(skb)->dest, arg->uid); security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) return; inet_sk(sk)->tos = arg->tos; sk->sk_priority = skb->priority; sk->sk_protocol = ip_hdr(skb)->protocol; sk->sk_bound_dev_if = arg->bound_dev_if; sk->sk_sndbuf = sysctl_wmem_default; sk->sk_mark = fl4.flowi4_mark; err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0, &ipc, &rt, MSG_DONTWAIT); if (unlikely(err)) { ip_flush_pending_frames(sk); goto out; } nskb = skb_peek(&sk->sk_write_queue); if (nskb) { if (arg->csumoffset >= 0) *((__sum16 *)skb_transport_header(nskb) + arg->csumoffset) = csum_fold(csum_add(nskb->csum, arg->csum)); nskb->ip_summed = CHECKSUM_NONE; ip_push_pending_frames(sk, &fl4); } out: ip_rt_put(rt); }
0
[ "CWE-362" ]
net
85f1bd9a7b5a79d5baa8bf44af19658f7bf77bfa
120,990,692,533,715,640,000,000,000,000,000,000,000
74
udp: consistently apply ufo or fragmentation When iteratively building a UDP datagram with MSG_MORE and that datagram exceeds MTU, consistently choose UFO or fragmentation. Once skb_is_gso, always apply ufo. Conversely, once a datagram is split across multiple skbs, do not consider ufo. Sendpage already maintains the first invariant, only add the second. IPv6 does not have a sendpage implementation to modify. A gso skb must have a partial checksum, do not follow sk_no_check_tx in udp_send_skb. Found by syzkaller. Fixes: e89e9cf539a2 ("[IPv4/IPv6]: UFO Scatter-gather approach") Reported-by: Andrey Konovalov <[email protected]> Signed-off-by: Willem de Bruijn <[email protected]> Signed-off-by: David S. Miller <[email protected]>
void PackLinuxElf64::unpack(OutputFile *fo) { if (e_phoff != sizeof(Elf64_Ehdr)) {// Phdrs not contiguous with Ehdr throwCantUnpack("bad e_phoff"); } unsigned const c_phnum = get_te16(&ehdri.e_phnum); upx_uint64_t old_data_off = 0; upx_uint64_t old_data_len = 0; upx_uint64_t old_dtinit = 0; unsigned is_asl = 0; // is Android Shared Library unsigned szb_info = sizeof(b_info); { upx_uint64_t const e_entry = get_te64(&ehdri.e_entry); if (e_entry < 0x401180 && get_te16(&ehdri.e_machine)==Elf64_Ehdr::EM_386) { /* old style, 8-byte b_info */ szb_info = 2*sizeof(unsigned); } } fi->seek(overlay_offset - sizeof(l_info), SEEK_SET); fi->readx(&linfo, sizeof(linfo)); lsize = get_te16(&linfo.l_lsize); if (UPX_MAGIC_LE32 != get_le32(&linfo.l_magic)) { throwCantUnpack("l_info corrupted"); } p_info hbuf; fi->readx(&hbuf, sizeof(hbuf)); unsigned orig_file_size = get_te32(&hbuf.p_filesize); blocksize = get_te32(&hbuf.p_blocksize); if ((u32_t)file_size > orig_file_size || blocksize > orig_file_size || !mem_size_valid(1, blocksize, OVERHEAD)) throwCantUnpack("p_info corrupted"); ibuf.alloc(blocksize + OVERHEAD); b_info bhdr; memset(&bhdr, 0, sizeof(bhdr)); fi->readx(&bhdr, szb_info); ph.u_len = get_te32(&bhdr.sz_unc); ph.c_len = get_te32(&bhdr.sz_cpr); if (ph.c_len > (unsigned)file_size || ph.c_len == 0 || ph.u_len == 0 || ph.u_len > orig_file_size) throwCantUnpack("b_info corrupted"); ph.filter_cto = bhdr.b_cto8; MemBuffer u(ph.u_len); Elf64_Ehdr *const ehdr = (Elf64_Ehdr *)&u[0]; Elf64_Phdr const *phdr = 0; // Uncompress Ehdr and Phdrs. if (ibuf.getSize() < ph.c_len) throwCompressedDataViolation(); fi->readx(ibuf, ph.c_len); decompress(ibuf, (upx_byte *)ehdr, false); if (ehdr->e_type !=ehdri.e_type || ehdr->e_machine!=ehdri.e_machine || ehdr->e_version!=ehdri.e_version // less strict for EM_PPC64 to workaround earlier bug || !( ehdr->e_flags==ehdri.e_flags || Elf64_Ehdr::EM_PPC64 == get_te16(&ehdri.e_machine)) || ehdr->e_ehsize !=ehdri.e_ehsize // check EI_MAG[0-3], EI_CLASS, EI_DATA, EI_VERSION || memcmp(ehdr->e_ident, ehdri.e_ident, Elf64_Ehdr::EI_OSABI)) { throwCantUnpack("ElfXX_Ehdr corrupted"); } fi->seek(- (off_t) (szb_info + ph.c_len), SEEK_CUR); unsigned const u_phnum = get_te16(&ehdr->e_phnum); unsigned total_in = 0; unsigned total_out = 0; unsigned c_adler = upx_adler32(NULL, 0); unsigned u_adler = upx_adler32(NULL, 0); #define MAX_ELF_HDR 1024 if ((umin64(MAX_ELF_HDR, ph.u_len) - sizeof(Elf64_Ehdr))/sizeof(Elf64_Phdr) < u_phnum) { throwCantUnpack("bad compressed e_phnum"); } #undef MAX_ELF_HDR // Packed ET_EXE has no PT_DYNAMIC. // Packed ET_DYN has original PT_DYNAMIC for info needed by rtld. Elf64_Phdr const *const dynhdr = elf_find_ptype(Elf64_Phdr::PT_DYNAMIC, phdri, c_phnum); bool const is_shlib = !!dynhdr; if (is_shlib) { // Unpack and output the Ehdr and Phdrs for real. // This depends on position within input file fi. unpackExtent(ph.u_len, fo, total_in, total_out, c_adler, u_adler, false, szb_info); // The first PT_LOAD. Part is not compressed (for benefit of rtld.) 
fi->seek(0, SEEK_SET); fi->readx(ibuf, get_te64(&dynhdr->p_offset) + get_te64(&dynhdr->p_filesz)); overlay_offset -= sizeof(linfo); xct_off = overlay_offset; e_shoff = get_te64(&ehdri.e_shoff); ibuf.subref("bad .e_shoff %#lx for %#lx", e_shoff, sizeof(Elf64_Shdr) * e_shnum); if (e_shoff && e_shnum) { // --android-shlib shdri = (Elf64_Shdr /*const*/ *)ibuf.subref( "bad Shdr table", e_shoff, sizeof(Elf64_Shdr)*e_shnum); upx_uint64_t xct_off2 = get_te64(&shdri->sh_offset); if (e_shoff == xct_off2) { xct_off = e_shoff; } // un-Relocate dynsym (DT_SYMTAB) which is below xct_off dynseg = (Elf64_Dyn const *)ibuf.subref( "bad DYNAMIC", get_te64(&dynhdr->p_offset), get_te64(&dynhdr->p_filesz)); dynstr = (char const *)elf_find_dynamic(Elf64_Dyn::DT_STRTAB); sec_dynsym = elf_find_section_type(Elf64_Shdr::SHT_DYNSYM); if (sec_dynsym) { upx_uint64_t const off_dynsym = get_te64(&sec_dynsym->sh_offset); upx_uint64_t const sz_dynsym = get_te64(&sec_dynsym->sh_size); Elf64_Sym *const sym0 = (Elf64_Sym *)ibuf.subref( "bad dynsym", off_dynsym, sz_dynsym); Elf64_Sym *sym = sym0; for (int j = sz_dynsym / sizeof(Elf64_Sym); --j>=0; ++sym) { upx_uint64_t symval = get_te64(&sym->st_value); unsigned symsec = get_te16(&sym->st_shndx); if (Elf64_Sym::SHN_UNDEF != symsec && Elf64_Sym::SHN_ABS != symsec && xct_off <= symval) { set_te64(&sym->st_value, symval - asl_delta); } if (Elf64_Sym::SHN_ABS == symsec && xct_off <= symval) { adjABS(sym, 0u - asl_delta); } } } } if (fo) { fo->write(ibuf + ph.u_len, xct_off - ph.u_len); } // Search the Phdrs of compressed int n_ptload = 0; phdr = (Elf64_Phdr *) (void *) (1+ (Elf64_Ehdr *)(unsigned char *)ibuf); for (unsigned j=0; j < u_phnum; ++phdr, ++j) { if (PT_LOAD64==get_te32(&phdr->p_type) && 0!=n_ptload++) { old_data_off = get_te64(&phdr->p_offset); old_data_len = get_te64(&phdr->p_filesz); break; } } total_in = xct_off; total_out = xct_off; ph.u_len = 0; // Position the input for next unpackExtent. fi->seek(sizeof(linfo) + overlay_offset + sizeof(hbuf) + szb_info + ph.c_len, SEEK_SET); // Decompress and unfilter the tail of first PT_LOAD. phdr = (Elf64_Phdr *) (void *) (1+ ehdr); for (unsigned j=0; j < u_phnum; ++phdr, ++j) { if (PT_LOAD64==get_te32(&phdr->p_type)) { ph.u_len = get_te64(&phdr->p_filesz) - xct_off; break; } } unpackExtent(ph.u_len, fo, total_in, total_out, c_adler, u_adler, false, szb_info); } else { // main executable // Decompress each PT_LOAD. 
bool first_PF_X = true; phdr = (Elf64_Phdr *) (void *) (1+ ehdr); // uncompressed for (unsigned j=0; j < u_phnum; ++phdr, ++j) { if (PT_LOAD64==get_te32(&phdr->p_type)) { unsigned const filesz = get_te64(&phdr->p_filesz); unsigned const offset = get_te64(&phdr->p_offset); if (fo) fo->seek(offset, SEEK_SET); if (Elf64_Phdr::PF_X & get_te32(&phdr->p_flags)) { unpackExtent(filesz, fo, total_in, total_out, c_adler, u_adler, first_PF_X, szb_info); first_PF_X = false; } else { unpackExtent(filesz, fo, total_in, total_out, c_adler, u_adler, false, szb_info); } } } } phdr = phdri; load_va = 0; for (unsigned j=0; j < c_phnum; ++j) { if (PT_LOAD64==get_te32(&phdr->p_type)) { load_va = get_te64(&phdr->p_vaddr); break; } } if (0x1000==get_te64(&phdri[0].p_filesz) // detect C_BASE style && 0==get_te64(&phdri[1].p_offset) && 0==get_te64(&phdri[0].p_offset) && get_te64(&phdri[1].p_filesz) == get_te64(&phdri[1].p_memsz)) { fi->seek(up4(get_te64(&phdr[1].p_memsz)), SEEK_SET); // past the loader } else if (is_shlib || ((unsigned)(get_te64(&ehdri.e_entry) - load_va) + up4(lsize) + ph.getPackHeaderSize() + sizeof(overlay_offset)) < up4(file_size)) { // Loader is not at end; skip past it. funpad4(fi); // MATCH01 unsigned d_info[6]; fi->readx(d_info, sizeof(d_info)); if (0==old_dtinit) { old_dtinit = get_te32(&d_info[2 + (0==d_info[0])]); is_asl = 1u& get_te32(&d_info[0 + (0==d_info[0])]); } fi->seek(lsize - sizeof(d_info), SEEK_CUR); } // The gaps between PT_LOAD and after last PT_LOAD phdr = (Elf64_Phdr *)&u[sizeof(*ehdr)]; upx_uint64_t hi_offset(0); for (unsigned j = 0; j < u_phnum; ++j) { if (PT_LOAD64==phdr[j].p_type && hi_offset < phdr[j].p_offset) hi_offset = phdr[j].p_offset; } for (unsigned j = 0; j < u_phnum; ++j) { unsigned const size = find_LOAD_gap(phdr, j, u_phnum); if (size) { unsigned const where = get_te64(&phdr[j].p_offset) + get_te64(&phdr[j].p_filesz); if (fo) fo->seek(where, SEEK_SET); unpackExtent(size, fo, total_in, total_out, c_adler, u_adler, false, szb_info, (phdr[j].p_offset != hi_offset)); } } // check for end-of-file fi->readx(&bhdr, szb_info); unsigned const sz_unc = ph.u_len = get_te32(&bhdr.sz_unc); if (sz_unc == 0) { // uncompressed size 0 -> EOF // note: magic is always stored le32 unsigned const sz_cpr = get_le32(&bhdr.sz_cpr); if (sz_cpr != UPX_MAGIC_LE32) // sz_cpr must be h->magic throwCompressedDataViolation(); } else { // extra bytes after end? throwCompressedDataViolation(); } if (is_shlib) { // DT_INIT must be restored. // If android_shlib, then the asl_delta relocations must be un-done. int n_ptload = 0; upx_uint64_t load_off = 0; phdr = (Elf64_Phdr *)&u[sizeof(*ehdr)]; for (unsigned j= 0; j < u_phnum; ++j, ++phdr) { if (PT_LOAD64==get_te32(&phdr->p_type) && 0!=n_ptload++) { load_off = get_te64(&phdr->p_offset); load_va = get_te64(&phdr->p_vaddr); fi->seek(old_data_off, SEEK_SET); fi->readx(ibuf, old_data_len); total_in += old_data_len; total_out += old_data_len; Elf64_Phdr const *udynhdr = (Elf64_Phdr *)&u[sizeof(*ehdr)]; for (unsigned j3= 0; j3 < u_phnum; ++j3, ++udynhdr) if (Elf64_Phdr::PT_DYNAMIC==get_te32(&udynhdr->p_type)) { upx_uint64_t dt_pltrelsz(0), dt_jmprel(0); upx_uint64_t dt_relasz(0), dt_rela(0); upx_uint64_t const dyn_len = get_te64(&udynhdr->p_filesz); upx_uint64_t const dyn_off = get_te64(&udynhdr->p_offset); if ((unsigned long)file_size < (dyn_len + dyn_off)) { char msg[50]; snprintf(msg, sizeof(msg), "bad PT_DYNAMIC .p_filesz %#lx", (long unsigned)dyn_len); throwCantUnpack(msg); } if (dyn_off < load_off) { continue; // Oops. Not really is_shlib ? 
[built by 'rust' ?] } Elf64_Dyn *dyn = (Elf64_Dyn *)((unsigned char *)ibuf + (dyn_off - load_off)); dynseg = dyn; invert_pt_dynamic(dynseg); for (unsigned j2= 0; j2 < dyn_len; ++dyn, j2 += sizeof(*dyn)) { upx_uint64_t const tag = get_te64(&dyn->d_tag); upx_uint64_t val = get_te64(&dyn->d_val); if (is_asl) switch (tag) { case Elf64_Dyn::DT_RELASZ: { dt_relasz = val; } break; case Elf64_Dyn::DT_RELA: { dt_rela = val; } break; case Elf64_Dyn::DT_PLTRELSZ: { dt_pltrelsz = val; } break; case Elf64_Dyn::DT_JMPREL: { dt_jmprel = val; } break; case Elf64_Dyn::DT_PLTGOT: case Elf64_Dyn::DT_PREINIT_ARRAY: case Elf64_Dyn::DT_INIT_ARRAY: case Elf64_Dyn::DT_FINI_ARRAY: case Elf64_Dyn::DT_FINI: { set_te64(&dyn->d_val, val - asl_delta); }; break; } // end switch() if (upx_dt_init == tag) { if (Elf64_Dyn::DT_INIT == tag) { set_te64(&dyn->d_val, old_dtinit); if (!old_dtinit) { // compressor took the slot dyn->d_tag = Elf64_Dyn::DT_NULL; dyn->d_val = 0; } } else if (Elf64_Dyn::DT_INIT_ARRAY == tag || Elf64_Dyn::DT_PREINIT_ARRAY == tag) { if (val < load_va || (long unsigned)file_size < (long unsigned)val) { char msg[50]; snprintf(msg, sizeof(msg), "Bad Dynamic tag %#lx %#lx", (long unsigned)tag, (long unsigned)val); throwCantUnpack(msg); } set_te64(&ibuf[val - load_va], old_dtinit + (is_asl ? asl_delta : 0)); // counter-act unRel64 } } // Modified DT_*.d_val are re-written later from ibuf[] } if (is_asl) { lowmem.alloc(xct_off); fi->seek(0, SEEK_SET); fi->read(lowmem, xct_off); // contains relocation tables if (dt_relasz && dt_rela) { Elf64_Rela *const rela0 = (Elf64_Rela *)lowmem.subref( "bad Rela offset", dt_rela, dt_relasz); unRela64(dt_rela, rela0, dt_relasz, ibuf, load_va, old_dtinit, fo); } if (dt_pltrelsz && dt_jmprel) { // FIXME: overlap w/ DT_REL ? Elf64_Rela *const jmp0 = (Elf64_Rela *)lowmem.subref( "bad Jmprel offset", dt_jmprel, dt_pltrelsz); unRela64(dt_jmprel, jmp0, dt_pltrelsz, ibuf, load_va, old_dtinit, fo); } // Modified relocation tables are re-written by unRela64 } } if (fo) { fo->seek(get_te64(&phdr->p_offset), SEEK_SET); fo->rewrite(ibuf, old_data_len); } } } } // update header with totals ph.c_len = total_in; ph.u_len = total_out; // all bytes must be written if (total_out != orig_file_size) throwEOFException(); // finally test the checksums if (ph.c_adler != c_adler || ph.u_adler != u_adler) throwChecksumError(); }
1
[ "CWE-787" ]
upx
4e2fdb464a885c694408552c31739cb04b77bdcf
144,171,854,376,241,950,000,000,000,000,000,000,000
350
Defend against bad PT_DYNAMIC https://github.com/upx/upx/issues/391 modified: p_lx_elf.cpp modified: p_lx_elf.h
static void pmac_ide_flush(DBDMA_io *io)
{
    MACIOIDEState *m = io->opaque;

    if (m->aiocb) {
        blk_drain_all();
    }
}
0
[ "CWE-399" ]
qemu
3251bdcf1c67427d964517053c3d185b46e618e8
83,496,021,977,771,360,000,000,000,000,000,000,000
8
ide: Correct handling of malformed/short PRDTs This impacts both BMDMA and AHCI HBA interfaces for IDE. Currently, we confuse the difference between a PRDT having "0 bytes" and a PRDT having "0 complete sectors." When we receive an incomplete sector, inconsistent error checking leads to an infinite loop wherein the call succeeds, but it didn't give us enough bytes -- leading us to re-call the DMA chain over and over again. This leads to, in the BMDMA case, leaked memory for short PRDTs, and infinite loops and resource usage in the AHCI case. The .prepare_buf() callback is reworked to return the number of bytes that it successfully prepared. 0 is a valid, non-error answer that means the table was empty and described no bytes. -1 indicates an error. Our current implementation uses the io_buffer in IDEState to ultimately describe the size of a prepared scatter-gather list. Even though the AHCI PRDT/SGList can be as large as 256GiB, the AHCI command header limits transactions to just 4GiB. ATA8-ACS3, however, defines the largest transaction to be an LBA48 command that transfers 65,536 sectors. With a 512 byte sector size, this is just 32MiB. Since our current state structures use the int type to describe the size of the buffer, and this state is migrated as int32, we are limited to describing 2GiB buffer sizes unless we change the migration protocol. For this reason, this patch begins to unify the assertions in the IDE pathways that the scatter-gather list provided by either the AHCI PRDT or the PCI BMDMA PRDs can only describe, at a maximum, 2GiB. This should be resilient enough unless we need a sector size that exceeds 32KiB. Further, the likelihood of any guest operating system actually attempting to transfer this much data in a single operation is very slim. To this end, the IDEState variables have been updated to more explicitly clarify our maximum supported size. Callers to the prepare_buf callback have been reworked to understand the new return code, and all versions of the prepare_buf callback have been adjusted accordingly. Lastly, the ahci_populate_sglist helper, relied upon by the AHCI implementation of .prepare_buf() as well as the PCI implementation of the callback have had overflow assertions added to help make clear the reasonings behind the various type changes. [Added %d -> %"PRId64" fix John sent because off_pos changed from int to int64_t. --Stefan] Signed-off-by: John Snow <[email protected]> Reviewed-by: Paolo Bonzini <[email protected]> Message-id: [email protected] Signed-off-by: Stefan Hajnoczi <[email protected]>
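A minimal sketch of the return-code convention this message describes (the names below are illustrative, not the actual qemu API): the preparation step reports how many bytes it described, so the caller can tell apart an empty table (0), an error (-1), and a short table, instead of re-calling the same DMA chain forever.

#include <stdint.h>

int32_t prepare_buf(void *hba, int32_t limit);   /* hypothetical: >0 bytes, 0 empty, -1 error */

static int run_dma(void *hba, int32_t want)
{
    int32_t got = prepare_buf(hba, want);
    if (got < 0)
        return -1;      /* malformed PRDT: fail the command */
    if (got == 0)
        return 0;       /* empty table described no bytes */
    if (got < want)
        return 0;       /* short PRDT: stop, do not retry the chain */
    return 1;           /* enough bytes prepared; proceed with the transfer */
}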
xstring& MirrorJob::FormatShortStatus(xstring& s)
{
    if (bytes_to_transfer>0 && (!parent_mirror || parent_mirror->bytes_to_transfer!=bytes_to_transfer)) {
        long long curr_bytes_transferred=GetBytesCount();
        if (parent_mirror)
            curr_bytes_transferred+=bytes_transferred;
        s.appendf("%s/%s (%d%%)",
            xhuman(curr_bytes_transferred), xhuman(bytes_to_transfer),
            percent(curr_bytes_transferred, bytes_to_transfer));
        double rate=GetTransferRate();
        if (rate>=1)
            s.append(' ').append(Speedometer::GetStrProper(rate));
    }
    return s;
}
0
[ "CWE-20", "CWE-401" ]
lftp
a27e07d90a4608ceaf928b1babb27d4d803e1992
83,827,410,521,553,710,000,000,000,000,000,000,000
15
mirror: prepend ./ to rm and chmod arguments to avoid URL recognition (fix #452)
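A standalone sketch of the fix idea (hypothetical helper, not lftp's code): anchoring a relative name with "./" keeps names handed to rm/chmod from being re-recognized as URLs or options.

#include <stdio.h>
#include <string.h>

/* Prefix "./" unless the name is absolute or already anchored. */
static void anchor_local_name(char *out, size_t outsz, const char *name)
{
    if (name[0] != '/' && strncmp(name, "./", 2) != 0)
        snprintf(out, outsz, "./%s", name);
    else
        snprintf(out, outsz, "%s", name);
}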
void *idr_find(struct idr *idp, int id)
{
    int n;
    struct idr_layer *p;

    p = rcu_dereference_raw(idp->top);
    if (!p)
        return NULL;
    n = (p->layer+1) * IDR_BITS;

    /* Mask off upper bits we don't use for the search. */
    id &= MAX_ID_MASK;

    if (id >= (1 << n))
        return NULL;
    BUG_ON(n == 0);

    while (n > 0 && p) {
        n -= IDR_BITS;
        BUG_ON(n != p->layer*IDR_BITS);
        p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
    }
    return((void *)p);
}
0
[]
linux
2dcb22b346be7b7b7e630a8970d69cf3f1111ec1
138,413,754,458,773,630,000,000,000,000,000,000,000
24
idr: fix backtrack logic in idr_remove_all Currently idr_remove_all will fail with a use after free error if idr::layers is bigger than 2, which on 32 bit systems corresponds to items more than 1024. This is due to stepping back too many levels during backtracking. For simplicity let's assume that IDR_BITS=1 -> we have 2 nodes at each level below the root node and each leaf node stores two IDs. (In reality for 32 bit systems IDR_BITS=5, with 32 nodes at each sub-root level and 32 IDs in each leaf node). The sequence of freeing the nodes at the moment is as follows: layer 1 -> a(7) 2 -> b(3) c(5) 3 -> d(1) e(2) f(4) g(6) Until step 4 things go fine, but then node c is freed, whereas node g should be freed first. Since node c contains the pointer to node g we'll have a use after free error at step 6. How many levels we step back after visiting the leaf nodes is currently determined by the msb of the id we are currently visiting: Step 1. node d with IDs 0,1 is freed, current ID is advanced to 2. msb of the current ID bit 1. This means we need to step back 1 level to node b and take the next sibling, node e. 2-3. node e with IDs 2,3 is freed, current ID is 4, msb is bit 2. This means we need to step back 2 levels to node a, freeing node b on the way. 4-5. node f with IDs 4,5 is freed, current ID is 6, msb is still bit 2. This means we again need to step back 2 levels to node a and free c on the way. 6. We should visit node g, but its pointer is not available as node c was freed. The fix changes how we determine the number of levels to step back. Instead of deducting this merely from the msb of the current ID, we should really check if advancing the ID causes an overflow to a bit position corresponding to a given layer. In the above example overflow from bit 0 to bit 1 should mean stepping back 1 level. Overflow from bit 1 to bit 2 should mean stepping back 2 levels and so on. The fix was tested with IDs up to 1 << 20, which corresponds to 4 layers on 32 bit systems. Signed-off-by: Imre Deak <[email protected]> Reviewed-by: Tejun Heo <[email protected]> Cc: Eric Paris <[email protected]> Cc: "Paul E. McKenney" <[email protected]> Cc: <[email protected]> [2.6.34.1] Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
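The backtracking rule from this message can be sketched as follows (illustrative, simplified from the kernel fix): after advancing the id past the freed subtree, step back one level for every IDR_BITS-wide digit position that the increment changed, which the fix detects by XOR-ing the id before and after the step.

#define IDR_BITS 1  /* 1 in the example above; 5 on real 32-bit kernels */

static int fls_ul(unsigned long x) { int n = 0; while (x) { n++; x >>= 1; } return n; }

/* n: bit position at which the id advances past one leaf. */
static int levels_to_step_back(unsigned long id, int n)
{
    unsigned long before = id;
    unsigned long after = before + (1UL << n);  /* advance past the subtree */
    int levels = 0;
    while (n < fls_ul(before ^ after)) {  /* highest bit changed by the step */
        levels++;
        n += IDR_BITS;
    }
    return levels;
}

With IDR_BITS=1 and n=1 this yields 1 level for the 0->2 step and 2 levels for the 2->4 step, matching the walkthrough above.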
void fmtutil_generate_bmpfileheader(deark *c, dbuf *outf, const struct de_bmpinfo *bi,
    i64 file_size_override)
{
    i64 file_size_to_write;

    dbuf_write(outf, (const u8*)"BM", 2);

    if (file_size_override)
        file_size_to_write = file_size_override;
    else
        file_size_to_write = 14 + bi->total_size;
    dbuf_writeu32le(outf, file_size_to_write);

    dbuf_write_zeroes(outf, 4);
    dbuf_writeu32le(outf, 14 + bi->size_of_headers_and_pal);
}
0
[ "CWE-369" ]
deark
62acb7753b0e3c0d3ab3c15057b0a65222313334
145,979,398,996,981,950,000,000,000,000,000,000,000
16
pict,macrsrc: Fixed a bug that could cause division by 0 Found by F. Çelik.
static int snd_timer_register_system(void)
{
    struct snd_timer *timer;
    struct snd_timer_system_private *priv;
    int err;

    err = snd_timer_global_new("system", SNDRV_TIMER_GLOBAL_SYSTEM, &timer);
    if (err < 0)
        return err;
    strcpy(timer->name, "system timer");
    timer->hw = snd_timer_system;
    priv = kzalloc(sizeof(*priv), GFP_KERNEL);
    if (priv == NULL) {
        snd_timer_free(timer);
        return -ENOMEM;
    }
    priv->snd_timer = timer;
    timer_setup(&priv->tlist, snd_timer_s_function, 0);
    timer->private_data = priv;
    timer->private_free = snd_timer_free_system;
    return snd_timer_global_register(timer);
}
0
[ "CWE-416", "CWE-703" ]
linux
e7af6307a8a54f0b873960b32b6a644f2d0fbd97
246,492,820,859,831,560,000,000,000,000,000,000,000
22
ALSA: timer: Fix incorrectly assigned timer instance The clean up commit 41672c0c24a6 ("ALSA: timer: Simplify error path in snd_timer_open()") unified the error handling code paths with the standard goto, but it introduced a subtle bug: the timer instance is stored in snd_timer_open() incorrectly even if it returns an error. This may eventually lead to UAF, as spotted by fuzzer. The culprit is the snd_timer_open() code checks the SNDRV_TIMER_IFLG_EXCLUSIVE flag with the common variable timeri. This variable is supposed to be the newly created instance, but we (ab-)used it for a temporary check before the actual creation of a timer instance. After that point, there is another check for the max number of instances, and it bails out if over the threshold. Before the refactoring above, it worked fine because the code returned directly from that point. After the refactoring, however, it jumps to the unified error path that stores the timeri variable in return -- even if it returns an error. Unfortunately this stored value is kept in the caller side (snd_timer_user_tselect()) in tu->timeri. This causes inconsistency later, as if the timer was successfully assigned. In this patch, we fix it by not re-using timeri variable but a temporary variable for testing the exclusive connection, so timeri remains NULL at that point. Fixes: 41672c0c24a6 ("ALSA: timer: Simplify error path in snd_timer_open()") Reported-and-tested-by: Tristan Madani <[email protected]> Cc: <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Takashi Iwai <[email protected]>
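A generic sketch of the fix pattern (hypothetical names, not the ALSA code): do the pre-creation exclusivity probe in a scratch variable so the caller-visible instance pointer is only assigned on success.

#include <errno.h>
#include <stdlib.h>

struct inst { int exclusive; };
struct dev { struct inst *existing; int n, max; };

static int open_instance(struct dev *d, struct inst **out)
{
    struct inst *probe = d->existing;   /* scratch probe, not *out */
    if (probe && probe->exclusive)
        return -EBUSY;                  /* error paths never touch *out */
    if (d->n >= d->max)
        return -EBUSY;
    *out = calloc(1, sizeof(**out));    /* assign only the real new instance */
    return *out ? 0 : -ENOMEM;
}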
TEST_F(RouterTest, UpstreamPerTryIdleTimeout) {
  InSequence s;

  callbacks_.route_->route_entry_.retry_policy_.per_try_idle_timeout_ =
      std::chrono::milliseconds(3000);

  // This pattern helps ensure that we're actually invoking the callback.
  bool filter_state_verified = false;
  router_.config().upstream_logs_.push_back(
      std::make_shared<TestAccessLog>([&](const auto& stream_info) {
        filter_state_verified =
            stream_info.hasResponseFlag(StreamInfo::ResponseFlag::StreamIdleTimeout);
      }));

  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  Http::ConnectionPool::Callbacks* pool_callbacks;

  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _))
      .WillOnce(Invoke(
          [&](Http::ResponseDecoder& decoder,
              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {
            response_decoder = &decoder;
            pool_callbacks = &callbacks;
            return nullptr;
          }));

  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);

  response_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_);
  EXPECT_CALL(*response_timeout_, enableTimer(_, _));

  Buffer::OwnedImpl data;
  router_.decodeData(data, true);

  EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_))
      .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void {
        EXPECT_EQ(host_address_, host->address());
      }));

  per_try_idle_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_);
  EXPECT_CALL(*per_try_idle_timeout_, enableTimer(std::chrono::milliseconds(3000), _));
  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());

  // The per try timeout timer should not be started yet.
  pool_callbacks->onPoolReady(encoder, cm_.thread_local_cluster_.conn_pool_.host_,
                              upstream_stream_info_, Http::Protocol::Http10);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());

  EXPECT_CALL(encoder.stream_, resetStream(Http::StreamResetReason::LocalReset));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));
  EXPECT_CALL(*per_try_idle_timeout_, disableTimer());
  EXPECT_CALL(callbacks_.stream_info_,
              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));
  EXPECT_CALL(*response_timeout_, disableTimer());
  EXPECT_CALL(callbacks_.stream_info_, setResponseCodeDetails("upstream_per_try_idle_timeout"));

  Http::TestResponseHeaderMapImpl response_headers{
      {":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}};
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));
  EXPECT_CALL(callbacks_, encodeData(_, true));
  per_try_idle_timeout_->invokeCallback();

  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
                    .counter("upstream_rq_per_try_idle_timeout")
                    .value());
  EXPECT_EQ(1UL, cm_.thread_local_cluster_.conn_pool_.host_->stats().rq_timeout_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  EXPECT_TRUE(filter_state_verified);
}
0
[ "CWE-703" ]
envoy
f0bb2219112d8cdb4c4e8b346834f962925362ca
322,330,411,067,391,630,000,000,000,000,000,000,000
73
[1.20] CVE-2022-21655 Crash with direct_response Signed-off-by: Otto van der Schaaf <[email protected]>
Cached_item_field(THD *thd, Field *arg_field): field(arg_field)
{
    field= arg_field;
    /* TODO: take the memory allocation below out of the constructor. */
    buff= (uchar*) thd_calloc(thd, length= field->pack_length());
}
0
[ "CWE-617" ]
server
2e7891080667c59ac80f788eef4d59d447595772
273,228,795,010,965,330,000,000,000,000,000,000,000
6
MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view This bug could manifest itself after pushing a where condition over a mergeable derived table / view / CTE DT into a grouping view / derived table / CTE V whose item list contained set functions with constant arguments such as MIN(2), SUM(1) etc. In such cases the field references used in the condition pushed into the view V that correspond set functions are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation of the virtual method const_item() for the class Item_direct_view_ref the wrapped set functions with constant arguments could be erroneously taken for constant items. This could lead to a wrong result set returned by the main select query in 10.2. In 10.4 where a possibility of pushing condition from HAVING into WHERE had been added this could cause a crash. Approved by Sergey Petrunya <[email protected]>
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
    __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}
0
[ "CWE-703" ]
linux
6a76f8c0ab19f215af2a3442870eeb5f0e81998d
338,528,359,135,522,900,000,000,000,000,000,000,000
4
tracing: Fix possible NULL pointer dereferences Currently set_ftrace_pid and set_graph_function files use seq_lseek for their fops. However seq_open() is called only for FMODE_READ in the fops->open() so that if an user tries to seek one of those file when she open it for writing, it sees NULL seq_file and then panic. It can be easily reproduced with following command: $ cd /sys/kernel/debug/tracing $ echo 1234 | sudo tee -a set_ftrace_pid In this example, GNU coreutils' tee opens the file with fopen(, "a") and then the fopen() internally calls lseek(). Link: http://lkml.kernel.org/r/[email protected] Cc: Frederic Weisbecker <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Namhyung Kim <[email protected]> Cc: [email protected] Signed-off-by: Namhyung Kim <[email protected]> Signed-off-by: Steven Rostedt <[email protected]>
word_to_hex_npad(char *out, guint16 word)
{
    if (word >= 0x1000)
        *out++ = low_nibble_of_octet_to_hex((guint8)(word >> 12));
    if (word >= 0x0100)
        *out++ = low_nibble_of_octet_to_hex((guint8)(word >> 8));
    if (word >= 0x0010)
        *out++ = low_nibble_of_octet_to_hex((guint8)(word >> 4));
    *out++ = low_nibble_of_octet_to_hex((guint8)(word >> 0));
    return out;
}
0
[ "CWE-125" ]
wireshark
d5f2657825e63e4126ebd7d13a59f3c6e8a9e4e1
58,271,865,499,488,880,000,000,000,000,000,000,000
11
epan: Limit our bits in decode_bits_in_field. Limit the number of bits we process in decode_bits_in_field, otherwise we'll overrun our buffer. Fixes #16958.
ZEND_API int zend_ts_hash_find(TsHashTable *ht, char *arKey, uint nKeyLength, void **pData)
{
    int retval;

    begin_read(ht);
    retval = zend_hash_find(TS_HASH(ht), arKey, nKeyLength, pData);
    end_read(ht);

    return retval;
}
0
[]
php-src
24125f0f26f3787c006e4a51611ba33ee3b841cb
83,917,154,700,293,140,000,000,000,000,000,000,000
10
Fixed bug #68676 (Explicit Double Free)
static void rtl8xxxu_int_complete(struct urb *urb)
{
    struct rtl8xxxu_priv *priv = (struct rtl8xxxu_priv *)urb->context;
    struct device *dev = &priv->udev->dev;
    int ret;

    if (rtl8xxxu_debug & RTL8XXXU_DEBUG_INTERRUPT)
        dev_dbg(dev, "%s: status %i\n", __func__, urb->status);
    if (urb->status == 0) {
        usb_anchor_urb(urb, &priv->int_anchor);
        ret = usb_submit_urb(urb, GFP_ATOMIC);
        if (ret)
            usb_unanchor_urb(urb);
    } else {
        dev_dbg(dev, "%s: Error %i\n", __func__, urb->status);
    }
}
0
[ "CWE-400", "CWE-401" ]
linux
a2cdd07488e666aa93a49a3fc9c9b1299e27ef3c
180,392,132,807,923,120,000,000,000,000,000,000,000
17
rtl8xxxu: prevent leaking urb In rtl8xxxu_submit_int_urb if usb_submit_urb fails the allocated urb should be released. Signed-off-by: Navid Emamdoost <[email protected]> Reviewed-by: Chris Chiu <[email protected]> Signed-off-by: Kalle Valo <[email protected]>
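The leak described here follows the standard interrupt-URB pattern; a hedged sketch of the corrected submit path (simplified, not the driver's exact code):

static int submit_int_urb_sketch(struct usb_device *udev)
{
    struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
    int ret;

    if (!urb)
        return -ENOMEM;
    /* ... usb_fill_int_urb(urb, udev, ...) as the driver requires ... */
    ret = usb_submit_urb(urb, GFP_KERNEL);
    if (ret) {
        usb_free_urb(urb);  /* the fix: release the urb on submit failure */
        return ret;
    }
    return 0;
}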
check_anon(kdc_realm_t *kdc_active_realm,
           krb5_principal client, krb5_principal server)
{
    /* If restrict_anon is set, reject requests from anonymous to principals
     * other than the local TGT. */
    if (kdc_active_realm->realm_restrict_anon &&
        krb5_principal_compare_any_realm(kdc_context, client,
                                         krb5_anonymous_principal()) &&
        !krb5_principal_compare(kdc_context, server, tgs_server))
        return -1;
    return 0;
}
0
[ "CWE-476" ]
krb5
93b4a6306a0026cf1cc31ac4bd8a49ba5d034ba7
10,406,514,969,587,572,000,000,000,000,000,000,000
12
Fix S4U2Self KDC crash when anon is restricted In validate_as_request(), when enforcing restrict_anonymous_to_tgt, use client.princ instead of request->client; the latter is NULL when validating S4U2Self requests. CVE-2016-3120: In MIT krb5 1.9 and later, an authenticated attacker can cause krb5kdc to dereference a null pointer if the restrict_anonymous_to_tgt option is set to true, by making an S4U2Self request. CVSSv2 Vector: AV:N/AC:H/Au:S/C:N/I:N/A:C/E:H/RL:OF/RC:C ticket: 8458 (new) target_version: 1.14-next target_version: 1.13-next
mrb_obj_methods(mrb_state *mrb, mrb_bool recur, mrb_value obj, mrb_method_flag_t flag)
{
    return mrb_class_instance_method_list(mrb, recur, mrb_class(mrb, obj), 0);
}
0
[ "CWE-824" ]
mruby
b64ce17852b180dfeea81cf458660be41a78974d
60,247,556,009,510,120,000,000,000,000,000,000,000
4
Should not call `initialize_copy` for `TT_ICLASS`; fix #4027 Since `TT_ICLASS` is a internal object that should never be revealed to Ruby world.
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
    return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}
0
[ "CWE-416" ]
linux
21b5944350052d2583e82dd59b19a9ba94a007f0
145,841,657,117,985,330,000,000,000,000,000,000,000
4
net: Fix double free and memory corruption in get_net_ns_by_id() (I can trivially verify that that idr_remove in cleanup_net happens after the network namespace count has dropped to zero --EWB) Function get_net_ns_by_id() does not check for net::count after it has found a peer in netns_ids idr. It may dereference a peer, after its count has already been finaly decremented. This leads to double free and memory corruption: put_net(peer) rtnl_lock() atomic_dec_and_test(&peer->count) [count=0] ... __put_net(peer) get_net_ns_by_id(net, id) spin_lock(&cleanup_list_lock) list_add(&net->cleanup_list, &cleanup_list) spin_unlock(&cleanup_list_lock) queue_work() peer = idr_find(&net->netns_ids, id) | get_net(peer) [count=1] | ... | (use after final put) v ... cleanup_net() ... spin_lock(&cleanup_list_lock) ... list_replace_init(&cleanup_list, ..) ... spin_unlock(&cleanup_list_lock) ... ... ... ... put_net(peer) ... atomic_dec_and_test(&peer->count) [count=0] ... spin_lock(&cleanup_list_lock) ... list_add(&net->cleanup_list, &cleanup_list) ... spin_unlock(&cleanup_list_lock) ... queue_work() ... rtnl_unlock() rtnl_lock() ... for_each_net(tmp) { ... id = __peernet2id(tmp, peer) ... spin_lock_irq(&tmp->nsid_lock) ... idr_remove(&tmp->netns_ids, id) ... ... ... net_drop_ns() ... net_free(peer) ... } ... | v cleanup_net() ... (Second free of peer) Also, put_net() on the right cpu may reorder with left's cpu list_replace_init(&cleanup_list, ..), and then cleanup_list will be corrupted. Since cleanup_net() is executed in worker thread, while put_net(peer) can happen everywhere, there should be enough time for concurrent get_net_ns_by_id() to pick the peer up, and the race does not seem to be unlikely. The patch fixes the problem in standard way. (Also, there is possible problem in peernet2id_alloc(), which requires check for net::count under nsid_lock and maybe_get_net(peer), but in current stable kernel it's used under rtnl_lock() and it has to be safe. Openswitch begun to use peernet2id_alloc(), and possibly it should be fixed too. While this is not in stable kernel yet, so I'll send a separate message to netdev@ later). Cc: Nicolas Dichtel <[email protected]> Signed-off-by: Kirill Tkhai <[email protected]> Fixes: 0c7aecd4bde4 "netns: add rtnl cmd to add and get peer netns ids" Reviewed-by: Andrey Ryabinin <[email protected]> Reviewed-by: "Eric W. Biederman" <[email protected]> Signed-off-by: Eric W. Biederman <[email protected]> Reviewed-by: Eric Dumazet <[email protected]> Acked-by: Nicolas Dichtel <[email protected]> Signed-off-by: David S. Miller <[email protected]>
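The standard guard for this class of bug is to refuse to hand out a peer whose refcount already hit zero; a sketch of the lookup with that guard (simplified from the actual fix, which also holds net->nsid_lock):

static struct net *get_net_ns_by_id_sketch(struct net *net, int id)
{
    struct net *peer;

    rcu_read_lock();
    peer = idr_find(&net->netns_ids, id);
    if (peer)
        peer = maybe_get_net(peer);  /* NULL instead of resurrecting count=0 */
    rcu_read_unlock();
    return peer;
}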
static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len,
                                         struct ath6kl_vif *vif)
{
    struct wmi_tx_status_event *ev;
    u32 id;

    if (len < sizeof(*ev))
        return -EINVAL;

    ev = (struct wmi_tx_status_event *) datap;
    id = le32_to_cpu(ev->id);
    ath6kl_dbg(ATH6KL_DBG_WMI, "tx_status: id=%x ack_status=%u\n",
               id, ev->ack_status);
    if (wmi->last_mgmt_tx_frame) {
        cfg80211_mgmt_tx_status(&vif->wdev, id,
                                wmi->last_mgmt_tx_frame,
                                wmi->last_mgmt_tx_frame_len,
                                !!ev->ack_status, GFP_ATOMIC);
        kfree(wmi->last_mgmt_tx_frame);
        wmi->last_mgmt_tx_frame = NULL;
        wmi->last_mgmt_tx_frame_len = 0;
    }

    return 0;
}
0
[ "CWE-125" ]
linux
5d6751eaff672ea77642e74e92e6c0ac7f9709ab
143,205,287,167,880,670,000,000,000,000,000,000,000
25
ath6kl: add some bounds checking The "ev->traffic_class" and "reply->ac" variables come from the network and they're used as an offset into the wmi->stream_exist_for_ac[] array. Those variables are u8 so they can be 0-255 but the stream_exist_for_ac[] array only has WMM_NUM_AC (4) elements. We need to add a couple bounds checks to prevent array overflows. I also modified one existing check from "if (traffic_class > 3) {" to "if (traffic_class >= WMM_NUM_AC) {" just to make them all consistent. Fixes: bdcd81707973 (" Add ath6kl cleaned up driver") Signed-off-by: Dan Carpenter <[email protected]> Signed-off-by: Kalle Valo <[email protected]>
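The shape of the added guard, sketched with the field names from the message: validate the network-supplied index before using it as an array offset.

if (reply->ac >= WMM_NUM_AC)
    return -EINVAL;                           /* network-controlled index */
wmi->stream_exist_for_ac[reply->ac] = 1;      /* provably in bounds now */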
int dev_open(struct net_device *dev)
{
    int ret;

    /*
     * Is it already up?
     */
    if (dev->flags & IFF_UP)
        return 0;

    /*
     * Open device
     */
    ret = __dev_open(dev);
    if (ret < 0)
        return ret;

    /*
     * ... and announce new interface.
     */
    rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
    call_netdevice_notifiers(NETDEV_UP, dev);

    return ret;
}
0
[ "CWE-399" ]
linux
6ec82562ffc6f297d0de36d65776cff8e5704867
266,791,342,518,687,700,000,000,000,000,000,000,000
25
veth: Dont kfree_skb() after dev_forward_skb() In case of congestion, netif_rx() frees the skb, so we must assume dev_forward_skb() also consume skb. Bug introduced by commit 445409602c092 (veth: move loopback logic to common location) We must change dev_forward_skb() to always consume skb, and veth to not double free it. Bug report : http://marc.info/?l=linux-netdev&m=127310770900442&w=3 Reported-by: Martín Ferrari <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
void Item_ref::update_used_tables()
{
    if (!get_depended_from())
        (*ref)->update_used_tables();
}
0
[]
server
b000e169562697aa072600695d4f0c0412f94f4f
271,086,298,571,198,480,000,000,000,000,000,000,000
5
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL)) based on: commit f7316aa0c9a Author: Ajo Robert <[email protected]> Date: Thu Aug 24 17:03:21 2017 +0530 Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL)) Backport of Bug#19143243 fix. NAME_CONST item can return NULL_ITEM type in case of incorrect arguments. NULL_ITEM has special processing in Item_func_in function. In Item_func_in::fix_length_and_dec an array of possible comparators is created. Since NAME_CONST function has NULL_ITEM type, corresponding array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE. ITEM_CACHE can not return proper type(NULL_ITEM) in Item_func_in::val_int(), so the NULL_ITEM is attempted compared with an empty comparator. The fix is to disable the caching of Item_name_const item.
static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
    struct vmcb_control_area *control;

    control = &svm->vmcb->control;
    control->int_vector = irq;
    control->int_ctl &= ~V_INTR_PRIO_MASK;
    control->int_ctl |= V_IRQ_MASK |
        ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}
0
[ "CWE-400" ]
linux-2.6
9581d442b9058d3699b4be568b6e5eae38a41493
241,700,878,772,508,040,000,000,000,000,000,000,000
10
KVM: Fix fs/gs reload oops with invalid ldt kvm reloads the host's fs and gs blindly, however the underlying segment descriptors may be invalid due to the user modifying the ldt after loading them. Fix by using the safe accessors (loadsegment() and load_gs_index()) instead of home grown unsafe versions. This is CVE-2010-3698. KVM-Stable-Tag. Signed-off-by: Avi Kivity <[email protected]> Signed-off-by: Marcelo Tosatti <[email protected]>
static int mailimf_second_parse(const char * message, size_t length,
                                size_t * indx, int * result)
{
    uint32_t second;
    int r;

    r = mailimf_number_parse(message, length, indx, &second);
    if (r != MAILIMF_NO_ERROR)
        return r;

    * result = second;

    return MAILIMF_NO_ERROR;
}
0
[ "CWE-476" ]
libetpan
1fe8fbc032ccda1db9af66d93016b49c16c1f22d
194,876,378,352,542,950,000,000,000,000,000,000,000
14
Fixed crash #274
static int shred_file(int fd, const char *filename, const struct logInfo *log)
{
    char count[DIGITS];  /* that's a lot of shredding :) */
    const char **fullCommand;
    int id = 0;
    int status;
    pid_t pid;

    if (log->preremove) {
        message(MESS_DEBUG, "running preremove script\n");
        if (runScript(log, filename, NULL, log->preremove)) {
            message(MESS_ERROR,
                    "error running preremove script "
                    "for %s of '%s'. Not removing this file.\n",
                    filename, log->pattern);
            /* What ever was supposed to happen did not happen,
             * therefore do not unlink the file yet. */
            return 1;
        }
    }

    if (!(log->flags & LOG_FLAG_SHRED)) {
        goto unlink_file;
    }

    message(MESS_DEBUG, "Using shred to remove the file %s\n", filename);

    if (log->shred_cycles != 0) {
        fullCommand = alloca(sizeof(*fullCommand) * 6);
    }
    else {
        fullCommand = alloca(sizeof(*fullCommand) * 4);
    }
    fullCommand[id++] = "shred";
    fullCommand[id++] = "-u";

    if (log->shred_cycles != 0) {
        fullCommand[id++] = "-n";
        snprintf(count, DIGITS - 1, "%d", log->shred_cycles);
        fullCommand[id++] = count;
    }
    fullCommand[id++] = "-";
    fullCommand[id++] = NULL;

    pid = fork();
    if (pid == -1) {
        message(MESS_ERROR, "cannot fork: %s\n", strerror(errno));
        return 1;
    }

    if (pid == 0) {
        movefd(fd, STDOUT_FILENO);

        if (switch_user_permanently(log) != 0) {
            exit(1);
        }

        execvp(fullCommand[0], (void *) fullCommand);
        exit(1);
    }

    wait(&status);

    if (!WIFEXITED(status) || WEXITSTATUS(status)) {
        message(MESS_ERROR, "Failed to shred %s, trying unlink\n", filename);
        return unlink(filename);
    }

    /* We have to unlink it after shred anyway,
     * because it doesn't remove the file itself */

unlink_file:
    if (unlink(filename) == 0)
        return 0;
    if (errno != ENOENT)
        return 1;

    /* unlink of log file that no longer exists is not a fatal error */
    message(MESS_ERROR, "error unlinking log file %s: %s\n", filename,
            strerror(errno));
    return 0;
}
0
[ "CWE-732" ]
logrotate
f46d0bdfc9c53515c13880c501f4d2e1e7dd8b25
334,973,871,758,019,950,000,000,000,000,000,000,000
83
Lock state file to prevent parallel execution Running multiple instances of logrotate on the same set of log-files might have undesirable effects. Add command line option --skip-state-lock to skip locking the state file, for example if locking is unsupported or prohibited. Fixes: https://github.com/logrotate/logrotate/issues/295 Closes: https://github.com/logrotate/logrotate/pull/297
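A standalone sketch of the state-file locking described above (POSIX flock; logrotate's real code also honors --skip-state-lock):

#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <unistd.h>

static int lock_state_file(const char *path)
{
    int fd = open(path, O_RDWR | O_CREAT, 0640);
    if (fd < 0)
        return -1;
    if (flock(fd, LOCK_EX | LOCK_NB) != 0) {
        fprintf(stderr, "state file %s is locked by another instance\n", path);
        close(fd);
        return -1;
    }
    return fd;  /* keep fd open: the lock lives as long as it does */
}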
void sock_def_readable(struct sock *sk)
{
    struct socket_wq *wq;

    rcu_read_lock();
    wq = rcu_dereference(sk->sk_wq);
    if (skwq_has_sleeper(wq))
        wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
                                        EPOLLRDNORM | EPOLLRDBAND);
    sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
    rcu_read_unlock();
}
0
[]
net
35306eb23814444bd4021f8a1c3047d3cb0c8b2b
245,634,841,429,603,850,000,000,000,000,000,000,000
12
af_unix: fix races in sk_peer_pid and sk_peer_cred accesses Jann Horn reported that SO_PEERCRED and SO_PEERGROUPS implementations are racy, as af_unix can concurrently change sk_peer_pid and sk_peer_cred. In order to fix this issue, this patch adds a new spinlock that needs to be used whenever these fields are read or written. Jann also pointed out that l2cap_sock_get_peer_pid_cb() is currently reading sk->sk_peer_pid which makes no sense, as this field is only possibly set by AF_UNIX sockets. We will have to clean this in a separate patch. This could be done by reverting b48596d1dc25 "Bluetooth: L2CAP: Add get_peer_pid callback" or implementing what was truly expected. Fixes: 109f6e39fa07 ("af_unix: Allow SO_PEERCRED to work across namespaces.") Signed-off-by: Eric Dumazet <[email protected]> Reported-by: Jann Horn <[email protected]> Cc: Eric W. Biederman <[email protected]> Cc: Luiz Augusto von Dentz <[email protected]> Cc: Marcel Holtmann <[email protected]> Signed-off-by: David S. Miller <[email protected]>
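A sketch of the locking the message describes (sk_peer_lock is the new spinlock added by the fix; fragments simplified here):

/* writer (af_unix connect/accept path) */
spin_lock(&sk->sk_peer_lock);
old_pid = sk->sk_peer_pid;
sk->sk_peer_pid = get_pid(new_pid);
spin_unlock(&sk->sk_peer_lock);
put_pid(old_pid);        /* drop the old reference outside the lock */

/* reader (SO_PEERCRED): take a reference under the same lock */
spin_lock(&sk->sk_peer_lock);
pid = get_pid(sk->sk_peer_pid);
spin_unlock(&sk->sk_peer_lock);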
/* }}} */

static int check_id_allowed(char *id, long what) /* {{{ */
{
    if (what & PHP_DATE_TIMEZONE_GROUP_AFRICA     && strncasecmp(id, "Africa/",      7) == 0) return 1;
    if (what & PHP_DATE_TIMEZONE_GROUP_AMERICA    && strncasecmp(id, "America/",     8) == 0) return 1;
    if (what & PHP_DATE_TIMEZONE_GROUP_ANTARCTICA && strncasecmp(id, "Antarctica/", 11) == 0) return 1;
    if (what & PHP_DATE_TIMEZONE_GROUP_ARCTIC     && strncasecmp(id, "Arctic/",      7) == 0) return 1;
    if (what & PHP_DATE_TIMEZONE_GROUP_ASIA       && strncasecmp(id, "Asia/",        5) == 0) return 1;
    if (what & PHP_DATE_TIMEZONE_GROUP_ATLANTIC   && strncasecmp(id, "Atlantic/",    9) == 0) return 1;
    if (what & PHP_DATE_TIMEZONE_GROUP_AUSTRALIA  && strncasecmp(id, "Australia/",  10) == 0) return 1;
    if (what & PHP_DATE_TIMEZONE_GROUP_EUROPE     && strncasecmp(id, "Europe/",      7) == 0) return 1;
    if (what & PHP_DATE_TIMEZONE_GROUP_INDIAN     && strncasecmp(id, "Indian/",      7) == 0) return 1;
    if (what & PHP_DATE_TIMEZONE_GROUP_PACIFIC    && strncasecmp(id, "Pacific/",     8) == 0) return 1;
    if (what & PHP_DATE_TIMEZONE_GROUP_UTC        && strncasecmp(id, "UTC",          3) == 0) return 1;
0
[]
php-src
bb057498f7457e8b2eba98332a3bad434de4cf12
8,061,305,989,358,884,000,000,000,000,000,000,000
15
Fix #70277: new DateTimeZone($foo) is ignoring text after null byte The DateTimeZone constructors are not binary safe. They're parsing the timezone as string, but discard the length when calling timezone_initialize(). This patch adds a tz_len parameter and a respective check to timezone_initialize().
BGD_DECLARE(gdIOCtx *) gdNewDynamicCtx(int initialSize, void *data)
{
    /* 2.0.23: Phil Moore: 'return' keyword was missing! */
    return gdNewDynamicCtxEx(initialSize, data, 1);
}
0
[ "CWE-119", "CWE-787" ]
libgd
53110871935244816bbb9d131da0bccff734bfe9
107,287,038,388,725,670,000,000,000,000,000,000,000
5
Avoid potentially dangerous signed to unsigned conversion We make sure to never pass a negative `rlen` as size to memcpy(). See also <https://bugs.php.net/bug.php?id=73280>. Patch provided by Emmanuel Law.
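The hazard and the guard, sketched standalone: a negative int passed where size_t is expected becomes a huge unsigned length, so the count must be validated while it is still signed.

#include <string.h>

static int safe_copy(void *dst, const void *src, int rlen)
{
    if (rlen <= 0)
        return 0;  /* reject before the implicit signed->unsigned conversion */
    memcpy(dst, src, (size_t) rlen);
    return rlen;
}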
static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries,
                                unsigned int cq_entries, size_t *sq_offset)
{
    struct io_rings *rings;
    size_t off, sq_array_size;

    off = struct_size(rings, cqes, cq_entries);
    if (off == SIZE_MAX)
        return SIZE_MAX;
    if (ctx->flags & IORING_SETUP_CQE32) {
        if (check_shl_overflow(off, 1, &off))
            return SIZE_MAX;
    }

#ifdef CONFIG_SMP
    off = ALIGN(off, SMP_CACHE_BYTES);
    if (off == 0)
        return SIZE_MAX;
#endif

    if (sq_offset)
        *sq_offset = off;

    sq_array_size = array_size(sizeof(u32), sq_entries);
    if (sq_array_size == SIZE_MAX)
        return SIZE_MAX;

    if (check_add_overflow(off, sq_array_size, &off))
        return SIZE_MAX;

    return off;
0
[ "CWE-416" ]
linux
9cae36a094e7e9d6e5fe8b6dcd4642138b3eb0c7
24,595,955,240,633,246,000,000,000,000,000,000,000
32
io_uring: reinstate the inflight tracking After some debugging, it was realized that we really do still need the old inflight tracking for any file type that has io_uring_fops assigned. If we don't, then trivial circular references will mean that we never get the ctx cleaned up and hence it'll leak. Just bring back the inflight tracking, which then also means we can eliminate the conditional dropping of the file when task_work is queued. Fixes: d5361233e9ab ("io_uring: drop the old style inflight file tracking") Signed-off-by: Jens Axboe <[email protected]>
static int ZEND_FASTCALL ZEND_CONCAT_SPEC_CONST_CONST_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
    zend_op *opline = EX(opline);

    concat_function(&EX_T(opline->result.u.var).tmp_var,
        &opline->op1.u.constant,
        &opline->op2.u.constant TSRMLS_CC);
    ZEND_VM_NEXT_OPCODE();
}
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
4,890,796,360,515,558,300,000,000,000,000,000,000
12
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
virDomainMigratePerform3Params(virDomainPtr domain,
                               const char *dconnuri,
                               virTypedParameterPtr params,
                               int nparams,
                               const char *cookiein,
                               int cookieinlen,
                               char **cookieout,
                               int *cookieoutlen,
                               unsigned int flags)
{
    virConnectPtr conn;

    VIR_DOMAIN_DEBUG(domain, "dconnuri=%s, params=%p, nparams=%d, cookiein=%p, "
                     "cookieinlen=%d, cookieout=%p, cookieoutlen=%p, flags=%x",
                     NULLSTR(dconnuri), params, nparams, cookiein,
                     cookieinlen, cookieout, cookieoutlen, flags);
    VIR_TYPED_PARAMS_DEBUG(params, nparams);

    virResetLastError();

    virCheckDomainReturn(domain, -1);
    conn = domain->conn;

    virCheckReadOnlyGoto(conn->flags, error);

    if (conn->driver->domainMigratePerform3Params) {
        int ret;
        ret = conn->driver->domainMigratePerform3Params(
            domain, dconnuri, params, nparams, cookiein, cookieinlen,
            cookieout, cookieoutlen, flags);
        if (ret < 0)
            goto error;
        return ret;
    }

    virReportUnsupportedError();

 error:
    virDispatchError(domain->conn);
    return -1;
}
0
[ "CWE-254" ]
libvirt
506e9d6c2d4baaf580d489fff0690c0ff2ff588f
18,767,756,126,122,774,000,000,000,000,000,000,000
41
virDomainGetTime: Deny on RO connections We have a policy that if API may end up talking to a guest agent it should require RW connection. We don't obey the rule in virDomainGetTime(). Signed-off-by: Michal Privoznik <[email protected]>
static void edge_break(struct tty_struct *tty, int break_state)
{
    struct usb_serial_port *port = tty->driver_data;
    struct edgeport_port *edge_port = usb_get_serial_port_data(port);
    int status;
    int bv = 0;  /* Off */

    if (break_state == -1)
        bv = 1;  /* On */
    status = ti_do_config(edge_port, UMPC_SET_CLR_BREAK, bv);
    if (status)
        dev_dbg(&port->dev,
                "%s - error %d sending break set/clear command.\n",
                __func__, status);
}
0
[ "CWE-191" ]
linux
654b404f2a222f918af9b0cd18ad469d0c941a8e
56,564,342,164,185,660,000,000,000,000,000,000,000
14
USB: serial: io_ti: fix information leak in completion handler Add missing sanity check to the bulk-in completion handler to avoid an integer underflow that can be triggered by a malicious device. This avoids leaking 128 kB of memory content from after the URB transfer buffer to user space. Fixes: 8c209e6782ca ("USB: make actual_length in struct urb field u32") Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Cc: stable <[email protected]> # 2.6.30 Signed-off-by: Johan Hovold <[email protected]>
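The missing sanity check has this shape (sketch of a bulk-in completion handler, simplified from the driver):

static void bulk_in_callback_sketch(struct urb *urb)
{
    unsigned char *data = urb->transfer_buffer;
    int length;

    if (urb->actual_length < 2)
        return;                        /* the fix: no integer underflow ... */
    length = urb->actual_length - 2;   /* ... so no leak of memory past the buffer */
    (void) data; (void) length;        /* hand data[2..] to the tty layer here */
}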
cmsHPROFILE CMSEXPORT cmsOpenProfileFromMemTHR(cmsContext ContextID, const void* MemPtr, cmsUInt32Number dwSize)
{
    _cmsICCPROFILE* NewIcc;
    cmsHPROFILE hEmpty;

    hEmpty = cmsCreateProfilePlaceholder(ContextID);
    if (hEmpty == NULL) return NULL;

    NewIcc = (_cmsICCPROFILE*) hEmpty;

    // Ok, in this case const void* is casted to void* just because open IO handler
    // shares read and writting modes. Don't abuse this feature!
    NewIcc ->IOhandler = cmsOpenIOhandlerFromMem(ContextID, (void*) MemPtr, dwSize, "r");
    if (NewIcc ->IOhandler == NULL) goto Error;

    if (!_cmsReadHeader(NewIcc)) goto Error;
    return hEmpty;

Error:
    cmsCloseProfile(hEmpty);
    return NULL;
}
0
[]
Little-CMS
d2d902b9a03583ae482c782b2f243f7e5268a47d
278,116,844,672,244,480,000,000,000,000,000,000,000
23
>Changes from Richard Hughes
color_cap_rv_fct (void)
{
    /* By this point, it was 1 (or already -1).  */
    color_option = -1;  /* That's still != 0.  */
}
0
[ "CWE-189" ]
grep
8fcf61523644df42e1905c81bed26838e0b04f91
225,621,372,411,605,400,000,000,000,000,000,000,000
5
grep: fix integer-overflow issues in main program * NEWS: Document this. * bootstrap.conf (gnulib_modules): Add inttypes, xstrtoimax. Remove xstrtoumax. * src/main.c: Include <inttypes.h>, for INTMAX_MAX, PRIdMAX. (context_length_arg, prtext, grepbuf, grep, grepfile) (get_nondigit_option, main): Use intmax_t, not int, for line counts. (context_length_arg, main): Silently ceiling line counts to maximum value, since there's no practical difference between doing that and using infinite-precision arithmetic. (out_before, out_after, pending): Now intmax_t, not int. (max_count, outleft): Now intmax_t, not off_t. (prepend_args, prepend_default_options, main): Use size_t, not int, for sizes. (prepend_default_options): Check for int and size_t overflow.
void policies_print(BIO *out, X509_STORE_CTX *ctx)
{
    X509_POLICY_TREE *tree;
    int explicit_policy;
    int free_out = 0;

    if (out == NULL) {
        out = BIO_new_fp(stderr, BIO_NOCLOSE);
        free_out = 1;
    }
    tree = X509_STORE_CTX_get0_policy_tree(ctx);
    explicit_policy = X509_STORE_CTX_get_explicit_policy(ctx);

    BIO_printf(out, "Require explicit Policy: %s\n",
               explicit_policy ? "True" : "False");

    nodes_print(out, "Authority", X509_policy_tree_get0_policies(tree));
    nodes_print(out, "User", X509_policy_tree_get0_user_policies(tree));

    if (free_out)
        BIO_free(out);
}
0
[]
openssl
a70da5b3ecc3160368529677006801c58cb369db
109,452,453,854,175,750,000,000,000,000,000,000,000
21
New functions to check a hostname email or IP address against a certificate. Add options to s_client, s_server and x509 utilities to print results of checks.
gs_window_clear (GSWindow *window)
{
    g_return_if_fail (GS_IS_WINDOW (window));

    clear_widget (GTK_WIDGET (window));
    clear_widget (window->priv->drawing_area);
}
0
[]
gnome-screensaver
a5f66339be6719c2b8fc478a1d5fc6545297d950
223,910,614,793,435,540,000,000,000,000,000,000,000
7
Ensure keyboard grab and unlock dialog exist after monitor removal gnome-screensaver currently doesn't deal with monitors getting removed properly. If the unlock dialog is on the removed monitor then the unlock dialog and its associated keyboard grab are not moved to an existing monitor when the monitor removal is processed. This means that users can gain access to the locked system by placing the mouse pointer on an external monitor and then disconnect the external monitor. CVE-2010-0414 https://bugzilla.gnome.org/show_bug.cgi?id=609337
void git_path_diriter_free(git_path_diriter *diriter)
{
    if (diriter == NULL)
        return;

    git_buf_dispose(&diriter->path_utf8);

    if (diriter->handle != INVALID_HANDLE_VALUE) {
        FindClose(diriter->handle);
        diriter->handle = INVALID_HANDLE_VALUE;
    }
}
0
[ "CWE-20", "CWE-706" ]
libgit2
3f7851eadca36a99627ad78cbe56a40d3776ed01
138,966,689,709,130,760,000,000,000,000,000,000,000
12
Disallow NTFS Alternate Data Stream attacks, even on Linux/macOS A little-known feature of NTFS is that it offers to store metadata in so-called "Alternate Data Streams" (inspired by Apple's "resource forks") that are copied together with the file they are associated with. These Alternate Data Streams can be accessed via `<file name>:<stream name>:<stream type>`. Directories, too, have Alternate Data Streams, and they even have a default stream type `$INDEX_ALLOCATION`. Which means that `abc/` and `abc::$INDEX_ALLOCATION/` are actually equivalent. This is of course another attack vector on the Git directory that we definitely want to prevent. On Windows, we already do this incidentally, by disallowing colons in file/directory names. While it looks as if files'/directories' Alternate Data Streams are not accessible in the Windows Subsystem for Linux, and neither via CIFS/SMB-mounted network shares in Linux, it _is_ possible to access them on SMB-mounted network shares on macOS. Therefore, let's go the extra mile and prevent this particular attack _everywhere_. To keep things simple, let's just disallow *any* Alternate Data Stream of `.git`. This is libgit2's variant of CVE-2019-1352. Signed-off-by: Johannes Schindelin <[email protected]>
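The check reduces to rejecting any path component that is ".git" followed by an NTFS stream suffix; a standalone sketch:

#include <string.h>
#include <strings.h>

/* Reject ".git" and ".git:...", e.g. ".git::$INDEX_ALLOCATION", so an
 * Alternate Data Stream name can no longer alias the repository directory. */
static int is_forbidden_dotgit(const char *name)
{
    if (strncasecmp(name, ".git", 4) != 0)
        return 0;
    return name[4] == '\0' || name[4] == ':';
}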
void ConnectDialog::on_qaFavoriteAdd_triggered() {
    ServerItem *si = static_cast<ServerItem *>(qtwServers->currentItem());
    if (! si || (si->itType == ServerItem::FavoriteType))
        return;
    si = new ServerItem(si);
    qtwServers->fixupName(si);
    qlItems << si;
    qtwServers->siFavorite->addServerItem(si);
    qtwServers->setCurrentItem(si);
    startDns(si);
}
0
[ "CWE-59", "CWE-61" ]
mumble
e59ee87abe249f345908c7d568f6879d16bfd648
60,594,618,213,953,400,000,000,000,000,000,000,000
12
FIX(client): Only allow "http"/"https" for URLs in ConnectDialog Our public server list registration script doesn't have an URL scheme whitelist for the website field. Turns out a malicious server can register itself with a dangerous URL in an attempt to attack a user's machine. User interaction is required, as the URL has to be opened by right-clicking on the server entry and clicking on "Open Webpage". This commit introduces a client-side whitelist, which only allows "http" and "https" schemes. We will also implement it in our public list. In future we should probably add a warning QMessageBox informing the user that there's no guarantee the URL is safe (regardless of the scheme). Thanks a lot to https://positive.security for reporting the RCE vulnerability to us privately.
static void t1_scan_keys(PDF pdf)
{
    int i, k;
    char *p, *q, *r;
    const key_entry *key;
    if (t1_prefix("/FontType")) {
        p = t1_line_array + strlen("FontType") + 1;
        if ((i = (int) t1_scan_num(p, 0)) != 1)
            formatted_error("type 1","Type%d fonts unsupported by backend", i);
        return;
    }
    for (key = (const key_entry *) font_key; key - font_key < FONT_KEYS_NUM; key++) {
        if (key->t1name[0] != '\0' && str_prefix(t1_line_array + 1, key->t1name))
            break;
    }
    if (key - font_key == FONT_KEYS_NUM)
        return;
    p = t1_line_array + strlen(key->t1name) + 1;
    skip_char(p, ' ');
    if ((k = (int) (key - font_key)) == FONTNAME_CODE) {
        if (*p != '/') {
            remove_eol(p, t1_line_array);
            formatted_error("type 1","a name expected: '%s'", t1_line_array);
        }
        /*tex Skip the slash. */
        r = ++p;
        for (q = t1_buf_array; *p != ' ' && *p != 10; *q++ = *p++);
        *q = 0;
        xfree(fd_cur->fontname);
        fd_cur->fontname = xstrdup(t1_buf_array);
        /*tex
            At this moment we cannot call |make_subset_tag| yet, as the encoding
            is not read; thus we mark the offset of the subset tag and write it
            later.
        */
        if (is_subsetted(fd_cur->fm)) {
            t1_fontname_offset = (int) (t1_offset() + (r - t1_line_array));
            strcpy(t1_buf_array, p);
            sprintf(r, "ABCDEF+%s%s", fd_cur->fontname, t1_buf_array);
            t1_line_ptr = eol(r);
        }
        return;
    }
    if ((k == STEMV_CODE || k == FONTBBOX1_CODE) && (*p == '[' || *p == '{'))
        p++;
    if (k == FONTBBOX1_CODE) {
        for (i = 0; i < 4; i++, k++) {
            fd_cur->font_dim[k].val = (int) t1_scan_num(p, &r);
            fd_cur->font_dim[k].set = true;
            p = r;
        }
        return;
    }
    fd_cur->font_dim[k].val = (int) t1_scan_num(p, 0);
    fd_cur->font_dim[k].set = true;
}
0
[ "CWE-119" ]
texlive-source
6ed0077520e2b0da1fd060c7f88db7b2e6068e4c
64,079,664,885,136,750,000,000,000,000,000,000,000
60
writet1 protection against buffer overflow git-svn-id: svn://tug.org/texlive/trunk/Build/source@48697 c570f23f-e606-0410-a88d-b1316a301751
pixContrastNorm(PIX *pixd,
                PIX *pixs,
                l_int32 sx,
                l_int32 sy,
                l_int32 mindiff,
                l_int32 smoothx,
                l_int32 smoothy)
{
    PIX *pixmin, *pixmax;

    PROCNAME("pixContrastNorm");

    if (!pixs || pixGetDepth(pixs) != 8)
        return (PIX *)ERROR_PTR("pixs undefined or not 8 bpp", procName, pixd);
    if (pixd && pixd != pixs)
        return (PIX *)ERROR_PTR("pixd not null or == pixs", procName, pixd);
    if (pixGetColormap(pixs))
        return (PIX *)ERROR_PTR("pixs is colormapped", procName, pixd);
    if (sx < 5 || sy < 5)
        return (PIX *)ERROR_PTR("sx and/or sy less than 5", procName, pixd);
    if (smoothx < 0 || smoothy < 0)
        return (PIX *)ERROR_PTR("smooth params less than 0", procName, pixd);
    if (smoothx > 8 || smoothy > 8)
        return (PIX *)ERROR_PTR("smooth params exceed 8", procName, pixd);

    /* Get the min and max pixel values in each tile, and represent
     * each value as a pixel in pixmin and pixmax, respectively. */
    pixMinMaxTiles(pixs, sx, sy, mindiff, smoothx, smoothy, &pixmin, &pixmax);

    /* For each tile, do a linear expansion of the dynamic range
     * of pixels so that the min value is mapped to 0 and the
     * max value is mapped to 255. */
    pixd = pixLinearTRCTiled(pixd, pixs, sx, sy, pixmin, pixmax);

    pixDestroy(&pixmin);
    pixDestroy(&pixmax);
    return pixd;
}
0
[ "CWE-125" ]
leptonica
3c18c43b6a3f753f0dfff99610d46ad46b8bfac4
86,307,369,302,665,500,000,000,000,000,000,000,000
38
Fixing oss-fuzz issue 22512: Heap-buffer-overflow in rasteropGeneralLow() * Simplified the hole-filling function `
static void get_delta_sk(X509_STORE_CTX *ctx, X509_CRL **dcrl, int *pscore,
                         X509_CRL *base, STACK_OF(X509_CRL) *crls)
{
    X509_CRL *delta;
    int i;
    if (!(ctx->param->flags & X509_V_FLAG_USE_DELTAS))
        return;
    if (!((ctx->current_cert->ex_flags | base->flags) & EXFLAG_FRESHEST))
        return;
    for (i = 0; i < sk_X509_CRL_num(crls); i++) {
        delta = sk_X509_CRL_value(crls, i);
        if (check_delta_base(delta, base)) {
            if (check_crl_time(ctx, delta, 0))
                *pscore |= CRL_SCORE_TIME_DELTA;
            X509_CRL_up_ref(delta);
            *dcrl = delta;
            return;
        }
    }
    *dcrl = NULL;
}
0
[]
openssl
33cc5dde478ba5ad79f8fd4acd8737f0e60e236e
115,065,256,287,627,150,000,000,000,000,000,000,000
21
Compat self-signed trust with reject-only aux data When auxiliary data contains only reject entries, continue to trust self-signed objects just as when no auxiliary data is present. This makes it possible to reject specific uses without changing what's accepted (and thus overring the underlying EKU). Added new supported certs and doubled test count from 38 to 76. Reviewed-by: Dr. Stephen Henson <[email protected]>
DEFUN(srchfor, SEARCH SEARCH_FORE WHEREIS, "Search forward")
{
    srch(forwardSearch, "Forward: ");
}
0
[ "CWE-59", "CWE-241" ]
w3m
18dcbadf2771cdb0c18509b14e4e73505b242753
224,627,479,529,555,470,000,000,000,000,000,000,000
4
Make temporary directory safely when ~/.w3m is unwritable
void addQueryableBackupPrivileges(PrivilegeVector* privileges) {
    Privilege::addPrivilegeToPrivilegeVector(
        privileges, Privilege(ResourcePattern::forAnyResource(), ActionType::collStats));
    Privilege::addPrivilegeToPrivilegeVector(
        privileges, Privilege(ResourcePattern::forAnyNormalResource(), ActionType::find));
    Privilege::addPrivilegeToPrivilegeVector(
        privileges, Privilege(ResourcePattern::forAnyResource(), ActionType::listCollections));
    Privilege::addPrivilegeToPrivilegeVector(
        privileges, Privilege(ResourcePattern::forAnyResource(), ActionType::listIndexes));

    ActionSet clusterActions;
    clusterActions << ActionType::getParameter  // To check authSchemaVersion
                   << ActionType::listDatabases << ActionType::useUUID;
    Privilege::addPrivilegeToPrivilegeVector(
        privileges, Privilege(ResourcePattern::forClusterResource(), clusterActions));

    Privilege::addPrivilegeToPrivilegeVector(
        privileges, Privilege(ResourcePattern::forDatabaseName("config"), ActionType::find));
    Privilege::addPrivilegeToPrivilegeVector(
        privileges, Privilege(ResourcePattern::forDatabaseName("local"), ActionType::find));

    Privilege::addPrivilegeToPrivilegeVector(
        privileges,
        Privilege(ResourcePattern::forCollectionName("system.indexes"), ActionType::find));
    Privilege::addPrivilegeToPrivilegeVector(
        privileges,
        Privilege(ResourcePattern::forCollectionName("system.namespaces"), ActionType::find));
    Privilege::addPrivilegeToPrivilegeVector(
        privileges,
        Privilege(ResourcePattern::forCollectionName("system.js"), ActionType::find));
    Privilege::addPrivilegeToPrivilegeVector(
        privileges,
        Privilege(ResourcePattern::forCollectionName("system.users"), ActionType::find));
    Privilege::addPrivilegeToPrivilegeVector(
        privileges,
        Privilege(ResourcePattern::forCollectionName("system.profile"), ActionType::find));
    Privilege::addPrivilegeToPrivilegeVector(
        privileges,
        Privilege(
            ResourcePattern::forExactNamespace(AuthorizationManager::usersAltCollectionNamespace),
            ActionType::find));
    Privilege::addPrivilegeToPrivilegeVector(
        privileges,
        Privilege(ResourcePattern::forExactNamespace(
                      AuthorizationManager::usersBackupCollectionNamespace),
                  ActionType::find));
    Privilege::addPrivilegeToPrivilegeVector(
        privileges,
        Privilege(
            ResourcePattern::forExactNamespace(AuthorizationManager::rolesCollectionNamespace),
            ActionType::find));
    Privilege::addPrivilegeToPrivilegeVector(
        privileges,
        Privilege(
            ResourcePattern::forExactNamespace(AuthorizationManager::versionCollectionNamespace),
            ActionType::find));
    Privilege::addPrivilegeToPrivilegeVector(
        privileges,
        Privilege(ResourcePattern::forExactNamespace(NamespaceString("config", "settings")),
                  ActionType::find));
}
0
[ "CWE-20" ]
mongo
cbec187266a9f902b3906ae8ccef2bbda0c5b27b
153,206,353,328,749,150,000,000,000,000,000,000,000
70
SERVER-36263 Bypassing operation validation in applyOps should require special privilege
static inline void put_tpel_pixels_mc00_c(uint8_t *dst, const uint8_t *src,
                                          int stride, int width, int height){
    switch(width){
    case 2: put_pixels2_8_c (dst, src, stride, height); break;
    case 4: put_pixels4_8_c (dst, src, stride, height); break;
    case 8: put_pixels8_8_c (dst, src, stride, height); break;
    case 16:put_pixels16_8_c(dst, src, stride, height); break;
    }
}
0
[ "CWE-703", "CWE-189" ]
FFmpeg
454a11a1c9c686c78aa97954306fb63453299760
150,256,261,345,182,920,000,000,000,000,000,000,000
8
avcodec/dsputil: fix signedness in sizeof() comparissions Signed-off-by: Michael Niedermayer <[email protected]>
static void __vsock_remove_bound(struct vsock_sock *vsk)
{
    list_del_init(&vsk->bound_table);
    sock_put(&vsk->sk);
}
0
[ "CWE-667" ]
linux
c518adafa39f37858697ac9309c6cf1805581446
83,283,639,406,459,090,000,000,000,000,000,000,000
5
vsock: fix the race conditions in multi-transport support There are multiple similar bugs implicitly introduced by the commit c0cfa2d8a788fcf4 ("vsock: add multi-transports support") and commit 6a2c0962105ae8ce ("vsock: prevent transport modules unloading"). The bug pattern: [1] vsock_sock.transport pointer is copied to a local variable, [2] lock_sock() is called, [3] the local variable is used. VSOCK multi-transport support introduced the race condition: vsock_sock.transport value may change between [1] and [2]. Let's copy vsock_sock.transport pointer to local variables after the lock_sock() call. Fixes: c0cfa2d8a788fcf4 ("vsock: add multi-transports support") Signed-off-by: Alexander Popov <[email protected]> Reviewed-by: Stefano Garzarella <[email protected]> Reviewed-by: Jorgen Hansen <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
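The bug pattern and its fix, sketched (simplified, not the exact af_vsock.c code): copy vsk->transport only after lock_sock(), never before.

static int do_op_sketch(struct sock *sk)
{
    struct vsock_sock *vsk = vsock_sk(sk);
    const struct vsock_transport *transport;

    lock_sock(sk);
    transport = vsk->transport;   /* copy only while the socket lock is held */
    if (!transport) {
        release_sock(sk);
        return -ENODEV;
    }
    /* ... use 'transport' under the socket lock ... */
    release_sock(sk);
    return 0;
}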
dwarf_select_sections_by_letters (const char *letters)
{
    int result = 0;

    while (* letters)
    {
        const debug_dump_long_opts *entry;

        for (entry = debug_option_table; entry->letter; entry++)
        {
            if (entry->letter == * letters)
            {
                if (entry->val == 0)
                    * entry->variable = 0;
                else
                    * entry->variable |= entry->val;
                result |= entry->val;
                break;
            }
        }

        if (entry->letter == 0)
            warn (_("Unrecognized debug letter option '%c'\n"), * letters);

        letters ++;
    }

    /* The --debug-dump=frames-interp option also enables the
       --debug-dump=frames option.  */
    if (do_debug_frames_interp)
        do_debug_frames = 1;

    return result;
}
0
[ "CWE-703" ]
binutils-gdb
695c6dfe7e85006b98c8b746f3fd5f913c94ebff
38,374,485,137,319,693,000,000,000,000,000,000,000
34
PR29370, infinite loop in display_debug_abbrev The PR29370 testcase is a fuzzed object file with multiple .trace_abbrev sections. Multiple .trace_abbrev or .debug_abbrev sections are not a violation of the DWARF standard. The DWARF5 standard even gives an example of multiple .debug_abbrev sections contained in groups. Caching and lookup of processed abbrevs thus needs to be done by section and offset rather than base and offset. (Why base anyway?) Or, since section contents are kept, by a pointer into the contents. PR 29370 * dwarf.c (struct abbrev_list): Replace abbrev_base and abbrev_offset with raw field. (find_abbrev_list_by_abbrev_offset): Delete. (find_abbrev_list_by_raw_abbrev): New function. (process_abbrev_set): Set list->raw and list->next. (find_and_process_abbrev_set): Replace abbrev list lookup with new function. Don't set list abbrev_base, abbrev_offset or next.
really_add_connection (NMConnection *connection,
                       gboolean canceled,
                       GError *error,
                       gpointer user_data)
{
    ActionInfo *info = user_data;
    NMConnectionEditor *editor;
    GError *editor_error = NULL;
    const char *message = _("The connection editor dialog could not be initialized due to an unknown error.");
    gboolean can_modify;

    g_return_if_fail (info != NULL);

    if (canceled)
        return;

    if (!connection) {
        error_dialog (info->list_window,
                      _("Could not create new connection"),
                      "%s",
                      (error && error->message) ? error->message : message);
        return;
    }

    can_modify = nm_dbus_settings_system_get_can_modify (info->list->system_settings);
    editor = nm_connection_editor_new (connection, can_modify, &error);
    if (!editor) {
        error_dialog (info->list_window,
                      _("Could not edit new connection"),
                      "%s",
                      (editor_error && editor_error->message) ? editor_error->message : message);
        g_clear_error (&editor_error);
        return;
    }

    g_signal_connect (G_OBJECT (editor), "done", G_CALLBACK (add_done_cb), info);
    g_hash_table_insert (info->list->editors, connection, editor);

    nm_connection_editor_run (editor);
}
0
[ "CWE-200" ]
network-manager-applet
8627880e07c8345f69ed639325280c7f62a8f894
317,389,247,335,587,000,000,000,000,000,000,000,000
40
editor: prevent any registration of objects on the system bus D-Bus access-control is name-based; so requests for a specific name are allowed/denied based on the rules in /etc/dbus-1/system.d. But apparently apps still get a non-named service on the bus, and if we register *any* object even though we don't have a named service, dbus and dbus-glib will happily proxy signals. Since the connection editor shouldn't ever expose anything having to do with connections on any bus, make sure that's the case.
unzzip_show_list (int argc, char ** argv)
{
    return unzzip_list(argc, argv, 0);
}
0
[ "CWE-772" ]
zziplib
83a2da55922f67e07f22048ac9671a44cc0d35c4
155,604,367,541,314,760,000,000,000,000,000,000,000
4
ensure disk_close to avoid mem-leak #40
dns_zonemgr_settransfersin(dns_zonemgr_t *zmgr, uint32_t value) {
    REQUIRE(DNS_ZONEMGR_VALID(zmgr));

    zmgr->transfersin = value;
}
0
[ "CWE-327" ]
bind9
f09352d20a9d360e50683cd1d2fc52ccedcd77a0
231,332,608,492,008,200,000,000,000,000,000,000,000
5
Update keyfetch_done compute_tag check If in keyfetch_done the compute_tag fails (because for example the algorithm is not supported), don't crash, but instead ignore the key.
SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode,
        unsigned, dev)
{
    return do_mknodat(AT_FDCWD, filename, mode, dev);
}
0
[ "CWE-416", "CWE-284" ]
linux
d0cb50185ae942b03c4327be322055d622dc79f6
233,334,257,936,451,820,000,000,000,000,000,000,000
4
do_last(): fetch directory ->i_mode and ->i_uid before it's too late may_create_in_sticky() call is done when we already have dropped the reference to dir. Fixes: 30aba6656f61e (namei: allow restricted O_CREAT of FIFOs and regular files) Signed-off-by: Al Viro <[email protected]>
exitcmd(int argc, char **argv)
{
    if (stoppedjobs())
        return 0;

    if (argc > 1)
        savestatus = number(argv[1]);

    exraise(EXEXIT);
    /* NOTREACHED */
}
0
[]
dash
29d6f2148f10213de4e904d515e792d2cf8c968e
183,325,499,555,626,170,000,000,000,000,000,000,000
11
eval: Check nflag in evaltree instead of cmdloop This patch moves the nflag check from cmdloop into evaltree. This is so that nflag will be in force even if we enter the shell via a path other than cmdloop, e.g., through sh -c. Reported-by: Joey Hess <[email protected]> Signed-off-by: Herbert Xu <[email protected]>
void ServerConnectionImpl::onUrl(const char* data, size_t length) {
    if (active_request_) {
        active_request_->request_url_.append(data, length);
    }
}
0
[ "CWE-400", "CWE-703" ]
envoy
afc39bea36fd436e54262f150c009e8d72db5014
301,605,998,684,131,170,000,000,000,000,000,000,000
5
Track byteSize of HeaderMap internally. Introduces a cached byte size updated internally in HeaderMap. The value is stored as an optional, and is cleared whenever a non-const pointer or reference to a HeaderEntry is accessed. The cached value can be set with refreshByteSize() which performs an iteration over the HeaderMap to sum the size of each key and value in the HeaderMap. Signed-off-by: Asra Ali <[email protected]>
R_API bool r_io_bank_map_depriorize(RIO *io, const ut32 bankid, const ut32 mapid) { RIOBank *bank = r_io_bank_get (io, bankid); RIOMap *map = r_io_map_get (io, mapid); r_return_val_if_fail (bank && map, false); RListIter *iter; RIOMapRef *mapref = NULL; r_list_foreach (bank->maprefs, iter, mapref) { if (mapref->id == mapid) { goto found; } } // map is not referenced by this bank return false; found: if (iter == bank->maprefs->head) { // map is already lowest priority return true; } bank->last_used = NULL; _delete_submaps_from_bank_tree (io, bank, iter, map); r_list_delete (bank->maprefs, iter); return r_io_bank_map_add_bottom (io, bankid, mapid); }
0
[ "CWE-416" ]
radare2
b5cb90b28ec71fda3504da04e3cc94a362807f5e
54,142,613,053,745,440,000,000,000,000,000,000,000
23
Prefer memleak over UAF (use-after-free) in io.bank's rbtree bug ##crash

* That's a workaround; the proper fix will come later
* Reproducer: bins/fuzzed/iobank-crash
* Reported by Akyne Choi via huntr.dev
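A toy illustration of the trade-off the message names (the structure
and helper are hypothetical): when a node may still be referenced
elsewhere, dropping the pointer without freeing is a safe stopgap —
a bounded leak instead of a dangling pointer:

#include <stdio.h>
#include <stdlib.h>

struct node { int id; };

static void detach_node(struct node **slot, int possibly_shared)
{
	if (possibly_shared) {
		*slot = NULL; /* leak on purpose: no free, so no dangling use */
		return;
	}
	free(*slot);
	*slot = NULL;
}

int main(void)
{
	struct node *n = malloc(sizeof *n);
	n->id = 1;
	detach_node(&n, 1); /* shared: leaked rather than freed */
	printf("slot is %p\n", (void *)n);
	return 0;
}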
static inline int nla_nest_compat_end(struct sk_buff *skb, struct nlattr *start) { struct nlattr *nest = (void *)start + NLMSG_ALIGN(start->nla_len); start->nla_len = skb_tail_pointer(skb) - (unsigned char *)start; return nla_nest_end(skb, nest); }
0
[]
linux-2.6
1045b03e07d85f3545118510a587035536030c1c
254,914,989,881,275,680,000,000,000,000,000,000,000
7
netlink: fix overrun in attribute iteration

kmemcheck reported this:

kmemcheck: Caught 16-bit read from uninitialized memory (f6c1ba30)
0500110001508abf050010000500000002017300140000006f72672e66726565
 i i i i i i i i i i i i i u u u u u u u u u u u u u u u u u u u
 ^

Pid: 3462, comm: wpa_supplicant Not tainted (2.6.27-rc3-00054-g6397ab9-dirty #13)
EIP: 0060:[<c05de64a>] EFLAGS: 00010296 CPU: 0
EIP is at nla_parse+0x5a/0xf0
EAX: 00000008 EBX: fffffffd ECX: c06f16c0 EDX: 00000005
ESI: 00000010 EDI: f6c1ba30 EBP: f6367c6c ESP: c0a11e88
DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
CR0: 8005003b CR2: f781cc84 CR3: 3632f000 CR4: 000006d0
DR0: c0ead9bc DR1: 00000000 DR2: 00000000 DR3: 00000000
DR6: ffff4ff0 DR7: 00000400
 [<c05d4b23>] rtnl_setlink+0x63/0x130
 [<c05d5f75>] rtnetlink_rcv_msg+0x165/0x200
 [<c05ddf66>] netlink_rcv_skb+0x76/0xa0
 [<c05d5dfe>] rtnetlink_rcv+0x1e/0x30
 [<c05dda21>] netlink_unicast+0x281/0x290
 [<c05ddbe9>] netlink_sendmsg+0x1b9/0x2b0
 [<c05beef2>] sock_sendmsg+0xd2/0x100
 [<c05bf945>] sys_sendto+0xa5/0xd0
 [<c05bf9a6>] sys_send+0x36/0x40
 [<c05c03d6>] sys_socketcall+0x1e6/0x2c0
 [<c020353b>] sysenter_do_call+0x12/0x3f
 [<ffffffff>] 0xffffffff

This is the line in nla_ok():

/**
 * nla_ok - check if the netlink attribute fits into the remaining bytes
 * @nla: netlink attribute
 * @remaining: number of bytes remaining in attribute stream
 */
static inline int nla_ok(const struct nlattr *nla, int remaining)
{
	return remaining >= sizeof(*nla) &&
	       nla->nla_len >= sizeof(*nla) &&
	       nla->nla_len <= remaining;
}

It turns out that remaining can become negative due to alignment in
nla_next(). But GCC promotes "remaining" to unsigned in the test
against sizeof(*nla) above. Therefore the test succeeds, and the
nla_for_each_attr() may access memory outside the received buffer.

A short example illustrating this point is here:

#include <stdio.h>

int main(void)
{
	printf("%d\n", -1 >= sizeof(int));
}

...which prints "1".

This patch adds a cast in front of the sizeof so that GCC will make
a signed comparison and fix the illegal memory dereference. With the
patch applied, there is no kmemcheck report.

Signed-off-by: Vegard Nossum <[email protected]>
Acked-by: Thomas Graf <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
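The one-line fix the message describes, demonstrated in a compilable
snippet: casting sizeof to int forces a signed comparison in the guard
that keeps nla_ok() from dereferencing a struct nlattr past the end of
the buffer:

#include <stdio.h>

struct nlattr { unsigned short nla_len, nla_type; };

int main(void)
{
	int remaining = -1; /* can go negative via nla_next() alignment */
	printf("%d\n", remaining >= sizeof(struct nlattr));       /* 1: the bug */
	printf("%d\n", remaining >= (int) sizeof(struct nlattr)); /* 0: the fix */
	return 0;
}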
static void vmx_inject_nmi(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); ++vcpu->stat.nmi_injections; vmx->loaded_vmcs->nmi_known_unmasked = false; if (vmx->rmode.vm86_active) { if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE) kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); }
0
[ "CWE-20", "CWE-617" ]
linux
3a8b0677fc6180a467e26cc32ce6b0c09a32f9bb
115,535,143,794,254,640,000,000,000,000,000,000,000
16
KVM: VMX: Do not BUG() on out-of-bounds guest IRQ

The value of the guest_irq argument to vmx_update_pi_irte() is
ultimately coming from a KVM_IRQFD API call.  Do not BUG() in
vmx_update_pi_irte() if the value is out of bounds.  (Especially,
since KVM as a whole seems to hang after that.)

Instead, print a message only once if we find that we don't have a
route for a certain IRQ (which can be out-of-bounds or within the
array).

This fixes CVE-2017-1000252.

Fixes: efc644048ecde54 ("KVM: x86: Update IRTE for posted-interrupts")
Signed-off-by: Jan H. Schönherr <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
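A userspace sketch of the hardening pattern described above — validate
the caller-supplied index and warn once instead of halting everything.
The bound and function names are illustrative, not KVM's:

#include <stdio.h>

#define NR_GUEST_IRQS 256 /* illustrative bound, not KVM's table size */

static int warned;

static int update_pi_irte(unsigned int guest_irq)
{
	if (guest_irq >= NR_GUEST_IRQS) {
		if (!warned) {
			fprintf(stderr, "no route for irq %u (dropped)\n", guest_irq);
			warned = 1; /* print only once, as the commit does */
		}
		return -1; /* reject instead of BUG() */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", update_pi_irte(5));
	printf("%d\n", update_pi_irte(1u << 30)); /* attacker-controlled value */
	return 0;
}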
MaybeLocal<Value> GetPeerCert( Environment* env, const SSLPointer& ssl, bool abbreviated, bool is_server) { ClearErrorOnReturn clear_error_on_return; Local<Object> result; MaybeLocal<Object> maybe_cert; // NOTE: This is because of the odd OpenSSL behavior. On client `cert_chain` // contains the `peer_certificate`, but on server it doesn't. X509Pointer cert(is_server ? SSL_get_peer_certificate(ssl.get()) : nullptr); STACK_OF(X509)* ssl_certs = SSL_get_peer_cert_chain(ssl.get()); if (!cert && (ssl_certs == nullptr || sk_X509_num(ssl_certs) == 0)) return Undefined(env->isolate()); // Short result requested. if (abbreviated) { maybe_cert = X509ToObject(env, cert ? cert.get() : sk_X509_value(ssl_certs, 0)); return maybe_cert.ToLocal(&result) ? result : MaybeLocal<Value>(); } StackOfX509 peer_certs = CloneSSLCerts(std::move(cert), ssl_certs); if (peer_certs == nullptr) return Undefined(env->isolate()); // First and main certificate. X509Pointer first_cert(sk_X509_value(peer_certs.get(), 0)); CHECK(first_cert); maybe_cert = X509ToObject(env, first_cert.release()); if (!maybe_cert.ToLocal(&result)) return MaybeLocal<Value>(); Local<Object> issuer_chain; MaybeLocal<Object> maybe_issuer_chain; maybe_issuer_chain = AddIssuerChainToObject( &cert, result, std::move(peer_certs), env); if (!maybe_issuer_chain.ToLocal(&issuer_chain)) return MaybeLocal<Value>(); maybe_issuer_chain = GetLastIssuedCert( &cert, ssl, issuer_chain, env); issuer_chain.Clear(); if (!maybe_issuer_chain.ToLocal(&issuer_chain)) return MaybeLocal<Value>(); // Last certificate should be self-signed. if (X509_check_issued(cert.get(), cert.get()) == X509_V_OK && !Set<Object>(env->context(), issuer_chain, env->issuercert_string(), issuer_chain)) { return MaybeLocal<Value>(); } return result; }
0
[ "CWE-295" ]
node
466e5415a2b7b3574ab5403acb87e89a94a980d1
266,974,637,785,737,540,000,000,000,000,000,000,000
68
crypto,tls: implement safe x509 GeneralName format

This change introduces JSON-compatible escaping rules for strings that
include X.509 GeneralName components (see RFC 5280). This non-standard
format avoids ambiguities and prevents injection attacks that could
previously lead to X.509 certificates being accepted even though they
were not valid for the target hostname.

These changes affect the format of subject alternative names and the
format of authority information access. The checkServerIdentity function
has been modified to safely handle the new format, eliminating the
possibility of injecting subject alternative names into the verification
logic.

Because each subject alternative name is only encoded as a JSON string
literal if necessary for security purposes, this change will only be
visible in rare cases.

This addresses CVE-2021-44532.

CVE-ID: CVE-2021-44532
PR-URL: https://github.com/nodejs-private/node-private/pull/300
Reviewed-By: Michael Dawson <[email protected]>
Reviewed-By: Rich Trott <[email protected]>
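A simplified sketch of the escaping idea — not Node's implementation:
quote a GeneralName component as a JSON-style string literal only when
it contains characters that could be confused with the surrounding
"type:value, type:value" list syntax (real JSON escaping would also
\u-encode control characters; that is elided here):

#include <ctype.h>
#include <stdio.h>

static void print_san_value(const char *s)
{
	int needs_quoting = 0;
	for (const char *p = s; *p; p++)
		if (*p == ',' || *p == '"' || *p == '\\' || iscntrl((unsigned char)*p))
			needs_quoting = 1;
	if (!needs_quoting) {
		fputs(s, stdout); /* common case: printed bare, output unchanged */
		return;
	}
	putchar('"');
	for (const char *p = s; *p; p++) {
		if (*p == '"' || *p == '\\')
			putchar('\\');
		putchar(*p);
	}
	putchar('"');
}

int main(void)
{
	print_san_value("example.com");              /* bare */
	putchar('\n');
	print_san_value("evil.com, DNS:victim.com"); /* quoted: no injection */
	putchar('\n');
	return 0;
}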
onigenc_init(void) { return 0; }
0
[ "CWE-125" ]
Onigmo
d4cf99d30bd5f6a8a4ababd0b9d7b06f3a479a24
309,167,282,197,512,230,000,000,000,000,000,000,000
4
Fix out-of-bounds read in parse_char_class() (Close #139)

/[\x{111111}]/ causes out-of-bounds read when encoding is a single byte
encoding. \x{111111} is an invalid codepoint for a single byte encoding.
Check if it is a valid codepoint.
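A toy version of the added validation (names are stand-ins for
Onigmo's): in a single-byte encoding every valid codepoint fits in one
byte, so a \x{...} escape above 0xff must be rejected before it can
index a 256-entry table:

#include <stdio.h>

static int valid_code_point(unsigned long code, int single_byte_encoding)
{
	if (single_byte_encoding)
		return code <= 0xff; /* one byte per character, full stop */
	return code <= 0x10ffff;     /* Unicode upper bound otherwise */
}

int main(void)
{
	printf("%d\n", valid_code_point(0x41, 1));     /* 1: 'A' is fine */
	printf("%d\n", valid_code_point(0x111111, 1)); /* 0: the #139 case */
	return 0;
}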
static void hevc_parse_hrd_parameters(GF_BitStream *bs, Bool commonInfPresentFlag, int maxNumSubLayersMinus1) { int i; Bool nal_hrd_parameters_present_flag = GF_FALSE; Bool vcl_hrd_parameters_present_flag = GF_FALSE; Bool sub_pic_hrd_params_present_flag = GF_FALSE; if (commonInfPresentFlag) { nal_hrd_parameters_present_flag = gf_bs_read_int(bs, 1); vcl_hrd_parameters_present_flag = gf_bs_read_int(bs, 1); if (nal_hrd_parameters_present_flag || vcl_hrd_parameters_present_flag) { sub_pic_hrd_params_present_flag = gf_bs_read_int(bs, 1); if (sub_pic_hrd_params_present_flag) { /*tick_divisor_minus2 = */gf_bs_read_int(bs, 8); /*du_cpb_removal_delay_increment_length_minus1 = */gf_bs_read_int(bs, 5); /*sub_pic_cpb_params_in_pic_timing_sei_flag = */gf_bs_read_int(bs, 1); /*dpb_output_delay_du_length_minus1 = */gf_bs_read_int(bs, 5); } /*bit_rate_scale = */gf_bs_read_int(bs, 4); /*cpb_size_scale = */gf_bs_read_int(bs, 4); if (sub_pic_hrd_params_present_flag) { /*cpb_size_du_scale = */gf_bs_read_int(bs, 4); } /*initial_cpb_removal_delay_length_minus1 = */gf_bs_read_int(bs, 5); /*au_cpb_removal_delay_length_minus1 = */gf_bs_read_int(bs, 5); /*dpb_output_delay_length_minus1 = */gf_bs_read_int(bs, 5); } } for (i = 0; i <= maxNumSubLayersMinus1; i++) { Bool fixed_pic_rate_general_flag_i = gf_bs_read_int(bs, 1); Bool fixed_pic_rate_within_cvs_flag_i = GF_TRUE; Bool low_delay_hrd_flag_i = GF_FALSE; u32 cpb_cnt_minus1_i = 0; if (!fixed_pic_rate_general_flag_i) { fixed_pic_rate_within_cvs_flag_i = gf_bs_read_int(bs, 1); } if (fixed_pic_rate_within_cvs_flag_i) /*elemental_duration_in_tc_minus1[i] = */bs_get_ue(bs); else low_delay_hrd_flag_i = gf_bs_read_int(bs, 1); if (!low_delay_hrd_flag_i) { cpb_cnt_minus1_i = bs_get_ue(bs); } if (nal_hrd_parameters_present_flag) { sub_layer_hrd_parameters(bs, i, cpb_cnt_minus1_i, sub_pic_hrd_params_present_flag); } if (vcl_hrd_parameters_present_flag) { sub_layer_hrd_parameters(bs, i, cpb_cnt_minus1_i, sub_pic_hrd_params_present_flag); } } }
0
[ "CWE-119", "CWE-787" ]
gpac
90dc7f853d31b0a4e9441cba97feccf36d8b69a4
245,083,979,269,029,330,000,000,000,000,000,000,000
50
fix some exploitable overflows (#994, #997)
static int rsa_prepare_blinding( rsa_context *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng ) { int ret; if( ctx->Vf.p != NULL ) { /* We already have blinding values, just update them by squaring */ MPI_CHK( mpi_mul_mpi( &ctx->Vi, &ctx->Vi, &ctx->Vi ) ); MPI_CHK( mpi_mod_mpi( &ctx->Vi, &ctx->Vi, &ctx->N ) ); MPI_CHK( mpi_mul_mpi( &ctx->Vf, &ctx->Vf, &ctx->Vf ) ); MPI_CHK( mpi_mod_mpi( &ctx->Vf, &ctx->Vf, &ctx->N ) ); return( 0 ); } /* Unblinding value: Vf = random number */ MPI_CHK( mpi_fill_random( &ctx->Vf, ctx->len - 1, f_rng, p_rng ) ); /* Mathematically speaking, the algorithm should check Vf * against 0, P and Q (Vf should be relatively prime to N, and 0 < Vf < N), * so that Vf^-1 exists. */ /* Blinding value: Vi = Vf^(-e) mod N */ MPI_CHK( mpi_inv_mod( &ctx->Vi, &ctx->Vf, &ctx->N ) ); MPI_CHK( mpi_exp_mod( &ctx->Vi, &ctx->Vi, &ctx->E, &ctx->N, &ctx->RN ) ); cleanup: return( ret ); }
1
[ "CWE-310" ]
polarssl
6b06502c4b19ce40a88faca3528b9f3f0c87a755
227,778,554,332,612,530,000,000,000,000,000,000,000
31
Changed RSA blinding to a slower but thread-safe version
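A heavily simplified sketch of the trade-off the message names. The
code above mutates the shared cached values Vi/Vf in place — a data
race when one context is used from several threads; the thread-safe
alternative derives fresh, call-local blinding state every time. All
names and arithmetic below are placeholders, with the bignum layer
elided entirely:

#include <stdio.h>

struct blinding { unsigned long vi, vf; };

/* Fresh values per call: slower than updating a cache by squaring,
 * but nothing is shared, so nothing can race. */
static struct blinding fresh_blinding(unsigned long (*rng)(void))
{
	struct blinding b;
	b.vf = rng() | 1;    /* stand-in for mpi_fill_random + checks */
	b.vi = b.vf * 3 + 1; /* placeholder for Vf^(-e) mod N */
	return b;            /* call-local state, owned by the caller */
}

static unsigned long counter_rng(void)
{
	static unsigned long s = 7;
	return s += 13;
}

int main(void)
{
	struct blinding b = fresh_blinding(counter_rng);
	printf("vf=%lu vi=%lu\n", b.vf, b.vi);
	return 0;
}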
GF_Err fdsa_AddBox(GF_Box *s, GF_Box *a)
{
	GF_HintSample *ptr = (GF_HintSample *)s;
	switch (a->type) {
	case GF_ISOM_BOX_TYPE_FDPA:
		gf_list_add(ptr->packetTable, a);
		break;
	case GF_ISOM_BOX_TYPE_EXTR:
		if (ptr->extra_data) ERROR_ON_DUPLICATED_BOX(a, ptr)
		ptr->extra_data = (GF_ExtraDataBox*)a;
		break;
	default:
		return gf_isom_box_add_default(s, a);
	}
	return GF_OK;
}
0
[ "CWE-400", "CWE-401" ]
gpac
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
58,297,940,144,119,920,000,000,000,000,000,000,000
16
prevent dref memleak on invalid input (#1183)
int TS_TST_INFO_set_serial(TS_TST_INFO *a, ASN1_INTEGER *serial) { ASN1_INTEGER *new_serial; if (a->serial == serial) return 1; new_serial = ASN1_INTEGER_dup(serial); if (new_serial == NULL) { TSerr(TS_F_TS_TST_INFO_SET_SERIAL, ERR_R_MALLOC_FAILURE); return 0; } ASN1_INTEGER_free(a->serial); a->serial = new_serial; return 1; }
0
[]
openssl
c7235be6e36c4bef84594aa3b2f0561db84b63d8
298,787,179,396,644,900,000,000,000,000,000,000,000
16
RFC 3161 compliant time stamp request creation, response generation
and response verification.

Submitted by: Zoltan Glozik <[email protected]>
Reviewed by: Ulf Moeller
static void schedule_orphans_remove(struct perf_event_context *ctx) { if (!ctx->task || ctx->orphans_remove_sched || !perf_wq) return; if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) { get_ctx(ctx); ctx->orphans_remove_sched = true; } }
0
[ "CWE-284", "CWE-264" ]
linux
f63a8daa5812afef4f06c962351687e1ff9ccb2b
144,326,716,852,686,600,000,000,000,000,000,000,000
10
perf: Fix event->ctx locking

There have been a few reported issues wrt. the lack of locking around
changing event->ctx. This patch tries to address those.

It avoids the whole rwsem thing; and while it appears to work, please
give it some thought in review.

What I did fail at is sensible runtime checks on the use of event->ctx,
the RCU use makes it very hard.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Linus Torvalds <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>