Dataset schema:

| column | type | range |
|---|---|---|
| func | string | length 0 - 484k |
| target | int64 | 0 - 1 |
| cwe | list | length 0 - 4 |
| project | string | 799 classes |
| commit_id | string | length 40 |
| hash | float64 | 1,215,700,430,453,689,100,000,000 - 340,281,914,521,452,260,000,000,000,000 |
| size | int64 | 1 - 24k |
| message | string | length 0 - 13.3k |
static Image *ReadOnePNGImage(MngInfo *mng_info,
const ImageInfo *image_info, ExceptionInfo *exception)
{
/* Read one PNG image */
/* To do: Read the tEXt/Creation Time chunk into the date:create property */
Image
*image;
char
im_vers[32],
libpng_runv[32],
libpng_vers[32],
zlib_runv[32],
zlib_vers[32];
int
intent, /* "PNG Rendering intent", which is ICC intent + 1 */
num_raw_profiles,
num_text,
num_text_total,
num_passes,
number_colors,
pass,
ping_bit_depth,
ping_color_type,
ping_file_depth,
ping_interlace_method,
ping_compression_method,
ping_filter_method,
ping_num_trans,
unit_type;
double
file_gamma;
LongPixelPacket
transparent_color;
MagickBooleanType
logging,
ping_found_cHRM,
ping_found_gAMA,
ping_found_iCCP,
ping_found_sRGB,
ping_found_sRGB_cHRM,
ping_preserve_iCCP,
status;
MemoryInfo
*volatile pixel_info;
png_bytep
ping_trans_alpha;
png_color_16p
ping_background,
ping_trans_color;
png_info
*end_info,
*ping_info;
png_struct
*ping;
png_textp
text;
png_uint_32
ping_height,
ping_width,
x_resolution,
y_resolution;
ssize_t
ping_rowbytes,
y;
register unsigned char
*p;
register IndexPacket
*indexes;
register ssize_t
i,
x;
register PixelPacket
*q;
size_t
length,
row_offset;
Quantum
*volatile quantum_scanline;
QuantumInfo
*volatile quantum_info;
ssize_t
j;
unsigned char
*ping_pixels;
#ifdef PNG_UNKNOWN_CHUNKS_SUPPORTED
png_byte unused_chunks[]=
{
104, 73, 83, 84, (png_byte) '\0', /* hIST */
105, 84, 88, 116, (png_byte) '\0', /* iTXt */
112, 67, 65, 76, (png_byte) '\0', /* pCAL */
115, 67, 65, 76, (png_byte) '\0', /* sCAL */
115, 80, 76, 84, (png_byte) '\0', /* sPLT */
#if !defined(PNG_tIME_SUPPORTED)
116, 73, 77, 69, (png_byte) '\0', /* tIME */
#endif
#ifdef PNG_APNG_SUPPORTED /* libpng was built with APNG patch; */
/* ignore the APNG chunks */
97, 99, 84, 76, (png_byte) '\0', /* acTL */
102, 99, 84, 76, (png_byte) '\0', /* fcTL */
102, 100, 65, 84, (png_byte) '\0', /* fdAT */
#endif
};
#endif
/* Define these outside of the following "if logging()" block so they will
* show in debuggers.
*/
*im_vers='\0';
(void) ConcatenateMagickString(im_vers,
MagickLibVersionText,32);
(void) ConcatenateMagickString(im_vers,
MagickLibAddendum,32);
*libpng_vers='\0';
(void) ConcatenateMagickString(libpng_vers,
PNG_LIBPNG_VER_STRING,32);
*libpng_runv='\0';
(void) ConcatenateMagickString(libpng_runv,
png_get_libpng_ver(NULL),32);
*zlib_vers='\0';
(void) ConcatenateMagickString(zlib_vers,
ZLIB_VERSION,32);
*zlib_runv='\0';
(void) ConcatenateMagickString(zlib_runv,
zlib_version,32);
logging=LogMagickEvent(CoderEvent,GetMagickModule(),
" Enter ReadOnePNGImage()\n"
" IM version = %s\n"
" Libpng version = %s",
im_vers, libpng_vers);
if (logging != MagickFalse)
{
if (LocaleCompare(libpng_vers,libpng_runv) != 0)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" running with %s", libpng_runv);
}
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Zlib version = %s", zlib_vers);
if (LocaleCompare(zlib_vers,zlib_runv) != 0)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" running with %s", zlib_runv);
}
}
#if (PNG_LIBPNG_VER < 10200)
if (image_info->verbose)
printf("Your PNG library (libpng-%s) is rather old.\n",
PNG_LIBPNG_VER_STRING);
#endif
#if (PNG_LIBPNG_VER >= 10400)
# ifndef PNG_TRANSFORM_GRAY_TO_RGB /* Added at libpng-1.4.0beta67 */
if (image_info->verbose)
{
printf("Your PNG library (libpng-%s) is an old beta version.\n",
PNG_LIBPNG_VER_STRING);
printf("Please update it.\n");
}
# endif
#endif
image=mng_info->image;
if (logging != MagickFalse)
{
(void)LogMagickEvent(CoderEvent,GetMagickModule(),
" Before reading:\n"
" image->matte=%d\n"
" image->rendering_intent=%d\n"
" image->colorspace=%d\n"
" image->gamma=%f",
(int) image->matte, (int) image->rendering_intent,
(int) image->colorspace, image->gamma);
}
intent=Magick_RenderingIntent_to_PNG_RenderingIntent(image->rendering_intent);
/* Set to an out-of-range color unless tRNS chunk is present */
transparent_color.red=65537;
transparent_color.green=65537;
transparent_color.blue=65537;
transparent_color.opacity=65537;
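/* 65537 exceeds the 16-bit maximum of 65535, so no scaled sample can
 * accidentally match this sentinel when no tRNS chunk is present */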
number_colors=0;
num_text = 0;
num_text_total = 0;
num_raw_profiles = 0;
ping_found_cHRM = MagickFalse;
ping_found_gAMA = MagickFalse;
ping_found_iCCP = MagickFalse;
ping_found_sRGB = MagickFalse;
ping_found_sRGB_cHRM = MagickFalse;
ping_preserve_iCCP = MagickFalse;
/*
Allocate the PNG structures
*/
#ifdef PNG_USER_MEM_SUPPORTED
ping=png_create_read_struct_2(PNG_LIBPNG_VER_STRING, image,
MagickPNGErrorHandler,MagickPNGWarningHandler, NULL,
(png_malloc_ptr) Magick_png_malloc,(png_free_ptr) Magick_png_free);
#else
ping=png_create_read_struct(PNG_LIBPNG_VER_STRING,image,
MagickPNGErrorHandler,MagickPNGWarningHandler);
#endif
if (ping == (png_struct *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
ping_info=png_create_info_struct(ping);
if (ping_info == (png_info *) NULL)
{
png_destroy_read_struct(&ping,(png_info **) NULL,(png_info **) NULL);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
end_info=png_create_info_struct(ping);
if (end_info == (png_info *) NULL)
{
png_destroy_read_struct(&ping,&ping_info,(png_info **) NULL);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
pixel_info=(MemoryInfo *) NULL;
quantum_scanline = (Quantum *) NULL;
quantum_info = (QuantumInfo *) NULL;
if (setjmp(png_jmpbuf(ping)))
{
/*
PNG image is corrupt.
*/
png_destroy_read_struct(&ping,&ping_info,&end_info);
if (pixel_info != (MemoryInfo *) NULL)
pixel_info=RelinquishVirtualMemory(pixel_info);
quantum_scanline=(Quantum *) RelinquishMagickMemory(quantum_scanline);
if (quantum_info != (QuantumInfo *) NULL)
quantum_info=DestroyQuantumInfo(quantum_info);
#ifdef IMPNG_SETJMP_NOT_THREAD_SAFE
UnlockSemaphoreInfo(ping_semaphore);
#endif
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" exit ReadOnePNGImage() with error.");
if (image != (Image *) NULL)
image=DestroyImageList(image);
return(image);
}
/* { For navigation to end of SETJMP-protected block. Within this
* block, use png_error() instead of Throwing an Exception, to ensure
* that libpng is able to clean up, and that the semaphore is unlocked.
*/
#ifdef IMPNG_SETJMP_NOT_THREAD_SAFE
LockSemaphoreInfo(ping_semaphore);
#endif
#ifdef PNG_BENIGN_ERRORS_SUPPORTED
/* Allow benign errors */
png_set_benign_errors(ping, 1);
#endif
#ifdef PNG_SET_USER_LIMITS_SUPPORTED
{
const char
*option;
/* Reject images with too many rows or columns */
png_set_user_limits(ping,(png_uint_32) MagickMin(PNG_UINT_31_MAX,
GetMagickResourceLimit(WidthResource)),(png_uint_32)
MagickMin(PNG_UINT_31_MAX,GetMagickResourceLimit(HeightResource)));
#if (PNG_LIBPNG_VER >= 10400)
option=GetImageOption(image_info,"png:chunk-cache-max");
if (option != (const char *) NULL)
png_set_chunk_cache_max(ping,(png_uint_32) MagickMin(PNG_UINT_32_MAX,
StringToLong(option)));
else
png_set_chunk_cache_max(ping,32767);
#endif
#if (PNG_LIBPNG_VER >= 10401)
option=GetImageOption(image_info,"png:chunk-malloc-max");
if (option != (const char *) NULL)
png_set_chunk_malloc_max(ping,(png_alloc_size_t) MagickMin((ssize_t)
PNG_SIZE_MAX,StringToLong(option)));
else
png_set_chunk_malloc_max(ping,(png_alloc_size_t) GetMaxMemoryRequest());
#endif
}
#endif /* PNG_SET_USER_LIMITS_SUPPORTED */
/*
Prepare PNG for reading.
*/
mng_info->image_found++;
png_set_sig_bytes(ping,8);
if (LocaleCompare(image_info->magick,"MNG") == 0)
{
#if defined(PNG_MNG_FEATURES_SUPPORTED)
(void) png_permit_mng_features(ping,PNG_ALL_MNG_FEATURES);
png_set_read_fn(ping,image,png_get_data);
#else
#if defined(PNG_READ_EMPTY_PLTE_SUPPORTED)
png_permit_empty_plte(ping,MagickTrue);
png_set_read_fn(ping,image,png_get_data);
#else
mng_info->image=image;
mng_info->bytes_in_read_buffer=0;
mng_info->found_empty_plte=MagickFalse;
mng_info->have_saved_bkgd_index=MagickFalse;
png_set_read_fn(ping,mng_info,mng_get_data);
#endif
#endif
}
else
png_set_read_fn(ping,image,png_get_data);
{
const char
*value;
value=GetImageOption(image_info,"profile:skip");
if (IsOptionMember("ICC",value) == MagickFalse)
{
value=GetImageOption(image_info,"png:preserve-iCCP");
if (value == NULL)
value=GetImageArtifact(image,"png:preserve-iCCP");
if (value != NULL)
ping_preserve_iCCP=MagickTrue;
#if defined(PNG_SKIP_sRGB_CHECK_PROFILE) && defined(PNG_SET_OPTION_SUPPORTED)
/* Don't let libpng check for ICC/sRGB profile because we're going
* to do that anyway. This feature was added at libpng-1.6.12.
* If logging, go ahead and check and issue a warning as appropriate.
*/
if (logging == MagickFalse)
png_set_option(ping, PNG_SKIP_sRGB_CHECK_PROFILE, PNG_OPTION_ON);
#endif
}
#if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED)
else
{
/* Ignore the iCCP chunk */
png_set_keep_unknown_chunks(ping, 1, (png_bytep)mng_iCCP, 1);
}
#endif
}
#if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED)
/* Ignore unused chunks and all unknown chunks except for caNv
and vpAg */
# if PNG_LIBPNG_VER < 10700 /* Avoid libpng16 warning */
png_set_keep_unknown_chunks(ping, 2, (png_bytep)NULL, 0);
# else
png_set_keep_unknown_chunks(ping, 1, (png_bytep)NULL, 0);
# endif
png_set_keep_unknown_chunks(ping, 2, (png_bytep)mng_caNv, 1);
png_set_keep_unknown_chunks(ping, 2, (png_bytep)mng_vpAg, 1);
png_set_keep_unknown_chunks(ping, 1, unused_chunks,
(int)sizeof(unused_chunks)/5);
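/* Each unused_chunks entry is 5 bytes (a 4-byte chunk name plus a NUL),
 * so sizeof(unused_chunks)/5 counts the chunk names registered above */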
/* Callback for other unknown chunks */
png_set_read_user_chunk_fn(ping, image, read_user_chunk_callback);
#endif
#ifdef PNG_READ_CHECK_FOR_INVALID_INDEX_SUPPORTED
/* Disable new libpng-1.5.10 feature */
png_set_check_for_invalid_index (ping, 0);
#endif
#if (PNG_LIBPNG_VER < 10400)
# if defined(PNG_USE_PNGGCCRD) && defined(PNG_ASSEMBLER_CODE_SUPPORTED) && \
(PNG_LIBPNG_VER >= 10200) && (PNG_LIBPNG_VER < 10220) && defined(__i386__)
/* Disable thread-unsafe features of pnggccrd */
if (png_access_version_number() >= 10200)
{
png_uint_32 mmx_disable_mask=0;
png_uint_32 asm_flags;
mmx_disable_mask |= ( PNG_ASM_FLAG_MMX_READ_COMBINE_ROW \
| PNG_ASM_FLAG_MMX_READ_FILTER_SUB \
| PNG_ASM_FLAG_MMX_READ_FILTER_AVG \
| PNG_ASM_FLAG_MMX_READ_FILTER_PAETH );
asm_flags=png_get_asm_flags(ping);
png_set_asm_flags(ping, asm_flags & ~mmx_disable_mask);
}
# endif
#endif
png_read_info(ping,ping_info);
/* Read and check IHDR chunk data */
png_get_IHDR(ping,ping_info,&ping_width,&ping_height,
&ping_bit_depth,&ping_color_type,
&ping_interlace_method,&ping_compression_method,
&ping_filter_method);
ping_file_depth = ping_bit_depth;
/* Swap bytes if requested */
if (ping_file_depth == 16)
{
const char
*value;
value=GetImageOption(image_info,"png:swap-bytes");
if (value == NULL)
value=GetImageArtifact(image,"png:swap-bytes");
if (value != NULL)
png_set_swap(ping);
}
/* Save bit-depth and color-type in case we later want to write a PNG00 */
{
char
msg[MaxTextExtent];
(void) FormatLocaleString(msg,MaxTextExtent,"%d",(int) ping_color_type);
(void) SetImageProperty(image,"png:IHDR.color-type-orig",msg);
(void) FormatLocaleString(msg,MaxTextExtent,"%d",(int) ping_bit_depth);
(void) SetImageProperty(image,"png:IHDR.bit-depth-orig",msg);
}
(void) png_get_tRNS(ping, ping_info, &ping_trans_alpha, &ping_num_trans,
&ping_trans_color);
(void) png_get_bKGD(ping, ping_info, &ping_background);
if (ping_bit_depth < 8)
{
png_set_packing(ping);
ping_bit_depth = 8;
}
image->depth=ping_bit_depth;
image->depth=GetImageQuantumDepth(image,MagickFalse);
image->interlace=ping_interlace_method != 0 ? PNGInterlace : NoInterlace;
if (((int) ping_color_type == PNG_COLOR_TYPE_GRAY) ||
((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA))
{
image->rendering_intent=UndefinedIntent;
intent=Magick_RenderingIntent_to_PNG_RenderingIntent(UndefinedIntent);
(void) memset(&image->chromaticity,0,
sizeof(image->chromaticity));
}
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" PNG width: %.20g, height: %.20g\n"
" PNG color_type: %d, bit_depth: %d\n"
" PNG compression_method: %d\n"
" PNG interlace_method: %d, filter_method: %d",
(double) ping_width, (double) ping_height,
ping_color_type, ping_bit_depth,
ping_compression_method,
ping_interlace_method,ping_filter_method);
}
if (png_get_valid(ping,ping_info, PNG_INFO_iCCP))
{
ping_found_iCCP=MagickTrue;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Found PNG iCCP chunk.");
}
if (png_get_valid(ping,ping_info,PNG_INFO_gAMA))
{
ping_found_gAMA=MagickTrue;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Found PNG gAMA chunk.");
}
if (png_get_valid(ping,ping_info,PNG_INFO_cHRM))
{
ping_found_cHRM=MagickTrue;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Found PNG cHRM chunk.");
}
if (ping_found_iCCP != MagickTrue && png_get_valid(ping,ping_info,
PNG_INFO_sRGB))
{
ping_found_sRGB=MagickTrue;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Found PNG sRGB chunk.");
}
#ifdef PNG_READ_iCCP_SUPPORTED
if (ping_found_iCCP !=MagickTrue &&
ping_found_sRGB != MagickTrue &&
png_get_valid(ping,ping_info, PNG_INFO_iCCP))
{
ping_found_iCCP=MagickTrue;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Found PNG iCCP chunk.");
}
if (png_get_valid(ping,ping_info,PNG_INFO_iCCP))
{
int
compression;
#if (PNG_LIBPNG_VER < 10500)
png_charp
info;
#else
png_bytep
info;
#endif
png_charp
name;
png_uint_32
profile_length;
(void) png_get_iCCP(ping,ping_info,&name,(int *) &compression,&info,
&profile_length);
if (profile_length != 0)
{
StringInfo
*profile;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reading PNG iCCP chunk.");
profile=BlobToStringInfo(info,profile_length);
if (profile == (StringInfo *) NULL)
{
png_warning(ping, "ICC profile is NULL");
profile=DestroyStringInfo(profile);
}
else
{
if (ping_preserve_iCCP == MagickFalse)
{
int
icheck,
got_crc=0;
png_uint_32
length,
profile_crc=0;
unsigned char
*data;
length=(png_uint_32) GetStringInfoLength(profile);
for (icheck=0; sRGB_info[icheck].len > 0; icheck++)
{
if (length == sRGB_info[icheck].len)
{
if (got_crc == 0)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Got a %lu-byte ICC profile (potentially sRGB)",
(unsigned long) length);
data=GetStringInfoDatum(profile);
profile_crc=crc32(0,data,length);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" with crc=%8x",(unsigned int) profile_crc);
got_crc++;
}
if (profile_crc == sRGB_info[icheck].crc)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" It is sRGB with rendering intent = %s",
Magick_RenderingIntentString_from_PNG_RenderingIntent(
sRGB_info[icheck].intent));
if (image->rendering_intent==UndefinedIntent)
{
image->rendering_intent=
Magick_RenderingIntent_from_PNG_RenderingIntent(
sRGB_info[icheck].intent);
}
break;
}
}
}
if (sRGB_info[icheck].len == 0)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Got a %lu-byte ICC profile not recognized as sRGB",
(unsigned long) length);
(void) SetImageProfile(image,"icc",profile);
}
}
else /* Preserve-iCCP */
{
(void) SetImageProfile(image,"icc",profile);
}
profile=DestroyStringInfo(profile);
}
}
}
#endif
#if defined(PNG_READ_sRGB_SUPPORTED)
{
if (ping_found_iCCP==MagickFalse && png_get_valid(ping,ping_info,
PNG_INFO_sRGB))
{
if (png_get_sRGB(ping,ping_info,&intent))
{
if (image->rendering_intent == UndefinedIntent)
image->rendering_intent=
Magick_RenderingIntent_from_PNG_RenderingIntent (intent);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reading PNG sRGB chunk: rendering_intent: %d",intent);
}
}
else if (mng_info->have_global_srgb)
{
if (image->rendering_intent == UndefinedIntent)
image->rendering_intent=
Magick_RenderingIntent_from_PNG_RenderingIntent
(mng_info->global_srgb_intent);
}
}
#endif
{
if (!png_get_gAMA(ping,ping_info,&file_gamma))
if (mng_info->have_global_gama)
png_set_gAMA(ping,ping_info,mng_info->global_gamma);
if (png_get_gAMA(ping,ping_info,&file_gamma))
{
image->gamma=(float) file_gamma;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reading PNG gAMA chunk: gamma: %f",file_gamma);
}
}
if (!png_get_valid(ping,ping_info,PNG_INFO_cHRM))
{
if (mng_info->have_global_chrm != MagickFalse)
{
(void) png_set_cHRM(ping,ping_info,
mng_info->global_chrm.white_point.x,
mng_info->global_chrm.white_point.y,
mng_info->global_chrm.red_primary.x,
mng_info->global_chrm.red_primary.y,
mng_info->global_chrm.green_primary.x,
mng_info->global_chrm.green_primary.y,
mng_info->global_chrm.blue_primary.x,
mng_info->global_chrm.blue_primary.y);
}
}
if (png_get_valid(ping,ping_info,PNG_INFO_cHRM))
{
(void) png_get_cHRM(ping,ping_info,
&image->chromaticity.white_point.x,
&image->chromaticity.white_point.y,
&image->chromaticity.red_primary.x,
&image->chromaticity.red_primary.y,
&image->chromaticity.green_primary.x,
&image->chromaticity.green_primary.y,
&image->chromaticity.blue_primary.x,
&image->chromaticity.blue_primary.y);
ping_found_cHRM=MagickTrue;
if (image->chromaticity.red_primary.x>0.6399f &&
image->chromaticity.red_primary.x<0.6401f &&
image->chromaticity.red_primary.y>0.3299f &&
image->chromaticity.red_primary.y<0.3301f &&
image->chromaticity.green_primary.x>0.2999f &&
image->chromaticity.green_primary.x<0.3001f &&
image->chromaticity.green_primary.y>0.5999f &&
image->chromaticity.green_primary.y<0.6001f &&
image->chromaticity.blue_primary.x>0.1499f &&
image->chromaticity.blue_primary.x<0.1501f &&
image->chromaticity.blue_primary.y>0.0599f &&
image->chromaticity.blue_primary.y<0.0601f &&
image->chromaticity.white_point.x>0.3126f &&
image->chromaticity.white_point.x<0.3128f &&
image->chromaticity.white_point.y>0.3289f &&
image->chromaticity.white_point.y<0.3291f)
ping_found_sRGB_cHRM=MagickTrue;
}
if (image->rendering_intent != UndefinedIntent)
{
if (ping_found_sRGB != MagickTrue &&
(ping_found_gAMA != MagickTrue ||
(image->gamma > .45 && image->gamma < .46)) &&
(ping_found_cHRM != MagickTrue ||
ping_found_sRGB_cHRM != MagickFalse) &&
ping_found_iCCP != MagickTrue)
{
png_set_sRGB(ping,ping_info,
Magick_RenderingIntent_to_PNG_RenderingIntent
(image->rendering_intent));
file_gamma=0.45455f;
ping_found_sRGB=MagickTrue;
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting sRGB as if in input");
}
}
#if defined(PNG_oFFs_SUPPORTED)
if (png_get_valid(ping,ping_info,PNG_INFO_oFFs))
{
image->page.x=(ssize_t) png_get_x_offset_pixels(ping, ping_info);
image->page.y=(ssize_t) png_get_y_offset_pixels(ping, ping_info);
if (logging != MagickFalse)
if (image->page.x || image->page.y)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reading PNG oFFs chunk: x: %.20g, y: %.20g.",(double)
image->page.x,(double) image->page.y);
}
#endif
#if defined(PNG_pHYs_SUPPORTED)
if (!png_get_valid(ping,ping_info,PNG_INFO_pHYs))
{
if (mng_info->have_global_phys)
{
png_set_pHYs(ping,ping_info,
mng_info->global_x_pixels_per_unit,
mng_info->global_y_pixels_per_unit,
mng_info->global_phys_unit_type);
}
}
x_resolution=0;
y_resolution=0;
unit_type=0;
if (png_get_valid(ping,ping_info,PNG_INFO_pHYs))
{
/*
Set image resolution.
*/
(void) png_get_pHYs(ping,ping_info,&x_resolution,&y_resolution,
&unit_type);
image->x_resolution=(double) x_resolution;
image->y_resolution=(double) y_resolution;
if (unit_type == PNG_RESOLUTION_METER)
{
image->units=PixelsPerCentimeterResolution;
image->x_resolution=(double) x_resolution/100.0;
image->y_resolution=(double) y_resolution/100.0;
}
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reading PNG pHYs chunk: xres: %.20g, yres: %.20g, units: %d.",
(double) x_resolution,(double) y_resolution,unit_type);
}
#endif
if (png_get_valid(ping,ping_info,PNG_INFO_PLTE))
{
png_colorp
palette;
(void) png_get_PLTE(ping,ping_info,&palette,&number_colors);
if ((number_colors == 0) &&
((int) ping_color_type == PNG_COLOR_TYPE_PALETTE))
{
if (mng_info->global_plte_length)
{
png_set_PLTE(ping,ping_info,mng_info->global_plte,
(int) mng_info->global_plte_length);
if (!png_get_valid(ping,ping_info,PNG_INFO_tRNS))
if (mng_info->global_trns_length)
{
if (mng_info->global_trns_length >
mng_info->global_plte_length)
{
png_warning(ping,
"global tRNS has more entries than global PLTE");
}
else
{
png_set_tRNS(ping,ping_info,mng_info->global_trns,
(int) mng_info->global_trns_length,NULL);
}
}
#ifdef PNG_READ_bKGD_SUPPORTED
if (
#ifndef PNG_READ_EMPTY_PLTE_SUPPORTED
mng_info->have_saved_bkgd_index ||
#endif
png_get_valid(ping,ping_info,PNG_INFO_bKGD))
{
png_color_16
background;
#ifndef PNG_READ_EMPTY_PLTE_SUPPORTED
if (mng_info->have_saved_bkgd_index)
background.index=mng_info->saved_bkgd_index;
#endif
if (png_get_valid(ping, ping_info, PNG_INFO_bKGD))
background.index=ping_background->index;
background.red=(png_uint_16)
mng_info->global_plte[background.index].red;
background.green=(png_uint_16)
mng_info->global_plte[background.index].green;
background.blue=(png_uint_16)
mng_info->global_plte[background.index].blue;
background.gray=(png_uint_16)
mng_info->global_plte[background.index].green;
png_set_bKGD(ping,ping_info,&background);
}
#endif
}
else
png_error(ping,"No global PLTE in file");
}
}
#ifdef PNG_READ_bKGD_SUPPORTED
if (mng_info->have_global_bkgd &&
(!png_get_valid(ping,ping_info,PNG_INFO_bKGD)))
image->background_color=mng_info->mng_global_bkgd;
if (png_get_valid(ping,ping_info,PNG_INFO_bKGD))
{
unsigned int
bkgd_scale;
/* Set image background color.
* Scale background components to 16-bit, then scale
* to quantum depth
*/
bkgd_scale = 1;
if (ping_file_depth == 1)
bkgd_scale = 255;
else if (ping_file_depth == 2)
bkgd_scale = 85;
else if (ping_file_depth == 4)
bkgd_scale = 17;
if (ping_file_depth <= 8)
bkgd_scale *= 257;
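/* Worked example: depth 1 -> 255*257 = 65535, depth 2 -> 85*257 = 21845,
 * depth 4 -> 17*257 = 4369, depth 8 -> 257; in each case the maximum
 * file-depth sample lands exactly on 65535 */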
ping_background->red *= bkgd_scale;
ping_background->green *= bkgd_scale;
ping_background->blue *= bkgd_scale;
if (logging != MagickFalse)
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reading PNG bKGD chunk, raw ping_background=(%d,%d,%d).\n"
" bkgd_scale=%d. ping_background=(%d,%d,%d).",
ping_background->red,ping_background->green,
ping_background->blue,
bkgd_scale,ping_background->red,
ping_background->green,ping_background->blue);
}
image->background_color.red=
ScaleShortToQuantum(ping_background->red);
image->background_color.green=
ScaleShortToQuantum(ping_background->green);
image->background_color.blue=
ScaleShortToQuantum(ping_background->blue);
image->background_color.opacity=OpaqueOpacity;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image->background_color=(%.20g,%.20g,%.20g).",
(double) image->background_color.red,
(double) image->background_color.green,
(double) image->background_color.blue);
}
#endif /* PNG_READ_bKGD_SUPPORTED */
if (png_get_valid(ping,ping_info,PNG_INFO_tRNS))
{
/*
Image has a tRNS chunk.
*/
int
max_sample;
size_t
one = 1;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reading PNG tRNS chunk.");
max_sample = (int) ((one << ping_file_depth) - 1);
if ((ping_color_type == PNG_COLOR_TYPE_GRAY &&
(int)ping_trans_color->gray > max_sample) ||
(ping_color_type == PNG_COLOR_TYPE_RGB &&
((int)ping_trans_color->red > max_sample ||
(int)ping_trans_color->green > max_sample ||
(int)ping_trans_color->blue > max_sample)))
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Ignoring PNG tRNS chunk with out-of-range sample.");
png_free_data(ping, ping_info, PNG_FREE_TRNS, 0);
png_set_invalid(ping,ping_info,PNG_INFO_tRNS);
image->matte=MagickFalse;
}
else
{
int
scale_to_short;
scale_to_short = 65535L/((1UL << ping_file_depth)-1);
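/* e.g. depth 1 -> 65535, depth 2 -> 21845, depth 4 -> 4369,
 * depth 8 -> 257, depth 16 -> 1; maps the maximum sample to 65535 */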
/* Scale transparent_color to short */
transparent_color.red= scale_to_short*ping_trans_color->red;
transparent_color.green= scale_to_short*ping_trans_color->green;
transparent_color.blue= scale_to_short*ping_trans_color->blue;
transparent_color.opacity= scale_to_short*ping_trans_color->gray;
if (ping_color_type == PNG_COLOR_TYPE_GRAY)
{
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Raw tRNS graylevel = %d, scaled graylevel = %d.",
ping_trans_color->gray,transparent_color.opacity);
}
transparent_color.red=transparent_color.opacity;
transparent_color.green=transparent_color.opacity;
transparent_color.blue=transparent_color.opacity;
}
}
}
#if defined(PNG_READ_sBIT_SUPPORTED)
if (mng_info->have_global_sbit)
{
if (!png_get_valid(ping,ping_info,PNG_INFO_sBIT))
png_set_sBIT(ping,ping_info,&mng_info->global_sbit);
}
#endif
num_passes=png_set_interlace_handling(ping);
png_read_update_info(ping,ping_info);
ping_rowbytes=png_get_rowbytes(ping,ping_info);
/*
Initialize image structure.
*/
mng_info->image_box.left=0;
mng_info->image_box.right=(ssize_t) ping_width;
mng_info->image_box.top=0;
mng_info->image_box.bottom=(ssize_t) ping_height;
if (mng_info->mng_type == 0)
{
mng_info->mng_width=ping_width;
mng_info->mng_height=ping_height;
mng_info->frame=mng_info->image_box;
mng_info->clip=mng_info->image_box;
}
else
{
image->page.y=mng_info->y_off[mng_info->object_id];
}
image->compression=ZipCompression;
image->columns=ping_width;
image->rows=ping_height;
if (((int) ping_color_type == PNG_COLOR_TYPE_GRAY) ||
((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA))
{
double
image_gamma = image->gamma;
(void)LogMagickEvent(CoderEvent,GetMagickModule(),
" image->gamma=%f",(float) image_gamma);
if (image_gamma > 0.75)
{
/* Set image->rendering_intent to Undefined,
* image->colorspace to GRAY, and reset image->chromaticity.
*/
image->intensity = Rec709LuminancePixelIntensityMethod;
SetImageColorspace(image,LinearGRAYColorspace);
}
else
{
RenderingIntent
save_rendering_intent = image->rendering_intent;
ChromaticityInfo
save_chromaticity = image->chromaticity;
SetImageColorspace(image,GRAYColorspace);
image->rendering_intent = save_rendering_intent;
image->chromaticity = save_chromaticity;
}
image->gamma = image_gamma;
}
else
{
double
image_gamma = image->gamma;
(void)LogMagickEvent(CoderEvent,GetMagickModule(),
" image->gamma=%f",(float) image_gamma);
if (image_gamma > 0.75)
{
/* As above, but for non-gray data: use Rec709 luminance for
 * intensity and set image->colorspace to (linear) RGB.
 */
image->intensity = Rec709LuminancePixelIntensityMethod;
SetImageColorspace(image,RGBColorspace);
}
else
{
RenderingIntent
save_rendering_intent = image->rendering_intent;
ChromaticityInfo
save_chromaticity = image->chromaticity;
SetImageColorspace(image,sRGBColorspace);
image->rendering_intent = save_rendering_intent;
image->chromaticity = save_chromaticity;
}
image->gamma = image_gamma;
}
(void)LogMagickEvent(CoderEvent,GetMagickModule(),
" image->colorspace=%d",(int) image->colorspace);
if (((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) ||
((int) ping_bit_depth < 16 &&
(int) ping_color_type == PNG_COLOR_TYPE_GRAY))
{
size_t
one;
image->storage_class=PseudoClass;
one=1;
image->colors=one << ping_file_depth;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (image->colors > 256)
image->colors=256;
#else
if (image->colors > 65536L)
image->colors=65536L;
#endif
if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE)
{
png_colorp
palette;
(void) png_get_PLTE(ping,ping_info,&palette,&number_colors);
image->colors=(size_t) number_colors;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reading PNG PLTE chunk: number_colors: %d.",number_colors);
}
}
if (image->storage_class == PseudoClass)
{
/*
Initialize image colormap.
*/
if (AcquireImageColormap(image,image->colors) == MagickFalse)
png_error(ping,"Memory allocation failed");
if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE)
{
png_colorp
palette;
(void) png_get_PLTE(ping,ping_info,&palette,&number_colors);
for (i=0; i < (ssize_t) number_colors; i++)
{
image->colormap[i].red=ScaleCharToQuantum(palette[i].red);
image->colormap[i].green=ScaleCharToQuantum(palette[i].green);
image->colormap[i].blue=ScaleCharToQuantum(palette[i].blue);
}
for ( ; i < (ssize_t) image->colors; i++)
{
image->colormap[i].red=0;
image->colormap[i].green=0;
image->colormap[i].blue=0;
}
}
}
/* Set some properties for reporting by "identify" */
{
char
msg[MaxTextExtent];
/* encode ping_width, ping_height, ping_file_depth, ping_color_type,
ping_interlace_method in value */
(void) FormatLocaleString(msg,MaxTextExtent,
"%d, %d",(int) ping_width, (int) ping_height);
(void) SetImageProperty(image,"png:IHDR.width,height",msg);
(void) FormatLocaleString(msg,MaxTextExtent,"%d",(int) ping_file_depth);
(void) SetImageProperty(image,"png:IHDR.bit_depth",msg);
(void) FormatLocaleString(msg,MaxTextExtent,"%d (%s)",
(int) ping_color_type,
Magick_ColorType_from_PNG_ColorType((int)ping_color_type));
(void) SetImageProperty(image,"png:IHDR.color_type",msg);
if (ping_interlace_method == 0)
{
(void) FormatLocaleString(msg,MaxTextExtent,"%d (Not interlaced)",
(int) ping_interlace_method);
}
else if (ping_interlace_method == 1)
{
(void) FormatLocaleString(msg,MaxTextExtent,"%d (Adam7 method)",
(int) ping_interlace_method);
}
else
{
(void) FormatLocaleString(msg,MaxTextExtent,"%d (Unknown method)",
(int) ping_interlace_method);
}
(void) SetImageProperty(image,"png:IHDR.interlace_method",msg);
if (number_colors != 0)
{
(void) FormatLocaleString(msg,MaxTextExtent,"%d",
(int) number_colors);
(void) SetImageProperty(image,"png:PLTE.number_colors",msg);
}
}
#if defined(PNG_tIME_SUPPORTED)
read_tIME_chunk(image,ping,ping_info);
#endif
/*
Read image scanlines.
*/
if (image->delay != 0)
mng_info->scenes_found++;
if ((mng_info->mng_type == 0 && (image->ping != MagickFalse)) || (
(image_info->number_scenes != 0) && (mng_info->scenes_found > (ssize_t)
(image_info->first_scene+image_info->number_scenes))))
{
/* This happens later in non-ping decodes */
if (png_get_valid(ping,ping_info,PNG_INFO_tRNS))
image->storage_class=DirectClass;
image->matte=(((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) ||
((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) ||
(png_get_valid(ping,ping_info,PNG_INFO_tRNS))) ?
MagickTrue : MagickFalse;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Skipping PNG image data for scene %.20g",(double)
mng_info->scenes_found-1);
png_destroy_read_struct(&ping,&ping_info,&end_info);
#ifdef IMPNG_SETJMP_NOT_THREAD_SAFE
UnlockSemaphoreInfo(ping_semaphore);
#endif
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" exit ReadOnePNGImage().");
return(image);
}
status=SetImageExtent(image,image->columns,image->rows);
if (status != MagickFalse)
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
{
png_destroy_read_struct(&ping,&ping_info,&end_info);
#ifdef IMPNG_SETJMP_NOT_THREAD_SAFE
UnlockSemaphoreInfo(ping_semaphore);
#endif
return(DestroyImageList(image));
}
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reading PNG IDAT chunk(s)");
if (num_passes > 1)
pixel_info=AcquireVirtualMemory(image->rows,ping_rowbytes*
sizeof(*ping_pixels));
else
pixel_info=AcquireVirtualMemory(ping_rowbytes,sizeof(*ping_pixels));
if (pixel_info == (MemoryInfo *) NULL)
png_error(ping,"Memory allocation failed");
ping_pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Converting PNG pixels to pixel packets");
/*
Convert PNG pixels to pixel packets.
*/
{
MagickBooleanType
found_transparent_pixel;
found_transparent_pixel=MagickFalse;
if (image->storage_class == DirectClass)
{
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
png_error(ping,"Failed to allocate quantum_info");
(void) SetQuantumEndian(image,quantum_info,MSBEndian);
for (pass=0; pass < num_passes; pass++)
{
/*
Convert image to DirectClass pixel packets.
*/
image->matte=(((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) ||
((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) ||
(png_get_valid(ping,ping_info,PNG_INFO_tRNS))) ?
MagickTrue : MagickFalse;
for (y=0; y < (ssize_t) image->rows; y++)
{
if (num_passes > 1)
row_offset=ping_rowbytes*y;
else
row_offset=0;
png_read_row(ping,ping_pixels+row_offset,NULL);
if (pass < num_passes-1)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
else
{
if ((int) ping_color_type == PNG_COLOR_TYPE_GRAY)
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
GrayQuantum,ping_pixels+row_offset,exception);
else if ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA)
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
GrayAlphaQuantum,ping_pixels+row_offset,exception);
else if ((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA)
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
RGBAQuantum,ping_pixels+row_offset,exception);
else if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE)
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
IndexQuantum,ping_pixels+row_offset,exception);
else /* ping_color_type == PNG_COLOR_TYPE_RGB */
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
RGBQuantum,ping_pixels+row_offset,exception);
}
if (found_transparent_pixel == MagickFalse)
{
/* Is there a transparent pixel in the row? */
if (y== 0 && logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Looking for cheap transparent pixel");
for (x=(ssize_t) image->columns-1; x >= 0; x--)
{
if ((ping_color_type == PNG_COLOR_TYPE_RGBA ||
ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) &&
(GetPixelOpacity(q) != OpaqueOpacity))
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" ...got one.");
found_transparent_pixel = MagickTrue;
break;
}
if ((ping_color_type == PNG_COLOR_TYPE_RGB ||
ping_color_type == PNG_COLOR_TYPE_GRAY) &&
(ScaleQuantumToShort(GetPixelRed(q))
== transparent_color.red &&
ScaleQuantumToShort(GetPixelGreen(q))
== transparent_color.green &&
ScaleQuantumToShort(GetPixelBlue(q))
== transparent_color.blue))
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" ...got one.");
found_transparent_pixel = MagickTrue;
break;
}
q++;
}
}
if (num_passes == 1)
{
status=SetImageProgress(image,LoadImageTag,
(MagickOffsetType) y, image->rows);
if (status == MagickFalse)
break;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
if (y < (long) image->rows)
break;
if (num_passes != 1)
{
status=SetImageProgress(image,LoadImageTag,pass,num_passes);
if (status == MagickFalse)
break;
}
}
quantum_info=DestroyQuantumInfo(quantum_info);
}
else /* image->storage_class != DirectClass */
for (pass=0; pass < num_passes; pass++)
{
register Quantum
*r;
/*
Convert grayscale image to PseudoClass pixel packets.
*/
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Converting grayscale pixels to pixel packets");
image->matte=ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA ?
MagickTrue : MagickFalse;
quantum_scanline=(Quantum *) AcquireQuantumMemory(image->columns,
(image->matte ? 2 : 1)*sizeof(*quantum_scanline));
if (quantum_scanline == (Quantum *) NULL)
png_error(ping,"Memory allocation failed");
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
alpha;
if (num_passes > 1)
row_offset=ping_rowbytes*y;
else
row_offset=0;
png_read_row(ping,ping_pixels+row_offset,NULL);
if (pass < num_passes-1)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
p=ping_pixels+row_offset;
r=quantum_scanline;
switch (ping_bit_depth)
{
case 8:
{
if (ping_color_type == 4)
for (x=(ssize_t) image->columns-1; x >= 0; x--)
{
*r++=*p++;
/* In image.h, OpaqueOpacity is 0
* TransparentOpacity is QuantumRange
* In a PNG datastream, Opaque is QuantumRange
* and Transparent is 0.
*/
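/* Since OpaqueOpacity is 0, QuantumRange-OpaqueOpacity below is simply
 * QuantumRange: the test flags any pixel that is not fully opaque */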
alpha=ScaleCharToQuantum((unsigned char)*p++);
SetPixelAlpha(q,alpha);
if (alpha != QuantumRange-OpaqueOpacity)
found_transparent_pixel = MagickTrue;
q++;
}
else
for (x=(ssize_t) image->columns-1; x >= 0; x--)
*r++=*p++;
break;
}
case 16:
{
for (x=(ssize_t) image->columns-1; x >= 0; x--)
{
#if (MAGICKCORE_QUANTUM_DEPTH >= 16)
unsigned long
quantum;
if (image->colors > 256)
quantum=(((unsigned int) *p++) << 8);
else
quantum=0;
quantum|=(*p++);
*r=ScaleShortToQuantum(quantum);
r++;
if (ping_color_type == 4)
{
if (image->colors > 256)
quantum=(((unsigned int) *p++) << 8);
else
quantum=0;
quantum|=(*p++);
alpha=ScaleShortToQuantum(quantum);
SetPixelAlpha(q,alpha);
if (alpha != QuantumRange-OpaqueOpacity)
found_transparent_pixel = MagickTrue;
q++;
}
#else /* MAGICKCORE_QUANTUM_DEPTH == 8 */
*r++=(*p++);
p++; /* strip low byte */
if (ping_color_type == 4)
{
alpha=*p++;
SetPixelAlpha(q,alpha);
if (alpha != QuantumRange-OpaqueOpacity)
found_transparent_pixel = MagickTrue;
p++;
q++;
}
#endif
}
break;
}
default:
break;
}
/*
Transfer image scanline.
*/
r=quantum_scanline;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t index=ConstrainColormapIndex(image,(ssize_t) *r);
SetPixelRed(q,ClampToQuantum(image->colormap[index].red));
SetPixelGreen(q,ClampToQuantum(image->colormap[index].green));
SetPixelBlue(q,ClampToQuantum(image->colormap[index].blue));
SetPixelIndex(indexes+x,index);
r++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (num_passes == 1)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
quantum_scanline=(Quantum *) RelinquishMagickMemory(quantum_scanline);
if (y < (long) image->rows)
break;
if (num_passes != 1)
{
status=SetImageProgress(image,LoadImageTag,pass,num_passes);
if (status == MagickFalse)
break;
}
}
image->matte=found_transparent_pixel;
if (logging != MagickFalse)
{
if (found_transparent_pixel != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Found transparent pixel");
else
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" No transparent pixel was found");
ping_color_type&=0x03;
}
}
}
png_read_end(ping,end_info);
if (image_info->number_scenes != 0 && mng_info->scenes_found-1 <
(ssize_t) image_info->first_scene && image->delay != 0)
{
png_destroy_read_struct(&ping,&ping_info,&end_info);
pixel_info=RelinquishVirtualMemory(pixel_info);
if (AcquireImageColormap(image,2) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
(void) SetImageBackgroundColor(image);
#ifdef IMPNG_SETJMP_NOT_THREAD_SAFE
UnlockSemaphoreInfo(ping_semaphore);
#endif
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" exit ReadOnePNGImage() early.");
return(image);
}
if (png_get_valid(ping,ping_info,PNG_INFO_tRNS))
{
ClassType
storage_class;
/*
Image has a transparent background.
*/
storage_class=image->storage_class;
image->matte=MagickTrue;
/* Balfour fix from imagemagick discourse server, 5 Feb 2010 */
if (storage_class == PseudoClass)
{
if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE)
{
for (x=0; x < ping_num_trans; x++)
{
image->colormap[x].opacity =
ScaleCharToQuantum((unsigned char)(255-ping_trans_alpha[x]));
}
}
else if (ping_color_type == PNG_COLOR_TYPE_GRAY)
{
for (x=0; x < (int) image->colors; x++)
{
if (ScaleQuantumToShort(image->colormap[x].red) ==
transparent_color.opacity)
{
image->colormap[x].opacity = (Quantum) TransparentOpacity;
}
}
}
(void) SyncImage(image);
}
#if 1 /* Should have already been done above, but glennrp problem P10
* needs this.
*/
else
{
for (y=0; y < (ssize_t) image->rows; y++)
{
image->storage_class=storage_class;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
/* Caution: on a Q8 build, this does not distinguish between
* 16-bit colors that differ only in the low byte
*/
for (x=(ssize_t) image->columns-1; x >= 0; x--)
{
if (ScaleQuantumToShort(GetPixelRed(q))
== transparent_color.red &&
ScaleQuantumToShort(GetPixelGreen(q))
== transparent_color.green &&
ScaleQuantumToShort(GetPixelBlue(q))
== transparent_color.blue)
{
SetPixelOpacity(q,TransparentOpacity);
}
else
{
SetPixelOpacity(q,OpaqueOpacity);
}
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
}
#endif
image->storage_class=DirectClass;
}
for (j = 0; j < 2; j++)
{
if (j == 0)
status = png_get_text(ping,ping_info,&text,&num_text) != 0 ?
MagickTrue : MagickFalse;
else
status = png_get_text(ping,end_info,&text,&num_text) != 0 ?
MagickTrue : MagickFalse;
if (status != MagickFalse)
for (i=0; i < (ssize_t) num_text; i++)
{
/* Check for a profile */
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reading PNG text chunk");
if (strlen(text[i].key) > 16 &&
memcmp(text[i].key, "Raw profile type ",17) == 0)
{
const char
*value;
value=GetImageOption(image_info,"profile:skip");
if (IsOptionMember(text[i].key+17,value) == MagickFalse)
{
(void) Magick_png_read_raw_profile(ping,image,image_info,text,
(int) i);
num_raw_profiles++;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Read raw profile %s",text[i].key+17);
}
else
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Skipping raw profile %s",text[i].key+17);
}
}
else
{
char
*value;
length=text[i].text_length;
value=(char *) AcquireQuantumMemory(length+MaxTextExtent,
sizeof(*value));
if (value == (char *) NULL)
png_error(ping,"Memory allocation failed");
*value='\0';
(void) ConcatenateMagickString(value,text[i].text,length+2);
/* Don't save "density" or "units" property if we have a pHYs
* chunk
*/
if (!png_get_valid(ping,ping_info,PNG_INFO_pHYs) ||
(LocaleCompare(text[i].key,"density") != 0 &&
LocaleCompare(text[i].key,"units") != 0))
{
char
key[MaxTextExtent];
(void) FormatLocaleString(key,MaxTextExtent,"%s",text[i].key);
if (LocaleCompare(key,"version") == 0)
(void) FormatLocaleString(key,MagickPathExtent,"png:%s",
text[i].key);
(void) SetImageProperty(image,key,value);
}
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" length: %lu\n"
" Keyword: %s",
(unsigned long) length,
text[i].key);
}
value=DestroyString(value);
}
}
num_text_total += num_text;
}
#ifdef MNG_OBJECT_BUFFERS
/*
Store the object if necessary.
*/
if (object_id && !mng_info->frozen[object_id])
{
if (mng_info->ob[object_id] == (MngBuffer *) NULL)
{
/*
create a new object buffer.
*/
mng_info->ob[object_id]=(MngBuffer *)
AcquireMagickMemory(sizeof(MngBuffer));
if (mng_info->ob[object_id] != (MngBuffer *) NULL)
{
mng_info->ob[object_id]->image=(Image *) NULL;
mng_info->ob[object_id]->reference_count=1;
}
}
if ((mng_info->ob[object_id] == (MngBuffer *) NULL) ||
mng_info->ob[object_id]->frozen)
{
if (mng_info->ob[object_id] == (MngBuffer *) NULL)
png_error(ping,"Memory allocation failed");
if (mng_info->ob[object_id]->frozen)
png_error(ping,"Cannot overwrite frozen MNG object buffer");
}
else
{
if (mng_info->ob[object_id]->image != (Image *) NULL)
mng_info->ob[object_id]->image=DestroyImage
(mng_info->ob[object_id]->image);
mng_info->ob[object_id]->image=CloneImage(image,0,0,MagickTrue,
&image->exception);
if (mng_info->ob[object_id]->image != (Image *) NULL)
mng_info->ob[object_id]->image->file=(FILE *) NULL;
else
png_error(ping, "Cloning image for object buffer failed");
if (ping_width > 250000L || ping_height > 250000L)
png_error(ping,"PNG Image dimensions are too large.");
mng_info->ob[object_id]->width=ping_width;
mng_info->ob[object_id]->height=ping_height;
mng_info->ob[object_id]->color_type=ping_color_type;
mng_info->ob[object_id]->sample_depth=ping_bit_depth;
mng_info->ob[object_id]->interlace_method=ping_interlace_method;
mng_info->ob[object_id]->compression_method=
ping_compression_method;
mng_info->ob[object_id]->filter_method=ping_filter_method;
if (png_get_valid(ping,ping_info,PNG_INFO_PLTE))
{
png_colorp
plte;
/*
Copy the PLTE to the object buffer.
*/
png_get_PLTE(ping,ping_info,&plte,&number_colors);
mng_info->ob[object_id]->plte_length=number_colors;
for (i=0; i < number_colors; i++)
{
mng_info->ob[object_id]->plte[i]=plte[i];
}
}
else
mng_info->ob[object_id]->plte_length=0;
}
}
#endif
/* Set image->matte to MagickTrue if the input colortype supports
* alpha or if a valid tRNS chunk is present, no matter whether there
* is actual transparency present.
*/
image->matte=(((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) ||
((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) ||
(png_get_valid(ping,ping_info,PNG_INFO_tRNS))) ?
MagickTrue : MagickFalse;
if (image->matte != MagickFalse)
(void) SetImageStorageClass(image,DirectClass);
#if 0 /* I'm not sure what's wrong here but it does not work. */
if (image->matte != MagickFalse)
{
if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA)
(void) SetImageType(image,GrayscaleMatteType);
else if (ping_color_type == PNG_COLOR_TYPE_PALETTE)
(void) SetImageType(image,PaletteMatteType);
else
(void) SetImageType(image,TrueColorMatteType);
}
else
{
if (ping_color_type == PNG_COLOR_TYPE_GRAY)
(void) SetImageType(image,GrayscaleType);
else if (ping_color_type == PNG_COLOR_TYPE_PALETTE)
(void) SetImageType(image,PaletteType);
else
(void) SetImageType(image,TrueColorType);
}
#endif
/* Set more properties for identify to retrieve */
{
char
msg[MaxTextExtent];
if (num_text_total != 0)
{
/* libpng doesn't tell us whether they were tEXt, zTXt, or iTXt */
(void) FormatLocaleString(msg,MaxTextExtent,
"%d tEXt/zTXt/iTXt chunks were found", num_text_total);
(void) SetImageProperty(image,"png:text",msg);
}
if (num_raw_profiles != 0)
{
(void) FormatLocaleString(msg,MaxTextExtent,
"%d were found", num_raw_profiles);
(void) SetImageProperty(image,"png:text-encoded profiles",msg);
}
/* cHRM chunk: */
if (ping_found_cHRM != MagickFalse)
{
(void) FormatLocaleString(msg,MaxTextExtent,"%s",
"chunk was found (see Chromaticity, above)");
(void) SetImageProperty(image,"png:cHRM",msg);
}
/* bKGD chunk: */
if (png_get_valid(ping,ping_info,PNG_INFO_bKGD))
{
(void) FormatLocaleString(msg,MaxTextExtent,"%s",
"chunk was found (see Background color, above)");
(void) SetImageProperty(image,"png:bKGD",msg);
}
(void) FormatLocaleString(msg,MaxTextExtent,"%s",
"chunk was found");
/* iCCP chunk: */
if (ping_found_iCCP != MagickFalse)
(void) SetImageProperty(image,"png:iCCP",msg);
if (png_get_valid(ping,ping_info,PNG_INFO_tRNS))
(void) SetImageProperty(image,"png:tRNS",msg);
#if defined(PNG_sRGB_SUPPORTED)
/* sRGB chunk: */
if (ping_found_sRGB != MagickFalse)
{
(void) FormatLocaleString(msg,MaxTextExtent,
"intent=%d (%s)",
(int) intent,
Magick_RenderingIntentString_from_PNG_RenderingIntent(intent));
(void) SetImageProperty(image,"png:sRGB",msg);
}
#endif
/* gAMA chunk: */
if (ping_found_gAMA != MagickFalse)
{
(void) FormatLocaleString(msg,MaxTextExtent,
"gamma=%.8g (See Gamma, above)", file_gamma);
(void) SetImageProperty(image,"png:gAMA",msg);
}
#if defined(PNG_pHYs_SUPPORTED)
/* pHYs chunk: */
if (png_get_valid(ping,ping_info,PNG_INFO_pHYs))
{
(void) FormatLocaleString(msg,MaxTextExtent,
"x_res=%.10g, y_res=%.10g, units=%d",
(double) x_resolution,(double) y_resolution, unit_type);
(void) SetImageProperty(image,"png:pHYs",msg);
}
#endif
#if defined(PNG_oFFs_SUPPORTED)
/* oFFs chunk: */
if (png_get_valid(ping,ping_info,PNG_INFO_oFFs))
{
(void) FormatLocaleString(msg,MaxTextExtent,"x_off=%.20g, y_off=%.20g",
(double) image->page.x,(double) image->page.y);
(void) SetImageProperty(image,"png:oFFs",msg);
}
#endif
#if defined(PNG_tIME_SUPPORTED)
read_tIME_chunk(image,ping,end_info);
#endif
/* caNv chunk: */
if ((image->page.width != 0 && image->page.width != image->columns) ||
(image->page.height != 0 && image->page.height != image->rows) ||
(image->page.x != 0 || image->page.y != 0))
{
(void) FormatLocaleString(msg,MaxTextExtent,
"width=%.20g, height=%.20g, x_offset=%.20g, y_offset=%.20g",
(double) image->page.width,(double) image->page.height,
(double) image->page.x,(double) image->page.y);
(void) SetImageProperty(image,"png:caNv",msg);
}
}
/*
Relinquish resources.
*/
png_destroy_read_struct(&ping,&ping_info,&end_info);
pixel_info=RelinquishVirtualMemory(pixel_info);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" exit ReadOnePNGImage()");
#ifdef IMPNG_SETJMP_NOT_THREAD_SAFE
UnlockSemaphoreInfo(ping_semaphore);
#endif
/* } for navigation to beginning of SETJMP-protected block, revert to
* Throwing an Exception when an error occurs.
*/
return(image);
/* end of reading one PNG image */
}

| target: 0 | cwe: ["CWE-125"] | project: ImageMagick6 | commit_id: 34adc98afd5c7e7fb774d2ebdaea39e831c24dce | hash: 192,187,653,587,284,440,000,000,000,000,000,000,000 | size: 2,060 |
message: https://github.com/ImageMagick/ImageMagick/issues/1561

static void get_current_db()
{
MYSQL_RES *res;
/* If one_database is set, current_db is not supposed to change. */
if (one_database)
return;
my_free(current_db);
current_db= NULL;
/* In case of error below current_db will be NULL */
if (!mysql_query(&mysql, "SELECT DATABASE()") &&
(res= mysql_use_result(&mysql)))
{
MYSQL_ROW row= mysql_fetch_row(res);
if (row && row[0])
current_db= my_strdup(row[0], MYF(MY_WME));
mysql_free_result(res);
}
}

| target: 0 | cwe: ["CWE-295"] | project: mysql-server | commit_id: b3e9211e48a3fb586e88b0270a175d2348935424 | hash: 277,419,222,766,863,900,000,000,000,000,000,000,000 | size: 20 |
message: WL#9072: Backport WL#8785 to 5.5

save_image (const gchar *filename,
gint32 image,
gint32 layer,
GError **error)
{
FILE *fp;
GimpPixelRgn pixel_rgn;
GimpDrawable *drawable;
GimpImageType drawable_type;
guchar *cmap= NULL;
guchar *pixels;
gint offset_x, offset_y;
guint width, height;
gint colors, i;
guint8 header_buf[128];
drawable = gimp_drawable_get (layer);
drawable_type = gimp_drawable_type (layer);
gimp_drawable_offsets (layer, &offset_x, &offset_y);
width = drawable->width;
height = drawable->height;
gimp_pixel_rgn_init (&pixel_rgn, drawable, 0, 0, width, height, FALSE, FALSE);
gimp_progress_init_printf (_("Saving '%s'"),
gimp_filename_to_utf8 (filename));
pcx_header.manufacturer = 0x0a;
pcx_header.version = 5;
pcx_header.compression = 1;
switch (drawable_type)
{
case GIMP_INDEXED_IMAGE:
cmap = gimp_image_get_colormap (image, &colors);
pcx_header.bpp = 8;
pcx_header.bytesperline = GUINT16_TO_LE (width);
pcx_header.planes = 1;
pcx_header.color = GUINT16_TO_LE (1);
break;
case GIMP_RGB_IMAGE:
pcx_header.bpp = 8;
pcx_header.planes = 3;
pcx_header.color = GUINT16_TO_LE (1);
pcx_header.bytesperline = GUINT16_TO_LE (width);
break;
case GIMP_GRAY_IMAGE:
pcx_header.bpp = 8;
pcx_header.planes = 1;
pcx_header.color = GUINT16_TO_LE (2);
pcx_header.bytesperline = GUINT16_TO_LE (width);
break;
default:
g_message (_("Cannot save images with alpha channel."));
return FALSE;
}
if ((fp = g_fopen (filename, "wb")) == NULL)
{
g_set_error (error, G_FILE_ERROR, g_file_error_from_errno (errno),
_("Could not open '%s' for writing: %s"),
gimp_filename_to_utf8 (filename), g_strerror (errno));
return FALSE;
}
pixels = (guchar *) g_malloc (width * height * pcx_header.planes);
gimp_pixel_rgn_get_rect (&pixel_rgn, pixels, 0, 0, width, height);
if ((offset_x < 0) || (offset_x > (1<<16)))
{
g_message (_("Invalid X offset: %d"), offset_x);
return FALSE;
}
if ((offset_y < 0) || (offset_y > (1<<16)))
{
g_message (_("Invalid Y offset: %d"), offset_y);
return FALSE;
}
if (offset_x + width - 1 > (1<<16))
{
g_message (_("Right border out of bounds (must be < %d): %d"), (1<<16),
offset_x + width - 1);
return FALSE;
}
if (offset_y + height - 1 > (1<<16))
{
g_message (_("Bottom border out of bounds (must be < %d): %d"), (1<<16),
offset_y + height - 1);
return FALSE;
}
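/* The PCX header stores these window coordinates as 16-bit fields
 * (see the GUINT16_TO_LE casts below), hence the range checks above */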
pcx_header.x1 = GUINT16_TO_LE ((guint16)offset_x);
pcx_header.y1 = GUINT16_TO_LE ((guint16)offset_y);
pcx_header.x2 = GUINT16_TO_LE ((guint16)(offset_x + width - 1));
pcx_header.y2 = GUINT16_TO_LE ((guint16)(offset_y + height - 1));
pcx_header.hdpi = GUINT16_TO_LE (300);
pcx_header.vdpi = GUINT16_TO_LE (300);
pcx_header.reserved = 0;
pcx_header_to_buffer (header_buf);
fwrite (header_buf, 128, 1, fp);
switch (drawable_type)
{
case GIMP_INDEXED_IMAGE:
save_8 (fp, width, height, pixels);
fputc (0x0c, fp);
fwrite (cmap, colors, 3, fp);
for (i = colors; i < 256; i++)
{
fputc (0, fp);
fputc (0, fp);
fputc (0, fp);
}
break;
case GIMP_RGB_IMAGE:
save_24 (fp, width, height, pixels);
break;
case GIMP_GRAY_IMAGE:
save_8 (fp, width, height, pixels);
fputc (0x0c, fp);
for (i = 0; i < 256; i++)
{
fputc ((guchar) i, fp);
fputc ((guchar) i, fp);
fputc ((guchar) i, fp);
}
break;
default:
return FALSE;
}
gimp_drawable_detach (drawable);
g_free (pixels);
if (fclose (fp) != 0)
{
g_set_error (error, G_FILE_ERROR, g_file_error_from_errno (errno),
_("Writing to file '%s' failed: %s"),
gimp_filename_to_utf8 (filename), g_strerror (errno));
return FALSE;
}
return TRUE;
}

| target: 0 | cwe: ["CWE-190"] | project: gimp | commit_id: a9671395f6573e90316a9d748588c5435216f6ce | hash: 295,418,952,633,045,530,000,000,000,000,000,000,000 | size: 155 |
message: PCX: Avoid allocation overflows.
Multiplying gint values may overflow unless cast into a larger type.
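The message above describes the fix in one line. As a hedged illustration of the pattern it warns about (the helper name is hypothetical and this is not code from the actual patch), the unsafe and safe forms look like this:

```c
#include <glib.h>

/* Hypothetical helper, for illustration only. With 32-bit gint, the
 * product width * height * planes is evaluated in 32-bit arithmetic
 * and can wrap before g_malloc() ever sees the result. Casting one
 * operand to gsize first widens the whole multiplication (to 64 bits
 * on LP64 platforms). */
static gpointer
alloc_pixel_buffer (gint width, gint height, gint planes)
{
  /* overflow-prone form: the 32-bit product may wrap silently */
  /* return g_malloc (width * height * planes); */

  /* overflow-safe form: widen before multiplying */
  return g_malloc ((gsize) width * height * planes);
}
```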
void ext4_ind_truncate(handle_t *handle, struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
__le32 *i_data = ei->i_data;
int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
ext4_lblk_t offsets[4];
Indirect chain[4];
Indirect *partial;
__le32 nr = 0;
int n = 0;
ext4_lblk_t last_block, max_block;
unsigned blocksize = inode->i_sb->s_blocksize;
last_block = (inode->i_size + blocksize-1)
>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
if (last_block != max_block) {
n = ext4_block_to_path(inode, last_block, offsets, NULL);
if (n == 0)
return;
}
ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);
/*
* The orphan list entry will now protect us from any crash which
* occurs before the truncate completes, so it is now safe to propagate
* the new, shorter inode size (held for now in i_size) into the
* on-disk inode. We do this via i_disksize, which is the value which
* ext4 *really* writes onto the disk inode.
*/
ei->i_disksize = inode->i_size;
if (last_block == max_block) {
/*
* It is unnecessary to free any data blocks if last_block is
* equal to the indirect block limit.
*/
return;
} else if (n == 1) { /* direct blocks */
ext4_free_data(handle, inode, NULL, i_data+offsets[0],
i_data + EXT4_NDIR_BLOCKS);
goto do_indirects;
}
partial = ext4_find_shared(inode, n, offsets, chain, &nr);
/* Kill the top of shared branch (not detached) */
if (nr) {
if (partial == chain) {
/* Shared branch grows from the inode */
ext4_free_branches(handle, inode, NULL,
&nr, &nr+1, (chain+n-1) - partial);
*partial->p = 0;
/*
* We mark the inode dirty prior to restart,
* and prior to stop. No need for it here.
*/
} else {
/* Shared branch grows from an indirect block */
BUFFER_TRACE(partial->bh, "get_write_access");
ext4_free_branches(handle, inode, partial->bh,
partial->p,
partial->p+1, (chain+n-1) - partial);
}
}
/* Clear the ends of indirect blocks on the shared branch */
while (partial > chain) {
ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
(__le32*)partial->bh->b_data+addr_per_block,
(chain+n-1) - partial);
BUFFER_TRACE(partial->bh, "call brelse");
brelse(partial->bh);
partial--;
}
do_indirects:
/* Kill the remaining (whole) subtrees */
switch (offsets[0]) {
default:
nr = i_data[EXT4_IND_BLOCK];
if (nr) {
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
i_data[EXT4_IND_BLOCK] = 0;
}
/* fall through */
case EXT4_IND_BLOCK:
nr = i_data[EXT4_DIND_BLOCK];
if (nr) {
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
i_data[EXT4_DIND_BLOCK] = 0;
}
/* fall through */
case EXT4_DIND_BLOCK:
nr = i_data[EXT4_TIND_BLOCK];
if (nr) {
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
i_data[EXT4_TIND_BLOCK] = 0;
}
/* fall through */
case EXT4_TIND_BLOCK:
;
}
}

| target: 0 | cwe: ["CWE-703"] | project: linux | commit_id: ce9f24cccdc019229b70a5c15e2b09ad9c0ab5d1 | hash: 175,627,083,423,070,740,000,000,000,000,000,000,000 | size: 104 |
message: ext4: check journal inode extents more carefully
Currently, system zones just track ranges of block, that are "important"
fs metadata (bitmaps, group descriptors, journal blocks, etc.). This
however complicates how extent tree (or indirect blocks) can be checked
for inodes that actually track such metadata - currently the journal
inode but arguably we should be treating quota files or resize inode
similarly. We cannot run __ext4_ext_check() on such metadata inodes when
loading their extents as that would immediately trigger the validity
checks and so we just hack around that and special-case the journal
inode. This however leads to a situation that a journal inode which has
extent tree of depth at least one can have invalid extent tree that gets
unnoticed until ext4_cache_extents() crashes.
To overcome this limitation, track inode number each system zone belongs
to (0 is used for zones not belonging to any inode). We can then verify
inode number matches the expected one when verifying extent tree and
thus avoid the false errors. With this there's no need to
special-case the journal inode during extent tree checking anymore, so remove
it.
Fixes: 0a944e8a6c66 ("ext4: don't perform block validity checks on the journal inode")
Reported-by: Wolfgang Frisch <[email protected]>
Reviewed-by: Lukas Czerner <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Theodore Ts'o <[email protected]> |
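A hedged sketch of the scheme this message describes; the struct and helper names are illustrative, not the actual ext4 code, and only the idea (each system zone records an owner inode, with 0 meaning generic metadata) comes from the commit text:
struct ext4_system_zone_sketch {
	unsigned long long start_blk;	/* first block of the zone */
	unsigned int count;		/* number of blocks in the zone */
	unsigned long ino;		/* owner inode, 0 = not inode-owned */
};
/* Returns 1 when [start_blk, start_blk + count) is acceptable for inode
 * `ino`: any overlap with a system zone is allowed only if that zone is
 * owned by the same inode. */
static int sketch_inode_block_valid(const struct ext4_system_zone_sketch *z,
				    unsigned int nzones,
				    unsigned long long start_blk,
				    unsigned int count, unsigned long ino)
{
	unsigned int i;
	for (i = 0; i < nzones; i++) {
		if (start_blk + count <= z[i].start_blk ||
		    start_blk >= z[i].start_blk + z[i].count)
			continue;	/* no overlap with this zone */
		if (z[i].ino != ino)
			return 0;	/* overlaps metadata owned elsewhere */
	}
	return 1;
}
With this shape, the journal inode's own extents overlap only zones tagged with the journal's inode number, so the generic extent checks no longer report false errors for it.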
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
spin_lock(&cifs_file_list_lock);
cifsFileInfo_get_locked(cifs_file);
spin_unlock(&cifs_file_list_lock);
return cifs_file;
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | 5d81de8e8667da7135d3a32a964087c0faf5483f | 104,735,547,185,861,280,000,000,000,000,000,000,000 | 7 | cifs: ensure that uncached writes handle unmapped areas correctly
It's possible for userland to pass down an iovec via writev() that has a
bogus user pointer in it. If that happens and we're doing an uncached
write, then we can end up getting less bytes than we expect from the
call to iov_iter_copy_from_user. This is CVE-2014-0069
cifs_iovec_write isn't set up to handle that situation however. It'll
blindly keep chugging through the page array and not filling those pages
with anything useful. Worse yet, we'll later end up with a negative
number in wdata->tailsz, which will confuse the sending routines and
cause an oops at the very least.
Fix this by having the copy phase of cifs_iovec_write stop copying data
in this situation and send the last write as a short one. At the same
time, we want to avoid sending a zero-length write to the server, so
break out of the loop and set rc to -EFAULT if that happens. This also
allows us to handle the case where no address in the iovec is valid.
[Note: Marking this for stable on v3.4+ kernels, but kernels as old as
v2.6.38 may have a similar problem and may need similar fix]
Cc: <[email protected]> # v3.4+
Reviewed-by: Pavel Shilovsky <[email protected]>
Reported-by: Al Viro <[email protected]>
Signed-off-by: Jeff Layton <[email protected]>
Signed-off-by: Steve French <[email protected]> |
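A kernel-context sketch of the copy-phase behavior the message describes (the helper name and page layout are hypothetical; cifs_iovec_write itself is more involved). copy_from_user() returns the number of bytes it could not copy, so a nonzero return signals an unmapped area:
static ssize_t fill_pages_sketch(char **pages, unsigned long npages,
				 size_t page_sz, const char __user *ubuf,
				 size_t len)
{
	size_t copied = 0;
	unsigned long i;
	for (i = 0; i < npages && copied < len; i++) {
		size_t want = min(len - copied, page_sz);
		size_t left = copy_from_user(pages[i], ubuf + copied, want);
		copied += want - left;
		if (left)
			break;	/* unmapped area: stop, send a short write */
	}
	/* short write if anything was copied; -EFAULT if nothing was valid */
	return copied ? (ssize_t)copied : -EFAULT;
}
Stopping at the fault keeps the tail size nonnegative, and the -EFAULT branch covers the case where no address in the iovec is valid.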
static inline struct timespec64 ep_set_mstimeout(long ms)
{
struct timespec64 now, ts = {
.tv_sec = ms / MSEC_PER_SEC,
.tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
};
ktime_get_ts64(&now);
return timespec64_add_safe(now, ts);
} | 0 | [
"CWE-416"
]
| linux | a9ed4a6560b8562b7e2e2bed9527e88001f7b682 | 234,374,853,771,187,830,000,000,000,000,000,000,000 | 10 | epoll: Keep a reference on files added to the check list
When adding a new fd to an epoll, and that this new fd is an
epoll fd itself, we recursively scan the fds attached to it
to detect cycles, and add non-epoll files to a "check list"
that gets subsequently parsed.
However, this check list isn't completely safe when deletions
can happen concurrently. To sidestep the issue, make sure that
a struct file placed on the check list sees its f_count increased,
ensuring that a concurrent deletion won't result in the file
disappearing from under our feet.
Cc: [email protected]
Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Al Viro <[email protected]> |
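A kernel-context sketch of the refcounting idea (the struct and field names are illustrative, not the epoll internals): pin each file before it sits on the check list, and drop the reference when draining the list, so a concurrent close() cannot free it mid-scan:
struct epitem_sketch {
	struct list_head check_link;
	struct file *file;
};
static void check_list_add_sketch(struct list_head *check_list,
				  struct epitem_sketch *epi)
{
	get_file(epi->file);	/* bump f_count while the entry is listed */
	list_add(&epi->check_link, check_list);
}
static void check_list_drain_sketch(struct list_head *check_list)
{
	struct epitem_sketch *epi, *tmp;
	list_for_each_entry_safe(epi, tmp, check_list, check_link) {
		list_del(&epi->check_link);
		fput(epi->file);	/* release the pinned reference */
	}
}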
void t_go_generator::generate_process_function(t_service* tservice, t_function* tfunction) {
// Open function
string processorName = privatize(tservice->get_name()) + "Processor"
+ publicize(tfunction->get_name());
string argsname = publicize(tfunction->get_name() + "_args", true);
string resultname = publicize(tfunction->get_name() + "_result", true);
// t_struct* xs = tfunction->get_xceptions();
// const std::vector<t_field*>& xceptions = xs->get_members();
vector<t_field*>::const_iterator x_iter;
f_types_ << indent() << "type " << processorName << " struct {" << endl;
f_types_ << indent() << " handler " << publicize(tservice->get_name()) << endl;
f_types_ << indent() << "}" << endl << endl;
f_types_ << indent() << "func (p *" << processorName
<< ") Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err "
"thrift.TException) {" << endl;
indent_up();
f_types_ << indent() << "args := " << argsname << "{}" << endl;
f_types_ << indent() << "if err = args." << read_method_name_ << "(iprot); err != nil {" << endl;
f_types_ << indent() << " iprot.ReadMessageEnd()" << endl;
if (!tfunction->is_oneway()) {
f_types_ << indent()
<< " x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())"
<< endl;
f_types_ << indent() << " oprot.WriteMessageBegin(\"" << escape_string(tfunction->get_name())
<< "\", thrift.EXCEPTION, seqId)" << endl;
f_types_ << indent() << " x.Write(oprot)" << endl;
f_types_ << indent() << " oprot.WriteMessageEnd()" << endl;
f_types_ << indent() << " oprot.Flush()" << endl;
}
f_types_ << indent() << " return false, err" << endl;
f_types_ << indent() << "}" << endl << endl;
f_types_ << indent() << "iprot.ReadMessageEnd()" << endl;
if (!tfunction->is_oneway()) {
f_types_ << indent() << "result := " << resultname << "{}" << endl;
}
bool need_reference = type_need_reference(tfunction->get_returntype());
if (!tfunction->is_oneway() && !tfunction->get_returntype()->is_void()) {
f_types_ << "var retval " << type_to_go_type(tfunction->get_returntype()) << endl;
}
f_types_ << indent() << "var err2 error" << endl;
f_types_ << indent() << "if ";
if (!tfunction->is_oneway()) {
if (!tfunction->get_returntype()->is_void()) {
f_types_ << "retval, ";
}
}
// Generate the function call
t_struct* arg_struct = tfunction->get_arglist();
const std::vector<t_field*>& fields = arg_struct->get_members();
vector<t_field*>::const_iterator f_iter;
f_types_ << "err2 = p.handler." << publicize(tfunction->get_name()) << "(";
bool first = true;
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
if (first) {
first = false;
} else {
f_types_ << ", ";
}
f_types_ << "args." << publicize((*f_iter)->get_name());
}
f_types_ << "); err2 != nil {" << endl;
t_struct* exceptions = tfunction->get_xceptions();
const vector<t_field*>& x_fields = exceptions->get_members();
if (!x_fields.empty()) {
f_types_ << indent() << "switch v := err2.(type) {" << endl;
vector<t_field*>::const_iterator xf_iter;
for (xf_iter = x_fields.begin(); xf_iter != x_fields.end(); ++xf_iter) {
f_types_ << indent() << " case " << type_to_go_type(((*xf_iter)->get_type())) << ":"
<< endl;
f_types_ << indent() << "result." << publicize((*xf_iter)->get_name()) << " = v" << endl;
}
f_types_ << indent() << " default:" << endl;
}
if (!tfunction->is_oneway()) {
f_types_ << indent() << " x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "
"\"Internal error processing " << escape_string(tfunction->get_name())
<< ": \" + err2.Error())" << endl;
f_types_ << indent() << " oprot.WriteMessageBegin(\"" << escape_string(tfunction->get_name())
<< "\", thrift.EXCEPTION, seqId)" << endl;
f_types_ << indent() << " x.Write(oprot)" << endl;
f_types_ << indent() << " oprot.WriteMessageEnd()" << endl;
f_types_ << indent() << " oprot.Flush()" << endl;
}
f_types_ << indent() << " return true, err2" << endl;
if (!x_fields.empty()) {
f_types_ << indent() << "}" << endl;
}
f_types_ << indent() << "}"; // closes err2 != nil
if (!tfunction->is_oneway()) {
if (!tfunction->get_returntype()->is_void()) {
f_types_ << " else {" << endl; // make sure we set Success retval only on success
indent_up();
f_types_ << indent() << "result.Success = ";
if (need_reference) {
f_types_ << "&";
}
f_types_ << "retval" << endl;
indent_down();
f_types_ << "}" << endl;
} else {
f_types_ << endl;
}
f_types_ << indent() << "if err2 = oprot.WriteMessageBegin(\""
<< escape_string(tfunction->get_name()) << "\", thrift.REPLY, seqId); err2 != nil {"
<< endl;
f_types_ << indent() << " err = err2" << endl;
f_types_ << indent() << "}" << endl;
f_types_ << indent() << "if err2 = result." << write_method_name_ << "(oprot); err == nil && err2 != nil {" << endl;
f_types_ << indent() << " err = err2" << endl;
f_types_ << indent() << "}" << endl;
f_types_ << indent() << "if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {"
<< endl;
f_types_ << indent() << " err = err2" << endl;
f_types_ << indent() << "}" << endl;
f_types_ << indent() << "if err2 = oprot.Flush(); err == nil && err2 != nil {" << endl;
f_types_ << indent() << " err = err2" << endl;
f_types_ << indent() << "}" << endl;
f_types_ << indent() << "if err != nil {" << endl;
f_types_ << indent() << " return" << endl;
f_types_ << indent() << "}" << endl;
f_types_ << indent() << "return true, err" << endl;
} else {
f_types_ << endl;
f_types_ << indent() << "return true, nil" << endl;
}
indent_down();
f_types_ << indent() << "}" << endl << endl;
} | 0 | [
"CWE-77"
]
| thrift | 2007783e874d524a46b818598a45078448ecc53e | 152,638,797,856,737,910,000,000,000,000,000,000,000 | 144 | THRIFT-3893 Command injection in format_go_output
Client: Go
Patch: Jens Geyer |
static int jpc_dec_cp_setfromqcc(jpc_dec_cp_t *cp, jpc_qcc_t *qcc)
{
return jpc_dec_cp_setfromqcx(cp, &cp->ccps[qcc->compno], &qcc->compparms, JPC_QCC);
} | 0 | [
"CWE-189"
]
| jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 29,863,001,950,920,030,000,000,000,000,000,000,000 | 4 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
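A minimal overflow-checked allocator in the spirit described above (the signature is assumed, not copied from JasPer): reject the request when the multiplication would wrap, rather than letting malloc see a small, wrapped size:
#include <stdint.h>
#include <stdlib.h>
static void *alloc2_sketch(size_t num, size_t size)
{
	if (size != 0 && num > SIZE_MAX / size)
		return NULL;	/* num * size would overflow size_t */
	return malloc(num * size);
}
A caller such as alloc2_sketch(numrows, sizeof(row_t)) then fails cleanly on hostile dimensions instead of receiving an undersized buffer.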
DnD_TransportReqPacket(DnDTransportBuffer *buf, // IN
DnDTransportPacketHeader **packet) // OUT
{
*packet = Util_SafeMalloc(DND_TRANSPORT_PACKET_HEADER_SIZE);
(*packet)->type = DND_TRANSPORT_PACKET_TYPE_REQUEST;
(*packet)->seqNum = buf->seqNum;
(*packet)->totalSize = buf->totalSize;
(*packet)->payloadSize = 0;
(*packet)->offset = buf->offset;
return DND_TRANSPORT_PACKET_HEADER_SIZE;
} | 0 | []
| open-vm-tools | e88f91b00a715b79255de6576506d80ecfdb064c | 224,912,477,020,237,160,000,000,000,000,000,000,000 | 12 | Fix possible security issue with the permissions of the intermediate
staging directory and path
/tmp/VMwareDnD is a staging directory used for DnD and CnP. It should be
a regular directory, but malicious code or user may create the /tmp/VMwareDnD
as a symbolic link which points elsewhere on the system. This may provide
user access to user B's files.
Do not set the permission of the root directory if the root directory
already exists and has the wrong permission. The permission of the directory
must be 1777 if it is created by the VMware Tools. If not, then the directory
has been created or modified by malicious code or user, so just cancel the
host to guest DnD or CnP operation. |
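A userspace sketch of that policy (the return-code convention and helper name are mine, not the open-vm-tools API): accept the staging directory only if it is a real directory, not a symlink, with exactly mode 1777; otherwise refuse rather than "repair" attacker-controlled permissions:
#include <sys/types.h>
#include <sys/stat.h>
/* 1 = usable, 0 = absent (caller may create it 1777), -1 = refuse */
static int staging_dir_ok_sketch(const char *path)
{
	struct stat st;
	if (lstat(path, &st) != 0)
		return 0;
	if (!S_ISDIR(st.st_mode))
		return -1;	/* symlink or regular file: refuse */
	if ((st.st_mode & 07777) != 01777)
		return -1;	/* tampered permissions: cancel the operation */
	return 1;
}
lstat() (rather than stat()) is what catches the symlink case, since it reports on the link itself instead of its target.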
Result visit_bool(bool value) {
return FMT_DISPATCH(visit_any_int(value));
} | 0 | [
"CWE-134",
"CWE-119",
"CWE-787"
]
| fmt | 8cf30aa2be256eba07bb1cefb998c52326e846e7 | 278,375,760,972,488,200,000,000,000,000,000,000,000 | 3 | Fix segfault on complex pointer formatting (#642) |
static int lua_ap_started(request_rec* r)
{
return (int)(ap_scoreboard_image->global->restart_time / 1000000);
} | 0 | [
"CWE-20"
]
| httpd | 78eb3b9235515652ed141353d98c239237030410 | 86,180,124,121,521,690,000,000,000,000,000,000,000 | 4 | *) SECURITY: CVE-2015-0228 (cve.mitre.org)
mod_lua: A maliciously crafted websockets PING after a script
calls r:wsupgrade() can cause a child process crash.
[Edward Lu <Chaosed0 gmail.com>]
Discovered by Guido Vranken <guidovranken gmail.com>
Submitted by: Edward Lu
Committed by: covener
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1657261 13f79535-47bb-0310-9956-ffa450edef68 |
static noinline size_t if_nlmsg_size(const struct net_device *dev,
u32 ext_filter_mask)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
+ nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
+ nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
+ nla_total_size(sizeof(struct rtnl_link_ifmap))
+ nla_total_size(sizeof(struct rtnl_link_stats))
+ nla_total_size(sizeof(struct rtnl_link_stats64))
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
+ nla_total_size(4) /* IFLA_TXQLEN */
+ nla_total_size(4) /* IFLA_WEIGHT */
+ nla_total_size(4) /* IFLA_MTU */
+ nla_total_size(4) /* IFLA_LINK */
+ nla_total_size(4) /* IFLA_MASTER */
+ nla_total_size(1) /* IFLA_CARRIER */
+ nla_total_size(4) /* IFLA_PROMISCUITY */
+ nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
+ nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
+ nla_total_size(4) /* IFLA_MAX_GSO_SEGS */
+ nla_total_size(4) /* IFLA_MAX_GSO_SIZE */
+ nla_total_size(1) /* IFLA_OPERSTATE */
+ nla_total_size(1) /* IFLA_LINKMODE */
+ nla_total_size(4) /* IFLA_CARRIER_CHANGES */
+ nla_total_size(4) /* IFLA_LINK_NETNSID */
+ nla_total_size(ext_filter_mask
& RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
+ rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
+ rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ rtnl_link_get_size(dev) /* IFLA_LINKINFO */
+ rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
+ nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
+ nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
+ nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
+ nla_total_size(1); /* IFLA_PROTO_DOWN */
} | 0 | [
"CWE-200"
]
| net | 5f8e44741f9f216e33736ea4ec65ca9ac03036e6 | 153,665,921,866,401,940,000,000,000,000,000,000,000 | 39 | net: fix infoleak in rtnetlink
The stack object "map" has a total size of 32 bytes. Its last 4
bytes are padding generated by the compiler. These padding bytes are
not initialized and are sent out via "nla_put".
Signed-off-by: Kangjie Lu <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
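The pattern of the fix, sketched in kernel terms (close to what the message describes, though not copied verbatim from rtnetlink.c): zero the whole stack object before filling it, so compiler padding never reaches userspace through nla_put():
static int put_ifmap_sketch(struct sk_buff *skb, const struct net_device *dev)
{
	struct rtnl_link_ifmap map;
	memset(&map, 0, sizeof(map));	/* clears the padding bytes too */
	map.mem_start = dev->mem_start;
	map.mem_end   = dev->mem_end;
	map.base_addr = dev->base_addr;
	map.irq       = dev->irq;
	map.dma       = dev->dma;
	map.port      = dev->if_port;
	return nla_put(skb, IFLA_MAP, sizeof(map), &map);
}
Per-field assignment plus memset is preferred over struct-literal initialization here because `= { 0 }` is not guaranteed to zero padding bytes.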
static void splashOutBlendColorBurn(SplashColorPtr src, SplashColorPtr dest,
SplashColorPtr blend, SplashColorMode cm) {
int i, x;
#ifdef SPLASH_CMYK
if (cm == splashModeCMYK8 || cm == splashModeDeviceN8) {
for (i = 0; i < splashColorModeNComps[cm]; ++i) {
dest[i] = 255 - dest[i];
src[i] = 255 - src[i];
}
}
#endif
{
for (i = 0; i < splashColorModeNComps[cm]; ++i) {
if (src[i] == 0) {
blend[i] = 0;
} else {
x = ((255 - dest[i]) * 255) / src[i];
blend[i] = x <= 255 ? 255 - x : 0;
}
}
}
#ifdef SPLASH_CMYK
if (cm == splashModeCMYK8 || cm == splashModeDeviceN8) {
for (i = 0; i < splashColorModeNComps[cm]; ++i) {
dest[i] = 255 - dest[i];
src[i] = 255 - src[i];
blend[i] = 255 - blend[i];
}
}
#endif
} | 0 | [
"CWE-369"
]
| poppler | b224e2f5739fe61de9fa69955d016725b2a4b78d | 312,558,514,320,506,350,000,000,000,000,000,000,000 | 32 | SplashOutputDev::tilingPatternFill: Fix crash on broken file
Issue #802 |
static void lo_inode_put(struct lo_data *lo, struct lo_inode **inodep)
{
struct lo_inode *inode = *inodep;
if (!inode) {
return;
}
*inodep = NULL;
if (g_atomic_int_dec_and_test(&inode->refcount)) {
close(inode->fd);
free(inode);
}
} | 0 | []
| qemu | 6084633dff3a05d63176e06d7012c7e15aba15be | 24,330,626,135,040,710,000,000,000,000,000,000,000 | 15 | tools/virtiofsd: xattr name mappings: Add option
Add an option to define mappings of xattr names so that
the client and server filesystems see different views.
This can be used to have different SELinux mappings as
seen by the guest, to run virtiofsd with fewer privileges
(e.g. in a case where it can't set trusted/system/security
xattrs but you want the guest to be able to), or to isolate
multiple users of the same name; e.g. trusted attributes
used by stacking overlayfs.
A mapping engine is used with 3 simple rules; the rules can
be combined to allow most useful mapping scenarios.
The ruleset is defined by -o xattrmap='rules...'.
This patch doesn't use the rule maps yet.
Signed-off-by: Dr. David Alan Gilbert <[email protected]>
Message-Id: <[email protected]>
Reviewed-by: Stefan Hajnoczi <[email protected]>
Signed-off-by: Dr. David Alan Gilbert <[email protected]> |
static inline void percpu_modcopy(struct module *mod,
const void *from, unsigned long size)
{
/* pcpusec should be 0, and size of that section should be 0. */
BUG_ON(size != 0);
} | 0 | [
"CWE-362",
"CWE-347"
]
| linux | 0c18f29aae7ce3dadd26d8ee3505d07cc982df75 | 8,233,361,012,062,837,000,000,000,000,000,000,000 | 6 | module: limit enabling module.sig_enforce
Irrespective as to whether CONFIG_MODULE_SIG is configured, specifying
"module.sig_enforce=1" on the boot command line sets "sig_enforce".
Only allow "sig_enforce" to be set when CONFIG_MODULE_SIG is configured.
This patch makes the presence of /sys/module/module/parameters/sig_enforce
dependent on CONFIG_MODULE_SIG=y.
Fixes: fda784e50aac ("module: export module signature enforcement status")
Reported-by: Nayna Jain <[email protected]>
Tested-by: Mimi Zohar <[email protected]>
Tested-by: Jessica Yu <[email protected]>
Signed-off-by: Mimi Zohar <[email protected]>
Signed-off-by: Jessica Yu <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
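A sketch of the shape such gating takes (based on the commit description; treat the details as approximate): the parameter, and therefore its sysfs node, only exists when the kernel is built with module signature support:
#ifdef CONFIG_MODULE_SIG
static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
module_param(sig_enforce, bool_enable_only, 0644);
#endif
The bool_enable_only parameter type additionally makes the flag one-way: it can be turned on from the command line or sysfs but never back off.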
static int io_openat(struct io_kiocb *req, bool force_nonblock)
{
return io_openat2(req, force_nonblock);
} | 0 | []
| linux | 0f2122045b946241a9e549c2a76cea54fa58a7ff | 105,386,222,380,648,310,000,000,000,000,000,000,000 | 4 | io_uring: don't rely on weak ->files references
Grab actual references to the files_struct. To avoid circular reference
issues due to this, we add a per-task note that keeps track of what
io_uring contexts a task has used. When a task execs or exits its
assigned files, we cancel requests based on this tracking.
With that, we can grab proper references to the files table, and no
longer need to rely on stashing away ring_fd and ring_file to check
if the ring_fd may have been closed.
Cc: [email protected] # v5.5+
Reviewed-by: Pavel Begunkov <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
mono_image_create_token (MonoDynamicImage *assembly, MonoObject *obj,
gboolean create_open_instance, gboolean register_token)
{
MonoClass *klass;
guint32 token = 0;
klass = obj->vtable->klass;
/* Check for user defined reflection objects */
/* TypeDelegator is the only corlib type which doesn't look like a MonoReflectionType */
if (klass->image != mono_defaults.corlib || (strcmp (klass->name, "TypeDelegator") == 0))
mono_raise_exception (mono_get_exception_not_supported ("User defined subclasses of System.Type are not yet supported"));
if (strcmp (klass->name, "MethodBuilder") == 0) {
MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder *)obj;
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)mb->type;
if (tb->module->dynamic_image == assembly && !tb->generic_params && !mb->generic_params)
token = mb->table_idx | MONO_TOKEN_METHOD_DEF;
else
token = mono_image_get_methodbuilder_token (assembly, mb, create_open_instance);
/*g_print ("got token 0x%08x for %s\n", token, mono_string_to_utf8 (mb->name));*/
} else if (strcmp (klass->name, "ConstructorBuilder") == 0) {
MonoReflectionCtorBuilder *mb = (MonoReflectionCtorBuilder *)obj;
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)mb->type;
if (tb->module->dynamic_image == assembly && !tb->generic_params)
token = mb->table_idx | MONO_TOKEN_METHOD_DEF;
else
token = mono_image_get_ctorbuilder_token (assembly, mb);
/*g_print ("got token 0x%08x for %s\n", token, mono_string_to_utf8 (mb->name));*/
} else if (strcmp (klass->name, "FieldBuilder") == 0) {
MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder *)obj;
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)fb->typeb;
if (tb->generic_params) {
token = mono_image_get_generic_field_token (assembly, fb);
} else {
if ((tb->module->dynamic_image == assembly)) {
token = fb->table_idx | MONO_TOKEN_FIELD_DEF;
} else {
token = mono_image_get_fieldref_token (assembly, (MonoObject*)fb, fb->handle);
}
}
} else if (strcmp (klass->name, "TypeBuilder") == 0) {
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)obj;
if (create_open_instance && tb->generic_params) {
MonoType *type;
init_type_builder_generics (obj);
type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
token = mono_image_typedef_or_ref_full (assembly, type, TRUE);
token = mono_metadata_token_from_dor (token);
} else {
token = tb->table_idx | MONO_TOKEN_TYPE_DEF;
}
} else if (strcmp (klass->name, "MonoType") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
MonoClass *mc = mono_class_from_mono_type (type);
token = mono_metadata_token_from_dor (
mono_image_typedef_or_ref_full (assembly, type, mc->generic_container == NULL || create_open_instance));
} else if (strcmp (klass->name, "GenericTypeParameterBuilder") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
token = mono_metadata_token_from_dor (
mono_image_typedef_or_ref (assembly, type));
} else if (strcmp (klass->name, "MonoGenericClass") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
token = mono_metadata_token_from_dor (
mono_image_typedef_or_ref (assembly, type));
} else if (strcmp (klass->name, "MonoCMethod") == 0 ||
strcmp (klass->name, "MonoMethod") == 0 ||
strcmp (klass->name, "MonoGenericMethod") == 0 ||
strcmp (klass->name, "MonoGenericCMethod") == 0) {
MonoReflectionMethod *m = (MonoReflectionMethod *)obj;
if (m->method->is_inflated) {
if (create_open_instance)
token = mono_image_get_methodspec_token (assembly, m->method);
else
token = mono_image_get_inflated_method_token (assembly, m->method);
} else if ((m->method->klass->image == &assembly->image) &&
!m->method->klass->generic_class) {
static guint32 method_table_idx = 0xffffff;
if (m->method->klass->wastypebuilder) {
/* we use the same token as the one that was assigned
* to the Methodbuilder.
* FIXME: do the equivalent for Fields.
*/
token = m->method->token;
} else {
/*
* Each token should have a unique index, but the indexes are
* assigned by managed code, so we don't know about them. An
* easy solution is to count backwards...
*/
method_table_idx --;
token = MONO_TOKEN_METHOD_DEF | method_table_idx;
}
} else {
token = mono_image_get_methodref_token (assembly, m->method, create_open_instance);
}
/*g_print ("got token 0x%08x for %s\n", token, m->method->name);*/
} else if (strcmp (klass->name, "MonoField") == 0) {
MonoReflectionField *f = (MonoReflectionField *)obj;
if ((f->field->parent->image == &assembly->image) && !is_field_on_inst (f->field)) {
static guint32 field_table_idx = 0xffffff;
field_table_idx --;
token = MONO_TOKEN_FIELD_DEF | field_table_idx;
} else {
token = mono_image_get_fieldref_token (assembly, (MonoObject*)f, f->field);
}
/*g_print ("got token 0x%08x for %s\n", token, f->field->name);*/
} else if (strcmp (klass->name, "MonoArrayMethod") == 0) {
MonoReflectionArrayMethod *m = (MonoReflectionArrayMethod *)obj;
token = mono_image_get_array_token (assembly, m);
} else if (strcmp (klass->name, "SignatureHelper") == 0) {
MonoReflectionSigHelper *s = (MonoReflectionSigHelper*)obj;
token = MONO_TOKEN_SIGNATURE | mono_image_get_sighelper_token (assembly, s);
} else if (strcmp (klass->name, "EnumBuilder") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
token = mono_metadata_token_from_dor (
mono_image_typedef_or_ref (assembly, type));
} else if (strcmp (klass->name, "FieldOnTypeBuilderInst") == 0) {
MonoReflectionFieldOnTypeBuilderInst *f = (MonoReflectionFieldOnTypeBuilderInst*)obj;
token = mono_image_get_field_on_inst_token (assembly, f);
} else if (strcmp (klass->name, "ConstructorOnTypeBuilderInst") == 0) {
MonoReflectionCtorOnTypeBuilderInst *c = (MonoReflectionCtorOnTypeBuilderInst*)obj;
token = mono_image_get_ctor_on_inst_token (assembly, c, create_open_instance);
} else if (strcmp (klass->name, "MethodOnTypeBuilderInst") == 0) {
MonoReflectionMethodOnTypeBuilderInst *m = (MonoReflectionMethodOnTypeBuilderInst*)obj;
token = mono_image_get_method_on_inst_token (assembly, m, create_open_instance);
} else if (is_sre_array (klass) || is_sre_byref (klass) || is_sre_pointer (klass)) {
MonoReflectionType *type = (MonoReflectionType *)obj;
token = mono_metadata_token_from_dor (
mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle (type)));
} else {
g_error ("requested token for %s\n", klass->name);
}
if (register_token)
mono_image_register_token (assembly, token, obj);
return token;
} | 0 | [
"CWE-399",
"CWE-264"
]
| mono | 89d1455a80ef13cddee5d79ec00c06055da3085c | 78,318,393,092,449,960,000,000,000,000,000,000,000 | 141 | Don't use finalization to cleanup dynamic methods.
* reflection.c: Use a reference queue to clean up
dynamic methods instead of finalization.
* runtime.c: Shut down the dynamic method queue
before runtime cleanup begins.
* DynamicMethod.cs: No longer finalizable.
* icall-def.h: Remove unused dynamic method icall.
Fixes #660422 |
static void core_analysis_color_curr_node(RzCore *core, RzAnalysisBlock *bbi) {
bool color_current = rz_config_get_i(core->config, "graph.gv.current");
char *pal_curr = palColorFor("graph.current");
bool current = rz_analysis_block_contains(bbi, core->offset);
if (current && color_current) {
rz_cons_printf("\t\"0x%08" PFMT64x "\" ", bbi->addr);
rz_cons_printf("\t[fillcolor=%s style=filled shape=box];\n", pal_curr);
}
free(pal_curr);
} | 0 | [
"CWE-703"
]
| rizin | 6ce71d8aa3dafe3cdb52d5d72ae8f4b95916f939 | 205,172,246,391,320,050,000,000,000,000,000,000,000 | 11 | Initialize retctx,ctx before freeing the inner elements
In rz_core_analysis_type_match retctx structure was initialized on the
stack only after a "goto out_function", where a field of that structure
was freed. When the goto path is taken, the field is not properly
initialized and it cause cause a crash of Rizin or have other effects.
Fixes: CVE-2021-4022 |
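The general C pattern the fix applies, shown standalone (names are illustrative): initialize every object the cleanup label touches before the first goto, so early error paths free only NULL or zeroed state:
#include <stdlib.h>
static int analyze_sketch(int fail_early)
{
	char *buf = NULL;	/* set before any goto reaches `out` */
	int rc = -1;
	if (fail_early)
		goto out;	/* safe: free(NULL) is a no-op */
	buf = malloc(64);
	if (buf == NULL)
		goto out;
	rc = 0;
out:
	free(buf);		/* valid on every path */
	return rc;
}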
double table_multi_eq_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
table_map rem_tables, uint keyparts,
uint16 *ref_keyuse_steps)
{
double sel= 1.0;
COND_EQUAL *cond_equal= join->cond_equal;
if (!cond_equal || !cond_equal->current_level.elements)
return sel;
if (!s->keyuse)
return sel;
Item_equal *item_equal;
List_iterator_fast<Item_equal> it(cond_equal->current_level);
TABLE *table= s->table;
table_map table_bit= table->map;
POSITION *pos= &join->positions[idx];
while ((item_equal= it++))
{
/*
Check whether we need to take into account the selectivity of
multiple equality item_equal. If this is the case multiply
the current value of sel by this selectivity
*/
table_map used_tables= item_equal->used_tables();
if (!(used_tables & table_bit))
continue;
if (item_equal->get_const())
continue;
bool adjust_sel= FALSE;
Item_equal_fields_iterator fi(*item_equal);
while((fi++) && !adjust_sel)
{
Field *fld= fi.get_curr_field();
if (fld->table->map != table_bit)
continue;
if (pos->key == 0)
adjust_sel= TRUE;
else
{
uint i;
KEYUSE *keyuse= pos->key;
uint key= keyuse->key;
for (i= 0; i < keyparts; i++)
{
if (i > 0)
keyuse+= ref_keyuse_steps[i-1];
uint fldno;
if (is_hash_join_key_no(key))
fldno= keyuse->keypart;
else
fldno= table->key_info[key].key_part[i].fieldnr - 1;
if (fld->field_index == fldno)
break;
}
keyuse= pos->key;
if (i == keyparts)
{
/*
Field fld is included in multiple equality item_equal
and is not a part of the ref key.
The selectivity of the multiple equality must be taken
into account unless one of the ref arguments is
equal to fld.
*/
adjust_sel= TRUE;
for (uint j= 0; j < keyparts && adjust_sel; j++)
{
if (j > 0)
keyuse+= ref_keyuse_steps[j-1];
Item *ref_item= keyuse->val;
if (ref_item->real_item()->type() == Item::FIELD_ITEM)
{
Item_field *field_item= (Item_field *) (ref_item->real_item());
if (item_equal->contains(field_item->field))
adjust_sel= FALSE;
}
}
}
}
}
if (adjust_sel)
{
/*
If ref == 0 and there are no fields in the multiple equality
item_equal that belong to the tables joined prior to s
then the selectivity of multiple equality will be set to 1.0.
*/
double eq_fld_sel= 1.0;
fi.rewind();
while ((fi++))
{
double curr_eq_fld_sel;
Field *fld= fi.get_curr_field();
if (!(fld->table->map & ~(table_bit | rem_tables)))
continue;
curr_eq_fld_sel= get_column_avg_frequency(fld) /
fld->table->stat_records();
if (curr_eq_fld_sel < 1.0)
set_if_bigger(eq_fld_sel, curr_eq_fld_sel);
}
sel*= eq_fld_sel;
}
}
return sel;
} | 0 | []
| server | ff77a09bda884fe6bf3917eb29b9d3a2f53f919b | 178,711,522,008,032,930,000,000,000,000,000,000,000 | 110 | MDEV-22464 Server crash on UPDATE with nested subquery
Uninitialized ref_pointer_array[] because setup_fields() got empty
fields list. mysql_multi_update() for some reason does that by
substituting the fields list with empty total_list for the
mysql_select() call (looks like wrong merge since total_list is not
used anywhere else and is always empty). The fix would be to return
back the original fields list. But this fails update_use_source.test
case:
--error ER_BAD_FIELD_ERROR
update v1 set t1c1=2 order by 1;
Actually not failing the above seems to be ok.
The other fix would be to keep resolve_in_select_list false (and that
keeps outer context from being resolved in
Item_ref::fix_fields()). This fix is more consistent with how SELECT
behaves:
--error ER_SUBQUERY_NO_1_ROW
select a from t1 where a= (select 2 from t1 having (a = 3));
So this patch implements this fix. |
static int handle_at_packet(struct context *context,
struct descriptor *d,
struct descriptor *last)
{
struct driver_data *driver_data;
struct fw_packet *packet;
struct fw_ohci *ohci = context->ohci;
int evt;
if (last->transfer_status == 0)
/* This descriptor isn't done yet, stop iteration. */
return 0;
driver_data = (struct driver_data *) &d[3];
packet = driver_data->packet;
if (packet == NULL)
/* This packet was cancelled, just continue. */
return 1;
if (packet->payload_mapped)
dma_unmap_single(ohci->card.device, packet->payload_bus,
packet->payload_length, DMA_TO_DEVICE);
evt = le16_to_cpu(last->transfer_status) & 0x1f;
packet->timestamp = le16_to_cpu(last->res_count);
log_ar_at_event('T', packet->speed, packet->header, evt);
switch (evt) {
case OHCI1394_evt_timeout:
/* Async response transmit timed out. */
packet->ack = RCODE_CANCELLED;
break;
case OHCI1394_evt_flushed:
/*
* A flushed packet should give the same error as
* when we try to use a stale generation count.
*/
packet->ack = RCODE_GENERATION;
break;
case OHCI1394_evt_missing_ack:
/*
* Using a valid (current) generation count, but the
* node is not on the bus or not sending acks.
*/
packet->ack = RCODE_NO_ACK;
break;
case ACK_COMPLETE + 0x10:
case ACK_PENDING + 0x10:
case ACK_BUSY_X + 0x10:
case ACK_BUSY_A + 0x10:
case ACK_BUSY_B + 0x10:
case ACK_DATA_ERROR + 0x10:
case ACK_TYPE_ERROR + 0x10:
packet->ack = evt - 0x10;
break;
default:
packet->ack = RCODE_SEND_ERROR;
break;
}
packet->callback(packet, &ohci->card, packet->ack);
return 1;
} | 0 | [
"CWE-399"
]
| linux-2.6 | 8c0c0cc2d9f4c523fde04bdfe41e4380dec8ee54 | 105,587,503,880,775,570,000,000,000,000,000,000,000 | 69 | firewire: ohci: handle receive packets with a data length of zero
Queueing to receive an ISO packet with a payload length of zero
silently does nothing in dualbuffer mode, and crashes the kernel in
packet-per-buffer mode. Return an error in dualbuffer mode, because
the DMA controller won't let us do what we want, and work correctly in
packet-per-buffer mode.
Signed-off-by: Jay Fenlason <[email protected]>
Signed-off-by: Stefan Richter <[email protected]>
Cc: [email protected] |
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
u64 alignment = ctxt->d & AlignMask;
if (likely(size < 16))
return 1;
switch (alignment) {
case Unaligned:
case Avx:
return 1;
case Aligned16:
return 16;
case Aligned:
default:
return size;
}
} | 0 | [
"CWE-284"
]
| linux | 33ab91103b3415e12457e3104f0e4517ce12d0f3 | 149,683,437,776,940,870,000,000,000,000,000,000,000 | 18 | KVM: x86: fix emulation of "MOV SS, null selector"
This is CVE-2017-2583. On Intel this causes a failed vmentry because
SS's type is neither 3 nor 7 (even though the manual says this check is
only done for usable SS, and the dmesg splat says that SS is unusable!).
On AMD it's worse: svm.c is confused and sets CPL to 0 in the vmcb.
The fix fabricates a data segment descriptor when SS is set to a null
selector, so that CPL and SS.DPL are set correctly in the VMCS/vmcb.
Furthermore, only allow setting SS to a NULL selector if SS.RPL < 3;
this in turn ensures CPL < 3 because RPL must be equal to CPL.
Thanks to Andy Lutomirski and Willy Tarreau for help in analyzing
the bug and deciphering the manuals.
Reported-by: Xiaohan Zhang <[email protected]>
Fixes: 79d5b4c3cd809c770d4bf9812635647016c56011
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]> |
isoent_alloc_path_table(struct archive_write *a, struct vdd *vdd,
int max_depth)
{
int i;
vdd->max_depth = max_depth;
vdd->pathtbl = malloc(sizeof(*vdd->pathtbl) * vdd->max_depth);
if (vdd->pathtbl == NULL) {
archive_set_error(&a->archive, ENOMEM,
"Can't allocate memory");
return (ARCHIVE_FATAL);
}
for (i = 0; i < vdd->max_depth; i++) {
vdd->pathtbl[i].first = NULL;
vdd->pathtbl[i].last = &(vdd->pathtbl[i].first);
vdd->pathtbl[i].sorted = NULL;
vdd->pathtbl[i].cnt = 0;
}
return (ARCHIVE_OK);
} | 0 | [
"CWE-190"
]
| libarchive | 3014e19820ea53c15c90f9d447ca3e668a0b76c6 | 133,144,174,774,072,000,000,000,000,000,000,000,000 | 20 | Issue 711: Be more careful about verifying filename lengths when writing ISO9660 archives
* Don't cast size_t to int, since this can lead to overflow
on machines where sizeof(int) < sizeof(size_t)
* Check a + b > limit by writing it as
a > limit || b > limit || a + b > limit
to avoid problems when a + b wraps around. |
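The rewritten comparison, spelled out as a standalone sketch: once both operands are individually bounded by the limit, their sum stays well below SIZE_MAX (for the small name-length limits ISO9660 uses), so the final addition cannot wrap:
#include <stddef.h>
static int joliet_len_ok_sketch(size_t dir_len, size_t name_len, size_t limit)
{
	return dir_len <= limit &&
	       name_len <= limit &&
	       dir_len + name_len <= limit;	/* sum <= 2*limit, no wrap */
}
Writing `dir_len + name_len > limit` alone would be wrong: a huge dir_len could wrap the sum back under the limit and pass the check.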
TEST_F(HttpHealthCheckerImplTest, ImmediateFailure) {
setupNoServiceValidationHC();
cluster_->prioritySet().getMockHostSet(0)->hosts_ = {
makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())};
expectSessionCreate();
expectStreamCreate(0);
EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _));
health_checker_->start();
EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed));
EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));
EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _));
EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer());
EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));
respond(0, "503", false, false, true, false, absl::nullopt, false, true);
EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(
Host::HealthFlag::FAILED_ACTIVE_HC));
EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(
Host::HealthFlag::EXCLUDED_VIA_IMMEDIATE_HC_FAIL));
EXPECT_EQ(Host::Health::Unhealthy,
cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());
} | 0 | [
"CWE-476"
]
| envoy | 9b1c3962172a972bc0359398af6daa3790bb59db | 43,605,590,168,656,030,000,000,000,000,000,000,000 | 22 | healthcheck: fix grpc inline removal crashes (#749)
Signed-off-by: Matt Klein <[email protected]>
Signed-off-by: Pradeep Rao <[email protected]> |
struct usb_host_interface *usb_find_alt_setting(
struct usb_host_config *config,
unsigned int iface_num,
unsigned int alt_num)
{
struct usb_interface_cache *intf_cache = NULL;
int i;
if (!config)
return NULL;
for (i = 0; i < config->desc.bNumInterfaces; i++) {
if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
== iface_num) {
intf_cache = config->intf_cache[i];
break;
}
}
if (!intf_cache)
return NULL;
for (i = 0; i < intf_cache->num_altsetting; i++)
if (intf_cache->altsetting[i].desc.bAlternateSetting == alt_num)
return &intf_cache->altsetting[i];
printk(KERN_DEBUG "Did not find alt setting %u for intf %u, "
"config %u\n", alt_num, iface_num,
config->desc.bConfigurationValue);
return NULL;
} | 0 | [
"CWE-400",
"CWE-703"
]
| linux | 704620afc70cf47abb9d6a1a57f3825d2bca49cf | 262,207,581,588,838,460,000,000,000,000,000,000,000 | 28 | USB: check usb_get_extra_descriptor for proper size
When reading an extra descriptor, we need to properly check the minimum
and maximum size allowed, to prevent from invalid data being sent by a
device.
Reported-by: Hui Peng <[email protected]>
Reported-by: Mathias Payer <[email protected]>
Co-developed-by: Linus Torvalds <[email protected]>
Signed-off-by: Hui Peng <[email protected]>
Signed-off-by: Mathias Payer <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
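A kernel-context sketch of the bounds check the commit adds at call sites (the helper name and exact comparisons are assumptions): never trust a device-supplied descriptor until its advertised length both covers the structure being read and fits in the remaining buffer:
static int extra_desc_ok_sketch(const struct usb_descriptor_header *hdr,
				size_t need, size_t avail)
{
	if (avail < sizeof(*hdr))
		return 0;	/* not even a header's worth of bytes left */
	if (hdr->bLength < need || hdr->bLength > avail)
		return 0;	/* too short for the struct, or overruns buffer */
	return 1;
}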
static int kvm_write_guest_virt_system(gva_t addr, void *val,
unsigned int bytes,
struct kvm_vcpu *vcpu,
u32 *error)
{
void *data = val;
int r = X86EMUL_CONTINUE;
while (bytes) {
gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
PFERR_WRITE_MASK,
error);
unsigned offset = addr & (PAGE_SIZE-1);
unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret;
if (gpa == UNMAPPED_GVA) {
r = X86EMUL_PROPAGATE_FAULT;
goto out;
}
ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
if (ret < 0) {
r = X86EMUL_IO_NEEDED;
goto out;
}
bytes -= towrite;
data += towrite;
addr += towrite;
}
out:
return r;
} | 0 | [
"CWE-200"
]
| kvm | 831d9d02f9522e739825a51a11e3bc5aa531a905 | 105,676,993,026,303,500,000,000,000,000,000,000,000 | 33 | KVM: x86: fix information leak to userland
Structures kvm_vcpu_events, kvm_debugregs, kvm_pit_state2 and
kvm_clock_data are copied to userland with some padding and reserved
fields uninitialized. This leaks the contents of kernel stack
memory. We have to initialize them to zero.
In patch v1, Jan Kiszka suggested filling reserved fields with zeros
instead of memset'ting the whole struct. It makes sense as these
fields are explicitly marked as padding. No more fields need zeroing.
KVM-Stable-Tag.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]> |
int cil_check_recursive_blockinherit(struct cil_tree_node *bi_node)
{
struct cil_tree_node *curr = NULL;
struct cil_blockinherit *bi = NULL;
struct cil_block *block = NULL;
int rc = SEPOL_ERR;
bi = bi_node->data;
for (curr = bi_node->parent; curr != NULL; curr = curr->parent) {
if (curr->flavor != CIL_BLOCK) {
continue;
}
block = curr->data;
if (block != bi->block) {
continue;
}
cil_log(CIL_ERR, "Recursive blockinherit found:\n");
cil_print_recursive_blockinherit(bi_node, curr);
rc = SEPOL_ERR;
goto exit;
}
rc = SEPOL_OK;
exit:
return rc;
} | 0 | [
"CWE-125"
]
| selinux | 340f0eb7f3673e8aacaf0a96cbfcd4d12a405521 | 277,240,657,203,938,120,000,000,000,000,000,000,000 | 32 | libsepol/cil: Check for statements not allowed in optional blocks
While there are some checks for invalid statements in an optional
block when resolving the AST, there are no checks when building the
AST.
OSS-Fuzz found the following policy which caused a null dereference
in cil_tree_get_next_path().
(blockinherit b3)
(sid SID)
(sidorder(SID))
(optional o
(ibpkeycon :(1 0)s)
(block b3
(filecon""block())
(filecon""block())))
The problem is that the blockinherit copies block b3 before
the optional block is disabled. When the optional is disabled,
block b3 is deleted along with everything else in the optional.
Later, when filecon statements with the same path are found, an
error message is produced and in trying to find out where the block
was copied from, the reference to the deleted block is used. The
error handling code assumes (rightly) that if something was copied
from a block then that block should still exist.
It is clear that in-statements, blocks, and macros cannot be in an
optional, because that allows nodes to be copied from the optional
block to somewhere outside even though the optional could be disabled
later. When optionals are disabled the AST is reset and the
resolution is restarted at the point of resolving macro calls, so
anything resolved before macro calls will never be re-resolved.
This includes tunableifs, in-statements, blockinherits,
blockabstracts, and macro definitions. Tunable declarations also
cannot be in an optional block because they are needed to resolve
tunableifs. It should be fine to allow blockinherit statements in
an optional, because that is copying nodes from outside the optional
to the optional and if the optional is later disabled, everything
will be deleted anyway.
Check and quit with an error if a tunable declaration, in-statement,
block, blockabstract, or macro definition is found within an
optional when either building or resolving the AST.
Signed-off-by: James Carter <[email protected]> |
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
void __user *uaddr = (void __user *)(long)reg->addr;
u64 val;
int ret;
ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
if (ret != 0)
return -EFAULT;
return kvm_arm_timer_set_reg(vcpu, reg->id, val);
} | 0 | [
"CWE-20"
]
| linux | d26c25a9d19b5976b319af528886f89cf455692d | 311,310,960,496,028,460,000,000,000,000,000,000,000 | 12 | arm64: KVM: Tighten guest core register access from userspace
We currently allow userspace to access the core register file
in about any possible way, including straddling multiple
registers and doing unaligned accesses.
This is not the expected use of the ABI, and nobody is actually
using it that way. Let's tighten it by explicitly checking
the size and alignment for each field of the register file.
Cc: <[email protected]>
Fixes: 2f4a07c5f9fe ("arm64: KVM: guest one-reg interface")
Reviewed-by: Christoffer Dall <[email protected]>
Reviewed-by: Mark Rutland <[email protected]>
Signed-off-by: Dave Martin <[email protected]>
[maz: rewrote Dave's initial patch to be more easily backported]
Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Will Deacon <[email protected]> |
int mdvi_configure(DviContext *dvi, DviParamCode option, ...)
{
va_list ap;
int reset_all;
int reset_font;
DviParams np;
va_start(ap, option);
reset_font = 0;
reset_all = 0;
np = dvi->params; /* structure copy */
while(option != MDVI_PARAM_LAST) {
switch(option) {
case MDVI_SET_DPI:
np.dpi = np.vdpi = va_arg(ap, Uint);
reset_all = 1;
break;
case MDVI_SET_XDPI:
np.dpi = va_arg(ap, Uint);
reset_all = 1;
break;
case MDVI_SET_YDPI:
np.vdpi = va_arg(ap, Uint);
break;
case MDVI_SET_SHRINK:
np.hshrink = np.vshrink = va_arg(ap, Uint);
reset_font = MDVI_FONTSEL_GREY|MDVI_FONTSEL_BITMAP;
break;
case MDVI_SET_XSHRINK:
np.hshrink = va_arg(ap, Uint);
reset_font = MDVI_FONTSEL_GREY|MDVI_FONTSEL_BITMAP;
break;
case MDVI_SET_YSHRINK:
np.vshrink = va_arg(ap, Uint);
reset_font = MDVI_FONTSEL_GREY|MDVI_FONTSEL_BITMAP;
break;
case MDVI_SET_ORIENTATION:
np.orientation = va_arg(ap, DviOrientation);
reset_font = MDVI_FONTSEL_GLYPH;
break;
case MDVI_SET_GAMMA:
np.gamma = va_arg(ap, double);
reset_font = MDVI_FONTSEL_GREY;
break;
case MDVI_SET_DENSITY:
np.density = va_arg(ap, Uint);
reset_font = MDVI_FONTSEL_BITMAP;
break;
case MDVI_SET_MAGNIFICATION:
np.mag = va_arg(ap, double);
reset_all = 1;
break;
case MDVI_SET_DRIFT:
np.hdrift = np.vdrift = va_arg(ap, int);
break;
case MDVI_SET_HDRIFT:
np.hdrift = va_arg(ap, int);
break;
case MDVI_SET_VDRIFT:
np.vdrift = va_arg(ap, int);
break;
case MDVI_SET_FOREGROUND:
np.fg = va_arg(ap, Ulong);
reset_font = MDVI_FONTSEL_GREY;
break;
case MDVI_SET_BACKGROUND:
np.bg = va_arg(ap, Ulong);
reset_font = MDVI_FONTSEL_GREY;
break;
default:
break;
}
option = va_arg(ap, DviParamCode);
}
va_end(ap);
/* check that all values make sense */
if(np.dpi <= 0 || np.vdpi <= 0)
return -1;
if(np.mag <= 0.0)
return -1;
if(np.density < 0)
return -1;
if(np.hshrink < 1 || np.vshrink < 1)
return -1;
if(np.hdrift < 0 || np.vdrift < 0)
return -1;
if(np.fg == np.bg)
return -1;
/*
* If the dpi or the magnification change, we basically have to reload
* the DVI file again from scratch.
*/
if(reset_all)
return (mdvi_reload(dvi, &np) == 0);
if(np.hshrink != dvi->params.hshrink) {
np.conv = dvi->dviconv;
if(np.hshrink)
np.conv /= np.hshrink;
}
if(np.vshrink != dvi->params.vshrink) {
np.vconv = dvi->dvivconv;
if(np.vshrink)
np.vconv /= np.vshrink;
}
if(reset_font) {
font_reset_chain_glyphs(&dvi->device, dvi->fonts, reset_font);
}
dvi->params = np;
if((reset_font & MDVI_FONTSEL_GLYPH) && dvi->device.refresh) {
dvi->device.refresh(dvi, dvi->device.device_data);
return 0;
}
return 1;
} | 0 | [
"CWE-20"
]
| evince | d4139205b010ed06310d14284e63114e88ec6de2 | 210,454,473,565,473,050,000,000,000,000,000,000,000 | 121 | backends: Fix several security issues in the dvi-backend.
See CVE-2010-2640, CVE-2010-2641, CVE-2010-2642 and CVE-2010-2643. |
MONGO_EXPORT const char *bson_iterator_regex_opts( const bson_iterator *i ) {
const char *p = bson_iterator_value( i );
return p + strlen( p ) + 1;
} | 0 | [
"CWE-190"
]
| mongo-c-driver-legacy | 1a1f5e26a4309480d88598913f9eebf9e9cba8ca | 147,756,911,037,769,900,000,000,000,000,000,000,000 | 5 | don't mix up int and size_t (first pass to fix that) |
static void _print_next_block(int idx, const char *blk)
{
pr_cont("%s%s", idx ? ", " : "", blk);
} | 0 | [
"CWE-20"
]
| linux | 8914a595110a6eca69a5e275b323f5d09e18f4f9 | 86,890,282,221,297,800,000,000,000,000,000,000,000 | 4 | bnx2x: disable GSO where gso_size is too big for hardware
If a bnx2x card is passed a GSO packet with a gso_size larger than
~9700 bytes, it will cause a firmware error that will bring the card
down:
bnx2x: [bnx2x_attn_int_deasserted3:4323(enP24p1s0f0)]MC assert!
bnx2x: [bnx2x_mc_assert:720(enP24p1s0f0)]XSTORM_ASSERT_LIST_INDEX 0x2
bnx2x: [bnx2x_mc_assert:736(enP24p1s0f0)]XSTORM_ASSERT_INDEX 0x0 = 0x00000000 0x25e43e47 0x00463e01 0x00010052
bnx2x: [bnx2x_mc_assert:750(enP24p1s0f0)]Chip Revision: everest3, FW Version: 7_13_1
... (dump of values continues) ...
Detect when the mac length of a GSO packet is greater than the maximum
packet size (9700 bytes) and disable GSO.
Signed-off-by: Daniel Axtens <[email protected]>
Reviewed-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
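A hedged sketch of the guard (the 9700-byte limit comes from the commit text; the helper name is hypothetical, and headlen + gso_size is only an approximation of the on-wire segment size the real fix computes): strip GSO from the features for any skb the firmware could not segment, forcing a software-GSO fallback:
#define FW_MAX_SEG_BYTES 9700	/* per the commit message */
static netdev_features_t gso_guard_sketch(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (skb_is_gso(skb) &&
	    skb_headlen(skb) + skb_shinfo(skb)->gso_size > FW_MAX_SEG_BYTES)
		features &= ~NETIF_F_GSO_MASK;	/* fall back to software GSO */
	return features;
}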
void Field_long::sql_type(String &res) const
{
CHARSET_INFO *cs=res.charset();
res.length(cs->cset->snprintf(cs,(char*) res.ptr(),res.alloced_length(),
"int(%d)",(int) field_length));
add_zerofill_and_unsigned(res);
} | 0 | [
"CWE-416",
"CWE-703"
]
| server | 08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917 | 143,237,553,719,260,790,000,000,000,000,000,000,000 | 7 | MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]> |
scale_line (int *weights, int n_x, int n_y, guchar *dest, int dest_x,
guchar *dest_end, int dest_channels, int dest_has_alpha,
guchar **src, int src_channels, gboolean src_has_alpha, int x_init,
int x_step, int src_width, int check_size, guint32 color1,
guint32 color2)
{
int x = x_init;
int i, j;
while (dest < dest_end)
{
int x_scaled = x >> SCALE_SHIFT;
int *pixel_weights;
pixel_weights = weights +
((x >> (SCALE_SHIFT - SUBSAMPLE_BITS)) & SUBSAMPLE_MASK) * n_x * n_y;
if (src_has_alpha)
{
unsigned int r = 0, g = 0, b = 0, a = 0;
for (i=0; i<n_y; i++)
{
guchar *q = src[i] + x_scaled * src_channels;
int *line_weights = pixel_weights + n_x * i;
for (j=0; j<n_x; j++)
{
unsigned int ta;
ta = q[3] * line_weights[j];
r += ta * q[0];
g += ta * q[1];
b += ta * q[2];
a += ta;
q += src_channels;
}
}
if (a)
{
dest[0] = r / a;
dest[1] = g / a;
dest[2] = b / a;
dest[3] = a >> 16;
}
else
{
dest[0] = 0;
dest[1] = 0;
dest[2] = 0;
dest[3] = 0;
}
}
else
{
unsigned int r = 0, g = 0, b = 0;
for (i=0; i<n_y; i++)
{
guchar *q = src[i] + x_scaled * src_channels;
int *line_weights = pixel_weights + n_x * i;
for (j=0; j<n_x; j++)
{
unsigned int ta = line_weights[j];
r += ta * q[0];
g += ta * q[1];
b += ta * q[2];
q += src_channels;
}
}
dest[0] = (r + 0xffff) >> 16;
dest[1] = (g + 0xffff) >> 16;
dest[2] = (b + 0xffff) >> 16;
if (dest_has_alpha)
dest[3] = 0xff;
}
dest += dest_channels;
x += x_step;
}
return dest;
} | 0 | []
| gdk-pixbuf | ffec86ed5010c5a2be14f47b33bcf4ed3169a199 | 322,387,136,077,206,320,000,000,000,000,000,000,000 | 89 | pixops: Be more careful about integer overflow
Our loader code is supposed to handle out-of-memory and overflow
situations gracefully, reporting errors instead of aborting. But
if you load an image at a specific size, we also execute our
scaling code, which was not careful enough about overflow in some
places.
This commit makes the scaling code silently return if it fails to
allocate filter tables. This is the best we can do, since
gdk_pixbuf_scale() does not take a GError.
https://bugzilla.gnome.org/show_bug.cgi?id=752297 |
static void toneport_startup(struct usb_line6 *line6)
{
line6_pcm_acquire(line6->line6pcm, LINE6_STREAM_MONITOR, true);
} | 0 | [
"CWE-476"
]
| linux | 0b074ab7fc0d575247b9cc9f93bb7e007ca38840 | 160,676,632,435,373,320,000,000,000,000,000,000,000 | 4 | ALSA: line6: Assure canceling delayed work at disconnection
The current code cancels the delayed work at a late stage of the
disconnection procedure, which may lead to access of already cleared
state.
This patch assures to call cancel_delayed_work_sync() at the beginning
of the disconnection procedure for avoiding that race. The delayed
work object is now assigned in the common line6 object instead of its
derivative, so that we can call cancel_delayed_work_sync().
Along with the change, the startup function is called via the new
callback instead. This will make it easier to port other LINE6
drivers to use the delayed work for startup in later patches.
Reported-by: [email protected]
Fixes: 7f84ff68be05 ("ALSA: line6: toneport: Fix broken usage of timer for delayed execution")
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]> |
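A kernel-context sketch of the ordering the fix establishes (the struct layout is illustrative): cancel and wait for the delayed work as the very first step of disconnect, before tearing down any state its callback might dereference:
struct line6_sketch {
	struct delayed_work startup_work;
	/* ... state the startup callback dereferences ... */
};
static void disconnect_sketch(struct line6_sketch *line6)
{
	/* blocks until a running callback finishes; nothing can requeue */
	cancel_delayed_work_sync(&line6->startup_work);
	/* only now is it safe to free or clear the state above */
}
Moving the work object into the common line6 struct is what makes this call possible from the shared disconnect path.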
static char *jas_iccsigtostr(int sig, char *buf)
{
int n;
int c;
char *bufptr;
bufptr = buf;
for (n = 4; n > 0; --n) {
c = (sig >> 24) & 0xff;
if (isalpha(c) || isdigit(c)) {
*bufptr++ = c;
}
sig <<= 8;
}
*bufptr = '\0';
return buf;
} | 0 | [
"CWE-189"
]
| jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 255,751,305,036,135,070,000,000,000,000,000,000,000 | 16 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
expand_words_no_vars (list)
WORD_LIST *list;
{
return (expand_word_list_internal (list, WEXP_NOVARS));
} | 0 | []
| bash | 955543877583837c85470f7fb8a97b7aa8d45e6c | 222,681,430,275,625,200,000,000,000,000,000,000,000 | 5 | bash-4.4-rc2 release |
void endhostent_unlocked(void)
{
if (hostp) {
config_close(hostp);
hostp = NULL;
}
host_stayopen = 0;
} | 0 | [
"CWE-79"
]
| uclibc-ng | 0f822af0445e5348ce7b7bd8ce1204244f31d174 | 67,856,376,921,742,150,000,000,000,000,000,000,000 | 8 | libc/inet/resolv.c: add __hnbad to check DNS entries for validity…
… using the same rules glibc does
also call __hnbad in some places to check answers |
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
NexusInfo *magick_restrict nexus_info)
{
CacheInfo
*magick_restrict cache_info;
MagickSizeType
extent;
assert(cache != NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
if (extent == 0)
return((MagickSizeType) cache_info->columns*cache_info->rows);
return(extent);
} | 0 | [
"CWE-119",
"CWE-787"
]
| ImageMagick | aecd0ada163a4d6c769cec178955d5f3e9316f2f | 338,068,170,524,211,470,000,000,000,000,000,000,000 | 17 | Set pixel cache to undefined if any resource limit is exceeded |
void serveloop(GArray* servers) {
struct sockaddr_storage addrin;
socklen_t addrinlen=sizeof(addrin);
int i;
int max;
fd_set mset;
fd_set rset;
/*
* Set up the master fd_set. The set of descriptors we need
* to select() for never changes anyway and it buys us a *lot*
* of time to only build this once. However, if we ever choose
* to not fork() for clients anymore, we may have to revisit
* this.
*/
max=0;
FD_ZERO(&mset);
for(i=0;i<servers->len;i++) {
int sock;
if((sock=(g_array_index(servers, SERVER, i)).socket) >= 0) {
FD_SET(sock, &mset);
max=sock>max?sock:max;
}
}
for(i=0;i<modernsocks->len;i++) {
int sock = g_array_index(modernsocks, int, i);
FD_SET(sock, &mset);
max=sock>max?sock:max;
}
for(;;) {
/* SIGHUP causes the root server process to reconfigure
* itself and add new export servers for each newly
* found export configuration group, i.e. spawn new
* server processes for each previously non-existent
* export. This does not alter old runtime configuration
* but just appends new exports. */
if (is_sighup_caught) {
int n;
GError *gerror = NULL;
msg(LOG_INFO, "reconfiguration request received");
is_sighup_caught = 0; /* Reset to allow catching
* it again. */
n = append_new_servers(servers, &gerror);
if (n == -1)
msg(LOG_ERR, "failed to append new servers: %s",
gerror->message);
for (i = servers->len - n; i < servers->len; ++i) {
const SERVER server = g_array_index(servers,
SERVER, i);
if (server.socket >= 0) {
FD_SET(server.socket, &mset);
max = server.socket > max ? server.socket : max;
}
msg(LOG_INFO, "reconfigured new server: %s",
server.servename);
}
}
memcpy(&rset, &mset, sizeof(fd_set));
if(select(max+1, &rset, NULL, NULL, NULL)>0) {
DEBUG("accept, ");
for(i=0; i < modernsocks->len; i++) {
int sock = g_array_index(modernsocks, int, i);
if(!FD_ISSET(sock, &rset)) {
continue;
}
handle_modern_connection(servers, sock);
}
for(i=0; i < servers->len; i++) {
int net;
SERVER *serve;
serve=&(g_array_index(servers, SERVER, i));
if(serve->socket < 0) {
continue;
}
if(FD_ISSET(serve->socket, &rset)) {
if ((net=accept(serve->socket, (struct sockaddr *) &addrin, &addrinlen)) < 0) {
err_nonfatal("accept: %m");
continue;
}
handle_connection(servers, net, serve, NULL);
}
}
}
}
} | 0 | [
"CWE-399",
"CWE-310"
]
| nbd | 741495cb08503fd32a9d22648e63b64390c601f4 | 281,376,937,554,766,440,000,000,000,000,000,000,000 | 94 | nbd-server: handle modern-style negotiation in a child process
Previously, the modern style negotiation was carried out in the root
server (listener) process before forking the actual client handler. This
made it possible for a malfunctioning or evil client to terminate the
root process simply by querying a non-existent export or aborting in the
middle of the negotiation process (caused SIGPIPE in the server).
This commit moves the negotiation process to the child to keep the root
process up and running no matter what happens during the negotiation.
See http://sourceforge.net/mailarchive/message.php?msg_id=30410146
Signed-off-by: Tuomas Räsänen <[email protected]> |
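A POSIX sketch of the restructuring (negotiate_sketch/serve_client_sketch stand in for nbd-server internals): fork before any untrusted negotiation I/O, so a client that aborts mid-handshake can only take down its own child process:
#include <stdlib.h>
#include <unistd.h>
static void negotiate_sketch(int fd);		/* placeholder: modern handshake */
static void serve_client_sketch(int fd);	/* placeholder: export serving */
static void accept_client_sketch(int net)
{
	pid_t pid = fork();
	if (pid == 0) {			/* child: all untrusted I/O here */
		negotiate_sketch(net);	/* a SIGPIPE now kills only the child */
		serve_client_sketch(net);
		exit(0);
	}
	close(net);	/* parent drops its copy and keeps listening */
}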
static int bio_zlib_read(BIO *b, char *out, int outl)
{
BIO_ZLIB_CTX *ctx;
int ret;
z_stream *zin;
if(!out || !outl) return 0;
ctx = (BIO_ZLIB_CTX *)b->ptr;
zin = &ctx->zin;
BIO_clear_retry_flags(b);
if(!ctx->ibuf)
{
ctx->ibuf = OPENSSL_malloc(ctx->ibufsize);
if(!ctx->ibuf)
{
COMPerr(COMP_F_BIO_ZLIB_READ, ERR_R_MALLOC_FAILURE);
return 0;
}
inflateInit(zin);
zin->next_in = ctx->ibuf;
zin->avail_in = 0;
}
/* Copy output data directly to supplied buffer */
zin->next_out = (unsigned char *)out;
zin->avail_out = (unsigned int)outl;
for(;;)
{
/* Decompress while data available */
while(zin->avail_in)
{
ret = inflate(zin, 0);
if((ret != Z_OK) && (ret != Z_STREAM_END))
{
COMPerr(COMP_F_BIO_ZLIB_READ,
COMP_R_ZLIB_INFLATE_ERROR);
ERR_add_error_data(2, "zlib error:",
zError(ret));
return 0;
}
/* If EOF or we've read everything then return */
if((ret == Z_STREAM_END) || !zin->avail_out)
return outl - zin->avail_out;
}
/* No data in input buffer try to read some in,
* if an error then return the total data read.
*/
ret = BIO_read(b->next_bio, ctx->ibuf, ctx->ibufsize);
if(ret <= 0)
{
/* Total data read */
int tot = outl - zin->avail_out;
BIO_copy_next_retry(b);
if(ret < 0) return (tot > 0) ? tot : ret;
return tot;
}
zin->avail_in = ret;
zin->next_in = ctx->ibuf;
}
} | 0 | [
"CWE-399"
]
| openssl | 1b31b5ad560b16e2fe1cad54a755e3e6b5e778a3 | 149,119,966,792,032,790,000,000,000,000,000,000,000 | 60 | Modify compression code so it avoids using ex_data free functions. This
stops applications that call CRYPTO_free_all_ex_data() prematurely leaking
memory. |
xmlBufferCreateStatic(void *mem, size_t size) {
xmlBufferPtr ret;
if ((mem == NULL) || (size == 0))
return(NULL);
if (size > UINT_MAX)
return(NULL);
ret = (xmlBufferPtr) xmlMalloc(sizeof(xmlBuffer));
if (ret == NULL) {
xmlTreeErrMemory("creating buffer");
return(NULL);
}
ret->use = size;
ret->size = size;
ret->alloc = XML_BUFFER_ALLOC_IMMUTABLE;
ret->content = (xmlChar *) mem;
return(ret);
} | 0 | [
"CWE-190"
]
| libxml2 | 6c283d83eccd940bcde15634ac8c7f100e3caefd | 240,650,624,192,853,930,000,000,000,000,000,000,000 | 19 | [CVE-2022-29824] Fix integer overflows in xmlBuf and xmlBuffer
In several places, the code handling string buffers didn't check for
integer overflow or used wrong types for buffer sizes. This could
result in out-of-bounds writes or other memory errors when working on
large, multi-gigabyte buffers.
Thanks to Felix Wilhelm for the report. |
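As a hedged illustration of the class of check this commit adds (not the actual libxml2 code): a buffer-growth computation is validated against wraparound before it is trusted.

#include <stdint.h>
#include <stdlib.h>

static int buf_grow(char **content, size_t *size, size_t use, size_t add)
{
    size_t newsize;
    char *tmp;

    if (add > SIZE_MAX - use)         /* use + add would wrap */
        return -1;
    if (use + add > SIZE_MAX / 2)     /* doubling would wrap */
        newsize = use + add;
    else
        newsize = (use + add) * 2;    /* amortized growth */
    tmp = realloc(*content, newsize);
    if (tmp == NULL)
        return -1;
    *content = tmp;
    *size = newsize;
    return 0;
}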
int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
int err = 0;
size_t target, copied = 0;
long timeo;
if (flags & MSG_OOB)
return -EOPNOTSUPP;
msg->msg_namelen = 0;
BT_DBG("sk %p size %zu", sk, size);
lock_sock(sk);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
do {
struct sk_buff *skb;
int chunk;
skb = skb_dequeue(&sk->sk_receive_queue);
if (!skb) {
if (copied >= target)
break;
err = sock_error(sk);
if (err)
break;
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
err = -EAGAIN;
if (!timeo)
break;
timeo = bt_sock_data_wait(sk, timeo);
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
goto out;
}
continue;
}
chunk = min_t(unsigned int, skb->len, size);
if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, chunk)) {
skb_queue_head(&sk->sk_receive_queue, skb);
if (!copied)
copied = -EFAULT;
break;
}
copied += chunk;
size -= chunk;
sock_recv_ts_and_drops(msg, sk, skb);
if (!(flags & MSG_PEEK)) {
int skb_len = skb_headlen(skb);
if (chunk <= skb_len) {
__skb_pull(skb, chunk);
} else {
struct sk_buff *frag;
__skb_pull(skb, skb_len);
chunk -= skb_len;
skb_walk_frags(skb, frag) {
if (chunk <= frag->len) {
/* Pulling partial data */
skb->len -= chunk;
skb->data_len -= chunk;
__skb_pull(frag, chunk);
break;
} else if (frag->len) {
/* Pulling all frag data */
chunk -= frag->len;
skb->len -= frag->len;
skb->data_len -= frag->len;
__skb_pull(frag, frag->len);
}
}
}
if (skb->len) {
skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
kfree_skb(skb);
} else {
/* put message back and return */
skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
} while (size);
out:
release_sock(sk);
return copied ? : err;
} | 1 | [
"CWE-20",
"CWE-269"
]
| linux | f3d3342602f8bcbf37d7c46641cb9bca7618eb1c | 233,543,333,707,926,600,000,000,000,000,000,000,000 | 105 | net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove the check
"if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
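A user-space sketch of the contract this commit enforces (the kernel structures are simplified here): msg_namelen starts at 0, and a handler only sets it, to at most sizeof(struct sockaddr_storage), when it actually fills msg_name.

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* the caller pre-sets *msg_namelen = 0; the handler fills the name only
 * on success, so uninitialized bytes can never reach user space */
static void fill_peer_name(void *msg_name, socklen_t *msg_namelen,
                           const struct sockaddr_in *peer)
{
    if (msg_name == NULL)             /* plain read(): no address wanted */
        return;
    memcpy(msg_name, peer, sizeof(*peer));
    *msg_namelen = sizeof(*peer);     /* <= sizeof(struct sockaddr_storage) */
}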
void usb_ehci_finalize(EHCIState *s)
{
usb_packet_cleanup(&s->ipacket);
} | 0 | [
"CWE-772",
"CWE-401"
]
| qemu | d710e1e7bd3d5bfc26b631f02ae87901ebe646b0 | 234,747,778,953,437,900,000,000,000,000,000,000,000 | 4 | usb: ehci: fix memory leak in ehci
The usb_ehci_init function initializes 's->ipacket', but there
is no corresponding function to free it. As the ehci device can be
hotplugged and unplugged, this leaks host memory. In order to make the
hierarchy clean, we should add a ehci pci finalize function, then call
the clean function in ehci device.
Signed-off-by: Li Qiang <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]> |
static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
{
int rv;
bmc->dyn_id_set = 2;
intf->null_user_handler = bmc_device_id_handler;
rv = send_get_device_id_cmd(intf);
if (rv)
return rv;
wait_event(intf->waitq, bmc->dyn_id_set != 2);
if (!bmc->dyn_id_set)
rv = -EIO; /* Something went wrong in the fetch. */
/* dyn_id_set makes the id data available. */
smp_rmb();
intf->null_user_handler = NULL;
return rv;
} | 0 | [
"CWE-416",
"CWE-284"
]
| linux | 77f8269606bf95fcb232ee86f6da80886f1dfae8 | 139,153,979,273,964,550,000,000,000,000,000,000,000 | 24 | ipmi: fix use-after-free of user->release_barrier.rda
When we do the following test, we got oops in ipmi_msghandler driver
while((1))
do
service ipmievd restart & service ipmievd restart
done
---------------------------------------------------------------
[ 294.230186] Unable to handle kernel paging request at virtual address 0000803fea6ea008
[ 294.230188] Mem abort info:
[ 294.230190] ESR = 0x96000004
[ 294.230191] Exception class = DABT (current EL), IL = 32 bits
[ 294.230193] SET = 0, FnV = 0
[ 294.230194] EA = 0, S1PTW = 0
[ 294.230195] Data abort info:
[ 294.230196] ISV = 0, ISS = 0x00000004
[ 294.230197] CM = 0, WnR = 0
[ 294.230199] user pgtable: 4k pages, 48-bit VAs, pgdp = 00000000a1c1b75a
[ 294.230201] [0000803fea6ea008] pgd=0000000000000000
[ 294.230204] Internal error: Oops: 96000004 [#1] SMP
[ 294.235211] Modules linked in: nls_utf8 isofs rpcrdma ib_iser ib_srpt target_core_mod ib_srp scsi_transport_srp ib_ipoib rdma_ucm ib_umad rdma_cm ib_cm iw_cm dm_mirror dm_region_hash dm_log dm_mod aes_ce_blk crypto_simd cryptd aes_ce_cipher ghash_ce sha2_ce ses sha256_arm64 sha1_ce hibmc_drm hisi_sas_v2_hw enclosure sg hisi_sas_main sbsa_gwdt ip_tables mlx5_ib ib_uverbs marvell ib_core mlx5_core ixgbe ipmi_si mdio hns_dsaf ipmi_devintf ipmi_msghandler hns_enet_drv hns_mdio
[ 294.277745] CPU: 3 PID: 0 Comm: swapper/3 Kdump: loaded Not tainted 5.0.0-rc2+ #113
[ 294.285511] Hardware name: Huawei TaiShan 2280 /BC11SPCD, BIOS 1.37 11/21/2017
[ 294.292835] pstate: 80000005 (Nzcv daif -PAN -UAO)
[ 294.297695] pc : __srcu_read_lock+0x38/0x58
[ 294.301940] lr : acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler]
[ 294.307853] sp : ffff00001001bc80
[ 294.311208] x29: ffff00001001bc80 x28: ffff0000117e5000
[ 294.316594] x27: 0000000000000000 x26: dead000000000100
[ 294.321980] x25: dead000000000200 x24: ffff803f6bd06800
[ 294.327366] x23: 0000000000000000 x22: 0000000000000000
[ 294.332752] x21: ffff00001001bd04 x20: ffff80df33d19018
[ 294.338137] x19: ffff80df33d19018 x18: 0000000000000000
[ 294.343523] x17: 0000000000000000 x16: 0000000000000000
[ 294.348908] x15: 0000000000000000 x14: 0000000000000002
[ 294.354293] x13: 0000000000000000 x12: 0000000000000000
[ 294.359679] x11: 0000000000000000 x10: 0000000000100000
[ 294.365065] x9 : 0000000000000000 x8 : 0000000000000004
[ 294.370451] x7 : 0000000000000000 x6 : ffff80df34558678
[ 294.375836] x5 : 000000000000000c x4 : 0000000000000000
[ 294.381221] x3 : 0000000000000001 x2 : 0000803fea6ea000
[ 294.386607] x1 : 0000803fea6ea008 x0 : 0000000000000001
[ 294.391994] Process swapper/3 (pid: 0, stack limit = 0x0000000083087293)
[ 294.398791] Call trace:
[ 294.401266] __srcu_read_lock+0x38/0x58
[ 294.405154] acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler]
[ 294.410716] deliver_response+0x80/0xf8 [ipmi_msghandler]
[ 294.416189] deliver_local_response+0x28/0x68 [ipmi_msghandler]
[ 294.422193] handle_one_recv_msg+0x158/0xcf8 [ipmi_msghandler]
[ 294.432050] handle_new_recv_msgs+0xc0/0x210 [ipmi_msghandler]
[ 294.441984] smi_recv_tasklet+0x8c/0x158 [ipmi_msghandler]
[ 294.451618] tasklet_action_common.isra.5+0x88/0x138
[ 294.460661] tasklet_action+0x2c/0x38
[ 294.468191] __do_softirq+0x120/0x2f8
[ 294.475561] irq_exit+0x134/0x140
[ 294.482445] __handle_domain_irq+0x6c/0xc0
[ 294.489954] gic_handle_irq+0xb8/0x178
[ 294.497037] el1_irq+0xb0/0x140
[ 294.503381] arch_cpu_idle+0x34/0x1a8
[ 294.510096] do_idle+0x1d4/0x290
[ 294.516322] cpu_startup_entry+0x28/0x30
[ 294.523230] secondary_start_kernel+0x184/0x1d0
[ 294.530657] Code: d538d082 d2800023 8b010c81 8b020021 (c85f7c25)
[ 294.539746] ---[ end trace 8a7a880dee570b29 ]---
[ 294.547341] Kernel panic - not syncing: Fatal exception in interrupt
[ 294.556837] SMP: stopping secondary CPUs
[ 294.563996] Kernel Offset: disabled
[ 294.570515] CPU features: 0x002,21006008
[ 294.577638] Memory Limit: none
[ 294.587178] Starting crashdump kernel...
[ 294.594314] Bye!
Because the user->release_barrier.rda is freed in ipmi_destroy_user(), but
the refcount is not zero, when acquire_ipmi_user() uses user->release_barrier.rda
in __srcu_read_lock(), it causes oops.
Fix this by calling cleanup_srcu_struct() when the refcount is zero.
Fixes: e86ee2d44b44 ("ipmi: Rework locking and shutdown for hot remove")
Cc: [email protected] # 4.18
Signed-off-by: Yang Yingliang <[email protected]>
Signed-off-by: Corey Minyard <[email protected]> |
AP_DECLARE(int) ap_update_child_status_from_conn(ap_sb_handle_t *sbh, int status,
conn_rec *c)
{
if (!sbh || (sbh->child_num < 0))
return -1;
return update_child_status_internal(sbh->child_num, sbh->thread_num,
status, c, NULL, NULL, NULL);
} | 0 | [
"CWE-476"
]
| httpd | fa7b2a5250e54363b3a6c8ac3aaa7de4e8da9b2e | 291,574,447,097,314,420,000,000,000,000,000,000,000 | 9 | Merge r1878092 from trunk:
Fix a NULL pointer dereference
* server/scoreboard.c (ap_increment_counts): In certain cases like certain
invalid requests r->method might be NULL here. r->method_number defaults
to M_GET and hence is M_GET in these cases.
Submitted by: rpluem
Reviewed by: covener, ylavic, jfclere
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1893051 13f79535-47bb-0310-9956-ffa450edef68 |
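A hedged sketch of the guard this change implies (names follow the message; the real patch lives in ap_increment_counts): test r->method for NULL and fall back to the method number, which defaults to M_GET.

#include <stddef.h>

#define M_GET 0   /* httpd's default method number, per the message above */

static const char *method_name(const char *method, int method_number)
{
    if (method != NULL)
        return method;                    /* normal, parsed request line */
    /* invalid request: method string is NULL, number defaulted to M_GET */
    return method_number == M_GET ? "GET" : "UNKNOWN";
}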
connection_need_new_password(const Connection *conn, const Operation *op, Slapi_PBlock *pb)
{
int r = 0;
/*
* add tag != LDAP_REQ_SEARCH to allow admin server 3.5 to do
* searches when the user needs to reset
* the pw the first time logon.
* LP: 22 Dec 2000: Removing LDAP_REQ_SEARCH. It's very unlikely that AS 3.5 will
* be used to manage DS5.0
*/
if (conn->c_needpw && op->o_tag != LDAP_REQ_MODIFY &&
op->o_tag != LDAP_REQ_BIND && op->o_tag != LDAP_REQ_UNBIND &&
op->o_tag != LDAP_REQ_ABANDON && op->o_tag != LDAP_REQ_EXTENDED) {
slapi_add_pwd_control(pb, LDAP_CONTROL_PWEXPIRED, 0);
slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d %s\n",
conn->c_connid, op->o_opid,
"UNPROCESSED OPERATION - need new password");
send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM,
NULL, NULL, 0, NULL);
r = 1;
}
return r;
} | 0 | [
"CWE-415"
]
| 389-ds-base | a3c298f8140d3e4fa1bd5a670f1bb965a21a9b7b | 282,277,275,375,013,650,000,000,000,000,000,000,000 | 24 | Issue 5218 - double-free of the virtual attribute context in persistent search (#5219)
description:
A search is processed by a worker using a private pblock.
If the search is persistent, the worker spawn a thread
and kind of duplicate its private pblock so that the spawn
thread continue to process the persistent search.
Then worker ends the initial search, reinit (free) its private pblock,
and returns monitoring the wait_queue.
When the persistent search completes, it frees the duplicated
pblock.
The problem is that private pblock and duplicated pblock
are referring to the same structure (pb_vattr_context).
That can lead to a double free
Fix:
When cloning the pblock (slapi_pblock_clone) make sure
to transfer the references inside the original (private)
pblock to the target (cloned) one
That includes pb_vattr_context pointer.
Reviewed by: Mark Reynolds, James Chapman, Pierre Rogier (Thanks !)
Co-authored-by: Mark Reynolds <[email protected]> |
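The ownership-transfer fix reduces to a small, general pattern, sketched here with an illustrative structure rather than the real pblock type: the clone steals the pointer and the source is NULLed, so exactly one owner ever frees it.

#include <stddef.h>

struct pblock_like {
    void *vattr_context;   /* heap object that must be freed exactly once */
};

static void pblock_clone(struct pblock_like *dst, struct pblock_like *src)
{
    dst->vattr_context = src->vattr_context;
    src->vattr_context = NULL;   /* the source no longer owns the context */
}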
static PHP_MINIT_FUNCTION(zip)
{
zend_class_entry ce;
memcpy(&zip_object_handlers, zend_get_std_object_handlers(), sizeof(zend_object_handlers));
zip_object_handlers.offset = XtOffsetOf(ze_zip_object, zo);
zip_object_handlers.free_obj = php_zip_object_free_storage;
zip_object_handlers.clone_obj = NULL;
zip_object_handlers.get_property_ptr_ptr = php_zip_get_property_ptr_ptr;
zip_object_handlers.get_properties = php_zip_get_properties;
zip_object_handlers.read_property = php_zip_read_property;
zip_object_handlers.has_property = php_zip_has_property;
INIT_CLASS_ENTRY(ce, "ZipArchive", zip_class_functions);
ce.create_object = php_zip_object_new;
zip_class_entry = zend_register_internal_class(&ce);
zend_hash_init(&zip_prop_handlers, 0, NULL, php_zip_free_prop_handler, 1);
php_zip_register_prop_handler(&zip_prop_handlers, "status", php_zip_status, NULL, NULL, IS_LONG);
php_zip_register_prop_handler(&zip_prop_handlers, "statusSys", php_zip_status_sys, NULL, NULL, IS_LONG);
php_zip_register_prop_handler(&zip_prop_handlers, "numFiles", php_zip_get_num_files, NULL, NULL, IS_LONG);
php_zip_register_prop_handler(&zip_prop_handlers, "filename", NULL, NULL, php_zipobj_get_filename, IS_STRING);
php_zip_register_prop_handler(&zip_prop_handlers, "comment", NULL, php_zipobj_get_zip_comment, NULL, IS_STRING);
REGISTER_ZIP_CLASS_CONST_LONG("CREATE", ZIP_CREATE);
REGISTER_ZIP_CLASS_CONST_LONG("EXCL", ZIP_EXCL);
REGISTER_ZIP_CLASS_CONST_LONG("CHECKCONS", ZIP_CHECKCONS);
REGISTER_ZIP_CLASS_CONST_LONG("OVERWRITE", ZIP_OVERWRITE);
REGISTER_ZIP_CLASS_CONST_LONG("FL_NOCASE", ZIP_FL_NOCASE);
REGISTER_ZIP_CLASS_CONST_LONG("FL_NODIR", ZIP_FL_NODIR);
REGISTER_ZIP_CLASS_CONST_LONG("FL_COMPRESSED", ZIP_FL_COMPRESSED);
REGISTER_ZIP_CLASS_CONST_LONG("FL_UNCHANGED", ZIP_FL_UNCHANGED);
REGISTER_ZIP_CLASS_CONST_LONG("CM_DEFAULT", ZIP_CM_DEFAULT);
REGISTER_ZIP_CLASS_CONST_LONG("CM_STORE", ZIP_CM_STORE);
REGISTER_ZIP_CLASS_CONST_LONG("CM_SHRINK", ZIP_CM_SHRINK);
REGISTER_ZIP_CLASS_CONST_LONG("CM_REDUCE_1", ZIP_CM_REDUCE_1);
REGISTER_ZIP_CLASS_CONST_LONG("CM_REDUCE_2", ZIP_CM_REDUCE_2);
REGISTER_ZIP_CLASS_CONST_LONG("CM_REDUCE_3", ZIP_CM_REDUCE_3);
REGISTER_ZIP_CLASS_CONST_LONG("CM_REDUCE_4", ZIP_CM_REDUCE_4);
REGISTER_ZIP_CLASS_CONST_LONG("CM_IMPLODE", ZIP_CM_IMPLODE);
REGISTER_ZIP_CLASS_CONST_LONG("CM_DEFLATE", ZIP_CM_DEFLATE);
REGISTER_ZIP_CLASS_CONST_LONG("CM_DEFLATE64", ZIP_CM_DEFLATE64);
REGISTER_ZIP_CLASS_CONST_LONG("CM_PKWARE_IMPLODE", ZIP_CM_PKWARE_IMPLODE);
REGISTER_ZIP_CLASS_CONST_LONG("CM_BZIP2", ZIP_CM_BZIP2);
REGISTER_ZIP_CLASS_CONST_LONG("CM_LZMA", ZIP_CM_LZMA);
REGISTER_ZIP_CLASS_CONST_LONG("CM_TERSE", ZIP_CM_TERSE);
REGISTER_ZIP_CLASS_CONST_LONG("CM_LZ77", ZIP_CM_LZ77);
REGISTER_ZIP_CLASS_CONST_LONG("CM_WAVPACK", ZIP_CM_WAVPACK);
REGISTER_ZIP_CLASS_CONST_LONG("CM_PPMD", ZIP_CM_PPMD);
/* Error code */
REGISTER_ZIP_CLASS_CONST_LONG("ER_OK", ZIP_ER_OK); /* N No error */
REGISTER_ZIP_CLASS_CONST_LONG("ER_MULTIDISK", ZIP_ER_MULTIDISK); /* N Multi-disk zip archives not supported */
REGISTER_ZIP_CLASS_CONST_LONG("ER_RENAME", ZIP_ER_RENAME); /* S Renaming temporary file failed */
REGISTER_ZIP_CLASS_CONST_LONG("ER_CLOSE", ZIP_ER_CLOSE); /* S Closing zip archive failed */
REGISTER_ZIP_CLASS_CONST_LONG("ER_SEEK", ZIP_ER_SEEK); /* S Seek error */
REGISTER_ZIP_CLASS_CONST_LONG("ER_READ", ZIP_ER_READ); /* S Read error */
REGISTER_ZIP_CLASS_CONST_LONG("ER_WRITE", ZIP_ER_WRITE); /* S Write error */
REGISTER_ZIP_CLASS_CONST_LONG("ER_CRC", ZIP_ER_CRC); /* N CRC error */
REGISTER_ZIP_CLASS_CONST_LONG("ER_ZIPCLOSED", ZIP_ER_ZIPCLOSED); /* N Containing zip archive was closed */
REGISTER_ZIP_CLASS_CONST_LONG("ER_NOENT", ZIP_ER_NOENT); /* N No such file */
REGISTER_ZIP_CLASS_CONST_LONG("ER_EXISTS", ZIP_ER_EXISTS); /* N File already exists */
REGISTER_ZIP_CLASS_CONST_LONG("ER_OPEN", ZIP_ER_OPEN); /* S Can't open file */
REGISTER_ZIP_CLASS_CONST_LONG("ER_TMPOPEN", ZIP_ER_TMPOPEN); /* S Failure to create temporary file */
REGISTER_ZIP_CLASS_CONST_LONG("ER_ZLIB", ZIP_ER_ZLIB); /* Z Zlib error */
REGISTER_ZIP_CLASS_CONST_LONG("ER_MEMORY", ZIP_ER_MEMORY); /* N Malloc failure */
REGISTER_ZIP_CLASS_CONST_LONG("ER_CHANGED", ZIP_ER_CHANGED); /* N Entry has been changed */
REGISTER_ZIP_CLASS_CONST_LONG("ER_COMPNOTSUPP", ZIP_ER_COMPNOTSUPP);/* N Compression method not supported */
REGISTER_ZIP_CLASS_CONST_LONG("ER_EOF", ZIP_ER_EOF); /* N Premature EOF */
REGISTER_ZIP_CLASS_CONST_LONG("ER_INVAL", ZIP_ER_INVAL); /* N Invalid argument */
REGISTER_ZIP_CLASS_CONST_LONG("ER_NOZIP", ZIP_ER_NOZIP); /* N Not a zip archive */
REGISTER_ZIP_CLASS_CONST_LONG("ER_INTERNAL", ZIP_ER_INTERNAL); /* N Internal error */
REGISTER_ZIP_CLASS_CONST_LONG("ER_INCONS", ZIP_ER_INCONS); /* N Zip archive inconsistent */
REGISTER_ZIP_CLASS_CONST_LONG("ER_REMOVE", ZIP_ER_REMOVE); /* S Can't remove file */
REGISTER_ZIP_CLASS_CONST_LONG("ER_DELETED", ZIP_ER_DELETED); /* N Entry has been deleted */
#ifdef ZIP_OPSYS_DEFAULT
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_DOS", ZIP_OPSYS_DOS);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_AMIGA", ZIP_OPSYS_AMIGA);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_OPENVMS", ZIP_OPSYS_OPENVMS);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_UNIX", ZIP_OPSYS_UNIX);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_VM_CMS", ZIP_OPSYS_VM_CMS);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_ATARI_ST", ZIP_OPSYS_ATARI_ST);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_OS_2", ZIP_OPSYS_OS_2);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_MACINTOSH", ZIP_OPSYS_MACINTOSH);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_Z_SYSTEM", ZIP_OPSYS_Z_SYSTEM);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_Z_CPM", ZIP_OPSYS_CPM);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_WINDOWS_NTFS", ZIP_OPSYS_WINDOWS_NTFS);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_MVS", ZIP_OPSYS_MVS);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_VSE", ZIP_OPSYS_VSE);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_ACORN_RISC", ZIP_OPSYS_ACORN_RISC);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_VFAT", ZIP_OPSYS_VFAT);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_ALTERNATE_MVS", ZIP_OPSYS_ALTERNATE_MVS);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_BEOS", ZIP_OPSYS_BEOS);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_TANDEM", ZIP_OPSYS_TANDEM);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_OS_400", ZIP_OPSYS_OS_400);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_OS_X", ZIP_OPSYS_OS_X);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_DEFAULT", ZIP_OPSYS_DEFAULT);
#endif /* ifdef ZIP_OPSYS_DEFAULT */
php_register_url_stream_wrapper("zip", &php_stream_zip_wrapper);
le_zip_dir = zend_register_list_destructors_ex(php_zip_free_dir, NULL, le_zip_dir_name, module_number);
le_zip_entry = zend_register_list_destructors_ex(php_zip_free_entry, NULL, le_zip_entry_name, module_number);
return SUCCESS;
} | 0 | [
"CWE-190"
]
| php-src | 3b8d4de300854b3517c7acb239b84f7726c1353c | 314,560,657,332,368,670,000,000,000,000,000,000,000 | 110 | Fix bug #71923 - integer overflow in ZipArchive::getFrom* |
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
return kvm_is_visible_memslot(memslot);
} | 0 | [
"CWE-401"
]
| linux | f65886606c2d3b562716de030706dfe1bea4ed5e | 22,331,803,368,630,735,000,000,000,000,000,000,000 | 6 | KVM: fix memory leak in kvm_io_bus_unregister_dev()
When kmalloc() fails in kvm_io_bus_unregister_dev(), before removing
the bus, we should iterate over all other devices linked to it and call
kvm_iodevice_destructor() for them
Fixes: 90db10434b16 ("KVM: kvm_io_bus_unregister_dev() should never fail")
Cc: [email protected]
Reported-and-tested-by: [email protected]
Link: https://syzkaller.appspot.com/bug?extid=f196caa45793d6374707
Signed-off-by: Rustam Kovhaev <[email protected]>
Reviewed-by: Vitaly Kuznetsov <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
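A simplified sketch of the leak fix's idea, with illustrative types rather than the real kvm_io_bus structures: when the bus goes away, every device still linked to it gets its destructor called first.

#include <stdlib.h>

struct io_dev {
    void (*destructor)(struct io_dev *dev);
};

struct io_bus {
    int dev_count;
    struct io_dev *devs[16];   /* illustrative fixed-size device table */
};

static void bus_destroy(struct io_bus *bus)   /* bus is heap-allocated */
{
    for (int i = 0; i < bus->dev_count; i++)
        if (bus->devs[i] && bus->devs[i]->destructor)
            bus->devs[i]->destructor(bus->devs[i]);
    free(bus);   /* nothing is left dangling even on the error path */
}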
future<std::unique_ptr<cql_server::response>> cql_server::connection::process_options(uint16_t stream, request_reader in, service::client_state& client_state,
tracing::trace_state_ptr trace_state) {
++_server._stats.options_requests;
return make_ready_future<std::unique_ptr<cql_server::response>>(make_supported(stream, std::move(trace_state)));
} | 0 | []
| scylladb | 1c2eef384da439b0457b6d71c7e37d7268e471cb | 15,659,950,096,250,288,000,000,000,000,000,000,000 | 5 | transport/server.cc: Return correct size of decompressed lz4 buffer
An incorrect size is returned from the function, which could lead to
crashes or undefined behavior. Fix by erroring out in these cases.
Fixes #11476 |
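In liblz4 terms the fix reduces to: trust only the size the decompressor actually reports, and reject any mismatch. A sketch using the real LZ4_decompress_safe() signature:

#include <lz4.h>

static int decompress_exact(const char *src, int src_len,
                            char *dst, int expected_len)
{
    int n = LZ4_decompress_safe(src, dst, src_len, expected_len);
    if (n < 0 || n != expected_len)
        return -1;      /* corrupt or lying frame: error out */
    return n;           /* the correct decompressed size */
}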
static void muladd(uECC_word_t a,
uECC_word_t b,
uECC_word_t *r0,
uECC_word_t *r1,
uECC_word_t *r2) {
#if uECC_WORD_SIZE == 8 && !SUPPORTS_INT128
uint64_t a0 = a & 0xffffffffull;
uint64_t a1 = a >> 32;
uint64_t b0 = b & 0xffffffffull;
uint64_t b1 = b >> 32;
uint64_t i0 = a0 * b0;
uint64_t i1 = a0 * b1;
uint64_t i2 = a1 * b0;
uint64_t i3 = a1 * b1;
uint64_t p0, p1;
i2 += (i0 >> 32);
i2 += i1;
if (i2 < i1) { /* overflow */
i3 += 0x100000000ull;
}
p0 = (i0 & 0xffffffffull) | (i2 << 32);
p1 = i3 + (i2 >> 32);
*r0 += p0;
*r1 += (p1 + (*r0 < p0));
*r2 += ((*r1 < p1) || (*r1 == p1 && *r0 < p0));
#else
uECC_dword_t p = (uECC_dword_t)a * b;
uECC_dword_t r01 = ((uECC_dword_t)(*r1) << uECC_WORD_BITS) | *r0;
r01 += p;
*r2 += (r01 < p);
*r1 = r01 >> uECC_WORD_BITS;
*r0 = (uECC_word_t)r01;
#endif
} | 0 | [
"CWE-415"
]
| micro-ecc | 1b5f5cea5145c96dd8791b9b2c41424fc74c2172 | 117,370,178,775,554,800,000,000,000,000,000,000,000 | 39 | Fix for #168 |
static void maybe_wait_bpf_programs(struct bpf_map *map)
{
/* Wait for any running BPF programs to complete so that
* userspace, when we return to it, knows that all programs
* that could be running use the new map value.
*/
if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
synchronize_rcu();
} | 0 | [
"CWE-307"
]
| linux | 350a5c4dd2452ea999cc5e1d4a8dbf12de2f97ef | 74,488,173,925,722,530,000,000,000,000,000,000,000 | 10 | bpf: Don't allow vmlinux BTF to be used in map_create and prog_load.
Syzbot got an FD for the vmlinux BTF and passed it into map_create, which
caused a crash in btf_type_id_size() when it tried to access resolved_ids. The vmlinux
BTF doesn't have 'resolved_ids' and 'resolved_sizes' initialized to save
memory. To avoid such issues disallow using vmlinux BTF in prog_load and
map_create commands.
Fixes: 5329722057d4 ("bpf: Assign ID to vmlinux BTF and return extra info for BTF in GET_OBJ_INFO")
Reported-by: [email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Yonghong Song <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected] |
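A hedged sketch of the guard the message describes; btf_is_kernel() is a real kernel helper, but the wrapper and the choice of error code here are illustrative.

#include <errno.h>
#include <stdbool.h>

struct btf;                                    /* opaque, as in the kernel */
extern bool btf_is_kernel(const struct btf *btf);

static int check_user_btf(const struct btf *btf)
{
    if (btf_is_kernel(btf))
        return -EACCES;   /* vmlinux BTF lacks resolved_ids/resolved_sizes */
    return 0;
}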
nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
struct nfsd3_writeargs *args)
{
unsigned int len, v, hdr, dlen;
u32 max_blocksize = svc_max_payload(rqstp);
struct kvec *head = rqstp->rq_arg.head;
struct kvec *tail = rqstp->rq_arg.tail;
p = decode_fh(p, &args->fh);
if (!p)
return 0;
p = xdr_decode_hyper(p, &args->offset);
args->count = ntohl(*p++);
args->stable = ntohl(*p++);
len = args->len = ntohl(*p++);
/*
* The count must equal the amount of data passed.
*/
if (args->count != args->len)
return 0;
/*
* Check to make sure that we got the right number of
* bytes.
*/
hdr = (void*)p - head->iov_base;
dlen = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len - hdr;
/*
* Round the length of the data which was specified up to
* the next multiple of XDR units and then compare that
* against the length which was actually received.
* Note that when RPCSEC/GSS (for example) is used, the
* data buffer can be padded so dlen might be larger
* than required. It must never be smaller.
*/
if (dlen < XDR_QUADLEN(len)*4)
return 0;
if (args->count > max_blocksize) {
args->count = max_blocksize;
len = args->len = max_blocksize;
}
rqstp->rq_vec[0].iov_base = (void*)p;
rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
v = 0;
while (len > rqstp->rq_vec[v].iov_len) {
len -= rqstp->rq_vec[v].iov_len;
v++;
rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_pages[v]);
rqstp->rq_vec[v].iov_len = PAGE_SIZE;
}
rqstp->rq_vec[v].iov_len = len;
args->vlen = v + 1;
return 1;
} | 1 | [
"CWE-119",
"CWE-703"
]
| linux | 13bf9fbff0e5e099e2b6f003a0ab8ae145436309 | 208,457,650,079,160,100,000,000,000,000,000,000,000 | 56 | nfsd: stricter decoding of write-like NFSv2/v3 ops
The NFSv2/v3 code does not systematically check whether we decode past
the end of the buffer. This generally appears to be harmless, but there
are a few places where we do arithmetic on the pointers involved and
don't account for the possibility that a length could be negative. Add
checks to catch these.
Reported-by: Tuomas Haanpää <[email protected]>
Reported-by: Ari Kauppi <[email protected]>
Reviewed-by: NeilBrown <[email protected]>
Cc: [email protected]
Signed-off-by: J. Bruce Fields <[email protected]> |
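The stricter decoding boils down to one reusable check, sketched here: treat every decoded length as untrusted and verify that its XDR-padded form still fits in the bytes actually received.

#include <stddef.h>
#include <stdint.h>

static int decoded_len_ok(uint32_t decoded_len, size_t remaining)
{
    /* XDR rounds data up to 4-byte units; the padded length must fit */
    size_t padded = ((size_t)decoded_len + 3) & ~(size_t)3;
    return padded >= decoded_len && padded <= remaining;
}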
/* Get the unique message id associated with a standard sequential message number */
PHP_FUNCTION(imap_uid)
{
zval *streamind;
zend_long msgno;
pils *imap_le_struct;
int msgindex;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "rl", &streamind, &msgno) == FAILURE) {
return;
}
if ((imap_le_struct = (pils *)zend_fetch_resource(Z_RES_P(streamind), "imap", le_imap)) == NULL) {
RETURN_FALSE;
}
msgindex = msgno;
if ((msgindex < 1) || ((unsigned) msgindex > imap_le_struct->imap_stream->nmsgs)) {
php_error_docref(NULL, E_WARNING, "Bad message number");
RETURN_FALSE;
}
RETURN_LONG(mail_uid(imap_le_struct->imap_stream, msgno)); | 0 | [
"CWE-88"
]
| php-src | 336d2086a9189006909ae06c7e95902d7d5ff77e | 110,050,171,788,900,350,000,000,000,000,000,000,000 | 23 | Disable rsh/ssh functionality in imap by default (bug #77153) |
const char *configEnumGetNameOrUnknown(configEnum *ce, int val) {
const char *name = configEnumGetName(ce,val);
return name ? name : "unknown";
} | 0 | [
"CWE-119",
"CWE-787"
]
| redis | 6d9f8e2462fc2c426d48c941edeb78e5df7d2977 | 107,298,634,792,813,560,000,000,000,000,000,000,000 | 4 | Security: CONFIG SET client-output-buffer-limit overflow fixed.
This commit fixes a vulnerability reported by Cory Duplantis
of Cisco Talos, see TALOS-2016-0206 for reference.
CONFIG SET client-output-buffer-limit accepts as client class "master"
which is actually only used to implement CLIENT KILL. The "master" class
has ID 3. What happens is that the global structure:
server.client_obuf_limits[class]
Is accessed with class = 3. However it is a 3 elements array, so writing
the 4th element means to write up to 24 bytes of memory *after* the end
of the array, since the structure is defined as:
typedef struct clientBufferLimitsConfig {
unsigned long long hard_limit_bytes;
unsigned long long soft_limit_bytes;
time_t soft_limit_seconds;
} clientBufferLimitsConfig;
EVALUATION OF IMPACT:
Checking what's past the boundaries of the array in the global
'server' structure, we find AOF state fields:
clientBufferLimitsConfig client_obuf_limits[CLIENT_TYPE_OBUF_COUNT];
/* AOF persistence */
int aof_state; /* AOF_(ON|OFF|WAIT_REWRITE) */
int aof_fsync; /* Kind of fsync() policy */
char *aof_filename; /* Name of the AOF file */
int aof_no_fsync_on_rewrite; /* Don't fsync if a rewrite is in prog. */
int aof_rewrite_perc; /* Rewrite AOF if % growth is > M and... */
off_t aof_rewrite_min_size; /* the AOF file is at least N bytes. */
off_t aof_rewrite_base_size; /* AOF size on latest startup or rewrite. */
off_t aof_current_size; /* AOF current size. */
Writing to most of these fields should be harmless and only cause problems in
Redis persistence that should not escalate to security problems.
However unfortunately writing to "aof_filename" could be potentially a
security issue depending on the access pattern.
Searching for "aof.filename" accesses in the source code returns many different
usages of the field, including using it as input for open(), logging to the
Redis log file or syslog, and calling the rename() syscall.
It looks possible that attacks could lead at least to information
disclosure of the state and data inside Redis. However note that the
attacker must already have access to the server. But, worse than that,
it looks possible that being able to change the AOF filename can be used
to mount more powerful attacks: like overwriting random files with AOF
data (easily a potential security issue as demonstrated here:
http://antirez.com/news/96), or even more subtle attacks where the
AOF filename is changed to a path where a malicious AOF file is loaded
in order to exploit other potential issues when the AOF parser is fed
with untrusted input (no such issue is currently known).
The fix checks the places where the 'master' class is specified in
order to access configuration data structures, and returns an error in
these cases.
WHO IS AT RISK?
The "master" client class was introduced in Redis in Jul 28 2015.
Every Redis instance released before this date is not vulnerable
while all the releases after this date are. Notably:
Redis 3.0.x is NOT vulnerable.
Redis 3.2.x IS vulnerable.
Redis unstable is vulnerable.
In order for the instance to be at risk, at least one of the following
conditions must be true:
1. The attacker can access Redis remotely and is able to send
the CONFIG SET command (often banned in managed Redis instances).
2. The attacker is able to control the "redis.conf" file and
can wait or trigger a server restart.
The problem was fixed 26th September 2016 in all the releases affected. |
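The essence of the fix is one bounds check before the array write, sketched here with the constant named in the message (the wrapper itself is illustrative):

#define CLIENT_TYPE_OBUF_COUNT 3   /* normal, slave, pubsub */

/* CLIENT KILL accepts "master" (class id 3), but client_obuf_limits[]
 * only has CLIENT_TYPE_OBUF_COUNT entries, so reject that class here */
static int valid_obuf_class(int class)
{
    return class >= 0 && class < CLIENT_TYPE_OBUF_COUNT;
}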
static inline int get_archive_read(struct archive* a,
struct archive_read** ar)
{
*ar = (struct archive_read*) a;
archive_check_magic(a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
"archive_read_support_format_rar5");
return ARCHIVE_OK;
} | 0 | [
"CWE-20",
"CWE-125"
]
| libarchive | 94821008d6eea81e315c5881cdf739202961040a | 259,140,978,845,878,540,000,000,000,000,000,000,000 | 9 | RAR5 reader: reject files that declare invalid header flags
One of the fields in RAR5's base block structure is the size of the
header. Some invalid files declare a 0 header size setting, which can
confuse the unpacker. Minimum header size for RAR5 base blocks is 7
bytes (4 bytes for CRC, and 3 bytes for the rest), so block size of 0
bytes should be rejected at header parsing stage.
The fix adds an error condition if header size of 0 bytes is detected.
In this case, the unpacker will not attempt to unpack the file, as the
header is corrupted.
The commit also adds OSSFuzz #20459 sample to test further regressions
in this area. |
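As a sketch, the added error condition is a single minimum-size test applied while parsing the base block header (the constant follows the message):

#include <stdint.h>

#define RAR5_MIN_BLOCK_SIZE 7   /* 4 CRC bytes + 3 bytes for the rest */

static int base_block_size_ok(uint64_t declared_size)
{
    return declared_size >= RAR5_MIN_BLOCK_SIZE;   /* 0 is rejected */
}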
static void ssd0323_save(QEMUFile *f, void *opaque)
{
SSISlave *ss = SSI_SLAVE(opaque);
ssd0323_state *s = (ssd0323_state *)opaque;
int i;
qemu_put_be32(f, s->cmd_len);
qemu_put_be32(f, s->cmd);
for (i = 0; i < 8; i++)
qemu_put_be32(f, s->cmd_data[i]);
qemu_put_be32(f, s->row);
qemu_put_be32(f, s->row_start);
qemu_put_be32(f, s->row_end);
qemu_put_be32(f, s->col);
qemu_put_be32(f, s->col_start);
qemu_put_be32(f, s->col_end);
qemu_put_be32(f, s->redraw);
qemu_put_be32(f, s->remap);
qemu_put_be32(f, s->mode);
qemu_put_buffer(f, s->framebuffer, sizeof(s->framebuffer));
qemu_put_be32(f, ss->cs);
} | 0 | [
"CWE-119"
]
| qemu | ead7a57df37d2187813a121308213f41591bd811 | 136,410,011,994,961,070,000,000,000,000,000,000,000 | 23 | ssd0323: fix buffer overrun on invalid state load
CVE-2013-4538
s->cmd_len is used as an index in ssd0323_transfer() to store a 32-bit field.
This field might then be supplied by the guest to overwrite a
return address somewhere. The same applies to the row/col fields, which are
indices into the framebuffer array.
To fix validate after load.
Additionally, validate that the row/col_start/end are within bounds;
otherwise the guest can provoke an overrun by either setting the _end
field so large that the row++ increments just walk off the end of the
array, or by setting the _start value to something bogus and then
letting the "we hit end of row" logic reset row to row_start.
For completeness, validate mode as well.
Signed-off-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Peter Maydell <[email protected]>
Signed-off-by: Juan Quintela <[email protected]> |
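A hedged sketch of the post-load validation the message calls for; the row/column bounds and mode values here are assumptions about the panel geometry, not values taken from the QEMU source.

#include <stdint.h>

static int in_range(int32_t v, int32_t lo, int32_t hi)
{
    return v >= lo && v <= hi;
}

static int ssd0323_loaded_state_ok(int32_t cmd_len, int32_t row,
                                   int32_t row_start, int32_t row_end,
                                   int32_t col, int32_t col_start,
                                   int32_t col_end, int32_t mode)
{
    return in_range(cmd_len, 0, 8) &&          /* cmd_data[] has 8 slots */
           in_range(row, 0, 79) && in_range(row_start, 0, 79) &&
           in_range(row_end, 0, 79) &&         /* assumed 80 rows */
           in_range(col, 0, 63) && in_range(col_start, 0, 63) &&
           in_range(col_end, 0, 63) &&         /* assumed 64 columns */
           in_range(mode, 0, 1);               /* assumed two modes */
}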
GF_Err gnra_box_write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_GenericAudioSampleEntryBox *ptr = (GF_GenericAudioSampleEntryBox *)s;
//careful we are not writing the box type but the entry type so switch for write
ptr->type = ptr->EntryType;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
ptr->type = GF_ISOM_BOX_TYPE_GNRA;
gf_isom_audio_sample_entry_write((GF_AudioSampleEntryBox *)ptr, bs);
if (ptr->data) {
gf_bs_write_data(bs, ptr->data, ptr->data_size);
}
return GF_OK;
} | 0 | [
"CWE-476",
"CWE-787"
]
| gpac | b8f8b202d4fc23eb0ab4ce71ae96536ca6f5d3f8 | 114,412,391,724,978,940,000,000,000,000,000,000,000 | 17 | fixed #1757 |
static int vhost_user_set_vring_err(struct virtio_net **pdev,
struct vhu_msg_context *ctx,
int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
int expected_fds;
expected_fds = (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
if (validate_msg_fds(dev, ctx, expected_fds) != 0)
return RTE_VHOST_MSG_RESULT_ERR;
if (!(ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK))
close(ctx->fds[0]);
VHOST_LOG_CONFIG(INFO, "(%s) not implemented\n", dev->ifname);
return RTE_VHOST_MSG_RESULT_OK;
} | 0 | [
"CWE-125",
"CWE-787"
]
| dpdk | 6442c329b9d2ded0f44b27d2016aaba8ba5844c5 | 43,609,337,965,956,590,000,000,000,000,000,000,000 | 17 | vhost: fix queue number check when setting inflight FD
In vhost_user_set_inflight_fd, the queue number in the inflight
message is used to access the virtqueue. However, the queue number could
be larger than VHOST_MAX_VRING and cause an out-of-bounds write, as this
number is used to write inflight info into the virtqueue structure. This
patch checks the queue number to avoid the issue and also make
sure virtqueues are allocated before setting inflight information.
Fixes: ad0a4ae491fe ("vhost: checkout resubmit inflight information")
Cc: [email protected]
Reported-by: Wenxiang Qian <[email protected]>
Signed-off-by: Chenbo Xia <[email protected]>
Reviewed-by: Maxime Coquelin <[email protected]> |
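Sketched as a standalone predicate (the constant's value is illustrative; see the DPDK headers): the queue count and each vring index must be validated before the virtqueue array is touched.

#define VHOST_MAX_VRING 256   /* illustrative; the real value is in DPDK */

static int inflight_queue_ok(unsigned int num_queues, unsigned int vring_idx)
{
    return num_queues > 0 && num_queues <= VHOST_MAX_VRING &&
           vring_idx < num_queues;
}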
int udev_monitor_send_device(struct udev_monitor *udev_monitor, struct udev_device *udev_device)
{
const char *buf;
ssize_t len;
ssize_t count;
len = udev_device_get_properties_monitor_buf(udev_device, &buf);
if (len < 32)
return -1;
if (udev_monitor->sun.sun_family != 0)
count = sendto(udev_monitor->sock,
buf, len, 0,
(struct sockaddr *)&udev_monitor->sun,
udev_monitor->addrlen);
else if (udev_monitor->snl.nl_family != 0)
		/* no destination besides the multicast group, we will always get ECONNREFUSED */
count = sendto(udev_monitor->sock,
buf, len, 0,
(struct sockaddr *)&udev_monitor->snl_peer,
sizeof(struct sockaddr_nl));
else
return -1;
info(udev_monitor->udev, "passed %zi bytes to monitor %p, \n", count, udev_monitor);
return count;
} | 1 | [
"CWE-346"
]
| udev | e86a923d508c2aed371cdd958ce82489cf2ab615 | 82,325,051,517,463,695,000,000,000,000,000,000,000 | 26 | libudev: monitor - ignore messages from unusual sources
For added protection, ignore any unicast message received on the
netlink socket or any multicast message on the kernel group not
received from the kernel.
Signed-off-by: Scott James Remnant <[email protected]> |
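A sketch of the receive-side filter the message describes, using the real sockaddr_nl fields: a kernel multicast message has nl_pid == 0 and a non-zero group mask, and anything else is dropped.

#include <sys/socket.h>
#include <linux/netlink.h>

static int from_kernel_multicast(const struct sockaddr_nl *snl)
{
    if (snl->nl_pid != 0)
        return 0;   /* unicast from some user process: ignore it */
    if (snl->nl_groups == 0)
        return 0;   /* not sent to the multicast group: ignore it */
    return 1;
}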
QUtil::strcasecmp(char const *s1, char const *s2)
{
#if defined(_WIN32) && defined(__BORLANDC__)
return stricmp(s1, s2);
#elif defined(_WIN32)
return _stricmp(s1, s2);
#else
return ::strcasecmp(s1, s2);
#endif
} | 0 | [
"CWE-787"
]
| qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 257,095,613,278,587,700,000,000,000,000,000,000,000 | 10 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
static UINT drdynvc_process_create_request(drdynvcPlugin* drdynvc, int Sp,
int cbChId, wStream* s)
{
size_t pos;
UINT status;
UINT32 ChannelId;
wStream* data_out;
UINT channel_status;
char* name;
size_t length;
if (!drdynvc)
return CHANNEL_RC_BAD_CHANNEL_HANDLE;
if (drdynvc->state == DRDYNVC_STATE_CAPABILITIES)
{
/**
* For some reason the server does not always send the
* capabilities pdu as it should. When this happens,
* send a capabilities response.
*/
drdynvc->version = 3;
if ((status = drdynvc_send_capability_response(drdynvc)))
{
WLog_Print(drdynvc->log, WLOG_ERROR, "drdynvc_send_capability_response failed!");
return status;
}
drdynvc->state = DRDYNVC_STATE_READY;
}
if (Stream_GetRemainingLength(s) < drdynvc_cblen_to_bytes(cbChId))
return ERROR_INVALID_DATA;
ChannelId = drdynvc_read_variable_uint(s, cbChId);
pos = Stream_GetPosition(s);
name = Stream_Pointer(s);
length = Stream_GetRemainingLength(s);
if (strnlen(name, length) >= length)
return ERROR_INVALID_DATA;
WLog_Print(drdynvc->log, WLOG_DEBUG, "process_create_request: ChannelId=%"PRIu32" ChannelName=%s",
ChannelId, name);
channel_status = dvcman_create_channel(drdynvc, drdynvc->channel_mgr, ChannelId, name);
data_out = Stream_New(NULL, pos + 4);
if (!data_out)
{
WLog_Print(drdynvc->log, WLOG_ERROR, "Stream_New failed!");
return CHANNEL_RC_NO_MEMORY;
}
Stream_Write_UINT8(data_out, 0x10 | cbChId);
Stream_SetPosition(s, 1);
Stream_Copy(s, data_out, pos - 1);
if (channel_status == CHANNEL_RC_OK)
{
WLog_Print(drdynvc->log, WLOG_DEBUG, "channel created");
Stream_Write_UINT32(data_out, 0);
}
else
{
WLog_Print(drdynvc->log, WLOG_DEBUG, "no listener");
Stream_Write_UINT32(data_out, (UINT32)0xC0000001); /* same code used by mstsc */
}
status = drdynvc_send(drdynvc, data_out);
if (status != CHANNEL_RC_OK)
{
WLog_Print(drdynvc->log, WLOG_ERROR, "VirtualChannelWriteEx failed with %s [%08"PRIX32"]",
WTSErrorToString(status), status);
return status;
}
if (channel_status == CHANNEL_RC_OK)
{
if ((status = dvcman_open_channel(drdynvc, drdynvc->channel_mgr, ChannelId)))
{
WLog_Print(drdynvc->log, WLOG_ERROR, "dvcman_open_channel failed with error %"PRIu32"!", status);
return status;
}
}
else
{
if ((status = dvcman_close_channel(drdynvc->channel_mgr, ChannelId)))
WLog_Print(drdynvc->log, WLOG_ERROR, "dvcman_close_channel failed with error %"PRIu32"!", status);
}
return status;
} | 0 | [
"CWE-125"
]
| FreeRDP | baee520e3dd9be6511c45a14c5f5e77784de1471 | 158,427,007,362,528,450,000,000,000,000,000,000,000 | 94 | Fix for #4866: Added additional length checks |
void elf_fdpic_arch_lay_out_mm(struct elf_fdpic_params *exec_params,
struct elf_fdpic_params *interp_params,
unsigned long *start_stack,
unsigned long *start_brk)
{
*start_stack = 0x02200000UL;
/* if the only executable is a shared object, assume that it is an interpreter rather than
* a true executable, and map it such that "ld.so --list" comes out right
*/
if (!(interp_params->flags & ELF_FDPIC_FLAG_PRESENT) &&
exec_params->hdr.e_type != ET_EXEC
) {
exec_params->load_addr = PAGE_SIZE;
*start_brk = 0x80000000UL;
}
else {
exec_params->load_addr = 0x02200000UL;
if ((exec_params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) ==
ELF_FDPIC_FLAG_INDEPENDENT
) {
exec_params->flags &= ~ELF_FDPIC_FLAG_ARRANGEMENT;
exec_params->flags |= ELF_FDPIC_FLAG_CONSTDISP;
}
}
} /* end elf_fdpic_arch_lay_out_mm() */ | 0 | [
"CWE-119"
]
| linux | 1be7107fbe18eed3e319a6c3e83c78254b693acb | 177,468,002,225,190,650,000,000,000,000,000,000,000 | 29 | mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]> |
longlong Item_param::val_int()
{
// There's no "default". See comments in Item_param::save_in_field().
switch (state) {
case REAL_VALUE:
return Converter_double_to_longlong(value.real, unsigned_flag).result();
case INT_VALUE:
return value.integer;
case DECIMAL_VALUE:
{
longlong i;
my_decimal2int(E_DEC_FATAL_ERROR, &decimal_value, unsigned_flag, &i);
return i;
}
case STRING_VALUE:
case LONG_DATA_VALUE:
{
return longlong_from_string_with_check(&str_value);
}
case TIME_VALUE:
return (longlong) TIME_to_ulonglong(&value.time);
case IGNORE_VALUE:
case DEFAULT_VALUE:
invalid_default_param();
// fall through
case NULL_VALUE:
return 0;
case NO_VALUE:
DBUG_ASSERT(0); // Should not be possible
return 0;
}
DBUG_ASSERT(0); // Garbage
return 0;
} | 0 | [
"CWE-89"
]
| server | b5e16a6e0381b28b598da80b414168ce9a5016e5 | 290,169,238,944,268,130,000,000,000,000,000,000,000 | 34 | MDEV-26061 MariaDB server crash at Field::set_default
* Item_default_value::fix_fields creates a copy of its argument's field.
* Field::default_value is changed when its expression is prepared in
unpack_vcol_info_from_frm()
This means we must unpack any vcol expression that includes DEFAULT(x)
strictly after unpacking x->default_value.
To avoid building and solving this dependency graph on every table open,
we update Item_default_value::field->default_value after all vcols
are unpacked and fixed. |
relpTcpAcceptConnReqInitTLS(relpTcp_t *pThis, relpSrv_t *pSrv)
{
int r;
ENTER_RELPFUNC;
r = gnutls_init(&pThis->session, GNUTLS_SERVER);
if(chkGnutlsCode(pThis, "Failed to initialize GnuTLS", RELP_RET_ERR_TLS_SETUP, r)) {
ABORT_FINALIZE(RELP_RET_ERR_TLS_SETUP);
}
gnutls_session_set_ptr(pThis->session, pThis);
if(pSrv->pTcp->pristring != NULL)
pThis->pristring = strdup(pSrv->pTcp->pristring);
pThis->authmode = pSrv->pTcp->authmode;
pThis->pUsr = pSrv->pUsr;
CHKRet(relpTcpTLSSetPrio(pThis));
if(isAnonAuth(pSrv->pTcp)) {
r = gnutls_credentials_set(pThis->session, GNUTLS_CRD_ANON, pSrv->pTcp->anoncredSrv);
if(chkGnutlsCode(pThis, "Failed setting anonymous credentials", RELP_RET_ERR_TLS_SETUP, r)) {
ABORT_FINALIZE(RELP_RET_ERR_TLS_SETUP);
}
} else { /* cert-based auth */
if(pSrv->pTcp->caCertFile == NULL) {
gnutls_certificate_send_x509_rdn_sequence(pThis->session, 0);
}
r = gnutls_credentials_set(pThis->session, GNUTLS_CRD_CERTIFICATE, pSrv->pTcp->xcred);
if(chkGnutlsCode(pThis, "Failed setting certificate credentials", RELP_RET_ERR_TLS_SETUP, r)) {
ABORT_FINALIZE(RELP_RET_ERR_TLS_SETUP);
}
}
gnutls_dh_set_prime_bits(pThis->session, pThis->dhBits);
gnutls_certificate_server_set_request(pThis->session, GNUTLS_CERT_REQUEST);
gnutls_transport_set_ptr(pThis->session, (gnutls_transport_ptr_t) pThis->sock);
r = gnutls_handshake(pThis->session);
if(r == GNUTLS_E_INTERRUPTED || r == GNUTLS_E_AGAIN) {
pThis->pEngine->dbgprint("librelp: gnutls_handshake retry necessary (this is OK and expected)\n");
pThis->rtryOp = relpTCP_RETRY_handshake;
} else if(r != GNUTLS_E_SUCCESS) {
chkGnutlsCode(pThis, "TLS handshake failed", RELP_RET_ERR_TLS_HANDS, r);
ABORT_FINALIZE(RELP_RET_ERR_TLS_HANDS);
}
pThis->bTLSActive = 1;
finalize_it:
LEAVE_RELPFUNC;
} | 0 | [
"CWE-787"
]
| librelp | 2cfe657672636aa5d7d2a14cfcb0a6ab9d1f00cf | 277,859,349,956,069,660,000,000,000,000,000,000,000 | 50 | unify error message generation |
vips_foreign_save_init( VipsForeignSave *save )
{
save->background = vips_array_double_newv( 1, 0.0 );
} | 0 | [
"CWE-362",
"CWE-476"
]
| libvips | 20d840e6da15c1574b3ed998bc92f91d1e36c2a5 | 197,143,850,438,244,920,000,000,000,000,000,000,000 | 4 | fix a crash with delayed load
If a delayed load failed, it could leave the pipeline only half-set up.
Sebsequent threads could then segv.
Set a load-has-failed flag and test before generate.
See https://github.com/jcupitt/libvips/issues/893 |
EmbFile *FileSpec::getEmbeddedFile()
{
if(!ok)
return nullptr;
if (embFile)
return embFile;
XRef *xref = fileSpec.getDict()->getXRef();
embFile = new EmbFile(fileStream.fetch(xref));
return embFile;
} | 0 | [
"CWE-20"
]
| poppler | de0c0b8324e776f0b851485e0fc9622fc35695b7 | 255,679,305,829,700,600,000,000,000,000,000,000,000 | 13 | FileSpec: Move the fileSpec.dictLookup call inside fileSpec.isDict if
Fixes #704 |
Nef_polyhedron_2& operator=(const Nef_polyhedron_2<T,Items,Mark>& N1)
{ Base::operator=(N1); return (*this); } | 0 | [
"CWE-269"
]
| cgal | 618b409b0fbcef7cb536a4134ae3a424ef5aae45 | 295,652,504,160,027,700,000,000,000,000,000,000,000 | 2 | Fix Nef_2 and Nef_S2 IO |
dwg_ref_tblname (const Dwg_Data *restrict dwg, Dwg_Object_Ref *restrict ref)
{
const char *restrict name = dwg_dynapi_handle_name (dwg, ref);
return name ? name : "";
} | 0 | [
"CWE-703",
"CWE-835"
]
| libredwg | c6f6668b82bfe595899cc820279ac37bb9ef16f5 | 23,454,424,407,769,333,000,000,000,000,000,000,000 | 5 | cleanup tio.unknown
not needed anymore, we only have UNKNOWN_OBJ or UNKNOWN_ENT with full common
entity_data.
Fixes GH #178 heap_overflow2 |
xsmp_cancel_end_session (GsmClient *client,
GError **error)
{
GsmXSMPClient *xsmp = (GsmXSMPClient *) client;
g_debug ("GsmXSMPClient: xsmp_cancel_end_session ('%s')", xsmp->priv->description);
if (xsmp->priv->conn == NULL) {
g_set_error (error,
GSM_CLIENT_ERROR,
GSM_CLIENT_ERROR_NOT_REGISTERED,
"Client is not registered");
return FALSE;
}
SmsShutdownCancelled (xsmp->priv->conn);
/* reset the state */
xsmp->priv->current_save_yourself = -1;
xsmp->priv->next_save_yourself = -1;
xsmp->priv->next_save_yourself_allow_interact = FALSE;
return TRUE;
} | 0 | [
"CWE-125",
"CWE-835"
]
| gnome-session | b0dc999e0b45355314616321dbb6cb71e729fc9d | 36,621,459,300,521,535,000,000,000,000,000,000,000 | 24 | [gsm] Delay the creation of the GsmXSMPClient until it really exists
We used to create the GsmXSMPClient before the XSMP connection is really
accepted. This can lead to some issues, though. An example is:
https://bugzilla.gnome.org/show_bug.cgi?id=598211#c19. Quoting:
"What is happening is that a new client (probably metacity in your
case) is opening an ICE connection in the GSM_MANAGER_PHASE_END_SESSION
phase, which causes a new GsmXSMPClient to be added to the client
store. The GSM_MANAGER_PHASE_EXIT phase then begins before the client
has had a chance to establish a xsmp connection, which means that
client->priv->conn will not be initialized at the point that xsmp_stop
is called on the new unregistered client."
The fix is to create the GsmXSMPClient object when there's a real XSMP
connection. This implies moving the timeout that makes sure we don't
have an empty client to the XSMP server.
https://bugzilla.gnome.org/show_bug.cgi?id=598211 |
longlong_from_string_with_check(CHARSET_INFO *cs, const char *cptr,
const char *end)
{
int err;
longlong tmp;
char *end_of_num= (char*) end;
tmp= (*(cs->cset->strtoll10))(cs, cptr, &end_of_num, &err);
/*
TODO: Give error if we wanted a signed integer and we got an unsigned
one
*/
if (!current_thd->no_errors &&
(err > 0 ||
(end != end_of_num && !check_if_only_end_space(cs, end_of_num, end))))
{
ErrConvString err(cptr, end - cptr, cs);
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE), "INTEGER",
err.ptr());
}
return tmp;
} | 0 | []
| server | b000e169562697aa072600695d4f0c0412f94f4f | 162,114,693,291,832,010,000,000,000,000,000,000,000 | 24 | Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL))
based on:
commit f7316aa0c9a
Author: Ajo Robert <[email protected]>
Date: Thu Aug 24 17:03:21 2017 +0530
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE cannot return the proper type (NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM ends up being compared with an empty comparator.
The fix is to disable the caching of Item_name_const item. |
CImgDisplay& set_key() {
std::memset((void*)_keys,0,128*sizeof(unsigned int));
std::memset((void*)_released_keys,0,128*sizeof(unsigned int));
_is_keyESC = _is_keyF1 = _is_keyF2 = _is_keyF3 = _is_keyF4 = _is_keyF5 = _is_keyF6 = _is_keyF7 = _is_keyF8 =
_is_keyF9 = _is_keyF10 = _is_keyF11 = _is_keyF12 = _is_keyPAUSE = _is_key1 = _is_key2 = _is_key3 = _is_key4 =
_is_key5 = _is_key6 = _is_key7 = _is_key8 = _is_key9 = _is_key0 = _is_keyBACKSPACE = _is_keyINSERT =
_is_keyHOME = _is_keyPAGEUP = _is_keyTAB = _is_keyQ = _is_keyW = _is_keyE = _is_keyR = _is_keyT = _is_keyY =
_is_keyU = _is_keyI = _is_keyO = _is_keyP = _is_keyDELETE = _is_keyEND = _is_keyPAGEDOWN = _is_keyCAPSLOCK =
_is_keyA = _is_keyS = _is_keyD = _is_keyF = _is_keyG = _is_keyH = _is_keyJ = _is_keyK = _is_keyL =
_is_keyENTER = _is_keySHIFTLEFT = _is_keyZ = _is_keyX = _is_keyC = _is_keyV = _is_keyB = _is_keyN =
_is_keyM = _is_keySHIFTRIGHT = _is_keyARROWUP = _is_keyCTRLLEFT = _is_keyAPPLEFT = _is_keyALT = _is_keySPACE =
_is_keyALTGR = _is_keyAPPRIGHT = _is_keyMENU = _is_keyCTRLRIGHT = _is_keyARROWLEFT = _is_keyARROWDOWN =
_is_keyARROWRIGHT = _is_keyPAD0 = _is_keyPAD1 = _is_keyPAD2 = _is_keyPAD3 = _is_keyPAD4 = _is_keyPAD5 =
_is_keyPAD6 = _is_keyPAD7 = _is_keyPAD8 = _is_keyPAD9 = _is_keyPADADD = _is_keyPADSUB = _is_keyPADMUL =
_is_keyPADDIV = false;
_is_event = true;
#if cimg_display==1
pthread_cond_broadcast(&cimg::X11_attr().wait_event);
#elif cimg_display==2
SetEvent(cimg::Win32_attr().wait_event);
#endif
return *this;
} | 0 | [
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 33,683,978,065,108,167,000,000,000,000,000,000,000 | 23 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file do not exceed file size.
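A hedged sketch of the dimension check (a predicate a loader could apply, not the actual CImg code), written so it cannot overflow the very product it validates:

#include <stdint.h>

static int dims_fit_in_file(uint64_t width, uint64_t height,
                            uint64_t bytes_per_pixel, uint64_t file_size)
{
    if (width == 0 || height == 0 || bytes_per_pixel == 0)
        return 0;
    if (width > file_size / height)              /* w*h alone is too big */
        return 0;
    if (width * height > file_size / bytes_per_pixel)
        return 0;                                /* w*h*bpp is too big */
    return 1;
}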
plperl_trigger_build_args(FunctionCallInfo fcinfo)
{
TriggerData *tdata;
TupleDesc tupdesc;
int i;
char *level;
char *event;
char *relid;
char *when;
HV *hv;
hv = newHV();
hv_ksplit(hv, 12); /* pre-grow the hash */
tdata = (TriggerData *) fcinfo->context;
tupdesc = tdata->tg_relation->rd_att;
relid = DatumGetCString(
DirectFunctionCall1(oidout,
ObjectIdGetDatum(tdata->tg_relation->rd_id)
)
);
hv_store_string(hv, "name", cstr2sv(tdata->tg_trigger->tgname));
hv_store_string(hv, "relid", cstr2sv(relid));
if (TRIGGER_FIRED_BY_INSERT(tdata->tg_event))
{
event = "INSERT";
if (TRIGGER_FIRED_FOR_ROW(tdata->tg_event))
hv_store_string(hv, "new",
plperl_hash_from_tuple(tdata->tg_trigtuple,
tupdesc));
}
else if (TRIGGER_FIRED_BY_DELETE(tdata->tg_event))
{
event = "DELETE";
if (TRIGGER_FIRED_FOR_ROW(tdata->tg_event))
hv_store_string(hv, "old",
plperl_hash_from_tuple(tdata->tg_trigtuple,
tupdesc));
}
else if (TRIGGER_FIRED_BY_UPDATE(tdata->tg_event))
{
event = "UPDATE";
if (TRIGGER_FIRED_FOR_ROW(tdata->tg_event))
{
hv_store_string(hv, "old",
plperl_hash_from_tuple(tdata->tg_trigtuple,
tupdesc));
hv_store_string(hv, "new",
plperl_hash_from_tuple(tdata->tg_newtuple,
tupdesc));
}
}
else if (TRIGGER_FIRED_BY_TRUNCATE(tdata->tg_event))
event = "TRUNCATE";
else
event = "UNKNOWN";
hv_store_string(hv, "event", cstr2sv(event));
hv_store_string(hv, "argc", newSViv(tdata->tg_trigger->tgnargs));
if (tdata->tg_trigger->tgnargs > 0)
{
AV *av = newAV();
av_extend(av, tdata->tg_trigger->tgnargs);
for (i = 0; i < tdata->tg_trigger->tgnargs; i++)
av_push(av, cstr2sv(tdata->tg_trigger->tgargs[i]));
hv_store_string(hv, "args", newRV_noinc((SV *) av));
}
hv_store_string(hv, "relname",
cstr2sv(SPI_getrelname(tdata->tg_relation)));
hv_store_string(hv, "table_name",
cstr2sv(SPI_getrelname(tdata->tg_relation)));
hv_store_string(hv, "table_schema",
cstr2sv(SPI_getnspname(tdata->tg_relation)));
if (TRIGGER_FIRED_BEFORE(tdata->tg_event))
when = "BEFORE";
else if (TRIGGER_FIRED_AFTER(tdata->tg_event))
when = "AFTER";
else if (TRIGGER_FIRED_INSTEAD(tdata->tg_event))
when = "INSTEAD OF";
else
when = "UNKNOWN";
hv_store_string(hv, "when", cstr2sv(when));
if (TRIGGER_FIRED_FOR_ROW(tdata->tg_event))
level = "ROW";
else if (TRIGGER_FIRED_FOR_STATEMENT(tdata->tg_event))
level = "STATEMENT";
else
level = "UNKNOWN";
hv_store_string(hv, "level", cstr2sv(level));
return newRV_noinc((SV *) hv);
} | 0 | [
"CWE-264"
]
| postgres | 537cbd35c893e67a63c59bc636c3e888bd228bc7 | 254,003,048,481,530,700,000,000,000,000,000,000,000 | 102 | Prevent privilege escalation in explicit calls to PL validators.
The primary role of PL validators is to be called implicitly during
CREATE FUNCTION, but they are also normal functions that a user can call
explicitly. Add a permissions check to each validator to ensure that a
user cannot use explicit validator calls to achieve things he could not
otherwise achieve. Back-patch to 8.4 (all supported versions).
Non-core procedural language extensions ought to make the same two-line
change to their own validators.
Andres Freund, reviewed by Tom Lane and Noah Misch.
Security: CVE-2014-0061 |
static void update_numa_stats(struct numa_stats *ns, int nid)
{
int cpu;
memset(ns, 0, sizeof(*ns));
for_each_cpu(cpu, cpumask_of_node(nid)) {
struct rq *rq = cpu_rq(cpu);
ns->load += cpu_runnable_load(rq);
ns->compute_capacity += capacity_of(cpu);
}
} | 0 | [
"CWE-400",
"CWE-703"
]
| linux | de53fd7aedb100f03e5d2231cfce0e4993282425 | 91,821,366,225,412,870,000,000,000,000,000,000,000 | 13 | sched/fair: Fix low cpu usage with high throttling by removing expiration of cpu-local slices
It has been observed, that highly-threaded, non-cpu-bound applications
running under cpu.cfs_quota_us constraints can hit a high percentage of
periods throttled while simultaneously not consuming the allocated
amount of quota. This use case is typical of user-interactive non-cpu
bound applications, such as those running in kubernetes or mesos when
run on multiple cpu cores.
This has been root-caused to the cpu-local run queue being allocated
per-cpu bandwidth slices and then not fully using each slice within the
period, at which point the slice and quota expire. This expiration of unused
slice results in applications not being able to utilize the quota for
which they are allocated.
The non-expiration of per-cpu slices was recently fixed by
'commit 512ac999d275 ("sched/fair: Fix bandwidth timer clock drift
condition")'. Prior to that it appears that this had been broken since
at least 'commit 51f2176d74ac ("sched/fair: Fix unlocked reads of some
cfs_b->quota/period")' which was introduced in v3.16-rc1 in 2014. That
added the following conditional which resulted in slices never being
expired.
if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
/* extend local deadline, drift is bounded above by 2 ticks */
cfs_rq->runtime_expires += TICK_NSEC;
Because this was broken for nearly 5 years, and has recently been fixed
and is now being noticed by many users running kubernetes
(https://github.com/kubernetes/kubernetes/issues/67577) it is my opinion
that the mechanisms around expiring runtime should be removed
altogether.
This allows quota already allocated to per-cpu run-queues to live longer
than the period boundary. This allows threads on runqueues that do not
use much CPU to continue to use their remaining slice over a longer
period of time than cpu.cfs_period_us. However, this helps prevent the
above condition of hitting throttling while also not fully utilizing
your cpu quota.
This theoretically allows a machine to use slightly more than its
allotted quota in some periods. This overflow would be bounded by the
remaining quota left on each per-cpu runqueue. This is typically no
more than min_cfs_rq_runtime=1ms per cpu. For CPU bound tasks this will
change nothing, as they should theoretically fully utilize all of their
quota in each period. For user-interactive tasks as described above this
provides a much better user/application experience as their cpu
utilization will more closely match the amount they requested when they
hit throttling. This means that cpu limits no longer strictly apply per
period for non-cpu bound applications, but that they are still accurate
over longer timeframes.
This greatly improves performance of high-thread-count, non-cpu bound
applications with low cfs_quota_us allocation on high-core-count
machines. In the case of an artificial testcase (10ms/100ms of quota on
80 CPU machine), this commit resulted in almost 30x performance
improvement, while still maintaining correct cpu quota restrictions.
That testcase is available at https://github.com/indeedeng/fibtest.
Fixes: 512ac999d275 ("sched/fair: Fix bandwidth timer clock drift condition")
Signed-off-by: Dave Chiluk <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Phil Auld <[email protected]>
Reviewed-by: Ben Segall <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: John Hammond <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Kyle Anderson <[email protected]>
Cc: Gabriel Munos <[email protected]>
Cc: Peter Oskolkov <[email protected]>
Cc: Cong Wang <[email protected]>
Cc: Brendan Gregg <[email protected]>
Link: https://lkml.kernel.org/r/[email protected] |
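As a condensed illustration of what slice handout looks like once expiration is gone: drawing runtime from the global pool becomes a plain min() against the remaining quota, with no runtime_expires bookkeeping. Names follow kernel convention (assign_cfs_rq_runtime, sched_cfs_bandwidth_slice, RUNTIME_INF), but this is an abbreviated sketch, not the verbatim patch.

static int assign_cfs_rq_runtime_sketch(struct cfs_bandwidth *cfs_b,
                                        struct cfs_rq *cfs_rq)
{
    u64 amount = 0, min_amount;

    /* how much we want: one slice, minus whatever we still hold */
    min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;

    raw_spin_lock(&cfs_b->lock);
    if (cfs_b->quota == RUNTIME_INF)
        amount = min_amount;              /* unlimited quota */
    else if (cfs_b->runtime > 0) {
        amount = min(cfs_b->runtime, min_amount);
        cfs_b->runtime -= amount;         /* draw from the global pool */
    }
    raw_spin_unlock(&cfs_b->lock);

    /* the slice may now outlive the period instead of expiring */
    cfs_rq->runtime_remaining += amount;
    return cfs_rq->runtime_remaining > 0;
}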
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
struct iwl_trans_pcie __maybe_unused *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
lockdep_assert_held(&trans_pcie->mutex);
IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
state ? "disabled" : "enabled");
if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
if (trans->trans_cfg->gen2)
_iwl_trans_pcie_gen2_stop_device(trans);
else
_iwl_trans_pcie_stop_device(trans);
}
} | 0 | [
"CWE-476"
]
| linux | 8188a18ee2e48c9a7461139838048363bfce3fef | 200,197,709,700,479,460,000,000,000,000,000,000,000 | 16 | iwlwifi: pcie: fix rb_allocator workqueue allocation
We don't handle failures in the rb_allocator workqueue allocation
correctly. To fix that, move the code earlier so the cleanup is
easier and we don't have to undo all the interrupt allocations in
this case.
Signed-off-by: Johannes Berg <[email protected]>
Signed-off-by: Luca Coelho <[email protected]> |
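A minimal sketch of the allocate-early pattern the message describes: take the rb_allocator workqueue before any interrupt setup, so a failure needs no unwinding of later allocations. The function name is illustrative; the workqueue name and flags mirror the driver.

static int iwl_pcie_rx_alloc_sketch(struct iwl_trans *trans)
{
    struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

    trans_pcie->rba.alloc_wq =
        alloc_workqueue("rb_allocator", WQ_HIGHPRI | WQ_UNBOUND, 1);
    if (!trans_pcie->rba.alloc_wq)
        return -ENOMEM;   /* fail early: nothing else allocated yet */

    /* ... interrupt and buffer setup follows only on success ... */
    return 0;
}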
static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen,
void __user *uaddr, int __user *ulen)
{
int err;
int len;
BUG_ON(klen > sizeof(struct sockaddr_storage));
err = get_user(len, ulen);
if (err)
return err;
if (len > klen)
len = klen;
if (len < 0)
return -EINVAL;
if (len) {
if (audit_sockaddr(klen, kaddr))
return -ENOMEM;
if (copy_to_user(uaddr, kaddr, len))
return -EFAULT;
}
/*
* "fromlen shall refer to the value before truncation.."
* 1003.1g
*/
return __put_user(klen, ulen);
} | 0 | [
"CWE-264"
]
| net | 4de930efc23b92ddf88ce91c405ee645fe6e27ea | 20,629,019,787,775,620,000,000,000,000,000,000,000 | 26 | net: validate the range we feed to iov_iter_init() in sys_sendto/sys_recvfrom
Cc: [email protected] # v3.19
Signed-off-by: Al Viro <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static void rbd_dev_device_release(struct rbd_device *rbd_dev)
{
clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
rbd_free_disk(rbd_dev);
if (!single_major)
unregister_blkdev(rbd_dev->major, rbd_dev->name);
} | 0 | [
"CWE-863"
]
| linux | f44d04e696feaf13d192d942c4f14ad2e117065a | 320,961,133,142,547,800,000,000,000,000,000,000,000 | 7 | rbd: require global CAP_SYS_ADMIN for mapping and unmapping
It turns out that currently we rely only on sysfs attribute
permissions:
$ ll /sys/bus/rbd/{add*,remove*}
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add_single_major
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/remove
--w------- 1 root root 4096 Sep 3 20:38 /sys/bus/rbd/remove_single_major
This means that images can be mapped and unmapped (i.e. block devices
can be created and deleted) by a UID 0 process even after it drops all
privileges or by any process with CAP_DAC_OVERRIDE in its user namespace
as long as UID 0 is mapped into that user namespace.
Be consistent with other virtual block devices (loop, nbd, dm, md, etc)
and require CAP_SYS_ADMIN in the initial user namespace for mapping and
unmapping, and also for dumping the configuration string and refreshing
the image header.
Cc: [email protected]
Signed-off-by: Ilya Dryomov <[email protected]>
Reviewed-by: Jeff Layton <[email protected]> |
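A sketch of the guard this commit places at the top of the rbd sysfs handlers, so that mapping and unmapping match loop/nbd/dm/md; add_store_sketch is an illustrative stand-in for the real add/remove handlers.

static ssize_t add_store_sketch(struct bus_type *bus, const char *buf,
                                size_t count)
{
    if (!capable(CAP_SYS_ADMIN))   /* checked against &init_user_ns */
        return -EPERM;

    /* ... parse buf and map the image as before ... */
    return count;
}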
lseek_or_error (struct tar_sparse_file *file, off_t offset)
{
if (file->seekable
? lseek (file->fd, offset, SEEK_SET) < 0
: ! dump_zeros (file, offset))
{
seek_diag_details (file->stat_info->orig_file_name, offset);
return false;
}
return true;
} | 0 | []
| tar | c15c42ccd1e2377945fd0414eca1a49294bff454 | 130,297,179,926,511,310,000,000,000,000,000,000,000 | 11 | Fix CVE-2018-20482
* NEWS: Update.
* src/sparse.c (sparse_dump_region): Handle short read condition.
(sparse_extract_region,check_data_region): Fix dumped_size calculation.
Handle short read condition.
(pax_decode_header): Fix dumped_size calculation.
* tests/Makefile.am: Add new testcases.
* tests/testsuite.at: Likewise.
* tests/sptrcreat.at: New file.
* tests/sptrdiff00.at: New file.
* tests/sptrdiff01.at: New file. |
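A condensed sketch of the short-read handling the fix introduces: a read that returns fewer bytes than requested (e.g. the file was truncated while being dumped) must be reported, not silently counted as the full region. This uses plain read(2) for self-containment; the real patch works through tar's safe_read() and its diagnostics.

#include <unistd.h>
#include <stdbool.h>

static bool
dump_region_checked (int fd, size_t bytes_left)
{
  char buffer[512];

  while (bytes_left > 0)
    {
      size_t request = bytes_left < sizeof buffer ? bytes_left : sizeof buffer;
      ssize_t n = read (fd, buffer, request);

      if (n <= 0)                /* error or unexpected EOF: short read */
        return false;            /* caller emits a read diagnostic */

      bytes_left -= (size_t) n;  /* only count what actually arrived */
    }
  return true;
}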
static int lzh_read_input(struct kwajd_stream *lzh) {
int read;
if (lzh->input_end) {
lzh->input_end += 8;
lzh->inbuf[0] = 0;
read = 1;
}
else {
read = lzh->sys->read(lzh->input, &lzh->inbuf[0], KWAJ_INPUT_SIZE);
if (read < 0) return MSPACK_ERR_READ;
if (read == 0) {
lzh->input_end = 8;
lzh->inbuf[0] = 0;
read = 1;
}
}
/* update i_ptr and i_end */
lzh->i_ptr = &lzh->inbuf[0];
lzh->i_end = &lzh->inbuf[read];
return MSPACK_ERR_OK;
} | 0 | [
"CWE-310",
"CWE-787"
]
| libmspack | 0b0ef9344255ff5acfac6b7af09198ac9c9756c8 | 49,215,844,948,109,570,000,000,000,000,000,000,000 | 22 | kwaj_read_headers(): fix handling of non-terminated strings |
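As a hedged illustration of "fix handling of non-terminated strings": a header string claimed to be NUL-terminated must be scanned only within the bytes actually read, and rejected if no terminator appears. The helper name is hypothetical; the MSPACK_ERR_* constants come from mspack.h.

#include <stddef.h>

static int read_header_string(const unsigned char *buf, size_t buflen,
                              char *out, size_t outlen)
{
    size_t i;
    for (i = 0; i < buflen && i < outlen; i++) {
        out[i] = (char) buf[i];
        if (buf[i] == '\0')
            return MSPACK_ERR_OK;       /* terminator found in bounds */
    }
    return MSPACK_ERR_DATAFORMAT;       /* ran out of input: reject */
}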
void AsyncClientHandler::onSuccess(Envoy::Http::MessagePtr&& response) {
context->onHttpCallSuccess(token, response);
} | 0 | [
"CWE-476"
]
| envoy | 8788a3cf255b647fd14e6b5e2585abaaedb28153 | 69,742,252,362,042,600,000,000,000,000,000,000,000 | 3 | 1.4 - Do not call into the VM unless the VM Context has been created. (#24)
* Ensure that the in-VM Context is created before onDone is called.
Signed-off-by: John Plevyak <[email protected]>
* Update as per offline discussion.
Signed-off-by: John Plevyak <[email protected]>
* Set in_vm_context_created_ in onNetworkNewConnection.
Signed-off-by: John Plevyak <[email protected]>
* Add guards to other network calls.
Signed-off-by: John Plevyak <[email protected]>
* Fix common/wasm tests.
Signed-off-by: John Plevyak <[email protected]>
* Patch tests.
Signed-off-by: John Plevyak <[email protected]>
* Remove unnecessary file from cherry-pick.
Signed-off-by: John Plevyak <[email protected]> |
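The fix itself is C++, but the guard pattern it adds reduces to a one-line check; a conceptual C sketch with illustrative names:

#include <stdint.h>
#include <stdbool.h>

struct context {
    bool in_vm_context_created;   /* set once the in-VM context exists */
};

/* assumed VM entry point, declared so the sketch is self-contained */
void vm_on_http_call_success(struct context *ctx, uint32_t token);

static void on_http_call_success(struct context *ctx, uint32_t token)
{
    if (!ctx->in_vm_context_created)
        return;                   /* never call into the VM before then */
    vm_on_http_call_success(ctx, token);
}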
TPMS_TAGGED_PCR_SELECT_Unmarshal(TPMS_TAGGED_PCR_SELECT *target, BYTE **buffer, INT32 *size)
{
TPM_RC rc = TPM_RC_SUCCESS;
if (rc == TPM_RC_SUCCESS) {
rc = TPM_PT_PCR_Unmarshal(&target->tag, buffer, size);
}
if (rc == TPM_RC_SUCCESS) {
rc = UINT8_Unmarshal(&target->sizeofSelect, buffer, size);
}
if (rc == TPM_RC_SUCCESS) {
rc = Array_Unmarshal(target->pcrSelect, target->sizeofSelect, buffer, size);
}
return rc;
} | 0 | [
"CWE-787"
]
| libtpms | 5cc98a62dc6f204dcf5b87c2ee83ac742a6a319b | 7,765,984,988,981,178,000,000,000,000,000,000,000 | 15 | tpm2: Restore original value if unmarshalled value was illegal
Restore the original value of the memory location where data from
a stream was unmarshalled and the unmarshalled value was found to
be illegal. The goal is to not keep illegal values in memory.
Signed-off-by: Stefan Berger <[email protected]> |
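A fragment-level sketch of the restore-on-illegal pattern the message describes, applied here to the sizeofSelect field as an example; PCR_SELECT_MAX and TPM_RC_VALUE are taken from the TPM reference code and assumed available.

if (rc == TPM_RC_SUCCESS) {
    UINT8 orig_sizeofSelect = target->sizeofSelect;   /* remember */
    rc = UINT8_Unmarshal(&target->sizeofSelect, buffer, size);
    if (rc == TPM_RC_SUCCESS &&
        target->sizeofSelect > PCR_SELECT_MAX) {      /* illegal value */
        target->sizeofSelect = orig_sizeofSelect;     /* restore it */
        rc = TPM_RC_VALUE;
    }
}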
static struct inode *udf_alloc_inode(struct super_block *sb)
{
struct udf_inode_info *ei;
ei = kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
ei->i_unique = 0;
ei->i_lenExtents = 0;
ei->i_next_alloc_block = 0;
ei->i_next_alloc_goal = 0;
ei->i_strat4096 = 0;
init_rwsem(&ei->i_data_sem);
return &ei->vfs_inode;
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | 1df2ae31c724e57be9d7ac00d78db8a5dabdd050 | 191,563,620,007,042,350,000,000,000,000,000,000,000 | 16 | udf: Fortify loading of sparing table
Add sanity checks when loading sparing table from disk to avoid accessing
unallocated memory or writing to it.
Signed-off-by: Jan Kara <[email protected]> |
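A minimal sketch of the kind of sanity check the commit adds: verify that the entry count claimed by the on-disk sparing table fits inside the bytes actually read before indexing into it. The struct names follow the UDF on-disk definitions; the helper and error code are illustrative.

static int check_sparing_table(struct sparingTable *st, size_t alloc_size)
{
    size_t need = sizeof(*st) +
        le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry);

    if (need > alloc_size)   /* table claims more entries than we read */
        return -EINVAL;
    return 0;
}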
static inline int security_key_getsecurity(struct key *key, char **_buffer)
{
*_buffer = NULL;
return 0;
} | 0 | []
| linux-2.6 | ee18d64c1f632043a02e6f5ba5e045bb26a5465f | 169,407,969,720,388,270,000,000,000,000,000,000,000 | 5 | KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]> |
static void imap_msn_index_to_uid_seqset(struct Buffer *buf, struct ImapMboxData *mdata)
{
int first = 1, state = 0;
unsigned int cur_uid = 0, last_uid = 0;
unsigned int range_begin = 0, range_end = 0;
const size_t max_msn = imap_msn_highest(&mdata->msn);
for (unsigned int msn = 1; msn <= max_msn + 1; msn++)
{
bool match = false;
if (msn <= max_msn)
{
struct Email *e_cur = imap_msn_get(&mdata->msn, msn - 1);
cur_uid = e_cur ? imap_edata_get(e_cur)->uid : 0;
if (!state || (cur_uid && ((cur_uid - 1) == last_uid)))
match = true;
last_uid = cur_uid;
}
if (match)
{
switch (state)
{
case 1: /* single: convert to a range */
state = 2;
/* fall through */
case 2: /* extend range ending */
range_end = cur_uid;
break;
default:
state = 1;
range_begin = cur_uid;
break;
}
}
else if (state)
{
if (first)
first = 0;
else
mutt_buffer_addch(buf, ',');
if (state == 1)
mutt_buffer_add_printf(buf, "%u", range_begin);
else if (state == 2)
mutt_buffer_add_printf(buf, "%u:%u", range_begin, range_end);
state = 1;
range_begin = cur_uid;
}
}
} | 0 | [
"CWE-125"
]
| neomutt | fa1db5785e5cfd9d3cd27b7571b9fe268d2ec2dc | 33,849,810,927,805,570,000,000,000,000,000,000,000 | 52 | Fix seqset iterator when it ends in a comma
If the seqset ended with a comma, the substr_end marker would be just
before the trailing nul. In the next call, the loop to skip the
marker would iterate right past the end of the string too.
The fix is simple: place the substr_end marker and skip past it
immediately. |
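A self-contained sketch of the fixed iterator shape: consume the delimiter at the moment the element is terminated, so a trailing comma leaves the cursor on the NUL instead of past it. Illustrative, not the verbatim neomutt code.

#include <string.h>

static char *seqset_next(char **pos)
{
    char *start = *pos;
    if (*start == '\0')
        return NULL;                   /* already at end of string */
    char *end = strchr(start, ',');
    if (end) {
        *end = '\0';                   /* terminate this element ... */
        *pos = end + 1;                /* ... and skip the comma NOW */
    } else {
        *pos = start + strlen(start);  /* last element: stop at NUL */
    }
    return start;
}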
PHP_FUNCTION(curl_exec)
{
CURLcode error;
zval *zid;
php_curl *ch;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &zid) == FAILURE) {
return;
}
if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) {
RETURN_FALSE;
}
_php_curl_verify_handlers(ch, 1);
_php_curl_cleanup_handle(ch);
error = curl_easy_perform(ch->cp);
SAVE_CURL_ERROR(ch, error);
/* CURLE_PARTIAL_FILE is returned by HEAD requests */
if (error != CURLE_OK && error != CURLE_PARTIAL_FILE) {
smart_str_free(&ch->handlers->write->buf);
RETURN_FALSE;
}
if (!Z_ISUNDEF(ch->handlers->std_err)) {
php_stream *stream;
stream = (php_stream*)zend_fetch_resource2_ex(&ch->handlers->std_err, NULL, php_file_le_stream(), php_file_le_pstream());
if (stream) {
php_stream_flush(stream);
}
}
if (ch->handlers->write->method == PHP_CURL_RETURN && ch->handlers->write->buf.s) {
smart_str_0(&ch->handlers->write->buf);
RETURN_STR_COPY(ch->handlers->write->buf.s);
}
/* flush the file handle, so any remaining data is synched to disk */
if (ch->handlers->write->method == PHP_CURL_FILE && ch->handlers->write->fp) {
fflush(ch->handlers->write->fp);
}
if (ch->handlers->write_header->method == PHP_CURL_FILE && ch->handlers->write_header->fp) {
fflush(ch->handlers->write_header->fp);
}
if (ch->handlers->write->method == PHP_CURL_RETURN) {
RETURN_EMPTY_STRING();
} else {
RETURN_TRUE;
}
} | 0 | [
"CWE-119",
"CWE-787"
]
| php-src | 72dbb7f416160f490c4e9987040989a10ad431c7 | 115,692,264,883,712,820,000,000,000,000,000,000,000 | 53 | Fix bug #72674 - check both curl_escape and curl_unescape |
gsicc_set_srcgtag_struct(gsicc_manager_t *icc_manager, const char* pname,
int namelen)
{
gs_memory_t *mem;
stream *str;
int code;
int info_size;
char *buffer_ptr, *curr_ptr, *last;
int num_bytes;
int k;
static const char *const srcgtag_keys[] = {GSICC_SRCGTAG_KEYS};
cmm_profile_t *icc_profile;
cmm_srcgtag_profile_t *srcgtag;
bool start = true;
gsicc_cmm_t cmm = gsCMM_DEFAULT;
/* If we don't have an icc manager or if this thing is already set
then ignore the call. For now, I am going to allow it to
be set one time. */
if (icc_manager == NULL || icc_manager->srcgtag_profile != NULL) {
return 0;
} else {
mem = icc_manager->memory->non_gc_memory;
code = gsicc_open_search(pname, namelen, mem, mem->gs_lib_ctx->profiledir,
mem->gs_lib_ctx->profiledir_len, &str);
if (code < 0)
return code;
}
if (str != NULL) {
/* Get the information in the file */
code = sfseek(str,0,SEEK_END);
if (code < 0)
return code;
info_size = sftell(str);
code = srewind(str);
if (code < 0)
return code;
if (info_size > (GSICC_NUM_SRCGTAG_KEYS + 1) * FILENAME_MAX) {
return gs_throw1(-1, "setting of %s src obj color info failed",
pname);
}
/* Allocate the buffer, stuff with the data */
buffer_ptr = (char*) gs_alloc_bytes(mem, info_size+1,
"gsicc_set_srcgtag_struct");
if (buffer_ptr == NULL) {
return gs_throw1(gs_error_VMerror, "setting of %s src obj color info failed",
pname);
}
num_bytes = sfread(buffer_ptr,sizeof(unsigned char), info_size, str);
code = sfclose(str);
if (code < 0)
return code;
buffer_ptr[info_size] = 0;
if (num_bytes != info_size) {
gs_free_object(mem, buffer_ptr, "gsicc_set_srcgtag_struct");
return gs_throw1(-1, "setting of %s src obj color info failed",
pname);
}
/* Create the structure in which we will store this data */
srcgtag = gsicc_new_srcgtag_profile(mem);
/* Now parse through the data opening the profiles that are needed */
curr_ptr = buffer_ptr;
/* Initialize that we want color management. Then if profile is not
present we know we did not want anything special done with that
source type. Else if we have no profile and don't want color
management we will make sure to do that */
for (k = 0; k < NUM_SOURCE_PROFILES; k++) {
srcgtag->rgb_rend_cond[k].cmm = gsCMM_DEFAULT;
srcgtag->cmyk_rend_cond[k].cmm = gsCMM_DEFAULT;
srcgtag->gray_rend_cond[k].cmm = gsCMM_DEFAULT;
}
while (start || strlen(curr_ptr) > 0) {
if (start) {
curr_ptr = gs_strtok(buffer_ptr, "\t,\32\n\r", &last);
start = false;
} else {
curr_ptr = gs_strtok(NULL, "\t,\32\n\r", &last);
}
if (curr_ptr == NULL) break;
/* Now go ahead and see if we have a match */
for (k = 0; k < GSICC_NUM_SRCGTAG_KEYS; k++) {
if (strncmp(curr_ptr, srcgtag_keys[k], strlen(srcgtag_keys[k])) == 0 ) {
/* Check if the curr_ptr is None which indicates that this
object is not to be color managed. Also, if the
curr_ptr is Replace which indicates we will be doing
direct replacement of the colors. */
curr_ptr = gs_strtok(NULL, "\t,\32\n\r", &last);
if (strncmp(curr_ptr, GSICC_SRCTAG_NOCM, strlen(GSICC_SRCTAG_NOCM)) == 0 &&
strlen(curr_ptr) == strlen(GSICC_SRCTAG_NOCM)) {
cmm = gsCMM_NONE;
icc_profile = NULL;
break;
} else if ((strncmp(curr_ptr, GSICC_SRCTAG_REPLACE, strlen(GSICC_SRCTAG_REPLACE)) == 0 &&
strlen(curr_ptr) == strlen(GSICC_SRCTAG_REPLACE))) {
cmm = gsCMM_REPLACE;
icc_profile = NULL;
break;
} else {
/* Try to open the file and set the profile */
code = gsicc_open_search(curr_ptr, strlen(curr_ptr), mem,
mem->gs_lib_ctx->profiledir,
mem->gs_lib_ctx->profiledir_len, &str);
if (code < 0)
return code;
if (str != NULL) {
icc_profile =
gsicc_profile_new(str, mem, curr_ptr, strlen(curr_ptr));
code = sfclose(str);
if (code < 0)
return code;
}
if (str != NULL && icc_profile != NULL) {
code = gsicc_init_profile_info(icc_profile);
if (code < 0)
return code;
cmm = gsCMM_DEFAULT;
/* Check if this object is a devicelink profile.
If it is then the intent, blackpoint etc. are not
read nor used when dealing with these profiles */
gsicc_check_device_link(icc_profile);
break;
} else {
/* Failed to open profile file. End this now. */
gs_free_object(mem, buffer_ptr, "gsicc_set_srcgtag_struct");
rc_decrement(srcgtag, "gsicc_set_srcgtag_struct");
return gs_throw1(-1,
"setting of %s src obj color info failed", pname);
}
}
}
}
/* Get the intent now and set the profile. If GSICC_SRCGTAG_KEYS
order changes this switch needs to change also */
switch (k) {
case COLOR_TUNE:
/* Color tune profile. No intent */
srcgtag->color_warp_profile = icc_profile;
break;
case GRAPHIC_CMYK:
srcgtag->cmyk_profiles[gsSRC_GRAPPRO] = icc_profile;
srcgtag->cmyk_rend_cond[gsSRC_GRAPPRO].cmm = cmm;
if (cmm == gsCMM_DEFAULT) {
code = gsicc_fill_srcgtag_item(&(srcgtag->cmyk_rend_cond[gsSRC_GRAPPRO]), &last, true);
if (code < 0)
return code;
}
break;
case IMAGE_CMYK:
srcgtag->cmyk_profiles[gsSRC_IMAGPRO] = icc_profile;
srcgtag->cmyk_rend_cond[gsSRC_IMAGPRO].cmm = cmm;
if (cmm == gsCMM_DEFAULT) {
code = gsicc_fill_srcgtag_item(&(srcgtag->cmyk_rend_cond[gsSRC_IMAGPRO]), &last, true);
if (code < 0)
return code;
}
break;
case TEXT_CMYK:
srcgtag->cmyk_profiles[gsSRC_TEXTPRO] = icc_profile;
srcgtag->cmyk_rend_cond[gsSRC_TEXTPRO].cmm = cmm;
if (cmm == gsCMM_DEFAULT) {
code = gsicc_fill_srcgtag_item(&(srcgtag->cmyk_rend_cond[gsSRC_TEXTPRO]), &last, true);
if (code < 0)
return code;
}
break;
case GRAPHIC_RGB:
srcgtag->rgb_profiles[gsSRC_GRAPPRO] = icc_profile;
srcgtag->rgb_rend_cond[gsSRC_GRAPPRO].cmm = cmm;
if (cmm == gsCMM_DEFAULT) {
code = gsicc_fill_srcgtag_item(&(srcgtag->rgb_rend_cond[gsSRC_GRAPPRO]), &last, false);
if (code < 0)
return code;
}
break;
case IMAGE_RGB:
srcgtag->rgb_profiles[gsSRC_IMAGPRO] = icc_profile;
srcgtag->rgb_rend_cond[gsSRC_IMAGPRO].cmm = cmm;
if (cmm == gsCMM_DEFAULT) {
code = gsicc_fill_srcgtag_item(&(srcgtag->rgb_rend_cond[gsSRC_IMAGPRO]), &last, false);
if (code < 0)
return code;
}
break;
case TEXT_RGB:
srcgtag->rgb_profiles[gsSRC_TEXTPRO] = icc_profile;
srcgtag->rgb_rend_cond[gsSRC_TEXTPRO].cmm = cmm;
if (cmm == gsCMM_DEFAULT) {
code = gsicc_fill_srcgtag_item(&(srcgtag->rgb_rend_cond[gsSRC_TEXTPRO]), &last, false);
if (code < 0)
return code;
}
break;
case GRAPHIC_GRAY:
srcgtag->gray_profiles[gsSRC_GRAPPRO] = icc_profile;
srcgtag->gray_rend_cond[gsSRC_GRAPPRO].cmm = cmm;
if (cmm == gsCMM_DEFAULT) {
code = gsicc_fill_srcgtag_item(&(srcgtag->gray_rend_cond[gsSRC_GRAPPRO]), &last, false);
if (code < 0)
return code;
}
break;
case IMAGE_GRAY:
srcgtag->gray_profiles[gsSRC_IMAGPRO] = icc_profile;
srcgtag->gray_rend_cond[gsSRC_IMAGPRO].cmm = cmm;
if (cmm == gsCMM_DEFAULT) {
code = gsicc_fill_srcgtag_item(&(srcgtag->gray_rend_cond[gsSRC_IMAGPRO]), &last, false);
if (code < 0)
return code;
}
break;
case TEXT_GRAY:
srcgtag->gray_profiles[gsSRC_TEXTPRO] = icc_profile;
srcgtag->gray_rend_cond[gsSRC_TEXTPRO].cmm = cmm;
if (cmm == gsCMM_DEFAULT) {
code = gsicc_fill_srcgtag_item(&(srcgtag->gray_rend_cond[gsSRC_TEXTPRO]), &last, false);
if (code < 0)
return code;
}
break;
case GSICC_NUM_SRCGTAG_KEYS:
/* Failed to match the key */
gs_free_object(mem, buffer_ptr, "gsicc_set_srcgtag_struct");
rc_decrement(srcgtag, "gsicc_set_srcgtag_struct");
return gs_throw1(-1, "failed to find key in %s", pname);
break;
default:
/* Some issue */
gs_free_object(mem, buffer_ptr, "gsicc_set_srcgtag_struct");
rc_decrement(srcgtag, "gsicc_set_srcgtag_struct");
return gs_throw1(-1, "Error in srcgtag data %s", pname);
break;
}
}
} else {
return gs_throw1(-1, "setting of %s src obj color info failed", pname);
}
gs_free_object(mem, buffer_ptr, "gsicc_set_srcgtag_struct");
srcgtag->name_length = strlen(pname);
srcgtag->name = (char*) gs_alloc_bytes(mem, srcgtag->name_length,
"gsicc_set_srcgtag_struct");
if (srcgtag->name == NULL)
return gs_throw(gs_error_VMerror, "Insufficient memory for tag name");
strncpy(srcgtag->name, pname, srcgtag->name_length);
icc_manager->srcgtag_profile = srcgtag;
return 0;
} | 0 | []
| ghostpdl | 6d444c273da5499a4cd72f21cb6d4c9a5256807d | 208,640,326,140,815,700,000,000,000,000,000,000,000 | 246 | Bug 697178: Add a file permissions callback
For the rare occasions when the graphics library directly opens a file
(currently for reading), this allows us to apply any restrictions on
file access normally applied in the interpreter. |
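A hedged sketch of what such a permission callback can look like: the library consults a caller-supplied checker before opening a file itself, so interpreter-level policy (e.g. SAFER) still applies. The typedef and helper are illustrative, not the ghostpdl API.

#include <stdio.h>

typedef int (*gs_fs_permit_fn)(const char *fname, const char *mode);

static int checked_fopen(gs_fs_permit_fn permit, const char *fname,
                         const char *mode, FILE **pf)
{
    if (permit != NULL && permit(fname, mode) < 0)
        return -1;                 /* interpreter denied access */
    *pf = fopen(fname, mode);
    return *pf != NULL ? 0 : -1;
}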
static int aesni_cbc_hmac_sha1_init_key(EVP_CIPHER_CTX *ctx,
const unsigned char *inkey,
const unsigned char *iv, int enc)
{
EVP_AES_HMAC_SHA1 *key = data(ctx);
int ret;
if (enc)
ret = aesni_set_encrypt_key(inkey, ctx->key_len * 8, &key->ks);
else
ret = aesni_set_decrypt_key(inkey, ctx->key_len * 8, &key->ks);
SHA1_Init(&key->head); /* handy when benchmarking */
key->tail = key->head;
key->md = key->head;
key->payload_length = NO_PAYLOAD_LENGTH;
return ret < 0 ? 0 : 1;
} | 0 | [
"CWE-310"
]
| openssl | 4159f311671cf3bac03815e5de44681eb758304a | 50,969,328,195,291,390,000,000,000,000,000,000,000 | 20 | Check that we have enough padding characters.
Reviewed-by: Emilia Käsper <[email protected]>
CVE-2016-2107
MR: #2572 |
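A simplified (and deliberately not constant-time) sketch of the check being added: a CBC record must actually contain pad+1 padding bytes plus the MAC before any pad byte is trusted. The real fix folds this into the existing constant-time MAC/pad verification in the stitched cipher.

#include <stddef.h>

static int check_cbc_padding(const unsigned char *rec, size_t len,
                             size_t mac_len)
{
    if (len == 0)
        return 0;
    size_t pad = rec[len - 1];
    if (pad + 1 + mac_len > len)    /* not enough padding bytes present */
        return 0;
    for (size_t i = 0; i <= pad; i++)
        if (rec[len - 1 - i] != pad)
            return 0;               /* malformed padding */
    return 1;
}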
struct connection_list *smtp_server_connection_list_init(void)
{
return connection_list_init(&smtp_server_connection_set,
&smtp_server_connection_vfuncs);
} | 0 | [
"CWE-77"
]
| core | 321c339756f9b2b98fb7326359d1333adebb5295 | 162,434,066,972,287,310,000,000,000,000,000,000,000 | 5 | lib-smtp: smtp-server-connection - Fix STARTTLS command injection vulnerability.
The input handler kept reading more commands even though the input was locked by
the STARTTLS command, thereby causing it to read commands pipelined beyond
STARTTLS. This causes a STARTTLS command injection vulnerability. |
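A conceptual sketch of the loop shape after the fix: once a command locks the input (STARTTLS does), no further pipelined plaintext commands are parsed; later input is read only through the TLS layer. All names are illustrative.

#include <stdbool.h>

struct conn { bool input_locked; };   /* set by the STARTTLS handler */

/* assumed helpers, declared only so the sketch is self-contained */
const char *read_next_command(struct conn *c);
void handle_command(struct conn *c, const char *line);

static void handle_input(struct conn *c)
{
    const char *line;
    /* stop at the first command that locks input: pipelined plaintext
     * bytes after STARTTLS are never executed as commands */
    while (!c->input_locked && (line = read_next_command(c)) != NULL)
        handle_command(c, line);
}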
void cgroup_exit(struct task_struct *tsk)
{
struct cgroup_subsys *ss;
struct css_set *cset;
int i;
spin_lock_irq(&css_set_lock);
WARN_ON_ONCE(list_empty(&tsk->cg_list));
cset = task_css_set(tsk);
css_set_move_task(tsk, cset, NULL, false);
list_add_tail(&tsk->cg_list, &cset->dying_tasks);
cset->nr_tasks--;
WARN_ON_ONCE(cgroup_task_frozen(tsk));
if (unlikely(!(tsk->flags & PF_KTHREAD) &&
test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags)))
cgroup_update_frozen(task_dfl_cgroup(tsk));
spin_unlock_irq(&css_set_lock);
/* see cgroup_post_fork() for details */
do_each_subsys_mask(ss, i, have_exit_callback) {
ss->exit(tsk);
} while_each_subsys_mask();
} | 0 | [
"CWE-416"
]
| linux | a06247c6804f1a7c86a2e5398a4c1f1db1471848 | 56,398,746,449,433,710,000,000,000,000,000,000,000 | 26 | psi: Fix uaf issue when psi trigger is destroyed while being polled
With a write operation on psi files replacing the old trigger with a new
one, the lifetime of its waitqueue becomes totally arbitrary. Overwriting an
existing trigger causes its waitqueue to be freed and pending poll()
will stumble on trigger->event_wait which was destroyed.
Fix this by disallowing redefinition of an existing psi trigger. If a write
operation is used on a file descriptor with an already existing psi
trigger, the operation will fail with EBUSY error.
Also bypass a check for psi_disabled in the psi_trigger_destroy as the
flag can be flipped after the trigger is created, leading to a memory
leak.
Fixes: 0e94682b73bf ("psi: introduce psi monitor")
Reported-by: [email protected]
Suggested-by: Linus Torvalds <[email protected]>
Analyzed-by: Eric Biggers <[email protected]>
Signed-off-by: Suren Baghdasaryan <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Eric Biggers <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: [email protected]
Link: https://lore.kernel.org/r/[email protected] |
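The EBUSY rule reduces to a small sketch: a psi file descriptor owns at most one trigger for its lifetime, so a second write can no longer free a waitqueue that a concurrent poll() sleeps on. ERR_PTR is the usual kernel error-pointer helper; the helper name below is illustrative.

static struct psi_trigger *
psi_trigger_attach_once(struct psi_trigger **slot, struct psi_trigger *new)
{
    if (*slot)                     /* fd already owns a trigger */
        return ERR_PTR(-EBUSY);    /* redefinition is refused now */
    *slot = new;
    return new;
}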
void
word_list_remove_quoted_nulls (list)
WORD_LIST *list;
{
register WORD_LIST *t;
for (t = list; t; t = t->next)
{
remove_quoted_nulls (t->word->word);
t->word->flags &= ~W_HASQUOTEDNULL;
}
} | 0 | [
"CWE-20"
]
| bash | 4f747edc625815f449048579f6e65869914dd715 | 11,634,659,674,545,304,000,000,000,000,000,000,000 | 11 | Bash-4.4 patch 7 |
BigInt inverse_mod_order(const BigInt& x) const
{
return inverse_mod(x, m_order);
} | 0 | [
"CWE-200"
]
| botan | 48fc8df51d99f9d8ba251219367b3d629cc848e3 | 273,833,051,809,616,260,000,000,000,000,000,000,000 | 4 | Address DSA/ECDSA side channel |
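The message is terse, so as background only: a standard countermeasure against timing leaks in modular inversion of a secret scalar (whether or not this commit implements exactly this) is multiplicative blinding, resting on the identity (x*b)^-1 * b = x^-1 (mod n) for a random b coprime to n, so that the variable-time inversion runs on the blinded value x*b rather than on the secret x itself.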
static int sanity_check_raw_super(struct super_block *sb,
struct f2fs_super_block *raw_super)
{
unsigned int blocksize;
if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
f2fs_msg(sb, KERN_INFO,
"Magic Mismatch, valid(0x%x) - read(0x%x)",
F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
return 1;
}
/* Currently, support only 4KB page cache size */
if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
f2fs_msg(sb, KERN_INFO,
"Invalid page_cache_size (%lu), supports only 4KB\n",
PAGE_CACHE_SIZE);
return 1;
}
/* Currently, support only 4KB block size */
blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
if (blocksize != F2FS_BLKSIZE) {
f2fs_msg(sb, KERN_INFO,
"Invalid blocksize (%u), supports only 4KB\n",
blocksize);
return 1;
}
/* check log blocks per segment */
if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
f2fs_msg(sb, KERN_INFO,
"Invalid log blocks per segment (%u)\n",
le32_to_cpu(raw_super->log_blocks_per_seg));
return 1;
}
/* Currently, support 512/1024/2048/4096 bytes sector size */
if (le32_to_cpu(raw_super->log_sectorsize) >
F2FS_MAX_LOG_SECTOR_SIZE ||
le32_to_cpu(raw_super->log_sectorsize) <
F2FS_MIN_LOG_SECTOR_SIZE) {
f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
le32_to_cpu(raw_super->log_sectorsize));
return 1;
}
if (le32_to_cpu(raw_super->log_sectors_per_block) +
le32_to_cpu(raw_super->log_sectorsize) !=
F2FS_MAX_LOG_SECTOR_SIZE) {
f2fs_msg(sb, KERN_INFO,
"Invalid log sectors per block(%u) log sectorsize(%u)",
le32_to_cpu(raw_super->log_sectors_per_block),
le32_to_cpu(raw_super->log_sectorsize));
return 1;
}
/* check reserved ino info */
if (le32_to_cpu(raw_super->node_ino) != 1 ||
le32_to_cpu(raw_super->meta_ino) != 2 ||
le32_to_cpu(raw_super->root_ino) != 3) {
f2fs_msg(sb, KERN_INFO,
"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
le32_to_cpu(raw_super->node_ino),
le32_to_cpu(raw_super->meta_ino),
le32_to_cpu(raw_super->root_ino));
return 1;
}
/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
if (sanity_check_area_boundary(sb, raw_super))
return 1;
return 0;
} | 0 | [
"CWE-787"
]
| linux | 9a59b62fd88196844cee5fff851bee2cfd7afb6e | 145,838,015,188,938,500,000,000,000,000,000,000,000 | 74 | f2fs: do more integrity verification for superblock
Do more sanity check for superblock during ->mount.
Signed-off-by: Chao Yu <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]> |
static int pppol2tp_create(struct net *net, struct socket *sock)
{
int error = -ENOMEM;
struct sock *sk;
sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto);
if (!sk)
goto out;
sock_init_data(sock, sk);
sock->state = SS_UNCONNECTED;
sock->ops = &pppol2tp_ops;
sk->sk_backlog_rcv = pppol2tp_backlog_recv;
sk->sk_protocol = PX_PROTO_OL2TP;
sk->sk_family = PF_PPPOX;
sk->sk_state = PPPOX_NONE;
sk->sk_type = SOCK_STREAM;
sk->sk_destruct = pppol2tp_session_destruct;
error = 0;
out:
return error;
} | 0 | [
"CWE-20",
"CWE-269"
]
| linux | f3d3342602f8bcbf37d7c46641cb9bca7618eb1c | 317,658,467,543,051,300,000,000,000,000,000,000,000 | 26 | net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
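A sketch of the handler-side contract after this rework: the core hands the handler msg_namelen == 0, and a handler that returns an address must fill msg_name and set msg_namelen itself. The helper below is illustrative; peer_port/peer_addr stand in for protocol state.

static void fill_msg_name(struct msghdr *msg, __be16 peer_port,
                          __be32 peer_addr)
{
    if (msg->msg_name) {
        struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;

        sin->sin_family = AF_INET;
        sin->sin_port = peer_port;
        sin->sin_addr.s_addr = peer_addr;
        memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
        msg->msg_namelen = sizeof(*sin);   /* handler sets the length */
    }
    /* else: leave msg_namelen at the 0 the core passed in */
}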
void AsyncSSLSocket::resetClientHelloParsing(SSL* ssl) {
SSL_set_msg_callback(ssl, nullptr);
SSL_set_msg_callback_arg(ssl, nullptr);
clientHelloInfo_->clientHelloBuf_.clear();
} | 0 | [
"CWE-125"
]
| folly | c321eb588909646c15aefde035fd3133ba32cdee | 307,945,055,941,151,260,000,000,000,000,000,000,000 | 5 | Handle close_notify as standard writeErr in AsyncSSLSocket.
Summary: Fixes CVE-2019-11934
Reviewed By: mingtaoy
Differential Revision: D18020613
fbshipit-source-id: db82bb250e53f0d225f1280bd67bc74abd417836 |
archive_is_encrypted (FrWindow *window,
GList *file_list)
{
gboolean encrypted = FALSE;
if (file_list == NULL) {
int i;
for (i = 0; ! encrypted && i < window->archive->files->len; i++) {
FileData *fdata = g_ptr_array_index (window->archive->files, i);
if (fdata->encrypted)
encrypted = TRUE;
}
}
else {
GList *scan;
for (scan = file_list; ! encrypted && scan; scan = scan->next) {
char *filename = scan->data;
FileData *fdata;
fdata = g_hash_table_lookup (window->archive->files_hash, filename);
g_return_val_if_fail (fdata != NULL, FALSE);
if (fdata->encrypted)
encrypted = TRUE;
}
}
return encrypted;
} | 0 | [
"CWE-22"
]
| file-roller | b147281293a8307808475e102a14857055f81631 | 40,623,910,598,836,815,000,000,000,000,000,000,000 | 32 | libarchive: sanitize filenames before extracting |
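A conceptual sketch of archive-entry path sanitization: reject absolute paths and any ".." component so extraction cannot escape the destination directory. Illustrative only; the actual fix hardens the pathname handling around libarchive.

#include <string.h>
#include <stdbool.h>

static bool entry_path_is_safe(const char *path)
{
    if (path[0] == '/')
        return false;                    /* absolute path */
    for (const char *p = path; *p != '\0'; ) {
        const char *sep = strchr(p, '/');
        size_t len = sep ? (size_t)(sep - p) : strlen(p);
        if (len == 2 && p[0] == '.' && p[1] == '.')
            return false;                /* ".." component */
        p += len + (sep ? 1 : 0);
    }
    return true;
}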
GF_Err tref_AddBox(GF_Box *ptr, GF_Box *a)
{
return gf_isom_box_add_default(ptr, a);
} | 0 | [
"CWE-125"
]
| gpac | bceb03fd2be95097a7b409ea59914f332fb6bc86 | 126,176,028,496,122,240,000,000,000,000,000,000,000 | 4 | fixed 2 possible heap overflows (inc. #1088) |
void test_checkout_nasty__dotcapitalgit_backslash_path(void)
{
#ifdef GIT_WIN32
test_checkout_fails("refs/heads/dotcapitalgit_backslash_path", ".GIT/foobar");
#endif
} | 0 | [
"CWE-20",
"CWE-706"
]
| libgit2 | 3f7851eadca36a99627ad78cbe56a40d3776ed01 | 339,357,726,915,127,040,000,000,000,000,000,000,000 | 6 | Disallow NTFS Alternate Data Stream attacks, even on Linux/macOS
A little-known feature of NTFS is that it offers to store metadata in
so-called "Alternate Data Streams" (inspired by Apple's "resource
forks") that are copied together with the file they are associated with.
These Alternate Data Streams can be accessed via `<file name>:<stream
name>:<stream type>`.
Directories, too, have Alternate Data Streams, and they even have a
default stream type `$INDEX_ALLOCATION`. Which means that `abc/` and
`abc::$INDEX_ALLOCATION/` are actually equivalent.
This is of course another attack vector on the Git directory that we
definitely want to prevent.
On Windows, we already do this incidentally, by disallowing colons in
file/directory names.
While it looks as if files'/directories' Alternate Data Streams are not
accessible in the Windows Subsystem for Linux, nor via
CIFS/SMB-mounted network shares in Linux, it _is_ possible to access
them on SMB-mounted network shares on macOS.
Therefore, let's go the extra mile and prevent this particular attack
_everywhere_. To keep things simple, let's just disallow *any* Alternate
Data Stream of `.git`.
This is libgit2's variant of CVE-2019-1352.
Signed-off-by: Johannes Schindelin <[email protected]> |
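The check reduces to one extra character class: after matching ".git" case-insensitively, a following ':' (the NTFS stream separator) is forbidden exactly like '/' or end-of-name. A minimal sketch; the real check also covers 8.3 short names such as "git~1".

#include <string.h>
#include <strings.h>
#include <stdbool.h>

static bool is_dotgit_ads(const char *name)
{
    if (strncasecmp(name, ".git", 4) != 0)
        return false;
    /* ".git", ".git/...", and ".git:<stream>:<type>" are all rejected */
    return name[4] == '\0' || name[4] == '/' || name[4] == ':';
}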
SES_SetTransport(struct worker *wrk, struct sess *sp, struct req *req,
const struct transport *xp)
{
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
CHECK_OBJ_NOTNULL(xp, TRANSPORT_MAGIC);
assert(xp->number > 0);
sp->sattr[SA_TRANSPORT] = xp->number;
req->transport = xp;
wrk->task.func = xp->new_session;
wrk->task.priv = req;
} | 0 | [
"CWE-617"
]
| varnish-cache | 2d8fc1a784a1e26d78c30174923a2b14ee2ebf62 | 184,958,560,193,663,550,000,000,000,000,000,000,000 | 15 | Take sizeof pool_task into account when reserving WS in SES_Wait
The assert on WS_ReserveSize() in ses_handle() cannot trip because
sizeof (struct pool_task) is less than sizeof (struct waited). But to
safeguard against future problems if that were to change, this patch makes
sure that the session workspace can hold the largest of them before
entering the waiter, erroring out if not. |
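A fragment-level sketch of the guard described: before parking the session, reserve workspace for the larger of the two structures that may live there, and fail the session if even that cannot be reserved. vmax() is an illustrative max macro; the surrounding error handling is condensed.

#define vmax(a, b) ((a) > (b) ? (a) : (b))

unsigned sz = vmax(sizeof (struct waited), sizeof (struct pool_task));
if (WS_ReserveSize(sp->ws, sz) < sz) {
    SES_Delete(sp, SC_OVERLOAD, NAN);   /* cannot park this session */
    return;
}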
int delegpt_add_target_mlc(struct delegpt* dp, uint8_t* name, size_t namelen,
struct sockaddr_storage* addr, socklen_t addrlen, uint8_t bogus,
uint8_t lame)
{
struct delegpt_ns* ns = delegpt_find_ns(dp, name, namelen);
log_assert(dp->dp_type_mlc);
if(!ns) {
/* ignore it */
return 1;
}
if(!lame) {
if(addr_is_ip6(addr, addrlen))
ns->got6 = 1;
else ns->got4 = 1;
if(ns->got4 && ns->got6)
ns->resolved = 1;
}
return delegpt_add_addr_mlc(dp, addr, addrlen, bogus, lame, NULL);
} | 0 | [
"CWE-400"
]
| unbound | ba0f382eee814e56900a535778d13206b86b6d49 | 276,320,971,787,050,400,000,000,000,000,000,000,000 | 19 | - CVE-2020-12662 Unbound can be tricked into amplifying an incoming
query into a large number of queries directed to a target.
- CVE-2020-12663 Malformed answers from upstream name servers can be
used to make Unbound unresponsive. |
mrb_vm_exec(mrb_state *mrb, const struct RProc *proc, const mrb_code *pc)
{
/* mrb_assert(MRB_PROC_CFUNC_P(proc)) */
const mrb_irep *irep = proc->body.irep;
const mrb_pool_value *pool = irep->pool;
const mrb_sym *syms = irep->syms;
mrb_code insn;
int ai = mrb_gc_arena_save(mrb);
struct mrb_jmpbuf *prev_jmp = mrb->jmp;
struct mrb_jmpbuf c_jmp;
uint32_t a;
uint16_t b;
uint16_t c;
mrb_sym mid;
const struct mrb_irep_catch_handler *ch;
#ifdef DIRECT_THREADED
static const void * const optable[] = {
#define OPCODE(x,_) &&L_OP_ ## x,
#include "mruby/ops.h"
#undef OPCODE
};
#endif
mrb_bool exc_catched = FALSE;
RETRY_TRY_BLOCK:
MRB_TRY(&c_jmp) {
if (exc_catched) {
exc_catched = FALSE;
mrb_gc_arena_restore(mrb, ai);
if (mrb->exc && mrb->exc->tt == MRB_TT_BREAK)
goto L_BREAK;
goto L_RAISE;
}
mrb->jmp = &c_jmp;
mrb_vm_ci_proc_set(mrb->c->ci, proc);
#define regs (mrb->c->ci->stack)
INIT_DISPATCH {
CASE(OP_NOP, Z) {
/* do nothing */
NEXT;
}
CASE(OP_MOVE, BB) {
regs[a] = regs[b];
NEXT;
}
CASE(OP_LOADL, BB) {
switch (pool[b].tt) { /* number */
case IREP_TT_INT32:
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i32);
break;
case IREP_TT_INT64:
#if defined(MRB_INT64)
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
break;
#else
#if defined(MRB_64BIT)
if (INT32_MIN <= pool[b].u.i64 && pool[b].u.i64 <= INT32_MAX) {
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
break;
}
#endif
goto L_INT_OVERFLOW;
#endif
case IREP_TT_BIGINT:
goto L_INT_OVERFLOW;
#ifndef MRB_NO_FLOAT
case IREP_TT_FLOAT:
regs[a] = mrb_float_value(mrb, pool[b].u.f);
break;
#endif
default:
/* should not happen (tt:string) */
regs[a] = mrb_nil_value();
break;
}
NEXT;
}
CASE(OP_LOADI, BB) {
SET_FIXNUM_VALUE(regs[a], b);
NEXT;
}
CASE(OP_LOADINEG, BB) {
SET_FIXNUM_VALUE(regs[a], -b);
NEXT;
}
CASE(OP_LOADI__1,B) goto L_LOADI;
CASE(OP_LOADI_0,B) goto L_LOADI;
CASE(OP_LOADI_1,B) goto L_LOADI;
CASE(OP_LOADI_2,B) goto L_LOADI;
CASE(OP_LOADI_3,B) goto L_LOADI;
CASE(OP_LOADI_4,B) goto L_LOADI;
CASE(OP_LOADI_5,B) goto L_LOADI;
CASE(OP_LOADI_6,B) goto L_LOADI;
CASE(OP_LOADI_7, B) {
L_LOADI:
SET_FIXNUM_VALUE(regs[a], (mrb_int)insn - (mrb_int)OP_LOADI_0);
NEXT;
}
CASE(OP_LOADI16, BS) {
SET_FIXNUM_VALUE(regs[a], (mrb_int)(int16_t)b);
NEXT;
}
CASE(OP_LOADI32, BSS) {
SET_INT_VALUE(mrb, regs[a], (int32_t)(((uint32_t)b<<16)+c));
NEXT;
}
CASE(OP_LOADSYM, BB) {
SET_SYM_VALUE(regs[a], syms[b]);
NEXT;
}
CASE(OP_LOADNIL, B) {
SET_NIL_VALUE(regs[a]);
NEXT;
}
CASE(OP_LOADSELF, B) {
regs[a] = regs[0];
NEXT;
}
CASE(OP_LOADT, B) {
SET_TRUE_VALUE(regs[a]);
NEXT;
}
CASE(OP_LOADF, B) {
SET_FALSE_VALUE(regs[a]);
NEXT;
}
CASE(OP_GETGV, BB) {
mrb_value val = mrb_gv_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETGV, BB) {
mrb_gv_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETSV, BB) {
mrb_value val = mrb_vm_special_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETSV, BB) {
mrb_vm_special_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETIV, BB) {
regs[a] = mrb_iv_get(mrb, regs[0], syms[b]);
NEXT;
}
CASE(OP_SETIV, BB) {
mrb_iv_set(mrb, regs[0], syms[b], regs[a]);
NEXT;
}
CASE(OP_GETCV, BB) {
mrb_value val;
val = mrb_vm_cv_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETCV, BB) {
mrb_vm_cv_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETIDX, B) {
mrb_value va = regs[a], vb = regs[a+1];
switch (mrb_type(va)) {
case MRB_TT_ARRAY:
if (!mrb_integer_p(vb)) goto getidx_fallback;
regs[a] = mrb_ary_entry(va, mrb_integer(vb));
break;
case MRB_TT_HASH:
va = mrb_hash_get(mrb, va, vb);
regs[a] = va;
break;
case MRB_TT_STRING:
switch (mrb_type(vb)) {
case MRB_TT_INTEGER:
case MRB_TT_STRING:
case MRB_TT_RANGE:
va = mrb_str_aref(mrb, va, vb, mrb_undef_value());
regs[a] = va;
break;
default:
goto getidx_fallback;
}
break;
default:
getidx_fallback:
mid = MRB_OPSYM(aref);
goto L_SEND_SYM;
}
NEXT;
}
CASE(OP_SETIDX, B) {
c = 2;
mid = MRB_OPSYM(aset);
SET_NIL_VALUE(regs[a+3]);
goto L_SENDB_SYM;
}
CASE(OP_GETCONST, BB) {
mrb_value v = mrb_vm_const_get(mrb, syms[b]);
regs[a] = v;
NEXT;
}
CASE(OP_SETCONST, BB) {
mrb_vm_const_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETMCNST, BB) {
mrb_value v = mrb_const_get(mrb, regs[a], syms[b]);
regs[a] = v;
NEXT;
}
CASE(OP_SETMCNST, BB) {
mrb_const_set(mrb, regs[a+1], syms[b], regs[a]);
NEXT;
}
CASE(OP_GETUPVAR, BBB) {
mrb_value *regs_a = regs + a;
struct REnv *e = uvenv(mrb, c);
if (e && b < MRB_ENV_LEN(e)) {
*regs_a = e->stack[b];
}
else {
*regs_a = mrb_nil_value();
}
NEXT;
}
CASE(OP_SETUPVAR, BBB) {
struct REnv *e = uvenv(mrb, c);
if (e) {
mrb_value *regs_a = regs + a;
if (b < MRB_ENV_LEN(e)) {
e->stack[b] = *regs_a;
mrb_write_barrier(mrb, (struct RBasic*)e);
}
}
NEXT;
}
CASE(OP_JMP, S) {
pc += (int16_t)a;
JUMP;
}
CASE(OP_JMPIF, BS) {
if (mrb_test(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPNOT, BS) {
if (!mrb_test(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPNIL, BS) {
if (mrb_nil_p(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPUW, S) {
a = (uint32_t)((pc - irep->iseq) + (int16_t)a);
CHECKPOINT_RESTORE(RBREAK_TAG_JUMP) {
struct RBreak *brk = (struct RBreak*)mrb->exc;
mrb_value target = mrb_break_value_get(brk);
mrb_assert(mrb_integer_p(target));
a = (uint32_t)mrb_integer(target);
mrb_assert(a >= 0 && a < irep->ilen);
}
CHECKPOINT_MAIN(RBREAK_TAG_JUMP) {
ch = catch_handler_find(mrb, mrb->c->ci, pc, MRB_CATCH_FILTER_ENSURE);
if (ch) {
/* avoiding a jump from a catch handler into the same handler */
if (a < mrb_irep_catch_handler_unpack(ch->begin) || a >= mrb_irep_catch_handler_unpack(ch->end)) {
THROW_TAGGED_BREAK(mrb, RBREAK_TAG_JUMP, proc, mrb_fixnum_value(a));
}
}
}
CHECKPOINT_END(RBREAK_TAG_JUMP);
mrb->exc = NULL; /* clear break object */
pc = irep->iseq + a;
JUMP;
}
CASE(OP_EXCEPT, B) {
mrb_value exc;
if (mrb->exc == NULL) {
exc = mrb_nil_value();
}
else {
switch (mrb->exc->tt) {
case MRB_TT_BREAK:
case MRB_TT_EXCEPTION:
exc = mrb_obj_value(mrb->exc);
break;
default:
mrb_assert(!"bad mrb_type");
exc = mrb_nil_value();
break;
}
mrb->exc = NULL;
}
regs[a] = exc;
NEXT;
}
CASE(OP_RESCUE, BB) {
mrb_value exc = regs[a]; /* exc on stack */
mrb_value e = regs[b];
struct RClass *ec;
switch (mrb_type(e)) {
case MRB_TT_CLASS:
case MRB_TT_MODULE:
break;
default:
{
mrb_value exc;
exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR,
"class or module required for rescue clause");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
}
ec = mrb_class_ptr(e);
regs[b] = mrb_bool_value(mrb_obj_is_kind_of(mrb, exc, ec));
NEXT;
}
CASE(OP_RAISEIF, B) {
mrb_value exc = regs[a];
if (mrb_break_p(exc)) {
mrb->exc = mrb_obj_ptr(exc);
goto L_BREAK;
}
mrb_exc_set(mrb, exc);
if (mrb->exc) {
goto L_RAISE;
}
NEXT;
}
CASE(OP_SSEND, BBB) {
regs[a] = regs[0];
insn = OP_SEND;
}
goto L_SENDB;
CASE(OP_SSENDB, BBB) {
regs[a] = regs[0];
}
goto L_SENDB;
CASE(OP_SEND, BBB)
goto L_SENDB;
L_SEND_SYM:
c = 1;
/* push nil after arguments */
SET_NIL_VALUE(regs[a+2]);
goto L_SENDB_SYM;
CASE(OP_SENDB, BBB)
L_SENDB:
mid = syms[b];
L_SENDB_SYM:
{
mrb_callinfo *ci = mrb->c->ci;
mrb_method_t m;
struct RClass *cls;
mrb_value recv, blk;
ARGUMENT_NORMALIZE(a, &c, insn);
recv = regs[a];
cls = mrb_class(mrb, recv);
m = mrb_method_search_vm(mrb, &cls, mid);
if (MRB_METHOD_UNDEF_P(m)) {
m = prepare_missing(mrb, recv, mid, &cls, a, &c, blk, 0);
mid = MRB_SYM(method_missing);
}
/* push callinfo */
ci = cipush(mrb, a, 0, cls, NULL, mid, c);
if (MRB_METHOD_CFUNC_P(m)) {
if (MRB_METHOD_PROC_P(m)) {
struct RProc *p = MRB_METHOD_PROC(m);
mrb_vm_ci_proc_set(ci, p);
recv = p->body.func(mrb, recv);
}
else {
if (MRB_METHOD_NOARG_P(m)) {
check_method_noarg(mrb, ci);
}
recv = MRB_METHOD_FUNC(m)(mrb, recv);
}
mrb_gc_arena_shrink(mrb, ai);
if (mrb->exc) goto L_RAISE;
ci = mrb->c->ci;
if (mrb_proc_p(blk)) {
struct RProc *p = mrb_proc_ptr(blk);
if (p && !MRB_PROC_STRICT_P(p) && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) {
p->flags |= MRB_PROC_ORPHAN;
}
}
if (!ci->u.target_class) { /* return from context modifying method (resume/yield) */
if (ci->cci == CINFO_RESUMED) {
mrb->jmp = prev_jmp;
return recv;
}
else {
mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc));
proc = ci[-1].proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
}
}
ci->stack[0] = recv;
/* pop stackpos */
ci = cipop(mrb);
pc = ci->pc;
}
else {
/* setup environment for calling method */
mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m)));
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs);
pc = irep->iseq;
}
}
JUMP;
CASE(OP_CALL, Z) {
mrb_callinfo *ci = mrb->c->ci;
mrb_value recv = ci->stack[0];
struct RProc *m = mrb_proc_ptr(recv);
/* replace callinfo */
ci->u.target_class = MRB_PROC_TARGET_CLASS(m);
mrb_vm_ci_proc_set(ci, m);
if (MRB_PROC_ENV_P(m)) {
ci->mid = MRB_PROC_ENV(m)->mid;
}
/* prepare stack */
if (MRB_PROC_CFUNC_P(m)) {
recv = MRB_PROC_CFUNC(m)(mrb, recv);
mrb_gc_arena_shrink(mrb, ai);
if (mrb->exc) goto L_RAISE;
/* pop stackpos */
ci = cipop(mrb);
pc = ci->pc;
ci[1].stack[0] = recv;
irep = mrb->c->ci->proc->body.irep;
}
else {
/* setup environment for calling method */
proc = m;
irep = m->body.irep;
if (!irep) {
mrb->c->ci->stack[0] = mrb_nil_value();
a = 0;
c = OP_R_NORMAL;
goto L_OP_RETURN_BODY;
}
mrb_int nargs = mrb_ci_bidx(ci)+1;
if (nargs < irep->nregs) {
mrb_stack_extend(mrb, irep->nregs);
stack_clear(regs+nargs, irep->nregs-nargs);
}
if (MRB_PROC_ENV_P(m)) {
regs[0] = MRB_PROC_ENV(m)->stack[0];
}
pc = irep->iseq;
}
pool = irep->pool;
syms = irep->syms;
JUMP;
}
CASE(OP_SUPER, BB) {
mrb_method_t m;
struct RClass *cls;
mrb_callinfo *ci = mrb->c->ci;
mrb_value recv, blk;
const struct RProc *p = ci->proc;
mrb_sym mid = ci->mid;
struct RClass* target_class = MRB_PROC_TARGET_CLASS(p);
if (MRB_PROC_ENV_P(p) && p->e.env->mid && p->e.env->mid != mid) { /* alias support */
mid = p->e.env->mid; /* restore old mid */
}
if (mid == 0 || !target_class) {
mrb_value exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (target_class->flags & MRB_FL_CLASS_IS_PREPENDED) {
target_class = mrb_vm_ci_target_class(ci);
}
else if (target_class->tt == MRB_TT_MODULE) {
target_class = mrb_vm_ci_target_class(ci);
if (target_class->tt != MRB_TT_ICLASS) {
goto super_typeerror;
}
}
recv = regs[0];
if (!mrb_obj_is_kind_of(mrb, recv, target_class)) {
super_typeerror: ;
mrb_value exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR,
"self has wrong type to call super in this context");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
ARGUMENT_NORMALIZE(a, &b, OP_SUPER);
cls = target_class->super;
m = mrb_method_search_vm(mrb, &cls, mid);
if (MRB_METHOD_UNDEF_P(m)) {
m = prepare_missing(mrb, recv, mid, &cls, a, &b, blk, 1);
mid = MRB_SYM(method_missing);
}
/* push callinfo */
ci = cipush(mrb, a, 0, cls, NULL, mid, b);
/* prepare stack */
ci->stack[0] = recv;
if (MRB_METHOD_CFUNC_P(m)) {
mrb_value v;
if (MRB_METHOD_PROC_P(m)) {
mrb_vm_ci_proc_set(ci, MRB_METHOD_PROC(m));
}
v = MRB_METHOD_CFUNC(m)(mrb, recv);
mrb_gc_arena_restore(mrb, ai);
if (mrb->exc) goto L_RAISE;
ci = mrb->c->ci;
mrb_assert(!mrb_break_p(v));
if (!mrb_vm_ci_target_class(ci)) { /* return from context modifying method (resume/yield) */
if (ci->cci == CINFO_RESUMED) {
mrb->jmp = prev_jmp;
return v;
}
else {
mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc));
proc = ci[-1].proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
}
}
mrb->c->ci->stack[0] = v;
ci = cipop(mrb);
pc = ci->pc;
}
else {
/* setup environment for calling method */
mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m)));
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs);
pc = irep->iseq;
}
JUMP;
}
CASE(OP_ARGARY, BS) {
mrb_int m1 = (b>>11)&0x3f;
mrb_int r = (b>>10)&0x1;
mrb_int m2 = (b>>5)&0x1f;
mrb_int kd = (b>>4)&0x1;
mrb_int lv = (b>>0)&0xf;
mrb_value *stack;
if (mrb->c->ci->mid == 0 || mrb_vm_ci_target_class(mrb->c->ci) == NULL) {
mrb_value exc;
L_NOSUPER:
exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (lv == 0) stack = regs + 1;
else {
struct REnv *e = uvenv(mrb, lv-1);
if (!e) goto L_NOSUPER;
if (MRB_ENV_LEN(e) <= m1+r+m2+1)
goto L_NOSUPER;
stack = e->stack + 1;
}
if (r == 0) {
regs[a] = mrb_ary_new_from_values(mrb, m1+m2, stack);
}
else {
mrb_value *pp = NULL;
struct RArray *rest;
mrb_int len = 0;
if (mrb_array_p(stack[m1])) {
struct RArray *ary = mrb_ary_ptr(stack[m1]);
pp = ARY_PTR(ary);
len = ARY_LEN(ary);
}
regs[a] = mrb_ary_new_capa(mrb, m1+len+m2);
rest = mrb_ary_ptr(regs[a]);
if (m1 > 0) {
stack_copy(ARY_PTR(rest), stack, m1);
}
if (len > 0) {
stack_copy(ARY_PTR(rest)+m1, pp, len);
}
if (m2 > 0) {
stack_copy(ARY_PTR(rest)+m1+len, stack+m1+1, m2);
}
ARY_SET_LEN(rest, m1+len+m2);
}
if (kd) {
regs[a+1] = stack[m1+r+m2];
regs[a+2] = stack[m1+r+m2+1];
}
else {
regs[a+1] = stack[m1+r+m2];
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ENTER, W) {
mrb_int m1 = MRB_ASPEC_REQ(a);
mrb_int o = MRB_ASPEC_OPT(a);
mrb_int r = MRB_ASPEC_REST(a);
mrb_int m2 = MRB_ASPEC_POST(a);
mrb_int kd = (MRB_ASPEC_KEY(a) > 0 || MRB_ASPEC_KDICT(a))? 1 : 0;
/* unused
int b = MRB_ASPEC_BLOCK(a);
*/
mrb_int const len = m1 + o + r + m2;
mrb_callinfo *ci = mrb->c->ci;
mrb_int argc = ci->n;
mrb_value *argv = regs+1;
mrb_value * const argv0 = argv;
mrb_int const kw_pos = len + kd; /* where kwhash should be */
mrb_int const blk_pos = kw_pos + 1; /* where block should be */
mrb_value blk = regs[mrb_ci_bidx(ci)];
mrb_value kdict = mrb_nil_value();
/* keyword arguments */
if (ci->nk > 0) {
mrb_int kidx = mrb_ci_kidx(ci);
kdict = regs[kidx];
if (!mrb_hash_p(kdict) || mrb_hash_size(mrb, kdict) == 0) {
kdict = mrb_nil_value();
ci->nk = 0;
}
}
if (!kd && !mrb_nil_p(kdict)) {
if (argc < 14) {
ci->n++;
argc++; /* include kdict in normal arguments */
}
else if (argc == 14) {
/* pack arguments and kdict */
regs[1] = mrb_ary_new_from_values(mrb, argc+1, ®s[1]);
argc = ci->n = 15;
}
else {/* argc == 15 */
/* push kdict to packed arguments */
mrb_ary_push(mrb, regs[1], regs[2]);
}
ci->nk = 0;
}
if (kd && MRB_ASPEC_KEY(a) > 0 && mrb_hash_p(kdict)) {
kdict = mrb_hash_dup(mrb, kdict);
}
/* arguments is passed with Array */
if (argc == 15) {
struct RArray *ary = mrb_ary_ptr(regs[1]);
argv = ARY_PTR(ary);
argc = (int)ARY_LEN(ary);
mrb_gc_protect(mrb, regs[1]);
}
/* strict argument check */
if (ci->proc && MRB_PROC_STRICT_P(ci->proc)) {
if (argc < m1 + m2 || (r == 0 && argc > len)) {
argnum_error(mrb, m1+m2);
goto L_RAISE;
}
}
/* extract first argument array to arguments */
else if (len > 1 && argc == 1 && mrb_array_p(argv[0])) {
mrb_gc_protect(mrb, argv[0]);
argc = (int)RARRAY_LEN(argv[0]);
argv = RARRAY_PTR(argv[0]);
}
/* rest arguments */
mrb_value rest = mrb_nil_value();
if (argc < len) {
mrb_int mlen = m2;
if (argc < m1+m2) {
mlen = m1 < argc ? argc - m1 : 0;
}
/* copy mandatory and optional arguments */
if (argv0 != argv && argv) {
value_move(®s[1], argv, argc-mlen); /* m1 + o */
}
if (argc < m1) {
stack_clear(®s[argc+1], m1-argc);
}
/* copy post mandatory arguments */
if (mlen) {
value_move(®s[len-m2+1], &argv[argc-mlen], mlen);
}
if (mlen < m2) {
stack_clear(®s[len-m2+mlen+1], m2-mlen);
}
/* initialize rest arguments with empty Array */
if (r) {
rest = mrb_ary_new_capa(mrb, 0);
regs[m1+o+1] = rest;
}
/* skip initializer of passed arguments */
if (o > 0 && argc > m1+m2)
pc += (argc - m1 - m2)*3;
}
else {
mrb_int rnum = 0;
if (argv0 != argv) {
value_move(®s[1], argv, m1+o);
}
if (r) {
rnum = argc-m1-o-m2;
rest = mrb_ary_new_from_values(mrb, rnum, argv+m1+o);
regs[m1+o+1] = rest;
}
if (m2 > 0 && argc-m2 > m1) {
value_move(®s[m1+o+r+1], &argv[m1+o+rnum], m2);
}
pc += o*3;
}
/* need to be update blk first to protect blk from GC */
regs[blk_pos] = blk; /* move block */
if (kd) {
if (mrb_nil_p(kdict))
kdict = mrb_hash_new_capa(mrb, 0);
regs[kw_pos] = kdict; /* set kwhash */
}
/* format arguments for generated code */
mrb->c->ci->n = len;
/* clear local (but non-argument) variables */
if (irep->nlocals-blk_pos-1 > 0) {
stack_clear(®s[blk_pos+1], irep->nlocals-blk_pos-1);
}
JUMP;
}
CASE(OP_KARG, BB) {
mrb_value k = mrb_symbol_value(syms[b]);
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict, v;
if (kidx < 0 || !mrb_hash_p(kdict=regs[kidx]) || !mrb_hash_key_p(mrb, kdict, k)) {
mrb_value str = mrb_format(mrb, "missing keyword: %v", k);
mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str));
goto L_RAISE;
}
v = mrb_hash_get(mrb, kdict, k);
regs[a] = v;
mrb_hash_delete_key(mrb, kdict, k);
NEXT;
}
CASE(OP_KEY_P, BB) {
mrb_value k = mrb_symbol_value(syms[b]);
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict;
mrb_bool key_p = FALSE;
if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx])) {
key_p = mrb_hash_key_p(mrb, kdict, k);
}
regs[a] = mrb_bool_value(key_p);
NEXT;
}
CASE(OP_KEYEND, Z) {
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict;
if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx]) && !mrb_hash_empty_p(mrb, kdict)) {
mrb_value keys = mrb_hash_keys(mrb, kdict);
mrb_value key1 = RARRAY_PTR(keys)[0];
mrb_value str = mrb_format(mrb, "unknown keyword: %v", key1);
mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str));
goto L_RAISE;
}
NEXT;
}
CASE(OP_BREAK, B) {
c = OP_R_BREAK;
goto L_RETURN;
}
CASE(OP_RETURN_BLK, B) {
c = OP_R_RETURN;
goto L_RETURN;
}
CASE(OP_RETURN, B)
c = OP_R_NORMAL;
L_RETURN:
{
mrb_callinfo *ci;
ci = mrb->c->ci;
if (ci->mid) {
mrb_value blk = regs[mrb_ci_bidx(ci)];
if (mrb_proc_p(blk)) {
struct RProc *p = mrb_proc_ptr(blk);
if (!MRB_PROC_STRICT_P(p) &&
ci > mrb->c->cibase && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) {
p->flags |= MRB_PROC_ORPHAN;
}
}
}
if (mrb->exc) {
L_RAISE:
ci = mrb->c->ci;
if (ci == mrb->c->cibase) {
ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL);
if (ch == NULL) goto L_FTOP;
goto L_CATCH;
}
while ((ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL)) == NULL) {
ci = cipop(mrb);
if (ci[1].cci == CINFO_SKIP && prev_jmp) {
mrb->jmp = prev_jmp;
MRB_THROW(prev_jmp);
}
pc = ci[0].pc;
if (ci == mrb->c->cibase) {
ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL);
if (ch == NULL) {
L_FTOP: /* fiber top */
if (mrb->c == mrb->root_c) {
mrb->c->ci->stack = mrb->c->stbase;
goto L_STOP;
}
else {
struct mrb_context *c = mrb->c;
c->status = MRB_FIBER_TERMINATED;
mrb->c = c->prev;
c->prev = NULL;
goto L_RAISE;
}
}
break;
}
}
L_CATCH:
if (ch == NULL) goto L_STOP;
if (FALSE) {
L_CATCH_TAGGED_BREAK: /* from THROW_TAGGED_BREAK() or UNWIND_ENSURE() */
ci = mrb->c->ci;
}
proc = ci->proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, irep->nregs);
pc = irep->iseq + mrb_irep_catch_handler_unpack(ch->target);
}
else {
mrb_int acc;
mrb_value v;
ci = mrb->c->ci;
v = regs[a];
mrb_gc_protect(mrb, v);
switch (c) {
case OP_R_RETURN:
/* falls through to OP_R_NORMAL unless returning from a block */
if (ci->cci == CINFO_NONE && MRB_PROC_ENV_P(proc) && !MRB_PROC_STRICT_P(proc)) {
const struct RProc *dst;
mrb_callinfo *cibase;
cibase = mrb->c->cibase;
dst = top_proc(mrb, proc);
if (MRB_PROC_ENV_P(dst)) {
struct REnv *e = MRB_PROC_ENV(dst);
if (!MRB_ENV_ONSTACK_P(e) || (e->cxt && e->cxt != mrb->c)) {
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
}
/* check jump destination */
while (cibase <= ci && ci->proc != dst) {
if (ci->cci > CINFO_NONE) { /* jump cross C boundary */
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
ci--;
}
if (ci <= cibase) { /* no jump destination */
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
ci = mrb->c->ci;
while (cibase <= ci && ci->proc != dst) {
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_BLOCK) {
cibase = mrb->c->cibase;
dst = top_proc(mrb, proc);
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN_BLOCK) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_BLOCK, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN_BLOCK);
ci = cipop(mrb);
pc = ci->pc;
}
proc = ci->proc;
mrb->exc = NULL; /* clear break object */
break;
}
/* fallthrough */
case OP_R_NORMAL:
NORMAL_RETURN:
if (ci == mrb->c->cibase) {
struct mrb_context *c;
c = mrb->c;
if (!c->prev) { /* toplevel return */
regs[irep->nlocals] = v;
goto CHECKPOINT_LABEL_MAKE(RBREAK_TAG_STOP);
}
if (!c->vmexec && c->prev->ci == c->prev->cibase) {
mrb_value exc = mrb_exc_new_lit(mrb, E_FIBER_ERROR, "double resume");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_TOPLEVEL) {
c = mrb->c;
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN_TOPLEVEL) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_TOPLEVEL, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN_TOPLEVEL);
/* automatic yield at the end */
c->status = MRB_FIBER_TERMINATED;
mrb->c = c->prev;
mrb->c->status = MRB_FIBER_RUNNING;
c->prev = NULL;
if (c->vmexec) {
mrb_gc_arena_restore(mrb, ai);
c->vmexec = FALSE;
mrb->jmp = prev_jmp;
return v;
}
ci = mrb->c->ci;
}
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN);
mrb->exc = NULL; /* clear break object */
break;
case OP_R_BREAK:
if (MRB_PROC_STRICT_P(proc)) goto NORMAL_RETURN;
if (MRB_PROC_ORPHAN_P(proc)) {
mrb_value exc;
L_BREAK_ERROR:
exc = mrb_exc_new_lit(mrb, E_LOCALJUMP_ERROR,
"break from proc-closure");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (!MRB_PROC_ENV_P(proc) || !MRB_ENV_ONSTACK_P(MRB_PROC_ENV(proc))) {
goto L_BREAK_ERROR;
}
else {
struct REnv *e = MRB_PROC_ENV(proc);
if (e->cxt != mrb->c) {
goto L_BREAK_ERROR;
}
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK);
/* break from fiber block */
if (ci == mrb->c->cibase && ci->pc) {
struct mrb_context *c = mrb->c;
mrb->c = c->prev;
c->prev = NULL;
ci = mrb->c->ci;
}
if (ci->cci > CINFO_NONE) {
ci = cipop(mrb);
mrb_gc_arena_restore(mrb, ai);
mrb->c->vmexec = FALSE;
mrb->exc = (struct RObject*)break_new(mrb, RBREAK_TAG_BREAK, proc, v);
mrb->jmp = prev_jmp;
MRB_THROW(prev_jmp);
}
if (FALSE) {
struct RBreak *brk;
L_BREAK:
brk = (struct RBreak*)mrb->exc;
proc = mrb_break_proc_get(brk);
v = mrb_break_value_get(brk);
ci = mrb->c->ci;
switch (mrb_break_tag_get(brk)) {
#define DISPATCH_CHECKPOINTS(n, i) case n: goto CHECKPOINT_LABEL_MAKE(n);
RBREAK_TAG_FOREACH(DISPATCH_CHECKPOINTS)
#undef DISPATCH_CHECKPOINTS
default:
mrb_assert(!"wrong break tag");
}
}
while (mrb->c->cibase < ci && ci[-1].proc != proc->upper) {
if (ci[-1].cci == CINFO_SKIP) {
goto L_BREAK_ERROR;
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_UPPER) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK_UPPER) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_UPPER, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK_UPPER);
ci = cipop(mrb);
pc = ci->pc;
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_INTARGET) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK_INTARGET) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_INTARGET, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK_INTARGET);
if (ci == mrb->c->cibase) {
goto L_BREAK_ERROR;
}
mrb->exc = NULL; /* clear break object */
break;
default:
/* cannot happen */
break;
}
mrb_assert(ci == mrb->c->ci);
mrb_assert(mrb->exc == NULL);
if (mrb->c->vmexec && !mrb_vm_ci_target_class(ci)) {
mrb_gc_arena_restore(mrb, ai);
mrb->c->vmexec = FALSE;
mrb->jmp = prev_jmp;
return v;
}
acc = ci->cci;
ci = cipop(mrb);
if (acc == CINFO_SKIP || acc == CINFO_DIRECT) {
mrb_gc_arena_restore(mrb, ai);
mrb->jmp = prev_jmp;
return v;
}
pc = ci->pc;
DEBUG(fprintf(stderr, "from :%s\n", mrb_sym_name(mrb, ci->mid)));
proc = ci->proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
ci[1].stack[0] = v;
mrb_gc_arena_restore(mrb, ai);
}
JUMP;
}
CASE(OP_BLKPUSH, BS) {
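/* The BS operand packs the calling method's argument shape:
   m1 (6 bits) leading mandatory args, r (1 bit) rest-array flag,
   m2 (5 bits) trailing mandatory args, kd (1 bit) keyword-dict flag,
   lv (4 bits) lexical scope distance (0 = the current frame). */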
int m1 = (b>>11)&0x3f;
int r = (b>>10)&0x1;
int m2 = (b>>5)&0x1f;
int kd = (b>>4)&0x1;
int lv = (b>>0)&0xf;
mrb_value *stack;
if (lv == 0) stack = regs + 1;
else {
struct REnv *e = uvenv(mrb, lv-1);
if (!e || (!MRB_ENV_ONSTACK_P(e) && e->mid == 0) ||
MRB_ENV_LEN(e) <= m1+r+m2+1) {
localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
goto L_RAISE;
}
stack = e->stack + 1;
}
if (mrb_nil_p(stack[m1+r+m2+kd])) {
localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
goto L_RAISE;
}
regs[a] = stack[m1+r+m2+kd];
NEXT;
}
L_INT_OVERFLOW:
{
mrb_value exc = mrb_exc_new_lit(mrb, E_RANGE_ERROR, "integer overflow");
mrb_exc_set(mrb, exc);
}
goto L_RAISE;
#define TYPES2(a,b) ((((uint16_t)(a))<<8)|(((uint16_t)(b))&0xff))
#define OP_MATH(op_name) \
/* need to check if op is overridden */ \
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { \
OP_MATH_CASE_INTEGER(op_name); \
OP_MATH_CASE_FLOAT(op_name, integer, float); \
OP_MATH_CASE_FLOAT(op_name, float, integer); \
OP_MATH_CASE_FLOAT(op_name, float, float); \
OP_MATH_CASE_STRING_##op_name(); \
default: \
mid = MRB_OPSYM(op_name); \
goto L_SEND_SYM; \
} \
NEXT;
#define OP_MATH_CASE_INTEGER(op_name) \
case TYPES2(MRB_TT_INTEGER, MRB_TT_INTEGER): \
{ \
mrb_int x = mrb_integer(regs[a]), y = mrb_integer(regs[a+1]), z; \
if (mrb_int_##op_name##_overflow(x, y, &z)) \
OP_MATH_OVERFLOW_INT(); \
else \
SET_INT_VALUE(mrb,regs[a], z); \
} \
break
#ifdef MRB_NO_FLOAT
#define OP_MATH_CASE_FLOAT(op_name, t1, t2) (void)0
#else
#define OP_MATH_CASE_FLOAT(op_name, t1, t2) \
case TYPES2(OP_MATH_TT_##t1, OP_MATH_TT_##t2): \
{ \
mrb_float z = mrb_##t1(regs[a]) OP_MATH_OP_##op_name mrb_##t2(regs[a+1]); \
SET_FLOAT_VALUE(mrb, regs[a], z); \
} \
break
#endif
#define OP_MATH_OVERFLOW_INT() goto L_INT_OVERFLOW
#define OP_MATH_CASE_STRING_add() \
case TYPES2(MRB_TT_STRING, MRB_TT_STRING): \
regs[a] = mrb_str_plus(mrb, regs[a], regs[a+1]); \
mrb_gc_arena_restore(mrb, ai); \
break
#define OP_MATH_CASE_STRING_sub() (void)0
#define OP_MATH_CASE_STRING_mul() (void)0
#define OP_MATH_OP_add +
#define OP_MATH_OP_sub -
#define OP_MATH_OP_mul *
#define OP_MATH_TT_integer MRB_TT_INTEGER
#define OP_MATH_TT_float MRB_TT_FLOAT
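/* For reference, OP_MATH(add) expands (roughly) to:
 *
 *   switch (TYPES2(mrb_type(regs[a]), mrb_type(regs[a+1]))) {
 *   case TYPES2(MRB_TT_INTEGER, MRB_TT_INTEGER): {
 *     mrb_int x = mrb_integer(regs[a]), y = mrb_integer(regs[a+1]), z;
 *     if (mrb_int_add_overflow(x, y, &z)) goto L_INT_OVERFLOW;
 *     else SET_INT_VALUE(mrb, regs[a], z);
 *   } break;
 *   ... float and string cases ...
 *   default: mid = MRB_OPSYM(add); goto L_SEND_SYM;
 *   }
 *
 * so any non-primitive pairing falls back to a real "+" method send,
 * and user-defined operator overrides keep working. */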
CASE(OP_ADD, B) {
OP_MATH(add);
}
CASE(OP_SUB, B) {
OP_MATH(sub);
}
CASE(OP_MUL, B) {
OP_MATH(mul);
}
CASE(OP_DIV, B) {
#ifndef MRB_NO_FLOAT
mrb_float x, y, f;
#endif
/* need to check if op is overridden */
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):
{
mrb_int x = mrb_integer(regs[a]);
mrb_int y = mrb_integer(regs[a+1]);
mrb_int div = mrb_div_int(mrb, x, y);
SET_INT_VALUE(mrb, regs[a], div);
}
NEXT;
#ifndef MRB_NO_FLOAT
case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):
x = (mrb_float)mrb_integer(regs[a]);
y = mrb_float(regs[a+1]);
break;
case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):
x = mrb_float(regs[a]);
y = (mrb_float)mrb_integer(regs[a+1]);
break;
case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):
x = mrb_float(regs[a]);
y = mrb_float(regs[a+1]);
break;
#endif
default:
mid = MRB_OPSYM(div);
goto L_SEND_SYM;
}
#ifndef MRB_NO_FLOAT
f = mrb_div_float(x, y);
SET_FLOAT_VALUE(mrb, regs[a], f);
#endif
NEXT;
}
#define OP_MATHI(op_name) \
/* need to check if op is overridden */ \
switch (mrb_type(regs[a])) { \
OP_MATHI_CASE_INTEGER(op_name); \
OP_MATHI_CASE_FLOAT(op_name); \
default: \
SET_INT_VALUE(mrb,regs[a+1], b); \
mid = MRB_OPSYM(op_name); \
goto L_SEND_SYM; \
} \
NEXT;
#define OP_MATHI_CASE_INTEGER(op_name) \
case MRB_TT_INTEGER: \
{ \
mrb_int x = mrb_integer(regs[a]), y = (mrb_int)b, z; \
if (mrb_int_##op_name##_overflow(x, y, &z)) \
OP_MATH_OVERFLOW_INT(); \
else \
SET_INT_VALUE(mrb,regs[a], z); \
} \
break
#ifdef MRB_NO_FLOAT
#define OP_MATHI_CASE_FLOAT(op_name) (void)0
#else
#define OP_MATHI_CASE_FLOAT(op_name) \
case MRB_TT_FLOAT: \
{ \
mrb_float z = mrb_float(regs[a]) OP_MATH_OP_##op_name b; \
SET_FLOAT_VALUE(mrb, regs[a], z); \
} \
break
#endif
CASE(OP_ADDI, BB) {
OP_MATHI(add);
}
CASE(OP_SUBI, BB) {
OP_MATHI(sub);
}
#define OP_CMP_BODY(op,v1,v2) (v1(regs[a]) op v2(regs[a+1]))
#ifdef MRB_NO_FLOAT
#define OP_CMP(op,sym) do {\
int result;\
/* need to check if the comparison op is overridden */\
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\
break;\
default:\
mid = MRB_OPSYM(sym);\
goto L_SEND_SYM;\
}\
if (result) {\
SET_TRUE_VALUE(regs[a]);\
}\
else {\
SET_FALSE_VALUE(regs[a]);\
}\
} while(0)
#else
#define OP_CMP(op, sym) do {\
int result;\
/* need to check if the comparison op is overridden */\
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\
break;\
case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_float);\
break;\
case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_float,mrb_fixnum);\
break;\
case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):\
result = OP_CMP_BODY(op,mrb_float,mrb_float);\
break;\
default:\
mid = MRB_OPSYM(sym);\
goto L_SEND_SYM;\
}\
if (result) {\
SET_TRUE_VALUE(regs[a]);\
}\
else {\
SET_FALSE_VALUE(regs[a]);\
}\
} while(0)
#endif
CASE(OP_EQ, B) {
if (mrb_obj_eq(mrb, regs[a], regs[a+1])) {
SET_TRUE_VALUE(regs[a]);
}
else {
OP_CMP(==,eq);
}
NEXT;
}
CASE(OP_LT, B) {
OP_CMP(<,lt);
NEXT;
}
CASE(OP_LE, B) {
OP_CMP(<=,le);
NEXT;
}
CASE(OP_GT, B) {
OP_CMP(>,gt);
NEXT;
}
CASE(OP_GE, B) {
OP_CMP(>=,ge);
NEXT;
}
CASE(OP_ARRAY, BB) {
regs[a] = mrb_ary_new_from_values(mrb, b, &regs[a]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARRAY2, BBB) {
regs[a] = mrb_ary_new_from_values(mrb, c, &regs[b]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARYCAT, B) {
mrb_value splat = mrb_ary_splat(mrb, regs[a+1]);
if (mrb_nil_p(regs[a])) {
regs[a] = splat;
}
else {
mrb_assert(mrb_array_p(regs[a]));
mrb_ary_concat(mrb, regs[a], splat);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARYPUSH, BB) {
mrb_assert(mrb_array_p(regs[a]));
for (mrb_int i=0; i<b; i++) {
mrb_ary_push(mrb, regs[a], regs[a+i+1]);
}
NEXT;
}
CASE(OP_ARYDUP, B) {
mrb_value ary = regs[a];
if (mrb_array_p(ary)) {
ary = mrb_ary_new_from_values(mrb, RARRAY_LEN(ary), RARRAY_PTR(ary));
}
else {
ary = mrb_ary_new_from_values(mrb, 1, &ary);
}
regs[a] = ary;
NEXT;
}
CASE(OP_AREF, BBB) {
mrb_value v = regs[b];
if (!mrb_array_p(v)) {
if (c == 0) {
regs[a] = v;
}
else {
SET_NIL_VALUE(regs[a]);
}
}
else {
v = mrb_ary_ref(mrb, v, c);
regs[a] = v;
}
NEXT;
}
CASE(OP_ASET, BBB) {
mrb_assert(mrb_array_p(regs[a]));
mrb_ary_set(mrb, regs[b], c, regs[a]);
NEXT;
}
CASE(OP_APOST, BBB) {
mrb_value v = regs[a];
int pre = b;
int post = c;
struct RArray *ary;
int len, idx;
if (!mrb_array_p(v)) {
v = mrb_ary_new_from_values(mrb, 1, &regs[a]);
}
ary = mrb_ary_ptr(v);
len = (int)ARY_LEN(ary);
if (len > pre + post) {
v = mrb_ary_new_from_values(mrb, len - pre - post, ARY_PTR(ary)+pre);
regs[a++] = v;
while (post--) {
regs[a++] = ARY_PTR(ary)[len-post-1];
}
}
else {
v = mrb_ary_new_capa(mrb, 0);
regs[a++] = v;
for (idx=0; idx+pre<len; idx++) {
regs[a+idx] = ARY_PTR(ary)[pre+idx];
}
while (idx < post) {
SET_NIL_VALUE(regs[a+idx]);
idx++;
}
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_INTERN, B) {
mrb_assert(mrb_string_p(regs[a]));
mrb_sym sym = mrb_intern_str(mrb, regs[a]);
regs[a] = mrb_symbol_value(sym);
NEXT;
}
CASE(OP_SYMBOL, BB) {
size_t len;
mrb_sym sym;
mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
len = pool[b].tt >> 2;
if (pool[b].tt & IREP_TT_SFLAG) {
sym = mrb_intern_static(mrb, pool[b].u.str, len);
}
else {
sym = mrb_intern(mrb, pool[b].u.str, len);
}
regs[a] = mrb_symbol_value(sym);
NEXT;
}
CASE(OP_STRING, BB) {
mrb_int len;
mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
len = pool[b].tt >> 2;
if (pool[b].tt & IREP_TT_SFLAG) {
regs[a] = mrb_str_new_static(mrb, pool[b].u.str, len);
}
else {
regs[a] = mrb_str_new(mrb, pool[b].u.str, len);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_STRCAT, B) {
mrb_assert(mrb_string_p(regs[a]));
mrb_str_concat(mrb, regs[a], regs[a+1]);
NEXT;
}
CASE(OP_HASH, BB) {
mrb_value hash = mrb_hash_new_capa(mrb, b);
int i;
int lim = a+b*2;
for (i=a; i<lim; i+=2) {
mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
}
regs[a] = hash;
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_HASHADD, BB) {
mrb_value hash;
int i;
int lim = a+b*2+1;
hash = regs[a];
mrb_ensure_hash_type(mrb, hash);
for (i=a+1; i<lim; i+=2) {
mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_HASHCAT, B) {
mrb_value hash = regs[a];
mrb_assert(mrb_hash_p(hash));
mrb_hash_merge(mrb, hash, regs[a+1]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_LAMBDA, BB)
c = OP_L_LAMBDA;
L_MAKE_LAMBDA:
{
struct RProc *p;
const mrb_irep *nirep = irep->reps[b];
if (c & OP_L_CAPTURE) {
p = mrb_closure_new(mrb, nirep);
}
else {
p = mrb_proc_new(mrb, nirep);
p->flags |= MRB_PROC_SCOPE;
}
if (c & OP_L_STRICT) p->flags |= MRB_PROC_STRICT;
regs[a] = mrb_obj_value(p);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_BLOCK, BB) {
c = OP_L_BLOCK;
goto L_MAKE_LAMBDA;
}
CASE(OP_METHOD, BB) {
c = OP_L_METHOD;
goto L_MAKE_LAMBDA;
}
CASE(OP_RANGE_INC, B) {
mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], FALSE);
regs[a] = v;
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_RANGE_EXC, B) {
mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], TRUE);
regs[a] = v;
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_OCLASS, B) {
regs[a] = mrb_obj_value(mrb->object_class);
NEXT;
}
CASE(OP_CLASS, BB) {
struct RClass *c = 0, *baseclass;
mrb_value base, super;
mrb_sym id = syms[b];
base = regs[a];
super = regs[a+1];
if (mrb_nil_p(base)) {
baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
if (!baseclass) baseclass = mrb->object_class;
base = mrb_obj_value(baseclass);
}
c = mrb_vm_define_class(mrb, base, super, id);
regs[a] = mrb_obj_value(c);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_MODULE, BB) {
struct RClass *cls = 0, *baseclass;
mrb_value base;
mrb_sym id = syms[b];
base = regs[a];
if (mrb_nil_p(base)) {
baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
if (!baseclass) baseclass = mrb->object_class;
base = mrb_obj_value(baseclass);
}
cls = mrb_vm_define_module(mrb, base, id);
regs[a] = mrb_obj_value(cls);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_EXEC, BB)
{
mrb_value recv = regs[a];
struct RProc *p;
const mrb_irep *nirep = irep->reps[b];
/* prepare closure */
p = mrb_proc_new(mrb, nirep);
p->c = NULL;
mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)proc);
MRB_PROC_SET_TARGET_CLASS(p, mrb_class_ptr(recv));
p->flags |= MRB_PROC_SCOPE;
/* prepare call stack */
cipush(mrb, a, 0, mrb_class_ptr(recv), p, 0, 0);
irep = p->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, irep->nregs);
stack_clear(regs+1, irep->nregs-1);
pc = irep->iseq;
JUMP;
}
CASE(OP_DEF, BB) {
struct RClass *target = mrb_class_ptr(regs[a]);
struct RProc *p = mrb_proc_ptr(regs[a+1]);
mrb_method_t m;
mrb_sym mid = syms[b];
MRB_METHOD_FROM_PROC(m, p);
mrb_define_method_raw(mrb, target, mid, m);
mrb_method_added(mrb, target, mid);
mrb_gc_arena_restore(mrb, ai);
regs[a] = mrb_symbol_value(mid);
NEXT;
}
CASE(OP_SCLASS, B) {
regs[a] = mrb_singleton_class(mrb, regs[a]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_TCLASS, B) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
regs[a] = mrb_obj_value(target);
NEXT;
}
CASE(OP_ALIAS, BB) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
mrb_alias_method(mrb, target, syms[a], syms[b]);
mrb_method_added(mrb, target, syms[a]);
NEXT;
}
CASE(OP_UNDEF, B) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
mrb_undef_method_id(mrb, target, syms[a]);
NEXT;
}
CASE(OP_DEBUG, Z) {
FETCH_BBB();
#ifdef MRB_USE_DEBUG_HOOK
mrb->debug_op_hook(mrb, irep, pc, regs);
#else
#ifndef MRB_NO_STDIO
printf("OP_DEBUG %d %d %d\n", a, b, c);
#else
abort();
#endif
#endif
NEXT;
}
CASE(OP_ERR, B) {
size_t len = pool[a].tt >> 2;
mrb_value exc;
mrb_assert((pool[a].tt&IREP_TT_NFLAG)==0);
exc = mrb_exc_new(mrb, E_LOCALJUMP_ERROR, pool[a].u.str, len);
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
CASE(OP_EXT1, Z) {
insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _1(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_EXT2, Z) {
insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _2(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_EXT3, Z) {
uint8_t insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _3(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_STOP, Z) {
/* stop VM */
CHECKPOINT_RESTORE(RBREAK_TAG_STOP) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_STOP) {
UNWIND_ENSURE(mrb, mrb->c->ci, pc, RBREAK_TAG_STOP, proc, mrb_nil_value());
}
CHECKPOINT_END(RBREAK_TAG_STOP);
L_STOP:
mrb->jmp = prev_jmp;
if (mrb->exc) {
mrb_assert(mrb->exc->tt == MRB_TT_EXCEPTION);
return mrb_obj_value(mrb->exc);
}
return regs[irep->nlocals];
}
}
END_DISPATCH;
#undef regs
}
MRB_CATCH(&c_jmp) {
mrb_callinfo *ci = mrb->c->ci;
while (ci > mrb->c->cibase && ci->cci == CINFO_DIRECT) {
ci = cipop(mrb);
}
exc_catched = TRUE;
pc = ci->pc;
goto RETRY_TRY_BLOCK;
}
MRB_END_EXC(&c_jmp);
} | 1 | ["CWE-476"] | mruby | 00acae117da1b45b318dc36531a7b0021b8097ae | 251,133,800,800,873,450,000,000,000,000,000,000,000 | 1,828 | vm.c: target class may be NULL. |
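The CWE-476 label matches the commit subject: opcodes such as OP_TCLASS, OP_ALIAS, and OP_UNDEF above consult a target class that can legitimately be NULL (for example, a proc compiled without any class scope). A minimal sketch of the guard this implies, built only from the mruby APIs already used in the snippet; the real check_target_class() may differ in detail:

static struct RClass*
check_target_class(mrb_state *mrb)
{
  struct RClass *target = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);

  if (!target) {
    /* raise TypeError rather than let a caller dereference NULL */
    mrb_value exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR,
                                    "no target class or module");
    mrb_exc_set(mrb, exc);
  }
  return target;
}

Each call site then bails out with "if (!target) goto L_RAISE;", exactly as the OP_TCLASS, OP_ALIAS, and OP_UNDEF cases above do.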
static size_t /* O - Size of IPP message */
ipp_length(ipp_t *ipp, /* I - IPP message or collection */
int collection) /* I - 1 if a collection, 0 otherwise */
{
int i; /* Looping var */
size_t bytes; /* Number of bytes */
ipp_attribute_t *attr; /* Current attribute */
ipp_tag_t group; /* Current group */
_ipp_value_t *value; /* Current value */
DEBUG_printf(("3ipp_length(ipp=%p, collection=%d)", (void *)ipp, collection));
if (!ipp)
{
DEBUG_puts("4ipp_length: Returning 0 bytes");
return (0);
}
/*
* Start with 8 bytes for the IPP message header...
*/
bytes = collection ? 0 : 8;
/*
* Then add the lengths of each attribute...
*/
group = IPP_TAG_ZERO;
for (attr = ipp->attrs; attr != NULL; attr = attr->next)
{
if (attr->group_tag != group && !collection)
{
group = attr->group_tag;
if (group == IPP_TAG_ZERO)
continue;
bytes ++; /* Group tag */
}
if (!attr->name)
continue;
DEBUG_printf(("5ipp_length: attr->name=\"%s\", attr->num_values=%d, "
"bytes=" CUPS_LLFMT, attr->name, attr->num_values, CUPS_LLCAST bytes));
if ((attr->value_tag & ~IPP_TAG_CUPS_CONST) < IPP_TAG_EXTENSION)
bytes += (size_t)attr->num_values;/* Value tag for each value */
else
bytes += (size_t)(5 * attr->num_values);
/* Value tag for each value */
bytes += (size_t)(2 * attr->num_values);
/* Name lengths */
bytes += strlen(attr->name); /* Name */
bytes += (size_t)(2 * attr->num_values);
/* Value lengths */
if (collection)
bytes += 5; /* Add membername overhead */
switch (attr->value_tag & ~IPP_TAG_CUPS_CONST)
{
case IPP_TAG_UNSUPPORTED_VALUE :
case IPP_TAG_DEFAULT :
case IPP_TAG_UNKNOWN :
case IPP_TAG_NOVALUE :
case IPP_TAG_NOTSETTABLE :
case IPP_TAG_DELETEATTR :
case IPP_TAG_ADMINDEFINE :
break;
case IPP_TAG_INTEGER :
case IPP_TAG_ENUM :
bytes += (size_t)(4 * attr->num_values);
break;
case IPP_TAG_BOOLEAN :
bytes += (size_t)attr->num_values;
break;
case IPP_TAG_TEXT :
case IPP_TAG_NAME :
case IPP_TAG_KEYWORD :
case IPP_TAG_URI :
case IPP_TAG_URISCHEME :
case IPP_TAG_CHARSET :
case IPP_TAG_LANGUAGE :
case IPP_TAG_MIMETYPE :
for (i = 0, value = attr->values;
i < attr->num_values;
i ++, value ++)
if (value->string.text)
bytes += strlen(value->string.text);
break;
case IPP_TAG_DATE :
bytes += (size_t)(11 * attr->num_values);
break;
case IPP_TAG_RESOLUTION :
bytes += (size_t)(9 * attr->num_values);
break;
case IPP_TAG_RANGE :
bytes += (size_t)(8 * attr->num_values);
break;
case IPP_TAG_TEXTLANG :
case IPP_TAG_NAMELANG :
bytes += (size_t)(4 * attr->num_values);
/* Charset + text length */
for (i = 0, value = attr->values;
i < attr->num_values;
i ++, value ++)
{
if (value->string.language)
bytes += strlen(value->string.language);
if (value->string.text)
bytes += strlen(value->string.text);
}
break;
case IPP_TAG_BEGIN_COLLECTION :
for (i = 0, value = attr->values;
i < attr->num_values;
i ++, value ++)
bytes += ipp_length(value->collection, 1);
break;
default :
for (i = 0, value = attr->values;
i < attr->num_values;
i ++, value ++)
bytes += (size_t)value->unknown.length;
break;
}
}
/*
* Finally, add 1 byte for the "end of attributes" tag or 5 bytes
* for the "end of collection" tag and return...
*/
if (collection)
bytes += 5;
else
bytes ++;
DEBUG_printf(("4ipp_length: Returning " CUPS_LLFMT " bytes", CUPS_LLCAST bytes));
return (bytes);
} | 0 | ["CWE-120"] | cups | f24e6cf6a39300ad0c3726a41a4aab51ad54c109 | 95,841,985,090,461,240,000,000,000,000,000,000,000 | 155 | Fix multiple security/disclosure issues:
- CVE-2019-8696 and CVE-2019-8675: Fixed SNMP buffer overflows (rdar://51685251)
- Fixed IPP buffer overflow (rdar://50035411)
- Fixed memory disclosure issue in the scheduler (rdar://51373853)
- Fixed DoS issues in the scheduler (rdar://51373929) |
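ipp_length() itself only pre-computes a serialized message size; the CWE-120 label ("buffer copy without checking size of input") refers to the bounded copies this commit restores elsewhere in ipp.c and the SNMP code. A hypothetical sketch of that defect class and its guard; copy_value_bounded() and its parameters are illustrative names, not CUPS API:

#include <string.h>

/* Illustrative only: bound every copy by the destination's capacity,
 * never by the attacker-controlled source length. */
static void
copy_value_bounded(char *dst, size_t dstsize,
                   const char *src, size_t srclen)
{
  if (dstsize == 0)
    return;                 /* no room even for the terminator */
  if (srclen >= dstsize)    /* copy would overrun the fixed buffer... */
    srclen = dstsize - 1;   /* ...so clamp to what actually fits */
  memcpy(dst, src, srclen);
  dst[srclen] = '\0';       /* keep the result NUL-terminated */
}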
gcry_err_code_t
grub_crypto_ecb_encrypt (grub_crypto_cipher_handle_t cipher,
void *out, const void *in, grub_size_t size)
{
const grub_uint8_t *inptr, *end;
grub_uint8_t *outptr;
grub_size_t blocksize;
if (!cipher->cipher->encrypt)
return GPG_ERR_NOT_SUPPORTED;
blocksize = cipher->cipher->blocksize;
/* Reject a zero or non-power-of-two block size, and any input that is
   not a whole number of blocks; this bounds the pointer walk below. */
if (blocksize == 0 || (((blocksize - 1) & blocksize) != 0)
|| ((size & (blocksize - 1)) != 0))
return GPG_ERR_INV_ARG;
end = (const grub_uint8_t *) in + size;
for (inptr = in, outptr = out; inptr < end;
inptr += blocksize, outptr += blocksize)
cipher->cipher->encrypt (cipher->ctx, outptr, inptr);
return GPG_ERR_NO_ERROR;
} | 0 | ["CWE-264"] | grub | 451d80e52d851432e109771bb8febafca7a5f1f2 | 263,666,488,868,969,360,000,000,000,000,000,000,000 | 18 | Fix security issue when reading username and password
This patch fixes two integer underflows at:
* grub-core/lib/crypto.c
* grub-core/normal/auth.c
CVE-2015-8370
Signed-off-by: Hector Marco-Gisbert <[email protected]>
Signed-off-by: Ismael Ripoll-Ripoll <[email protected]>
Also-By: Andrey Borzenkov <[email protected]> |
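CVE-2015-8370 is an integer underflow in the username/password prompt: backspace decremented an index that could already be zero, so the unsigned counter wrapped around and later writes landed far outside the buffer. A hypothetical sketch of the pattern and the guard the patch adds; the names are illustrative, not the exact grub code:

/* Illustrative only: check an unsigned index before decrementing it,
 * otherwise it wraps to a huge value on underflow. */
static void
handle_backspace(char *buf, grub_size_t *cur_len)
{
  if (*cur_len == 0)
    return;                 /* nothing typed yet; ignore the key */
  (*cur_len)--;             /* safe: *cur_len was > 0 */
  buf[*cur_len] = '\0';
}

Without the check, (*cur_len)-- on zero yields the maximum grub_size_t, and every later buf[*cur_len] write is out of bounds, which is the underflow the message pinpoints in grub-core/normal/auth.c and grub-core/lib/crypto.c.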