| func | target | cwe | project | commit_id | hash | size | message |
|---|---|---|---|---|---|---|---|
| string, lengths 0–484k | int64, values 0–1 | list, lengths 0–4 | string, 799 classes | string, length 40 | float64, 1,215,700,430,453,689,100,000,000 – 340,281,914,521,452,260,000,000,000,000 | int64, values 1–24k | string, lengths 0–13.3k |
static MagickBooleanType WriteGIFImage(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
int
c;
ImageInfo
*write_info;
MagickBooleanType
status;
MagickOffsetType
scene;
RectangleInfo
page;
register ssize_t
i;
register unsigned char
*q;
size_t
bits_per_pixel,
delay,
imageListLength,
length,
one;
ssize_t
j,
opacity;
unsigned char
*colormap,
*global_colormap;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
/*
Allocate colormap.
*/
global_colormap=(unsigned char *) AcquireQuantumMemory(768UL,
sizeof(*global_colormap));
colormap=(unsigned char *) AcquireQuantumMemory(768UL,sizeof(*colormap));
if ((global_colormap == (unsigned char *) NULL) ||
(colormap == (unsigned char *) NULL))
{
if (global_colormap != (unsigned char *) NULL)
global_colormap=(unsigned char *) RelinquishMagickMemory(
global_colormap);
if (colormap != (unsigned char *) NULL)
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
for (i=0; i < 768; i++)
colormap[i]=(unsigned char) 0;
/*
Write GIF header.
*/
write_info=CloneImageInfo(image_info);
if (LocaleCompare(write_info->magick,"GIF87") != 0)
(void) WriteBlob(image,6,(unsigned char *) "GIF89a");
else
{
(void) WriteBlob(image,6,(unsigned char *) "GIF87a");
write_info->adjoin=MagickFalse;
}
/*
Determine image bounding box.
*/
page.width=image->columns;
if (image->page.width > page.width)
page.width=image->page.width;
page.height=image->rows;
if (image->page.height > page.height)
page.height=image->page.height;
page.x=image->page.x;
page.y=image->page.y;
(void) WriteBlobLSBShort(image,(unsigned short) page.width);
(void) WriteBlobLSBShort(image,(unsigned short) page.height);
/*
Write images to file.
*/
if ((write_info->adjoin != MagickFalse) &&
(GetNextImageInList(image) != (Image *) NULL))
write_info->interlace=NoInterlace;
scene=0;
one=1;
imageListLength=GetImageListLength(image);
do
{
(void) TransformImageColorspace(image,sRGBColorspace,exception);
opacity=(-1);
if (IsImageOpaque(image,exception) != MagickFalse)
{
if ((image->storage_class == DirectClass) || (image->colors > 256))
(void) SetImageType(image,PaletteType,exception);
}
else
{
double
alpha,
beta;
/*
Identify transparent colormap index.
*/
if ((image->storage_class == DirectClass) || (image->colors > 256))
(void) SetImageType(image,PaletteBilevelAlphaType,exception);
for (i=0; i < (ssize_t) image->colors; i++)
if (image->colormap[i].alpha != OpaqueAlpha)
{
if (opacity < 0)
{
opacity=i;
continue;
}
alpha=fabs(image->colormap[i].alpha-TransparentAlpha);
beta=fabs(image->colormap[opacity].alpha-TransparentAlpha);
if (alpha < beta)
opacity=i;
}
if (opacity == -1)
{
(void) SetImageType(image,PaletteBilevelAlphaType,exception);
for (i=0; i < (ssize_t) image->colors; i++)
if (image->colormap[i].alpha != OpaqueAlpha)
{
if (opacity < 0)
{
opacity=i;
continue;
}
alpha=fabs(image->colormap[i].alpha-TransparentAlpha);
beta=fabs(image->colormap[opacity].alpha-TransparentAlpha);
if (alpha < beta)
opacity=i;
}
}
if (opacity >= 0)
{
image->colormap[opacity].red=image->transparent_color.red;
image->colormap[opacity].green=image->transparent_color.green;
image->colormap[opacity].blue=image->transparent_color.blue;
}
}
if ((image->storage_class == DirectClass) || (image->colors > 256))
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
for (bits_per_pixel=1; bits_per_pixel < 8; bits_per_pixel++)
if ((one << bits_per_pixel) >= image->colors)
break;
q=colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
*q++=ScaleQuantumToChar(ClampToQuantum(image->colormap[i].red));
*q++=ScaleQuantumToChar(ClampToQuantum(image->colormap[i].green));
*q++=ScaleQuantumToChar(ClampToQuantum(image->colormap[i].blue));
}
for ( ; i < (ssize_t) (one << bits_per_pixel); i++)
{
*q++=(unsigned char) 0x0;
*q++=(unsigned char) 0x0;
*q++=(unsigned char) 0x0;
}
if ((GetPreviousImageInList(image) == (Image *) NULL) ||
(write_info->adjoin == MagickFalse))
{
/*
Write global colormap.
*/
c=0x80;
c|=(8-1) << 4; /* color resolution */
c|=(bits_per_pixel-1); /* size of global colormap */
(void) WriteBlobByte(image,(unsigned char) c);
for (j=0; j < (ssize_t) image->colors; j++)
if (IsPixelInfoEquivalent(&image->background_color,image->colormap+j))
break;
(void) WriteBlobByte(image,(unsigned char)
(j == (ssize_t) image->colors ? 0 : j)); /* background color */
(void) WriteBlobByte(image,(unsigned char) 0x00); /* reserved */
length=(size_t) (3*(one << bits_per_pixel));
(void) WriteBlob(image,length,colormap);
for (j=0; j < 768; j++)
global_colormap[j]=colormap[j];
}
if (LocaleCompare(write_info->magick,"GIF87") != 0)
{
const char
*value;
/*
Write graphics control extension.
*/
(void) WriteBlobByte(image,(unsigned char) 0x21);
(void) WriteBlobByte(image,(unsigned char) 0xf9);
(void) WriteBlobByte(image,(unsigned char) 0x04);
c=image->dispose << 2;
if (opacity >= 0)
c|=0x01;
(void) WriteBlobByte(image,(unsigned char) c);
delay=(size_t) (100*image->delay/MagickMax((size_t)
image->ticks_per_second,1));
(void) WriteBlobLSBShort(image,(unsigned short) delay);
(void) WriteBlobByte(image,(unsigned char) (opacity >= 0 ? opacity :
0));
(void) WriteBlobByte(image,(unsigned char) 0x00);
value=GetImageProperty(image,"comment",exception);
if (value != (const char *) NULL)
{
register const char
*p;
size_t
count;
/*
Write comment extension.
*/
(void) WriteBlobByte(image,(unsigned char) 0x21);
(void) WriteBlobByte(image,(unsigned char) 0xfe);
for (p=value; *p != '\0'; )
{
count=MagickMin(strlen(p),255);
(void) WriteBlobByte(image,(unsigned char) count);
for (i=0; i < (ssize_t) count; i++)
(void) WriteBlobByte(image,(unsigned char) *p++);
}
(void) WriteBlobByte(image,(unsigned char) 0x00);
}
if ((GetPreviousImageInList(image) == (Image *) NULL) &&
(GetNextImageInList(image) != (Image *) NULL) &&
(image->iterations != 1))
{
/*
Write Netscape Loop extension.
*/
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing GIF Extension %s","NETSCAPE2.0");
(void) WriteBlobByte(image,(unsigned char) 0x21);
(void) WriteBlobByte(image,(unsigned char) 0xff);
(void) WriteBlobByte(image,(unsigned char) 0x0b);
(void) WriteBlob(image,11,(unsigned char *) "NETSCAPE2.0");
(void) WriteBlobByte(image,(unsigned char) 0x03);
(void) WriteBlobByte(image,(unsigned char) 0x01);
(void) WriteBlobLSBShort(image,(unsigned short) (image->iterations ?
image->iterations-1 : 0));
(void) WriteBlobByte(image,(unsigned char) 0x00);
}
if ((image->gamma != 1.0f/2.2f))
{
char
attributes[MagickPathExtent];
ssize_t
count;
/*
Write ImageMagick extension.
*/
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing GIF Extension %s","ImageMagick");
(void) WriteBlobByte(image,(unsigned char) 0x21);
(void) WriteBlobByte(image,(unsigned char) 0xff);
(void) WriteBlobByte(image,(unsigned char) 0x0b);
(void) WriteBlob(image,11,(unsigned char *) "ImageMagick");
count=FormatLocaleString(attributes,MagickPathExtent,"gamma=%g",
image->gamma);
(void) WriteBlobByte(image,(unsigned char) count);
(void) WriteBlob(image,(size_t) count,(unsigned char *) attributes);
(void) WriteBlobByte(image,(unsigned char) 0x00);
}
ResetImageProfileIterator(image);
for ( ; ; )
{
char
*name;
const StringInfo
*profile;
name=GetNextImageProfile(image);
if (name == (const char *) NULL)
break;
profile=GetImageProfile(image,name);
if (profile != (StringInfo *) NULL)
{
if ((LocaleCompare(name,"ICC") == 0) ||
(LocaleCompare(name,"ICM") == 0) ||
(LocaleCompare(name,"IPTC") == 0) ||
(LocaleCompare(name,"8BIM") == 0) ||
(LocaleNCompare(name,"gif:",4) == 0))
{
ssize_t
offset;
unsigned char
*datum;
datum=GetStringInfoDatum(profile);
length=GetStringInfoLength(profile);
(void) WriteBlobByte(image,(unsigned char) 0x21);
(void) WriteBlobByte(image,(unsigned char) 0xff);
(void) WriteBlobByte(image,(unsigned char) 0x0b);
if ((LocaleCompare(name,"ICC") == 0) ||
(LocaleCompare(name,"ICM") == 0))
{
/*
Write ICC extension.
*/
(void) WriteBlob(image,11,(unsigned char *) "ICCRGBG1012");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing GIF Extension %s","ICCRGBG1012");
}
else
if ((LocaleCompare(name,"IPTC") == 0))
{
/*
Write IPTC extension.
*/
(void) WriteBlob(image,11,(unsigned char *) "MGKIPTC0000");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing GIF Extension %s","MGKIPTC0000");
}
else
if ((LocaleCompare(name,"8BIM") == 0))
{
/*
Write 8BIM extension.
*/
(void) WriteBlob(image,11,(unsigned char *)
"MGK8BIM0000");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing GIF Extension %s","MGK8BIM0000");
}
else
{
char
extension[MagickPathExtent];
/*
Write generic extension.
*/
(void) CopyMagickString(extension,name+4,
sizeof(extension));
(void) WriteBlob(image,11,(unsigned char *) extension);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing GIF Extension %s",name);
}
offset=0;
while ((ssize_t) length > offset)
{
size_t
block_length;
if ((length-offset) < 255)
block_length=length-offset;
else
block_length=255;
(void) WriteBlobByte(image,(unsigned char) block_length);
(void) WriteBlob(image,(size_t) block_length,datum+offset);
offset+=(ssize_t) block_length;
}
(void) WriteBlobByte(image,(unsigned char) 0x00);
}
}
}
}
(void) WriteBlobByte(image,','); /* image separator */
/*
Write the image header.
*/
page.x=image->page.x;
page.y=image->page.y;
if ((image->page.width != 0) && (image->page.height != 0))
page=image->page;
(void) WriteBlobLSBShort(image,(unsigned short) (page.x < 0 ? 0 : page.x));
(void) WriteBlobLSBShort(image,(unsigned short) (page.y < 0 ? 0 : page.y));
(void) WriteBlobLSBShort(image,(unsigned short) image->columns);
(void) WriteBlobLSBShort(image,(unsigned short) image->rows);
c=0x00;
if (write_info->interlace != NoInterlace)
c|=0x40; /* pixel data is interlaced */
for (j=0; j < (ssize_t) (3*image->colors); j++)
if (colormap[j] != global_colormap[j])
break;
if (j == (ssize_t) (3*image->colors))
(void) WriteBlobByte(image,(unsigned char) c);
else
{
c|=0x80;
c|=(bits_per_pixel-1); /* size of local colormap */
(void) WriteBlobByte(image,(unsigned char) c);
length=(size_t) (3*(one << bits_per_pixel));
(void) WriteBlob(image,length,colormap);
}
/*
Write the image data.
*/
c=(int) MagickMax(bits_per_pixel,2);
(void) WriteBlobByte(image,(unsigned char) c);
status=EncodeImage(write_info,image,(size_t) MagickMax(bits_per_pixel,2)+1,
exception);
if (status == MagickFalse)
{
global_colormap=(unsigned char *) RelinquishMagickMemory(
global_colormap);
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
write_info=DestroyImageInfo(write_info);
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) WriteBlobByte(image,(unsigned char) 0x00);
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
scene++;
status=SetImageProgress(image,SaveImagesTag,scene,imageListLength);
if (status == MagickFalse)
break;
} while (write_info->adjoin != MagickFalse);
(void) WriteBlobByte(image,';'); /* terminator */
global_colormap=(unsigned char *) RelinquishMagickMemory(global_colormap);
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
write_info=DestroyImageInfo(write_info);
(void) CloseBlob(image);
return(MagickTrue);
} | 0 | [
"CWE-119",
"CWE-703",
"CWE-787"
] | ImageMagick | 61135001a625364e29bdce83832f043eebde7b5a | 8,431,788,649,478,003,000,000,000,000,000,000,000 | 441 | https://github.com/ImageMagick/ImageMagick/issues/1595 |
copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
struct vring_desc **cur_desc, uint32_t size,
uint32_t *nb_descs, uint32_t vq_size)
{
struct vring_desc *desc = *cur_desc;
uint64_t remain, addr, dlen, len;
uint32_t to_copy;
uint8_t *data = dst_data;
uint8_t *src;
int left = size;
to_copy = RTE_MIN(desc->len, (uint32_t)left);
dlen = to_copy;
src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
VHOST_ACCESS_RO);
if (unlikely(!src || !dlen))
return -1;
rte_memcpy((uint8_t *)data, src, dlen);
data += dlen;
if (unlikely(dlen < to_copy)) {
remain = to_copy - dlen;
addr = desc->addr + dlen;
while (remain) {
len = remain;
src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
VHOST_ACCESS_RO);
if (unlikely(!src || !len)) {
VC_LOG_ERR("Failed to map descriptor");
return -1;
}
rte_memcpy(data, src, len);
addr += len;
remain -= len;
data += len;
}
}
left -= to_copy;
while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
VC_LOG_ERR("Invalid descriptors");
return -1;
}
(*nb_descs)--;
desc = &vc_req->head[desc->next];
rte_prefetch0(&vc_req->head[desc->next]);
to_copy = RTE_MIN(desc->len, (uint32_t)left);
dlen = desc->len;
src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
VHOST_ACCESS_RO);
if (unlikely(!src || !dlen)) {
VC_LOG_ERR("Failed to map descriptor");
return -1;
}
rte_memcpy(data, src, dlen);
data += dlen;
if (unlikely(dlen < to_copy)) {
remain = to_copy - dlen;
addr = desc->addr + dlen;
while (remain) {
len = remain;
src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
VHOST_ACCESS_RO);
if (unlikely(!src || !len)) {
VC_LOG_ERR("Failed to map descriptor");
return -1;
}
rte_memcpy(data, src, len);
addr += len;
remain -= len;
data += len;
}
}
left -= to_copy;
}
if (unlikely(left > 0)) {
VC_LOG_ERR("Incorrect virtio descriptor");
return -1;
}
if (unlikely(*nb_descs == 0))
*cur_desc = NULL;
else {
if (unlikely(desc->next >= vq_size))
return -1;
*cur_desc = &vc_req->head[desc->next];
}
return 0;
} | 0 | [
"CWE-125"
] | dpdk | acd4c92fa693bbea695f2bb42bb93fb8567c3ca5 | 241,705,549,738,532,060,000,000,000,000,000,000,000 | 102 | vhost/crypto: validate keys lengths
transform_cipher_param() and transform_chain_param() handle
the payload data for the VHOST_USER_CRYPTO_CREATE_SESS
message. These payloads have to be validated, since they
could come from untrusted sources.
Two buffers and their lengths are defined in this payload,
one for the auth key and one for the cipher key. But the above
functions do not validate the key length inputs, which could
lead to read out of bounds, as buffers have static sizes of
64 bytes for the cipher key and 512 bytes for the auth key.
This patch adds necessary checks on the key length field
before being used.
CVE-2020-10724
Fixes: e80a98708166 ("vhost/crypto: add session message handler")
Cc: [email protected]
Reported-by: Ilja Van Sprundel <[email protected]>
Signed-off-by: Maxime Coquelin <[email protected]>
Reviewed-by: Xiaolong Ye <[email protected]>
Reviewed-by: Ilja Van Sprundel <[email protected]> |
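The fix this message describes boils down to bounds-checking the guest-controlled key lengths before they are copied into fixed-size session buffers (64 bytes for the cipher key, 512 for the auth key). A minimal self-contained sketch of that pattern follows; the function and macro names are illustrative, not DPDK's:

```c
#include <stdint.h>
#include <string.h>

#define VC_CIPHER_KEY_MAX 64   /* fixed cipher-key buffer size, per the message */
#define VC_AUTH_KEY_MAX  512   /* fixed auth-key buffer size, per the message */

/* Reject a guest-supplied key length before copying into a fixed buffer. */
static int copy_key_checked(uint8_t *dst, size_t dst_size,
                            const uint8_t *src, uint32_t keylen)
{
	if (keylen > dst_size)
		return -1;       /* untrusted length: fail session creation */
	memcpy(dst, src, keylen);
	return 0;
}
```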
void visit(Repetition &ope) override {
if (ope.min_ == 0) {
set_error();
} else {
ope.ope_->accept(*this);
}
} | 0 | [
"CWE-125"
] | cpp-peglib | b3b29ce8f3acf3a32733d930105a17d7b0ba347e | 115,307,791,642,847,540,000,000,000,000,000,000,000 | 7 | Fix #122 |
static ssize_t systab_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
char *str = buf;
if (!kobj || !buf)
return -EINVAL;
if (efi.mps != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "MPS=0x%lx\n", efi.mps);
if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
if (efi.acpi != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
/*
* If both SMBIOS and SMBIOS3 entry points are implemented, the
* SMBIOS3 entry point shall be preferred, so we list it first to
* let applications stop parsing after the first match.
*/
if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
if (efi.smbios != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
if (efi.hcdp != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp);
if (efi.boot_info != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "BOOTINFO=0x%lx\n", efi.boot_info);
if (efi.uga != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "UGA=0x%lx\n", efi.uga);
return str - buf;
} | 0 | [] | linux | 1957a85b0032a81e6482ca4aab883643b8dae06e | 306,679,649,584,199,740,000,000,000,000,000,000,000 | 32 | efi: Restrict efivar_ssdt_load when the kernel is locked down
efivar_ssdt_load allows the kernel to import arbitrary ACPI code from an
EFI variable, which gives arbitrary code execution in ring 0. Prevent
that when the kernel is locked down.
Signed-off-by: Matthew Garrett <[email protected]>
Acked-by: Ard Biesheuvel <[email protected]>
Reviewed-by: Kees Cook <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: [email protected]
Signed-off-by: James Morris <[email protected]> |
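The commit message explains the shape of the fix: the SSDT import from an EFI variable is gated on the kernel lockdown state, because the imported ACPI code executes in ring 0. A hedged sketch of that gate, with stub functions standing in for the lockdown hook and the real loader (both names below are hypothetical):

```c
#include <errno.h>
#include <stdbool.h>

static bool kernel_locked_down(void) { return true; }       /* stub policy */
static int load_ssdt_from_efi_variable(void) { return 0; }  /* stub loader */

static int efivar_ssdt_load_guarded(void)
{
	if (kernel_locked_down())
		return -EPERM;  /* refuse to import arbitrary ACPI code */
	return load_ssdt_from_efi_variable();
}
```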
int ecryptfs_decrypt_page(struct page *page)
{
struct inode *ecryptfs_inode;
struct ecryptfs_crypt_stat *crypt_stat;
char *enc_extent_virt;
struct page *enc_extent_page = NULL;
unsigned long extent_offset;
int rc = 0;
ecryptfs_inode = page->mapping->host;
crypt_stat =
&(ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat);
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
PAGE_CACHE_SIZE,
ecryptfs_inode);
if (rc)
printk(KERN_ERR "%s: Error attempting to copy "
"page at index [%ld]\n", __func__,
page->index);
goto out;
}
enc_extent_page = alloc_page(GFP_USER);
if (!enc_extent_page) {
rc = -ENOMEM;
ecryptfs_printk(KERN_ERR, "Error allocating memory for "
"encrypted extent\n");
goto out;
}
enc_extent_virt = kmap(enc_extent_page);
for (extent_offset = 0;
extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
extent_offset++) {
loff_t offset;
ecryptfs_lower_offset_for_extent(
&offset, ((page->index * (PAGE_CACHE_SIZE
/ crypt_stat->extent_size))
+ extent_offset), crypt_stat);
rc = ecryptfs_read_lower(enc_extent_virt, offset,
crypt_stat->extent_size,
ecryptfs_inode);
if (rc) {
ecryptfs_printk(KERN_ERR, "Error attempting "
"to read lower page; rc = [%d]"
"\n", rc);
goto out;
}
rc = ecryptfs_decrypt_extent(page, crypt_stat, enc_extent_page,
extent_offset);
if (rc) {
printk(KERN_ERR "%s: Error encrypting extent; "
"rc = [%d]\n", __func__, rc);
goto out;
}
}
out:
if (enc_extent_page) {
kunmap(enc_extent_page);
__free_page(enc_extent_page);
}
return rc;
} | 0 | [
"CWE-189"
] | linux-2.6 | 8faece5f906725c10e7a1f6caf84452abadbdc7b | 240,630,533,125,729,300,000,000,000,000,000,000,000 | 63 | eCryptfs: Allocate a variable number of pages for file headers
When allocating the memory used to store the eCryptfs header contents, a
single, zeroed page was being allocated with get_zeroed_page().
However, the size of an eCryptfs header is either PAGE_CACHE_SIZE or
ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE (8192), whichever is larger, and is
stored in the file's private_data->crypt_stat->num_header_bytes_at_front
field.
ecryptfs_write_metadata_to_contents() was using
num_header_bytes_at_front to decide how many bytes should be written to
the lower filesystem for the file header. Unfortunately, at least 8K
was being written from the page, despite the chance of the single,
zeroed page being smaller than 8K. This resulted in random areas of
kernel memory being written between the 0x1000 and 0x1FFF bytes offsets
in the eCryptfs file headers if PAGE_SIZE was 4K.
This patch allocates a variable number of pages, calculated with
num_header_bytes_at_front, and passes the number of allocated pages
along to ecryptfs_write_metadata_to_contents().
Thanks to Florian Streibelt for reporting the data leak and working with
me to find the problem. 2.6.28 is the only kernel release with this
vulnerability. Corresponds to CVE-2009-0787
Signed-off-by: Tyler Hicks <[email protected]>
Acked-by: Dustin Kirkland <[email protected]>
Reviewed-by: Eric Sandeen <[email protected]>
Reviewed-by: Eugene Teo <[email protected]>
Cc: Greg KH <[email protected]>
Cc: dann frazier <[email protected]>
Cc: Serge E. Hallyn <[email protected]>
Cc: Florian Streibelt <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
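The bug was a size mismatch: a single zeroed page was allocated while up to num_header_bytes_at_front (at least 8K) was later written out, leaking adjacent kernel memory. The repair pattern, sketched below in plain C with an illustrative page size, derives the allocation from the actual header length:

```c
#include <stdlib.h>

#define PAGE_SIZE_SKETCH 4096UL  /* illustrative page size */

/* Allocate as many zeroed pages as the header actually needs, instead
 * of assuming one page covers num_header_bytes_at_front. */
static void *alloc_header_pages(size_t num_header_bytes, size_t *npages_out)
{
	size_t npages = (num_header_bytes + PAGE_SIZE_SKETCH - 1)
	                / PAGE_SIZE_SKETCH;
	void *buf = calloc(npages, PAGE_SIZE_SKETCH);  /* zeroed allocation */
	if (buf != NULL)
		*npages_out = npages;
	return buf;
}
```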
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
struct b43_dma *dma = &dev->dma;
struct b43_dmaring *ring = NULL;
switch (cookie & 0xF000) {
case 0x1000:
ring = dma->tx_ring_AC_BK;
break;
case 0x2000:
ring = dma->tx_ring_AC_BE;
break;
case 0x3000:
ring = dma->tx_ring_AC_VI;
break;
case 0x4000:
ring = dma->tx_ring_AC_VO;
break;
case 0x5000:
ring = dma->tx_ring_mcast;
break;
}
*slot = (cookie & 0x0FFF);
if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
b43dbg(dev->wl, "TX-status contains "
"invalid cookie: 0x%04X\n", cookie);
return NULL;
}
return ring;
} | 0 | [
"CWE-119",
"CWE-787"
] | linux | c85ce65ecac078ab1a1835c87c4a6319cf74660a | 203,196,602,189,669,070,000,000,000,000,000,000,000 | 31 | b43: allocate receive buffers big enough for max frame len + offset
Otherwise, skb_put inside of dma_rx can fail...
https://bugzilla.kernel.org/show_bug.cgi?id=32042
Signed-off-by: John W. Linville <[email protected]>
Acked-by: Larry Finger <[email protected]>
Cc: [email protected] |
void GrpcStreamClientHandler::onReceiveTrailingMetadata(Http::HeaderMapPtr&& metadata) {
context->onGrpcReceiveTrailingMetadata(token, std::move(metadata));
} | 0 | [
"CWE-476"
] | envoy | 8788a3cf255b647fd14e6b5e2585abaaedb28153 | 159,035,094,206,134,200,000,000,000,000,000,000,000 | 3 | 1.4 - Do not call into the VM unless the VM Context has been created. (#24)
* Ensure that the in-VM Context is created before onDone is called.
Signed-off-by: John Plevyak <[email protected]>
* Update as per offline discussion.
Signed-off-by: John Plevyak <[email protected]>
* Set in_vm_context_created_ in onNetworkNewConnection.
Signed-off-by: John Plevyak <[email protected]>
* Add guards to other network calls.
Signed-off-by: John Plevyak <[email protected]>
* Fix common/wasm tests.
Signed-off-by: John Plevyak <[email protected]>
* Patch tests.
Signed-off-by: John Plevyak <[email protected]>
* Remove unecessary file from cherry-pick.
Signed-off-by: John Plevyak <[email protected]> |
explicit TfDlManagedTensorCtx(const TensorReference& ref) : reference(ref) {} | 0 | [
"CWE-20",
"CWE-476",
"CWE-908"
] | tensorflow | 22e07fb204386768e5bcbea563641ea11f96ceb8 | 177,589,089,632,050,380,000,000,000,000,000,000,000 | 1 | Fix multiple vulnerabilities in `tf.experimental.dlpack.to_dlpack`.
We have a use after free caused by memory coruption, a segmentation fault caused by memory corruption, several memory leaks and an undefined behavior when taking the reference of a nullptr.
PiperOrigin-RevId: 332568894
Change-Id: Ife0fc05e103b35325094ae5d822ee5fdea764572 |
int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
AVPacket *avpkt, int is_vp7)
{
VP8Context *s = avctx->priv_data;
int ret, i, referenced, num_jobs;
enum AVDiscard skip_thresh;
VP8Frame *av_uninit(curframe), *prev_frame;
if (is_vp7)
ret = vp7_decode_frame_header(s, avpkt->data, avpkt->size);
else
ret = vp8_decode_frame_header(s, avpkt->data, avpkt->size);
if (ret < 0)
goto err;
prev_frame = s->framep[VP56_FRAME_CURRENT];
referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT ||
s->update_altref == VP56_FRAME_CURRENT;
skip_thresh = !referenced ? AVDISCARD_NONREF
: !s->keyframe ? AVDISCARD_NONKEY
: AVDISCARD_ALL;
if (avctx->skip_frame >= skip_thresh) {
s->invisible = 1;
memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
goto skip_decode;
}
s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;
// release no longer referenced frames
for (i = 0; i < 5; i++)
if (s->frames[i].tf.f->data[0] &&
&s->frames[i] != prev_frame &&
&s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
&s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
&s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
vp8_release_frame(s, &s->frames[i]);
curframe = s->framep[VP56_FRAME_CURRENT] = vp8_find_free_buffer(s);
if (!s->colorspace)
avctx->colorspace = AVCOL_SPC_BT470BG;
if (s->fullrange)
avctx->color_range = AVCOL_RANGE_JPEG;
else
avctx->color_range = AVCOL_RANGE_MPEG;
/* Given that arithmetic probabilities are updated every frame, it's quite
* likely that the values we have on a random interframe are complete
* junk if we didn't start decode on a keyframe. So just don't display
* anything rather than junk. */
if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
!s->framep[VP56_FRAME_GOLDEN] ||
!s->framep[VP56_FRAME_GOLDEN2])) {
av_log(avctx, AV_LOG_WARNING,
"Discarding interframe without a prior keyframe!\n");
ret = AVERROR_INVALIDDATA;
goto err;
}
curframe->tf.f->key_frame = s->keyframe;
curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
: AV_PICTURE_TYPE_P;
if ((ret = vp8_alloc_frame(s, curframe, referenced)) < 0)
goto err;
// check if golden and altref are swapped
if (s->update_altref != VP56_FRAME_NONE)
s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref];
else
s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[VP56_FRAME_GOLDEN2];
if (s->update_golden != VP56_FRAME_NONE)
s->next_framep[VP56_FRAME_GOLDEN] = s->framep[s->update_golden];
else
s->next_framep[VP56_FRAME_GOLDEN] = s->framep[VP56_FRAME_GOLDEN];
if (s->update_last)
s->next_framep[VP56_FRAME_PREVIOUS] = curframe;
else
s->next_framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_PREVIOUS];
s->next_framep[VP56_FRAME_CURRENT] = curframe;
if (avctx->codec->update_thread_context)
ff_thread_finish_setup(avctx);
s->linesize = curframe->tf.f->linesize[0];
s->uvlinesize = curframe->tf.f->linesize[1];
memset(s->top_nnz, 0, s->mb_width * sizeof(*s->top_nnz));
/* Zero macroblock structures for top/top-left prediction
* from outside the frame. */
if (!s->mb_layout)
memset(s->macroblocks + s->mb_height * 2 - 1, 0,
(s->mb_width + 1) * sizeof(*s->macroblocks));
if (!s->mb_layout && s->keyframe)
memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width * 4);
memset(s->ref_count, 0, sizeof(s->ref_count));
if (s->mb_layout == 1) {
// Make sure the previous frame has read its segmentation map,
// if we re-use the same map.
if (prev_frame && s->segmentation.enabled &&
!s->segmentation.update_map)
ff_thread_await_progress(&prev_frame->tf, 1, 0);
if (is_vp7)
vp7_decode_mv_mb_modes(avctx, curframe, prev_frame);
else
vp8_decode_mv_mb_modes(avctx, curframe, prev_frame);
}
if (avctx->active_thread_type == FF_THREAD_FRAME)
num_jobs = 1;
else
num_jobs = FFMIN(s->num_coeff_partitions, avctx->thread_count);
s->num_jobs = num_jobs;
s->curframe = curframe;
s->prev_frame = prev_frame;
s->mv_bounds.mv_min.y = -MARGIN;
s->mv_bounds.mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
for (i = 0; i < MAX_THREADS; i++) {
VP8ThreadData *td = &s->thread_data[i];
atomic_init(&td->thread_mb_pos, 0);
atomic_init(&td->wait_mb_pos, INT_MAX);
}
if (is_vp7)
avctx->execute2(avctx, vp7_decode_mb_row_sliced, s->thread_data, NULL,
num_jobs);
else
avctx->execute2(avctx, vp8_decode_mb_row_sliced, s->thread_data, NULL,
num_jobs);
ff_thread_report_progress(&curframe->tf, INT_MAX, 0);
memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4);
skip_decode:
// if future frames don't use the updated probabilities,
// reset them to the values we saved
if (!s->update_probabilities)
s->prob[0] = s->prob[1];
if (!s->invisible) {
if ((ret = av_frame_ref(data, curframe->tf.f)) < 0)
return ret;
*got_frame = 1;
}
return avpkt->size;
err:
memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
return ret;
} | 1 | [
"CWE-119",
"CWE-787"
] | FFmpeg | 6b5d3fb26fb4be48e4966e4b1d97c2165538d4ef | 207,494,053,381,225,100,000,000,000,000,000,000,000 | 157 | avcodec/webp: Always set pix_fmt
Fixes: out of array access
Fixes: 1434/clusterfuzz-testcase-minimized-6314998085189632
Fixes: 1435/clusterfuzz-testcase-minimized-6483783723253760
Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/targets/ffmpeg
Reviewed-by: "Ronald S. Bultje" <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]> |
static int compat_x25_subscr_ioctl(unsigned int cmd,
struct compat_x25_subscrip_struct __user *x25_subscr32)
{
struct compat_x25_subscrip_struct x25_subscr;
struct x25_neigh *nb;
struct net_device *dev;
int rc = -EINVAL;
rc = -EFAULT;
if (copy_from_user(&x25_subscr, x25_subscr32, sizeof(*x25_subscr32)))
goto out;
rc = -EINVAL;
dev = x25_dev_get(x25_subscr.device);
if (dev == NULL)
goto out;
nb = x25_get_neigh(dev);
if (nb == NULL)
goto out_dev_put;
dev_put(dev);
if (cmd == SIOCX25GSUBSCRIP) {
read_lock_bh(&x25_neigh_list_lock);
x25_subscr.extended = nb->extended;
x25_subscr.global_facil_mask = nb->global_facil_mask;
read_unlock_bh(&x25_neigh_list_lock);
rc = copy_to_user(x25_subscr32, &x25_subscr,
sizeof(*x25_subscr32)) ? -EFAULT : 0;
} else {
rc = -EINVAL;
if (x25_subscr.extended == 0 || x25_subscr.extended == 1) {
rc = 0;
write_lock_bh(&x25_neigh_list_lock);
nb->extended = x25_subscr.extended;
nb->global_facil_mask = x25_subscr.global_facil_mask;
write_unlock_bh(&x25_neigh_list_lock);
}
}
x25_neigh_put(nb);
out:
return rc;
out_dev_put:
dev_put(dev);
goto out;
} | 0 | [] | net | 7781607938c8371d4c2b243527430241c62e39c2 | 256,578,881,325,786,820,000,000,000,000,000,000,000 | 47 | net/x25: Fix null-ptr-deref caused by x25_disconnect
When the link layer is terminating, x25->neighbour will be set to NULL
in x25_disconnect(). As a result, it could cause null-ptr-deref bugs in
x25_sendmsg(),x25_recvmsg() and x25_connect(). One of the bugs is
shown below.
(Thread 1)                     | (Thread 2)
x25_link_terminated()          | x25_recvmsg()
 x25_kill_by_neigh()           |  ...
  x25_disconnect()             |  lock_sock(sk)
   ...                         |  ...
   x25->neighbour = NULL //(1) |
   ...                         |  x25->neighbour->extended //(2)
The code sets NULL to x25->neighbour in position (1) and dereferences
x25->neighbour in position (2), which could cause null-ptr-deref bug.
This patch adds lock_sock() in x25_kill_by_neigh() in order to synchronize
with x25_sendmsg(), x25_recvmsg() and x25_connect(). What`s more, the
sock held by lock_sock() is not NULL, because it is extracted from x25_list
and uses x25_list_lock to synchronize.
Fixes: 4becb7ee5b3d ("net/x25: Fix x25_neigh refcnt leak when x25 disconnect")
Signed-off-by: Duoming Zhou <[email protected]>
Reviewed-by: Lin Ma <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
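The race diagram above pins the bug: the writer at (1) ran without the socket lock the reader at (2) holds. The fix serializes them by taking lock_sock() in x25_kill_by_neigh(). A self-contained sketch of the same discipline, using a pthread mutex as a stand-in for the socket lock (all names here are illustrative):

```c
#include <pthread.h>
#include <stddef.h>

struct neigh { int extended; };

struct x25_sock_sketch {
	pthread_mutex_t lock;       /* analogue of the socket lock */
	struct neigh   *neighbour;  /* shared state readers dereference */
};

/* Teardown now takes the same lock as the send/receive paths before
 * NULLing the shared pointer, closing the (1)/(2) race. */
static void kill_by_neigh(struct x25_sock_sketch *s)
{
	pthread_mutex_lock(&s->lock);    /* was missing before the fix */
	s->neighbour = NULL;             /* (1) */
	pthread_mutex_unlock(&s->lock);
}
```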
void JSObject::allocateNewSlotStorage(
Handle<JSObject> selfHandle,
Runtime *runtime,
SlotIndex newSlotIndex,
Handle<> valueHandle) {
// If it is a direct property, just store the value and we are done.
if (LLVM_LIKELY(newSlotIndex < DIRECT_PROPERTY_SLOTS)) {
selfHandle->directProps()[newSlotIndex].set(
*valueHandle, &runtime->getHeap());
return;
}
// Make the slot index relative to the indirect storage.
newSlotIndex -= DIRECT_PROPERTY_SLOTS;
// Allocate a new property storage if not already allocated.
if (LLVM_UNLIKELY(!selfHandle->propStorage_)) {
// Allocate new storage.
assert(newSlotIndex == 0 && "allocated slot must be at end");
auto arrRes = runtime->ignoreAllocationFailure(
PropStorage::create(runtime, DEFAULT_PROPERTY_CAPACITY));
selfHandle->propStorage_.set(
runtime, vmcast<PropStorage>(arrRes), &runtime->getHeap());
} else if (LLVM_UNLIKELY(
newSlotIndex >=
selfHandle->propStorage_.get(runtime)->capacity())) {
// Reallocate the existing one.
assert(
newSlotIndex == selfHandle->propStorage_.get(runtime)->size() &&
"allocated slot must be at end");
auto hnd = runtime->makeMutableHandle(selfHandle->propStorage_);
PropStorage::resize(hnd, runtime, newSlotIndex + 1);
selfHandle->propStorage_.set(runtime, *hnd, &runtime->getHeap());
}
{
NoAllocScope scope{runtime};
auto *const propStorage = selfHandle->propStorage_.getNonNull(runtime);
if (newSlotIndex >= propStorage->size()) {
assert(
newSlotIndex == propStorage->size() &&
"allocated slot must be at end");
PropStorage::resizeWithinCapacity(propStorage, runtime, newSlotIndex + 1);
}
// If we don't need to resize, just store it directly.
propStorage->at(newSlotIndex).set(*valueHandle, &runtime->getHeap());
}
} | 0 | [
"CWE-843",
"CWE-125"
] | hermes | fe52854cdf6725c2eaa9e125995da76e6ceb27da | 209,039,212,030,881,900,000,000,000,000,000,000,000 | 48 | [CVE-2020-1911] Look up HostObject computed properties on the right object in the prototype chain.
Summary:
The change in the hermes repository fixes the security vulnerability
CVE-2020-1911. This vulnerability only affects applications which
allow evaluation of uncontrolled, untrusted JavaScript code not
shipped with the app, so React Native apps will generally not be affected.
This revision includes a test for the bug. The test is generic JSI
code, so it is included in the hermes and react-native repositories.
Changelog: [Internal]
Reviewed By: tmikov
Differential Revision: D23322992
fbshipit-source-id: 4e88c974afe1ad33a263f9cac03e9dc98d33649a |
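CVE-2020-1911 came from resolving a HostObject computed property against the wrong object: the lookup hook must run on the object actually found in the prototype chain, not blindly on the receiver. A C sketch of the corrected walk, with a function-pointer hook standing in for the HostObject interface (Hermes's real code is C++; everything below is illustrative):

```c
#include <stddef.h>

struct obj {
	struct obj *proto;                  /* prototype chain link */
	int (*get_own)(struct obj *self,    /* HostObject-style lookup hook */
	               const char *name, int *out);
};

/* Resolve a computed property by walking the chain and invoking the
 * hook on the object that owns it -- not on the original receiver. */
static int get_computed(struct obj *receiver, const char *name, int *out)
{
	for (struct obj *o = receiver; o != NULL; o = o->proto)
		if (o->get_own != NULL && o->get_own(o, name, out))
			return 1;
	return 0;
}
```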
nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct client *client = file->private_data;
spinlock_t *client_list_lock = &client->lynx->client_list_lock;
struct nosy_stats stats;
int ret;
switch (cmd) {
case NOSY_IOC_GET_STATS:
spin_lock_irq(client_list_lock);
stats.total_packet_count = client->buffer.total_packet_count;
stats.lost_packet_count = client->buffer.lost_packet_count;
spin_unlock_irq(client_list_lock);
if (copy_to_user((void __user *) arg, &stats, sizeof stats))
return -EFAULT;
else
return 0;
case NOSY_IOC_START:
ret = -EBUSY;
spin_lock_irq(client_list_lock);
if (list_empty(&client->link)) {
list_add_tail(&client->link, &client->lynx->client_list);
ret = 0;
}
spin_unlock_irq(client_list_lock);
return ret;
case NOSY_IOC_STOP:
spin_lock_irq(client_list_lock);
list_del_init(&client->link);
spin_unlock_irq(client_list_lock);
return 0;
case NOSY_IOC_FILTER:
spin_lock_irq(client_list_lock);
client->tcode_mask = arg;
spin_unlock_irq(client_list_lock);
return 0;
default:
return -EINVAL;
/* Flush buffer, configure filter. */
}
} | 0 | [
"CWE-416"
] | linux | 829933ef05a951c8ff140e814656d73e74915faf | 228,155,701,602,816,500,000,000,000,000,000,000,000 | 49 | firewire: nosy: Fix a use-after-free bug in nosy_ioctl()
For each device, the nosy driver allocates a pcilynx structure.
A use-after-free might happen in the following scenario:
1. Open nosy device for the first time and call ioctl with command
NOSY_IOC_START, then a new client A will be malloced and added to
doubly linked list.
2. Open nosy device for the second time and call ioctl with command
NOSY_IOC_START, then a new client B will be malloced and added to
doubly linked list.
3. Call ioctl with command NOSY_IOC_START for client A, then client A
will be readded to the doubly linked list. Now the doubly linked
list is messed up.
4. Close the first nosy device and nosy_release will be called. In
nosy_release, client A will be unlinked and freed.
5. Close the second nosy device, and client A will be referenced,
resulting in UAF.
The root cause of this bug is that the element in the doubly linked list
is reentered into the list.
Fix this bug by adding a check before inserting a client. If a client
is already in the linked list, don't insert it.
The following KASAN report reveals it:
BUG: KASAN: use-after-free in nosy_release+0x1ea/0x210
Write of size 8 at addr ffff888102ad7360 by task poc
CPU: 3 PID: 337 Comm: poc Not tainted 5.12.0-rc5+ #6
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014
Call Trace:
nosy_release+0x1ea/0x210
__fput+0x1e2/0x840
task_work_run+0xe8/0x180
exit_to_user_mode_prepare+0x114/0x120
syscall_exit_to_user_mode+0x1d/0x40
entry_SYSCALL_64_after_hwframe+0x44/0xae
Allocated by task 337:
nosy_open+0x154/0x4d0
misc_open+0x2ec/0x410
chrdev_open+0x20d/0x5a0
do_dentry_open+0x40f/0xe80
path_openat+0x1cf9/0x37b0
do_filp_open+0x16d/0x390
do_sys_openat2+0x11d/0x360
__x64_sys_open+0xfd/0x1a0
do_syscall_64+0x33/0x40
entry_SYSCALL_64_after_hwframe+0x44/0xae
Freed by task 337:
kfree+0x8f/0x210
nosy_release+0x158/0x210
__fput+0x1e2/0x840
task_work_run+0xe8/0x180
exit_to_user_mode_prepare+0x114/0x120
syscall_exit_to_user_mode+0x1d/0x40
entry_SYSCALL_64_after_hwframe+0x44/0xae
The buggy address belongs to the object at ffff888102ad7300 which belongs to the cache kmalloc-128 of size 128
The buggy address is located 96 bytes inside of 128-byte region [ffff888102ad7300, ffff888102ad7380)
[ Modified to use 'list_empty()' inside proper lock - Linus ]
Link: https://lore.kernel.org/lkml/[email protected]/
Reported-and-tested-by: 马哲宇 (Zheyu Ma) <[email protected]>
Signed-off-by: Zheyu Ma <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Stefan Richter <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
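The root cause was re-inserting an element that was already linked, which corrupts the doubly linked list and sets up the later use-after-free. The fix inserts only when list_empty() holds for the client under client_list_lock; the sketch below models that guard on a bare doubly linked list (locking elided, names illustrative):

```c
struct list_node { struct list_node *prev, *next; };

/* list_empty() convention: a detached node is self-linked (as after
 * list_del_init()), so linking is safe only when that holds. */
static void start_client(struct list_node *client, struct list_node *head)
{
	if (client->next != client)
		return;                  /* already on the list: don't re-add */
	client->prev = head->prev;   /* list_add_tail() equivalent */
	client->next = head;
	head->prev->next = client;
	head->prev = client;
}
```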
static void test_bug9992()
{
MYSQL *mysql1;
MYSQL_RES* res ;
int rc;
myheader("test_bug9992");
if (!opt_silent)
printf("Establishing a connection with option CLIENT_MULTI_STATEMENTS..\n");
mysql1= mysql_client_init(NULL);
if (!mysql_real_connect(mysql1, opt_host, opt_user, opt_password,
opt_db ? opt_db : "test", opt_port, opt_unix_socket,
CLIENT_MULTI_STATEMENTS))
{
fprintf(stderr, "Failed to connect to the database\n");
DIE_UNLESS(0);
}
/* Sic: SHOW DATABASE is incorrect syntax. */
rc= mysql_query(mysql1, "SHOW TABLES; SHOW DATABASE; SELECT 1;");
if (rc)
{
fprintf(stderr, "[%d] %s\n", mysql_errno(mysql1), mysql_error(mysql1));
DIE_UNLESS(0);
}
if (!opt_silent)
printf("Testing mysql_store_result/mysql_next_result..\n");
res= mysql_store_result(mysql1);
DIE_UNLESS(res);
mysql_free_result(res);
rc= mysql_next_result(mysql1);
DIE_UNLESS(rc == 1); /* Got errors, as expected */
if (!opt_silent)
fprintf(stdout, "Got error, as expected:\n [%d] %s\n",
mysql_errno(mysql1), mysql_error(mysql1));
mysql_close(mysql1);
} | 0 | [
"CWE-284",
"CWE-295"
] | mysql-server | 3bd5589e1a5a93f9c224badf983cd65c45215390 | 16,385,866,937,351,580,000,000,000,000,000,000,000 | 46 | WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to eunecrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options |
int meth_put(struct transaction_t *txn, void *params)
{
struct meth_params *pparams = (struct meth_params *) params;
int ret, r, precond, rights, reqd_rights;
const char **hdr, *etag;
struct mime_type_t *mime = NULL;
struct mailbox *mailbox = NULL;
struct dav_data *ddata;
struct index_record oldrecord;
quota_t qdiffs[QUOTA_NUMRESOURCES] = QUOTA_DIFFS_INITIALIZER;
time_t lastmod;
unsigned flags = 0;
void *davdb = NULL, *obj = NULL;
struct buf msg_buf = BUF_INITIALIZER;
if (txn->meth == METH_POST) {
reqd_rights = DACL_ADDRSRC;
}
else {
/* Response should not be cached */
txn->flags.cc |= CC_NOCACHE;
/* Parse the path */
r = dav_parse_req_target(txn, pparams);
if (r) {
switch (r){
case HTTP_MOVED:
case HTTP_SERVER_ERROR: return r;
default: return HTTP_FORBIDDEN;
}
}
/* Make sure method is allowed (only allowed on resources) */
if (!((txn->req_tgt.allow & ALLOW_WRITE) && txn->req_tgt.resource))
return HTTP_NOT_ALLOWED;
reqd_rights = DACL_WRITECONT;
if (txn->req_tgt.allow & ALLOW_USERDATA) reqd_rights |= DACL_PROPRSRC;
}
/* Make sure mailbox type is correct */
if (txn->req_tgt.mbentry->mbtype != txn->req_tgt.namespace->mboxtype)
return HTTP_FORBIDDEN;
/* Make sure Content-Range isn't specified */
if (spool_getheader(txn->req_hdrs, "Content-Range"))
return HTTP_BAD_REQUEST;
/* Check Content-Type */
mime = pparams->mime_types;
if ((hdr = spool_getheader(txn->req_hdrs, "Content-Type"))) {
for (; mime->content_type; mime++) {
if (is_mediatype(mime->content_type, hdr[0])) break;
}
if (!mime->content_type) {
txn->error.precond = pparams->put.supp_data_precond;
return HTTP_FORBIDDEN;
}
}
/* Check ACL for current user */
rights = httpd_myrights(httpd_authstate, txn->req_tgt.mbentry);
if (!(rights & reqd_rights)) {
/* DAV:need-privileges */
txn->error.precond = DAV_NEED_PRIVS;
txn->error.resource = txn->req_tgt.path;
txn->error.rights = reqd_rights;
return HTTP_NO_PRIVS;
}
if (txn->req_tgt.mbentry->server) {
/* Remote mailbox */
struct backend *be;
be = proxy_findserver(txn->req_tgt.mbentry->server,
&http_protocol, httpd_userid,
&backend_cached, NULL, NULL, httpd_in);
if (!be) return HTTP_UNAVAILABLE;
return http_pipe_req_resp(be, txn);
}
/* Local Mailbox */
/* Read body */
txn->req_body.flags |= BODY_DECODE;
ret = http_read_req_body(txn);
if (ret) {
txn->flags.conn = CONN_CLOSE;
return ret;
}
if (rights & DACL_WRITECONT) {
/* Check if we can append a new message to mailbox */
qdiffs[QUOTA_STORAGE] = buf_len(&txn->req_body.payload);
if ((r = append_check(txn->req_tgt.mbentry->name, httpd_authstate,
ACL_INSERT, ignorequota ? NULL : qdiffs))) {
syslog(LOG_ERR, "append_check(%s) failed: %s",
txn->req_tgt.mbentry->name, error_message(r));
txn->error.desc = error_message(r);
return HTTP_SERVER_ERROR;
}
}
/* Open mailbox for writing */
r = mailbox_open_iwl(txn->req_tgt.mbentry->name, &mailbox);
if (r) {
syslog(LOG_ERR, "http_mailbox_open(%s) failed: %s",
txn->req_tgt.mbentry->name, error_message(r));
txn->error.desc = error_message(r);
return HTTP_SERVER_ERROR;
}
/* Open the DAV DB corresponding to the mailbox */
davdb = pparams->davdb.open_db(mailbox);
/* Find message UID for the resource, if exists */
pparams->davdb.lookup_resource(davdb, txn->req_tgt.mbentry->name,
txn->req_tgt.resource, (void *) &ddata, 0);
/* XXX Check errors */
/* Fetch resource validators */
r = pparams->get_validators(mailbox, (void *) ddata, httpd_userid,
&oldrecord, &etag, &lastmod);
if (r) {
txn->error.desc = error_message(r);
ret = HTTP_SERVER_ERROR;
goto done;
}
/* Check any preferences */
flags = get_preferences(txn);
/* Check any preconditions */
if (txn->meth == METH_POST) {
assert(!buf_len(&txn->buf));
buf_printf(&txn->buf, "%u-%u-%u", mailbox->i.uidvalidity,
mailbox->i.last_uid, mailbox->i.exists);
ret = precond = pparams->check_precond(txn, params, mailbox, NULL,
buf_cstring(&txn->buf),
mailbox->index_mtime);
buf_reset(&txn->buf);
}
else {
ret = precond = pparams->check_precond(txn, params, mailbox,
(void *) ddata, etag, lastmod);
}
switch (precond) {
case HTTP_OK:
/* Parse, validate, and store the resource */
obj = mime->to_object(&txn->req_body.payload);
ret = pparams->put.proc(txn, obj, mailbox,
txn->req_tgt.resource, davdb, flags);
break;
case HTTP_PRECOND_FAILED:
if ((flags & PREFER_REP) && ((rights & DACL_READ) == DACL_READ)) {
/* Fill in ETag and Last-Modified */
txn->resp_body.etag = etag;
txn->resp_body.lastmod = lastmod;
if (pparams->get) {
r = pparams->get(txn, mailbox, &oldrecord, (void *) ddata, &obj);
if (r != HTTP_CONTINUE) flags &= ~PREFER_REP;
}
else {
unsigned offset;
struct buf buf;
/* Load message containing the resource */
mailbox_map_record(mailbox, &oldrecord, &msg_buf);
/* Resource length doesn't include RFC 5322 header */
offset = oldrecord.header_size;
/* Parse existing resource */
buf_init_ro(&buf, buf_base(&msg_buf) + offset,
buf_len(&msg_buf) - offset);
obj = pparams->mime_types[0].to_object(&buf);
buf_free(&buf);
}
}
break;
case HTTP_LOCKED:
txn->error.precond = DAV_NEED_LOCK_TOKEN;
txn->error.resource = txn->req_tgt.path;
default:
/* We failed a precondition */
goto done;
}
if (txn->req_tgt.allow & ALLOW_PATCH) {
/* Add Accept-Patch formats to response */
txn->resp_body.patch = pparams->patch_docs;
}
if (flags & PREFER_REP) {
struct resp_body_t *resp_body = &txn->resp_body;
const char **hdr;
struct buf *data;
if ((hdr = spool_getheader(txn->req_hdrs, "Accept"))) {
mime = get_accept_type(hdr, pparams->mime_types);
if (!mime) goto done;
}
switch (ret) {
case HTTP_NO_CONTENT:
ret = HTTP_OK;
GCC_FALLTHROUGH
case HTTP_CREATED:
case HTTP_PRECOND_FAILED:
/* Convert into requested MIME type */
data = mime->from_object(obj);
/* Fill in Content-Type, Content-Length */
resp_body->type = mime->content_type;
resp_body->len = buf_len(data);
/* Fill in Content-Location */
resp_body->loc = txn->req_tgt.path;
/* Fill in Expires and Cache-Control */
resp_body->maxage = 3600; /* 1 hr */
txn->flags.cc = CC_MAXAGE
| CC_REVALIDATE /* don't use stale data */
| CC_NOTRANSFORM; /* don't alter iCal data */
/* Output current representation */
write_body(ret, txn, buf_base(data), buf_len(data));
buf_destroy(data);
ret = 0;
break;
default:
/* failure - do nothing */
break;
}
}
done:
if (obj && pparams->mime_types[0].free)
pparams->mime_types[0].free(obj);
buf_free(&msg_buf);
if (davdb) pparams->davdb.close_db(davdb);
mailbox_close(&mailbox);
return ret;
} | 1 | [] | cyrus-imapd | 6703ff881b6056e0c045a7b795ce8ba1bbb87027 | 301,784,555,688,605,740,000,000,000,000,000,000,000 | 256 | http_dav.c: add 'private' Cache-Control directive for cacheable responses that require authentication |
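This one-line message describes the intent: branches like PREFER_REP above mark the response cacheable via max-age, but the body was generated for an authenticated user, so a private directive is needed to keep shared caches from serving it to others. A hedged sketch of the header emission — this is a hypothetical illustration of the HTTP semantics, not Cyrus's API:

```c
#include <stdio.h>

/* Emit "private" alongside max-age when a cacheable response was
 * produced for an authenticated user, so shared caches skip it. */
static void emit_cache_control(FILE *out, int authenticated, unsigned maxage)
{
	if (authenticated)
		fprintf(out, "Cache-Control: private, max-age=%u\r\n", maxage);
	else
		fprintf(out, "Cache-Control: max-age=%u\r\n", maxage);
}
```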
void reset_buffer()
{
m_string.set(buffer, buffer_size, &my_charset_bin);
} | 0 | [
"CWE-617"
] | server | 807945f2eb5fa22e6f233cc17b85a2e141efe2c8 | 245,217,378,160,728,850,000,000,000,000,000,000,000 | 4 | MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item. |
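The fix's rule — flag every sub-item, not just the root constant — is what keeps the later bottom-up cleanup from leaving a fixed item with an un-fixed child. A C sketch of that recursive marking over a binary expression tree; MariaDB's real code uses Item::walk(), and all names below are illustrative:

```c
#include <stddef.h>

#define IMMUTABLE_FL 0x1u

struct item {
	unsigned flags;
	struct item *left, *right;  /* children of a binary expression */
};

/* Flag the whole constant subtree, not only its root, so a bottom-up
 * cleanup pass sees a consistent tree. */
static void set_immutable_recursively(struct item *it)
{
	if (it == NULL)
		return;
	it->flags |= IMMUTABLE_FL;  /* previously only the root was flagged */
	set_immutable_recursively(it->left);
	set_immutable_recursively(it->right);
}
```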
static int bmp_read_4bit(gdImagePtr im, gdIOCtxPtr infile, bmp_info_t *info, bmp_hdr_t *header)
{
int ypos = 0, xpos = 0, row = 0, index = 0;
int padding = 0, current_byte = 0;
if (info->enctype != BMP_BI_RGB && info->enctype != BMP_BI_RLE4) {
return 1;
}
if (!info->numcolors) {
info->numcolors = 16;
} else if (info->numcolors < 0 || info->numcolors > 16) {
return 1;
}
if (bmp_read_palette(im, infile, info->numcolors, (info->type == BMP_PALETTE_4))) {
return 1;
}
im->colorsTotal = info->numcolors;
/* There is a chance the data isn't stored until later, which would be weird but is possible */
if (gdTell(infile) != header->off) {
/* Should make sure we don't seek past the file size */
if (!gdSeek(infile, header->off)) {
return 1;
}
}
/* The line must be divisible by 4, else its padded with NULLs */
padding = ((int)ceil(0.5 * info->width)) % 4;
if (padding) {
padding = 4 - padding;
}
switch (info->enctype) {
case BMP_BI_RGB:
for (ypos = 0; ypos < info->height; ++ypos) {
if (info->topdown) {
row = ypos;
} else {
row = info->height - ypos - 1;
}
for (xpos = 0; xpos < info->width; xpos += 2) {
if (!gdGetByte(¤t_byte, infile)) {
return 1;
}
index = (current_byte >> 4) & 0x0f;
if (im->open[index]) {
im->open[index] = 0;
}
gdImageSetPixel(im, xpos, row, index);
/* This condition may get called often, potential optimisations */
if (xpos >= info->width) {
break;
}
index = current_byte & 0x0f;
if (im->open[index]) {
im->open[index] = 0;
}
gdImageSetPixel(im, xpos + 1, row, index);
}
for (xpos = padding; xpos > 0; --xpos) {
if (!gdGetByte(&index, infile)) {
return 1;
}
}
}
break;
case BMP_BI_RLE4:
if (bmp_read_rle(im, infile, info)) {
return 1;
}
break;
default:
return 1;
}
return 0;
} | 0 | [
"CWE-415"
] | libgd | ac16bdf2d41724b5a65255d4c28fb0ec46bc42f5 | 326,109,254,809,571,600,000,000,000,000,000,000,000 | 86 | bmp: check return value in gdImageBmpPtr
Closes #447. |
Item_bool_rowready_func2* Ge_creator::create_swap(THD *thd, Item *a, Item *b) const
{
return new(thd->mem_root) Item_func_le(thd, b, a);
} | 0 | [
"CWE-617"
] | server | 807945f2eb5fa22e6f233cc17b85a2e141efe2c8 | 213,049,455,758,819,180,000,000,000,000,000,000,000 | 4 | MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item. |
static ssize_t poison_store(struct kmem_cache *s,
const char *buf, size_t length)
{
if (any_slab_objects(s))
return -EBUSY;
s->flags &= ~SLAB_POISON;
if (buf[0] == '1') {
s->flags |= SLAB_POISON;
}
calculate_sizes(s, -1);
return length;
} | 0 | [] | linux | fd4d9c7d0c71866ec0c2825189ebd2ce35bd95b8 | 323,482,901,904,561,000,000,000,000,000,000,000,000 | 13 | mm: slub: add missing TID bump in kmem_cache_alloc_bulk()
When kmem_cache_alloc_bulk() attempts to allocate N objects from a percpu
freelist of length M, and N > M > 0, it will first remove the M elements
from the percpu freelist, then call ___slab_alloc() to allocate the next
element and repopulate the percpu freelist. ___slab_alloc() can re-enable
IRQs via allocate_slab(), so the TID must be bumped before ___slab_alloc()
to properly commit the freelist head change.
Fix it by unconditionally bumping c->tid when entering the slowpath.
Cc: [email protected]
Fixes: ebe909e0fdb3 ("slub: improve bulk alloc strategy")
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
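The invariant behind this fix: the per-CPU freelist is committed only through a tid-checked compare-and-swap, so any slowpath that may re-enable IRQs must bump the tid first, or a stale commit can slip through. A minimal sketch of that discipline with C11 atomics (the kernel's actual this_cpu_cmpxchg machinery is more involved; names are illustrative):

```c
#include <stdatomic.h>

struct percpu_cache {
	_Atomic unsigned long tid;  /* transaction id guarding the freelist */
	void *freelist;
};

/* Any path that can lose exclusive use of the percpu state (here, a
 * slowpath that may re-enable IRQs) bumps the tid first, so a later
 * cmpxchg-style commit against the stale tid fails instead of
 * committing a torn freelist update. */
static void enter_slowpath(struct percpu_cache *c)
{
	atomic_fetch_add_explicit(&c->tid, 1, memory_order_relaxed);
	/* ... ___slab_alloc() analogue runs here ... */
}
```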
PHP_FUNCTION(odbc_gettypeinfo)
{
zval *pv_conn;
long pv_data_type = SQL_ALL_TYPES;
odbc_result *result = NULL;
odbc_connection *conn;
RETCODE rc;
SQLSMALLINT data_type;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r|l", &pv_conn, &pv_data_type) == FAILURE) {
return;
}
data_type = (SQLSMALLINT) pv_data_type;
ZEND_FETCH_RESOURCE2(conn, odbc_connection *, &pv_conn, -1, "ODBC-Link", le_conn, le_pconn);
result = (odbc_result *)ecalloc(1, sizeof(odbc_result));
rc = PHP_ODBC_SQLALLOCSTMT(conn->hdbc, &(result->stmt));
if (rc == SQL_INVALID_HANDLE) {
efree(result);
php_error_docref(NULL TSRMLS_CC, E_WARNING, "SQLAllocStmt error 'Invalid Handle'");
RETURN_FALSE;
}
if (rc == SQL_ERROR) {
odbc_sql_error(conn, SQL_NULL_HSTMT, "SQLAllocStmt");
efree(result);
RETURN_FALSE;
}
rc = SQLGetTypeInfo(result->stmt, data_type );
if (rc == SQL_ERROR) {
odbc_sql_error(conn, SQL_NULL_HSTMT, "SQLGetTypeInfo");
efree(result);
RETURN_FALSE;
}
result->numparams = 0;
SQLNumResultCols(result->stmt, &(result->numcols));
if (result->numcols > 0) {
if (!odbc_bindcols(result TSRMLS_CC)) {
efree(result);
RETURN_FALSE;
}
} else {
result->values = NULL;
}
result->conn_ptr = conn;
result->fetched = 0;
ZEND_REGISTER_RESOURCE(return_value, result, le_result);
} | 0 | [
"CWE-20"
] | php-src | 16db4d1462bf3eacb93c0cd940f799160a284b24 | 128,633,780,114,806,630,000,000,000,000,000,000,000 | 55 | Fix #69975: PHP segfaults when accessing nvarchar(max) defined columns
The SQL Server Native Client 11.0 and maybe other ODBC drivers report
NVARCHAR(MAX) columns as SQL_WVARCHAR with size 0. This causes too small a
buffer to be emalloc'd, likely causing a segfault in the following. As we don't
know the real size of the column data, we treat such colums as
SQL_WLONGVARCHAR.
The related bug #67437 suggests that some drivers report a size of ~4GB. It is
not certain that this is really the case (there might be some integer overflow
involved, and anyway, there has been no feedback), so we do not cater for this
now. However, it would not be hard to treat all sizes above a certain threshold
in a similar way, i.e. as SQL_WLONGVARCHAR. |
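The fix reclassifies the problem column type before any buffer is sized: a SQL_WVARCHAR reported with size 0 is treated as SQL_WLONGVARCHAR, whose data is fetched in chunks rather than into one emalloc'd buffer. A sketch of that decision; the constants match ODBC's wide-character type codes, and the helper function is illustrative only:

```c
#define SQL_WVARCHAR     (-9)
#define SQL_WLONGVARCHAR (-10)

/* A wide varchar whose reported display size is 0 (NVARCHAR(MAX))
 * cannot size a buffer, so treat it as a long varchar. */
static int effective_sql_type(int coltype, unsigned long displaysize)
{
	if (coltype == SQL_WVARCHAR && displaysize == 0)
		return SQL_WLONGVARCHAR;
	return coltype;
}
```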
v3d_invalidate_slices(struct v3d_dev *v3d, int core)
{
V3D_CORE_WRITE(core, V3D_CTL_SLCACTL,
V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) |
V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) |
V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC));
} | 0 | [
"CWE-400",
"CWE-401"
] | linux | 29cd13cfd7624726d9e6becbae9aa419ef35af7f | 156,998,152,593,200,570,000,000,000,000,000,000,000 | 8 | drm/v3d: Fix memory leak in v3d_submit_cl_ioctl
In the impelementation of v3d_submit_cl_ioctl() there are two memory
leaks. One is when allocation for bin fails, and the other is when bin
initialization fails. If kcalloc fails to allocate memory for bin then
render->base should be put. Also, if v3d_job_init() fails to initialize
bin->base then allocated memory for bin should be released.
Fixes: a783a09ee76d ("drm/v3d: Refactor job management.")
Signed-off-by: Navid Emamdoost <[email protected]>
Reviewed-by: Eric Anholt <[email protected]>
Signed-off-by: Daniel Vetter <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected] |
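Both leaks follow one error-handling rule: anything acquired before a failure point must be released on that error path. A self-contained sketch of the corrected flow, with a toy refcount standing in for the DRM job lifetime (init_ok models v3d_job_init(); all names are illustrative):

```c
#include <stdlib.h>

struct job { int refs; };

static void job_put(struct job *j)
{
	if (j != NULL && --j->refs == 0)
		free(j);
}

static int submit_sketch(struct job *render, int init_ok)
{
	struct job *bin = calloc(1, sizeof(*bin));
	if (bin == NULL) {
		job_put(render);        /* leak #1 fixed: drop render on alloc failure */
		return -1;
	}
	bin->refs = 1;
	if (!init_ok) {
		free(bin);              /* leak #2 fixed: free bin on init failure */
		job_put(render);
		return -1;
	}
	/* ... queue bin and render ... */
	return 0;
}
```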
bool InstanceKlass::link_class_impl(TRAPS) {
if (DumpSharedSpaces && SystemDictionaryShared::has_class_failed_verification(this)) {
// This is for CDS dumping phase only -- we use the in_error_state to indicate that
// the class has failed verification. Throwing the NoClassDefFoundError here is just
// a convenient way to stop repeat attempts to verify the same (bad) class.
//
// Note that the NoClassDefFoundError is not part of the JLS, and should not be thrown
// if we are executing Java code. This is not a problem for CDS dumping phase since
// it doesn't execute any Java code.
ResourceMark rm(THREAD);
Exceptions::fthrow(THREAD_AND_LOCATION,
vmSymbols::java_lang_NoClassDefFoundError(),
"Class %s, or one of its supertypes, failed class initialization",
external_name());
return false;
}
// return if already verified
if (is_linked()) {
return true;
}
// Timing
// timer handles recursion
JavaThread* jt = THREAD;
// link super class before linking this class
Klass* super_klass = super();
if (super_klass != NULL) {
if (super_klass->is_interface()) { // check if super class is an interface
ResourceMark rm(THREAD);
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_IncompatibleClassChangeError(),
"class %s has interface %s as super class",
external_name(),
super_klass->external_name()
);
return false;
}
InstanceKlass* ik_super = InstanceKlass::cast(super_klass);
ik_super->link_class_impl(CHECK_false);
}
// link all interfaces implemented by this class before linking this class
Array<InstanceKlass*>* interfaces = local_interfaces();
int num_interfaces = interfaces->length();
for (int index = 0; index < num_interfaces; index++) {
InstanceKlass* interk = interfaces->at(index);
interk->link_class_impl(CHECK_false);
}
// in case the class is linked in the process of linking its superclasses
if (is_linked()) {
return true;
}
// trace only the link time for this klass that includes
// the verification time
PerfClassTraceTime vmtimer(ClassLoader::perf_class_link_time(),
ClassLoader::perf_class_link_selftime(),
ClassLoader::perf_classes_linked(),
jt->get_thread_stat()->perf_recursion_counts_addr(),
jt->get_thread_stat()->perf_timers_addr(),
PerfClassTraceTime::CLASS_LINK);
// verification & rewriting
{
HandleMark hm(THREAD);
Handle h_init_lock(THREAD, init_lock());
ObjectLocker ol(h_init_lock, jt);
// rewritten will have been set if loader constraint error found
// on an earlier link attempt
// don't verify or rewrite if already rewritten
//
if (!is_linked()) {
if (!is_rewritten()) {
{
bool verify_ok = verify_code(THREAD);
if (!verify_ok) {
return false;
}
}
// Just in case a side-effect of verify linked this class already
// (which can sometimes happen since the verifier loads classes
// using custom class loaders, which are free to initialize things)
if (is_linked()) {
return true;
}
// also sets rewritten
rewrite_class(CHECK_false);
} else if (is_shared()) {
SystemDictionaryShared::check_verification_constraints(this, CHECK_false);
}
// relocate jsrs and link methods after they are all rewritten
link_methods(CHECK_false);
// Initialize the vtable and interface table after
// methods have been rewritten since rewrite may
// fabricate new Method*s.
// also does loader constraint checking
//
// initialize_vtable and initialize_itable need to be rerun
// for a shared class if
// 1) the class is loaded by custom class loader or
// 2) the class is loaded by built-in class loader but failed to add archived loader constraints
bool need_init_table = true;
if (is_shared() && SystemDictionaryShared::check_linking_constraints(THREAD, this)) {
need_init_table = false;
}
if (need_init_table) {
vtable().initialize_vtable_and_check_constraints(CHECK_false);
itable().initialize_itable_and_check_constraints(CHECK_false);
}
#ifdef ASSERT
vtable().verify(tty, true);
// In case itable verification is ever added.
// itable().verify(tty, true);
#endif
if (UseVtableBasedCHA) {
MutexLocker ml(THREAD, Compile_lock);
set_init_state(linked);
// Now flush all code that assume the class is not linked.
if (Universe::is_fully_initialized()) {
CodeCache::flush_dependents_on(this);
}
} else {
set_init_state(linked);
}
if (JvmtiExport::should_post_class_prepare()) {
JvmtiExport::post_class_prepare(THREAD, this);
}
}
}
return true;
} | 0 | [] | jdk17u | f8eb9abe034f7c6bea4da05a9ea42017b3f80730 | 157,957,897,959,623,870,000,000,000,000,000,000,000 | 141 | 8270386: Better verification of scan methods
Reviewed-by: coleenp
Backport-of: ac329cef45979bd0159ecd1347e36f7129bb2ce4 |
uint64_t
e1000e_core_read(E1000ECore *core, hwaddr addr, unsigned size)
{
uint64_t val;
uint16_t index = e1000e_get_reg_index_with_offset(mac_reg_access, addr);
if (index < E1000E_NREADOPS && e1000e_macreg_readops[index]) {
if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
trace_e1000e_wrn_regs_read_trivial(index << 2);
}
val = e1000e_macreg_readops[index](core, index);
trace_e1000e_core_read(index << 2, size, val);
return val;
} else {
trace_e1000e_wrn_regs_read_unknown(index << 2, size);
}
    return 0;
} | 0 | [
"CWE-835"
] | qemu | 4154c7e03fa55b4cf52509a83d50d6c09d743b77 | 164,974,756,909,070,270,000,000,000,000,000,000,000 | 17 | net: e1000e: fix an infinite loop issue
This issue is like the issue in e1000 network card addressed in
this commit:
e1000: eliminate infinite loops on out-of-bounds transfer start.
Signed-off-by: Li Qiang <[email protected]>
Reviewed-by: Dmitry Fleytman <[email protected]>
Signed-off-by: Jason Wang <[email protected]> |
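The referenced e1000 fix bounds the transmit loop; a standalone sketch of
that idea, with illustrative names rather than the actual device-emulation
symbols.

#include <stdint.h>

struct tx_ring_sketch { uint32_t head, tail, count; };

/* Cap descriptor processing at the ring size so a guest-controlled
 * head/tail pair cannot make the emulation loop spin forever. */
static void process_tx_ring(struct tx_ring_sketch *ring)
{
    uint32_t processed = 0;

    if (ring->count == 0)
        return;
    while (ring->head != ring->tail) {
        ring->head = (ring->head + 1) % ring->count;  /* consume one */
        if (++processed >= ring->count)
            break;        /* out-of-bounds transfer start: bail out */
    }
}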
SDL_DitherColors(SDL_Color * colors, int bpp)
{
int i;
if (bpp != 8)
return; /* only 8bpp supported right now */
for (i = 0; i < 256; i++) {
int r, g, b;
/* map each bit field to the full [0, 255] interval,
so 0 is mapped to (0, 0, 0) and 255 to (255, 255, 255) */
r = i & 0xe0;
r |= r >> 3 | r >> 6;
colors[i].r = r;
g = (i << 3) & 0xe0;
g |= g >> 3 | g >> 6;
colors[i].g = g;
b = i & 0x3;
b |= b << 2;
b |= b << 4;
colors[i].b = b;
colors[i].a = SDL_ALPHA_OPAQUE;
}
} | 0 | [
"CWE-703",
"CWE-787"
] | SDL | 8c91cf7dba5193f5ce12d06db1336515851c9ee9 | 126,017,706,229,122,430,000,000,000,000,000,000,000 | 23 | Always create a full 256-entry map in case color values are out of range
Fixes https://github.com/libsdl-org/SDL/issues/5042 |
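The bit-replication above spreads each 3- or 2-bit field across the full
byte so the extremes map exactly to 0 and 255; a standalone check of the
arithmetic:

#include <stdio.h>

int main(void)
{
    int i = 0xff;
    int r = i & 0xe0;        r |= r >> 3 | r >> 6;     /* 0xe0 -> 0xff */
    int g = (i << 3) & 0xe0; g |= g >> 3 | g >> 6;     /* 0xe0 -> 0xff */
    int b = i & 0x03;        b |= b << 2; b |= b << 4; /* 0x03 -> 0xff */
    printf("%d %d %d\n", r, g, b);                     /* prints 255 255 255 */
    return 0;
}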
INST_HANDLER (sts) { // STS k, Rr
int r = ((buf[0] >> 4) & 0xf) | ((buf[1] & 0x1) << 4);
int k = (buf[3] << 8) | buf[2];
op->ptr = k;
ESIL_A ("r%d,", r);
__generic_ld_st (op, "ram", 0, 1, 0, k, 1);
op->cycles = 2;
} | 1 | [
"CWE-125"
] | radare2 | d04c78773f6959bcb427453f8e5b9824d5ba9eff | 64,577,354,453,485,360,000,000,000,000,000,000,000 | 10 | Fix #10091 - crash in AVR analysis |
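The crash comes from reading buf[2] and buf[3] of a possibly shorter buffer;
a hedged sketch of the missing length check (illustrative shape, not the
exact radare2 patch):

#include <stddef.h>

/* The 32-bit STS encoding spans four bytes, so check the available
 * length before touching buf[2] and buf[3]. */
static int decode_sts_k(const unsigned char *buf, size_t len, int *k)
{
    if (len < 4)
        return -1;                 /* truncated instruction */
    *k = (buf[3] << 8) | buf[2];
    return 0;
}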
void DL_Dxf::writeDimLinear(DL_WriterA& dw,
const DL_DimensionData& data,
const DL_DimLinearData& edata,
const DL_Attributes& attrib) {
dw.entity("DIMENSION");
if (version==DL_VERSION_2000) {
dw.dxfString(100, "AcDbEntity");
}
dw.entityAttributes(attrib);
if (version==DL_VERSION_2000) {
dw.dxfString(100, "AcDbDimension");
}
dw.dxfReal(10, data.dpx);
dw.dxfReal(20, data.dpy);
dw.dxfReal(30, data.dpz);
dw.dxfReal(11, data.mpx);
dw.dxfReal(21, data.mpy);
dw.dxfReal(31, 0.0);
dw.dxfInt(70, data.type);
if (version>DL_VERSION_R12) {
dw.dxfInt(71, data.attachmentPoint);
dw.dxfInt(72, data.lineSpacingStyle); // opt
dw.dxfInt(74, data.arrow1Flipped);
dw.dxfInt(75, data.arrow2Flipped);
dw.dxfReal(41, data.lineSpacingFactor); // opt
}
dw.dxfReal(42, data.angle);
dw.dxfString(1, data.text); // opt
//dw.dxfString(3, data.style);
dw.dxfString(3, "Standard");
if (version==DL_VERSION_2000) {
dw.dxfString(100, "AcDbAlignedDimension");
}
dw.dxfReal(13, edata.dpx1);
dw.dxfReal(23, edata.dpy1);
dw.dxfReal(33, 0.0);
dw.dxfReal(14, edata.dpx2);
dw.dxfReal(24, edata.dpy2);
dw.dxfReal(34, 0.0);
dw.dxfReal(50, edata.angle/(2.0*M_PI)*360.0);
if (version==DL_VERSION_2000) {
dw.dxfString(100, "AcDbRotatedDimension");
}
writeDimStyleOverrides(dw, data);
} | 0 | [
"CWE-191"
] | qcad | 1eeffc5daf5a06cf6213ffc19e95923cdebb2eb8 | 340,090,043,445,243,460,000,000,000,000,000,000,000 | 58 | check vertexIndex which might be -1 for broken DXF |
void CoreUserInputHandler::issueAway(const QString &msg, bool autoCheck)
{
QString awayMsg = msg;
IrcUser *me = network()->me();
// if there is no message supplied we have to check if we are already away or not
if (autoCheck && msg.isEmpty()) {
if (me && !me->isAway()) {
Identity *identity = network()->identityPtr();
if (identity) {
awayMsg = identity->awayReason();
}
if (awayMsg.isEmpty()) {
awayMsg = tr("away");
}
}
}
if (me)
me->setAwayMessage(awayMsg);
putCmd("AWAY", serverEncode(awayMsg));
} | 0 | [
"CWE-399"
] | quassel | b5e38970ffd55e2dd9f706ce75af9a8d7730b1b8 | 93,613,164,941,118,310,000,000,000,000,000,000,000 | 22 | Improve the message-splitting algorithm for PRIVMSG and CTCP
This introduces a new message splitting algorithm based on
QTextBoundaryFinder. It works by first starting with the entire
message to be sent, encoding it, and checking to see if it is over
the maximum message length. If it is, it uses QTBF to find the
word boundary most immediately preceding the maximum length. If no
suitable boundary can be found, it falls back to searching for
grapheme boundaries. It repeats this process until the entire
message has been sent.
Unlike what it replaces, the new splitting code is not recursive
and cannot cause stack overflows. Additionally, if it is unable
to split a string, it will give up gracefully and not crash the
core or cause a thread to run away.
This patch fixes two bugs. The first is garbage characters caused
by accidentally splitting the string in the middle of a multibyte
character. Since the new code splits at a character level instead
of a byte level, this will no longer be an issue. The second is
the core crash caused by sending an overlength CTCP query ("/me")
containing only multibyte characters. This bug was caused by the
old CTCP splitter using the byte index from lastParamOverrun() as
a character index for a QString. |
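An iterative splitter in the spirit described above, reduced to a standalone
C sketch; the real code uses QTextBoundaryFinder on encoded lengths, so the
space scan below is only a stand-in for the boundary search.

#include <stdio.h>
#include <string.h>

/* Find the last word boundary at or before the limit, fall back to a
 * hard cut, and always make progress so the loop cannot recurse or
 * run away. */
static void send_split(const char *msg, size_t max_len)
{
    size_t len = strlen(msg);

    if (max_len == 0)
        return;
    while (len > 0) {
        size_t chunk = len <= max_len ? len : max_len;

        if (len > max_len) {
            size_t i = chunk;
            while (i > 0 && msg[i] != ' ')    /* crude stand-in for QTBF */
                i--;
            if (i > 0)
                chunk = i;                    /* split at the boundary */
        }
        printf("PRIVMSG: %.*s\n", (int)chunk, msg);
        msg += chunk;
        len -= chunk;
        while (len > 0 && *msg == ' ') {      /* skip the separator */
            msg++;
            len--;
        }
    }
}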
static Image *ReadMIFFImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
#define BZipMaxExtent(x) ((x)+((x)/100)+600)
#define LZMAMaxExtent(x) ((x)+((x)/3)+128)
#define ThrowMIFFException(exception,message) \
{ \
if (quantum_info != (QuantumInfo *) NULL) \
quantum_info=DestroyQuantumInfo(quantum_info); \
if (compress_pixels != (unsigned char *) NULL) \
compress_pixels=(unsigned char *) RelinquishMagickMemory(compress_pixels); \
ThrowReaderException((exception),(message)); \
}
#define ZipMaxExtent(x) ((x)+(((x)+7) >> 3)+(((x)+63) >> 6)+11)
#if defined(MAGICKCORE_BZLIB_DELEGATE)
bz_stream
bzip_info;
#endif
char
id[MaxTextExtent],
keyword[MaxTextExtent],
*options;
const unsigned char
*p;
double
version;
GeometryInfo
geometry_info;
Image
*image;
IndexPacket
index;
int
c;
LinkedListInfo
*profiles;
#if defined(MAGICKCORE_LZMA_DELEGATE)
lzma_stream
initialize_lzma = LZMA_STREAM_INIT,
lzma_info;
lzma_allocator
allocator;
#endif
MagickBooleanType
status;
MagickStatusType
flags;
PixelPacket
pixel;
QuantumFormatType
quantum_format;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
register ssize_t
i;
size_t
compress_extent,
length,
packet_size;
ssize_t
count;
unsigned char
*compress_pixels,
*pixels;
size_t
colors;
ssize_t
y;
#if defined(MAGICKCORE_ZLIB_DELEGATE)
z_stream
zip_info;
#endif
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Decode image header; header terminates one character beyond a ':'.
*/
c=ReadBlobByte(image);
if (c == EOF)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
*id='\0';
compress_pixels=(unsigned char *) NULL;
quantum_info=(QuantumInfo *) NULL;
(void) memset(keyword,0,sizeof(keyword));
version=0.0;
(void) version;
do
{
/*
Decode image header; header terminates one character beyond a ':'.
*/
SetGeometryInfo(&geometry_info);
length=MaxTextExtent;
options=AcquireString((char *) NULL);
quantum_format=UndefinedQuantumFormat;
profiles=(LinkedListInfo *) NULL;
colors=0;
image->depth=8UL;
image->compression=NoCompression;
while ((isgraph(c) != MagickFalse) && (c != (int) ':'))
{
register char
*p;
if (c == (int) '{')
{
char
*comment;
/*
Read comment-- any text between { }.
*/
length=MaxTextExtent;
comment=AcquireString((char *) NULL);
for (p=comment; comment != (char *) NULL; p++)
{
c=ReadBlobByte(image);
if (c == (int) '\\')
c=ReadBlobByte(image);
else
if ((c == EOF) || (c == (int) '}'))
break;
if ((size_t) (p-comment+1) >= length)
{
*p='\0';
length<<=1;
comment=(char *) ResizeQuantumMemory(comment,length+
MaxTextExtent,sizeof(*comment));
if (comment == (char *) NULL)
break;
p=comment+strlen(comment);
}
*p=(char) c;
}
if (comment == (char *) NULL)
{
options=DestroyString(options);
ThrowMIFFException(ResourceLimitError,"MemoryAllocationFailed");
}
*p='\0';
(void) SetImageProperty(image,"comment",comment);
comment=DestroyString(comment);
c=ReadBlobByte(image);
}
else
if (isalnum(c) != MagickFalse)
{
/*
Get the keyword.
*/
length=MaxTextExtent-1;
p=keyword;
do
{
if (c == (int) '=')
break;
if ((size_t) (p-keyword) < (MaxTextExtent-1))
*p++=(char) c;
c=ReadBlobByte(image);
} while (c != EOF);
*p='\0';
p=options;
while ((isspace((int) ((unsigned char) c)) != 0) && (c != EOF))
c=ReadBlobByte(image);
if (c == (int) '=')
{
/*
Get the keyword value.
*/
c=ReadBlobByte(image);
while ((c != (int) '}') && (c != EOF))
{
if ((size_t) (p-options+1) >= length)
{
*p='\0';
length<<=1;
options=(char *) ResizeQuantumMemory(options,length+
MaxTextExtent,sizeof(*options));
if (options == (char *) NULL)
break;
p=options+strlen(options);
}
*p++=(char) c;
c=ReadBlobByte(image);
if (c == '\\')
{
c=ReadBlobByte(image);
if (c == (int) '}')
{
*p++=(char) c;
c=ReadBlobByte(image);
}
}
if (*options != '{')
if (isspace((int) ((unsigned char) c)) != 0)
break;
}
if (options == (char *) NULL)
ThrowMIFFException(ResourceLimitError,
"MemoryAllocationFailed");
}
*p='\0';
if (*options == '{')
(void) CopyMagickString(options,options+1,strlen(options));
/*
Assign a value to the specified keyword.
*/
switch (*keyword)
{
case 'b':
case 'B':
{
if (LocaleCompare(keyword,"background-color") == 0)
{
(void) QueryColorDatabase(options,&image->background_color,
exception);
break;
}
if (LocaleCompare(keyword,"blue-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.blue_primary.y=
image->chromaticity.blue_primary.x;
break;
}
if (LocaleCompare(keyword,"border-color") == 0)
{
(void) QueryColorDatabase(options,&image->border_color,
exception);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'c':
case 'C':
{
if (LocaleCompare(keyword,"class") == 0)
{
ssize_t
storage_class;
storage_class=ParseCommandOption(MagickClassOptions,
MagickFalse,options);
if (storage_class < 0)
break;
image->storage_class=(ClassType) storage_class;
break;
}
if (LocaleCompare(keyword,"colors") == 0)
{
colors=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"colorspace") == 0)
{
ssize_t
colorspace;
colorspace=ParseCommandOption(MagickColorspaceOptions,
MagickFalse,options);
if (colorspace < 0)
break;
image->colorspace=(ColorspaceType) colorspace;
break;
}
if (LocaleCompare(keyword,"compression") == 0)
{
ssize_t
compression;
compression=ParseCommandOption(MagickCompressOptions,
MagickFalse,options);
if (compression < 0)
break;
image->compression=(CompressionType) compression;
break;
}
if (LocaleCompare(keyword,"columns") == 0)
{
image->columns=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'd':
case 'D':
{
if (LocaleCompare(keyword,"delay") == 0)
{
image->delay=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"depth") == 0)
{
image->depth=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"dispose") == 0)
{
ssize_t
dispose;
dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse,
options);
if (dispose < 0)
break;
image->dispose=(DisposeType) dispose;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'e':
case 'E':
{
if (LocaleCompare(keyword,"endian") == 0)
{
ssize_t
endian;
endian=ParseCommandOption(MagickEndianOptions,MagickFalse,
options);
if (endian < 0)
break;
image->endian=(EndianType) endian;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'g':
case 'G':
{
if (LocaleCompare(keyword,"gamma") == 0)
{
image->gamma=StringToDouble(options,(char **) NULL);
break;
}
if (LocaleCompare(keyword,"gravity") == 0)
{
ssize_t
gravity;
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,
options);
if (gravity < 0)
break;
image->gravity=(GravityType) gravity;
break;
}
if (LocaleCompare(keyword,"green-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.green_primary.y=
image->chromaticity.green_primary.x;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'i':
case 'I':
{
if (LocaleCompare(keyword,"id") == 0)
{
(void) CopyMagickString(id,options,MaxTextExtent);
break;
}
if (LocaleCompare(keyword,"iterations") == 0)
{
image->iterations=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'm':
case 'M':
{
if (LocaleCompare(keyword,"matte") == 0)
{
ssize_t
matte;
matte=ParseCommandOption(MagickBooleanOptions,MagickFalse,
options);
if (matte < 0)
break;
image->matte=(MagickBooleanType) matte;
break;
}
if (LocaleCompare(keyword,"matte-color") == 0)
{
(void) QueryColorDatabase(options,&image->matte_color,
exception);
break;
}
if (LocaleCompare(keyword,"montage") == 0)
{
(void) CloneString(&image->montage,options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'o':
case 'O':
{
if (LocaleCompare(keyword,"opaque") == 0)
{
ssize_t
matte;
matte=ParseCommandOption(MagickBooleanOptions,MagickFalse,
options);
if (matte < 0)
break;
image->matte=(MagickBooleanType) matte;
break;
}
if (LocaleCompare(keyword,"orientation") == 0)
{
ssize_t
orientation;
orientation=ParseCommandOption(MagickOrientationOptions,
MagickFalse,options);
if (orientation < 0)
break;
image->orientation=(OrientationType) orientation;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'p':
case 'P':
{
if (LocaleCompare(keyword,"page") == 0)
{
char
*geometry;
geometry=GetPageGeometry(options);
(void) ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
break;
}
if (LocaleCompare(keyword,"pixel-intensity") == 0)
{
ssize_t
intensity;
intensity=ParseCommandOption(MagickPixelIntensityOptions,
MagickFalse,options);
if (intensity < 0)
break;
image->intensity=(PixelIntensityMethod) intensity;
break;
}
if (LocaleCompare(keyword,"profile") == 0)
{
if (profiles == (LinkedListInfo *) NULL)
profiles=NewLinkedList(0);
(void) AppendValueToLinkedList(profiles,
AcquireString(options));
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'q':
case 'Q':
{
if (LocaleCompare(keyword,"quality") == 0)
{
image->quality=StringToUnsignedLong(options);
break;
}
if ((LocaleCompare(keyword,"quantum-format") == 0) ||
(LocaleCompare(keyword,"quantum:format") == 0))
{
ssize_t
format;
format=ParseCommandOption(MagickQuantumFormatOptions,
MagickFalse,options);
if (format < 0)
break;
quantum_format=(QuantumFormatType) format;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'r':
case 'R':
{
if (LocaleCompare(keyword,"red-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.red_primary.x=geometry_info.rho;
image->chromaticity.red_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.red_primary.y=
image->chromaticity.red_primary.x;
break;
}
if (LocaleCompare(keyword,"rendering-intent") == 0)
{
ssize_t
rendering_intent;
rendering_intent=ParseCommandOption(MagickIntentOptions,
MagickFalse,options);
if (rendering_intent < 0)
break;
image->rendering_intent=(RenderingIntent) rendering_intent;
break;
}
if (LocaleCompare(keyword,"resolution") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->x_resolution=geometry_info.rho;
image->y_resolution=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->y_resolution=image->x_resolution;
break;
}
if (LocaleCompare(keyword,"rows") == 0)
{
image->rows=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 's':
case 'S':
{
if (LocaleCompare(keyword,"scene") == 0)
{
image->scene=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 't':
case 'T':
{
if (LocaleCompare(keyword,"ticks-per-second") == 0)
{
image->ticks_per_second=(ssize_t) StringToLong(options);
break;
}
if (LocaleCompare(keyword,"tile-offset") == 0)
{
char
*geometry;
geometry=GetPageGeometry(options);
(void) ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
break;
}
if (LocaleCompare(keyword,"type") == 0)
{
ssize_t
type;
type=ParseCommandOption(MagickTypeOptions,MagickFalse,
options);
if (type < 0)
break;
image->type=(ImageType) type;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'u':
case 'U':
{
if (LocaleCompare(keyword,"units") == 0)
{
ssize_t
units;
units=ParseCommandOption(MagickResolutionOptions,
MagickFalse,options);
if (units < 0)
break;
image->units=(ResolutionType) units;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'v':
case 'V':
{
if (LocaleCompare(keyword,"version") == 0)
{
version=StringToDouble(options,(char **) NULL);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'w':
case 'W':
{
if (LocaleCompare(keyword,"white-point") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.white_point.y=
image->chromaticity.white_point.x;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
default:
{
(void) SetImageProperty(image,keyword,options);
break;
}
}
}
else
c=ReadBlobByte(image);
while (isspace((int) ((unsigned char) c)) != 0)
c=ReadBlobByte(image);
}
options=DestroyString(options);
(void) ReadBlobByte(image);
/*
Verify that required image information is defined.
*/
if ((LocaleCompare(id,"ImageMagick") != 0) || (image->depth > 128) ||
(image->storage_class == UndefinedClass) ||
(image->compression == UndefinedCompression) ||
(image->colorspace == UndefinedColorspace) ||
(image->columns == 0) || (image->rows == 0))
{
if (profiles != (LinkedListInfo *) NULL)
profiles=DestroyLinkedList(profiles,RelinquishMagickMemory);
if (image->previous == (Image *) NULL)
ThrowMIFFException(CorruptImageError,"ImproperImageHeader");
DeleteImageFromList(&image);
(void) ThrowMagickException(&image->exception,GetMagickModule(),
CorruptImageError,"ImproperImageHeader","`%s'",image->filename);
break;
}
if (image->montage != (char *) NULL)
{
register char
*p;
/*
Image directory.
*/
length=MaxTextExtent;
image->directory=AcquireString((char *) NULL);
p=image->directory;
do
{
*p='\0';
if ((strlen(image->directory)+MaxTextExtent) >= length)
{
/*
Allocate more memory for the image directory.
*/
length<<=1;
image->directory=(char *) ResizeQuantumMemory(image->directory,
length+MaxTextExtent,sizeof(*image->directory));
if (image->directory == (char *) NULL)
ThrowMIFFException(CorruptImageError,"UnableToReadImageData");
p=image->directory+strlen(image->directory);
}
c=ReadBlobByte(image);
if (c == EOF)
break;
*p++=(char) c;
} while (c != (int) '\0');
}
if (profiles != (LinkedListInfo *) NULL)
{
const char
*name;
StringInfo
*profile;
/*
Read image profile blobs.
*/
ResetLinkedListIterator(profiles);
name=(const char *) GetNextValueInLinkedList(profiles);
while (name != (const char *) NULL)
{
ssize_t
count;
length=ReadBlobMSBLong(image);
if ((MagickSizeType) length > GetBlobSize(image))
break;
profile=AcquireStringInfo(length);
if (profile == (StringInfo *) NULL)
break;
count=ReadBlob(image,length,GetStringInfoDatum(profile));
if (count != (ssize_t) length)
{
profile=DestroyStringInfo(profile);
break;
}
status=SetImageProfile(image,name,profile);
profile=DestroyStringInfo(profile);
if (status == MagickFalse)
break;
name=(const char *) GetNextValueInLinkedList(profiles);
}
profiles=DestroyLinkedList(profiles,RelinquishMagickMemory);
}
image->depth=GetImageQuantumDepth(image,MagickFalse);
if (image->storage_class == PseudoClass)
{
size_t
packet_size;
unsigned char
*colormap;
/*
Create image colormap.
*/
packet_size=(size_t) (3UL*image->depth/8UL);
if ((MagickSizeType) colors > GetBlobSize(image))
ThrowMIFFException(CorruptImageError,"InsufficientImageDataInFile");
if (((MagickSizeType) packet_size*colors) > GetBlobSize(image))
ThrowMIFFException(CorruptImageError,"InsufficientImageDataInFile");
status=AcquireImageColormap(image,colors != 0 ? colors : 256);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
if (colors != 0)
{
/*
Read image colormap from file.
*/
colormap=(unsigned char *) AcquireQuantumMemory(image->colors,
packet_size*sizeof(*colormap));
if (colormap == (unsigned char *) NULL)
ThrowMIFFException(ResourceLimitError,"MemoryAllocationFailed");
(void) ReadBlob(image,packet_size*image->colors,colormap);
p=colormap;
switch (image->depth)
{
default:
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
ThrowMIFFException(CorruptImageError,"ImageDepthNotSupported");
case 8:
{
unsigned char
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushCharPixel(p,&pixel);
image->colormap[i].red=ScaleCharToQuantum(pixel);
p=PushCharPixel(p,&pixel);
image->colormap[i].green=ScaleCharToQuantum(pixel);
p=PushCharPixel(p,&pixel);
image->colormap[i].blue=ScaleCharToQuantum(pixel);
}
break;
}
case 16:
{
unsigned short
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].red=ScaleShortToQuantum(pixel);
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].green=ScaleShortToQuantum(pixel);
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].blue=ScaleShortToQuantum(pixel);
}
break;
}
case 32:
{
unsigned int
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].red=ScaleLongToQuantum(pixel);
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].green=ScaleLongToQuantum(pixel);
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].blue=ScaleLongToQuantum(pixel);
}
break;
}
}
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
}
}
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
/*
Allocate image pixels.
*/
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowMIFFException(ResourceLimitError,"MemoryAllocationFailed");
if (quantum_format != UndefinedQuantumFormat)
{
status=SetQuantumFormat(image,quantum_info,quantum_format);
if (status == MagickFalse)
ThrowMIFFException(ResourceLimitError,"MemoryAllocationFailed");
}
packet_size=(size_t) (quantum_info->depth/8);
if (image->storage_class == DirectClass)
packet_size=(size_t) (3*quantum_info->depth/8);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
packet_size=quantum_info->depth/8;
if (image->matte != MagickFalse)
packet_size+=quantum_info->depth/8;
if (image->colorspace == CMYKColorspace)
packet_size+=quantum_info->depth/8;
if (image->compression == RLECompression)
packet_size++;
compress_extent=MagickMax(MagickMax(BZipMaxExtent(packet_size*
image->columns),LZMAMaxExtent(packet_size*image->columns)),
ZipMaxExtent(packet_size*image->columns));
compress_pixels=(unsigned char *) AcquireQuantumMemory(compress_extent,
sizeof(*compress_pixels));
if (compress_pixels == (unsigned char *) NULL)
ThrowMIFFException(ResourceLimitError,"MemoryAllocationFailed");
/*
Read image pixels.
*/
quantum_type=RGBQuantum;
if (image->matte != MagickFalse)
quantum_type=RGBAQuantum;
if (image->colorspace == CMYKColorspace)
{
quantum_type=CMYKQuantum;
if (image->matte != MagickFalse)
quantum_type=CMYKAQuantum;
}
if (IsGrayColorspace(image->colorspace) != MagickFalse)
{
quantum_type=GrayQuantum;
if (image->matte != MagickFalse)
quantum_type=GrayAlphaQuantum;
}
if (image->storage_class == PseudoClass)
{
quantum_type=IndexQuantum;
if (image->matte != MagickFalse)
quantum_type=IndexAlphaQuantum;
}
status=MagickTrue;
(void) memset(&pixel,0,sizeof(pixel));
#if defined(MAGICKCORE_BZLIB_DELEGATE)
(void) memset(&bzip_info,0,sizeof(bzip_info));
#endif
#if defined(MAGICKCORE_LZMA_DELEGATE)
(void) memset(&allocator,0,sizeof(allocator));
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
(void) memset(&zip_info,0,sizeof(zip_info));
#endif
switch (image->compression)
{
#if defined(MAGICKCORE_BZLIB_DELEGATE)
case BZipCompression:
{
int
code;
bzip_info.bzalloc=AcquireBZIPMemory;
bzip_info.bzfree=RelinquishBZIPMemory;
bzip_info.opaque=(void *) image;
code=BZ2_bzDecompressInit(&bzip_info,(int) image_info->verbose,
MagickFalse);
if (code != BZ_OK)
status=MagickFalse;
break;
}
#endif
#if defined(MAGICKCORE_LZMA_DELEGATE)
case LZMACompression:
{
int
code;
allocator.alloc=AcquireLZMAMemory;
allocator.free=RelinquishLZMAMemory;
allocator.opaque=(void *) image;
lzma_info=initialize_lzma;
lzma_info.allocator=(&allocator);
code=lzma_auto_decoder(&lzma_info,-1,0);
if (code != LZMA_OK)
status=MagickFalse;
break;
}
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
case LZWCompression:
case ZipCompression:
{
int
code;
zip_info.zalloc=AcquireZIPMemory;
zip_info.zfree=RelinquishZIPMemory;
zip_info.opaque=(voidpf) image;
code=inflateInit(&zip_info);
if (code != Z_OK)
status=MagickFalse;
break;
}
#endif
case RLECompression:
{
pixel.opacity=(Quantum) TransparentOpacity;
index=(IndexPacket) 0;
break;
}
default:
break;
}
pixels=GetQuantumPixels(quantum_info);
index=(IndexPacket) 0;
length=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
break;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
switch (image->compression)
{
#if defined(MAGICKCORE_BZLIB_DELEGATE)
case BZipCompression:
{
bzip_info.next_out=(char *) pixels;
bzip_info.avail_out=(unsigned int) (packet_size*image->columns);
do
{
int
code;
if (bzip_info.avail_in == 0)
{
bzip_info.next_in=(char *) compress_pixels;
length=(size_t) BZipMaxExtent(packet_size*image->columns);
if (version != 0.0)
length=(size_t) ReadBlobMSBLong(image);
if (length < compress_extent)
bzip_info.avail_in=(unsigned int) ReadBlob(image,length,
(unsigned char *) bzip_info.next_in);
if ((length > compress_extent) ||
((size_t) bzip_info.avail_in != length))
{
(void) BZ2_bzDecompressEnd(&bzip_info);
ThrowMIFFException(CorruptImageError,
"UnableToReadImageData");
}
}
code=BZ2_bzDecompress(&bzip_info);
if ((code != BZ_OK) && (code != BZ_STREAM_END))
{
status=MagickFalse;
break;
}
if (code == BZ_STREAM_END)
break;
} while (bzip_info.avail_out != 0);
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
break;
}
#endif
#if defined(MAGICKCORE_LZMA_DELEGATE)
case LZMACompression:
{
lzma_info.next_out=pixels;
lzma_info.avail_out=packet_size*image->columns;
do
{
int
code;
if (lzma_info.avail_in == 0)
{
lzma_info.next_in=compress_pixels;
length=(size_t) ReadBlobMSBLong(image);
if (length <= compress_extent)
lzma_info.avail_in=(unsigned int) ReadBlob(image,length,
(unsigned char *) lzma_info.next_in);
if ((length > compress_extent) ||
(lzma_info.avail_in != length))
{
lzma_end(&lzma_info);
ThrowMIFFException(CorruptImageError,
"UnableToReadImageData");
}
}
code=lzma_code(&lzma_info,LZMA_RUN);
if ((code != LZMA_OK) && (code != LZMA_STREAM_END))
{
status=MagickFalse;
break;
}
if (code == LZMA_STREAM_END)
break;
} while (lzma_info.avail_out != 0);
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
break;
}
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
case LZWCompression:
case ZipCompression:
{
zip_info.next_out=pixels;
zip_info.avail_out=(uInt) (packet_size*image->columns);
do
{
int
code;
if (zip_info.avail_in == 0)
{
zip_info.next_in=compress_pixels;
length=(size_t) ZipMaxExtent(packet_size*image->columns);
if (version != 0.0)
length=(size_t) ReadBlobMSBLong(image);
if (length <= compress_extent)
zip_info.avail_in=(unsigned int) ReadBlob(image,length,
zip_info.next_in);
if ((length > compress_extent) ||
((size_t) zip_info.avail_in != length))
{
(void) inflateEnd(&zip_info);
ThrowMIFFException(CorruptImageError,
"UnableToReadImageData");
}
}
code=inflate(&zip_info,Z_SYNC_FLUSH);
if ((code != Z_OK) && (code != Z_STREAM_END))
{
status=MagickFalse;
break;
}
if (code == Z_STREAM_END)
break;
} while (zip_info.avail_out != 0);
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
break;
}
#endif
case RLECompression:
{
for (x=0; x < (ssize_t) image->columns; x++)
{
if (length == 0)
{
count=ReadBlob(image,packet_size,pixels);
if (count != packet_size)
ThrowMIFFException(CorruptImageError,"UnableToReadImageData");
PushRunlengthPacket(image,pixels,&length,&pixel,&index);
}
length--;
if ((image->storage_class == PseudoClass) ||
(image->colorspace == CMYKColorspace))
SetPixelIndex(indexes+x,index);
SetPixelRed(q,pixel.red);
SetPixelGreen(q,pixel.green);
SetPixelBlue(q,pixel.blue);
SetPixelOpacity(q,pixel.opacity);
q++;
}
break;
}
default:
{
count=ReadBlob(image,packet_size*image->columns,pixels);
if (count != (packet_size*image->columns))
ThrowMIFFException(CorruptImageError,"UnableToReadImageData");
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
break;
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
SetQuantumImageType(image,quantum_type);
switch (image->compression)
{
#if defined(MAGICKCORE_BZLIB_DELEGATE)
case BZipCompression:
{
int
code;
if (version == 0.0)
{
MagickOffsetType
offset;
offset=SeekBlob(image,-((MagickOffsetType) bzip_info.avail_in),
SEEK_CUR);
if (offset < 0)
{
(void) BZ2_bzDecompressEnd(&bzip_info);
ThrowMIFFException(CorruptImageError,"ImproperImageHeader");
}
}
code=BZ2_bzDecompressEnd(&bzip_info);
if (code != BZ_OK)
status=MagickFalse;
break;
}
#endif
#if defined(MAGICKCORE_LZMA_DELEGATE)
case LZMACompression:
{
int
code;
code=lzma_code(&lzma_info,LZMA_FINISH);
if ((code != LZMA_STREAM_END) && (code != LZMA_OK))
status=MagickFalse;
lzma_end(&lzma_info);
break;
}
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
case LZWCompression:
case ZipCompression:
{
int
code;
if (version == 0.0)
{
MagickOffsetType
offset;
offset=SeekBlob(image,-((MagickOffsetType) zip_info.avail_in),
SEEK_CUR);
if (offset < 0)
{
(void) inflateEnd(&zip_info);
ThrowMIFFException(CorruptImageError,"ImproperImageHeader");
}
}
code=inflateEnd(&zip_info);
if (code != Z_OK)
status=MagickFalse;
break;
}
#endif
default:
break;
}
quantum_info=DestroyQuantumInfo(quantum_info);
compress_pixels=(unsigned char *) RelinquishMagickMemory(compress_pixels);
if (((y != (ssize_t) image->rows)) || (status == MagickFalse))
{
image=DestroyImageList(image);
return((Image *) NULL);
}
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
do
{
c=ReadBlobByte(image);
} while ((isgraph(c) == MagickFalse) && (c != EOF));
if (c != EOF)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
{
status=MagickFalse;
break;
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (c != EOF);
(void) CloseBlob(image);
if (status == MagickFalse)
return(DestroyImageList(image));
return(GetFirstImageInList(image));
} | 0 | [
"CWE-772"
] | ImageMagick6 | ae3eecad2f59e27123c1a6c891be75d06fc03656 | 46,396,000,491,935,240,000,000,000,000,000,000,000 | 1,305 | https://github.com/ImageMagick/ImageMagick/issues/1191 |
t2p_write_advance_directory(T2P* t2p, TIFF* output)
{
t2p_disable(output);
if(!TIFFWriteDirectory(output)){
TIFFError(TIFF2PDF_MODULE,
"Error writing virtual directory to output PDF %s",
TIFFFileName(output));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p_enable(output);
return;
} | 0 | [
"CWE-787"
] | libtiff | 7be2e452ddcf6d7abca88f41d3761e6edab72b22 | 286,256,393,321,516,600,000,000,000,000,000,000,000 | 13 | tiff2pdf.c: properly calculate datasize when saving to JPEG YCbCr
fixes #220 |
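A hedged sketch of why datasize needs YCbCr awareness: with chroma
subsampling factors (hs, vs) the two chroma planes are smaller than luma,
so the raw size is not simply width*height*3. This is the general formula,
not necessarily the exact expression the patch uses.

#include <stddef.h>

/* Luma is full resolution; each chroma plane is reduced by the
 * horizontal and vertical subsampling factors, rounding up. */
static size_t ycbcr_datasize(size_t w, size_t h, unsigned hs, unsigned vs)
{
    size_t luma   = w * h;
    size_t chroma = ((w + hs - 1) / hs) * ((h + vs - 1) / vs);
    return luma + 2 * chroma;
}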
int show_line(FILE *stream, struct name_list **acl_names, acl_t acl,
acl_entry_t *acl_ent, const char *acl_mask,
struct name_list **dacl_names, acl_t dacl,
acl_entry_t *dacl_ent, const char *dacl_mask)
{
acl_tag_t tag_type;
const char *tag, *name;
char acl_perm[ACL_PERMS+1], dacl_perm[ACL_PERMS+1];
if (acl) {
acl_get_tag_type(*acl_ent, &tag_type);
name = (*acl_names)->name;
} else {
acl_get_tag_type(*dacl_ent, &tag_type);
name = (*dacl_names)->name;
}
switch(tag_type) {
case ACL_USER_OBJ:
tag = "USER";
break;
case ACL_USER:
tag = "user";
break;
case ACL_GROUP_OBJ:
tag = "GROUP";
break;
case ACL_GROUP:
tag = "group";
break;
case ACL_MASK:
tag = "mask";
break;
case ACL_OTHER:
tag = "other";
break;
default:
return -1;
}
memset(acl_perm, ' ', ACL_PERMS);
acl_perm[ACL_PERMS] = '\0';
if (acl_ent) {
acl_perm_str(*acl_ent, acl_perm);
if (tag_type != ACL_USER_OBJ && tag_type != ACL_OTHER &&
tag_type != ACL_MASK)
apply_mask(acl_perm, acl_mask);
}
memset(dacl_perm, ' ', ACL_PERMS);
dacl_perm[ACL_PERMS] = '\0';
if (dacl_ent) {
acl_perm_str(*dacl_ent, dacl_perm);
if (tag_type != ACL_USER_OBJ && tag_type != ACL_OTHER &&
tag_type != ACL_MASK)
apply_mask(dacl_perm, dacl_mask);
}
fprintf(stream, "%-5s %*s %*s %*s\n",
tag, -names_width, name,
-(int)ACL_PERMS, acl_perm,
-(int)ACL_PERMS, dacl_perm);
if (acl_names) {
acl_get_entry(acl, ACL_NEXT_ENTRY, acl_ent);
(*acl_names) = (*acl_names)->next;
}
if (dacl_names) {
acl_get_entry(dacl, ACL_NEXT_ENTRY, dacl_ent);
(*dacl_names) = (*dacl_names)->next;
}
return 0;
} | 0 | [] | acl | 63451a06b7484d220750ed8574d3ee84e156daf5 | 161,065,813,611,095,890,000,000,000,000,000,000,000 | 72 | Make sure that getfacl -R only calls stat(2) on symlinks when it needs to
This fixes http://oss.sgi.com/bugzilla/show_bug.cgi?id=790
"getfacl follows symlinks, even without -L". |
static xmlNodePtr to_xml_union(encodeTypePtr enc, zval *data, int style, xmlNodePtr parent TSRMLS_DC) {
/*FIXME*/
return to_xml_list(enc,data,style, parent TSRMLS_CC);
} | 0 | [
"CWE-19"
] | php-src | c8eaca013a3922e8383def6158ece2b63f6ec483 | 1,821,425,754,009,914,300,000,000,000,000,000,000 | 4 | Added type checks |
FizzHandshakeParam(bool argCHLOSync, bool argCFINSync, bool argAcceptZeroRtt)
: chloSync(argCHLOSync),
cfinSync(argCFINSync),
acceptZeroRtt(argAcceptZeroRtt) {} | 0 | [
"CWE-617",
"CWE-703"
] | mvfst | a67083ff4b8dcbb7ee2839da6338032030d712b0 | 158,212,539,899,513,860,000,000,000,000,000,000,000 | 4 | Close connection if we derive an extra 1-rtt write cipher
Summary: Fixes CVE-2021-24029
Reviewed By: mjoras, lnicco
Differential Revision: D26613890
fbshipit-source-id: 19bb2be2c731808144e1a074ece313fba11f1945 |
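The guard the message describes, reduced to a standalone predicate; the real
fix lives in C++ in the fizz handshake layer, so the struct and names here
are illustrative.

#include <stdbool.h>
#include <stddef.h>

struct quic_conn_sketch {
    const void *one_rtt_write_cipher;   /* non-NULL once derived */
};

/* Deriving a second 1-RTT write cipher is treated as a protocol
 * violation: the caller should close the connection instead of
 * replacing the cipher (CVE-2021-24029). */
static bool must_close_on_extra_one_rtt_cipher(const struct quic_conn_sketch *c)
{
    return c->one_rtt_write_cipher != NULL;
}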
static void get_socket_name( char* buf, int len )
{
char* dpy = g_strdup(g_getenv("DISPLAY"));
if(dpy && *dpy)
{
char* p = strchr(dpy, ':');
for(++p; *p && *p != '.' && *p != '\n';)
++p;
if(*p)
*p = '\0';
}
g_snprintf( buf, len, "%s/.menu-cached-%s-%s", g_get_tmp_dir(),
dpy ? dpy : ":0", g_get_user_name() );
g_free(dpy);
} | 1 | [
"CWE-20"
] | menu-cache | 56f66684592abf257c4004e6e1fff041c64a12ce | 276,299,611,376,421,900,000,000,000,000,000,000,000 | 15 | Fix potential access violation, use runtime user dir instead of tmp dir.
Note: it limits libmenu-cache compatibility to menu-cached >= 0.7.0. |
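A minimal sketch of the fix direction named above: build the socket path
under the per-user runtime directory. g_get_user_runtime_dir() is a real
GLib call (2.28+); the socket file name is illustrative.

#include <glib.h>

/* Build the socket path under XDG_RUNTIME_DIR (user-owned, mode 0700)
 * instead of the world-writable temp directory, removing the race on
 * a predictable /tmp name. */
static void get_socket_name_fixed(char *buf, int len)
{
    g_snprintf(buf, len, "%s/menu-cached-socket",
               g_get_user_runtime_dir());
}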
static GPtrArray *textview_scan_header(TextView *textview, FILE *fp)
{
gchar buf[BUFFSIZE];
GPtrArray *headers, *sorted_headers;
GSList *disphdr_list;
Header *header;
gint i;
cm_return_val_if_fail(fp != NULL, NULL);
if (prefs_common.show_all_headers) {
headers = procheader_get_header_array_asis(fp);
sorted_headers = g_ptr_array_new();
for (i = 0; i < headers->len; i++) {
header = g_ptr_array_index(headers, i);
if (!procheader_header_is_internal(header->name))
g_ptr_array_add(sorted_headers, header);
else
procheader_header_free(header);
}
g_ptr_array_free(headers, TRUE);
return sorted_headers;
}
if (!prefs_common.display_header) {
while (claws_fgets(buf, sizeof(buf), fp) != NULL)
if (buf[0] == '\r' || buf[0] == '\n') break;
return NULL;
}
headers = procheader_get_header_array_asis(fp);
sorted_headers = g_ptr_array_new();
for (disphdr_list = prefs_common.disphdr_list; disphdr_list != NULL;
disphdr_list = disphdr_list->next) {
DisplayHeaderProp *dp =
(DisplayHeaderProp *)disphdr_list->data;
for (i = 0; i < headers->len; i++) {
header = g_ptr_array_index(headers, i);
if (procheader_headername_equal(header->name,
dp->name)) {
if (dp->hidden)
procheader_header_free(header);
else
g_ptr_array_add(sorted_headers, header);
g_ptr_array_remove_index(headers, i);
i--;
}
}
}
if (prefs_common.show_other_header) {
for (i = 0; i < headers->len; i++) {
header = g_ptr_array_index(headers, i);
if (!procheader_header_is_internal(header->name)) {
g_ptr_array_add(sorted_headers, header);
} else {
procheader_header_free(header);
}
}
g_ptr_array_free(headers, TRUE);
} else
procheader_header_array_destroy(headers);
return sorted_headers;
} | 0 | [
"CWE-601"
] | claws | ac286a71ed78429e16c612161251b9ea90ccd431 | 119,301,333,339,070,880,000,000,000,000,000,000,000 | 71 | harden link checker before accepting click |
CopyFileToDirectory(const char* srcPath, const char* destPath,
const char* fileName)
{
char command[1024];
int forkExecResult;
snprintf(command, sizeof(command), "/bin/cp %s/%s %s/%s.tmp", srcPath,
fileName, destPath, fileName);
command[sizeof(command) - 1] = '\0';
forkExecResult = ForkExecAndWaitCommand(command);
if (forkExecResult != 0) {
SetDeployError("Error while copying file %s: %s", fileName,
strerror(errno));
return false;
}
snprintf(command, sizeof(command), "/bin/mv -f %s/%s.tmp %s/%s", destPath,
fileName, destPath, fileName);
command[sizeof(command) - 1] = '\0';
forkExecResult = ForkExecAndWaitCommand(command);
if (forkExecResult != 0) {
SetDeployError("Error while renaming temp file %s: %s", fileName,
strerror(errno));
return false;
}
return true;
} | 0 | [
"CWE-362"
] | open-vm-tools | 22e58289f71232310d30cf162b83b5151a937bac | 26,039,836,255,867,295,000,000,000,000,000,000,000 | 26 | randomly generate tmp directory name |
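A standalone sketch of the randomized-directory idea from the message, using
POSIX mkdtemp(); the template path is an assumption.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Create an unpredictable staging directory instead of copying into a
 * fixed, guessable path, closing the race window. */
static char *make_staging_dir(void)
{
    char tmpl[] = "/tmp/imc.XXXXXX";   /* mkdtemp rewrites the X's */

    if (mkdtemp(tmpl) == NULL) {
        perror("mkdtemp");
        return NULL;
    }
    return strdup(tmpl);               /* caller frees; dir mode is 0700 */
}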
static struct fwnet_peer *fwnet_peer_find_by_guid(struct fwnet_device *dev,
u64 guid)
{
struct fwnet_peer *peer;
list_for_each_entry(peer, &dev->peer_list, peer_link)
if (peer->guid == guid)
return peer;
return NULL;
} | 0 | [
"CWE-119",
"CWE-284",
"CWE-787"
] | linux | 667121ace9dbafb368618dbabcf07901c962ddac | 65,577,800,922,772,640,000,000,000,000,000,000,000 | 11 | firewire: net: guard against rx buffer overflows
The IP-over-1394 driver firewire-net lacked input validation when
handling incoming fragmented datagrams. A maliciously formed fragment
with a correspondingly large datagram_offset would cause a memcpy past the
datagram buffer.
So, drop any packets carrying a fragment with offset + length larger
than datagram_size.
In addition, ensure that
- GASP header, unfragmented encapsulation header, or fragment
encapsulation header actually exists before we access it,
- the encapsulated datagram or fragment is of nonzero size.
Reported-by: Eyal Itkin <[email protected]>
Reviewed-by: Eyal Itkin <[email protected]>
Fixes: CVE 2016-8633
Cc: [email protected]
Signed-off-by: Stefan Richter <[email protected]> |
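The bounds check from the message as a standalone predicate; field names
mirror the commit message rather than the exact driver structs, and the
comparisons are ordered to avoid unsigned underflow.

#include <stdbool.h>
#include <stddef.h>

/* Reject any fragment whose offset + length would run past the
 * datagram buffer; zero-length fragments are dropped too. */
static bool fragment_fits(size_t fg_off, size_t fg_len, size_t datagram_size)
{
    return fg_len > 0
        && fg_len <= datagram_size
        && fg_off <= datagram_size - fg_len;
}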
RuntimeShape() : size_(0) {} | 0 | [
"CWE-125",
"CWE-787"
] | tensorflow | 8ee24e7949a203d234489f9da2c5bf45a7d5157d | 124,986,740,443,484,130,000,000,000,000,000,000,000 | 1 | [tflite] Ensure `MatchingDim` does not allow buffer overflow.
We check in `MatchingDim` that both arguments have the same dimensionality, however that is a `DCHECK` only enabled if building in debug mode. Hence, it could be possible to cause buffer overflows by passing in a tensor with larger dimensions as the second argument. To fix, we now make `MatchingDim` return the minimum of the two sizes.
A much better fix would be to return a status object but that requires refactoring a large part of the codebase for minor benefits.
PiperOrigin-RevId: 332526127
Change-Id: If627d0d2c80a685217b6e0d1e64b0872dbf1c5e4 |
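The hardened helper reduced to its core; the real MatchingDim is a C++
template over RuntimeShape arguments, so this C shape is only illustrative.

/* Return the smaller of two reported dimensions so a mismatched second
 * argument cannot drive an overread past the first buffer. */
static int matching_dim(int size1, int size2)
{
    return size1 < size2 ? size1 : size2;
}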
QPDF_Stream::getLength() const
{
return this->length;
} | 0 | [
"CWE-787"
] | qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 89,361,748,551,688,240,000,000,000,000,000,000,000 | 4 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
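A range-checked narrowing conversion in the spirit of the commit; qpdf
itself uses C++ helpers for this, so the C shape below is illustrative.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Convert a long long to int, failing loudly instead of silently
 * truncating or wrapping. */
static int to_int_checked(long long v)
{
    if (v < INT_MIN || v > INT_MAX) {
        fprintf(stderr, "integer out of range: %lld\n", v);
        abort();
    }
    return (int)v;
}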
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
return queue_var_show(queue_logical_block_size(q), page);
} | 0 | [
"CWE-416"
] | linux | c3e2219216c92919a6bd1711f340f5faa98695e6 | 200,538,897,225,175,930,000,000,000,000,000,000,000 | 4 | block: free sched's request pool in blk_cleanup_queue
In theory, the IO scheduler belongs to the request queue, and the request pool
of sched tags belongs to the request queue too.
However, the current tags allocation interfaces are re-used for both
driver tags and sched tags, and driver tags are definitely host wide
and don't belong to any request queue; the same holds for their request pool.
So we need tagset instance for freeing request of sched tags.
Meantime, blk_mq_free_tag_set() often follows blk_cleanup_queue() in the
non-BLK_MQ_F_TAG_SHARED case, which requires that the request pool of sched
tags be freed before calling blk_mq_free_tag_set().
Commit 47cdee29ef9d94e ("block: move blk_exit_queue into __blk_release_queue")
moves blk_exit_queue into __blk_release_queue for simplying the fast
path in generic_make_request(), then causes oops during freeing requests
of sched tags in __blk_release_queue().
Fix the above issue by moving the freeing of the sched tags request pool into
blk_cleanup_queue(); this is safe because the queue has been frozen and there
are no in-queue requests at that time. Freeing sched tags has to be kept in the
queue's release handler because there might be uncompleted dispatch activity
which might refer to sched tags.
Cc: Bart Van Assche <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Fixes: 47cdee29ef9d94e485eb08f962c74943023a5271 ("block: move blk_exit_queue into __blk_release_queue")
Tested-by: Yi Zhang <[email protected]>
Reported-by: kernel test robot <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
static int kvm_init_mmu_notifier(struct kvm *kvm)
{
kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
} | 0 | [
"CWE-459"
] | linux | 683412ccf61294d727ead4a73d97397396e69a6b | 131,229,651,083,134,550,000,000,000,000,000,000,000 | 5 | KVM: SEV: add cache flush to solve SEV cache incoherency issues
Flush the CPU caches when memory is reclaimed from an SEV guest (where
reclaim also includes it being unmapped from KVM's memslots). Due to lack
of coherency for SEV encrypted memory, failure to flush results in silent
data corruption if userspace is malicious/broken and doesn't ensure SEV
guest memory is properly pinned and unpinned.
Cache coherency is not enforced across the VM boundary in SEV (AMD APM
vol.2 Section 15.34.7). Confidential cachelines generated by confidential
VM guests have to be explicitly flushed on the host side. If a memory page
containing dirty confidential cachelines was released by VM and reallocated
to another user, the cachelines may corrupt the new user at a later time.
KVM takes a shortcut by assuming all confidential memory remain pinned
until the end of VM lifetime. Therefore, KVM does not flush cache at
mmu_notifier invalidation events. Because of this incorrect assumption and
the lack of cache flushing, malicous userspace can crash the host kernel:
creating a malicious VM and continuously allocates/releases unpinned
confidential memory pages when the VM is running.
Add cache flush operations to mmu_notifier operations to ensure that any
physical memory leaving the guest VM get flushed. In particular, hook
mmu_notifier_invalidate_range_start and mmu_notifier_release events and
flush cache accordingly. The hook after releasing the mmu lock to avoid
contention with other vCPUs.
Cc: [email protected]
Suggested-by: Sean Christpherson <[email protected]>
Reported-by: Mingwei Zhang <[email protected]>
Signed-off-by: Mingwei Zhang <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
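The shape of the hooks described above, with the arch flush callback stubbed
out; none of these names are real kernel symbols.

struct kvm_sketch { int unused; };

/* Stand-in for the arch cache-flush callback the commit adds; a real
 * implementation would flush (e.g. wbinvd) the reclaimed range. */
static void flush_encrypted_range(struct kvm_sketch *kvm,
                                  unsigned long start, unsigned long end)
{
    (void)kvm; (void)start; (void)end;
}

/* After the existing invalidation work (done under the mmu lock),
 * flush caches for the reclaimed range so dirty confidential
 * cachelines cannot leak to the pages' next owner. */
static void invalidate_range_start_sketch(struct kvm_sketch *kvm,
                                          unsigned long start,
                                          unsigned long end)
{
    /* ... unmap and invalidate as before ... */
    flush_encrypted_range(kvm, start, end);   /* hook added by the fix */
}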
PHP_FUNCTION(openssl_pkey_export)
{
struct php_x509_request req;
zval ** zpkey, * args = NULL, *out;
char * passphrase = NULL;
int passphrase_len = 0;
long key_resource = -1;
int pem_write = 0;
EVP_PKEY * key;
BIO * bio_out = NULL;
const EVP_CIPHER * cipher;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Zz|s!a!", &zpkey, &out, &passphrase, &passphrase_len, &args) == FAILURE) {
return;
}
RETVAL_FALSE;
key = php_openssl_evp_from_zval(zpkey, 0, passphrase, 0, &key_resource TSRMLS_CC);
if (key == NULL) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "cannot get key from parameter 1");
RETURN_FALSE;
}
PHP_SSL_REQ_INIT(&req);
if (PHP_SSL_REQ_PARSE(&req, args) == SUCCESS) {
bio_out = BIO_new(BIO_s_mem());
if (passphrase && req.priv_key_encrypt) {
if (req.priv_key_encrypt_cipher) {
cipher = req.priv_key_encrypt_cipher;
} else {
cipher = (EVP_CIPHER *) EVP_des_ede3_cbc();
}
} else {
cipher = NULL;
}
switch (EVP_PKEY_type(key->type)) {
#ifdef HAVE_EVP_PKEY_EC
case EVP_PKEY_EC:
pem_write = PEM_write_bio_ECPrivateKey(bio_out, EVP_PKEY_get1_EC_KEY(key), cipher, (unsigned char *)passphrase, passphrase_len, NULL, NULL);
break;
#endif
default:
pem_write = PEM_write_bio_PrivateKey(bio_out, key, cipher, (unsigned char *)passphrase, passphrase_len, NULL, NULL);
break;
}
if (pem_write) {
/* Success!
* If returning the output as a string, do so now */
char * bio_mem_ptr;
long bio_mem_len;
RETVAL_TRUE;
bio_mem_len = BIO_get_mem_data(bio_out, &bio_mem_ptr);
zval_dtor(out);
ZVAL_STRINGL(out, bio_mem_ptr, bio_mem_len, 1);
}
}
PHP_SSL_REQ_DISPOSE(&req);
if (key_resource == -1 && key) {
EVP_PKEY_free(key);
}
if (bio_out) {
BIO_free(bio_out);
}
} | 0 | [
"CWE-754"
] | php-src | 89637c6b41b510c20d262c17483f582f115c66d6 | 55,147,275,155,091,310,000,000,000,000,000,000,000 | 72 | Fix bug #74651 - check EVP_SealInit as it can return -1 |
define_destination_uri (WebKitDownload *download,
gboolean temporary)
{
char *tmp_dir;
char *destination_filename;
char *destination_uri;
const char *suggested_filename;
suggested_filename = webkit_download_get_suggested_filename (download);
/* If we are not doing an automatic download, use a temporary file
* to start the download while we ask the user for the location to
* where the file must go.
*/
if (temporary)
tmp_dir = g_build_filename (ephy_dot_dir (), "downloads", NULL);
else
tmp_dir = ephy_file_get_downloads_dir ();
/* Make sure the download directory exists */
if (g_mkdir_with_parents (tmp_dir, 0700) == -1) {
g_critical ("Could not create downloads directory \"%s\": %s",
tmp_dir, strerror (errno));
g_free (tmp_dir);
return FALSE;
}
destination_filename = g_build_filename (tmp_dir, suggested_filename, NULL);
if (g_file_test (destination_filename, G_FILE_TEST_EXISTS)) {
int i = 1;
const char *dot_pos;
gssize position;
char *serial = NULL;
GString *tmp_filename;
dot_pos = parse_extension (destination_filename);
if (dot_pos)
position = dot_pos - destination_filename;
else
position = strlen (destination_filename);
tmp_filename = g_string_new (NULL);
do {
serial = g_strdup_printf ("(%d)", i++);
g_string_assign (tmp_filename, destination_filename);
g_string_insert (tmp_filename, position, serial);
g_free (serial);
} while (g_file_test (tmp_filename->str, G_FILE_TEST_EXISTS));
destination_filename = g_strdup (tmp_filename->str);
g_string_free (tmp_filename, TRUE);
}
destination_uri = g_strconcat ("file://", destination_filename, NULL);
LOG ("define_destination_uri: Downloading to %s", destination_filename);
webkit_download_set_destination_uri (download, destination_uri);
g_free (tmp_dir);
g_free (destination_filename);
g_free (destination_uri);
return TRUE;
} | 0 | [] | epiphany | 3e0f7dea754381c5ad11a06ccc62eb153382b498 | 61,713,900,972,051,760,000,000,000,000,000,000,000 | 69 | Report broken certs through the padlock icon
This uses a new feature in libsoup that reports through a
SoupMessageFlag whether the message is talking to a server that has a
trusted certificate.
Bug #600663 |
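Reading the flag the message refers to, assuming libsoup 2.30+; the padlock
UI can key off SOUP_MESSAGE_CERTIFICATE_TRUSTED on a finished message. The
function shape is illustrative.

#include <libsoup/soup.h>

/* True when libsoup validated the server's TLS certificate for this
 * message. */
static gboolean message_has_trusted_cert(SoupMessage *msg)
{
    return (soup_message_get_flags(msg) & SOUP_MESSAGE_CERTIFICATE_TRUSTED) != 0;
}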
static int test_root(unsigned int a, unsigned int b)
{
while (1) {
if (a < b)
return 0;
if (a == b)
return 1;
if (a % b)
return 0;
a = a / b;
}
} | 0 | [] | e2fsprogs | f66e6ce4446738c2c7f43d41988a3eb73347e2f5 | 26,556,227,739,448,324,000,000,000,000,000,000,000 | 12 | libext2fs: avoid buffer overflow if s_first_meta_bg is too big
If s_first_meta_bg is greater than the number of block group
descriptor blocks, then reading or writing the block group descriptors
will end up overruning the memory buffer allocated for the
descriptors. Fix this by limiting first_meta_bg to no more than
fs->desc_blocks. This doesn't correct the bad s_first_meta_bg value,
but it avoids causing the e2fsprogs userspace programs from
potentially crashing.
Signed-off-by: Theodore Ts'o <[email protected]> |
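The clamp from the message as a standalone shape; field names follow ext2fs
conventions but the struct is pared down for illustration.

struct fs_sketch {
    unsigned int s_first_meta_bg;   /* from the superblock */
    unsigned int desc_blocks;       /* size of the descriptor buffer */
};

/* Never let first_meta_bg exceed the number of descriptor blocks
 * actually allocated, so group-descriptor reads and writes stay in
 * bounds even when the superblock value is bogus. */
static unsigned int clamped_first_meta_bg(const struct fs_sketch *fs)
{
    if (fs->s_first_meta_bg > fs->desc_blocks)
        return fs->desc_blocks;
    return fs->s_first_meta_bg;
}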
GF_Err trak_box_write(GF_Box *s, GF_BitStream *bs)
{
return gf_isom_box_write_header(s, bs);
} | 0 | [
"CWE-787"
] | gpac | 77510778516803b7f7402d7423c6d6bef50254c3 | 328,952,545,733,833,000,000,000,000,000,000,000,000 | 4 | fixed #2255 |
R_API void r_bin_java_interface_free(void /*RBinJavaInterfaceInfo*/ *o) {
RBinJavaInterfaceInfo *obj = o;
if (obj) {
free (obj->name);
free (obj);
}
} | 0 | [
"CWE-119",
"CWE-788"
] | radare2 | 6c4428f018d385fc80a33ecddcb37becea685dd5 | 31,851,381,158,874,505,000,000,000,000,000,000,000 | 7 | Improve boundary checks to fix oobread segfaults ##crash
* Reported by Cen Zhang via huntr.dev
* Reproducer: bins/fuzzed/javaoob-havoc.class |
ImagingPcdDecode(Imaging im, ImagingCodecState state, UINT8* buf, int bytes)
{
int x;
int chunk;
UINT8* out;
UINT8* ptr;
ptr = buf;
chunk = 3 * state->xsize;
for (;;) {
/* We need data for two full lines before we can do anything */
if (bytes < chunk)
return ptr - buf;
/* Unpack first line */
out = state->buffer;
for (x = 0; x < state->xsize; x++) {
out[0] = ptr[x];
out[1] = ptr[(x+4*state->xsize)/2];
out[2] = ptr[(x+5*state->xsize)/2];
out += 3;
}
state->shuffle((UINT8*) im->image[state->y],
state->buffer, state->xsize);
if (++state->y >= state->ysize)
return -1; /* This can hardly happen */
/* Unpack second line */
out = state->buffer;
for (x = 0; x < state->xsize; x++) {
out[0] = ptr[x+state->xsize];
out[1] = ptr[(x+4*state->xsize)/2];
out[2] = ptr[(x+5*state->xsize)/2];
out += 3;
}
state->shuffle((UINT8*) im->image[state->y],
state->buffer, state->xsize);
if (++state->y >= state->ysize)
return -1;
ptr += chunk;
bytes -= chunk;
}
} | 0 | [
"CWE-119",
"CWE-787"
] | Pillow | ae453aa18b66af54e7ff716f4ccb33adca60afd4 | 166,336,336,821,992,650,000,000,000,000,000,000,000 | 52 | PCD decoder overruns the shuffle buffer, Fixes #568 |
static int asn1_item_embed_d2i(ASN1_VALUE **pval, const unsigned char **in,
long len, const ASN1_ITEM *it,
int tag, int aclass, char opt, ASN1_TLC *ctx)
{
const ASN1_TEMPLATE *tt, *errtt = NULL;
const ASN1_EXTERN_FUNCS *ef;
const ASN1_AUX *aux = it->funcs;
ASN1_aux_cb *asn1_cb;
const unsigned char *p = NULL, *q;
unsigned char oclass;
char seq_eoc, seq_nolen, cst, isopt;
long tmplen;
int i;
int otag;
int ret = 0;
ASN1_VALUE **pchptr;
if (!pval)
return 0;
if (aux && aux->asn1_cb)
asn1_cb = aux->asn1_cb;
else
asn1_cb = 0;
switch (it->itype) {
case ASN1_ITYPE_PRIMITIVE:
if (it->templates) {
/*
* tagging or OPTIONAL is currently illegal on an item template
* because the flags can't get passed down. In practice this
* isn't a problem: we include the relevant flags from the item
* template in the template itself.
*/
if ((tag != -1) || opt) {
ASN1err(ASN1_F_ASN1_ITEM_EMBED_D2I,
ASN1_R_ILLEGAL_OPTIONS_ON_ITEM_TEMPLATE);
goto err;
}
return asn1_template_ex_d2i(pval, in, len,
it->templates, opt, ctx);
}
return asn1_d2i_ex_primitive(pval, in, len, it,
tag, aclass, opt, ctx);
case ASN1_ITYPE_MSTRING:
p = *in;
/* Just read in tag and class */
ret = asn1_check_tlen(NULL, &otag, &oclass, NULL, NULL,
&p, len, -1, 0, 1, ctx);
if (!ret) {
ASN1err(ASN1_F_ASN1_ITEM_EMBED_D2I, ERR_R_NESTED_ASN1_ERROR);
goto err;
}
/* Must be UNIVERSAL class */
if (oclass != V_ASN1_UNIVERSAL) {
/* If OPTIONAL, assume this is OK */
if (opt)
return -1;
ASN1err(ASN1_F_ASN1_ITEM_EMBED_D2I, ASN1_R_MSTRING_NOT_UNIVERSAL);
goto err;
}
/* Check tag matches bit map */
if (!(ASN1_tag2bit(otag) & it->utype)) {
/* If OPTIONAL, assume this is OK */
if (opt)
return -1;
ASN1err(ASN1_F_ASN1_ITEM_EMBED_D2I, ASN1_R_MSTRING_WRONG_TAG);
goto err;
}
return asn1_d2i_ex_primitive(pval, in, len, it, otag, 0, 0, ctx);
case ASN1_ITYPE_EXTERN:
/* Use new style d2i */
ef = it->funcs;
return ef->asn1_ex_d2i(pval, in, len, it, tag, aclass, opt, ctx);
case ASN1_ITYPE_CHOICE:
if (asn1_cb && !asn1_cb(ASN1_OP_D2I_PRE, pval, it, NULL))
goto auxerr;
if (*pval) {
/* Free up and zero CHOICE value if initialised */
i = asn1_get_choice_selector(pval, it);
if ((i >= 0) && (i < it->tcount)) {
tt = it->templates + i;
pchptr = asn1_get_field_ptr(pval, tt);
asn1_template_free(pchptr, tt);
asn1_set_choice_selector(pval, -1, it);
}
} else if (!ASN1_item_ex_new(pval, it)) {
ASN1err(ASN1_F_ASN1_ITEM_EMBED_D2I, ERR_R_NESTED_ASN1_ERROR);
goto err;
}
/* CHOICE type, try each possibility in turn */
p = *in;
for (i = 0, tt = it->templates; i < it->tcount; i++, tt++) {
pchptr = asn1_get_field_ptr(pval, tt);
/*
* We mark field as OPTIONAL so its absence can be recognised.
*/
ret = asn1_template_ex_d2i(pchptr, &p, len, tt, 1, ctx);
/* If field not present, try the next one */
if (ret == -1)
continue;
/* If positive return, read OK, break loop */
if (ret > 0)
break;
/*
* Must be an ASN1 parsing error.
* Free up any partial choice value
*/
asn1_template_free(pchptr, tt);
errtt = tt;
ASN1err(ASN1_F_ASN1_ITEM_EMBED_D2I, ERR_R_NESTED_ASN1_ERROR);
goto err;
}
/* Did we fall off the end without reading anything? */
if (i == it->tcount) {
/* If OPTIONAL, this is OK */
if (opt) {
/* Free and zero it */
ASN1_item_ex_free(pval, it);
return -1;
}
ASN1err(ASN1_F_ASN1_ITEM_EMBED_D2I, ASN1_R_NO_MATCHING_CHOICE_TYPE);
goto err;
}
asn1_set_choice_selector(pval, i, it);
if (asn1_cb && !asn1_cb(ASN1_OP_D2I_POST, pval, it, NULL))
goto auxerr;
*in = p;
return 1;
case ASN1_ITYPE_NDEF_SEQUENCE:
case ASN1_ITYPE_SEQUENCE:
p = *in;
tmplen = len;
/* If no IMPLICIT tagging set to SEQUENCE, UNIVERSAL */
if (tag == -1) {
tag = V_ASN1_SEQUENCE;
aclass = V_ASN1_UNIVERSAL;
}
/* Get SEQUENCE length and update len, p */
ret = asn1_check_tlen(&len, NULL, NULL, &seq_eoc, &cst,
&p, len, tag, aclass, opt, ctx);
if (!ret) {
ASN1err(ASN1_F_ASN1_ITEM_EMBED_D2I, ERR_R_NESTED_ASN1_ERROR);
goto err;
} else if (ret == -1)
return -1;
if (aux && (aux->flags & ASN1_AFLG_BROKEN)) {
len = tmplen - (p - *in);
seq_nolen = 1;
}
/* If indefinite we don't do a length check */
else
seq_nolen = seq_eoc;
if (!cst) {
ASN1err(ASN1_F_ASN1_ITEM_EMBED_D2I, ASN1_R_SEQUENCE_NOT_CONSTRUCTED);
goto err;
}
if (!*pval && !ASN1_item_ex_new(pval, it)) {
ASN1err(ASN1_F_ASN1_ITEM_EMBED_D2I, ERR_R_NESTED_ASN1_ERROR);
goto err;
}
if (asn1_cb && !asn1_cb(ASN1_OP_D2I_PRE, pval, it, NULL))
goto auxerr;
/* Free up and zero any ADB found */
for (i = 0, tt = it->templates; i < it->tcount; i++, tt++) {
if (tt->flags & ASN1_TFLG_ADB_MASK) {
const ASN1_TEMPLATE *seqtt;
ASN1_VALUE **pseqval;
seqtt = asn1_do_adb(pval, tt, 0);
if (seqtt == NULL)
continue;
pseqval = asn1_get_field_ptr(pval, seqtt);
asn1_template_free(pseqval, seqtt);
}
}
/* Get each field entry */
for (i = 0, tt = it->templates; i < it->tcount; i++, tt++) {
const ASN1_TEMPLATE *seqtt;
ASN1_VALUE **pseqval;
seqtt = asn1_do_adb(pval, tt, 1);
if (seqtt == NULL)
goto err;
pseqval = asn1_get_field_ptr(pval, seqtt);
            /* Have we run out of data? */
if (!len)
break;
q = p;
if (asn1_check_eoc(&p, len)) {
if (!seq_eoc) {
ASN1err(ASN1_F_ASN1_ITEM_EMBED_D2I, ASN1_R_UNEXPECTED_EOC);
goto err;
}
len -= p - q;
seq_eoc = 0;
q = p;
break;
}
/*
* This determines the OPTIONAL flag value. The field cannot be
* omitted if it is the last of a SEQUENCE and there is still
* data to be read. This isn't strictly necessary but it
* increases efficiency in some cases.
*/
if (i == (it->tcount - 1))
isopt = 0;
else
isopt = (char)(seqtt->flags & ASN1_TFLG_OPTIONAL);
/*
* attempt to read in field, allowing each to be OPTIONAL
*/
ret = asn1_template_ex_d2i(pseqval, &p, len, seqtt, isopt, ctx);
if (!ret) {
errtt = seqtt;
goto err;
} else if (ret == -1) {
/*
* OPTIONAL component absent. Free and zero the field.
*/
asn1_template_free(pseqval, seqtt);
continue;
}
/* Update length */
len -= p - q;
}
/* Check for EOC if expecting one */
if (seq_eoc && !asn1_check_eoc(&p, len)) {
ASN1err(ASN1_F_ASN1_ITEM_EMBED_D2I, ASN1_R_MISSING_EOC);
goto err;
}
/* Check all data read */
if (!seq_nolen && len) {
ASN1err(ASN1_F_ASN1_ITEM_EMBED_D2I, ASN1_R_SEQUENCE_LENGTH_MISMATCH);
goto err;
}
/*
* If we get here we've got no more data in the SEQUENCE, however we
* may not have read all fields so check all remaining are OPTIONAL
* and clear any that are.
*/
for (; i < it->tcount; tt++, i++) {
const ASN1_TEMPLATE *seqtt;
seqtt = asn1_do_adb(pval, tt, 1);
if (seqtt == NULL)
goto err;
if (seqtt->flags & ASN1_TFLG_OPTIONAL) {
ASN1_VALUE **pseqval;
pseqval = asn1_get_field_ptr(pval, seqtt);
asn1_template_free(pseqval, seqtt);
} else {
errtt = seqtt;
ASN1err(ASN1_F_ASN1_ITEM_EMBED_D2I, ASN1_R_FIELD_MISSING);
goto err;
}
}
/* Save encoding */
if (!asn1_enc_save(pval, *in, p - *in, it))
goto auxerr;
if (asn1_cb && !asn1_cb(ASN1_OP_D2I_POST, pval, it, NULL))
goto auxerr;
*in = p;
return 1;
default:
return 0;
}
auxerr:
ASN1err(ASN1_F_ASN1_ITEM_EMBED_D2I, ASN1_R_AUX_ERROR);
err:
if (errtt)
ERR_add_error_data(4, "Field=", errtt->field_name,
", Type=", it->sname);
else
ERR_add_error_data(2, "Type=", it->sname);
return 0;
} | 1 | [
"CWE-400",
"CWE-674",
"CWE-787"
] | openssl | 2ac4c6f7b2b2af20c0e2b0ba05367e454cd11b33 | 143,637,508,639,147,210,000,000,000,000,000,000,000 | 289 | Limit ASN.1 constructed types recursive definition depth
Constructed types with a recursive definition (such as can be found in
PKCS7) could eventually exceed the stack given malicious input with
excessive recursion. Therefore we limit the stack depth.
CVE-2018-0739
Credit to OSSFuzz for finding this issue.
Reviewed-by: Rich Salz <[email protected]> |
static struct inode *btrfs_iget_locked(struct super_block *s,
u64 objectid,
struct btrfs_root *root)
{
struct inode *inode;
struct btrfs_iget_args args;
args.ino = objectid;
args.root = root;
inode = iget5_locked(s, objectid, btrfs_find_actor,
btrfs_init_locked_inode,
(void *)&args);
return inode;
} | 0 | [
"CWE-310"
] | linux-2.6 | 9c52057c698fb96f8f07e7a4bcf4801a092bda89 | 324,541,657,793,216,100,000,000,000,000,000,000,000 | 14 | Btrfs: fix hash overflow handling
The handling for directory crc hash overflows was fairly obscure,
split_leaf returns EOVERFLOW when we try to extend the item and that is
supposed to bubble up to userland. For a while it did so, but along the
way we added better handling of errors and forced the FS readonly if we
hit IO errors during the directory insertion.
Along the way, we started testing only for EEXIST and the EOVERFLOW case
was dropped. The end result is that we may force the FS readonly if we
catch a directory hash bucket overflow.
This fixes a few problem spots. First I add tests for EOVERFLOW in the
places where we can safely just return the error up the chain.
btrfs_rename is harder though, because it tries to insert the new
directory item only after it has already unlinked anything the rename
was going to overwrite. Rather than adding very complex logic, I added
a helper to test for the hash overflow case early while it is still safe
to bail out.
Snapshot and subvolume creation had a similar problem, so they are using
the new helper now too.
Signed-off-by: Chris Mason <[email protected]>
Reported-by: Pascal Junod <[email protected]> |
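The early-check idea is compact: probe the destination directory for a hash-bucket collision before rename unlinks anything. A hedged fragment; btrfs_check_dir_item_collision() is the helper the message describes, but the call site shown here is illustrative:

/* Probe for a dir-item hash collision first, while bailing out is still
 * safe, so EOVERFLOW reaches userland instead of forcing the FS readonly. */
ret = btrfs_check_dir_item_collision(dest_root, new_dir->i_ino,
                                     new_dentry->d_name.name,
                                     new_dentry->d_name.len);
if (ret)
    return ret;     /* typically -EOVERFLOW: fail the rename cleanly */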
TEST_P(ProtocolIntegrationTest, ConnDurationInflightRequest) {
config_helper_.setDownstreamMaxConnectionDuration(std::chrono::milliseconds(500));
config_helper_.addConfigModifier(
[](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
hcm) { hcm.mutable_drain_timeout()->set_seconds(1); });
initialize();
codec_client_ = makeHttpConnection(lookupPort("http"));
auto response = codec_client_->makeRequestWithBody(default_request_headers_, 1024);
waitForNextUpstreamRequest();
// block and wait for counter to increase
test_server_->waitForCounterGe("http.config_test.downstream_cx_max_duration_reached", 1);
// ensure request processed correctly
upstream_request_->encodeHeaders(default_response_headers_, false);
upstream_request_->encodeData(512, true);
ASSERT_TRUE(response->waitForEndStream());
EXPECT_TRUE(upstream_request_->complete());
EXPECT_TRUE(response->complete());
test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_total", 1);
test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_200", 1);
ASSERT_TRUE(codec_client_->waitForDisconnect(std::chrono::milliseconds(10000)));
} | 0 | [
"CWE-22"
] | envoy | 5333b928d8bcffa26ab19bf018369a835f697585 | 260,938,444,497,631,830,000,000,000,000,000,000,000 | 26 | Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <[email protected]> |
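The escaped-slash handling this test exercises boils down to spotting percent-encoded path separators. A standalone C sketch (the real implementation lives in Envoy's C++ path normalization; this shows only the detection idea):

#include <ctype.h>
#include <stdbool.h>

/* Return true if the path contains %2F ('/') or %5C ('\\'), i.e. an
 * escaped separator that a proxy may need to reject or unescape. */
static bool path_has_escaped_slash(const char *p)
{
    for (; p[0] != '\0' && p[1] != '\0' && p[2] != '\0'; p++) {
        if (p[0] != '%')
            continue;
        if (p[1] == '2' && tolower((unsigned char)p[2]) == 'f')
            return true;
        if (p[1] == '5' && tolower((unsigned char)p[2]) == 'c')
            return true;
    }
    return false;
}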
Pdata_clear(Pdata *self, Py_ssize_t clearto)
{
Py_ssize_t i = Py_SIZE(self);
assert(clearto >= self->fence);
if (clearto >= i)
return 0;
while (--i >= clearto) {
Py_CLEAR(self->data[i]);
}
Py_SIZE(self) = clearto;
return 0;
} | 0 | [
"CWE-190",
"CWE-369"
] | cpython | a4ae828ee416a66d8c7bf5ee71d653c2cc6a26dd | 309,452,513,297,025,130,000,000,000,000,000,000,000 | 14 | closes bpo-34656: Avoid relying on signed overflow in _pickle memos. (GH-9261) |
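The overflow-avoidance pattern behind bpo-34656 is worth spelling out: test against the ceiling before multiplying, rather than multiplying and hoping the wrapped result is caught. A standalone sketch with stand-in types (CPython itself uses Py_ssize_t and PY_SSIZE_T_MAX):

#include <stddef.h>
#include <stdint.h>

typedef ptrdiff_t ssize_like_t;          /* stand-in for Py_ssize_t */
#define SSIZE_LIKE_MAX PTRDIFF_MAX       /* stand-in for PY_SSIZE_T_MAX */

/* Double a memo-table allocation without relying on signed overflow,
 * which is undefined behavior in C. */
static int memo_grow(ssize_like_t *alloc)
{
    if (*alloc > SSIZE_LIKE_MAX / 2)
        return -1;                       /* would overflow: report it */
    *alloc *= 2;
    return 0;
}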
int SSH_Access::HandleSSHMessage()
{
int m=STALL;
const char *b;
int s;
pty_recv_buf->Get(&b,&s);
const char *eol=find_char(b,s,'\n');
if(!eol)
{
const char *p="password:";
const char *p_for="password for ";
const char *y="(yes/no)?";
int p_len=strlen(p);
int p_for_len=strlen(p_for);
int y_len=strlen(y);
if(s>0 && b[s-1]==' ')
s--;
if((s>=p_len && !strncasecmp(b+s-p_len,p,p_len))
|| (s>10 && !strncmp(b+s-2,"':",2))
|| (s>p_for_len && b[s-1]==':' && !strncasecmp(b,p_for,p_for_len)))
{
if(!pass)
{
SetError(LOGIN_FAILED,_("Password required"));
return MOVED;
}
if(password_sent>0)
{
SetError(LOGIN_FAILED,_("Login incorrect"));
return MOVED;
}
pty_recv_buf->Put("XXXX");
pty_send_buf->Put(pass);
pty_send_buf->Put("\n");
password_sent++;
return m;
}
if(s>=y_len && !strncasecmp(b+s-y_len,y,y_len))
{
pty_recv_buf->Put("yes\n");
pty_send_buf->Put("yes\n");
return m;
}
if(!received_greeting && recv_buf->Size()>0)
{
recv_buf->Get(&b,&s);
eol=find_char(b,s,'\n');
if(eol)
{
xstring &line=xstring::get_tmp(b,eol-b);
if(line.eq(greeting))
received_greeting=true;
LogRecv(4,line);
recv_buf->Skip(eol-b+1);
}
}
LogSSHMessage();
return m;
}
const char *f=N_("Host key verification failed");
if(!strncasecmp(b,f,strlen(f)))
{
LogSSHMessage();
SetError(FATAL,_(f));
return MOVED;
}
if(eol>b && eol[-1]=='\r')
eol--;
f=N_("Name or service not known");
int f_len=strlen(f);
if(eol-b>=f_len && !strncasecmp(eol-f_len,f,f_len)) {
LogSSHMessage();
SetError(LOOKUP_ERROR,xstring::get_tmp(b,eol-b));
return MOVED;
}
LogSSHMessage();
return MOVED;
} | 1 | [] | lftp | bc7b476e782d77839765f56bbdb4cee9f36b54ec | 64,728,998,944,453,620,000,000,000,000,000,000,000 | 78 | add settings fish:auto-confirm and sftp:auto-confirm
New host keys are no longer confirmed automatically by default; this should improve security.
Suggested by Marcin Szewczyk <[email protected]> |
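With these settings, the "(yes/no)?" host-key prompt is answered from configuration instead of always with "yes". A hedged sketch in the style of the surrounding lftp code; QueryBool and the setting lookup are assumptions about the implementation, not a verbatim excerpt:

if(s>=y_len && !strncasecmp(b+s-y_len,y,y_len))
{
   // consult the (fish|sftp):auto-confirm setting for this host
   const char *answer = QueryBool("auto-confirm",hostname) ? "yes\n" : "no\n";
   pty_recv_buf->Put(answer);   // keep the transcript consistent
   pty_send_buf->Put(answer);   // "no" makes ssh reject the unknown key
   return m;
}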
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
struct pt_regs *regs, int *rctxp)
{
struct trace_entry *entry;
unsigned long flags;
char *raw_data;
int pc;
BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
pc = preempt_count();
*rctxp = perf_swevent_get_recursion_context();
if (*rctxp < 0)
return NULL;
raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);
/* zero the dead bytes from align to not leak stack to user */
memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
entry = (struct trace_entry *)raw_data;
local_save_flags(flags);
tracing_generic_entry_update(entry, flags, pc);
entry->type = type;
return raw_data;
} | 0 | [] | linux | ced39002f5ea736b716ae233fb68b26d59783912 | 265,084,978,050,255,020,000,000,000,000,000,000,000 | 28 | ftrace, perf: Add support to use function tracepoint in perf
Adding perf registration support for the ftrace function event,
so it is now possible to register it via perf interface.
The perf_event struct statically contains ftrace_ops as a handle
for function tracer. The function tracer is registered/unregistered
in open/close actions.
To be efficient, we enable/disable ftrace_ops each time the traced
process is scheduled in/out (via TRACE_REG_PERF_(ADD|DELL) handlers).
This way tracing is enabled only when the process is running.
Intentionally using this way instead of the event's hw state
PERF_HES_STOPPED, which would not disable the ftrace_ops.
It is now possible to use function trace within perf commands
like:
perf record -e ftrace:function ls
perf stat -e ftrace:function ls
Allowed only for root.
Link: http://lkml.kernel.org/r/[email protected]
Acked-by: Frederic Weisbecker <[email protected]>
Signed-off-by: Jiri Olsa <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]> |
__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv)
{
if (--non_repr_priv->ref_count)
return;
list_del(&non_repr_priv->list);
kfree(non_repr_priv);
} | 0 | [
"CWE-400",
"CWE-401"
] | linux | 8572cea1461a006bce1d06c0c4b0575869125fa4 | 115,477,004,547,988,770,000,000,000,000,000,000,000 | 8 | nfp: flower: prevent memory leak in nfp_flower_spawn_phy_reprs
In nfp_flower_spawn_phy_reprs, in the for loop over eth_tbl, if any of the
intermediate allocations or initializations fails, memory is leaked. The
required releases are added.
Fixes: b94524529741 ("nfp: flower: add per repr private data for LAG offload")
Signed-off-by: Navid Emamdoost <[email protected]>
Acked-by: Jakub Kicinski <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
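The leak class here is generic: an allocation made inside a loop is abandoned when a later step in the same iteration fails. A self-contained sketch of the release-on-error shape the fix adds (the repr helpers are hypothetical):

#include <errno.h>

struct repr;                        /* hypothetical representor type */
struct repr *alloc_repr(void);      /* hypothetical helpers */
int init_repr(struct repr *r);
void free_repr(struct repr *r);

static int spawn_reprs(int n)
{
    int i, err;

    for (i = 0; i < n; i++) {
        struct repr *r = alloc_repr();
        if (!r)
            return -ENOMEM;
        err = init_repr(r);
        if (err) {
            free_repr(r);   /* release this iteration's allocation */
            return err;     /* instead of leaking it on the error path */
        }
    }
    return 0;
}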
evdev_phys_rect_to_units(const struct evdev_device *device,
const struct phys_rect *mm)
{
struct device_coord_rect units = {0};
const struct input_absinfo *absx, *absy;
if (device->abs.absinfo_x == NULL ||
device->abs.absinfo_y == NULL) {
log_bug_libinput(evdev_libinput_context(device),
"%s: is not an abs device\n",
device->devname);
return units;
}
absx = device->abs.absinfo_x;
absy = device->abs.absinfo_y;
units.x = mm->x * absx->resolution + absx->minimum;
units.y = mm->y * absy->resolution + absy->minimum;
units.w = mm->w * absx->resolution;
units.h = mm->h * absy->resolution;
return units;
} | 0 | [
"CWE-134"
] | libinput | a423d7d3269dc32a87384f79e29bb5ac021c83d1 | 176,177,809,137,742,330,000,000,000,000,000,000,000 | 24 | evdev: strip the device name of format directives
This fixes a format string vulnerability.
evdev_log_message() composes a format string consisting of a fixed
prefix (including the rendered device name) and the passed-in format
buffer. This format string is then passed with the arguments to the
actual log handler, which usually and eventually ends up being printf.
If the device name contains a printf-style format directive, it ends up in
the format string and gets interpreted as such, e.g. for a device
"Foo%sBar" the log message vs printf invocation ends up being:
evdev_log_message(device, "some message %s", "some argument");
printf("event9 - Foo%sBar: some message %s", "some argument");
This can enable an attacker to execute malicious code with the
privileges of the process using libinput.
To exploit this, an attacker needs to be able to create a kernel device
with a malicious name, e.g. through /dev/uinput or a Bluetooth device.
To fix this, convert any potential format directives in the device name
by duplicating percentages.
Pre-rendering the device to avoid the issue altogether would be nicer
but the current log level hooks do not easily allow for this. The device
name is the only user-controlled part of the format string.
A second potential issue is the sysname of the device which is also
sanitized.
This issue was found by Albin Eldstål-Ahrens and Benjamin Svensson from
Assured AB, and independently by Lukas Lamster.
Fixes #752
Signed-off-by: Peter Hutterer <[email protected]> |
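Doubling percentages is a one-pass rewrite. A standalone C sketch of the sanitizer the message describes (the function name is illustrative; libinput's actual helper may differ):

#include <stdlib.h>
#include <string.h>

/* Return a copy of 'name' with every '%' doubled, so a later pass through
 * printf-style formatting prints it literally ("Foo%sBar" -> "Foo%%sBar"). */
static char *sanitize_device_name(const char *name)
{
    size_t n = strlen(name), extra = 0, i, j;
    char *out;

    for (i = 0; i < n; i++)
        if (name[i] == '%')
            extra++;

    out = malloc(n + extra + 1);
    if (out == NULL)
        return NULL;

    for (i = 0, j = 0; i < n; i++) {
        out[j++] = name[i];
        if (name[i] == '%')
            out[j++] = '%';
    }
    out[j] = '\0';
    return out;
}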
static inline int do_seccomp(struct pt_regs *regs) { return 0; } | 0 | [
"CWE-119",
"CWE-787"
] | linux | c1fa0768a8713b135848f78fd43ffc208d8ded70 | 240,290,174,087,164,860,000,000,000,000,000,000,000 | 1 | powerpc/tm: Flush TM only if CPU has TM feature
Commit cd63f3c ("powerpc/tm: Fix saving of TM SPRs in core dump")
added code to access TM SPRs in flush_tmregs_to_thread(). However
flush_tmregs_to_thread() does not check if TM feature is available on
CPU before trying to access TM SPRs in order to copy live state to
thread structures. flush_tmregs_to_thread() is indeed guarded by
CONFIG_PPC_TRANSACTIONAL_MEM but it might be the case that kernel
was compiled with CONFIG_PPC_TRANSACTIONAL_MEM enabled and run on
a CPU without the TM feature, causing the kernel to execute TM
instructions that the CPU treats as illegal instructions.
The fix is just to add proper checking in flush_tmregs_to_thread()
for whether the CPU has the TM feature before accessing any TM-specific
resource, returning immediately if TM is not available on the CPU. Adding
that checking in flush_tmregs_to_thread() instead of in places
where it is called, like in vsr_get() and vsr_set(), is better because
avoids the same problem cropping up elsewhere.
Cc: [email protected] # v4.13+
Fixes: cd63f3c ("powerpc/tm: Fix saving of TM SPRs in core dump")
Signed-off-by: Gustavo Romero <[email protected]>
Reviewed-by: Cyril Bur <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]> |
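The fix amounts to one early guard. A hedged kernel-style sketch; cpu_has_feature()/CPU_FTR_TM are the usual powerpc primitives, and the body of the function is elided:

static void flush_tmregs_to_thread(struct task_struct *tsk)
{
    /* TM SPRs don't exist without the feature; touching them traps */
    if (!cpu_has_feature(CPU_FTR_TM))
        return;
    /* ... copy live TM state (TFHAR, TEXASR, TFIAR, ...) into
     * tsk->thread as before ... */
}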
ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
ext4_lblk_t start, ext4_lblk_t shift,
enum SHIFT_DIRECTION SHIFT)
{
struct ext4_ext_path *path;
int ret = 0, depth;
struct ext4_extent *extent;
ext4_lblk_t stop, *iterator, ex_start, ex_end;
/* Let path point to the last extent */
path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0);
if (IS_ERR(path))
return PTR_ERR(path);
depth = path->p_depth;
extent = path[depth].p_ext;
if (!extent)
goto out;
stop = le32_to_cpu(extent->ee_block) +
ext4_ext_get_actual_len(extent);
/*
	 * In case of left shift, don't start shifting extents until we make
* sure the hole is big enough to accommodate the shift.
*/
if (SHIFT == SHIFT_LEFT) {
path = ext4_find_extent(inode, start - 1, &path, 0);
if (IS_ERR(path))
return PTR_ERR(path);
depth = path->p_depth;
extent = path[depth].p_ext;
if (extent) {
ex_start = le32_to_cpu(extent->ee_block);
ex_end = le32_to_cpu(extent->ee_block) +
ext4_ext_get_actual_len(extent);
} else {
ex_start = 0;
ex_end = 0;
}
if ((start == ex_start && shift > ex_start) ||
(shift > start - ex_end)) {
ext4_ext_drop_refs(path);
kfree(path);
return -EINVAL;
}
}
/*
* In case of left shift, iterator points to start and it is increased
* till we reach stop. In case of right shift, iterator points to stop
* and it is decreased till we reach start.
*/
if (SHIFT == SHIFT_LEFT)
iterator = &start;
else
iterator = &stop;
/* Its safe to start updating extents */
while (start < stop) {
path = ext4_find_extent(inode, *iterator, &path, 0);
if (IS_ERR(path))
return PTR_ERR(path);
depth = path->p_depth;
extent = path[depth].p_ext;
if (!extent) {
EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
(unsigned long) *iterator);
return -EFSCORRUPTED;
}
if (SHIFT == SHIFT_LEFT && *iterator >
le32_to_cpu(extent->ee_block)) {
/* Hole, move to the next extent */
if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
path[depth].p_ext++;
} else {
*iterator = ext4_ext_next_allocated_block(path);
continue;
}
}
if (SHIFT == SHIFT_LEFT) {
extent = EXT_LAST_EXTENT(path[depth].p_hdr);
*iterator = le32_to_cpu(extent->ee_block) +
ext4_ext_get_actual_len(extent);
} else {
extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
*iterator = le32_to_cpu(extent->ee_block) > 0 ?
le32_to_cpu(extent->ee_block) - 1 : 0;
/* Update path extent in case we need to stop */
while (le32_to_cpu(extent->ee_block) < start)
extent++;
path[depth].p_ext = extent;
}
ret = ext4_ext_shift_path_extents(path, shift, inode,
handle, SHIFT);
if (ret)
break;
}
out:
ext4_ext_drop_refs(path);
kfree(path);
return ret;
} | 0 | [
"CWE-362"
] | linux | ea3d7209ca01da209cda6f0dea8be9cc4b7a933b | 103,345,500,764,660,580,000,000,000,000,000,000,000 | 105 | ext4: fix races between page faults and hole punching
Currently, page faults and hole punching are completely unsynchronized.
This can result in page fault faulting in a page into a range that we
are punching after truncate_pagecache_range() has been called and thus
we can end up with a page mapped to disk blocks that will be shortly
freed. Filesystem corruption will shortly follow. Note that the same
race is avoided for truncate by checking page fault offset against
i_size but there isn't similar mechanism available for punching holes.
Fix the problem by creating new rw semaphore i_mmap_sem in inode and
grab it for writing over truncate, hole punching, and other functions
removing blocks from extent tree and for read over page faults. We
cannot easily use i_data_sem for this since that ranks below transaction
start and we need something ranking above it so that it can be held over
the whole truncate / hole punching operation. Also remove various
workarounds we had in the code to reduce race window when page fault
could have created pages with stale mapping information.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]> |
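The locking discipline is easiest to see side by side. A hedged kernel-style sketch of the two sides the commit describes; details of the surrounding code are elided:

/* truncate / hole-punch side: exclusive, so no fault can race in */
static void punch_hole_sketch(struct inode *inode, loff_t start, loff_t end)
{
    down_write(&EXT4_I(inode)->i_mmap_sem);
    truncate_pagecache_range(inode, start, end);
    /* ... remove the blocks from the extent tree ... */
    up_write(&EXT4_I(inode)->i_mmap_sem);
}

/* page-fault side: shared, so concurrent faults still scale */
static void fault_sketch(struct inode *inode)
{
    down_read(&EXT4_I(inode)->i_mmap_sem);
    /* ... populate and map the faulting page ... */
    up_read(&EXT4_I(inode)->i_mmap_sem);
}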
~Projection() { delete[] private_data_; } | 0 | [
"CWE-20"
] | libvpx | f00890eecdf8365ea125ac16769a83aa6b68792d | 338,561,504,129,547,530,000,000,000,000,000,000,000 | 1 | update libwebm to libwebm-1.0.0.27-352-g6ab9fcf
https://chromium.googlesource.com/webm/libwebm/+log/af81f26..6ab9fcf
Change-Id: I9d56e1fbaba9b96404b4fbabefddc1a85b79c25d |
GfxPattern *GfxResources::lookupPattern(char *name, Gfx *gfx) {
GfxResources *resPtr;
GfxPattern *pattern;
Object obj;
for (resPtr = this; resPtr; resPtr = resPtr->next) {
if (resPtr->patternDict.isDict()) {
if (!resPtr->patternDict.dictLookup(name, &obj)->isNull()) {
pattern = GfxPattern::parse(&obj, gfx);
obj.free();
return pattern;
}
obj.free();
}
}
error(-1, "Unknown pattern '%s'", name);
return NULL;
} | 0 | [] | poppler | abf167af8b15e5f3b510275ce619e6fdb42edd40 | 43,796,297,857,006,465,000,000,000,000,000,000,000 | 18 | Implement tiling/patterns in SplashOutputDev
Fixes bug 13518 |
test_bson_count_keys (void)
{
bson_t b;
bson_init (&b);
BSON_ASSERT (bson_append_int32 (&b, "0", -1, 0));
BSON_ASSERT (bson_append_int32 (&b, "1", -1, 1));
BSON_ASSERT (bson_append_int32 (&b, "2", -1, 2));
ASSERT_CMPINT (bson_count_keys (&b), ==, 3);
bson_destroy (&b);
} | 0 | [
"CWE-125"
] | mongo-c-driver | 0d9a4d98bfdf4acd2c0138d4aaeb4e2e0934bd84 | 332,561,379,899,874,200,000,000,000,000,000,000,000 | 11 | Fix for CVE-2018-16790 -- Verify bounds before binary length read.
As reported here: https://jira.mongodb.org/browse/CDRIVER-2819,
a heap overread occurs due to a failure to correctly verify data
bounds.
In the original check, len - o returns the data left including the
sizeof(l) we just read. Instead, the comparison should check
against the data left NOT including the binary int32, i.e. just
subtype (byte*) instead of int32 subtype (byte*).
Added in test for corrupted BSON example. |
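The corrected bound from the message, as a standalone check: the remaining bytes after offset o must cover the int32 length prefix, the subtype byte, and l payload bytes. Names and layout assumptions are spelled out in the comments:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Assumes o < len and that at least sizeof(int32_t) readable bytes at
 * data + o were already verified; the 64-bit sum avoids wraparound. */
static bool binary_field_fits(const uint8_t *data, size_t len, size_t o)
{
    uint32_t l;

    memcpy(&l, data + o, sizeof(l));     /* declared binary length */
    return (uint64_t)sizeof(l) + 1 + l <= (uint64_t)(len - o);
}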
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
MemTxAttrs attrs,
MemTxResult *result,
enum device_endian endian)
{
uint8_t *ptr;
uint64_t val;
MemoryRegion *mr;
hwaddr l = 8;
hwaddr addr1;
MemTxResult r;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
false);
if (l < 8 || !memory_access_is_direct(mr, false)) {
/* I/O case */
r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap64(val);
}
#else
if (endian == DEVICE_BIG_ENDIAN) {
val = bswap64(val);
}
#endif
} else {
/* RAM case */
ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
& TARGET_PAGE_MASK)
+ addr1);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
val = ldq_le_p(ptr);
break;
case DEVICE_BIG_ENDIAN:
val = ldq_be_p(ptr);
break;
default:
val = ldq_p(ptr);
break;
}
r = MEMTX_OK;
}
if (result) {
*result = r;
}
rcu_read_unlock();
return val;
} | 0 | [] | qemu | e4a511f8cc6f4a46d409fb5c9f72c38ba45f8d83 | 252,439,795,014,991,400,000,000,000,000,000,000,000 | 51 | exec: clamp accesses against the MemoryRegionSection
Because the clamping was done against the MemoryRegion,
address_space_rw was effectively broken if a write spanned
multiple sections that are not linear in underlying memory
(with the memory not being under an IOMMU).
This is visible with the MIPS rc4030 IOMMU, which is implemented
as a series of alias memory regions that point to the actual RAM.
Tested-by: Hervé Poussineau <[email protected]>
Tested-by: Mark Cave-Ayland <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
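The clamp itself is two lines once the section geometry is known. A simplified sketch (QEMU's real code works with Int128 section sizes; field names here are illustrative):

#include <stdint.h>

/* Shrink the requested length so a single access never crosses the end of
 * the current MemoryRegionSection; the caller loops for the remainder. */
static void clamp_to_section(uint64_t section_size,
                             uint64_t offset_in_section, uint64_t *plen)
{
    uint64_t remain = section_size - offset_in_section;

    if (*plen > remain)
        *plen = remain;
}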
xmlSchemaCheckDerivationOKRestriction2to4(xmlSchemaParserCtxtPtr pctxt,
int action,
xmlSchemaBasicItemPtr item,
xmlSchemaBasicItemPtr baseItem,
xmlSchemaItemListPtr uses,
xmlSchemaItemListPtr baseUses,
xmlSchemaWildcardPtr wild,
xmlSchemaWildcardPtr baseWild)
{
xmlSchemaAttributeUsePtr cur = NULL, bcur;
int i, j, found; /* err = 0; */
const xmlChar *bEffValue;
int effFixed;
if (uses != NULL) {
for (i = 0; i < uses->nbItems; i++) {
cur = uses->items[i];
found = 0;
if (baseUses == NULL)
goto not_found;
for (j = 0; j < baseUses->nbItems; j++) {
bcur = baseUses->items[j];
if ((WXS_ATTRUSE_DECL_NAME(cur) ==
WXS_ATTRUSE_DECL_NAME(bcur)) &&
(WXS_ATTRUSE_DECL_TNS(cur) ==
WXS_ATTRUSE_DECL_TNS(bcur)))
{
/*
* (2.1) "If there is an attribute use in the {attribute
* uses} of the {base type definition} (call this B) whose
* {attribute declaration} has the same {name} and {target
* namespace}, then all of the following must be true:"
*/
found = 1;
if ((cur->occurs == XML_SCHEMAS_ATTR_USE_OPTIONAL) &&
(bcur->occurs == XML_SCHEMAS_ATTR_USE_REQUIRED))
{
xmlChar *str = NULL;
/*
* (2.1.1) "one of the following must be true:"
* (2.1.1.1) "B's {required} is false."
* (2.1.1.2) "R's {required} is true."
*/
xmlSchemaPAttrUseErr4(pctxt,
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_2_1_1,
WXS_ITEM_NODE(item), item, cur,
"The 'optional' attribute use is inconsistent "
"with the corresponding 'required' attribute use of "
"the %s %s",
WXS_ACTION_STR(action),
xmlSchemaGetComponentDesignation(&str, baseItem),
NULL, NULL);
FREE_AND_NULL(str);
/* err = pctxt->err; */
} else if (xmlSchemaCheckCOSSTDerivedOK(ACTXT_CAST pctxt,
WXS_ATTRUSE_TYPEDEF(cur),
WXS_ATTRUSE_TYPEDEF(bcur), 0) != 0)
{
xmlChar *strA = NULL, *strB = NULL, *strC = NULL;
/*
* SPEC (2.1.2) "R's {attribute declaration}'s
* {type definition} must be validly derived from
* B's {type definition} given the empty set as
* defined in Type Derivation OK (Simple) ($3.14.6)."
*/
xmlSchemaPAttrUseErr4(pctxt,
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_2_1_2,
WXS_ITEM_NODE(item), item, cur,
"The attribute declaration's %s "
"is not validly derived from "
"the corresponding %s of the "
"attribute declaration in the %s %s",
xmlSchemaGetComponentDesignation(&strA,
WXS_ATTRUSE_TYPEDEF(cur)),
xmlSchemaGetComponentDesignation(&strB,
WXS_ATTRUSE_TYPEDEF(bcur)),
WXS_ACTION_STR(action),
xmlSchemaGetComponentDesignation(&strC, baseItem));
/* xmlSchemaGetComponentDesignation(&str, baseItem), */
FREE_AND_NULL(strA);
FREE_AND_NULL(strB);
FREE_AND_NULL(strC);
/* err = pctxt->err; */
} else {
/*
* 2.1.3 [Definition:] Let the effective value
* constraint of an attribute use be its {value
* constraint}, if present, otherwise its {attribute
* declaration}'s {value constraint} .
*/
xmlSchemaGetEffectiveValueConstraint(bcur,
&effFixed, &bEffValue, NULL);
/*
* 2.1.3 ... one of the following must be true
*
* 2.1.3.1 B's `effective value constraint` is
* `absent` or default.
*/
if ((bEffValue != NULL) &&
(effFixed == 1)) {
const xmlChar *rEffValue = NULL;
xmlSchemaGetEffectiveValueConstraint(bcur,
&effFixed, &rEffValue, NULL);
/*
* 2.1.3.2 R's `effective value constraint` is
* fixed with the same string as B's.
* MAYBE TODO: Compare the computed values.
* Hmm, it says "same string" so
* string-equality might really be sufficient.
*/
if ((effFixed == 0) ||
(! WXS_ARE_DEFAULT_STR_EQUAL(rEffValue, bEffValue)))
{
xmlChar *str = NULL;
xmlSchemaPAttrUseErr4(pctxt,
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_2_1_3,
WXS_ITEM_NODE(item), item, cur,
"The effective value constraint of the "
"attribute use is inconsistent with "
"its correspondent in the %s %s",
WXS_ACTION_STR(action),
xmlSchemaGetComponentDesignation(&str,
baseItem),
NULL, NULL);
FREE_AND_NULL(str);
/* err = pctxt->err; */
}
}
}
break;
}
}
not_found:
if (!found) {
/*
* (2.2) "otherwise the {base type definition} must have an
* {attribute wildcard} and the {target namespace} of the
* R's {attribute declaration} must be `valid` with respect
* to that wildcard, as defined in Wildcard allows Namespace
* Name ($3.10.4)."
*/
if ((baseWild == NULL) ||
(xmlSchemaCheckCVCWildcardNamespace(baseWild,
(WXS_ATTRUSE_DECL(cur))->targetNamespace) != 0))
{
xmlChar *str = NULL;
xmlSchemaPAttrUseErr4(pctxt,
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_2_2,
WXS_ITEM_NODE(item), item, cur,
"Neither a matching attribute use, "
"nor a matching wildcard exists in the %s %s",
WXS_ACTION_STR(action),
xmlSchemaGetComponentDesignation(&str, baseItem),
NULL, NULL);
FREE_AND_NULL(str);
/* err = pctxt->err; */
}
}
}
}
/*
* SPEC derivation-ok-restriction (3):
* (3) "For each attribute use in the {attribute uses} of the {base type
* definition} whose {required} is true, there must be an attribute
* use with an {attribute declaration} with the same {name} and
* {target namespace} as its {attribute declaration} in the {attribute
* uses} of the complex type definition itself whose {required} is true.
*/
if (baseUses != NULL) {
for (j = 0; j < baseUses->nbItems; j++) {
bcur = baseUses->items[j];
if (bcur->occurs != XML_SCHEMAS_ATTR_USE_REQUIRED)
continue;
found = 0;
if (uses != NULL) {
for (i = 0; i < uses->nbItems; i++) {
cur = uses->items[i];
if ((WXS_ATTRUSE_DECL_NAME(cur) ==
WXS_ATTRUSE_DECL_NAME(bcur)) &&
(WXS_ATTRUSE_DECL_TNS(cur) ==
WXS_ATTRUSE_DECL_TNS(bcur))) {
found = 1;
break;
}
}
}
if (!found) {
xmlChar *strA = NULL, *strB = NULL;
xmlSchemaCustomErr4(ACTXT_CAST pctxt,
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_3,
NULL, item,
"A matching attribute use for the "
"'required' %s of the %s %s is missing",
xmlSchemaGetComponentDesignation(&strA, bcur),
WXS_ACTION_STR(action),
xmlSchemaGetComponentDesignation(&strB, baseItem),
NULL);
FREE_AND_NULL(strA);
FREE_AND_NULL(strB);
}
}
}
/*
* derivation-ok-restriction (4)
*/
if (wild != NULL) {
/*
* (4) "If there is an {attribute wildcard}, all of the
* following must be true:"
*/
if (baseWild == NULL) {
xmlChar *str = NULL;
/*
* (4.1) "The {base type definition} must also have one."
*/
xmlSchemaCustomErr4(ACTXT_CAST pctxt,
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_4_1,
NULL, item,
"The %s has an attribute wildcard, "
"but the %s %s '%s' does not have one",
WXS_ITEM_TYPE_NAME(item),
WXS_ACTION_STR(action),
WXS_ITEM_TYPE_NAME(baseItem),
xmlSchemaGetComponentQName(&str, baseItem));
FREE_AND_NULL(str);
return(pctxt->err);
} else if ((baseWild->any == 0) &&
xmlSchemaCheckCOSNSSubset(wild, baseWild))
{
xmlChar *str = NULL;
/*
* (4.2) "The complex type definition's {attribute wildcard}'s
* {namespace constraint} must be a subset of the {base type
* definition}'s {attribute wildcard}'s {namespace constraint},
* as defined by Wildcard Subset ($3.10.6)."
*/
xmlSchemaCustomErr4(ACTXT_CAST pctxt,
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_4_2,
NULL, item,
"The attribute wildcard is not a valid "
"subset of the wildcard in the %s %s '%s'",
WXS_ACTION_STR(action),
WXS_ITEM_TYPE_NAME(baseItem),
xmlSchemaGetComponentQName(&str, baseItem),
NULL);
FREE_AND_NULL(str);
return(pctxt->err);
}
/* 4.3 Unless the {base type definition} is the `ur-type
* definition`, the complex type definition's {attribute
* wildcard}'s {process contents} must be identical to or
* stronger than the {base type definition}'s {attribute
* wildcard}'s {process contents}, where strict is stronger
* than lax is stronger than skip.
*/
if ((! WXS_IS_ANYTYPE(baseItem)) &&
(wild->processContents < baseWild->processContents)) {
xmlChar *str = NULL;
xmlSchemaCustomErr4(ACTXT_CAST pctxt,
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_4_3,
NULL, baseItem,
"The {process contents} of the attribute wildcard is "
"weaker than the one in the %s %s '%s'",
WXS_ACTION_STR(action),
WXS_ITEM_TYPE_NAME(baseItem),
xmlSchemaGetComponentQName(&str, baseItem),
NULL);
FREE_AND_NULL(str)
return(pctxt->err);
}
}
return(0);
} | 0 | [
"CWE-134"
] | libxml2 | 4472c3a5a5b516aaf59b89be602fbce52756c3e9 | 250,318,259,251,418,040,000,000,000,000,000,000,000 | 280 | Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports. |
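The decoration is a thin wrapper over the compiler's format checker. A sketch of the macro and one hypothetical use; the macro shape matches common GCC/Clang practice, and the declared function is illustrative:

#ifdef __GNUC__
#define LIBXML_ATTR_FORMAT(fmt, args) \
    __attribute__((__format__(__printf__, fmt, args)))
#else
#define LIBXML_ATTR_FORMAT(fmt, args)
#endif

/* -Wformat now flags any mismatch between 'msg' and the variadic args,
 * which is how risky call sites get reported at compile time. */
void report_error_like(void *ctx, const char *msg, ...)
    LIBXML_ATTR_FORMAT(2, 3);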
nautilus_create_templates_directory (void)
{
char *dir;
dir = nautilus_get_templates_directory ();
if (!g_file_test (dir, G_FILE_TEST_EXISTS)) {
g_mkdir (dir, DEFAULT_NAUTILUS_DIRECTORY_MODE);
}
g_free (dir);
} | 0 | [] | nautilus | a0f7bb5f2e9af8ecb463b13da834fa8559b0a481 | 307,217,498,022,684,180,000,000,000,000,000,000,000 | 10 | Use $XDG_DATA_HOME/.converted-launchers as marker for one-time desktop
2009-02-25 Alexander Larsson <[email protected]>
* src/nautilus-application.c:
Use $XDG_DATA_HOME/.converted-launchers as marker for
one-time desktop file trust operation.
* libnautilus-private/nautilus-file-utilities.[ch]:
Add nautilus_is_in_system_dir() to check if path is in
XDG_DATA_DIR or in ~/.gnome2.
* libnautilus-private/nautilus-directory-async.c:
(is_link_trusted):
Use new nautilus_is_in_system_dir() instead of open coding it.
* libnautilus-private/nautilus-file-operations.c:
When copying a desktop file from a trusted location to the desktop,
mark it as trusted.
svn path=/trunk/; revision=15018 |
static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
{
if (file_rss)
add_mm_counter(mm, file_rss, file_rss);
if (anon_rss)
add_mm_counter(mm, anon_rss, anon_rss);
} | 0 | [
"CWE-20"
] | linux-2.6 | 89f5b7da2a6bad2e84670422ab8192382a5aeb9f | 15,547,307,601,400,600,000,000,000,000,000,000,000 | 7 | Reinstate ZERO_PAGE optimization in 'get_user_pages()' and fix XIP
KAMEZAWA Hiroyuki and Oleg Nesterov point out that since the commit
557ed1fa2620dc119adb86b34c614e152a629a80 ("remove ZERO_PAGE") removed
the ZERO_PAGE from the VM mappings, any users of get_user_pages() will
generally now populate the VM with real empty pages needlessly.
We used to get the ZERO_PAGE when we did the "handle_mm_fault()", but
since fault handling no longer uses ZERO_PAGE for new anonymous pages,
we now need to handle that special case in follow_page() instead.
In particular, the removal of ZERO_PAGE effectively removed the core
file writing optimization where we would skip writing pages that had not
been populated at all, and increased memory pressure a lot by allocating
all those useless newly zeroed pages.
This reinstates the optimization by making the unmapped PTE case the
same as for a non-existent page table, which already did this correctly.
While at it, this also fixes the XIP case for follow_page(), where the
caller could not differentiate between the case of a page that simply
could not be used (because it had no "struct page" associated with it)
and a page that just wasn't mapped.
We do that by simply returning an error pointer for pages that could not
be turned into a "struct page *". The error is arbitrarily picked to be
EFAULT, since that was what get_user_pages() already used for the
equivalent IO-mapped page case.
[ Also removed an impossible test for pte_offset_map_lock() failing:
that's not how that function works ]
Acked-by: Oleg Nesterov <[email protected]>
Acked-by: Nick Piggin <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Roland McGrath <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
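The resulting follow_page() contract is three-valued, and callers must distinguish the cases. A hedged kernel-style fragment showing a hypothetical get_user_pages()-like caller:

struct page *page = follow_page(vma, addr, flags);

if (IS_ERR(page))
    return PTR_ERR(page);   /* mapped, but no usable struct page (XIP) */
if (page == NULL)
    page = ZERO_PAGE(0);    /* nothing populated: share the zero page */
/* otherwise: a real, populated page */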
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
int r;
union {
struct kvm_lapic_state *lapic;
struct kvm_xsave *xsave;
struct kvm_xcrs *xcrs;
void *buffer;
} u;
u.buffer = NULL;
switch (ioctl) {
case KVM_GET_LAPIC: {
r = -EINVAL;
if (!vcpu->arch.apic)
goto out;
u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
r = -ENOMEM;
if (!u.lapic)
goto out;
r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
goto out;
r = 0;
break;
}
case KVM_SET_LAPIC: {
r = -EINVAL;
if (!vcpu->arch.apic)
goto out;
u.lapic = memdup_user(argp, sizeof(*u.lapic));
if (IS_ERR(u.lapic)) {
r = PTR_ERR(u.lapic);
goto out;
}
r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
if (r)
goto out;
r = 0;
break;
}
case KVM_INTERRUPT: {
struct kvm_interrupt irq;
r = -EFAULT;
if (copy_from_user(&irq, argp, sizeof irq))
goto out;
r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
if (r)
goto out;
r = 0;
break;
}
case KVM_NMI: {
r = kvm_vcpu_ioctl_nmi(vcpu);
if (r)
goto out;
r = 0;
break;
}
case KVM_SET_CPUID: {
struct kvm_cpuid __user *cpuid_arg = argp;
struct kvm_cpuid cpuid;
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
goto out;
r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
if (r)
goto out;
break;
}
case KVM_SET_CPUID2: {
struct kvm_cpuid2 __user *cpuid_arg = argp;
struct kvm_cpuid2 cpuid;
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
goto out;
r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
cpuid_arg->entries);
if (r)
goto out;
break;
}
case KVM_GET_CPUID2: {
struct kvm_cpuid2 __user *cpuid_arg = argp;
struct kvm_cpuid2 cpuid;
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
goto out;
r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
cpuid_arg->entries);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
goto out;
r = 0;
break;
}
case KVM_GET_MSRS:
r = msr_io(vcpu, argp, kvm_get_msr, 1);
break;
case KVM_SET_MSRS:
r = msr_io(vcpu, argp, do_set_msr, 0);
break;
case KVM_TPR_ACCESS_REPORTING: {
struct kvm_tpr_access_ctl tac;
r = -EFAULT;
if (copy_from_user(&tac, argp, sizeof tac))
goto out;
r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, &tac, sizeof tac))
goto out;
r = 0;
break;
};
case KVM_SET_VAPIC_ADDR: {
struct kvm_vapic_addr va;
r = -EINVAL;
if (!irqchip_in_kernel(vcpu->kvm))
goto out;
r = -EFAULT;
if (copy_from_user(&va, argp, sizeof va))
goto out;
r = 0;
kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
break;
}
case KVM_X86_SETUP_MCE: {
u64 mcg_cap;
r = -EFAULT;
if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
goto out;
r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
break;
}
case KVM_X86_SET_MCE: {
struct kvm_x86_mce mce;
r = -EFAULT;
if (copy_from_user(&mce, argp, sizeof mce))
goto out;
r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
break;
}
case KVM_GET_VCPU_EVENTS: {
struct kvm_vcpu_events events;
kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
r = -EFAULT;
if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
break;
r = 0;
break;
}
case KVM_SET_VCPU_EVENTS: {
struct kvm_vcpu_events events;
r = -EFAULT;
if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
break;
r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
break;
}
case KVM_GET_DEBUGREGS: {
struct kvm_debugregs dbgregs;
kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
r = -EFAULT;
if (copy_to_user(argp, &dbgregs,
sizeof(struct kvm_debugregs)))
break;
r = 0;
break;
}
case KVM_SET_DEBUGREGS: {
struct kvm_debugregs dbgregs;
r = -EFAULT;
if (copy_from_user(&dbgregs, argp,
sizeof(struct kvm_debugregs)))
break;
r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
break;
}
case KVM_GET_XSAVE: {
u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
r = -ENOMEM;
if (!u.xsave)
break;
kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
r = -EFAULT;
if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
break;
r = 0;
break;
}
case KVM_SET_XSAVE: {
u.xsave = memdup_user(argp, sizeof(*u.xsave));
if (IS_ERR(u.xsave)) {
r = PTR_ERR(u.xsave);
goto out;
}
r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
break;
}
case KVM_GET_XCRS: {
u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
r = -ENOMEM;
if (!u.xcrs)
break;
kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
r = -EFAULT;
if (copy_to_user(argp, u.xcrs,
sizeof(struct kvm_xcrs)))
break;
r = 0;
break;
}
case KVM_SET_XCRS: {
u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
if (IS_ERR(u.xcrs)) {
r = PTR_ERR(u.xcrs);
goto out;
}
r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
break;
}
case KVM_SET_TSC_KHZ: {
u32 user_tsc_khz;
r = -EINVAL;
user_tsc_khz = (u32)arg;
if (user_tsc_khz >= kvm_max_guest_tsc_khz)
goto out;
if (user_tsc_khz == 0)
user_tsc_khz = tsc_khz;
kvm_set_tsc_khz(vcpu, user_tsc_khz);
r = 0;
goto out;
}
case KVM_GET_TSC_KHZ: {
r = vcpu->arch.virtual_tsc_khz;
goto out;
}
case KVM_KVMCLOCK_CTRL: {
r = kvm_set_guest_paused(vcpu);
goto out;
}
default:
r = -EINVAL;
}
out:
kfree(u.buffer);
return r;
} | 0 | [] | linux | 6d1068b3a98519247d8ba4ec85cd40ac136dbdf9 | 268,009,639,568,670,240,000,000,000,000,000,000,000 | 287 | KVM: x86: invalid opcode oops on SET_SREGS with OSXSAVE bit set (CVE-2012-4461)
On hosts without the XSAVE support unprivileged local user can trigger
oops similar to the one below by setting X86_CR4_OSXSAVE bit in guest
cr4 register using KVM_SET_SREGS ioctl and later issuing KVM_RUN
ioctl.
invalid opcode: 0000 [#2] SMP
Modules linked in: tun ip6table_filter ip6_tables ebtable_nat ebtables
...
Pid: 24935, comm: zoog_kvm_monito Tainted: G D 3.2.0-3-686-pae
EIP: 0060:[<f8b9550c>] EFLAGS: 00210246 CPU: 0
EIP is at kvm_arch_vcpu_ioctl_run+0x92a/0xd13 [kvm]
EAX: 00000001 EBX: 000f387e ECX: 00000000 EDX: 00000000
ESI: 00000000 EDI: 00000000 EBP: ef5a0060 ESP: d7c63e70
DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068
Process zoog_kvm_monito (pid: 24935, ti=d7c62000 task=ed84a0c0
task.ti=d7c62000)
Stack:
00000001 f70a1200 f8b940a9 ef5a0060 00000000 00200202 f8769009 00000000
ef5a0060 000f387e eda5c020 8722f9c8 00015bae 00000000 ed84a0c0 ed84a0c0
c12bf02d 0000ae80 ef7f8740 fffffffb f359b740 ef5a0060 f8b85dc1 0000ae80
Call Trace:
[<f8b940a9>] ? kvm_arch_vcpu_ioctl_set_sregs+0x2fe/0x308 [kvm]
...
[<c12bfb44>] ? syscall_call+0x7/0xb
Code: 89 e8 e8 14 ee ff ff ba 00 00 04 00 89 e8 e8 98 48 ff ff 85 c0 74
1e 83 7d 48 00 75 18 8b 85 08 07 00 00 31 c9 8b 95 0c 07 00 00 <0f> 01
d1 c7 45 48 01 00 00 00 c7 45 1c 01 00 00 00 0f ae f0 89
EIP: [<f8b9550c>] kvm_arch_vcpu_ioctl_run+0x92a/0xd13 [kvm] SS:ESP
0068:d7c63e70
QEMU first retrieves the supported features via KVM_GET_SUPPORTED_CPUID
and then sets them later. So guest's X86_FEATURE_XSAVE should be masked
out on hosts without X86_FEATURE_XSAVE, making kvm_set_cr4 with
X86_CR4_OSXSAVE fail. Userspaces that allow specifying guest cpuid with
X86_FEATURE_XSAVE even on hosts that do not support it, might be
susceptible to this attack from inside the guest as well.
Allow setting X86_CR4_OSXSAVE bit only if host has XSAVE support.
Signed-off-by: Petr Matousek <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]> |
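The guard is a single condition in CR4 validation. A hedged sketch; cpu_has_xsave stands in for whatever host-capability test the real code uses:

/* Reject KVM_SET_SREGS attempts to turn on CR4.OSXSAVE when the host
 * cannot back it: later xsetbv/xsave paths would hit invalid opcodes. */
static int check_cr4_osxsave(unsigned long cr4)
{
    if ((cr4 & X86_CR4_OSXSAVE) && !cpu_has_xsave)
        return 1;            /* caller fails the ioctl */
    return 0;
}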
static void hns_init_mac_addr(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
eth_hw_addr_random(ndev);
dev_warn(priv->dev, "No valid mac, use random mac %pM",
ndev->dev_addr);
}
} | 0 | [
"CWE-416"
] | linux | 27463ad99f738ed93c7c8b3e2e5bc8c4853a2ff2 | 275,204,573,084,110,050,000,000,000,000,000,000,000 | 10 | net: hns: Fix a skb used after free bug
The skb may be freed in hns_nic_net_xmit_hw(), which then returns
NETDEV_TX_OK; this causes hns_nic_net_xmit to use a freed skb.
BUG: KASAN: use-after-free in hns_nic_net_xmit_hw+0x62c/0x940...
[17659.112635] alloc_debug_processing+0x18c/0x1a0
[17659.117208] __slab_alloc+0x52c/0x560
[17659.120909] kmem_cache_alloc_node+0xac/0x2c0
[17659.125309] __alloc_skb+0x6c/0x260
[17659.128837] tcp_send_ack+0x8c/0x280
[17659.132449] __tcp_ack_snd_check+0x9c/0xf0
[17659.136587] tcp_rcv_established+0x5a4/0xa70
[17659.140899] tcp_v4_do_rcv+0x27c/0x620
[17659.144687] tcp_prequeue_process+0x108/0x170
[17659.149085] tcp_recvmsg+0x940/0x1020
[17659.152787] inet_recvmsg+0x124/0x180
[17659.156488] sock_recvmsg+0x64/0x80
[17659.160012] SyS_recvfrom+0xd8/0x180
[17659.163626] __sys_trace_return+0x0/0x4
[17659.167506] INFO: Freed in kfree_skbmem+0xa0/0xb0 age=23 cpu=1 pid=13
[17659.174000] free_debug_processing+0x1d4/0x2c0
[17659.178486] __slab_free+0x240/0x390
[17659.182100] kmem_cache_free+0x24c/0x270
[17659.186062] kfree_skbmem+0xa0/0xb0
[17659.189587] __kfree_skb+0x28/0x40
[17659.193025] napi_gro_receive+0x168/0x1c0
[17659.197074] hns_nic_rx_up_pro+0x58/0x90
[17659.201038] hns_nic_rx_poll_one+0x518/0xbc0
[17659.205352] hns_nic_common_poll+0x94/0x140
[17659.209576] net_rx_action+0x458/0x5e0
[17659.213363] __do_softirq+0x1b8/0x480
[17659.217062] run_ksoftirqd+0x64/0x80
[17659.220679] smpboot_thread_fn+0x224/0x310
[17659.224821] kthread+0x150/0x170
[17659.228084] ret_from_fork+0x10/0x40
BUG: KASAN: use-after-free in hns_nic_net_xmit+0x8c/0xc0...
[17751.080490] __slab_alloc+0x52c/0x560
[17751.084188] kmem_cache_alloc+0x244/0x280
[17751.088238] __build_skb+0x40/0x150
[17751.091764] build_skb+0x28/0x100
[17751.095115] __alloc_rx_skb+0x94/0x150
[17751.098900] __napi_alloc_skb+0x34/0x90
[17751.102776] hns_nic_rx_poll_one+0x180/0xbc0
[17751.107097] hns_nic_common_poll+0x94/0x140
[17751.111333] net_rx_action+0x458/0x5e0
[17751.115123] __do_softirq+0x1b8/0x480
[17751.118823] run_ksoftirqd+0x64/0x80
[17751.122437] smpboot_thread_fn+0x224/0x310
[17751.126575] kthread+0x150/0x170
[17751.129838] ret_from_fork+0x10/0x40
[17751.133454] INFO: Freed in kfree_skbmem+0xa0/0xb0 age=19 cpu=7 pid=43
[17751.139951] free_debug_processing+0x1d4/0x2c0
[17751.144436] __slab_free+0x240/0x390
[17751.148051] kmem_cache_free+0x24c/0x270
[17751.152014] kfree_skbmem+0xa0/0xb0
[17751.155543] __kfree_skb+0x28/0x40
[17751.159022] napi_gro_receive+0x168/0x1c0
[17751.163074] hns_nic_rx_up_pro+0x58/0x90
[17751.167041] hns_nic_rx_poll_one+0x518/0xbc0
[17751.171358] hns_nic_common_poll+0x94/0x140
[17751.175585] net_rx_action+0x458/0x5e0
[17751.179373] __do_softirq+0x1b8/0x480
[17751.183076] run_ksoftirqd+0x64/0x80
[17751.186691] smpboot_thread_fn+0x224/0x310
[17751.190826] kthread+0x150/0x170
[17751.194093] ret_from_fork+0x10/0x40
Fixes: 13ac695e7ea1 ("net:hns: Add support of Hip06 SoC to the Hislicon Network Subsystem")
Signed-off-by: Yunsheng Lin <[email protected]>
Signed-off-by: lipeng <[email protected]>
Reported-by: Jun He <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
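The general cure for this class of bug: read everything you need from the skb before the call that may consume it. A hedged kernel-style sketch (hw_xmit stands in for hns_nic_net_xmit_hw):

static netdev_tx_t xmit_sketch(struct sk_buff *skb, struct net_device *ndev)
{
    unsigned int len = skb->len;    /* capture before ownership transfer */
    int ret = hw_xmit(skb);         /* may free skb and return success */

    if (ret == 0) {
        ndev->stats.tx_bytes += len;    /* no skb dereference after call */
        ndev->stats.tx_packets++;
    }
    return NETDEV_TX_OK;
}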
struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
{
struct mr6_table *mrt;
struct flowi6 fl6 = {
.flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
.flowi6_oif = skb->dev->ifindex,
.flowi6_mark = skb->mark,
};
if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
return NULL;
return mrt->mroute6_sk;
} | 0 | [
"CWE-20"
] | linux | 99253eb750fda6a644d5188fb26c43bad8d5a745 | 198,293,666,267,501,650,000,000,000,000,000,000,000 | 14 | ipv6: check sk sk_type and protocol early in ip_mroute_set/getsockopt
Commit 5e1859fbcc3c ("ipv4: ipmr: various fixes and cleanups") fixed
the issue for ipv4 ipmr:
ip_mroute_setsockopt() & ip_mroute_getsockopt() should not
access/set raw_sk(sk)->ipmr_table before making sure the socket
is a raw socket, and protocol is IGMP
The same fix should be done for ipv6 ipmr as well.
This patch fixes a panic caused by overwriting the offset that
ipmr_table occupies in raw_sk(sk) when ip_mroute_setsockopt() is
invoked on a socket of another type.
Signed-off-by: Xin Long <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
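The early validation is short enough to quote in sketch form; this mirrors the ipv4 fix the message cites, with the protocol switched to ICMPv6:

/* Only a raw ICMPv6 socket legitimately owns the ipmr_table slot in
 * raw_sk(sk); refuse everything else before touching it. */
static int ip6mr_opt_sk_check(struct sock *sk)
{
    if (sk->sk_type != SOCK_RAW ||
        inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
        return -EOPNOTSUPP;
    return 0;
}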
test_bson_append_symbol (void)
{
bson_t *b;
bson_t *b2;
b = bson_new ();
b2 = get_bson ("test32.bson");
BSON_ASSERT (bson_append_symbol (b, "hello", -1, "world", -1));
BSON_ASSERT_BSON_EQUAL (b, b2);
bson_destroy (b);
bson_destroy (b2);
} | 0 | [
"CWE-125"
] | libbson | 42900956dc461dfe7fb91d93361d10737c1602b3 | 32,852,721,634,362,780,000,000,000,000,000,000,000 | 12 | CDRIVER-2269 Check for zero string length in codewscope |
int RGWDeleteLC::verify_permission()
{
bool perm;
perm = verify_bucket_permission(s, rgw::IAM::s3PutLifecycleConfiguration);
if (!perm)
return -EACCES;
return 0;
} | 0 | [
"CWE-770"
] | ceph | ab29bed2fc9f961fe895de1086a8208e21ddaddc | 292,399,781,942,518,860,000,000,000,000,000,000,000 | 9 | rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <[email protected]>
Signed-off-by: Abhishek Lekshmanan <[email protected]>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master g_conf() isn't in mimic yet, use the g_conf
global structure; also make rgw_op use the value from the req_info ceph context,
as we do for all requests
bool IsTailOfShape(Type type1, Type type2) {
auto tail_type = type1.dyn_cast<ShapedType>();
auto full_type = type2.dyn_cast<ShapedType>();
if (!tail_type || !full_type || !tail_type.hasRank() ||
!full_type.hasRank() || tail_type.getRank() > full_type.getRank())
return false;
auto i1 = tail_type.getShape().rbegin(), e1 = tail_type.getShape().rend();
auto i2 = full_type.getShape().rbegin();
return std::equal(i1, e1, i2);
} | 0 | [
"CWE-476",
"CWE-125"
] | tensorflow | d6b57f461b39fd1aa8c1b870f1b974aac3554955 | 243,087,348,272,814,100,000,000,000,000,000,000,000 | 10 | Prevent nullptr dereference in MLIR TFLite dialect/optimizer.
PiperOrigin-RevId: 387220762
Change-Id: Id136ef04bb3d36123b4685d316ae81a9ec924d6b |
dissect_80211n_mac_phy(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset, int data_len, guint32 *n_mac_flags, guint32 *ampdu_id, struct ieee_802_11_phdr *phdr)
{
proto_tree *ftree;
proto_item *ti;
ptvcursor_t *csr;
guint8 mcs;
guint8 ness;
guint16 ext_frequency;
gchar *chan_str;
ftree = proto_tree_add_subtree(tree, tvb, offset, data_len, ett_dot11n_mac_phy, NULL, "802.11n MAC+PHY");
add_ppi_field_header(tvb, ftree, &offset);
data_len -= 4; /* Subtract field header length */
if (data_len != PPI_80211N_MAC_PHY_LEN) {
proto_tree_add_expert_format(ftree, pinfo, &ei_ppi_invalid_length, tvb, offset, data_len, "Invalid length: %u", data_len);
THROW(ReportedBoundsError);
}
dissect_80211n_mac(tvb, pinfo, ftree, offset, PPI_80211N_MAC_LEN,
FALSE, n_mac_flags, ampdu_id, phdr);
offset += PPI_80211N_MAC_PHY_OFF;
csr = ptvcursor_new(ftree, tvb, offset);
mcs = tvb_get_guint8(tvb, ptvcursor_current_offset(csr));
if (mcs != 255) {
phdr->phy_info.info_11n.presence_flags |= PHDR_802_11N_HAS_MCS_INDEX;
phdr->phy_info.info_11n.mcs_index = mcs;
}
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_mcs, 1, 255);
ness = tvb_get_guint8(tvb, ptvcursor_current_offset(csr));
phdr->phy_info.info_11n.presence_flags |= PHDR_802_11N_HAS_NESS;
phdr->phy_info.info_11n.ness = ness;
ti = ptvcursor_add(csr, hf_80211n_mac_phy_num_streams, 1, ENC_LITTLE_ENDIAN);
if (tvb_get_guint8(tvb, ptvcursor_current_offset(csr) - 1) == 0)
proto_item_append_text(ti, " (unknown)");
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_rssi_combined, 1, 255);
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_rssi_ant0_ctl, 1, 255);
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_rssi_ant1_ctl, 1, 255);
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_rssi_ant2_ctl, 1, 255);
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_rssi_ant3_ctl, 1, 255);
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_rssi_ant0_ext, 1, 255);
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_rssi_ant1_ext, 1, 255);
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_rssi_ant2_ext, 1, 255);
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_rssi_ant3_ext, 1, 255);
ext_frequency = tvb_get_letohs(ptvcursor_tvbuff(csr), ptvcursor_current_offset(csr));
chan_str = ieee80211_mhz_to_str(ext_frequency);
proto_tree_add_uint_format(ptvcursor_tree(csr), hf_80211n_mac_phy_ext_chan_freq, ptvcursor_tvbuff(csr),
ptvcursor_current_offset(csr), 2, ext_frequency, "Ext. Channel frequency: %s", chan_str);
g_free(chan_str);
ptvcursor_advance(csr, 2);
ptvcursor_add_with_subtree(csr, hf_80211n_mac_phy_ext_chan_flags, 2, ENC_LITTLE_ENDIAN,
ett_dot11n_mac_phy_ext_channel_flags);
ptvcursor_add_no_advance(csr, hf_80211n_mac_phy_ext_chan_flags_turbo, 2, ENC_LITTLE_ENDIAN);
ptvcursor_add_no_advance(csr, hf_80211n_mac_phy_ext_chan_flags_cck, 2, ENC_LITTLE_ENDIAN);
ptvcursor_add_no_advance(csr, hf_80211n_mac_phy_ext_chan_flags_ofdm, 2, ENC_LITTLE_ENDIAN);
ptvcursor_add_no_advance(csr, hf_80211n_mac_phy_ext_chan_flags_2ghz, 2, ENC_LITTLE_ENDIAN);
ptvcursor_add_no_advance(csr, hf_80211n_mac_phy_ext_chan_flags_5ghz, 2, ENC_LITTLE_ENDIAN);
ptvcursor_add_no_advance(csr, hf_80211n_mac_phy_ext_chan_flags_passive, 2, ENC_LITTLE_ENDIAN);
ptvcursor_add_no_advance(csr, hf_80211n_mac_phy_ext_chan_flags_dynamic, 2, ENC_LITTLE_ENDIAN);
ptvcursor_add(csr, hf_80211n_mac_phy_ext_chan_flags_gfsk, 2, ENC_LITTLE_ENDIAN);
ptvcursor_pop_subtree(csr);
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_dbm_ant0signal, 1, 0x80); /* -128 */
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_dbm_ant0noise, 1, 0x80);
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_dbm_ant1signal, 1, 0x80);
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_dbm_ant1noise, 1, 0x80);
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_dbm_ant2signal, 1, 0x80);
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_dbm_ant2noise, 1, 0x80);
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_dbm_ant3signal, 1, 0x80);
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_dbm_ant3noise, 1, 0x80);
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_evm0, 4, 0);
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_evm1, 4, 0);
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_evm2, 4, 0);
ptvcursor_add_invalid_check(csr, hf_80211n_mac_phy_evm3, 4, 0);
ptvcursor_free(csr);
} | 0 | [
"CWE-20"
] | wireshark | 2c13e97d656c1c0ac4d76eb9d307664aae0e0cf7 | 246,540,774,878,459,570,000,000,000,000,000,000,000 | 82 | The WTAP_ENCAP_ETHERNET dissector needs to be passed a struct eth_phdr.
We now require that. Make it so.
Bug: 12440
Change-Id: Iffee520976b013800699bde3c6092a3e86be0d76
Reviewed-on: https://code.wireshark.org/review/15424
Reviewed-by: Guy Harris <[email protected]> |
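The call-site change the message implies looks roughly like this hedged fragment; the handle name and FCS convention are assumptions about the dissector in question:

struct eth_phdr eth;

eth.fcs_len = -1;   /* FCS presence unknown for this capture */
call_dissector_with_data(eth_handle, next_tvb, pinfo, tree, &eth);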
static void do_interrupt_requests(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
{
vmx_update_window_states(vcpu);
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
GUEST_INTR_STATE_STI |
GUEST_INTR_STATE_MOV_SS);
if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
if (vcpu->arch.interrupt.pending) {
enable_nmi_window(vcpu);
} else if (vcpu->arch.nmi_window_open) {
vcpu->arch.nmi_pending = false;
vcpu->arch.nmi_injected = true;
} else {
enable_nmi_window(vcpu);
return;
}
}
if (vcpu->arch.nmi_injected) {
vmx_inject_nmi(vcpu);
if (vcpu->arch.nmi_pending)
enable_nmi_window(vcpu);
else if (vcpu->arch.irq_summary
|| kvm_run->request_interrupt_window)
enable_irq_window(vcpu);
return;
}
if (vcpu->arch.interrupt_window_open) {
if (vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending)
kvm_do_inject_irq(vcpu);
if (vcpu->arch.interrupt.pending)
vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
}
if (!vcpu->arch.interrupt_window_open &&
(vcpu->arch.irq_summary || kvm_run->request_interrupt_window))
enable_irq_window(vcpu);
} | 0 | [
"CWE-20"
] | linux-2.6 | 16175a796d061833aacfbd9672235f2d2725df65 | 44,928,085,267,741,750,000,000,000,000,000,000,000 | 42 | KVM: VMX: Don't allow uninhibited access to EFER on i386
vmx_set_msr() does not allow i386 guests to touch EFER, but they can still
do so through the default: label in the switch. If they set EFER_LME, they
can oops the host.
Fix by having EFER access through the normal channel (which will check for
EFER_LME) even on i386.
Reported-and-tested-by: Benjamin Gilbert <[email protected]>
Cc: [email protected]
Signed-off-by: Avi Kivity <[email protected]> |
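The fix routes the catch-all MSR case through the common handler so EFER writes are always validated. A hedged sketch of the switch shape (VMX-specific cases elided):

static int vmx_set_msr_sketch(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
    switch (msr) {
    /* ... VMX-specific MSRs handled explicitly above ... */
    default:
        /* common path validates EFER, rejecting EFER_LME on i386 hosts */
        return kvm_set_msr_common(vcpu, msr, data);
    }
}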
static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
{
if ((req->flags & REQ_F_LINK) || io_is_fallback_req(req))
return false;
if (!(req->flags & REQ_F_FIXED_FILE) || req->io)
rb->need_iter++;
rb->reqs[rb->to_free++] = req;
if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
io_free_req_many(req->ctx, rb);
return true;
} | 0 | [] | linux | ff002b30181d30cdfbca316dadd099c3ca0d739c | 108,305,199,665,547,610,000,000,000,000,000,000,000 | 13 | io_uring: grab ->fs as part of async preparation
This passes it in to io-wq, so it assumes the right fs_struct when
executing async work that may need to do lookups.
Cc: [email protected] # 5.3+
Signed-off-by: Jens Axboe <[email protected]> |
static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
size_t count, loff_t *offset)
{
struct snd_timer_user *tu;
long result = 0, unit;
int qhead;
int err = 0;
tu = file->private_data;
unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
mutex_lock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
while ((long)count - result >= unit) {
while (!tu->qused) {
wait_queue_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
goto _error;
}
set_current_state(TASK_INTERRUPTIBLE);
init_waitqueue_entry(&wait, current);
add_wait_queue(&tu->qchange_sleep, &wait);
spin_unlock_irq(&tu->qlock);
mutex_unlock(&tu->ioctl_lock);
schedule();
mutex_lock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
remove_wait_queue(&tu->qchange_sleep, &wait);
if (tu->disconnected) {
err = -ENODEV;
goto _error;
}
if (signal_pending(current)) {
err = -ERESTARTSYS;
goto _error;
}
}
qhead = tu->qhead++;
tu->qhead %= tu->queue_size;
tu->qused--;
spin_unlock_irq(&tu->qlock);
if (tu->tread) {
if (copy_to_user(buffer, &tu->tqueue[qhead],
sizeof(struct snd_timer_tread)))
err = -EFAULT;
} else {
if (copy_to_user(buffer, &tu->queue[qhead],
sizeof(struct snd_timer_read)))
err = -EFAULT;
}
spin_lock_irq(&tu->qlock);
if (err < 0)
goto _error;
result += unit;
buffer += unit;
}
_error:
spin_unlock_irq(&tu->qlock);
mutex_unlock(&tu->ioctl_lock);
return result > 0 ? result : err;
} | 0 | [
"CWE-200"
] | linux | d11662f4f798b50d8c8743f433842c3e40fe3378 | 229,826,661,617,771,630,000,000,000,000,000,000,000 | 69 | ALSA: timer: Fix race between read and ioctl
A read from the ALSA timer device, via snd_timer_user_tread(), may
access uninitialized struct snd_timer_user fields when the read runs
concurrently with an ioctl such as snd_timer_user_tselect(). We have
already fixed the races among ioctls via a mutex, but we seem to have
forgotten the race between read and ioctl.
This patch simply extends the already-applied range of tu->ioctl_lock
to cover snd_timer_user_tread(), closing the race window.
Reported-by: Alexander Potapenko <[email protected]>
Tested-by: Alexander Potapenko <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]> |
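The locking pattern the fix applies, holding the same mutex the ioctl handlers take for the whole time the reader touches the queue fields, looks roughly like this in stand-alone pthreads C (compile with -pthread). All names are illustrative; this is not the ALSA code.

#include <pthread.h>
#include <stddef.h>

struct user_queue {
	pthread_mutex_t ioctl_lock;   /* also taken by every "ioctl" path */
	int *queue;
	size_t queue_size, qhead, qused;
};

/* Because the reader holds ioctl_lock across every access to the queue
 * fields, a concurrent tselect()-style reallocation can never be
 * observed half-done. */
static int read_one(struct user_queue *q, int *out)
{
	int got = 0;

	pthread_mutex_lock(&q->ioctl_lock);
	if (q->qused) {
		*out = q->queue[q->qhead];
		q->qhead = (q->qhead + 1) % q->queue_size;
		q->qused--;
		got = 1;
	}
	pthread_mutex_unlock(&q->ioctl_lock);
	return got;
}

int main(void)
{
	int v;
	struct user_queue q = {
		PTHREAD_MUTEX_INITIALIZER, (int[]){ 7 }, 1, 0, 1
	};
	return read_one(&q, &v) == 1 ? 0 : 1;
}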
g_tcp_select(int sck1, int sck2)
{
fd_set rfds;
struct timeval time;
int max = 0;
int rv = 0;
g_memset(&rfds,0,sizeof(fd_set));
g_memset(&time,0,sizeof(struct timeval));
time.tv_sec = 0;
time.tv_usec = 0;
FD_ZERO(&rfds);
if (sck1 > 0)
{
FD_SET(((unsigned int)sck1), &rfds);
}
if (sck2 > 0)
{
FD_SET(((unsigned int)sck2), &rfds);
}
max = sck1;
if (sck2 > max)
{
max = sck2;
}
rv = select(max + 1, &rfds, 0, 0, &time);
if (rv > 0)
{
rv = 0;
if (FD_ISSET(((unsigned int)sck1), &rfds))
{
rv = rv | 1;
}
if (FD_ISSET(((unsigned int)sck2), &rfds))
{
rv = rv | 2;
}
}
else
{
rv = 0;
}
return rv;
} | 0 | [] | xrdp | d8f9e8310dac362bb9578763d1024178f94f4ecc | 156,828,684,955,436,360,000,000,000,000,000,000,000 | 45 | move temp files from /tmp to /tmp/.xrdp |
grub_disk_cache_invalidate_all (void)
{
unsigned i;
for (i = 0; i < GRUB_DISK_CACHE_NUM; i++)
{
struct grub_disk_cache *cache = grub_disk_cache_table + i;
if (cache->data && ! cache->lock)
{
grub_free (cache->data);
cache->data = 0;
}
}
} | 0 | [
"CWE-20",
"CWE-119"
] | radare2 | c57997e76ec70862174a1b3b3aeb62a6f8570e85 | 281,739,857,702,911,350,000,000,000,000,000,000,000 | 15 | Fix r2_hbo_grub_memmove ext2 crash |
passIsComma ()
{
pass_Codes passCode = passGetScriptToken ();
if (passCode != pass_comma)
{
compileError (passNested, "',' expected");
return 0;
}
return 1;
} | 0 | [] | liblouis | dc97ef791a4fae9da11592c79f9f79e010596e0c | 330,091,689,744,546,200,000,000,000,000,000,000,000 | 10 | Merge branch 'table_resolver' |
static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending)
{
struct tipc_aead *tmp1, *tmp2 = NULL;
struct tipc_key key;
bool aligned = false;
u8 new_passive = 0;
int x;
spin_lock(&rx->lock);
key = rx->key;
if (key.pending == new_pending) {
aligned = true;
goto exit;
}
if (key.active)
goto exit;
if (!key.pending)
goto exit;
if (tipc_aead_users(rx->aead[key.pending]) > 0)
goto exit;
/* Try to "isolate" this pending key first */
tmp1 = tipc_aead_rcu_ptr(rx->aead[key.pending], &rx->lock);
if (!refcount_dec_if_one(&tmp1->refcnt))
goto exit;
rcu_assign_pointer(rx->aead[key.pending], NULL);
/* Move passive key if any */
if (key.passive) {
tmp2 = rcu_replace_pointer(rx->aead[key.passive], tmp2, lockdep_is_held(&rx->lock));
x = (key.passive - key.pending + new_pending) % KEY_MAX;
new_passive = (x <= 0) ? x + KEY_MAX : x;
}
/* Re-allocate the key(s) */
tipc_crypto_key_set_state(rx, new_passive, 0, new_pending);
rcu_assign_pointer(rx->aead[new_pending], tmp1);
if (new_passive)
rcu_assign_pointer(rx->aead[new_passive], tmp2);
refcount_set(&tmp1->refcnt, 1);
aligned = true;
pr_info_ratelimited("%s: key[%d] -> key[%d]\n", rx->name, key.pending,
new_pending);
exit:
spin_unlock(&rx->lock);
return aligned;
} | 0 | [
"CWE-20"
] | linux | fa40d9734a57bcbfa79a280189799f76c88f7bb0 | 145,291,035,129,984,400,000,000,000,000,000,000,000 | 48 | tipc: fix size validations for the MSG_CRYPTO type
The function tipc_crypto_key_rcv is used to parse MSG_CRYPTO messages
to receive keys from other nodes in the cluster in order to decrypt any
further messages from them.
This patch verifies that any supplied sizes in the message body are
valid for the received message.
Fixes: 1ef6f7c9390f ("tipc: add automatic session key exchange")
Signed-off-by: Max VA <[email protected]>
Acked-by: Ying Xue <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
Acked-by: Jon Maloy <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
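The class of check the fix adds, never trusting a length field inside the message until it has been validated against the number of bytes actually received, is easy to show in isolation. The layout and names below are invented for illustration; the real MSG_CRYPTO format differs.

#include <stdint.h>
#include <string.h>

/* assumed toy layout: u32 keylen, then keylen bytes of key material */
static int recv_key(const uint8_t *data, size_t size,
                    uint8_t *out, size_t outsz)
{
	uint32_t keylen;

	if (size < sizeof(keylen))
		return -1;                     /* header must fit */
	memcpy(&keylen, data, sizeof(keylen));
	if (keylen > size - sizeof(keylen))
		return -1;                     /* body must fit in the message */
	if (keylen > outsz)
		return -1;                     /* and in the destination */
	memcpy(out, data + sizeof(keylen), keylen);
	return 0;
}

int main(void)
{
	uint8_t msg[8] = { 0xff, 0xff, 0xff, 0xff };   /* absurd keylen */
	uint8_t key[16];
	return recv_key(msg, sizeof(msg), key, sizeof(key)) == -1 ? 0 : 1;
}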
static void ax88179_get_mac_addr(struct usbnet *dev)
{
u8 mac[ETH_ALEN];
memset(mac, 0, sizeof(mac));
/* Maybe the boot loader passed the MAC address via device tree */
if (!eth_platform_get_mac_address(&dev->udev->dev, mac)) {
netif_dbg(dev, ifup, dev->net,
"MAC address read from device tree");
} else {
ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
ETH_ALEN, mac);
netif_dbg(dev, ifup, dev->net,
"MAC address read from ASIX chip");
}
if (is_valid_ether_addr(mac)) {
eth_hw_addr_set(dev->net, mac);
} else {
netdev_info(dev->net, "invalid MAC address, using random\n");
eth_hw_addr_random(dev->net);
}
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN,
dev->net->dev_addr);
} | 0 | [
"CWE-787"
] | linux | 57bc3d3ae8c14df3ceb4e17d26ddf9eeab304581 | 212,105,987,553,433,800,000,000,000,000,000,000,000 | 27 | net: usb: ax88179_178a: Fix out-of-bounds accesses in RX fixup
ax88179_rx_fixup() contains several out-of-bounds accesses that can be
triggered by a malicious (or defective) USB device, in particular:
- The metadata array (hdr_off..hdr_off+2*pkt_cnt) can be out of bounds,
causing OOB reads and (on big-endian systems) OOB endianness flips.
- A packet can overlap the metadata array, causing a later OOB
endianness flip to corrupt data used by a cloned SKB that has already
been handed off into the network stack.
- A packet SKB can be constructed whose tail is far beyond its end,
causing out-of-bounds heap data to be considered part of the SKB's
data.
I have tested that this can be used by a malicious USB device to send a
bogus ICMPv6 Echo Request and receive an ICMPv6 Echo Reply in response
that contains random kernel heap data.
It's probably also possible to get OOB writes from this on a
little-endian system somehow - maybe by triggering skb_cow() via IP
options processing -, but I haven't tested that.
Fixes: e2ca90c276e1 ("ax88179_178a: ASIX AX88179_178A USB 3.0/2.0 to gigabit ethernet adapter driver")
Cc: [email protected]
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
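The pre-iteration validation the fix calls for can be sketched as a pure function: the device-supplied metadata offset and entry count must describe a region that lies entirely inside the received buffer. The names and the 32-bit entry size here are assumptions for illustration, not the driver's exact layout.

#include <stdint.h>
#include <stddef.h>

static int rx_metadata_ok(size_t buf_len, size_t hdr_off, size_t pkt_cnt)
{
	if (pkt_cnt == 0)
		return 0;
	if (hdr_off > buf_len)
		return 0;                      /* offset beyond the buffer */
	if (pkt_cnt > (buf_len - hdr_off) / sizeof(uint32_t))
		return 0;                      /* array would run off the end */
	return 1;
}

int main(void)
{
	/* hostile device: 64 entries cannot fit in the 14 bytes after hdr_off */
	return rx_metadata_ok(1514, 1500, 64) ? 1 : 0;
}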
static struct hlist_head *policy_hash_bysel(struct net *net,
const struct xfrm_selector *sel,
unsigned short family, int dir)
{
unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
unsigned int hash;
u8 dbits;
u8 sbits;
__get_hash_thresh(net, family, dir, &dbits, &sbits);
hash = __sel_hash(sel, family, hmask, dbits, sbits);
if (hash == hmask + 1)
return &net->xfrm.policy_inexact[dir];
return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
} | 0 | [
"CWE-125"
] | ipsec | 7bab09631c2a303f87a7eb7e3d69e888673b9b7e | 293,990,017,060,900,100,000,000,000,000,000,000,000 | 18 | xfrm: policy: check policy direction value
The 'dir' parameter in xfrm_migrate() is a user-controlled byte which is used
as an array index. This can lead to an out-of-bound access, kernel lockup and
DoS. Add a check for the 'dir' value.
This fixes CVE-2017-11600.
References: https://bugzilla.redhat.com/show_bug.cgi?id=1474928
Fixes: 80c9abaabf42 ("[XFRM]: Extension for dynamic update of endpoint address(es)")
Cc: <[email protected]> # v2.6.21-rc1
Reported-by: "bo Zhang" <[email protected]>
Signed-off-by: Vladis Dronov <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]> |
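Reduced to its essence, the fix is a range check on a user-controlled array index before its first use. A stand-alone illustration follows; the table contents are invented, only the check mirrors the patch.

#include <stdio.h>

#define XFRM_POLICY_MAX 3   /* in, out, fwd */

static int migrate(unsigned int dir)
{
	static const char *name[XFRM_POLICY_MAX] = { "in", "out", "fwd" };

	if (dir >= XFRM_POLICY_MAX)
		return -1;      /* missing before the fix: dir indexed OOB */
	printf("migrating %s policies\n", name[dir]);
	return 0;
}

int main(void)
{
	return migrate(7) == -1 ? 0 : 1;   /* hostile value is rejected */
}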
static int run_userns_fn(void *data)
{
struct userns_fn_data *d = data;
char c;
// we're not sharing with the parent any more, if it was a thread
close(d->p[1]);
if (read(d->p[0], &c, 1) != 1)
return -1;
close(d->p[0]);
return d->fn(d->arg);
} | 0 | [
"CWE-59",
"CWE-61"
] | lxc | 592fd47a6245508b79fe6ac819fe6d3b2c1289be | 8,982,434,736,011,185,000,000,000,000,000,000,000 | 12 | CVE-2015-1335: Protect container mounts against symlinks
When a container starts up, lxc sets up the container's initial fstree
by doing a bunch of mounting, guided by the container configuration
file. The container config is owned by the admin or user on the host,
so we do not try to guard against bad entries. However, since the
mount target is in the container, it's possible that the container admin
could divert the mount with symbolic links. This could bypass proper
container startup (i.e. confinement of a root-owned container by the
restrictive apparmor policy, by diverting the required write to
/proc/self/attr/current), or bypass the (path-based) apparmor policy
by diverting, say, /proc to /mnt in the container.
To prevent this,
1. do not allow mounts to paths containing symbolic links
2. do not allow bind mounts from relative paths containing symbolic
links.
Details:
Define safe_mount which ensures that the container has not inserted any
symbolic links into any mount targets for mounts to be done during
container setup.
The host's mount path may contain symbolic links. As it is under the
control of the administrator, that's ok. So safe_mount begins the check
for symbolic links after the rootfs->mount, by opening that directory.
It opens each directory along the path using openat() relative to the
parent directory using O_NOFOLLOW. When the target is reached, it
mounts onto /proc/self/fd/<targetfd>.
Use safe_mount() in mount_entry(), when mounting container proc,
and when needed. In particular, safe_mount() need not be used in
any case where:
1. the mount is done in the container's namespace
2. the mount is for the container's rootfs
3. the mount is relative to a tmpfs or proc/sysfs which we have
just safe_mount()ed ourselves
Since we were using proc/net as a temporary placeholder for /proc/sys/net
during container startup, and proc/net is a symbolic link, use proc/tty
instead.
Update the lxc.container.conf manpage with details about the new
restrictions.
Finally, add a testcase to test some symbolic link possibilities.
Reported-by: Roman Fiedler
Signed-off-by: Serge Hallyn <[email protected]>
Acked-by: Stéphane Graber <[email protected]> |
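The traversal idea behind safe_mount(), opening each path component relative to its parent with O_NOFOLLOW so any planted symlink aborts the walk, then operating on /proc/self/fd/<fd>, compiles to something like the following on Linux. This is a simplified sketch with error handling trimmed, not the actual lxc implementation.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Walk relpath under rootfd one component at a time; a symlink along
 * the way makes openat() fail with ELOOP, aborting the walk. */
static int open_without_symlinks(int rootfd, char *relpath)
{
	int fd = dup(rootfd), next;
	char *tok, *save = NULL;

	for (tok = strtok_r(relpath, "/", &save); tok && fd >= 0;
	     tok = strtok_r(NULL, "/", &save)) {
		next = openat(fd, tok, O_RDONLY | O_NOFOLLOW | O_CLOEXEC);
		close(fd);
		fd = next;
	}
	return fd;   /* the caller would mount onto /proc/self/fd/<fd> */
}

int main(void)
{
	char path[] = "proc/tty";
	int root = open("/", O_RDONLY | O_CLOEXEC);
	int fd = open_without_symlinks(root, path);

	if (fd >= 0)
		close(fd);
	close(root);
	return 0;
}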
int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
BDRVQcowState *s = bs->opaque;
int ret, csize, nb_csectors, sector_offset;
uint64_t coffset;
coffset = cluster_offset & s->cluster_offset_mask;
if (s->cluster_cache_offset != coffset) {
nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
sector_offset = coffset & 511;
csize = nb_csectors * 512 - sector_offset;
BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
if (ret < 0) {
return ret;
}
if (decompress_buffer(s->cluster_cache, s->cluster_size,
s->cluster_data + sector_offset, csize) < 0) {
return -EIO;
}
s->cluster_cache_offset = coffset;
}
return 0;
} | 0 | [
"CWE-190"
] | qemu | cab60de930684c33f67d4e32c7509b567f8c445b | 139,648,111,931,561,520,000,000,000,000,000,000,000 | 24 | qcow2: Fix new L1 table size check (CVE-2014-0143)
The size in bytes is assigned to an int later, so check that instead of
the number of entries.
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]> |
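The hardening is, in spirit, a change of what gets bounded: the byte size that will eventually be stored in an int, rather than the entry count. A minimal stand-alone version, with the entry size assumed to be 8 bytes as for qcow2 L1 entries:

#include <limits.h>
#include <stdint.h>

#define L1E_SIZE 8   /* bytes per L1 table entry */

static int l1_size_ok(uint64_t l1_entries)
{
	/* l1_entries * L1E_SIZE is later assigned to an int, so bound
	 * the product, not the count */
	return l1_entries <= (uint64_t)INT_MAX / L1E_SIZE;
}

int main(void)
{
	return l1_size_ok((uint64_t)1 << 40) ? 1 : 0;   /* huge table: rejected */
}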
dnp3_udp_check_header_heur(packet_info *pinfo _U_, tvbuff_t *tvb, int offset _U_, void *data _U_)
{
return check_dnp3_header(tvb, TRUE);
} | 0 | [
"CWE-835"
] | wireshark | 618661b22e34a59b21117db723d8ff91e064d4ba | 275,614,165,649,614,100,000,000,000,000,000,000,000 | 4 | dnp: plug a memory leak.
If we're throwing away the data, *throw away the data* - free it, as
we're not using it as the backing data for a tvbuff. |
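The point generalizes to any allocate-then-maybe-hand-off pattern: the branch that does not transfer ownership must free. A trivial sketch with invented names:

#include <stdlib.h>

/* buf is heap-allocated by the caller; ownership either moves on
 * (kept) or ends here (discarded). */
static int process(unsigned char *buf, int keep)
{
	if (!keep) {
		free(buf);   /* the line the fix adds: discard really discards */
		return 0;
	}
	/* ... otherwise buf becomes the backing data of a tvbuff ... */
	free(buf);           /* stand-in for the tvbuff taking ownership */
	return 1;
}

int main(void)
{
	unsigned char *buf = malloc(64);
	return buf ? process(buf, 0) : 1;
}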
UniqueChars normalize_header_value(JSContext *cx, MutableHandleValue value_val, size_t *value_len,
const char *fun_name) {
RootedString value_str(cx, JS::ToString(cx, value_val));
if (!value_str)
return nullptr;
size_t len;
UniqueChars value = encode(cx, value_str, &len);
if (!value)
return nullptr;
char *value_chars = value.get();
size_t start = 0;
size_t end = len;
// We follow Gecko's interpretation of what's a valid header value. After
// stripping leading and trailing whitespace, all interior line breaks and
// `\0` are considered invalid. See
// https://searchfox.org/mozilla-central/rev/9f76a47f4aa935b49754c5608a1c8e72ee358c46/netwerk/protocol/http/nsHttp.cpp#247-260
// for details.
while (start < end) {
unsigned char ch = value_chars[start];
if (ch == '\t' || ch == ' ' || ch == '\r' || ch == '\n') {
start++;
} else {
break;
}
}
while (end > start) {
unsigned char ch = value_chars[end - 1];
if (ch == '\t' || ch == ' ' || ch == '\r' || ch == '\n') {
end--;
} else {
break;
}
}
for (size_t i = start; i < end; i++) {
unsigned char ch = value_chars[i];
if (ch == '\r' || ch == '\n' || ch == '\0') {
JS_ReportErrorUTF8(cx, "%s: Invalid header value '%s'", fun_name, value_chars);
return nullptr;
}
}
if (start != 0 || end != len) {
value_str = JS_NewStringCopyUTF8N(cx, JS::UTF8Chars(value_chars + start, end - start));
if (!value_str)
return nullptr;
}
value_val.setString(value_str);
*value_len = len;
return value;
} | 0 | [
"CWE-94"
] | js-compute-runtime | 65524ffc962644e9fc39f4b368a326b6253912a9 | 158,038,104,199,597,700,000,000,000,000,000,000,000 | 56 | use random_get instead of arc4random as arc4random does not work correctly with wizer
wizer causes the seed in arc4random to be the same between executions, which is not random |
void t_cpp_generator::generate_local_reflection_pointer(std::ofstream& out, t_type* ttype) {
if (!gen_dense_) {
return;
}
indent(out) << "::apache::thrift::reflection::local::TypeSpec* " << ttype->get_name()
<< "::local_reflection = " << endl << indent() << " &"
<< local_reflection_name("typespec", ttype) << ";" << endl << endl;
} | 0 | [
"CWE-20"
] | thrift | cfaadcc4adcfde2a8232c62ec89870b73ef40df1 | 65,200,413,231,775,310,000,000,000,000,000,000,000 | 8 | THRIFT-3231 CPP: Limit recursion depth to 64
Client: cpp
Patch: Ben Craig <[email protected]> |
void tcp_release_cb(struct sock *sk)
{
unsigned long flags, nflags;
/* perform an atomic operation only if at least one flag is set */
do {
flags = sk->sk_tsq_flags;
if (!(flags & TCP_DEFERRED_ALL))
return;
nflags = flags & ~TCP_DEFERRED_ALL;
} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);
if (flags & TCPF_TSQ_DEFERRED) {
tcp_tsq_write(sk);
__sock_put(sk);
}
/* Here begins the tricky part :
* We are called from release_sock() with :
* 1) BH disabled
* 2) sk_lock.slock spinlock held
* 3) socket owned by us (sk->sk_lock.owned == 1)
*
* But following code is meant to be called from BH handlers,
* so we should keep BH disabled, but early release socket ownership
*/
sock_release_ownership(sk);
if (flags & TCPF_WRITE_TIMER_DEFERRED) {
tcp_write_timer_handler(sk);
__sock_put(sk);
}
if (flags & TCPF_DELACK_TIMER_DEFERRED) {
tcp_delack_timer_handler(sk);
__sock_put(sk);
}
if (flags & TCPF_MTU_REDUCED_DEFERRED) {
inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
__sock_put(sk);
}
} | 0 | [
"CWE-190"
] | net | 3b4929f65b0d8249f19a50245cd88ed1a2f78cff | 105,522,381,306,104,990,000,000,000,000,000,000,000 | 40 | tcp: limit payload size of sacked skbs
Jonathan Looney reported that TCP can trigger the following crash
in tcp_shifted_skb():
BUG_ON(tcp_skb_pcount(skb) < pcount);
This can happen if the remote peer has advertised the smallest
MSS that Linux TCP accepts: 48.
An skb can hold 17 fragments, and each fragment can hold 32KB
on x86, or 64KB on PowerPC.
This means that the 16-bit width of TCP_SKB_CB(skb)->tcp_gso_segs
can overflow.
Note that tcp_sendmsg() builds skbs with less than 64KB
of payload, so this problem needs SACK to be enabled.
SACK blocks allow TCP to coalesce multiple skbs in the retransmit
queue, thus filling the 17 fragments to maximal capacity.
CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs
Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Jonathan Looney <[email protected]>
Acked-by: Neal Cardwell <[email protected]>
Reviewed-by: Tyler Hicks <[email protected]>
Cc: Yuchung Cheng <[email protected]>
Cc: Bruce Curtis <[email protected]>
Cc: Jonathan Lemon <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
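The guard that prevents the BUG_ON, stripped of its TCP context, is an overflow check on a 16-bit segment counter before two skbs are merged. This is a sketch of the idea only; the real patch also clamps the payload size of sacked skbs.

#include <stdint.h>

/* tcp_gso_segs is a u16; refuse any coalescing that would wrap it */
static int can_coalesce(uint32_t pcount_to, uint32_t pcount_from)
{
	return pcount_to + pcount_from <= 0xFFFF;
}

int main(void)
{
	return can_coalesce(60000, 10000) ? 1 : 0;   /* would wrap: rejected */
}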
std::istream *istream_file(const std::string &fname, bool treat_failure_as_error)
{
LOG_FS << "Streaming " << fname << " for reading.\n";
if (fname.empty()) {
ERR_FS << "Trying to open file with empty name.\n";
bfs::ifstream *s = new bfs::ifstream();
s->clear(std::ios_base::failbit);
return s;
}
//mingw doesn't support std::basic_ifstream::basic_ifstream(const wchar_t* fname)
//that's why boost::filesystem::fstream.hpp doesn't work with mingw.
try
{
boost::iostreams::file_descriptor_source fd(iostream_path(fname), std::ios_base::binary);
//TODO: is this still used?
if (!fd.is_open() && treat_failure_as_error) {
ERR_FS << "Could not open '" << fname << "' for reading.\n";
}
return new boost::iostreams::stream<boost::iostreams::file_descriptor_source>(fd, 4096, 0);
}
catch(const std::exception& ex)
{
if(treat_failure_as_error)
{
ERR_FS << "Could not open '" << fname << "' for reading.\n";
}
bfs::ifstream *s = new bfs::ifstream();
s->clear(std::ios_base::failbit);
return s;
}
} | 0 | [
"CWE-200"
] | wesnoth | f8914468182e8d0a1551b430c0879ba236fe4d6d | 318,817,834,109,049,200,000,000,000,000,000,000,000 | 32 | Disallow inclusion of .pbl files from WML (bug #23504)
Note that this will also cause Lua wesnoth.have_file() to return false
on .pbl files. |
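The check itself is a suffix test on the requested filename. A stand-alone sketch of the rule follows; the helper name is invented, and the rationale (that .pbl files typically carry the add-on upload passphrase, hence the CWE-200 information-disclosure classification) is an assumption drawn from the bug report, not from this commit text.

#include <string.h>

static int is_legal_file(const char *name)
{
	size_t n = strlen(name);

	if (n >= 4 && strcmp(name + n - 4, ".pbl") == 0)
		return 0;   /* never serve .pbl content through WML */
	return 1;
}

int main(void)
{
	return is_legal_file("My_Addon/_server.pbl") ? 1 : 0;   /* blocked */
}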
void ComputeAsync(OpKernelContext* c, DoneCallback done) override {
auto col_params = new CollectiveParams();
auto done_with_cleanup = [col_params, done = std::move(done)]() {
done();
col_params->Unref();
};
OP_REQUIRES_OK_ASYNC(c,
FillCollectiveParams(col_params, BROADCAST_COLLECTIVE,
/*group_size*/ c->input(0),
/*group_key*/ c->input(1),
/*instance_key*/ c->input(2)),
done_with_cleanup);
col_params->is_source = false;
TensorShape output_shape;
OP_REQUIRES_OK_ASYNC(c, tensor::MakeShape(c->input(3), &output_shape),
done_with_cleanup);
col_params->instance.shape = output_shape;
// Add a default value for subdiv offsets, which is the same as the default
// value in the V1 op's attribute.
col_params->instance.impl_details.subdiv_offsets.push_back(0);
VLOG(1) << "CollectiveBcastRecvV2 group_size "
<< col_params->group.group_size << " group_key "
<< col_params->group.group_key << " instance_key "
<< col_params->instance.instance_key;
Tensor* output = nullptr;
OP_REQUIRES_OK_ASYNC(
c, c->allocate_output(0, col_params->instance.shape, &output),
done_with_cleanup);
Run(c, col_params, std::move(done_with_cleanup));
} | 0 | [
"CWE-416"
] | tensorflow | ca38dab9d3ee66c5de06f11af9a4b1200da5ef75 | 168,870,774,202,321,490,000,000,000,000,000,000,000 | 30 | Fix undefined behavior in CollectiveReduceV2 and others
We should not call done after it's moved.
PiperOrigin-RevId: 400838185
Change-Id: Ifc979740054b8f8c6f4d50acc89472fe60c4fdb1 |
void notrace __ppc64_runlatch_on(void)
{
struct thread_info *ti = current_thread_info();
if (cpu_has_feature(CPU_FTR_ARCH_206)) {
/*
* Least significant bit (RUN) is the only writable bit of
* the CTRL register, so we can avoid mfspr. 2.06 is not the
* earliest ISA where this is the case, but it's convenient.
*/
mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
} else {
unsigned long ctrl;
/*
* Some architectures (e.g., Cell) have writable fields other
* than RUN, so do the read-modify-write.
*/
ctrl = mfspr(SPRN_CTRLF);
ctrl |= CTRL_RUNLATCH;
mtspr(SPRN_CTRLT, ctrl);
}
ti->local_flags |= _TLF_RUNLATCH;
} | 0 | [
"CWE-862"
] | linux | 8205d5d98ef7f155de211f5e2eb6ca03d95a5a60 | 168,660,922,547,426,400,000,000,000,000,000,000,000 | 25 | powerpc/tm: Fix FP/VMX unavailable exceptions inside a transaction
When we take an FP unavailable exception in a transaction we have to
account for the hardware FP TM checkpointed registers being
incorrect. In this case for this process we know the current and
checkpointed FP registers must be the same (since FP wasn't used
inside the transaction) hence in the thread_struct we copy the current
FP registers to the checkpointed ones.
This copy is done in tm_reclaim_thread(). We use thread->ckpt_regs.msr
to determine if FP was on when in userspace. thread->ckpt_regs.msr
represents the state of the MSR when exiting userspace. This is setup
by check_if_tm_restore_required().
Unfortunatley there is an optimisation in giveup_all() which returns
early if tsk->thread.regs->msr (via local variable `usermsr`) has
FP=VEC=VSX=SPE=0. This optimisation means that
check_if_tm_restore_required() is not called and hence
thread->ckpt_regs.msr is not updated and will contain an old value.
This can happen if, due to load_fp=255, we start a userspace process
with MSR FP=1 and are then context switched out. In this case
thread->ckpt_regs.msr will contain FP=1. If that same process is then
context switched in and load_fp overflows, MSR will have FP=0. If that
process now enters a transaction and does an FP instruction, the FP
unavailable will not update thread->ckpt_regs.msr (the bug) and MSR
FP=1 will be retained in thread->ckpt_regs.msr. tm_reclaim_thread()
will then not perform the required memcpy and the checkpointed FP regs
in the thread struct will contain the wrong values.
The code path for this happening is:
Userspace: Kernel
Start userspace
with MSR FP/VEC/VSX/SPE=0 TM=1
< -----
...
tbegin
bne
fp instruction
FP unavailable
---- >
fp_unavailable_tm()
tm_reclaim_current()
tm_reclaim_thread()
giveup_all()
return early since FP/VMX/VSX=0
/* ckpt MSR not updated (Incorrect) */
tm_reclaim()
/* thread_struct ckpt FP regs contain junk (OK) */
/* Sees ckpt MSR FP=1 (Incorrect) */
no memcpy() performed
/* thread_struct ckpt FP regs not fixed (Incorrect) */
tm_recheckpoint()
/* Put junk in hardware checkpoint FP regs */
....
< -----
Return to userspace
with MSR TM=1 FP=1
with junk in the FP TM checkpoint
TM rollback
reads FP junk
This is a data integrity problem for the current process as the FP
registers are corrupted. It's also a security problem as the FP
registers from one process may be leaked to another.
This patch moves up check_if_tm_restore_required() in giveup_all() to
ensure thread->ckpt_regs.msr is updated correctly.
A simple testcase to replicate this will be posted to
tools/testing/selftests/powerpc/tm/tm-poison.c
Similarly for VMX.
This fixes CVE-2019-15030.
Fixes: f48e91e87e67 ("powerpc/tm: Fix FP and VMX register corruption")
Cc: [email protected] # 4.12+
Signed-off-by: Gustavo Romero <[email protected]>
Signed-off-by: Michael Neuling <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
Link: https://lore.kernel.org/r/[email protected] |
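The fix itself is an ordering change: perform the checkpointed-MSR bookkeeping before the early-return optimisation so it can never be skipped. Reduced to its shape in stand-alone C, with placeholder constants and names rather than the real ppc64 definitions:

#define MSR_FP  (1UL << 13)   /* placeholder bit positions */
#define MSR_VEC (1UL << 25)

struct thread { unsigned long ckpt_msr; };

static void check_if_tm_restore_required(struct thread *t, unsigned long msr)
{
	t->ckpt_msr = msr;   /* stand-in for the real bookkeeping */
}

static void giveup_all(struct thread *t, unsigned long usermsr)
{
	check_if_tm_restore_required(t, usermsr);   /* moved above the early-out */
	if ((usermsr & (MSR_FP | MSR_VEC)) == 0)
		return;                             /* optimisation is now safe */
	/* ... actually give up FP/VMX state here ... */
}

int main(void)
{
	struct thread t = { MSR_FP };   /* stale checkpointed MSR */
	giveup_all(&t, 0);              /* idle MSR: ckpt_msr still updated */
	return t.ckpt_msr == 0 ? 0 : 1;
}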
static void cap_release_secctx(char *secdata, u32 seclen)
{
} | 0 | [] | linux-2.6 | ee18d64c1f632043a02e6f5ba5e045bb26a5465f | 270,904,130,318,859,240,000,000,000,000,000,000,000 | 3 | KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
	key_serial_t keyring, key;
	long ret;

	keyring = keyctl_join_session_keyring(argv[1]);
	OSERROR(keyring, "keyctl_join_session_keyring");
	key = add_key("user", "a", "b", 1, keyring);
	OSERROR(key, "add_key");
	ret = keyctl(KEYCTL_SESSION_TO_PARENT);
	OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
	return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]> |
GF_Err ohdr_box_size(GF_Box *s)
{
GF_OMADRMCommonHeaderBox *ptr = (GF_OMADRMCommonHeaderBox *)s;
ptr->size += 1+1+8+2+2+2;
if (ptr->ContentID) ptr->size += strlen(ptr->ContentID);
if (ptr->RightsIssuerURL) ptr->size += strlen(ptr->RightsIssuerURL);
if (ptr->TextualHeadersLen) ptr->size += ptr->TextualHeadersLen;
return GF_OK;
} | 0 | [
"CWE-703"
] | gpac | f19668964bf422cf5a63e4dbe1d3c6c75edadcbb | 85,305,255,761,790,840,000,000,000,000,000,000,000 | 9 | fixed #1879 |
static inline void msg_init(struct uffd_msg *msg)
{
BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
/*
* Must use memset to zero out the paddings or kernel data is
* leaked to userland.
*/
memset(msg, 0, sizeof(struct uffd_msg));
} | 0 | [
"CWE-416"
] | linux | 384632e67e0829deb8015ee6ad916b180049d252 | 261,395,609,925,486,440,000,000,000,000,000,000,000 | 9 | userfaultfd: non-cooperative: fix fork use after free
When reading the event from the uffd, we put it on a temporary
fork_event list to detect if we can still access it after releasing and
retaking the event_wqh.lock.
If fork aborts and removes the event from the fork_event all is fine as
long as we're still in the userfault read context and fork_event head is
still alive.
We have to put the event, allocated on the fork kernel stack, back from
the fork_event list-head to the event_wqh head before returning from
userfaultfd_ctx_read, because the fork_event head's lifetime is limited
to the userfaultfd_ctx_read stack lifetime.
Forgetting to move the event back to its event_wqh place then results in
__remove_wait_queue(&ctx->event_wqh, &ewq->wq); in
userfaultfd_event_wait_completion to remove it from a head that has been
already freed from the reader stack.
This could only happen if resolve_userfault_fork failed (for example if
there are no file descriptors available to allocate the fork uffd). If
it succeeded it was put back correctly.
Furthermore, after find_userfault_evt receives a fork event, the forked
userfault context in fork_nctx and uwq->msg.arg.reserved.reserved1 can
be released by the fork thread as soon as the event_wqh.lock is
released. Taking a reference on the fork_nctx before dropping the lock
prevents an use after free in resolve_userfault_fork().
If the fork side aborted and it already released everything, we still
try to succeed resolve_userfault_fork(), if possible.
Fixes: 893e26e61d04eac9 ("userfaultfd: non-cooperative: Add fork() event")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Andrea Arcangeli <[email protected]>
Reported-by: Mark Rutland <[email protected]>
Tested-by: Mark Rutland <[email protected]>
Cc: Pavel Emelyanov <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: "Dr. David Alan Gilbert" <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
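The second half of the fix, pinning the context with a reference while the lock is still held so the fork side cannot free it once the lock drops, is a standard pattern. Here it is in stand-alone pthreads C with invented names:

#include <pthread.h>
#include <stddef.h>

struct ctx { int refcount; };

static struct ctx *grab_fork_ctx(pthread_mutex_t *lock, struct ctx *fork_nctx)
{
	struct ctx *c;

	pthread_mutex_lock(lock);
	c = fork_nctx;
	if (c)
		c->refcount++;   /* pinned before the unlock */
	pthread_mutex_unlock(lock);
	return c;                /* caller drops the reference when done */
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	struct ctx c = { 1 };

	return grab_fork_ctx(&lock, &c)->refcount == 2 ? 0 : 1;
}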
BSONObj spec() {
return BSON("$eq" << BSON_ARRAY(BSON("$and" << BSON_ARRAY("$a")) << 1));
} | 0 | [
"CWE-835"
] | mongo | 0a076417d1d7fba3632b73349a1fd29a83e68816 | 316,172,949,424,793,120,000,000,000,000,000,000,000 | 3 | SERVER-38070 fix infinite loop in agg expression |
SSL_CTX *SSL_CTX_new(const SSL_METHOD *meth)
{
SSL_CTX *ret=NULL;
if (meth == NULL)
{
SSLerr(SSL_F_SSL_CTX_NEW,SSL_R_NULL_SSL_METHOD_PASSED);
return(NULL);
}
if (SSL_get_ex_data_X509_STORE_CTX_idx() < 0)
{
SSLerr(SSL_F_SSL_CTX_NEW,SSL_R_X509_VERIFICATION_SETUP_PROBLEMS);
goto err;
}
ret=(SSL_CTX *)OPENSSL_malloc(sizeof(SSL_CTX));
if (ret == NULL)
goto err;
memset(ret,0,sizeof(SSL_CTX));
ret->method=meth;
ret->cert_store=NULL;
ret->session_cache_mode=SSL_SESS_CACHE_SERVER;
ret->session_cache_size=SSL_SESSION_CACHE_MAX_SIZE_DEFAULT;
ret->session_cache_head=NULL;
ret->session_cache_tail=NULL;
/* We take the system default */
ret->session_timeout=meth->get_timeout();
ret->new_session_cb=0;
ret->remove_session_cb=0;
ret->get_session_cb=0;
ret->generate_session_id=0;
memset((char *)&ret->stats,0,sizeof(ret->stats));
ret->references=1;
ret->quiet_shutdown=0;
/* ret->cipher=NULL;*/
/* ret->s2->challenge=NULL;
ret->master_key=NULL;
ret->key_arg=NULL;
ret->s2->conn_id=NULL; */
ret->info_callback=NULL;
ret->app_verify_callback=0;
ret->app_verify_arg=NULL;
ret->max_cert_list=SSL_MAX_CERT_LIST_DEFAULT;
ret->read_ahead=0;
ret->msg_callback=0;
ret->msg_callback_arg=NULL;
ret->verify_mode=SSL_VERIFY_NONE;
#if 0
ret->verify_depth=-1; /* Don't impose a limit (but x509_lu.c does) */
#endif
ret->sid_ctx_length=0;
ret->default_verify_callback=NULL;
if ((ret->cert=ssl_cert_new()) == NULL)
goto err;
ret->default_passwd_callback=0;
ret->default_passwd_callback_userdata=NULL;
ret->client_cert_cb=0;
ret->app_gen_cookie_cb=0;
ret->app_verify_cookie_cb=0;
ret->sessions=lh_SSL_SESSION_new();
if (ret->sessions == NULL) goto err;
ret->cert_store=X509_STORE_new();
if (ret->cert_store == NULL) goto err;
ssl_create_cipher_list(ret->method,
&ret->cipher_list,&ret->cipher_list_by_id,
SSL_DEFAULT_CIPHER_LIST);
if (ret->cipher_list == NULL
|| sk_SSL_CIPHER_num(ret->cipher_list) <= 0)
{
SSLerr(SSL_F_SSL_CTX_NEW,SSL_R_LIBRARY_HAS_NO_CIPHERS);
goto err2;
}
ret->param = X509_VERIFY_PARAM_new();
if (!ret->param)
goto err;
if ((ret->rsa_md5=EVP_get_digestbyname("ssl2-md5")) == NULL)
{
SSLerr(SSL_F_SSL_CTX_NEW,SSL_R_UNABLE_TO_LOAD_SSL2_MD5_ROUTINES);
goto err2;
}
if ((ret->md5=EVP_get_digestbyname("ssl3-md5")) == NULL)
{
SSLerr(SSL_F_SSL_CTX_NEW,SSL_R_UNABLE_TO_LOAD_SSL3_MD5_ROUTINES);
goto err2;
}
if ((ret->sha1=EVP_get_digestbyname("ssl3-sha1")) == NULL)
{
SSLerr(SSL_F_SSL_CTX_NEW,SSL_R_UNABLE_TO_LOAD_SSL3_SHA1_ROUTINES);
goto err2;
}
if ((ret->client_CA=sk_X509_NAME_new_null()) == NULL)
goto err;
CRYPTO_new_ex_data(CRYPTO_EX_INDEX_SSL_CTX, ret, &ret->ex_data);
ret->extra_certs=NULL;
ret->comp_methods=SSL_COMP_get_compression_methods();
ret->max_send_fragment = SSL3_RT_MAX_PLAIN_LENGTH;
#ifndef OPENSSL_NO_TLSEXT
ret->tlsext_servername_callback = 0;
ret->tlsext_servername_arg = NULL;
/* Setup RFC4507 ticket keys */
if ((RAND_pseudo_bytes(ret->tlsext_tick_key_name, 16) <= 0)
|| (RAND_bytes(ret->tlsext_tick_hmac_key, 16) <= 0)
|| (RAND_bytes(ret->tlsext_tick_aes_key, 16) <= 0))
ret->options |= SSL_OP_NO_TICKET;
ret->tlsext_status_cb = 0;
ret->tlsext_status_arg = NULL;
#endif
#ifndef OPENSSL_NO_PSK
ret->psk_identity_hint=NULL;
ret->psk_client_callback=NULL;
ret->psk_server_callback=NULL;
#endif
#ifndef OPENSSL_NO_BUF_FREELISTS
ret->freelist_max_len = SSL_MAX_BUF_FREELIST_LEN_DEFAULT;
ret->rbuf_freelist = OPENSSL_malloc(sizeof(SSL3_BUF_FREELIST));
if (!ret->rbuf_freelist)
goto err;
ret->rbuf_freelist->chunklen = 0;
ret->rbuf_freelist->len = 0;
ret->rbuf_freelist->head = NULL;
ret->wbuf_freelist = OPENSSL_malloc(sizeof(SSL3_BUF_FREELIST));
if (!ret->wbuf_freelist)
{
OPENSSL_free(ret->rbuf_freelist);
goto err;
}
ret->wbuf_freelist->chunklen = 0;
ret->wbuf_freelist->len = 0;
ret->wbuf_freelist->head = NULL;
#endif
#ifndef OPENSSL_NO_ENGINE
ret->client_cert_engine = NULL;
#ifdef OPENSSL_SSL_CLIENT_ENGINE_AUTO
#define eng_strx(x) #x
#define eng_str(x) eng_strx(x)
/* Use specific client engine automatically... ignore errors */
{
ENGINE *eng;
eng = ENGINE_by_id(eng_str(OPENSSL_SSL_CLIENT_ENGINE_AUTO));
if (!eng)
{
ERR_clear_error();
ENGINE_load_builtin_engines();
eng = ENGINE_by_id(eng_str(OPENSSL_SSL_CLIENT_ENGINE_AUTO));
}
if (!eng || !SSL_CTX_set_client_cert_engine(ret, eng))
ERR_clear_error();
}
#endif
#endif
return(ret);
err:
SSLerr(SSL_F_SSL_CTX_NEW,ERR_R_MALLOC_FAILURE);
err2:
if (ret != NULL) SSL_CTX_free(ret);
return(NULL);
} | 0 | [] | openssl | 7587347bc48e7e8a1e800e48bb0a658f1557c424 | 26,942,308,240,376,180,000,000,000,000,000,000,000 | 181 | Fix memory leak. |
BOOL wf_cliprdr_uninit(wfContext* wfc, CliprdrClientContext* cliprdr)
{
wfClipboard* clipboard;
if (!wfc || !cliprdr)
return FALSE;
clipboard = wfc->clipboard;
if (!clipboard)
return FALSE;
cliprdr->custom = NULL;
if (clipboard->hwnd)
PostMessage(clipboard->hwnd, WM_QUIT, 0, 0);
if (clipboard->thread)
{
WaitForSingleObject(clipboard->thread, INFINITE);
CloseHandle(clipboard->thread);
}
if (clipboard->response_data_event)
CloseHandle(clipboard->response_data_event);
if (clipboard->req_fevent)
CloseHandle(clipboard->req_fevent);
clear_file_array(clipboard);
clear_format_map(clipboard);
free(clipboard->format_mappings);
free(clipboard);
return TRUE;
} | 0 | [
"CWE-20"
] | FreeRDP | 0d79670a28c0ab049af08613621aa0c267f977e9 | 150,794,869,970,758,970,000,000,000,000,000,000,000 | 35 | Fixed missing input checks for file contents request
reported by Valentino Ricotta (Thalium) |
void PingStats::init() {
boost::array<double, 3> probs = {{0.75, 0.80, 0.95 }};
asQuantile = new asQuantileType(boost::accumulators::tag::extended_p_square::probabilities = probs);
dPing = 0.0;
uiPing = 0;
uiPingSort = 0;
uiUsers = 0;
uiMaxUsers = 0;
uiBandwidth = 0;
uiSent = 0;
uiRecv = 0;
uiVersion = 0;
} | 0 | [
"CWE-59",
"CWE-61"
] | mumble | e59ee87abe249f345908c7d568f6879d16bfd648 | 314,456,827,345,230,200,000,000,000,000,000,000,000 | 14 | FIX(client): Only allow "http"/"https" for URLs in ConnectDialog
Our public server list registration script doesn't have a URL scheme
whitelist for the website field.
Turns out a malicious server can register itself with a dangerous URL in
an attempt to attack a user's machine.
User interaction is required, as the URL has to be opened by
right-clicking on the server entry and clicking on "Open Webpage".
This commit introduces a client-side whitelist, which only allows "http"
and "https" schemes. We will also implement it in our public list.
In future we should probably add a warning QMessageBox informing the
user that there's no guarantee the URL is safe (regardless of the
scheme).
Thanks a lot to https://positive.security for reporting the RCE
vulnerability to us privately. |
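The whitelist is a plain scheme comparison before the URL is ever handed to the OS. The real client uses Qt's URL handling for this; an equivalent check in portable C is sketched below (case folding omitted for brevity; a production version should lowercase the scheme first).

#include <string.h>

static int scheme_allowed(const char *url)
{
	return strncmp(url, "http://", 7) == 0 ||
	       strncmp(url, "https://", 8) == 0;
}

int main(void)
{
	return scheme_allowed("file:///etc/passwd") ? 1 : 0;   /* rejected */
}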
static uint32_t phar_tar_number(char *buf, int len) /* {{{ */
{
uint32_t num = 0;
int i = 0;
while (i < len && buf[i] == ' ') {
++i;
}
while (i < len && buf[i] >= '0' && buf[i] <= '7') {
num = num * 8 + (buf[i] - '0');
++i;
}
return num;
} | 0 | [
"CWE-119"
] | php-src | e0f5d62bd6690169998474b62f92a8c5ddf0e699 | 64,057,427,243,023,280,000,000,000,000,000,000,000 | 16 | Fix bug #77586 - phar_tar_writeheaders_int() buffer overflow |
ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass,
const constantPoolHandle& cpool,
ciSymbol* name,
bool require_local) {
ASSERT_IN_VM;
EXCEPTION_CONTEXT;
// Now we need to check the SystemDictionary
Symbol* sym = name->get_symbol();
if (sym->byte_at(0) == 'L' &&
sym->byte_at(sym->utf8_length()-1) == ';') {
// This is a name from a signature. Strip off the trimmings.
// Call recursive to keep scope of strippedsym.
TempNewSymbol strippedsym = SymbolTable::new_symbol(sym->as_utf8()+1,
sym->utf8_length()-2,
KILL_COMPILE_ON_FATAL_(_unloaded_ciinstance_klass));
ciSymbol* strippedname = get_symbol(strippedsym);
return get_klass_by_name_impl(accessing_klass, cpool, strippedname, require_local);
}
// Check for prior unloaded klass. The SystemDictionary's answers
// can vary over time but the compiler needs consistency.
ciKlass* unloaded_klass = check_get_unloaded_klass(accessing_klass, name);
if (unloaded_klass != NULL) {
if (require_local) return NULL;
return unloaded_klass;
}
Handle loader(THREAD, (oop)NULL);
Handle domain(THREAD, (oop)NULL);
if (accessing_klass != NULL) {
loader = Handle(THREAD, accessing_klass->loader());
domain = Handle(THREAD, accessing_klass->protection_domain());
}
// setup up the proper type to return on OOM
ciKlass* fail_type;
if (sym->byte_at(0) == '[') {
fail_type = _unloaded_ciobjarrayklass;
} else {
fail_type = _unloaded_ciinstance_klass;
}
Klass* found_klass;
{
ttyUnlocker ttyul; // release tty lock to avoid ordering problems
MutexLocker ml(Compile_lock);
Klass* kls;
if (!require_local) {
kls = SystemDictionary::find_constrained_instance_or_array_klass(sym, loader,
KILL_COMPILE_ON_FATAL_(fail_type));
} else {
kls = SystemDictionary::find_instance_or_array_klass(sym, loader, domain,
KILL_COMPILE_ON_FATAL_(fail_type));
}
found_klass = kls;
}
// If we fail to find an array klass, look again for its element type.
// The element type may be available either locally or via constraints.
// In either case, if we can find the element type in the system dictionary,
// we must build an array type around it. The CI requires array klasses
// to be loaded if their element klasses are loaded, except when memory
// is exhausted.
if (sym->byte_at(0) == '[' &&
(sym->byte_at(1) == '[' || sym->byte_at(1) == 'L')) {
// We have an unloaded array.
// Build it on the fly if the element class exists.
TempNewSymbol elem_sym = SymbolTable::new_symbol(sym->as_utf8()+1,
sym->utf8_length()-1,
KILL_COMPILE_ON_FATAL_(fail_type));
// Get element ciKlass recursively.
ciKlass* elem_klass =
get_klass_by_name_impl(accessing_klass,
cpool,
get_symbol(elem_sym),
require_local);
if (elem_klass != NULL && elem_klass->is_loaded()) {
// Now make an array for it
return ciObjArrayKlass::make_impl(elem_klass);
}
}
if (found_klass == NULL && !cpool.is_null() && cpool->has_preresolution()) {
// Look inside the constant pool for pre-resolved class entries.
for (int i = cpool->length() - 1; i >= 1; i--) {
if (cpool->tag_at(i).is_klass()) {
Klass* kls = cpool->resolved_klass_at(i);
if (kls->name() == sym) {
found_klass = kls;
break;
}
}
}
}
if (found_klass != NULL) {
// Found it. Build a CI handle.
return get_klass(found_klass);
}
if (require_local) return NULL;
// Not yet loaded into the VM, or not governed by loader constraints.
// Make a CI representative for it.
return get_unloaded_klass(accessing_klass, name);
} | 0 | [] | jdk11u | 6c0ba0785a2f0900be301f72764cf4dcfa720991 | 394,340,420,668,522,400,000,000,000,000,000,000 | 107 | 8281859: Improve class compilation
Reviewed-by: mbaesken
Backport-of: 3ac62a66efd05d0842076dd4cfbea0e53b12630f |
TRIO_PUBLIC_STRING int trio_contains TRIO_ARGS2((string, substring), TRIO_CONST char* string,
TRIO_CONST char* substring)
{
assert(string);
assert(substring);
return (0 != strstr(string, substring));
} | 0 | [
"CWE-190",
"CWE-125"
] | FreeRDP | 05cd9ea2290d23931f615c1b004d4b2e69074e27 | 182,713,925,492,319,200,000,000,000,000,000,000,000 | 8 | Fixed TrioParse and trio_length limits.
CVE-2020-4030 thanks to @antonio-morales for finding this. |
static Window XSelectWindow(Display *display,RectangleInfo *crop_info)
{
#define MinimumCropArea (unsigned int) 9
Cursor
target_cursor;
GC
annotate_context;
int
presses,
x_offset,
y_offset;
Status
status;
Window
root_window,
target_window;
XEvent
event;
XGCValues
context_values;
/*
Initialize graphic context.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(display != (Display *) NULL);
assert(crop_info != (RectangleInfo *) NULL);
root_window=XRootWindow(display,XDefaultScreen(display));
context_values.background=XBlackPixel(display,XDefaultScreen(display));
context_values.foreground=XWhitePixel(display,XDefaultScreen(display));
context_values.function=GXinvert;
context_values.plane_mask=
context_values.background ^ context_values.foreground;
context_values.subwindow_mode=IncludeInferiors;
annotate_context=XCreateGC(display,root_window,(size_t) (GCBackground |
GCForeground | GCFunction | GCSubwindowMode),&context_values);
if (annotate_context == (GC) NULL)
return(MagickFalse);
/*
Grab the pointer using target cursor.
*/
target_cursor=XMakeCursor(display,root_window,XDefaultColormap(display,
XDefaultScreen(display)),(char *) "white",(char *) "black");
status=XGrabPointer(display,root_window,MagickFalse,(unsigned int)
(ButtonPressMask | ButtonReleaseMask | ButtonMotionMask),GrabModeSync,
GrabModeAsync,root_window,target_cursor,CurrentTime);
if (status != GrabSuccess)
ThrowXWindowFatalException(XServerError,"UnableToGrabMouse","");
/*
Select a window.
*/
crop_info->width=0;
crop_info->height=0;
presses=0;
target_window=(Window) NULL;
x_offset=0;
y_offset=0;
do
{
if ((crop_info->width*crop_info->height) >= MinimumCropArea)
(void) XDrawRectangle(display,root_window,annotate_context,
(int) crop_info->x,(int) crop_info->y,(unsigned int) crop_info->width-1,
(unsigned int) crop_info->height-1);
/*
Allow another event.
*/
(void) XAllowEvents(display,SyncPointer,CurrentTime);
(void) XWindowEvent(display,root_window,ButtonPressMask |
ButtonReleaseMask | ButtonMotionMask,&event);
if ((crop_info->width*crop_info->height) >= MinimumCropArea)
(void) XDrawRectangle(display,root_window,annotate_context,
(int) crop_info->x,(int) crop_info->y,(unsigned int) crop_info->width-1,
(unsigned int) crop_info->height-1);
switch (event.type)
{
case ButtonPress:
{
target_window=XGetSubwindow(display,event.xbutton.subwindow,
event.xbutton.x,event.xbutton.y);
if (target_window == (Window) NULL)
target_window=root_window;
x_offset=event.xbutton.x_root;
y_offset=event.xbutton.y_root;
crop_info->x=(ssize_t) x_offset;
crop_info->y=(ssize_t) y_offset;
crop_info->width=0;
crop_info->height=0;
presses++;
break;
}
case ButtonRelease:
{
presses--;
break;
}
case MotionNotify:
{
/*
Discard pending button motion events.
*/
while (XCheckMaskEvent(display,ButtonMotionMask,&event)) ;
crop_info->x=(ssize_t) event.xmotion.x;
crop_info->y=(ssize_t) event.xmotion.y;
/*
Check boundary conditions.
*/
if ((int) crop_info->x < x_offset)
crop_info->width=(size_t) (x_offset-crop_info->x);
else
{
crop_info->width=(size_t) (crop_info->x-x_offset);
crop_info->x=(ssize_t) x_offset;
}
if ((int) crop_info->y < y_offset)
crop_info->height=(size_t) (y_offset-crop_info->y);
else
{
crop_info->height=(size_t) (crop_info->y-y_offset);
crop_info->y=(ssize_t) y_offset;
}
}
default:
break;
}
} while ((target_window == (Window) NULL) || (presses > 0));
(void) XUngrabPointer(display,CurrentTime);
(void) XFreeCursor(display,target_cursor);
(void) XFreeGC(display,annotate_context);
if ((crop_info->width*crop_info->height) < MinimumCropArea)
{
crop_info->width=0;
crop_info->height=0;
}
if ((crop_info->width != 0) && (crop_info->height != 0))
target_window=root_window;
return(target_window);
} | 0 | [
"CWE-401"
] | ImageMagick6 | 13801f5d0bd7a6fdb119682d34946636afdb2629 | 124,095,154,954,425,870,000,000,000,000,000,000,000 | 144 | https://github.com/ImageMagick/ImageMagick/issues/1531 |
static inline int keepalive_time_when(const struct tcp_sock *tp)
{
return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
} | 0 | [] | linux | 7bced397510ab569d31de4c70b39e13355046387 | 269,785,835,579,204,040,000,000,000,000,000,000,000 | 4 | net_dma: simple removal
Per commit "77873803363c net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.
This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma induced changes is deferred to
subsequent patches.
Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():
https://lkml.org/lkml/2014/9/3/177
Cc: Dave Jiang <[email protected]>
Cc: Vinod Koul <[email protected]>
Cc: David Whipple <[email protected]>
Cc: Alexander Duyck <[email protected]>
Cc: <[email protected]>
Reported-by: Roman Gushchin <[email protected]>
Acked-by: David S. Miller <[email protected]>
Signed-off-by: Dan Williams <[email protected]> |
GF_Box *mhap_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_MHACompatibleProfilesBox, GF_ISOM_BOX_TYPE_MHAP);
return (GF_Box *)tmp;
} | 0 | [
"CWE-476",
"CWE-787"
] | gpac | b8f8b202d4fc23eb0ab4ce71ae96536ca6f5d3f8 | 45,770,181,798,366,430,000,000,000,000,000,000,000 | 5 | fixed #1757 |