Dataset columns (name, type, observed range):

  func        string    lengths 0 .. 484k      (function source code)
  target      int64     values 0 .. 1
  cwe         list      lengths 0 .. 4
  project     string    799 distinct values
  commit_id   string    length 40 (git SHA-1)
  hash        float64   min 1,215,700,430,453,689,100,000,000B, max 340,281,914,521,452,260,000,000,000,000B
  size        int64     values 1 .. 24k
  message     string    lengths 0 .. 13.3k     (commit message)

Each record below is one row: the function source (func), the labeled metadata fields (target, cwe, project, commit_id, hash, size), then the commit message (message).
cmsHPROFILE CMSEXPORT cmsCreateRGBProfile(const cmsCIExyY* WhitePoint, const cmsCIExyYTRIPLE* Primaries, cmsToneCurve* const TransferFunction[3]) { return cmsCreateRGBProfileTHR(NULL, WhitePoint, Primaries, TransferFunction); }
target: 0
cwe: []
project: Little-CMS
commit_id: 41d222df1bc6188131a8f46c32eab0a4d4cdf1b6
hash: 173,727,665,728,203,600,000,000,000,000,000,000,000
size: 6
Memory squeezing fix: lcms2 cmsPipeline construction When creating a new pipeline, lcms would often try to allocate a stage and pass it to cmsPipelineInsertStage without checking whether the allocation succeeded. cmsPipelineInsertStage would then assert (or crash) if it had not. The fix here is to change cmsPipelineInsertStage to check and return an error value. All calling code is then checked to test this return value and cope.
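The pattern this message describes, sketched against the public lcms2 API; the construction flow and the ownership-on-failure handling here are assumptions, not the exact upstream diff:

```c
#include <lcms2.h>

/* Build a small pipeline with every allocation and insertion checked. */
static cmsPipeline *BuildPipelineChecked(cmsContext ContextID,
                                         cmsToneCurve *const Curves[3])
{
    cmsPipeline *Lut = cmsPipelineAlloc(ContextID, 3, 3);
    if (Lut == NULL)
        return NULL;                                /* allocation can fail */

    cmsStage *mpe = cmsStageAllocToneCurves(ContextID, 3, Curves);
    if (mpe == NULL) {                              /* so can stage creation */
        cmsPipelineFree(Lut);
        return NULL;
    }

    /* After the fix, insertion reports failure instead of asserting. */
    if (!cmsPipelineInsertStage(Lut, cmsAT_BEGIN, mpe)) {
        cmsStageFree(mpe);      /* assumption: stage stays ours on failure */
        cmsPipelineFree(Lut);
        return NULL;
    }
    return Lut;
}
```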
static inline void set_ethernet_addr(rtl8150_t * dev) { u8 node_id[6]; get_registers(dev, IDR, sizeof(node_id), node_id); memcpy(dev->netdev->dev_addr, node_id, sizeof(node_id)); }
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: linux
commit_id: 7926aff5c57b577ab0f43364ff0c59d968f6a414
hash: 260,768,421,921,614,560,000,000,000,000,000,000,000
size: 7
rtl8150: Use heap buffers for all register access Allocating USB buffers on the stack is not portable, and no longer works on x86_64 (with VMAP_STACK enabled as per default). Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Ben Hutchings <[email protected]> Signed-off-by: David S. Miller <[email protected]>
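A sketch of the heap-buffer get_registers() this message describes; the request constants match the rtl8150 driver's defines, but the error handling is a simplified assumption:

```c
/* Bounce USB control reads through a kmalloc'd buffer: stack memory is not
 * DMA-safe and breaks outright with VMAP_STACK. */
static int get_registers(rtl8150_t *dev, u16 indx, u16 size, void *data)
{
    void *buf;
    int ret;

    buf = kmalloc(size, GFP_NOIO);                 /* heap, not the stack */
    if (!buf)
        return -ENOMEM;

    ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
                          RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
                          indx, 0, buf, size, 500);
    if (ret > 0 && ret <= size)
        memcpy(data, buf, ret);                    /* copy out to the caller */
    kfree(buf);
    return ret;
}
```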
static void asmlinkage smm_do_relocation(void *arg) { const struct smm_module_params *p; const struct smm_runtime *runtime; int cpu; uintptr_t curr_smbase; uintptr_t perm_smbase; p = arg; runtime = p->runtime; cpu = p->cpu; curr_smbase = runtime->smbase; if (cpu >= CONFIG_MAX_CPUS) { printk(BIOS_CRIT, "Invalid CPU number assigned in SMM stub: %d\n", cpu); return; } /* * The permanent handler runs with all cpus concurrently. Precalculate * the location of the new SMBASE. If using SMM modules then this * calculation needs to match that of the module loader. */ perm_smbase = mp_state.perm_smbase; perm_smbase -= cpu * runtime->save_state_size; printk(BIOS_DEBUG, "New SMBASE 0x%08lx\n", perm_smbase); /* Setup code checks this callback for validity. */ mp_state.ops.relocation_handler(cpu, curr_smbase, perm_smbase); if (CONFIG(STM)) { uintptr_t mseg; mseg = mp_state.perm_smbase + (mp_state.perm_smsize - CONFIG_MSEG_SIZE); stm_setup(mseg, p->cpu, perm_smbase, mp_state.perm_smbase, runtime->start32_offset); } }
target: 1
cwe: [ "CWE-269" ]
project: coreboot
commit_id: afb7a814783cda12f5b72167163b9109ee1d15a7
hash: 169,755,918,165,816,490,000,000,000,000,000,000,000
size: 44
cpu/x86/smm: Introduce SMM module loader version 2 Xeon-SP Skylake Scalable Processor can have 36 CPU threads (18 cores). Current coreboot SMM is unable to handle more than ~32 CPU threads. This patch introduces a version 2 of the SMM module loader which addresses this problem. Having two versions of the SMM module loader prevents any issues to current projects. Future Xeon-SP products will be using this version of the SMM loader. Subsequent patches will enable board specific functionality for Xeon-SP. The reason for moving to version 2 is the state save area begins to encroach upon the SMI handling code when more than 32 CPU threads are in the system. This can cause system hangs, reboots, etc. The second change is related to staggered entry points with simple near jumps. In the current loader, near jumps will not work because the CPU is jumping within the same code segment. In version 2, "far" address jumps are necessary therefore protected mode must be enabled first. The SMM layout and how the CPUs are staggered are documented in the code. By making the modifications above, this allows the smm module loader to expand easily as more CPU threads are added. TEST=build for Tiogapass platform under OCP mainboard. Enable the following in Kconfig. select CPU_INTEL_COMMON_SMM select SOC_INTEL_COMMON_BLOCK_SMM select SMM_TSEG select HAVE_SMI_HANDLER select ACPI_INTEL_HARDWARE_SLEEP_VALUES Debug console will show all 36 cores relocated. Further tested by generating SMI's to port 0xb2 using XDP/ITP HW debugger and ensured all cores entering and exiting SMM properly. In addition, booted to Linux 5.4 kernel and observed no issues during mp init. Change-Id: I00a23a5f2a46110536c344254868390dbb71854c Signed-off-by: Rocky Phagura <[email protected]> Reviewed-on: https://review.coreboot.org/c/coreboot/+/43684 Tested-by: build bot (Jenkins) <[email protected]> Reviewed-by: Angel Pons <[email protected]>
static int bio_map_user_iov(struct request *rq, struct iov_iter *iter, gfp_t gfp_mask) { unsigned int max_sectors = queue_max_hw_sectors(rq->q); struct bio *bio; int ret; int j; if (!iov_iter_count(iter)) return -EINVAL; bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_VECS)); if (!bio) return -ENOMEM; bio->bi_opf |= req_op(rq); while (iov_iter_count(iter)) { struct page **pages; ssize_t bytes; size_t offs, added = 0; int npages; bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs); if (unlikely(bytes <= 0)) { ret = bytes ? bytes : -EFAULT; goto out_unmap; } npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE); if (unlikely(offs & queue_dma_alignment(rq->q))) { ret = -EINVAL; j = 0; } else { for (j = 0; j < npages; j++) { struct page *page = pages[j]; unsigned int n = PAGE_SIZE - offs; bool same_page = false; if (n > bytes) n = bytes; if (!bio_add_hw_page(rq->q, bio, page, n, offs, max_sectors, &same_page)) { if (same_page) put_page(page); break; } added += n; bytes -= n; offs = 0; } iov_iter_advance(iter, added); } /* * release the pages we didn't map into the bio, if any */ while (j < npages) put_page(pages[j++]); kvfree(pages); /* couldn't stuff something into bio? */ if (bytes) break; } ret = blk_rq_append_bio(rq, bio); if (ret) goto out_unmap; return 0; out_unmap: bio_release_pages(bio, false); bio_put(bio); return ret; }
target: 0
cwe: [ "CWE-200" ]
project: linux
commit_id: cc8f7fe1f5eab010191aa4570f27641876fa1267
hash: 112,878,778,378,100,100,000,000,000,000,000,000,000
size: 76
block-map: add __GFP_ZERO flag for alloc_page in function bio_copy_kern Add __GFP_ZERO flag for alloc_page in function bio_copy_kern to initialize the buffer of a bio. Signed-off-by: Haimin Zhang <[email protected]> Reviewed-by: Chaitanya Kulkarni <[email protected]> Reviewed-by: Christoph Hellwig <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jens Axboe <[email protected]>
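The change itself is a one-flag hardening; a sketch, with the surrounding bio_copy_kern() code assumed:

```c
/* Ask for zeroed pages so any part of the buffer the device or caller
 * never writes cannot leak stale kernel memory back to userspace. */
page = alloc_page(gfp_mask | __GFP_ZERO);
if (!page)
    goto cleanup;
```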
static Image *ReadCUTImage(const ImageInfo *image_info,ExceptionInfo *exception) { #define ThrowCUTReaderException(severity,tag) \ { \ if (palette != NULL) \ palette=DestroyImage(palette); \ if (clone_info != NULL) \ clone_info=DestroyImageInfo(clone_info); \ ThrowReaderException(severity,tag); \ } Image *image,*palette; ImageInfo *clone_info; MagickBooleanType status; MagickOffsetType offset; size_t EncodedByte; unsigned char RunCount,RunValue,RunCountMasked; CUTHeader Header; CUTPalHeader PalHeader; ssize_t depth; ssize_t i,j; ssize_t ldblk; unsigned char *BImgBuff=NULL,*ptrB; register Quantum *q; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read CUT image. */ palette=NULL; clone_info=NULL; Header.Width=ReadBlobLSBShort(image); Header.Height=ReadBlobLSBShort(image); Header.Reserved=ReadBlobLSBShort(image); if (Header.Width==0 || Header.Height==0 || Header.Reserved!=0) CUT_KO: ThrowCUTReaderException(CorruptImageError,"ImproperImageHeader"); /*---This code checks first line of image---*/ EncodedByte=ReadBlobLSBShort(image); RunCount=(unsigned char) ReadBlobByte(image); RunCountMasked=RunCount & 0x7F; ldblk=0; while((int) RunCountMasked!=0) /*end of line?*/ { i=1; if((int) RunCount<0x80) i=(ssize_t) RunCountMasked; offset=SeekBlob(image,TellBlob(image)+i,SEEK_SET); if (offset < 0) ThrowCUTReaderException(CorruptImageError,"ImproperImageHeader"); if(EOFBlob(image) != MagickFalse) goto CUT_KO; /*wrong data*/ EncodedByte-=i+1; ldblk+=(ssize_t) RunCountMasked; RunCount=(unsigned char) ReadBlobByte(image); if(EOFBlob(image) != MagickFalse) goto CUT_KO; /*wrong data: unexpected eof in line*/ RunCountMasked=RunCount & 0x7F; } if(EncodedByte!=1) goto CUT_KO; /*wrong data: size incorrect*/ i=0; /*guess a number of bit planes*/ if(ldblk==(int) Header.Width) i=8; if(2*ldblk==(int) Header.Width) i=4; if(8*ldblk==(int) Header.Width) i=1; if(i==0) goto CUT_KO; /*wrong data: incorrect bit planes*/ depth=i; image->columns=Header.Width; image->rows=Header.Height; image->depth=8; image->colors=(size_t) (GetQuantumRange(1UL*i)+1); if (image_info->ping != MagickFalse) goto Finish; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); /* ----- Do something with palette ----- */ if ((clone_info=CloneImageInfo(image_info)) == NULL) goto NoPalette; i=(ssize_t) strlen(clone_info->filename); j=i; while(--i>0) { if(clone_info->filename[i]=='.') { break; } if(clone_info->filename[i]=='/' || clone_info->filename[i]=='\\' || clone_info->filename[i]==':' ) { i=j; break; } } (void) CopyMagickString(clone_info->filename+i,".PAL",(size_t) (MagickPathExtent-i)); if((clone_info->file=fopen_utf8(clone_info->filename,"rb"))==NULL) { (void) CopyMagickString(clone_info->filename+i,".pal",(size_t) (MagickPathExtent-i)); if((clone_info->file=fopen_utf8(clone_info->filename,"rb"))==NULL) { clone_info->filename[i]='\0'; if((clone_info->file=fopen_utf8(clone_info->filename,"rb"))==NULL) { clone_info=DestroyImageInfo(clone_info); clone_info=NULL; goto NoPalette; } } } if( 
(palette=AcquireImage(clone_info,exception))==NULL ) goto NoPalette; status=OpenBlob(clone_info,palette,ReadBinaryBlobMode,exception); if (status == MagickFalse) { ErasePalette: palette=DestroyImage(palette); palette=NULL; goto NoPalette; } if(palette!=NULL) { (void) ReadBlob(palette,2,(unsigned char *) PalHeader.FileId); if(strncmp(PalHeader.FileId,"AH",2) != 0) goto ErasePalette; PalHeader.Version=ReadBlobLSBShort(palette); PalHeader.Size=ReadBlobLSBShort(palette); PalHeader.FileType=(char) ReadBlobByte(palette); PalHeader.SubType=(char) ReadBlobByte(palette); PalHeader.BoardID=ReadBlobLSBShort(palette); PalHeader.GraphicsMode=ReadBlobLSBShort(palette); PalHeader.MaxIndex=ReadBlobLSBShort(palette); PalHeader.MaxRed=ReadBlobLSBShort(palette); PalHeader.MaxGreen=ReadBlobLSBShort(palette); PalHeader.MaxBlue=ReadBlobLSBShort(palette); (void) ReadBlob(palette,20,(unsigned char *) PalHeader.PaletteId); if (EOFBlob(image)) ThrowCUTReaderException(CorruptImageError,"UnexpectedEndOfFile"); if(PalHeader.MaxIndex<1) goto ErasePalette; image->colors=PalHeader.MaxIndex+1; if (AcquireImageColormap(image,image->colors,exception) == MagickFalse) goto NoMemory; if(PalHeader.MaxRed==0) PalHeader.MaxRed=(unsigned int) QuantumRange; /*avoid division by 0*/ if(PalHeader.MaxGreen==0) PalHeader.MaxGreen=(unsigned int) QuantumRange; if(PalHeader.MaxBlue==0) PalHeader.MaxBlue=(unsigned int) QuantumRange; for(i=0;i<=(int) PalHeader.MaxIndex;i++) { /*this may be wrong- I don't know why is palette such strange*/ j=(ssize_t) TellBlob(palette); if((j % 512)>512-6) { j=((j / 512)+1)*512; offset=SeekBlob(palette,j,SEEK_SET); if (offset < 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } image->colormap[i].red=(Quantum) ReadBlobLSBShort(palette); if (QuantumRange != (Quantum) PalHeader.MaxRed) { image->colormap[i].red=ClampToQuantum(((double) image->colormap[i].red*QuantumRange+(PalHeader.MaxRed>>1))/ PalHeader.MaxRed); } image->colormap[i].green=(Quantum) ReadBlobLSBShort(palette); if (QuantumRange != (Quantum) PalHeader.MaxGreen) { image->colormap[i].green=ClampToQuantum (((double) image->colormap[i].green*QuantumRange+(PalHeader.MaxGreen>>1))/PalHeader.MaxGreen); } image->colormap[i].blue=(Quantum) ReadBlobLSBShort(palette); if (QuantumRange != (Quantum) PalHeader.MaxBlue) { image->colormap[i].blue=ClampToQuantum (((double)image->colormap[i].blue*QuantumRange+(PalHeader.MaxBlue>>1))/PalHeader.MaxBlue); } } if (EOFBlob(image)) ThrowCUTReaderException(CorruptImageError,"UnexpectedEndOfFile"); } NoPalette: if(palette==NULL) { image->colors=256; if (AcquireImageColormap(image,image->colors,exception) == MagickFalse) { NoMemory: ThrowCUTReaderException(ResourceLimitError,"MemoryAllocationFailed"); } for (i=0; i < (ssize_t)image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum((unsigned char) i); image->colormap[i].green=ScaleCharToQuantum((unsigned char) i); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) i); } } /* ----- Load RLE compressed raster ----- */ BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t) ldblk, sizeof(*BImgBuff)); /*Ldblk was set in the check phase*/ if(BImgBuff==NULL) goto NoMemory; offset=SeekBlob(image,6 /*sizeof(Header)*/,SEEK_SET); if (offset < 0) { if (palette != NULL) palette=DestroyImage(palette); if (clone_info != NULL) clone_info=DestroyImageInfo(clone_info); BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } for (i=0; i < (int) Header.Height; i++) { 
EncodedByte=ReadBlobLSBShort(image); ptrB=BImgBuff; j=ldblk; RunCount=(unsigned char) ReadBlobByte(image); RunCountMasked=RunCount & 0x7F; while ((int) RunCountMasked != 0) { if((ssize_t) RunCountMasked>j) { /*Wrong Data*/ RunCountMasked=(unsigned char) j; if(j==0) { break; } } if((int) RunCount>0x80) { RunValue=(unsigned char) ReadBlobByte(image); (void) memset(ptrB,(int) RunValue,(size_t) RunCountMasked); } else { (void) ReadBlob(image,(size_t) RunCountMasked,ptrB); } ptrB+=(int) RunCountMasked; j-=(int) RunCountMasked; if (EOFBlob(image) != MagickFalse) goto Finish; /* wrong data: unexpected eof in line */ RunCount=(unsigned char) ReadBlobByte(image); RunCountMasked=RunCount & 0x7F; } InsertRow(image,depth,BImgBuff,i,exception); } (void) SyncImage(image,exception); /*detect monochrome image*/ if(palette==NULL) { /*attempt to detect binary (black&white) images*/ if ((image->storage_class == PseudoClass) && (SetImageGray(image,exception) != MagickFalse)) { if(GetCutColors(image,exception)==2) { for (i=0; i < (ssize_t)image->colors; i++) { register Quantum sample; sample=ScaleCharToQuantum((unsigned char) i); if(image->colormap[i].red!=sample) goto Finish; if(image->colormap[i].green!=sample) goto Finish; if(image->colormap[i].blue!=sample) goto Finish; } image->colormap[1].red=image->colormap[1].green= image->colormap[1].blue=QuantumRange; for (i=0; i < (ssize_t)image->rows; i++) { q=QueueAuthenticPixels(image,0,i,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (j=0; j < (ssize_t)image->columns; j++) { if (GetPixelRed(image,q) == ScaleCharToQuantum(1)) { SetPixelRed(image,QuantumRange,q); SetPixelGreen(image,QuantumRange,q); SetPixelBlue(image,QuantumRange,q); } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) goto Finish; } } } } Finish: if (BImgBuff != NULL) BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); if (palette != NULL) palette=DestroyImage(palette); if (clone_info != NULL) clone_info=DestroyImageInfo(clone_info); if (EOFBlob(image) != MagickFalse) ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); (void) CloseBlob(image); return(GetFirstImageInList(image)); }
target: 1
cwe: [ "CWE-20", "CWE-125", "CWE-908" ]
project: ImageMagick
commit_id: cdb383749ef7b68a38891440af8cc23e0115306d
hash: 319,383,524,387,642,680,000,000,000,000,000,000,000
size: 340
https://github.com/ImageMagick/ImageMagick/issues/1599
ALGdealloc(PyObject *ptr) { ALGobject *self = (ALGobject *)ptr; /* Overwrite the contents of the object */ Py_XDECREF(self->counter); self->counter = NULL; memset(self->IV, 0, BLOCK_SIZE); memset(self->oldCipher, 0, BLOCK_SIZE); memset((char*)&(self->st), 0, sizeof(block_state)); self->mode = self->count = self->segment_size = 0; PyObject_Del(ptr); }
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: pycrypto
commit_id: 8dbe0dc3eea5c689d4f76b37b93fe216cf1f00d4
hash: 22,041,458,807,364,873,000,000,000,000,000,000,000
size: 13
Throw exception when IV is used with ECB or CTR The IV parameter is currently ignored when initializing a cipher in ECB or CTR mode. For CTR mode, it is confusing: it takes some time to see that a different parameter is needed (the counter). For ECB mode, it is outright dangerous. This patch forces an exception to be raised.
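A sketch of the guard this message describes, using the CPython C API; the variable names and surrounding parameter parsing are assumptions:

```c
/* Reject an IV outright for modes where it would otherwise be ignored. */
if (IV_len != 0 && (mode == MODE_ECB || mode == MODE_CTR)) {
    PyErr_Format(PyExc_ValueError,
                 "IV is not meaningful for the %s mode",
                 mode == MODE_ECB ? "ECB" : "CTR");
    return NULL;   /* constructor fails instead of silently dropping the IV */
}
```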
static int virtnet_receive(struct receive_queue *rq, int budget) { struct virtnet_info *vi = rq->vq->vdev->priv; unsigned int len, received = 0; void *buf; while (received < budget && (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { receive_buf(vi, rq, buf, len); received++; } if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) { if (!try_fill_recv(vi, rq, GFP_ATOMIC)) schedule_delayed_work(&vi->refill, 0); } return received; }
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: linux
commit_id: 48900cb6af4282fa0fb6ff4d72a81aa3dadb5c39
hash: 258,348,315,973,001,170,000,000,000,000,000,000,000
size: 19
virtio-net: drop NETIF_F_FRAGLIST virtio declares support for NETIF_F_FRAGLIST, but assumes that there are at most MAX_SKB_FRAGS + 2 fragments which isn't always true with a fraglist. A longer fraglist in the skb will make the call to skb_to_sgvec overflow the sg array, leading to memory corruption. Drop NETIF_F_FRAGLIST so we only get what we can handle. Cc: Michael S. Tsirkin <[email protected]> Signed-off-by: Jason Wang <[email protected]> Acked-by: Michael S. Tsirkin <[email protected]> Signed-off-by: David S. Miller <[email protected]>
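The fix is subtractive; a sketch of the feature-mask change (the exact probe-time flag lists are assumed):

```c
/* Stop advertising fraglist support so the core stack linearizes long
 * fraglists before they can overflow the MAX_SKB_FRAGS + 2 sg array. */
dev->hw_features &= ~NETIF_F_FRAGLIST;
dev->features    &= ~NETIF_F_FRAGLIST;
```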
static errno_t sdap_initgr_rfc2307bis_next_base(struct tevent_req *req) { struct tevent_req *subreq; struct sdap_initgr_rfc2307bis_state *state; state = tevent_req_data(req, struct sdap_initgr_rfc2307bis_state); talloc_zfree(state->filter); state->filter = sdap_get_id_specific_filter( state, state->base_filter, state->search_bases[state->base_iter]->filter); if (!state->filter) { return ENOMEM; } DEBUG(SSSDBG_TRACE_FUNC, "Searching for parent groups for user [%s] with base [%s]\n", state->orig_dn, state->search_bases[state->base_iter]->basedn); subreq = sdap_get_generic_send( state, state->ev, state->opts, state->sh, state->search_bases[state->base_iter]->basedn, state->search_bases[state->base_iter]->scope, state->filter, state->attrs, state->opts->group_map, SDAP_OPTS_GROUP, state->timeout, true); if (!subreq) { talloc_zfree(req); return ENOMEM; } tevent_req_set_callback(subreq, sdap_initgr_rfc2307bis_process, req); return EOK; }
target: 0
cwe: [ "CWE-264" ]
project: sssd
commit_id: 0b6b4b7669b46d3d0b0ebefbc0e1621965444717
hash: 280,166,590,514,760,600,000,000,000,000,000,000,000
size: 36
IPA: process non-posix nested groups Do not expect objectClass to be posixGroup but rather more general groupofnames. Resolves: https://fedorahosted.org/sssd/ticket/2343 Reviewed-by: Michal Židek <[email protected]> (cherry picked from commit bc8c93ffe881271043492c938c626a9be948000e)
static void alt_fp_init(fp_int* a) { a->size = FP_SIZE_ECC; fp_zero(a); }
target: 0
cwe: [ "CWE-200" ]
project: wolfssl
commit_id: 9b9568d500f31f964af26ba8d01e542e1f27e5ca
hash: 83,549,983,069,838,160,000,000,000,000,000,000,000
size: 5
Change ECDSA signing to use blinding.
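The message is terse; below is a generic sketch of multiplicative blinding in ECDSA signing with hypothetical big-integer helpers (mp_*), not wolfSSL's actual internals. The idea: fold a fresh random b into s = (e + r·d)/k mod n so the intermediate values are decorrelated from the secrets.

```c
/* s = (e + r*d) / k  computed as  s = b*(e + r*d) * (b*k)^-1  mod n.
 * The blinder b cancels algebraically but masks the inversion and the
 * multiplications that touch the private key d and the nonce k. */
mp_rand_mod(&b, &n);              /* fresh random blinder, 0 < b < n */
mp_mulmod(&k, &b, &n, &kb);       /* kb    = k*b            */
mp_invmod(&kb, &n, &kbinv);       /* kbinv = (k*b)^-1       */
mp_mulmod(&r, &d, &n, &t);        /* t     = r*d            */
mp_addmod(&t, &e, &n, &t);        /* t     = e + r*d        */
mp_mulmod(&t, &b, &n, &t);        /* t     = b*(e + r*d)    */
mp_mulmod(&t, &kbinv, &n, &s);    /* s     = (e + r*d)/k    */
```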
xmlPushInput(xmlParserCtxtPtr ctxt, xmlParserInputPtr input) { int ret; if (input == NULL) return(-1); if (xmlParserDebugEntities) { if ((ctxt->input != NULL) && (ctxt->input->filename)) xmlGenericError(xmlGenericErrorContext, "%s(%d): ", ctxt->input->filename, ctxt->input->line); xmlGenericError(xmlGenericErrorContext, "Pushing input %d : %.30s\n", ctxt->inputNr+1, input->cur); } ret = inputPush(ctxt, input); GROW; return(ret); }
target: 0
cwe: [ "CWE-125" ]
project: libxml2
commit_id: 77404b8b69bc122d12231807abf1a837d121b551
hash: 296,007,713,796,665,900,000,000,000,000,000,000,000
size: 16
Make sure the parser returns when getting a Stop order. Patch backported from chromium bug fixes, assuming author is Chris
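A sketch of the check this message describes: XML_PARSER_EOF is the state libxml2 uses as its stop marker, and xmlPushInput should bail out once it is set (the exact placement within the function is an assumption):

```c
ret = inputPush(ctxt, input);
if (ctxt->instate == XML_PARSER_EOF)   /* a Stop order arrived; return now */
    return(-1);
GROW;
return(ret);
```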
struct rtattr *addattr_nest(struct nlmsghdr *n, int maxlen, int type) { struct rtattr *nest = NLMSG_TAIL(n); addattr_l(n, maxlen, type, NULL, 0); return nest; }
target: 0
cwe: []
project: iproute2
commit_id: 8c50b728b226f6254251282697ce38a72639a6fc
hash: 275,002,288,312,588,350,000,000,000,000,000,000,000
size: 7
libnetlink: fix use-after-free of message buf In __rtnl_talk_iov() main loop, err is a pointer to memory in dynamically allocated 'buf' that is used to store netlink messages. If netlink message is an error message, buf is deallocated before returning with error code. However, on return err->error code is checked one more time to generate return value, after memory which err points to has already been freed. Save error code in temporary variable and use the variable to generate return value. Fixes: c60389e4f9ea ("libnetlink: fix leak and using unused memory on error") Signed-off-by: Vlad Buslov <[email protected]> Signed-off-by: Stephen Hemminger <[email protected]>
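A generic sketch of the described fix, with hypothetical names: copy the error code out of the heap buffer before freeing it, then use only the copy.

```c
struct nlmsgerr *err = (struct nlmsgerr *)NLMSG_DATA(h);
int saved_errno = err->error;   /* 'err' points into 'buf' */

free(buf);                      /* from here on, 'err' is dangling */

if (saved_errno < 0) {
    errno = -saved_errno;       /* use the saved copy, never 'err' again */
    return -errno;
}
return 0;
```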
u32 sqlite3ExprListFlags(const ExprList *pList){ int i; u32 m = 0; assert( pList!=0 ); for(i=0; i<pList->nExpr; i++){ Expr *pExpr = pList->a[i].pExpr; assert( pExpr!=0 ); m |= pExpr->flags; } return m; }
target: 0
cwe: [ "CWE-476" ]
project: sqlite
commit_id: 57f7ece78410a8aae86aa4625fb7556897db384c
hash: 127,459,779,448,965,010,000,000,000,000,000,000,000
size: 11
Fix a problem that comes up when using generated columns that evaluate to a constant in an index and then making use of that index in a join. FossilOrigin-Name: 8b12e95fec7ce6e0de82a04ca3dfcf1a8e62e233b7382aa28a8a9be6e862b1af
static void handle_new_recv_msgs(struct ipmi_smi *intf) { struct ipmi_smi_msg *smi_msg; unsigned long flags = 0; int rv; int run_to_completion = intf->run_to_completion; /* See if any waiting messages need to be processed. */ if (!run_to_completion) spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); while (!list_empty(&intf->waiting_rcv_msgs)) { smi_msg = list_entry(intf->waiting_rcv_msgs.next, struct ipmi_smi_msg, link); list_del(&smi_msg->link); if (!run_to_completion) spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); rv = handle_one_recv_msg(intf, smi_msg); if (!run_to_completion) spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); if (rv > 0) { /* * To preserve message order, quit if we * can't handle a message. Add the message * back at the head, this is safe because this * tasklet is the only thing that pulls the * messages. */ list_add(&smi_msg->link, &intf->waiting_rcv_msgs); break; } else { if (rv == 0) /* Message handled */ ipmi_free_smi_msg(smi_msg); /* If rv < 0, fatal error, del but don't free. */ } } if (!run_to_completion) spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); /* * If the pretimout count is non-zero, decrement one from it and * deliver pretimeouts to all the users. */ if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) { struct ipmi_user *user; int index; index = srcu_read_lock(&intf->users_srcu); list_for_each_entry_rcu(user, &intf->users, link) { if (user->handler->ipmi_watchdog_pretimeout) user->handler->ipmi_watchdog_pretimeout( user->handler_data); } srcu_read_unlock(&intf->users_srcu, index); } }
target: 0
cwe: [ "CWE-416", "CWE-284" ]
project: linux
commit_id: 77f8269606bf95fcb232ee86f6da80886f1dfae8
hash: 192,382,054,497,962,340,000,000,000,000,000,000,000
size: 57
ipmi: fix use-after-free of user->release_barrier.rda When we do the following test, we got oops in ipmi_msghandler driver while((1)) do service ipmievd restart & service ipmievd restart done --------------------------------------------------------------- [ 294.230186] Unable to handle kernel paging request at virtual address 0000803fea6ea008 [ 294.230188] Mem abort info: [ 294.230190] ESR = 0x96000004 [ 294.230191] Exception class = DABT (current EL), IL = 32 bits [ 294.230193] SET = 0, FnV = 0 [ 294.230194] EA = 0, S1PTW = 0 [ 294.230195] Data abort info: [ 294.230196] ISV = 0, ISS = 0x00000004 [ 294.230197] CM = 0, WnR = 0 [ 294.230199] user pgtable: 4k pages, 48-bit VAs, pgdp = 00000000a1c1b75a [ 294.230201] [0000803fea6ea008] pgd=0000000000000000 [ 294.230204] Internal error: Oops: 96000004 [#1] SMP [ 294.235211] Modules linked in: nls_utf8 isofs rpcrdma ib_iser ib_srpt target_core_mod ib_srp scsi_transport_srp ib_ipoib rdma_ucm ib_umad rdma_cm ib_cm iw_cm dm_mirror dm_region_hash dm_log dm_mod aes_ce_blk crypto_simd cryptd aes_ce_cipher ghash_ce sha2_ce ses sha256_arm64 sha1_ce hibmc_drm hisi_sas_v2_hw enclosure sg hisi_sas_main sbsa_gwdt ip_tables mlx5_ib ib_uverbs marvell ib_core mlx5_core ixgbe ipmi_si mdio hns_dsaf ipmi_devintf ipmi_msghandler hns_enet_drv hns_mdio [ 294.277745] CPU: 3 PID: 0 Comm: swapper/3 Kdump: loaded Not tainted 5.0.0-rc2+ #113 [ 294.285511] Hardware name: Huawei TaiShan 2280 /BC11SPCD, BIOS 1.37 11/21/2017 [ 294.292835] pstate: 80000005 (Nzcv daif -PAN -UAO) [ 294.297695] pc : __srcu_read_lock+0x38/0x58 [ 294.301940] lr : acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler] [ 294.307853] sp : ffff00001001bc80 [ 294.311208] x29: ffff00001001bc80 x28: ffff0000117e5000 [ 294.316594] x27: 0000000000000000 x26: dead000000000100 [ 294.321980] x25: dead000000000200 x24: ffff803f6bd06800 [ 294.327366] x23: 0000000000000000 x22: 0000000000000000 [ 294.332752] x21: ffff00001001bd04 x20: ffff80df33d19018 [ 294.338137] x19: ffff80df33d19018 x18: 0000000000000000 [ 294.343523] x17: 0000000000000000 x16: 0000000000000000 [ 294.348908] x15: 0000000000000000 x14: 0000000000000002 [ 294.354293] x13: 0000000000000000 x12: 0000000000000000 [ 294.359679] x11: 0000000000000000 x10: 0000000000100000 [ 294.365065] x9 : 0000000000000000 x8 : 0000000000000004 [ 294.370451] x7 : 0000000000000000 x6 : ffff80df34558678 [ 294.375836] x5 : 000000000000000c x4 : 0000000000000000 [ 294.381221] x3 : 0000000000000001 x2 : 0000803fea6ea000 [ 294.386607] x1 : 0000803fea6ea008 x0 : 0000000000000001 [ 294.391994] Process swapper/3 (pid: 0, stack limit = 0x0000000083087293) [ 294.398791] Call trace: [ 294.401266] __srcu_read_lock+0x38/0x58 [ 294.405154] acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler] [ 294.410716] deliver_response+0x80/0xf8 [ipmi_msghandler] [ 294.416189] deliver_local_response+0x28/0x68 [ipmi_msghandler] [ 294.422193] handle_one_recv_msg+0x158/0xcf8 [ipmi_msghandler] [ 294.432050] handle_new_recv_msgs+0xc0/0x210 [ipmi_msghandler] [ 294.441984] smi_recv_tasklet+0x8c/0x158 [ipmi_msghandler] [ 294.451618] tasklet_action_common.isra.5+0x88/0x138 [ 294.460661] tasklet_action+0x2c/0x38 [ 294.468191] __do_softirq+0x120/0x2f8 [ 294.475561] irq_exit+0x134/0x140 [ 294.482445] __handle_domain_irq+0x6c/0xc0 [ 294.489954] gic_handle_irq+0xb8/0x178 [ 294.497037] el1_irq+0xb0/0x140 [ 294.503381] arch_cpu_idle+0x34/0x1a8 [ 294.510096] do_idle+0x1d4/0x290 [ 294.516322] cpu_startup_entry+0x28/0x30 [ 294.523230] secondary_start_kernel+0x184/0x1d0 [ 294.530657] Code: d538d082 d2800023 8b010c81 8b020021 
(c85f7c25) [ 294.539746] ---[ end trace 8a7a880dee570b29 ]--- [ 294.547341] Kernel panic - not syncing: Fatal exception in interrupt [ 294.556837] SMP: stopping secondary CPUs [ 294.563996] Kernel Offset: disabled [ 294.570515] CPU features: 0x002,21006008 [ 294.577638] Memory Limit: none [ 294.587178] Starting crashdump kernel... [ 294.594314] Bye! Because the user->release_barrier.rda is freed in ipmi_destroy_user(), but the refcount is not zero, when acquire_ipmi_user() uses user->release_barrier.rda in __srcu_read_lock(), it causes oops. Fix this by calling cleanup_srcu_struct() when the refcount is zero. Fixes: e86ee2d44b44 ("ipmi: Rework locking and shutdown for hot remove") Cc: [email protected] # 4.18 Signed-off-by: Yang Yingliang <[email protected]> Signed-off-by: Corey Minyard <[email protected]>
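A simplified sketch of the fix described at the end of the message: tear the SRCU structure down only in the kref release callback, once the refcount has truly hit zero (sleep-context details of the real patch are omitted here):

```c
static void free_user(struct kref *ref)
{
    struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);

    /* No reader can still be inside an SRCU section on this user. */
    cleanup_srcu_struct(&user->release_barrier);
    kfree(user);
}
```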
int xenvif_kthread_guest_rx(void *data) { struct xenvif *vif = data; struct sk_buff *skb; while (!kthread_should_stop()) { wait_event_interruptible(vif->wq, rx_work_todo(vif) || vif->disabled || kthread_should_stop()); /* This frontend is found to be rogue, disable it in * kthread context. Currently this is only set when * netback finds out frontend sends malformed packet, * but we cannot disable the interface in softirq * context so we defer it here. */ if (unlikely(vif->disabled && netif_carrier_ok(vif->dev))) xenvif_carrier_off(vif); if (kthread_should_stop()) break; if (vif->rx_queue_purge) { skb_queue_purge(&vif->rx_queue); vif->rx_queue_purge = false; } if (!skb_queue_empty(&vif->rx_queue)) xenvif_rx_action(vif); if (skb_queue_empty(&vif->rx_queue) && netif_queue_stopped(vif->dev)) { del_timer_sync(&vif->wake_queue); xenvif_start_queue(vif); } cond_resched(); } /* Bin any remaining skbs */ while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) dev_kfree_skb(skb); return 0; }
target: 0
cwe: [ "CWE-399" ]
project: net-next
commit_id: e9d8b2c2968499c1f96563e6522c56958d5a1d0d
hash: 214,191,357,812,921,370,000,000,000,000,000,000,000
size: 46
xen-netback: disable rogue vif in kthread context When netback discovers frontend is sending malformed packet it will disables the interface which serves that frontend. However disabling a network interface involving taking a mutex which cannot be done in softirq context, so we need to defer this process to kthread context. This patch does the following: 1. introduce a flag to indicate the interface is disabled. 2. check that flag in TX path, don't do any work if it's true. 3. check that flag in RX path, turn off that interface if it's true. The reason to disable it in RX path is because RX uses kthread. After this change the behavior of netback is still consistent -- it won't do any TX work for a rogue frontend, and the interface will be eventually turned off. Also change a "continue" to "break" after xenvif_fatal_tx_err, as it doesn't make sense to continue processing packets if frontend is rogue. This is a fix for XSA-90. Reported-by: Török Edwin <[email protected]> Signed-off-by: Wei Liu <[email protected]> Cc: Ian Campbell <[email protected]> Reviewed-by: David Vrabel <[email protected]> Acked-by: Ian Campbell <[email protected]> Signed-off-by: David S. Miller <[email protected]>
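The function above is the RX-side half (item 3 in the message's list). A sketch of the TX-side check (item 2), with its placement assumed:

```c
/* In the TX processing loop: a rogue frontend gets no further work. */
if (unlikely(vif->disabled))
    break;
```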
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, long *timeo, struct sock *ssk) { struct netlink_sock *nlk; nlk = nlk_sk(sk); if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || test_bit(0, &nlk->state)) { DECLARE_WAITQUEUE(wait, current); if (!*timeo) { if (!ssk || netlink_is_kernel(ssk)) netlink_overrun(sk); sock_put(sk); kfree_skb(skb); return -EAGAIN; } __set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&nlk->wait, &wait); if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || test_bit(0, &nlk->state)) && !sock_flag(sk, SOCK_DEAD)) *timeo = schedule_timeout(*timeo); __set_current_state(TASK_RUNNING); remove_wait_queue(&nlk->wait, &wait); sock_put(sk); if (signal_pending(current)) { kfree_skb(skb); return sock_intr_errno(*timeo); } return 1; } skb_set_owner_r(skb, sk); return 0; }
target: 0
cwe: []
project: linux-2.6
commit_id: 16e5726269611b71c930054ffe9b858c1cea88eb
hash: 31,386,346,188,496,750,000,000,000,000,000,000,000
size: 39
af_unix: dont send SCM_CREDENTIALS by default Since commit 7361c36c5224 (af_unix: Allow credentials to work across user and pid namespaces) af_unix performance dropped a lot. This is because we now take a reference on pid and cred in each write(), and release them in read(), usually done from another process, eventually from another cpu. This triggers false sharing. # Events: 154K cycles # # Overhead Command Shared Object Symbol # ........ ....... .................. ......................... # 10.40% hackbench [kernel.kallsyms] [k] put_pid 8.60% hackbench [kernel.kallsyms] [k] unix_stream_recvmsg 7.87% hackbench [kernel.kallsyms] [k] unix_stream_sendmsg 6.11% hackbench [kernel.kallsyms] [k] do_raw_spin_lock 4.95% hackbench [kernel.kallsyms] [k] unix_scm_to_skb 4.87% hackbench [kernel.kallsyms] [k] pid_nr_ns 4.34% hackbench [kernel.kallsyms] [k] cred_to_ucred 2.39% hackbench [kernel.kallsyms] [k] unix_destruct_scm 2.24% hackbench [kernel.kallsyms] [k] sub_preempt_count 1.75% hackbench [kernel.kallsyms] [k] fget_light 1.51% hackbench [kernel.kallsyms] [k] __mutex_lock_interruptible_slowpath 1.42% hackbench [kernel.kallsyms] [k] sock_alloc_send_pskb This patch includes SCM_CREDENTIALS information in a af_unix message/skb only if requested by the sender, [man 7 unix for details how to include ancillary data using sendmsg() system call] Note: This might break buggy applications that expected SCM_CREDENTIAL from an unaware write() system call, and receiver not using SO_PASSCRED socket option. If SOCK_PASSCRED is set on source or destination socket, we still include credentials for mere write() syscalls. Performance boost in hackbench : more than 50% gain on a 16 thread machine (2 quad-core cpus, 2 threads per core) hackbench 20 thread 2000 4.228 sec instead of 9.102 sec Signed-off-by: Eric Dumazet <[email protected]> Acked-by: Tim Chen <[email protected]> Signed-off-by: David S. Miller <[email protected]>
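A sketch of the decision this message describes, modeled as a hypothetical helper: credentials are attached only when a passcred-style option is set on either end (or the peer is not yet connected), so a plain write() skips the pid/cred refcounting entirely.

```c
static bool want_creds(const struct socket *sock, const struct sock *other)
{
    return test_bit(SOCK_PASSCRED, &sock->flags) ||
           !other->sk_socket ||
           test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
}
```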
void net_ns_barrier(void) { down_write(&pernet_ops_rwsem); up_write(&pernet_ops_rwsem); }
target: 0
cwe: [ "CWE-200", "CWE-190", "CWE-326" ]
project: linux
commit_id: 355b98553789b646ed97ad801a619ff898471b92
hash: 247,014,387,540,401,000,000,000,000,000,000,000,000
size: 5
netns: provide pure entropy for net_hash_mix() net_hash_mix() currently uses kernel address of a struct net, and is used in many places that could be used to reveal this address to a patient attacker, thus defeating KASLR, for the typical case (initial net namespace, &init_net is not dynamically allocated) I believe the original implementation tried to avoid spending too many cycles in this function, but security comes first. Also provide entropy regardless of CONFIG_NET_NS. Fixes: 0b4419162aa6 ("netns: introduce the net_hash_mix "salt" for hashes") Signed-off-by: Eric Dumazet <[email protected]> Reported-by: Amit Klein <[email protected]> Reported-by: Benny Pinkas <[email protected]> Cc: Pavel Emelyanov <[email protected]> Signed-off-by: David S. Miller <[email protected]>
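A sketch of the fix: return a per-namespace random salt instead of anything derived from the kernel address of struct net (field name and initialization per my reading of the fix; treat both as assumptions):

```c
/* struct net gains a 'u32 hash_mix' filled from get_random_u32() when the
 * namespace is set up; the mix function then leaks no pointer bits. */
static inline u32 net_hash_mix(const struct net *net)
{
    return net->hash_mix;
}
```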
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info) { #define DitherImageTag "Dither/Image" CacheView *image_view; const char *artifact; double amount; DoublePixelPacket **pixels; ExceptionInfo *exception; MagickBooleanType status; ssize_t y; /* Distribute quantization error using Floyd-Steinberg. */ pixels=AcquirePixelThreadSet(image->columns); if (pixels == (DoublePixelPacket **) NULL) return(MagickFalse); exception=(&image->exception); status=MagickTrue; amount=1.0; artifact=GetImageArtifact(image,"dither:diffusion-amount"); if (artifact != (const char *) NULL) amount=StringToDoubleInterval(artifact,1.0); image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); CubeInfo cube; DoublePixelPacket *current, *previous; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; size_t index; ssize_t v; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); cube=(*cube_info); current=pixels[id]+(y & 0x01)*image->columns; previous=pixels[id]+((y+1) & 0x01)*image->columns; v=(ssize_t) ((y & 0x01) ? -1 : 1); for (x=0; x < (ssize_t) image->columns; x++) { DoublePixelPacket color, pixel; register ssize_t i; ssize_t u; u=(y & 0x01) ? (ssize_t) image->columns-1-x : x; AssociateAlphaPixel(&cube,q+u,&pixel); if (x > 0) { pixel.red+=7.0*amount*current[u-v].red/16; pixel.green+=7.0*amount*current[u-v].green/16; pixel.blue+=7.0*amount*current[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=7.0*amount*current[u-v].opacity/16; } if (y > 0) { if (x < (ssize_t) (image->columns-1)) { pixel.red+=previous[u+v].red/16; pixel.green+=previous[u+v].green/16; pixel.blue+=previous[u+v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=previous[u+v].opacity/16; } pixel.red+=5.0*amount*previous[u].red/16; pixel.green+=5.0*amount*previous[u].green/16; pixel.blue+=5.0*amount*previous[u].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=5.0*amount*previous[u].opacity/16; if (x > 0) { pixel.red+=3.0*amount*previous[u-v].red/16; pixel.green+=3.0*amount*previous[u-v].green/16; pixel.blue+=3.0*amount*previous[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=3.0*amount*previous[u-v].opacity/16; } } pixel.red=(MagickRealType) ClampPixel(pixel.red); pixel.green=(MagickRealType) ClampPixel(pixel.green); pixel.blue=(MagickRealType) ClampPixel(pixel.blue); if (cube.associate_alpha != MagickFalse) pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity); i=CacheOffset(&cube,&pixel); if (cube.cache[i] < 0) { register NodeInfo *node_info; register size_t id; /* Identify the deepest node containing the pixel's color. */ node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*(QuantumRange+ 1.0)+1.0); ClosestColor(image,&cube,node_info->parent); cube.cache[i]=(ssize_t) cube.color_number; } /* Assign pixel to closest colormap entry. 
*/ index=(size_t) cube.cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(indexes+u,index); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRgb(q+u,image->colormap+index); if (cube.associate_alpha != MagickFalse) SetPixelOpacity(q+u,image->colormap[index].opacity); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; /* Store the error. */ AssociateAlphaPixel(&cube,image->colormap+index,&color); current[u].red=pixel.red-color.red; current[u].green=pixel.green-color.green; current[u].blue=pixel.blue-color.blue; if (cube.associate_alpha != MagickFalse) current[u].opacity=pixel.opacity-color.opacity; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } image_view=DestroyCacheView(image_view); pixels=DestroyPixelThreadSet(pixels); return(MagickTrue); }
target: 0
cwe: [ "CWE-125" ]
project: ImageMagick6
commit_id: e2a21735e3a3f3930bd431585ec36334c4c2eb77
hash: 222,187,553,979,543,900,000,000,000,000,000,000,000
size: 197
https://github.com/ImageMagick/ImageMagick/issues/1540
static void smack_file_free_security(struct file *file) { file->f_security = NULL; }
target: 0
cwe: []
project: linux-2.6
commit_id: ee18d64c1f632043a02e6f5ba5e045bb26a5465f
hash: 38,792,868,933,091,823,000,000,000,000,000,000,000
size: 4
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6] Add a keyctl to install a process's session keyring onto its parent. This replaces the parent's session keyring. Because the COW credential code does not permit one process to change another process's credentials directly, the change is deferred until userspace next starts executing again. Normally this will be after a wait*() syscall. To support this, three new security hooks have been provided: cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in the blank security creds and key_session_to_parent() - which asks the LSM if the process may replace its parent's session keyring. The replacement may only happen if the process has the same ownership details as its parent, and the process has LINK permission on the session keyring, and the session keyring is owned by the process, and the LSM permits it. Note that this requires alteration to each architecture's notify_resume path. This has been done for all arches barring blackfin, m68k* and xtensa, all of which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the replacement to be performed at the point the parent process resumes userspace execution. This allows the userspace AFS pioctl emulation to fully emulate newpag() and the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to alter the parent process's PAG membership. However, since kAFS doesn't use PAGs per se, but rather dumps the keys into the session keyring, the session keyring of the parent must be replaced if, for example, VIOCSETTOK is passed the newpag flag. This can be tested with the following program: #include <stdio.h> #include <stdlib.h> #include <keyutils.h> #define KEYCTL_SESSION_TO_PARENT 18 #define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0) int main(int argc, char **argv) { key_serial_t keyring, key; long ret; keyring = keyctl_join_session_keyring(argv[1]); OSERROR(keyring, "keyctl_join_session_keyring"); key = add_key("user", "a", "b", 1, keyring); OSERROR(key, "add_key"); ret = keyctl(KEYCTL_SESSION_TO_PARENT); OSERROR(ret, "KEYCTL_SESSION_TO_PARENT"); return 0; } Compiled and linked with -lkeyutils, you should see something like: [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 355907932 --alswrv 4043 -1 \_ keyring: _uid.4043 [dhowells@andromeda ~]$ /tmp/newpag [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 1055658746 --alswrv 4043 4043 \_ user: a [dhowells@andromeda ~]$ /tmp/newpag hello [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: hello 340417692 --alswrv 4043 4043 \_ user: a Where the test program creates a new session keyring, sticks a user key named 'a' into it and then installs it on its parent. Signed-off-by: David Howells <[email protected]> Signed-off-by: James Morris <[email protected]>
dir_inspect(VALUE dir) { struct dir_data *dirp; TypedData_Get_Struct(dir, struct dir_data, &dir_data_type, dirp); if (!NIL_P(dirp->path)) { VALUE str = rb_str_new_cstr("#<"); rb_str_append(str, rb_class_name(CLASS_OF(dir))); rb_str_cat2(str, ":"); rb_str_append(str, dirp->path); rb_str_cat2(str, ">"); return str; } return rb_funcall(dir, rb_intern("to_s"), 0, 0); }
target: 0
cwe: [ "CWE-22" ]
project: ruby
commit_id: 143eb22f1877815dd802f7928959c5f93d4c7bb3
hash: 160,546,641,759,606,610,000,000,000,000,000,000,000
size: 15
merge revision(s) 62989: dir.c: check NUL bytes * dir.c (GlobPathValue): should be used in rb_push_glob only. other methods should use FilePathValue. https://hackerone.com/reports/302338 * dir.c (rb_push_glob): expand GlobPathValue git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_2_2@63015 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
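A sketch of the distinction the log draws, in ruby's C API: ordinary Dir methods should validate paths with FilePathValue(), which rejects embedded NUL bytes, leaving the glob-specific conversion to rb_push_glob() alone. The function name here is illustrative:

```c
static VALUE dir_open_checked(VALUE dirname)
{
    FilePathValue(dirname);   /* raises ArgumentError on "\0" in the path */
    /* ... safe to hand the C string to opendir() and friends ... */
    return dirname;
}
```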
TEST_F(QueryPlannerTest, MultikeyElemMatchAllCompound3) { addIndex(BSON("arr.k" << 1 << "arr.v" << 1), true); runQuery(fromjson( "{arr: {$all: [" "{$elemMatch: {k: 1, v: 1}}, {$elemMatch: {k: 2, v: 2}}, {$elemMatch: {k: 3, v: 3}}]}}")); assertNumSolutions(4U); assertSolutionExists("{cscan: {dir: 1}}"); assertSolutionExists( "{fetch: {filter: " "{$and:[{arr:{$elemMatch:{k:1,v:1}}}," "{arr:{$elemMatch:{k:2,v:2}}},{arr:{$elemMatch:{k:3,v:3}}}]}," "node: {ixscan: {pattern: {'arr.k': 1, 'arr.v': 1}, filter: null, " "bounds: {'arr.k': [[1,1,true,true]], 'arr.v': [[1,1,true,true]]}}}}}"); assertSolutionExists( "{fetch: {filter: " "{$and:[{arr:{$elemMatch:{k:2,v:2}}}," "{arr:{$elemMatch:{k:1,v:1}}},{arr:{$elemMatch:{k:3,v:3}}}]}," "node: {ixscan: {pattern: {'arr.k': 1, 'arr.v': 1}, filter: null, " "bounds: {'arr.k': [[2,2,true,true]], 'arr.v': [[2,2,true,true]]}}}}}"); assertSolutionExists( "{fetch: {filter: " "{$and:[{arr:{$elemMatch:{k:3,v:3}}}," "{arr:{$elemMatch:{k:1,v:1}}},{arr:{$elemMatch:{k:2,v:2}}}]}," "node: {ixscan: {pattern: {'arr.k': 1, 'arr.v': 1}, filter: null, " "bounds: {'arr.k': [[3,3,true,true]], 'arr.v': [[3,3,true,true]]}}}}}"); }
target: 0
cwe: [ "CWE-834" ]
project: mongo
commit_id: 94d0e046baa64d1aa1a6af97e2d19bb466cc1ff5
hash: 177,036,936,681,594,100,000,000,000,000,000,000,000
size: 27
SERVER-38164 $or pushdown optimization does not correctly handle $not within an $elemMatch
client_use_key_share(gnutls_session_t session, const gnutls_group_entry_st *group, const uint8_t * data, size_t data_size) { const gnutls_ecc_curve_entry_st *curve; int ret; if (group->pk == GNUTLS_PK_EC) { gnutls_pk_params_st pub; curve = _gnutls_ecc_curve_get_params(group->curve); gnutls_pk_params_init(&pub); if (session->key.kshare.ecdh_params.algo != group->pk || session->key.kshare.ecdh_params.curve != curve->id) return gnutls_assert_val(GNUTLS_E_RECEIVED_ILLEGAL_PARAMETER); if (curve->size*2+1 != data_size) return gnutls_assert_val(GNUTLS_E_RECEIVED_ILLEGAL_PARAMETER); /* read the server's public key */ ret = _gnutls_ecc_ansi_x962_import(data, data_size, &pub.params[ECC_X], &pub.params[ECC_Y]); if (ret < 0) return gnutls_assert_val(ret); pub.algo = group->pk; pub.curve = curve->id; pub.params_nr = 2; /* generate shared key */ ret = _gnutls_pk_derive_tls13(curve->pk, &session->key.key, &session->key.kshare.ecdh_params, &pub); gnutls_pk_params_release(&pub); if (ret < 0) { return gnutls_assert_val(ret); } ret = 0; } else if (group->pk == GNUTLS_PK_ECDH_X25519 || group->pk == GNUTLS_PK_ECDH_X448) { gnutls_pk_params_st pub; curve = _gnutls_ecc_curve_get_params(group->curve); if (session->key.kshare.ecdhx_params.algo != group->pk || session->key.kshare.ecdhx_params.curve != curve->id) return gnutls_assert_val(GNUTLS_E_RECEIVED_ILLEGAL_PARAMETER); if (curve->size != data_size) return gnutls_assert_val(GNUTLS_E_RECEIVED_ILLEGAL_PARAMETER); /* read the public key and generate shared */ gnutls_pk_params_init(&pub); pub.algo = group->pk; pub.curve = curve->id; pub.raw_pub.data = (void*)data; pub.raw_pub.size = data_size; /* We don't mask the MSB in the final byte as required * by RFC7748. This will be done internally by nettle 3.3 or later. */ ret = _gnutls_pk_derive_tls13(curve->pk, &session->key.key, &session->key.kshare.ecdhx_params, &pub); if (ret < 0) { return gnutls_assert_val(ret); } ret = 0; } else if (group->pk == GNUTLS_PK_DH) { gnutls_pk_params_st pub; if (session->key.kshare.dh_params.algo != group->pk || session->key.kshare.dh_params.dh_group != group->id) return gnutls_assert_val(GNUTLS_E_RECEIVED_ILLEGAL_PARAMETER); if (data_size != group->prime->size) return gnutls_assert_val(GNUTLS_E_RECEIVED_ILLEGAL_PARAMETER); /* read the public key and generate shared */ gnutls_pk_params_init(&pub); ret = _gnutls_mpi_init_scan_nz(&pub.params[DH_Y], data, data_size); if (ret < 0) return gnutls_assert_val(GNUTLS_E_RECEIVED_ILLEGAL_PARAMETER); pub.algo = group->pk; /* generate shared key */ ret = _gnutls_pk_derive_tls13(GNUTLS_PK_DH, &session->key.key, &session->key.kshare.dh_params, &pub); _gnutls_mpi_release(&pub.params[DH_Y]); if (ret < 0) return gnutls_assert_val(ret); ret = 0; } else { return gnutls_assert_val(GNUTLS_E_RECEIVED_ILLEGAL_PARAMETER); } _gnutls_debug_log("EXT[%p]: client generated %s shared key\n", session, group->name); return ret; }
target: 0
cwe: [ "CWE-416" ]
project: gnutls
commit_id: 15beb4b193b2714d88107e7dffca781798684e7e
hash: 127,004,582,875,150,040,000,000,000,000,000,000,000
size: 104
key_share: avoid use-after-free around realloc Signed-off-by: Daiki Ueno <[email protected]>
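The subject line names a classic hazard; a generic sketch with a hypothetical resize_buffer(): keep an offset rather than a raw pointer across anything that may realloc.

```c
size_t off = p - buf->data;          /* remember a position, not a pointer */

if (resize_buffer(buf, needed) < 0)  /* may realloc and move buf->data */
    return -1;

p = buf->data + off;                 /* recompute after the possible move */
```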
static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, u32 start_seq, u32 end_seq) { int err; bool in_sack; unsigned int pkt_len; unsigned int mss; in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && !before(end_seq, TCP_SKB_CB(skb)->end_seq); if (tcp_skb_pcount(skb) > 1 && !in_sack && after(TCP_SKB_CB(skb)->end_seq, start_seq)) { mss = tcp_skb_mss(skb); in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); if (!in_sack) { pkt_len = start_seq - TCP_SKB_CB(skb)->seq; if (pkt_len < mss) pkt_len = mss; } else { pkt_len = end_seq - TCP_SKB_CB(skb)->seq; if (pkt_len < mss) return -EINVAL; } /* Round if necessary so that SACKs cover only full MSSes * and/or the remaining small portion (if present) */ if (pkt_len > mss) { unsigned int new_len = (pkt_len / mss) * mss; if (!in_sack && new_len < pkt_len) { new_len += mss; if (new_len >= skb->len) return 0; } pkt_len = new_len; } err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); if (err < 0) return err; } return in_sack; }
target: 0
cwe: [ "CWE-703", "CWE-189" ]
project: linux
commit_id: 8b8a321ff72c785ed5e8b4cf6eda20b35d427390
hash: 323,497,569,851,616,200,000,000,000,000,000,000,000
size: 45
tcp: fix zero cwnd in tcp_cwnd_reduction Patch 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode conditionally") introduced a bug that cwnd may become 0 when both inflight and sndcnt are 0 (cwnd = inflight + sndcnt). This may lead to a div-by-zero if the connection starts another cwnd reduction phase by setting tp->prior_cwnd to the current cwnd (0) in tcp_init_cwnd_reduction(). To prevent this we skip PRR operation when nothing is acked or sacked. Then cwnd must be positive in all cases as long as ssthresh is positive: 1) The proportional reduction mode inflight > ssthresh > 0 2) The reduction bound mode a) inflight == ssthresh > 0 b) inflight < ssthresh sndcnt > 0 since newly_acked_sacked > 0 and inflight < ssthresh Therefore in all cases inflight and sndcnt can not both be 0. We check invalid tp->prior_cwnd to avoid potential div0 bugs. In reality this bug is triggered only with a sequence of less common events. For example, the connection is terminating an ECN-triggered cwnd reduction with an inflight 0, then it receives reordered/old ACKs or DSACKs from prior transmission (which acks nothing). Or the connection is in fast recovery stage that marks everything lost, but fails to retransmit due to local issues, then receives data packets from other end which acks nothing. Fixes: 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode conditionally") Reported-by: Oleksandr Natalenko <[email protected]> Signed-off-by: Yuchung Cheng <[email protected]> Signed-off-by: Neal Cardwell <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
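The guard the message describes, as it would sit at the top of the PRR cwnd-reduction step (a close paraphrase of the described fix, not a verified copy of the diff):

```c
/* Skip PRR entirely when nothing was acked or sacked; together with a sane
 * prior_cwnd this keeps cwnd = inflight + sndcnt strictly positive. */
if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd))
    return;
```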
static void holtekff_send(struct holtekff_device *holtekff, struct hid_device *hid, const u8 data[HOLTEKFF_MSG_LENGTH]) { int i; for (i = 0; i < HOLTEKFF_MSG_LENGTH; i++) { holtekff->field->value[i] = data[i]; } dbg_hid("sending %7ph\n", data); hid_hw_request(hid, holtekff->field->report, HID_REQ_SET_REPORT); }
target: 0
cwe: [ "CWE-787" ]
project: linux
commit_id: d9d4b1e46d9543a82c23f6df03f4ad697dab361b
hash: 206,267,545,337,159,570,000,000,000,000,000,000,000
size: 14
HID: Fix assumption that devices have inputs The syzbot fuzzer found a slab-out-of-bounds write bug in the hid-gaff driver. The problem is caused by the driver's assumption that the device must have an input report. While this will be true for all normal HID input devices, a suitably malicious device can violate the assumption. The same assumption is present in over a dozen other HID drivers. This patch fixes them by checking that the list of hid_inputs for the hid_device is nonempty before allowing it to be used. Reported-and-tested-by: [email protected] Signed-off-by: Alan Stern <[email protected]> CC: <[email protected]> Signed-off-by: Benjamin Tissoires <[email protected]>
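The check the message describes, sketched for a driver init path like holtekff_init() (surrounding code assumed):

```c
struct hid_input *hidinput;

if (list_empty(&hid->inputs)) {        /* malicious devices may have none */
    hid_err(hid, "no inputs found\n");
    return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
```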
QStringList ServerView::mimeTypes() const { QStringList qsl; qsl << QStringList(QLatin1String("text/uri-list")); qsl << QStringList(QLatin1String("text/plain")); return qsl; }
target: 0
cwe: [ "CWE-59", "CWE-61" ]
project: mumble
commit_id: e59ee87abe249f345908c7d568f6879d16bfd648
hash: 129,053,330,503,629,290,000,000,000,000,000,000,000
size: 6
FIX(client): Only allow "http"/"https" for URLs in ConnectDialog Our public server list registration script doesn't have an URL scheme whitelist for the website field. Turns out a malicious server can register itself with a dangerous URL in an attempt to attack a user's machine. User interaction is required, as the URL has to be opened by right-clicking on the server entry and clicking on "Open Webpage". This commit introduces a client-side whitelist, which only allows "http" and "https" schemes. We will also implement it in our public list. In future we should probably add a warning QMessageBox informing the user that there's no guarantee the URL is safe (regardless of the scheme). Thanks a lot to https://positive.security for reporting the RCE vulnerability to us privately.
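The actual client is Qt/C++ and compares QUrl::scheme() against "http"/"https"; a language-neutral sketch of the same whitelist idea in C:

```c
#include <string.h>

/* Only explicitly whitelisted schemes may be handed to the OS URL opener. */
static int scheme_is_allowed(const char *url)
{
    return strncmp(url, "http://", 7) == 0 ||
           strncmp(url, "https://", 8) == 0;
}
```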
connection_ap_handshake_rewrite(entry_connection_t *conn, rewrite_result_t *out) { socks_request_t *socks = conn->socks_request; const or_options_t *options = get_options(); tor_addr_t addr_tmp; /* Initialize all the fields of 'out' to reasonable defaults */ out->automap = 0; out->exit_source = ADDRMAPSRC_NONE; out->map_expires = TIME_MAX; out->end_reason = 0; out->should_close = 0; out->orig_address[0] = 0; /* We convert all incoming addresses to lowercase. */ tor_strlower(socks->address); /* Remember the original address. */ strlcpy(out->orig_address, socks->address, sizeof(out->orig_address)); log_debug(LD_APP,"Client asked for %s:%d", safe_str_client(socks->address), socks->port); /* Check for whether this is a .exit address.  By default, those are * disallowed when they're coming straight from the client, but you're * allowed to have them in MapAddress commands and so forth. */ if (!strcmpend(socks->address, ".exit")) { static ratelim_t exit_warning_limit = RATELIM_INIT(60*15); log_fn_ratelim(&exit_warning_limit, LOG_WARN, LD_APP, "The \".exit\" notation is disabled in Tor due to " "security risks."); control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s", escaped(socks->address)); out->end_reason = END_STREAM_REASON_TORPROTOCOL; out->should_close = 1; return; } /* Remember the original address so we can tell the user about what * they actually said, not just what it turned into. */ /* XXX yes, this is the same as out->orig_address above. One is * in the output, and one is in the connection. */ if (! conn->original_dest_address) { /* Is the 'if' necessary here? XXXX */ conn->original_dest_address = tor_strdup(conn->socks_request->address); } /* First, apply MapAddress and MAPADDRESS mappings. We need to do * these only for non-reverse lookups, since they don't exist for those. * We also need to do this before we consider automapping, since we might * e.g. resolve irc.oftc.net into irconionaddress.onion, at which point * we'd need to automap it. */ if (socks->command != SOCKS_COMMAND_RESOLVE_PTR) { const unsigned rewrite_flags = AMR_FLAG_USE_MAPADDRESS; if (addressmap_rewrite(socks->address, sizeof(socks->address), rewrite_flags, &out->map_expires, &out->exit_source)) { control_event_stream_status(conn, STREAM_EVENT_REMAP, REMAP_STREAM_SOURCE_CACHE); } } /* Now see if we need to create or return an existing Hostname->IP * automapping.  Automapping happens when we're asked to resolve a * hostname, and AutomapHostsOnResolve is set, and the hostname has a * suffix listed in AutomapHostsSuffixes.  It's a handy feature * that lets you have Tor assign e.g. IPv6 addresses for .onion * names, and return them safely from DNSPort. */ if (socks->command == SOCKS_COMMAND_RESOLVE && tor_addr_parse(&addr_tmp, socks->address)<0 && options->AutomapHostsOnResolve) { /* Check the suffix... */ out->automap = addressmap_address_should_automap(socks->address, options); if (out->automap) { /* If we get here, then we should apply an automapping for this. */ const char *new_addr; /* We return an IPv4 address by default, or an IPv6 address if we * are allowed to do so. */ int addr_type = RESOLVED_TYPE_IPV4; if (conn->socks_request->socks_version != 4) { if (!conn->entry_cfg.ipv4_traffic || (conn->entry_cfg.ipv6_traffic && conn->entry_cfg.prefer_ipv6) || conn->entry_cfg.prefer_ipv6_virtaddr) addr_type = RESOLVED_TYPE_IPV6; } /* Okay, register the target address as automapped, and find the new * address we're supposed to give as a resolve answer. (Return a cached * value if we've looked up this address before. */ new_addr = addressmap_register_virtual_address( addr_type, tor_strdup(socks->address)); if (! new_addr) { log_warn(LD_APP, "Unable to automap address %s", escaped_safe_str(socks->address)); out->end_reason = END_STREAM_REASON_INTERNAL; out->should_close = 1; return; } log_info(LD_APP, "Automapping %s to %s", escaped_safe_str_client(socks->address), safe_str_client(new_addr)); strlcpy(socks->address, new_addr, sizeof(socks->address)); } } /* Now handle reverse lookups, if they're in the cache.  This doesn't * happen too often, since client-side DNS caching is off by default, * and very deprecated. */ if (socks->command == SOCKS_COMMAND_RESOLVE_PTR) { unsigned rewrite_flags = 0; if (conn->entry_cfg.use_cached_ipv4_answers) rewrite_flags |= AMR_FLAG_USE_IPV4_DNS; if (conn->entry_cfg.use_cached_ipv6_answers) rewrite_flags |= AMR_FLAG_USE_IPV6_DNS; if (addressmap_rewrite_reverse(socks->address, sizeof(socks->address), rewrite_flags, &out->map_expires)) { char *result = tor_strdup(socks->address); /* remember _what_ is supposed to have been resolved. */ tor_snprintf(socks->address, sizeof(socks->address), "REVERSE[%s]", out->orig_address); connection_ap_handshake_socks_resolved(conn, RESOLVED_TYPE_HOSTNAME, strlen(result), (uint8_t*)result, -1, out->map_expires); tor_free(result); out->end_reason = END_STREAM_REASON_DONE | END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED; out->should_close = 1; return; } /* Hang on, did we find an answer saying that this is a reverse lookup for * an internal address?  If so, we should reject it if we're configured to * do so. */ if (options->ClientDNSRejectInternalAddresses) { /* Don't let clients try to do a reverse lookup on 10.0.0.1. */ tor_addr_t addr; int ok; ok = tor_addr_parse_PTR_name( &addr, socks->address, AF_UNSPEC, 1); if (ok == 1 && tor_addr_is_internal(&addr, 0)) { connection_ap_handshake_socks_resolved(conn, RESOLVED_TYPE_ERROR, 0, NULL, -1, TIME_MAX); out->end_reason = END_STREAM_REASON_SOCKSPROTOCOL | END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED; out->should_close = 1; return; } } } /* If we didn't automap it before, then this is still the address that * came straight from the user, mapped according to any * MapAddress/MAPADDRESS commands.  Now apply other mappings, * including previously registered Automap entries (IP back to * hostname), TrackHostExits entries, and client-side DNS cache * entries (if they're turned on). */ if (socks->command != SOCKS_COMMAND_RESOLVE_PTR && !out->automap) { unsigned rewrite_flags = AMR_FLAG_USE_AUTOMAP | AMR_FLAG_USE_TRACKEXIT; addressmap_entry_source_t exit_source2; if (conn->entry_cfg.use_cached_ipv4_answers) rewrite_flags |= AMR_FLAG_USE_IPV4_DNS; if (conn->entry_cfg.use_cached_ipv6_answers) rewrite_flags |= AMR_FLAG_USE_IPV6_DNS; if (addressmap_rewrite(socks->address, sizeof(socks->address), rewrite_flags, &out->map_expires, &exit_source2)) { control_event_stream_status(conn, STREAM_EVENT_REMAP, REMAP_STREAM_SOURCE_CACHE); } if (out->exit_source == ADDRMAPSRC_NONE) { /* If it wasn't a .exit before, maybe it turned into a .exit. Remember * the original source of a .exit. */ out->exit_source = exit_source2; } } /* Check to see whether we're about to use an address in the virtual * range without actually having gotten it from an Automap. */ if (!out->automap && address_is_in_virtual_range(socks->address)) { /* This address was probably handed out by * client_dns_get_unmapped_address, but the mapping was discarded for some * reason. Or the user typed in a virtual address range manually. We * *don't* want to send the address through Tor; that's likely to fail, * and may leak information. */ log_warn(LD_APP,"Missing mapping for virtual address '%s'. Refusing.", safe_str_client(socks->address)); out->end_reason = END_STREAM_REASON_INTERNAL; out->should_close = 1; return; } }
0
[ "CWE-532" ]
tor
80c404c4b79f3bcba3fc4585d4c62a62a04f3ed9
703,422,749,735,528,400,000,000,000,000,000,000
195
Log warning when connecting to soon-to-be-deprecated v2 onions.
process_triblt(STREAM s, TRIBLT_ORDER * os, uint32 present, RD_BOOL delta) { RD_HBITMAP bitmap; BRUSH brush; if (present & 0x000001) { in_uint8(s, os->cache_id); in_uint8(s, os->colour_table); } if (present & 0x000002) rdp_in_coord(s, &os->x, delta); if (present & 0x000004) rdp_in_coord(s, &os->y, delta); if (present & 0x000008) rdp_in_coord(s, &os->cx, delta); if (present & 0x000010) rdp_in_coord(s, &os->cy, delta); if (present & 0x000020) in_uint8(s, os->opcode); if (present & 0x000040) rdp_in_coord(s, &os->srcx, delta); if (present & 0x000080) rdp_in_coord(s, &os->srcy, delta); if (present & 0x000100) rdp_in_colour(s, &os->bgcolour); if (present & 0x000200) rdp_in_colour(s, &os->fgcolour); rdp_parse_brush(s, &os->brush, present >> 10); if (present & 0x008000) in_uint16_le(s, os->cache_idx); if (present & 0x010000) in_uint16_le(s, os->unknown); DEBUG(("TRIBLT(op=0x%x,x=%d,y=%d,cx=%d,cy=%d,id=%d,idx=%d,bs=%d,bg=0x%x,fg=0x%x)\n", os->opcode, os->x, os->y, os->cx, os->cy, os->cache_id, os->cache_idx, os->brush.style, os->bgcolour, os->fgcolour)); bitmap = cache_get_bitmap(os->cache_id, os->cache_idx); if (bitmap == NULL) return; setup_brush(&brush, &os->brush); ui_triblt(os->opcode, os->x, os->y, os->cx, os->cy, bitmap, os->srcx, os->srcy, &brush, os->bgcolour, os->fgcolour); }
0
[ "CWE-787" ]
rdesktop
766ebcf6f23ccfe8323ac10242ae6e127d4505d2
313,171,441,162,403,900,000,000,000,000,000,000,000
59
Malicious RDP server security fixes This commit includes fixes for a set of 21 vulnerabilities in rdesktop when a malicious RDP server is used. All vulnerabilities was identified and reported by Eyal Itkin. * Add rdp_protocol_error function that is used in several fixes * Refactor of process_bitmap_updates * Fix possible integer overflow in s_check_rem() on 32bit arch * Fix memory corruption in process_bitmap_data - CVE-2018-8794 * Fix remote code execution in process_bitmap_data - CVE-2018-8795 * Fix remote code execution in process_plane - CVE-2018-8797 * Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175 * Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175 * Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176 * Fix Denial of Service in sec_recv - CVE-2018-20176 * Fix minor information leak in rdpdr_process - CVE-2018-8791 * Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792 * Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793 * Fix Denial of Service in process_bitmap_data - CVE-2018-8796 * Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798 * Fix Denial of Service in process_secondary_order - CVE-2018-8799 * Fix remote code execution in in ui_clip_handle_data - CVE-2018-8800 * Fix major information leak in ui_clip_handle_data - CVE-2018-20174 * Fix memory corruption in rdp_in_unistr - CVE-2018-20177 * Fix Denial of Service in process_demand_active - CVE-2018-20178 * Fix remote code execution in lspci_process - CVE-2018-20179 * Fix remote code execution in rdpsnddbg_process - CVE-2018-20180 * Fix remote code execution in seamless_process - CVE-2018-20181 * Fix remote code execution in seamless_process_line - CVE-2018-20182
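A minimal sketch of the 32-bit overflow behind the s_check_rem() fix listed above; the struct and function names are illustrative, not rdesktop's, and the invariant p <= end is assumed:

#include <stdbool.h>
#include <stddef.h>

struct stream { unsigned char *p, *end; };

/* Overflow-prone form: on 32-bit, p + n can wrap and compare in-bounds. */
static bool check_rem_unsafe(struct stream *s, size_t n)
{
    return s->p + n <= s->end;
}

/* Safe form: compare the remaining byte count, which cannot wrap. */
static bool check_rem_safe(struct stream *s, size_t n)
{
    return (size_t)(s->end - s->p) >= n;
}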
xmlSchemaFormatNodeForError(xmlChar ** msg, xmlSchemaAbstractCtxtPtr actxt, xmlNodePtr node) { xmlChar *str = NULL; *msg = NULL; if ((node != NULL) && (node->type != XML_ELEMENT_NODE) && (node->type != XML_ATTRIBUTE_NODE)) { /* * Don't try to format other nodes than element and * attribute nodes. * Play save and return an empty string. */ *msg = xmlStrdup(BAD_CAST ""); return(*msg); } if (node != NULL) { /* * Work on tree nodes. */ if (node->type == XML_ATTRIBUTE_NODE) { xmlNodePtr elem = node->parent; *msg = xmlStrdup(BAD_CAST "Element '"); if (elem->ns != NULL) *msg = xmlStrcat(*msg, xmlSchemaFormatQName(&str, elem->ns->href, elem->name)); else *msg = xmlStrcat(*msg, xmlSchemaFormatQName(&str, NULL, elem->name)); FREE_AND_NULL(str); *msg = xmlStrcat(*msg, BAD_CAST "', "); *msg = xmlStrcat(*msg, BAD_CAST "attribute '"); } else { *msg = xmlStrdup(BAD_CAST "Element '"); } if (node->ns != NULL) *msg = xmlStrcat(*msg, xmlSchemaFormatQName(&str, node->ns->href, node->name)); else *msg = xmlStrcat(*msg, xmlSchemaFormatQName(&str, NULL, node->name)); FREE_AND_NULL(str); *msg = xmlStrcat(*msg, BAD_CAST "': "); } else if (actxt->type == XML_SCHEMA_CTXT_VALIDATOR) { xmlSchemaValidCtxtPtr vctxt = (xmlSchemaValidCtxtPtr) actxt; /* * Work on node infos. */ if (vctxt->inode->nodeType == XML_ATTRIBUTE_NODE) { xmlSchemaNodeInfoPtr ielem = vctxt->elemInfos[vctxt->depth]; *msg = xmlStrdup(BAD_CAST "Element '"); *msg = xmlStrcat(*msg, xmlSchemaFormatQName(&str, ielem->nsName, ielem->localName)); FREE_AND_NULL(str); *msg = xmlStrcat(*msg, BAD_CAST "', "); *msg = xmlStrcat(*msg, BAD_CAST "attribute '"); } else { *msg = xmlStrdup(BAD_CAST "Element '"); } *msg = xmlStrcat(*msg, xmlSchemaFormatQName(&str, vctxt->inode->nsName, vctxt->inode->localName)); FREE_AND_NULL(str); *msg = xmlStrcat(*msg, BAD_CAST "': "); } else if (actxt->type == XML_SCHEMA_CTXT_PARSER) { /* * Hmm, no node while parsing? * Return an empty string, in case NULL will break something. */ *msg = xmlStrdup(BAD_CAST ""); } else { TODO return (NULL); } /* * VAL TODO: The output of the given schema component is currently * disabled. */ #if 0 if ((type != NULL) && (xmlSchemaIsGlobalItem(type))) { *msg = xmlStrcat(*msg, BAD_CAST " ["); *msg = xmlStrcat(*msg, xmlSchemaFormatItemForReport(&str, NULL, type, NULL, 0)); FREE_AND_NULL(str) *msg = xmlStrcat(*msg, BAD_CAST "]"); } #endif return (*msg); }
1
[ "CWE-134" ]
libxml2
4472c3a5a5b516aaf59b89be602fbce52756c3e9
232,451,949,434,367,400,000,000,000,000,000,000,000
94
Fix some format string warnings with possible format string vulnerability For https://bugzilla.gnome.org/show_bug.cgi?id=761029 Decorate every method in libxml2 with the appropriate LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups following the reports.
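A minimal sketch of the decoration the message describes, assuming LIBXML_ATTR_FORMAT(fmt, args) expands to GCC's format attribute; the helper below is illustrative, not libxml2 code:

#include <stdarg.h>
#include <stdio.h>

__attribute__((format(printf, 1, 2)))
static void report_error(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);   /* format string is now compiler-checked */
    va_end(ap);
}

With the attribute in place, a call like report_error(untrusted) draws a -Wformat warning; the safe spelling is report_error("%s", untrusted).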
static int SN_Client_HandlePacket(MqttClient* client, SN_MsgType packet_type, void* packet_obj, int timeout) { int rc = MQTT_CODE_SUCCESS; word16 packet_id = 0; (void)timeout; switch ((int)packet_type) { case SN_MSG_TYPE_GWINFO: { SN_GwInfo info, *p_info = &info; if (packet_obj) { p_info = (SN_GwInfo*)packet_obj; } else { XMEMSET(p_info, 0, sizeof(SN_GwInfo)); } rc = SN_Decode_GWInfo(client->rx_buf, client->packet.buf_len, p_info); if (rc <= 0) { return rc; } break; } case SN_MSG_TYPE_CONNACK: { /* Decode connect ack */ SN_ConnectAck connect_ack, *p_connect_ack = &connect_ack; if (packet_obj) { p_connect_ack = (SN_ConnectAck*)packet_obj; } else { XMEMSET(p_connect_ack, 0, sizeof(SN_ConnectAck)); } p_connect_ack->return_code = client->rx_buf[client->packet.buf_len-1]; break; } case SN_MSG_TYPE_WILLTOPICREQ: { rc = SN_Decode_WillTopicReq(client->rx_buf, client->packet.buf_len); break; } case SN_MSG_TYPE_WILLMSGREQ: { rc = SN_Decode_WillMsgReq(client->rx_buf, client->packet.buf_len); break; } case SN_MSG_TYPE_REGISTER: { /* Decode register */ SN_Register reg_s; int len; XMEMSET(&reg_s, 0, sizeof(SN_Register)); rc = SN_Decode_Register(client->rx_buf, client->packet.buf_len, &reg_s); if (rc > 0) { /* Initialize the regack */ reg_s.regack.packet_id = reg_s.packet_id; reg_s.regack.topicId = reg_s.topicId; reg_s.regack.return_code = SN_RC_NOTSUPPORTED; /* Call the register callback to allow app to handle new topic ID assignment. */ if (client->reg_cb != NULL) { rc = client->reg_cb(reg_s.topicId, reg_s.topicName, client->reg_ctx); /* Set the regack return code */ reg_s.regack.return_code = (rc >= 0) ? SN_RC_ACCEPTED : SN_RC_INVTOPICNAME; } #ifdef WOLFMQTT_MULTITHREAD /* Lock send socket mutex */ rc = wm_SemLock(&client->lockSend); if (rc != 0) { return rc; } #endif /* Encode the register acknowledgment */ rc = SN_Encode_RegAck(client->tx_buf, client->tx_buf_len, &reg_s.regack); #ifdef WOLFMQTT_DEBUG_CLIENT PRINTF("MqttClient_EncodePacket: Len %d, Type %s (%d), ID %d", rc, SN_Packet_TypeDesc(SN_MSG_TYPE_REGACK), SN_MSG_TYPE_REGACK, reg_s.packet_id); #endif if (rc <= 0) { #ifdef WOLFMQTT_MULTITHREAD wm_SemUnlock(&client->lockSend); #endif return rc; } len = rc; /* Send regack packet */ rc = MqttPacket_Write(client, client->tx_buf, len); #ifdef WOLFMQTT_MULTITHREAD wm_SemUnlock(&client->lockSend); #endif if (rc != len) { return rc; } } break; } case SN_MSG_TYPE_REGACK: { /* Decode register ack */ SN_RegAck regack_s, *p_regack = &regack_s; if (packet_obj) { p_regack = (SN_RegAck*)packet_obj; } else { XMEMSET(p_regack, 0, sizeof(SN_RegAck)); } rc = SN_Decode_RegAck(client->rx_buf, client->packet.buf_len, p_regack); if (rc > 0) { packet_id = p_regack->packet_id; } break; } case SN_MSG_TYPE_PUBLISH: { SN_Publish pub, *p_pub = &pub; if (packet_obj) { p_pub = (SN_Publish*)packet_obj; } else { XMEMSET(p_pub, 0, sizeof(SN_Publish)); } /* Decode publish message */ rc = SN_Decode_Publish(client->rx_buf, client->packet.buf_len, p_pub); if (rc <= 0) { return rc; } /* Issue callback for new message */ if (client->msg_cb) { /* if using the temp publish message buffer, then populate message context with client context */ if (&client->msgSN.publish == p_pub) p_pub->ctx = client->ctx; rc = client->msg_cb(client, (MqttMessage*)p_pub, 1, 1); if (rc != MQTT_CODE_SUCCESS) { return rc; }; } /* Handle Qos */ if (p_pub->qos > MQTT_QOS_0) { SN_MsgType type; packet_id = p_pub->packet_id; /* Determine packet type to write */ type = (p_pub->qos == MQTT_QOS_1) ? SN_MSG_TYPE_PUBACK : SN_MSG_TYPE_PUBREC; p_pub->resp.packet_id = packet_id; #ifdef WOLFMQTT_MULTITHREAD /* Lock send socket mutex */ rc = wm_SemLock(&client->lockSend); if (rc != 0) { return rc; } #endif /* Encode publish response */ rc = SN_Encode_PublishResp(client->tx_buf, client->tx_buf_len, type, &p_pub->resp); #ifdef WOLFMQTT_DEBUG_CLIENT PRINTF("MqttClient_EncodePacket: Len %d, Type %s (%d), ID %d," " QoS %d", rc, SN_Packet_TypeDesc(type), type, packet_id, p_pub->qos); #endif if (rc <= 0) { #ifdef WOLFMQTT_MULTITHREAD wm_SemUnlock(&client->lockSend); #endif return rc; } client->packet.buf_len = rc; /* Send packet */ rc = MqttPacket_Write(client, client->tx_buf, client->packet.buf_len); #ifdef WOLFMQTT_MULTITHREAD wm_SemUnlock(&client->lockSend); #endif } break; } case SN_MSG_TYPE_PUBACK: case SN_MSG_TYPE_PUBCOMP: case SN_MSG_TYPE_PUBREC: case SN_MSG_TYPE_PUBREL: { SN_PublishResp publish_resp; XMEMSET(&publish_resp, 0, sizeof(SN_PublishResp)); /* Decode publish response message */ rc = SN_Decode_PublishResp(client->rx_buf, client->packet.buf_len, packet_type, &publish_resp); if (rc <= 0) { return rc; } packet_id = publish_resp.packet_id; /* If Qos then send response */ if (packet_type == SN_MSG_TYPE_PUBREC || packet_type == SN_MSG_TYPE_PUBREL) { byte resp_type = (packet_type == SN_MSG_TYPE_PUBREC) ? SN_MSG_TYPE_PUBREL : SN_MSG_TYPE_PUBCOMP; #ifdef WOLFMQTT_MULTITHREAD /* Lock send socket mutex */ rc = wm_SemLock(&client->lockSend); if (rc != 0) { return rc; } #endif /* Encode publish response */ publish_resp.packet_id = packet_id; rc = SN_Encode_PublishResp(client->tx_buf, client->tx_buf_len, resp_type, &publish_resp); #ifdef WOLFMQTT_DEBUG_CLIENT PRINTF("MqttClient_EncodePacket: Len %d, Type %s (%d), ID %d", rc, MqttPacket_TypeDesc(resp_type), resp_type, packet_id); #endif if (rc <= 0) { #ifdef WOLFMQTT_MULTITHREAD wm_SemUnlock(&client->lockSend); #endif return rc; } client->packet.buf_len = rc; /* Send packet */ rc = MqttPacket_Write(client, client->tx_buf, client->packet.buf_len); #ifdef WOLFMQTT_MULTITHREAD wm_SemUnlock(&client->lockSend); #endif } break; } case SN_MSG_TYPE_SUBACK: { /* Decode subscribe ack */ SN_SubAck subscribe_ack, *p_subscribe_ack = &subscribe_ack; if (packet_obj) { p_subscribe_ack = (SN_SubAck*)packet_obj; } else { XMEMSET(p_subscribe_ack, 0, sizeof(SN_SubAck)); } rc = SN_Decode_SubscribeAck(client->rx_buf, client->packet.buf_len, p_subscribe_ack); if (rc <= 0) { return rc; } packet_id = p_subscribe_ack->packet_id; break; } case SN_MSG_TYPE_UNSUBACK: { /* Decode unsubscribe ack */ SN_UnsubscribeAck unsubscribe_ack, *p_unsubscribe_ack = &unsubscribe_ack; if (packet_obj) { p_unsubscribe_ack = (SN_UnsubscribeAck*)packet_obj; } else { XMEMSET(p_unsubscribe_ack, 0, sizeof(SN_UnsubscribeAck)); } rc = SN_Decode_UnsubscribeAck(client->rx_buf, client->packet.buf_len, p_unsubscribe_ack); if (rc <= 0) { return rc; } packet_id = p_unsubscribe_ack->packet_id; break; } case SN_MSG_TYPE_PING_RESP: { /* Decode ping */ rc = SN_Decode_Ping(client->rx_buf, client->packet.buf_len); break; } case SN_MSG_TYPE_PING_REQ: { int len; /* Decode ping */ rc = SN_Decode_Ping(client->rx_buf, client->packet.buf_len); if (rc <= 0) { return rc; } #ifdef WOLFMQTT_MULTITHREAD /* Lock send socket mutex */ rc = wm_SemLock(&client->lockSend); if (rc != 0) { return rc; } #endif /* Encode the ping packet as a response */ rc = SN_Encode_Ping(client->tx_buf, client->tx_buf_len, NULL, SN_MSG_TYPE_PING_RESP); #ifdef WOLFMQTT_DEBUG_CLIENT PRINTF("MqttClient_EncodePacket: Len %d, Type %s (%d)", rc, SN_Packet_TypeDesc(SN_MSG_TYPE_PING_RESP), SN_MSG_TYPE_PING_RESP); #endif if (rc <= 0) { #ifdef WOLFMQTT_MULTITHREAD wm_SemUnlock(&client->lockSend); #endif return rc; } len = rc; /* Send ping resp packet */ rc = MqttPacket_Write(client, client->tx_buf, len); #ifdef WOLFMQTT_MULTITHREAD wm_SemUnlock(&client->lockSend); #endif if (rc != len) { return rc; } break; } case SN_MSG_TYPE_WILLTOPICRESP: { /* Decode Will Topic Response */ SN_WillTopicResp resp_s, *resp = &resp_s; if (packet_obj) { resp = (SN_WillTopicResp*)packet_obj; } else { XMEMSET(resp, 0, sizeof(SN_WillTopicResp)); } rc = SN_Decode_WillTopicResponse(client->rx_buf, client->packet.buf_len, &resp->return_code); break; } case SN_MSG_TYPE_WILLMSGRESP: { /* Decode Will Message Response */ SN_WillMsgResp resp_s, *resp = &resp_s; if (packet_obj) { resp = (SN_WillMsgResp*)packet_obj; } else { XMEMSET(resp, 0, sizeof(SN_WillMsgResp)); } rc = SN_Decode_WillMsgResponse(client->rx_buf, client->packet.buf_len, &resp->return_code); break; } case SN_MSG_TYPE_DISCONNECT: { /* Decode Disconnect */ rc = SN_Decode_Disconnect(client->rx_buf, client->packet.buf_len); break; } default: { /* Other types are server side only, ignore */ #ifdef WOLFMQTT_DEBUG_CLIENT PRINTF("SN_Client_HandlePacket: Invalid client packet type %u!", packet_type); #endif break; } } /* switch (packet_type) */ (void)packet_id; return rc; }
0
[ "CWE-787" ]
wolfMQTT
84d4b53122e0fa0280c7872350b89d5777dabbb2
134,841,339,718,711,300,000,000,000,000,000,000,000
400
Fix wolfmqtt-fuzzer: Null-dereference WRITE in MqttProps_Free
static void io_req_task_queue(struct io_kiocb *req) { int ret; init_task_work(&req->task_work, io_req_task_submit); percpu_ref_get(&req->ctx->refs); ret = io_req_task_work_add(req, &req->task_work, true); if (unlikely(ret)) { struct task_struct *tsk; init_task_work(&req->task_work, io_req_task_cancel); tsk = io_wq_get_task(req->ctx->io_wq); task_work_add(tsk, &req->task_work, 0); wake_up_process(tsk); } }
0
[]
linux
0f2122045b946241a9e549c2a76cea54fa58a7ff
18,785,984,705,621,867,000,000,000,000,000,000,000
17
io_uring: don't rely on weak ->files references Grab actual references to the files_struct. To avoid circular references issues due to this, we add a per-task note that keeps track of what io_uring contexts a task has used. When the tasks execs or exits its assigned files, we cancel requests based on this tracking. With that, we can grab proper references to the files table, and no longer need to rely on stashing away ring_fd and ring_file to check if the ring_fd may have been closed. Cc: [email protected] # v5.5+ Reviewed-by: Pavel Begunkov <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) { struct kvm_s390_vm_tod_clock gtod; if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) return -EFAULT; if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx) return -EINVAL; kvm_s390_set_tod_clock(kvm, &gtod); VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx", gtod.epoch_idx, gtod.tod); return 0; }
0
[ "CWE-416" ]
linux
0774a964ef561b7170d8d1b1bfe6f88002b6d219
151,563,610,059,494,700,000,000,000,000,000,000,000
16
KVM: Fix out of range accesses to memslots Reset the LRU slot if it becomes invalid when deleting a memslot to fix an out-of-bounds/use-after-free access when searching through memslots. Explicitly check for there being no used slots in search_memslots(), and in the caller of s390's approximation variant. Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots") Reported-by: Qian Cai <[email protected]> Cc: Peter Xu <[email protected]> Signed-off-by: Sean Christopherson <[email protected]> Message-Id: <[email protected]> Acked-by: Christian Borntraeger <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
static int l2cap_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; int err = 0; BT_DBG("sk %p backlog %d", sk, backlog); lock_sock(sk); if (sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } switch (chan->mode) { case L2CAP_MODE_BASIC: break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: if (!disable_ertm) break; /* fall through */ default: err = -ENOTSUPP; goto done; } sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; chan->state = BT_LISTEN; sk->sk_state = BT_LISTEN; done: release_sock(sk); return err; }
0
[ "CWE-200" ]
linux
792039c73cf176c8e39a6e8beef2c94ff46522ed
147,804,009,172,410,110,000,000,000,000,000,000,000
43
Bluetooth: L2CAP - Fix info leak via getsockname() The L2CAP code fails to initialize the l2_bdaddr_type member of struct sockaddr_l2 and the padding byte added for alignment. It thereby leaks two bytes of kernel stack via the getsockname() syscall. Add an explicit memset(0) before filling the structure to avoid the info leak. Signed-off-by: Mathias Krause <[email protected]> Cc: Marcel Holtmann <[email protected]> Cc: Gustavo Padovan <[email protected]> Cc: Johan Hedberg <[email protected]> Signed-off-by: David S. Miller <[email protected]>
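A minimal sketch of the memset-before-fill pattern the fix applies; the struct below is a stand-in for the real struct sockaddr_l2, and AF_BLUETOOTH's value is hard-coded only to keep the sketch self-contained:

#include <string.h>

struct l2_addr_like {
    unsigned short family;
    unsigned short psm;
    unsigned char  bdaddr[6];
    unsigned char  bdaddr_type;   /* the member the bug left uninitialized */
    /* compiler-inserted padding may follow */
};

static void fill_addr(struct l2_addr_like *la)
{
    memset(la, 0, sizeof(*la));   /* zero every field *and* the padding */
    la->family = 31;              /* AF_BLUETOOTH on Linux */
    la->psm = 0x1001;
}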
zfont_get_to_unicode_map(gs_font_dir *dir) { const gs_unicode_decoder *pud = (gs_unicode_decoder *)dir->glyph_to_unicode_table; return (pud == NULL ? NULL : &pud->data); }
0
[ "CWE-704" ]
ghostpdl
548bb434e81dadcc9f71adf891a3ef5bea8e2b4e
262,895,240,972,604,000,000,000,000,000,000,000,000
6
PS interpreter - add some type checking These were 'probably' safe anyway, since they mostly treat the objects as integers without checking, which at least can't result in a crash. Nevertheless, we ought to check. The return from comparedictkeys could be wrong if one of the keys had a value which was not an array, it could incorrectly decide the two were in fact the same.
static int network_stats_read (void) /* {{{ */ { derive_t copy_octets_rx; derive_t copy_octets_tx; derive_t copy_packets_rx; derive_t copy_packets_tx; derive_t copy_values_dispatched; derive_t copy_values_not_dispatched; derive_t copy_values_sent; derive_t copy_values_not_sent; derive_t copy_receive_list_length; value_list_t vl = VALUE_LIST_INIT; value_t values[2]; copy_octets_rx = stats_octets_rx; copy_octets_tx = stats_octets_tx; copy_packets_rx = stats_packets_rx; copy_packets_tx = stats_packets_tx; copy_values_dispatched = stats_values_dispatched; copy_values_not_dispatched = stats_values_not_dispatched; copy_values_sent = stats_values_sent; copy_values_not_sent = stats_values_not_sent; copy_receive_list_length = receive_list_length; /* Initialize `vl' */ vl.values = values; vl.values_len = 2; vl.time = 0; sstrncpy (vl.host, hostname_g, sizeof (vl.host)); sstrncpy (vl.plugin, "network", sizeof (vl.plugin)); /* Octets received / sent */ vl.values[0].derive = (derive_t) copy_octets_rx; vl.values[1].derive = (derive_t) copy_octets_tx; sstrncpy (vl.type, "if_octets", sizeof (vl.type)); plugin_dispatch_values (&vl); /* Packets received / send */ vl.values[0].derive = (derive_t) copy_packets_rx; vl.values[1].derive = (derive_t) copy_packets_tx; sstrncpy (vl.type, "if_packets", sizeof (vl.type)); plugin_dispatch_values (&vl); /* Values (not) dispatched and (not) send */ sstrncpy (vl.type, "total_values", sizeof (vl.type)); vl.values_len = 1; vl.values[0].derive = (derive_t) copy_values_dispatched; sstrncpy (vl.type_instance, "dispatch-accepted", sizeof (vl.type_instance)); plugin_dispatch_values (&vl); vl.values[0].derive = (derive_t) copy_values_not_dispatched; sstrncpy (vl.type_instance, "dispatch-rejected", sizeof (vl.type_instance)); plugin_dispatch_values (&vl); vl.values[0].derive = (derive_t) copy_values_sent; sstrncpy (vl.type_instance, "send-accepted", sizeof (vl.type_instance)); plugin_dispatch_values (&vl); vl.values[0].derive = (derive_t) copy_values_not_sent; sstrncpy (vl.type_instance, "send-rejected", sizeof (vl.type_instance)); plugin_dispatch_values (&vl); /* Receive queue length */ vl.values[0].gauge = (gauge_t) copy_receive_list_length; sstrncpy (vl.type, "queue_length", sizeof (vl.type)); vl.type_instance[0] = 0; plugin_dispatch_values (&vl); return (0); } /* }}} int network_stats_read */
0
[ "CWE-119", "CWE-787" ]
collectd
b589096f907052b3a4da2b9ccc9b0e2e888dfc18
230,859,391,336,085,000,000,000,000,000,000,000,000
75
network plugin: Fix heap overflow in parse_packet(). Emilien Gaspar has identified a heap overflow in parse_packet(), the function used by the network plugin to parse incoming network packets. This is a vulnerability in collectd, though the scope is not clear at this point. At the very least specially crafted network packets can be used to crash the daemon. We can't rule out a potential remote code execution though. Fixes: CVE-2016-6254
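A minimal sketch of the length validation such a packet parser needs; the field layout and names are illustrative, not collectd's wire format:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static int parse_part(const uint8_t *buf, size_t buf_len,
                      char *out, size_t out_size)
{
    uint16_t part_len;

    if (buf_len < 4)                                /* type(2) + length(2) */
        return -1;
    part_len = (uint16_t)((buf[2] << 8) | buf[3]);  /* big-endian length */

    if (part_len < 4 || part_len > buf_len)         /* header lies: reject */
        return -1;
    if ((size_t)(part_len - 4) >= out_size)         /* keep room for NUL */
        return -1;

    memcpy(out, buf + 4, part_len - 4);
    out[part_len - 4] = '\0';
    return (int)part_len;
}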
irc_server_set_buffer_title (struct t_irc_server *server) { char *title; int length; if (server && server->buffer) { if (server->is_connected) { length = 16 + ((server->current_address) ? strlen (server->current_address) : 16) + 16 + ((server->current_ip) ? strlen (server->current_ip) : 16) + 1; title = malloc (length); if (title) { snprintf (title, length, "IRC: %s/%d (%s)", server->current_address, server->current_port, (server->current_ip) ? server->current_ip : ""); weechat_buffer_set (server->buffer, "title", title); free (title); } } else { weechat_buffer_set (server->buffer, "title", ""); } } }
0
[ "CWE-120", "CWE-787" ]
weechat
40ccacb4330a64802b1f1e28ed9a6b6d3ca9197f
82,939,458,564,927,440,000,000,000,000,000,000,000
29
irc: fix crash when a new message 005 is received with longer nick prefixes Thanks to Stuart Nevans Locke for reporting the issue.
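A minimal sketch of the defensive pattern for an over-long server-supplied value; the buffer handling below is illustrative, not weechat's:

#include <stdio.h>

static void set_prefix_modes(char *dst, size_t dst_size, const char *from_server)
{
    /* bounded copy: truncates instead of overrunning dst when the
     * server advertises an over-long 005 PREFIX value */
    snprintf(dst, dst_size, "%s", from_server);
}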
FloydSteinbergInitG(gx_device_printer * pdev) { int i; gx_device_bjc_printer *dev = (gx_device_bjc_printer *)pdev; dev->FloydSteinbergErrorsG = (int *) gs_alloc_bytes(pdev->memory, sizeof(int)*(pdev->width+3), "bjc error buffer"); if (dev->FloydSteinbergErrorsG == 0) /* can't allocate error buffer */ return -1; dev->FloydSteinbergDirectionForward=true; for (i=0; i < pdev->width+3; i++) dev->FloydSteinbergErrorsG[i] = 0; /* clear */ bjc_rgb_to_gray(dev->paperColor.red, dev->paperColor.green, dev->paperColor.blue, &dev->FloydSteinbergG); dev->FloydSteinbergG = (255 - dev->FloydSteinbergG) << 4; /* Maybe */ bjc_init_tresh(dev, dev->rnd); return 0; }
0
[ "CWE-787" ]
ghostpdl
bf72f1a3dd5392ee8291e3b1518a0c2c5dc6ba39
204,168,419,588,655,100,000,000,000,000,000,000,000
22
Fix valgrind problems with gdevbjca.c 2 problems here. Firstly, we could access off the end of a row while looking for runs. Change the indexing to fix this. Secondly, we could overrun our gamma tables due to unexpectedly large values. Add some clamping.
void LibRaw::parse_exif(int base) { unsigned entries, tag, type, len, save, c; double expo, ape; unsigned kodak = !strncmp(make, "EASTMAN", 7) && tiff_nifds < 3; entries = get2(); if (!strncmp(make, "Hasselblad", 10) && (tiff_nifds > 3) && (entries > 512)) return; INT64 fsize = ifp->size(); while (entries--) { tiff_get(base, &tag, &type, &len, &save); INT64 savepos = ftell(ifp); if (len > 8 && savepos + len > fsize * 2) { fseek(ifp, save, SEEK_SET); // Recover tiff-read position!! continue; } if (callbacks.exif_cb) { callbacks.exif_cb(callbacks.exifparser_data, tag, type, len, order, ifp, base); fseek(ifp, savepos, SEEK_SET); } switch (tag) { case 0xA005: // Interoperability IFD fseek(ifp, get4() + base, SEEK_SET); parse_exif_interop(base); break; case 0xA001: // ExifIFD.ColorSpace c = get2(); if (c == 1 && imgdata.color.ExifColorSpace == LIBRAW_COLORSPACE_Unknown) imgdata.color.ExifColorSpace = LIBRAW_COLORSPACE_sRGB; else if (c == 2) imgdata.color.ExifColorSpace = LIBRAW_COLORSPACE_AdobeRGB; break; case 0x9400: imCommon.exifAmbientTemperature = getreal(type); if ((imCommon.CameraTemperature > -273.15f) && ((OlyID == OlyID_TG_5) || (OlyID == OlyID_TG_6)) ) imCommon.CameraTemperature += imCommon.exifAmbientTemperature; break; case 0x9401: imCommon.exifHumidity = getreal(type); break; case 0x9402: imCommon.exifPressure = getreal(type); break; case 0x9403: imCommon.exifWaterDepth = getreal(type); break; case 0x9404: imCommon.exifAcceleration = getreal(type); break; case 0x9405: imCommon.exifCameraElevationAngle = getreal(type); break; case 0xa405: // FocalLengthIn35mmFormat imgdata.lens.FocalLengthIn35mmFormat = get2(); break; case 0xa431: // BodySerialNumber stmread(imgdata.shootinginfo.BodySerial, len, ifp); break; case 0xa432: // LensInfo, 42034dec, Lens Specification per EXIF standard imgdata.lens.MinFocal = getreal(type); imgdata.lens.MaxFocal = getreal(type); imgdata.lens.MaxAp4MinFocal = getreal(type); imgdata.lens.MaxAp4MaxFocal = getreal(type); break; case 0xa435: // LensSerialNumber stmread(imgdata.lens.LensSerial, len, ifp); if (!strncmp(imgdata.lens.LensSerial, "----", 4)) imgdata.lens.LensSerial[0] = '\0'; break; case 0xa420: /* 42016, ImageUniqueID */ stmread(imgdata.color.ImageUniqueID, len, ifp); break; case 0xc65d: /* 50781, RawDataUniqueID */ imgdata.color.RawDataUniqueID[16] = 0; fread(imgdata.color.RawDataUniqueID, 1, 16, ifp); break; case 0xc630: // DNG LensInfo, Lens Specification per EXIF standard imgdata.lens.dng.MinFocal = getreal(type); imgdata.lens.dng.MaxFocal = getreal(type); imgdata.lens.dng.MaxAp4MinFocal = getreal(type); imgdata.lens.dng.MaxAp4MaxFocal = getreal(type); break; case 0xc68b: /* 50827, OriginalRawFileName */ stmread(imgdata.color.OriginalRawFileName, len, ifp); break; case 0xa433: // LensMake stmread(imgdata.lens.LensMake, len, ifp); break; case 0xa434: // LensModel stmread(imgdata.lens.Lens, len, ifp); if (!strncmp(imgdata.lens.Lens, "----", 4)) imgdata.lens.Lens[0] = '\0'; break; case 0x9205: imgdata.lens.EXIF_MaxAp = libraw_powf64l(2.0f, (getreal(type) / 2.0f)); break; case 0x829a: // 33434 shutter = getreal(type); if (tiff_nifds > 0 && tiff_nifds <= LIBRAW_IFD_MAXCOUNT) tiff_ifd[tiff_nifds - 1].t_shutter = shutter; break; case 0x829d: // 33437, FNumber aperture = getreal(type); break; case 0x8827: // 34855 iso_speed = get2(); break; case 0x8831: // 34865 if (iso_speed == 0xffff && !strncasecmp(make, "FUJI", 4)) iso_speed = getreal(type); break; case 0x8832: // 34866 if (iso_speed == 0xffff && (!strncasecmp(make, "SONY", 4) || !strncasecmp(make, "CANON", 5))) iso_speed = getreal(type); break; case 0x9003: // 36867 case 0x9004: // 36868 get_timestamp(0); break; case 0x9201: // 37377 if ((expo = -getreal(type)) < 128 && shutter == 0.) { shutter = libraw_powf64l(2.0, expo); if (tiff_nifds > 0 && tiff_nifds <= LIBRAW_IFD_MAXCOUNT) tiff_ifd[tiff_nifds - 1].t_shutter = shutter; } break; case 0x9202: // 37378 ApertureValue if ((fabs(ape = getreal(type)) < 256.0) && (!aperture)) aperture = libraw_powf64l(2.0, ape / 2); break; case 0x9209: // 37385 flash_used = getreal(type); break; case 0x920a: // 37386 focal_len = getreal(type); break; case 0x927c: // 37500 if (((make[0] == '\0') && !strncmp(model, "ov5647", 6)) || (!strncmp(make, "RaspberryPi", 11) && (!strncmp(model, "RP_OV5647", 9) || !strncmp(model, "RP_imx219", 9)))) { char mn_text[512]; char *pos; char ccms[512]; ushort l; float num; fgets(mn_text, MIN(len, 511), ifp); mn_text[511] = 0; pos = strstr(mn_text, "gain_r="); if (pos) cam_mul[0] = atof(pos + 7); pos = strstr(mn_text, "gain_b="); if (pos) cam_mul[2] = atof(pos + 7); if ((cam_mul[0] > 0.001f) && (cam_mul[2] > 0.001f)) cam_mul[1] = cam_mul[3] = 1.0f; else cam_mul[0] = cam_mul[2] = 0.0f; pos = strstr(mn_text, "ccm="); if (pos) { pos += 4; char *pos2 = strstr(pos, " "); if (pos2) { l = pos2 - pos; memcpy(ccms, pos, l); ccms[l] = '\0'; #ifdef LIBRAW_WIN32_CALLS // Win32 strtok is already thread-safe pos = strtok(ccms, ","); #else char *last = 0; pos = strtok_r(ccms, ",", &last); #endif if (pos) { for (l = 0; l < 4; l++) { num = 0.0; for (c = 0; c < 3; c++) { imgdata.color.ccm[l][c] = (float)atoi(pos); num += imgdata.color.ccm[l][c]; #ifdef LIBRAW_WIN32_CALLS pos = strtok(NULL, ","); #else pos = strtok_r(NULL, ",", &last); #endif if (!pos) goto end; // broken } if (num > 0.01) FORC3 imgdata.color.ccm[l][c] = imgdata.color.ccm[l][c] / num; } } } } end:; } else if (!strncmp(make, "SONY", 4) && (!strncmp(model, "DSC-V3", 6) || !strncmp(model, "DSC-F828", 8))) { parseSonySRF(len); break; } else if ((len == 1) && !strncmp(make, "NIKON", 5)) { c = get4(); if (c) fseek(ifp, c, SEEK_SET); is_NikonTransfer = 1; } parse_makernote(base, 0); break; case 0xa002: // 40962 if (kodak) raw_width = get4(); break; case 0xa003: // 40963 if (kodak) raw_height = get4(); break; case 0xa302: // 41730 if (get4() == 0x20002) for (exif_cfa = c = 0; c < 8; c += 2) exif_cfa |= fgetc(ifp) * 0x01010101U << c; } fseek(ifp, save, SEEK_SET); } }
0
[ "CWE-787" ]
LibRaw
55f0a0c08974b8b79ebfa7762b555a1704b25fb2
124,973,419,188,282,150,000,000,000,000,000,000,000
250
possible buffer underrun in exif parser
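A minimal sketch of the lower-bound check an underrun fix adds; names are illustrative, not LibRaw's:

#include <stddef.h>

static int read_back(const unsigned char *buf, size_t len,
                     size_t pos, size_t back)
{
    if (back > pos || pos - back >= len)   /* before buf[0] or past the end */
        return -1;
    return buf[pos - back];
}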
static int snd_timer_user_next_device(struct snd_timer_id __user *_tid) { struct snd_timer_id id; struct snd_timer *timer; struct list_head *p; if (copy_from_user(&id, _tid, sizeof(id))) return -EFAULT; mutex_lock(&register_mutex); if (id.dev_class < 0) { /* first item */ if (list_empty(&snd_timer_list)) snd_timer_user_zero_id(&id); else { timer = list_entry(snd_timer_list.next, struct snd_timer, device_list); snd_timer_user_copy_id(&id, timer); } } else { switch (id.dev_class) { case SNDRV_TIMER_CLASS_GLOBAL: id.device = id.device < 0 ? 0 : id.device + 1; list_for_each(p, &snd_timer_list) { timer = list_entry(p, struct snd_timer, device_list); if (timer->tmr_class > SNDRV_TIMER_CLASS_GLOBAL) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_device >= id.device) { snd_timer_user_copy_id(&id, timer); break; } } if (p == &snd_timer_list) snd_timer_user_zero_id(&id); break; case SNDRV_TIMER_CLASS_CARD: case SNDRV_TIMER_CLASS_PCM: if (id.card < 0) { id.card = 0; } else { if (id.device < 0) { id.device = 0; } else { if (id.subdevice < 0) { id.subdevice = 0; } else { id.subdevice++; } } } list_for_each(p, &snd_timer_list) { timer = list_entry(p, struct snd_timer, device_list); if (timer->tmr_class > id.dev_class) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_class < id.dev_class) continue; if (timer->card->number > id.card) { snd_timer_user_copy_id(&id, timer); break; } if (timer->card->number < id.card) continue; if (timer->tmr_device > id.device) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_device < id.device) continue; if (timer->tmr_subdevice > id.subdevice) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_subdevice < id.subdevice) continue; snd_timer_user_copy_id(&id, timer); break; } if (p == &snd_timer_list) snd_timer_user_zero_id(&id); break; default: snd_timer_user_zero_id(&id); } } mutex_unlock(&register_mutex); if (copy_to_user(_tid, &id, sizeof(*_tid))) return -EFAULT; return 0; }
0
[ "CWE-200", "CWE-362" ]
linux
ee8413b01045c74340aa13ad5bdf905de32be736
248,649,744,109,780,530,000,000,000,000,000,000,000
95
ALSA: timer: Fix double unlink of active_list ALSA timer instance object has a couple of linked lists and they are unlinked unconditionally at snd_timer_stop(). Meanwhile snd_timer_interrupt() unlinks it, but it calls list_del() which leaves the element list itself unchanged. This ends up with unlinking twice, and it was caught by syzkaller fuzzer. The fix is to use list_del_init() variant properly there, too. Reported-by: Dmitry Vyukov <[email protected]> Tested-by: Dmitry Vyukov <[email protected]> Cc: <[email protected]> Signed-off-by: Takashi Iwai <[email protected]>
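A simplified userspace rendering of the two list primitives the message contrasts (the kernel versions also poison pointers), showing why a repeated unlink is only safe after list_del_init():

struct list_head { struct list_head *next, *prev; };

static void list_del(struct list_head *e)
{
    e->next->prev = e->prev;
    e->prev->next = e->next;
    /* e->next/e->prev still point into the list, so a second
     * unlink through them rewrites other (possibly freed) nodes */
}

static void list_del_init(struct list_head *e)
{
    list_del(e);
    e->next = e;   /* self-linked: repeating the unlink is a no-op */
    e->prev = e;
}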
Unit *unit_following(Unit *u) { assert(u); if (UNIT_VTABLE(u)->following) return UNIT_VTABLE(u)->following(u); return NULL; }
0
[ "CWE-269" ]
systemd
bf65b7e0c9fc215897b676ab9a7c9d1c688143ba
257,105,261,533,684,800,000,000,000,000,000,000,000
8
core: imply NNP and SUID/SGID restriction for DynamicUser=yes service Let's be safe, rather than sorry. This way DynamicUser=yes services can neither take benefit of, nor create SUID/SGID binaries. Given that DynamicUser= is a recent addition only we should be able to get away with turning this on, even though this is strictly speaking a binary compatibility breakage.
static int ssl_encrypt_buf( ssl_context *ssl ) { size_t i; cipher_mode_t mode; int auth_done = 0; SSL_DEBUG_MSG( 2, ( "=> encrypt buf" ) ); if( ssl->session_out == NULL || ssl->transform_out == NULL ) { SSL_DEBUG_MSG( 1, ( "should never happen" ) ); return( POLARSSL_ERR_SSL_INTERNAL_ERROR ); } mode = cipher_get_cipher_mode( &ssl->transform_out->cipher_ctx_enc ); SSL_DEBUG_BUF( 4, "before encrypt: output payload", ssl->out_msg, ssl->out_msglen ); /* * Add MAC before if needed */ #if defined(POLARSSL_SOME_MODES_USE_MAC) if( mode == POLARSSL_MODE_STREAM || ( mode == POLARSSL_MODE_CBC #if defined(POLARSSL_SSL_ENCRYPT_THEN_MAC) && ssl->session_out->encrypt_then_mac == SSL_ETM_DISABLED #endif ) ) { #if defined(POLARSSL_SSL_PROTO_SSL3) if( ssl->minor_ver == SSL_MINOR_VERSION_0 ) { ssl_mac( &ssl->transform_out->md_ctx_enc, ssl->transform_out->mac_enc, ssl->out_msg, ssl->out_msglen, ssl->out_ctr, ssl->out_msgtype ); } else #endif #if defined(POLARSSL_SSL_PROTO_TLS1) || defined(POLARSSL_SSL_PROTO_TLS1_1) || \ defined(POLARSSL_SSL_PROTO_TLS1_2) if( ssl->minor_ver >= SSL_MINOR_VERSION_1 ) { md_hmac_update( &ssl->transform_out->md_ctx_enc, ssl->out_ctr, 13 ); md_hmac_update( &ssl->transform_out->md_ctx_enc, ssl->out_msg, ssl->out_msglen ); md_hmac_finish( &ssl->transform_out->md_ctx_enc, ssl->out_msg + ssl->out_msglen ); md_hmac_reset( &ssl->transform_out->md_ctx_enc ); } else #endif { SSL_DEBUG_MSG( 1, ( "should never happen" ) ); return( POLARSSL_ERR_SSL_INTERNAL_ERROR ); } SSL_DEBUG_BUF( 4, "computed mac", ssl->out_msg + ssl->out_msglen, ssl->transform_out->maclen ); ssl->out_msglen += ssl->transform_out->maclen; auth_done++; } #endif /* AEAD not the only option */ /* * Encrypt */ #if defined(POLARSSL_ARC4_C) || defined(POLARSSL_CIPHER_NULL_CIPHER) if( mode == POLARSSL_MODE_STREAM ) { int ret; size_t olen = 0; SSL_DEBUG_MSG( 3, ( "before encrypt: msglen = %d, " "including %d bytes of padding", ssl->out_msglen, 0 ) ); if( ( ret = cipher_crypt( &ssl->transform_out->cipher_ctx_enc, ssl->transform_out->iv_enc, ssl->transform_out->ivlen, ssl->out_msg, ssl->out_msglen, ssl->out_msg, &olen ) ) != 0 ) { SSL_DEBUG_RET( 1, "cipher_crypt", ret ); return( ret ); } if( ssl->out_msglen != olen ) { SSL_DEBUG_MSG( 1, ( "should never happen" ) ); return( POLARSSL_ERR_SSL_INTERNAL_ERROR ); } } else #endif /* POLARSSL_ARC4_C || POLARSSL_CIPHER_NULL_CIPHER */ #if defined(POLARSSL_GCM_C) || defined(POLARSSL_CCM_C) if( mode == POLARSSL_MODE_GCM || mode == POLARSSL_MODE_CCM ) { int ret; size_t enc_msglen, olen; unsigned char *enc_msg; unsigned char add_data[13]; unsigned char taglen = ssl->transform_out->ciphersuite_info->flags & POLARSSL_CIPHERSUITE_SHORT_TAG ? 8 : 16; memcpy( add_data, ssl->out_ctr, 8 ); add_data[8] = ssl->out_msgtype; add_data[9] = ssl->major_ver; add_data[10] = ssl->minor_ver; add_data[11] = ( ssl->out_msglen >> 8 ) & 0xFF; add_data[12] = ssl->out_msglen & 0xFF; SSL_DEBUG_BUF( 4, "additional data used for AEAD", add_data, 13 ); /* * Generate IV */ #if defined(POLARSSL_SSL_AEAD_RANDOM_IV) ret = ssl->f_rng( ssl->p_rng, ssl->transform_out->iv_enc + ssl->transform_out->fixed_ivlen, ssl->transform_out->ivlen - ssl->transform_out->fixed_ivlen ); if( ret != 0 ) return( ret ); memcpy( ssl->out_iv, ssl->transform_out->iv_enc + ssl->transform_out->fixed_ivlen, ssl->transform_out->ivlen - ssl->transform_out->fixed_ivlen ); #else if( ssl->transform_out->ivlen - ssl->transform_out->fixed_ivlen != 8 ) { /* Reminder if we ever add an AEAD mode with a different size */ SSL_DEBUG_MSG( 1, ( "should never happen" ) ); return( POLARSSL_ERR_SSL_INTERNAL_ERROR ); } memcpy( ssl->transform_out->iv_enc + ssl->transform_out->fixed_ivlen, ssl->out_ctr, 8 ); memcpy( ssl->out_iv, ssl->out_ctr, 8 ); #endif SSL_DEBUG_BUF( 4, "IV used", ssl->out_iv, ssl->transform_out->ivlen - ssl->transform_out->fixed_ivlen ); /* * Fix pointer positions and message length with added IV */ enc_msg = ssl->out_msg; enc_msglen = ssl->out_msglen; ssl->out_msglen += ssl->transform_out->ivlen - ssl->transform_out->fixed_ivlen; SSL_DEBUG_MSG( 3, ( "before encrypt: msglen = %d, " "including %d bytes of padding", ssl->out_msglen, 0 ) ); /* * Encrypt and authenticate */ if( ( ret = cipher_auth_encrypt( &ssl->transform_out->cipher_ctx_enc, ssl->transform_out->iv_enc, ssl->transform_out->ivlen, add_data, 13, enc_msg, enc_msglen, enc_msg, &olen, enc_msg + enc_msglen, taglen ) ) != 0 ) { SSL_DEBUG_RET( 1, "cipher_auth_encrypt", ret ); return( ret ); } if( olen != enc_msglen ) { SSL_DEBUG_MSG( 1, ( "should never happen" ) ); return( POLARSSL_ERR_SSL_INTERNAL_ERROR ); } ssl->out_msglen += taglen; auth_done++; SSL_DEBUG_BUF( 4, "after encrypt: tag", enc_msg + enc_msglen, taglen ); } else #endif /* POLARSSL_GCM_C || POLARSSL_CCM_C */ #if defined(POLARSSL_CIPHER_MODE_CBC) && \ ( defined(POLARSSL_AES_C) || defined(POLARSSL_CAMELLIA_C) ) if( mode == POLARSSL_MODE_CBC ) { int ret; unsigned char *enc_msg; size_t enc_msglen, padlen, olen = 0; padlen = ssl->transform_out->ivlen - ( ssl->out_msglen + 1 ) % ssl->transform_out->ivlen; if( padlen == ssl->transform_out->ivlen ) padlen = 0; for( i = 0; i <= padlen; i++ ) ssl->out_msg[ssl->out_msglen + i] = (unsigned char) padlen; ssl->out_msglen += padlen + 1; enc_msglen = ssl->out_msglen; enc_msg = ssl->out_msg; #if defined(POLARSSL_SSL_PROTO_TLS1_1) || defined(POLARSSL_SSL_PROTO_TLS1_2) /* * Prepend per-record IV for block cipher in TLS v1.1 and up as per * Method 1 (6.2.3.2. in RFC4346 and RFC5246) */ if( ssl->minor_ver >= SSL_MINOR_VERSION_2 ) { /* * Generate IV */ ret = ssl->f_rng( ssl->p_rng, ssl->transform_out->iv_enc, ssl->transform_out->ivlen ); if( ret != 0 ) return( ret ); memcpy( ssl->out_iv, ssl->transform_out->iv_enc, ssl->transform_out->ivlen ); /* * Fix pointer positions and message length with added IV */ enc_msg = ssl->out_msg; enc_msglen = ssl->out_msglen; ssl->out_msglen += ssl->transform_out->ivlen; } #endif /* POLARSSL_SSL_PROTO_TLS1_1 || POLARSSL_SSL_PROTO_TLS1_2 */ SSL_DEBUG_MSG( 3, ( "before encrypt: msglen = %d, " "including %d bytes of IV and %d bytes of padding", ssl->out_msglen, ssl->transform_out->ivlen, padlen + 1 ) ); if( ( ret = cipher_crypt( &ssl->transform_out->cipher_ctx_enc, ssl->transform_out->iv_enc, ssl->transform_out->ivlen, enc_msg, enc_msglen, enc_msg, &olen ) ) != 0 ) { SSL_DEBUG_RET( 1, "cipher_crypt", ret ); return( ret ); } if( enc_msglen != olen ) { SSL_DEBUG_MSG( 1, ( "should never happen" ) ); return( POLARSSL_ERR_SSL_INTERNAL_ERROR ); } #if defined(POLARSSL_SSL_PROTO_SSL3) || defined(POLARSSL_SSL_PROTO_TLS1) if( ssl->minor_ver < SSL_MINOR_VERSION_2 ) { /* * Save IV in SSL3 and TLS1 */ memcpy( ssl->transform_out->iv_enc, ssl->transform_out->cipher_ctx_enc.iv, ssl->transform_out->ivlen ); } #endif #if defined(POLARSSL_SSL_ENCRYPT_THEN_MAC) if( auth_done == 0 ) { /* * MAC(MAC_write_key, seq_num + * TLSCipherText.type + * TLSCipherText.version + * length_of( (IV +) ENC(...) ) + * IV + // except for TLS 1.0 * ENC(content + padding + padding_length)); */ unsigned char pseudo_hdr[13]; SSL_DEBUG_MSG( 3, ( "using encrypt then mac" ) ); memcpy( pseudo_hdr + 0, ssl->out_ctr, 8 ); memcpy( pseudo_hdr + 8, ssl->out_hdr, 3 ); pseudo_hdr[11] = (unsigned char)( ( ssl->out_msglen >> 8 ) & 0xFF ); pseudo_hdr[12] = (unsigned char)( ( ssl->out_msglen ) & 0xFF ); SSL_DEBUG_BUF( 4, "MAC'd meta-data", pseudo_hdr, 13 ); md_hmac_update( &ssl->transform_out->md_ctx_enc, pseudo_hdr, 13 ); md_hmac_update( &ssl->transform_out->md_ctx_enc, ssl->out_iv, ssl->out_msglen ); md_hmac_finish( &ssl->transform_out->md_ctx_enc, ssl->out_iv + ssl->out_msglen ); md_hmac_reset( &ssl->transform_out->md_ctx_enc ); ssl->out_msglen += ssl->transform_out->maclen; auth_done++; } #endif /* POLARSSL_SSL_ENCRYPT_THEN_MAC */ } else #endif /* POLARSSL_CIPHER_MODE_CBC && ( POLARSSL_AES_C || POLARSSL_CAMELLIA_C ) */ { SSL_DEBUG_MSG( 1, ( "should never happen" ) ); return( POLARSSL_ERR_SSL_INTERNAL_ERROR ); } /* Make extra sure authentication was performed, exactly once */ if( auth_done != 1 ) { SSL_DEBUG_MSG( 1, ( "should never happen" ) ); return( POLARSSL_ERR_SSL_INTERNAL_ERROR ); } for( i = 8; i > 0; i-- ) if( ++ssl->out_ctr[i - 1] != 0 ) break; /* The loops goes to its end iff the counter is wrapping */ if( i == 0 ) { SSL_DEBUG_MSG( 1, ( "outgoing message counter would wrap" ) ); return( POLARSSL_ERR_SSL_COUNTER_WRAPPING ); } SSL_DEBUG_MSG( 2, ( "<= encrypt buf" ) ); return( 0 ); }
0
[ "CWE-119" ]
mbedtls
c988f32adde62a169ba340fee0da15aecd40e76e
166,055,663,214,966,270,000,000,000,000,000,000,000
333
Added max length checking of hostname
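A minimal sketch of the check named in the title: reject an over-long hostname before storing it, so later copies into fixed-size extension buffers cannot overflow. The constant and error values are assumptions, not PolarSSL's exact API:

#include <stdlib.h>
#include <string.h>

#define MAX_HOST_NAME_LEN 255   /* assumed cap, in line with DNS name limits */

static int set_hostname(char **dst, const char *hostname)
{
    size_t len = strlen(hostname);

    if (len > MAX_HOST_NAME_LEN)
        return -1;              /* bad input: refuse to store it */
    *dst = malloc(len + 1);
    if (*dst == NULL)
        return -2;
    memcpy(*dst, hostname, len + 1);
    return 0;
}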
save_incsearch_state(void) { saved_search_match_endcol = search_match_endcol; saved_search_match_lines = search_match_lines; }
0
[ "CWE-416" ]
vim
409510c588b1eec1ae33511ae97a21eb8e110895
158,410,495,455,693,800,000,000,000,000,000,000,000
5
patch 8.2.5050: using freed memory when searching for pattern in path Problem: Using freed memory when searching for pattern in path. Solution: Make a copy of the line.
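A self-contained sketch of the copy-before-use idea from the message (vim's own fix copies the line with vim_strsave(); everything below is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *current_line;          /* stands in for buffer-owned memory */

static void buffer_may_change(void) /* e.g. autocommands firing mid-search */
{
    free(current_line);
    current_line = strdup("the line was replaced");
}

int main(void)
{
    current_line = strdup("look for a pattern here");

    char *copy = strdup(current_line);  /* snapshot before anything can free it */
    buffer_may_change();                /* the old pointer is now dangling */
    printf("found: %s\n", strstr(copy, "pattern") ? "yes" : "no");

    free(copy);
    free(current_line);
    return 0;
}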
STATIC void S_put_range(pTHX_ SV *sv, UV start, const UV end, const bool allow_literals) { /* Appends to 'sv' a displayable version of the range of code points from * 'start' to 'end'. Mnemonics (like '\r') are used for the few controls * that have them, when they occur at the beginning or end of the range. * It uses hex to output the remaining code points, unless 'allow_literals' * is true, in which case the printable ASCII ones are output as-is (though * some of these will be escaped by put_code_point()). * * NOTE: This is designed only for printing ranges of code points that fit * inside an ANYOF bitmap. Higher code points are simply suppressed */ const unsigned int min_range_count = 3; assert(start <= end); PERL_ARGS_ASSERT_PUT_RANGE; while (start <= end) { UV this_end; const char * format; if (end - start < min_range_count) { /* Output chars individually when they occur in short ranges */ for (; start <= end; start++) { put_code_point(sv, start); } break; } /* If permitted by the input options, and there is a possibility that * this range contains a printable literal, look to see if there is * one. */ if (allow_literals && start <= MAX_PRINT_A) { /* If the character at the beginning of the range isn't an ASCII * printable, effectively split the range into two parts: * 1) the portion before the first such printable, * 2) the rest * and output them separately. */ if (! isPRINT_A(start)) { UV temp_end = start + 1; /* There is no point looking beyond the final possible * printable, in MAX_PRINT_A */ UV max = MIN(end, MAX_PRINT_A); while (temp_end <= max && ! isPRINT_A(temp_end)) { temp_end++; } /* Here, temp_end points to one beyond the first printable if * found, or to one beyond 'max' if not. If none found, make * sure that we use the entire range */ if (temp_end > MAX_PRINT_A) { temp_end = end + 1; } /* Output the first part of the split range: the part that * doesn't have printables, with the parameter set to not look * for literals (otherwise we would infinitely recurse) */ put_range(sv, start, temp_end - 1, FALSE); /* The 2nd part of the range (if any) starts here. */ start = temp_end; /* We do a continue, instead of dropping down, because even if * the 2nd part is non-empty, it could be so short that we want * to output it as individual characters, as tested for at the * top of this loop. */ continue; } /* Here, 'start' is a printable ASCII. If it is an alphanumeric, * output a sub-range of just the digits or letters, then process * the remaining portion as usual. */ if (isALPHANUMERIC_A(start)) { UV mask = (isDIGIT_A(start)) ? _CC_DIGIT : isUPPER_A(start) ? _CC_UPPER : _CC_LOWER; UV temp_end = start + 1; /* Find the end of the sub-range that includes just the * characters in the same class as the first character in it */ while (temp_end <= end && _generic_isCC_A(temp_end, mask)) { temp_end++; } temp_end--; /* For short ranges, don't duplicate the code above to output * them; just call recursively */ if (temp_end - start < min_range_count) { put_range(sv, start, temp_end, FALSE); } else { /* Output as a range */ put_code_point(sv, start); sv_catpvs(sv, "-"); put_code_point(sv, temp_end); } start = temp_end + 1; continue; } /* We output any other printables as individual characters */ if (isPUNCT_A(start) || isSPACE_A(start)) { while (start <= end && (isPUNCT_A(start) || isSPACE_A(start))) { put_code_point(sv, start); start++; } continue; } } /* End of looking for literals */ /* Here is not to output as a literal. Some control characters have * mnemonic names. Split off any of those at the beginning and end of * the range to print mnemonically. It isn't possible for many of * these to be in a row, so this won't overwhelm with output */ if ( start <= end && (isMNEMONIC_CNTRL(start) || isMNEMONIC_CNTRL(end))) { while (isMNEMONIC_CNTRL(start) && start <= end) { put_code_point(sv, start); start++; } /* If this didn't take care of the whole range ... */ if (start <= end) { /* Look backwards from the end to find the final non-mnemonic * */ UV temp_end = end; while (isMNEMONIC_CNTRL(temp_end)) { temp_end--; } /* And separately output the interior range that doesn't start * or end with mnemonics */ put_range(sv, start, temp_end, FALSE); /* Then output the mnemonic trailing controls */ start = temp_end + 1; while (start <= end) { put_code_point(sv, start); start++; } break; } } /* As a final resort, output the range or subrange as hex. */ this_end = (end < NUM_ANYOF_CODE_POINTS) ? end : NUM_ANYOF_CODE_POINTS - 1; #if NUM_ANYOF_CODE_POINTS > 256 format = (this_end < 256) ? "\\x%02" UVXf "-\\x%02" UVXf : "\\x{%04" UVXf "}-\\x{%04" UVXf "}"; #else format = "\\x%02" UVXf "-\\x%02" UVXf; #endif GCC_DIAG_IGNORE_STMT(-Wformat-nonliteral); Perl_sv_catpvf(aTHX_ sv, format, start, this_end); GCC_DIAG_RESTORE_STMT; break; } }
0
[ "CWE-190", "CWE-787" ]
perl5
897d1f7fd515b828e4b198d8b8bef76c6faf03ed
19,098,804,307,912,680,000,000,000,000,000,000,000
173
regcomp.c: Prevent integer overflow from nested regex quantifiers. (CVE-2020-10543) On 32bit systems the size calculations for nested regular expression quantifiers could overflow causing heap memory corruption. Fixes: Perl/perl5-security#125 (cherry picked from commit bfd31397db5dc1a5c5d3e0a1f753a4f89a736e71)
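A minimal sketch of the overflow-checked multiplication such a fix relies on when nested quantifiers scale repeat counts; names are illustrative, not perl's:

#include <stdint.h>

static int scale_quantifier(uint32_t count, uint32_t factor, uint32_t *out)
{
    if (factor != 0 && count > UINT32_MAX / factor)
        return -1;              /* 32-bit product would wrap: reject */
    *out = count * factor;
    return 0;
}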
rb_str_partition(argc, argv, str) int argc; VALUE *argv; VALUE str; { VALUE sep; long pos; if (argc == 0) return rb_call_super(argc, argv); rb_scan_args(argc, argv, "1", &sep); if (TYPE(sep) != T_REGEXP) { VALUE tmp; tmp = rb_check_string_type(sep); if (NIL_P(tmp)) { rb_raise(rb_eTypeError, "type mismatch: %s given", rb_obj_classname(sep)); } sep = get_arg_pat(tmp); } pos = rb_reg_search(sep, str, 0, 0); if (pos < 0) { failed: return rb_ary_new3(3, str, rb_str_new(0,0),rb_str_new(0,0)); } sep = rb_str_subpat(str, sep, 0); if (pos == 0 && RSTRING(sep)->len == 0) goto failed; return rb_ary_new3(3, rb_str_substr(str, 0, pos), sep, rb_str_substr(str, pos+RSTRING(sep)->len, RSTRING(str)->len-pos-RSTRING(sep)->len)); }
0
[ "CWE-20" ]
ruby
e926ef5233cc9f1035d3d51068abe9df8b5429da
198,034,689,651,176,760,000,000,000,000,000,000,000
32
* random.c (rb_genrand_int32, rb_genrand_real), intern.h: Export. * string.c (rb_str_tmp_new), intern.h: New function. git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_1_8@16014 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
int ff_amf_tag_size(const uint8_t *data, const uint8_t *data_end) { GetByteContext gb; int ret; if (data >= data_end) return -1; bytestream2_init(&gb, data, data_end - data); ret = amf_tag_skip(&gb); if (ret < 0 || bytestream2_get_bytes_left(&gb) <= 0) return -1; av_assert0(bytestream2_tell(&gb) >= 0 && bytestream2_tell(&gb) <= data_end - data); return bytestream2_tell(&gb); }
0
[ "CWE-20" ]
FFmpeg
ffcc82219cef0928bed2d558b19ef6ea35634130
65,776,833,187,186,350,000,000,000,000,000,000,000
16
avformat/rtmppkt: Convert ff_amf_get_field_value() to bytestream2 Fixes: out of array accesses Found-by: JunDong Xie of Ant-financial Light-Year Security Lab Signed-off-by: Michael Niedermayer <[email protected]>
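A minimal sketch of the same migration applied to a different AMF field, using FFmpeg's bounds-checked GetByteContext readers; the function itself and the include paths are illustrative assumptions:

#include "libavcodec/bytestream.h"
#include "libavutil/intfloat.h"

static int read_amf_number(const uint8_t *data, int size, double *out)
{
    GetByteContext gb;
    bytestream2_init(&gb, data, size);

    if (bytestream2_get_byte(&gb) != 0)      /* AMF_DATA_TYPE_NUMBER */
        return -1;
    if (bytestream2_get_bytes_left(&gb) < 8) /* bounded: cannot overread */
        return -1;
    *out = av_int2double(bytestream2_get_be64(&gb));
    return 0;
}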
s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch, struct inode *inode, u32 mask) { int ret = 0; int newly_watched; /* don't allow invalid bits: we don't want flags set */ mask &= IN_ALL_EVENTS | IN_ONESHOT; if (unlikely(!mask)) return -EINVAL; watch->mask = mask; mutex_lock(&inode->inotify_mutex); mutex_lock(&ih->mutex); /* Initialize a new watch */ ret = inotify_handle_get_wd(ih, watch); if (unlikely(ret)) goto out; ret = watch->wd; /* save a reference to handle and bump the count to make it official */ get_inotify_handle(ih); watch->ih = ih; /* * Save a reference to the inode and bump the ref count to make it * official. We hold a reference to nameidata, which makes this safe. */ watch->inode = igrab(inode); /* Add the watch to the handle's and the inode's list */ newly_watched = !inotify_inode_watched(inode); list_add(&watch->h_list, &ih->watches); list_add(&watch->i_list, &inode->inotify_watches); /* * Set child flags _after_ adding the watch, so there is no race * windows where newly instantiated children could miss their parent's * watched flag. */ if (newly_watched) set_dentry_child_flags(inode, 1); out: mutex_unlock(&ih->mutex); mutex_unlock(&inode->inotify_mutex); return ret; }
0
[ "CWE-362" ]
linux-2.6
8f7b0ba1c853919b85b54774775f567f30006107
136,472,251,429,427,290,000,000,000,000,000,000,000
48
Fix inotify watch removal/umount races Inotify watch removals suck violently. To kick the watch out we need (in this order) inode->inotify_mutex and ih->mutex. That's fine if we have a hold on inode; however, for all other cases we need to make damn sure we don't race with umount. We can *NOT* just grab a reference to a watch - inotify_unmount_inodes() will happily sail past it and we'll end with reference to inode potentially outliving its superblock. Ideally we just want to grab an active reference to superblock if we can; that will make sure we won't go into inotify_umount_inodes() until we are done. Cleanup is just deactivate_super(). However, that leaves a messy case - what if we *are* racing with umount() and active references to superblock can't be acquired anymore? We can bump ->s_count, grab ->s_umount, which will almost certainly wait until the superblock is shut down and the watch in question is pining for fjords. That's fine, but there is a problem - we might have hit the window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e. the moment when superblock is past the point of no return and is heading for shutdown) and the moment when deactivate_super() acquires ->s_umount. We could just do drop_super() yield() and retry, but that's rather antisocial and this stuff is luser-triggerable. OTOH, having grabbed ->s_umount and having found that we'd got there first (i.e. that ->s_root is non-NULL) we know that we won't race with inotify_umount_inodes(). So we could grab a reference to watch and do the rest as above, just with drop_super() instead of deactivate_super(), right? Wrong. We had to drop ih->mutex before we could grab ->s_umount. So the watch could've been gone already. That still can be dealt with - we need to save watch->wd, do idr_find() and compare its result with our pointer. If they match, we either have the damn thing still alive or we'd lost not one but two races at once, the watch had been killed and a new one got created with the same ->wd at the same address. That couldn't have happened in inotify_destroy(), but inotify_rm_wd() could run into that. Still, "new one got created" is not a problem - we have every right to kill it or leave it alone, whatever's more convenient. So we can use idr_find(...) == watch && watch->inode->i_sb == sb as "grab it and kill it" check. If it's been our original watch, we are fine, if it's a newcomer - nevermind, just pretend that we'd won the race and kill the fscker anyway; we are safe since we know that its superblock won't be going away. And yes, this is far beyond mere "not very pretty"; so's the entire concept of inotify to start with. Signed-off-by: Al Viro <[email protected]> Acked-by: Greg KH <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
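A very loose sketch of the revalidation dance the message walks through; every helper name here is a hypothetical placeholder, not the real VFS API — only the idr_find()-equals-watch-and-same-superblock check is taken from the message itself:

if (pin_super_active(sb)) {            /* hypothetical: umount now blocked */
    remove_watch(watch);
    unpin_super_active(sb);
} else {
    hold_s_umount(sb);                 /* hypothetical: wait out the umount */
    w = idr_find(&ih->idr, wd);        /* revalidate by saved descriptor */
    if (w == watch && w->inode->i_sb == sb)
        remove_watch(w);               /* still (effectively) ours: kill it */
    release_s_umount(sb);
}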
TEST_P(TcpTunnelingIntegrationTest, DeferTransmitDataUntilSuccessConnectResponseIsReceived) { initialize(); // Start a connection, and verify the upgrade headers are received upstream. tcp_client_ = makeTcpConnection(lookupPort("tcp_proxy")); // Send some data straight away. ASSERT_TRUE(tcp_client_->write("hello", false)); ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); // Wait a bit, no data should go through. ASSERT_FALSE(upstream_request_->waitForData(*dispatcher_, 1, std::chrono::milliseconds(100))); upstream_request_->encodeHeaders(default_response_headers_, false); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); tcp_client_->close(); if (upstreamProtocol() == Http::CodecType::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); } else { ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); // If the upstream now sends 'end stream' the connection is fully closed. upstream_request_->encodeData(0, true); } }
0
[ "CWE-416" ]
envoy
ce0ae309057a216aba031aff81c445c90c6ef145
126,533,026,795,020,290,000,000,000,000,000,000,000
29
CVE-2021-43826 Signed-off-by: Yan Avlasov <[email protected]>
process_downstream_ack(int userid, int down_seq, int down_frag) /* Process acks from downstream fragments. After this, .offset and .fragment are updated (if ack correct), or .len is set to zero when all is done. */ { if (users[userid].outpacket.len <= 0) /* No packet to apply acks to */ return; if (users[userid].outpacket.seqno != down_seq || users[userid].outpacket.fragment != down_frag) /* Not the ack we're waiting for; probably duplicate of old ack, happens a lot with ping packets */ return; /* Received proper ack */ users[userid].outpacket.offset += users[userid].outpacket.sentlen; users[userid].outpacket.sentlen = 0; users[userid].outpacket.fragment++; users[userid].outfragresent = 0; /* Is packet done? */ if (users[userid].outpacket.offset >= users[userid].outpacket.len) { users[userid].outpacket.len = 0; users[userid].outpacket.offset = 0; users[userid].outpacket.fragment--; /* unneeded ++ above */ /* ^keep last seqno/frag, are always returned on pings */ /* users[userid].outfragresent = 0; already above */ #ifdef OUTPACKETQ_LEN /* Possibly get new packet from queue */ get_from_outpacketq(userid); #endif } }
0
[]
iodine
b715be5cf3978fbe589b03b09c9398d0d791f850
87,928,796,154,101,600,000,000,000,000,000,000,000
36
Fix authentication bypass bug The client could bypass the password check by continuing after getting error from the server and guessing the network parameters. The server would still accept the rest of the setup and also network traffic. Add checks for normal and raw mode that user has authenticated before allowing any other communication. Problem found by Oscar Reparaz.
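A minimal sketch of the gate the fix adds: post-login handlers first check an authenticated flag. Field and function names are illustrative, not iodine's:

struct tunnel_user {
    int authenticated;   /* set only after the login handshake succeeds */
    /* ... */
};

static int require_login(const struct tunnel_user *u)
{
    return u->authenticated ? 0 : -1;   /* callers drop the request on -1 */
}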
static int smm_create_map(uintptr_t smbase, unsigned int num_cpus, const struct smm_loader_params *params) { unsigned int i; struct rmodule smm_stub; unsigned int ss_size = params->per_cpu_save_state_size, stub_size; unsigned int smm_entry_offset = params->smm_main_entry_offset; unsigned int seg_count = 0, segments = 0, available; unsigned int cpus_in_segment = 0; unsigned int base = smbase; if (rmodule_parse(&_binary_smmstub_start, &smm_stub)) { printk(BIOS_ERR, "%s: unable to get SMM module size\n", __func__); return 0; } stub_size = rmodule_memory_size(&smm_stub); /* How many CPUs can fit into one 64K segment? */ available = 0xFFFF - smm_entry_offset - ss_size - stub_size; if (available > 0) { cpus_in_segment = available / ss_size; /* minimum segments needed will always be 1 */ segments = num_cpus / cpus_in_segment + 1; printk(BIOS_DEBUG, "%s: cpus allowed in one segment %d\n", __func__, cpus_in_segment); printk(BIOS_DEBUG, "%s: min # of segments needed %d\n", __func__, segments); } else { printk(BIOS_ERR, "%s: not enough space in SMM to setup all CPUs\n", __func__); printk(BIOS_ERR, " save state & stub size need to be reduced\n"); printk(BIOS_ERR, " or increase SMRAM size\n"); return 0; } if (sizeof(cpus) / sizeof(struct cpu_smm_info) < num_cpus) { printk(BIOS_ERR, "%s: increase MAX_CPUS in Kconfig\n", __func__); return 0; } for (i = 0; i < num_cpus; i++) { cpus[i].smbase = base; cpus[i].entry = base + smm_entry_offset; cpus[i].ss_start = cpus[i].entry + (smm_entry_offset - ss_size); cpus[i].code_start = cpus[i].entry; cpus[i].code_end = cpus[i].entry + stub_size; cpus[i].active = 1; base -= ss_size; seg_count++; if (seg_count >= cpus_in_segment) { base -= smm_entry_offset; seg_count = 0; } } if (CONFIG_DEFAULT_CONSOLE_LOGLEVEL >= BIOS_DEBUG) { seg_count = 0; for (i = 0; i < num_cpus; i++) { printk(BIOS_DEBUG, "CPU 0x%x\n", i); printk(BIOS_DEBUG, " smbase %zx entry %zx\n", cpus[i].smbase, cpus[i].entry); printk(BIOS_DEBUG, " ss_start %zx code_end %zx\n", cpus[i].ss_start, cpus[i].code_end); seg_count++; if (seg_count >= cpus_in_segment) { printk(BIOS_DEBUG, "-------------NEW CODE SEGMENT --------------\n"); seg_count = 0; } } } return 1; }
0
[ "CWE-269" ]
coreboot
afb7a814783cda12f5b72167163b9109ee1d15a7
213,732,572,083,948,200,000,000,000,000,000,000,000
75
cpu/x86/smm: Introduce SMM module loader version 2 Xeon-SP Skylake Scalable Processor can have 36 CPU threads (18 cores). Current coreboot SMM is unable to handle more than ~32 CPU threads. This patch introduces a version 2 of the SMM module loader which addresses this problem. Having two versions of the SMM module loader prevents any issues for current projects. Future Xeon-SP products will be using this version of the SMM loader. Subsequent patches will enable board specific functionality for Xeon-SP. The reason for moving to version 2 is that the state save area begins to encroach upon the SMI handling code when more than 32 CPU threads are in the system. This can cause system hangs, reboots, etc. The second change is related to staggered entry points with simple near jumps. In the current loader, near jumps will not work because the CPU is jumping within the same code segment. In version 2, "far" address jumps are necessary, therefore protected mode must be enabled first. The SMM layout and how the CPUs are staggered are documented in the code. The modifications above allow the SMM module loader to expand easily as more CPU threads are added. TEST=build for Tiogapass platform under OCP mainboard. Enable the following in Kconfig. select CPU_INTEL_COMMON_SMM select SOC_INTEL_COMMON_BLOCK_SMM select SMM_TSEG select HAVE_SMI_HANDLER select ACPI_INTEL_HARDWARE_SLEEP_VALUES Debug console will show all 36 cores relocated. Further tested by generating SMIs to port 0xb2 using XDP/ITP HW debugger and ensured all cores enter and exit SMM properly. In addition, booted a Linux 5.4 kernel and observed no issues during mp init. Change-Id: I00a23a5f2a46110536c344254868390dbb71854c Signed-off-by: Rocky Phagura <[email protected]> Reviewed-on: https://review.coreboot.org/c/coreboot/+/43684 Tested-by: build bot (Jenkins) <[email protected]> Reviewed-by: Angel Pons <[email protected]>
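A small standalone program working through the segment-capacity arithmetic used by smm_create_map() above; the offset and size constants are made-up placeholders, not values from a real board.

#include <stdio.h>

int main(void)
{
    unsigned int smm_entry_offset = 0x8000; /* assumed stub entry offset */
    unsigned int ss_size = 0x400;           /* assumed save state size  */
    unsigned int stub_size = 0x300;         /* assumed SMM stub size    */
    unsigned int num_cpus = 36;

    /* Same formula as the loader: space left in a 64K segment below the
       entry point, after one save state area and the stub itself. */
    unsigned int available = 0xFFFF - smm_entry_offset - ss_size - stub_size;
    unsigned int cpus_in_segment = available / ss_size;
    unsigned int segments = num_cpus / cpus_in_segment + 1;

    printf("cpus per segment: %u, segments needed: %u\n",
           cpus_in_segment, segments); /* prints 30 and 2 for these numbers */
    return 0;
}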
static inline int security_msg_queue_associate(struct msg_queue *msq, int msqflg) { return 0; }
0
[]
linux-2.6
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
114,971,551,065,169,310,000,000,000,000,000,000,000
5
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6] Add a keyctl to install a process's session keyring onto its parent. This replaces the parent's session keyring. Because the COW credential code does not permit one process to change another process's credentials directly, the change is deferred until userspace next starts executing again. Normally this will be after a wait*() syscall. To support this, three new security hooks have been provided: cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in the blank security creds and key_session_to_parent() - which asks the LSM if the process may replace its parent's session keyring. The replacement may only happen if the process has the same ownership details as its parent, and the process has LINK permission on the session keyring, and the session keyring is owned by the process, and the LSM permits it. Note that this requires alteration to each architecture's notify_resume path. This has been done for all arches barring blackfin, m68k* and xtensa, all of which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the replacement to be performed at the point the parent process resumes userspace execution. This allows the userspace AFS pioctl emulation to fully emulate newpag() and the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to alter the parent process's PAG membership. However, since kAFS doesn't use PAGs per se, but rather dumps the keys into the session keyring, the session keyring of the parent must be replaced if, for example, VIOCSETTOK is passed the newpag flag. This can be tested with the following program: #include <stdio.h> #include <stdlib.h> #include <keyutils.h> #define KEYCTL_SESSION_TO_PARENT 18 #define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0) int main(int argc, char **argv) { key_serial_t keyring, key; long ret; keyring = keyctl_join_session_keyring(argv[1]); OSERROR(keyring, "keyctl_join_session_keyring"); key = add_key("user", "a", "b", 1, keyring); OSERROR(key, "add_key"); ret = keyctl(KEYCTL_SESSION_TO_PARENT); OSERROR(ret, "KEYCTL_SESSION_TO_PARENT"); return 0; } Compiled and linked with -lkeyutils, you should see something like: [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 355907932 --alswrv 4043 -1 \_ keyring: _uid.4043 [dhowells@andromeda ~]$ /tmp/newpag [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 1055658746 --alswrv 4043 4043 \_ user: a [dhowells@andromeda ~]$ /tmp/newpag hello [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: hello 340417692 --alswrv 4043 4043 \_ user: a Where the test program creates a new session keyring, sticks a user key named 'a' into it and then installs it on its parent. Signed-off-by: David Howells <[email protected]> Signed-off-by: James Morris <[email protected]>
Spawner(const ConfigPtr &_config) : config(_config), creationTime(SystemTime::getUsec()) { }
0
[ "CWE-200", "CWE-61" ]
passenger
4043718264095cde6623c2cbe8c644541036d7bf
286,538,299,604,539,820,000,000,000,000,000,000,000
4
Disable unused feature.
static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, int rw, u64 file_offset, int skip_sum, int async_submit) { struct btrfs_dio_private *dip = bio->bi_private; int write = rw & REQ_WRITE; struct btrfs_root *root = BTRFS_I(inode)->root; int ret; if (async_submit) async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers); bio_get(bio); if (!write) { ret = btrfs_bio_wq_end_io(root->fs_info, bio, BTRFS_WQ_ENDIO_DATA); if (ret) goto err; } if (skip_sum) goto map; if (write && async_submit) { ret = btrfs_wq_submit_bio(root->fs_info, inode, rw, bio, 0, 0, file_offset, __btrfs_submit_bio_start_direct_io, __btrfs_submit_bio_done); goto err; } else if (write) { /* * If we aren't doing async submit, calculate the csum of the * bio now. */ ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1); if (ret) goto err; } else { ret = btrfs_lookup_and_bind_dio_csum(root, inode, dip, bio, file_offset); if (ret) goto err; } map: ret = btrfs_map_bio(root, rw, bio, 0, async_submit); err: bio_put(bio); return ret; }
0
[ "CWE-200" ]
linux
0305cd5f7fca85dae392b9ba85b116896eb7c1c7
3,837,057,902,672,529,000,000,000,000,000,000,000
51
Btrfs: fix truncation of compressed and inlined extents When truncating a file to a smaller size which consists of an inline extent that is compressed, we did not discard (or made unusable) the data between the new file size and the old file size, wasting metadata space and allowing for the truncated data to be leaked and the data corruption/loss mentioned below. We were also not correctly decrementing the number of bytes used by the inode, we were setting it to zero, giving a wrong report for callers of the stat(2) syscall. The fsck tool also reported an error about a mismatch between the nbytes of the file versus the real space used by the file. Now because we weren't discarding the truncated region of the file, it was possible for a caller of the clone ioctl to actually read the data that was truncated, allowing for a security breach without requiring root access to the system, using only standard filesystem operations. The scenario is the following: 1) User A creates a file which consists of an inline and compressed extent with a size of 2000 bytes - the file is not accessible to any other users (no read, write or execution permission for anyone else); 2) The user truncates the file to a size of 1000 bytes; 3) User A makes the file world readable; 4) User B creates a file consisting of an inline extent of 2000 bytes; 5) User B issues a clone operation from user A's file into its own file (using a length argument of 0, clone the whole range); 6) User B now gets to see the 1000 bytes that user A truncated from its file before it made its file world readable. User B also lost the bytes in the range [1000, 2000[ bytes from its own file, but that might be ok if his/her intention was reading stale data from user A that was never supposed to be public. Note that this contrasts with the case where we truncate a file from 2000 bytes to 1000 bytes and then truncate it back from 1000 to 2000 bytes. In this case reading any byte from the range [1000, 2000[ will return a value of 0x00, instead of the original data. This problem exists since the clone ioctl was added and happens both with and without my recent data loss and file corruption fixes for the clone ioctl (patch "Btrfs: fix file corruption and data loss after cloning inline extents"). So fix this by truncating the compressed inline extents as we do for the non-compressed case, which involves decompressing, if the data isn't already in the page cache, compressing the truncated version of the extent, writing the compressed content into the inline extent and then truncating it. The following test case for fstests reproduces the problem. In order for the test to pass both this fix and my previous fix for the clone ioctl that forbids cloning a smaller inline extent into a larger one, which is titled "Btrfs: fix file corruption and data loss after cloning inline extents", are needed. Without that other fix the test fails in a different way that does not leak the truncated data, instead part of the destination file gets replaced with zeroes (because the destination file has a larger inline extent than the source). seq=`basename $0` seqres=$RESULT_DIR/$seq echo "QA output created by $seq" tmp=/tmp/$$ status=1 # failure is the default! trap "_cleanup; exit \$status" 0 1 2 3 15 _cleanup() { rm -f $tmp.* } # get standard environment, filters and checks . ./common/rc . ./common/filter # real QA test starts here _need_to_be_root _supported_fs btrfs _supported_os Linux _require_scratch _require_cloner rm -f $seqres.full _scratch_mkfs >>$seqres.full 2>&1 _scratch_mount "-o compress" # Create our test files. File foo is going to be the source of a clone operation # and consists of a single inline extent with an uncompressed size of 512 bytes, # while file bar consists of a single inline extent with an uncompressed size of # 256 bytes. For our test's purpose, it's important that file bar has an inline # extent with a size smaller than foo's inline extent. $XFS_IO_PROG -f -c "pwrite -S 0xa1 0 128" \ -c "pwrite -S 0x2a 128 384" \ $SCRATCH_MNT/foo | _filter_xfs_io $XFS_IO_PROG -f -c "pwrite -S 0xbb 0 256" $SCRATCH_MNT/bar | _filter_xfs_io # Now durably persist all metadata and data. We do this to make sure that we get # on disk an inline extent with a size of 512 bytes for file foo. sync # Now truncate our file foo to a smaller size. Because it consists of a # compressed and inline extent, btrfs did not shrink the inline extent to the # new size (if the extent was not compressed, btrfs would shrink it to 128 # bytes), it only updates the inode's i_size to 128 bytes. $XFS_IO_PROG -c "truncate 128" $SCRATCH_MNT/foo # Now clone foo's inline extent into bar. # This clone operation should fail with errno EOPNOTSUPP because the source # file consists only of an inline extent and the file's size is smaller than # the inline extent of the destination (128 bytes < 256 bytes). However the # clone ioctl was not prepared to deal with a file that has a size smaller # than the size of its inline extent (something that happens only for compressed # inline extents), resulting in copying the full inline extent from the source # file into the destination file. # # Note that btrfs' clone operation for inline extents consists of removing the # inline extent from the destination inode and copy the inline extent from the # source inode into the destination inode, meaning that if the destination # inode's inline extent is larger (N bytes) than the source inode's inline # extent (M bytes), some bytes (N - M bytes) will be lost from the destination # file. Btrfs could copy the source inline extent's data into the destination's # inline extent so that we would not lose any data, but that's currently not # done due to the complexity that would be needed to deal with such cases # (specially when one or both extents are compressed), returning EOPNOTSUPP, as # it's normally not a very common case to clone very small files (only case # where we get inline extents) and copying inline extents does not save any # space (unlike for normal, non-inlined extents). $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/foo $SCRATCH_MNT/bar # Now because the above clone operation used to succeed, and due to foo's inline # extent not being shrunk by the truncate operation, our file bar got the whole # inline extent copied from foo, making us lose the last 128 bytes from bar # which got replaced by the bytes in range [128, 256[ from foo before foo was # truncated - in other words, data loss from bar and being able to read old and # stale data from foo that should not be possible to read anymore through normal # filesystem operations. Contrast with the case where we truncate a file from a # size N to a smaller size M, truncate it back to size N and then read the range # [M, N[, we should always get the value 0x00 for all the bytes in that range. # We expected the clone operation to fail with errno EOPNOTSUPP and therefore # not modify our file's bar data/metadata. So its content should be 256 bytes # long with all bytes having the value 0xbb. # # Without the btrfs bug fix, the clone operation succeeded and resulted in # leaking truncated data from foo, the bytes that belonged to its range # [128, 256[, and losing data from bar in that same range. So reading the # file gave us the following content: # # 0000000 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 # * # 0000200 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a # * # 0000400 echo "File bar's content after the clone operation:" od -t x1 $SCRATCH_MNT/bar # Also because the foo's inline extent was not shrunk by the truncate # operation, btrfs' fsck, which is run by the fstests framework every time a # test completes, failed reporting the following error: # # root 5 inode 257 errors 400, nbytes wrong status=0 exit Cc: [email protected] Signed-off-by: Filipe Manana <[email protected]>
static int setcos_putdata(struct sc_card *card, struct sc_cardctl_setcos_data_obj* data_obj) { int r; struct sc_apdu apdu; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); memset(&apdu, 0, sizeof(apdu)); apdu.cse = SC_APDU_CASE_3_SHORT; apdu.cla = 0x00; apdu.ins = 0xDA; apdu.p1 = data_obj->P1; apdu.p2 = data_obj->P2; apdu.lc = data_obj->DataLen; apdu.datalen = data_obj->DataLen; apdu.data = data_obj->Data; r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(card->ctx, r, "PUT_DATA returned error"); LOG_FUNC_RETURN(card->ctx, r); }
0
[ "CWE-125" ]
OpenSC
c3f23b836e5a1766c36617fe1da30d22f7b63de2
189,352,224,938,655,130,000,000,000,000,000,000,000
25
fixed UNKNOWN READ Reported by OSS-Fuzz https://oss-fuzz.com/testcase-detail/5681169970757632
static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name) { const struct cred *cred = current_cred(); if (!strncmp(name, XATTR_SECURITY_PREFIX, sizeof XATTR_SECURITY_PREFIX - 1)) { if (!strcmp(name, XATTR_NAME_CAPS)) { if (!capable(CAP_SETFCAP)) return -EPERM; } else if (!capable(CAP_SYS_ADMIN)) { /* A different attribute in the security namespace. Restrict to administrator. */ return -EPERM; } } /* Not an attribute we recognize, so just check the ordinary setattr permission. */ return dentry_has_perm(cred, NULL, dentry, FILE__SETATTR); }
0
[]
linux-2.6
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
329,281,241,670,150,080,000,000,000,000,000,000,000
20
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6] Add a keyctl to install a process's session keyring onto its parent. This replaces the parent's session keyring. Because the COW credential code does not permit one process to change another process's credentials directly, the change is deferred until userspace next starts executing again. Normally this will be after a wait*() syscall. To support this, three new security hooks have been provided: cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in the blank security creds and key_session_to_parent() - which asks the LSM if the process may replace its parent's session keyring. The replacement may only happen if the process has the same ownership details as its parent, and the process has LINK permission on the session keyring, and the session keyring is owned by the process, and the LSM permits it. Note that this requires alteration to each architecture's notify_resume path. This has been done for all arches barring blackfin, m68k* and xtensa, all of which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the replacement to be performed at the point the parent process resumes userspace execution. This allows the userspace AFS pioctl emulation to fully emulate newpag() and the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to alter the parent process's PAG membership. However, since kAFS doesn't use PAGs per se, but rather dumps the keys into the session keyring, the session keyring of the parent must be replaced if, for example, VIOCSETTOK is passed the newpag flag. This can be tested with the following program: #include <stdio.h> #include <stdlib.h> #include <keyutils.h> #define KEYCTL_SESSION_TO_PARENT 18 #define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0) int main(int argc, char **argv) { key_serial_t keyring, key; long ret; keyring = keyctl_join_session_keyring(argv[1]); OSERROR(keyring, "keyctl_join_session_keyring"); key = add_key("user", "a", "b", 1, keyring); OSERROR(key, "add_key"); ret = keyctl(KEYCTL_SESSION_TO_PARENT); OSERROR(ret, "KEYCTL_SESSION_TO_PARENT"); return 0; } Compiled and linked with -lkeyutils, you should see something like: [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 355907932 --alswrv 4043 -1 \_ keyring: _uid.4043 [dhowells@andromeda ~]$ /tmp/newpag [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 1055658746 --alswrv 4043 4043 \_ user: a [dhowells@andromeda ~]$ /tmp/newpag hello [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: hello 340417692 --alswrv 4043 4043 \_ user: a Where the test program creates a new session keyring, sticks a user key named 'a' into it and then installs it on its parent. Signed-off-by: David Howells <[email protected]> Signed-off-by: James Morris <[email protected]>
Item_in_subselect::create_row_in_to_exists_cond(JOIN * join, Item **where_item, Item **having_item) { SELECT_LEX *select_lex= join->select_lex; uint cols_num= left_expr->cols(); /* The non-transformed HAVING clause of 'join' may be stored in two ways during JOIN::optimize: this->tmp_having= this->having; this->having= 0; */ Item* join_having= join->having ? join->having : join->tmp_having; bool is_having_used= (join_having || select_lex->with_sum_func || select_lex->group_list.first || !select_lex->table_list.elements); DBUG_ENTER("Item_in_subselect::create_row_in_to_exists_cond"); DBUG_ASSERT(thd == join->thd); *where_item= NULL; *having_item= NULL; if (is_having_used) { /* TODO: say here explicitly if the order of AND parts matters or not. */ Item *item_having_part2= 0; for (uint i= 0; i < cols_num; i++) { DBUG_ASSERT((left_expr->fixed && select_lex->ref_pointer_array[i]->fixed) || (select_lex->ref_pointer_array[i]->type() == REF_ITEM && ((Item_ref*)(select_lex->ref_pointer_array[i]))->ref_type() == Item_ref::OUTER_REF)); if (select_lex->ref_pointer_array[i]-> check_cols(left_expr->element_index(i)->cols())) DBUG_RETURN(true); Item *item_eq= new (thd->mem_root) Item_func_eq(thd, new (thd->mem_root) Item_direct_ref(thd, &select_lex->context, (*optimizer->get_cache())-> addr(i), (char *)"<no matter>", (char *)in_left_expr_name), new (thd->mem_root) Item_ref(thd, &select_lex->context, &select_lex->ref_pointer_array[i], (char *)"<no matter>", (char *)"<list ref>")); Item *item_isnull= new (thd->mem_root) Item_func_isnull(thd, new (thd->mem_root) Item_ref(thd, &select_lex->context, &select_lex->ref_pointer_array[i], (char *)"<no matter>", (char *)"<list ref>")); Item *col_item= new (thd->mem_root) Item_cond_or(thd, item_eq, item_isnull); if (!abort_on_null && left_expr->element_index(i)->maybe_null && get_cond_guard(i)) { disable_cond_guard_for_const_null_left_expr(i); if (!(col_item= new (thd->mem_root) Item_func_trig_cond(thd, col_item, get_cond_guard(i)))) DBUG_RETURN(true); } *having_item= and_items(thd, *having_item, col_item); Item *item_nnull_test= new (thd->mem_root) Item_is_not_null_test(thd, this, new (thd->mem_root) Item_ref(thd, &select_lex->context, &select_lex-> ref_pointer_array[i], (char *)"<no matter>", (char *)"<list ref>")); if (!abort_on_null && left_expr->element_index(i)->maybe_null && get_cond_guard(i) ) { disable_cond_guard_for_const_null_left_expr(i); if (!(item_nnull_test= new (thd->mem_root) Item_func_trig_cond(thd, item_nnull_test, get_cond_guard(i)))) DBUG_RETURN(true); } item_having_part2= and_items(thd, item_having_part2, item_nnull_test); item_having_part2->top_level_item(); } *having_item= and_items(thd, *having_item, item_having_part2); } else { for (uint i= 0; i < cols_num; i++) { Item *item, *item_isnull; DBUG_ASSERT((left_expr->fixed && select_lex->ref_pointer_array[i]->fixed) || (select_lex->ref_pointer_array[i]->type() == REF_ITEM && ((Item_ref*)(select_lex->ref_pointer_array[i]))->ref_type() == Item_ref::OUTER_REF)); if (select_lex->ref_pointer_array[i]-> check_cols(left_expr->element_index(i)->cols())) DBUG_RETURN(true); item= new (thd->mem_root) Item_func_eq(thd, new (thd->mem_root) Item_direct_ref(thd, &select_lex->context, (*optimizer->get_cache())-> addr(i), (char *)"<no matter>", (char *)in_left_expr_name), new (thd->mem_root) Item_direct_ref(thd, &select_lex->context, &select_lex-> ref_pointer_array[i], (char *)"<no matter>", (char *)"<list ref>")); if (!abort_on_null && select_lex->ref_pointer_array[i]->maybe_null) { Item *having_col_item= new (thd->mem_root) Item_is_not_null_test(thd, this, new (thd->mem_root) Item_ref(thd, &select_lex->context, &select_lex->ref_pointer_array[i], (char *)"<no matter>", (char *)"<list ref>")); item_isnull= new (thd->mem_root) Item_func_isnull(thd, new (thd->mem_root) Item_direct_ref(thd, &select_lex->context, &select_lex-> ref_pointer_array[i], (char *)"<no matter>", (char *)"<list ref>")); item= new (thd->mem_root) Item_cond_or(thd, item, item_isnull); if (left_expr->element_index(i)->maybe_null && get_cond_guard(i)) { disable_cond_guard_for_const_null_left_expr(i); if (!(item= new (thd->mem_root) Item_func_trig_cond(thd, item, get_cond_guard(i)))) DBUG_RETURN(true); if (!(having_col_item= new (thd->mem_root) Item_func_trig_cond(thd, having_col_item, get_cond_guard(i)))) DBUG_RETURN(true); } *having_item= and_items(thd, *having_item, having_col_item); } if (!abort_on_null && left_expr->element_index(i)->maybe_null && get_cond_guard(i)) { if (!(item= new (thd->mem_root) Item_func_trig_cond(thd, item, get_cond_guard(i)))) DBUG_RETURN(true); } *where_item= and_items(thd, *where_item, item); } } if (*where_item) { if (!(*where_item)->fixed && (*where_item)->fix_fields(thd, 0)) DBUG_RETURN(true); (*where_item)->top_level_item(); } if (*having_item) { if (!join_having) (*having_item)->name= (char*) in_having_cond; if (fix_having(*having_item, select_lex)) DBUG_RETURN(true); (*having_item)->top_level_item(); } DBUG_RETURN(false); }
0
[ "CWE-89" ]
server
3c209bfc040ddfc41ece8357d772547432353fd2
9,049,669,821,144,552,000,000,000,000,000,000,000
181
MDEV-25994: Crash with union of my_decimal type in ORDER BY clause When a single-row subquery fails with a "Subquery returns more than 1 row" error, it will raise an error and return NULL. On the other hand, Item_singlerow_subselect sets item->maybe_null=0 for table-less subqueries like "(SELECT not_null_value)" (*). This discrepancy (an item with maybe_null=0 returning NULL) causes the code in Type_handler_decimal_result::make_sort_key_part() to crash. Fixed this by allowing the inference (*) only when the subquery is NOT a UNION.
static void get_old_counters(const struct xt_table_info *t, struct xt_counters counters[]) { struct arpt_entry *iter; unsigned int cpu, i; for_each_possible_cpu(cpu) { i = 0; xt_entry_foreach(iter, t->entries, t->size) { struct xt_counters *tmp; tmp = xt_get_per_cpu_counter(&iter->counters, cpu); ADD_COUNTER(counters[i], tmp->bcnt, tmp->pcnt); ++i; } cond_resched(); } }
0
[ "CWE-476" ]
linux
57ebd808a97d7c5b1e1afb937c2db22beba3c1f8
334,783,348,587,653,200,000,000,000,000,000,000,000
18
netfilter: add back stackpointer size checks The rationale for removing the check is only correct for rulesets generated by ip(6)tables. In iptables, a jump can only occur to a user-defined chain, i.e. because we size the stack based on the number of user-defined chains we cannot exceed the stack size. However, the underlying binary format has no such restriction, and the validation step only ensures that the jump target is a valid rule start point. IOW, it's possible to build a rule blob that has no user-defined chains but does contain a jump. If this happens, no jump stack gets allocated and a crash occurs because no jumpstack was allocated. Fixes: 7814b6ec6d0d6 ("netfilter: xtables: don't save/restore jumpstack offset") Reported-by: [email protected] Signed-off-by: Florian Westphal <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]>
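The shape of the restored check, reduced to its essence: a ruleset that contains a jump but allocated no jump stack must be rejected up front. Types and names below are simplified stand-ins for the xtables ones, not the real patch.

struct rule {
    int is_jump;    /* verdict targets another rule in the blob */
};

/* stacksize is derived from the number of user-defined chains; a blob
   with jumps but zero chains would otherwise dereference a NULL stack. */
static int validate_ruleset(const struct rule *rules, unsigned int n,
                            unsigned int stacksize)
{
    unsigned int i;

    for (i = 0; i < n; i++)
        if (rules[i].is_jump && stacksize == 0)
            return -1;  /* reject at ruleset-validation time */
    return 0;
}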
static size_t mg_mqtt_next_topic(struct mg_mqtt_message *msg, struct mg_str *topic, uint8_t *qos, size_t pos) { unsigned char *buf = (unsigned char *) msg->dgram.ptr + pos; size_t new_pos; if (pos >= msg->dgram.len) return 0; topic->len = (size_t) (((unsigned) buf[0]) << 8 | buf[1]); topic->ptr = (char *) buf + 2; new_pos = pos + 2 + topic->len + (qos == NULL ? 0 : 1); if ((size_t) new_pos > msg->dgram.len) return 0; if (qos != NULL) *qos = buf[2 + topic->len]; return new_pos; }
0
[ "CWE-552" ]
mongoose
c65c8fdaaa257e0487ab0aaae9e8f6b439335945
212,674,505,380,449,060,000,000,000,000,000,000,000
14
Protect against the directory traversal in mg_upload()
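A generic illustration of the hardening the commit title describes, not mongoose's exact mg_upload() code: refuse client-supplied file names that carry path separators or parent-directory components before they ever touch the filesystem.

#include <string.h>

static int is_safe_upload_name(const char *name)
{
    if (name == NULL || name[0] == '\0')
        return 0;
    if (strchr(name, '/') != NULL || strchr(name, '\\') != NULL)
        return 0;   /* no path separators allowed */
    if (strstr(name, "..") != NULL)
        return 0;   /* no traversal components */
    return 1;
}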
static DH *load_dh_param(const char *dhfile) { DH *ret=NULL; BIO *bio; if ((bio=BIO_new_file(dhfile,"r")) == NULL) goto err; ret=PEM_read_bio_DHparams(bio,NULL,NULL,NULL); err: if (bio != NULL) BIO_free(bio); return(ret); }
0
[]
openssl
a70da5b3ecc3160368529677006801c58cb369db
3,432,769,260,868,062,300,000,000,000,000,000,000
12
New functions to check a hostname, email or IP address against a certificate. Add options to the s_client, s_server and x509 utilities to print the results of the checks.
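A usage sketch for the host-checking API this commit introduces. X509_check_host() returns 1 on match, 0 on mismatch and a negative value on error; the certificate here is assumed to come from an already-completed handshake.

#include <string.h>
#include <openssl/x509v3.h>

static int hostname_matches(X509 *cert, const char *host)
{
    /* flags 0: default matching rules; no peername output wanted */
    return X509_check_host(cert, host, strlen(host), 0, NULL) == 1;
}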
bgp_update_print(netdissect_options *ndo, const u_char *dat, int length) { struct bgp bgp; const u_char *p; int withdrawn_routes_len; int len; int i; ND_TCHECK2(dat[0], BGP_SIZE); if (length < BGP_SIZE) goto trunc; memcpy(&bgp, dat, BGP_SIZE); p = dat + BGP_SIZE; /*XXX*/ length -= BGP_SIZE; /* Unfeasible routes */ ND_TCHECK2(p[0], 2); if (length < 2) goto trunc; withdrawn_routes_len = EXTRACT_16BITS(p); p += 2; length -= 2; if (withdrawn_routes_len) { /* * Without keeping state from the original NLRI message, * it's not possible to tell if this a v4 or v6 route, * so only try to decode it if we're not v6 enabled. */ ND_TCHECK2(p[0], withdrawn_routes_len); if (length < withdrawn_routes_len) goto trunc; ND_PRINT((ndo, "\n\t Withdrawn routes: %d bytes", withdrawn_routes_len)); p += withdrawn_routes_len; length -= withdrawn_routes_len; } ND_TCHECK2(p[0], 2); if (length < 2) goto trunc; len = EXTRACT_16BITS(p); p += 2; length -= 2; if (withdrawn_routes_len == 0 && len == 0 && length == 0) { /* No withdrawn routes, no path attributes, no NLRI */ ND_PRINT((ndo, "\n\t End-of-Rib Marker (empty NLRI)")); return; } if (len) { /* do something more useful!*/ while (len) { int aflags, atype, alenlen, alen; ND_TCHECK2(p[0], 2); if (len < 2) goto trunc; if (length < 2) goto trunc; aflags = *p; atype = *(p + 1); p += 2; len -= 2; length -= 2; alenlen = bgp_attr_lenlen(aflags, p); ND_TCHECK2(p[0], alenlen); if (len < alenlen) goto trunc; if (length < alenlen) goto trunc; alen = bgp_attr_len(aflags, p); p += alenlen; len -= alenlen; length -= alenlen; ND_PRINT((ndo, "\n\t %s (%u), length: %u", tok2str(bgp_attr_values, "Unknown Attribute", atype), atype, alen)); if (aflags) { ND_PRINT((ndo, ", Flags [%s%s%s%s", aflags & 0x80 ? "O" : "", aflags & 0x40 ? "T" : "", aflags & 0x20 ? "P" : "", aflags & 0x10 ? "E" : "")); if (aflags & 0xf) ND_PRINT((ndo, "+%x", aflags & 0xf)); ND_PRINT((ndo, "]: ")); } if (len < alen) goto trunc; if (length < alen) goto trunc; if (!bgp_attr_print(ndo, atype, p, alen)) goto trunc; p += alen; len -= alen; length -= alen; } } if (length) { /* * XXX - what if they're using the "Advertisement of * Multiple Paths in BGP" feature: * * https://datatracker.ietf.org/doc/draft-ietf-idr-add-paths/ * * http://tools.ietf.org/html/draft-ietf-idr-add-paths-06 */ ND_PRINT((ndo, "\n\t Updated routes:")); while (length) { char buf[MAXHOSTNAMELEN + 100]; i = decode_prefix4(ndo, p, length, buf, sizeof(buf)); if (i == -1) { ND_PRINT((ndo, "\n\t (illegal prefix length)")); break; } else if (i == -2) goto trunc; else if (i == -3) goto trunc; /* bytes left, but not enough */ else { ND_PRINT((ndo, "\n\t %s", buf)); p += i; length -= i; } } } return; trunc: ND_PRINT((ndo, "[|BGP]")); }
0
[ "CWE-125" ]
tcpdump
86326e880d31b328a151d45348c35220baa9a1ff
159,432,847,398,619,430,000,000,000,000,000,000,000
135
(for 4.9.3) CVE-2018-14881/BGP: Fix BGP_CAPCODE_RESTART. Add a bounds check and a comment to bgp_capabilities_print(). This fixes a buffer over-read discovered by Bhargava Shastry, SecT/TU Berlin. Add a test using the capture file supplied by the reporter(s).
static void cit_model4_BrightnessPacket(struct gspca_dev *gspca_dev, u16 val) { cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0026, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, val, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0038, 0x012d); cit_write_reg(gspca_dev, 0x0004, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); }
0
[ "CWE-476" ]
linux
a246b4d547708f33ff4d4b9a7a5dbac741dc89d8
155,969,851,343,622,030,000,000,000,000,000,000,000
13
media: xirlink_cit: add missing descriptor sanity checks Make sure to check that we have two alternate settings and at least one endpoint before accessing the second altsetting structure and dereferencing the endpoint arrays. This specifically avoids dereferencing NULL-pointers or corrupting memory when a device does not have the expected descriptors. Note that the sanity check in cit_get_packet_size() is not redundant as the driver is mixing looking up altsettings by index and by number, which may not coincide. Fixes: 659fefa0eb17 ("V4L/DVB: gspca_xirlink_cit: Add support for camera with a bcd version of 0.01") Fixes: 59f8b0bf3c12 ("V4L/DVB: gspca_xirlink_cit: support bandwidth changing for devices with 1 alt setting") Cc: stable <[email protected]> # 2.6.37 Cc: Hans de Goede <[email protected]> Signed-off-by: Johan Hovold <[email protected]> Signed-off-by: Hans Verkuil <[email protected]> Signed-off-by: Mauro Carvalho Chehab <[email protected]>
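A kernel-context sketch of the descriptor sanity checks the fix describes (the shape is approximate, not the verbatim patch): verify that a second altsetting exists and that it has at least one endpoint before either is dereferenced.

#include <linux/errno.h>
#include <linux/usb.h>

static int cit_check_descriptors(struct usb_interface *intf)
{
    struct usb_host_interface *alt;

    if (intf->num_altsetting < 2)
        return -ENODEV;     /* driver expects two alternate settings */

    alt = &intf->altsetting[1];
    if (alt->desc.bNumEndpoints < 1)
        return -ENODEV;     /* endpoint array would be empty */

    return 0;
}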
static int __init max_swapfiles_check(void) { MAX_SWAPFILES_CHECK(); return 0; }
0
[ "CWE-264" ]
linux-2.6
1a5a9906d4e8d1976b701f889d8f35d54b928f25
54,085,502,764,490,420,000,000,000,000,000,000,000
5
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode In some cases it may happen that pmd_none_or_clear_bad() is called with the mmap_sem held in read mode. In those cases the huge page faults can allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a false positive from pmd_bad() that will not like to see a pmd materializing as trans huge. It's not khugepaged causing the problem, khugepaged holds the mmap_sem in write mode (and all those sites must hold the mmap_sem in read mode to prevent pagetables to go away from under them, during code review it seems vm86 mode on 32bit kernels requires that too unless it's restricted to 1 thread per process or UP builds). The race is only with the huge pagefaults that can convert a pmd_none() into a pmd_trans_huge(). Effectively all these pmd_none_or_clear_bad() sites running with mmap_sem in read mode are somewhat speculative with the page faults, and the result is always undefined when they run simultaneously. This is probably why it wasn't common to run into this. For example if the madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page fault, the hugepage will not be zapped, if the page fault runs first it will be zapped. Altering pmd_bad() not to error out if it finds hugepmds won't be enough to fix this, because zap_pmd_range would then proceed to call zap_pte_range (which would be incorrect if the pmd become a pmd_trans_huge()). The simplest way to fix this is to read the pmd in the local stack (regardless of what we read, no need of actual CPU barriers, only compiler barrier needed), and be sure it is not changing under the code that computes its value. Even if the real pmd is changing under the value we hold on the stack, we don't care. If we actually end up in zap_pte_range it means the pmd was not none already and it was not huge, and it can't become huge from under us (khugepaged locking explained above). All we need is to enforce that there is no way anymore that in a code path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad can run into a hugepmd. The overhead of a barrier() is just a compiler tweak and should not be measurable (I only added it for THP builds). I don't exclude different compiler versions may have prevented the race too by caching the value of *pmd on the stack (that hasn't been verified, but it wouldn't be impossible considering pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines and there's no external function called in between pmd_trans_huge and pmd_none_or_clear_bad). if (pmd_trans_huge(*pmd)) { if (next-addr != HPAGE_PMD_SIZE) { VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); split_huge_page_pmd(vma->vm_mm, pmd); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) continue; /* fall through */ } if (pmd_none_or_clear_bad(pmd)) Because this race condition could be exercised without special privileges this was reported in CVE-2012-1179. The race was identified and fully explained by Ulrich who debugged it. I'm quoting his accurate explanation below, for reference. ====== start quote ======= mapcount 0 page_mapcount 1 kernel BUG at mm/huge_memory.c:1384! At some point prior to the panic, a "bad pmd ..." message similar to the following is logged on the console: mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7). The "bad pmd ..." message is logged by pmd_clear_bad() before it clears the page's PMD table entry. 143 void pmd_clear_bad(pmd_t *pmd) 144 { -> 145 pmd_ERROR(*pmd); 146 pmd_clear(pmd); 147 } After the PMD table entry has been cleared, there is an inconsistency between the actual number of PMD table entries that are mapping the page and the page's map count (_mapcount field in struct page). When the page is subsequently reclaimed, __split_huge_page() detects this inconsistency. 1381 if (mapcount != page_mapcount(page)) 1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n", 1383 mapcount, page_mapcount(page)); -> 1384 BUG_ON(mapcount != page_mapcount(page)); The root cause of the problem is a race of two threads in a multithreaded process. Thread B incurs a page fault on a virtual address that has never been accessed (PMD entry is zero) while Thread A is executing an madvise() system call on a virtual address within the same 2 MB (huge page) range. virtual address space .---------------------. | | | | .-|---------------------| | | | | | |<-- B(fault) | | | 2 MB | |/////////////////////|-. huge < |/////////////////////| > A(range) page | |/////////////////////|-' | | | | | | '-|---------------------| | | | | '---------------------' - Thread A is executing an madvise(..., MADV_DONTNEED) system call on the virtual address range "A(range)" shown in the picture. sys_madvise // Acquire the semaphore in shared mode. down_read(&current->mm->mmap_sem) ... madvise_vma switch (behavior) case MADV_DONTNEED: madvise_dontneed zap_page_range unmap_vmas unmap_page_range zap_pud_range zap_pmd_range // // Assume that this huge page has never been accessed. // I.e. content of the PMD entry is zero (not mapped). // if (pmd_trans_huge(*pmd)) { // We don't get here due to the above assumption. } // // Assume that Thread B incurred a page fault and .---------> // sneaks in here as shown below. | // | if (pmd_none_or_clear_bad(pmd)) | { | if (unlikely(pmd_bad(*pmd))) | pmd_clear_bad | { | pmd_ERROR | // Log "bad pmd ..." message here. | pmd_clear | // Clear the page's PMD entry. | // Thread B incremented the map count | // in page_add_new_anon_rmap(), but | // now the page is no longer mapped | // by a PMD entry (-> inconsistency). | } | } | v - Thread B is handling a page fault on virtual address "B(fault)" shown in the picture. ... do_page_fault __do_page_fault // Acquire the semaphore in shared mode. down_read_trylock(&mm->mmap_sem) ... handle_mm_fault if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) // We get here due to the above assumption (PMD entry is zero). do_huge_pmd_anonymous_page alloc_hugepage_vma // Allocate a new transparent huge page here. ... __do_huge_pmd_anonymous_page ... spin_lock(&mm->page_table_lock) ... page_add_new_anon_rmap // Here we increment the page's map count (starts at -1). atomic_set(&page->_mapcount, 0) set_pmd_at // Here we set the page's PMD entry which will be cleared // when Thread A calls pmd_clear_bad(). ... spin_unlock(&mm->page_table_lock) The mmap_sem does not prevent the race because both threads are acquiring it in shared mode (down_read). Thread B holds the page_table_lock while the page's map count and PMD table entry are updated. However, Thread A does not synchronize on that lock. ====== end quote ======= [[email protected]: checkpatch fixes] Reported-by: Ulrich Obergfell <[email protected]> Signed-off-by: Andrea Arcangeli <[email protected]> Acked-by: Johannes Weiner <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Dave Jones <[email protected]> Acked-by: Larry Woodman <[email protected]> Acked-by: Rik van Riel <[email protected]> Cc: <[email protected]> [2.6.38+] Cc: Mark Salter <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
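The fix's core idea, sketched as the helper it introduced (simplified; see the kernel's real pmd_none_or_trans_huge_or_clear_bad() for the exact semantics): read the pmd once into a local variable, with a compiler barrier so every later test sees the same snapshot even if a huge-page fault changes the real entry concurrently.

static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
    pmd_t pmdval = *pmd;    /* one read, then work on the copy */

    barrier();              /* keep the compiler from re-reading *pmd */

    if (pmd_none(pmdval))
        return 1;
    if (pmd_trans_huge(pmdval))
        return 1;           /* not a pte table; caller must skip it */
    if (unlikely(pmd_bad(pmdval))) {
        pmd_clear_bad(pmd);
        return 1;
    }
    return 0;
}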
const CImg<T>& save_rgba(const char *const filename) const { return _save_rgba(0,filename); }
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
116,371,817,060,268,380,000,000,000,000,000,000,000
3
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that the dimensions encoded in the file do not exceed the file size.
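An illustration of the plausibility check the message describes: before allocating storage for width x height pixels read from a header, bound the product by the file size without risking integer overflow. The function is a generic sketch, not CImg's code.

static int dims_plausible(long file_size, long width, long height)
{
    if (file_size <= 0 || width <= 0 || height <= 0)
        return 0;
    /* width * height <= file_size, tested without multiplying so the
       product cannot overflow first */
    if (width > file_size / height)
        return 0;
    return 1;
}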
void updateGenre(TextIdentificationFrame *frame) { StringList fields = frame->fieldList(); StringList newfields; for(StringList::ConstIterator it = fields.begin(); it != fields.end(); ++it) { String s = *it; int end = s.find(")"); if(s.startsWith("(") && end > 0) { // "(12)Genre" String text = s.substr(end + 1); bool ok; int number = s.substr(1, end - 1).toInt(&ok); if(ok && number >= 0 && number <= 255 && !(ID3v1::genre(number) == text)) newfields.append(s.substr(1, end - 1)); if(!text.isEmpty()) newfields.append(text); } else { // "Genre" or "12" newfields.append(s); } } if(newfields.isEmpty()) fields.append(String()); frame->setText(newfields); }
0
[ "CWE-434", "CWE-352" ]
taglib
cb9f07d9dcd791b63e622da43f7b232adaec0a9a
20,671,769,213,865,817,000,000,000,000,000,000,000
30
Don't assume TDRC is an instance of TextIdentificationFrame (#831) If TDRC is encrypted, FrameFactory::createFrame() returns UnknownFrame, which causes problems in rebuildAggregateFrames() when it is assumed that TDRC is a TextIdentificationFrame.
listener_set_property (GSListener *listener, DBusConnection *connection, DBusMessage *message, guint prop_id) { const char *path; int type; gboolean rc; DBusMessageIter iter; DBusMessage *reply; path = dbus_message_get_path (message); dbus_message_iter_init (message, &iter); type = dbus_message_iter_get_arg_type (&iter); rc = FALSE; switch (type) { case DBUS_TYPE_BOOLEAN: { dbus_bool_t v; dbus_message_iter_get_basic (&iter, &v); rc = listener_property_set_bool (listener, prop_id, v); break; } default: gs_debug ("Unsupported property type %d", type); break; } if (! rc) { raise_property_type_error (connection, message, path); return DBUS_HANDLER_RESULT_HANDLED; } reply = dbus_message_new_method_return (message); if (reply == NULL) { g_error ("No memory"); } if (! dbus_connection_send (connection, reply, NULL)) { g_error ("No memory"); } dbus_message_unref (reply); return DBUS_HANDLER_RESULT_HANDLED; }
0
[]
gnome-screensaver
284c9924969a49dbf2d5fae1d680d3310c4df4a3
102,811,412,292,871,840,000,000,000,000,000,000,000
49
Remove session inhibitors if the originator falls off the bus This fixes a problem where totem leaves inhibitors behind, see bug 600488.
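The mechanism behind the fix is the bus's NameOwnerChanged signal: when a name's owner disappears, its inhibitors can be dropped. A minimal libdbus sketch of the subscription (error handling elided, listener wiring assumed elsewhere):

#include <dbus/dbus.h>

static void
watch_for_disconnects (DBusConnection *connection)
{
        /* a NameOwnerChanged signal whose new_owner argument is empty
           means the name fell off the bus: drop its inhibitors there */
        dbus_bus_add_match (connection,
                            "type='signal',"
                            "interface='org.freedesktop.DBus',"
                            "member='NameOwnerChanged'",
                            NULL);
}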
long MemIo::write(BasicIo& src) { if (static_cast<BasicIo*>(this) == &src) return 0; if (!src.isopen()) return 0; byte buf[4096]; long readCount = 0; long writeTotal = 0; while ((readCount = src.read(buf, sizeof(buf)))) { write(buf, readCount); writeTotal += readCount; } return writeTotal; }
0
[ "CWE-125" ]
exiv2
6e3855aed7ba8bb4731fc4087ca7f9078b2f3d97
129,936,204,898,793,170,000,000,000,000,000,000,000
15
Fix https://github.com/Exiv2/exiv2/issues/55
static int alloc_refcount_block(BlockDriverState *bs, int64_t cluster_index, uint16_t **refcount_block) { BDRVQcowState *s = bs->opaque; unsigned int refcount_table_index; int ret; BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC); /* Find the refcount block for the given cluster */ refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT); if (refcount_table_index < s->refcount_table_size) { uint64_t refcount_block_offset = s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK; /* If it's already there, we're done */ if (refcount_block_offset) { return load_refcount_block(bs, refcount_block_offset, (void**) refcount_block); } } /* * If we came here, we need to allocate something. Something is at least * a cluster for the new refcount block. It may also include a new refcount * table if the old refcount table is too small. * * Note that allocating clusters here needs some special care: * * - We can't use the normal qcow2_alloc_clusters(), it would try to * increase the refcount and very likely we would end up with an endless * recursion. Instead we must place the refcount blocks in a way that * they can describe them themselves. * * - We need to consider that at this point we are inside update_refcounts * and doing the initial refcount increase. This means that some clusters * have already been allocated by the caller, but their refcount isn't * accurate yet. free_cluster_index tells us where this allocation ends * as long as we don't overwrite it by freeing clusters. * * - alloc_clusters_noref and qcow2_free_clusters may load a different * refcount block into the cache */ *refcount_block = NULL; /* We write to the refcount table, so we might depend on L2 tables */ ret = qcow2_cache_flush(bs, s->l2_table_cache); if (ret < 0) { return ret; } /* Allocate the refcount block itself and mark it as used */ int64_t new_block = alloc_clusters_noref(bs, s->cluster_size); if (new_block < 0) { return new_block; } #ifdef DEBUG_ALLOC2 fprintf(stderr, "qcow2: Allocate refcount block %d for %" PRIx64 " at %" PRIx64 "\n", refcount_table_index, cluster_index << s->cluster_bits, new_block); #endif if (in_same_refcount_block(s, new_block, cluster_index << s->cluster_bits)) { /* Zero the new refcount block before updating it */ ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block, (void**) refcount_block); if (ret < 0) { goto fail_block; } memset(*refcount_block, 0, s->cluster_size); /* The block describes itself, need to update the cache */ int block_index = (new_block >> s->cluster_bits) & ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1); (*refcount_block)[block_index] = cpu_to_be16(1); } else { /* Described somewhere else. This can recurse at most twice before we * arrive at a block that describes itself. */ ret = update_refcount(bs, new_block, s->cluster_size, 1, QCOW2_DISCARD_NEVER); if (ret < 0) { goto fail_block; } ret = qcow2_cache_flush(bs, s->refcount_block_cache); if (ret < 0) { goto fail_block; } /* Initialize the new refcount block only after updating its refcount, * update_refcount uses the refcount cache itself */ ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block, (void**) refcount_block); if (ret < 0) { goto fail_block; } memset(*refcount_block, 0, s->cluster_size); } /* Now the new refcount block needs to be written to disk */ BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE); qcow2_cache_entry_mark_dirty(s->refcount_block_cache, *refcount_block); ret = qcow2_cache_flush(bs, s->refcount_block_cache); if (ret < 0) { goto fail_block; } /* If the refcount table is big enough, just hook the block up there */ if (refcount_table_index < s->refcount_table_size) { uint64_t data64 = cpu_to_be64(new_block); BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP); ret = bdrv_pwrite_sync(bs->file, s->refcount_table_offset + refcount_table_index * sizeof(uint64_t), &data64, sizeof(data64)); if (ret < 0) { goto fail_block; } s->refcount_table[refcount_table_index] = new_block; return 0; } ret = qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block); if (ret < 0) { goto fail_block; } /* * If we come here, we need to grow the refcount table. Again, a new * refcount table needs some space and we can't simply allocate to avoid * endless recursion. * * Therefore let's grab new refcount blocks at the end of the image, which * will describe themselves and the new refcount table. This way we can * reference them only in the new table and do the switch to the new * refcount table at once without producing an inconsistent state in * between. */ BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_GROW); /* Calculate the number of refcount blocks needed so far */ uint64_t refcount_block_clusters = 1 << (s->cluster_bits - REFCOUNT_SHIFT); uint64_t blocks_used = (s->free_cluster_index + refcount_block_clusters - 1) / refcount_block_clusters; /* And now we need at least one block more for the new metadata */ uint64_t table_size = next_refcount_table_size(s, blocks_used + 1); uint64_t last_table_size; uint64_t blocks_clusters; do { uint64_t table_clusters = size_to_clusters(s, table_size * sizeof(uint64_t)); blocks_clusters = 1 + ((table_clusters + refcount_block_clusters - 1) / refcount_block_clusters); uint64_t meta_clusters = table_clusters + blocks_clusters; last_table_size = table_size; table_size = next_refcount_table_size(s, blocks_used + ((meta_clusters + refcount_block_clusters - 1) / refcount_block_clusters)); } while (last_table_size != table_size); #ifdef DEBUG_ALLOC2 fprintf(stderr, "qcow2: Grow refcount table %" PRId32 " => %" PRId64 "\n", s->refcount_table_size, table_size); #endif /* Create the new refcount table and blocks */ uint64_t meta_offset = (blocks_used * refcount_block_clusters) * s->cluster_size; uint64_t table_offset = meta_offset + blocks_clusters * s->cluster_size; uint16_t *new_blocks = g_malloc0(blocks_clusters * s->cluster_size); uint64_t *new_table = g_malloc0(table_size * sizeof(uint64_t)); assert(meta_offset >= (s->free_cluster_index * s->cluster_size)); /* Fill the new refcount table */ memcpy(new_table, s->refcount_table, s->refcount_table_size * sizeof(uint64_t)); new_table[refcount_table_index] = new_block; int i; for (i = 0; i < blocks_clusters; i++) { new_table[blocks_used + i] = meta_offset + (i * s->cluster_size); } /* Fill the refcount blocks */ uint64_t table_clusters = size_to_clusters(s, table_size * sizeof(uint64_t)); int block = 0; for (i = 0; i < table_clusters + blocks_clusters; i++) { new_blocks[block++] = cpu_to_be16(1); } /* Write refcount blocks to disk */ BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS); ret = bdrv_pwrite_sync(bs->file, meta_offset, new_blocks, blocks_clusters * s->cluster_size); g_free(new_blocks); if (ret < 0) { goto fail_table; } /* Write refcount table to disk */ for(i = 0; i < table_size; i++) { cpu_to_be64s(&new_table[i]); } BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE); ret = bdrv_pwrite_sync(bs->file, table_offset, new_table, table_size * sizeof(uint64_t)); if (ret < 0) { goto fail_table; } for(i = 0; i < table_size; i++) { be64_to_cpus(&new_table[i]); } /* Hook up the new refcount table in the qcow2 header */ uint8_t data[12]; cpu_to_be64w((uint64_t*)data, table_offset); cpu_to_be32w((uint32_t*)(data + 8), table_clusters); BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE); ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, refcount_table_offset), data, sizeof(data)); if (ret < 0) { goto fail_table; } /* And switch it in memory */ uint64_t old_table_offset = s->refcount_table_offset; uint64_t old_table_size = s->refcount_table_size; g_free(s->refcount_table); s->refcount_table = new_table; s->refcount_table_size = table_size; s->refcount_table_offset = table_offset; /* Free old table. Remember, we must not change free_cluster_index */ uint64_t old_free_cluster_index = s->free_cluster_index; qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t), QCOW2_DISCARD_OTHER); s->free_cluster_index = old_free_cluster_index; ret = load_refcount_block(bs, new_block, (void**) refcount_block); if (ret < 0) { return ret; } return 0; fail_table: g_free(new_table); fail_block: if (*refcount_block != NULL) { qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block); } return ret; }
1
[ "CWE-190" ]
qemu
b106ad9185f35fc4ad669555ad0e79e276083bd7
96,331,514,894,509,600,000,000,000,000,000,000,000
267
qcow2: Don't rely on free_cluster_index in alloc_refcount_block() (CVE-2014-0147) free_cluster_index is only correct if update_refcount() was called from an allocation function, and even there it's brittle because it's used to protect unfinished allocations which still have a refcount of 0 - if it moves in the wrong place, the unfinished allocation can be corrupted. So not using it any more seems to be a good idea. Instead, use the first requested cluster to do the calculations. Return -EAGAIN if unfinished allocations could become invalid and let the caller restart its search for some free clusters. The context of creating a snapshot is one situation where update_refcount() is called outside of a cluster allocation. For this case, the change fixes a buffer overflow if a cluster is referenced in an L2 table that cannot be represented by an existing refcount block. (new_table[refcount_table_index] was out of bounds) [Bump the qemu-iotests 026 refblock_alloc.write leak count from 10 to 11. --Stefan] Signed-off-by: Kevin Wolf <[email protected]> Reviewed-by: Max Reitz <[email protected]> Signed-off-by: Stefan Hajnoczi <[email protected]>
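The retry contract the message describes, sketched as the allocating caller's loop. Names follow the qcow2 code loosely; this is a sketch of the idea, not the verbatim patch.

static int64_t alloc_clusters_with_retry(BlockDriverState *bs, uint64_t size)
{
    int64_t offset;
    int ret;

    do {
        offset = alloc_clusters_noref(bs, size);
        if (offset < 0)
            return offset;
        /* may now fail with -EAGAIN if an unfinished allocation would
           be invalidated; in that case just search again */
        ret = update_refcount(bs, offset, size, 1, QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);

    return ret < 0 ? ret : offset;
}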
longlong Item_func_sleep::val_int() { THD *thd= current_thd; Interruptible_wait timed_cond(thd); mysql_cond_t cond; double timeout; int error; DBUG_ASSERT(fixed == 1); timeout= args[0]->val_real(); /* On 64-bit OSX mysql_cond_timedwait() waits forever if passed abstime time has already been exceeded by the system time. When given a very short timeout (< 10 mcs) just return immediately. We assume that the lines between this test and the call to mysql_cond_timedwait() will be executed in less than 0.00001 sec. */ if (timeout < 0.00001) return 0; timed_cond.set_timeout((ulonglong) (timeout * 1000000000.0)); mysql_cond_init(key_item_func_sleep_cond, &cond, NULL); mysql_mutex_lock(&LOCK_item_func_sleep); THD_STAGE_INFO(thd, stage_user_sleep); thd->mysys_var->current_mutex= &LOCK_item_func_sleep; thd->mysys_var->current_cond= &cond; error= 0; thd_wait_begin(thd, THD_WAIT_SLEEP); while (!thd->killed) { error= timed_cond.wait(&cond, &LOCK_item_func_sleep); if (error == ETIMEDOUT || error == ETIME) break; error= 0; } thd_wait_end(thd); mysql_mutex_unlock(&LOCK_item_func_sleep); mysql_mutex_lock(&thd->mysys_var->mutex); thd->mysys_var->current_mutex= 0; thd->mysys_var->current_cond= 0; mysql_mutex_unlock(&thd->mysys_var->mutex); mysql_cond_destroy(&cond); DBUG_EXECUTE_IF("sleep_inject_query_done_debug_sync", { debug_sync_set_action (thd, STRING_WITH_LEN("dispatch_command_end SIGNAL query_done")); };); return MY_TEST(!error); // Return 1 killed }
0
[ "CWE-120" ]
server
eca207c46293bc72dd8d0d5622153fab4d3fccf1
114,012,906,243,143,770,000,000,000,000,000,000,000
57
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size. Precision should be kept below DECIMAL_MAX_SCALE for computations. It can be bigger in Item_decimal. I'd fix this too, but it changes the existing behaviour, so it is problematic to fix.
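The clamp the message calls for, in miniature. The two limits mirror the server's DECIMAL_MAX_* constants and should be treated as assumptions here, as should the helper itself.

#define DECIMAL_MAX_PRECISION 65    /* assumed server limit */
#define DECIMAL_MAX_SCALE     38    /* assumed server limit */

static void clamp_decimal(unsigned int *precision, unsigned int *scale)
{
    if (*precision > DECIMAL_MAX_PRECISION)
        *precision = DECIMAL_MAX_PRECISION;
    if (*scale > DECIMAL_MAX_SCALE)
        *scale = DECIMAL_MAX_SCALE;
    if (*scale > *precision)    /* the invariant the assertion checks */
        *scale = *precision;
}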
static int selinux_secmark_relabel_packet(u32 sid) { const struct task_security_struct *__tsec; u32 tsid; __tsec = selinux_cred(current_cred()); tsid = __tsec->sid; return avc_has_perm(&selinux_state, tsid, sid, SECCLASS_PACKET, PACKET__RELABELTO, NULL); }
0
[ "CWE-349" ]
linux
fb73974172ffaaf57a7c42f35424d9aece1a5af6
277,466,080,250,657,900,000,000,000,000,000,000,000
12
selinux: properly handle multiple messages in selinux_netlink_send() Fix the SELinux netlink_send hook to properly handle multiple netlink messages in a single sk_buff; each message is parsed and subject to SELinux access control. Prior to this patch, SELinux only inspected the first message in the sk_buff. Cc: [email protected] Reported-by: Dmitry Vyukov <[email protected]> Reviewed-by: Stephen Smalley <[email protected]> Signed-off-by: Paul Moore <[email protected]>
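The loop at the heart of the fix, sketched with the kernel's standard netlink iteration helpers; selinux_nlmsg_perm() stands in for the per-message permission check and is an assumed helper name, not necessarily the one in the patch.

#include <net/netlink.h>

static int netlink_send_check_all(struct sock *sk, struct sk_buff *skb)
{
    struct nlmsghdr *nlh = nlmsg_hdr(skb);
    int rem = skb->len;
    int rc = 0;

    /* walk every message in the buffer, not just the first one */
    while (nlmsg_ok(nlh, rem)) {
        rc = selinux_nlmsg_perm(sk, skb, nlh);
        if (rc)
            break;
        nlh = nlmsg_next(nlh, &rem);
    }
    return rc;
}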
dissect_kafka_txn_offset_commit_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset, kafka_api_version_t api_version) { proto_item *subti; proto_tree *subtree; offset = dissect_kafka_string(tree, hf_kafka_transactional_id, tvb, pinfo, offset, api_version >= 3, NULL, NULL); offset = dissect_kafka_string(tree, hf_kafka_consumer_group, tvb, pinfo, offset, api_version >= 3, NULL, NULL); proto_tree_add_item(tree, hf_kafka_producer_id, tvb, offset, 8, ENC_BIG_ENDIAN); offset += 8; proto_tree_add_item(tree, hf_kafka_producer_epoch, tvb, offset, 2, ENC_BIG_ENDIAN); offset += 2; if (api_version >= 3) { proto_tree_add_item(tree, hf_kafka_generation_id, tvb, offset, 4, ENC_BIG_ENDIAN); offset += 4; } if (api_version >= 3) { offset = dissect_kafka_string(tree, hf_kafka_member_id, tvb, pinfo, offset, 1,NULL, NULL); } if (api_version >= 3) { offset = dissect_kafka_string(tree, hf_kafka_consumer_group_instance, tvb, pinfo, offset, 1,NULL, NULL); } subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topics, &subti, "Topics"); offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version >= 3, api_version, &dissect_kafka_txn_offset_commit_request_topic, NULL); proto_item_set_end(subti, tvb, offset); if (api_version >= 3) { offset = dissect_kafka_tagged_fields(tvb, pinfo, tree, offset, 0); } return offset; }
0
[ "CWE-401" ]
wireshark
f4374967bbf9c12746b8ec3cd54dddada9dd353e
162,925,838,333,484,630,000,000,000,000,000,000,000
42
Kafka: Limit our decompression size. Don't assume that the Internet has our best interests at heart when it gives us the size of our decompression buffer. Assign an arbitrary limit of 50 MB. This fixes #16739 in that it takes care of ** (process:17681): WARNING **: 20:03:07.440: Dissector bug, protocol Kafka, in packet 31: ../epan/proto.c:7043: failed assertion "end >= fi->start" which is different from the original error output. It looks like *that* might have been taken care of in one of the other recent Kafka bug fixes. The decompression routines return a success or failure status. Use gbooleans instead of ints for that.
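The guard itself is tiny; a sketch using the 50 MB figure from the message (the constant and function names are assumptions, not the dissector's actual identifiers).

#define MAX_KAFKA_DECOMPRESSION_SIZE (50 * 1024 * 1024)

/* refuse to trust a claimed uncompressed size larger than the cap */
static int decompression_size_ok(unsigned long claimed_size)
{
    return claimed_size <= MAX_KAFKA_DECOMPRESSION_SIZE;
}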
processDSNSResponse(struct module_qstate* qstate, int id, struct module_qstate* forq) { struct iter_qstate* foriq = (struct iter_qstate*)forq->minfo[id]; /* if the finished (iq->response) query has no NS set: continue * up to look for the right dp; nothing to change, do DPNSstate */ if(qstate->return_rcode != LDNS_RCODE_NOERROR) return; /* seek further */ /* find the NS RRset (without allowing CNAMEs) */ if(!reply_find_rrset(qstate->return_msg->rep, qstate->qinfo.qname, qstate->qinfo.qname_len, LDNS_RR_TYPE_NS, qstate->qinfo.qclass)){ return; /* seek further */ } /* else, store as DP and continue at querytargets */ foriq->state = QUERYTARGETS_STATE; foriq->dp = delegpt_from_message(qstate->return_msg, forq->region); if(!foriq->dp) { log_err("out of memory in dsns dp alloc"); errinf(qstate, "malloc failure, in DS search"); return; /* dp==NULL in QUERYTARGETS makes SERVFAIL */ } /* success, go query the querytargets in the new dp (and go down) */ }
0
[ "CWE-400" ]
unbound
ba0f382eee814e56900a535778d13206b86b6d49
62,923,338,559,114,280,000,000,000,000,000,000,000
26
- CVE-2020-12662 Unbound can be tricked into amplifying an incoming query into a large number of queries directed to a target. - CVE-2020-12663 Malformed answers from upstream name servers can be used to make Unbound unresponsive.
ga_concat_shorten_esc(garray_T *gap, char_u *str) { char_u *p; char_u *s; int c; int clen; char_u buf[NUMBUFLEN]; int same_len; if (str == NULL) { ga_concat(gap, (char_u *)"NULL"); return; } for (p = str; *p != NUL; ++p) { same_len = 1; s = p; c = mb_cptr2char_adv(&s); clen = s - p; while (*s != NUL && c == mb_ptr2char(s)) { ++same_len; s += clen; } if (same_len > 20) { ga_concat(gap, (char_u *)"\\["); ga_concat_esc(gap, p, clen); ga_concat(gap, (char_u *)" occurs "); vim_snprintf((char *)buf, NUMBUFLEN, "%d", same_len); ga_concat(gap, buf); ga_concat(gap, (char_u *)" times]"); p = s - 1; } else ga_concat_esc(gap, p, clen); } }
0
[ "CWE-121", "CWE-787" ]
vim
34f8117dec685ace52cd9e578e2729db278163fc
255,743,422,040,651,240,000,000,000,000,000,000,000
40
patch 8.2.4397: crash when using many composing characters in error message Problem: Crash when using many composing characters in error message. Solution: Use mb_cptr2char_adv() instead of mb_ptr2char_adv().
int unit_load_related_unit(Unit *u, const char *type, Unit **_found) { _cleanup_free_ char *t = NULL; int r; assert(u); assert(type); assert(_found); r = unit_name_change_suffix(u->id, type, &t); if (r < 0) return r; if (unit_has_name(u, t)) return -EINVAL; r = manager_load_unit(u->manager, t, NULL, NULL, _found); assert(r < 0 || *_found != u); return r; }
0
[ "CWE-269" ]
systemd
bf65b7e0c9fc215897b676ab9a7c9d1c688143ba
312,496,528,522,476,900,000,000,000,000,000,000,000
18
core: imply NNP and SUID/SGID restriction for DynamicUser=yes service Let's be safe, rather than sorry. This way DynamicUser=yes services can neither benefit from, nor create, SUID/SGID binaries. Given that DynamicUser= is a recent addition only we should be able to get away with turning this on, even though this is strictly speaking a binary compatibility breakage.
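A sketch of the two primitives behind the hardening described above: prctl(2) for no-new-privileges plus a nosuid bind-remount of a service-writable path. The path is made up; the systemd plumbing is omitted.

#include <sys/prctl.h>
#include <sys/mount.h>

static int harden_dynamic_user_sketch(void)
{
	/* After this, execve() can no longer raise privileges via SUID/SGID bits. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) < 0)
		return -1;
	/* And nothing the service creates under its state directory can be SUID. */
	if (mount(NULL, "/var/lib/example-service", NULL,
	          MS_REMOUNT | MS_BIND | MS_NOSUID | MS_NODEV, NULL) < 0)
		return -1;
	return 0;
}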
SAPI_API int sapi_header_op(sapi_header_op_enum op, void *arg TSRMLS_DC) { sapi_header_struct sapi_header; char *colon_offset; char *header_line; uint header_line_len; int http_response_code; if (SG(headers_sent) && !SG(request_info).no_headers) { const char *output_start_filename = php_output_get_start_filename(TSRMLS_C); int output_start_lineno = php_output_get_start_lineno(TSRMLS_C); if (output_start_filename) { sapi_module.sapi_error(E_WARNING, "Cannot modify header information - headers already sent by (output started at %s:%d)", output_start_filename, output_start_lineno); } else { sapi_module.sapi_error(E_WARNING, "Cannot modify header information - headers already sent"); } return FAILURE; } switch (op) { case SAPI_HEADER_SET_STATUS: sapi_update_response_code((int)(zend_intptr_t) arg TSRMLS_CC); return SUCCESS; case SAPI_HEADER_ADD: case SAPI_HEADER_REPLACE: case SAPI_HEADER_DELETE: { sapi_header_line *p = arg; if (!p->line || !p->line_len) { return FAILURE; } header_line = p->line; header_line_len = p->line_len; http_response_code = p->response_code; break; } case SAPI_HEADER_DELETE_ALL: if (sapi_module.header_handler) { sapi_module.header_handler(&sapi_header, op, &SG(sapi_headers) TSRMLS_CC); } zend_llist_clean(&SG(sapi_headers).headers); return SUCCESS; default: return FAILURE; } header_line = estrndup(header_line, header_line_len); /* cut off trailing spaces, linefeeds and carriage-returns */ if (header_line_len && isspace(header_line[header_line_len-1])) { do { header_line_len--; } while(header_line_len && isspace(header_line[header_line_len-1])); header_line[header_line_len]='\0'; } if (op == SAPI_HEADER_DELETE) { if (strchr(header_line, ':')) { efree(header_line); sapi_module.sapi_error(E_WARNING, "Header to delete may not contain colon."); return FAILURE; } if (sapi_module.header_handler) { sapi_header.header = header_line; sapi_header.header_len = header_line_len; sapi_module.header_handler(&sapi_header, op, &SG(sapi_headers) TSRMLS_CC); } sapi_remove_header(&SG(sapi_headers).headers, header_line, header_line_len); efree(header_line); return SUCCESS; } else { /* new line/NUL character safety check */ int i; for (i = 0; i < header_line_len; i++) { /* RFC 7230 ch. 3.2.4 deprecates folding support */ if (header_line[i] == '\n' || header_line[i] == '\r') { efree(header_line); sapi_module.sapi_error(E_WARNING, "Header may not contain " "more than a single header, new line detected"); return FAILURE; } if (header_line[i] == '\0') { efree(header_line); sapi_module.sapi_error(E_WARNING, "Header may not contain NUL bytes"); return FAILURE; } } } sapi_header.header = header_line; sapi_header.header_len = header_line_len; /* Check the header for a few cases that we have special support for in SAPI */ if (header_line_len>=5 && !strncasecmp(header_line, "HTTP/", 5)) { /* filter out the response code */ sapi_update_response_code(sapi_extract_response_code(header_line) TSRMLS_CC); /* sapi_update_response_code doesn't free the status line if the code didn't change */ if (SG(sapi_headers).http_status_line) { efree(SG(sapi_headers).http_status_line); } SG(sapi_headers).http_status_line = header_line; return SUCCESS; } else { colon_offset = strchr(header_line, ':'); if (colon_offset) { *colon_offset = 0; if (!STRCASECMP(header_line, "Content-Type")) { char *ptr = colon_offset+1, *mimetype = NULL, *newheader; size_t len = header_line_len - (ptr - header_line), newlen; while (*ptr == ' ') { ptr++; len--; } /* Disable possible output compression for images */ if (!strncmp(ptr, "image/", sizeof("image/")-1)) { zend_alter_ini_entry("zlib.output_compression", sizeof("zlib.output_compression"), "0", sizeof("0") - 1, PHP_INI_USER, PHP_INI_STAGE_RUNTIME); } mimetype = estrdup(ptr); newlen = sapi_apply_default_charset(&mimetype, len TSRMLS_CC); if (!SG(sapi_headers).mimetype){ SG(sapi_headers).mimetype = estrdup(mimetype); } if (newlen != 0) { newlen += sizeof("Content-type: "); newheader = emalloc(newlen); PHP_STRLCPY(newheader, "Content-type: ", newlen, sizeof("Content-type: ")-1); strlcat(newheader, mimetype, newlen); sapi_header.header = newheader; sapi_header.header_len = newlen - 1; efree(header_line); } efree(mimetype); SG(sapi_headers).send_default_content_type = 0; } else if (!STRCASECMP(header_line, "Content-Length")) { /* Script is setting Content-length. The script cannot reasonably * know the size of the message body after compression, so it's best * do disable compression altogether. This contributes to making scripts * portable between setups that have and don't have zlib compression * enabled globally. See req #44164 */ zend_alter_ini_entry("zlib.output_compression", sizeof("zlib.output_compression"), "0", sizeof("0") - 1, PHP_INI_USER, PHP_INI_STAGE_RUNTIME); } else if (!STRCASECMP(header_line, "Location")) { if ((SG(sapi_headers).http_response_code < 300 || SG(sapi_headers).http_response_code > 399) && SG(sapi_headers).http_response_code != 201) { /* Return a Found Redirect if one is not already specified */ if (http_response_code) { /* user specified redirect code */ sapi_update_response_code(http_response_code TSRMLS_CC); } else if (SG(request_info).proto_num > 1000 && SG(request_info).request_method && strcmp(SG(request_info).request_method, "HEAD") && strcmp(SG(request_info).request_method, "GET")) { sapi_update_response_code(303 TSRMLS_CC); } else { sapi_update_response_code(302 TSRMLS_CC); } } } else if (!STRCASECMP(header_line, "WWW-Authenticate")) { /* HTTP Authentication */ sapi_update_response_code(401 TSRMLS_CC); /* authentication-required */ } if (sapi_header.header==header_line) { *colon_offset = ':'; } } } if (http_response_code) { sapi_update_response_code(http_response_code TSRMLS_CC); } sapi_header_add_op(op, &sapi_header TSRMLS_CC); return SUCCESS; }
0
[ "CWE-601" ]
php-src
98b9dfaec95e6f910f125ed172cdbd25abd006ec
73,640,582,394,331,490,000,000,000,000,000,000,000
180
Fix for HTTP_PROXY issue. The following changes are made: - _SERVER/_ENV only has HTTP_PROXY if the local environment has it, and only one from the environment. - getenv('HTTP_PROXY') only returns one from the local environment - getenv has optional second parameter, telling it to only consider local environment
put_device(parent); } static int iscsi_is_conn_dev(const struct device *dev)
0
[ "CWE-787" ]
linux
ec98ea7070e94cc25a422ec97d1421e28d97b7ee
145,594,505,669,541,970,000,000,000,000,000,000,000
4
scsi: iscsi: Ensure sysfs attributes are limited to PAGE_SIZE As the iSCSI parameters are exported back through sysfs, it should be enforcing that they never are more than PAGE_SIZE (which should be more than enough) before accepting updates through netlink. Change all iSCSI sysfs attributes to use sysfs_emit(). Cc: [email protected] Reported-by: Adam Nichols <[email protected]> Reviewed-by: Lee Duncan <[email protected]> Reviewed-by: Greg Kroah-Hartman <[email protected]> Reviewed-by: Mike Christie <[email protected]> Signed-off-by: Chris Leech <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
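The pattern the patch applies, sketched for a single attribute; the struct and accessor are hypothetical, but sysfs_emit() is the real kernel helper that refuses to write past the PAGE_SIZE sysfs buffer.

static ssize_t param_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct example_conn *conn = dev_get_drvdata(dev);  /* illustrative */

	/* sysfs_emit() clamps output to the one-page sysfs buffer,
	 * unlike a bare sprintf(buf, ...). */
	return sysfs_emit(buf, "%s\n", conn->address);
}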
static inline size_t AllocationPolicy(size_t size) { register size_t blocksize; /* The linear distribution. */ assert(size != 0); assert(size % (4*sizeof(size_t)) == 0); if (size <= BlockThreshold) return(size/(4*sizeof(size_t))); /* Check for the largest block size. */ if (size > (size_t) (BlockThreshold*(1L << (MaxBlockExponent-1L)))) return(MaxBlocks-1L); /* Otherwise use a power of two distribution. */ blocksize=BlockThreshold/(4*sizeof(size_t)); for ( ; size > BlockThreshold; size/=2) blocksize++; assert(blocksize > (BlockThreshold/(4*sizeof(size_t)))); assert(blocksize < (MaxBlocks-1L)); return(blocksize); }
0
[ "CWE-190", "CWE-189", "CWE-703" ]
ImageMagick
0f6fc2d5bf8f500820c3dbcf0d23ee14f2d9f734
213,731,320,499,471,650,000,000,000,000,000,000,000
27
static void read_client_command(uev_t *w, void *arg, int events) { char *command, *argument; ctrl_t *ctrl = (ctrl_t *)arg; ftp_cmd_t *cmd; if (UEV_ERROR == events || UEV_HUP == events) { uev_io_start(w); return; } /* Reset inactivity timer. */ uev_timer_set(&ctrl->timeout_watcher, INACTIVITY_TIMER, 0); if (recv_msg(w->fd, ctrl->buf, ctrl->bufsz, &command, &argument)) { DBG("Short read, exiting."); uev_exit(ctrl->ctx); return; } if (!string_valid(command)) return; if (string_match(command, "FF F4")) { DBG("Ignoring IAC command, client should send ABOR as well."); return; } for (cmd = &supported[0]; cmd->command; cmd++) { if (string_compare(command, cmd->command)) { cmd->cb(ctrl, argument); return; } } handle_UNKNOWN(ctrl, command); }
0
[ "CWE-120", "CWE-787" ]
uftpd
0fb2c031ce0ace07cc19cd2cb2143c4b5a63c9dd
128,683,353,084,899,500,000,000,000,000,000,000,000
37
FTP: Fix buffer overflow in PORT parser, reported by Aaron Esau Signed-off-by: Joachim Nilsson <[email protected]>
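A bounded PORT parser in the spirit of the fix: parse into integers and range-check each field instead of copying the raw argument into a fixed buffer. Names are illustrative.

#include <stdio.h>
#include <stdint.h>

static int parse_port_arg(const char *arg, char host[16], uint16_t *port)
{
	unsigned int h1, h2, h3, h4, p1, p2;

	if (!arg || sscanf(arg, "%u,%u,%u,%u,%u,%u",
	                   &h1, &h2, &h3, &h4, &p1, &p2) != 6)
		return -1;
	if (h1 > 255 || h2 > 255 || h3 > 255 || h4 > 255 || p1 > 255 || p2 > 255)
		return -1;

	snprintf(host, 16, "%u.%u.%u.%u", h1, h2, h3, h4);  /* bounded write */
	*port = (uint16_t)((p1 << 8) | p2);
	return 0;
}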
TEST(HeaderMapImplTest, MoveIntoInline) { HeaderMapImpl headers; HeaderString key; key.setCopy(Headers::get().CacheControl.get()); HeaderString value; value.setCopy("hello", 5); headers.addViaMove(std::move(key), std::move(value)); EXPECT_EQ("cache-control", headers.CacheControl()->key().getStringView()); EXPECT_EQ("hello", headers.CacheControl()->value().getStringView()); HeaderString key2; key2.setCopy(Headers::get().CacheControl.get().c_str(), Headers::get().CacheControl.get().size()); HeaderString value2; value2.setCopy("there", 5); headers.addViaMove(std::move(key2), std::move(value2)); EXPECT_EQ("cache-control", headers.CacheControl()->key().getStringView()); EXPECT_EQ("hello,there", headers.CacheControl()->value().getStringView()); }
1
[ "CWE-400", "CWE-703" ]
envoy
afc39bea36fd436e54262f150c009e8d72db5014
329,821,614,915,755,500,000,000,000,000,000,000,000
18
Track byteSize of HeaderMap internally. Introduces a cached byte size updated internally in HeaderMap. The value is stored as an optional, and is cleared whenever a non-const pointer or reference to a HeaderEntry is accessed. The cached value can be set with refreshByteSize() which performs an iteration over the HeaderMap to sum the size of each key and value in the HeaderMap. Signed-off-by: Asra Ali <[email protected]>
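The caching scheme, reduced to a C sketch with a validity flag; Envoy's real change is inside the C++ HeaderMapImpl, so every name here is illustrative.

#include <stddef.h>

struct header_entry { size_t key_len, val_len; struct header_entry *next; };

struct header_map {
	struct header_entry *head;
	size_t cached_byte_size;
	int cache_valid;   /* any mutable access must clear this */
};

static size_t header_map_byte_size(struct header_map *m)
{
	if (!m->cache_valid) {           /* refreshByteSize() analogue */
		size_t sum = 0;
		for (struct header_entry *e = m->head; e; e = e->next)
			sum += e->key_len + e->val_len;
		m->cached_byte_size = sum;
		m->cache_valid = 1;
	}
	return m->cached_byte_size;
}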
zcliprestore(i_ctx_t *i_ctx_p) { return gs_cliprestore(igs); }
0
[]
ghostpdl
4f83478c88c2e05d6e8d79ca4557eb039354d2f3
320,164,629,091,156,500,000,000,000,000,000,000,000
4
Bug 697799: have .eqproc check its parameters The Ghostscript custom operator .eqproc was not checking the number or type of the parameters it was given.
pixBlockconvGray(PIX *pixs, PIX *pixacc, l_int32 wc, l_int32 hc) { l_int32 w, h, d, wpl, wpla; l_uint32 *datad, *dataa; PIX *pixd, *pixt; PROCNAME("pixBlockconvGray"); if (!pixs) return (PIX *)ERROR_PTR("pixs not defined", procName, NULL); pixGetDimensions(pixs, &w, &h, &d); if (d != 8) return (PIX *)ERROR_PTR("pixs not 8 bpp", procName, NULL); if (wc < 0) wc = 0; if (hc < 0) hc = 0; if (wc == 0 && hc == 0) /* no-op */ return pixCopy(NULL, pixs); if (w < 2 * wc + 1 || h < 2 * hc + 1) { L_WARNING("kernel too large; returning a copy\n", procName); L_INFO("w = %d, wc = %d, h = %d, hc = %d\n", procName, w, wc, h, hc); return pixCopy(NULL, pixs); } if (pixacc) { if (pixGetDepth(pixacc) == 32) { pixt = pixClone(pixacc); } else { L_WARNING("pixacc not 32 bpp; making new one\n", procName); if ((pixt = pixBlockconvAccum(pixs)) == NULL) return (PIX *)ERROR_PTR("pixt not made", procName, NULL); } } else { if ((pixt = pixBlockconvAccum(pixs)) == NULL) return (PIX *)ERROR_PTR("pixt not made", procName, NULL); } if ((pixd = pixCreateTemplate(pixs)) == NULL) { pixDestroy(&pixt); return (PIX *)ERROR_PTR("pixd not made", procName, NULL); } wpl = pixGetWpl(pixs); wpla = pixGetWpl(pixt); datad = pixGetData(pixd); dataa = pixGetData(pixt); blockconvLow(datad, w, h, wpl, dataa, wpla, wc, hc); pixDestroy(&pixt); return pixd; }
0
[]
leptonica
480f5e74c24fdc2003c42a4e15d1f24c9e6ea469
249,091,133,157,588,700,000,000,000,000,000,000,000
53
Fixed issue 21972 (oss-fuzz) Divide by zero in pixBlockconvGray().
testcase_str2repo(Pool *pool, const char *str) { int repoid; Repo *repo = 0; if (str[0] == '#' && (str[1] >= '0' && str[1] <= '9')) { int j; repoid = 0; for (j = 1; str[j] >= '0' && str[j] <= '9'; j++) repoid = repoid * 10 + (str[j] - '0'); if (!str[j] && repoid > 0 && repoid < pool->nrepos) repo = pool_id2repo(pool, repoid); } if (!repo) { FOR_REPOS(repoid, repo) { int i, l; if (!repo->name) continue; l = strlen(repo->name); for (i = 0; i < l; i++) { int c = repo->name[i]; if (c == ' ' || c == '\t') c = '_'; if (c != str[i]) break; } if (i == l && !str[l]) break; } if (repoid >= pool->nrepos) repo = 0; } return repo; }
0
[ "CWE-120" ]
libsolv
0077ef29eb46d2e1df2f230fc95a1d9748d49dec
209,927,191,045,213,400,000,000,000,000,000,000,000
37
testcase_read: error out if repos are added or the system is changed too late We must not add new solvables after the considered map was created, the solver was created, or jobs were added. We must not change the system after jobs have been added. (Jobs may point inside the whatprovides array, so we must not invalidate this area.)
void QPaintEngineEx::stroke(const QVectorPath &path, const QPen &inPen) { #ifdef QT_DEBUG_DRAW qDebug() << "QPaintEngineEx::stroke()" << pen; #endif Q_D(QPaintEngineEx); if (path.isEmpty()) return; if (!d->strokeHandler) { d->strokeHandler = new StrokeHandler(path.elementCount()+4); d->stroker.setMoveToHook(qpaintengineex_moveTo); d->stroker.setLineToHook(qpaintengineex_lineTo); d->stroker.setCubicToHook(qpaintengineex_cubicTo); } QRectF clipRect; QPen pen = inPen; if (pen.style() > Qt::SolidLine) { QRectF cpRect = path.controlPointRect(); const QTransform &xf = state()->matrix; if (pen.isCosmetic()) { clipRect = d->exDeviceRect; cpRect.translate(xf.dx(), xf.dy()); } else { clipRect = xf.inverted().mapRect(QRectF(d->exDeviceRect)); } // Check to avoid generating unwieldy amount of dashes that will not be visible anyway QRectF extentRect = cpRect & clipRect; qreal extent = qMax(extentRect.width(), extentRect.height()); qreal patternLength = 0; const QList<qreal> pattern = pen.dashPattern(); const int patternSize = qMin(pattern.size(), 32); for (int i = 0; i < patternSize; i++) patternLength += qMax(pattern.at(i), qreal(0)); if (pen.widthF()) patternLength *= pen.widthF(); if (qFuzzyIsNull(patternLength)) { pen.setStyle(Qt::NoPen); } else if (qFuzzyIsNull(extent) || extent / patternLength > 10000) { // approximate stream of tiny dashes with semi-transparent solid line pen.setStyle(Qt::SolidLine); QColor color(pen.color()); color.setAlpha(color.alpha() / 2); pen.setColor(color); } } if (!qpen_fast_equals(pen, d->strokerPen)) { d->strokerPen = pen; d->stroker.setJoinStyle(pen.joinStyle()); d->stroker.setCapStyle(pen.capStyle()); d->stroker.setMiterLimit(pen.miterLimit()); qreal penWidth = pen.widthF(); if (penWidth == 0) d->stroker.setStrokeWidth(1); else d->stroker.setStrokeWidth(penWidth); Qt::PenStyle style = pen.style(); if (style == Qt::SolidLine) { d->activeStroker = &d->stroker; } else if (style == Qt::NoPen) { d->activeStroker = nullptr; } else { d->dasher.setDashPattern(pen.dashPattern()); d->dasher.setDashOffset(pen.dashOffset()); d->activeStroker = &d->dasher; } } if (!d->activeStroker) { return; } if (!clipRect.isNull()) d->activeStroker->setClipRect(clipRect); if (d->activeStroker == &d->stroker) d->stroker.setForceOpen(path.hasExplicitOpen()); const QPainterPath::ElementType *types = path.elements(); const qreal *points = path.points(); int pointCount = path.elementCount(); const qreal *lastPoint = points + (pointCount<<1); d->strokeHandler->types.reset(); d->strokeHandler->pts.reset(); // Some engines might decide to optimize for the non-shape hint later on... uint flags = QVectorPath::WindingFill; if (path.elementCount() > 2) flags |= QVectorPath::NonConvexShapeMask; if (d->stroker.capStyle() == Qt::RoundCap || d->stroker.joinStyle() == Qt::RoundJoin) flags |= QVectorPath::CurvedShapeMask; // ### Perspective Xforms are currently not supported... if (!pen.isCosmetic()) { // We include cosmetic pens in this case to avoid having to // change the current transform. Normal transformed, // non-cosmetic pens will be transformed as part of fill // later, so they are also covered here.. d->activeStroker->setCurveThresholdFromTransform(state()->matrix); d->activeStroker->begin(d->strokeHandler); if (types) { while (points < lastPoint) { switch (*types) { case QPainterPath::MoveToElement: d->activeStroker->moveTo(points[0], points[1]); points += 2; ++types; break; case QPainterPath::LineToElement: d->activeStroker->lineTo(points[0], points[1]); points += 2; ++types; break; case QPainterPath::CurveToElement: d->activeStroker->cubicTo(points[0], points[1], points[2], points[3], points[4], points[5]); points += 6; types += 3; flags |= QVectorPath::CurvedShapeMask; break; default: break; } } if (path.hasImplicitClose()) d->activeStroker->lineTo(path.points()[0], path.points()[1]); } else { d->activeStroker->moveTo(points[0], points[1]); points += 2; while (points < lastPoint) { d->activeStroker->lineTo(points[0], points[1]); points += 2; } if (path.hasImplicitClose()) d->activeStroker->lineTo(path.points()[0], path.points()[1]); } d->activeStroker->end(); if (!d->strokeHandler->types.size()) // an empty path... return; QVectorPath strokePath(d->strokeHandler->pts.data(), d->strokeHandler->types.size(), d->strokeHandler->types.data(), flags); fill(strokePath, pen.brush()); } else { // For cosmetic pens we need a bit of trickery... We to process xform the input points if (state()->matrix.type() >= QTransform::TxProject) { QPainterPath painterPath = state()->matrix.map(path.convertToPainterPath()); d->activeStroker->strokePath(painterPath, d->strokeHandler, QTransform()); } else { d->activeStroker->setCurveThresholdFromTransform(QTransform()); d->activeStroker->begin(d->strokeHandler); if (types) { while (points < lastPoint) { switch (*types) { case QPainterPath::MoveToElement: { QPointF pt = (*(const QPointF *) points) * state()->matrix; d->activeStroker->moveTo(pt.x(), pt.y()); points += 2; ++types; break; } case QPainterPath::LineToElement: { QPointF pt = (*(const QPointF *) points) * state()->matrix; d->activeStroker->lineTo(pt.x(), pt.y()); points += 2; ++types; break; } case QPainterPath::CurveToElement: { QPointF c1 = ((const QPointF *) points)[0] * state()->matrix; QPointF c2 = ((const QPointF *) points)[1] * state()->matrix; QPointF e = ((const QPointF *) points)[2] * state()->matrix; d->activeStroker->cubicTo(c1.x(), c1.y(), c2.x(), c2.y(), e.x(), e.y()); points += 6; types += 3; flags |= QVectorPath::CurvedShapeMask; break; } default: break; } } if (path.hasImplicitClose()) { QPointF pt = * ((const QPointF *) path.points()) * state()->matrix; d->activeStroker->lineTo(pt.x(), pt.y()); } } else { QPointF p = ((const QPointF *)points)[0] * state()->matrix; d->activeStroker->moveTo(p.x(), p.y()); points += 2; while (points < lastPoint) { QPointF p = ((const QPointF *)points)[0] * state()->matrix; d->activeStroker->lineTo(p.x(), p.y()); points += 2; } if (path.hasImplicitClose()) d->activeStroker->lineTo(p.x(), p.y()); } d->activeStroker->end(); } QVectorPath strokePath(d->strokeHandler->pts.data(), d->strokeHandler->types.size(), d->strokeHandler->types.data(), flags); QTransform xform = state()->matrix; state()->matrix = QTransform(); transformChanged(); QBrush brush = pen.brush(); if (qbrush_style(brush) != Qt::SolidPattern) brush.setTransform(brush.transform() * xform); fill(strokePath, brush); state()->matrix = xform; transformChanged(); } }
0
[ "CWE-787" ]
qtbase
6b400e3147dcfd8cc3a393ace1bd118c93762e0c
208,992,809,461,842,860,000,000,000,000,000,000,000
235
Improve fix for avoiding huge number of tiny dashes Some pathological cases were not caught by the previous fix. Fixes: QTBUG-95239 Pick-to: 6.2 6.1 5.15 Change-Id: I0337ee3923ff93ccb36c4d7b810a9c0667354cc5 Reviewed-by: Robert Löhning <[email protected]>
static void dev_seq_stop(struct seq_file *s, void *v) { kfree(s->private); }
0
[ "CWE-190", "CWE-189" ]
linux
fdc81f45e9f57858da6351836507fbcf1b7583ee
120,263,391,180,876,470,000,000,000,000,000,000,000
4
sg_start_req(): use import_iovec() Signed-off-by: Al Viro <[email protected]>
GF_Err gf_media_avc_change_par(GF_AVCConfig *avcc, s32 ar_n, s32 ar_d) { GF_BitStream *orig, *mod; AVCState avc; u32 i, bit_offset, flag; s32 idx; GF_AVCConfigSlot *slc; orig = NULL; memset(&avc, 0, sizeof(AVCState)); avc.sps_active_idx = -1; i=0; while ((slc = (GF_AVCConfigSlot *)gf_list_enum(avcc->sequenceParameterSets, &i))) { char *no_emulation_buf = NULL; u32 no_emulation_buf_size = 0, emulation_bytes = 0; idx = gf_media_avc_read_sps(slc->data, slc->size, &avc, 0, &bit_offset); if (idx<0) { if ( orig ) gf_bs_del(orig); continue; } /*SPS still contains emulation bytes*/ no_emulation_buf = gf_malloc((slc->size-1)*sizeof(char)); no_emulation_buf_size = avc_remove_emulation_bytes(slc->data+1, no_emulation_buf, slc->size-1); orig = gf_bs_new(no_emulation_buf, no_emulation_buf_size, GF_BITSTREAM_READ); gf_bs_read_data(orig, no_emulation_buf, no_emulation_buf_size); gf_bs_seek(orig, 0); mod = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); /*copy over till vui flag*/ assert(bit_offset>=8); while (bit_offset-8/*bit_offset doesn't take care of the first byte (NALU type)*/) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); bit_offset--; } /*check VUI*/ flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, 1, 1); /*vui_parameters_present_flag*/ if (flag) { /*aspect_ratio_info_present_flag*/ if (gf_bs_read_int(orig, 1)) { s32 aspect_ratio_idc = gf_bs_read_int(orig, 8); if (aspect_ratio_idc == 255) { gf_bs_read_int(orig, 16); /*AR num*/ gf_bs_read_int(orig, 16); /*AR den*/ } } } if ((ar_d<0) || (ar_n<0)) { /*no AR signaled*/ gf_bs_write_int(mod, 0, 1); } else { u32 sarx; gf_bs_write_int(mod, 1, 1); sarx = avc_get_sar_idx((u32) ar_n, (u32) ar_d); gf_bs_write_int(mod, sarx, 8); if (sarx==0xFF) { gf_bs_write_int(mod, ar_n, 16); gf_bs_write_int(mod, ar_d, 16); } } /*no VUI in input bitstream, set all vui flags to 0*/ if (!flag) { gf_bs_write_int(mod, 0, 1); /*overscan_info_present_flag */ gf_bs_write_int(mod, 0, 1); /*video_signal_type_present_flag */ gf_bs_write_int(mod, 0, 1); /*chroma_location_info_present_flag */ gf_bs_write_int(mod, 0, 1); /*timing_info_present_flag*/ gf_bs_write_int(mod, 0, 1); /*nal_hrd_parameters_present*/ gf_bs_write_int(mod, 0, 1); /*vcl_hrd_parameters_present*/ gf_bs_write_int(mod, 0, 1); /*pic_struct_present*/ gf_bs_write_int(mod, 0, 1); /*bitstream_restriction*/ } /*finally copy over remaining*/ while (gf_bs_bits_available(orig)) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); } gf_bs_del(orig); orig = NULL; gf_free(no_emulation_buf); /*set anti-emulation*/ gf_bs_get_content(mod, (char **) &no_emulation_buf, &flag); emulation_bytes = avc_emulation_bytes_add_count(no_emulation_buf, flag); if (flag+emulation_bytes+1>slc->size) slc->data = (char*)gf_realloc(slc->data, flag+emulation_bytes+1); slc->size = avc_add_emulation_bytes(no_emulation_buf, slc->data+1, flag)+1; gf_bs_del(mod); gf_free(no_emulation_buf); } return GF_OK; }
0
[ "CWE-119", "CWE-787" ]
gpac
90dc7f853d31b0a4e9441cba97feccf36d8b69a4
278,826,953,995,335,100,000,000,000,000,000,000,000
98
fix some exploitable overflows (#994, #997)
int mnt_match_options(const char *optstr, const char *pattern) { char *name, *pat = (char *) pattern; char *buf, *patval; size_t namesz = 0, patvalsz = 0; int match = 1; if (!pattern && !optstr) return 1; if (!pattern) return 0; buf = malloc(strlen(pattern) + 1); if (!buf) return 0; /* walk on pattern string */ while (match && !mnt_optstr_next_option(&pat, &name, &namesz, &patval, &patvalsz)) { char *val; size_t sz; int no = 0, rc; if (*name == '+') name++, namesz--; else if ((no = (startswith(name, "no") != NULL))) name += 2, namesz -= 2; xstrncpy(buf, name, namesz + 1); rc = mnt_optstr_get_option(optstr, buf, &val, &sz); /* check also value (if the pattern is "foo=value") */ if (rc == 0 && patvalsz > 0 && (patvalsz != sz || strncmp(patval, val, sz) != 0)) rc = 1; switch (rc) { case 0: /* found */ match = no == 0 ? 1 : 0; break; case 1: /* not found */ match = no == 1 ? 1 : 0; break; default: /* parse error */ match = 0; break; } } free(buf); return match; }
0
[ "CWE-552", "CWE-703" ]
util-linux
57202f5713afa2af20ffbb6ab5331481d0396f8d
33,899,877,013,615,016,000,000,000,000,000,000,000
55
libmount: fix UID check for FUSE umount [CVE-2021-3995] An improper UID check allows an unprivileged user to unmount FUSE filesystems of users with a similar UID. Signed-off-by: Karel Zak <[email protected]>
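The essence of the fix, sketched: parse the stored uid and compare full numeric values, so that, e.g., uid 100 can no longer match uid 1000 by string prefix. The option name is illustrative.

#include <sys/types.h>
#include <stdlib.h>

/* opt is the raw value of a "user_id=" style mount option. */
static int uid_matches(const char *opt, uid_t uid)
{
	char *end = NULL;
	unsigned long v = strtoul(opt, &end, 10);

	if (end == opt || *end != '\0')   /* partial or garbled value: reject */
		return 0;
	return (uid_t)v == uid;           /* exact numeric match only */
}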
NDIS_STATUS ParaNdis6_SendPauseRestart( PARANDIS_ADAPTER *pContext, BOOLEAN bPause, ONPAUSECOMPLETEPROC Callback ) { NDIS_STATUS status = NDIS_STATUS_SUCCESS; DEBUG_ENTRY(4); if (bPause) { ParaNdis_DebugHistory(pContext, hopInternalSendPause, NULL, 1, 0, 0); if (pContext->SendState == srsEnabled) { { CNdisPassiveWriteAutoLock tLock(pContext->m_PauseLock); pContext->SendState = srsPausing; pContext->SendPauseCompletionProc = Callback; } for (UINT i = 0; i < pContext->nPathBundles; i++) { if (!pContext->pPathBundles[i].txPath.Pause()) { status = NDIS_STATUS_PENDING; } } if (status == NDIS_STATUS_SUCCESS) { pContext->SendState = srsDisabled; } } if (status == NDIS_STATUS_SUCCESS) { ParaNdis_DebugHistory(pContext, hopInternalSendPause, NULL, 0, 0, 0); } } else { pContext->SendState = srsEnabled; ParaNdis_DebugHistory(pContext, hopInternalSendResume, NULL, 0, 0, 0); } return status; }
0
[ "CWE-20" ]
kvm-guest-drivers-windows
723416fa4210b7464b28eab89cc76252e6193ac1
243,941,848,682,634,500,000,000,000,000,000,000,000
45
NetKVM: BZ#1169718: Checking the length only on read Signed-off-by: Joseph Hindin <[email protected]>
QStringList JlCompress::extractDir(QString fileCompressed, QString dir) { // Apro lo zip QuaZip zip(fileCompressed); return extractDir(zip, dir); }
0
[ "CWE-22" ]
quazip
5d2fc16a1976e5bf78d2927b012f67a2ae047a98
128,032,481,982,514,340,000,000,000,000,000,000,000
5
Fixed the Zip Slip vulnerability in JlCompress When extracting a file with a dangerous path like "../evil.exe" from a ZIP archive with JlCompress::extractDir(), the target file would be created outside of the target directory, potentially even overwriting an existing file there.
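A canonical-path containment check of the kind a Zip Slip fix needs, sketched in C (the actual change is in QuaZip's C++); it assumes the candidate target's path can already be resolved by realpath().

#include <limits.h>
#include <stdlib.h>
#include <string.h>

/* Returns 1 only if resolved 'target' stays inside resolved 'dir'. */
static int path_stays_inside(const char *dir, const char *target)
{
	char rdir[PATH_MAX], rtarget[PATH_MAX];
	size_t n;

	if (!realpath(dir, rdir) || !realpath(target, rtarget))
		return 0;
	n = strlen(rdir);
	return strncmp(rtarget, rdir, n) == 0 &&
	       (rtarget[n] == '/' || rtarget[n] == '\0');
}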
void LibRaw::raw2image_start() { // restore color,sizes and internal data into raw_image fields memmove(&imgdata.color,&imgdata.rawdata.color,sizeof(imgdata.color)); memmove(&imgdata.sizes,&imgdata.rawdata.sizes,sizeof(imgdata.sizes)); memmove(&imgdata.idata,&imgdata.rawdata.iparams,sizeof(imgdata.idata)); memmove(&libraw_internal_data.internal_output_params,&imgdata.rawdata.ioparams,sizeof(libraw_internal_data.internal_output_params)); if (O.user_flip >= 0) S.flip = O.user_flip; switch ((S.flip+3600) % 360) { case 270: S.flip = 5; break; case 180: S.flip = 3; break; case 90: S.flip = 6; break; } // adjust for half mode! IO.shrink = P1.filters && (O.half_size || ((O.threshold || O.aber[0] != 1 || O.aber[2] != 1) )); S.iheight = (S.height + IO.shrink) >> IO.shrink; S.iwidth = (S.width + IO.shrink) >> IO.shrink; }
0
[ "CWE-129" ]
LibRaw
89d065424f09b788f443734d44857289489ca9e2
232,439,219,222,036,220,000,000,000,000,000,000,000
26
fixed two more problems found by fuzzer
inline unsigned int prand(const double z) { cimg::mutex(4); const unsigned int res = cimg::prand(z,&cimg::rng()); cimg::mutex(4,0); return res; }
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
300,149,867,907,741,780,000,000,000,000,000,000,000
6
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that the dimensions encoded in the file do not exceed the file size.
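The sanity check in sketch form: reject a header whose claimed pixel payload exceeds the bytes actually present in the file (division rather than multiplication sidesteps overflow).

#include <stdint.h>

static int dims_plausible(uint64_t file_size, uint32_t w, uint32_t h,
                          uint32_t bits_per_pixel)
{
	uint64_t bytes_per_px = (bits_per_pixel + 7) / 8;

	if (w == 0 || h == 0 || bytes_per_px == 0)
		return 0;
	return (uint64_t)w * h <= file_size / bytes_per_px;
}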
bool AbstractSqlMigrationReader::migrateTo(AbstractSqlMigrationWriter *writer) { if (!transaction()) { qWarning() << "AbstractSqlMigrationReader::migrateTo(): unable to start reader's transaction!"; return false; } if (!writer->transaction()) { qWarning() << "AbstractSqlMigrationReader::migrateTo(): unable to start writer's transaction!"; rollback(); // close the reader transaction; return false; } _writer = writer; // due to the incompatibility across Migration objects we can't run this in a loop... :/ QuasselUserMO quasselUserMo; if (!transferMo(QuasselUser, quasselUserMo)) return false; IdentityMO identityMo; if (!transferMo(Identity, identityMo)) return false; IdentityNickMO identityNickMo; if (!transferMo(IdentityNick, identityNickMo)) return false; NetworkMO networkMo; if (!transferMo(Network, networkMo)) return false; BufferMO bufferMo; if (!transferMo(Buffer, bufferMo)) return false; SenderMO senderMo; if (!transferMo(Sender, senderMo)) return false; BacklogMO backlogMo; if (!transferMo(Backlog, backlogMo)) return false; IrcServerMO ircServerMo; if (!transferMo(IrcServer, ircServerMo)) return false; UserSettingMO userSettingMo; if (!transferMo(UserSetting, userSettingMo)) return false; if (!_writer->postProcess()) abortMigration(); return finalizeMigration(); }
0
[ "CWE-89" ]
quassel
aa1008be162cb27da938cce93ba533f54d228869
188,984,235,160,416,460,000,000,000,000,000,000,000
55
Fixing security vulnerability with Qt 4.8.5+ and PostgreSQL. Properly detects whether Qt performs slash escaping in SQL queries or not, and then configures PostgreSQL accordingly. This bug was introduced by a bugfix in Qt 4.8.5 that disables slash escaping when binding queries: https://bugreports.qt-project.org/browse/QTBUG-30076 Thanks to brot and Tucos. [Fixes #1244]
uint32_t CompactProtocolWriter::writeMapBegin( const TType keyType, TType valType, uint32_t size) { uint32_t wsize = 0; if (size == 0) { wsize += writeByte(0); } else { wsize += apache::thrift::util::writeVarint(out_, size); wsize += writeByte( detail::compact::TTypeToCType[keyType] << 4 | detail::compact::TTypeToCType[valType]); } return wsize; }
0
[ "CWE-703", "CWE-770" ]
fbthrift
c9a903e5902834e95bbd4ab0e9fa53ba0189f351
252,737,648,357,731,000,000,000,000,000,000,000,000
16
Better handling of truncated data when reading strings Summary: Currently we read the string size and blindly pre-allocate it. This allows a malicious attacker to send a message of a few bytes and cause the server to allocate a huge amount of memory (>1GB). This diff changes the logic to check if we have enough data in the buffer before allocating the string. This is a second part of a fix for CVE-2019-3553. Reviewed By: vitaut Differential Revision: D14393393 fbshipit-source-id: e2046d2f5b087d3abc9a9d2c6c107cf088673057
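The guard described in the message, sketched generically: validate the claimed length against the bytes still buffered before allocating anything.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static char *read_string_checked(const uint8_t *buf, size_t remaining,
                                 uint32_t claimed)
{
	char *s;

	if (claimed > remaining)   /* truncated or hostile length field */
		return NULL;
	s = malloc((size_t)claimed + 1);
	if (!s)
		return NULL;
	memcpy(s, buf, claimed);
	s[claimed] = '\0';
	return s;
}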
_copyDropRoleStmt(const DropRoleStmt *from) { DropRoleStmt *newnode = makeNode(DropRoleStmt); COPY_NODE_FIELD(roles); COPY_SCALAR_FIELD(missing_ok); return newnode; }
0
[ "CWE-362" ]
postgres
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
305,596,317,715,488,160,000,000,000,000,000,000,000
9
Avoid repeated name lookups during table and index DDL. If the name lookups come to different conclusions due to concurrent activity, we might perform some parts of the DDL on a different table than other parts. At least in the case of CREATE INDEX, this can be used to cause the permissions checks to be performed against a different table than the index creation, allowing for a privilege escalation attack. This changes the calling convention for DefineIndex, CreateTrigger, transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible (in 9.2 and newer), and AlterTable (in 9.1 and older). In addition, CheckRelationOwnership is removed in 9.2 and newer and the calling convention is changed in older branches. A field has also been added to the Constraint node (FkConstraint in 8.4). Third-party code calling these functions or using the Constraint node will require updating. Report by Andres Freund. Patch by Robert Haas and Andres Freund, reviewed by Tom Lane. Security: CVE-2014-0062
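The shape of the fix, sketched with stand-in helpers (not the real PostgreSQL API): resolve the name to an OID once, under lock, and hand that OID to every later phase instead of repeating the lookup.

typedef unsigned int Oid;   /* as in PostgreSQL */

Oid  resolve_and_lock_relation(const char *name);  /* hypothetical */
void check_permissions(Oid relid);                 /* hypothetical */
void build_index(Oid relid);                       /* hypothetical */

void define_index_fixed(const char *name)
{
	/* One lookup, lock held: a concurrent drop-and-recreate can no longer
	 * swap a different table in between the permission check and the DDL. */
	Oid relid = resolve_and_lock_relation(name);

	check_permissions(relid);
	build_index(relid);
}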
StringVal decrypt(FunctionContext* ctx, const StringVal& src, const StringVal& key, const StringVal& iv, EncryptionMode mode) { if (src.len == 0 || src.is_null) { return StringVal::null(); } int cipher_len = src.len; std::unique_ptr<char[]> p; p.reset(new char[cipher_len]); int ret_code = 0; if (mode != AES_128_ECB && mode != AES_192_ECB && mode != AES_256_ECB && mode != AES_256_ECB && mode != SM4_128_ECB) { if (iv.len == 0 || iv.is_null) { return StringVal::null(); } int iv_len = 32; // max key length 256 / 8 std::unique_ptr<char[]> init_vec; init_vec.reset(new char[iv_len]); std::memset(init_vec.get(), 0, iv.len + 1); memcpy(init_vec.get(), iv.ptr, iv.len); ret_code = EncryptionUtil::decrypt( mode, (unsigned char*)src.ptr, src.len, (unsigned char*)key.ptr, key.len, (unsigned char*)init_vec.get(), true, (unsigned char*)p.get()); } else { ret_code = EncryptionUtil::decrypt(mode, (unsigned char*)src.ptr, src.len, (unsigned char*)key.ptr, key.len, nullptr, true, (unsigned char*)p.get()); } if (ret_code < 0) { return StringVal::null(); } return AnyValUtil::from_buffer_temp(ctx, p.get(), ret_code); }
1
[ "CWE-200" ]
incubator-doris
246ac4e37aa4da6836b7850cb990f02d1c3725a3
230,954,700,657,740,430,000,000,000,000,000,000,000
32
[fix] fix a bug where the encryption function with an IV may return a wrong result (#8277)
static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog, struct netlink_ext_ack *extack) { unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM; struct netfront_info *np = netdev_priv(dev); struct bpf_prog *old_prog; unsigned int i, err; if (dev->mtu > max_mtu) { netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu); return -EINVAL; } if (!np->netback_has_xdp_headroom) return 0; xenbus_switch_state(np->xbdev, XenbusStateReconfiguring); err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE : NETBACK_XDP_HEADROOM_DISABLE); if (err) return err; /* avoid the race with XDP headroom adjustment */ wait_event(module_wq, xenbus_read_driver_state(np->xbdev->otherend) == XenbusStateReconfigured); np->netfront_xdp_enabled = true; old_prog = rtnl_dereference(np->queues[0].xdp_prog); if (prog) bpf_prog_add(prog, dev->real_num_tx_queues); for (i = 0; i < dev->real_num_tx_queues; ++i) rcu_assign_pointer(np->queues[i].xdp_prog, prog); if (old_prog) for (i = 0; i < dev->real_num_tx_queues; ++i) bpf_prog_put(old_prog); xenbus_switch_state(np->xbdev, XenbusStateConnected); return 0; }
0
[]
linux
f63c2c2032c2e3caad9add3b82cc6e91c376fd26
160,285,555,620,056,060,000,000,000,000,000,000,000
45
xen-netfront: restore __skb_queue_tail() positioning in xennet_get_responses() The commit referenced below moved the invocation past the "next" label, without any explanation. In fact this allows misbehaving backends undue control over the domain the frontend runs in, as earlier detected errors require the skb to not be freed (it may be retained for later processing via xennet_move_rx_slot(), or it may simply be unsafe to have it freed). This is CVE-2022-33743 / XSA-405. Fixes: 6c5aa6fc4def ("xen networking: add basic XDP support for xen-netfront") Signed-off-by: Jan Beulich <[email protected]> Reviewed-by: Juergen Gross <[email protected]> Signed-off-by: Juergen Gross <[email protected]>
int git_tree_entry_dup(git_tree_entry **dest, const git_tree_entry *source) { size_t total_size; git_tree_entry *copy; assert(source); total_size = sizeof(git_tree_entry) + source->filename_len + 1; copy = git__malloc(total_size); GITERR_CHECK_ALLOC(copy); memcpy(copy, source, total_size); *dest = copy; return 0; }
0
[ "CWE-20" ]
libgit2
928429c5c96a701bcbcafacb2421a82602b36915
212,791,120,385,077,100,000,000,000,000,000,000,000
17
tree: Check for `.git` with case insensitivity
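The core of the check, sketched; libgit2's full validation covers more aliases, but the essential test is a case-insensitive comparison of the component name.

#include <strings.h>

/* Reject tree entries that would alias the repository directory on
 * case-insensitive filesystems (".git", ".GIT", ".Git", ...). */
static int name_is_dotgit(const char *name)
{
	return strcasecmp(name, ".git") == 0;
}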
static inline spinlock_t *ext4_group_lock_ptr(struct super_block *sb, ext4_group_t group) { return bgl_lock_ptr(EXT4_SB(sb)->s_blockgroup_lock, group);
0
[ "CWE-787" ]
linux
c37e9e013469521d9adb932d17a1795c139b36db
140,269,723,221,269,960,000,000,000,000,000,000,000
5
ext4: add more inode number paranoia checks If there is a directory entry pointing to a system inode (such as a journal inode), complain and declare the file system to be corrupted. Also, if the superblock's first inode number field is too small, refuse to mount the file system. This addresses CVE-2018-10882. https://bugzilla.kernel.org/show_bug.cgi?id=200069 Signed-off-by: Theodore Ts'o <[email protected]> Cc: [email protected]
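The inode-number check in sketch form, modeled on ext4's own validity test (macro names as in the kernel; the wrapper is illustrative).

static int inum_is_valid(struct super_block *sb, unsigned long ino)
{
	/* Only the root inode or an inode at or past the superblock's first
	 * "normal" inode may be referenced by a directory entry; anything
	 * lower is a reserved system inode and signals corruption. */
	return ino == EXT4_ROOT_INO ||
	       (ino >= EXT4_FIRST_INO(sb) &&
	        ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
}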
mailimf_from_parse(const char * message, size_t length, size_t * indx, struct mailimf_from ** result) { struct mailimf_mailbox_list * mb_list; struct mailimf_from * from; size_t cur_token; int r; int res; cur_token = * indx; r = mailimf_token_case_insensitive_parse(message, length, &cur_token, "From"); if (r != MAILIMF_NO_ERROR) { res = r; goto err; } r = mailimf_colon_parse(message, length, &cur_token); if (r != MAILIMF_NO_ERROR) { res = r; goto err; } r = mailimf_mailbox_list_parse(message, length, &cur_token, &mb_list); if (r != MAILIMF_NO_ERROR) { res = r; goto err; } r = mailimf_unstrict_crlf_parse(message, length, &cur_token); if (r != MAILIMF_NO_ERROR) { res = r; goto free_mb_list; } from = mailimf_from_new(mb_list); if (from == NULL) { res = MAILIMF_ERROR_MEMORY; goto free_mb_list; } * result = from; * indx = cur_token; return MAILIMF_NO_ERROR; free_mb_list: mailimf_mailbox_list_free(mb_list); err: return res; }
0
[ "CWE-476" ]
libetpan
1fe8fbc032ccda1db9af66d93016b49c16c1f22d
87,159,097,528,908,480,000,000,000,000,000,000,000
53
Fixed crash #274
FunctionContext::FunctionContext() : _impl(new doris::FunctionContextImpl(this)) {}
0
[ "CWE-200" ]
incubator-doris
246ac4e37aa4da6836b7850cb990f02d1c3725a3
237,755,216,591,800,730,000,000,000,000,000,000,000
1
[fix] fix a bug where the encryption function with an IV may return a wrong result (#8277)
bool check_allow_all_hosts() { return (!hostname || (hostname[0] == wild_many && !hostname[1])); }
0
[]
mysql-server
25d1b7e03b9b375a243fabdf0556c063c7282361
57,916,573,559,514,850,000,000,000,000,000,000,000
5
Bug #22722946: integer overflow may lead to wrong results in get_56_lenc_string
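An overflow-safe length-encoded read in sketch form (one-byte length only, for brevity): the claimed length is checked against the remaining packet bytes by comparison, never by pointer arithmetic that could wrap.

#include <stdint.h>

static int read_lenc_string(const uint8_t **pos, const uint8_t *end,
                            const uint8_t **str, uint64_t *len)
{
	if (*pos >= end)
		return 0;
	*len = *(*pos)++;                       /* 1-byte length form */
	if (*len > (uint64_t)(end - *pos))      /* compare, don't add */
		return 0;
	*str = *pos;
	*pos += *len;
	return 1;
}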
_validate_summary_for_collection_id (GVariant *summary_v, const char *collection_id, GError **error) { VarSummaryRef summary; summary = var_summary_from_gvariant (summary_v); if (!flatpak_summary_find_ref_map (summary, collection_id, NULL)) return flatpak_fail_error (error, FLATPAK_ERROR_INVALID_DATA, _("Configured collection ID ‘%s’ not in summary file"), collection_id); return TRUE; }
0
[ "CWE-74" ]
flatpak
fb473cad801c6b61706353256cab32330557374a
277,208,714,149,904,300,000,000,000,000,000,000,000
13
dir: Pass environment via bwrap --setenv when running apply_extra This means we can systematically pass the environment variables through bwrap(1), even if it is setuid and thus is filtering out security-sensitive environment variables. bwrap ends up being run with an empty environment instead. As with the previous commit, this regressed while fixing CVE-2021-21261. Fixes: 6d1773d2 "run: Convert all environment variables into bwrap arguments" Signed-off-by: Simon McVittie <[email protected]>
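A sketch of the argv construction: each variable rides as an explicit --setenv triple while the child process itself receives an empty environment. Paths and variable values here are made up.

#include <unistd.h>

static void run_bwrap_with_env_sketch(void)
{
	char *argv[] = {
		"bwrap",
		"--setenv", "FLATPAK_ID", "org.example.App",
		"--setenv", "LANG",       "C.UTF-8",
		"/app/bin/apply_extra",
		NULL
	};
	char *envp[] = { NULL };   /* setuid bwrap filters the env anyway */

	execve("/usr/bin/bwrap", argv, envp);
}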
boot_defclass(mrb_state *mrb, struct RClass *super) { struct RClass *c; c = MRB_OBJ_ALLOC(mrb, MRB_TT_CLASS, mrb->class_class); if (super) { c->super = super; mrb_field_write_barrier(mrb, (struct RBasic*)c, (struct RBasic*)super); c->flags |= MRB_FL_CLASS_IS_INHERITED; } else { c->super = mrb->object_class; } c->mt = mt_new(mrb); return c; }
0
[ "CWE-787" ]
mruby
b1d0296a937fe278239bdfac840a3fd0e93b3ee9
310,957,737,911,344,000,000,000,000,000,000,000,000
16
class.c: clear method cache after `remove_method`.
void CiffDirectory::doAddComponent(UniquePtr component) { components_.push_back(component.release()); }
0
[ "CWE-125" ]
exiv2
9628f82084ed30d494ddd4f7360d233801e22967
96,679,817,377,721,400,000,000,000,000,000,000,000
4
Avoid integer overflow. (cherry picked from commit c0ecc2ae36f34462be98623deb85ba1747ae2175)
rfbClient* rfbGetClient(int bitsPerSample,int samplesPerPixel, int bytesPerPixel) { #ifdef WIN32 WSADATA unused; #endif rfbClient* client=(rfbClient*)calloc(sizeof(rfbClient),1); if(!client) { rfbClientErr("Couldn't allocate client structure!\n"); return NULL; } #ifdef WIN32 if((errno = WSAStartup(MAKEWORD(2,0), &unused)) != 0) { rfbClientErr("Could not init Windows Sockets: %s\n", strerror(errno)); return NULL; } #endif initAppData(&client->appData); client->endianTest = 1; client->programName=""; client->serverHost=strdup(""); client->serverPort=5900; client->destHost = NULL; client->destPort = 5900; client->connectTimeout = DEFAULT_CONNECT_TIMEOUT; client->readTimeout = DEFAULT_READ_TIMEOUT; /* default: use complete frame buffer */ client->updateRect.x = -1; client->frameBuffer = NULL; client->outputWindow = 0; client->format.bitsPerPixel = bytesPerPixel*8; client->format.depth = bitsPerSample*samplesPerPixel; client->appData.requestedDepth=client->format.depth; client->format.bigEndian = *(char *)&client->endianTest?FALSE:TRUE; client->format.trueColour = 1; if (client->format.bitsPerPixel == 8) { client->format.redMax = 7; client->format.greenMax = 7; client->format.blueMax = 3; client->format.redShift = 0; client->format.greenShift = 3; client->format.blueShift = 6; } else { client->format.redMax = (1 << bitsPerSample) - 1; client->format.greenMax = (1 << bitsPerSample) - 1; client->format.blueMax = (1 << bitsPerSample) - 1; if(!client->format.bigEndian) { client->format.redShift = 0; client->format.greenShift = bitsPerSample; client->format.blueShift = bitsPerSample * 2; } else { if(client->format.bitsPerPixel==8*3) { client->format.redShift = bitsPerSample*2; client->format.greenShift = bitsPerSample*1; client->format.blueShift = 0; } else { client->format.redShift = bitsPerSample*3; client->format.greenShift = bitsPerSample*2; client->format.blueShift = bitsPerSample; } } } client->bufoutptr=client->buf; client->buffered=0; #ifdef LIBVNCSERVER_HAVE_LIBZ client->raw_buffer_size = -1; client->decompStreamInited = FALSE; #ifdef LIBVNCSERVER_HAVE_LIBJPEG memset(client->zlibStreamActive,0,sizeof(rfbBool)*4); #endif #endif client->HandleCursorPos = DummyPoint; client->SoftCursorLockArea = DummyRect; client->SoftCursorUnlockScreen = Dummy; client->GotFrameBufferUpdate = DummyRect; client->GotCopyRect = CopyRectangleFromRectangle; client->GotFillRect = FillRectangle; client->GotBitmap = CopyRectangle; client->FinishedFrameBufferUpdate = NULL; client->GetPassword = ReadPassword; client->MallocFrameBuffer = MallocFrameBuffer; client->Bell = Dummy; client->CurrentKeyboardLedState = 0; client->HandleKeyboardLedState = (HandleKeyboardLedStateProc)DummyPoint; client->QoS_DSCP = 0; client->authScheme = 0; client->subAuthScheme = 0; client->GetCredential = NULL; client->tlsSession = NULL; client->LockWriteToTLS = NULL; client->UnlockWriteToTLS = NULL; client->sock = RFB_INVALID_SOCKET; client->listenSock = RFB_INVALID_SOCKET; client->listenAddress = NULL; client->listen6Sock = RFB_INVALID_SOCKET; client->listen6Address = NULL; client->clientAuthSchemes = NULL; #ifdef LIBVNCSERVER_HAVE_SASL client->GetSASLMechanism = NULL; client->GetUser = NULL; client->saslSecret = NULL; #endif /* LIBVNCSERVER_HAVE_SASL */ return client; }
0
[ "CWE-400", "CWE-703" ]
libvncserver
bef41f6ec4097a8ee094f90a1b34a708fbd757ec
105,036,695,854,350,470,000,000,000,000,000,000,000
116
libvncclient: free vncRec memory in rfbClientCleanup() Otherwise we leak memory. Spotted by Ramin Farajpour Cami <[email protected]>, thanks!
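The shape of the leak fix, sketched; rfbClient's vncRec pointer is per the libvncclient headers, while the wrapper name is illustrative and the real rfbClientCleanup() releases further fields as well.

#include <rfb/rfbclient.h>
#include <stdlib.h>

void client_cleanup_sketch(rfbClient *client)
{
	if (!client)
		return;
	if (client->vncRec)        /* previously leaked on cleanup */
		free(client->vncRec);
	free(client);
}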