Dataset schema (one record per row, fields listed in this order):
  func      : string, length 0 to 484k  (function source code)
  target    : int64, 0 to 1
  cwe       : sequence, length 0 to 4
  project   : string, 799 distinct values
  commit_id : string, length 40 to 40
  hash      : float64, 1,215,700,430,453,689,100,000,000B to 340,281,914,521,452,260,000,000,000,000B
  size      : int64, 1 to 24k
  message   : string, length 0 to 13.3k  (commit message)
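Each record below therefore pairs one function (func) with a binary label (target), its CWE identifiers, and provenance fields. For readers who want to handle such records programmatically, here is a minimal C sketch of a matching record type; the struct name and the concrete C types are assumptions made for illustration, not part of the dataset itself.

```c
#include <stddef.h>
#include <stdint.h>

/* Hypothetical record mirroring the per-row schema above; field names follow
 * the column names, the concrete C types are illustrative assumptions. */
struct vuln_row {
    char     *func;          /* function source text, length 0 to 484k   */
    int64_t   target;        /* binary label, 0 or 1                     */
    char    **cwe;           /* 0 to 4 CWE identifiers, e.g. "CWE-119"   */
    size_t    cwe_count;     /* number of entries in cwe                 */
    char     *project;       /* one of 799 distinct project names        */
    char      commit_id[41]; /* 40-character git commit hash plus NUL    */
    double    hash;          /* float64 row hash                         */
    int64_t   size;          /* reported function size, 1 to 24k         */
    char     *message;       /* commit message, length 0 to 13.3k        */
};
```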
*/ void xmlXPathSubstringFunction(xmlXPathParserContextPtr ctxt, int nargs) { xmlXPathObjectPtr str, start, len; double le=0, in; int i, l, m; xmlChar *ret; if (nargs < 2) { CHECK_ARITY(2); } if (nargs > 3) { CHECK_ARITY(3); } /* * take care of possible last (position) argument */ if (nargs == 3) { CAST_TO_NUMBER; CHECK_TYPE(XPATH_NUMBER); len = valuePop(ctxt); le = len->floatval; xmlXPathReleaseObject(ctxt->context, len); } CAST_TO_NUMBER; CHECK_TYPE(XPATH_NUMBER); start = valuePop(ctxt); in = start->floatval; xmlXPathReleaseObject(ctxt->context, start); CAST_TO_STRING; CHECK_TYPE(XPATH_STRING); str = valuePop(ctxt); m = xmlUTF8Strlen((const unsigned char *)str->stringval); /* * If last pos not present, calculate last position */ if (nargs != 3) { le = (double)m; if (in < 1.0) in = 1.0; } /* Need to check for the special cases where either * the index is NaN, the length is NaN, or both * arguments are infinity (relying on Inf + -Inf = NaN) */ if (!xmlXPathIsInf(in) && !xmlXPathIsNaN(in + le)) { /* * To meet the requirements of the spec, the arguments * must be converted to integer format before * initial index calculations are done * * First we go to integer form, rounding up * and checking for special cases */ i = (int) in; if (((double)i)+0.5 <= in) i++; if (xmlXPathIsInf(le) == 1) { l = m; if (i < 1) i = 1; } else if (xmlXPathIsInf(le) == -1 || le < 0.0) l = 0; else { l = (int) le; if (((double)l)+0.5 <= le) l++; } /* Now we normalize inidices */ i -= 1; l += i; if (i < 0) i = 0; if (l > m) l = m; /* number of chars to copy */ l -= i; ret = xmlUTF8Strsub(str->stringval, i, l); } else { ret = NULL; } if (ret == NULL) valuePush(ctxt, xmlXPathCacheNewCString(ctxt->context, "")); else { valuePush(ctxt, xmlXPathCacheNewString(ctxt->context, ret)); xmlFree(ret); }
target: 0
cwe: [ "CWE-119" ]
project: libxml2
commit_id: 91d19754d46acd4a639a8b9e31f50f31c78f8c9c
hash: 327,418,580,722,386,530,000,000,000,000,000,000,000
size: 94
Fix the semantic of XPath axis for namespace/attribute context nodes The processing of namespace and attributes nodes was not compliant to the XPath-1.0 specification
void writeComment(const char* comment, bool eol_comment) { FStructData& current_struct = fs->getCurrentStruct(); int len; int multiline; const char* eol; char* ptr; if( !comment ) CV_Error( CV_StsNullPtr, "Null comment" ); if( strstr(comment, "--") != 0 ) CV_Error( CV_StsBadArg, "Double hyphen \'--\' is not allowed in the comments" ); len = (int)strlen(comment); eol = strchr(comment, '\n'); multiline = eol != 0; ptr = fs->bufferPtr(); if( multiline || !eol_comment || fs->bufferEnd() - ptr < len + 5 ) ptr = fs->flush(); else if( ptr > fs->bufferStart() + current_struct.indent ) *ptr++ = ' '; if( !multiline ) { ptr = fs->resizeWriteBuffer( ptr, len + 9 ); sprintf( ptr, "<!-- %s -->", comment ); len = (int)strlen(ptr); } else { strcpy( ptr, "<!--" ); len = 4; } fs->setBufferPtr(ptr + len); ptr = fs->flush(); if( multiline ) { while( comment ) { if( eol ) { ptr = fs->resizeWriteBuffer( ptr, (int)(eol - comment) + 1 ); memcpy( ptr, comment, eol - comment + 1 ); ptr += eol - comment; comment = eol + 1; eol = strchr( comment, '\n' ); } else { len = (int)strlen(comment); ptr = fs->resizeWriteBuffer( ptr, len ); memcpy( ptr, comment, len ); ptr += len; comment = 0; } fs->setBufferPtr(ptr); ptr = fs->flush(); } sprintf( ptr, "-->" ); fs->setBufferPtr(ptr + 3); fs->flush(); } }
target: 0
cwe: [ "CWE-787" ]
project: opencv
commit_id: f42d5399aac80d371b17d689851406669c9b9111
hash: 186,225,852,001,256,330,000,000,000,000,000,000,000
size: 67
core(persistence): add more checks for implementation limitations
static void kvm_guest_cpu_offline(void *dummy) { kvm_disable_steal_time(); if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) wrmsrl(MSR_KVM_PV_EOI_EN, 0); kvm_pv_disable_apf(); apf_task_wake_all(); }
target: 0
cwe: []
project: kvm
commit_id: 29fa6825463c97e5157284db80107d1bfac5d77b
hash: 26,437,760,587,990,910,000,000,000,000,000,000,000
size: 8
x86, kvm: Clear paravirt_enabled on KVM guests for espfix32's benefit paravirt_enabled has the following effects: - Disables the F00F bug workaround warning. There is no F00F bug workaround any more because Linux's standard IDT handling already works around the F00F bug, but the warning still exists. This is only cosmetic, and, in any event, there is no such thing as KVM on a CPU with the F00F bug. - Disables 32-bit APM BIOS detection. On a KVM paravirt system, there should be no APM BIOS anyway. - Disables tboot. I think that the tboot code should check the CPUID hypervisor bit directly if it matters. - paravirt_enabled disables espfix32. espfix32 should *not* be disabled under KVM paravirt. The last point is the purpose of this patch. It fixes a leak of the high 16 bits of the kernel stack address on 32-bit KVM paravirt guests. Fixes CVE-2014-8134. Cc: [email protected] Suggested-by: Konrad Rzeszutek Wilk <[email protected]> Signed-off-by: Andy Lutomirski <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
PHP_NAMED_FUNCTION(zif_locale_set_default) { char* locale_name = NULL; int len=0; if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s", &locale_name ,&len ) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_set_default: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } if(len == 0) { locale_name = (char *)uloc_getDefault() ; len = strlen(locale_name); } zend_alter_ini_entry(LOCALE_INI_NAME, sizeof(LOCALE_INI_NAME), locale_name, len, PHP_INI_USER, PHP_INI_STAGE_RUNTIME); RETURN_TRUE; }
target: 0
cwe: [ "CWE-125" ]
project: php-src
commit_id: 97eff7eb57fc2320c267a949cffd622c38712484
hash: 224,489,726,127,709,870,000,000,000,000,000,000,000
size: 23
Fix bug #72241: get_icu_value_internal out-of-bounds read
fastd_handshake_timeout_t *unknown_hash_entry(int64_t base, size_t i, const fastd_peer_address_t *addr) { int64_t slice = base - i; uint32_t hash = ctx.unknown_handshake_seed; fastd_hash(&hash, &slice, sizeof(slice)); fastd_peer_address_hash(&hash, addr); fastd_hash_final(&hash); return &ctx.unknown_handshakes[(size_t)slice % UNKNOWN_TABLES][hash % UNKNOWN_ENTRIES]; }
target: 0
cwe: [ "CWE-617", "CWE-119", "CWE-284" ]
project: fastd
commit_id: 737925113363b6130879729cdff9ccc46c33eaea
hash: 338,801,279,673,161,900,000,000,000,000,000,000,000
size: 9
receive: fix buffer leak when receiving invalid packets For fastd versions before v20, this was just a memory leak (which could still be used for DoS, as it's remotely triggerable). With the new buffer management of fastd v20, this will trigger an assertion failure instead as soon as the buffer pool is empty.
struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags) { int node = cpu_to_node(cpu); struct page *pages; struct vmcs *vmcs; pages = __alloc_pages_node(node, flags, 0); if (!pages) return NULL; vmcs = page_address(pages); memset(vmcs, 0, vmcs_config.size); /* KVM supports Enlightened VMCS v1 only */ if (static_branch_unlikely(&enable_evmcs)) vmcs->hdr.revision_id = KVM_EVMCS_VERSION; else vmcs->hdr.revision_id = vmcs_config.revision_id; if (shadow) vmcs->hdr.shadow_vmcs = 1; return vmcs; }
target: 0
cwe: [ "CWE-703" ]
project: linux
commit_id: 6cd88243c7e03845a450795e134b488fc2afb736
hash: 267,531,318,119,676,600,000,000,000,000,000,000,000
size: 22
KVM: x86: do not report a vCPU as preempted outside instruction boundaries If a vCPU is outside guest mode and is scheduled out, it might be in the process of making a memory access. A problem occurs if another vCPU uses the PV TLB flush feature during the period when the vCPU is scheduled out, and a virtual address has already been translated but has not yet been accessed, because this is equivalent to using a stale TLB entry. To avoid this, only report a vCPU as preempted if sure that the guest is at an instruction boundary. A rescheduling request will be delivered to the host physical CPU as an external interrupt, so for simplicity consider any vmexit *not* instruction boundary except for external interrupts. It would in principle be okay to report the vCPU as preempted also if it is sleeping in kvm_vcpu_block(): a TLB flush IPI will incur the vmentry/vmexit overhead unnecessarily, and optimistic spinning is also unlikely to succeed. However, leave it for later because right now kvm_vcpu_check_block() is doing memory accesses. Even though the TLB flush issue only applies to virtual memory address, it's very much preferrable to be conservative. Reported-by: Jann Horn <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
int tracing_is_enabled(void) { /* * For quick access (irqsoff uses this in fast path), just * return the mirror variable of the state of the ring buffer. * It's a little racy, but we don't really care. */ smp_rmb(); return !global_trace.buffer_disabled; }
target: 0
cwe: [ "CWE-415" ]
project: linux
commit_id: 4397f04575c44e1440ec2e49b6302785c95fd2f8
hash: 309,096,692,251,888,730,000,000,000,000,000,000,000
size: 10
tracing: Fix possible double free on failure of allocating trace buffer Jing Xia and Chunyan Zhang reported that on failing to allocate part of the tracing buffer, memory is freed, but the pointers that point to them are not initialized back to NULL, and later paths may try to free the freed memory again. Jing and Chunyan fixed one of the locations that does this, but missed a spot. Link: http://lkml.kernel.org/r/[email protected] Cc: [email protected] Fixes: 737223fbca3b1 ("tracing: Consolidate buffer allocation code") Reported-by: Jing Xia <[email protected]> Reported-by: Chunyan Zhang <[email protected]> Signed-off-by: Steven Rostedt (VMware) <[email protected]>
MagickExport MemoryInfo *RelinquishVirtualMemory(MemoryInfo *memory_info) { assert(memory_info != (MemoryInfo *) NULL); assert(memory_info->signature == MagickCoreSignature); if (memory_info->blob != (void *) NULL) switch (memory_info->type) { case AlignedVirtualMemory: { memory_info->blob=RelinquishAlignedMemory(memory_info->blob); break; } case MapVirtualMemory: { (void) UnmapBlob(memory_info->blob,memory_info->length); memory_info->blob=NULL; if (*memory_info->filename != '\0') (void) RelinquishUniqueFileResource(memory_info->filename); break; } case UnalignedVirtualMemory: default: { memory_info->blob=RelinquishMagickMemory(memory_info->blob); break; } } memory_info->signature=(~MagickCoreSignature); memory_info=(MemoryInfo *) RelinquishAlignedMemory(memory_info); return(memory_info); }
target: 0
cwe: [ "CWE-369" ]
project: ImageMagick
commit_id: 70aa86f5d5d8aa605a918ed51f7574f433a18482
hash: 113,761,906,716,922,760,000,000,000,000,000,000,000
size: 31
possible divide by zero + clear buffers
static void sg_complete(struct urb *urb) { unsigned long flags; struct usb_sg_request *io = urb->context; int status = urb->status; spin_lock_irqsave(&io->lock, flags); /* In 2.5 we require hcds' endpoint queues not to progress after fault * reports, until the completion callback (this!) returns. That lets * device driver code (like this routine) unlink queued urbs first, * if it needs to, since the HC won't work on them at all. So it's * not possible for page N+1 to overwrite page N, and so on. * * That's only for "hard" faults; "soft" faults (unlinks) sometimes * complete before the HCD can get requests away from hardware, * though never during cleanup after a hard fault. */ if (io->status && (io->status != -ECONNRESET || status != -ECONNRESET) && urb->actual_length) { dev_err(io->dev->bus->controller, "dev %s ep%d%s scatterlist error %d/%d\n", io->dev->devpath, usb_endpoint_num(&urb->ep->desc), usb_urb_dir_in(urb) ? "in" : "out", status, io->status); /* BUG (); */ } if (io->status == 0 && status && status != -ECONNRESET) { int i, found, retval; io->status = status; /* the previous urbs, and this one, completed already. * unlink pending urbs so they won't rx/tx bad data. * careful: unlink can sometimes be synchronous... */ spin_unlock_irqrestore(&io->lock, flags); for (i = 0, found = 0; i < io->entries; i++) { if (!io->urbs[i]) continue; if (found) { usb_block_urb(io->urbs[i]); retval = usb_unlink_urb(io->urbs[i]); if (retval != -EINPROGRESS && retval != -ENODEV && retval != -EBUSY && retval != -EIDRM) dev_err(&io->dev->dev, "%s, unlink --> %d\n", __func__, retval); } else if (urb == io->urbs[i]) found = 1; } spin_lock_irqsave(&io->lock, flags); } /* on the last completion, signal usb_sg_wait() */ io->bytes += urb->actual_length; io->count--; if (!io->count) complete(&io->complete); spin_unlock_irqrestore(&io->lock, flags); }
target: 0
cwe: [ "CWE-416" ]
project: linux
commit_id: 056ad39ee9253873522f6469c3364964a322912b
hash: 307,008,518,589,465,050,000,000,000,000,000,000,000
size: 68
USB: core: Fix free-while-in-use bug in the USB S-Glibrary FuzzUSB (a variant of syzkaller) found a free-while-still-in-use bug in the USB scatter-gather library: BUG: KASAN: use-after-free in atomic_read include/asm-generic/atomic-instrumented.h:26 [inline] BUG: KASAN: use-after-free in usb_hcd_unlink_urb+0x5f/0x170 drivers/usb/core/hcd.c:1607 Read of size 4 at addr ffff888065379610 by task kworker/u4:1/27 CPU: 1 PID: 27 Comm: kworker/u4:1 Not tainted 5.5.11 #2 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014 Workqueue: scsi_tmf_2 scmd_eh_abort_handler Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0xce/0x128 lib/dump_stack.c:118 print_address_description.constprop.4+0x21/0x3c0 mm/kasan/report.c:374 __kasan_report+0x153/0x1cb mm/kasan/report.c:506 kasan_report+0x12/0x20 mm/kasan/common.c:639 check_memory_region_inline mm/kasan/generic.c:185 [inline] check_memory_region+0x152/0x1b0 mm/kasan/generic.c:192 __kasan_check_read+0x11/0x20 mm/kasan/common.c:95 atomic_read include/asm-generic/atomic-instrumented.h:26 [inline] usb_hcd_unlink_urb+0x5f/0x170 drivers/usb/core/hcd.c:1607 usb_unlink_urb+0x72/0xb0 drivers/usb/core/urb.c:657 usb_sg_cancel+0x14e/0x290 drivers/usb/core/message.c:602 usb_stor_stop_transport+0x5e/0xa0 drivers/usb/storage/transport.c:937 This bug occurs when cancellation of the S-G transfer races with transfer completion. When that happens, usb_sg_cancel() may continue to access the transfer's URBs after usb_sg_wait() has freed them. The bug is caused by the fact that usb_sg_cancel() does not take any sort of reference to the transfer, and so there is nothing to prevent the URBs from being deallocated while the routine is trying to use them. The fix is to take such a reference by incrementing the transfer's io->count field while the cancellation is in progres and decrementing it afterward. The transfer's URBs are not deallocated until io->complete is triggered, which happens when io->count reaches zero. Signed-off-by: Alan Stern <[email protected]> Reported-and-tested-by: Kyungtae Kim <[email protected]> CC: <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Greg Kroah-Hartman <[email protected]>
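The fix described in the commit message above is a reference-count guard: cancellation takes an extra count on the transfer under the same lock the completion path uses, so the final release cannot run while cancellation still walks the URB list. A generic, self-contained sketch of that pattern follows; the names and the pthread lock are inventions for illustration, not the kernel code.

```c
#include <pthread.h>
#include <stdbool.h>

/* Both completion and cancellation take the same lock, and cancellation
 * pins the resources with an extra "count" reference so they cannot be
 * released while it is still using them. */
struct sg_request {
    pthread_mutex_t lock;
    int             count;            /* outstanding users; released at 0 */
    bool            resources_freed;
};

static void release_resources(struct sg_request *req)
{
    /* ... free per-URB resources exactly once ... */
    req->resources_freed = true;
}

/* Called when one unit of work completes. */
void sg_complete_one(struct sg_request *req)
{
    pthread_mutex_lock(&req->lock);
    if (--req->count == 0)
        release_resources(req);
    pthread_mutex_unlock(&req->lock);
}

/* Called by the cancellation path. */
void sg_cancel(struct sg_request *req)
{
    pthread_mutex_lock(&req->lock);
    req->count++;                     /* pin resources while we walk them */
    pthread_mutex_unlock(&req->lock);

    /* ... unlink / cancel outstanding work without holding the lock ... */

    pthread_mutex_lock(&req->lock);
    if (--req->count == 0)            /* drop the pin; maybe we are last */
        release_resources(req);
    pthread_mutex_unlock(&req->lock);
}
```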
void gd_putout(int i, void *out) { gdPutC(i, (gdIOCtx *)out); }
target: 0
cwe: [ "CWE-415" ]
project: libgd
commit_id: 553702980ae89c83f2d6e254d62cf82e204956d0
hash: 294,195,112,780,955,600,000,000,000,000,000,000,000
size: 4
Fix #492: Potential double-free in gdImage*Ptr() Whenever `gdImage*Ptr()` calls `gdImage*Ctx()` and the latter fails, we must not call `gdDPExtractData()`; otherwise a double-free would happen. Since `gdImage*Ctx()` are void functions, and we can't change that for BC reasons, we're introducing static helpers which are used internally. We're adding a regression test for `gdImageJpegPtr()`, but not for `gdImageGifPtr()` and `gdImageWbmpPtr()` since we don't know how to trigger failure of the respective `gdImage*Ctx()` calls. This potential security issue has been reported by Solmaz Salimi (aka. Rooney).
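The pattern behind the fix above is to give the writer an internal variant that reports success, and to extract (and hand over) the dynamic buffer only when that variant succeeded, so the buffer is freed exactly once. A self-contained sketch of that ownership flow, with hypothetical names rather than libgd's actual helpers:

```c
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Minimal stand-in for a growable output context that owns its buffer. */
typedef struct {
    void *data;
    int   size;
} dyn_ctx;

/* Hypothetical encoder: returns false on failure instead of being void,
 * which is the essence of the fix described in the commit message above. */
static bool encode_to_ctx(const char *payload, dyn_ctx *ctx)
{
    ctx->size = 64;
    ctx->data = malloc((size_t)ctx->size);
    if (ctx->data == NULL)
        return false;
    if (payload == NULL || strlen(payload) + 1 > (size_t)ctx->size)
        return false;                /* encoding failed; ctx still owns data */
    memcpy(ctx->data, payload, strlen(payload) + 1);
    return true;
}

/* Public "return a malloc'ed buffer" wrapper. */
void *encode_ptr(const char *payload, int *size)
{
    dyn_ctx ctx = { NULL, 0 };
    void *out = NULL;

    if (encode_to_ctx(payload, &ctx)) {
        out = ctx.data;              /* ownership moves to the caller */
        *size = ctx.size;
        ctx.data = NULL;             /* context no longer owns it */
    }
    free(ctx.data);                  /* frees at most once, only on failure */
    return out;
}
```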
sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *asconf_ack = arg; struct sctp_chunk *last_asconf = asoc->addip_last_asconf; struct sctp_chunk *abort; struct sctp_paramhdr *err_param = NULL; sctp_addiphdr_t *addip_hdr; __u32 sent_serial, rcvd_serial; if (!sctp_vtag_verify(asconf_ack, asoc)) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, SCTP_NULL()); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } /* ADD-IP, Section 4.1.2: * This chunk MUST be sent in an authenticated way by using * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk * is received unauthenticated it MUST be silently discarded as * described in [I-D.ietf-tsvwg-sctp-auth]. */ if (!net->sctp.addip_noauth && !asconf_ack->auth) return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); /* Make sure that the ADDIP chunk has a valid length. */ if (!sctp_chunk_length_valid(asconf_ack, sizeof(sctp_addip_chunk_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data; rcvd_serial = ntohl(addip_hdr->serial); /* Verify the ASCONF-ACK chunk before processing it. */ if (!sctp_verify_asconf(asoc, (sctp_paramhdr_t *)addip_hdr->params, (void *)asconf_ack->chunk_end, &err_param)) return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, (void *)err_param, commands); if (last_asconf) { addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr; sent_serial = ntohl(addip_hdr->serial); } else { sent_serial = asoc->addip_serial - 1; } /* D0) If an endpoint receives an ASCONF-ACK that is greater than or * equal to the next serial number to be used but no ASCONF chunk is * outstanding the endpoint MUST ABORT the association. Note that a * sequence number is greater than if it is no more than 2^^31-1 * larger than the current sequence number (using serial arithmetic). */ if (ADDIP_SERIAL_gte(rcvd_serial, sent_serial + 1) && !(asoc->addip_last_asconf)) { abort = sctp_make_abort(asoc, asconf_ack, sizeof(sctp_errhdr_t)); if (abort) { sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); } /* We are going to ABORT, so we might as well stop * processing the rest of the chunks in the packet. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED)); sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(SCTP_ERROR_ASCONF_ACK)); SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); return SCTP_DISPOSITION_ABORT; } if ((rcvd_serial == sent_serial) && asoc->addip_last_asconf) { sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); if (!sctp_process_asconf_ack((struct sctp_association *)asoc, asconf_ack)) { /* Successfully processed ASCONF_ACK. We can * release the next asconf if we have one. */ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF, SCTP_NULL()); return SCTP_DISPOSITION_CONSUME; } abort = sctp_make_abort(asoc, asconf_ack, sizeof(sctp_errhdr_t)); if (abort) { sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); } /* We are going to ABORT, so we might as well stop * processing the rest of the chunks in the packet. 
*/ sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED)); sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(SCTP_ERROR_ASCONF_ACK)); SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); return SCTP_DISPOSITION_ABORT; } return SCTP_DISPOSITION_DISCARD; }
target: 1
cwe: [ "CWE-20", "CWE-399" ]
project: linux
commit_id: 9de7922bc709eee2f609cd01d98aaedc4cf5ea74
hash: 200,974,972,626,172,660,000,000,000,000,000,000,000
size: 117
net: sctp: fix skb_over_panic when receiving malformed ASCONF chunks Commit 6f4c618ddb0 ("SCTP : Add paramters validity check for ASCONF chunk") added basic verification of ASCONF chunks, however, it is still possible to remotely crash a server by sending a special crafted ASCONF chunk, even up to pre 2.6.12 kernels: skb_over_panic: text:ffffffffa01ea1c3 len:31056 put:30768 head:ffff88011bd81800 data:ffff88011bd81800 tail:0x7950 end:0x440 dev:<NULL> ------------[ cut here ]------------ kernel BUG at net/core/skbuff.c:129! [...] Call Trace: <IRQ> [<ffffffff8144fb1c>] skb_put+0x5c/0x70 [<ffffffffa01ea1c3>] sctp_addto_chunk+0x63/0xd0 [sctp] [<ffffffffa01eadaf>] sctp_process_asconf+0x1af/0x540 [sctp] [<ffffffff8152d025>] ? _read_unlock_bh+0x15/0x20 [<ffffffffa01e0038>] sctp_sf_do_asconf+0x168/0x240 [sctp] [<ffffffffa01e3751>] sctp_do_sm+0x71/0x1210 [sctp] [<ffffffff8147645d>] ? fib_rules_lookup+0xad/0xf0 [<ffffffffa01e6b22>] ? sctp_cmp_addr_exact+0x32/0x40 [sctp] [<ffffffffa01e8393>] sctp_assoc_bh_rcv+0xd3/0x180 [sctp] [<ffffffffa01ee986>] sctp_inq_push+0x56/0x80 [sctp] [<ffffffffa01fcc42>] sctp_rcv+0x982/0xa10 [sctp] [<ffffffffa01d5123>] ? ipt_local_in_hook+0x23/0x28 [iptable_filter] [<ffffffff8148bdc9>] ? nf_iterate+0x69/0xb0 [<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0 [<ffffffff8148bf86>] ? nf_hook_slow+0x76/0x120 [<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0 [<ffffffff81496ded>] ip_local_deliver_finish+0xdd/0x2d0 [<ffffffff81497078>] ip_local_deliver+0x98/0xa0 [<ffffffff8149653d>] ip_rcv_finish+0x12d/0x440 [<ffffffff81496ac5>] ip_rcv+0x275/0x350 [<ffffffff8145c88b>] __netif_receive_skb+0x4ab/0x750 [<ffffffff81460588>] netif_receive_skb+0x58/0x60 This can be triggered e.g., through a simple scripted nmap connection scan injecting the chunk after the handshake, for example, ... -------------- INIT[ASCONF; ASCONF_ACK] -------------> <----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------ -------------------- COOKIE-ECHO --------------------> <-------------------- COOKIE-ACK --------------------- ------------------ ASCONF; UNKNOWN ------------------> ... where ASCONF chunk of length 280 contains 2 parameters ... 1) Add IP address parameter (param length: 16) 2) Add/del IP address parameter (param length: 255) ... followed by an UNKNOWN chunk of e.g. 4 bytes. Here, the Address Parameter in the ASCONF chunk is even missing, too. This is just an example and similarly-crafted ASCONF chunks could be used just as well. The ASCONF chunk passes through sctp_verify_asconf() as all parameters passed sanity checks, and after walking, we ended up successfully at the chunk end boundary, and thus may invoke sctp_process_asconf(). Parameter walking is done with WORD_ROUND() to take padding into account. In sctp_process_asconf()'s TLV processing, we may fail in sctp_process_asconf_param() e.g., due to removal of the IP address that is also the source address of the packet containing the ASCONF chunk, and thus we need to add all TLVs after the failure to our ASCONF response to remote via helper function sctp_add_asconf_response(), which basically invokes a sctp_addto_chunk() adding the error parameters to the given skb. When walking to the next parameter this time, we proceed with ... length = ntohs(asconf_param->param_hdr.length); asconf_param = (void *)asconf_param + length; ... 
instead of the WORD_ROUND()'ed length, thus resulting here in an off-by-one that leads to reading the follow-up garbage parameter length of 12336, and thus throwing an skb_over_panic for the reply when trying to sctp_addto_chunk() next time, which implicitly calls the skb_put() with that length. Fix it by using sctp_walk_params() [ which is also used in INIT parameter processing ] macro in the verification *and* in ASCONF processing: it will make sure we don't spill over, that we walk parameters WORD_ROUND()'ed. Moreover, we're being more defensive and guard against unknown parameter types and missized addresses. Joint work with Vlad Yasevich. Fixes: b896b82be4ae ("[SCTP] ADDIP: Support for processing incoming ASCONF_ACK chunks.") Signed-off-by: Daniel Borkmann <[email protected]> Signed-off-by: Vlad Yasevich <[email protected]> Acked-by: Neil Horman <[email protected]> Signed-off-by: David S. Miller <[email protected]>
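The root cause above is a TLV walk that advances by the raw parameter length instead of the padded length and trusts that length without re-checking the chunk boundary. Below is a generic sketch of a defensive TLV walk with 4-byte rounding and bounds checks; it illustrates the idea, not the kernel's sctp_walk_params macro.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* ntohs() */

/* Generic TLV header: 16-bit type and length in network byte order; the
 * length covers the header itself plus the value bytes. */
struct tlv_hdr {
    uint16_t type;
    uint16_t length;
};

#define TLV_ALIGN4(len) (((len) + 3U) & ~3U)   /* pad to a 4-byte boundary */

/* Walk all parameters in [buf, buf + buflen); return false on any
 * malformed length so the caller can discard the whole chunk. */
bool walk_params(const uint8_t *buf, size_t buflen,
                 void (*visit)(uint16_t type, const uint8_t *val, size_t vlen))
{
    size_t off = 0;

    while (buflen - off >= sizeof(struct tlv_hdr)) {
        struct tlv_hdr hdr;
        memcpy(&hdr, buf + off, sizeof(hdr));      /* avoid misaligned loads */

        size_t len = ntohs(hdr.length);
        if (len < sizeof(struct tlv_hdr) || len > buflen - off)
            return false;                          /* missized parameter */

        visit(ntohs(hdr.type), buf + off + sizeof(hdr), len - sizeof(hdr));

        size_t padded = TLV_ALIGN4(len);
        if (padded > buflen - off)
            break;                                 /* trailing pad runs past end */
        off += padded;                             /* advance by the padded length */
    }
    return true;
}
```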
tsize_t t2p_readwrite_pdf_image(T2P* t2p, TIFF* input, TIFF* output){ tsize_t written=0; unsigned char* buffer=NULL; unsigned char* samplebuffer=NULL; tsize_t bufferoffset=0; tsize_t samplebufferoffset=0; tsize_t read=0; tstrip_t i=0; tstrip_t j=0; tstrip_t stripcount=0; tsize_t stripsize=0; tsize_t sepstripcount=0; tsize_t sepstripsize=0; #ifdef OJPEG_SUPPORT toff_t inputoffset=0; uint16 h_samp=1; uint16 v_samp=1; uint16 ri=1; uint32 rows=0; #endif /* ifdef OJPEG_SUPPORT */ #ifdef JPEG_SUPPORT unsigned char* jpt; float* xfloatp; uint64* sbc; unsigned char* stripbuffer; tsize_t striplength=0; uint32 max_striplength=0; #endif /* ifdef JPEG_SUPPORT */ /* Fail if prior error (in particular, can't trust tiff_datasize) */ if (t2p->t2p_error != T2P_ERR_OK) return(0); if(t2p->pdf_transcode == T2P_TRANSCODE_RAW){ #ifdef CCITT_SUPPORT if(t2p->pdf_compression == T2P_COMPRESS_G4){ buffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize); if (buffer == NULL) { TIFFError(TIFF2PDF_MODULE, "Can't allocate %lu bytes of memory for " "t2p_readwrite_pdf_image, %s", (unsigned long) t2p->tiff_datasize, TIFFFileName(input)); t2p->t2p_error = T2P_ERR_ERROR; return(0); } TIFFReadRawStrip(input, 0, (tdata_t) buffer, t2p->tiff_datasize); if (t2p->tiff_fillorder==FILLORDER_LSB2MSB){ /* * make sure is lsb-to-msb * bit-endianness fill order */ TIFFReverseBits(buffer, t2p->tiff_datasize); } t2pWriteFile(output, (tdata_t) buffer, t2p->tiff_datasize); _TIFFfree(buffer); return(t2p->tiff_datasize); } #endif /* ifdef CCITT_SUPPORT */ #ifdef ZIP_SUPPORT if (t2p->pdf_compression == T2P_COMPRESS_ZIP) { buffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize); if(buffer == NULL){ TIFFError(TIFF2PDF_MODULE, "Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s", (unsigned long) t2p->tiff_datasize, TIFFFileName(input)); t2p->t2p_error = T2P_ERR_ERROR; return(0); } memset(buffer, 0, t2p->tiff_datasize); TIFFReadRawStrip(input, 0, (tdata_t) buffer, t2p->tiff_datasize); if (t2p->tiff_fillorder==FILLORDER_LSB2MSB) { TIFFReverseBits(buffer, t2p->tiff_datasize); } t2pWriteFile(output, (tdata_t) buffer, t2p->tiff_datasize); _TIFFfree(buffer); return(t2p->tiff_datasize); } #endif /* ifdef ZIP_SUPPORT */ #ifdef OJPEG_SUPPORT if(t2p->tiff_compression == COMPRESSION_OJPEG) { if(t2p->tiff_dataoffset != 0) { buffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize); if(buffer == NULL) { TIFFError(TIFF2PDF_MODULE, "Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s", (unsigned long) t2p->tiff_datasize, TIFFFileName(input)); t2p->t2p_error = T2P_ERR_ERROR; return(0); } memset(buffer, 0, t2p->tiff_datasize); if(t2p->pdf_ojpegiflength==0){ inputoffset=t2pSeekFile(input, 0, SEEK_CUR); t2pSeekFile(input, t2p->tiff_dataoffset, SEEK_SET); t2pReadFile(input, (tdata_t) buffer, t2p->tiff_datasize); t2pSeekFile(input, inputoffset, SEEK_SET); t2pWriteFile(output, (tdata_t) buffer, t2p->tiff_datasize); _TIFFfree(buffer); return(t2p->tiff_datasize); } else { inputoffset=t2pSeekFile(input, 0, SEEK_CUR); t2pSeekFile(input, t2p->tiff_dataoffset, SEEK_SET); bufferoffset = t2pReadFile(input, (tdata_t) buffer, t2p->pdf_ojpegiflength); t2p->pdf_ojpegiflength = 0; t2pSeekFile(input, inputoffset, SEEK_SET); TIFFGetField(input, TIFFTAG_YCBCRSUBSAMPLING, &h_samp, &v_samp); buffer[bufferoffset++]= 0xff; buffer[bufferoffset++]= 0xdd; buffer[bufferoffset++]= 0x00; buffer[bufferoffset++]= 0x04; h_samp*=8; v_samp*=8; ri=(t2p->tiff_width+h_samp-1) / h_samp; TIFFGetField(input, TIFFTAG_ROWSPERSTRIP, &rows); ri*=(rows+v_samp-1)/v_samp; 
buffer[bufferoffset++]= (ri>>8) & 0xff; buffer[bufferoffset++]= ri & 0xff; stripcount=TIFFNumberOfStrips(input); for(i=0;i<stripcount;i++){ if(i != 0 ){ buffer[bufferoffset++]=0xff; buffer[bufferoffset++]=(0xd0 | ((i-1)%8)); } bufferoffset+=TIFFReadRawStrip(input, i, (tdata_t) &(((unsigned char*)buffer)[bufferoffset]), -1); } t2pWriteFile(output, (tdata_t) buffer, bufferoffset); _TIFFfree(buffer); return(bufferoffset); } } else { if(! t2p->pdf_ojpegdata){ TIFFError(TIFF2PDF_MODULE, "No support for OJPEG image %s with bad tables", TIFFFileName(input)); t2p->t2p_error = T2P_ERR_ERROR; return(0); } buffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize); if(buffer==NULL){ TIFFError(TIFF2PDF_MODULE, "Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s", (unsigned long) t2p->tiff_datasize, TIFFFileName(input)); t2p->t2p_error = T2P_ERR_ERROR; return(0); } memset(buffer, 0, t2p->tiff_datasize); _TIFFmemcpy(buffer, t2p->pdf_ojpegdata, t2p->pdf_ojpegdatalength); bufferoffset=t2p->pdf_ojpegdatalength; stripcount=TIFFNumberOfStrips(input); for(i=0;i<stripcount;i++){ if(i != 0){ buffer[bufferoffset++]=0xff; buffer[bufferoffset++]=(0xd0 | ((i-1)%8)); } bufferoffset+=TIFFReadRawStrip(input, i, (tdata_t) &(((unsigned char*)buffer)[bufferoffset]), -1); } if( ! ( (buffer[bufferoffset-1]==0xd9) && (buffer[bufferoffset-2]==0xff) ) ){ buffer[bufferoffset++]=0xff; buffer[bufferoffset++]=0xd9; } t2pWriteFile(output, (tdata_t) buffer, bufferoffset); _TIFFfree(buffer); return(bufferoffset); #if 0 /* This hunk of code removed code is clearly mis-placed and we are not sure where it should be (if anywhere) */ TIFFError(TIFF2PDF_MODULE, "No support for OJPEG image %s with no JPEG File Interchange offset", TIFFFileName(input)); t2p->t2p_error = T2P_ERR_ERROR; return(0); #endif } } #endif /* ifdef OJPEG_SUPPORT */ #ifdef JPEG_SUPPORT if(t2p->tiff_compression == COMPRESSION_JPEG) { uint32 count = 0; buffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize); if(buffer==NULL){ TIFFError(TIFF2PDF_MODULE, "Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s", (unsigned long) t2p->tiff_datasize, TIFFFileName(input)); t2p->t2p_error = T2P_ERR_ERROR; return(0); } memset(buffer, 0, t2p->tiff_datasize); if (TIFFGetField(input, TIFFTAG_JPEGTABLES, &count, &jpt) != 0) { if(count > 4) { _TIFFmemcpy(buffer, jpt, count); bufferoffset += count - 2; } } stripcount=TIFFNumberOfStrips(input); TIFFGetField(input, TIFFTAG_STRIPBYTECOUNTS, &sbc); for(i=0;i<stripcount;i++){ if(sbc[i]>max_striplength) max_striplength=sbc[i]; } stripbuffer = (unsigned char*) _TIFFmalloc(max_striplength); if(stripbuffer==NULL){ TIFFError(TIFF2PDF_MODULE, "Can't allocate %u bytes of memory for t2p_readwrite_pdf_image, %s", max_striplength, TIFFFileName(input)); _TIFFfree(buffer); t2p->t2p_error = T2P_ERR_ERROR; return(0); } for(i=0;i<stripcount;i++){ striplength=TIFFReadRawStrip(input, i, (tdata_t) stripbuffer, -1); if(!t2p_process_jpeg_strip( stripbuffer, &striplength, buffer, t2p->tiff_datasize, &bufferoffset, i, t2p->tiff_length)){ TIFFError(TIFF2PDF_MODULE, "Can't process JPEG data in input file %s", TIFFFileName(input)); _TIFFfree(samplebuffer); _TIFFfree(buffer); t2p->t2p_error = T2P_ERR_ERROR; return(0); } } buffer[bufferoffset++]=0xff; buffer[bufferoffset++]=0xd9; t2pWriteFile(output, (tdata_t) buffer, bufferoffset); _TIFFfree(stripbuffer); _TIFFfree(buffer); return(bufferoffset); } #endif /* ifdef JPEG_SUPPORT */ (void)0; } if(t2p->pdf_sample==T2P_SAMPLE_NOTHING){ buffer = (unsigned char*) 
_TIFFmalloc(t2p->tiff_datasize); if(buffer==NULL){ TIFFError(TIFF2PDF_MODULE, "Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s", (unsigned long) t2p->tiff_datasize, TIFFFileName(input)); t2p->t2p_error = T2P_ERR_ERROR; return(0); } memset(buffer, 0, t2p->tiff_datasize); stripsize=TIFFStripSize(input); stripcount=TIFFNumberOfStrips(input); for(i=0;i<stripcount;i++){ read = TIFFReadEncodedStrip(input, i, (tdata_t) &buffer[bufferoffset], TIFFmin(stripsize, t2p->tiff_datasize - bufferoffset)); if(read==-1){ TIFFError(TIFF2PDF_MODULE, "Error on decoding strip %u of %s", i, TIFFFileName(input)); _TIFFfree(buffer); t2p->t2p_error=T2P_ERR_ERROR; return(0); } bufferoffset+=read; } } else { if(t2p->pdf_sample & T2P_SAMPLE_PLANAR_SEPARATE_TO_CONTIG){ sepstripsize=TIFFStripSize(input); sepstripcount=TIFFNumberOfStrips(input); stripsize=sepstripsize*t2p->tiff_samplesperpixel; stripcount=sepstripcount/t2p->tiff_samplesperpixel; buffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize); if(buffer==NULL){ TIFFError(TIFF2PDF_MODULE, "Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s", (unsigned long) t2p->tiff_datasize, TIFFFileName(input)); t2p->t2p_error = T2P_ERR_ERROR; return(0); } memset(buffer, 0, t2p->tiff_datasize); samplebuffer = (unsigned char*) _TIFFmalloc(stripsize); if(samplebuffer==NULL){ TIFFError(TIFF2PDF_MODULE, "Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s", (unsigned long) t2p->tiff_datasize, TIFFFileName(input)); t2p->t2p_error = T2P_ERR_ERROR; _TIFFfree(buffer); return(0); } for(i=0;i<stripcount;i++){ samplebufferoffset=0; for(j=0;j<t2p->tiff_samplesperpixel;j++){ read = TIFFReadEncodedStrip(input, i + j*stripcount, (tdata_t) &(samplebuffer[samplebufferoffset]), TIFFmin(sepstripsize, stripsize - samplebufferoffset)); if(read==-1){ TIFFError(TIFF2PDF_MODULE, "Error on decoding strip %u of %s", i + j*stripcount, TIFFFileName(input)); _TIFFfree(buffer); t2p->t2p_error=T2P_ERR_ERROR; return(0); } samplebufferoffset+=read; } t2p_sample_planar_separate_to_contig( t2p, &(buffer[bufferoffset]), samplebuffer, samplebufferoffset); bufferoffset+=samplebufferoffset; } _TIFFfree(samplebuffer); goto dataready; } buffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize); if(buffer==NULL){ TIFFError(TIFF2PDF_MODULE, "Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s", (unsigned long) t2p->tiff_datasize, TIFFFileName(input)); t2p->t2p_error = T2P_ERR_ERROR; return(0); } memset(buffer, 0, t2p->tiff_datasize); stripsize=TIFFStripSize(input); stripcount=TIFFNumberOfStrips(input); for(i=0;i<stripcount;i++){ read = TIFFReadEncodedStrip(input, i, (tdata_t) &buffer[bufferoffset], TIFFmin(stripsize, t2p->tiff_datasize - bufferoffset)); if(read==-1){ TIFFError(TIFF2PDF_MODULE, "Error on decoding strip %u of %s", i, TIFFFileName(input)); _TIFFfree(samplebuffer); _TIFFfree(buffer); t2p->t2p_error=T2P_ERR_ERROR; return(0); } bufferoffset+=read; } if(t2p->pdf_sample & T2P_SAMPLE_REALIZE_PALETTE){ // FIXME: overflow? 
samplebuffer=(unsigned char*)_TIFFrealloc( (tdata_t) buffer, t2p->tiff_datasize * t2p->tiff_samplesperpixel); if(samplebuffer==NULL){ TIFFError(TIFF2PDF_MODULE, "Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s", (unsigned long) t2p->tiff_datasize, TIFFFileName(input)); t2p->t2p_error = T2P_ERR_ERROR; _TIFFfree(buffer); return(0); } else { buffer=samplebuffer; t2p->tiff_datasize *= t2p->tiff_samplesperpixel; } t2p_sample_realize_palette(t2p, buffer); } if(t2p->pdf_sample & T2P_SAMPLE_RGBA_TO_RGB){ t2p->tiff_datasize=t2p_sample_rgba_to_rgb( (tdata_t)buffer, t2p->tiff_width*t2p->tiff_length); } if(t2p->pdf_sample & T2P_SAMPLE_RGBAA_TO_RGB){ t2p->tiff_datasize=t2p_sample_rgbaa_to_rgb( (tdata_t)buffer, t2p->tiff_width*t2p->tiff_length); } if(t2p->pdf_sample & T2P_SAMPLE_YCBCR_TO_RGB){ samplebuffer=(unsigned char*)_TIFFrealloc( (tdata_t)buffer, t2p->tiff_width*t2p->tiff_length*4); if(samplebuffer==NULL){ TIFFError(TIFF2PDF_MODULE, "Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s", (unsigned long) t2p->tiff_datasize, TIFFFileName(input)); t2p->t2p_error = T2P_ERR_ERROR; _TIFFfree(buffer); return(0); } else { buffer=samplebuffer; } if(!TIFFReadRGBAImageOriented( input, t2p->tiff_width, t2p->tiff_length, (uint32*)buffer, ORIENTATION_TOPLEFT, 0)){ TIFFError(TIFF2PDF_MODULE, "Can't use TIFFReadRGBAImageOriented to extract RGB image from %s", TIFFFileName(input)); t2p->t2p_error = T2P_ERR_ERROR; return(0); } t2p->tiff_datasize=t2p_sample_abgr_to_rgb( (tdata_t) buffer, t2p->tiff_width*t2p->tiff_length); } if(t2p->pdf_sample & T2P_SAMPLE_LAB_SIGNED_TO_UNSIGNED){ t2p->tiff_datasize=t2p_sample_lab_signed_to_unsigned( (tdata_t)buffer, t2p->tiff_width*t2p->tiff_length); } } dataready: t2p_disable(output); TIFFSetField(output, TIFFTAG_PHOTOMETRIC, t2p->tiff_photometric); TIFFSetField(output, TIFFTAG_BITSPERSAMPLE, t2p->tiff_bitspersample); TIFFSetField(output, TIFFTAG_SAMPLESPERPIXEL, t2p->tiff_samplesperpixel); TIFFSetField(output, TIFFTAG_IMAGEWIDTH, t2p->tiff_width); TIFFSetField(output, TIFFTAG_IMAGELENGTH, t2p->tiff_length); TIFFSetField(output, TIFFTAG_ROWSPERSTRIP, t2p->tiff_length); TIFFSetField(output, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG); TIFFSetField(output, TIFFTAG_FILLORDER, FILLORDER_MSB2LSB); switch(t2p->pdf_compression){ case T2P_COMPRESS_NONE: TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_NONE); break; #ifdef CCITT_SUPPORT case T2P_COMPRESS_G4: TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_CCITTFAX4); break; #endif /* ifdef CCITT_SUPPORT */ #ifdef JPEG_SUPPORT case T2P_COMPRESS_JPEG: if(t2p->tiff_photometric==PHOTOMETRIC_YCBCR) { uint16 hor = 0, ver = 0; if (TIFFGetField(input, TIFFTAG_YCBCRSUBSAMPLING, &hor, &ver) !=0 ) { if(hor != 0 && ver != 0){ TIFFSetField(output, TIFFTAG_YCBCRSUBSAMPLING, hor, ver); } } if(TIFFGetField(input, TIFFTAG_REFERENCEBLACKWHITE, &xfloatp)!=0){ TIFFSetField(output, TIFFTAG_REFERENCEBLACKWHITE, xfloatp); } } if(TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_JPEG)==0){ TIFFError(TIFF2PDF_MODULE, "Unable to use JPEG compression for input %s and output %s", TIFFFileName(input), TIFFFileName(output)); _TIFFfree(buffer); t2p->t2p_error = T2P_ERR_ERROR; return(0); } TIFFSetField(output, TIFFTAG_JPEGTABLESMODE, 0); if(t2p->pdf_colorspace & (T2P_CS_RGB | T2P_CS_LAB)){ TIFFSetField(output, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_YCBCR); if(t2p->tiff_photometric != PHOTOMETRIC_YCBCR){ TIFFSetField(output, TIFFTAG_JPEGCOLORMODE, JPEGCOLORMODE_RGB); } else { TIFFSetField(output, TIFFTAG_JPEGCOLORMODE, 
JPEGCOLORMODE_RAW); } } if(t2p->pdf_colorspace & T2P_CS_GRAY){ (void)0; } if(t2p->pdf_colorspace & T2P_CS_CMYK){ (void)0; } if(t2p->pdf_defaultcompressionquality != 0){ TIFFSetField(output, TIFFTAG_JPEGQUALITY, t2p->pdf_defaultcompressionquality); } break; #endif /* ifdef JPEG_SUPPORT */ #ifdef ZIP_SUPPORT case T2P_COMPRESS_ZIP: TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_DEFLATE); if(t2p->pdf_defaultcompressionquality%100 != 0){ TIFFSetField(output, TIFFTAG_PREDICTOR, t2p->pdf_defaultcompressionquality % 100); } if(t2p->pdf_defaultcompressionquality/100 != 0){ TIFFSetField(output, TIFFTAG_ZIPQUALITY, (t2p->pdf_defaultcompressionquality / 100)); } break; #endif /* ifdef ZIP_SUPPORT */ default: break; } t2p_enable(output); t2p->outputwritten = 0; #ifdef JPEG_SUPPORT if(t2p->pdf_compression == T2P_COMPRESS_JPEG && t2p->tiff_photometric == PHOTOMETRIC_YCBCR){ bufferoffset = TIFFWriteEncodedStrip(output, (tstrip_t)0, buffer, stripsize * stripcount); } else #endif /* ifdef JPEG_SUPPORT */ { bufferoffset = TIFFWriteEncodedStrip(output, (tstrip_t)0, buffer, t2p->tiff_datasize); } if (buffer != NULL) { _TIFFfree(buffer); buffer=NULL; } if (bufferoffset == (tsize_t)-1) { TIFFError(TIFF2PDF_MODULE, "Error writing encoded strip to output PDF %s", TIFFFileName(output)); t2p->t2p_error = T2P_ERR_ERROR; return(0); } written = t2p->outputwritten; return(written); }
target: 0
cwe: [ "CWE-119" ]
project: libtiff
commit_id: b5d6803f0898e931cf772d3d0755704ab8488e63
hash: 230,066,797,823,251,500,000,000,000,000,000,000,000
size: 591
* tools/tiff2pdf.c: fix write buffer overflow of 2 bytes on JPEG compressed images. Reported by Tyler Bohan of Cisco Talos as TALOS-CAN-0187 / CVE-2016-5652. Also prevents writing 2 extra uninitialized bytes to the file stream.
static void slot_put(struct gfs2_quota_data *qd) { struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; spin_lock(&qd_lru_lock); gfs2_assert(sdp, qd->qd_slot_count); if (!--qd->qd_slot_count) { gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0); qd->qd_slot = -1; } spin_unlock(&qd_lru_lock); }
target: 0
cwe: [ "CWE-399" ]
project: linux-2.6
commit_id: 7e619bc3e6252dc746f64ac3b486e784822e9533
hash: 322,317,919,460,504,740,000,000,000,000,000,000,000
size: 12
GFS2: Fix writing to non-page aligned gfs2_quota structures This is the upstream fix for this bug. This patch differs from the RHEL5 fix (Red Hat bz #555754) which simply writes to the 8-byte value field of the quota. In upstream quota code, we're required to write the entire quota (88 bytes) which can be split across a page boundary. We check for such quotas, and read/write the two parts from/to the corresponding pages holding these parts. With this patch, I don't see the bug anymore using the reproducer in Red Hat bz 555754. I successfully ran a couple of simple tests/mounts/ umounts and it doesn't seem like this patch breaks anything else. Signed-off-by: Abhi Das <[email protected]> Signed-off-by: Steven Whitehouse <[email protected]>
static auto makeServerDescriptionList() { return std::vector<ServerDescriptionPtr>{ make_with_latency(Milliseconds(1), HostAndPort("s1"), ServerType::kRSSecondary, {{"dc", "east"}, {"usage", "production"}}), make_with_latency(Milliseconds(1), HostAndPort("s1-test"), ServerType::kRSSecondary, {{"dc", "east"}, {"usage", "test"}}), make_with_latency(Milliseconds(1), HostAndPort("s2"), ServerType::kRSSecondary, {{"dc", "west"}, {"usage", "production"}}), make_with_latency(Milliseconds(1), HostAndPort("s2-test"), ServerType::kRSSecondary, {{"dc", "west"}, {"usage", "test"}}), make_with_latency(Milliseconds(1), HostAndPort("s3"), ServerType::kRSSecondary, {{"dc", "north"}, {"usage", "production"}})}; };
target: 0
cwe: [ "CWE-755" ]
project: mongo
commit_id: 75f7184eafa78006a698cda4c4adfb57f1290047
hash: 77,316,393,406,491,700,000,000,000,000,000,000,000
size: 23
SERVER-50170 fix max staleness read preference parameter for server selection
CtPtr ProtocolV2::finish_auth() { ceph_assert(auth_meta); // TODO: having a possibility to check whether we're server or client could // allow reusing finish_auth(). session_stream_handlers = \ ceph::crypto::onwire::rxtx_t::create_handler_pair(cct, *auth_meta, true); const auto sig = auth_meta->session_key.empty() ? sha256_digest_t() : auth_meta->session_key.hmac_sha256(cct, pre_auth.rxbuf); auto sig_frame = AuthSignatureFrame::Encode(sig); pre_auth.enabled = false; pre_auth.rxbuf.clear(); return WRITE(sig_frame, "auth signature", read_frame); }
target: 0
cwe: [ "CWE-323" ]
project: ceph
commit_id: 20b7bb685c5ea74c651ca1ea547ac66b0fee7035
hash: 41,611,921,659,426,500,000,000,000,000,000,000,000
size: 15
msg/async/ProtocolV2: avoid AES-GCM nonce reuse vulnerabilities The secure mode uses AES-128-GCM with 96-bit nonces consisting of a 32-bit counter followed by a 64-bit salt. The counter is incremented after processing each frame, the salt is fixed for the duration of the session. Both are initialized from the session key generated during session negotiation, so the counter starts with essentially a random value. It is allowed to wrap, and, after 2**32 frames, it repeats, resulting in nonce reuse (the actual sequence numbers that the messenger works with are 64-bit, so the session continues on). Because of how GCM works, this completely breaks both confidentiality and integrity aspects of the secure mode. A single nonce reuse reveals the XOR of two plaintexts and almost completely reveals the subkey used for producing authentication tags. After a few nonces get used twice, all confidentiality and integrity goes out the window and the attacker can potentially encrypt-authenticate plaintext of their choice. We can't easily change the nonce format to extend the counter to 64 bits (and possibly XOR it with a longer salt). Instead, just remember the initial nonce and cut the session before it repeats, forcing renegotiation. Signed-off-by: Ilya Dryomov <[email protected]> Reviewed-by: Radoslaw Zarzynski <[email protected]> Reviewed-by: Sage Weil <[email protected]> Conflicts: src/msg/async/ProtocolV2.h [ context: commit ed3ec4c01d17 ("msg: Build target 'common' without using namespace in headers") not in octopus ]
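The mitigation described above boils down to remembering the starting nonce counter and cutting the session before the 32-bit counter comes back around, so no (key, nonce) pair is ever used twice. A stripped-down sketch of that bookkeeping, independent of Ceph's actual frame handling:

```c
#include <stdbool.h>
#include <stdint.h>

/* 96-bit AES-GCM nonce split as described above: a 32-bit per-frame
 * counter followed by a fixed 64-bit salt. */
struct gcm_nonce {
    uint32_t counter;
    uint64_t salt;
};

struct secure_session {
    struct gcm_nonce nonce;           /* nonce for the next frame            */
    uint32_t         initial_counter; /* counter value the session started with */
    bool             used_initial;    /* has at least one frame been sent?   */
};

/* Returns false when the session must be cut and renegotiated: sending one
 * more frame would reuse the initial (counter, salt) pair under the same key. */
bool next_nonce(struct secure_session *s, struct gcm_nonce *out)
{
    if (s->used_initial && s->nonce.counter == s->initial_counter)
        return false;                 /* counter wrapped all the way around */

    *out = s->nonce;
    s->nonce.counter++;               /* 32-bit wrap-around is intentional */
    s->used_initial = true;
    return true;
}
```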
PHP_FUNCTION(mb_eregi) { _php_mb_regex_ereg_exec(INTERNAL_FUNCTION_PARAM_PASSTHRU, 1); }
target: 0
cwe: [ "CWE-415" ]
project: php-src
commit_id: 5b597a2e5b28e2d5a52fc1be13f425f08f47cb62
hash: 8,903,933,569,651,264,000,000,000,000,000,000,000
size: 4
Fix bug #72402: _php_mb_regex_ereg_replace_exec - double free
bool CTransaction::AddToMemoryPoolUnchecked() { printf("AcceptToMemoryPoolUnchecked(): size %lu\n", mapTransactions.size()); // Add to memory pool without checking anything. Don't call this directly, // call AcceptToMemoryPool to properly check the transaction first. CRITICAL_BLOCK(cs_mapTransactions) { uint256 hash = GetHash(); mapTransactions[hash] = *this; for (int i = 0; i < vin.size(); i++) mapNextTx[vin[i].prevout] = CInPoint(&mapTransactions[hash], i); nTransactionsUpdated++; ++nPooledTx; } return true; }
target: 0
cwe: [ "CWE-16", "CWE-787" ]
project: bitcoin
commit_id: a206b0ea12eb4606b93323268fc81a4f1f952531
hash: 140,095,037,105,042,700,000,000,000,000,000,000,000
size: 16
Do not allow overwriting unspent transactions (BIP 30) Introduce the following network rule: * a block is not valid if it contains a transaction whose hash already exists in the block chain, unless all that transaction's outputs were already spent before said block. Warning: this is effectively a network rule change, with potential risk for forking the block chain. Leaving this unfixed carries the same risk however, for attackers that can cause a reorganisation in part of the network. Thanks to Russell O'Connor and Ben Reeves.
onig_set_callout_of_name(OnigEncoding enc, OnigCalloutType callout_type, UChar* name, UChar* name_end, int in, OnigCalloutFunc start_func, OnigCalloutFunc end_func, int arg_num, unsigned int arg_types[], int opt_arg_num, OnigValue opt_defaults[]) { int r; int i; int j; int id; int is_not_single; CalloutNameEntry* e; CalloutNameListEntry* fe; if (callout_type != ONIG_CALLOUT_TYPE_SINGLE) return ONIGERR_INVALID_ARGUMENT; if (arg_num < 0 || arg_num > ONIG_CALLOUT_MAX_ARGS_NUM) return ONIGERR_INVALID_CALLOUT_ARG; if (opt_arg_num < 0 || opt_arg_num > arg_num) return ONIGERR_INVALID_CALLOUT_ARG; if (start_func == 0 && end_func == 0) return ONIGERR_INVALID_CALLOUT_ARG; if ((in & ONIG_CALLOUT_IN_PROGRESS) == 0 && (in & ONIG_CALLOUT_IN_RETRACTION) == 0) return ONIGERR_INVALID_CALLOUT_ARG; for (i = 0; i < arg_num; i++) { unsigned int t = arg_types[i]; if (t == ONIG_TYPE_VOID) return ONIGERR_INVALID_CALLOUT_ARG; else { if (i >= arg_num - opt_arg_num) { if (t != ONIG_TYPE_LONG && t != ONIG_TYPE_CHAR && t != ONIG_TYPE_STRING && t != ONIG_TYPE_TAG) return ONIGERR_INVALID_CALLOUT_ARG; } else { if (t != ONIG_TYPE_LONG) { t = t & ~ONIG_TYPE_LONG; if (t != ONIG_TYPE_CHAR && t != ONIG_TYPE_STRING && t != ONIG_TYPE_TAG) return ONIGERR_INVALID_CALLOUT_ARG; } } } } if (! is_allowed_callout_name(enc, name, name_end)) { return ONIGERR_INVALID_CALLOUT_NAME; } is_not_single = (callout_type != ONIG_CALLOUT_TYPE_SINGLE); id = callout_name_entry(&e, enc, is_not_single, name, name_end); if (id < 0) return id; r = ONIG_NORMAL; if (IS_NULL(GlobalCalloutNameList)) { r = make_callout_func_list(&GlobalCalloutNameList, 10); if (r != ONIG_NORMAL) return r; } while (id >= GlobalCalloutNameList->n) { int rid; r = callout_func_list_add(GlobalCalloutNameList, &rid); if (r != ONIG_NORMAL) return r; } fe = GlobalCalloutNameList->v + id; fe->type = callout_type; fe->in = in; fe->start_func = start_func; fe->end_func = end_func; fe->arg_num = arg_num; fe->opt_arg_num = opt_arg_num; fe->name = e->name; for (i = 0; i < arg_num; i++) { fe->arg_types[i] = arg_types[i]; } for (i = arg_num - opt_arg_num, j = 0; i < arg_num; i++, j++) { if (fe->arg_types[i] == ONIG_TYPE_STRING) { OnigValue* val = opt_defaults + j; UChar* ds = onigenc_strdup(enc, val->s.start, val->s.end); CHECK_NULL_RETURN_MEMERR(ds); fe->opt_defaults[i].s.start = ds; fe->opt_defaults[i].s.end = ds + (val->s.end - val->s.start); } else { fe->opt_defaults[i] = opt_defaults[j]; } } r = id; // return id return r; }
target: 0
cwe: [ "CWE-476" ]
project: oniguruma
commit_id: 850bd9b0d8186eb1637722b46b12656814ab4ad2
hash: 88,774,107,347,915,580,000,000,000,000,000,000,000
size: 99
fix #87: Read unknown address in onig_error_code_to_str()
int register_inet6addr_notifier(struct notifier_block *nb) { return notifier_chain_register(&inet6addr_chain, nb); }
target: 0
cwe: [ "CWE-200" ]
project: linux-2.6
commit_id: 8a47077a0b5aa2649751c46e7a27884e6686ccbf
hash: 202,028,330,552,753,000,000,000,000,000,000,000,000
size: 4
[NETLINK]: Missing padding fields in dumped structures Plug holes with padding fields and initialized them to zero. Signed-off-by: Patrick McHardy <[email protected]> Signed-off-by: David S. Miller <[email protected]>
InterfaceLuid(const char *iface_name, PNET_LUID luid) { NETIO_STATUS status; LPWSTR wide_name = utf8to16(iface_name); if (wide_name) { status = ConvertInterfaceAliasToLuid(wide_name, luid); free(wide_name); } else { status = ERROR_OUTOFMEMORY; } return status; }
target: 0
cwe: [ "CWE-415" ]
project: openvpn
commit_id: 1394192b210cb3c6624a7419bcf3ff966742e79b
hash: 172,860,253,244,777,520,000,000,000,000,000,000,000
size: 16
Fix potential double-free() in Interactive Service (CVE-2018-9336) Malformed input data on the service pipe towards the OpenVPN interactive service (normally used by the OpenVPN GUI to request openvpn instances from the service) can result in a double free() in the error handling code. This usually only leads to a process crash (DoS by an unprivileged local account) but since it could possibly lead to memory corruption if happening while multiple other threads are active at the same time, CVE-2018-9336 has been assigned to acknowledge this risk. Fix by ensuring that sud->directory is set to NULL in GetStartUpData() for all error cases (thus not being free()ed in FreeStartupData()). Rewrite control flow to use explicit error label for error exit. Discovered and reported by Jacob Baines <[email protected]>. CVE: 2018-9336 Signed-off-by: Gert Doering <[email protected]> Acked-by: Selva Nair <[email protected]> Message-Id: <[email protected]> URL: https://www.mail-archive.com/search?l=mid&[email protected] Signed-off-by: Gert Doering <[email protected]>
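The fix pattern described above is twofold: keep the struct's pointers NULL until they actually own memory, and route every failure through one exit label so the shared cleanup frees each pointer at most once. A generic sketch with placeholder names, not the OpenVPN service code:

```c
#define _POSIX_C_SOURCE 200809L
#include <stdlib.h>
#include <string.h>

struct startup_data {
    char *directory;
    char *options;
};

static void free_startup_data(struct startup_data *sud)
{
    free(sud->directory);   /* free(NULL) is a no-op, so this is always safe */
    free(sud->options);
    sud->directory = NULL;
    sud->options = NULL;
}

/* Parse two NUL-separated fields out of 'input'; on any failure leave every
 * pointer NULL (or freed and reset) so the caller's cleanup cannot free the
 * same block twice. */
int get_startup_data(const char *input, size_t len, struct startup_data *sud)
{
    memset(sud, 0, sizeof(*sud));           /* start from a known-clean state */

    const char *sep = memchr(input, '\0', len);
    if (sep == NULL)
        goto out;                           /* malformed input */

    sud->directory = strndup(input, (size_t)(sep - input));
    if (sud->directory == NULL)
        goto out;

    sud->options = strndup(sep + 1, len - (size_t)(sep - input) - 1);
    if (sud->options == NULL)
        goto out;

    return 0;                               /* success: caller owns both fields */

out:
    free_startup_data(sud);                 /* single error path, frees at most once */
    return -1;
}
```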
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable) { u32 phy; if (!tg3_flag(tp, 5705_PLUS) || (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) return; if (tp->phy_flags & TG3_PHYFLG_IS_FET) { u32 ephy; if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { u32 reg = MII_TG3_FET_SHDW_MISCCTRL; tg3_writephy(tp, MII_TG3_FET_TEST, ephy | MII_TG3_FET_SHADOW_EN); if (!tg3_readphy(tp, reg, &phy)) { if (enable) phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; else phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; tg3_writephy(tp, reg, phy); } tg3_writephy(tp, MII_TG3_FET_TEST, ephy); } } else { int ret; ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); if (!ret) { if (enable) phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; else phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, phy); } } }
target: 0
cwe: [ "CWE-476", "CWE-119" ]
project: linux
commit_id: 715230a44310a8cf66fbfb5a46f9a62a9b2de424
hash: 123,165,205,338,819,600,000,000,000,000,000,000,000
size: 40
tg3: fix length overflow in VPD firmware parsing Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version when present") introduced VPD parsing that contained a potential length overflow. Limit the hardware's reported firmware string length (max 255 bytes) to stay inside the driver's firmware string length (32 bytes). On overflow, truncate the formatted firmware string instead of potentially overwriting portions of the tg3 struct. http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf Signed-off-by: Kees Cook <[email protected]> Reported-by: Oded Horovitz <[email protected]> Reported-by: Brad Spengler <[email protected]> Cc: [email protected] Cc: Matt Carlson <[email protected]> Signed-off-by: David S. Miller <[email protected]>
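The overflow above comes from trusting a device-reported string length (up to 255 bytes) when copying into a fixed 32-byte field. A generic sketch of the clamp-then-truncate pattern, not the tg3 driver code:

```c
#include <stdio.h>
#include <string.h>

#define FW_VER_LEN 32   /* size of the driver's firmware-version field */

/* Copy a firmware version string reported by the hardware into a fixed
 * driver buffer, truncating instead of overflowing when the device lies
 * about (or legitimately exceeds) the expected length. */
void set_fw_version(char dst[FW_VER_LEN], const char *vpd, size_t vpd_len)
{
    /* Clamp the untrusted length first... */
    if (vpd_len > FW_VER_LEN - 1)
        vpd_len = FW_VER_LEN - 1;

    /* ...then let snprintf enforce the destination bound as well; the
     * precision also stops it from reading past vpd_len source bytes. */
    snprintf(dst, FW_VER_LEN, "%.*s", (int)vpd_len, vpd);
}
```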
static int gp_interception(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); u32 error_code = svm->vmcb->control.exit_info_1; int opcode; /* Both #GP cases have zero error_code */ if (error_code) goto reinject; /* Decode the instruction for usage later */ if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK) goto reinject; opcode = svm_instr_opcode(vcpu); if (opcode == NONE_SVM_INSTR) { if (!enable_vmware_backdoor) goto reinject; /* * VMware backdoor emulation on #GP interception only handles * IN{S}, OUT{S}, and RDPMC. */ if (!is_guest_mode(vcpu)) return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE); } else { /* All SVM instructions expect page aligned RAX */ if (svm->vmcb->save.rax & ~PAGE_MASK) goto reinject; return emulate_svm_instr(vcpu, opcode); } reinject: kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); return 1; }
target: 0
cwe: [ "CWE-703" ]
project: linux
commit_id: 6cd88243c7e03845a450795e134b488fc2afb736
hash: 320,515,364,512,391,260,000,000,000,000,000,000,000
size: 39
KVM: x86: do not report a vCPU as preempted outside instruction boundaries If a vCPU is outside guest mode and is scheduled out, it might be in the process of making a memory access. A problem occurs if another vCPU uses the PV TLB flush feature during the period when the vCPU is scheduled out, and a virtual address has already been translated but has not yet been accessed, because this is equivalent to using a stale TLB entry. To avoid this, only report a vCPU as preempted if sure that the guest is at an instruction boundary. A rescheduling request will be delivered to the host physical CPU as an external interrupt, so for simplicity consider any vmexit *not* instruction boundary except for external interrupts. It would in principle be okay to report the vCPU as preempted also if it is sleeping in kvm_vcpu_block(): a TLB flush IPI will incur the vmentry/vmexit overhead unnecessarily, and optimistic spinning is also unlikely to succeed. However, leave it for later because right now kvm_vcpu_check_block() is doing memory accesses. Even though the TLB flush issue only applies to virtual memory address, it's very much preferrable to be conservative. Reported-by: Jann Horn <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
static void xfrm_policy_kill(struct xfrm_policy *policy) { write_lock_bh(&policy->lock); policy->walk.dead = 1; write_unlock_bh(&policy->lock); atomic_inc(&policy->genid); if (del_timer(&policy->polq.hold_timer)) xfrm_pol_put(policy); skb_queue_purge(&policy->polq.hold_queue); if (del_timer(&policy->timer)) xfrm_pol_put(policy); xfrm_pol_put(policy); }
target: 0
cwe: [ "CWE-703" ]
project: linux
commit_id: f85daf0e725358be78dfd208dea5fd665d8cb901
hash: 240,460,962,696,423,500,000,000,000,000,000,000,000
size: 17
xfrm: xfrm_policy: fix a possible double xfrm_pols_put() in xfrm_bundle_lookup() xfrm_policy_lookup() will call xfrm_pol_hold_rcu() to get a refcount of pols[0]. This refcount can be dropped in xfrm_expand_policies() when xfrm_expand_policies() return error. pols[0]'s refcount is balanced in here. But xfrm_bundle_lookup() will also call xfrm_pols_put() with num_pols == 1 to drop this refcount when xfrm_expand_policies() return error. This patch also fix an illegal address access. pols[0] will save a error point when xfrm_policy_lookup fails. This lead to xfrm_pols_put to resolve an illegal address in xfrm_bundle_lookup's error path. Fix these by setting num_pols = 0 in xfrm_expand_policies()'s error path. Fixes: 80c802f3073e ("xfrm: cache bundles instead of policies for outgoing flows") Signed-off-by: Hangyu Hua <[email protected]> Signed-off-by: Steffen Klassert <[email protected]>
static double mp_solve(_cimg_math_parser& mp) { double *ptrd = &_mp_arg(1) + 1; const double *ptr1 = &_mp_arg(2) + 1, *ptr2 = &_mp_arg(3) + 1; const unsigned int k = (unsigned int)mp.opcode[4], l = (unsigned int)mp.opcode[5], m = (unsigned int)mp.opcode[6]; CImg<doubleT>(ptrd,m,k,1,1,true) = CImg<doubleT>(ptr2,m,l,1,1,false).solve(CImg<doubleT>(ptr1,k,l,1,1,true)); return cimg::type<double>::nan(); }
target: 0
cwe: [ "CWE-770" ]
project: cimg
commit_id: 619cb58dd90b4e03ac68286c70ed98acbefd1c90
hash: 79,964,447,972,688,455,000,000,000,000,000,000,000
size: 12
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
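The check referenced above guards against header-declared image dimensions that imply more pixel data than the file can possibly contain. A generic, overflow-safe version of that validation, offered as an illustration rather than CImg's actual loader:

```c
#include <stdbool.h>
#include <stdint.h>

/* Return true when a width x height image with bytes_per_pixel bytes per
 * pixel could plausibly fit in a file of file_size bytes. The divisions
 * avoid the 64-bit multiplication overflow a naive w*h*bpp check invites. */
bool dimensions_fit_file(uint64_t width, uint64_t height,
                         uint64_t bytes_per_pixel, uint64_t file_size)
{
    if (width == 0 || height == 0 || bytes_per_pixel == 0)
        return false;

    /* width * height would overflow, or already exceeds the file size. */
    if (width > file_size / height)
        return false;

    uint64_t pixels = width * height;
    if (pixels > file_size / bytes_per_pixel)
        return false;

    return true;   /* width * height * bytes_per_pixel <= file_size */
}
```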
ReadBMP (const gchar *name, GError **error) { FILE *fd; guchar buffer[64]; gint ColormapSize, rowbytes, Maps; gboolean Grey = FALSE; guchar ColorMap[256][3]; gint32 image_ID; gchar magick[2]; Bitmap_Channel masks[4]; filename = name; fd = g_fopen (filename, "rb"); if (!fd) { g_set_error (error, G_FILE_ERROR, g_file_error_from_errno (errno), _("Could not open '%s' for reading: %s"), gimp_filename_to_utf8 (filename), g_strerror (errno)); return -1; } gimp_progress_init_printf (_("Opening '%s'"), gimp_filename_to_utf8 (name)); /* It is a File. Now is it a Bitmap? Read the shortest possible header */ if (!ReadOK (fd, magick, 2) || !(!strncmp (magick, "BA", 2) || !strncmp (magick, "BM", 2) || !strncmp (magick, "IC", 2) || !strncmp (magick, "PI", 2) || !strncmp (magick, "CI", 2) || !strncmp (magick, "CP", 2))) { g_set_error (error, G_FILE_ERROR, G_FILE_ERROR_FAILED, _("'%s' is not a valid BMP file"), gimp_filename_to_utf8 (filename)); return -1; } while (!strncmp (magick, "BA", 2)) { if (!ReadOK (fd, buffer, 12)) { g_set_error (error, G_FILE_ERROR, G_FILE_ERROR_FAILED, _("'%s' is not a valid BMP file"), gimp_filename_to_utf8 (filename)); return -1; } if (!ReadOK (fd, magick, 2)) { g_set_error (error, G_FILE_ERROR, G_FILE_ERROR_FAILED, _("'%s' is not a valid BMP file"), gimp_filename_to_utf8 (filename)); return -1; } } if (!ReadOK (fd, buffer, 12)) { g_set_error (error, G_FILE_ERROR, G_FILE_ERROR_FAILED, _("'%s' is not a valid BMP file"), gimp_filename_to_utf8 (filename)); return -1; } /* bring them to the right byteorder. Not too nice, but it should work */ Bitmap_File_Head.bfSize = ToL (&buffer[0x00]); Bitmap_File_Head.zzHotX = ToS (&buffer[0x04]); Bitmap_File_Head.zzHotY = ToS (&buffer[0x06]); Bitmap_File_Head.bfOffs = ToL (&buffer[0x08]); if (!ReadOK (fd, buffer, 4)) { g_set_error (error, G_FILE_ERROR, G_FILE_ERROR_FAILED, _("'%s' is not a valid BMP file"), gimp_filename_to_utf8 (filename)); return -1; } Bitmap_File_Head.biSize = ToL (&buffer[0x00]); /* What kind of bitmap is it? */ if (Bitmap_File_Head.biSize == 12) /* OS/2 1.x ? 
*/ { if (!ReadOK (fd, buffer, 8)) { g_set_error (error, G_FILE_ERROR, G_FILE_ERROR_FAILED, _("Error reading BMP file header from '%s'"), gimp_filename_to_utf8 (filename)); return -1; } Bitmap_Head.biWidth = ToS (&buffer[0x00]); /* 12 */ Bitmap_Head.biHeight = ToS (&buffer[0x02]); /* 14 */ Bitmap_Head.biPlanes = ToS (&buffer[0x04]); /* 16 */ Bitmap_Head.biBitCnt = ToS (&buffer[0x06]); /* 18 */ Bitmap_Head.biCompr = 0; Bitmap_Head.biSizeIm = 0; Bitmap_Head.biXPels = Bitmap_Head.biYPels = 0; Bitmap_Head.biClrUsed = 0; Bitmap_Head.biClrImp = 0; Bitmap_Head.masks[0] = 0; Bitmap_Head.masks[1] = 0; Bitmap_Head.masks[2] = 0; Bitmap_Head.masks[3] = 0; memset(masks, 0, sizeof(masks)); Maps = 3; } else if (Bitmap_File_Head.biSize == 40) /* Windows 3.x */ { if (!ReadOK (fd, buffer, 36)) { g_set_error (error, G_FILE_ERROR, G_FILE_ERROR_FAILED, _("Error reading BMP file header from '%s'"), gimp_filename_to_utf8 (filename)); return -1; } Bitmap_Head.biWidth = ToL (&buffer[0x00]); /* 12 */ Bitmap_Head.biHeight = ToL (&buffer[0x04]); /* 16 */ Bitmap_Head.biPlanes = ToS (&buffer[0x08]); /* 1A */ Bitmap_Head.biBitCnt = ToS (&buffer[0x0A]); /* 1C */ Bitmap_Head.biCompr = ToL (&buffer[0x0C]); /* 1E */ Bitmap_Head.biSizeIm = ToL (&buffer[0x10]); /* 22 */ Bitmap_Head.biXPels = ToL (&buffer[0x14]); /* 26 */ Bitmap_Head.biYPels = ToL (&buffer[0x18]); /* 2A */ Bitmap_Head.biClrUsed = ToL (&buffer[0x1C]); /* 2E */ Bitmap_Head.biClrImp = ToL (&buffer[0x20]); /* 32 */ Bitmap_Head.masks[0] = 0; Bitmap_Head.masks[1] = 0; Bitmap_Head.masks[2] = 0; Bitmap_Head.masks[3] = 0; Maps = 4; memset(masks, 0, sizeof(masks)); if (Bitmap_Head.biCompr == BI_BITFIELDS) { if (!ReadOK (fd, buffer, 3 * sizeof (guint32))) { g_set_error (error, G_FILE_ERROR, G_FILE_ERROR_FAILED, _("Error reading BMP file header from '%s'"), gimp_filename_to_utf8 (filename)); return -1; } Bitmap_Head.masks[0] = ToL(&buffer[0x00]); Bitmap_Head.masks[1] = ToL(&buffer[0x04]); Bitmap_Head.masks[2] = ToL(&buffer[0x08]); ReadChannelMasks (&Bitmap_Head.masks[0], masks, 3); } else switch (Bitmap_Head.biBitCnt) { case 32: masks[0].mask = 0x00ff0000; masks[0].shiftin = 16; masks[0].max_value= (gfloat)255.0; masks[1].mask = 0x0000ff00; masks[1].shiftin = 8; masks[1].max_value= (gfloat)255.0; masks[2].mask = 0x000000ff; masks[2].shiftin = 0; masks[2].max_value= (gfloat)255.0; masks[3].mask = 0xff000000; masks[3].shiftin = 24; masks[3].max_value= (gfloat)255.0; break; case 24: masks[0].mask = 0xff0000; masks[0].shiftin = 16; masks[0].max_value= (gfloat)255.0; masks[1].mask = 0x00ff00; masks[1].shiftin = 8; masks[1].max_value= (gfloat)255.0; masks[2].mask = 0x0000ff; masks[2].shiftin = 0; masks[2].max_value= (gfloat)255.0; masks[3].mask = 0x0; masks[3].shiftin = 0; masks[3].max_value= (gfloat)0.0; break; case 16: masks[0].mask = 0x7c00; masks[0].shiftin = 10; masks[0].max_value= (gfloat)31.0; masks[1].mask = 0x03e0; masks[1].shiftin = 5; masks[1].max_value= (gfloat)31.0; masks[2].mask = 0x001f; masks[2].shiftin = 0; masks[2].max_value= (gfloat)31.0; masks[3].mask = 0x0; masks[3].shiftin = 0; masks[3].max_value= (gfloat)0.0; break; default: break; } } else if (Bitmap_File_Head.biSize >= 56 && Bitmap_File_Head.biSize <= 64) /* enhanced Windows format with bit masks */ { if (!ReadOK (fd, buffer, Bitmap_File_Head.biSize - 4)) { g_set_error (error, G_FILE_ERROR, G_FILE_ERROR_FAILED, _("Error reading BMP file header from '%s'"), gimp_filename_to_utf8 (filename)); return -1; } Bitmap_Head.biWidth =ToL (&buffer[0x00]); /* 12 */ Bitmap_Head.biHeight =ToL (&buffer[0x04]); /* 16 */ 
Bitmap_Head.biPlanes =ToS (&buffer[0x08]); /* 1A */ Bitmap_Head.biBitCnt =ToS (&buffer[0x0A]); /* 1C */ Bitmap_Head.biCompr =ToL (&buffer[0x0C]); /* 1E */ Bitmap_Head.biSizeIm =ToL (&buffer[0x10]); /* 22 */ Bitmap_Head.biXPels =ToL (&buffer[0x14]); /* 26 */ Bitmap_Head.biYPels =ToL (&buffer[0x18]); /* 2A */ Bitmap_Head.biClrUsed =ToL (&buffer[0x1C]); /* 2E */ Bitmap_Head.biClrImp =ToL (&buffer[0x20]); /* 32 */ Bitmap_Head.masks[0] =ToL (&buffer[0x24]); /* 36 */ Bitmap_Head.masks[1] =ToL (&buffer[0x28]); /* 3A */ Bitmap_Head.masks[2] =ToL (&buffer[0x2C]); /* 3E */ Bitmap_Head.masks[3] =ToL (&buffer[0x30]); /* 42 */ Maps = 4; ReadChannelMasks (&Bitmap_Head.masks[0], masks, 4); } else { GdkPixbuf* pixbuf = gdk_pixbuf_new_from_file(filename, NULL); if (pixbuf) { gint32 layer_ID; image_ID = gimp_image_new (gdk_pixbuf_get_width (pixbuf), gdk_pixbuf_get_height (pixbuf), GIMP_RGB); layer_ID = gimp_layer_new_from_pixbuf (image_ID, _("Background"), pixbuf, 100., GIMP_NORMAL_MODE, 0, 0); g_object_unref (pixbuf); gimp_image_set_filename (image_ID, filename); gimp_image_add_layer (image_ID, layer_ID, -1); return image_ID; } else { g_set_error (error, G_FILE_ERROR, G_FILE_ERROR_FAILED, _("Error reading BMP file header from '%s'"), gimp_filename_to_utf8 (filename)); return -1; } } /* Valid bitpdepthis 1, 4, 8, 16, 24, 32 */ /* 16 is awful, we should probably shoot whoever invented it */ /* There should be some colors used! */ ColormapSize = (Bitmap_File_Head.bfOffs - Bitmap_File_Head.biSize - 14) / Maps; if ((Bitmap_Head.biClrUsed == 0) && (Bitmap_Head.biBitCnt <= 8)) ColormapSize = Bitmap_Head.biClrUsed = 1 << Bitmap_Head.biBitCnt; if (ColormapSize > 256) ColormapSize = 256; /* Sanity checks */ if (Bitmap_Head.biHeight == 0 || Bitmap_Head.biWidth == 0) { g_set_error (error, G_FILE_ERROR, G_FILE_ERROR_FAILED, _("'%s' is not a valid BMP file"), gimp_filename_to_utf8 (filename)); return -1; } if (Bitmap_Head.biWidth < 0) { g_set_error (error, G_FILE_ERROR, G_FILE_ERROR_FAILED, _("'%s' is not a valid BMP file"), gimp_filename_to_utf8 (filename)); return -1; } if (Bitmap_Head.biPlanes != 1) { g_set_error (error, G_FILE_ERROR, G_FILE_ERROR_FAILED, _("'%s' is not a valid BMP file"), gimp_filename_to_utf8 (filename)); return -1; } if (Bitmap_Head.biClrUsed > 256) { g_set_error (error, G_FILE_ERROR, G_FILE_ERROR_FAILED, _("'%s' is not a valid BMP file"), gimp_filename_to_utf8 (filename)); return -1; } /* Windows and OS/2 declare filler so that rows are a multiple of * word length (32 bits == 4 bytes) */ rowbytes= ((Bitmap_Head.biWidth * Bitmap_Head.biBitCnt - 1) / 32) * 4 + 4; #ifdef DEBUG printf ("\nSize: %u, Colors: %u, Bits: %u, Width: %u, Height: %u, " "Comp: %u, Zeile: %u\n", Bitmap_File_Head.bfSize, Bitmap_Head.biClrUsed, Bitmap_Head.biBitCnt, Bitmap_Head.biWidth, Bitmap_Head.biHeight, Bitmap_Head.biCompr, rowbytes); #endif if (Bitmap_Head.biBitCnt <= 8) { #ifdef DEBUG printf ("Colormap read\n"); #endif /* Get the Colormap */ if (!ReadColorMap (fd, ColorMap, ColormapSize, Maps, &Grey)) return -1; } fseek (fd, Bitmap_File_Head.bfOffs, SEEK_SET); /* Get the Image and return the ID or -1 on error*/ image_ID = ReadImage (fd, Bitmap_Head.biWidth, ABS (Bitmap_Head.biHeight), ColorMap, Bitmap_Head.biClrUsed, Bitmap_Head.biBitCnt, Bitmap_Head.biCompr, rowbytes, Grey, masks, error); if (image_ID < 0) return -1; if (Bitmap_Head.biXPels > 0 && Bitmap_Head.biYPels > 0) { /* Fixed up from scott@asofyet's changes last year, njl195 */ gdouble xresolution; gdouble yresolution; /* I don't agree with scott's feeling that 
Gimp should be * trying to "fix" metric resolution translations, in the * long term Gimp should be SI (metric) anyway, but we * haven't told the Americans that yet */ xresolution = Bitmap_Head.biXPels * 0.0254; yresolution = Bitmap_Head.biYPels * 0.0254; gimp_image_set_resolution (image_ID, xresolution, yresolution); } if (Bitmap_Head.biHeight < 0) gimp_image_flip (image_ID, GIMP_ORIENTATION_VERTICAL); return image_ID; }
1
[ "CWE-190" ]
gimp
e3afc99b2fa7aeddf0dba4778663160a5bc682d3
89,349,128,805,997,680,000,000,000,000,000,000,000
381
Harden the BMP plugin against integer overflows. Issues discovered by Stefan Cornelius, Secunia Research, advisory SA37232 and CVE identifier CVE-2009-1570. Fixes bug #600484.
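ReadBMP() above derives rowbytes and colormap sizes directly from untrusted 32-bit header fields, which is exactly where such overflows live. The check below is only a generic illustration of the kind of validation this hardening adds, not the actual GIMP patch; the 64-bit widening and the specific limit are assumptions.

#include <stdint.h>
#include <stdio.h>

/* Reject width/bit-count combinations whose row size cannot be represented.
 * Generic sketch; the real plugin validates more header fields than this. */
static int safe_rowbytes(int32_t width, int32_t bitcnt, int32_t *rowbytes)
{
    if (width <= 0 || bitcnt <= 0 || bitcnt > 32)
        return -1;
    /* Widen before multiplying, then check before rounding up to 32-bit words. */
    int64_t bits = (int64_t)width * bitcnt;
    if (bits > (int64_t)INT32_MAX - 31)
        return -1;
    *rowbytes = (int32_t)(((bits + 31) / 32) * 4);
    return 0;
}

int main(void)
{
    int32_t rb;
    printf("ok=%d\n", safe_rowbytes(INT32_MAX, 32, &rb) == 0);      /* rejected */
    printf("ok=%d rowbytes=%d\n", safe_rowbytes(640, 24, &rb) == 0, rb);
    return 0;
}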
static int apparmor_task_setrlimit(struct task_struct *task, unsigned int resource, struct rlimit *new_rlim) { struct aa_profile *profile = aa_current_profile(); int error = 0; if (!unconfined(profile)) error = aa_task_setrlimit(profile, task, resource, new_rlim); return error; }
0
[ "CWE-20" ]
linux
a5b2c5b2ad5853591a6cac6134cd0f599a720865
24,776,362,245,947,185,000,000,000,000,000,000,000
11
AppArmor: fix oops in apparmor_setprocattr When invalid parameters are passed to apparmor_setprocattr a NULL deref oops occurs when it tries to record an audit message. This is because it is passing NULL for the profile parameter for aa_audit. But aa_audit now requires that the profile passed is not NULL. Fix this by passing the current profile on the task that is trying to setprocattr. Signed-off-by: Kees Cook <[email protected]> Signed-off-by: John Johansen <[email protected]> Cc: [email protected] Signed-off-by: James Morris <[email protected]>
opt_flag(const char *opt, int allow_negate, const char **optsp) { size_t opt_len = strlen(opt); const char *opts = *optsp; int negate = 0; if (allow_negate && strncasecmp(opts, "no-", 3) == 0) { opts += 3; negate = 1; } if (strncasecmp(opts, opt, opt_len) == 0) { *optsp = opts + opt_len; return negate ? 0 : 1; } return -1; }
0
[]
openssh-portable
f3cbe43e28fe71427d41cfe3a17125b972710455
317,059,948,051,652,970,000,000,000,000,000,000,000
16
upstream: need initgroups() before setresgid(); reported by anton@, ok deraadt@ OpenBSD-Commit-ID: 6aa003ee658b316960d94078f2a16edbc25087ce
static struct sched_group *find_busiest_group(struct lb_env *env) { struct sg_lb_stats *local, *busiest; struct sd_lb_stats sds; init_sd_lb_stats(&sds); /* * Compute the various statistics relavent for load balancing at * this level. */ update_sd_lb_stats(env, &sds); if (static_branch_unlikely(&sched_energy_present)) { struct root_domain *rd = env->dst_rq->rd; if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized)) goto out_balanced; } local = &sds.local_stat; busiest = &sds.busiest_stat; /* ASYM feature bypasses nice load balance check */ if (check_asym_packing(env, &sds)) return sds.busiest; /* There is no busy sibling group to pull tasks from */ if (!sds.busiest || busiest->sum_nr_running == 0) goto out_balanced; /* XXX broken for overlapping NUMA groups */ sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load) / sds.total_capacity; /* * If the busiest group is imbalanced the below checks don't * work because they assume all things are equal, which typically * isn't true due to cpus_allowed constraints and the like. */ if (busiest->group_type == group_imbalanced) goto force_balance; /* * When dst_cpu is idle, prevent SMP nice and/or asymmetric group * capacities from resulting in underutilization due to avg_load. */ if (env->idle != CPU_NOT_IDLE && group_has_capacity(env, local) && busiest->group_no_capacity) goto force_balance; /* Misfit tasks should be dealt with regardless of the avg load */ if (busiest->group_type == group_misfit_task) goto force_balance; /* * If the local group is busier than the selected busiest group * don't try and pull any tasks. */ if (local->avg_load >= busiest->avg_load) goto out_balanced; /* * Don't pull any tasks if this group is already above the domain * average load. */ if (local->avg_load >= sds.avg_load) goto out_balanced; if (env->idle == CPU_IDLE) { /* * This CPU is idle. If the busiest group is not overloaded * and there is no imbalance between this and busiest group * wrt idle CPUs, it is balanced. The imbalance becomes * significant if the diff is greater than 1 otherwise we * might end up to just move the imbalance on another group */ if ((busiest->group_type != group_overloaded) && (local->idle_cpus <= (busiest->idle_cpus + 1))) goto out_balanced; } else { /* * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use * imbalance_pct to be conservative. */ if (100 * busiest->avg_load <= env->sd->imbalance_pct * local->avg_load) goto out_balanced; } force_balance: /* Looks like there is an imbalance. Compute it */ env->src_grp_type = busiest->group_type; calculate_imbalance(env, &sds); return env->imbalance ? sds.busiest : NULL; out_balanced: env->imbalance = 0; return NULL; }
0
[ "CWE-400", "CWE-703", "CWE-835" ]
linux
c40f7d74c741a907cfaeb73a7697081881c497d0
85,946,247,003,111,160,000,000,000,000,000,000,000
100
sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the scheduler under high loads, starting at around the v4.18 time frame, and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list manipulation. Do a (manual) revert of: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path") It turns out that the list_del_leaf_cfs_rq() introduced by this commit is a surprising property that was not considered in followup commits such as: 9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list") As Vincent Guittot explains: "I think that there is a bigger problem with commit a9e7f6544b9c and cfs_rq throttling: Let take the example of the following topology TG2 --> TG1 --> root: 1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1 cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in one path because it has never been used and can't be throttled so tmp_alone_branch will point to leaf_cfs_rq_list at the end. 2) Then TG1 is throttled 3) and we add TG3 as a new child of TG1. 4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1 cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list. With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list. So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1 cfs_rq is removed from the list. Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list but tmp_alone_branch still points to TG3 cfs_rq because its throttled parent can't be enqueued when the lock is released. tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should. So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch points on another TG cfs_rq, the next TG cfs_rq that will be added, will be linked outside rq->leaf_cfs_rq_list - which is bad. In addition, we can break the ordering of the cfs_rq in rq->leaf_cfs_rq_list but this ordering is used to update and propagate the update from leaf down to root." Instead of trying to work through all these cases and trying to reproduce the very high loads that produced the lockup to begin with, simplify the code temporarily by reverting a9e7f6544b9c - which change was clearly not thought through completely. This (hopefully) gives us a kernel that doesn't lock up so people can continue to enjoy their holidays without worrying about regressions. ;-) [ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ] Analyzed-by: Xie XiuQi <[email protected]> Analyzed-by: Vincent Guittot <[email protected]> Reported-by: Zhipeng Xie <[email protected]> Reported-by: Sargun Dhillon <[email protected]> Reported-by: Xie XiuQi <[email protected]> Tested-by: Zhipeng Xie <[email protected]> Tested-by: Sargun Dhillon <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> Acked-by: Vincent Guittot <[email protected]> Cc: <[email protected]> # v4.13+ Cc: Bin Li <[email protected]> Cc: Mike Galbraith <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Tejun Heo <[email protected]> Cc: Thomas Gleixner <[email protected]> Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path") Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, u8 *fsid) { struct btrfs_fs_devices *fs_devices; int ret; lockdep_assert_held(&uuid_mutex); ASSERT(fsid); /* This will match only for multi-device seed fs */ list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) return fs_devices; fs_devices = find_fsid(fsid, NULL); if (!fs_devices) { if (!btrfs_test_opt(fs_info, DEGRADED)) return ERR_PTR(-ENOENT); fs_devices = alloc_fs_devices(fsid, NULL); if (IS_ERR(fs_devices)) return fs_devices; fs_devices->seeding = true; fs_devices->opened = 1; return fs_devices; } /* * Upon first call for a seed fs fsid, just create a private copy of the * respective fs_devices and anchor it at fs_info->fs_devices->seed_list */ fs_devices = clone_fs_devices(fs_devices); if (IS_ERR(fs_devices)) return fs_devices; ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); if (ret) { free_fs_devices(fs_devices); return ERR_PTR(ret); } if (!fs_devices->seeding) { close_fs_devices(fs_devices); free_fs_devices(fs_devices); return ERR_PTR(-EINVAL); } list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); return fs_devices; }
0
[ "CWE-476", "CWE-703" ]
linux
e4571b8c5e9ffa1e85c0c671995bd4dcc5c75091
57,124,786,475,932,440,000,000,000,000,000,000,000
53
btrfs: fix NULL pointer dereference when deleting device by invalid id [BUG] It's easy to trigger NULL pointer dereference, just by removing a non-existing device id: # mkfs.btrfs -f -m single -d single /dev/test/scratch1 \ /dev/test/scratch2 # mount /dev/test/scratch1 /mnt/btrfs # btrfs device remove 3 /mnt/btrfs Then we have the following kernel NULL pointer dereference: BUG: kernel NULL pointer dereference, address: 0000000000000000 #PF: supervisor read access in kernel mode #PF: error_code(0x0000) - not-present page PGD 0 P4D 0 Oops: 0000 [#1] PREEMPT SMP NOPTI CPU: 9 PID: 649 Comm: btrfs Not tainted 5.14.0-rc3-custom+ #35 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015 RIP: 0010:btrfs_rm_device+0x4de/0x6b0 [btrfs] btrfs_ioctl+0x18bb/0x3190 [btrfs] ? lock_is_held_type+0xa5/0x120 ? find_held_lock.constprop.0+0x2b/0x80 ? do_user_addr_fault+0x201/0x6a0 ? lock_release+0xd2/0x2d0 ? __x64_sys_ioctl+0x83/0xb0 __x64_sys_ioctl+0x83/0xb0 do_syscall_64+0x3b/0x90 entry_SYSCALL_64_after_hwframe+0x44/0xae [CAUSE] Commit a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return btrfs_device directly") moves the "missing" device path check into btrfs_rm_device(). But btrfs_rm_device() itself can have case where it only receives @devid, with NULL as @device_path. In that case, calling strcmp() on NULL will trigger the NULL pointer dereference. Before that commit, we handle the "missing" case inside btrfs_find_device_by_devspec(), which will not check @device_path at all if @devid is provided, thus no way to trigger the bug. [FIX] Before calling strcmp(), also make sure @device_path is not NULL. Fixes: a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return btrfs_device directly") CC: [email protected] # 5.4+ Reported-by: butt3rflyh4ck <[email protected]> Reviewed-by: Anand Jain <[email protected]> Signed-off-by: Qu Wenruo <[email protected]> Reviewed-by: David Sterba <[email protected]> Signed-off-by: David Sterba <[email protected]>
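The oops in this record comes from handing a NULL device path to strcmp() when the device is specified by id only, so the guard reduces to a pointer check before the comparison. The snippet below is a stand-alone illustration of that guard, not the actual btrfs_rm_device() code.

#include <stdio.h>
#include <string.h>

/* Generic sketch: only compare the path when one was actually given. */
static int is_missing_device(const char *device_path)
{
    /* strcmp(NULL, ...) is undefined behaviour, so check the pointer first. */
    return device_path && strcmp(device_path, "missing") == 0;
}

int main(void)
{
    printf("%d\n", is_missing_device(NULL));      /* 0, no crash */
    printf("%d\n", is_missing_device("missing")); /* 1 */
    return 0;
}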
static void copy_valid_id_string(struct snd_card *card, const char *src, const char *nid) { char *id = card->id; while (*nid && !isalnum(*nid)) nid++; if (isdigit(*nid)) *id++ = isalpha(*src) ? *src : 'D'; while (*nid && (size_t)(id - card->id) < sizeof(card->id) - 1) { if (isalnum(*nid)) *id++ = *nid; nid++; } *id = 0; }
0
[ "CWE-416" ]
linux
2a3f7221acddfe1caa9ff09b3a8158c39b2fdeac
305,293,388,872,845,200,000,000,000,000,000,000,000
16
ALSA: core: Fix card races between register and disconnect There is a small race window in the card disconnection code that allows the registration of another card with the very same card id. This leads to a warning in procfs creation as caught by syzkaller. The problem is that we delete snd_cards and snd_cards_lock entries at the very beginning of the disconnection procedure. This makes the slot available to be assigned for another card object while the disconnection procedure is being processed. Then it becomes possible to issue a procfs registration with the existing file name although we check the conflict beforehand. The fix is simply to move the snd_cards and snd_cards_lock clearances at the end of the disconnection procedure. The references to these entries are merely either from the global proc files like /proc/asound/cards or from the card registration / disconnection, so it should be fine to shift at the very end. Reported-by: [email protected] Cc: <[email protected]> Signed-off-by: Takashi Iwai <[email protected]>
TEST(UriSuite, TestToStringBug1950126) { UriParserStateW state; UriUriW uriOne; UriUriW uriTwo; const wchar_t * const uriOneString = L"http://e.com/"; const wchar_t * const uriTwoString = L"http://e.com"; state.uri = &uriOne; ASSERT_TRUE(URI_SUCCESS == uriParseUriW(&state, uriOneString)); state.uri = &uriTwo; ASSERT_TRUE(URI_SUCCESS == uriParseUriW(&state, uriTwoString)); ASSERT_TRUE(URI_FALSE == uriEqualsUriW(&uriOne, &uriTwo)); uriFreeUriMembersW(&uriOne); uriFreeUriMembersW(&uriTwo); ASSERT_TRUE(testToStringHelper(uriOneString)); ASSERT_TRUE(testToStringHelper(uriTwoString)); }
0
[ "CWE-125" ]
uriparser
cef25028de5ff872c2e1f0a6c562eb3ea9ecbce4
23,214,101,473,627,507,000,000,000,000,000,000,000
17
Fix uriParse*Ex* out-of-bounds read
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset, size_t size) { int64_t len; if (!bdrv_is_inserted(bs)) return -ENOMEDIUM; if (bs->growable) return 0; len = bdrv_getlength(bs); if (offset < 0) return -EIO; if ((offset > len) || (len - offset < size)) return -EIO; return 0; }
0
[ "CWE-190" ]
qemu
8f4754ede56e3f9ea3fd7207f4a7c4453e59285b
312,281,802,529,720,900,000,000,000,000,000,000,000
21
block: Limit request size (CVE-2014-0143) Limiting the size of a single request to INT_MAX not only fixes a direct integer overflow in bdrv_check_request() (which would only trigger bad behaviour with ridiculously huge images, as in close to 2^64 bytes), but can also prevent overflows in all block drivers. Signed-off-by: Kevin Wolf <[email protected]> Reviewed-by: Max Reitz <[email protected]> Signed-off-by: Stefan Hajnoczi <[email protected]>
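bdrv_check_byte_request() above already validates offset and length against the device size; the commit adds an upper bound on a single request so that later size arithmetic cannot overflow. The sketch below shows that shape with made-up error values and without QEMU's types, so treat it as an assumption-laden outline rather than the real patch.

#include <limits.h>
#include <stdint.h>

/* Generic request validation: reject sizes no driver should ever see.
 * The INT_MAX cap mirrors the idea in the commit message; error codes
 * are illustrative, not QEMU's. */
int check_byte_request(int64_t offset, uint64_t size, int64_t dev_len)
{
    if (size > INT_MAX)
        return -1;                      /* oversized single request */
    if (offset < 0 || offset > dev_len)
        return -1;
    if ((uint64_t)(dev_len - offset) < size)
        return -1;                      /* would run past the end */
    return 0;
}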
int ntfs_attr_map_whole_runlist(ntfs_attr *na) { VCN next_vcn, last_vcn, highest_vcn; ntfs_attr_search_ctx *ctx; ntfs_volume *vol = na->ni->vol; ATTR_RECORD *a; int ret = -1; int not_mapped; ntfs_log_enter("Entering for inode %llu, attr 0x%x.\n", (unsigned long long)na->ni->mft_no, le32_to_cpu(na->type)); /* avoid multiple full runlist mappings */ if (NAttrFullyMapped(na)) { ret = 0; goto out; } ctx = ntfs_attr_get_search_ctx(na->ni, NULL); if (!ctx) goto out; /* Map all attribute extents one by one. */ next_vcn = last_vcn = highest_vcn = 0; a = NULL; while (1) { runlist_element *rl; not_mapped = 0; if (ntfs_rl_vcn_to_lcn(na->rl, next_vcn) == LCN_RL_NOT_MAPPED) not_mapped = 1; if (ntfs_attr_lookup(na->type, na->name, na->name_len, CASE_SENSITIVE, next_vcn, NULL, 0, ctx)) break; a = ctx->attr; if (not_mapped) { /* Decode the runlist. */ rl = ntfs_mapping_pairs_decompress(na->ni->vol, a, na->rl); if (!rl) goto err_out; na->rl = rl; } /* Are we in the first extent? */ if (!next_vcn) { if (a->lowest_vcn) { errno = EIO; ntfs_log_perror("First extent of inode %llu " "attribute has non-zero lowest_vcn", (unsigned long long)na->ni->mft_no); goto err_out; } /* Get the last vcn in the attribute. */ last_vcn = sle64_to_cpu(a->allocated_size) >> vol->cluster_size_bits; } /* Get the lowest vcn for the next extent. */ highest_vcn = sle64_to_cpu(a->highest_vcn); next_vcn = highest_vcn + 1; /* Only one extent or error, which we catch below. */ if (next_vcn <= 0) { errno = ENOENT; break; } /* Avoid endless loops due to corruption. */ if (next_vcn < sle64_to_cpu(a->lowest_vcn)) { errno = EIO; ntfs_log_perror("Inode %llu has corrupt attribute list", (unsigned long long)na->ni->mft_no); goto err_out; } } if (!a) { ntfs_log_perror("Couldn't find attribute for runlist mapping"); goto err_out; } /* * Cannot check highest_vcn when the last runlist has * been modified earlier, as runlists and sizes may be * updated without highest_vcn being in sync, when * HOLES_DELAY is used */ if (not_mapped && highest_vcn && highest_vcn != last_vcn - 1) { errno = EIO; ntfs_log_perror("Failed to load full runlist: inode: %llu " "highest_vcn: 0x%llx last_vcn: 0x%llx", (unsigned long long)na->ni->mft_no, (long long)highest_vcn, (long long)last_vcn); goto err_out; } if (errno == ENOENT) { NAttrSetFullyMapped(na); ret = 0; } err_out: ntfs_attr_put_search_ctx(ctx); out: ntfs_log_leave("\n"); return ret; }
0
[ "CWE-703" ]
ntfs-3g
60717a846deaaea47e50ce58872869f7bd1103b5
3,255,716,422,731,543,000,000,000,000,000,000,000
106
Avoided allocating and reading an attribute beyond its full size Before reading a full attribute value for internal use, its expected length has been checked to be < 0x40000. However the allocated size in the runlist may be much bigger as a consequence of a bug or malice. To prevent malloc'ing excessive size, restrict the size of the last run to read to the needed length.
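The defence described in the message is to size the read from the attribute's declared length, not from the (possibly corrupted) allocated size in the runlist. A generic sketch of that clamping follows; the 0x40000 cap is taken from the message, while the names and signature are invented and this is not the libntfs-3g implementation.

#include <stdlib.h>
#include <string.h>

#define MAX_ATTR_VALUE 0x40000  /* cap mentioned in the commit message */

/* Allocate and read an attribute value without trusting the allocated size
 * recorded in the runlist (corruption or malice can inflate it). */
void *read_attr_value(const void *runlist_data, size_t allocated, size_t expected)
{
    size_t want = expected;

    if (want > MAX_ATTR_VALUE)
        return NULL;                 /* reject absurd lengths up front */
    if (want > allocated)
        want = allocated;            /* never read past what really exists */

    void *buf = malloc(want ? want : 1);
    if (buf)
        memcpy(buf, runlist_data, want);
    return buf;
}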
TEST_P(SslCertficateIntegrationTest, ServerEcdsa) { server_rsa_cert_ = false; server_ecdsa_cert_ = true; ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { return makeSslClientConnection({}); }; testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); checkStats(); }
0
[ "CWE-400" ]
envoy
0e49a495826ea9e29134c1bd54fdeb31a034f40c
76,958,335,842,510,020,000,000,000,000,000,000,000
9
http/2: add stats and stream flush timeout (#139) This commit adds a new stream flush timeout to guard against a remote server that does not open window once an entire stream has been buffered for flushing. Additional stats have also been added to better understand the codecs view of active streams as well as amount of data buffered. Signed-off-by: Matt Klein <[email protected]>
void rfbInitServer(rfbScreenInfoPtr screen) { #ifdef WIN32 WSADATA trash; WSAStartup(MAKEWORD(2,2),&trash); #endif rfbInitSockets(screen); rfbHttpInitSockets(screen); #ifndef __MINGW32__ if(screen->ignoreSIGPIPE) signal(SIGPIPE,SIG_IGN); #endif }
0
[]
libvncserver
804335f9d296440bb708ca844f5d89b58b50b0c6
307,857,544,660,432,570,000,000,000,000,000,000,000
13
Thread safety for zrle, zlib, tight. Proposed tight security type fix for debian bug 517422.
static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; struct sctp_sock *sp = sctp_sk(sk); int val; if (optlen == sizeof(int)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of int in maxseg socket option.\n" "Use struct sctp_assoc_value instead\n", current->comm, task_pid_nr(current)); if (copy_from_user(&val, optval, optlen)) return -EFAULT; params.assoc_id = 0; } else if (optlen == sizeof(struct sctp_assoc_value)) { if (copy_from_user(&params, optval, optlen)) return -EFAULT; val = params.assoc_value; } else return -EINVAL; if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN))) return -EINVAL; asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc && params.assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { if (val == 0) { val = asoc->pathmtu; val -= sp->pf->af->net_header_len; val -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk); } asoc->user_frag = val; asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); } else { sp->user_frag = val; } return 0; }
0
[ "CWE-617", "CWE-362" ]
linux
2dcab598484185dea7ec22219c76dcdd59e3cb90
112,650,043,432,774,800,000,000,000,000,000,000,000
45
sctp: avoid BUG_ON on sctp_wait_for_sndbuf Alexander Popov reported that an application may trigger a BUG_ON in sctp_wait_for_sndbuf if the socket tx buffer is full, a thread is waiting on it to queue more data and meanwhile another thread peels off the association being used by the first thread. This patch replaces the BUG_ON call with a proper error handling. It will return -EPIPE to the original sendmsg call, similarly to what would have been done if the association wasn't found in the first place. Acked-by: Alexander Popov <[email protected]> Signed-off-by: Marcelo Ricardo Leitner <[email protected]> Reviewed-by: Xin Long <[email protected]> Signed-off-by: David S. Miller <[email protected]>
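The change amounts to turning a fatal assertion into an error return when the association disappears while a sender is waiting for buffer space. Outside the kernel the same shape looks roughly like the sketch below; the struct, field and function names are hypothetical, and -EPIPE is used the way the message describes.

#include <errno.h>

struct assoc { int base_dead; };

/* Before: a BUG_ON()-style assertion here would bring the whole system down.
 * After: report EPIPE and let the caller's sendmsg() fail gracefully. */
int wait_for_sndbuf(struct assoc *asoc)
{
    if (asoc == NULL || asoc->base_dead)
        return -EPIPE;      /* association was peeled off / freed while we slept */
    /* ... sleep until send buffer space becomes available ... */
    return 0;
}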
PHP_FUNCTION(xmlrpc_encode) { XMLRPC_VALUE xOut = NULL; zval *arg1; char *outBuf; if (zend_parse_parameters(ZEND_NUM_ARGS(), "z", &arg1) == FAILURE) { return; } if (USED_RET()) { /* convert native php type to xmlrpc type */ xOut = PHP_to_XMLRPC(arg1); /* generate raw xml from xmlrpc data */ outBuf = XMLRPC_VALUE_ToXML(xOut, 0); if (xOut) { if (outBuf) { RETVAL_STRING(outBuf); free(outBuf); } /* cleanup */ XMLRPC_CleanupValue(xOut); } } }
0
[]
php-src
f3c1863aa2721343245b63ac7bd68cfdc3dd41f3
148,330,345,768,976,080,000,000,000,000,000,000,000
27
Fixed #70728 Conflicts: ext/xmlrpc/xmlrpc-epi-php.c
static my_bool sql_connect(MYSQL *mysql, uint wait) { my_bool info=0; for (;;) { if (mysql_real_connect(mysql,host,user,opt_password,NullS,tcp_port, unix_port, CLIENT_REMEMBER_OPTIONS)) { mysql->reconnect= 1; if (info) { fputs("\n",stderr); (void) fflush(stderr); } return 0; } if (!wait) // was or reached 0, fail { if (!option_silent) // print diagnostics { if (!host) host= (char*) LOCAL_HOST; my_printf_error(0,"connect to server at '%s' failed\nerror: '%s'", error_flags, host, mysql_error(mysql)); if (mysql_errno(mysql) == CR_CONNECTION_ERROR) { fprintf(stderr, "Check that mysqld is running and that the socket: '%s' exists!\n", unix_port ? unix_port : mysql_unix_port); } else if (mysql_errno(mysql) == CR_CONN_HOST_ERROR || mysql_errno(mysql) == CR_UNKNOWN_HOST) { fprintf(stderr,"Check that mysqld is running on %s",host); fprintf(stderr," and that the port is %d.\n", tcp_port ? tcp_port: mysql_port); fprintf(stderr,"You can check this by doing 'telnet %s %d'\n", host, tcp_port ? tcp_port: mysql_port); } } return 1; } if (wait != (uint) ~0) wait--; /* count down, one less retry */ if ((mysql_errno(mysql) != CR_CONN_HOST_ERROR) && (mysql_errno(mysql) != CR_CONNECTION_ERROR)) { /* Error is worse than "server doesn't answer (yet?)"; fail even if we still have "wait-coins" unless --force was also given. */ fprintf(stderr,"Got error: %s\n", mysql_error(mysql)); if (!option_force) return 1; } else if (!option_silent) { if (!info) { info=1; fputs("Waiting for MySQL server to answer",stderr); (void) fflush(stderr); } else { putc('.',stderr); (void) fflush(stderr); } } sleep(5); } }
1
[ "CWE-295" ]
mysql-server
b3e9211e48a3fb586e88b0270a175d2348935424
103,891,601,276,542,130,000,000,000,000,000,000,000
77
WL#9072: Backport WL#8785 to 5.5
TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteIntArray** output_shape) { const int dims1 = NumDimensions(input1); const int dims2 = NumDimensions(input2); const int out_dims = std::max(dims1, dims2); std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape( TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree); for (int i = 0; i < out_dims; ++i) { const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1); const int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1); if (!(d1 == d2 || d1 == 1 || d2 == 1)) { context->ReportError(context, "Given shapes, %s and %s, are not broadcastable.", GetShapeDebugString(input1->dims).c_str(), GetShapeDebugString(input2->dims).c_str()); return kTfLiteError; } if (d1 == 0 || d2 == 0) { shape->data[out_dims - i - 1] = 0; } else { shape->data[out_dims - i - 1] = std::max(d1, d2); } } *output_shape = shape.release(); return kTfLiteOk; }
0
[ "CWE-476", "CWE-369" ]
tensorflow
5b048e87e4e55990dae6b547add4dae59f4e1c76
80,650,444,671,175,580,000,000,000,000,000,000,000
30
Fix a null pointer exception in SVDF. This is due to not checking that `GetVariableInput` returns a non-null tensor. Also fix a potential null pointer exception in `GetVariableInput`. PiperOrigin-RevId: 385160147 Change-Id: Iadf3f0705b036a9014d27caa5a8bbd91f4c4c401
mrb_mod_const_missing(mrb_state *mrb, mrb_value mod) { mrb_sym sym; mrb_get_args(mrb, "n", &sym); if (mrb_class_real(mrb_class_ptr(mod)) != mrb->object_class) { mrb_name_error(mrb, sym, "uninitialized constant %S::%S", mod, mrb_sym2str(mrb, sym)); } else { mrb_name_error(mrb, sym, "uninitialized constant %S", mrb_sym2str(mrb, sym)); } /* not reached */ return mrb_nil_value(); }
0
[ "CWE-476", "CWE-415" ]
mruby
faa4eaf6803bd11669bc324b4c34e7162286bfa3
27,326,286,650,380,170,000,000,000,000,000,000,000
18
`mrb_class_real()` did not work for `BasicObject`; fix #4037
uint ha_myisam::checksum() const { return (uint)file->state->checksum; }
0
[ "CWE-362" ]
mysql-server
4e5473862e6852b0f3802b0cd0c6fa10b5253291
161,062,350,326,788,500,000,000,000,000,000,000,000
4
Bug#24388746: PRIVILEGE ESCALATION AND RACE CONDITION USING CREATE TABLE During REPAIR TABLE of a MyISAM table, a temporary data file (.TMD) is created. When repair finishes, this file is renamed to the original .MYD file. The problem was that during this rename, we copied the stats from the old file to the new file with chmod/chown. If a user managed to replace the temporary file before chmod/chown was executed, it was possible to get an arbitrary file with the privileges of the mysql user. This patch fixes the problem by not copying stats from the old file to the new file. This is not needed as the new file was created with the correct stats. This fix only changes server behavior - external utilities such as myisamchk still does chmod/chown. No test case provided since the problem involves synchronization with file system operations.
static errno_t handle_missing_pvt(TALLOC_CTX *mem_ctx, struct tevent_context *ev, struct sdap_options *opts, const char *orig_dn, int timeout, const char *username, struct sdap_handle *sh, struct tevent_req *req, tevent_req_fn callback) { struct tevent_req *subreq = NULL; errno_t ret; if (sh != NULL) { /* plain LDAP provider already has a sdap_handle */ subreq = sdap_get_ad_tokengroups_send(mem_ctx, ev, opts, sh, username, orig_dn, timeout); if (subreq == NULL) { ret = ENOMEM; tevent_req_error(req, ret); goto done; } tevent_req_set_callback(subreq, callback, req); ret = EOK; goto done; } else { ret = EINVAL; goto done; } done: return ret; }
0
[ "CWE-264" ]
sssd
191d7f7ce3de10d9e19eaa0a6ab3319bcd4ca95d
39,972,932,607,115,760,000,000,000,000,000,000,000
35
AD: process non-posix nested groups using tokenGroups When initgr is performed for AD supporting tokenGroups, do not skip non-posix groups. Resolves: https://fedorahosted.org/sssd/ticket/2343 Reviewed-by: Michal Židek <[email protected]> (cherry picked from commit 4932db6258ccfb612a3a28eb6a618c2f042b9d58)
BOOL transport_set_blocking_mode(rdpTransport* transport, BOOL blocking) { BOOL status; status = TRUE; transport->blocking = blocking; if (transport->SplitInputOutput) { status &= tcp_set_blocking_mode(transport->TcpIn, blocking); status &= tcp_set_blocking_mode(transport->TcpOut, blocking); } else { status &= tcp_set_blocking_mode(transport->TcpIn, blocking); } if (transport->layer == TRANSPORT_LAYER_TSG) { tsg_set_blocking_mode(transport->tsg, blocking); } return status; }
0
[ "CWE-476", "CWE-125" ]
FreeRDP
0773bb9303d24473fe1185d85a424dfe159aff53
128,261,937,024,806,100,000,000,000,000,000,000,000
24
nla: invalidate sec handle after creation If sec pointer isn't invalidated after creation it is not possible to check if the upper and lower pointers are valid. This fixes a segfault in the server part if the client disconnects before the authentication was finished.
pixBlockconvGrayUnnormalized(PIX *pixs, l_int32 wc, l_int32 hc) { l_int32 i, j, w, h, d, wpla, wpld, jmax; l_uint32 *linemina, *linemaxa, *lined, *dataa, *datad; PIX *pixsb, *pixacc, *pixd; PROCNAME("pixBlockconvGrayUnnormalized"); if (!pixs) return (PIX *)ERROR_PTR("pixs not defined", procName, NULL); pixGetDimensions(pixs, &w, &h, &d); if (d != 8) return (PIX *)ERROR_PTR("pixs not 8 bpp", procName, NULL); if (wc < 0) wc = 0; if (hc < 0) hc = 0; if (w < 2 * wc + 1 || h < 2 * hc + 1) { wc = L_MIN(wc, (w - 1) / 2); hc = L_MIN(hc, (h - 1) / 2); L_WARNING("kernel too large; reducing!\n", procName); L_INFO("wc = %d, hc = %d\n", procName, wc, hc); } if (wc == 0 && hc == 0) /* no-op */ return pixCopy(NULL, pixs); if ((pixsb = pixAddMirroredBorder(pixs, wc + 1, wc, hc + 1, hc)) == NULL) return (PIX *)ERROR_PTR("pixsb not made", procName, NULL); pixacc = pixBlockconvAccum(pixsb); pixDestroy(&pixsb); if (!pixacc) return (PIX *)ERROR_PTR("pixacc not made", procName, NULL); if ((pixd = pixCreate(w, h, 32)) == NULL) { pixDestroy(&pixacc); return (PIX *)ERROR_PTR("pixd not made", procName, NULL); } wpla = pixGetWpl(pixacc); wpld = pixGetWpl(pixd); datad = pixGetData(pixd); dataa = pixGetData(pixacc); for (i = 0; i < h; i++) { lined = datad + i * wpld; linemina = dataa + i * wpla; linemaxa = dataa + (i + 2 * hc + 1) * wpla; for (j = 0; j < w; j++) { jmax = j + 2 * wc + 1; lined[j] = linemaxa[jmax] - linemaxa[j] - linemina[jmax] + linemina[j]; } } pixDestroy(&pixacc); return pixd; }
0
[]
leptonica
480f5e74c24fdc2003c42a4e15d1f24c9e6ea469
69,585,571,770,598,440,000,000,000,000,000,000,000
55
Fixed issue 21972 (oss-fuzz) Divide by zero in pixBlockconvGray().
static int blkcipher_walk_next(struct blkcipher_desc *desc, struct blkcipher_walk *walk) { struct crypto_blkcipher *tfm = desc->tfm; unsigned int alignmask = crypto_blkcipher_alignmask(tfm); unsigned int bsize; unsigned int n; int err; n = walk->total; if (unlikely(n < crypto_blkcipher_blocksize(tfm))) { desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; return blkcipher_walk_done(desc, walk, -EINVAL); } walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY | BLKCIPHER_WALK_DIFF); if (!scatterwalk_aligned(&walk->in, alignmask) || !scatterwalk_aligned(&walk->out, alignmask)) { walk->flags |= BLKCIPHER_WALK_COPY; if (!walk->page) { walk->page = (void *)__get_free_page(GFP_ATOMIC); if (!walk->page) n = 0; } } bsize = min(walk->blocksize, n); n = scatterwalk_clamp(&walk->in, n); n = scatterwalk_clamp(&walk->out, n); if (unlikely(n < bsize)) { err = blkcipher_next_slow(desc, walk, bsize, alignmask); goto set_phys_lowmem; } walk->nbytes = n; if (walk->flags & BLKCIPHER_WALK_COPY) { err = blkcipher_next_copy(walk); goto set_phys_lowmem; } return blkcipher_next_fast(desc, walk); set_phys_lowmem: if (walk->flags & BLKCIPHER_WALK_PHYS) { walk->src.phys.page = virt_to_page(walk->src.virt.addr); walk->dst.phys.page = virt_to_page(walk->dst.virt.addr); walk->src.phys.offset &= PAGE_SIZE - 1; walk->dst.phys.offset &= PAGE_SIZE - 1; } return err; }
0
[ "CWE-310" ]
linux
9a5467bf7b6e9e02ec9c3da4e23747c05faeaac6
290,036,210,425,734,020,000,000,000,000,000,000,000
53
crypto: user - fix info leaks in report API Three errors resulting in kernel memory disclosure: 1/ The structures used for the netlink based crypto algorithm report API are located on the stack. As snprintf() does not fill the remainder of the buffer with null bytes, those stack bytes will be disclosed to users of the API. Switch to strncpy() to fix this. 2/ crypto_report_one() does not initialize all field of struct crypto_user_alg. Fix this to fix the heap info leak. 3/ For the module name we should copy only as many bytes as module_name() returns -- not as much as the destination buffer could hold. But the current code does not and therefore copies random data from behind the end of the module name, as the module name is always shorter than CRYPTO_MAX_ALG_NAME. Also switch to use strncpy() to copy the algorithm's name and driver_name. They are strings, after all. Signed-off-by: Mathias Krause <[email protected]> Cc: Steffen Klassert <[email protected]> Signed-off-by: Herbert Xu <[email protected]>
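Two of the three leaks come from how fixed-size report fields are filled: snprintf() stops at the terminator and leaves the rest of the destination untouched, while strncpy() pads the remainder with zero bytes. The user-space demo below shows only that library behaviour; it is not the kernel report code.

#include <stdio.h>
#include <string.h>

int main(void)
{
    char leaky[16], padded[16];

    /* Simulate stale stack contents. */
    memset(leaky, 'X', sizeof(leaky));
    memset(padded, 'X', sizeof(padded));

    snprintf(leaky, sizeof(leaky), "%s", "aes");  /* bytes 4..15 still 'X' */
    strncpy(padded, "aes", sizeof(padded));       /* bytes 3..15 become '\0' */

    /* Copying the whole buffer to user space would disclose the stale 'X'
     * bytes in the snprintf case but not in the strncpy case. */
    printf("leaky[5]=%c padded[5]=%d\n", leaky[5], padded[5]);
    return 0;
}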
c_valid_ordinal_p(int y, int d, double sg, int *rd, int *rjd, int *ns) { int ry2, rd2; if (d < 0) { int rjd2, ns2; if (!c_find_ldoy(y, sg, &rjd2, &ns2)) return 0; c_jd_to_ordinal(rjd2 + d + 1, sg, &ry2, &rd2); if (ry2 != y) return 0; d = rd2; } c_ordinal_to_jd(y, d, sg, rjd, ns); c_jd_to_ordinal(*rjd, sg, &ry2, &rd2); if (ry2 != y || rd2 != d) return 0; return 1; }
0
[]
date
3959accef8da5c128f8a8e2fd54e932a4fb253b0
244,557,535,550,571,030,000,000,000,000,000,000,000
21
Add length limit option for methods that parses date strings `Date.parse` now raises an ArgumentError when a given date string is longer than 128. You can configure the limit by giving `limit` keyword arguments like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`, the limit is disabled. Not only `Date.parse` but also the following methods are changed. * Date._parse * Date.parse * DateTime.parse * Date._iso8601 * Date.iso8601 * DateTime.iso8601 * Date._rfc3339 * Date.rfc3339 * DateTime.rfc3339 * Date._xmlschema * Date.xmlschema * DateTime.xmlschema * Date._rfc2822 * Date.rfc2822 * DateTime.rfc2822 * Date._rfc822 * Date.rfc822 * DateTime.rfc822 * Date._jisx0301 * Date.jisx0301 * DateTime.jisx0301
static GList *completion_joinlist(GList *list1, GList *list2) { GList *old; old = list2; while (list2 != NULL) { if (!glist_find_icase_string(list1, list2->data)) list1 = g_list_append(list1, list2->data); else g_free(list2->data); list2 = list2->next; } g_list_free(old); return list1; }
0
[ "CWE-416" ]
irssi
36564717c9f701e3a339da362ab46d220d27e0c1
239,424,920,544,595,060,000,000,000,000,000,000,000
17
Merge branch 'security' into 'master' Security See merge request irssi/irssi!34 (cherry picked from commit b0d9cb33cd9ef9da7c331409e8b7c57a6f3aef3f)
void RGWCopyObj_ObjStore_S3::send_partial_response(off_t ofs) { if (! sent_header) { if (op_ret) set_req_state_err(s, op_ret); dump_errno(s); // Explicitly use chunked transfer encoding so that we can stream the result // to the user without having to wait for the full length of it. end_header(s, this, "application/xml", CHUNKED_TRANSFER_ENCODING); dump_start(s); if (op_ret == 0) { s->formatter->open_object_section_in_ns("CopyObjectResult", XMLNS_AWS_S3); } sent_header = true; } else { /* Send progress field. Note that this diverge from the original S3 * spec. We do this in order to keep connection alive. */ s->formatter->dump_int("Progress", (uint64_t)ofs); } rgw_flush_formatter(s, s->formatter); }
0
[ "CWE-79" ]
ceph
8f90658c731499722d5f4393c8ad70b971d05f77
261,242,800,114,669,200,000,000,000,000,000,000,000
23
rgw: reject unauthenticated response-header actions Signed-off-by: Matt Benjamin <[email protected]> Reviewed-by: Casey Bodley <[email protected]> (cherry picked from commit d8dd5e513c0c62bbd7d3044d7e2eddcd897bd400)
int sqlite3ShadowTableName(sqlite3 *db, const char *zName){ char *zTail; /* Pointer to the last "_" in zName */ Table *pTab; /* Table that zName is a shadow of */ Module *pMod; /* Module for the virtual table */ zTail = strrchr(zName, '_'); if( zTail==0 ) return 0; *zTail = 0; pTab = sqlite3FindTable(db, zName, 0); *zTail = '_'; if( pTab==0 ) return 0; if( !IsVirtual(pTab) ) return 0; pMod = (Module*)sqlite3HashFind(&db->aModule, pTab->azModuleArg[0]); if( pMod==0 ) return 0; if( pMod->pModule->iVersion<3 ) return 0; if( pMod->pModule->xShadowName==0 ) return 0; return pMod->pModule->xShadowName(zTail+1); }
0
[ "CWE-674", "CWE-787" ]
sqlite
38096961c7cd109110ac21d3ed7dad7e0cb0ae06
128,272,767,840,325,170,000,000,000,000,000,000,000
18
Avoid infinite recursion in the ALTER TABLE code when a view contains an unused CTE that references, directly or indirectly, the view itself. FossilOrigin-Name: 1d2e53a39b87e364685e21de137655b6eee725e4c6d27fc90865072d7c5892b5
xfs_fill_fsxattr( struct xfs_inode *ip, int whichfork, struct fileattr *fa) { struct xfs_mount *mp = ip->i_mount; struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); fileattr_fill_xflags(fa, xfs_ip2xflags(ip)); if (ip->i_diflags & XFS_DIFLAG_EXTSIZE) { fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize); } else if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) { /* * Don't let a misaligned extent size hint on a directory * escape to userspace if it won't pass the setattr checks * later. */ if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) && ip->i_extsize % mp->m_sb.sb_rextsize > 0) { fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT); fa->fsx_extsize = 0; } else { fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize); } } if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) fa->fsx_cowextsize = XFS_FSB_TO_B(mp, ip->i_cowextsize); fa->fsx_projid = ip->i_projid; if (ifp && !xfs_need_iread_extents(ifp)) fa->fsx_nextents = xfs_iext_count(ifp); else fa->fsx_nextents = xfs_ifork_nextents(ifp); }
0
[ "CWE-125" ]
linux
983d8e60f50806f90534cc5373d0ce867e5aaf79
170,323,768,505,187,900,000,000,000,000,000,000,000
36
xfs: map unwritten blocks in XFS_IOC_{ALLOC,FREE}SP just like fallocate The old ALLOCSP/FREESP ioctls in XFS can be used to preallocate space at the end of files, just like fallocate and RESVSP. Make the behavior consistent with the other ioctls. Reported-by: Kirill Tkhai <[email protected]> Signed-off-by: Darrick J. Wong <[email protected]> Signed-off-by: Darrick J. Wong <[email protected]> Reviewed-by: Dave Chinner <[email protected]> Reviewed-by: Eric Sandeen <[email protected]>
of2str(int of) { int s, h, m; decode_offset(of, s, h, m); return rb_enc_sprintf(rb_usascii_encoding(), "%c%02d:%02d", s, h, m); }
0
[]
date
3959accef8da5c128f8a8e2fd54e932a4fb253b0
249,105,258,804,678,630,000,000,000,000,000,000,000
7
Add length limit option for methods that parses date strings `Date.parse` now raises an ArgumentError when a given date string is longer than 128. You can configure the limit by giving `limit` keyword arguments like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`, the limit is disabled. Not only `Date.parse` but also the following methods are changed. * Date._parse * Date.parse * DateTime.parse * Date._iso8601 * Date.iso8601 * DateTime.iso8601 * Date._rfc3339 * Date.rfc3339 * DateTime.rfc3339 * Date._xmlschema * Date.xmlschema * DateTime.xmlschema * Date._rfc2822 * Date.rfc2822 * DateTime.rfc2822 * Date._rfc822 * Date.rfc822 * DateTime.rfc822 * Date._jisx0301 * Date.jisx0301 * DateTime.jisx0301
static int do_show_master_status(MYSQL *mysql_con, int consistent_binlog_pos) { MYSQL_ROW row; MYSQL_RES *UNINIT_VAR(master); char binlog_pos_file[FN_REFLEN]; char binlog_pos_offset[LONGLONG_LEN+1]; char *file, *offset; const char *comment_prefix= (opt_master_data == MYSQL_OPT_MASTER_DATA_COMMENTED_SQL) ? "-- " : ""; if (consistent_binlog_pos) { if(!check_consistent_binlog_pos(binlog_pos_file, binlog_pos_offset)) return 1; file= binlog_pos_file; offset= binlog_pos_offset; } else { if (mysql_query_with_error_report(mysql_con, &master, "SHOW MASTER STATUS")) return 1; row= mysql_fetch_row(master); if (row && row[0] && row[1]) { file= row[0]; offset= row[1]; } else { mysql_free_result(master); if (!ignore_errors) { /* SHOW MASTER STATUS reports nothing and --force is not enabled */ fprintf(stderr, "%s: Error: Binlogging on server not active\n", my_progname_short); maybe_exit(EX_MYSQLERR); return 1; } else { return 0; } } } /* SHOW MASTER STATUS reports file and position */ print_comment(md_result_file, 0, "\n--\n-- Position to start replication or point-in-time " "recovery from\n--\n\n"); fprintf(md_result_file, "%sCHANGE MASTER TO MASTER_LOG_FILE='%s', MASTER_LOG_POS=%s;\n", comment_prefix, file, offset); check_io(md_result_file); if (!consistent_binlog_pos) mysql_free_result(master); return 0; }
0
[]
server
5a43a31ee81bc181eeb5ef2bf0704befa6e0594d
241,528,946,561,725,100,000,000,000,000,000,000,000
60
mysqldump: comments and identifiers with new lines. Don't let identifiers containing new lines break out of a comment.
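The fix is conceptually simple: when an identifier is echoed inside a dump comment, an embedded newline must not be allowed to end the comment line, or the remainder of the identifier is emitted as raw SQL. The helper below is a hedged sketch of that sanitisation, not mysqldump's actual code; the comment text and function name are invented.

#include <stdio.h>

/* Print an identifier inside a "-- ..." comment, replacing newlines so the
 * comment cannot be broken out of. Illustrative only. */
void print_comment_ident(FILE *out, const char *ident)
{
    fputs("-- Table structure for table ", out);
    for (const char *p = ident; *p; p++)
        fputc((*p == '\n' || *p == '\r') ? ' ' : *p, out);
    fputc('\n', out);
}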
MagickExport MagickBooleanType IsOptionMember(const char *option, const char *options) { char **option_list, *string; int number_options; MagickBooleanType member; register ssize_t i; /* Is option a member of the options list? */ if (options == (const char *) NULL) return(MagickFalse); string=ConstantString(options); (void) SubstituteString(&string,","," "); option_list=StringToArgv(string,&number_options); string=DestroyString(string); if (option_list == (char **) NULL) return(MagickFalse); member=MagickFalse; option_list[0]=DestroyString(option_list[0]); for (i=1; i < (ssize_t) number_options; i++) { if ((*option_list[i] == '!') && (LocaleCompare(option,option_list[i]+1) == 0)) break; if (GlobExpression(option,option_list[i],MagickTrue) != MagickFalse) { member=MagickTrue; break; } option_list[i]=DestroyString(option_list[i]); } for ( ; i < (ssize_t) number_options; i++) option_list[i]=DestroyString(option_list[i]); option_list=(char **) RelinquishMagickMemory(option_list); return(member); }
0
[ "CWE-399" ]
ImageMagick
6790815c75bdea0357df5564345847856e995d6b
327,436,979,432,365,880,000,000,000,000,000,000,000
46
Fixed memory leak in IsOptionMember.
static void ape_unpack_stereo(APEContext *ctx, int count) { int32_t left, right; int32_t *decoded0 = ctx->decoded[0]; int32_t *decoded1 = ctx->decoded[1]; if ((ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) == APE_FRAMECODE_STEREO_SILENCE) { /* We are pure silence, so we're done. */ av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence stereo\n"); return; } ctx->entropy_decode_stereo(ctx, count); /* Now apply the predictor decoding */ ctx->predictor_decode_stereo(ctx, count); /* Decorrelate and scale to output depth */ while (count--) { left = *decoded1 - (*decoded0 / 2); right = left + *decoded0; *(decoded0++) = left; *(decoded1++) = right; } }
0
[ "CWE-125" ]
FFmpeg
ba4beaf6149f7241c8bd85fe853318c2f6837ad0
110,684,308,783,143,660,000,000,000,000,000,000,000
26
avcodec/apedec: Fix integer overflow Fixes: out of array access Fixes: PoC.ape and others Found-by: Bingchang, Liu@VARAS of IIE Signed-off-by: Michael Niedermayer <[email protected]>
bool Item_field::collect_item_field_processor(void *arg) { DBUG_ENTER("Item_field::collect_item_field_processor"); DBUG_PRINT("info", ("%s", field->field_name ? field->field_name : "noname")); List<Item_field> *item_list= (List<Item_field>*) arg; List_iterator<Item_field> item_list_it(*item_list); Item_field *curr_item; while ((curr_item= item_list_it++)) { if (curr_item->eq(this, 1)) DBUG_RETURN(FALSE); /* Already in the set. */ } item_list->push_back(this); DBUG_RETURN(FALSE); }
0
[ "CWE-89" ]
server
b5e16a6e0381b28b598da80b414168ce9a5016e5
290,728,146,390,896,500,000,000,000,000,000,000,000
15
MDEV-26061 MariaDB server crash at Field::set_default * Item_default_value::fix_fields creates a copy of its argument's field. * Field::default_value is changed when its expression is prepared in unpack_vcol_info_from_frm() This means we must unpack any vcol expression that includes DEFAULT(x) strictly after unpacking x->default_value. To avoid building and solving this dependency graph on every table open, we update Item_default_value::field->default_value after all vcols are unpacked and fixed.
void proxyToQuanX(std::vector<Proxy> &nodes, INIReader &ini, std::vector<RulesetContent> &ruleset_content_array, const ProxyGroupConfigs &extra_proxy_group, extra_settings &ext) { std::string type; std::string remark, hostname, port, method; std::string password, plugin, pluginopts; std::string id, transproto, host, path; std::string protocol, protoparam, obfs, obfsparam; std::string proxyStr; tribool udp, tfo, scv, tls13; std::vector<Proxy> nodelist; string_array remarks_list; ini.SetCurrentSection("server_local"); ini.EraseSection(); for(Proxy &x : nodes) { if(ext.append_proxy_type) x.Remark = "[" + type + "] " + x.Remark; processRemark(x.Remark, remark, remarks_list); std::string &hostname = x.Hostname, &method = x.EncryptMethod, &id = x.UserId, &transproto = x.TransferProtocol, &host = x.Host, &path = x.Path, &password = x.Password, &plugin = x.Plugin, &pluginopts = x.PluginOption, &protocol = x.Protocol, &protoparam = x.ProtocolParam, &obfs = x.OBFS, &obfsparam = x.OBFSParam, &username = x.Username; std::string port = std::to_string(x.Port); bool &tlssecure = x.TLSSecure; udp = ext.udp; tfo = ext.tfo; scv = ext.skip_cert_verify; tls13 = ext.tls13; udp.define(x.UDP); tfo.define(x.TCPFastOpen); scv.define(x.AllowInsecure); tls13.define(x.TLS13); switch(x.Type) { case ProxyType::VMess: if(method == "auto") method = "chacha20-ietf-poly1305"; proxyStr = "vmess = " + hostname + ":" + port + ", method=" + method + ", password=" + id + ", aead=" + (x.AlterId == 0 ? "true" : "false"); if(tlssecure && !tls13.is_undef()) proxyStr += ", tls13=" + std::string(tls13 ? "true" : "false"); if(transproto == "ws") { if(tlssecure) proxyStr += ", obfs=wss"; else proxyStr += ", obfs=ws"; proxyStr += ", obfs-host=" + host + ", obfs-uri=" + path; } else if(tlssecure) proxyStr += ", obfs=over-tls, obfs-host=" + host; break; case ProxyType::Shadowsocks: proxyStr = "shadowsocks = " + hostname + ":" + port + ", method=" + method + ", password=" + password; if(!plugin.empty()) { switch(hash_(plugin)) { case "simple-obfs"_hash: case "obfs-local"_hash: if(!pluginopts.empty()) proxyStr += ", " + replaceAllDistinct(pluginopts, ";", ", "); break; case "v2ray-plugin"_hash: pluginopts = replaceAllDistinct(pluginopts, ";", "&"); plugin = getUrlArg(pluginopts, "mode") == "websocket" ? "ws" : ""; host = getUrlArg(pluginopts, "host"); path = getUrlArg(pluginopts, "path"); tlssecure = pluginopts.find("tls") != pluginopts.npos; if(tlssecure && plugin == "ws") { plugin += 's'; if(!tls13.is_undef()) proxyStr += ", tls13=" + std::string(tls13 ? "true" : "false"); } proxyStr += ", obfs=" + plugin; if(!host.empty()) proxyStr += ", obfs-host=" + host; if(!path.empty()) proxyStr += ", obfs-uri=" + path; break; default: continue; } } break; case ProxyType::ShadowsocksR: proxyStr = "shadowsocks = " + hostname + ":" + port + ", method=" + method + ", password=" + password + ", ssr-protocol=" + protocol; if(!protoparam.empty()) proxyStr += ", ssr-protocol-param=" + protoparam; proxyStr += ", obfs=" + obfs; if(!obfsparam.empty()) proxyStr += ", obfs-host=" + obfsparam; break; case ProxyType::HTTP: case ProxyType::HTTPS: proxyStr = "http = " + hostname + ":" + port + ", username=" + (username.empty() ? "none" : username) + ", password=" + (password.empty() ? "none" : password); if(tlssecure) { proxyStr += ", over-tls=true"; if(!tls13.is_undef()) proxyStr += ", tls13=" + std::string(tls13 ? 
"true" : "false"); } else { proxyStr += ", over-tls=false"; } break; case ProxyType::Trojan: proxyStr = "trojan = " + hostname + ":" + port + ", password=" + password; if(tlssecure) { proxyStr += ", over-tls=true, tls-host=" + host; if(!tls13.is_undef()) proxyStr += ", tls13=" + std::string(tls13 ? "true" : "false"); } else { proxyStr += ", over-tls=false"; } break; default: continue; } if(!tfo.is_undef()) proxyStr += ", fast-open=" + tfo.get_str(); if(!udp.is_undef()) proxyStr += ", udp-relay=" + udp.get_str(); if(tlssecure && !scv.is_undef() && (x.Type == ProxyType::HTTP || x.Type == ProxyType::Trojan)) proxyStr += ", tls-verification=" + scv.reverse().get_str(); proxyStr += ", tag=" + remark; ini.Set("{NONAME}", proxyStr); remarks_list.emplace_back(std::move(remark)); nodelist.emplace_back(x); } if(ext.nodelist) return; string_multimap original_groups; string_array filtered_nodelist; ini.SetCurrentSection("policy"); ini.GetItems(original_groups); ini.EraseSection(); std::string singlegroup; std::string proxies; string_array vArray; for(const ProxyGroupConfig &x : extra_proxy_group) { eraseElements(filtered_nodelist); switch(x.Type) { case ProxyGroupType::Select: type = "static"; break; case ProxyGroupType::URLTest: type = "url-latency-benchmark"; break; case ProxyGroupType::Fallback: type = "available"; break; case ProxyGroupType::LoadBalance: type = "round-robin"; break; case ProxyGroupType::SSID: type = "ssid"; for(auto iter = x.Proxies.begin(); iter != x.Proxies.end(); iter++) filtered_nodelist.emplace_back(replaceAllDistinct(*iter, "=", ":")); break; default: continue; } if(x.Type != ProxyGroupType::SSID) { for(const auto &y : x.Proxies) groupGenerate(y, nodelist, filtered_nodelist, true, ext); if(filtered_nodelist.empty()) filtered_nodelist.emplace_back("direct"); if(filtered_nodelist.size() < 2) // force groups with 1 node to be static type = "static"; } auto iter = std::find_if(original_groups.begin(), original_groups.end(), [&](const string_multimap::value_type &n) { std::string groupdata = n.second; std::string::size_type cpos = groupdata.find(","); if(cpos != groupdata.npos) return trim(groupdata.substr(0, cpos)) == x.Name; else return false; }); if(iter != original_groups.end()) { vArray = split(iter->second, ","); if(vArray.size() > 1) { if(trim(vArray[vArray.size() - 1]).find("img-url") == 0) filtered_nodelist.emplace_back(trim(vArray[vArray.size() - 1])); } } proxies = join(filtered_nodelist, ", "); singlegroup = type + "=" + x.Name + ", " + proxies; ini.Set("{NONAME}", singlegroup); } if(ext.enable_rule_generator) rulesetToSurge(ini, ruleset_content_array, -1, ext.overwrite_original_rules, ext.managed_config_prefix); //process scripts string_multimap scripts; std::string content, title, url; const std::string pattern = "^(.*? url script-.*? 
)(.*?)$"; if(ini.SectionExist("rewrite_local") && !ext.quanx_dev_id.empty()) { ini.GetItems("rewrite_local", scripts); ini.EraseSection("rewrite_local"); ini.SetCurrentSection("rewrite_local"); for(auto &x : scripts) { title = x.first; if(title != "{NONAME}") content = title + "=" + x.second; else content = x.second; if(regMatch(content, pattern)) { url = regReplace(content, pattern, "$2"); if(isLink(url)) { url = ext.managed_config_prefix + "/qx-script?id=" + ext.quanx_dev_id + "&url=" + urlSafeBase64Encode(url); content = regReplace(content, pattern, "$1") + url; } } ini.Set("{NONAME}", content); } } eraseElements(scripts); string_size pos; if(ini.SectionExist("rewrite_remote") && !ext.quanx_dev_id.empty()) { ini.GetItems("rewrite_remote", scripts); ini.EraseSection("rewrite_remote"); ini.SetCurrentSection("rewrite_remote"); for(auto &x : scripts) { title = x.first; if(title != "{NONAME}") content = title + "=" + x.second; else content = x.second; if(isLink(content)) { pos = content.find(","); url = ext.managed_config_prefix + "/qx-rewrite?id=" + ext.quanx_dev_id + "&url=" + urlSafeBase64Encode(content.substr(0, pos)); if(pos != content.npos) url += content.substr(pos); content = url; } ini.Set("{NONAME}", content); } } }
0
[ "CWE-434", "CWE-94" ]
subconverter
ce8d2bd0f13f05fcbd2ed90755d097f402393dd3
125,721,630,267,152,230,000,000,000,000,000,000,000
273
Enhancements Add authorization check before loading scripts. Add detailed logs when loading preference settings.
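The commit message above ("Add authorization check before loading scripts") describes gating remote-script loading behind a credential check. A minimal C sketch of that general idea follows; subconverter itself is C++, and every name here (token_authorized, its parameters) is invented for illustration rather than taken from the project.

#include <string.h>
#include <stdbool.h>

/* Require a caller-supplied token to match the configured one before a
 * remote script URL is rewritten or fetched on the caller's behalf. */
static bool token_authorized(const char *supplied, const char *configured)
{
    if (!supplied || !configured || configured[0] == '\0')
        return false;                       /* no token configured: deny */
    size_t n = strlen(configured);
    if (strlen(supplied) != n)
        return false;
    unsigned char diff = 0;                 /* constant-time comparison */
    for (size_t i = 0; i < n; i++)
        diff |= (unsigned char)(supplied[i] ^ configured[i]);
    return diff == 0;
}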
size_t writeFile(const DataBuf& buf, const std::string& path) { FileIo file(path); if (file.open("wb") != 0) { throw Error(kerFileOpenFailed, path, "wb", strError()); } return file.write(buf.pData_, buf.size_); }
0
[ "CWE-125" ]
exiv2
bd0afe0390439b2c424d881c8c6eb0c5624e31d9
337,338,579,654,006,680,000,000,000,000,000,000,000
8
Add bounds check to MemIo::seek(). (#944) - Regression test for missing bounds check in MemIo::seek() - Add bounds check to MemIo::seek(), this fixes CVE-2019-13504
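The fix adds a bounds check to MemIo::seek(). A minimal C sketch of such a check on an in-memory buffer, assuming invented names (struct mem_io, mem_io_seek) rather than the Exiv2 C++ API: reject any target position outside [0, size] before updating the cursor.

#include <stddef.h>
#include <stdint.h>

struct mem_io { const uint8_t *data; size_t size; size_t pos; };

/* whence: 0 = from start, 1 = from current position, 2 = from end.
 * Returns 0 on success, -1 if the resulting position would leave the buffer. */
static int mem_io_seek(struct mem_io *io, long offset, int whence)
{
    int64_t base = (whence == 0) ? 0
                 : (whence == 1) ? (int64_t)io->pos
                                 : (int64_t)io->size;
    int64_t target = base + offset;
    if (target < 0 || target > (int64_t)io->size)
        return -1;                  /* the bounds check the fix introduces */
    io->pos = (size_t)target;
    return 0;
}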
RQueueItem(SQLQuery* Q, MySQLresult* R) : q(Q), r(R) {}
0
[ "CWE-476" ]
inspircd
2cc35d8625b7ea5cbd1d1ebb116aff86c5280162
323,525,576,707,577,100,000,000,000,000,000,000,000
1
Initialise and deallocate the MySQL library correctly.
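The message refers to pairing MySQL client-library initialisation with deallocation. A small C sketch of that pattern using the documented libmysqlclient calls mysql_library_init()/mysql_library_end(); the header path and error handling are assumptions, and this is not the InspIRCd module code (which is C++).

#include <mysql/mysql.h>
#include <stdio.h>

int main(void)
{
    /* Must succeed before any other MYSQL call is made. */
    if (mysql_library_init(0, NULL, NULL) != 0) {
        fprintf(stderr, "could not initialise MySQL client library\n");
        return 1;
    }

    MYSQL *conn = mysql_init(NULL);
    if (conn)
        mysql_close(conn);

    mysql_library_end();   /* matching deallocation on shutdown/unload */
    return 0;
}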
vc4_reset(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); DRM_INFO("Resetting GPU.\n"); mutex_lock(&vc4->power_lock); if (vc4->power_refcount) { /* Power the device off and back on the by dropping the * reference on runtime PM. */ pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev); pm_runtime_get_sync(&vc4->v3d->pdev->dev); } mutex_unlock(&vc4->power_lock); vc4_irq_reset(dev); /* Rearm the hangcheck -- another job might have been waiting * for our hung one to get kicked off, and vc4_irq_reset() * would have started it. */ vc4_queue_hangcheck(dev); }
0
[ "CWE-190", "CWE-703" ]
linux
0f2ff82e11c86c05d051cae32b58226392d33bbf
53,607,064,452,253,440,000,000,000,000,000,000,000
24
drm/vc4: Fix an integer overflow in temporary allocation layout. We copy the unvalidated ioctl arguments from the user into kernel temporary memory to run the validation from, to avoid a race where the user updates the unvalidate contents in between validating them and copying them into the validated BO. However, in setting up the layout of the kernel side, we failed to check one of the additions (the roundup() for shader_rec_offset) against integer overflow, allowing a nearly MAX_UINT value of bin_cl_size to cause us to under-allocate the temporary space that we then copy_from_user into. Reported-by: Murray McAllister <[email protected]> Signed-off-by: Eric Anholt <[email protected]> Fixes: d5b1a78a772f ("drm/vc4: Add support for drawing 3D frames.")
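The commit describes an unchecked roundup() on an attacker-controlled size feeding a temporary allocation. A hedged C sketch of the class of check involved, with invented names (roundup_checked, layout_temporary) rather than the actual vc4 code:

#include <stddef.h>
#include <stdint.h>

/* Round v up to a multiple of align (a power of two), failing on overflow. */
static int roundup_checked(size_t v, size_t align, size_t *out)
{
    if (v > SIZE_MAX - (align - 1))
        return -1;                        /* v + align - 1 would wrap */
    *out = (v + align - 1) & ~(align - 1);
    return 0;
}

static int layout_temporary(size_t bin_cl_size, size_t shader_rec_size, size_t *total)
{
    size_t shader_rec_offset;
    if (roundup_checked(bin_cl_size, 16, &shader_rec_offset))
        return -1;
    if (shader_rec_size > SIZE_MAX - shader_rec_offset)
        return -1;                        /* second addition checked as well */
    *total = shader_rec_offset + shader_rec_size;
    return 0;
}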
void net_check_clients(void) { NetClientState *nc; int i; net_hub_check_clients(); QTAILQ_FOREACH(nc, &net_clients, next) { if (!nc->peer) { warn_report("%s %s has no peer", nc->info->type == NET_CLIENT_DRIVER_NIC ? "nic" : "netdev", nc->name); } } /* Check that all NICs requested via -net nic actually got created. * NICs created via -device don't need to be checked here because * they are always instantiated. */ for (i = 0; i < MAX_NICS; i++) { NICInfo *nd = &nd_table[i]; if (nd->used && !nd->instantiated) { warn_report("requested NIC (%s, model %s) " "was not created (not supported by this machine?)", nd->name ? nd->name : "anonymous", nd->model ? nd->model : "unspecified"); } } }
0
[ "CWE-190" ]
qemu
25c01bd19d0e4b66f357618aeefda1ef7a41e21a
173,814,260,786,750,940,000,000,000,000,000,000,000
30
net: drop too large packet early We try to detect and drop too large packet (>INT_MAX) in 1592a9947036 ("net: ignore packet size greater than INT_MAX") during packet delivering. Unfortunately, this is not sufficient as we may hit another integer overflow when trying to queue such large packet in qemu_net_queue_append_iov(): - size of the allocation may overflow on 32bit - packet->size is integer which may overflow even on 64bit Fixing this by moving the check to qemu_sendv_packet_async() which is the entrance of all networking codes and reduce the limit to NET_BUFSIZE to be more conservative. This works since: - For the callers that call qemu_sendv_packet_async() directly, they only care about if zero is returned to determine whether to prevent the source from producing more packets. A callback will be triggered if peer can accept more then source could be enabled. This is usually used by high speed networking implementation like virtio-net or netmap. - For the callers that call qemu_sendv_packet() that calls qemu_sendv_packet_async() indirectly, they often ignore the return value. In this case qemu will just the drop packets if peer can't receive. Qemu will copy the packet if it was queued. So it was safe for both kinds of the callers to assume the packet was sent. Since we move the check from qemu_deliver_packet_iov() to qemu_sendv_packet_async(), it would be safer to make qemu_deliver_packet_iov() static to prevent any external user in the future. This is a revised patch of CVE-2018-17963. Cc: [email protected] Cc: Li Qiang <[email protected]> Fixes: 1592a9947036 ("net: ignore packet size greater than INT_MAX") Reported-by: Li Qiang <[email protected]> Reviewed-by: Li Qiang <[email protected]> Signed-off-by: Jason Wang <[email protected]> Reviewed-by: Thomas Huth <[email protected]> Message-id: [email protected] Signed-off-by: Peter Maydell <[email protected]>
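The message moves the size check to the single entry point and caps packets at NET_BUFSIZE. A rough C sketch of that idea with a made-up cap value and stand-in types; the -1 "drop" convention here is a simplification of the real code's return semantics.

#include <stddef.h>
#include <sys/types.h>
#include <sys/uio.h>

#define MAX_PACKET (256 * 1024)   /* illustrative cap, not QEMU's value */

static ssize_t send_packet_checked(const struct iovec *iov, int iovcnt)
{
    size_t total = 0;
    for (int i = 0; i < iovcnt; i++) {
        if (iov[i].iov_len > MAX_PACKET - total)
            return -1;            /* oversized: reject at the entry point */
        total += iov[i].iov_len;
    }
    /* ... copy or queue at most `total` bytes ... */
    return (ssize_t)total;
}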
static int post_copy_siginfo_from_user32(kernel_siginfo_t *to, const struct compat_siginfo *from) { clear_siginfo(to); to->si_signo = from->si_signo; to->si_errno = from->si_errno; to->si_code = from->si_code; switch(siginfo_layout(from->si_signo, from->si_code)) { case SIL_KILL: to->si_pid = from->si_pid; to->si_uid = from->si_uid; break; case SIL_TIMER: to->si_tid = from->si_tid; to->si_overrun = from->si_overrun; to->si_int = from->si_int; break; case SIL_POLL: to->si_band = from->si_band; to->si_fd = from->si_fd; break; case SIL_FAULT: to->si_addr = compat_ptr(from->si_addr); #ifdef __ARCH_SI_TRAPNO to->si_trapno = from->si_trapno; #endif break; case SIL_FAULT_MCEERR: to->si_addr = compat_ptr(from->si_addr); #ifdef __ARCH_SI_TRAPNO to->si_trapno = from->si_trapno; #endif to->si_addr_lsb = from->si_addr_lsb; break; case SIL_FAULT_BNDERR: to->si_addr = compat_ptr(from->si_addr); #ifdef __ARCH_SI_TRAPNO to->si_trapno = from->si_trapno; #endif to->si_lower = compat_ptr(from->si_lower); to->si_upper = compat_ptr(from->si_upper); break; case SIL_FAULT_PKUERR: to->si_addr = compat_ptr(from->si_addr); #ifdef __ARCH_SI_TRAPNO to->si_trapno = from->si_trapno; #endif to->si_pkey = from->si_pkey; break; case SIL_CHLD: to->si_pid = from->si_pid; to->si_uid = from->si_uid; to->si_status = from->si_status; #ifdef CONFIG_X86_X32_ABI if (in_x32_syscall()) { to->si_utime = from->_sifields._sigchld_x32._utime; to->si_stime = from->_sifields._sigchld_x32._stime; } else #endif { to->si_utime = from->si_utime; to->si_stime = from->si_stime; } break; case SIL_RT: to->si_pid = from->si_pid; to->si_uid = from->si_uid; to->si_int = from->si_int; break; case SIL_SYS: to->si_call_addr = compat_ptr(from->si_call_addr); to->si_syscall = from->si_syscall; to->si_arch = from->si_arch; break; } return 0; }
0
[ "CWE-190" ]
linux
d1e7fd6462ca9fc76650fbe6ca800e35b24267da
254,289,020,798,927,830,000,000,000,000,000,000,000
77
signal: Extend exec_id to 64bits Replace the 32bit exec_id with a 64bit exec_id to make it impossible to wrap the exec_id counter. With care an attacker can cause exec_id wrap and send arbitrary signals to a newly exec'd parent. This bypasses the signal sending checks if the parent changes their credentials during exec. The severity of this problem can been seen that in my limited testing of a 32bit exec_id it can take as little as 19s to exec 65536 times. Which means that it can take as little as 14 days to wrap a 32bit exec_id. Adam Zabrocki has succeeded wrapping the self_exe_id in 7 days. Even my slower timing is in the uptime of a typical server. Which means self_exec_id is simply a speed bump today, and if exec gets noticably faster self_exec_id won't even be a speed bump. Extending self_exec_id to 64bits introduces a problem on 32bit architectures where reading self_exec_id is no longer atomic and can take two read instructions. Which means that is is possible to hit a window where the read value of exec_id does not match the written value. So with very lucky timing after this change this still remains expoiltable. I have updated the update of exec_id on exec to use WRITE_ONCE and the read of exec_id in do_notify_parent to use READ_ONCE to make it clear that there is no locking between these two locations. Link: https://lore.kernel.org/kernel-hardening/[email protected] Fixes: 2.3.23pre2 Cc: [email protected] Signed-off-by: "Eric W. Biederman" <[email protected]>
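The fix widens the exec counter to 64 bits and makes the unsynchronised read/write explicit. A loose C sketch of that idea using C11 atomics in place of the kernel's READ_ONCE/WRITE_ONCE; the struct and function names are invented.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct task {
    _Atomic uint64_t self_exec_id;   /* was 32-bit; wrapping it allowed signal spoofing */
    uint64_t parent_exec_id;         /* snapshot taken when the child was forked */
};

static void on_exec(struct task *t)
{
    atomic_fetch_add_explicit(&t->self_exec_id, 1, memory_order_relaxed);
}

static bool exec_id_matches(struct task *parent, const struct task *child)
{
    return child->parent_exec_id ==
           atomic_load_explicit(&parent->self_exec_id, memory_order_relaxed);
}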
de265_error decoder_context::decode_some(bool* did_work) { de265_error err = DE265_OK; *did_work = false; if (image_units.empty()) { return DE265_OK; } // nothing to do // decode something if there is work to do if ( ! image_units.empty() ) { // && ! image_units[0]->slice_units.empty() ) { image_unit* imgunit = image_units[0]; slice_unit* sliceunit = imgunit->get_next_unprocessed_slice_segment(); if (sliceunit != NULL) { //pop_front(imgunit->slice_units); if (sliceunit->flush_reorder_buffer) { dpb.flush_reorder_buffer(); } *did_work = true; //err = decode_slice_unit_sequential(imgunit, sliceunit); err = decode_slice_unit_parallel(imgunit, sliceunit); if (err) { return err; } //delete sliceunit; } } // if we decoded all slices of the current image and there will not // be added any more slices to the image, output the image if ( ( image_units.size()>=2 && image_units[0]->all_slice_segments_processed()) || ( image_units.size()>=1 && image_units[0]->all_slice_segments_processed() && nal_parser.number_of_NAL_units_pending()==0 && (nal_parser.is_end_of_stream() || nal_parser.is_end_of_frame()) )) { image_unit* imgunit = image_units[0]; *did_work=true; // mark all CTBs as decoded even if they are not, because faulty input // streams could miss part of the picture // TODO: this will not work when slice decoding is parallel to post-filtering, // so we will have to replace this with keeping track of which CTB should have // been decoded (but aren't because of the input stream being faulty) imgunit->img->mark_all_CTB_progress(CTB_PROGRESS_PREFILTER); // run post-processing filters (deblocking & SAO) if (img->decctx->num_worker_threads) run_postprocessing_filters_parallel(imgunit); else run_postprocessing_filters_sequential(imgunit->img); // process suffix SEIs for (int i=0;i<imgunit->suffix_SEIs.size();i++) { const sei_message& sei = imgunit->suffix_SEIs[i]; err = process_sei(&sei, imgunit->img); if (err != DE265_OK) break; } push_picture_to_output_queue(imgunit); // remove just decoded image unit from queue delete imgunit; pop_front(image_units); } return err; }
0
[ "CWE-416" ]
libde265
f538254e4658ef5ea4e233c2185dcbfd165e8911
303,673,555,649,840,760,000,000,000,000,000,000,000
90
fix streams where SPS image size changes without refreshing PPS (#299)
static int mctp_serial_open(struct tty_struct *tty) { struct mctp_serial *dev; struct net_device *ndev; char name[32]; int idx, rc; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (!tty->ops->write) return -EOPNOTSUPP; idx = ida_alloc(&mctp_serial_ida, GFP_KERNEL); if (idx < 0) return idx; snprintf(name, sizeof(name), "mctpserial%d", idx); ndev = alloc_netdev(sizeof(*dev), name, NET_NAME_ENUM, mctp_serial_setup); if (!ndev) { rc = -ENOMEM; goto free_ida; } dev = netdev_priv(ndev); dev->idx = idx; dev->tty = tty; dev->netdev = ndev; dev->txstate = STATE_IDLE; dev->rxstate = STATE_IDLE; spin_lock_init(&dev->lock); INIT_WORK(&dev->tx_work, mctp_serial_tx_work); rc = register_netdev(ndev); if (rc) goto free_netdev; tty->receive_room = 64 * 1024; tty->disc_data = dev; return 0; free_netdev: free_netdev(ndev); free_ida: ida_free(&mctp_serial_ida, idx); return rc; }
0
[]
linux
6c342ce2239c182c2428ce5a44cb32330434ae6e
194,518,310,080,049,700,000,000,000,000,000,000,000
50
mctp: serial: Cancel pending work from ndo_uninit handler We cannot do the cancel_work_sync from after the unregister_netdev, as the dev pointer is no longer valid, causing a uaf on ldisc unregister (or device close). Instead, do the cancel_work_sync from the ndo_uninit op, where the dev still exists, but the queue has stopped. Fixes: 7bd9890f3d74 ("mctp: serial: cancel tx work on ldisc close") Reported-by: Luo Likang <[email protected]> Tested-by: Luo Likang <[email protected]> Signed-off-by: Jeremy Kerr <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
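The point of the fix is ordering: deferred work that dereferences the device must be cancelled in the uninit hook, while the pointer is still valid, not after the device is torn down. A toy C sketch of that ordering with made-up types standing in for net_device/work_struct:

#include <stdlib.h>

struct dev;
struct work { void (*fn)(struct dev *); int pending; };
struct dev  { struct work tx_work; };

static void cancel_work(struct work *w) { w->pending = 0; w->fn = NULL; }

static void dev_uninit(struct dev *d)
{
    cancel_work(&d->tx_work);   /* runs while d is still valid */
}

static void dev_destroy(struct dev *d)
{
    dev_uninit(d);              /* ndo_uninit-equivalent, called before free */
    free(d);                    /* touching d->tx_work after this would be a UAF */
}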
bool testToStringCharsRequiredHelper(const wchar_t * text) { // Parse UriParserStateW state; UriUriW uri; state.uri = &uri; int res = uriParseUriW(&state, text); if (res != 0) { uriFreeUriMembersW(&uri); return false; } // Required space? int charsRequired; if (uriToStringCharsRequiredW(&uri, &charsRequired) != 0) { uriFreeUriMembersW(&uri); return false; } // Minimum wchar_t * buffer = new wchar_t[charsRequired + 1]; if (uriToStringW(buffer, &uri, charsRequired + 1, NULL) != 0) { uriFreeUriMembersW(&uri); delete [] buffer; return false; } // One less than minimum if (uriToStringW(buffer, &uri, charsRequired, NULL) == 0) { uriFreeUriMembersW(&uri); delete [] buffer; return false; } uriFreeUriMembersW(&uri); delete [] buffer; return true; }
0
[ "CWE-787" ]
uriparser
864f5d4c127def386dd5cc926ad96934b297f04e
193,205,957,190,229,100,000,000,000,000,000,000,000
37
UriQuery.c: Fix out-of-bounds-write in ComposeQuery and ...Ex Reported by Google Autofuzz team
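The fixed out-of-bounds write was in query composition. A generic C sketch of the defensive pattern that applies: every append into a caller-provided buffer is checked against the space actually remaining. The helper name and signature are invented, not uriparser API.

#include <stddef.h>
#include <string.h>

/* Appends len bytes of src to dst at *pos, keeping room for a terminator.
 * Returns 0 on success, -1 if the write would run past dst_size. */
static int append_checked(char *dst, size_t dst_size, size_t *pos,
                          const char *src, size_t len)
{
    if (dst_size == 0 || *pos >= dst_size)
        return -1;
    if (len > dst_size - *pos - 1)
        return -1;                  /* would overflow the output buffer */
    memcpy(dst + *pos, src, len);
    *pos += len;
    dst[*pos] = '\0';
    return 0;
}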
PHP_MINIT_FUNCTION(http_url) { zend_class_entry ce = {0}; INIT_NS_CLASS_ENTRY(ce, "http", "Url", php_http_url_methods); php_http_url_class_entry = zend_register_internal_class(&ce TSRMLS_CC); zend_declare_property_null(php_http_url_class_entry, ZEND_STRL("scheme"), ZEND_ACC_PUBLIC TSRMLS_CC); zend_declare_property_null(php_http_url_class_entry, ZEND_STRL("user"), ZEND_ACC_PUBLIC TSRMLS_CC); zend_declare_property_null(php_http_url_class_entry, ZEND_STRL("pass"), ZEND_ACC_PUBLIC TSRMLS_CC); zend_declare_property_null(php_http_url_class_entry, ZEND_STRL("host"), ZEND_ACC_PUBLIC TSRMLS_CC); zend_declare_property_null(php_http_url_class_entry, ZEND_STRL("port"), ZEND_ACC_PUBLIC TSRMLS_CC); zend_declare_property_null(php_http_url_class_entry, ZEND_STRL("path"), ZEND_ACC_PUBLIC TSRMLS_CC); zend_declare_property_null(php_http_url_class_entry, ZEND_STRL("query"), ZEND_ACC_PUBLIC TSRMLS_CC); zend_declare_property_null(php_http_url_class_entry, ZEND_STRL("fragment"), ZEND_ACC_PUBLIC TSRMLS_CC); zend_declare_class_constant_long(php_http_url_class_entry, ZEND_STRL("REPLACE"), PHP_HTTP_URL_REPLACE TSRMLS_CC); zend_declare_class_constant_long(php_http_url_class_entry, ZEND_STRL("JOIN_PATH"), PHP_HTTP_URL_JOIN_PATH TSRMLS_CC); zend_declare_class_constant_long(php_http_url_class_entry, ZEND_STRL("JOIN_QUERY"), PHP_HTTP_URL_JOIN_QUERY TSRMLS_CC); zend_declare_class_constant_long(php_http_url_class_entry, ZEND_STRL("STRIP_USER"), PHP_HTTP_URL_STRIP_USER TSRMLS_CC); zend_declare_class_constant_long(php_http_url_class_entry, ZEND_STRL("STRIP_PASS"), PHP_HTTP_URL_STRIP_PASS TSRMLS_CC); zend_declare_class_constant_long(php_http_url_class_entry, ZEND_STRL("STRIP_AUTH"), PHP_HTTP_URL_STRIP_AUTH TSRMLS_CC); zend_declare_class_constant_long(php_http_url_class_entry, ZEND_STRL("STRIP_PORT"), PHP_HTTP_URL_STRIP_PORT TSRMLS_CC); zend_declare_class_constant_long(php_http_url_class_entry, ZEND_STRL("STRIP_PATH"), PHP_HTTP_URL_STRIP_PATH TSRMLS_CC); zend_declare_class_constant_long(php_http_url_class_entry, ZEND_STRL("STRIP_QUERY"), PHP_HTTP_URL_STRIP_QUERY TSRMLS_CC); zend_declare_class_constant_long(php_http_url_class_entry, ZEND_STRL("STRIP_FRAGMENT"), PHP_HTTP_URL_STRIP_FRAGMENT TSRMLS_CC); zend_declare_class_constant_long(php_http_url_class_entry, ZEND_STRL("STRIP_ALL"), PHP_HTTP_URL_STRIP_ALL TSRMLS_CC); zend_declare_class_constant_long(php_http_url_class_entry, ZEND_STRL("FROM_ENV"), PHP_HTTP_URL_FROM_ENV TSRMLS_CC); zend_declare_class_constant_long(php_http_url_class_entry, ZEND_STRL("SANITIZE_PATH"), PHP_HTTP_URL_SANITIZE_PATH TSRMLS_CC); #ifdef PHP_HTTP_HAVE_WCHAR zend_declare_class_constant_long(php_http_url_class_entry, ZEND_STRL("PARSE_MBLOC"), PHP_HTTP_URL_PARSE_MBLOC TSRMLS_CC); #endif zend_declare_class_constant_long(php_http_url_class_entry, ZEND_STRL("PARSE_MBUTF8"), PHP_HTTP_URL_PARSE_MBUTF8 TSRMLS_CC); #if defined(PHP_HTTP_HAVE_IDN2) || defined(PHP_HTTP_HAVE_IDN) || defined(HAVE_UIDNA_IDNTOASCII) zend_declare_class_constant_long(php_http_url_class_entry, ZEND_STRL("PARSE_TOIDN"), PHP_HTTP_URL_PARSE_TOIDN TSRMLS_CC); #endif zend_declare_class_constant_long(php_http_url_class_entry, ZEND_STRL("PARSE_TOPCT"), PHP_HTTP_URL_PARSE_TOPCT TSRMLS_CC); return SUCCESS; }
0
[ "CWE-119" ]
ext-http
3724cd76a28be1d6049b5537232e97ac567ae1f5
316,607,491,946,066,460,000,000,000,000,000,000,000
41
fix bug #71719 (Buffer overflow in HTTP url parsing functions) The parser's offset was not reset when we softfail in scheme parsing and continue to parse a path. Thanks to hlt99 at blinkenshell dot org for the report.
XML_GetErrorCode(XML_Parser parser) { if (parser == NULL) return XML_ERROR_INVALID_ARGUMENT; return parser->m_errorCode; }
0
[ "CWE-611", "CWE-776", "CWE-415", "CWE-125" ]
libexpat
c20b758c332d9a13afbbb276d30db1d183a85d43
75,490,996,230,561,620,000,000,000,000,000,000,000
5
xmlparse.c: Deny internal entities closing the doctype
static ssize_t ims_pcu_ofn_bit_show(struct device *dev, struct device_attribute *dattr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); struct ims_pcu_ofn_bit_attribute *attr = container_of(dattr, struct ims_pcu_ofn_bit_attribute, dattr); int error; u8 data; mutex_lock(&pcu->cmd_mutex); error = ims_pcu_read_ofn_config(pcu, attr->addr, &data); mutex_unlock(&pcu->cmd_mutex); if (error) return error; return scnprintf(buf, PAGE_SIZE, "%d\n", !!(data & (1 << attr->nr)));
0
[ "CWE-703" ]
linux
a0ad220c96692eda76b2e3fd7279f3dcd1d8a8ff
14,109,866,079,787,031,000,000,000,000,000,000,000
20
Input: ims-pcu - sanity check against missing interfaces A malicious device missing interface can make the driver oops. Add sanity checking. Signed-off-by: Oliver Neukum <[email protected]> CC: [email protected] Signed-off-by: Dmitry Torokhov <[email protected]>
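The message says a device missing an expected interface could make the driver oops, so counts are validated before dereferencing. A simplified, self-contained C sketch with stand-in structs (not the kernel USB types):

#include <stddef.h>

struct endpoint   { int addr; };
struct altsetting { int num_endpoints; struct endpoint *endpoints; };
struct interface  { struct altsetting *cur_altsetting; };

static struct endpoint *get_expected_endpoint(struct interface *intf)
{
    if (!intf || !intf->cur_altsetting)
        return NULL;
    if (intf->cur_altsetting->num_endpoints < 1)
        return NULL;                       /* the missing sanity check */
    return &intf->cur_altsetting->endpoints[0];
}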
int cli_stmt_execute(MYSQL_STMT *stmt) { DBUG_ENTER("cli_stmt_execute"); if (stmt->param_count) { MYSQL *mysql= stmt->mysql; NET *net= &mysql->net; MYSQL_BIND *param, *param_end; char *param_data; ulong length; uint null_count; my_bool result; if (!stmt->bind_param_done) { set_stmt_error(stmt, CR_PARAMS_NOT_BOUND, unknown_sqlstate, NULL); DBUG_RETURN(1); } if (mysql->status != MYSQL_STATUS_READY || mysql->server_status & SERVER_MORE_RESULTS_EXISTS) { set_stmt_error(stmt, CR_COMMANDS_OUT_OF_SYNC, unknown_sqlstate, NULL); DBUG_RETURN(1); } if (net->vio) net_clear(net, 1); /* Sets net->write_pos */ else { set_stmt_errmsg(stmt, net); DBUG_RETURN(1); } /* Reserve place for null-marker bytes */ null_count= (stmt->param_count+7) /8; if (my_realloc_str(net, null_count + 1)) { set_stmt_errmsg(stmt, net); DBUG_RETURN(1); } bzero((char*) net->write_pos, null_count); net->write_pos+= null_count; param_end= stmt->params + stmt->param_count; /* In case if buffers (type) altered, indicate to server */ *(net->write_pos)++= (uchar) stmt->send_types_to_server; if (stmt->send_types_to_server) { if (my_realloc_str(net, 2 * stmt->param_count)) { set_stmt_errmsg(stmt, net); DBUG_RETURN(1); } /* Store types of parameters in first in first package that is sent to the server. */ for (param= stmt->params; param < param_end ; param++) store_param_type(&net->write_pos, param); } for (param= stmt->params; param < param_end; param++) { /* check if mysql_stmt_send_long_data() was used */ if (param->long_data_used) param->long_data_used= 0; /* Clear for next execute call */ else if (store_param(stmt, param)) DBUG_RETURN(1); } length= (ulong) (net->write_pos - net->buff); /* TODO: Look into avoding the following memdup */ if (!(param_data= my_memdup(net->buff, length, MYF(0)))) { set_stmt_error(stmt, CR_OUT_OF_MEMORY, unknown_sqlstate, NULL); DBUG_RETURN(1); } result= execute(stmt, param_data, length); stmt->send_types_to_server=0; my_free(param_data); DBUG_RETURN(result); } DBUG_RETURN((int) execute(stmt,0,0)); }
0
[]
mysql-server
3d8134d2c9b74bc8883ffe2ef59c168361223837
22,244,735,513,238,014,000,000,000,000,000,000,000
84
Bug#25988681: USE-AFTER-FREE IN MYSQL_STMT_CLOSE() Description: If mysql_stmt_close() encountered error, it recorded error in prepared statement but then frees memory assigned to prepared statement. If mysql_stmt_error() is used to get error information, it will result into use after free. In all cases where mysql_stmt_close() can fail, error would have been set by cli_advanced_command in MYSQL structure. Solution: Don't copy error from MYSQL using set_stmt_errmsg. There is no automated way to test the fix since it is in mysql_stmt_close() which does not expect any reply from server. Reviewed-By: Georgi Kodinov <[email protected]> Reviewed-By: Ramil Kalimullin <[email protected]>
lyp_mand_check_ext(struct lys_ext_instance_complex *ext, const char *ext_name) { void *p; int i; struct ly_ctx *ctx = ext->module->ctx; /* check for mandatory substatements */ for (i = 0; ext->substmt[i].stmt; i++) { if (ext->substmt[i].cardinality == LY_STMT_CARD_OPT || ext->substmt[i].cardinality == LY_STMT_CARD_ANY) { /* not a mandatory */ continue; } else if (ext->substmt[i].cardinality == LY_STMT_CARD_SOME) { goto array; } /* * LY_STMT_ORDEREDBY - not checked, has a default value which is the same as explicit system order * LY_STMT_MODIFIER, LY_STMT_STATUS, LY_STMT_MANDATORY, LY_STMT_CONFIG - checked, but mandatory requirement * does not make sense since there is also a default value specified */ switch(ext->substmt[i].stmt) { case LY_STMT_ORDEREDBY: /* always ok */ break; case LY_STMT_REQINSTANCE: case LY_STMT_DIGITS: case LY_STMT_MODIFIER: p = lys_ext_complex_get_substmt(ext->substmt[i].stmt, ext, NULL); if (!*(uint8_t*)p) { LOGVAL(ctx, LYE_MISSCHILDSTMT, LY_VLOG_NONE, NULL, ly_stmt_str[ext->substmt[i].stmt], ext_name); goto error; } break; case LY_STMT_STATUS: p = lys_ext_complex_get_substmt(ext->substmt[i].stmt, ext, NULL); if (!(*(uint16_t*)p & LYS_STATUS_MASK)) { LOGVAL(ctx, LYE_MISSCHILDSTMT, LY_VLOG_NONE, NULL, ly_stmt_str[ext->substmt[i].stmt], ext_name); goto error; } break; case LY_STMT_MANDATORY: p = lys_ext_complex_get_substmt(ext->substmt[i].stmt, ext, NULL); if (!(*(uint16_t*)p & LYS_MAND_MASK)) { LOGVAL(ctx, LYE_MISSCHILDSTMT, LY_VLOG_NONE, NULL, ly_stmt_str[ext->substmt[i].stmt], ext_name); goto error; } break; case LY_STMT_CONFIG: p = lys_ext_complex_get_substmt(ext->substmt[i].stmt, ext, NULL); if (!(*(uint16_t*)p & LYS_CONFIG_MASK)) { LOGVAL(ctx, LYE_MISSCHILDSTMT, LY_VLOG_NONE, NULL, ly_stmt_str[ext->substmt[i].stmt], ext_name); goto error; } break; default: array: /* stored as a pointer */ p = lys_ext_complex_get_substmt(ext->substmt[i].stmt, ext, NULL); if (!(*(void**)p)) { LOGVAL(ctx, LYE_MISSCHILDSTMT, LY_VLOG_NONE, NULL, ly_stmt_str[ext->substmt[i].stmt], ext_name); goto error; } break; } } return EXIT_SUCCESS; error: return EXIT_FAILURE; }
0
[ "CWE-787" ]
libyang
f6d684ade99dd37b21babaa8a856f64faa1e2e0d
131,004,402,639,884,560,000,000,000,000,000,000,000
71
parser BUGFIX long identity name buffer overflow STRING_OVERFLOW (CWE-120)
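The overflow came from copying an identity name into a fixed-size field without checking its length. A short C sketch of the bounded-copy check, with invented names and an arbitrary 16-byte key size:

#include <stdio.h>
#include <string.h>

#define KEY_LEN 16

struct item { char key[KEY_LEN + 1]; };

static int set_key(struct item *it, const char *name)
{
    size_t len = strlen(name);
    if (len > KEY_LEN) {                   /* would overflow the fixed buffer */
        fprintf(stderr, "identity name too long (%zu > %d)\n", len, KEY_LEN);
        return -1;
    }
    memcpy(it->key, name, len + 1);        /* safe: length validated above */
    return 0;
}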
find_split(const char *src, int step, int size) { int result = size; int n; if (size > 0) { /* check if that would split a backslash-sequence */ int mark = size; for (n = size - 1; n > 0; --n) { int ch = UChar(src[step + n]); if (ch == '\\') { if (n > 0 && src[step + n - 1] == ch) --n; mark = n; break; } else if (!isalnum(ch)) { break; } } if (mark < size) { result = mark; } else { /* check if that would split a backslash-sequence */ for (n = size - 1; n > 0; --n) { int ch = UChar(src[step + n]); if (ch == '%') { int need = op_length(src, step + n); if ((n + need) > size) { mark = n; } break; } } if (mark < size) { result = mark; } } } return result; }
0
[ "CWE-125" ]
ncurses
b025434573f466efe27862656a6a9d41dd2bd609
300,995,229,118,462,500,000,000,000,000,000,000,000
39
ncurses 6.1 - patch 20191012 + amend recent changes to ncurses*-config and pc-files to filter out Debian linker-flags (report by Sven Joachim, cf: 20150516). + clarify relationship between tic, infocmp and captoinfo in manpage. + check for invalid hashcode in _nc_find_type_entry and _nc_find_name_entry. > fix several errata in tic (reports/testcases by "zjuchenyuan"): + check for invalid hashcode in _nc_find_entry. + check for missing character after backslash in fmt_entry + check for acsc with odd length in dump_entry in check for one-one mapping (cf: 20060415); + check length when converting from old AIX box_chars_1 capability, overlooked in changes to eliminate strcpy (cf: 20001007). + amend the ncurses*-config and pc-files to take into account the rpath
usage (int status) { if (status != 0) { fprintf (stderr, _("Usage: %s [OPTION]... PATTERN [FILE]...\n"), program_name); fprintf (stderr, _("Try '%s --help' for more information.\n"), program_name); } else { printf (_("Usage: %s [OPTION]... PATTERN [FILE]...\n"), program_name); printf (_("\ Search for PATTERN in each FILE or standard input.\n")); fputs (_(before_options), stdout); printf (_("\ Example: %s -i 'hello world' menu.h main.c\n\ \n\ Regexp selection and interpretation:\n"), program_name); if (matchers[1].name) printf (_("\ -E, --extended-regexp PATTERN is an extended regular expression (ERE)\n\ -F, --fixed-strings PATTERN is a set of newline-separated fixed strings\n\ -G, --basic-regexp PATTERN is a basic regular expression (BRE)\n\ -P, --perl-regexp PATTERN is a Perl regular expression\n")); /* -X is undocumented on purpose. */ printf (_("\ -e, --regexp=PATTERN use PATTERN for matching\n\ -f, --file=FILE obtain PATTERN from FILE\n\ -i, --ignore-case ignore case distinctions\n\ -w, --word-regexp force PATTERN to match only whole words\n\ -x, --line-regexp force PATTERN to match only whole lines\n\ -z, --null-data a data line ends in 0 byte, not newline\n")); printf (_("\ \n\ Miscellaneous:\n\ -s, --no-messages suppress error messages\n\ -v, --invert-match select non-matching lines\n\ -V, --version print version information and exit\n\ --help display this help and exit\n\ --mmap deprecated no-op; evokes a warning\n")); printf (_("\ \n\ Output control:\n\ -m, --max-count=NUM stop after NUM matches\n\ -b, --byte-offset print the byte offset with output lines\n\ -n, --line-number print line number with output lines\n\ --line-buffered flush output on every line\n\ -H, --with-filename print the file name for each match\n\ -h, --no-filename suppress the file name prefix on output\n\ --label=LABEL use LABEL as the standard input file name prefix\n\ ")); printf (_("\ -o, --only-matching show only the part of a line matching PATTERN\n\ -q, --quiet, --silent suppress all normal output\n\ --binary-files=TYPE assume that binary files are TYPE;\n\ TYPE is `binary', `text', or `without-match'\n\ -a, --text equivalent to --binary-files=text\n\ ")); printf (_("\ -I equivalent to --binary-files=without-match\n\ -d, --directories=ACTION how to handle directories;\n\ ACTION is `read', `recurse', or `skip'\n\ -D, --devices=ACTION how to handle devices, FIFOs and sockets;\n\ ACTION is `read' or `skip'\n\ -R, -r, --recursive equivalent to --directories=recurse\n\ ")); printf (_("\ --include=FILE_PATTERN search only files that match FILE_PATTERN\n\ --exclude=FILE_PATTERN skip files and directories matching FILE_PATTERN\n\ --exclude-from=FILE skip files matching any file pattern from FILE\n\ --exclude-dir=PATTERN directories that match PATTERN will be skipped.\n\ ")); printf (_("\ -L, --files-without-match print only names of FILEs containing no match\n\ -l, --files-with-matches print only names of FILEs containing matches\n\ -c, --count print only a count of matching lines per FILE\n\ -T, --initial-tab make tabs line up (if needed)\n\ -Z, --null print 0 byte after FILE name\n")); printf (_("\ \n\ Context control:\n\ -B, --before-context=NUM print NUM lines of leading context\n\ -A, --after-context=NUM print NUM lines of trailing context\n\ -C, --context=NUM print NUM lines of output context\n\ ")); printf (_("\ -NUM same as --context=NUM\n\ --color[=WHEN],\n\ --colour[=WHEN] use markers to highlight the matching strings;\n\ WHEN is `always', `never', or `auto'\n\ -U, --binary do not strip CR characters 
at EOL (MSDOS/Windows)\n\ -u, --unix-byte-offsets report offsets as if CRs were not there\n\ (MSDOS/Windows)\n\ \n")); fputs (_(after_options), stdout); printf (_("\ When FILE is -, read standard input. With no FILE, read . if a command-line\n\ -r is given, - otherwise. If fewer than two FILEs are given, assume -h.\n\ Exit status is 0 if any line is selected, 1 otherwise;\n\ if any error occurs and -q is not given, the exit status is 2.\n")); printf (_("\nReport bugs to: %s\n"), PACKAGE_BUGREPORT); printf (_("GNU Grep home page: <%s>\n"), "http://www.gnu.org/software/grep/"); fputs (_("General help using GNU software: <http://www.gnu.org/gethelp/>\n"), stdout); } exit (status); }
0
[ "CWE-189" ]
grep
8fcf61523644df42e1905c81bed26838e0b04f91
134,785,270,345,340,720,000,000,000,000,000,000,000
110
grep: fix integer-overflow issues in main program * NEWS: Document this. * bootstrap.conf (gnulib_modules): Add inttypes, xstrtoimax. Remove xstrtoumax. * src/main.c: Include <inttypes.h>, for INTMAX_MAX, PRIdMAX. (context_length_arg, prtext, grepbuf, grep, grepfile) (get_nondigit_option, main): Use intmax_t, not int, for line counts. (context_length_arg, main): Silently ceiling line counts to maximum value, since there's no practical difference between doing that and using infinite-precision arithmetic. (out_before, out_after, pending): Now intmax_t, not int. (max_count, outleft): Now intmax_t, not off_t. (prepend_args, prepend_default_options, main): Use size_t, not int, for sizes. (prepend_default_options): Check for int and size_t overflow.
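The fix switches line counts to intmax_t and silently ceilings values that overflow. A small C sketch of that parsing behaviour using strtoimax(); the function name and error convention are assumptions.

#include <errno.h>
#include <inttypes.h>

/* Parse a context/count argument into intmax_t, ceiling on overflow. */
static intmax_t parse_count(const char *s)
{
    char *end;
    errno = 0;
    intmax_t v = strtoimax(s, &end, 10);
    if (errno == ERANGE)
        return INTMAX_MAX;   /* silently ceiling, as the changelog describes */
    if (end == s || *end != '\0' || v < 0)
        return -1;           /* caller treats this as a usage error */
    return v;
}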
static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig) { return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL); }
0
[ "CWE-416" ]
linux
dbb2483b2a46fbaf833cfb5deb5ed9cace9c7399
173,368,915,782,152,930,000,000,000,000,000,000,000
4
xfrm: clean up xfrm protocol checks In commit 6a53b7593233 ("xfrm: check id proto in validate_tmpl()") I introduced a check for xfrm protocol, but according to Herbert IPSEC_PROTO_ANY should only be used as a wildcard for lookup, so it should be removed from validate_tmpl(). And, IPSEC_PROTO_ANY is expected to only match 3 IPSec-specific protocols, this is why xfrm_state_flush() could still miss IPPROTO_ROUTING, which leads that those entries are left in net->xfrm.state_all before exit net. Fix this by replacing IPSEC_PROTO_ANY with zero. This patch also extracts the check from validate_tmpl() to xfrm_id_proto_valid() and uses it in parse_ipsecrequest(). With this, no other protocols should be added into xfrm. Fixes: 6a53b7593233 ("xfrm: check id proto in validate_tmpl()") Reported-by: [email protected] Cc: Steffen Klassert <[email protected]> Cc: Herbert Xu <[email protected]> Signed-off-by: Cong Wang <[email protected]> Acked-by: Herbert Xu <[email protected]> Signed-off-by: Steffen Klassert <[email protected]>
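The change replaces a wildcard protocol with an explicit whitelist (xfrm_id_proto_valid). A hedged C sketch of such a whitelist over the IPsec-related IPPROTO_* values; the exact set accepted by the kernel may differ.

#include <netinet/in.h>
#include <stdbool.h>

static bool ipsec_proto_valid(unsigned char proto)
{
    switch (proto) {
    case IPPROTO_AH:
    case IPPROTO_ESP:
#ifdef IPPROTO_COMP
    case IPPROTO_COMP:
#endif
#ifdef IPPROTO_ROUTING
    case IPPROTO_ROUTING:
    case IPPROTO_DSTOPTS:
#endif
        return true;          /* protocols the state machine actually handles */
    default:
        return false;         /* no wildcard: everything else is rejected */
    }
}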
void mk_vhost_free_all() { struct host *host; struct host_alias *host_alias; struct error_page *ep; struct mk_list *head_host; struct mk_list *head_alias; struct mk_list *head_error; struct mk_list *tmp1, *tmp2; mk_list_foreach_safe(head_host, tmp1, &config->hosts) { host = mk_list_entry(head_host, struct host, _head); mk_list_del(&host->_head); mk_mem_free(host->file); /* Free aliases or servernames */ mk_list_foreach_safe(head_alias, tmp2, &host->server_names) { host_alias = mk_list_entry(head_alias, struct host_alias, _head); mk_list_del(&host_alias->_head); mk_mem_free(host_alias->name); mk_mem_free(host_alias); } /* Free error pages */ mk_list_foreach_safe(head_error, tmp2, &host->error_pages) { ep = mk_list_entry(head_error, struct error_page, _head); mk_list_del(&ep->_head); mk_mem_free(ep->file); mk_mem_free(ep->real_path); mk_mem_free(ep); } mk_ptr_free(&host->documentroot); mk_mem_free(host->host_signature); mk_ptr_free(&host->header_host_signature); /* Free source configuration */ if (host->config) mk_config_free(host->config); mk_mem_free(host); } }
0
[ "CWE-20" ]
monkey
b2d0e6f92310bb14a15aa2f8e96e1fb5379776dd
331,923,544,028,588,270,000,000,000,000,000,000,000
42
Request: new request session flag to mark those files opened by FDT This patch aims to fix a potential DDoS problem that can be caused in the server quering repetitive non-existent resources. When serving a static file, the core use Vhost FDT mechanism, but if it sends a static error page it does a direct open(2). When closing the resources for the same request it was just calling mk_vhost_close() which did not clear properly the file descriptor. This patch adds a new field on the struct session_request called 'fd_is_fdt', which contains MK_TRUE or MK_FALSE depending of how fd_file was opened. Thanks to Matthew Daley <[email protected]> for report and troubleshoot this problem. Signed-off-by: Eduardo Silva <[email protected]>
wrong_priority(dns_rdataset_t *rds, int pass, dns_rdatatype_t preferred_glue) { int pass_needed; /* * If we are not rendering class IN, this ordering is bogus. */ if (rds->rdclass != dns_rdataclass_in) return (false); switch (rds->type) { case dns_rdatatype_a: case dns_rdatatype_aaaa: if (preferred_glue == rds->type) pass_needed = 4; else pass_needed = 3; break; case dns_rdatatype_rrsig: case dns_rdatatype_dnskey: pass_needed = 2; break; default: pass_needed = 1; } if (pass_needed >= pass) return (false); return (true); }
0
[ "CWE-617" ]
bind9
6ed167ad0a647dff20c8cb08c944a7967df2d415
242,158,861,853,611,100,000,000,000,000,000,000,000
30
Always keep a copy of the message this allows it to be available even when dns_message_parse() returns a error.
PHPAPI void php_info_print_table_end(void) /* {{{ */ { if (!sapi_module.phpinfo_as_text) { php_info_print("</table>\n"); }
0
[ "CWE-200" ]
php-src
3804c0d00fa6e629173fb1c8c61f8f88d5fe39b9
56,443,051,893,955,510,000,000,000,000,000,000,000
7
Fix bug #67498 - phpinfo() Type Confusion Information Leak Vulnerability
static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct nf_bridge_info *nf_bridge; struct net_device *parent; u_int8_t pf; if (!skb->nf_bridge) return NF_ACCEPT; /* Need exclusive nf_bridge_info since we might have multiple * different physoutdevs. */ if (!nf_bridge_unshare(skb)) return NF_DROP; parent = bridge_parent(out); if (!parent) return NF_DROP; if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb)) pf = PF_INET; else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) pf = PF_INET6; else return NF_ACCEPT; nf_bridge_pull_encap_header(skb); nf_bridge = skb->nf_bridge; if (skb->pkt_type == PACKET_OTHERHOST) { skb->pkt_type = PACKET_HOST; nf_bridge->mask |= BRNF_PKT_TYPE; } if (br_parse_ip_options(skb)) return NF_DROP; /* The physdev module checks on this */ nf_bridge->mask |= BRNF_BRIDGED; nf_bridge->physoutdev = skb->dev; if (pf == PF_INET) skb->protocol = htons(ETH_P_IP); else skb->protocol = htons(ETH_P_IPV6); NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent, br_nf_forward_finish); return NF_STOLEN; }
0
[]
linux-2.6
f8e9881c2aef1e982e5abc25c046820cd0b7cf64
273,055,919,454,914,360,000,000,000,000,000,000,000
54
bridge: reset IPCB in br_parse_ip_options Commit 462fb2af9788a82 (bridge : Sanitize skb before it enters the IP stack), missed one IPCB init before calling ip_options_compile() Thanks to Scot Doyle for his tests and bug reports. Reported-by: Scot Doyle <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Cc: Hiroaki SHIMODA <[email protected]> Acked-by: Bandan Das <[email protected]> Acked-by: Stephen Hemminger <[email protected]> Cc: Jan Lübbe <[email protected]> Signed-off-by: David S. Miller <[email protected]>
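The one-line fix re-initialises the per-skb control block before ip_options_compile() runs. A generic C sketch of the underlying rule, with toy types instead of sk_buff/IPCB: zero the shared per-packet scratch area before a different layer parses into it.

#include <string.h>

struct pkt {
    unsigned char cb[48];        /* per-packet control buffer shared by layers */
    /* ... payload ... */
};

struct ip_opts { unsigned char optlen; unsigned char srr; };

static int parse_ip_options(struct pkt *p)
{
    memset(p->cb, 0, sizeof(p->cb));                 /* the missing reset */
    struct ip_opts *opts = (struct ip_opts *)p->cb;  /* this layer's view of cb */
    /* ... fill opts from the packet's IP header; stale state cannot leak in ... */
    (void)opts;
    return 0;
}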
static int follow_managed(struct path *path, struct nameidata *nd) { struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */ unsigned managed; bool need_mntput = false; int ret = 0; /* Given that we're not holding a lock here, we retain the value in a * local variable for each dentry as we look at it so that we don't see * the components of that value change under us */ while (managed = ACCESS_ONCE(path->dentry->d_flags), managed &= DCACHE_MANAGED_DENTRY, unlikely(managed != 0)) { /* Allow the filesystem to manage the transit without i_mutex * being held. */ if (managed & DCACHE_MANAGE_TRANSIT) { BUG_ON(!path->dentry->d_op); BUG_ON(!path->dentry->d_op->d_manage); ret = path->dentry->d_op->d_manage(path->dentry, false); if (ret < 0) break; } /* Transit to a mounted filesystem. */ if (managed & DCACHE_MOUNTED) { struct vfsmount *mounted = lookup_mnt(path); if (mounted) { dput(path->dentry); if (need_mntput) mntput(path->mnt); path->mnt = mounted; path->dentry = dget(mounted->mnt_root); need_mntput = true; continue; } /* Something is mounted on this dentry in another * namespace and/or whatever was mounted there in this * namespace got unmounted before lookup_mnt() could * get it */ } /* Handle an automount point */ if (managed & DCACHE_NEED_AUTOMOUNT) { ret = follow_automount(path, nd, &need_mntput); if (ret < 0) break; continue; } /* We didn't change the current path point */ break; } if (need_mntput && path->mnt == mnt) mntput(path->mnt); if (ret == -EISDIR) ret = 0; if (need_mntput) nd->flags |= LOOKUP_JUMPED; if (unlikely(ret < 0)) path_put_conditional(path, nd); return ret; }
0
[ "CWE-254" ]
linux
397d425dc26da728396e66d392d5dcb8dac30c37
2,356,060,094,305,639,400,000,000,000,000,000,000
64
vfs: Test for and handle paths that are unreachable from their mnt_root In rare cases a directory can be renamed out from under a bind mount. In those cases without special handling it becomes possible to walk up the directory tree to the root dentry of the filesystem and down from the root dentry to every other file or directory on the filesystem. Like division by zero .. from an unconnected path can not be given a useful semantic as there is no predicting at which path component the code will realize it is unconnected. We certainly can not match the current behavior as the current behavior is a security hole. Therefore when encounting .. when following an unconnected path return -ENOENT. - Add a function path_connected to verify path->dentry is reachable from path->mnt.mnt_root. AKA to validate that rename did not do something nasty to the bind mount. To avoid races path_connected must be called after following a path component to it's next path component. Signed-off-by: "Eric W. Biederman" <[email protected]> Signed-off-by: Al Viro <[email protected]>
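The fix adds path_connected() to verify a dentry is still reachable from the mount root after a concurrent rename. A toy C sketch of that reachability walk on a parent-pointer tree (types invented; parent == self marks the filesystem root):

struct dentry { struct dentry *parent; };

/* Returns 1 if dentry is reachable from mnt_root by following parents. */
static int path_connected(const struct dentry *mnt_root, const struct dentry *dentry)
{
    while (dentry != mnt_root) {
        if (dentry->parent == dentry)
            return 0;            /* hit the fs root without passing mnt_root */
        dentry = dentry->parent;
    }
    return 1;
}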
static inline struct sock *l2tp_ip6_bind_lookup(struct net *net, struct in6_addr *laddr, int dif, u32 tunnel_id) { struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id); if (sk) sock_hold(sk); return sk; }
0
[ "CWE-20" ]
net
85fbaa75037d0b6b786ff18658ddf0b4014ce2a4
91,524,553,984,917,910,000,000,000,000,000,000,000
10
inet: fix addr_len/msg->msg_namelen assignment in recv_error and rxpmtu functions Commit bceaa90240b6019ed73b49965eac7d167610be69 ("inet: prevent leakage of uninitialized memory to user in recv syscalls") conditionally updated addr_len if the msg_name is written to. The recv_error and rxpmtu functions relied on the recvmsg functions to set up addr_len before. As this does not happen any more we have to pass addr_len to those functions as well and set it to the size of the corresponding sockaddr length. This broke traceroute and such. Fixes: bceaa90240b6 ("inet: prevent leakage of uninitialized memory to user in recv syscalls") Reported-by: Brad Spengler <[email protected]> Reported-by: Tom Labanowski Cc: mpb <[email protected]> Cc: David S. Miller <[email protected]> Cc: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
evalvars_init(void) { int i; struct vimvar *p; init_var_dict(&globvardict, &globvars_var, VAR_DEF_SCOPE); init_var_dict(&vimvardict, &vimvars_var, VAR_SCOPE); vimvardict.dv_lock = VAR_FIXED; hash_init(&compat_hashtab); for (i = 0; i < VV_LEN; ++i) { p = &vimvars[i]; if (STRLEN(p->vv_name) > DICTITEM16_KEY_LEN) { iemsg("INTERNAL: name too long, increase size of dictitem16_T"); getout(1); } STRCPY(p->vv_di.di_key, p->vv_name); if (p->vv_flags & VV_RO) p->vv_di.di_flags = DI_FLAGS_RO | DI_FLAGS_FIX; else if (p->vv_flags & VV_RO_SBX) p->vv_di.di_flags = DI_FLAGS_RO_SBX | DI_FLAGS_FIX; else p->vv_di.di_flags = DI_FLAGS_FIX; // add to v: scope dict, unless the value is not always available if (p->vv_tv_type != VAR_UNKNOWN) hash_add(&vimvarht, p->vv_di.di_key); if (p->vv_flags & VV_COMPAT) // add to compat scope dict hash_add(&compat_hashtab, p->vv_di.di_key); } set_vim_var_nr(VV_VERSION, VIM_VERSION_100); set_vim_var_nr(VV_VERSIONLONG, VIM_VERSION_100 * 10000 + highest_patch()); set_vim_var_nr(VV_SEARCHFORWARD, 1L); set_vim_var_nr(VV_HLSEARCH, 1L); set_vim_var_nr(VV_EXITING, VVAL_NULL); set_vim_var_dict(VV_COMPLETED_ITEM, dict_alloc_lock(VAR_FIXED)); set_vim_var_list(VV_ERRORS, list_alloc()); set_vim_var_dict(VV_EVENT, dict_alloc_lock(VAR_FIXED)); set_vim_var_nr(VV_FALSE, VVAL_FALSE); set_vim_var_nr(VV_TRUE, VVAL_TRUE); set_vim_var_nr(VV_NONE, VVAL_NONE); set_vim_var_nr(VV_NULL, VVAL_NULL); set_vim_var_nr(VV_NUMBERMAX, VARNUM_MAX); set_vim_var_nr(VV_NUMBERMIN, VARNUM_MIN); set_vim_var_nr(VV_NUMBERSIZE, sizeof(varnumber_T) * 8); set_vim_var_nr(VV_SIZEOFINT, sizeof(int)); set_vim_var_nr(VV_SIZEOFLONG, sizeof(long)); set_vim_var_nr(VV_SIZEOFPOINTER, sizeof(char *)); set_vim_var_nr(VV_MAXCOL, MAXCOL); set_vim_var_nr(VV_TYPE_NUMBER, VAR_TYPE_NUMBER); set_vim_var_nr(VV_TYPE_STRING, VAR_TYPE_STRING); set_vim_var_nr(VV_TYPE_FUNC, VAR_TYPE_FUNC); set_vim_var_nr(VV_TYPE_LIST, VAR_TYPE_LIST); set_vim_var_nr(VV_TYPE_DICT, VAR_TYPE_DICT); set_vim_var_nr(VV_TYPE_FLOAT, VAR_TYPE_FLOAT); set_vim_var_nr(VV_TYPE_BOOL, VAR_TYPE_BOOL); set_vim_var_nr(VV_TYPE_NONE, VAR_TYPE_NONE); set_vim_var_nr(VV_TYPE_JOB, VAR_TYPE_JOB); set_vim_var_nr(VV_TYPE_CHANNEL, VAR_TYPE_CHANNEL); set_vim_var_nr(VV_TYPE_BLOB, VAR_TYPE_BLOB); set_vim_var_nr(VV_ECHOSPACE, sc_col - 1); set_vim_var_dict(VV_COLORNAMES, dict_alloc()); // Default for v:register is not 0 but '"'. This is adjusted once the // clipboard has been setup by calling reset_reg_var(). set_reg_var(0); }
0
[ "CWE-476" ]
vim
0f6e28f686dbb59ab3b562408ab9b2234797b9b1
247,395,542,538,012,940,000,000,000,000,000,000,000
75
patch 8.2.4428: crash when switching tabpage while in the cmdline window Problem: Crash when switching tabpage while in the cmdline window. Solution: Disallow switching tabpage when in the cmdline window.
add_mlist(struct mlist *mlp, struct magic_map *map, size_t idx) { struct mlist *ml; mlp->map = idx == 0 ? map : NULL; if ((ml = CAST(struct mlist *, malloc(sizeof(*ml)))) == NULL) return -1; ml->map = NULL; ml->magic = map->magic[idx]; ml->nmagic = map->nmagic[idx]; mlp->prev->next = ml; ml->prev = mlp->prev; ml->next = mlp; mlp->prev = ml; return 0; }
0
[ "CWE-399" ]
file
90018fe22ff8b74a22fcd142225b0a00f3f12677
44,142,282,365,134,310,000,000,000,000,000,000,000
18
bump recursion to 15, and allow it to be set from the command line.
mod_init(struct module *const restrict m) { if (! (saslsvs = service_add("saslserv", &saslserv_message_handler))) { (void) slog(LG_ERROR, "%s: service_add() failed", m->name); m->mflags |= MODFLAG_FAIL; return; } (void) hook_add_sasl_input(&sasl_input); (void) hook_add_user_add(&sasl_user_add); (void) hook_add_server_eob(&sasl_server_eob); sasl_delete_stale_timer = mowgli_timer_add(base_eventloop, "sasl_delete_stale", &sasl_delete_stale, NULL, SECONDS_PER_MINUTE / 2); authservice_loaded++; (void) add_bool_conf_item("HIDE_SERVER_NAMES", &saslsvs->conf_table, 0, &sasl_hide_server_names, false); }
0
[ "CWE-287", "CWE-288" ]
atheme
4e664c75d0b280a052eb8b5e81aa41944e593c52
201,934,566,036,018,640,000,000,000,000,000,000,000
18
saslserv/main: Track EID we're pending login to The existing model does not remember that we've sent a SVSLOGIN for a given SASL session, and simply assumes that if a client is introduced with a SASL session open, that session must have succeeded. The security of this approach requires ircd to implicitly abort SASL sessions on client registration. This also means that if a client successfully authenticates and then does something else its pending login is forgotten about, even though a SVSLOGIN has been sent for it, and the ircd is going to think it's logged in. This change removes the dependency on ircd's state machine by keeping explicit track of the pending login, i.e. the one we've most recently sent a SVSLOGIN for. The next commit will ensure that a client abort (even an implicit one) doesn't blow that information away.
rfbSendNewFBSize(rfbClientPtr cl, int w, int h) { rfbFramebufferUpdateRectHeader rect; if (cl->ublen + sz_rfbFramebufferUpdateRectHeader > UPDATE_BUF_SIZE) { if (!rfbSendUpdateBuf(cl)) return FALSE; } if (cl->PalmVNC==TRUE) rfbLog("Sending rfbEncodingNewFBSize in response to a PalmVNC style framebuffer resize (%dx%d)\n", w, h); else rfbLog("Sending rfbEncodingNewFBSize for resize to (%dx%d)\n", w, h); rect.encoding = Swap32IfLE(rfbEncodingNewFBSize); rect.r.x = 0; rect.r.y = 0; rect.r.w = Swap16IfLE(w); rect.r.h = Swap16IfLE(h); memcpy(&cl->updateBuf[cl->ublen], (char *)&rect, sz_rfbFramebufferUpdateRectHeader); cl->ublen += sz_rfbFramebufferUpdateRectHeader; rfbStatRecordEncodingSent(cl, rfbEncodingNewFBSize, sz_rfbFramebufferUpdateRectHeader, sz_rfbFramebufferUpdateRectHeader); return TRUE; }
0
[]
libvncserver
804335f9d296440bb708ca844f5d89b58b50b0c6
266,637,540,349,595,300,000,000,000,000,000,000,000
30
Thread safety for zrle, zlib, tight. Proposed tight security type fix for debian bug 517422.
TEST(IndexBoundsBuilderTest, CanUseCoveredMatchingForEqualityPredicate) { auto testIndex = buildSimpleIndexEntry(); BSONObj obj = fromjson("{a: {$eq: 3}}"); auto expr = parseMatchExpression(obj); ASSERT_TRUE(IndexBoundsBuilder::canUseCoveredMatching(expr.get(), testIndex)); }
0
[ "CWE-754" ]
mongo
f8f55e1825ee5c7bdb3208fc7c5b54321d172732
24,497,609,487,423,560,000,000,000,000,000,000,000
6
SERVER-44377 generate correct plan for indexed inequalities to null
int FS_FindVM(void **startSearch, char *found, int foundlen, const char *name, int enableDll) { searchpath_t *search, *lastSearch; directory_t *dir; pack_t *pack; char dllName[MAX_OSPATH], qvmName[MAX_OSPATH]; char *netpath; if(!fs_searchpaths) Com_Error(ERR_FATAL, "Filesystem call made without initialization"); if(enableDll) Com_sprintf(dllName, sizeof(dllName), "%s" ARCH_STRING DLL_EXT, name); Com_sprintf(qvmName, sizeof(qvmName), "vm/%s.qvm", name); lastSearch = *startSearch; if(*startSearch == NULL) search = fs_searchpaths; else search = lastSearch->next; while(search) { if(search->dir && !fs_numServerPaks) { dir = search->dir; if(enableDll) { netpath = FS_BuildOSPath(dir->path, dir->gamedir, dllName); if(FS_FileInPathExists(netpath)) { Q_strncpyz(found, netpath, foundlen); *startSearch = search; return VMI_NATIVE; } } if(FS_FOpenFileReadDir(qvmName, search, NULL, qfalse, qfalse) > 0) { *startSearch = search; return VMI_COMPILED; } } else if(search->pack) { pack = search->pack; if(lastSearch && lastSearch->pack) { // make sure we only try loading one VM file per game dir // i.e. if VM from pak7.pk3 fails we won't try one from pak6.pk3 if(!FS_FilenameCompare(lastSearch->pack->pakPathname, pack->pakPathname)) { search = search->next; continue; } } if(FS_FOpenFileReadDir(qvmName, search, NULL, qfalse, qfalse) > 0) { *startSearch = search; return VMI_COMPILED; } } search = search->next; } return -1; }
0
[ "CWE-269" ]
ioq3
376267d534476a875d8b9228149c4ee18b74a4fd
337,849,067,189,791,180,000,000,000,000,000,000,000
76
Don't load .pk3s as .dlls, and don't load user config files from .pk3s.
PosibErr<bool> Config::read_in_settings(const Config * other) { if (settings_read_in_) return false; bool was_committed = committed_; set_committed_state(false); if (other && other->settings_read_in_) { assert(empty()); del(); // to clean up any notifiers and similar stuff copy(*other); } else { if (other) merge(*other); const char * env = getenv("ASPELL_CONF"); if (env != 0) { insert_point_ = &first_; RET_ON_ERR(read_in_string(env, _("ASPELL_CONF env var"))); } { insert_point_ = &first_; PosibErrBase pe = read_in_file(retrieve("per-conf-path")); if (pe.has_err() && !pe.has_err(cant_read_file)) return pe; } { insert_point_ = &first_; PosibErrBase pe = read_in_file(retrieve("conf-path")); if (pe.has_err() && !pe.has_err(cant_read_file)) return pe; } if (was_committed) RET_ON_ERR(commit_all()); settings_read_in_ = true; } return true; }
0
[ "CWE-125" ]
aspell
80fa26c74279fced8d778351cff19d1d8f44fe4e
85,122,093,606,760,010,000,000,000,000,000,000,000
43
Fix various bugs found by OSS-Fuze.
void *htp_tx_get_user_data(const htp_tx_t *tx) { if (tx == NULL) return NULL; return tx->user_data; }
0
[]
libhtp
c7c03843cd6b1cbf44eb435d160ba53aec948828
323,821,434,659,182,170,000,000,000,000,000,000,000
4
Harden decompress code against memory stress Under severe memory pressure the decompress code can fail to setup properly. Add checks before dereferencing pointers.
static int cdrom_ioctl_reset(struct cdrom_device_info *cdi, struct block_device *bdev) { cd_dbg(CD_DO_IOCTL, "entering CDROM_RESET\n"); if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (!CDROM_CAN(CDC_RESET)) return -ENOSYS; invalidate_bdev(bdev); return cdi->ops->reset(cdi); }
0
[ "CWE-119", "CWE-787" ]
linux
9de4ee40547fd315d4a0ed1dd15a2fa3559ad707
244,987,434,201,063,970,000,000,000,000,000,000,000
12
cdrom: information leak in cdrom_ioctl_media_changed() This cast is wrong. "cdi->capacity" is an int and "arg" is an unsigned long. The way the check is written now, if one of the high 32 bits is set then we could read outside the info->slots[] array. This bug is pretty old and it predates git. Reviewed-by: Christoph Hellwig <[email protected]> Cc: [email protected] Signed-off-by: Dan Carpenter <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
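The leak comes from narrowing an unsigned long slot index to int before the bounds check. A minimal C sketch showing the corrected comparison; the array size and names are stand-ins.

#define NSLOTS 8

static int slot_changed[NSLOTS];

static int media_changed(unsigned long arg)
{
    /* The bug was `if ((int)arg >= NSLOTS)`: on a typical LP64 system a
     * value such as 0x100000000UL truncates to 0, passes the check, and
     * then indexes far past the array. Compare in the wide unsigned type. */
    if (arg >= NSLOTS)
        return -1;
    return slot_changed[arg];
}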
vhost_scsi_make_nodeacl(struct se_portal_group *se_tpg, struct config_group *group, const char *name) { struct se_node_acl *se_nacl, *se_nacl_new; struct vhost_scsi_nacl *nacl; u64 wwpn = 0; u32 nexus_depth; /* vhost_scsi_parse_wwn(name, &wwpn, 1) < 0) return ERR_PTR(-EINVAL); */ se_nacl_new = vhost_scsi_alloc_fabric_acl(se_tpg); if (!se_nacl_new) return ERR_PTR(-ENOMEM); nexus_depth = 1; /* * se_nacl_new may be released by core_tpg_add_initiator_node_acl() * when converting a NodeACL from demo mode -> explict */ se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, name, nexus_depth); if (IS_ERR(se_nacl)) { vhost_scsi_release_fabric_acl(se_tpg, se_nacl_new); return se_nacl; } /* * Locate our struct vhost_scsi_nacl and set the FC Nport WWPN */ nacl = container_of(se_nacl, struct vhost_scsi_nacl, se_node_acl); nacl->iport_wwpn = wwpn; return se_nacl; }
0
[ "CWE-200", "CWE-119" ]
linux
59c816c1f24df0204e01851431d3bab3eb76719c
202,398,884,648,501,120,000,000,000,000,000,000,000
34
vhost/scsi: potential memory corruption This code in vhost_scsi_make_tpg() is confusing because we limit "tpgt" to UINT_MAX but the data type of "tpg->tport_tpgt" and that is a u16. I looked at the context and it turns out that in vhost_scsi_set_endpoint(), "tpg->tport_tpgt" is used as an offset into the vs_tpg[] array which has VHOST_SCSI_MAX_TARGET (256) elements so anything higher than 255 then it is invalid. I have made that the limit now. In vhost_scsi_send_evt() we mask away values higher than 255, but now that the limit has changed, we don't need the mask. Signed-off-by: Dan Carpenter <[email protected]> Signed-off-by: Nicholas Bellinger <[email protected]>
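The message reasons that the tag is used as an index into a 256-entry table, so anything above 255 must be rejected when parsed. A short C sketch of that parse-and-bound check with invented names:

#include <errno.h>
#include <stdlib.h>

#define MAX_TARGET 256   /* size of the vs_tpg[]-style lookup table */

static int parse_tpgt(const char *s, unsigned short *out)
{
    char *end;
    errno = 0;
    unsigned long v = strtoul(s, &end, 10);
    if (end == s || *end != '\0' || errno == ERANGE)
        return -EINVAL;
    if (v >= MAX_TARGET)           /* would index past the table */
        return -EINVAL;
    *out = (unsigned short)v;
    return 0;
}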
GF_SceneEngine *gf_seng_init_from_string(void *calling_object, char * inputContext, u32 load_type, u32 width, u32 height, Bool usePixelMetrics, char *dump_path) { GF_SceneEngine *seng; GF_Err e = GF_OK; if (!inputContext) return NULL; GF_SAFEALLOC(seng, GF_SceneEngine) if (!seng) return NULL; seng->calling_object = calling_object; seng->dump_path = dump_path; /*Step 1: create context and load input*/ seng->sg = gf_sg_new(); seng->ctx = gf_sm_new(seng->sg); seng->owns_context = 1; memset(& seng->loader, 0, sizeof(GF_SceneLoader)); seng->loader.ctx = seng->ctx; seng->loader.type = load_type; /*since we're encoding in BIFS we must get MPEG-4 nodes only*/ seng->loader.flags = GF_SM_LOAD_MPEG4_STRICT; /* assign a loader type only if it was not requested (e.g. DIMS should not be overridden by SVG) */ if (!seng->loader.type) { if (inputContext[0] == '<') { if (strstr(inputContext, "<svg ")) seng->loader.type = GF_SM_LOAD_SVG; else if (strstr(inputContext, "<saf ")) seng->loader.type = GF_SM_LOAD_XSR; else if (strstr(inputContext, "XMT-A") || strstr(inputContext, "X3D")) seng->loader.type = GF_SM_LOAD_XMTA; } else { seng->loader.type = GF_SM_LOAD_BT; } } e = gf_sm_load_string(&seng->loader, inputContext, 0); if (e) { GF_LOG(GF_LOG_ERROR, GF_LOG_SCENE, ("[SceneEngine] cannot load context from %s (error %s)\n", inputContext, gf_error_to_string(e))); goto exit; } if (!seng->ctx->root_od) { seng->ctx->is_pixel_metrics = usePixelMetrics; seng->ctx->scene_width = width; seng->ctx->scene_height = height; } e = gf_sm_live_setup(seng); if (e!=GF_OK) { GF_LOG(GF_LOG_ERROR, GF_LOG_SCENE, ("[SceneEngine] cannot init scene encoder for context (error %s)\n", gf_error_to_string(e))); goto exit; } return seng; exit: gf_seng_terminate(seng); return NULL; }
0
[ "CWE-787" ]
gpac
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
325,238,246,263,715,400,000,000,000,000,000,000,000
55
fixed #2138
chvt_rndsetup(const void *bp, size_t sz) { register uint32_t h; /* use LCG as seed but try to get them to deviate immediately */ h = lcg_state; (void)rndget(); BAFHFinish_reg(h); /* variation through pid, ppid, and the works */ BAFHUpdateMem_reg(h, &rndsetupstate, sizeof(rndsetupstate)); /* some variation, some possibly entropy, depending on OE */ BAFHUpdateMem_reg(h, bp, sz); /* mix them all up */ BAFHFinish_reg(h); return (h); }
0
[]
mksh
de53d2df1c3b812c262cc1bddbfe0b3bfc25c14b
240,628,156,088,321,930,000,000,000,000,000,000,000
17
SECURITY: do not permit += from environment either this makes our environment filter/sanitisation complete
explicit CImgList(const unsigned int n):_width(n) { if (n) _data = new CImg<T>[_allocated_width = std::max(16U,(unsigned int)cimg::nearest_pow2(n))]; else { _allocated_width = 0; _data = 0; } }
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
267,362,621,134,107,830,000,000,000,000,000,000,000
4
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
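The fix checks that the dimensions encoded in the file do not exceed the file size. A hedged C sketch of such a plausibility test, done so the product itself cannot wrap; it ignores BMP row padding and headers, which the real loader must account for.

#include <stdbool.h>
#include <stdint.h>

static bool bmp_dims_plausible(uint32_t width, uint32_t height,
                               uint32_t bytes_per_pixel, uint64_t file_size)
{
    if (width == 0 || height == 0 ||
        bytes_per_pixel == 0 || bytes_per_pixel > 4)
        return false;
    uint64_t row = (uint64_t)width * bytes_per_pixel;   /* fits in 64 bits */
    /* row * height <= file_size, expressed without the multiplication
     * so the comparison itself cannot overflow */
    return row <= file_size / height;
}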
CGroupContext *unit_get_cgroup_context(Unit *u) { size_t offset; offset = UNIT_VTABLE(u)->cgroup_context_offset; if (offset <= 0) return NULL; return (CGroupContext*) ((uint8_t*) u + offset); }
0
[]
systemd
5ba6985b6c8ef85a8bcfeb1b65239c863436e75b
205,844,715,201,769,700,000,000,000,000,000,000,000
9
core: allow PIDs to be watched by two units at the same time In some cases it is interesting to map a PID to two units at the same time. For example, when a user logs in via a getty, which is reexeced to /sbin/login that binary will be explicitly referenced as main pid of the getty service, as well as implicitly referenced as part of the session scope.
static size_t ssl_get_reassembly_buffer_size( size_t msg_len, unsigned add_bitmap ) { size_t alloc_len; alloc_len = 12; /* Handshake header */ alloc_len += msg_len; /* Content buffer */ if( add_bitmap ) alloc_len += msg_len / 8 + ( msg_len % 8 != 0 ); /* Bitmap */ return( alloc_len ); }
0
[ "CWE-787" ]
mbedtls
f333dfab4a6c2d8a604a61558a8f783145161de4
102,819,428,700,320,150,000,000,000,000,000,000,000
13
More SSL debug messages for ClientHello parsing In particular, be verbose when checking the ClientHello cookie in a possible DTLS reconnection. Signed-off-by: Gilles Peskine <[email protected]>
QPDF::readObject(PointerHolder<InputSource> input, std::string const& description, int objid, int generation, bool in_object_stream) { setLastObjectDescription(description, objid, generation); qpdf_offset_t offset = input->tell(); bool empty = false; PointerHolder<StringDecrypter> decrypter_ph; StringDecrypter* decrypter = 0; if (this->m->encrypted && (! in_object_stream)) { decrypter_ph = new StringDecrypter(this, objid, generation); decrypter = decrypter_ph.getPointer(); } QPDFObjectHandle object = QPDFObjectHandle::parse( input, description, this->m->tokenizer, empty, decrypter, this); if (empty) { // Nothing in the PDF spec appears to allow empty objects, but // they have been encountered in actual PDF files and Adobe // Reader appears to ignore them. warn(QPDFExc(qpdf_e_damaged_pdf, input->getName(), this->m->last_object_description, input->getLastOffset(), "empty object treated as null")); } else if (object.isDictionary() && (! in_object_stream)) { // check for stream qpdf_offset_t cur_offset = input->tell(); if (readToken(input) == QPDFTokenizer::Token(QPDFTokenizer::tt_word, "stream")) { // The PDF specification states that the word "stream" // should be followed by either a carriage return and // a newline or by a newline alone. It specifically // disallowed following it by a carriage return alone // since, in that case, there would be no way to tell // whether the NL in a CR NL sequence was part of the // stream data. However, some readers, including // Adobe reader, accept a carriage return by itself // when followed by a non-newline character, so that's // what we do here. { char ch; if (input->read(&ch, 1) == 0) { // A premature EOF here will result in some // other problem that will get reported at // another time. } else if (ch == '\n') { // ready to read stream data QTC::TC("qpdf", "QPDF stream with NL only"); } else if (ch == '\r') { // Read another character if (input->read(&ch, 1) != 0) { if (ch == '\n') { // Ready to read stream data QTC::TC("qpdf", "QPDF stream with CRNL"); } else { // Treat the \r by itself as the // whitespace after endstream and // start reading stream data in spite // of not having seen a newline. QTC::TC("qpdf", "QPDF stream with CR only"); input->unreadCh(ch); warn(QPDFExc( qpdf_e_damaged_pdf, input->getName(), this->m->last_object_description, input->tell(), "stream keyword followed" " by carriage return only")); } } } else { QTC::TC("qpdf", "QPDF stream without newline"); if (! QUtil::is_space(ch)) { QTC::TC("qpdf", "QPDF stream with non-space"); input->unreadCh(ch); } warn(QPDFExc(qpdf_e_damaged_pdf, input->getName(), this->m->last_object_description, input->tell(), "stream keyword not followed" " by proper line terminator")); } } // Must get offset before accessing any additional // objects since resolving a previously unresolved // indirect object will change file position. qpdf_offset_t stream_offset = input->tell(); size_t length = 0; try { std::map<std::string, QPDFObjectHandle> dict = object.getDictAsMap(); if (dict.count("/Length") == 0) { QTC::TC("qpdf", "QPDF stream without length"); throw QPDFExc(qpdf_e_damaged_pdf, input->getName(), this->m->last_object_description, offset, "stream dictionary lacks /Length key"); } QPDFObjectHandle length_obj = dict["/Length"]; if (! 
length_obj.isInteger()) { QTC::TC("qpdf", "QPDF stream length not integer"); throw QPDFExc(qpdf_e_damaged_pdf, input->getName(), this->m->last_object_description, offset, "/Length key in stream dictionary is not " "an integer"); } length = length_obj.getIntValue(); input->seek(stream_offset + length, SEEK_SET); if (! (readToken(input) == QPDFTokenizer::Token( QPDFTokenizer::tt_word, "endstream"))) { QTC::TC("qpdf", "QPDF missing endstream"); throw QPDFExc(qpdf_e_damaged_pdf, input->getName(), this->m->last_object_description, input->getLastOffset(), "expected endstream"); } } catch (QPDFExc& e) { if (this->m->attempt_recovery) { warn(e); length = recoverStreamLength( input, objid, generation, stream_offset); } else { throw e; } } object = QPDFObjectHandle::Factory::newStream( this, objid, generation, object, stream_offset, length); } else { input->seek(cur_offset, SEEK_SET); } } // Override last_offset so that it points to the beginning of the // object we just read input->setLastOffset(offset); return object; }
0
[ "CWE-125" ]
qpdf
1868a10f8b06631362618bfc85ca8646da4b4b71
231,382,504,302,999,000,000,000,000,000,000,000,000
170
Replace all atoi calls with QUtil::string_to_int The latter catches underflow/overflow.
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp) { int ret = -1; Error *local_err = NULL; BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags); ret = bdrv_reopen_multiple(queue, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); } return ret; }
0
[ "CWE-190" ]
qemu
8f4754ede56e3f9ea3fd7207f4a7c4453e59285b
219,585,412,674,231,250,000,000,000,000,000,000,000
12
block: Limit request size (CVE-2014-0143) Limiting the size of a single request to INT_MAX not only fixes a direct integer overflow in bdrv_check_request() (which would only trigger bad behaviour with ridiculously huge images, as in close to 2^64 bytes), but can also prevent overflows in all block drivers. Signed-off-by: Kevin Wolf <[email protected]> Reviewed-by: Max Reitz <[email protected]> Signed-off-by: Stefan Hajnoczi <[email protected]>
krb5_error_code kdb_samba_db_get_principal(krb5_context context, krb5_const_principal princ, unsigned int kflags, krb5_db_entry **kentry) { struct mit_samba_context *mit_ctx; krb5_error_code code; mit_ctx = ks_get_context(context); if (mit_ctx == NULL) { return KRB5_KDB_DBNOTINITED; } if (ks_is_master_key_principal(context, princ)) { return ks_get_master_key_principal(context, princ, kentry); } /* * Fake a kadmin/admin and kadmin/history principal so that kadmindd can * start */ if (ks_is_kadmin_admin(context, princ) || ks_is_kadmin_history(context, princ)) { return ks_get_admin_principal(context, princ, kentry); } code = ks_get_principal(context, princ, kflags, kentry); /* * This restricts the changepw account so it isn't able to request a * service ticket. It also marks the principal as the changepw service. */ if (ks_is_kadmin_changepw(context, princ)) { /* FIXME: shouldn't we also set KRB5_KDB_DISALLOW_TGT_BASED ? * testing showed that setpw kpasswd command fails then on the * server though... */ (*kentry)->attributes |= KRB5_KDB_PWCHANGE_SERVICE; (*kentry)->max_life = CHANGEPW_LIFETIME; } return code; }
0
[]
samba
4ef445a1f37e77df8016d240fcf22927165b8c03
203,313,802,235,584,100,000,000,000,000,000,000,000
42
CVE-2020-25719 mit-samba: Add ks_free_principal() BUG: https://bugzilla.samba.org/show_bug.cgi?id=14561 [[email protected] As submitted in patch to Samba bugzilla to address this issue as https://attachments.samba.org/attachment.cgi?id=16724 on overall bug https://bugzilla.samba.org/show_bug.cgi?id=14725] Signed-off-by: Andrew Bartlett <[email protected]> Reviewed-by: Douglas Bagnall <[email protected]>
static int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) return -EINVAL; if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags) return -EINVAL; req->cancel.addr = READ_ONCE(sqe->addr); return 0; }
0
[]
linux
0f2122045b946241a9e549c2a76cea54fa58a7ff
198,603,032,876,989,630,000,000,000,000,000,000,000
13
io_uring: don't rely on weak ->files references Grab actual references to the files_struct. To avoid circular references issues due to this, we add a per-task note that keeps track of what io_uring contexts a task has used. When the tasks execs or exits its assigned files, we cancel requests based on this tracking. With that, we can grab proper references to the files table, and no longer need to rely on stashing away ring_fd and ring_file to check if the ring_fd may have been closed. Cc: [email protected] # v5.5+ Reviewed-by: Pavel Begunkov <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
int sc_mutex_destroy(const sc_context_t *ctx, void *mutex) { if (ctx == NULL) return SC_ERROR_INVALID_ARGUMENTS; if (ctx->thread_ctx != NULL && ctx->thread_ctx->destroy_mutex != NULL) return ctx->thread_ctx->destroy_mutex(mutex); else return SC_SUCCESS; }
0
[ "CWE-415", "CWE-119" ]
OpenSC
360e95d45ac4123255a4c796db96337f332160ad
258,948,145,834,546,800,000,000,000,000,000,000,000
9
fixed out of bounds writes Thanks to Eric Sesterhenn from X41 D-SEC GmbH for reporting the problems.
int propfind_aclrestrict(const xmlChar *name, xmlNsPtr ns, struct propfind_ctx *fctx, xmlNodePtr prop __attribute__((unused)), xmlNodePtr resp __attribute__((unused)), struct propstat propstat[], void *rock __attribute__((unused))) { xmlNodePtr node = xml_add_prop(HTTP_OK, fctx->ns[NS_DAV], &propstat[PROPSTAT_OK], name, ns, NULL, 0); xmlNewChild(node, NULL, BAD_CAST "no-invert", NULL); return 0; }
0
[]
cyrus-imapd
6703ff881b6056e0c045a7b795ce8ba1bbb87027
134,205,953,063,066,100,000,000,000,000,000,000,000
14
http_dav.c: add 'private' Cache-Control directive for cacheable responses that require authentication
int set_zval_xmlrpc_type(zval* value, XMLRPC_VALUE_TYPE newtype) /* {{{ */ { int bSuccess = FAILURE; /* we only really care about strings because they can represent * base64 and datetime. all other types have corresponding php types */ if (Z_TYPE_P(value) == IS_STRING) { if (newtype == xmlrpc_base64 || newtype == xmlrpc_datetime) { const char* typestr = xmlrpc_type_as_str(newtype, xmlrpc_vector_none); zval type; ZVAL_STRING(&type, typestr); if (newtype == xmlrpc_datetime) { XMLRPC_VALUE v = XMLRPC_CreateValueDateTime_ISO8601(NULL, Z_STRVAL_P(value)); if (v) { time_t timestamp = (time_t) php_parse_date((char *)XMLRPC_GetValueDateTime_ISO8601(v), NULL); if (timestamp != -1) { zval ztimestamp; ZVAL_LONG(&ztimestamp, timestamp); convert_to_object(value); if (zend_hash_str_update(Z_OBJPROP_P(value), OBJECT_TYPE_ATTR, sizeof(OBJECT_TYPE_ATTR) - 1, &type)) { bSuccess = (zend_hash_str_update(Z_OBJPROP_P(value), OBJECT_VALUE_TS_ATTR, sizeof(OBJECT_VALUE_TS_ATTR) - 1, &ztimestamp) != NULL)? SUCCESS : FAILURE; } } else { zval_ptr_dtor(&type); } XMLRPC_CleanupValue(v); } else { zval_ptr_dtor(&type); } } else { convert_to_object(value); bSuccess = (zend_hash_str_update(Z_OBJPROP_P(value), OBJECT_TYPE_ATTR, sizeof(OBJECT_TYPE_ATTR) - 1, &type) != NULL)? SUCCESS : FAILURE; } } } return bSuccess; }
0
[]
php-src
f3c1863aa2721343245b63ac7bd68cfdc3dd41f3
125,778,513,291,655,640,000,000,000,000,000,000,000
43
Fixed #70728 Conflicts: ext/xmlrpc/xmlrpc-epi-php.c