func
stringlengths
0
484k
target
int64
0
1
cwe
listlengths
0
4
project
stringclasses
799 values
commit_id
stringlengths
40
40
hash
float64
1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
size
int64
1
24k
message
stringlengths
0
13.3k
add_bwrap (GPtrArray *array, ScriptExec *script) { const char * const usrmerged_dirs[] = { "bin", "lib64", "lib", "sbin" }; int i; g_return_val_if_fail (script->outdir != NULL, FALSE); g_return_val_if_fail (script->s_infile != NULL, FALSE); add_args (array, "bwrap", "--ro-bind", "/usr", "/usr", "--ro-bind", "/etc/ld.so.cache", "/etc/ld.so.cache", NULL); /* These directories might be symlinks into /usr/... */ for (i = 0; i < G_N_ELEMENTS (usrmerged_dirs); i++) { g_autofree char *absolute_dir = g_strdup_printf ("/%s", usrmerged_dirs[i]); if (!g_file_test (absolute_dir, G_FILE_TEST_EXISTS)) continue; if (path_is_usrmerged (absolute_dir)) { g_autofree char *symlink_target = g_strdup_printf ("/usr/%s", absolute_dir); add_args (array, "--symlink", symlink_target, absolute_dir, NULL); } else { add_args (array, "--ro-bind", absolute_dir, absolute_dir, NULL); } } add_args (array, "--proc", "/proc", "--dev", "/dev", "--chdir", "/", "--setenv", "GIO_USE_VFS", "local", "--unshare-all", "--die-with-parent", NULL); add_env (array, "G_MESSAGES_DEBUG"); add_env (array, "G_MESSAGES_PREFIXED"); /* Add gnome-desktop's install prefix if needed */ if (g_strcmp0 (INSTALL_PREFIX, "") != 0 && g_strcmp0 (INSTALL_PREFIX, "/usr") != 0 && g_strcmp0 (INSTALL_PREFIX, "/usr/") != 0) { add_args (array, "--ro-bind", INSTALL_PREFIX, INSTALL_PREFIX, NULL); } g_ptr_array_add (array, g_strdup ("--bind")); g_ptr_array_add (array, g_strdup (script->outdir)); g_ptr_array_add (array, g_strdup ("/tmp")); /* We make sure to also re-use the original file's original * extension in case it's useful for the thumbnailer to * identify the file type */ g_ptr_array_add (array, g_strdup ("--ro-bind")); g_ptr_array_add (array, g_strdup (script->infile)); g_ptr_array_add (array, g_strdup (script->s_infile)); return TRUE; }
1
[]
nautilus
2ddba428ef2b13d0620bd599c3635b9c11044659
45,307,206,411,643,750,000,000,000,000,000,000,000
74
Update gnome-desktop code Closes https://gitlab.gnome.org/GNOME/nautilus/issues/987
amstar_estimate( application_argument_t *argument) { GPtrArray *argv_ptr; int nullfd; int pipefd; FILE *dumpout = NULL; off_t size = -1; char line[32768]; char *errmsg = NULL; char *qerrmsg; char *qdisk; amwait_t wait_status; int starpid; amregex_t *rp; times_t start_time; int level = 0; GSList *levels = NULL; char *option; char *star_realpath; if (!argument->level) { fprintf(stderr, "ERROR No level argument\n"); error(_("No level argument")); } if (!argument->dle.disk) { fprintf(stderr, "ERROR No disk argument\n"); error(_("No disk argument")); } if (!argument->dle.device) { fprintf(stderr, "ERROR No device argument\n"); error(_("No device argument")); } if (argument->dle.include_list && argument->dle.include_list->nb_element >= 0) { fprintf(stderr, "ERROR include-list not supported for backup\n"); } if (check_device(argument) == 0) { return; } if ((option = validate_command_options(argument))) { fprintf(stderr, "ERROR Invalid '%s' COMMAND-OPTIONS\n", option); error("Invalid '%s' COMMAND-OPTIONS", option); } qdisk = quote_string(argument->dle.disk); if (argument->calcsize) { char *dirname; if (star_directory) { dirname = star_directory; } else { dirname = argument->dle.device; } run_calcsize(argument->config, "STAR", argument->dle.disk, dirname, argument->level, NULL, NULL); return; } if (!star_path) { errmsg = vstrallocf(_("STAR-PATH not defined")); goto common_error; } if (!check_exec_for_suid("STAR_PATH", star_path, NULL, &star_realpath)) { errmsg = g_strdup_printf("'%s' binary is not secure", star_path); goto common_error; } start_time = curclock(); for (levels = argument->level; levels != NULL; levels = levels->next) { level = GPOINTER_TO_INT(levels->data); argv_ptr = amstar_build_argv(star_realpath, argument, level, CMD_ESTIMATE, NULL); if ((nullfd = open("/dev/null", O_RDWR)) == -1) { errmsg = vstrallocf(_("Cannot access /dev/null : %s"), strerror(errno)); goto common_error; } starpid = pipespawnv(star_realpath, STDERR_PIPE, 1, &nullfd, &nullfd, &pipefd, 
(char **)argv_ptr->pdata); dumpout = fdopen(pipefd,"r"); if (!dumpout) { errmsg = vstrallocf(_("Can't fdopen: %s"), strerror(errno)); goto common_error; } size = (off_t)-1; while (size < 0 && (fgets(line, sizeof(line), dumpout)) != NULL) { if (line[strlen(line)-1] == '\n') /* remove trailling \n */ line[strlen(line)-1] = '\0'; if (line[0] == '\0') continue; dbprintf("%s\n", line); /* check for size match */ /*@ignore@*/ for(rp = re_table; rp->regex != NULL; rp++) { if(match(rp->regex, line)) { if (rp->typ == DMP_SIZE) { size = ((the_num(line, rp->field)*rp->scale+1023.0)/1024.0); if(size < 0.0) size = 1.0; /* found on NeXT -- sigh */ } break; } } /*@end@*/ } while ((fgets(line, sizeof(line), dumpout)) != NULL) { dbprintf("%s", line); } dbprintf(".....\n"); dbprintf(_("estimate time for %s level %d: %s\n"), qdisk, level, walltime_str(timessub(curclock(), start_time))); if(size == (off_t)-1) { errmsg = vstrallocf(_("no size line match in %s output"), star_realpath); dbprintf(_("%s for %s\n"), errmsg, qdisk); dbprintf(".....\n"); } else if(size == (off_t)0 && argument->level == 0) { dbprintf(_("possible %s problem -- is \"%s\" really empty?\n"), star_realpath, argument->dle.disk); dbprintf(".....\n"); } dbprintf(_("estimate size for %s level %d: %lld KB\n"), qdisk, level, (long long)size); kill(-starpid, SIGTERM); dbprintf(_("waiting for %s \"%s\" child\n"), star_realpath, qdisk); waitpid(starpid, &wait_status, 0); if (WIFSIGNALED(wait_status)) { errmsg = vstrallocf(_("%s terminated with signal %d: see %s"), star_realpath, WTERMSIG(wait_status), dbfn()); } else if (WIFEXITED(wait_status)) { if (WEXITSTATUS(wait_status) != 0) { errmsg = vstrallocf(_("%s exited with status %d: see %s"), star_realpath, WEXITSTATUS(wait_status), dbfn()); } else { /* Normal exit */ } } else { errmsg = vstrallocf(_("%s got bad exit: see %s"), star_realpath, dbfn()); } dbprintf(_("after %s %s wait\n"), star_realpath, qdisk); g_ptr_array_free_full(argv_ptr); aclose(nullfd); afclose(dumpout); 
fprintf(stdout, "%d %lld 1\n", level, (long long)size); } amfree(qdisk); amfree(star_realpath); return; common_error: dbprintf("%s\n", errmsg); qerrmsg = quote_string(errmsg); amfree(qdisk); dbprintf("%s", errmsg); fprintf(stdout, "ERROR %s\n", qerrmsg); amfree(errmsg); amfree(qerrmsg); amfree(star_realpath); }
0
[ "CWE-264" ]
amanda
4bf5b9b356848da98560ffbb3a07a9cb5c4ea6d7
287,440,053,018,186,530,000,000,000,000,000,000,000
180
* Add a /etc/amanda-security.conf file git-svn-id: https://svn.code.sf.net/p/amanda/code/amanda/branches/3_3@6486 a8d146d6-cc15-0410-8900-af154a0219e0
check_minimum0(struct table *t, int min) { int i, w, ww; struct table_cell *cell; if (t->col < 0) return; if (t->tabwidth[t->col] < 0) return; check_row(t, t->row); w = table_colspan(t, t->row, t->col); min += t->indent; if (w == 1) ww = min; else { cell = &t->cell; ww = 0; if (cell->icell >= 0 && cell->minimum_width[cell->icell] < min) cell->minimum_width[cell->icell] = min; } for (i = t->col; i <= t->maxcol && (i == t->col || (t->tabattr[t->row][i] & HTT_X)); i++) { if (t->minimum_width[i] < ww) t->minimum_width[i] = ww; } }
0
[ "CWE-119" ]
w3m
67a3db378f5ee3047c158eae4342f7e3245a2ab1
229,872,871,307,403,700,000,000,000,000,000,000,000
27
Fix table rowspan and colspan Origin: https://github.com/tats/w3m/pull/19 Bug-Debian: https://github.com/tats/w3m/issues/8
formatint(char *buf, size_t buflen, int flags, int prec, int type, PyObject *v) { /* fmt = '%#.' + `prec` + 'l' + `type` worst case length = 3 + 19 (worst len of INT_MAX on 64-bit machine) + 1 + 1 = 24 */ char fmt[64]; /* plenty big enough! */ char *sign; long x; x = PyInt_AsLong(v); if (x == -1 && PyErr_Occurred()) { PyErr_Format(PyExc_TypeError, "int argument required, not %.200s", Py_TYPE(v)->tp_name); return -1; } if (x < 0 && type == 'u') { type = 'd'; } if (x < 0 && (type == 'x' || type == 'X' || type == 'o')) sign = "-"; else sign = ""; if (prec < 0) prec = 1; if ((flags & F_ALT) && (type == 'x' || type == 'X')) { /* When converting under %#x or %#X, there are a number * of issues that cause pain: * - when 0 is being converted, the C standard leaves off * the '0x' or '0X', which is inconsistent with other * %#x/%#X conversions and inconsistent with Python's * hex() function * - there are platforms that violate the standard and * convert 0 with the '0x' or '0X' * (Metrowerks, Compaq Tru64) * - there are platforms that give '0x' when converting * under %#X, but convert 0 in accordance with the * standard (OS/2 EMX) * * We can achieve the desired consistency by inserting our * own '0x' or '0X' prefix, and substituting %x/%X in place * of %#x/%#X. * * Note that this is the same approach as used in * formatint() in unicodeobject.c */ PyOS_snprintf(fmt, sizeof(fmt), "%s0%c%%.%dl%c", sign, type, prec, type); } else { PyOS_snprintf(fmt, sizeof(fmt), "%s%%%s.%dl%c", sign, (flags&F_ALT) ? "#" : "", prec, type); } /* buf = '+'/'-'/'' + '0'/'0x'/'' + '[0-9]'*max(prec, len(x in octal)) * worst case buf = '-0x' + [0-9]*prec, where prec >= 11 */ if (buflen <= 14 || buflen <= (size_t)3 + (size_t)prec) { PyErr_SetString(PyExc_OverflowError, "formatted integer is too long (precision too large?)"); return -1; } if (sign[0]) PyOS_snprintf(buf, buflen, fmt, -x); else PyOS_snprintf(buf, buflen, fmt, x); return (int)strlen(buf); }
0
[ "CWE-190" ]
cpython
c3c9db89273fabc62ea1b48389d9a3000c1c03ae
196,323,800,790,619,840,000,000,000,000,000,000,000
71
[2.7] bpo-30657: Check & prevent integer overflow in PyString_DecodeEscape (#2174)
I2CBus *pxa2xx_i2c_bus(PXA2xxI2CState *s) { return s->bus; }
0
[ "CWE-119" ]
qemu
caa881abe0e01f9931125a0977ec33c5343e4aa7
122,510,306,779,809,510,000,000,000,000,000,000,000
4
pxa2xx: avoid buffer overrun on incoming migration CVE-2013-4533 s->rx_level is read from the wire and used to determine how many bytes to subsequently read into s->rx_fifo[]. If s->rx_level exceeds the length of s->rx_fifo[] the buffer can be overrun with arbitrary data from the wire. Fix this by validating rx_level against the size of s->rx_fifo. Cc: Don Koch <[email protected]> Reported-by: Michael Roth <[email protected]> Signed-off-by: Michael S. Tsirkin <[email protected]> Reviewed-by: Peter Maydell <[email protected]> Reviewed-by: Don Koch <[email protected]> Signed-off-by: Juan Quintela <[email protected]>
static bool gatt_server_register_att_handlers(struct bt_gatt_server *server) { /* Exchange MTU */ server->mtu_id = bt_att_register(server->att, BT_ATT_OP_MTU_REQ, exchange_mtu_cb, server, NULL); if (!server->mtu_id) return false; /* Read By Group Type */ server->read_by_grp_type_id = bt_att_register(server->att, BT_ATT_OP_READ_BY_GRP_TYPE_REQ, read_by_grp_type_cb, server, NULL); if (!server->read_by_grp_type_id) return false; /* Read By Type */ server->read_by_type_id = bt_att_register(server->att, BT_ATT_OP_READ_BY_TYPE_REQ, read_by_type_cb, server, NULL); if (!server->read_by_type_id) return false; /* Find Information */ server->find_info_id = bt_att_register(server->att, BT_ATT_OP_FIND_INFO_REQ, find_info_cb, server, NULL); if (!server->find_info_id) return false; /* Find By Type Value */ server->find_by_type_value_id = bt_att_register(server->att, BT_ATT_OP_FIND_BY_TYPE_REQ, find_by_type_val_cb, server, NULL); if (!server->find_by_type_value_id) return false; /* Write Request */ server->write_id = bt_att_register(server->att, BT_ATT_OP_WRITE_REQ, write_cb, server, NULL); if (!server->write_id) return false; /* Write Command */ server->write_cmd_id = bt_att_register(server->att, BT_ATT_OP_WRITE_CMD, write_cb, server, NULL); if (!server->write_cmd_id) return false; /* Read Request */ server->read_id = bt_att_register(server->att, BT_ATT_OP_READ_REQ, read_cb, server, NULL); if (!server->read_id) return false; /* Read Blob Request */ server->read_blob_id = bt_att_register(server->att, BT_ATT_OP_READ_BLOB_REQ, read_blob_cb, server, NULL); if (!server->read_blob_id) return false; /* Read Multiple Request */ server->read_multiple_id = bt_att_register(server->att, BT_ATT_OP_READ_MULT_REQ, read_multiple_cb, server, NULL); if (!server->read_multiple_id) return false; /* Read Multiple Variable Length Request */ server->read_multiple_vl_id = bt_att_register(server->att, BT_ATT_OP_READ_MULT_VL_REQ, read_multiple_cb, server, NULL); if (!server->read_multiple_vl_id) return 
false; /* Prepare Write Request */ server->prep_write_id = bt_att_register(server->att, BT_ATT_OP_PREP_WRITE_REQ, prep_write_cb, server, NULL); if (!server->prep_write_id) return false; /* Execute Write Request */ server->exec_write_id = bt_att_register(server->att, BT_ATT_OP_EXEC_WRITE_REQ, exec_write_cb, server, NULL); if (!server->exec_write_id) return NULL; return true; }
0
[ "CWE-287" ]
bluez
00da0fb4972cf59e1c075f313da81ea549cb8738
183,658,714,042,551,700,000,000,000,000,000,000,000
105
shared/gatt-server: Fix not properly checking for secure flags When passing the mask to check_permissions all valid permissions for the operation must be set including BT_ATT_PERM_SECURE flags.
size_t MemIo::write(const byte* data, size_t wcount) { p_->reserve(wcount); assert(p_->isMalloced_); if (data != nullptr) { std::memcpy(&p_->data_[p_->idx_], data, wcount); } p_->idx_ += wcount; return wcount; }
0
[ "CWE-125" ]
exiv2
bd0afe0390439b2c424d881c8c6eb0c5624e31d9
132,736,616,973,961,280,000,000,000,000,000,000,000
10
Add bounds check to MemIo::seek(). (#944) - Regression test for missing bounds check in MemIo::seek() - Add bounds check to MemIo::seek(), this fixes CVE-2019-13504
static inline int __queue_kicked_iocb(struct kiocb *iocb) { struct kioctx *ctx = iocb->ki_ctx; assert_spin_locked(&ctx->ctx_lock); if (list_empty(&iocb->ki_run_list)) { list_add_tail(&iocb->ki_run_list, &ctx->run_list); return 1; } return 0; }
0
[ "CWE-190" ]
linux-2.6
75e1c70fc31490ef8a373ea2a4bea2524099b478
40,628,005,164,507,577,000,000,000,000,000,000,000
13
aio: check for multiplication overflow in do_io_submit Tavis Ormandy pointed out that do_io_submit does not do proper bounds checking on the passed-in iocb array:        if (unlikely(nr < 0))                return -EINVAL;        if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(iocbpp)))))                return -EFAULT;                      ^^^^^^^^^^^^^^^^^^ The attached patch checks for overflow, and if it is detected, the number of iocbs submitted is scaled down to a number that will fit in the long.  This is an ok thing to do, as sys_io_submit is documented as returning the number of iocbs submitted, so callers should handle a return value of less than the 'nr' argument passed in. Reported-by: Tavis Ormandy <[email protected]> Signed-off-by: Jeff Moyer <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
UnicodeString::refCount() const { return umtx_loadAcquire(*((u_atomic_int32_t *)fUnion.fFields.fArray - 1)); }
0
[ "CWE-190", "CWE-787" ]
icu
b7d08bc04a4296982fcef8b6b8a354a9e4e7afca
85,412,626,633,807,710,000,000,000,000,000,000,000
3
ICU-20958 Prevent SEGV_MAPERR in append See #971
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) { return !!(v->arch.pending_exceptions) || kvm_request_pending(v); }
0
[ "CWE-476" ]
linux
ac64115a66c18c01745bbd3c47a36b124e5fd8c0
10,496,674,813,555,218,000,000,000,000,000,000,000
4
KVM: PPC: Fix oops when checking KVM_CAP_PPC_HTM The following program causes a kernel oops: #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <sys/ioctl.h> #include <linux/kvm.h> main() { int fd = open("/dev/kvm", O_RDWR); ioctl(fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_HTM); } This happens because when using the global KVM fd with KVM_CHECK_EXTENSION, kvm_vm_ioctl_check_extension() gets called with a NULL kvm argument, which gets dereferenced in is_kvmppc_hv_enabled(). Spotted while reading the code. Let's use the hv_enabled fallback variable, like everywhere else in this function. Fixes: 23528bb21ee2 ("KVM: PPC: Introduce KVM_CAP_PPC_HTM") Cc: [email protected] # v4.7+ Signed-off-by: Greg Kurz <[email protected]> Reviewed-by: David Gibson <[email protected]> Reviewed-by: Thomas Huth <[email protected]> Signed-off-by: Paul Mackerras <[email protected]>
static void tipc_node_delete_from_list(struct tipc_node *node) { #ifdef CONFIG_TIPC_CRYPTO tipc_crypto_key_flush(node->crypto_rx); #endif list_del_rcu(&node->list); hlist_del_rcu(&node->hash); tipc_node_put(node); }
0
[]
linux
0217ed2848e8538bcf9172d97ed2eeb4a26041bb
190,532,839,997,302,600,000,000,000,000,000,000,000
9
tipc: better validate user input in tipc_nl_retrieve_key() Before calling tipc_aead_key_size(ptr), we need to ensure we have enough data to dereference ptr->keylen. We probably also want to make sure tipc_aead_key_size() wont overflow with malicious ptr->keylen values. Syzbot reported: BUG: KMSAN: uninit-value in __tipc_nl_node_set_key net/tipc/node.c:2971 [inline] BUG: KMSAN: uninit-value in tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023 CPU: 0 PID: 21060 Comm: syz-executor.5 Not tainted 5.11.0-rc7-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:79 [inline] dump_stack+0x21c/0x280 lib/dump_stack.c:120 kmsan_report+0xfb/0x1e0 mm/kmsan/kmsan_report.c:118 __msan_warning+0x5f/0xa0 mm/kmsan/kmsan_instr.c:197 __tipc_nl_node_set_key net/tipc/node.c:2971 [inline] tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023 genl_family_rcv_msg_doit net/netlink/genetlink.c:739 [inline] genl_family_rcv_msg net/netlink/genetlink.c:783 [inline] genl_rcv_msg+0x1319/0x1610 net/netlink/genetlink.c:800 netlink_rcv_skb+0x6fa/0x810 net/netlink/af_netlink.c:2494 genl_rcv+0x63/0x80 net/netlink/genetlink.c:811 netlink_unicast_kernel net/netlink/af_netlink.c:1304 [inline] netlink_unicast+0x11d6/0x14a0 net/netlink/af_netlink.c:1330 netlink_sendmsg+0x1740/0x1840 net/netlink/af_netlink.c:1919 sock_sendmsg_nosec net/socket.c:652 [inline] sock_sendmsg net/socket.c:672 [inline] ____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345 ___sys_sendmsg net/socket.c:2399 [inline] __sys_sendmsg+0x714/0x830 net/socket.c:2432 __compat_sys_sendmsg net/compat.c:347 [inline] __do_compat_sys_sendmsg net/compat.c:354 [inline] __se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351 __ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351 do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline] __do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141 do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166 
do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209 entry_SYSENTER_compat_after_hwframe+0x4d/0x5c RIP: 0023:0xf7f60549 Code: 03 74 c0 01 10 05 03 74 b8 01 10 06 03 74 b4 01 10 07 03 74 b0 01 10 08 03 74 d8 01 00 00 00 00 00 51 52 55 89 e5 0f 34 cd 80 <5d> 5a 59 c3 90 90 90 90 8d b4 26 00 00 00 00 8d b4 26 00 00 00 00 RSP: 002b:00000000f555a5fc EFLAGS: 00000296 ORIG_RAX: 0000000000000172 RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 0000000020000200 RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000 RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000 R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 Uninit was created at: kmsan_save_stack_with_flags mm/kmsan/kmsan.c:121 [inline] kmsan_internal_poison_shadow+0x5c/0xf0 mm/kmsan/kmsan.c:104 kmsan_slab_alloc+0x8d/0xe0 mm/kmsan/kmsan_hooks.c:76 slab_alloc_node mm/slub.c:2907 [inline] __kmalloc_node_track_caller+0xa37/0x1430 mm/slub.c:4527 __kmalloc_reserve net/core/skbuff.c:142 [inline] __alloc_skb+0x2f8/0xb30 net/core/skbuff.c:210 alloc_skb include/linux/skbuff.h:1099 [inline] netlink_alloc_large_skb net/netlink/af_netlink.c:1176 [inline] netlink_sendmsg+0xdbc/0x1840 net/netlink/af_netlink.c:1894 sock_sendmsg_nosec net/socket.c:652 [inline] sock_sendmsg net/socket.c:672 [inline] ____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345 ___sys_sendmsg net/socket.c:2399 [inline] __sys_sendmsg+0x714/0x830 net/socket.c:2432 __compat_sys_sendmsg net/compat.c:347 [inline] __do_compat_sys_sendmsg net/compat.c:354 [inline] __se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351 __ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351 do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline] __do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141 do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166 do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209 entry_SYSENTER_compat_after_hwframe+0x4d/0x5c Fixes: e1f32190cf7d ("tipc: add 
support for AEAD key setting via netlink") Signed-off-by: Eric Dumazet <[email protected]> Cc: Tuong Lien <[email protected]> Cc: Jon Maloy <[email protected]> Cc: Ying Xue <[email protected]> Reported-by: syzbot <[email protected]> Signed-off-by: David S. Miller <[email protected]>
my_ulonglong STDCALL mysql_stmt_insert_id(MYSQL_STMT *stmt) { return stmt->insert_id; }
0
[]
mysql-server
3d8134d2c9b74bc8883ffe2ef59c168361223837
318,735,219,444,780,560,000,000,000,000,000,000,000
4
Bug#25988681: USE-AFTER-FREE IN MYSQL_STMT_CLOSE() Description: If mysql_stmt_close() encountered error, it recorded error in prepared statement but then frees memory assigned to prepared statement. If mysql_stmt_error() is used to get error information, it will result into use after free. In all cases where mysql_stmt_close() can fail, error would have been set by cli_advanced_command in MYSQL structure. Solution: Don't copy error from MYSQL using set_stmt_errmsg. There is no automated way to test the fix since it is in mysql_stmt_close() which does not expect any reply from server. Reviewed-By: Georgi Kodinov <[email protected]> Reviewed-By: Ramil Kalimullin <[email protected]>
char get_header_code(struct hdr_field *hf) { switch(hf->type){ case HDR_CALLID_T: return 'i'; case HDR_CONTACT_T: return 'm'; case HDR_CONTENTLENGTH_T: return 'l'; case HDR_CONTENTTYPE_T: return 'c'; case HDR_FROM_T: return 'f'; case HDR_SUBJECT_T: return 's'; case HDR_SUPPORTED_T: return 'k'; case HDR_TO_T: return 't'; case HDR_VIA_T: return 'v'; case HDR_ROUTE_T: return 'r'; case HDR_RECORDROUTE_T: return 'R'; case HDR_ALLOW_T: return 'a'; case HDR_ACCEPT_T: return 'A'; case HDR_CSEQ_T: return 'S'; case HDR_REFER_TO_T: return 'o'; case HDR_RPID_T: return 'p'; case HDR_EXPIRES_T: return 'P'; case HDR_AUTHORIZATION_T: return 'H'; case HDR_PROXYAUTH_T: return 'z'; default: return 'x'; } return 'x'; }
0
[ "CWE-119", "CWE-284" ]
kamailio
f50c9c853e7809810099c970780c30b0765b0643
19,641,402,871,440,398,000,000,000,000,000,000,000
46
seas: safety check for target buffer size before copying message in encode_msg() - avoid buffer overflow for large SIP messages - reported by Stelios Tsampas
static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common) { enum ppe_qid_mode qid_mode; struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev; enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode; dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 0); mdelay(100); dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 1); mdelay(100); if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) { switch (dsaf_mode) { case DSAF_MODE_ENABLE_FIX: case DSAF_MODE_DISABLE_FIX: qid_mode = PPE_QID_MODE0; hns_ppe_set_qid(ppe_common, 0); break; case DSAF_MODE_ENABLE_0VM: case DSAF_MODE_DISABLE_2PORT_64VM: qid_mode = PPE_QID_MODE3; break; case DSAF_MODE_ENABLE_8VM: case DSAF_MODE_DISABLE_2PORT_16VM: qid_mode = PPE_QID_MODE4; break; case DSAF_MODE_ENABLE_16VM: case DSAF_MODE_DISABLE_6PORT_0VM: qid_mode = PPE_QID_MODE5; break; case DSAF_MODE_ENABLE_32VM: case DSAF_MODE_DISABLE_6PORT_16VM: qid_mode = PPE_QID_MODE2; break; case DSAF_MODE_ENABLE_128VM: case DSAF_MODE_DISABLE_6PORT_4VM: qid_mode = PPE_QID_MODE1; break; case DSAF_MODE_DISABLE_2PORT_8VM: qid_mode = PPE_QID_MODE7; break; case DSAF_MODE_DISABLE_6PORT_2VM: qid_mode = PPE_QID_MODE6; break; default: dev_err(ppe_common->dev, "get ppe queue mode failed! dsaf_mode=%d\n", dsaf_mode); return -EINVAL; } hns_ppe_set_qid_mode(ppe_common, qid_mode); } dsaf_set_dev_bit(ppe_common, PPE_COM_COMMON_CNT_CLR_CE_REG, PPE_COMMON_CNT_CLR_CE_B, 1); return 0; }
0
[ "CWE-119", "CWE-703" ]
linux
412b65d15a7f8a93794653968308fc100f2aa87c
41,530,377,909,045,352,000,000,000,000,000,000,000
58
net: hns: fix ethtool_get_strings overflow in hns driver hns_get_sset_count() returns HNS_NET_STATS_CNT and the data space allocated is not enough for ethtool_get_strings(), which will cause random memory corruption. When SLAB and DEBUG_SLAB are both enabled, memory corruptions like the the following can be observed without this patch: [ 43.115200] Slab corruption (Not tainted): Acpi-ParseExt start=ffff801fb0b69030, len=80 [ 43.115206] Redzone: 0x9f911029d006462/0x5f78745f31657070. [ 43.115208] Last user: [<5f7272655f746b70>](0x5f7272655f746b70) [ 43.115214] 010: 70 70 65 31 5f 74 78 5f 70 6b 74 00 6b 6b 6b 6b ppe1_tx_pkt.kkkk [ 43.115217] 030: 70 70 65 31 5f 74 78 5f 70 6b 74 5f 6f 6b 00 6b ppe1_tx_pkt_ok.k [ 43.115218] Next obj: start=ffff801fb0b69098, len=80 [ 43.115220] Redzone: 0x706d655f6f666966/0x9f911029d74e35b. [ 43.115229] Last user: [<ffff0000084b11b0>](acpi_os_release_object+0x28/0x38) [ 43.115231] 000: 74 79 00 6b 6b 6b 6b 6b 70 70 65 31 5f 74 78 5f ty.kkkkkppe1_tx_ [ 43.115232] 010: 70 6b 74 5f 65 72 72 5f 63 73 75 6d 5f 66 61 69 pkt_err_csum_fai Signed-off-by: Timmy Li <[email protected]> Signed-off-by: David S. Miller <[email protected]>
get_term_code(char_u *tname) { int opt_idx; char_u *varp; if (tname[0] != 't' || tname[1] != '_' || tname[2] == NUL || tname[3] == NUL) return NULL; if ((opt_idx = findoption(tname)) >= 0) { varp = get_varp(&(options[opt_idx])); if (varp != NULL) varp = *(char_u **)(varp); return varp; } return find_termcode(tname + 2); }
0
[ "CWE-20" ]
vim
d0b5138ba4bccff8a744c99836041ef6322ed39a
269,302,810,778,548,000,000,000,000,000,000,000,000
17
patch 8.0.0056 Problem: When setting 'filetype' there is no check for a valid name. Solution: Only allow valid characters in 'filetype', 'syntax' and 'keymap'.
static void load_xsave(struct kvm_vcpu *vcpu, u8 *src) { struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave; u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET); u64 valid; /* * Copy legacy XSAVE area, to avoid complications with CPUID * leaves 0 and 1 in the loop below. */ memcpy(xsave, src, XSAVE_HDR_OFFSET); /* Set XSTATE_BV and possibly XCOMP_BV. */ xsave->header.xfeatures = xstate_bv; if (cpu_has_xsaves) xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED; /* * Copy each region from the non-compacted offset to the * possibly compacted offset. */ valid = xstate_bv & ~XFEATURE_MASK_FPSSE; while (valid) { u64 feature = valid & -valid; int index = fls64(feature) - 1; void *dest = get_xsave_addr(xsave, feature); if (dest) { u32 size, offset, ecx, edx; cpuid_count(XSTATE_CPUID, index, &size, &offset, &ecx, &edx); memcpy(dest, src + offset, size); } valid -= feature; } }
0
[ "CWE-369" ]
linux
0185604c2d82c560dab2f2933a18f797e74ab5a8
4,806,329,731,430,965,500,000,000,000,000,000,000
37
KVM: x86: Reload pit counters for all channels when restoring state Currently if userspace restores the pit counters with a count of 0 on channels 1 or 2 and the guest attempts to read the count on those channels, then KVM will perform a mod of 0 and crash. This will ensure that 0 values are converted to 65536 as per the spec. This is CVE-2015-7513. Signed-off-by: Andy Honig <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
set_salt_padata(METHOD_DATA *md, Salt *salt) { if (salt) { realloc_method_data(md); md->val[md->len - 1].padata_type = salt->type; der_copy_octet_string(&salt->salt, &md->val[md->len - 1].padata_value); } }
0
[ "CWE-476" ]
heimdal
1a6a6e462dc2ac6111f9e02c6852ddec4849b887
207,736,870,850,332,200,000,000,000,000,000,000,000
9
Security: Avoid NULL structure pointer member dereference This can happen in the error path when processing malformed AS requests with a NULL client name. Bug originally introduced on Fri Feb 13 09:26:01 2015 +0100 in commit: a873e21d7c06f22943a90a41dc733ae76799390d kdc: base _kdc_fast_mk_error() on krb5_mk_error_ext() Original patch by Jeffrey Altman <[email protected]>
xmlXPtrEvalFullXPtr(xmlXPathParserContextPtr ctxt, xmlChar *name) { if (name == NULL) name = xmlXPathParseName(ctxt); if (name == NULL) XP_ERROR(XPATH_EXPR_ERROR); while (name != NULL) { ctxt->error = XPATH_EXPRESSION_OK; xmlXPtrEvalXPtrPart(ctxt, name); /* in case of syntax error, break here */ if ((ctxt->error != XPATH_EXPRESSION_OK) && (ctxt->error != XML_XPTR_UNKNOWN_SCHEME)) return; /* * If the returned value is a non-empty nodeset * or location set, return here. */ if (ctxt->value != NULL) { xmlXPathObjectPtr obj = ctxt->value; switch (obj->type) { case XPATH_LOCATIONSET: { xmlLocationSetPtr loc = ctxt->value->user; if ((loc != NULL) && (loc->locNr > 0)) return; break; } case XPATH_NODESET: { xmlNodeSetPtr loc = ctxt->value->nodesetval; if ((loc != NULL) && (loc->nodeNr > 0)) return; break; } default: break; } /* * Evaluating to improper values is equivalent to * a sub-resource error, clean-up the stack */ do { obj = valuePop(ctxt); if (obj != NULL) { xmlXPathFreeObject(obj); } } while (obj != NULL); } /* * Is there another XPointer part. */ SKIP_BLANKS; name = xmlXPathParseName(ctxt); } }
0
[ "CWE-415" ]
libxml2
f5048b3e71fc30ad096970b8df6e7af073bae4cb
259,916,513,709,300,560,000,000,000,000,000,000,000
57
Hardening of XPath evaluation Add a mechanism of frame for XPath evaluation when entering a function or a scoped evaluation, also fix a potential problem in predicate evaluation.
entry_guard_consider_retry(entry_guard_t *guard) { if (guard->is_reachable != GUARD_REACHABLE_NO) return; /* No retry needed. */ const time_t now = approx_time(); const int delay = get_retry_schedule(guard->failing_since, now, guard->is_primary); const time_t last_attempt = guard->last_tried_to_connect; if (BUG(last_attempt == 0) || now >= last_attempt + delay) { /* We should mark this retriable. */ char tbuf[ISO_TIME_LEN+1]; format_local_iso_time(tbuf, last_attempt); log_info(LD_GUARD, "Marked %s%sguard %s for possible retry, since we " "haven't tried to use it since %s.", guard->is_primary?"primary ":"", guard->confirmed_idx>=0?"confirmed ":"", entry_guard_describe(guard), tbuf); guard->is_reachable = GUARD_REACHABLE_MAYBE; if (guard->is_filtered_guard) guard->is_usable_filtered_guard = 1; } }
0
[ "CWE-200" ]
tor
665baf5ed5c6186d973c46cdea165c0548027350
137,271,603,939,493,160,000,000,000,000,000,000,000
27
Consider the exit family when applying guard restrictions. When the new path selection logic went into place, I accidentally dropped the code that considered the _family_ of the exit node when deciding if the guard was usable, and we didn't catch that during code review. This patch makes the guard_restriction_t code consider the exit family as well, and adds some (hopefully redundant) checks for the case where we lack a node_t for a guard but we have a bridge_info_t for it. Fixes bug 22753; bugfix on 0.3.0.1-alpha. Tracked as TROVE-2016-006 and CVE-2017-0377.
static int smtp_data(struct Connection *conn, const char *msgfile) { char buf[1024]; struct Progress progress; struct stat st; int rc, term = 0; size_t buflen = 0; FILE *fp = fopen(msgfile, "r"); if (!fp) { mutt_error(_("SMTP session failed: unable to open %s"), msgfile); return -1; } stat(msgfile, &st); unlink(msgfile); mutt_progress_init(&progress, _("Sending message..."), MUTT_PROGRESS_NET, st.st_size); snprintf(buf, sizeof(buf), "DATA\r\n"); if (mutt_socket_send(conn, buf) == -1) { mutt_file_fclose(&fp); return SMTP_ERR_WRITE; } rc = smtp_get_resp(conn); if (rc != 0) { mutt_file_fclose(&fp); return rc; } while (fgets(buf, sizeof(buf) - 1, fp)) { buflen = mutt_str_strlen(buf); term = buflen && buf[buflen - 1] == '\n'; if (term && ((buflen == 1) || (buf[buflen - 2] != '\r'))) snprintf(buf + buflen - 1, sizeof(buf) - buflen + 1, "\r\n"); if (buf[0] == '.') { if (mutt_socket_send_d(conn, ".", MUTT_SOCK_LOG_FULL) == -1) { mutt_file_fclose(&fp); return SMTP_ERR_WRITE; } } if (mutt_socket_send_d(conn, buf, MUTT_SOCK_LOG_FULL) == -1) { mutt_file_fclose(&fp); return SMTP_ERR_WRITE; } mutt_progress_update(&progress, ftell(fp), -1); } if (!term && buflen && (mutt_socket_send_d(conn, "\r\n", MUTT_SOCK_LOG_FULL) == -1)) { mutt_file_fclose(&fp); return SMTP_ERR_WRITE; } mutt_file_fclose(&fp); /* terminate the message body */ if (mutt_socket_send(conn, ".\r\n") == -1) return SMTP_ERR_WRITE; rc = smtp_get_resp(conn); if (rc != 0) return rc; return 0; }
0
[ "CWE-94", "CWE-74" ]
neomutt
fb013ec666759cb8a9e294347c7b4c1f597639cc
50,402,002,451,614,270,000,000,000,000,000,000,000
69
tls: clear data after a starttls acknowledgement After a starttls acknowledgement message, clear the buffers of any incoming data / commands. This will ensure that all future data is handled securely. Co-authored-by: Pietro Cerutti <[email protected]>
static int copy_verifier_state(struct bpf_verifier_state *dst_state, const struct bpf_verifier_state *src) { struct bpf_func_state *dst; int i, err; /* if dst has more stack frames then src frame, free them */ for (i = src->curframe + 1; i <= dst_state->curframe; i++) { free_func_state(dst_state->frame[i]); dst_state->frame[i] = NULL; } dst_state->speculative = src->speculative; dst_state->curframe = src->curframe; for (i = 0; i <= src->curframe; i++) { dst = dst_state->frame[i]; if (!dst) { dst = kzalloc(sizeof(*dst), GFP_KERNEL); if (!dst) return -ENOMEM; dst_state->frame[i] = dst; } err = copy_func_state(dst, src->frame[i]); if (err) return err; } return 0; }
0
[ "CWE-703", "CWE-189" ]
linux
979d63d50c0c0f7bc537bf821e056cc9fe5abd38
155,805,037,833,285,600,000,000,000,000,000,000,000
27
bpf: prevent out of bounds speculation on pointer arithmetic Jann reported that the original commit back in b2157399cc98 ("bpf: prevent out-of-bounds speculation") was not sufficient to stop CPU from speculating out of bounds memory access: While b2157399cc98 only focussed on masking array map access for unprivileged users for tail calls and data access such that the user provided index gets sanitized from BPF program and syscall side, there is still a more generic form affected from BPF programs that applies to most maps that hold user data in relation to dynamic map access when dealing with unknown scalars or "slow" known scalars as access offset, for example: - Load a map value pointer into R6 - Load an index into R7 - Do a slow computation (e.g. with a memory dependency) that loads a limit into R8 (e.g. load the limit from a map for high latency, then mask it to make the verifier happy) - Exit if R7 >= R8 (mispredicted branch) - Load R0 = R6[R7] - Load R0 = R6[R0] For unknown scalars there are two options in the BPF verifier where we could derive knowledge from in order to guarantee safe access to the memory: i) While </>/<=/>= variants won't allow to derive any lower or upper bounds from the unknown scalar where it would be safe to add it to the map value pointer, it is possible through ==/!= test however. ii) another option is to transform the unknown scalar into a known scalar, for example, through ALU ops combination such as R &= <imm> followed by R |= <imm> or any similar combination where the original information from the unknown scalar would be destroyed entirely leaving R with a constant. The initial slow load still precedes the latter ALU ops on that register, so the CPU executes speculatively from that point. Once we have the known scalar, any compare operation would work then. A third option only involving registers with known scalars could be crafted as described in [0] where a CPU port (e.g. 
Slow Int unit) would be filled with many dependent computations such that the subsequent condition depending on its outcome has to wait for evaluation on its execution port and thereby executing speculatively if the speculated code can be scheduled on a different execution port, or any other form of mistraining as described in [1], for example. Given this is not limited to only unknown scalars, not only map but also stack access is affected since both is accessible for unprivileged users and could potentially be used for out of bounds access under speculation. In order to prevent any of these cases, the verifier is now sanitizing pointer arithmetic on the offset such that any out of bounds speculation would be masked in a way where the pointer arithmetic result in the destination register will stay unchanged, meaning offset masked into zero similar as in array_index_nospec() case. With regards to implementation, there are three options that were considered: i) new insn for sanitation, ii) push/pop insn and sanitation as inlined BPF, iii) reuse of ax register and sanitation as inlined BPF. Option i) has the downside that we end up using from reserved bits in the opcode space, but also that we would require each JIT to emit masking as native arch opcodes meaning mitigation would have slow adoption till everyone implements it eventually which is counter-productive. Option ii) and iii) have both in common that a temporary register is needed in order to implement the sanitation as inlined BPF since we are not allowed to modify the source register. While a push / pop insn in ii) would be useful to have in any case, it requires once again that every JIT needs to implement it first. While possible, amount of changes needed would also be unsuitable for a -stable patch. Therefore, the path which has fewer changes, less BPF instructions for the mitigation and does not require anything to be changed in the JITs is option iii) which this work is pursuing. 
The ax register is already mapped to a register in all JITs (modulo arm32 where it's mapped to stack as various other BPF registers there) and used in constant blinding for JITs-only so far. It can be reused for verifier rewrites under certain constraints. The interpreter's tmp "register" has therefore been remapped into extending the register set with hidden ax register and reusing that for a number of instructions that needed the prior temporary variable internally (e.g. div, mod). This allows for zero increase in stack space usage in the interpreter, and enables (restricted) generic use in rewrites otherwise as long as such a patchlet does not make use of these instructions. The sanitation mask is dynamic and relative to the offset the map value or stack pointer currently holds. There are various cases that need to be taken under consideration for the masking, e.g. such operation could look as follows: ptr += val or val += ptr or ptr -= val. Thus, the value to be sanitized could reside either in source or in destination register, and the limit is different depending on whether the ALU op is addition or subtraction and depending on the current known and bounded offset. The limit is derived as follows: limit := max_value_size - (smin_value + off). For subtraction: limit := umax_value + off. This holds because we do not allow any pointer arithmetic that would temporarily go out of bounds or would have an unknown value with mixed signed bounds where it is unclear at verification time whether the actual runtime value would be either negative or positive. For example, we have a derived map pointer value with constant offset and bounded one, so limit based on smin_value works because the verifier requires that statically analyzed arithmetic on the pointer must be in bounds, and thus it checks if resulting smin_value + off and umax_value + off is still within map value bounds at time of arithmetic in addition to time of access. 
Similarly, for the case of stack access we derive the limit as follows: MAX_BPF_STACK + off for subtraction and -off for the case of addition where off := ptr_reg->off + ptr_reg->var_off.value. Subtraction is a special case for the masking which can be in form of ptr += -val, ptr -= -val, or ptr -= val. In the first two cases where we know that the value is negative, we need to temporarily negate the value in order to do the sanitation on a positive value where we later swap the ALU op, and restore original source register if the value was in source. The sanitation of pointer arithmetic alone is still not fully sufficient as is, since a scenario like the following could happen ... PTR += 0x1000 (e.g. K-based imm) PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON PTR += 0x1000 PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON [...] ... which under speculation could end up as ... PTR += 0x1000 PTR -= 0 [ truncated by mitigation ] PTR += 0x1000 PTR -= 0 [ truncated by mitigation ] [...] ... and therefore still access out of bounds. To prevent such case, the verifier is also analyzing safety for potential out of bounds access under speculative execution. Meaning, it is also simulating pointer access under truncation. We therefore "branch off" and push the current verification state after the ALU operation with known 0 to the verification stack for later analysis. Given the current path analysis succeeded it is likely that the one under speculation can be pruned. In any case, it is also subject to existing complexity limits and therefore anything beyond this point will be rejected. In terms of pruning, it needs to be ensured that the verification state from speculative execution simulation must never prune a non-speculative execution path, therefore, we mark verifier state accordingly at the time of push_stack(). If verifier detects out of bounds access under speculative execution from one of the possible paths that includes a truncation, it will reject such program. 
Given we mask every reg-based pointer arithmetic for unprivileged programs, we've been looking into how it could affect real-world programs in terms of size increase. As the majority of programs are targeted for privileged-only use case, we've unconditionally enabled masking (with its alu restrictions on top of it) for privileged programs for the sake of testing in order to check i) whether they get rejected in its current form, and ii) by how much the number of instructions and size will increase. We've tested this by using Katran, Cilium and test_l4lb from the kernel selftests. For Katran we've evaluated balancer_kern.o, Cilium bpf_lxc.o and an older test object bpf_lxc_opt_-DUNKNOWN.o and l4lb we've used test_l4lb.o as well as test_l4lb_noinline.o. We found that none of the programs got rejected by the verifier with this change, and that impact is rather minimal to none. balancer_kern.o had 13,904 bytes (1,738 insns) xlated and 7,797 bytes JITed before and after the change. Most complex program in bpf_lxc.o had 30,544 bytes (3,817 insns) xlated and 18,538 bytes JITed before and after and none of the other tail call programs in bpf_lxc.o had any changes either. For the older bpf_lxc_opt_-DUNKNOWN.o object we found a small increase from 20,616 bytes (2,576 insns) and 12,536 bytes JITed before to 20,664 bytes (2,582 insns) and 12,558 bytes JITed after the change. Other programs from that object file had similar small increase. Both test_l4lb.o had no change and remained at 6,544 bytes (817 insns) xlated and 3,401 bytes JITed and for test_l4lb_noinline.o constant at 5,080 bytes (634 insns) xlated and 3,313 bytes JITed. This can be explained in that LLVM typically optimizes stack based pointer arithmetic by using K-based operations and that use of dynamic map access is not overly frequent. However, in future we may decide to optimize the algorithm further under known guarantees from branch and value speculation. 
Latter seems also unclear in terms of prediction heuristics that today's CPUs apply as well as whether there could be collisions in e.g. the predictor's Value History/Pattern Table for triggering out of bounds access, thus masking is performed unconditionally at this point but could be subject to relaxation later on. We were generally also brainstorming various other approaches for mitigation, but the blocker was always lack of available registers at runtime and/or overhead for runtime tracking of limits belonging to a specific pointer. Thus, we found this to be minimally intrusive under given constraints. With that in place, a simple example with sanitized access on unprivileged load at post-verification time looks as follows: # bpftool prog dump xlated id 282 [...] 28: (79) r1 = *(u64 *)(r7 +0) 29: (79) r2 = *(u64 *)(r7 +8) 30: (57) r1 &= 15 31: (79) r3 = *(u64 *)(r0 +4608) 32: (57) r3 &= 1 33: (47) r3 |= 1 34: (2d) if r2 > r3 goto pc+19 35: (b4) (u32) r11 = (u32) 20479 | 36: (1f) r11 -= r2 | Dynamic sanitation for pointer 37: (4f) r11 |= r2 | arithmetic with registers 38: (87) r11 = -r11 | containing bounded or known 39: (c7) r11 s>>= 63 | scalars in order to prevent 40: (5f) r11 &= r2 | out of bounds speculation. 41: (0f) r4 += r11 | 42: (71) r4 = *(u8 *)(r4 +0) 43: (6f) r4 <<= r1 [...] For the case where the scalar sits in the destination register as opposed to the source register, the following code is emitted for the above example: [...] 16: (b4) (u32) r11 = (u32) 20479 17: (1f) r11 -= r2 18: (4f) r11 |= r2 19: (87) r11 = -r11 20: (c7) r11 s>>= 63 21: (5f) r2 &= r11 22: (0f) r2 += r0 23: (61) r0 = *(u32 *)(r2 +0) [...] JIT blinding example with non-conflicting use of r10: [...] d5: je 0x0000000000000106 _ d7: mov 0x0(%rax),%edi | da: mov $0xf153246,%r10d | Index load from map value and e0: xor $0xf153259,%r10 | (const blinded) mask with 0x1f. e7: and %r10,%rdi |_ ea: mov $0x2f,%r10d | f0: sub %rdi,%r10 | Sanitized addition. 
Both use r10 f3: or %rdi,%r10 | but do not interfere with each f6: neg %r10 | other. (Neither do these instructions f9: sar $0x3f,%r10 | interfere with the use of ax as temp fd: and %r10,%rdi | in interpreter.) 100: add %rax,%rdi |_ 103: mov 0x0(%rdi),%eax [...] Tested that it fixes Jann's reproducer, and also checked that test_verifier and test_progs suite with interpreter, JIT and JIT with hardening enabled on x86-64 and arm64 runs successfully. [0] Speculose: Analyzing the Security Implications of Speculative Execution in CPUs, Giorgi Maisuradze and Christian Rossow, https://arxiv.org/pdf/1801.04084.pdf [1] A Systematic Evaluation of Transient Execution Attacks and Defenses, Claudio Canella, Jo Van Bulck, Michael Schwarz, Moritz Lipp, Benjamin von Berg, Philipp Ortner, Frank Piessens, Dmitry Evtyushkin, Daniel Gruss, https://arxiv.org/pdf/1811.05441.pdf Fixes: b2157399cc98 ("bpf: prevent out-of-bounds speculation") Reported-by: Jann Horn <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]> Acked-by: Alexei Starovoitov <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]>
void flush_font_metrics(void) { TFMPool *ptr; for(; (ptr = (TFMPool *)tfmpool.head); ) { tfmpool.head = LIST(ptr->next); mdvi_free(ptr->short_name); mdvi_free(ptr->tfminfo.chars); mdvi_free(ptr); } mdvi_hash_reset(&tfmhash, 0); }
0
[ "CWE-20" ]
evince
d4139205b010ed06310d14284e63114e88ec6de2
41,875,252,252,887,255,000,000,000,000,000,000,000
13
backends: Fix several security issues in the dvi-backend. See CVE-2010-2640, CVE-2010-2641, CVE-2010-2642 and CVE-2010-2643.
smb2_negotiate(const unsigned int xid, struct cifs_ses *ses, struct TCP_Server_Info *server) { int rc; spin_lock(&GlobalMid_Lock); server->CurrentMid = 0; spin_unlock(&GlobalMid_Lock); rc = SMB2_negotiate(xid, ses, server); /* BB we probably don't need to retry with modern servers */ if (rc == -EAGAIN) rc = -EHOSTDOWN; return rc; }
0
[ "CWE-476" ]
linux
d6f5e358452479fa8a773b5c6ccc9e4ec5a20880
81,395,044,481,338,110,000,000,000,000,000,000,000
15
cifs: fix NULL ptr dereference in smb2_ioctl_query_info() When calling smb2_ioctl_query_info() with invalid smb_query_info::flags, a NULL ptr dereference is triggered when trying to kfree() uninitialised rqst[n].rq_iov array. This also fixes leaked paths that are created in SMB2_open_init() which required SMB2_open_free() to properly free them. Here is a small C reproducer that triggers it #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <unistd.h> #include <fcntl.h> #include <sys/ioctl.h> #define die(s) perror(s), exit(1) #define QUERY_INFO 0xc018cf07 int main(int argc, char *argv[]) { int fd; if (argc < 2) exit(1); fd = open(argv[1], O_RDONLY); if (fd == -1) die("open"); if (ioctl(fd, QUERY_INFO, (uint32_t[]) { 0, 0, 0, 4, 0, 0}) == -1) die("ioctl"); close(fd); return 0; } mount.cifs //srv/share /mnt -o ... gcc repro.c && ./a.out /mnt/f0 [ 1832.124468] CIFS: VFS: \\w22-dc.zelda.test\test Invalid passthru query flags: 0x4 [ 1832.125043] general protection fault, probably for non-canonical address 0xdffffc0000000000: 0000 [#1] PREEMPT SMP KASAN NOPTI [ 1832.125764] KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007] [ 1832.126241] CPU: 3 PID: 1133 Comm: a.out Not tainted 5.17.0-rc8 #2 [ 1832.126630] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.15.0-0-g2dd4b9b-rebuilt.opensuse.org 04/01/2014 [ 1832.127322] RIP: 0010:smb2_ioctl_query_info+0x7a3/0xe30 [cifs] [ 1832.127749] Code: 00 00 00 fc ff df 48 c1 ea 03 80 3c 02 00 0f 85 6c 05 00 00 48 b8 00 00 00 00 00 fc ff df 4d 8b 74 24 28 4c 89 f2 48 c1 ea 03 <80> 3c 02 00 0f 85 cb 04 00 00 49 8b 3e e8 bb fc fa ff 48 89 da 48 [ 1832.128911] RSP: 0018:ffffc90000957b08 EFLAGS: 00010256 [ 1832.129243] RAX: dffffc0000000000 RBX: ffff888117e9b850 RCX: ffffffffa020580d [ 1832.129691] RDX: 0000000000000000 RSI: 0000000000000004 RDI: ffffffffa043a2c0 [ 1832.130137] RBP: ffff888117e9b878 R08: 0000000000000001 R09: 0000000000000003 [ 1832.130585] R10: fffffbfff4087458 R11: 
0000000000000001 R12: ffff888117e9b800 [ 1832.131037] R13: 00000000ffffffea R14: 0000000000000000 R15: ffff888117e9b8a8 [ 1832.131485] FS: 00007fcee9900740(0000) GS:ffff888151a00000(0000) knlGS:0000000000000000 [ 1832.131993] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 1832.132354] CR2: 00007fcee9a1ef5e CR3: 0000000114cd2000 CR4: 0000000000350ee0 [ 1832.132801] Call Trace: [ 1832.132962] <TASK> [ 1832.133104] ? smb2_query_reparse_tag+0x890/0x890 [cifs] [ 1832.133489] ? cifs_mapchar+0x460/0x460 [cifs] [ 1832.133822] ? rcu_read_lock_sched_held+0x3f/0x70 [ 1832.134125] ? cifs_strndup_to_utf16+0x15b/0x250 [cifs] [ 1832.134502] ? lock_downgrade+0x6f0/0x6f0 [ 1832.134760] ? cifs_convert_path_to_utf16+0x198/0x220 [cifs] [ 1832.135170] ? smb2_check_message+0x1080/0x1080 [cifs] [ 1832.135545] cifs_ioctl+0x1577/0x3320 [cifs] [ 1832.135864] ? lock_downgrade+0x6f0/0x6f0 [ 1832.136125] ? cifs_readdir+0x2e60/0x2e60 [cifs] [ 1832.136468] ? rcu_read_lock_sched_held+0x3f/0x70 [ 1832.136769] ? __rseq_handle_notify_resume+0x80b/0xbe0 [ 1832.137096] ? __up_read+0x192/0x710 [ 1832.137327] ? __ia32_sys_rseq+0xf0/0xf0 [ 1832.137578] ? 
__x64_sys_openat+0x11f/0x1d0 [ 1832.137850] __x64_sys_ioctl+0x127/0x190 [ 1832.138103] do_syscall_64+0x3b/0x90 [ 1832.138378] entry_SYSCALL_64_after_hwframe+0x44/0xae [ 1832.138702] RIP: 0033:0x7fcee9a253df [ 1832.138937] Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <41> 89 c0 3d 00 f0 ff ff 77 1f 48 8b 44 24 18 64 48 2b 04 25 28 00 [ 1832.140107] RSP: 002b:00007ffeba94a8a0 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 [ 1832.140606] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fcee9a253df [ 1832.141058] RDX: 00007ffeba94a910 RSI: 00000000c018cf07 RDI: 0000000000000003 [ 1832.141503] RBP: 00007ffeba94a930 R08: 00007fcee9b24db0 R09: 00007fcee9b45c4e [ 1832.141948] R10: 00007fcee9918d40 R11: 0000000000000246 R12: 00007ffeba94aa48 [ 1832.142396] R13: 0000000000401176 R14: 0000000000403df8 R15: 00007fcee9b78000 [ 1832.142851] </TASK> [ 1832.142994] Modules linked in: cifs cifs_arc4 cifs_md4 bpf_preload [last unloaded: cifs] Cc: [email protected] Signed-off-by: Paulo Alcantara (SUSE) <[email protected]> Signed-off-by: Steve French <[email protected]>
poly_path(PG_FUNCTION_ARGS) { POLYGON *poly = PG_GETARG_POLYGON_P(0); PATH *path; int size; int i; size = offsetof(PATH, p[0]) +sizeof(path->p[0]) * poly->npts; path = (PATH *) palloc(size); SET_VARSIZE(path, size); path->npts = poly->npts; path->closed = TRUE; /* prevent instability in unused pad bytes */ path->dummy = 0; for (i = 0; i < poly->npts; i++) { path->p[i].x = poly->p[i].x; path->p[i].y = poly->p[i].y; } PG_RETURN_PATH_P(path); }
1
[ "CWE-703", "CWE-189" ]
postgres
31400a673325147e1205326008e32135a78b4d8a
65,909,878,879,363,220,000,000,000,000,000,000,000
24
Predict integer overflow to avoid buffer overruns. Several functions, mostly type input functions, calculated an allocation size such that the calculation wrapped to a small positive value when arguments implied a sufficiently-large requirement. Writes past the end of the inadvertent small allocation followed shortly thereafter. Coverity identified the path_in() vulnerability; code inspection led to the rest. In passing, add check_stack_depth() to prevent stack overflow in related functions. Back-patch to 8.4 (all supported versions). The non-comment hstore changes touch code that did not exist in 8.4, so that part stops at 9.0. Noah Misch and Heikki Linnakangas, reviewed by Tom Lane. Security: CVE-2014-0064
static void test_unescape_char_quoted_one(const char *string, size_t n_string, bool escaped) { char buf[2]; char *out = buf; size_t n_out = sizeof(buf); const char *in = string; size_t n_in = n_string; int r; r = c_shquote_unescape_char_quoted(&out, &n_out, &in, &n_in); c_assert(!r); c_assert(in == string + 2); c_assert(n_in == n_string - 2); if (escaped) { c_assert(out == buf + 1); c_assert(n_out == sizeof(buf) - 1); c_assert(buf[0] == string[1]); } else { c_assert(out == buf + 2); c_assert(n_out == sizeof(buf) - 2); c_assert(!memcmp(buf, string, 2)); } }
0
[ "CWE-787" ]
c-shquote
7fd15f8e272136955f7ffc37df29fbca9ddceca1
144,194,658,417,052,410,000,000,000,000,000,000,000
23
strnspn: fix buffer overflow Fix the strnspn and strncspn functions to use a properly sized buffer. It used to be 1 byte too short. Checking for `0xff` in a string will thus write `0xff` once byte beyond the stack space of the local buffer. Note that the public API does not allow to pass `0xff` to those functions. Therefore, this is a read-only buffer overrun, possibly causing bogus reports from the parser, but still well-defined. Reported-by: Steffen Robertz Signed-off-by: David Rheinsberg <[email protected]>
bool ZrtpQueue::srtpSecretsReady(SrtpSecret_t* secrets, EnableSecurity part) { CryptoContext* recvCryptoContext; CryptoContext* senderCryptoContext; CryptoContextCtrl* recvCryptoContextCtrl; CryptoContextCtrl* senderCryptoContextCtrl; int cipher; int authn; int authKeyLen; if (secrets->authAlgorithm == Sha1) { authn = SrtpAuthenticationSha1Hmac; authKeyLen = 20; } if (secrets->authAlgorithm == Skein) { authn = SrtpAuthenticationSkeinHmac; authKeyLen = 32; } if (secrets->symEncAlgorithm == Aes) cipher = SrtpEncryptionAESCM; if (secrets->symEncAlgorithm == TwoFish) cipher = SrtpEncryptionTWOCM; if (part == ForSender) { // To encrypt packets: intiator uses initiator keys, // responder uses responder keys // Create a "half baked" crypto context first and store it. This is // the main crypto context for the sending part of the connection. if (secrets->role == Initiator) { senderCryptoContext = new CryptoContext( 0, 0, 0L, // keyderivation << 48, cipher, // encryption algo authn, // authtentication algo (unsigned char*)secrets->keyInitiator, // Master Key secrets->initKeyLen / 8, // Master Key length (unsigned char*)secrets->saltInitiator, // Master Salt secrets->initSaltLen / 8, // Master Salt length secrets->initKeyLen / 8, // encryption keyl authKeyLen, // authentication key len secrets->initSaltLen / 8, // session salt len secrets->srtpAuthTagLen / 8); // authentication tag lenA senderCryptoContextCtrl = new CryptoContextCtrl(0, cipher, // encryption algo authn, // authtication algo (unsigned char*)secrets->keyInitiator, // Master Key secrets->initKeyLen / 8, // Master Key length (unsigned char*)secrets->saltInitiator, // Master Salt secrets->initSaltLen / 8, // Master Salt length secrets->initKeyLen / 8, // encryption keyl authKeyLen, // authentication key len secrets->initSaltLen / 8, // session salt len secrets->srtpAuthTagLen / 8); // authentication tag len } else { senderCryptoContext = new CryptoContext( 0, 0, 0L, // keyderivation << 48, cipher, // encryption 
algo authn, // authtentication algo (unsigned char*)secrets->keyResponder, // Master Key secrets->respKeyLen / 8, // Master Key length (unsigned char*)secrets->saltResponder, // Master Salt secrets->respSaltLen / 8, // Master Salt length secrets->respKeyLen / 8, // encryption keyl authKeyLen, // authentication key len secrets->respSaltLen / 8, // session salt len secrets->srtpAuthTagLen / 8); // authentication tag len senderCryptoContextCtrl = new CryptoContextCtrl(0, cipher, // encryption algo authn, // authtication algo (unsigned char*)secrets->keyResponder, // Master Key secrets->respKeyLen / 8, // Master Key length (unsigned char*)secrets->saltResponder, // Master Salt secrets->respSaltLen / 8, // Master Salt length secrets->respKeyLen / 8, // encryption keyl authKeyLen, // authentication key len secrets->respSaltLen / 8, // session salt len secrets->srtpAuthTagLen / 8); // authentication tag len } if (senderCryptoContext == NULL) { return false; } // Insert the Crypto templates (SSRC == 0) into the queue. When we send // the first RTP or RTCP packet the real crypto context will be created. // Refer to putData(), sendImmediate() in ccrtp's outqueue.cpp and // takeinControlPacket() in ccrtp's control.cpp. // setOutQueueCryptoContext(senderCryptoContext); setOutQueueCryptoContextCtrl(senderCryptoContextCtrl); } if (part == ForReceiver) { // To decrypt packets: intiator uses responder keys, // responder initiator keys // See comment above. 
if (secrets->role == Initiator) { recvCryptoContext = new CryptoContext( 0, 0, 0L, // keyderivation << 48, cipher, // encryption algo authn, // authtentication algo (unsigned char*)secrets->keyResponder, // Master Key secrets->respKeyLen / 8, // Master Key length (unsigned char*)secrets->saltResponder, // Master Salt secrets->respSaltLen / 8, // Master Salt length secrets->respKeyLen / 8, // encryption keyl authKeyLen, // authentication key len secrets->respSaltLen / 8, // session salt len secrets->srtpAuthTagLen / 8); // authentication tag len recvCryptoContextCtrl = new CryptoContextCtrl(0, cipher, // encryption algo authn, // authtication algo (unsigned char*)secrets->keyResponder, // Master Key secrets->respKeyLen / 8, // Master Key length (unsigned char*)secrets->saltResponder, // Master Salt secrets->respSaltLen / 8, // Master Salt length secrets->respKeyLen / 8, // encryption keyl authKeyLen, // authentication key len secrets->respSaltLen / 8, // session salt len secrets->srtpAuthTagLen / 8); // authentication tag len } else { recvCryptoContext = new CryptoContext( 0, 0, 0L, // keyderivation << 48, cipher, // encryption algo authn, // authtentication algo (unsigned char*)secrets->keyInitiator, // Master Key secrets->initKeyLen / 8, // Master Key length (unsigned char*)secrets->saltInitiator, // Master Salt secrets->initSaltLen / 8, // Master Salt length secrets->initKeyLen / 8, // encryption keyl authKeyLen, // authentication key len secrets->initSaltLen / 8, // session salt len secrets->srtpAuthTagLen / 8); // authentication tag len recvCryptoContextCtrl = new CryptoContextCtrl(0, cipher, // encryption algo authn, // authtication algo (unsigned char*)secrets->keyInitiator, // Master Key secrets->initKeyLen / 8, // Master Key length (unsigned char*)secrets->saltInitiator, // Master Salt secrets->initSaltLen / 8, // Master Salt length secrets->initKeyLen / 8, // encryption keyl authKeyLen, // authentication key len secrets->initSaltLen / 8, // session salt 
len secrets->srtpAuthTagLen / 8); // authentication tag len } if (recvCryptoContext == NULL) { return false; } // Insert the Crypto templates (SSRC == 0) into the queue. When we receive // the first RTP or RTCP packet the real crypto context will be created. // Refer to rtpDataPacket() above and takeinControlPacket in ccrtp's control.cpp. // setInQueueCryptoContext(recvCryptoContext); setInQueueCryptoContextCtrl(recvCryptoContextCtrl); } return true; }
0
[ "CWE-119" ]
ZRTPCPP
c8617100f359b217a974938c5539a1dd8a120b0e
41,215,603,201,823,900,000,000,000,000,000,000,000
168
Fix vulnerabilities found and reported by Mark Dowd - limit length of memcpy - limit number of offered algorithms in Hello packet - length check in PING packet - fix a small coding error
uint32_t ldb_req_get_custom_flags(struct ldb_request *req) { if (req != NULL && req->handle != NULL) { return req->handle->custom_flags; } /* * 0 is not something any better or worse than * anything else as req or the handle is NULL */ return 0; }
0
[ "CWE-476" ]
samba
d8b9bb274b7e7a390cf3bda9cd732cb2227bdbde
4,846,619,180,965,207,000,000,000,000,000,000,000
12
CVE-2020-10730: lib ldb: Check if ldb_lock_backend_callback called twice Prevent use after free issues if ldb_lock_backend_callback is called twice, usually due to ldb_module_done being called twice. This can happen if a module ignores the return value from function a function that calls ldb_module_done as part of it's error handling. BUG: https://bugzilla.samba.org/show_bug.cgi?id=14364 Signed-off-by: Gary Lockyer <[email protected]> Reviewed-by: Andrew Bartlett <[email protected]>
dlz_allowzonexfr(void *dbdata, const char *name, const char *client) { isc_result_t result; result = dlz_findzonedb(dbdata, name, NULL, NULL); if (result != ISC_R_SUCCESS) { return (result); } /* * Exception for 10.53.0.5 so we can test that allow-transfer * is effective. */ if (strcmp(client, "10.53.0.5") == 0) { return (ISC_R_NOPERM); } return (ISC_R_SUCCESS); }
0
[ "CWE-732" ]
bind9
34348d9ee4db15307c6c42db294419b4df569f76
260,277,336,593,785,400,000,000,000,000,000,000,000
18
denied axfr requests were not effective for writable DLZ zones (cherry picked from commit d9077cd0038e59726e1956de18b4b7872038a283)
process_tree(void) { int mymask = 0; tcpr_buildcidr_t *bcdata; tcpprep_opt_t *options = tcpprep->options; dbg(1, "Running: process_tree()"); bcdata = (tcpr_buildcidr_t *)safe_malloc(sizeof(tcpr_buildcidr_t)); for (mymask = options->max_mask; mymask <= options->min_mask; mymask++) { dbgx(1, "Current mask: %u", mymask); /* set starting vals */ bcdata->type = DIR_SERVER; bcdata->masklen = mymask; /* build cidrdata with servers */ tree_buildcidr(&treeroot, bcdata); /* calculate types of all IP's */ tree_calculate(&treeroot); /* try to find clients in cidrdata */ bcdata->type = DIR_CLIENT; if (! tree_checkincidr(&treeroot, bcdata)) { /* didn't find any clients in cidrdata */ safe_free(bcdata); return (mymask); /* success! */ } else { destroy_cidr(options->cidrdata); /* clean up after our mess */ options->cidrdata = NULL; } } safe_free(bcdata); /* we failed to find a valid cidr list */ notice("Unable to determine any IP addresses as a clients."); notice("Perhaps you should change the --ratio, --minmask/maxmask settings, or try another mode?"); return (0); }
0
[ "CWE-476" ]
tcpreplay
46cf964a7db636da76abeebf10482acf6f682a87
133,676,918,551,316,140,000,000,000,000,000,000,000
43
Bug #677 - fixes for tcpprep tree
ex_cc(exarg_T *eap) { qf_info_T *qi; int errornr; if ((qi = qf_cmd_get_stack(eap, TRUE)) == NULL) return; if (eap->addr_count > 0) errornr = (int)eap->line2; else { switch (eap->cmdidx) { case CMD_cc: case CMD_ll: errornr = 0; break; case CMD_crewind: case CMD_lrewind: case CMD_cfirst: case CMD_lfirst: errornr = 1; break; default: errornr = 32767; } } // For cdo and ldo commands, jump to the nth valid error. // For cfdo and lfdo commands, jump to the nth valid file entry. if (eap->cmdidx == CMD_cdo || eap->cmdidx == CMD_ldo || eap->cmdidx == CMD_cfdo || eap->cmdidx == CMD_lfdo) errornr = qf_get_nth_valid_entry(qf_get_curlist(qi), eap->addr_count > 0 ? (int)eap->line1 : 1, eap->cmdidx == CMD_cfdo || eap->cmdidx == CMD_lfdo); qf_jump(qi, 0, errornr, eap->forceit); }
0
[ "CWE-416" ]
vim
4f1b083be43f351bc107541e7b0c9655a5d2c0bb
190,555,960,361,996,830,000,000,000,000,000,000,000
36
patch 9.0.0322: crash when no errors and 'quickfixtextfunc' is set Problem: Crash when no errors and 'quickfixtextfunc' is set. Solution: Do not handle errors if there aren't any.
bytesPerDeepLineTable (const Header &header, char* base, int xStride, int yStride, vector<size_t> &bytesPerLine) { return bytesPerDeepLineTable(header, header.dataWindow().min.y, header.dataWindow().max.y, base, xStride, yStride, bytesPerLine); }
0
[ "CWE-125" ]
openexr
e79d2296496a50826a15c667bf92bdc5a05518b4
152,629,910,955,578,040,000,000,000,000,000,000,000
14
fix memory leaks and invalid memory accesses Signed-off-by: Peter Hillman <[email protected]>
intrusive_ptr<Expression> ExpressionReplaceAll::parse(ExpressionContext* const expCtx, BSONElement expr, const VariablesParseState& vps) { auto [input, find, replacement] = parseExpressionReplaceBase(opName, expCtx, expr, vps); return make_intrusive<ExpressionReplaceAll>( expCtx, std::move(input), std::move(find), std::move(replacement)); }
0
[]
mongo
1772b9a0393b55e6a280a35e8f0a1f75c014f301
133,434,317,841,076,780,000,000,000,000,000,000,000
7
SERVER-49404 Enforce additional checks in $arrayToObject
int rom_copy(uint8_t *dest, hwaddr addr, size_t size) { hwaddr end = addr + size; uint8_t *s, *d = dest; size_t l = 0; Rom *rom; QTAILQ_FOREACH(rom, &roms, next) { if (rom->fw_file) { continue; } if (rom->mr) { continue; } if (rom->addr + rom->romsize < addr) { continue; } if (rom->addr > end || rom->addr < addr) { break; } d = dest + (rom->addr - addr); s = rom->data; l = rom->datasize; if ((d + l) > (dest + size)) { l = dest - d; } if (l > 0) { memcpy(d, s, l); } if (rom->romsize > rom->datasize) { /* If datasize is less than romsize, it means that we didn't * allocate all the ROM because the trailing data are only zeros. */ d += l; l = rom->romsize - rom->datasize; if ((d + l) > (dest + size)) { /* Rom size doesn't fit in the destination area. Adjust to avoid * overflow. */ l = dest - d; } if (l > 0) { memset(d, 0x0, l); } } } return (d + l) - dest; }
0
[ "CWE-787" ]
qemu
e423455c4f23a1a828901c78fe6d03b7dde79319
11,941,405,663,358,838,000,000,000,000,000,000,000
56
hw/core/loader: Fix possible crash in rom_copy() Both, "rom->addr" and "addr" are derived from the binary image that can be loaded with the "-kernel" paramer. The code in rom_copy() then calculates: d = dest + (rom->addr - addr); and uses "d" as destination in a memcpy() some lines later. Now with bad kernel images, it is possible that rom->addr is smaller than addr, thus "rom->addr - addr" gets negative and the memcpy() then tries to copy contents from the image to a bad memory location. This could maybe be used to inject code from a kernel image into the QEMU binary, so we better fix it with an additional sanity check here. Cc: [email protected] Reported-by: Guangming Liu Buglink: https://bugs.launchpad.net/qemu/+bug/1844635 Message-Id: <[email protected]> Reviewed-by: Michael S. Tsirkin <[email protected]> Signed-off-by: Thomas Huth <[email protected]>
GF_Err pssh_dump(GF_Box *a, FILE * trace) { GF_ProtectionSystemHeaderBox *ptr = (GF_ProtectionSystemHeaderBox*) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "ProtectionSystemHeaderBox", trace); fprintf(trace, "SystemID=\""); dump_data_hex(trace, (char *) ptr->SystemID, 16); fprintf(trace, "\">\n"); if (ptr->KID_count) { u32 i; for (i=0; i<ptr->KID_count; i++) { fprintf(trace, " <PSSHKey KID=\""); dump_data_hex(trace, (char *) ptr->KIDs[i], 16); fprintf(trace, "\"/>\n"); } } if (ptr->private_data_size) { fprintf(trace, " <PSSHData size=\"%d\" value=\"", ptr->private_data_size); dump_data_hex(trace, (char *) ptr->private_data, ptr->private_data_size); fprintf(trace, "\"/>\n"); } if (!ptr->size) { fprintf(trace, " <PSSHKey KID=\"\"/>\n"); fprintf(trace, " <PSSHData size=\"\" value=\"\"/>\n"); } gf_isom_box_dump_done("ProtectionSystemHeaderBox", a, trace); return GF_OK; }
0
[ "CWE-125" ]
gpac
bceb03fd2be95097a7b409ea59914f332fb6bc86
76,518,510,112,924,930,000,000,000,000,000,000,000
31
fixed 2 possible heap overflows (inc. #1088)
handle_client_initial_response_cookie_sha1_mech (DBusAuth *auth, DBusString *response) { DBusString username; dbus_bool_t retval; retval = FALSE; if (!_dbus_string_init (&username)) return FALSE; if (!_dbus_append_user_from_current_process (&username)) goto out_0; if (!_dbus_string_hex_encode (&username, 0, response, _dbus_string_get_length (response))) goto out_0; retval = TRUE; out_0: _dbus_string_free (&username); return retval; }
0
[ "CWE-59" ]
dbus
47b1a4c41004bf494b87370987b222c934b19016
233,231,418,772,199,750,000,000,000,000,000,000,000
26
auth: Reject DBUS_COOKIE_SHA1 for users other than the server owner The DBUS_COOKIE_SHA1 authentication mechanism aims to prove ownership of a shared home directory by having the server write a secret "cookie" into a .dbus-keyrings subdirectory of the desired identity's home directory with 0700 permissions, and having the client prove that it can read the cookie. This never actually worked for non-malicious clients in the case where server uid != client uid (unless the server and client both have privileges, such as Linux CAP_DAC_OVERRIDE or traditional Unix uid 0) because an unprivileged server would fail to write out the cookie, and an unprivileged client would be unable to read the resulting file owned by the server. Additionally, since dbus 1.7.10 we have checked that ~/.dbus-keyrings is owned by the uid of the server (a side-effect of a check added to harden our use of XDG_RUNTIME_DIR), further ruling out successful use by a non-malicious client with a uid differing from the server's. Joe Vennix of Apple Information Security discovered that the implementation of DBUS_COOKIE_SHA1 was susceptible to a symbolic link attack: a malicious client with write access to its own home directory could manipulate a ~/.dbus-keyrings symlink to cause the DBusServer to read and write in unintended locations. In the worst case this could result in the DBusServer reusing a cookie that is known to the malicious client, and treating that cookie as evidence that a subsequent client connection came from an attacker-chosen uid, allowing authentication bypass. This is mitigated by the fact that by default, the well-known system dbus-daemon (since 2003) and the well-known session dbus-daemon (in stable releases since dbus 1.10.0 in 2015) only accept the EXTERNAL authentication mechanism, and as a result will reject DBUS_COOKIE_SHA1 at an early stage, before manipulating cookies. 
As a result, this vulnerability only applies to: * system or session dbus-daemons with non-standard configuration * third-party dbus-daemon invocations such as at-spi2-core (although in practice at-spi2-core also only accepts EXTERNAL by default) * third-party uses of DBusServer such as the one in Upstart Avoiding symlink attacks in a portable way is difficult, because APIs like openat() and Linux /proc/self/fd are not universally available. However, because DBUS_COOKIE_SHA1 already doesn't work in practice for a non-matching uid, we can solve this vulnerability in an easier way without regressions, by rejecting it early (before looking at ~/.dbus-keyrings) whenever the requested identity doesn't match the identity of the process hosting the DBusServer. Signed-off-by: Simon McVittie <[email protected]> Closes: https://gitlab.freedesktop.org/dbus/dbus/issues/269 Closes: CVE-2019-12749
server_connect_success (server *serv) { #ifdef USE_OPENSSL #define SSLDOCONNTMOUT 300 if (serv->use_ssl) { char *err; /* it'll be a memory leak, if connection isn't terminated by server_cleanup() */ serv->ssl = _SSL_socket (serv->ctx, serv->sok); if ((err = _SSL_set_verify (serv->ctx, ssl_cb_verify, NULL))) { EMIT_SIGNAL (XP_TE_CONNFAIL, serv->server_session, err, NULL, NULL, NULL, 0); server_cleanup (serv); /* ->connecting = FALSE */ return; } /* FIXME: it'll be needed by new servers */ /* send(serv->sok, "STLS\r\n", 6, 0); sleep(1); */ set_nonblocking (serv->sok); serv->ssl_do_connect_tag = fe_timeout_add (SSLDOCONNTMOUT, ssl_do_connect, serv); return; } serv->ssl = NULL; #endif server_stopconnecting (serv); /* ->connecting = FALSE */ /* activate glib poll */ server_connected (serv); }
0
[ "CWE-310" ]
hexchat
c9b63f7f9be01692b03fa15275135a4910a7e02d
88,211,740,694,399,160,000,000,000,000,000,000,000
32
ssl: Validate hostnames Closes #524
i2rgb(UINT8 *out, const UINT8 *in_, int xsize) { int x; INT32 *in = (INT32 *)in_; for (x = 0; x < xsize; x++, in++, out += 4) { if (*in <= 0) { out[0] = out[1] = out[2] = 0; } else if (*in >= 255) { out[0] = out[1] = out[2] = 255; } else { out[0] = out[1] = out[2] = (UINT8)*in; } out[3] = 255; } }
0
[ "CWE-120" ]
Pillow
518ee3722a99d7f7d890db82a20bd81c1c0327fb
56,082,130,523,875,750,000,000,000,000,000,000,000
14
Use snprintf instead of sprintf
virtual unsigned long long lastUsed() const { lock_guard<boost::mutex> lock(simpleFieldSyncher); return m_lastUsed; }
0
[]
passenger
8c6693e0818772c345c979840d28312c2edd4ba4
280,787,364,527,966,640,000,000,000,000,000,000,000
4
Security check socket filenames reported by spawned application processes.
compile_cclass_node(CClassNode* cc, regex_t* reg) { int r; if (IS_NULL(cc->mbuf)) { r = add_op(reg, IS_NCCLASS_NOT(cc) ? OP_CCLASS_NOT : OP_CCLASS); if (r != 0) return r; COP(reg)->cclass.bsp = xmalloc(SIZE_BITSET); CHECK_NULL_RETURN_MEMERR(COP(reg)->cclass.bsp); xmemcpy(COP(reg)->cclass.bsp, cc->bs, SIZE_BITSET); } else { void* p; if (ONIGENC_MBC_MINLEN(reg->enc) > 1 || bitset_is_empty(cc->bs)) { r = add_op(reg, IS_NCCLASS_NOT(cc) ? OP_CCLASS_MB_NOT : OP_CCLASS_MB); if (r != 0) return r; p = set_multi_byte_cclass(cc->mbuf, reg); CHECK_NULL_RETURN_MEMERR(p); COP(reg)->cclass_mb.mb = p; } else { r = add_op(reg, IS_NCCLASS_NOT(cc) ? OP_CCLASS_MIX_NOT : OP_CCLASS_MIX); if (r != 0) return r; COP(reg)->cclass_mix.bsp = xmalloc(SIZE_BITSET); CHECK_NULL_RETURN_MEMERR(COP(reg)->cclass_mix.bsp); xmemcpy(COP(reg)->cclass_mix.bsp, cc->bs, SIZE_BITSET); p = set_multi_byte_cclass(cc->mbuf, reg); CHECK_NULL_RETURN_MEMERR(p); COP(reg)->cclass_mix.mb = p; } } return 0; }
0
[ "CWE-476", "CWE-125" ]
oniguruma
c509265c5f6ae7264f7b8a8aae1cfa5fc59d108c
335,111,535,721,970,350,000,000,000,000,000,000,000
39
Fix CVE-2019-13225: problem in converting if-then-else pattern to bytecode.
RGWOp *RGWHandler_REST_Obj_SWIFT::op_head() { return get_obj_op(false); }
0
[ "CWE-617" ]
ceph
f44a8ae8aa27ecef69528db9aec220f12492810e
101,711,086,821,814,900,000,000,000,000,000,000,000
4
rgw: RGWSwiftWebsiteHandler::is_web_dir checks empty subdir_name checking for empty name avoids later assertion in RGWObjectCtx::set_atomic Fixes: CVE-2021-3531 Reviewed-by: Casey Bodley <[email protected]> Signed-off-by: Casey Bodley <[email protected]> (cherry picked from commit 7196a469b4470f3c8628489df9a41ec8b00a5610)
StrUtil_SafeStrcat(char **prefix, // IN/OUT const char *str) // IN { char *tmp; size_t plen = *prefix != NULL ? strlen(*prefix) : 0; size_t slen = strlen(str); /* Check for overflow */ VERIFY((size_t)-1 - plen > slen + 1); tmp = Util_SafeRealloc(*prefix, plen + slen + 1 /* NUL */); memcpy(tmp + plen, str, slen + 1 /* NUL */); *prefix = tmp; }
0
[ "CWE-362" ]
open-vm-tools
b3068b04880eda4ca3e13f2d34fb8ce336ad1a4f
167,992,234,797,040,130,000,000,000,000,000,000,000
15
randomly generate tmp directory name, and add StrUtil_ReplaceAll() function
void Server::updateChannel(const Channel *c) { if (c->bTemporary) return; TransactionHolder th; Group *g; ChanACL *acl; QSqlQuery &query = *th.qsqQuery; SQLPREP("UPDATE `%1channels` SET `name` = ?, `parent_id` = ?, `inheritacl` = ? WHERE `server_id` = ? AND `channel_id` = ?"); query.addBindValue(c->qsName); query.addBindValue(c->cParent ? c->cParent->iId : QVariant()); query.addBindValue(c->bInheritACL ? 1 : 0); query.addBindValue(iServerNum); query.addBindValue(c->iId); SQLEXEC(); // Update channel description information SQLPREP("REPLACE INTO `%1channel_info` (`server_id`, `channel_id`, `key`, `value`) VALUES (?,?,?,?)"); query.addBindValue(iServerNum); query.addBindValue(c->iId); query.addBindValue(ServerDB::Channel_Description); query.addBindValue(c->qsDesc); SQLEXEC(); // Update channel position information query.addBindValue(iServerNum); query.addBindValue(c->iId); query.addBindValue(ServerDB::Channel_Position); query.addBindValue(QVariant(c->iPosition).toString()); SQLEXEC(); SQLPREP("DELETE FROM `%1groups` WHERE `server_id` = ? AND `channel_id` = ?"); query.addBindValue(iServerNum); query.addBindValue(c->iId); SQLEXEC(); SQLPREP("DELETE FROM `%1acl` WHERE `server_id` = ? AND `channel_id` = ?"); query.addBindValue(iServerNum); query.addBindValue(c->iId); SQLEXEC(); foreach(g, c->qhGroups) { SQLPREP("INSERT INTO `%1groups` (`server_id`, `channel_id`, `name`, `inherit`, `inheritable`) VALUES (?,?,?,?,?)"); query.addBindValue(iServerNum); query.addBindValue(g->c->iId); query.addBindValue(g->qsName); query.addBindValue(g->bInherit ? 1 : 0); query.addBindValue(g->bInheritable ? 
1 : 0); SQLEXEC(); int id = query.lastInsertId().toInt(); int pid; foreach(pid, g->qsAdd) { SQLPREP("INSERT INTO `%1group_members` (`group_id`, `server_id`, `user_id`, `addit`) VALUES (?, ?, ?, ?)"); query.addBindValue(id); query.addBindValue(iServerNum); query.addBindValue(pid); query.addBindValue(1); SQLEXEC(); } foreach(pid, g->qsRemove) { SQLPREP("INSERT INTO `%1group_members` (`group_id`, `server_id`, `user_id`, `addit`) VALUES (?, ?, ?, ?)"); query.addBindValue(id); query.addBindValue(iServerNum); query.addBindValue(pid); query.addBindValue(0); SQLEXEC(); } } int pri = 5; foreach(acl, c->qlACL) { SQLPREP("INSERT INTO `%1acl` (`server_id`, `channel_id`, `priority`, `user_id`, `group_name`, `apply_here`, `apply_sub`, `grantpriv`, `revokepriv`) VALUES (?,?,?,?,?,?,?,?,?)"); query.addBindValue(iServerNum); query.addBindValue(acl->c->iId); query.addBindValue(pri++); query.addBindValue((acl->iUserId == -1) ? QVariant() : acl->iUserId); query.addBindValue((acl->qsGroup.isEmpty()) ? QVariant() : acl->qsGroup); query.addBindValue(acl->bApplyHere ? 1 : 0); query.addBindValue(acl->bApplySubs ? 1 : 0); query.addBindValue(static_cast<int>(acl->pAllow)); query.addBindValue(static_cast<int>(acl->pDeny)); SQLEXEC(); } }
0
[ "CWE-20" ]
mumble
6b33dda344f89e5a039b7d79eb43925040654242
225,932,056,309,649,700,000,000,000,000,000,000,000
88
Don't crash on long usernames
static void pf1(struct mg_connection *c, int ev, void *ev_data, void *fn_data) { if (ev == MG_EV_READ) mg_iobuf_free(&c->recv); (void) ev_data, (void) fn_data; }
0
[ "CWE-552" ]
mongoose
c65c8fdaaa257e0487ab0aaae9e8f6b439335945
162,318,162,741,130,730,000,000,000,000,000,000,000
4
Protect against the directory traversal in mg_upload()
int LibRaw::dcraw_process(void) { int quality,i; int iterations=-1, dcb_enhance=1, noiserd=0; int eeci_refine_fl=0, es_med_passes_fl=0; float cared=0,cablue=0; float linenoise=0; float lclean=0,cclean=0; float thresh=0; float preser=0; float expos=1.0; CHECK_ORDER_LOW(LIBRAW_PROGRESS_LOAD_RAW); // CHECK_ORDER_HIGH(LIBRAW_PROGRESS_PRE_INTERPOLATE); try { int no_crop = 1; if (~O.cropbox[2] && ~O.cropbox[3]) no_crop=0; libraw_decoder_info_t di; get_decoder_info(&di); bool is_bayer = (imgdata.idata.filters || P1.colors == 1); int subtract_inline = !O.bad_pixels && !O.dark_frame && !O.wf_debanding && is_bayer && !IO.zero_is_bad; raw2image_ex(subtract_inline); // allocate imgdata.image and copy data! // Adjust sizes int save_4color = O.four_color_rgb; if (IO.zero_is_bad) { remove_zeroes(); SET_PROC_FLAG(LIBRAW_PROGRESS_REMOVE_ZEROES); } if(O.bad_pixels && no_crop) { bad_pixels(O.bad_pixels); SET_PROC_FLAG(LIBRAW_PROGRESS_BAD_PIXELS); } if (O.dark_frame && no_crop) { subtract (O.dark_frame); SET_PROC_FLAG(LIBRAW_PROGRESS_DARK_FRAME); } if (O.wf_debanding) { wf_remove_banding(); } quality = 2 + !IO.fuji_width; if (O.user_qual >= 0) quality = O.user_qual; if(!subtract_inline || !C.data_maximum) { adjust_bl(); subtract_black_internal(); } if(!(di.decoder_flags & LIBRAW_DECODER_FIXEDMAXC)) adjust_maximum(); if (O.user_sat > 0) C.maximum = O.user_sat; if (P1.is_foveon) { if(load_raw == &LibRaw::x3f_load_raw) { // Filter out zeroes for (int i=0; i < S.height*S.width*4; i++) if ((short) imgdata.image[0][i] < 0) imgdata.image[0][i] = 0; } #ifdef LIBRAW_DEMOSAIC_PACK_GPL2 else if(load_raw == &LibRaw::foveon_dp_load_raw) { for (int i=0; i < S.height*S.width*4; i++) if ((short) imgdata.image[0][i] < 0) imgdata.image[0][i] = 0; } else { foveon_interpolate(); } #endif SET_PROC_FLAG(LIBRAW_PROGRESS_FOVEON_INTERPOLATE); } if (O.green_matching && !O.half_size) { green_matching(); } if ( #ifdef LIBRAW_DEMOSAIC_PACK_GPL2 (!P1.is_foveon || O.force_foveon_x3f) && #endif !O.no_auto_scale) { 
scale_colors(); SET_PROC_FLAG(LIBRAW_PROGRESS_SCALE_COLORS); } pre_interpolate(); SET_PROC_FLAG(LIBRAW_PROGRESS_PRE_INTERPOLATE); if (O.dcb_iterations >= 0) iterations = O.dcb_iterations; if (O.dcb_enhance_fl >=0 ) dcb_enhance = O.dcb_enhance_fl; if (O.fbdd_noiserd >=0 ) noiserd = O.fbdd_noiserd; if (O.eeci_refine >=0 ) eeci_refine_fl = O.eeci_refine; if (O.es_med_passes >0 ) es_med_passes_fl = O.es_med_passes; // LIBRAW_DEMOSAIC_PACK_GPL3 if (!O.half_size && O.cfa_green >0) {thresh=O.green_thresh ;green_equilibrate(thresh);} if (O.exp_correc >0) {expos=O.exp_shift ; preser=O.exp_preser; exp_bef(expos,preser);} if (O.ca_correc >0 ) {cablue=O.cablue; cared=O.cared; CA_correct_RT(cablue, cared);} if (O.cfaline >0 ) {linenoise=O.linenoise; cfa_linedn(linenoise);} if (O.cfa_clean >0 ) {lclean=O.lclean; cclean=O.cclean; cfa_impulse_gauss(lclean,cclean);} if (P1.filters && !O.no_interpolation) { if (noiserd>0 && P1.colors==3 && P1.filters) fbdd(noiserd); if (quality == 0) lin_interpolate(); else if (quality == 1 || P1.colors > 3) vng_interpolate(); else if (quality == 2 && P1.filters > 1000) ppg_interpolate(); else if (P1.filters == LIBRAW_XTRANS) { // Fuji X-Trans xtrans_interpolate(quality>2?3:1); } else if (quality == 3) ahd_interpolate(); // really don't need it here due to fallback op else if (quality == 4) dcb(iterations, dcb_enhance); // LIBRAW_DEMOSAIC_PACK_GPL2 else if (quality == 5) ahd_interpolate_mod(); else if (quality == 6) afd_interpolate_pl(2,1); else if (quality == 7) vcd_interpolate(0); else if (quality == 8) vcd_interpolate(12); else if (quality == 9) lmmse_interpolate(1); // LIBRAW_DEMOSAIC_PACK_GPL3 else if (quality == 10) amaze_demosaic_RT(); // LGPL2 else if (quality == 11) dht_interpolate(); else if (quality == 12) aahd_interpolate(); // fallback to AHD else { ahd_interpolate(); imgdata.process_warnings |= LIBRAW_WARN_FALLBACK_TO_AHD; } SET_PROC_FLAG(LIBRAW_PROGRESS_INTERPOLATE); } if (IO.mix_green) { for (P1.colors=3, i=0; i < S.height * S.width; 
i++) imgdata.image[i][1] = (imgdata.image[i][1] + imgdata.image[i][3]) >> 1; SET_PROC_FLAG(LIBRAW_PROGRESS_MIX_GREEN); } if(!P1.is_foveon) { if (P1.colors == 3) { if (quality == 8) { if (eeci_refine_fl == 1) refinement(); if (O.med_passes > 0) median_filter_new(); if (es_med_passes_fl > 0) es_median_filter(); } else { median_filter(); } SET_PROC_FLAG(LIBRAW_PROGRESS_MEDIAN_FILTER); } } if (O.highlight == 2) { blend_highlights(); SET_PROC_FLAG(LIBRAW_PROGRESS_HIGHLIGHTS); } if (O.highlight > 2) { recover_highlights(); SET_PROC_FLAG(LIBRAW_PROGRESS_HIGHLIGHTS); } if (O.use_fuji_rotate) { fuji_rotate(); SET_PROC_FLAG(LIBRAW_PROGRESS_FUJI_ROTATE); } if(!libraw_internal_data.output_data.histogram) { libraw_internal_data.output_data.histogram = (int (*)[LIBRAW_HISTOGRAM_SIZE]) malloc(sizeof(*libraw_internal_data.output_data.histogram)*4); merror(libraw_internal_data.output_data.histogram,"LibRaw::dcraw_process()"); } #ifndef NO_LCMS if(O.camera_profile) { apply_profile(O.camera_profile,O.output_profile); SET_PROC_FLAG(LIBRAW_PROGRESS_APPLY_PROFILE); } #endif convert_to_rgb(); SET_PROC_FLAG(LIBRAW_PROGRESS_CONVERT_RGB); if (O.use_fuji_rotate) { stretch(); SET_PROC_FLAG(LIBRAW_PROGRESS_STRETCH); } O.four_color_rgb = save_4color; // also, restore return 0; } catch ( LibRaw_exceptions err) { EXCEPTION_HANDLER(err); } }
0
[ "CWE-129" ]
LibRaw
89d065424f09b788f443734d44857289489ca9e2
155,588,362,309,437,130,000,000,000,000,000,000,000
249
fixed two more problems found by fuzzer
static int input_default_setkeycode(struct input_dev *dev, const struct input_keymap_entry *ke, unsigned int *old_keycode) { unsigned int index; int error; int i; if (!dev->keycodesize) return -EINVAL; if (ke->flags & INPUT_KEYMAP_BY_INDEX) { index = ke->index; } else { error = input_scancode_to_scalar(ke, &index); if (error) return error; } if (index >= dev->keycodemax) return -EINVAL; if (dev->keycodesize < sizeof(ke->keycode) && (ke->keycode >> (dev->keycodesize * 8))) return -EINVAL; switch (dev->keycodesize) { case 1: { u8 *k = (u8 *)dev->keycode; *old_keycode = k[index]; k[index] = ke->keycode; break; } case 2: { u16 *k = (u16 *)dev->keycode; *old_keycode = k[index]; k[index] = ke->keycode; break; } default: { u32 *k = (u32 *)dev->keycode; *old_keycode = k[index]; k[index] = ke->keycode; break; } } __clear_bit(*old_keycode, dev->keybit); __set_bit(ke->keycode, dev->keybit); for (i = 0; i < dev->keycodemax; i++) { if (input_fetch_keycode(dev, i) == *old_keycode) { __set_bit(*old_keycode, dev->keybit); break; /* Setting the bit twice is useless, so break */ } } return 0; }
1
[ "CWE-703", "CWE-787" ]
linux
cb222aed03d798fc074be55e59d9a112338ee784
121,416,750,715,190,980,000,000,000,000,000,000,000
59
Input: add safety guards to input_set_keycode() If we happen to have a garbage in input device's keycode table with values too big we'll end up doing clear_bit() with offset way outside of our bitmaps, damaging other objects within an input device or even outside of it. Let's add sanity checks to the returned old keycodes. Reported-by: [email protected] Reported-by: [email protected] Link: https://lore.kernel.org/r/20191207212757.GA245964@dtor-ws Signed-off-by: Dmitry Torokhov <[email protected]>
static inline void tcp_prequeue_init(struct tcp_sock *tp) { tp->ucopy.task = NULL; tp->ucopy.len = 0; tp->ucopy.memory = 0; skb_queue_head_init(&tp->ucopy.prequeue); }
0
[ "CWE-416", "CWE-269" ]
linux
bb1fceca22492109be12640d49f5ea5a544c6bb4
116,928,512,287,699,490,000,000,000,000,000,000,000
7
tcp: fix use after free in tcp_xmit_retransmit_queue() When tcp_sendmsg() allocates a fresh and empty skb, it puts it at the tail of the write queue using tcp_add_write_queue_tail() Then it attempts to copy user data into this fresh skb. If the copy fails, we undo the work and remove the fresh skb. Unfortunately, this undo lacks the change done to tp->highest_sack and we can leave a dangling pointer (to a freed skb) Later, tcp_xmit_retransmit_queue() can dereference this pointer and access freed memory. For regular kernels where memory is not unmapped, this might cause SACK bugs because tcp_highest_sack_seq() is buggy, returning garbage instead of tp->snd_nxt, but with various debug features like CONFIG_DEBUG_PAGEALLOC, this can crash the kernel. This bug was found by Marco Grassi thanks to syzkaller. Fixes: 6859d49475d4 ("[TCP]: Abstract tp->highest_sack accessing & point to next skb") Reported-by: Marco Grassi <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Cc: Ilpo Järvinen <[email protected]> Cc: Yuchung Cheng <[email protected]> Cc: Neal Cardwell <[email protected]> Acked-by: Neal Cardwell <[email protected]> Reviewed-by: Cong Wang <[email protected]> Signed-off-by: David S. Miller <[email protected]>
v8::MaybeLocal<v8::Value> GetSubjectAltNameString( Environment* env, const BIOPointer& bio, X509* cert) { int index = X509_get_ext_by_NID(cert, NID_subject_alt_name, -1); if (index < 0) return Undefined(env->isolate()); X509_EXTENSION* ext = X509_get_ext(cert, index); CHECK_NOT_NULL(ext); if (!SafeX509SubjectAltNamePrint(bio, ext)) { USE(BIO_reset(bio.get())); return v8::Null(env->isolate()); } return ToV8Value(env, bio); }
0
[ "CWE-295" ]
node
466e5415a2b7b3574ab5403acb87e89a94a980d1
138,897,010,698,791,220,000,000,000,000,000,000,000
18
crypto,tls: implement safe x509 GeneralName format This change introduces JSON-compatible escaping rules for strings that include X.509 GeneralName components (see RFC 5280). This non-standard format avoids ambiguities and prevents injection attacks that could previously lead to X.509 certificates being accepted even though they were not valid for the target hostname. These changes affect the format of subject alternative names and the format of authority information access. The checkServerIdentity function has been modified to safely handle the new format, eliminating the possibility of injecting subject alternative names into the verification logic. Because each subject alternative name is only encoded as a JSON string literal if necessary for security purposes, this change will only be visible in rare cases. This addresses CVE-2021-44532. CVE-ID: CVE-2021-44532 PR-URL: https://github.com/nodejs-private/node-private/pull/300 Reviewed-By: Michael Dawson <[email protected]> Reviewed-By: Rich Trott <[email protected]>
static void qxl_vm_change_state_handler(void *opaque, int running, RunState state) { PCIQXLDevice *qxl = opaque; if (running) { /* * if qxl_send_events was called from spice server context before * migration ended, qxl_update_irq for these events might not have been * called */ qxl_update_irq(qxl); } else { /* make sure surfaces are saved before migration */ qxl_dirty_surfaces(qxl); } }
0
[ "CWE-476" ]
qemu
d52680fc932efb8a2f334cc6993e705ed1e31e99
192,950,551,598,433,900,000,000,000,000,000,000,000
17
qxl: check release info object When releasing spice resources in release_resource() routine, if release info object 'ext.info' is null, it leads to null pointer dereference. Add check to avoid it. Reported-by: Bugs SysSec <[email protected]> Signed-off-by: Prasad J Pandit <[email protected]> Message-id: [email protected] Signed-off-by: Gerd Hoffmann <[email protected]>
void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) { DBUG_ASSERT(thd); str->append(STRING_WITH_LEN("select ")); if (join && join->cleaned) { /* JOIN already cleaned up so it is dangerous to print items because temporary tables they pointed on could be freed. */ str->append('#'); str->append(select_number); return; } /* First add options */ if (options & SELECT_STRAIGHT_JOIN) str->append(STRING_WITH_LEN("straight_join ")); if (options & SELECT_HIGH_PRIORITY) str->append(STRING_WITH_LEN("high_priority ")); if (options & SELECT_DISTINCT) str->append(STRING_WITH_LEN("distinct ")); if (options & SELECT_SMALL_RESULT) str->append(STRING_WITH_LEN("sql_small_result ")); if (options & SELECT_BIG_RESULT) str->append(STRING_WITH_LEN("sql_big_result ")); if (options & OPTION_BUFFER_RESULT) str->append(STRING_WITH_LEN("sql_buffer_result ")); if (options & OPTION_FOUND_ROWS) str->append(STRING_WITH_LEN("sql_calc_found_rows ")); switch (sql_cache) { case SQL_NO_CACHE: str->append(STRING_WITH_LEN("sql_no_cache ")); break; case SQL_CACHE: str->append(STRING_WITH_LEN("sql_cache ")); break; case SQL_CACHE_UNSPECIFIED: break; default: DBUG_ASSERT(0); } //Item List bool first= 1; /* outer_select() can not be used here because it is for name resolution and will return NULL at any end of name resolution chain (view/derived) */ bool top_level= (get_master()->get_master() == 0); List_iterator_fast<Item> it(item_list); Item *item; while ((item= it++)) { if (first) first= 0; else str->append(','); if ((is_subquery_function() && item->is_autogenerated_name) || !item->name) { /* Do not print auto-generated aliases in subqueries. It has no purpose in a view definition or other contexts where the query is printed. */ item->print(str, query_type); } else { /* Do not print illegal names (if it is not top level SELECT). Top level view checked (and correct name are assigned), other cases of top level SELECT are not important, because it is not "table field". 
*/ if (top_level || !item->is_autogenerated_name || !check_column_name(item->name)) item->print_item_w_name(str, query_type); else item->print(str, query_type); } } /* from clause TODO: support USING/FORCE/IGNORE index */ if (table_list.elements) { str->append(STRING_WITH_LEN(" from ")); /* go through join tree */ print_join(thd, join? join->eliminated_tables: 0, str, &top_join_list, query_type); } else if (where) { /* "SELECT 1 FROM DUAL WHERE 2" should not be printed as "SELECT 1 WHERE 2": the 1st syntax is valid, but the 2nd is not. */ str->append(STRING_WITH_LEN(" from DUAL ")); } // Where Item *cur_where= where; if (join) cur_where= join->conds; if (cur_where || cond_value != Item::COND_UNDEF) { str->append(STRING_WITH_LEN(" where ")); if (cur_where) cur_where->print(str, query_type); else str->append(cond_value != Item::COND_FALSE ? "1" : "0"); } // group by & olap if (group_list.elements) { str->append(STRING_WITH_LEN(" group by ")); print_order(str, group_list.first, query_type); switch (olap) { case CUBE_TYPE: str->append(STRING_WITH_LEN(" with cube")); break; case ROLLUP_TYPE: str->append(STRING_WITH_LEN(" with rollup")); break; default: ; //satisfy compiler } } // having Item *cur_having= having; if (join) cur_having= join->having; if (cur_having || having_value != Item::COND_UNDEF) { str->append(STRING_WITH_LEN(" having ")); if (cur_having) cur_having->print(str, query_type); else str->append(having_value != Item::COND_FALSE ? "1" : "0"); } if (order_list.elements) { str->append(STRING_WITH_LEN(" order by ")); print_order(str, order_list.first, query_type); } // limit print_limit(thd, str, query_type); // lock type if (lock_type == TL_READ_WITH_SHARED_LOCKS) str->append(" lock in share mode"); else if (lock_type == TL_WRITE) str->append(" for update"); // PROCEDURE unsupported here }
0
[ "CWE-89" ]
server
5ba77222e9fe7af8ff403816b5338b18b342053c
12,690,589,502,390,920,000,000,000,000,000,000,000
169
MDEV-21028 Server crashes in Query_arena::set_query_arena upon SELECT from view if the view has algorithm=temptable it is not updatable, so DEFAULT() for its fields is meaningless, and thus it's NULL or 0/'' for NOT NULL columns.
static void nft_fwd_neigh_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { struct nft_fwd_neigh *priv = nft_expr_priv(expr); void *addr = &regs->data[priv->sreg_addr]; int oif = regs->data[priv->sreg_dev]; unsigned int verdict = NF_STOLEN; struct sk_buff *skb = pkt->skb; struct net_device *dev; int neigh_table; switch (priv->nfproto) { case NFPROTO_IPV4: { struct iphdr *iph; if (skb->protocol != htons(ETH_P_IP)) { verdict = NFT_BREAK; goto out; } if (skb_try_make_writable(skb, sizeof(*iph))) { verdict = NF_DROP; goto out; } iph = ip_hdr(skb); ip_decrease_ttl(iph); neigh_table = NEIGH_ARP_TABLE; break; } case NFPROTO_IPV6: { struct ipv6hdr *ip6h; if (skb->protocol != htons(ETH_P_IPV6)) { verdict = NFT_BREAK; goto out; } if (skb_try_make_writable(skb, sizeof(*ip6h))) { verdict = NF_DROP; goto out; } ip6h = ipv6_hdr(skb); ip6h->hop_limit--; neigh_table = NEIGH_ND_TABLE; break; } default: verdict = NFT_BREAK; goto out; } dev = dev_get_by_index_rcu(nft_net(pkt), oif); if (dev == NULL) return; skb->dev = dev; skb->tstamp = 0; neigh_xmit(neigh_table, dev, addr, skb); out: regs->verdict.code = verdict; }
0
[ "CWE-269" ]
nf
b1a5983f56e371046dcf164f90bfaf704d2b89f6
22,565,014,169,365,477,000,000,000,000,000,000,000
60
netfilter: nf_tables_offload: incorrect flow offload action array size immediate verdict expression needs to allocate one slot in the flow offload action array, however, immediate data expression does not need to do so. fwd and dup expression need to allocate one slot, this is missing. Add a new offload_action interface to report if this expression needs to allocate one slot in the flow offload action array. Fixes: be2861dc36d7 ("netfilter: nft_{fwd,dup}_netdev: add offload support") Reported-and-tested-by: Nick Gregory <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]>
ews_backend_sync_authentication (EEwsBackend *ews_backend, ESource *child_source) { ESourceAuthentication *coll_authentication_extension, *child_authentication_extension; ESource *collection_source; g_return_if_fail (E_IS_EWS_BACKEND (ews_backend)); g_return_if_fail (E_IS_SOURCE (child_source)); collection_source = e_backend_get_source (E_BACKEND (ews_backend)); coll_authentication_extension = e_source_get_extension (collection_source, E_SOURCE_EXTENSION_AUTHENTICATION); child_authentication_extension = e_source_get_extension (child_source, E_SOURCE_EXTENSION_AUTHENTICATION); e_source_authentication_set_host (child_authentication_extension, e_source_authentication_get_host (coll_authentication_extension)); e_source_authentication_set_user (child_authentication_extension, e_source_authentication_get_user (coll_authentication_extension)); }
0
[ "CWE-295" ]
evolution-ews
915226eca9454b8b3e5adb6f2fff9698451778de
215,908,019,433,903,800,000,000,000,000,000,000,000
20
I#27 - SSL Certificates are not validated This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too. Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
DeepTiledInputFile::rawTileData (int &dx, int &dy, int &lx, int &ly, char * pixelData, Int64 &pixelDataSize) const { if (!isValidTile (dx, dy, lx, ly)) throw IEX_NAMESPACE::ArgExc ("Tried to read a tile outside " "the image file's data window."); Int64 tileOffset = _data->tileOffsets (dx, dy, lx, ly); if(tileOffset == 0) { THROW (IEX_NAMESPACE::InputExc, "Tile (" << dx << ", " << dy << ", " << lx << ", " << ly << ") is missing."); } #if ILMBASE_THREADING_ENABLED std::lock_guard<std::mutex> lock(*_data->_streamData); #endif if (_data->_streamData->is->tellg() != tileOffset) _data->_streamData->is->seekg (tileOffset); // // Read the first few bytes of the tile (the header). // Verify that the tile coordinates and the level number // are correct. // int tileXCoord, tileYCoord, levelX, levelY; if (isMultiPart(_data->version)) { int partNumber; Xdr::read <StreamIO> (*_data->_streamData->is, partNumber); if (partNumber != _data->partNumber) { THROW (IEX_NAMESPACE::ArgExc, "Unexpected part number " << partNumber << ", should be " << _data->partNumber << "."); } } Xdr::read <StreamIO> (*_data->_streamData->is, tileXCoord); Xdr::read <StreamIO> (*_data->_streamData->is, tileYCoord); Xdr::read <StreamIO> (*_data->_streamData->is, levelX); Xdr::read <StreamIO> (*_data->_streamData->is, levelY); Int64 sampleCountTableSize; Int64 packedDataSize; Xdr::read <StreamIO> (*_data->_streamData->is, sampleCountTableSize); Xdr::read <StreamIO> (*_data->_streamData->is, packedDataSize); if (tileXCoord != dx) throw IEX_NAMESPACE::InputExc ("Unexpected tile x coordinate."); if (tileYCoord != dy) throw IEX_NAMESPACE::InputExc ("Unexpected tile y coordinate."); if (levelX != lx) throw IEX_NAMESPACE::InputExc ("Unexpected tile x level number coordinate."); if (levelY != ly) throw IEX_NAMESPACE::InputExc ("Unexpected tile y level number coordinate."); // total requirement for reading all the data Int64 totalSizeRequired=40+sampleCountTableSize+packedDataSize; bool big_enough = 
totalSizeRequired<=pixelDataSize; pixelDataSize = totalSizeRequired; // was the block we were given big enough? if(!big_enough || pixelData==NULL) { // special case: seek stream back to start if we are at the beginning (regular reading pixels assumes it doesn't need to seek // in single part files) if(!isMultiPart(_data->version)) { _data->_streamData->is->seekg(_data->_streamData->currentPosition); } // leave lock here - bail before reading more data return; } // copy the values we have read into the output block *(int *) (pixelData+0) = dx; *(int *) (pixelData+4) = dy; *(int *) (pixelData+8) = levelX; *(int *) (pixelData+12) = levelY; *(Int64 *) (pixelData+16) =sampleCountTableSize; *(Int64 *) (pixelData+24) = packedDataSize; // didn't read the unpackedsize - do that now Xdr::read<StreamIO> (*_data->_streamData->is, *(Int64 *) (pixelData+32)); // read the actual data _data->_streamData->is->read(pixelData+40, sampleCountTableSize+packedDataSize); if(!isMultiPart(_data->version)) { _data->_streamData->currentPosition+=sampleCountTableSize+packedDataSize+40; } // leave lock here }
0
[ "CWE-125" ]
openexr
467be80b75642efbbe6bdace558079f68c16acb1
104,486,942,944,329,900,000,000,000,000,000,000,000
114
Fix overflow computing deeptile sample table size (#861) Signed-off-by: Peter Hillman <[email protected]>
utf16le_is_mbc_ambiguous(OnigCaseFoldType flag, const UChar** pp, const UChar* end) { const UChar* p = *pp; (*pp) += EncLen_UTF16[*(p+1)]; if (*(p+1) == 0) { int c, v; if (*p == 0xdf && (flag & INTERNAL_ONIGENC_CASE_FOLD_MULTI_CHAR) != 0) { return TRUE; } c = *p; v = ONIGENC_IS_UNICODE_ISO_8859_1_BIT_CTYPE(c, (BIT_CTYPE_UPPER | BIT_CTYPE_LOWER)); if ((v | BIT_CTYPE_LOWER) != 0) { /* 0xaa, 0xb5, 0xba are lower case letter, but can't convert. */ if (c >= 0xaa && c <= 0xba) return FALSE; else return TRUE; } return (v != 0 ? TRUE : FALSE); } return FALSE; }
0
[ "CWE-125" ]
php-src
b6fe458ef9ac1372b60c3d3810b0358e2e20840d
91,349,128,941,983,670,000,000,000,000,000,000,000
29
Fix bug #77418 - Heap overflow in utf32be_mbc_to_code (cherry picked from commit aeec40cb50eca6a97975765e2bacc28a5950cfa9)
static inline void set_max_mapnr(unsigned long limit) { }
0
[ "CWE-119" ]
linux
1be7107fbe18eed3e319a6c3e83c78254b693acb
161,348,896,810,716,150,000,000,000,000,000,000,000
1
mm: larger stack guard gap, between vmas Stack guard page is a useful feature to reduce a risk of stack smashing into a different mapping. We have been using a single page gap which is sufficient to prevent having stack adjacent to a different mapping. But this seems to be insufficient in the light of the stack usage in userspace. E.g. glibc uses as large as 64kB alloca() in many commonly used functions. Others use constructs liks gid_t buffer[NGROUPS_MAX] which is 256kB or stack strings with MAX_ARG_STRLEN. This will become especially dangerous for suid binaries and the default no limit for the stack size limit because those applications can be tricked to consume a large portion of the stack and a single glibc call could jump over the guard page. These attacks are not theoretical, unfortunatelly. Make those attacks less probable by increasing the stack guard gap to 1MB (on systems with 4k pages; but make it depend on the page size because systems with larger base pages might cap stack allocations in the PAGE_SIZE units) which should cover larger alloca() and VLA stack allocations. It is obviously not a full fix because the problem is somehow inherent, but it should reduce attack space a lot. One could argue that the gap size should be configurable from userspace, but that can be done later when somebody finds that the new 1MB is wrong for some special case applications. For now, add a kernel command line option (stack_guard_gap) to specify the stack gap size (in page units). Implementation wise, first delete all the old code for stack guard page: because although we could get away with accounting one extra page in a stack vma, accounting a larger gap can break userspace - case in point, a program run with "ulimit -S -v 20000" failed when the 1MB gap was counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK and strict non-overcommit mode. 
Instead of keeping gap inside the stack vma, maintain the stack guard gap as a gap between vmas: using vm_start_gap() in place of vm_start (or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few places which need to respect the gap - mainly arch_get_unmapped_area(), and and the vma tree's subtree_gap support for that. Original-patch-by: Oleg Nesterov <[email protected]> Original-patch-by: Michal Hocko <[email protected]> Signed-off-by: Hugh Dickins <[email protected]> Acked-by: Michal Hocko <[email protected]> Tested-by: Helge Deller <[email protected]> # parisc Signed-off-by: Linus Torvalds <[email protected]>
**/ inline double rand(const double val_max=1) { return cimg::rand(0,val_max);
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
112,492,017,600,279,850,000,000,000,000,000,000,000
3
Fix other issues in 'CImg<T>::load_bmp()'.
int ber_write_contextual_tag(wStream* s, BYTE tag, int length, BOOL pc) { Stream_Write_UINT8(s, (BER_CLASS_CTXT | BER_PC(pc)) | (BER_TAG_MASK & tag)); return 1 + ber_write_length(s, length); }
0
[ "CWE-476" ]
FreeRDP
0dc22d5a30a1c7d146b2a835b2032668127c33e9
93,156,606,400,829,170,000,000,000,000,000,000,000
5
Fixed a range of BER boundary encoding bugs which would occur when any NLA packet hit the 127 character mark. Removed ber#get_content_length as it was not behaving deterministically.
DECLAREContigPutFunc(putcontig8bitYCbCr44tile) { uint32* cp1 = cp+w+toskew; uint32* cp2 = cp1+w+toskew; uint32* cp3 = cp2+w+toskew; int32 incr = 3*w+4*toskew; (void) y; /* adjust fromskew */ fromskew = (fromskew / 4) * (4*2+2); if ((h & 3) == 0 && (w & 3) == 0) { for (; h >= 4; h -= 4) { x = w>>2; do { int32 Cb = pp[16]; int32 Cr = pp[17]; YCbCrtoRGB(cp [0], pp[ 0]); YCbCrtoRGB(cp [1], pp[ 1]); YCbCrtoRGB(cp [2], pp[ 2]); YCbCrtoRGB(cp [3], pp[ 3]); YCbCrtoRGB(cp1[0], pp[ 4]); YCbCrtoRGB(cp1[1], pp[ 5]); YCbCrtoRGB(cp1[2], pp[ 6]); YCbCrtoRGB(cp1[3], pp[ 7]); YCbCrtoRGB(cp2[0], pp[ 8]); YCbCrtoRGB(cp2[1], pp[ 9]); YCbCrtoRGB(cp2[2], pp[10]); YCbCrtoRGB(cp2[3], pp[11]); YCbCrtoRGB(cp3[0], pp[12]); YCbCrtoRGB(cp3[1], pp[13]); YCbCrtoRGB(cp3[2], pp[14]); YCbCrtoRGB(cp3[3], pp[15]); cp += 4; cp1 += 4; cp2 += 4; cp3 += 4; pp += 18; } while (--x); cp += incr; cp1 += incr; cp2 += incr; cp3 += incr; pp += fromskew; } } else { while (h > 0) { for (x = w; x > 0;) { int32 Cb = pp[16]; int32 Cr = pp[17]; switch (x) { default: switch (h) { default: YCbCrtoRGB(cp3[3], pp[15]); /* FALLTHROUGH */ case 3: YCbCrtoRGB(cp2[3], pp[11]); /* FALLTHROUGH */ case 2: YCbCrtoRGB(cp1[3], pp[ 7]); /* FALLTHROUGH */ case 1: YCbCrtoRGB(cp [3], pp[ 3]); /* FALLTHROUGH */ } /* FALLTHROUGH */ case 3: switch (h) { default: YCbCrtoRGB(cp3[2], pp[14]); /* FALLTHROUGH */ case 3: YCbCrtoRGB(cp2[2], pp[10]); /* FALLTHROUGH */ case 2: YCbCrtoRGB(cp1[2], pp[ 6]); /* FALLTHROUGH */ case 1: YCbCrtoRGB(cp [2], pp[ 2]); /* FALLTHROUGH */ } /* FALLTHROUGH */ case 2: switch (h) { default: YCbCrtoRGB(cp3[1], pp[13]); /* FALLTHROUGH */ case 3: YCbCrtoRGB(cp2[1], pp[ 9]); /* FALLTHROUGH */ case 2: YCbCrtoRGB(cp1[1], pp[ 5]); /* FALLTHROUGH */ case 1: YCbCrtoRGB(cp [1], pp[ 1]); /* FALLTHROUGH */ } /* FALLTHROUGH */ case 1: switch (h) { default: YCbCrtoRGB(cp3[0], pp[12]); /* FALLTHROUGH */ case 3: YCbCrtoRGB(cp2[0], pp[ 8]); /* FALLTHROUGH */ case 2: YCbCrtoRGB(cp1[0], pp[ 4]); /* FALLTHROUGH */ case 1: 
YCbCrtoRGB(cp [0], pp[ 0]); /* FALLTHROUGH */ } /* FALLTHROUGH */ } if (x < 4) { cp += x; cp1 += x; cp2 += x; cp3 += x; x = 0; } else { cp += 4; cp1 += 4; cp2 += 4; cp3 += 4; x -= 4; } pp += 18; } if (h <= 4) break; h -= 4; cp += incr; cp1 += incr; cp2 += incr; cp3 += incr; pp += fromskew; } } }
0
[ "CWE-787" ]
libtiff
4bb584a35f87af42d6cf09d15e9ce8909a839145
69,150,184,260,576,835,000,000,000,000,000,000,000
102
RGBA interface: fix integer overflow potentially causing write heap buffer overflow, especially on 32 bit builds. Fixes https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=16443. Credit to OSS Fuzz
static void *mmap_file(char const *fname) { void *addr; fd_map = open(fname, O_RDWR); if (fd_map < 0 || fstat(fd_map, &sb) < 0) { perror(fname); fail_file(); } if (!S_ISREG(sb.st_mode)) { fprintf(stderr, "not a regular file: %s\n", fname); fail_file(); } addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd_map, 0); if (addr == MAP_FAILED) { mmap_failed = 1; fprintf(stderr, "Could not mmap file: %s\n", fname); fail_file(); } return addr; }
0
[ "CWE-264" ]
linux
548acf19234dbda5a52d5a8e7e205af46e9da840
110,848,906,115,665,640,000,000,000,000,000,000,000
22
x86/mm: Expand the exception table logic to allow new handling options Huge amounts of help from Andy Lutomirski and Borislav Petkov to produce this. Andy provided the inspiration to add classes to the exception table with a clever bit-squeezing trick, Boris pointed out how much cleaner it would all be if we just had a new field. Linus Torvalds blessed the expansion with: ' I'd rather not be clever in order to save just a tiny amount of space in the exception table, which isn't really criticial for anybody. ' The third field is another relative function pointer, this one to a handler that executes the actions. We start out with three handlers: 1: Legacy - just jumps the to fixup IP 2: Fault - provide the trap number in %ax to the fixup code 3: Cleaned up legacy for the uaccess error hack Signed-off-by: Tony Luck <[email protected]> Reviewed-by: Borislav Petkov <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Thomas Gleixner <[email protected]> Link: http://lkml.kernel.org/r/f6af78fcbd348cf4939875cfda9c19689b5e50b8.1455732970.git.tony.luck@intel.com Signed-off-by: Ingo Molnar <[email protected]>
nbd_unlocked_get_private_data (struct nbd_handle *h) { return h->private_data; }
0
[ "CWE-252" ]
libnbd
c79706af4e7475bf58861a143b77b77a54e7a1cd
144,734,179,965,775,600,000,000,000,000,000,000,000
4
api: Add new API nbd_set_pread_initialize() The recent patch series for CVE-2022-0485 demonstrated that when applications using libnbd are not careful about error checking, the difference on whether a data leak is at least sanitized (all zeroes, partial reads, or data leftover from a prior read) vs. a dangerous information leak (uninitialized data from the heap) was partly under libnbd's control. The previous two patches changed libnbd to always sanitize, as a security hardening technique that prevents heap leaks no matter how buggy the client app is. But a blind memset() also adds an execution delay, even if it doesn't show up as the hot spot in our profiling when compared to the time spent with network traffic. At any rate, if client apps choose to pre-initialize their buffers, or otherwise audit their code to take on their own risk about not dereferencing a buffer on failure paths, then the time spent by libnbd doing memset() is wasted; so it is worth adding a knob to let a user opt in to faster execution at the expense of giving up our memset() hardening on their behalf. In addition to adding two new APIs, this patch also causes changes to the four existing APIs nbd_{aio_,}pread{,_structured}, with those generated lib/api.c changes looking like: | --- lib/api.c.bak 2022-02-10 08:17:09.973381979 -0600 | +++ lib/api.c 2022-02-10 08:22:27.503428024 -0600 | @@ -2871,7 +2914,8 @@ nbd_pread (struct nbd_handle *h, void *b | debug (h, "enter: buf=<buf> count=%zu offset=%" PRIu64 " flags=0x%x", count, offset, flags); | } | | - memset (buf, 0, count); | + if (h->pread_initialize) | + memset (buf, 0, count); | if (unlikely (!pread_in_permitted_state (h))) { | ret = -1; | goto out; Message-Id: <[email protected]> Acked-by: Laszlo Ersek <[email protected]> [eblake: enhance commit message to show generated file diff, mention CVE in doc text] Reviewed-by: Richard W.M. Jones <[email protected]> (cherry picked from commit e0953cb71250947bb97b25e34ff1ea34bd504bf3)
bool run_set_statement_if_requested(THD *thd, LEX *lex) { if (!lex->stmt_var_list.is_empty() && !thd->slave_thread) { Query_arena backup; DBUG_PRINT("info", ("SET STATEMENT %d vars", lex->stmt_var_list.elements)); lex->old_var_list.empty(); List_iterator_fast<set_var_base> it(lex->stmt_var_list); set_var_base *var; if (lex->set_arena_for_set_stmt(&backup)) return true; MEM_ROOT *mem_root= thd->mem_root; while ((var= it++)) { DBUG_ASSERT(var->is_system()); set_var *o= NULL, *v= (set_var*)var; if (!v->var->is_set_stmt_ok()) { my_error(ER_SET_STATEMENT_NOT_SUPPORTED, MYF(0), v->var->name.str); lex->reset_arena_for_set_stmt(&backup); lex->old_var_list.empty(); lex->free_arena_for_set_stmt(); return true; } if (v->var->session_is_default(thd)) o= new set_var(thd,v->type, v->var, &v->base, NULL); else { switch (v->var->option.var_type & GET_TYPE_MASK) { case GET_BOOL: case GET_INT: case GET_LONG: case GET_LL: { bool null_value; longlong val= v->var->val_int(&null_value, thd, v->type, &v->base); o= new set_var(thd, v->type, v->var, &v->base, (null_value ? (Item *) new (mem_root) Item_null(thd) : (Item *) new (mem_root) Item_int(thd, val))); } break; case GET_UINT: case GET_ULONG: case GET_ULL: { bool null_value; ulonglong val= v->var->val_int(&null_value, thd, v->type, &v->base); o= new set_var(thd, v->type, v->var, &v->base, (null_value ? (Item *) new (mem_root) Item_null(thd) : (Item *) new (mem_root) Item_uint(thd, val))); } break; case GET_DOUBLE: { bool null_value; double val= v->var->val_real(&null_value, thd, v->type, &v->base); o= new set_var(thd, v->type, v->var, &v->base, (null_value ? 
(Item *) new (mem_root) Item_null(thd) : (Item *) new (mem_root) Item_float(thd, val, 1))); } break; default: case GET_NO_ARG: case GET_DISABLED: DBUG_ASSERT(0); /* fall through */ case 0: case GET_FLAGSET: case GET_ENUM: case GET_SET: case GET_STR: case GET_STR_ALLOC: { char buff[STRING_BUFFER_USUAL_SIZE]; String tmp(buff, sizeof(buff), v->var->charset(thd)),*val; val= v->var->val_str(&tmp, thd, v->type, &v->base); if (val) { Item_string *str= new (mem_root) Item_string(thd, v->var->charset(thd), val->ptr(), val->length()); o= new set_var(thd, v->type, v->var, &v->base, str); } else o= new set_var(thd, v->type, v->var, &v->base, new (mem_root) Item_null(thd)); } break; } } DBUG_ASSERT(o); lex->old_var_list.push_back(o, thd->mem_root); } lex->reset_arena_for_set_stmt(&backup); if (lex->old_var_list.is_empty()) lex->free_arena_for_set_stmt(); if (thd->is_error() || sql_set_variables(thd, &lex->stmt_var_list, false)) { if (!thd->is_error()) my_error(ER_WRONG_ARGUMENTS, MYF(0), "SET"); lex->restore_set_statement_var(); return true; } /* The value of last_insert_id is remembered in THD to be written to binlog when it's used *the first time* in the statement. But SET STATEMENT must read the old value of last_insert_id to be able to restore it at the end. This should not count at "reading of last_insert_id" and should not remember last_insert_id for binlog. That is, it should clear stmt_depends_on_first_successful_insert_id_in_prev_stmt flag. */ if (!thd->in_sub_stmt) { thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0; } } return false; }
0
[]
server
ba4927e520190bbad763bb5260ae154f29a61231
231,583,606,422,985,330,000,000,000,000,000,000,000
128
MDEV-19398: Assertion `item1->type() == Item::FIELD_ITEM ... Window Functions code tries to minimize the number of times it needs to sort the select's resultset by finding "compatible" OVER (PARTITION BY ... ORDER BY ...) clauses. This employs compare_order_elements(). That function assumed that the order expressions are Item_field-derived objects (that refer to a temp.table). But this is not always the case: one can construct queries order expressions are arbitrary item expressions. Add handling for such expressions: sort them according to the window specification they appeared in. This means we cannot detect that two compatible PARTITION BY clauses that use expressions can share the sorting step. But at least we won't crash.
bool AuthorizationSessionImpl::isAuthorizedForActionsOnNamespace(const NamespaceString& ns, const ActionSet& actions) { return isAuthorizedForPrivilege(Privilege(ResourcePattern::forExactNamespace(ns), actions)); }
0
[ "CWE-613" ]
mongo
e55d6e2292e5dbe2f97153251d8193d1cc89f5d7
166,975,117,887,451,090,000,000,000,000,000,000,000
4
SERVER-38984 Validate unique User ID on UserCache hit
Symbol* Binary::add_local_symbol(uint64_t address, const std::string& name) { Symbol* symbol = nullptr; auto sym = std::make_unique<Symbol>(); sym->category_ = Symbol::CATEGORY::LOCAL; sym->origin_ = SYMBOL_ORIGINS::SYM_ORIGIN_LC_SYMTAB; sym->numberof_sections_ = 0; sym->description_ = static_cast<uint16_t>(SYMBOL_DESCRIPTIONS::N_NO_DEAD_STRIP); sym->value(address); sym->name(name); symbol = sym.get(); symbols_.push_back(std::move(sym)); return symbol; }
0
[ "CWE-703" ]
LIEF
7acf0bc4224081d4f425fcc8b2e361b95291d878
272,792,758,769,406,330,000,000,000,000,000,000,000
15
Resolve #764
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length) { int frg_cnt = 0; skb_frag_t *frag = NULL; struct page *page = NULL; int copy, left; int offset = 0; int ret; do { /* Return error if we don't have space for new frag */ frg_cnt = skb_shinfo(skb)->nr_frags; if (frg_cnt >= MAX_SKB_FRAGS) return -EFAULT; /* allocate a new page for next frag */ page = alloc_pages(sk->sk_allocation, 0); /* If alloc_page fails just return failure and caller will * free previous allocated pages by doing kfree_skb() */ if (page == NULL) return -ENOMEM; /* initialize the next frag */ sk->sk_sndmsg_page = page; sk->sk_sndmsg_off = 0; skb_fill_page_desc(skb, frg_cnt, page, 0, 0); skb->truesize += PAGE_SIZE; atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc); /* get the new initialized frag */ frg_cnt = skb_shinfo(skb)->nr_frags; frag = &skb_shinfo(skb)->frags[frg_cnt - 1]; /* copy the user data to page */ left = PAGE_SIZE - frag->page_offset; copy = (length > left)? left : length; ret = getfrag(from, (page_address(frag->page) + frag->page_offset + frag->size), offset, copy, 0, skb); if (ret < 0) return -EFAULT; /* copy was successful so update the size parameters */ sk->sk_sndmsg_off += copy; frag->size += copy; skb->len += copy; skb->data_len += copy; offset += copy; length -= copy; } while (length > 0); return 0; }
0
[]
linux
e89e9cf539a28df7d0eb1d0a545368e9920b34ac
272,388,274,174,741,530,000,000,000,000,000,000,000
60
[IPv4/IPv6]: UFO Scatter-gather approach Attached is kernel patch for UDP Fragmentation Offload (UFO) feature. 1. This patch incorporate the review comments by Jeff Garzik. 2. Renamed USO as UFO (UDP Fragmentation Offload) 3. udp sendfile support with UFO This patches uses scatter-gather feature of skb to generate large UDP datagram. Below is a "how-to" on changes required in network device driver to use the UFO interface. UDP Fragmentation Offload (UFO) Interface: ------------------------------------------- UFO is a feature wherein the Linux kernel network stack will offload the IP fragmentation functionality of large UDP datagram to hardware. This will reduce the overhead of stack in fragmenting the large UDP datagram to MTU sized packets 1) Drivers indicate their capability of UFO using dev->features |= NETIF_F_UFO | NETIF_F_HW_CSUM | NETIF_F_SG NETIF_F_HW_CSUM is required for UFO over ipv6. 2) UFO packet will be submitted for transmission using driver xmit routine. UFO packet will have a non-zero value for "skb_shinfo(skb)->ufo_size" skb_shinfo(skb)->ufo_size will indicate the length of data part in each IP fragment going out of the adapter after IP fragmentation by hardware. skb->data will contain MAC/IP/UDP header and skb_shinfo(skb)->frags[] contains the data payload. The skb->ip_summed will be set to CHECKSUM_HW indicating that hardware has to do checksum calculation. Hardware should compute the UDP checksum of complete datagram and also ip header checksum of each fragmented IP packet. For IPV6 the UFO provides the fragment identification-id in skb_shinfo(skb)->ip6_frag_id. The adapter should use this ID for generating IPv6 fragments. Signed-off-by: Ananda Raju <[email protected]> Signed-off-by: Rusty Russell <[email protected]> (forwarded) Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
TEST_F(QuicServerTransportTest, SwitchServerCidsNoOtherIds) { auto& conn = server->getNonConstConn(); EXPECT_EQ(conn.retireAndSwitchPeerConnectionIds(), false); EXPECT_EQ(conn.pendingEvents.frames.size(), 0); EXPECT_EQ(conn.peerConnectionIds.size(), 1); }
0
[ "CWE-617", "CWE-703" ]
mvfst
a67083ff4b8dcbb7ee2839da6338032030d712b0
24,955,416,925,691,153,000,000,000,000,000,000,000
7
Close connection if we derive an extra 1-rtt write cipher Summary: Fixes CVE-2021-24029 Reviewed By: mjoras, lnicco Differential Revision: D26613890 fbshipit-source-id: 19bb2be2c731808144e1a074ece313fba11f1945
TEST(RouterFilterUtilityTest, SetTimeoutHeaders) { { NiceMock<MockRouteEntry> route; Http::TestRequestHeaderMapImpl headers; FilterUtility::TimeoutData timeout; timeout.global_timeout_ = std::chrono::milliseconds(200); timeout.per_try_timeout_ = std::chrono::milliseconds(0); FilterUtility::setTimeoutHeaders(0, timeout, route, headers, true, false, false); EXPECT_EQ("200", headers.get_( "x-envoy-expected-rq-timeout-ms")); // No per try configured, use global timeout } { NiceMock<MockRouteEntry> route; Http::TestRequestHeaderMapImpl headers; FilterUtility::TimeoutData timeout; timeout.global_timeout_ = std::chrono::milliseconds(200); timeout.per_try_timeout_ = std::chrono::milliseconds(0); FilterUtility::setTimeoutHeaders(150, timeout, route, headers, true, false, false); EXPECT_EQ("50", headers.get_("x-envoy-expected-rq-timeout-ms")); // Remains of global timeout } { NiceMock<MockRouteEntry> route; Http::TestRequestHeaderMapImpl headers; FilterUtility::TimeoutData timeout; timeout.global_timeout_ = std::chrono::milliseconds(200); timeout.per_try_timeout_ = std::chrono::milliseconds(150); FilterUtility::setTimeoutHeaders(0, timeout, route, headers, true, false, false); EXPECT_EQ("150", headers.get_("x-envoy-expected-rq-timeout-ms")); // Per try timeout } { NiceMock<MockRouteEntry> route; Http::TestRequestHeaderMapImpl headers; FilterUtility::TimeoutData timeout; timeout.global_timeout_ = std::chrono::milliseconds(200); timeout.per_try_timeout_ = std::chrono::milliseconds(150); FilterUtility::setTimeoutHeaders(25, timeout, route, headers, true, false, false); EXPECT_EQ("150", headers.get_("x-envoy-expected-rq-timeout-ms")); // Per try timeout } { NiceMock<MockRouteEntry> route; Http::TestRequestHeaderMapImpl headers; FilterUtility::TimeoutData timeout; timeout.global_timeout_ = std::chrono::milliseconds(200); timeout.per_try_timeout_ = std::chrono::milliseconds(150); FilterUtility::setTimeoutHeaders(150, timeout, route, headers, true, false, false); EXPECT_EQ("50", 
headers.get_("x-envoy-expected-rq-timeout-ms")); // Remains of global timeout } { NiceMock<MockRouteEntry> route; Http::TestRequestHeaderMapImpl headers; FilterUtility::TimeoutData timeout; timeout.global_timeout_ = std::chrono::milliseconds(200); timeout.per_try_timeout_ = std::chrono::milliseconds(0); FilterUtility::setTimeoutHeaders(300, timeout, route, headers, true, false, false); EXPECT_EQ("1", headers.get_("x-envoy-expected-rq-timeout-ms")); // Over time } { NiceMock<MockRouteEntry> route; Http::TestRequestHeaderMapImpl headers; FilterUtility::TimeoutData timeout; timeout.global_timeout_ = std::chrono::milliseconds(200); timeout.per_try_timeout_ = std::chrono::milliseconds(150); FilterUtility::setTimeoutHeaders(0, timeout, route, headers, true, false, true); EXPECT_EQ("200", headers.get_("x-envoy-expected-rq-timeout-ms")); // Global timeout as hedged } { NiceMock<MockRouteEntry> route; Http::TestRequestHeaderMapImpl headers; FilterUtility::TimeoutData timeout; timeout.global_timeout_ = std::chrono::milliseconds(200); timeout.per_try_timeout_ = std::chrono::milliseconds(150); FilterUtility::setTimeoutHeaders(25, timeout, route, headers, true, false, true); EXPECT_EQ("175", headers.get_( "x-envoy-expected-rq-timeout-ms")); // Remains of global timeout as hedged } { NiceMock<MockRouteEntry> route; Http::TestRequestHeaderMapImpl headers; FilterUtility::TimeoutData timeout; timeout.global_timeout_ = std::chrono::milliseconds(200); timeout.per_try_timeout_ = std::chrono::milliseconds(150); FilterUtility::setTimeoutHeaders(150, timeout, route, headers, true, false, true); EXPECT_EQ("50", headers.get_( "x-envoy-expected-rq-timeout-ms")); // Remains of global timeout as hedged } }
0
[ "CWE-703" ]
envoy
5bf9b0f1e7f247a4eee7180849cb0823926f7fff
255,914,644,773,912,400,000,000,000,000,000,000,000
96
[1.21] CVE-2022-21655 Signed-off-by: Otto van der Schaaf <[email protected]>
static NTSTATUS cli_connect_sock_recv(struct tevent_req *req, int *pfd, uint16_t *pport) { struct cli_connect_sock_state *state = tevent_req_data( req, struct cli_connect_sock_state); NTSTATUS status; if (tevent_req_is_nterror(req, &status)) { return status; } *pfd = state->fd; *pport = state->port; return NT_STATUS_OK; }
0
[ "CWE-94" ]
samba
94295b7aa22d2544af5323bca70d3dcb97fd7c64
276,811,201,469,890,560,000,000,000,000,000,000,000
14
CVE-2016-2019: s3:libsmb: add comment regarding smbXcli_session_is_guest() with mandatory signing BUG: https://bugzilla.samba.org/show_bug.cgi?id=11860 Signed-off-by: Stefan Metzmacher <[email protected]>
Error Box_url::parse(BitstreamRange& range) { parse_full_box_header(range); m_location = range.read_string(); return range.get_error(); }
0
[ "CWE-703" ]
libheif
2710c930918609caaf0a664e9c7bc3dce05d5b58
36,291,652,175,160,833,000,000,000,000,000,000,000
8
force fraction to a limited resolution to finally solve those pesky numerical edge cases
static void vrend_update_scissor_state(struct vrend_sub_context *sub_ctx) { struct pipe_scissor_state *ss; GLint y; GLuint idx; unsigned mask = sub_ctx->scissor_state_dirty; while (mask) { idx = u_bit_scan(&mask); if (idx >= PIPE_MAX_VIEWPORTS) { vrend_report_buffer_error(sub_ctx->parent, 0); break; } ss = &sub_ctx->ss[idx]; y = ss->miny; if (idx > 0 && has_feature(feat_viewport_array)) glScissorIndexed(idx, ss->minx, y, ss->maxx - ss->minx, ss->maxy - ss->miny); else glScissor(ss->minx, y, ss->maxx - ss->minx, ss->maxy - ss->miny); } sub_ctx->scissor_state_dirty = 0; }
0
[ "CWE-787" ]
virglrenderer
95e581fd181b213c2ed7cdc63f2abc03eaaa77ec
41,699,862,078,099,783,000,000,000,000,000,000,000
23
vrend: Add test to resource OOB write and fix it v2: Also check that no depth != 1 has been send when none is due Closes: #250 Signed-off-by: Gert Wollny <[email protected]> Reviewed-by: Chia-I Wu <[email protected]>
bool Item_subselect::fix_fields(THD *thd_param, Item **ref) { char const *save_where= thd_param->where; uint8 uncacheable; bool res; thd= thd_param; DBUG_ASSERT(unit->thd == thd); status_var_increment(thd_param->status_var.feature_subquery); DBUG_ASSERT(fixed == 0); engine->set_thd((thd= thd_param)); if (!done_first_fix_fields) { done_first_fix_fields= TRUE; inside_first_fix_fields= TRUE; upper_refs.empty(); /* psergey-todo: remove _first_fix_fields calls, we need changes on every execution */ } eliminated= FALSE; parent_select= thd_param->lex->current_select; if (check_stack_overrun(thd, STACK_MIN_SIZE, (uchar*)&res)) return TRUE; if (!(res= engine->prepare(thd))) { // all transformation is done (used by prepared statements) changed= 1; inside_first_fix_fields= FALSE; /* Substitute the current item with an Item_in_optimizer that was created by Item_in_subselect::select_in_like_transformer and call fix_fields for the substituted item which in turn calls engine->prepare for the subquery predicate. */ if (substitution) { /* If the top item of the WHERE/HAVING condition changed, set correct WHERE/HAVING for PS. */ if (unit->outer_select()->where == (*ref)) unit->outer_select()->where= substitution; else if (unit->outer_select()->having == (*ref)) unit->outer_select()->having= substitution; (*ref)= substitution; substitution->name= name; substitution->name_length= name_length; if (have_to_be_excluded) engine->exclude(); substitution= 0; thd->where= "checking transformed subquery"; if (!(*ref)->fixed) res= (*ref)->fix_fields(thd, ref); goto end; } // Is it one field subselect? 
if (engine->cols() > max_columns) { my_error(ER_OPERAND_COLUMNS, MYF(0), 1); res= TRUE; goto end; } if (fix_length_and_dec()) { res= TRUE; goto end; } } else goto end; if ((uncacheable= engine->uncacheable() & ~UNCACHEABLE_EXPLAIN) || with_recursive_reference) { const_item_cache= 0; if (uncacheable & UNCACHEABLE_RAND) used_tables_cache|= RAND_TABLE_BIT; } fixed= 1; end: done_first_fix_fields= FALSE; inside_first_fix_fields= FALSE; thd->where= save_where; return res; }
0
[ "CWE-89" ]
server
3c209bfc040ddfc41ece8357d772547432353fd2
193,748,442,790,990,500,000,000,000,000,000,000,000
98
MDEV-25994: Crash with union of my_decimal type in ORDER BY clause When single-row subquery fails with "Subquery reutrns more than 1 row" error, it will raise an error and return NULL. On the other hand, Item_singlerow_subselect sets item->maybe_null=0 for table-less subqueries like "(SELECT not_null_value)" (*) This discrepancy (item with maybe_null=0 returning NULL) causes the code in Type_handler_decimal_result::make_sort_key_part() to crash. Fixed this by allowing inference (*) only when the subquery is NOT a UNION.
plperl_inline_callback(void *arg) { errcontext("PL/Perl anonymous code block"); }
0
[ "CWE-264" ]
postgres
537cbd35c893e67a63c59bc636c3e888bd228bc7
153,308,397,904,276,460,000,000,000,000,000,000,000
4
Prevent privilege escalation in explicit calls to PL validators. The primary role of PL validators is to be called implicitly during CREATE FUNCTION, but they are also normal functions that a user can call explicitly. Add a permissions check to each validator to ensure that a user cannot use explicit validator calls to achieve things he could not otherwise achieve. Back-patch to 8.4 (all supported versions). Non-core procedural language extensions ought to make the same two-line change to their own validators. Andres Freund, reviewed by Tom Lane and Noah Misch. Security: CVE-2014-0061
void CoreUserInputHandler::handleMsg(const BufferInfo &bufferInfo, const QString &msg) { Q_UNUSED(bufferInfo); if (!msg.contains(' ')) return; QString target = msg.section(' ', 0, 0); QByteArray encMsg = userEncode(target, msg.section(' ', 1)); #ifdef HAVE_QCA2 putPrivmsg(serverEncode(target), encMsg, network()->cipher(target)); #else putPrivmsg(serverEncode(target), encMsg); #endif }
1
[ "CWE-399" ]
quassel
b5e38970ffd55e2dd9f706ce75af9a8d7730b1b8
96,090,654,480,204,370,000,000,000,000,000,000,000
15
Improve the message-splitting algorithm for PRIVMSG and CTCP This introduces a new message splitting algorithm based on QTextBoundaryFinder. It works by first starting with the entire message to be sent, encoding it, and checking to see if it is over the maximum message length. If it is, it uses QTBF to find the word boundary most immediately preceding the maximum length. If no suitable boundary can be found, it falls back to searching for grapheme boundaries. It repeats this process until the entire message has been sent. Unlike what it replaces, the new splitting code is not recursive and cannot cause stack overflows. Additionally, if it is unable to split a string, it will give up gracefully and not crash the core or cause a thread to run away. This patch fixes two bugs. The first is garbage characters caused by accidentally splitting the string in the middle of a multibyte character. Since the new code splits at a character level instead of a byte level, this will no longer be an issue. The second is the core crash caused by sending an overlength CTCP query ("/me") containing only multibyte characters. This bug was caused by the old CTCP splitter using the byte index from lastParamOverrun() as a character index for a QString.
init_decompression(struct archive_read *a, struct _7zip *zip, const struct _7z_coder *coder1, const struct _7z_coder *coder2) { int r; zip->codec = coder1->codec; zip->codec2 = -1; switch (zip->codec) { case _7Z_COPY: case _7Z_BZ2: case _7Z_DEFLATE: case _7Z_PPMD: if (coder2 != NULL) { if (coder2->codec != _7Z_X86 && coder2->codec != _7Z_X86_BCJ2) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Unsupported filter %lx for %lx", coder2->codec, coder1->codec); return (ARCHIVE_FAILED); } zip->codec2 = coder2->codec; zip->bcj_state = 0; if (coder2->codec == _7Z_X86) x86_Init(zip); } break; default: break; } switch (zip->codec) { case _7Z_COPY: break; case _7Z_LZMA: case _7Z_LZMA2: #ifdef HAVE_LZMA_H #if LZMA_VERSION_MAJOR >= 5 /* Effectively disable the limiter. */ #define LZMA_MEMLIMIT UINT64_MAX #else /* NOTE: This needs to check memory size which running system has. */ #define LZMA_MEMLIMIT (1U << 30) #endif { lzma_options_delta delta_opt; lzma_filter filters[LZMA_FILTERS_MAX], *ff; int fi = 0; if (zip->lzstream_valid) { lzma_end(&(zip->lzstream)); zip->lzstream_valid = 0; } /* * NOTE: liblzma incompletely handle the BCJ+LZMA compressed * data made by 7-Zip because 7-Zip does not add End-Of- * Payload Marker(EOPM) at the end of LZMA compressed data, * and so liblzma cannot know the end of the compressed data * without EOPM. So consequently liblzma will not return last * three or four bytes of uncompressed data because * LZMA_FILTER_X86 filter does not handle input data if its * data size is less than five bytes. If liblzma detect EOPM * or know the uncompressed data size, liblzma will flush out * the remaining that three or four bytes of uncompressed * data. That is why we have to use our converting program * for BCJ+LZMA. If we were able to tell the uncompressed * size to liblzma when using lzma_raw_decoder() liblzma * could correctly deal with BCJ+LZMA. But unfortunately * there is no way to do that. * Discussion about this can be found at XZ Utils forum. 
*/ if (coder2 != NULL) { zip->codec2 = coder2->codec; filters[fi].options = NULL; switch (zip->codec2) { case _7Z_X86: if (zip->codec == _7Z_LZMA2) { filters[fi].id = LZMA_FILTER_X86; fi++; } else /* Use our filter. */ x86_Init(zip); break; case _7Z_X86_BCJ2: /* Use our filter. */ zip->bcj_state = 0; break; case _7Z_DELTA: filters[fi].id = LZMA_FILTER_DELTA; memset(&delta_opt, 0, sizeof(delta_opt)); delta_opt.type = LZMA_DELTA_TYPE_BYTE; delta_opt.dist = 1; filters[fi].options = &delta_opt; fi++; break; /* Following filters have not been tested yet. */ case _7Z_POWERPC: filters[fi].id = LZMA_FILTER_POWERPC; fi++; break; case _7Z_IA64: filters[fi].id = LZMA_FILTER_IA64; fi++; break; case _7Z_ARM: filters[fi].id = LZMA_FILTER_ARM; fi++; break; case _7Z_ARMTHUMB: filters[fi].id = LZMA_FILTER_ARMTHUMB; fi++; break; case _7Z_SPARC: filters[fi].id = LZMA_FILTER_SPARC; fi++; break; default: archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Unexpected codec ID: %lX", zip->codec2); return (ARCHIVE_FAILED); } } if (zip->codec == _7Z_LZMA2) filters[fi].id = LZMA_FILTER_LZMA2; else filters[fi].id = LZMA_FILTER_LZMA1; filters[fi].options = NULL; ff = &filters[fi]; r = lzma_properties_decode(&filters[fi], NULL, coder1->properties, (size_t)coder1->propertiesSize); if (r != LZMA_OK) { set_error(a, r); return (ARCHIVE_FAILED); } fi++; filters[fi].id = LZMA_VLI_UNKNOWN; filters[fi].options = NULL; r = lzma_raw_decoder(&(zip->lzstream), filters); free(ff->options); if (r != LZMA_OK) { set_error(a, r); return (ARCHIVE_FAILED); } zip->lzstream_valid = 1; zip->lzstream.total_in = 0; zip->lzstream.total_out = 0; break; } #else archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "LZMA codec is unsupported"); return (ARCHIVE_FAILED); #endif case _7Z_BZ2: #if defined(HAVE_BZLIB_H) && defined(BZ_CONFIG_ERROR) if (zip->bzstream_valid) { BZ2_bzDecompressEnd(&(zip->bzstream)); zip->bzstream_valid = 0; } r = BZ2_bzDecompressInit(&(zip->bzstream), 0, 0); if (r == BZ_MEM_ERROR) r = 
BZ2_bzDecompressInit(&(zip->bzstream), 0, 1); if (r != BZ_OK) { int err = ARCHIVE_ERRNO_MISC; const char *detail = NULL; switch (r) { case BZ_PARAM_ERROR: detail = "invalid setup parameter"; break; case BZ_MEM_ERROR: err = ENOMEM; detail = "out of memory"; break; case BZ_CONFIG_ERROR: detail = "mis-compiled library"; break; } archive_set_error(&a->archive, err, "Internal error initializing decompressor: %s", detail != NULL ? detail : "??"); zip->bzstream_valid = 0; return (ARCHIVE_FAILED); } zip->bzstream_valid = 1; zip->bzstream.total_in_lo32 = 0; zip->bzstream.total_in_hi32 = 0; zip->bzstream.total_out_lo32 = 0; zip->bzstream.total_out_hi32 = 0; break; #else archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "BZ2 codec is unsupported"); return (ARCHIVE_FAILED); #endif case _7Z_DEFLATE: #ifdef HAVE_ZLIB_H if (zip->stream_valid) r = inflateReset(&(zip->stream)); else r = inflateInit2(&(zip->stream), -15 /* Don't check for zlib header */); if (r != Z_OK) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Couldn't initialize zlib stream."); return (ARCHIVE_FAILED); } zip->stream_valid = 1; zip->stream.total_in = 0; zip->stream.total_out = 0; break; #else archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "DEFLATE codec is unsupported"); return (ARCHIVE_FAILED); #endif case _7Z_PPMD: { unsigned order; uint32_t msize; if (zip->ppmd7_valid) { __archive_ppmd7_functions.Ppmd7_Free( &zip->ppmd7_context); zip->ppmd7_valid = 0; } if (coder1->propertiesSize < 5) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Malformed PPMd parameter"); return (ARCHIVE_FAILED); } order = coder1->properties[0]; msize = archive_le32dec(&(coder1->properties[1])); if (order < PPMD7_MIN_ORDER || order > PPMD7_MAX_ORDER || msize < PPMD7_MIN_MEM_SIZE || msize > PPMD7_MAX_MEM_SIZE) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Malformed PPMd parameter"); return (ARCHIVE_FAILED); } __archive_ppmd7_functions.Ppmd7_Construct(&zip->ppmd7_context); r = 
__archive_ppmd7_functions.Ppmd7_Alloc( &zip->ppmd7_context, msize); if (r == 0) { archive_set_error(&a->archive, ENOMEM, "Coludn't allocate memory for PPMd"); return (ARCHIVE_FATAL); } __archive_ppmd7_functions.Ppmd7_Init( &zip->ppmd7_context, order); __archive_ppmd7_functions.Ppmd7z_RangeDec_CreateVTable( &zip->range_dec); zip->ppmd7_valid = 1; zip->ppmd7_stat = 0; zip->ppstream.overconsumed = 0; zip->ppstream.total_in = 0; zip->ppstream.total_out = 0; break; } case _7Z_X86: case _7Z_X86_BCJ2: case _7Z_POWERPC: case _7Z_IA64: case _7Z_ARM: case _7Z_ARMTHUMB: case _7Z_SPARC: case _7Z_DELTA: archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Unexpected codec ID: %lX", zip->codec); return (ARCHIVE_FAILED); case _7Z_CRYPTO_MAIN_ZIP: case _7Z_CRYPTO_RAR_29: case _7Z_CRYPTO_AES_256_SHA_256: if (a->entry) { archive_entry_set_is_metadata_encrypted(a->entry, 1); archive_entry_set_is_data_encrypted(a->entry, 1); zip->has_encrypted_entries = 1; } archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Crypto codec not supported yet (ID: 0x%lX)", zip->codec); return (ARCHIVE_FAILED); default: archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Unknown codec ID: %lX", zip->codec); return (ARCHIVE_FAILED); } return (ARCHIVE_OK); }
0
[ "CWE-125" ]
libarchive
65a23f5dbee4497064e9bb467f81138a62b0dae1
6,438,359,954,045,534,000,000,000,000,000,000,000
294
7zip: fix crash when parsing certain archives Fuzzing with CRCs disabled revealed that a call to get_uncompressed_data() would sometimes fail to return at least 'minimum' bytes. This can cause the crc32() invocation in header_bytes to read off into invalid memory. A specially crafted archive can use this to cause a crash. An ASAN trace is below, but ASAN is not required - an uninstrumented binary will also crash. ==7719==ERROR: AddressSanitizer: SEGV on unknown address 0x631000040000 (pc 0x7fbdb3b3ec1d bp 0x7ffe77a51310 sp 0x7ffe77a51150 T0) ==7719==The signal is caused by a READ memory access. #0 0x7fbdb3b3ec1c in crc32_z (/lib/x86_64-linux-gnu/libz.so.1+0x2c1c) #1 0x84f5eb in header_bytes (/tmp/libarchive/bsdtar+0x84f5eb) #2 0x856156 in read_Header (/tmp/libarchive/bsdtar+0x856156) #3 0x84e134 in slurp_central_directory (/tmp/libarchive/bsdtar+0x84e134) #4 0x849690 in archive_read_format_7zip_read_header (/tmp/libarchive/bsdtar+0x849690) #5 0x5713b7 in _archive_read_next_header2 (/tmp/libarchive/bsdtar+0x5713b7) #6 0x570e63 in _archive_read_next_header (/tmp/libarchive/bsdtar+0x570e63) #7 0x6f08bd in archive_read_next_header (/tmp/libarchive/bsdtar+0x6f08bd) #8 0x52373f in read_archive (/tmp/libarchive/bsdtar+0x52373f) #9 0x5257be in tar_mode_x (/tmp/libarchive/bsdtar+0x5257be) #10 0x51daeb in main (/tmp/libarchive/bsdtar+0x51daeb) #11 0x7fbdb27cab96 in __libc_start_main /build/glibc-OTsEL5/glibc-2.27/csu/../csu/libc-start.c:310 #12 0x41dd09 in _start (/tmp/libarchive/bsdtar+0x41dd09) This was primarly done with afl and FairFuzz. Some early corpus entries may have been generated by qsym.
ex_insn_addr(const struct exception_table_entry *x) { return (unsigned long)&x->insn + x->insn; }
0
[ "CWE-264" ]
linux
548acf19234dbda5a52d5a8e7e205af46e9da840
154,373,708,503,100,590,000,000,000,000,000,000,000
4
x86/mm: Expand the exception table logic to allow new handling options Huge amounts of help from Andy Lutomirski and Borislav Petkov to produce this. Andy provided the inspiration to add classes to the exception table with a clever bit-squeezing trick, Boris pointed out how much cleaner it would all be if we just had a new field. Linus Torvalds blessed the expansion with: ' I'd rather not be clever in order to save just a tiny amount of space in the exception table, which isn't really criticial for anybody. ' The third field is another relative function pointer, this one to a handler that executes the actions. We start out with three handlers: 1: Legacy - just jumps the to fixup IP 2: Fault - provide the trap number in %ax to the fixup code 3: Cleaned up legacy for the uaccess error hack Signed-off-by: Tony Luck <[email protected]> Reviewed-by: Borislav Petkov <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Thomas Gleixner <[email protected]> Link: http://lkml.kernel.org/r/f6af78fcbd348cf4939875cfda9c19689b5e50b8.1455732970.git.tony.luck@intel.com Signed-off-by: Ingo Molnar <[email protected]>
static int adjust_tp(struct atm_trafprm *tp, unsigned char aal) { int max_sdu; if (!tp->traffic_class) return 0; switch (aal) { case ATM_AAL0: max_sdu = ATM_CELL_SIZE-1; break; case ATM_AAL34: max_sdu = ATM_MAX_AAL34_PDU; break; default: pr_warning("AAL problems ... (%d)\n", aal); /* fall through */ case ATM_AAL5: max_sdu = ATM_MAX_AAL5_PDU; } if (!tp->max_sdu) tp->max_sdu = max_sdu; else if (tp->max_sdu > max_sdu) return -EINVAL; if (!tp->max_cdv) tp->max_cdv = ATM_MAX_CDV; return 0; }
0
[ "CWE-20", "CWE-269" ]
linux
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
2,957,859,293,233,192,000,000,000,000,000,000,000
27
net: rework recvmsg handler msg_name and msg_namelen logic This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user. This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory. Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL. Also document these changes in include/linux/net.h as suggested by David Miller. Changes since RFC: Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec. With this change in place I could remove " if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL ". This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL. Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style. Cc: David Miller <[email protected]> Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
free_deregistered_data (gpointer data, gpointer user_data _U_) { g_free (data); }
0
[ "CWE-401" ]
wireshark
a9fc769d7bb4b491efb61c699d57c9f35269d871
60,987,378,938,341,920,000,000,000,000,000,000,000
4
epan: Fix a memory leak. Make sure _proto_tree_add_bits_ret_val allocates a bits array using the packet scope, otherwise we leak memory. Fixes #17032.
void cgit_vprint_error(const char *fmt, va_list ap) { va_list cp; html("<div class='error'>"); va_copy(cp, ap); html_vtxtf(fmt, cp); va_end(cp); html("</div>\n"); }
0
[]
cgit
513b3863d999f91b47d7e9f26710390db55f9463
155,062,210,170,017,290,000,000,000,000,000,000,000
9
ui-shared: prevent malicious filename from injecting headers
get_external_key_retries(struct sc_card *card, unsigned char kid, unsigned char *retries) { int r; struct sc_apdu apdu; unsigned char random[16] = { 0 }; r = sc_get_challenge(card, random, 8); LOG_TEST_RET(card->ctx, r, "get challenge get_external_key_retries failed"); sc_format_apdu(card, &apdu, SC_APDU_CASE_1, 0x82, 0x01, 0x80 | kid); apdu.resp = NULL; apdu.resplen = 0; r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU get_external_key_retries failed"); if (retries && ((0x63 == (apdu.sw1 & 0xff)) && (0xC0 == (apdu.sw2 & 0xf0)))) { *retries = (apdu.sw2 & 0x0f); r = SC_SUCCESS; } else { LOG_TEST_RET(card->ctx, r, "get_external_key_retries failed"); r = SC_ERROR_CARD_CMD_FAILED; } return r; }
0
[ "CWE-415", "CWE-119" ]
OpenSC
360e95d45ac4123255a4c796db96337f332160ad
70,085,580,604,630,860,000,000,000,000,000,000,000
27
fixed out of bounds writes Thanks to Eric Sesterhenn from X41 D-SEC GmbH for reporting the problems.
window_select_shape_events (GSWindow *window) { #ifdef HAVE_SHAPE_EXT unsigned long events; int shape_error_base; gdk_error_trap_push (); if (XShapeQueryExtension (GDK_DISPLAY_XDISPLAY (gdk_display_get_default ()), &window->priv->shape_event_base, &shape_error_base)) { events = ShapeNotifyMask; XShapeSelectInput (GDK_DISPLAY_XDISPLAY (gdk_display_get_default ()), GDK_WINDOW_XID (gtk_widget_get_window (GTK_WIDGET (window))), events); } gdk_error_trap_pop_ignored (); #endif }
0
[ "CWE-284" ]
cinnamon-screensaver
da7af55f1fa966c52e15cc288d4f8928eca8cc9f
78,259,456,656,284,620,000,000,000,000,000,000,000
16
Workaround gtk3 bug, don't allow GtkWindow to handle popup_menu.
ldp_pdu_print(netdissect_options *ndo, register const u_char *pptr) { const struct ldp_common_header *ldp_com_header; const struct ldp_msg_header *ldp_msg_header; const u_char *tptr,*msg_tptr; u_short tlen; u_short pdu_len,msg_len,msg_type,msg_tlen; int hexdump,processed; ldp_com_header = (const struct ldp_common_header *)pptr; ND_TCHECK(*ldp_com_header); /* * Sanity checking of the header. */ if (EXTRACT_16BITS(&ldp_com_header->version) != LDP_VERSION) { ND_PRINT((ndo, "%sLDP version %u packet not supported", (ndo->ndo_vflag < 1) ? "" : "\n\t", EXTRACT_16BITS(&ldp_com_header->version))); return 0; } pdu_len = EXTRACT_16BITS(&ldp_com_header->pdu_length); if (pdu_len < sizeof(const struct ldp_common_header)-4) { /* length too short */ ND_PRINT((ndo, "%sLDP, pdu-length: %u (too short, < %u)", (ndo->ndo_vflag < 1) ? "" : "\n\t", pdu_len, (u_int)(sizeof(const struct ldp_common_header)-4))); return 0; } /* print the LSR-ID, label-space & length */ ND_PRINT((ndo, "%sLDP, Label-Space-ID: %s:%u, pdu-length: %u", (ndo->ndo_vflag < 1) ? "" : "\n\t", ipaddr_string(ndo, &ldp_com_header->lsr_id), EXTRACT_16BITS(&ldp_com_header->label_space), pdu_len)); /* bail out if non-verbose */ if (ndo->ndo_vflag < 1) return 0; /* ok they seem to want to know everything - lets fully decode it */ tptr = pptr + sizeof(const struct ldp_common_header); tlen = pdu_len - (sizeof(const struct ldp_common_header)-4); /* Type & Length fields not included */ while(tlen>0) { /* did we capture enough for fully decoding the msg header ? 
*/ ND_TCHECK2(*tptr, sizeof(struct ldp_msg_header)); ldp_msg_header = (const struct ldp_msg_header *)tptr; msg_len=EXTRACT_16BITS(ldp_msg_header->length); msg_type=LDP_MASK_MSG_TYPE(EXTRACT_16BITS(ldp_msg_header->type)); if (msg_len < sizeof(struct ldp_msg_header)-4) { /* length too short */ /* FIXME vendor private / experimental check */ ND_PRINT((ndo, "\n\t %s Message (0x%04x), length: %u (too short, < %u)", tok2str(ldp_msg_values, "Unknown", msg_type), msg_type, msg_len, (u_int)(sizeof(struct ldp_msg_header)-4))); return 0; } /* FIXME vendor private / experimental check */ ND_PRINT((ndo, "\n\t %s Message (0x%04x), length: %u, Message ID: 0x%08x, Flags: [%s if unknown]", tok2str(ldp_msg_values, "Unknown", msg_type), msg_type, msg_len, EXTRACT_32BITS(&ldp_msg_header->id), LDP_MASK_U_BIT(EXTRACT_16BITS(&ldp_msg_header->type)) ? "continue processing" : "ignore")); msg_tptr=tptr+sizeof(struct ldp_msg_header); msg_tlen=msg_len-(sizeof(struct ldp_msg_header)-4); /* Type & Length fields not included */ /* did we capture enough for fully decoding the message ? */ ND_TCHECK2(*tptr, msg_len); hexdump=FALSE; switch(msg_type) { case LDP_MSG_NOTIF: case LDP_MSG_HELLO: case LDP_MSG_INIT: case LDP_MSG_KEEPALIVE: case LDP_MSG_ADDRESS: case LDP_MSG_LABEL_MAPPING: case LDP_MSG_ADDRESS_WITHDRAW: case LDP_MSG_LABEL_WITHDRAW: while(msg_tlen >= 4) { processed = ldp_tlv_print(ndo, msg_tptr, msg_tlen); if (processed == 0) break; msg_tlen-=processed; msg_tptr+=processed; } break; /* * FIXME those are the defined messages that lack a decoder * you are welcome to contribute code ;-) */ case LDP_MSG_LABEL_REQUEST: case LDP_MSG_LABEL_RELEASE: case LDP_MSG_LABEL_ABORT_REQUEST: default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, msg_tptr, "\n\t ", msg_tlen); break; } /* do we want to see an additionally hexdump ? 
*/ if (ndo->ndo_vflag > 1 || hexdump==TRUE) print_unknown_data(ndo, tptr+sizeof(struct ldp_msg_header), "\n\t ", msg_len); tptr += msg_len+4; tlen -= msg_len+4; } return pdu_len+4; trunc: ND_PRINT((ndo, "\n\t\t packet exceeded snapshot")); return 0; }
1
[ "CWE-125" ]
tcpdump
aa5c6b710dfd8020d2c908d6b3bd41f1da719b3b
88,213,586,602,458,170,000,000,000,000,000,000,000
132
(for 4.9.3) CVE-2018-14461/LDP: Fix a bounds check In ldp_tlv_print(), the FT Session TLV length must be 12, not 8 (RFC3479) This fixes a buffer over-read discovered by Konrad Rieck and Bhargava Shastry. Add a test using the capture file supplied by the reporter(s). Moreover: Add and use tstr[]. Add a comment.
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other) { /* We have to repoint aux->prog to self, as we don't * know whether fp here is the clone or the original. */ fp->aux->prog = fp; bpf_prog_clone_free(fp_other); }
0
[ "CWE-120" ]
linux
050fad7c4534c13c8eb1d9c2ba66012e014773cb
10,502,584,426,034,164,000,000,000,000,000,000,000
8
bpf: fix truncated jump targets on heavy expansions Recently during testing, I ran into the following panic: [ 207.892422] Internal error: Accessing user space memory outside uaccess.h routines: 96000004 [#1] SMP [ 207.901637] Modules linked in: binfmt_misc [...] [ 207.966530] CPU: 45 PID: 2256 Comm: test_verifier Tainted: G W 4.17.0-rc3+ #7 [ 207.974956] Hardware name: FOXCONN R2-1221R-A4/C2U4N_MB, BIOS G31FB18A 03/31/2017 [ 207.982428] pstate: 60400005 (nZCv daif +PAN -UAO) [ 207.987214] pc : bpf_skb_load_helper_8_no_cache+0x34/0xc0 [ 207.992603] lr : 0xffff000000bdb754 [ 207.996080] sp : ffff000013703ca0 [ 207.999384] x29: ffff000013703ca0 x28: 0000000000000001 [ 208.004688] x27: 0000000000000001 x26: 0000000000000000 [ 208.009992] x25: ffff000013703ce0 x24: ffff800fb4afcb00 [ 208.015295] x23: ffff00007d2f5038 x22: ffff00007d2f5000 [ 208.020599] x21: fffffffffeff2a6f x20: 000000000000000a [ 208.025903] x19: ffff000009578000 x18: 0000000000000a03 [ 208.031206] x17: 0000000000000000 x16: 0000000000000000 [ 208.036510] x15: 0000ffff9de83000 x14: 0000000000000000 [ 208.041813] x13: 0000000000000000 x12: 0000000000000000 [ 208.047116] x11: 0000000000000001 x10: ffff0000089e7f18 [ 208.052419] x9 : fffffffffeff2a6f x8 : 0000000000000000 [ 208.057723] x7 : 000000000000000a x6 : 00280c6160000000 [ 208.063026] x5 : 0000000000000018 x4 : 0000000000007db6 [ 208.068329] x3 : 000000000008647a x2 : 19868179b1484500 [ 208.073632] x1 : 0000000000000000 x0 : ffff000009578c08 [ 208.078938] Process test_verifier (pid: 2256, stack limit = 0x0000000049ca7974) [ 208.086235] Call trace: [ 208.088672] bpf_skb_load_helper_8_no_cache+0x34/0xc0 [ 208.093713] 0xffff000000bdb754 [ 208.096845] bpf_test_run+0x78/0xf8 [ 208.100324] bpf_prog_test_run_skb+0x148/0x230 [ 208.104758] sys_bpf+0x314/0x1198 [ 208.108064] el0_svc_naked+0x30/0x34 [ 208.111632] Code: 91302260 f9400001 f9001fa1 d2800001 (29500680) [ 208.117717] ---[ end trace 263cb8a59b5bf29f ]--- The program itself which caused this had a 
long jump over the whole instruction sequence where all of the inner instructions required heavy expansions into multiple BPF instructions. Additionally, I also had BPF hardening enabled which requires once more rewrites of all constant values in order to blind them. Each time we rewrite insns, bpf_adj_branches() would need to potentially adjust branch targets which cross the patchlet boundary to accommodate for the additional delta. Eventually that lead to the case where the target offset could not fit into insn->off's upper 0x7fff limit anymore where then offset wraps around becoming negative (in s16 universe), or vice versa depending on the jump direction. Therefore it becomes necessary to detect and reject any such occasions in a generic way for native eBPF and cBPF to eBPF migrations. For the latter we can simply check bounds in the bpf_convert_filter()'s BPF_EMIT_JMP helper macro and bail out once we surpass limits. The bpf_patch_insn_single() for native eBPF (and cBPF to eBPF in case of subsequent hardening) is a bit more complex in that we need to detect such truncations before hitting the bpf_prog_realloc(). Thus the latter is split into an extra pass to probe problematic offsets on the original program in order to fail early. With that in place and carefully tested I no longer hit the panic and the rewrites are rejected properly. The above example panic I've seen on bpf-next, though the issue itself is generic in that a guard against this issue in bpf seems more appropriate in this case. Signed-off-by: Daniel Borkmann <[email protected]> Acked-by: Martin KaFai Lau <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]>
bit_and(PG_FUNCTION_ARGS) { VarBit *arg1 = PG_GETARG_VARBIT_P(0); VarBit *arg2 = PG_GETARG_VARBIT_P(1); VarBit *result; int len, bitlen1, bitlen2, i; bits8 *p1, *p2, *r; bitlen1 = VARBITLEN(arg1); bitlen2 = VARBITLEN(arg2); if (bitlen1 != bitlen2) ereport(ERROR, (errcode(ERRCODE_STRING_DATA_LENGTH_MISMATCH), errmsg("cannot AND bit strings of different sizes"))); len = VARSIZE(arg1); result = (VarBit *) palloc(len); SET_VARSIZE(result, len); VARBITLEN(result) = bitlen1; p1 = VARBITS(arg1); p2 = VARBITS(arg2); r = VARBITS(result); for (i = 0; i < VARBITBYTES(arg1); i++) *r++ = *p1++ & *p2++; /* Padding is not needed as & of 0 pad is 0 */ PG_RETURN_VARBIT_P(result); }
0
[ "CWE-703", "CWE-189" ]
postgres
31400a673325147e1205326008e32135a78b4d8a
141,113,275,517,656,290,000,000,000,000,000,000,000
35
Predict integer overflow to avoid buffer overruns. Several functions, mostly type input functions, calculated an allocation size such that the calculation wrapped to a small positive value when arguments implied a sufficiently-large requirement. Writes past the end of the inadvertent small allocation followed shortly thereafter. Coverity identified the path_in() vulnerability; code inspection led to the rest. In passing, add check_stack_depth() to prevent stack overflow in related functions. Back-patch to 8.4 (all supported versions). The non-comment hstore changes touch code that did not exist in 8.4, so that part stops at 9.0. Noah Misch and Heikki Linnakangas, reviewed by Tom Lane. Security: CVE-2014-0064
void sendReplyToClient(connection *conn) { client *c = connGetPrivateData(conn); writeToClient(c,1); }
0
[ "CWE-770" ]
redis
5674b0057ff2903d43eaff802017eddf37c360f8
17,075,351,359,346,460,000,000,000,000,000,000,000
4
Prevent unauthenticated client from easily consuming lots of memory (CVE-2021-32675) This change sets a low limit for multibulk and bulk length in the protocol for unauthenticated connections, so that they can't easily cause redis to allocate massive amounts of memory by sending just a few characters on the network. The new limits are 10 arguments of 16kb each (instead of 1m of 512mb)
void NumberFormatTest::expectCurrency(NumberFormat& nf, const Locale& locale, double value, const UnicodeString& string) { UErrorCode ec = U_ZERO_ERROR; DecimalFormat& fmt = * (DecimalFormat*) &nf; const UChar DEFAULT_CURR[] = {45/*-*/,0}; UChar curr[4]; u_strcpy(curr, DEFAULT_CURR); if (*locale.getLanguage() != 0) { ucurr_forLocale(locale.getName(), curr, 4, &ec); assertSuccess("ucurr_forLocale", ec); fmt.setCurrency(curr, ec); assertSuccess("DecimalFormat::setCurrency", ec); fmt.setCurrency(curr); //Deprecated variant, for coverage only } UnicodeString s; fmt.format(value, s); s.findAndReplace((UChar32)0x00A0, (UChar32)0x0020); // Default display of the number yields "1234.5599999999999" // instead of "1234.56". Use a formatter to fix this. NumberFormat* f = NumberFormat::createInstance(Locale::getUS(), ec); UnicodeString v; if (U_FAILURE(ec)) { // Oops; bad formatter. Use default op+= display. v = (UnicodeString)"" + value; } else { f->setMaximumFractionDigits(4); f->setGroupingUsed(FALSE); f->format(value, v); } delete f; if (s == string) { logln((UnicodeString)"Ok: " + v + " x " + curr + " => " + prettify(s)); } else { errln((UnicodeString)"FAIL: " + v + " x " + curr + " => " + prettify(s) + ", expected " + prettify(string)); } }
0
[ "CWE-190" ]
icu
53d8c8f3d181d87a6aa925b449b51c4a2c922a51
282,891,844,453,077,300,000,000,000,000,000,000,000
40
ICU-20246 Fixing another integer overflow in number parsing.
splice_stream_with_progress (GInputStream *in, GOutputStream *out, GCancellable *cancellable, GFileProgressCallback progress_callback, gpointer progress_callback_data, GError **error) { int buffer[2] = { -1, -1 }; int buffer_size; gboolean res; goffset total_size; loff_t offset_in; loff_t offset_out; int fd_in, fd_out; fd_in = g_file_descriptor_based_get_fd (G_FILE_DESCRIPTOR_BASED (in)); fd_out = g_file_descriptor_based_get_fd (G_FILE_DESCRIPTOR_BASED (out)); if (!g_unix_open_pipe (buffer, FD_CLOEXEC, error)) return FALSE; #if defined(F_SETPIPE_SZ) && defined(F_GETPIPE_SZ) /* Try a 1MiB buffer for improved throughput. If that fails, use the default * pipe size. See: https://bugzilla.gnome.org/791457 */ buffer_size = fcntl (buffer[1], F_SETPIPE_SZ, 1024 * 1024); if (buffer_size <= 0) { int errsv; buffer_size = fcntl (buffer[1], F_GETPIPE_SZ); errsv = errno; if (buffer_size <= 0) { g_set_error (error, G_IO_ERROR, g_io_error_from_errno (errsv), _("Error splicing file: %s"), g_strerror (errsv)); res = FALSE; goto out; } } #else /* If #F_GETPIPE_SZ isn’t available, assume we’re on Linux < 2.6.35, * but ≥ 2.6.11, meaning the pipe capacity is 64KiB. Ignore the possibility of * running on Linux < 2.6.11 (where the capacity was the system page size, * typically 4KiB) because it’s ancient. See pipe(7). 
*/ buffer_size = 1024 * 64; #endif g_assert (buffer_size > 0); total_size = -1; /* avoid performance impact of querying total size when it's not needed */ if (progress_callback) { struct stat sbuf; if (fstat (fd_in, &sbuf) == 0) total_size = sbuf.st_size; } if (total_size == -1) total_size = 0; offset_in = offset_out = 0; res = FALSE; while (TRUE) { long n_read; long n_written; if (g_cancellable_set_error_if_cancelled (cancellable, error)) break; if (!do_splice (fd_in, &offset_in, buffer[1], NULL, buffer_size, &n_read, error)) break; if (n_read == 0) { res = TRUE; break; } while (n_read > 0) { if (g_cancellable_set_error_if_cancelled (cancellable, error)) goto out; if (!do_splice (buffer[0], NULL, fd_out, &offset_out, n_read, &n_written, error)) goto out; n_read -= n_written; } if (progress_callback) progress_callback (offset_in, total_size, progress_callback_data); } /* Make sure we send full copied size */ if (progress_callback) progress_callback (offset_in, total_size, progress_callback_data); if (!g_close (buffer[0], error)) goto out; buffer[0] = -1; if (!g_close (buffer[1], error)) goto out; buffer[1] = -1; out: if (buffer[0] != -1) (void) g_close (buffer[0], NULL); if (buffer[1] != -1) (void) g_close (buffer[1], NULL); return res; }
0
[ "CWE-362" ]
glib
d8f8f4d637ce43f8699ba94c9b7648beda0ca174
33,244,884,185,994,667,000,000,000,000,000,000,000
114
gfile: Limit access to files when copying file_copy_fallback creates new files with default permissions and set the correct permissions after the operation is finished. This might cause that the files can be accessible by more users during the operation than expected. Use G_FILE_CREATE_PRIVATE for the new files to limit access to those files.
static inline int is_valid_mmap(u64 token) { return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC); }
0
[ "CWE-416" ]
linux
3d2a9d642512c21a12d19b9250e7a835dcb41a79
205,089,436,361,385,300,000,000,000,000,000,000,000
4
IB/hfi1: Ensure correct mm is used at all times Two earlier bug fixes have created a security problem in the hfi1 driver. One fix aimed to solve an issue where current->mm was not valid when closing the hfi1 cdev. It attempted to do this by saving a cached value of the current->mm pointer at file open time. This is a problem if another process with access to the FD calls in via write() or ioctl() to pin pages via the hfi driver. The other fix tried to solve a use after free by taking a reference on the mm. To fix this correctly we use the existing cached value of the mm in the mmu notifier. Now we can check in the insert, evict, etc. routines that current->mm matched what the notifier was registered for. If not, then don't allow access. The register of the mmu notifier will save the mm pointer. Since in do_exit() the exit_mm() is called before exit_files(), which would call our close routine a reference is needed on the mm. We rely on the mmgrab done by the registration of the notifier, whereas before it was explicit. The mmu notifier deregistration happens when the user context is torn down, the creation of which triggered the registration. Also of note is we do not do any explicit work to protect the interval tree notifier. It doesn't seem that this is going to be needed since we aren't actually doing anything with current->mm. The interval tree notifier stuff still has a FIXME noted from a previous commit that will be addressed in a follow on patch. Cc: <[email protected]> Fixes: e0cf75deab81 ("IB/hfi1: Fix mm_struct use after free") Fixes: 3faa3d9a308e ("IB/hfi1: Make use of mm consistent") Link: https://lore.kernel.org/r/[email protected] Suggested-by: Jann Horn <[email protected]> Reported-by: Jason Gunthorpe <[email protected]> Reviewed-by: Ira Weiny <[email protected]> Reviewed-by: Mike Marciniszyn <[email protected]> Signed-off-by: Dennis Dalessandro <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]>
struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features) { struct sk_buff *segs = ERR_PTR(-EINVAL); unsigned int mss; int offset; __wsum csum; mss = skb_shinfo(skb)->gso_size; if (unlikely(skb->len <= mss)) goto out; if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { /* Packet is from an untrusted source, reset gso_segs. */ int type = skb_shinfo(skb)->gso_type; if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) || !(type & (SKB_GSO_UDP)))) goto out; skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); segs = NULL; goto out; } /* Do software UFO. Complete and fill in the UDP checksum as HW cannot * do checksum of UDP packets sent as multiple IP fragments. */ offset = skb_checksum_start_offset(skb); csum = skb_checksum(skb, offset, skb->len - offset, 0); offset += skb->csum_offset; *(__sum16 *)(skb->data + offset) = csum_fold(csum); skb->ip_summed = CHECKSUM_NONE; /* Fragment the skb. IP headers of the fragments are updated in * inet_gso_segment() */ segs = skb_segment(skb, features); out: return segs; }
0
[ "CWE-362" ]
linux-2.6
f6d8bd051c391c1c0458a30b2a7abcd939329259
6,206,510,690,073,357,000,000,000,000,000,000,000
41
inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <[email protected]> Cc: Herbert Xu <[email protected]> Signed-off-by: David S. Miller <[email protected]>
BSONObj operand2() { return BSON("" << 5); }
0
[ "CWE-835" ]
mongo
0a076417d1d7fba3632b73349a1fd29a83e68816
121,443,907,050,661,980,000,000,000,000,000,000,000
3
SERVER-38070 fix infinite loop in agg expression
void libxsmm_sparse_csc_reader( libxsmm_generated_code* io_generated_code, const char* i_csc_file_in, unsigned int** o_row_idx, unsigned int** o_column_idx, double** o_values, unsigned int* o_row_count, unsigned int* o_column_count, unsigned int* o_element_count ) { FILE *l_csc_file_handle; const unsigned int l_line_length = 512; char l_line[512/*l_line_length*/+1]; unsigned int l_header_read = 0; unsigned int* l_column_idx_id = NULL; unsigned int l_i = 0; l_csc_file_handle = fopen( i_csc_file_in, "r" ); if ( l_csc_file_handle == NULL ) { LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_CSC_INPUT ); return; } while (fgets(l_line, l_line_length, l_csc_file_handle) != NULL) { if ( strlen(l_line) == l_line_length ) { free(*o_row_idx); free(*o_column_idx); free(*o_values); free(l_column_idx_id); *o_row_idx = 0; *o_column_idx = 0; *o_values = 0; fclose( l_csc_file_handle ); /* close mtx file */ LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_CSC_READ_LEN ); return; } /* check if we are still reading comments header */ if ( l_line[0] == '%' ) { continue; } else { /* if we are the first line after comment header, we allocate our data structures */ if ( l_header_read == 0 ) { if (3 == sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) && 0 != *o_row_count && 0 != *o_column_count && 0 != *o_element_count) { /* allocate CSC data structure matching mtx file */ *o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_element_count)); *o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * ((size_t)(*o_column_count) + 1)); *o_values = (double*) malloc(sizeof(double) * (*o_element_count)); l_column_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * (*o_column_count)); /* check if mallocs were successful */ if ( ( *o_row_idx == NULL ) || ( *o_column_idx == NULL ) || ( *o_values == NULL ) || ( l_column_idx_id == NULL ) ) { free(*o_row_idx); free(*o_column_idx); free(*o_values); free(l_column_idx_id); *o_row_idx = 0; *o_column_idx = 0; 
*o_values = 0; fclose(l_csc_file_handle); /* close mtx file */ LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_CSC_ALLOC_DATA ); return; } /* set everything to zero for init */ memset(*o_row_idx, 0, sizeof(unsigned int) * (*o_element_count)); memset(*o_column_idx, 0, sizeof(unsigned int) * ((size_t)(*o_column_count) + 1)); memset(*o_values, 0, sizeof(double) * (*o_element_count)); memset(l_column_idx_id, 0, sizeof(unsigned int) * (*o_column_count)); /* init column idx */ for (l_i = 0; l_i <= *o_column_count; ++l_i) { (*o_column_idx)[l_i] = *o_element_count; } /* init */ (*o_column_idx)[0] = 0; l_i = 0; l_header_read = 1; } else { LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_CSC_READ_DESC ); fclose( l_csc_file_handle ); /* close mtx file */ return; } /* now we read the actual content */ } else { unsigned int l_row = 0, l_column = 0; double l_value = 0; /* read a line of content */ if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) { free(*o_row_idx); free(*o_column_idx); free(*o_values); free(l_column_idx_id); *o_row_idx = 0; *o_column_idx = 0; *o_values = 0; fclose(l_csc_file_handle); /* close mtx file */ LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_CSC_READ_ELEMS ); return; } /* adjust numbers to zero termination */ LIBXSMM_ASSERT(0 != l_row && 0 != l_column); l_row--; l_column--; /* add these values to row and value structure */ (*o_row_idx)[l_i] = l_row; (*o_values)[l_i] = l_value; l_i++; /* handle columns, set id to own for this column, yeah we need to handle empty columns */ l_column_idx_id[l_column] = 1; (*o_column_idx)[l_column+1] = l_i; } } } /* close mtx file */ fclose( l_csc_file_handle ); /* check if we read a file which was consistent */ if ( l_i != (*o_element_count) ) { free(*o_row_idx); free(*o_column_idx); free(*o_values); free(l_column_idx_id); *o_row_idx = 0; *o_column_idx = 0; *o_values = 0; LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_CSC_LEN ); return; } if ( l_column_idx_id != NULL ) { /* let's 
handle empty columns */ for ( l_i = 0; l_i < (*o_column_count); l_i++) { if ( l_column_idx_id[l_i] == 0 ) { (*o_column_idx)[l_i+1] = (*o_column_idx)[l_i]; } } /* free helper data structure */ free( l_column_idx_id ); } }
0
[ "CWE-119", "CWE-787" ]
libxsmm
151481489192e6d1997f8bde52c5c425ea41741d
140,983,365,412,809,230,000,000,000,000,000,000,000
124
Issue #287: made CSR/CSC readers more robust against invalid input (case #1).
get_buffcont( buffheader_T *buffer, int dozero) // count == zero is not an error { long_u count = 0; char_u *p = NULL; char_u *p2; char_u *str; buffblock_T *bp; // compute the total length of the string for (bp = buffer->bh_first.b_next; bp != NULL; bp = bp->b_next) count += (long_u)STRLEN(bp->b_str); if ((count || dozero) && (p = alloc(count + 1)) != NULL) { p2 = p; for (bp = buffer->bh_first.b_next; bp != NULL; bp = bp->b_next) for (str = bp->b_str; *str; ) *p2++ = *str++; *p2 = NUL; } return (p); }
0
[ "CWE-125" ]
vim
a4bc2dd7cccf5a4a9f78b58b6f35a45d17164323
85,472,208,984,311,370,000,000,000,000,000,000,000
24
patch 8.2.4233: crash when recording and using Select mode Problem: Crash when recording and using Select mode. Solution: When deleting the last recorded character check there is something to delete.
View_creation_ctx * View_creation_ctx::create(THD *thd, TABLE_LIST *view) { View_creation_ctx *ctx= new (thd->mem_root) View_creation_ctx(thd); /* Throw a warning if there is NULL cs name. */ if (!view->view_client_cs_name.str || !view->view_connection_cl_name.str) { push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_VIEW_NO_CREATION_CTX, ER_THD(thd, ER_VIEW_NO_CREATION_CTX), (const char *) view->db, (const char *) view->table_name); ctx->m_client_cs= system_charset_info; ctx->m_connection_cl= system_charset_info; return ctx; } /* Resolve cs names. Throw a warning if there is unknown cs name. */ bool invalid_creation_ctx; invalid_creation_ctx= resolve_charset(view->view_client_cs_name.str, system_charset_info, &ctx->m_client_cs); invalid_creation_ctx= resolve_collation(view->view_connection_cl_name.str, system_charset_info, &ctx->m_connection_cl) || invalid_creation_ctx; if (invalid_creation_ctx) { sql_print_warning("View '%s'.'%s': there is unknown charset/collation " "names (client: '%s'; connection: '%s').", (const char *) view->db, (const char *) view->table_name, (const char *) view->view_client_cs_name.str, (const char *) view->view_connection_cl_name.str); push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_VIEW_INVALID_CREATION_CTX, ER_THD(thd, ER_VIEW_INVALID_CREATION_CTX), (const char *) view->db, (const char *) view->table_name); } return ctx; }
0
[ "CWE-416" ]
server
4681b6f2d8c82b4ec5cf115e83698251963d80d5
274,036,428,740,596,830,000,000,000,000,000,000,000
53
MDEV-26281 ASAN use-after-poison when complex conversion is involved in blob the bug was that in_vector array in Item_func_in was allocated in the statement arena, not in the table->expr_arena. revert part of the 5acd391e8b2d. Instead, change the arena correctly in fix_all_session_vcol_exprs(). Remove TABLE_ARENA, that was introduced in 5acd391e8b2d to force item tree changes to be rolled back (because they were allocated in the wrong arena and didn't persist. now they do)
CharString *Formattable::internalGetCharString(UErrorCode &status) { if(fDecimalStr == NULL) { if (fDecimalQuantity == NULL) { // No decimal number for the formattable yet. Which means the value was // set directly by the user as an int, int64 or double. If the value came // from parsing, or from the user setting a decimal number, fDecimalNum // would already be set. // LocalPointer<DecimalQuantity> dq(new DecimalQuantity(), status); if (U_FAILURE(status)) { return nullptr; } populateDecimalQuantity(*dq, status); if (U_FAILURE(status)) { return nullptr; } fDecimalQuantity = dq.orphan(); } fDecimalStr = new CharString(); if (fDecimalStr == NULL) { status = U_MEMORY_ALLOCATION_ERROR; return NULL; } // Older ICUs called uprv_decNumberToString here, which is not exactly the same as // DecimalQuantity::toScientificString(). The biggest difference is that uprv_decNumberToString does // not print scientific notation for magnitudes greater than -5 and smaller than some amount (+5?). if (fDecimalQuantity->isZero()) { fDecimalStr->append("0", -1, status); } else if (std::abs(fDecimalQuantity->getMagnitude()) < 5) { fDecimalStr->appendInvariantChars(fDecimalQuantity->toPlainString(), status); } else { fDecimalStr->appendInvariantChars(fDecimalQuantity->toScientificString(), status); } } return fDecimalStr; }
1
[ "CWE-190" ]
icu
53d8c8f3d181d87a6aa925b449b51c4a2c922a51
178,923,371,622,098,230,000,000,000,000,000,000,000
33
ICU-20246 Fixing another integer overflow in number parsing.
static void kvmclock_reset(struct kvm_vcpu *vcpu) { kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.pv_time); vcpu->arch.time = 0; }
0
[ "CWE-476" ]
linux
fee060cd52d69c114b62d1a2948ea9648b5131f9
57,647,732,993,717,800,000,000,000,000,000,000,000
5
KVM: x86: avoid calling x86 emulator without a decoded instruction Whenever x86_decode_emulated_instruction() detects a breakpoint, it returns the value that kvm_vcpu_check_breakpoint() writes into its pass-by-reference second argument. Unfortunately this is completely bogus because the expected outcome of x86_decode_emulated_instruction is an EMULATION_* value. Then, if kvm_vcpu_check_breakpoint() does "*r = 0" (corresponding to a KVM_EXIT_DEBUG userspace exit), it is misunderstood as EMULATION_OK and x86_emulate_instruction() is called without having decoded the instruction. This causes various havoc from running with a stale emulation context. The fix is to move the call to kvm_vcpu_check_breakpoint() where it was before commit 4aa2691dcbd3 ("KVM: x86: Factor out x86 instruction emulation with decoding") introduced x86_decode_emulated_instruction(). The other caller of the function does not need breakpoint checks, because it is invoked as part of a vmexit and the processor has already checked those before executing the instruction that #GP'd. This fixes CVE-2022-1852. Reported-by: Qiuhao Li <[email protected]> Reported-by: Gaoning Pan <[email protected]> Reported-by: Yongkang Jia <[email protected]> Fixes: 4aa2691dcbd3 ("KVM: x86: Factor out x86 instruction emulation with decoding") Cc: [email protected] Signed-off-by: Sean Christopherson <[email protected]> Message-Id: <[email protected]> [Rewrote commit message according to Qiuhao's report, since a patch already existed to fix the bug. - Paolo] Signed-off-by: Paolo Bonzini <[email protected]>
static uint8_t avrcp_handle_get_play_status(struct avrcp *session, struct avrcp_header *pdu, uint8_t transaction) { struct avrcp_player *player = target_get_player(session); uint16_t len = ntohs(pdu->params_len); uint32_t position; uint32_t duration; if (len != 0) { pdu->params_len = htons(1); pdu->params[0] = AVRCP_STATUS_INVALID_PARAM; return AVC_CTYPE_REJECTED; } position = player_get_position(player); duration = player_get_duration(player); position = htonl(position); duration = htonl(duration); memcpy(&pdu->params[0], &duration, 4); memcpy(&pdu->params[4], &position, 4); pdu->params[8] = player_get_status(player); pdu->params_len = htons(9); return AVC_CTYPE_STABLE; }
0
[ "CWE-200" ]
bluez
e2b0f0d8d63e1223bb714a9efb37e2257818268b
16,505,520,710,786,870,000,000,000,000,000,000,000
29
avrcp: Fix not checking if params_len match number of received bytes This makes sure the number of bytes in the params_len matches the remaining bytes received so the code don't end up accessing invalid memory.
bool Config::have(ParmStr key) const { PosibErr<const KeyInfo *> pe = keyinfo(key); if (pe.has_err()) {pe.ignore_err(); return false;} return lookup(pe.data->name); }
0
[ "CWE-125" ]
aspell
80fa26c74279fced8d778351cff19d1d8f44fe4e
299,214,850,962,219,800,000,000,000,000,000,000,000
6
Fix various bugs found by OSS-Fuze.
R_API void r_bin_java_print_synthetic_attr_summary(RBinJavaAttrInfo *attr) { if (attr == NULL) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *Synthetic.\n"); return; } Eprintf ("Synthetic Attribute Information:\n"); Eprintf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); Eprintf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); Eprintf (" Attribute Length: %d\n", attr->length); Eprintf (" Attribute Index: %d\n", attr->info.source_file_attr.sourcefile_idx); }
0
[ "CWE-787" ]
radare2
9650e3c352f675687bf6c6f65ff2c4a3d0e288fa
37,637,662,817,357,450,000,000,000,000,000,000,000
11
Fix oobread segfault in java arith8.class ##crash * Reported by Cen Zhang via huntr.dev
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work) { unsigned long flags; spin_lock_irqsave(&dev->work_lock, flags); if (list_empty(&work->node)) { list_add_tail(&work->node, &dev->work_list); work->queue_seq++; wake_up_process(dev->worker); } spin_unlock_irqrestore(&dev->work_lock, flags); }
0
[]
linux-2.6
bd97120fc3d1a11f3124c7c9ba1d91f51829eb85
317,410,949,102,574,600,000,000,000,000,000,000,000
12
vhost: fix length for cross region descriptor If a single descriptor crosses a region, the second chunk length should be decremented by size translated so far, instead it includes the full descriptor length. Signed-off-by: Michael S. Tsirkin <[email protected]> Acked-by: Jason Wang <[email protected]> Signed-off-by: David S. Miller <[email protected]>
sync_cookie_create(Slapi_PBlock *pb, Sync_Cookie *client_cookie) { Sync_CallBackData scbd = {0}; int rc = 0; Sync_Cookie *sc = (Sync_Cookie *)slapi_ch_calloc(1, sizeof(Sync_Cookie)); scbd.cb_err = SYNC_CALLBACK_PREINIT; rc = sync_cookie_get_change_info(&scbd); if (rc == 0) { /* If the client is in openldap compat, we need to generate the same. */ if (client_cookie && client_cookie->openldap_compat) { sc->openldap_compat = client_cookie->openldap_compat; sc->cookie_client_signature = slapi_ch_strdup(client_cookie->cookie_client_signature); sc->cookie_server_signature = NULL; } else { sc->openldap_compat = PR_FALSE; sc->cookie_server_signature = sync_cookie_get_server_info(pb); sc->cookie_client_signature = sync_cookie_get_client_info(pb); } if (scbd.cb_err == SYNC_CALLBACK_PREINIT) { /* changenr is not initialized. */ sc->cookie_change_info = 0; } else { sc->cookie_change_info = scbd.changenr; } } else { slapi_ch_free((void **)&sc); sc = NULL; } return (sc); }
0
[ "CWE-476" ]
389-ds-base
d7eef2fcfbab2ef8aa6ee0bf60f0a9b16ede66e0
327,436,226,515,446,850,000,000,000,000,000,000,000
34
Issue 4711 - SIGSEV with sync_repl (#4738) Bug description: sync_repl sends back entries identified with a unique identifier that is 'nsuniqueid'. If 'nsuniqueid' is missing, then it may crash Fix description: Check a nsuniqueid is available else returns OP_ERR relates: https://github.com/389ds/389-ds-base/issues/4711 Reviewed by: Pierre Rogier, James Chapman, William Brown (Thanks!) Platforms tested: F33
/*
 * Initialize an ssl_context to sane defaults and allocate its I/O buffers.
 *
 * \param ssl  context to initialize (fully zeroed first)
 *
 * \return 0 on success, a POLARSSL_ERR_* code on failure
 *         (MPI parsing error or POLARSSL_ERR_SSL_MALLOC_FAILED).
 */
int ssl_init( ssl_context *ssl )
{
    int ret;
    int len = SSL_BUFFER_LEN;

    memset( ssl, 0, sizeof( ssl_context ) );

    /*
     * Sane defaults
     */
    ssl->min_major_ver = SSL_MIN_MAJOR_VERSION;
    ssl->min_minor_ver = SSL_MIN_MINOR_VERSION;
    ssl->max_major_ver = SSL_MAX_MAJOR_VERSION;
    ssl->max_minor_ver = SSL_MAX_MINOR_VERSION;

    ssl_set_ciphersuites( ssl, ssl_list_ciphersuites() );

#if defined(POLARSSL_SSL_RENEGOTIATION)
    ssl->renego_max_records = SSL_RENEGO_MAX_RECORDS_DEFAULT;
    /* All-ones period with a zero final byte: the default renegotiation
     * period marker. */
    memset( ssl->renego_period, 0xFF, 7 );
    ssl->renego_period[7] = 0x00;
#endif

#if defined(POLARSSL_DHM_C)
    /* Default DHM parameters: RFC 5114 2048-bit MODP group. */
    if( ( ret = mpi_read_string( &ssl->dhm_P, 16,
                                 POLARSSL_DHM_RFC5114_MODP_2048_P) ) != 0 ||
        ( ret = mpi_read_string( &ssl->dhm_G, 16,
                                 POLARSSL_DHM_RFC5114_MODP_2048_G) ) != 0 )
    {
        SSL_DEBUG_RET( 1, "mpi_read_string", ret );
        return( ret );
    }
#endif

    /*
     * Prepare base structures
     */
    /* On partial failure only in_ctr may be non-NULL; free it so the
     * context holds no dangling allocation. */
    if( ( ssl->in_ctr = polarssl_malloc( len ) ) == NULL ||
        ( ssl->out_ctr = polarssl_malloc( len ) ) == NULL )
    {
        SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", len ) );
        polarssl_free( ssl->in_ctr );
        ssl->in_ctr = NULL;
        return( POLARSSL_ERR_SSL_MALLOC_FAILED );
    }

    memset( ssl->in_ctr, 0, SSL_BUFFER_LEN );
    memset( ssl->out_ctr, 0, SSL_BUFFER_LEN );

    /* Fixed offsets into the record buffers: header at +8, IV/message
     * at +13 (presumably 8-byte counter followed by a 5-byte record
     * header -- layout defined elsewhere; confirm against the record
     * layer). */
    ssl->in_hdr = ssl->in_ctr + 8;
    ssl->in_iv  = ssl->in_ctr + 13;
    ssl->in_msg = ssl->in_ctr + 13;

    ssl->out_hdr = ssl->out_ctr + 8;
    ssl->out_iv  = ssl->out_ctr + 13;
    ssl->out_msg = ssl->out_ctr + 13;

#if defined(POLARSSL_SSL_ENCRYPT_THEN_MAC)
    ssl->encrypt_then_mac = SSL_ETM_ENABLED;
#endif
#if defined(POLARSSL_SSL_EXTENDED_MASTER_SECRET)
    ssl->extended_ms = SSL_EXTENDED_MS_ENABLED;
#endif
#if defined(POLARSSL_SSL_SESSION_TICKETS)
    ssl->ticket_lifetime = SSL_DEFAULT_TICKET_LIFETIME;
#endif
#if defined(POLARSSL_SSL_SET_CURVES)
    ssl->curve_list = ecp_grp_id_list( );
#endif

    /* Set up the per-handshake state last. */
    if( ( ret = ssl_handshake_init( ssl ) ) != 0 )
        return( ret );

    return( 0 );
}
0
[ "CWE-119" ]
mbedtls
c988f32adde62a169ba340fee0da15aecd40e76e
86,553,265,990,532,710,000,000,000,000,000,000,000
78
Added max length checking of hostname
/*
 * OBJ_NAME enumeration callback: append one cipher name to the Ruby
 * array @ary as a new String.  Always returns NULL so enumeration
 * continues.
 */
add_cipher_name_to_ary(const OBJ_NAME *name, VALUE ary)
{
    VALUE cipher_name = rb_str_new2(name->name);

    rb_ary_push(ary, cipher_name);

    return NULL;
}
0
[ "CWE-326", "CWE-310", "CWE-703" ]
openssl
8108e0a6db133f3375608303fdd2083eb5115062
255,051,342,800,021,800,000,000,000,000,000,000,000
5
cipher: don't set dummy encryption key in Cipher#initialize Remove the encryption key initialization from Cipher#initialize. This is effectively a revert of r32723 ("Avoid possible SEGV from AES encryption/decryption", 2011-07-28). r32723, which added the key initialization, was a workaround for Ruby Bug #2768. For some certain ciphers, calling EVP_CipherUpdate() before setting an encryption key caused segfault. It was not a problem until OpenSSL implemented GCM mode - the encryption key could be overridden by repeated calls of EVP_CipherInit_ex(). But, it is not the case for AES-GCM ciphers. Setting a key, an IV, a key, in this order causes the IV to be reset to an all-zero IV. The problem of Bug #2768 persists on the current versions of OpenSSL. So, make Cipher#update raise an exception if a key is not yet set by the user. Since encrypting or decrypting without key does not make any sense, this should not break existing applications. Users can still call Cipher#key= and Cipher#iv= multiple times with their own responsibility. Reference: https://bugs.ruby-lang.org/issues/2768 Reference: https://bugs.ruby-lang.org/issues/8221 Reference: https://github.com/ruby/openssl/issues/49
/*
 * Set the VM's return value to a copy of *value.
 * The value is copied by struct assignment, so the caller's object is
 * not referenced afterwards.
 */
njs_vm_retval_set(njs_vm_t *vm, const njs_value_t *value)
{
    vm->retval = *value;
}
0
[ "CWE-416" ]
njs
6a07c2156a07ef307b6dcf3c2ca8571a5f1af7a6
121,461,084,884,912,130,000,000,000,000,000,000,000
4
Fixed recursive async function calls. Previously, PromiseCapability record was stored (function->context) directly in function object during a function invocation. This is not correct, because PromiseCapability record should be linked to current execution context. As a result, function->context is overwritten with consecutive recursive calls which results in use-after-free. This closes #451 issue on Github.