Dataset schema (column: type, observed value range):
- func: string, length 0 to 484k
- target: int64, 0 to 1
- cwe: list, length 0 to 4
- project: string, 799 distinct values
- commit_id: string, length 40 (fixed)
- hash: float64, 1,215,700,430,453,689,100,000,000B to 340,281,914,521,452,260,000,000,000,000B
- size: int64, 1 to 24k
- message: string, length 0 to 13.3k
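The sample rows below follow this schema; each is shown as its func source, a labeled metadata line, and its commit message. As a quick, non-authoritative sketch of how rows in this shape can be consumed, the snippet below loads them and summarizes the target and cwe fields; the file name "vuln_records.jsonl" and the JSON Lines layout are assumptions made for the example, not something this preview specifies.

```python
# Minimal sketch. Assumptions (hypothetical, not stated by this preview):
# the rows are stored as JSON Lines in "vuln_records.jsonl", one object per
# row with the keys func, target, cwe, project, commit_id, hash, size, message.
import json
from collections import Counter

records = []
with open("vuln_records.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        line = line.strip()
        if line:
            records.append(json.loads(line))

# target is an int64 in {0, 1}; its exact semantics are not stated in this preview.
label_counts = Counter(r["target"] for r in records)

# cwe is a list of zero to four CWE identifiers per record.
cwe_counts = Counter(c for r in records for c in r["cwe"])

print("rows:", len(records))
print("label distribution:", dict(label_counts))
print("top CWEs:", cwe_counts.most_common(5))
print("distinct projects:", len({r["project"] for r in records}))
print("longest func (chars):", max((len(r["func"]) for r in records), default=0))
```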
hb_set_get_max (const hb_set_t *set) { return set->get_max (); }
target: 0 | cwe: [ "CWE-787" ] | project: harfbuzz | commit_id: d3e09bf4654fe5478b6dbf2b26ebab6271317d81 | hash: 334,606,342,536,626,100,000,000,000,000,000,000,000 | size: 4
message:
[set] Make all operators null-safe again Changed my mind. Also for hb_map_clear(). Part of https://github.com/harfbuzz/harfbuzz/pull/3162
static int ep_insert(struct eventpoll *ep, struct epoll_event *event, struct file *tfile, int fd) { int error, revents, pwake = 0; unsigned long flags; long user_watches; struct epitem *epi; struct ep_pqueue epq; user_watches = atomic_long_read(&ep->user->epoll_watches); if (unlikely(user_watches >= max_user_watches)) return -ENOSPC; if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL))) return -ENOMEM; /* Item initialization follow here ... */ INIT_LIST_HEAD(&epi->rdllink); INIT_LIST_HEAD(&epi->fllink); INIT_LIST_HEAD(&epi->pwqlist); epi->ep = ep; ep_set_ffd(&epi->ffd, tfile, fd); epi->event = *event; epi->nwait = 0; epi->next = EP_UNACTIVE_PTR; /* Initialize the poll table using the queue callback */ epq.epi = epi; init_poll_funcptr(&epq.pt, ep_ptable_queue_proc); epq.pt._key = event->events; /* * Attach the item to the poll hooks and get current event bits. * We can safely use the file* here because its usage count has * been increased by the caller of this function. Note that after * this operation completes, the poll callback can start hitting * the new item. */ revents = tfile->f_op->poll(tfile, &epq.pt); /* * We have to check if something went wrong during the poll wait queue * install process. Namely an allocation for a wait queue failed due * high memory pressure. */ error = -ENOMEM; if (epi->nwait < 0) goto error_unregister; /* Add the current item to the list of active epoll hook for this file */ spin_lock(&tfile->f_lock); list_add_tail(&epi->fllink, &tfile->f_ep_links); spin_unlock(&tfile->f_lock); /* * Add the current item to the RB tree. All RB tree operations are * protected by "mtx", and ep_insert() is called with "mtx" held. */ ep_rbtree_insert(ep, epi); /* now check if we've created too many backpaths */ error = -EINVAL; if (reverse_path_check()) goto error_remove_epi; /* We have to drop the new item inside our item list to keep track of it */ spin_lock_irqsave(&ep->lock, flags); /* If the file is already "ready" we drop it inside the ready list */ if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) { list_add_tail(&epi->rdllink, &ep->rdllist); /* Notify waiting tasks that events are available */ if (waitqueue_active(&ep->wq)) wake_up_locked(&ep->wq); if (waitqueue_active(&ep->poll_wait)) pwake++; } spin_unlock_irqrestore(&ep->lock, flags); atomic_long_inc(&ep->user->epoll_watches); /* We have to call this outside the lock */ if (pwake) ep_poll_safewake(&ep->poll_wait); return 0; error_remove_epi: spin_lock(&tfile->f_lock); if (ep_is_linked(&epi->fllink)) list_del_init(&epi->fllink); spin_unlock(&tfile->f_lock); rb_erase(&epi->rbn, &ep->rbr); error_unregister: ep_unregister_pollwait(ep, epi); /* * We need to do this because an event could have been arrived on some * allocated wait queue. Note that we don't care about the ep->ovflist * list, since that is used/cleaned only inside a section bound by "mtx". * And ep_insert() is called with "mtx" held. */ spin_lock_irqsave(&ep->lock, flags); if (ep_is_linked(&epi->rdllink)) list_del_init(&epi->rdllink); spin_unlock_irqrestore(&ep->lock, flags); kmem_cache_free(epi_cache, epi); return error; }
target: 0 | cwe: [] | project: linux-2.6 | commit_id: 13d518074a952d33d47c428419693f63389547e9 | hash: 109,307,612,435,176,940,000,000,000,000,000,000,000 | size: 114
message:
epoll: clear the tfile_check_list on -ELOOP An epoll_ctl(,EPOLL_CTL_ADD,,) operation can return '-ELOOP' to prevent circular epoll dependencies from being created. However, in that case we do not properly clear the 'tfile_check_list'. Thus, add a call to clear_tfile_check_list() for the -ELOOP case. Signed-off-by: Jason Baron <[email protected]> Reported-by: Yurij M. Plotnikov <[email protected]> Cc: Nelson Elhage <[email protected]> Cc: Davide Libenzi <[email protected]> Tested-by: Alexandra N. Kossovsky <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
std::unique_ptr<base::DictionaryValue> CreateFileSystemValue( const FileSystem& file_system) { auto file_system_value = std::make_unique<base::DictionaryValue>(); file_system_value->SetString("type", file_system.type); file_system_value->SetString("fileSystemName", file_system.file_system_name); file_system_value->SetString("rootURL", file_system.root_url); file_system_value->SetString("fileSystemPath", file_system.file_system_path); return file_system_value; }
target: 0 | cwe: [] | project: electron | commit_id: e9fa834757f41c0b9fe44a4dffe3d7d437f52d34 | hash: 259,609,433,550,219,300,000,000,000,000,000,000,000 | size: 9
message:
fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344) * fix: ensure ElectronBrowser mojo service is only bound to authorized render frames Notes: no-notes * refactor: extract electron API IPC to its own mojo interface * fix: just check main frame not primary main frame Co-authored-by: Samuel Attard <[email protected]> Co-authored-by: Samuel Attard <[email protected]>
static size_t __iovec_copy_from_user_inatomic(char *vaddr, const struct iovec *iov, size_t base, size_t bytes) { size_t copied = 0, left = 0; while (bytes) { char __user *buf = iov->iov_base + base; int copy = min(bytes, iov->iov_len - base); base = 0; left = __copy_from_user_inatomic(vaddr, buf, copy); copied += copy; bytes -= copy; vaddr += copy; iov++; if (unlikely(left)) break; } return copied - left; }
target: 0 | cwe: [ "CWE-17" ] | project: linux | commit_id: f0d1bec9d58d4c038d0ac958c9af82be6eb18045 | hash: 4,013,928,642,464,240,600,000,000,000,000,000,000 | size: 21
message:
new helper: copy_page_from_iter() parallel to copy_page_to_iter(). pipe_write() switched to it (and became ->write_iter()). Signed-off-by: Al Viro <[email protected]>
ZEND_VM_HOT_TYPE_SPEC_HANDLER(ZEND_IS_SMALLER_OR_EQUAL, (op1_info == MAY_BE_LONG && op2_info == MAY_BE_LONG), ZEND_IS_SMALLER_OR_EQUAL_LONG, CONST|TMPVARCV, CONST|TMPVARCV, SPEC(SMART_BRANCH,NO_CONST_CONST)) { USE_OPLINE zval *op1, *op2; int result; op1 = GET_OP1_ZVAL_PTR_UNDEF(BP_VAR_R); op2 = GET_OP2_ZVAL_PTR_UNDEF(BP_VAR_R); result = (Z_LVAL_P(op1) <= Z_LVAL_P(op2)); ZEND_VM_SMART_BRANCH(result, 0); ZVAL_BOOL(EX_VAR(opline->result.var), result); ZEND_VM_NEXT_OPCODE(); }
target: 0 | cwe: [ "CWE-787" ] | project: php-src | commit_id: f1ce8d5f5839cb2069ea37ff424fb96b8cd6932d | hash: 78,114,339,376,514,670,000,000,000,000,000,000,000 | size: 13
message:
Fix #73122: Integer Overflow when concatenating strings We must avoid integer overflows in memory allocations, so we introduce an additional check in the VM, and bail out in the rare case of an overflow. Since the recent fix for bug #74960 still doesn't catch all possible overflows, we fix that right away.
psutil_proc_cpu_times(PyObject *self, PyObject *args) { long pid; HANDLE hProcess; FILETIME ftCreate, ftExit, ftKernel, ftUser; if (! PyArg_ParseTuple(args, "l", &pid)) return NULL; hProcess = psutil_handle_from_pid(pid, PROCESS_QUERY_LIMITED_INFORMATION); if (hProcess == NULL) return NULL; if (! GetProcessTimes(hProcess, &ftCreate, &ftExit, &ftKernel, &ftUser)) { if (GetLastError() == ERROR_ACCESS_DENIED) { // usually means the process has died so we throw a NoSuchProcess // here NoSuchProcess(""); } else { PyErr_SetFromWindowsErr(0); } CloseHandle(hProcess); return NULL; } CloseHandle(hProcess); /* * User and kernel times are represented as a FILETIME structure * which contains a 64-bit value representing the number of * 100-nanosecond intervals since January 1, 1601 (UTC): * http://msdn.microsoft.com/en-us/library/ms724284(VS.85).aspx * To convert it into a float representing the seconds that the * process has executed in user/kernel mode I borrowed the code * below from Python's Modules/posixmodule.c */ return Py_BuildValue( "(dd)", (double)(ftUser.dwHighDateTime * 429.4967296 + \ ftUser.dwLowDateTime * 1e-7), (double)(ftKernel.dwHighDateTime * 429.4967296 + \ ftKernel.dwLowDateTime * 1e-7) ); }
target: 0 | cwe: [ "CWE-415" ] | project: psutil | commit_id: 7d512c8e4442a896d56505be3e78f1156f443465 | hash: 277,596,628,424,530,940,000,000,000,000,000,000,000 | size: 44
message:
Use Py_CLEAR instead of Py_DECREF to also set the variable to NULL (#1616) These files contain loops that convert system data into python objects and during the process they create objects and dereference their refcounts after they have been added to the resulting list. However, in case of errors during the creation of those python objects, the refcount to previously allocated objects is dropped again with Py_XDECREF, which should be a no-op in case the paramater is NULL. Even so, in most of these loops the variables pointing to the objects are never set to NULL, even after Py_DECREF is called at the end of the loop iteration. This means, after the first iteration, if an error occurs those python objects will get their refcount dropped two times, resulting in a possible double-free.
Error Box_ftyp::write(StreamWriter& writer) const { size_t box_start = reserve_box_header_space(writer); writer.write32(m_major_brand); writer.write32(m_minor_version); for (uint32_t b : m_compatible_brands) { writer.write32(b); } prepend_header(writer, box_start); return Error::Ok; }
target: 0 | cwe: [ "CWE-703" ] | project: libheif | commit_id: 2710c930918609caaf0a664e9c7bc3dce05d5b58 | hash: 295,809,674,861,913,160,000,000,000,000,000,000,000 | size: 15
message:
force fraction to a limited resolution to finally solve those pesky numerical edge cases
static int Elf_(fix_symbols)(ELFOBJ *bin, int nsym, int type, RBinElfSymbol **sym) { int count = 0; RBinElfSymbol *ret = *sym; RBinElfSymbol *phdr_symbols = (type == R_BIN_ELF_SYMBOLS) ? Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); RBinElfSymbol *tmp, *p; if (phdr_symbols) { RBinElfSymbol *d = ret; while (!d->last) { /* find match in phdr */ p = phdr_symbols; while (!p->last) { if (p->offset && d->offset == p->offset) { p->in_shdr = true; if (*p->name && strcmp (d->name, p->name)) { strcpy (d->name, p->name); } } p++; } d++; } p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { count++; } p++; } /*Take those symbols that are not present in the shdr but yes in phdr*/ /*This should only should happen with fucked up binaries*/ if (count > 0) { /*what happens if a shdr says it has only one symbol? we should look anyway into phdr*/ tmp = (RBinElfSymbol*)realloc (ret, (nsym + count + 1) * sizeof (RBinElfSymbol)); if (!tmp) { return -1; } ret = tmp; ret[nsym--].last = 0; p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { memcpy (&ret[++nsym], p, sizeof (RBinElfSymbol)); } p++; } ret[nsym + 1].last = 1; } *sym = ret; return nsym + 1; } return nsym; }
target: 0 | cwe: [ "CWE-125" ] | project: radare2 | commit_id: c6d0076c924891ad9948a62d89d0bcdaf965f0cd | hash: 205,388,715,154,572,200,000,000,000,000,000,000,000 | size: 54
message:
Fix #8731 - Crash in ELF parser with negative 32bit number
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino) { struct f2fs_sb_info *sbi = F2FS_SB(sb); struct inode *inode; int ret = 0; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) { trace_f2fs_iget(inode); return inode; } if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi)) goto make_now; ret = do_read_inode(inode); if (ret) goto bad_inode; make_now: if (ino == F2FS_NODE_INO(sbi)) { inode->i_mapping->a_ops = &f2fs_node_aops; mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); } else if (ino == F2FS_META_INO(sbi)) { inode->i_mapping->a_ops = &f2fs_meta_aops; mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); } else if (S_ISREG(inode->i_mode)) { inode->i_op = &f2fs_file_inode_operations; inode->i_fop = &f2fs_file_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &f2fs_dir_inode_operations; inode->i_fop = &f2fs_dir_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; inode_nohighmem(inode); } else if (S_ISLNK(inode->i_mode)) { if (f2fs_encrypted_inode(inode)) inode->i_op = &f2fs_encrypted_symlink_inode_operations; else inode->i_op = &f2fs_symlink_inode_operations; inode_nohighmem(inode); inode->i_mapping->a_ops = &f2fs_dblock_aops; } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { inode->i_op = &f2fs_special_inode_operations; init_special_inode(inode, inode->i_mode, inode->i_rdev); } else { ret = -EIO; goto bad_inode; } f2fs_set_inode_flags(inode); unlock_new_inode(inode); trace_f2fs_iget(inode); return inode; bad_inode: iget_failed(inode); trace_f2fs_iget_exit(inode, ret); return ERR_PTR(ret); }
target: 0 | cwe: [ "CWE-125" ] | project: linux | commit_id: e34438c903b653daca2b2a7de95aed46226f8ed3 | hash: 187,267,550,579,770,120,000,000,000,000,000,000,000 | size: 61
message:
f2fs: fix to do sanity check with node footer and iblocks This patch adds to do sanity check with below fields of inode to avoid reported panic. - node footer - iblocks https://bugzilla.kernel.org/show_bug.cgi?id=200223 - Overview BUG() triggered in f2fs_truncate_inode_blocks() when un-mounting a mounted f2fs image after writing to it - Reproduce - POC (poc.c) static void activity(char *mpoint) { char *foo_bar_baz; int err; static int buf[8192]; memset(buf, 0, sizeof(buf)); err = asprintf(&foo_bar_baz, "%s/foo/bar/baz", mpoint); // open / write / read int fd = open(foo_bar_baz, O_RDWR | O_TRUNC, 0777); if (fd >= 0) { write(fd, (char *)buf, 517); write(fd, (char *)buf, sizeof(buf)); close(fd); } } int main(int argc, char *argv[]) { activity(argv[1]); return 0; } - Kernel meesage [ 552.479723] F2FS-fs (loop0): Mounted with checkpoint version = 2 [ 556.451891] ------------[ cut here ]------------ [ 556.451899] kernel BUG at fs/f2fs/node.c:987! [ 556.452920] invalid opcode: 0000 [#1] SMP KASAN PTI [ 556.453936] CPU: 1 PID: 1310 Comm: umount Not tainted 4.18.0-rc1+ #4 [ 556.455213] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014 [ 556.457140] RIP: 0010:f2fs_truncate_inode_blocks+0x4a7/0x6f0 [ 556.458280] Code: e8 ae ea ff ff 41 89 c7 c1 e8 1f 84 c0 74 0a 41 83 ff fe 0f 85 35 ff ff ff 81 85 b0 fe ff ff fb 03 00 00 e9 f7 fd ff ff 0f 0b <0f> 0b e8 62 b7 9a 00 48 8b bd a0 fe ff ff e8 56 54 ae ff 48 8b b5 [ 556.462015] RSP: 0018:ffff8801f292f808 EFLAGS: 00010286 [ 556.463068] RAX: ffffed003e73242d RBX: ffff8801f292f958 RCX: ffffffffb88b81bc [ 556.464479] RDX: 0000000000000000 RSI: 0000000000000004 RDI: ffff8801f3992164 [ 556.465901] RBP: ffff8801f292f980 R08: ffffed003e73242d R09: ffffed003e73242d [ 556.467311] R10: 0000000000000001 R11: ffffed003e73242c R12: 00000000fffffc64 [ 556.468706] R13: ffff8801f3992000 R14: 0000000000000058 R15: 00000000ffff8801 [ 556.470117] FS: 00007f8029297840(0000) GS:ffff8801f6f00000(0000) knlGS:0000000000000000 [ 556.471702] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 556.472838] CR2: 000055f5f57305d8 CR3: 00000001f18b0000 CR4: 00000000000006e0 [ 556.474265] Call Trace: [ 556.474782] ? f2fs_alloc_nid_failed+0xf0/0xf0 [ 556.475686] ? truncate_nodes+0x980/0x980 [ 556.476516] ? pagecache_get_page+0x21f/0x2f0 [ 556.477412] ? __asan_loadN+0xf/0x20 [ 556.478153] ? __get_node_page+0x331/0x5b0 [ 556.478992] ? reweight_entity+0x1e6/0x3b0 [ 556.479826] f2fs_truncate_blocks+0x55e/0x740 [ 556.480709] ? f2fs_truncate_data_blocks+0x20/0x20 [ 556.481689] ? __radix_tree_lookup+0x34/0x160 [ 556.482630] ? radix_tree_lookup+0xd/0x10 [ 556.483445] f2fs_truncate+0xd4/0x1a0 [ 556.484206] f2fs_evict_inode+0x5ce/0x630 [ 556.485032] evict+0x16f/0x290 [ 556.485664] iput+0x280/0x300 [ 556.486300] dentry_unlink_inode+0x165/0x1e0 [ 556.487169] __dentry_kill+0x16a/0x260 [ 556.487936] dentry_kill+0x70/0x250 [ 556.488651] shrink_dentry_list+0x125/0x260 [ 556.489504] shrink_dcache_parent+0xc1/0x110 [ 556.490379] ? shrink_dcache_sb+0x200/0x200 [ 556.491231] ? 
bit_wait_timeout+0xc0/0xc0 [ 556.492047] do_one_tree+0x12/0x40 [ 556.492743] shrink_dcache_for_umount+0x3f/0xa0 [ 556.493656] generic_shutdown_super+0x43/0x1c0 [ 556.494561] kill_block_super+0x52/0x80 [ 556.495341] kill_f2fs_super+0x62/0x70 [ 556.496105] deactivate_locked_super+0x6f/0xa0 [ 556.497004] deactivate_super+0x5e/0x80 [ 556.497785] cleanup_mnt+0x61/0xa0 [ 556.498492] __cleanup_mnt+0x12/0x20 [ 556.499218] task_work_run+0xc8/0xf0 [ 556.499949] exit_to_usermode_loop+0x125/0x130 [ 556.500846] do_syscall_64+0x138/0x170 [ 556.501609] entry_SYSCALL_64_after_hwframe+0x44/0xa9 [ 556.502659] RIP: 0033:0x7f8028b77487 [ 556.503384] Code: 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 31 f6 e9 09 00 00 00 66 0f 1f 84 00 00 00 00 00 b8 a6 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d e1 c9 2b 00 f7 d8 64 89 01 48 [ 556.507137] RSP: 002b:00007fff9f2e3598 EFLAGS: 00000246 ORIG_RAX: 00000000000000a6 [ 556.508637] RAX: 0000000000000000 RBX: 0000000000ebd030 RCX: 00007f8028b77487 [ 556.510069] RDX: 0000000000000001 RSI: 0000000000000000 RDI: 0000000000ec41e0 [ 556.511481] RBP: 0000000000ec41e0 R08: 0000000000000000 R09: 0000000000000014 [ 556.512892] R10: 00000000000006b2 R11: 0000000000000246 R12: 00007f802908083c [ 556.514320] R13: 0000000000000000 R14: 0000000000ebd210 R15: 00007fff9f2e3820 [ 556.515745] Modules linked in: snd_hda_codec_generic snd_hda_intel snd_hda_codec snd_hwdep snd_hda_core snd_pcm snd_timer snd mac_hid i2c_piix4 soundcore ib_iser rdma_cm iw_cm ib_cm ib_core iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi raid10 raid456 async_raid6_recov async_memcpy async_pq async_xor async_tx raid1 raid0 multipath linear 8139too crct10dif_pclmul crc32_pclmul qxl drm_kms_helper syscopyarea aesni_intel sysfillrect sysimgblt fb_sys_fops ttm drm aes_x86_64 crypto_simd cryptd 8139cp glue_helper mii pata_acpi floppy [ 556.529276] ---[ end trace 4ce02f25ff7d3df5 ]--- [ 556.530340] RIP: 0010:f2fs_truncate_inode_blocks+0x4a7/0x6f0 [ 556.531513] Code: e8 ae ea ff ff 41 89 c7 c1 e8 1f 84 c0 74 0a 41 83 ff fe 0f 85 35 ff ff ff 81 85 b0 fe ff ff fb 03 00 00 e9 f7 fd ff ff 0f 0b <0f> 0b e8 62 b7 9a 00 48 8b bd a0 fe ff ff e8 56 54 ae ff 48 8b b5 [ 556.535330] RSP: 0018:ffff8801f292f808 EFLAGS: 00010286 [ 556.536395] RAX: ffffed003e73242d RBX: ffff8801f292f958 RCX: ffffffffb88b81bc [ 556.537824] RDX: 0000000000000000 RSI: 0000000000000004 RDI: ffff8801f3992164 [ 556.539290] RBP: ffff8801f292f980 R08: ffffed003e73242d R09: ffffed003e73242d [ 556.540709] R10: 0000000000000001 R11: ffffed003e73242c R12: 00000000fffffc64 [ 556.542131] R13: ffff8801f3992000 R14: 0000000000000058 R15: 00000000ffff8801 [ 556.543579] FS: 00007f8029297840(0000) GS:ffff8801f6f00000(0000) knlGS:0000000000000000 [ 556.545180] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 556.546338] CR2: 000055f5f57305d8 CR3: 00000001f18b0000 CR4: 00000000000006e0 [ 556.547809] ================================================================== [ 556.549248] BUG: KASAN: stack-out-of-bounds in arch_tlb_gather_mmu+0x52/0x170 [ 556.550672] Write of size 8 at addr ffff8801f292fd10 by task umount/1310 [ 556.552338] CPU: 1 PID: 1310 Comm: umount Tainted: G D 4.18.0-rc1+ #4 [ 556.553886] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014 [ 556.555756] Call Trace: [ 556.556264] dump_stack+0x7b/0xb5 [ 556.556944] print_address_description+0x70/0x290 [ 556.557903] kasan_report+0x291/0x390 [ 556.558649] ? 
arch_tlb_gather_mmu+0x52/0x170 [ 556.559537] __asan_store8+0x57/0x90 [ 556.560268] arch_tlb_gather_mmu+0x52/0x170 [ 556.561110] tlb_gather_mmu+0x12/0x40 [ 556.561862] exit_mmap+0x123/0x2a0 [ 556.562555] ? __ia32_sys_munmap+0x50/0x50 [ 556.563384] ? exit_aio+0x98/0x230 [ 556.564079] ? __x32_compat_sys_io_submit+0x260/0x260 [ 556.565099] ? taskstats_exit+0x1f4/0x640 [ 556.565925] ? kasan_check_read+0x11/0x20 [ 556.566739] ? mm_update_next_owner+0x322/0x380 [ 556.567652] mmput+0x8b/0x1d0 [ 556.568260] do_exit+0x43a/0x1390 [ 556.568937] ? mm_update_next_owner+0x380/0x380 [ 556.569855] ? deactivate_super+0x5e/0x80 [ 556.570668] ? cleanup_mnt+0x61/0xa0 [ 556.571395] ? __cleanup_mnt+0x12/0x20 [ 556.572156] ? task_work_run+0xc8/0xf0 [ 556.572917] ? exit_to_usermode_loop+0x125/0x130 [ 556.573861] rewind_stack_do_exit+0x17/0x20 [ 556.574707] RIP: 0033:0x7f8028b77487 [ 556.575428] Code: Bad RIP value. [ 556.576106] RSP: 002b:00007fff9f2e3598 EFLAGS: 00000246 ORIG_RAX: 00000000000000a6 [ 556.577599] RAX: 0000000000000000 RBX: 0000000000ebd030 RCX: 00007f8028b77487 [ 556.579020] RDX: 0000000000000001 RSI: 0000000000000000 RDI: 0000000000ec41e0 [ 556.580422] RBP: 0000000000ec41e0 R08: 0000000000000000 R09: 0000000000000014 [ 556.581833] R10: 00000000000006b2 R11: 0000000000000246 R12: 00007f802908083c [ 556.583252] R13: 0000000000000000 R14: 0000000000ebd210 R15: 00007fff9f2e3820 [ 556.584983] The buggy address belongs to the page: [ 556.585961] page:ffffea0007ca4bc0 count:0 mapcount:0 mapping:0000000000000000 index:0x0 [ 556.587540] flags: 0x2ffff0000000000() [ 556.588296] raw: 02ffff0000000000 0000000000000000 dead000000000200 0000000000000000 [ 556.589822] raw: 0000000000000000 0000000000000000 00000000ffffffff 0000000000000000 [ 556.591359] page dumped because: kasan: bad access detected [ 556.592786] Memory state around the buggy address: [ 556.593753] ffff8801f292fc00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 [ 556.595191] ffff8801f292fc80: 00 00 00 00 00 00 00 00 f1 f1 f1 f1 00 00 00 00 [ 556.596613] >ffff8801f292fd00: 00 00 f3 00 00 00 00 f3 f3 00 00 00 00 f4 f4 f4 [ 556.598044] ^ [ 556.598797] ffff8801f292fd80: f3 f3 f3 f3 00 00 00 00 00 00 00 00 00 00 00 00 [ 556.600225] ffff8801f292fe00: 00 00 00 00 00 00 00 00 f1 f1 f1 f1 00 f4 f4 f4 [ 556.601647] ================================================================== - Location https://elixir.bootlin.com/linux/v4.18-rc1/source/fs/f2fs/node.c#L987 case NODE_DIND_BLOCK: err = truncate_nodes(&dn, nofs, offset[1], 3); cont = 0; break; default: BUG(); <--- } Reported-by Wen Xu <[email protected]> Signed-off-by: Chao Yu <[email protected]> Signed-off-by: Jaegeuk Kim <[email protected]>
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) { int n; if (v == SEQ_START_TOKEN) { seq_puts(seq, "Group Origin Iif Pkts Bytes Wrong Oifs\n"); } else { const struct mfc_cache *mfc = v; const struct ipmr_mfc_iter *it = seq->private; seq_printf(seq, "%08lX %08lX %-3d %8ld %8ld %8ld", (unsigned long) mfc->mfc_mcastgrp, (unsigned long) mfc->mfc_origin, mfc->mfc_parent, mfc->mfc_un.res.pkt, mfc->mfc_un.res.bytes, mfc->mfc_un.res.wrong_if); if (it->cache != &mfc_unres_queue) { for(n = mfc->mfc_un.res.minvif; n < mfc->mfc_un.res.maxvif; n++ ) { if(VIF_EXISTS(n) && mfc->mfc_un.res.ttls[n] < 255) seq_printf(seq, " %2d:%-3d", n, mfc->mfc_un.res.ttls[n]); } } seq_putc(seq, '\n'); } return 0; }
target: 0 | cwe: [ "CWE-200" ] | project: linux-2.6 | commit_id: 9ef1d4c7c7aca1cd436612b6ca785b726ffb8ed8 | hash: 224,192,023,859,389,780,000,000,000,000,000,000,000 | size: 33
message:
[NETLINK]: Missing initializations in dumped data Mostly missing initialization of padding fields of 1 or 2 bytes length, two instances of uninitialized nlmsgerr->msg of 16 bytes length. Signed-off-by: Patrick McHardy <[email protected]> Signed-off-by: David S. Miller <[email protected]>
add_document_portal_args (GPtrArray *argv_array, const char *app_id) { g_autoptr(GDBusConnection) session_bus = NULL; g_autofree char *doc_mount_path = NULL; session_bus = g_bus_get_sync (G_BUS_TYPE_SESSION, NULL, NULL); if (session_bus) { g_autoptr(GError) local_error = NULL; g_autoptr(GDBusMessage) reply = NULL; g_autoptr(GDBusMessage) msg = g_dbus_message_new_method_call ("org.freedesktop.portal.Documents", "/org/freedesktop/portal/documents", "org.freedesktop.portal.Documents", "GetMountPoint"); g_dbus_message_set_body (msg, g_variant_new ("()")); reply = g_dbus_connection_send_message_with_reply_sync (session_bus, msg, G_DBUS_SEND_MESSAGE_FLAGS_NONE, 30000, NULL, NULL, NULL); if (reply) { if (g_dbus_message_to_gerror (reply, &local_error)) { g_message ("Can't get document portal: %s\n", local_error->message); } else { g_autofree char *src_path = NULL; g_autofree char *dst_path = NULL; g_variant_get (g_dbus_message_get_body (reply), "(^ay)", &doc_mount_path); src_path = g_strdup_printf ("%s/by-app/%s", doc_mount_path, app_id); dst_path = g_strdup_printf ("/run/user/%d/doc", getuid ()); add_args (argv_array, "--bind", src_path, dst_path, NULL); } } } }
target: 0 | cwe: [ "CWE-20" ] | project: flatpak | commit_id: 902fb713990a8f968ea4350c7c2a27ff46f1a6c4 | hash: 130,347,917,465,921,680,000,000,000,000,000,000,000 | size: 45
message:
Use seccomp to filter out TIOCSTI ioctl This would otherwise let the sandbox add input to the controlling tty.
int may_umount(struct vfsmount *mnt) { int ret = 1; down_read(&namespace_sem); lock_mount_hash(); if (propagate_mount_busy(real_mount(mnt), 2)) ret = 0; unlock_mount_hash(); up_read(&namespace_sem); return ret; }
target: 0 | cwe: [ "CWE-269" ] | project: user-namespace | commit_id: a6138db815df5ee542d848318e5dae681590fccd | hash: 270,074,117,050,143,600,000,000,000,000,000,000,000 | size: 11
message:
mnt: Only change user settable mount flags in remount Kenton Varda <[email protected]> discovered that by remounting a read-only bind mount read-only in a user namespace the MNT_LOCK_READONLY bit would be cleared, allowing an unprivileged user to the remount a read-only mount read-write. Correct this by replacing the mask of mount flags to preserve with a mask of mount flags that may be changed, and preserve all others. This ensures that any future bugs with this mask and remount will fail in an easy to detect way where new mount flags simply won't change. Cc: [email protected] Acked-by: Serge E. Hallyn <[email protected]> Signed-off-by: "Eric W. Biederman" <[email protected]>
pq_getstring(StringInfo s) { int i; Assert(PqCommReadingMsg); resetStringInfo(s); /* Read until we get the terminating '\0' */ for (;;) { while (PqRecvPointer >= PqRecvLength) { if (pq_recvbuf()) /* If nothing in buffer, then recv some */ return EOF; /* Failed to recv data */ } for (i = PqRecvPointer; i < PqRecvLength; i++) { if (PqRecvBuffer[i] == '\0') { /* include the '\0' in the copy */ appendBinaryStringInfo(s, PqRecvBuffer + PqRecvPointer, i - PqRecvPointer + 1); PqRecvPointer = i + 1; /* advance past \0 */ return 0; } } /* If we're here we haven't got the \0 in the buffer yet. */ appendBinaryStringInfo(s, PqRecvBuffer + PqRecvPointer, PqRecvLength - PqRecvPointer); PqRecvPointer = PqRecvLength; } }
target: 0 | cwe: [ "CWE-89" ] | project: postgres | commit_id: 2b3a8b20c2da9f39ffecae25ab7c66974fbc0d3b | hash: 279,690,430,386,992,200,000,000,000,000,000,000,000 | size: 35
message:
Be more careful to not lose sync in the FE/BE protocol. If any error occurred while we were in the middle of reading a protocol message from the client, we could lose sync, and incorrectly try to interpret a part of another message as a new protocol message. That will usually lead to an "invalid frontend message" error that terminates the connection. However, this is a security issue because an attacker might be able to deliberately cause an error, inject a Query message in what's supposed to be just user data, and have the server execute it. We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other operations that could ereport(ERROR) in the middle of processing a message, but a query cancel interrupt or statement timeout could nevertheless cause it to happen. Also, the V2 fastpath and COPY handling were not so careful. It's very difficult to recover in the V2 COPY protocol, so we will just terminate the connection on error. In practice, that's what happened previously anyway, as we lost protocol sync. To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set whenever we're in the middle of reading a message. When it's set, we cannot safely ERROR out and continue running, because we might've read only part of a message. PqCommReadingMsg acts somewhat similarly to critical sections in that if an error occurs while it's set, the error handler will force the connection to be terminated, as if the error was FATAL. It's not implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted to PANIC in critical sections, because we want to be able to use PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes advantage of that to prevent an OOM error from terminating the connection. To prevent unnecessary connection terminations, add a holdoff mechanism similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel interrupts, but still allow die interrupts. The rules on which interrupts are processed when are now a bit more complicated, so refactor ProcessInterrupts() and the calls to it in signal handlers so that the signal handlers always call it if ImmediateInterruptOK is set, and ProcessInterrupts() can decide to not do anything if the other conditions are not met. Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund. Backpatch to all supported versions. Security: CVE-2015-0244
decodets( char *str, l_fp *lfp ) { char *cp; char buf[30]; size_t b; /* * If it starts with a 0x, decode as hex. */ if (*str == '0' && (*(str+1) == 'x' || *(str+1) == 'X')) return hextolfp(str+2, lfp); /* * If it starts with a '"', try it as an RT-11 date. */ if (*str == '"') { cp = str + 1; b = 0; while ('"' != *cp && '\0' != *cp && b < COUNTOF(buf) - 1) buf[b++] = *cp++; buf[b] = '\0'; return rtdatetolfp(buf, lfp); } /* * Might still be hex. Check out the first character. Talk * about heuristics! */ if ((*str >= 'A' && *str <= 'F') || (*str >= 'a' && *str <= 'f')) return hextolfp(str, lfp); /* * Try it as a decimal. If this fails, try as an unquoted * RT-11 date. This code should go away eventually. */ if (atolfp(str, lfp)) return 1; return rtdatetolfp(str, lfp); }
target: 0 | cwe: [ "CWE-20" ] | project: ntp | commit_id: 07a5b8141e354a998a52994c3c9cd547927e56ce | hash: 230,687,940,889,403,020,000,000,000,000,000,000,000 | size: 44
message:
[TALOS-CAN-0063] avoid buffer overrun in ntpq
static int proc_cwd_link(struct dentry *dentry, struct path *path) { struct task_struct *task = get_proc_task(dentry->d_inode); int result = -ENOENT; if (task) { task_lock(task); if (task->fs) { get_fs_pwd(task->fs, path); result = 0; } task_unlock(task); put_task_struct(task); } return result; }
target: 0 | cwe: [] | project: linux | commit_id: 0499680a42141d86417a8fbaa8c8db806bea1201 | hash: 106,597,650,919,289,920,000,000,000,000,000,000,000 | size: 16
message:
procfs: add hidepid= and gid= mount options Add support for mount options to restrict access to /proc/PID/ directories. The default backward-compatible "relaxed" behaviour is left untouched. The first mount option is called "hidepid" and its value defines how much info about processes we want to be available for non-owners: hidepid=0 (default) means the old behavior - anybody may read all world-readable /proc/PID/* files. hidepid=1 means users may not access any /proc/<pid>/ directories, but their own. Sensitive files like cmdline, sched*, status are now protected against other users. As permission checking done in proc_pid_permission() and files' permissions are left untouched, programs expecting specific files' modes are not confused. hidepid=2 means hidepid=1 plus all /proc/PID/ will be invisible to other users. It doesn't mean that it hides whether a process exists (it can be learned by other means, e.g. by kill -0 $PID), but it hides process' euid and egid. It compicates intruder's task of gathering info about running processes, whether some daemon runs with elevated privileges, whether another user runs some sensitive program, whether other users run any program at all, etc. gid=XXX defines a group that will be able to gather all processes' info (as in hidepid=0 mode). This group should be used instead of putting nonroot user in sudoers file or something. However, untrusted users (like daemons, etc.) which are not supposed to monitor the tasks in the whole system should not be added to the group. hidepid=1 or higher is designed to restrict access to procfs files, which might reveal some sensitive private information like precise keystrokes timings: http://www.openwall.com/lists/oss-security/2011/11/05/3 hidepid=1/2 doesn't break monitoring userspace tools. ps, top, pgrep, and conky gracefully handle EPERM/ENOENT and behave as if the current user is the only user running processes. pstree shows the process subtree which contains "pstree" process. Note: the patch doesn't deal with setuid/setgid issues of keeping preopened descriptors of procfs files (like https://lkml.org/lkml/2011/2/7/368). We rely on that the leaked information like the scheduling counters of setuid apps doesn't threaten anybody's privacy - only the user started the setuid program may read the counters. Signed-off-by: Vasiliy Kulikov <[email protected]> Cc: Alexey Dobriyan <[email protected]> Cc: Al Viro <[email protected]> Cc: Randy Dunlap <[email protected]> Cc: "H. Peter Anvin" <[email protected]> Cc: Greg KH <[email protected]> Cc: Theodore Tso <[email protected]> Cc: Alan Cox <[email protected]> Cc: James Morris <[email protected]> Cc: Oleg Nesterov <[email protected]> Cc: Hugh Dickins <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) { struct wiphy *wiphy = cfg_to_wiphy(cfg); struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0); struct ieee80211_supported_band *band; struct brcmf_fil_bwcap_le band_bwcap; struct brcmf_chanspec_list *list; u8 *pbuf; u32 val; int err; struct brcmu_chan ch; u32 num_chan; int i, j; /* verify support for bw_cap command */ val = WLC_BAND_5G; err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &val); if (!err) { /* only set 2G bandwidth using bw_cap command */ band_bwcap.band = cpu_to_le32(WLC_BAND_2G); band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ); err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap, sizeof(band_bwcap)); } else { brcmf_dbg(INFO, "fallback to mimo_bw_cap\n"); val = WLC_N_BW_40ALL; err = brcmf_fil_iovar_int_set(ifp, "mimo_bw_cap", val); } if (!err) { /* update channel info in 2G band */ pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL); if (pbuf == NULL) return -ENOMEM; ch.band = BRCMU_CHAN_BAND_2G; ch.bw = BRCMU_CHAN_BW_40; ch.sb = BRCMU_CHAN_SB_NONE; ch.chnum = 0; cfg->d11inf.encchspec(&ch); /* pass encoded chanspec in query */ *(__le16 *)pbuf = cpu_to_le16(ch.chspec); err = brcmf_fil_iovar_data_get(ifp, "chanspecs", pbuf, BRCMF_DCMD_MEDLEN); if (err) { bphy_err(wiphy, "get chanspecs error (%d)\n", err); kfree(pbuf); return err; } band = cfg_to_wiphy(cfg)->bands[NL80211_BAND_2GHZ]; list = (struct brcmf_chanspec_list *)pbuf; num_chan = le32_to_cpu(list->count); for (i = 0; i < num_chan; i++) { ch.chspec = (u16)le32_to_cpu(list->element[i]); cfg->d11inf.decchspec(&ch); if (WARN_ON(ch.band != BRCMU_CHAN_BAND_2G)) continue; if (WARN_ON(ch.bw != BRCMU_CHAN_BW_40)) continue; for (j = 0; j < band->n_channels; j++) { if (band->channels[j].hw_value == ch.control_ch_num) break; } if (WARN_ON(j == band->n_channels)) continue; brcmf_update_bw40_channel_flag(&band->channels[j], &ch); } kfree(pbuf); } return err; }
target: 0 | cwe: [ "CWE-787" ] | project: linux | commit_id: 1b5e2423164b3670e8bc9174e4762d297990deff | hash: 177,296,069,623,130,930,000,000,000,000,000,000,000 | size: 77
message:
brcmfmac: assure SSID length from firmware is limited The SSID length as received from firmware should not exceed IEEE80211_MAX_SSID_LEN as that would result in heap overflow. Reviewed-by: Hante Meuleman <[email protected]> Reviewed-by: Pieter-Paul Giesberts <[email protected]> Reviewed-by: Franky Lin <[email protected]> Signed-off-by: Arend van Spriel <[email protected]> Signed-off-by: Kalle Valo <[email protected]>
* %NULL on failure */ struct iscsi_bus_flash_session * iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index, struct iscsi_transport *transport, int dd_size) { struct iscsi_bus_flash_session *fnode_sess; int err; fnode_sess = kzalloc(sizeof(*fnode_sess) + dd_size, GFP_KERNEL); if (!fnode_sess) return NULL; fnode_sess->transport = transport; fnode_sess->target_id = index; fnode_sess->dev.type = &iscsi_flashnode_sess_dev_type; fnode_sess->dev.bus = &iscsi_flashnode_bus; fnode_sess->dev.parent = &shost->shost_gendev; dev_set_name(&fnode_sess->dev, "flashnode_sess-%u:%u", shost->host_no, index); err = device_register(&fnode_sess->dev); if (err) goto free_fnode_sess; if (dd_size) fnode_sess->dd_data = &fnode_sess[1]; return fnode_sess; free_fnode_sess:
target: 0 | cwe: [ "CWE-787" ] | project: linux | commit_id: ec98ea7070e94cc25a422ec97d1421e28d97b7ee | hash: 298,197,815,191,497,340,000,000,000,000,000,000,000 | size: 32
message:
scsi: iscsi: Ensure sysfs attributes are limited to PAGE_SIZE As the iSCSI parameters are exported back through sysfs, it should be enforcing that they never are more than PAGE_SIZE (which should be more than enough) before accepting updates through netlink. Change all iSCSI sysfs attributes to use sysfs_emit(). Cc: [email protected] Reported-by: Adam Nichols <[email protected]> Reviewed-by: Lee Duncan <[email protected]> Reviewed-by: Greg Kroah-Hartman <[email protected]> Reviewed-by: Mike Christie <[email protected]> Signed-off-by: Chris Leech <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg) { struct btrfs_ioctl_vol_args *vol_args; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running, 1)) { return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; } mutex_lock(&root->fs_info->volume_mutex); vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) { ret = PTR_ERR(vol_args); goto out; } vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; ret = btrfs_init_new_device(root, vol_args->name); if (!ret) btrfs_info(root->fs_info, "disk added %s",vol_args->name); kfree(vol_args); out: mutex_unlock(&root->fs_info->volume_mutex); atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0); return ret; }
target: 0 | cwe: [ "CWE-200" ] | project: linux | commit_id: 8039d87d9e473aeb740d4fdbd59b9d2f89b2ced9 | hash: 13,474,245,797,156,590,000,000,000,000,000,000,000 | size: 32
message:
Btrfs: fix file corruption and data loss after cloning inline extents Currently the clone ioctl allows to clone an inline extent from one file to another that already has other (non-inlined) extents. This is a problem because btrfs is not designed to deal with files having inline and regular extents, if a file has an inline extent then it must be the only extent in the file and must start at file offset 0. Having a file with an inline extent followed by regular extents results in EIO errors when doing reads or writes against the first 4K of the file. Also, the clone ioctl allows one to lose data if the source file consists of a single inline extent, with a size of N bytes, and the destination file consists of a single inline extent with a size of M bytes, where we have M > N. In this case the clone operation removes the inline extent from the destination file and then copies the inline extent from the source file into the destination file - we lose the M - N bytes from the destination file, a read operation will get the value 0x00 for any bytes in the the range [N, M] (the destination inode's i_size remained as M, that's why we can read past N bytes). So fix this by not allowing such destructive operations to happen and return errno EOPNOTSUPP to user space. Currently the fstest btrfs/035 tests the data loss case but it totally ignores this - i.e. expects the operation to succeed and does not check the we got data loss. The following test case for fstests exercises all these cases that result in file corruption and data loss: seq=`basename $0` seqres=$RESULT_DIR/$seq echo "QA output created by $seq" tmp=/tmp/$$ status=1 # failure is the default! trap "_cleanup; exit \$status" 0 1 2 3 15 _cleanup() { rm -f $tmp.* } # get standard environment, filters and checks . ./common/rc . ./common/filter # real QA test starts here _need_to_be_root _supported_fs btrfs _supported_os Linux _require_scratch _require_cloner _require_btrfs_fs_feature "no_holes" _require_btrfs_mkfs_feature "no-holes" rm -f $seqres.full test_cloning_inline_extents() { local mkfs_opts=$1 local mount_opts=$2 _scratch_mkfs $mkfs_opts >>$seqres.full 2>&1 _scratch_mount $mount_opts # File bar, the source for all the following clone operations, consists # of a single inline extent (50 bytes). $XFS_IO_PROG -f -c "pwrite -S 0xbb 0 50" $SCRATCH_MNT/bar \ | _filter_xfs_io # Test cloning into a file with an extent (non-inlined) where the # destination offset overlaps that extent. It should not be possible to # clone the inline extent from file bar into this file. $XFS_IO_PROG -f -c "pwrite -S 0xaa 0K 16K" $SCRATCH_MNT/foo \ | _filter_xfs_io $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo # Doing IO against any range in the first 4K of the file should work. # Due to a past clone ioctl bug which allowed cloning the inline extent, # these operations resulted in EIO errors. echo "File foo data after clone operation:" # All bytes should have the value 0xaa (clone operation failed and did # not modify our file). od -t x1 $SCRATCH_MNT/foo $XFS_IO_PROG -c "pwrite -S 0xcc 0 100" $SCRATCH_MNT/foo | _filter_xfs_io # Test cloning the inline extent against a file which has a hole in its # first 4K followed by a non-inlined extent. It should not be possible # as well to clone the inline extent from file bar into this file. $XFS_IO_PROG -f -c "pwrite -S 0xdd 4K 12K" $SCRATCH_MNT/foo2 \ | _filter_xfs_io $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo2 # Doing IO against any range in the first 4K of the file should work. 
# Due to a past clone ioctl bug which allowed cloning the inline extent, # these operations resulted in EIO errors. echo "File foo2 data after clone operation:" # All bytes should have the value 0x00 (clone operation failed and did # not modify our file). od -t x1 $SCRATCH_MNT/foo2 $XFS_IO_PROG -c "pwrite -S 0xee 0 90" $SCRATCH_MNT/foo2 | _filter_xfs_io # Test cloning the inline extent against a file which has a size of zero # but has a prealloc extent. It should not be possible as well to clone # the inline extent from file bar into this file. $XFS_IO_PROG -f -c "falloc -k 0 1M" $SCRATCH_MNT/foo3 | _filter_xfs_io $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo3 # Doing IO against any range in the first 4K of the file should work. # Due to a past clone ioctl bug which allowed cloning the inline extent, # these operations resulted in EIO errors. echo "First 50 bytes of foo3 after clone operation:" # Should not be able to read any bytes, file has 0 bytes i_size (the # clone operation failed and did not modify our file). od -t x1 $SCRATCH_MNT/foo3 $XFS_IO_PROG -c "pwrite -S 0xff 0 90" $SCRATCH_MNT/foo3 | _filter_xfs_io # Test cloning the inline extent against a file which consists of a # single inline extent that has a size not greater than the size of # bar's inline extent (40 < 50). # It should be possible to do the extent cloning from bar to this file. $XFS_IO_PROG -f -c "pwrite -S 0x01 0 40" $SCRATCH_MNT/foo4 \ | _filter_xfs_io $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo4 # Doing IO against any range in the first 4K of the file should work. echo "File foo4 data after clone operation:" # Must match file bar's content. od -t x1 $SCRATCH_MNT/foo4 $XFS_IO_PROG -c "pwrite -S 0x02 0 90" $SCRATCH_MNT/foo4 | _filter_xfs_io # Test cloning the inline extent against a file which consists of a # single inline extent that has a size greater than the size of bar's # inline extent (60 > 50). # It should not be possible to clone the inline extent from file bar # into this file. $XFS_IO_PROG -f -c "pwrite -S 0x03 0 60" $SCRATCH_MNT/foo5 \ | _filter_xfs_io $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo5 # Reading the file should not fail. echo "File foo5 data after clone operation:" # Must have a size of 60 bytes, with all bytes having a value of 0x03 # (the clone operation failed and did not modify our file). od -t x1 $SCRATCH_MNT/foo5 # Test cloning the inline extent against a file which has no extents but # has a size greater than bar's inline extent (16K > 50). # It should not be possible to clone the inline extent from file bar # into this file. $XFS_IO_PROG -f -c "truncate 16K" $SCRATCH_MNT/foo6 | _filter_xfs_io $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo6 # Reading the file should not fail. echo "File foo6 data after clone operation:" # Must have a size of 16K, with all bytes having a value of 0x00 (the # clone operation failed and did not modify our file). od -t x1 $SCRATCH_MNT/foo6 # Test cloning the inline extent against a file which has no extents but # has a size not greater than bar's inline extent (30 < 50). # It should be possible to clone the inline extent from file bar into # this file. $XFS_IO_PROG -f -c "truncate 30" $SCRATCH_MNT/foo7 | _filter_xfs_io $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo7 # Reading the file should not fail. echo "File foo7 data after clone operation:" # Must have a size of 50 bytes, with all bytes having a value of 0xbb. 
od -t x1 $SCRATCH_MNT/foo7 # Test cloning the inline extent against a file which has a size not # greater than the size of bar's inline extent (20 < 50) but has # a prealloc extent that goes beyond the file's size. It should not be # possible to clone the inline extent from bar into this file. $XFS_IO_PROG -f -c "falloc -k 0 1M" \ -c "pwrite -S 0x88 0 20" \ $SCRATCH_MNT/foo8 | _filter_xfs_io $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo8 echo "File foo8 data after clone operation:" # Must have a size of 20 bytes, with all bytes having a value of 0x88 # (the clone operation did not modify our file). od -t x1 $SCRATCH_MNT/foo8 _scratch_unmount } echo -e "\nTesting without compression and without the no-holes feature...\n" test_cloning_inline_extents echo -e "\nTesting with compression and without the no-holes feature...\n" test_cloning_inline_extents "" "-o compress" echo -e "\nTesting without compression and with the no-holes feature...\n" test_cloning_inline_extents "-O no-holes" "" echo -e "\nTesting with compression and with the no-holes feature...\n" test_cloning_inline_extents "-O no-holes" "-o compress" status=0 exit Cc: [email protected] Signed-off-by: Filipe Manana <[email protected]>
bool __fastcall TRemoteMoveDialog::Execute(UnicodeString & Target, UnicodeString & FileMask) { Combo->Items = CustomWinConfiguration->History[L"RemoteTarget"]; Combo->Text = UnixIncludeTrailingBackslash(Target) + FileMask; bool Result = TCustomDialog::Execute(); if (Result) { Target = UnixExtractFilePath(Combo->Text); FileMask = GetFileMask(); Combo->SaveToHistory(); CustomWinConfiguration->History[L"RemoteTarget"] = Combo->Items; } return Result; }
target: 0 | cwe: [ "CWE-787" ] | project: winscp | commit_id: faa96e8144e6925a380f94a97aa382c9427f688d | hash: 299,122,003,924,190,260,000,000,000,000,000,000,000 | size: 14
message:
Bug 1943: Prevent loading session settings that can lead to remote code execution from handled URLs https://winscp.net/tracker/1943 (cherry picked from commit ec584f5189a856cd79509f754722a6898045c5e0) Source commit: 0f4be408b3f01132b00682da72d925d6c4ee649b
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, int vector, int level, int trig_mode, struct dest_map *dest_map) { int result = 0; struct kvm_vcpu *vcpu = apic->vcpu; trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector); switch (delivery_mode) { case APIC_DM_LOWEST: vcpu->arch.apic_arb_prio++; fallthrough; case APIC_DM_FIXED: if (unlikely(trig_mode && !level)) break; /* FIXME add logic for vcpu on reset */ if (unlikely(!apic_enabled(apic))) break; result = 1; if (dest_map) { __set_bit(vcpu->vcpu_id, dest_map->map); dest_map->vectors[vcpu->vcpu_id] = vector; } if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) { if (trig_mode) kvm_lapic_set_vector(vector, apic->regs + APIC_TMR); else kvm_lapic_clear_vector(vector, apic->regs + APIC_TMR); } static_call(kvm_x86_deliver_interrupt)(apic, delivery_mode, trig_mode, vector); break; case APIC_DM_REMRD: result = 1; vcpu->arch.pv.pv_unhalted = 1; kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_vcpu_kick(vcpu); break; case APIC_DM_SMI: result = 1; kvm_make_request(KVM_REQ_SMI, vcpu); kvm_vcpu_kick(vcpu); break; case APIC_DM_NMI: result = 1; kvm_inject_nmi(vcpu); kvm_vcpu_kick(vcpu); break; case APIC_DM_INIT: if (!trig_mode || level) { result = 1; /* assumes that there are only KVM_APIC_INIT/SIPI */ apic->pending_events = (1UL << KVM_APIC_INIT); kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_vcpu_kick(vcpu); } break; case APIC_DM_STARTUP: result = 1; apic->sipi_vector = vector; /* make sure sipi_vector is visible for the receiver */ smp_wmb(); set_bit(KVM_APIC_SIPI, &apic->pending_events); kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_vcpu_kick(vcpu); break; case APIC_DM_EXTINT: /* * Should only be called by kvm_apic_local_deliver() with LVT0, * before NMI watchdog was enabled. Already handled by * kvm_apic_accept_pic_intr(). */ break; default: printk(KERN_ERR "TODO: unsupported delivery mode %x\n", delivery_mode); break; } return result; }
target: 0 | cwe: [ "CWE-476" ] | project: linux | commit_id: 00b5f37189d24ac3ed46cb7f11742094778c46ce | hash: 261,355,262,229,064,420,000,000,000,000,000,000,000 | size: 95
message:
KVM: x86: Avoid theoretical NULL pointer dereference in kvm_irq_delivery_to_apic_fast() When kvm_irq_delivery_to_apic_fast() is called with APIC_DEST_SELF shorthand, 'src' must not be NULL. Crash the VM with KVM_BUG_ON() instead of crashing the host. Signed-off-by: Vitaly Kuznetsov <[email protected]> Message-Id: <[email protected]> Cc: [email protected] Signed-off-by: Paolo Bonzini <[email protected]>
static Value performCastDoubleToLong(ExpressionContext* const expCtx, Value inputValue) { double inputDouble = inputValue.getDouble(); validateDoubleValueIsFinite(inputDouble); uassert(ErrorCodes::ConversionFailure, str::stream() << "Conversion would overflow target type in $convert with no onError value: " << inputDouble, inputDouble >= std::numeric_limits<long long>::lowest() && inputDouble < BSONElement::kLongLongMaxPlusOneAsDouble); return Value(static_cast<long long>(inputDouble)); }
target: 0 | cwe: [] | project: mongo | commit_id: 1772b9a0393b55e6a280a35e8f0a1f75c014f301 | hash: 223,028,947,332,304,100,000,000,000,000,000,000,000 | size: 13
message:
SERVER-49404 Enforce additional checks in $arrayToObject
static void usage() { cmdinfo(); fprintf(stderr, "usage:\n"); fprintf(stderr,"%s ", cmdname); fprintf(stderr, "[-f image_file]\n"); exit(EXIT_FAILURE); }
target: 0 | cwe: [ "CWE-476", "CWE-125" ] | project: jasper | commit_id: 8f62b4761711d036fd8964df256b938c809b7fca | hash: 162,239,402,393,546,230,000,000,000,000,000,000,000 | size: 8
message:
Fixed a sanitizer failure in the BMP codec. Also, added a --debug-level command line option to the imginfo command for debugging purposes.
static void __mcheck_cpu_init_timer(void) { struct timer_list *t = this_cpu_ptr(&mce_timer); timer_setup(t, mce_timer_fn, TIMER_PINNED); mce_start_timer(t); }
target: 0 | cwe: [ "CWE-362" ] | project: linux | commit_id: b3b7c4795ccab5be71f080774c45bbbcc75c2aaf | hash: 225,455,469,861,365,700,000,000,000,000,000,000,000 | size: 7
message:
x86/MCE: Serialize sysfs changes The check_interval file in /sys/devices/system/machinecheck/machinecheck<cpu number> directory is a global timer value for MCE polling. If it is changed by one CPU, mce_restart() broadcasts the event to other CPUs to delete and restart the MCE polling timer and __mcheck_cpu_init_timer() reinitializes the mce_timer variable. If more than one CPU writes a specific value to the check_interval file concurrently, mce_timer is not protected from such concurrent accesses and all kinds of explosions happen. Since only root can write to those sysfs variables, the issue is not a big deal security-wise. However, concurrent writes to these configuration variables is void of reason so the proper thing to do is to serialize the access with a mutex. Boris: - Make store_int_with_restart() use device_store_ulong() to filter out negative intervals - Limit min interval to 1 second - Correct locking - Massage commit message Signed-off-by: Seunghun Han <[email protected]> Signed-off-by: Borislav Petkov <[email protected]> Signed-off-by: Thomas Gleixner <[email protected]> Cc: Greg Kroah-Hartman <[email protected]> Cc: Tony Luck <[email protected]> Cc: linux-edac <[email protected]> Cc: [email protected] Link: http://lkml.kernel.org/r/[email protected]
static void io_submit_flush_completions(struct io_comp_state *cs) { struct io_ring_ctx *ctx = cs->ctx; spin_lock_irq(&ctx->completion_lock); while (!list_empty(&cs->list)) { struct io_kiocb *req; req = list_first_entry(&cs->list, struct io_kiocb, compl.list); list_del(&req->compl.list); __io_cqring_fill_event(req, req->result, req->compl.cflags); if (!(req->flags & REQ_F_LINK_HEAD)) { req->flags |= REQ_F_COMP_LOCKED; io_put_req(req); } else { spin_unlock_irq(&ctx->completion_lock); io_put_req(req); spin_lock_irq(&ctx->completion_lock); } } io_commit_cqring(ctx); spin_unlock_irq(&ctx->completion_lock); io_cqring_ev_posted(ctx); cs->nr = 0; }
target: 0 | cwe: [] | project: linux | commit_id: 0f2122045b946241a9e549c2a76cea54fa58a7ff | hash: 69,433,423,996,166,070,000,000,000,000,000,000,000 | size: 26
message:
io_uring: don't rely on weak ->files references Grab actual references to the files_struct. To avoid circular references issues due to this, we add a per-task note that keeps track of what io_uring contexts a task has used. When the tasks execs or exits its assigned files, we cancel requests based on this tracking. With that, we can grab proper references to the files table, and no longer need to rely on stashing away ring_fd and ring_file to check if the ring_fd may have been closed. Cc: [email protected] # v5.5+ Reviewed-by: Pavel Begunkov <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
ikev1_cr_print(netdissect_options *ndo, u_char tpay _U_, const struct isakmp_gen *ext, u_int item_len, const u_char *ep _U_, uint32_t phase _U_, uint32_t doi0 _U_, uint32_t proto0 _U_, int depth _U_) { const struct ikev1_pl_cert *p; struct ikev1_pl_cert cert; static const char *certstr[] = { "none", "pkcs7", "pgp", "dns", "x509sign", "x509ke", "kerberos", "crl", "arl", "spki", "x509attr", }; ND_PRINT((ndo,"%s:", NPSTR(ISAKMP_NPTYPE_CR))); p = (const struct ikev1_pl_cert *)ext; ND_TCHECK(*p); UNALIGNED_MEMCPY(&cert, ext, sizeof(cert)); ND_PRINT((ndo," len=%d", item_len - 4)); ND_PRINT((ndo," type=%s", STR_OR_ID((cert.encode), certstr))); if (2 < ndo->ndo_vflag && 4 < item_len) { ND_PRINT((ndo," ")); if (!rawprint(ndo, (const uint8_t *)(ext + 1), item_len - 4)) goto trunc; } return (const u_char *)ext + item_len; trunc: ND_PRINT((ndo," [|%s]", NPSTR(ISAKMP_NPTYPE_CR))); return NULL; }
target: 1 | cwe: [ "CWE-125", "CWE-787" ] | project: tcpdump | commit_id: 8dca25d26c7ca2caf6138267f6f17111212c156e | hash: 262,010,346,470,979,620,000,000,000,000,000,000,000 | size: 30
message:
CVE-2017-13690/IKEv2: Fix some bounds checks. Use a pointer of the correct type in ND_TCHECK(), or use ND_TCHECK2() and provide the correct length. While we're at it, remove the blank line between some checks and the UNALIGNED_MEMCPY()s they protect. Also, note the places where we print the entire payload. This fixes a buffer over-read discovered by Bhargava Shastry, SecT/TU Berlin. Add a test using the capture file supplied by the reporter(s).
void transport_get_fds(rdpTransport* transport, void** rfds, int* rcount) { void* pfd; #ifdef _WIN32 rfds[*rcount] = transport->TcpIn->wsa_event; (*rcount)++; if (transport->SplitInputOutput) { rfds[*rcount] = transport->TcpOut->wsa_event; (*rcount)++; } #else rfds[*rcount] = (void*)(long)(transport->TcpIn->sockfd); (*rcount)++; if (transport->SplitInputOutput) { rfds[*rcount] = (void*)(long)(transport->TcpOut->sockfd); (*rcount)++; } #endif pfd = GetEventWaitObject(transport->ReceiveEvent); if (pfd) { rfds[*rcount] = pfd; (*rcount)++; } if (transport->GatewayEvent) { pfd = GetEventWaitObject(transport->GatewayEvent); if (pfd) { rfds[*rcount] = pfd; (*rcount)++; } } }
target: 0 | cwe: [ "CWE-476", "CWE-125" ] | project: FreeRDP | commit_id: 0773bb9303d24473fe1185d85a424dfe159aff53 | hash: 335,441,517,015,292,270,000,000,000,000,000,000,000 | size: 43
message:
nla: invalidate sec handle after creation If sec pointer isn't invalidated after creation it is not possible to check if the upper and lower pointers are valid. This fixes a segfault in the server part if the client disconnects before the authentication was finished.
static void evp_cipher_cleanup(struct ssh_cipher_struct *cipher) { if (cipher->ctx != NULL) { EVP_CIPHER_CTX_cleanup(cipher->ctx); EVP_CIPHER_CTX_free(cipher->ctx); } }
target: 0 | cwe: [ "CWE-476" ] | project: libssh | commit_id: b36272eac1b36982598c10de7af0a501582de07a | hash: 109,086,525,726,408,300,000,000,000,000,000,000,000 | size: 6
message:
CVE-2020-1730: Fix a possible segfault when zeroing AES-CTR key Fixes T213 Signed-off-by: Andreas Schneider <[email protected]> Reviewed-by: Anderson Toshiyuki Sasaki <[email protected]>
getname_kernel(const char * filename) { struct filename *result; int len = strlen(filename) + 1; result = __getname(); if (unlikely(!result)) return ERR_PTR(-ENOMEM); if (len <= EMBEDDED_NAME_MAX) { result->name = (char *)result->iname; } else if (len <= PATH_MAX) { struct filename *tmp; tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); if (unlikely(!tmp)) { __putname(result); return ERR_PTR(-ENOMEM); } tmp->name = (char *)result; result = tmp; } else { __putname(result); return ERR_PTR(-ENAMETOOLONG); } memcpy((char *)result->name, filename, len); result->uptr = NULL; result->aname = NULL; result->refcnt = 1; audit_getname(result); return result; }
0
[ "CWE-416" ]
linux
f15133df088ecadd141ea1907f2c96df67c729f0
197,278,675,743,077,400,000,000,000,000,000,000,000
33
path_openat(): fix double fput() path_openat() jumps to the wrong place after do_tmpfile() - it has already done path_cleanup() (as part of path_lookupat() called by do_tmpfile()), so doing that again can lead to double fput(). Cc: [email protected] # v3.11+ Signed-off-by: Al Viro <[email protected]>
xmlSchemaPCustomErrExt(xmlSchemaParserCtxtPtr ctxt, xmlParserErrors error, xmlSchemaBasicItemPtr item, xmlNodePtr itemElem, const char *message, const xmlChar *str1, const xmlChar *str2, const xmlChar *str3) { xmlChar *des = NULL, *msg = NULL; xmlSchemaFormatItemForReport(&des, NULL, item, itemElem); msg = xmlStrdup(BAD_CAST "%s: "); msg = xmlStrcat(msg, (const xmlChar *) message); msg = xmlStrcat(msg, BAD_CAST ".\n"); if ((itemElem == NULL) && (item != NULL)) itemElem = WXS_ITEM_NODE(item); xmlSchemaPErrExt(ctxt, itemElem, error, NULL, NULL, NULL, (const char *) msg, BAD_CAST des, str1, str2, str3, NULL); FREE_AND_NULL(des); FREE_AND_NULL(msg); }
0
[ "CWE-134" ]
libxml2
4472c3a5a5b516aaf59b89be602fbce52756c3e9
305,164,589,618,281,900,000,000,000,000,000,000,000
22
Fix some format string warnings with possible format string vulnerability For https://bugzilla.gnome.org/show_bug.cgi?id=761029 Decorate every method in libxml2 with the appropriate LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups following the reports.
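The libxml2 change annotates its variadic error helpers so the compiler can check that format strings and arguments agree. A small sketch of that technique using GCC/Clang's format attribute; report_error is a hypothetical helper, not part of the libxml2 API.

    #include <stdarg.h>
    #include <stdio.h>

    /* Argument 1 is a printf-style format string and the variadic arguments
     * start at position 2, so mismatches can be diagnosed at build time. */
    static void report_error(const char *fmt, ...)
        __attribute__((format(printf, 1, 2)));

    static void report_error(const char *fmt, ...)
    {
        va_list ap;

        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        report_error("parse failed at line %d: %s\n", 42, "unexpected token");
        /* report_error("bad: %s\n", 42); would now trigger a -Wformat warning */
        return 0;
    }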
static int add_partition_to_data(struct ldb_context *ldb, struct partition_private_data *data, struct dsdb_partition *partition) { unsigned int i; int ret; /* Count the partitions */ for (i=0; data->partitions && data->partitions[i]; i++) { /* noop */}; /* Add partition to list of partitions */ data->partitions = talloc_realloc(data, data->partitions, struct dsdb_partition *, i + 2); if (!data->partitions) { return ldb_oom(ldb); } data->partitions[i] = talloc_steal(data->partitions, partition); data->partitions[i+1] = NULL; /* Sort again (should use binary insert) */ TYPESAFE_QSORT(data->partitions, i+1, partition_sort_compare); ret = partition_register(ldb, partition->ctrl); if (ret != LDB_SUCCESS) { return ret; } return LDB_SUCCESS; }
0
[ "CWE-200" ]
samba
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
189,279,210,399,204,560,000,000,000,000,000,000,000
26
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message This aims to minimise usage of the error-prone pattern of searching for a just-added message element in order to make modifications to it (and potentially finding the wrong element). BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009 Signed-off-by: Joseph Sutton <[email protected]>
static int smack_sb_umount(struct vfsmount *mnt, int flags) { struct superblock_smack *sbp; struct smk_audit_info ad; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); smk_ad_setfield_u_fs_path_dentry(&ad, mnt->mnt_mountpoint); smk_ad_setfield_u_fs_path_mnt(&ad, mnt); sbp = mnt->mnt_sb->s_security; return smk_curacc(sbp->smk_floor, MAY_WRITE, &ad); }
0
[]
linux-2.6
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
197,947,342,287,761,930,000,000,000,000,000,000,000
12
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6] Add a keyctl to install a process's session keyring onto its parent. This replaces the parent's session keyring. Because the COW credential code does not permit one process to change another process's credentials directly, the change is deferred until userspace next starts executing again. Normally this will be after a wait*() syscall. To support this, three new security hooks have been provided: cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in the blank security creds and key_session_to_parent() - which asks the LSM if the process may replace its parent's session keyring. The replacement may only happen if the process has the same ownership details as its parent, and the process has LINK permission on the session keyring, and the session keyring is owned by the process, and the LSM permits it. Note that this requires alteration to each architecture's notify_resume path. This has been done for all arches barring blackfin, m68k* and xtensa, all of which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the replacement to be performed at the point the parent process resumes userspace execution. This allows the userspace AFS pioctl emulation to fully emulate newpag() and the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to alter the parent process's PAG membership. However, since kAFS doesn't use PAGs per se, but rather dumps the keys into the session keyring, the session keyring of the parent must be replaced if, for example, VIOCSETTOK is passed the newpag flag. This can be tested with the following program: #include <stdio.h> #include <stdlib.h> #include <keyutils.h> #define KEYCTL_SESSION_TO_PARENT 18 #define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0) int main(int argc, char **argv) { key_serial_t keyring, key; long ret; keyring = keyctl_join_session_keyring(argv[1]); OSERROR(keyring, "keyctl_join_session_keyring"); key = add_key("user", "a", "b", 1, keyring); OSERROR(key, "add_key"); ret = keyctl(KEYCTL_SESSION_TO_PARENT); OSERROR(ret, "KEYCTL_SESSION_TO_PARENT"); return 0; } Compiled and linked with -lkeyutils, you should see something like: [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 355907932 --alswrv 4043 -1 \_ keyring: _uid.4043 [dhowells@andromeda ~]$ /tmp/newpag [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 1055658746 --alswrv 4043 4043 \_ user: a [dhowells@andromeda ~]$ /tmp/newpag hello [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: hello 340417692 --alswrv 4043 4043 \_ user: a Where the test program creates a new session keyring, sticks a user key named 'a' into it and then installs it on its parent. Signed-off-by: David Howells <[email protected]> Signed-off-by: James Morris <[email protected]>
int getGenericCommand(client *c) { robj *o; if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.null[c->resp])) == NULL) return C_OK; if (o->type != OBJ_STRING) { addReply(c,shared.wrongtypeerr); return C_ERR; } else { addReplyBulk(c,o); return C_OK; } }
0
[ "CWE-190" ]
redis
394614a5f91d88380f480c4610926a865b5b0f16
249,750,839,888,082,600,000,000,000,000,000,000,000
14
Fix integer overflow in STRALGO LCS (CVE-2021-29477) An integer overflow bug in Redis version 6.0 or newer could be exploited using the STRALGO LCS command to corrupt the heap and potentially result in remote code execution. (cherry picked from commit f0c5f920d0f88bd8aa376a2c05af4902789d1ef9)
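The overflow behind this CVE sits in a size computation that later feeds a heap allocation. A minimal sketch of the usual mitigation, rejecting any multiplication that would wrap before allocating, with illustrative names (alloc_lcs_table is not Redis code):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Allocate an (rows+1) x (cols+1) matrix of uint32_t, refusing any size
     * that would overflow size_t instead of silently wrapping. */
    static uint32_t *alloc_lcs_table(size_t rows, size_t cols)
    {
        size_t r = rows + 1, c = cols + 1;

        if (r == 0 || c == 0)                    /* rows or cols was SIZE_MAX   */
            return NULL;
        if (r > SIZE_MAX / c)                    /* r * c would overflow        */
            return NULL;
        if (r * c > SIZE_MAX / sizeof(uint32_t)) /* byte count would overflow   */
            return NULL;
        return calloc(r * c, sizeof(uint32_t));
    }

    int main(void)
    {
        if (alloc_lcs_table((size_t)-1, 10) == NULL)
            puts("oversized request rejected");
        free(alloc_lcs_table(16, 16));
        return 0;
    }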
static void suboption(struct Curl_easy *data) { struct curl_slist *v; unsigned char temp[2048]; ssize_t bytes_written; size_t len; int err; char varname[128] = ""; char varval[128] = ""; struct TELNET *tn = data->req.p.telnet; struct connectdata *conn = data->conn; printsub(data, '<', (unsigned char *)tn->subbuffer, CURL_SB_LEN(tn) + 2); switch(CURL_SB_GET(tn)) { case CURL_TELOPT_TTYPE: len = strlen(tn->subopt_ttype) + 4 + 2; msnprintf((char *)temp, sizeof(temp), "%c%c%c%c%s%c%c", CURL_IAC, CURL_SB, CURL_TELOPT_TTYPE, CURL_TELQUAL_IS, tn->subopt_ttype, CURL_IAC, CURL_SE); bytes_written = swrite(conn->sock[FIRSTSOCKET], temp, len); if(bytes_written < 0) { err = SOCKERRNO; failf(data,"Sending data failed (%d)",err); } printsub(data, '>', &temp[2], len-2); break; case CURL_TELOPT_XDISPLOC: len = strlen(tn->subopt_xdisploc) + 4 + 2; msnprintf((char *)temp, sizeof(temp), "%c%c%c%c%s%c%c", CURL_IAC, CURL_SB, CURL_TELOPT_XDISPLOC, CURL_TELQUAL_IS, tn->subopt_xdisploc, CURL_IAC, CURL_SE); bytes_written = swrite(conn->sock[FIRSTSOCKET], temp, len); if(bytes_written < 0) { err = SOCKERRNO; failf(data,"Sending data failed (%d)",err); } printsub(data, '>', &temp[2], len-2); break; case CURL_TELOPT_NEW_ENVIRON: msnprintf((char *)temp, sizeof(temp), "%c%c%c%c", CURL_IAC, CURL_SB, CURL_TELOPT_NEW_ENVIRON, CURL_TELQUAL_IS); len = 4; for(v = tn->telnet_vars; v; v = v->next) { size_t tmplen = (strlen(v->data) + 1); /* Add the variable only if it fits */ if(len + tmplen < (int)sizeof(temp)-6) { if(sscanf(v->data, "%127[^,],%127s", varname, varval) == 2) { msnprintf((char *)&temp[len], sizeof(temp) - len, "%c%s%c%s", CURL_NEW_ENV_VAR, varname, CURL_NEW_ENV_VALUE, varval); len += tmplen; } } } msnprintf((char *)&temp[len], sizeof(temp) - len, "%c%c", CURL_IAC, CURL_SE); len += 2; bytes_written = swrite(conn->sock[FIRSTSOCKET], temp, len); if(bytes_written < 0) { err = SOCKERRNO; failf(data,"Sending data failed (%d)",err); } printsub(data, '>', &temp[2], len-2); break; } return; }
0
[ "CWE-200", "CWE-909" ]
curl
39ce47f219b09c380b81f89fe54ac586c8db6bde
162,263,181,786,713,460,000,000,000,000,000,000,000
69
telnet: check sscanf() for correct number of matches CVE-2021-22898 Bug: https://curl.se/docs/CVE-2021-22898.html
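The fixed suboption() above only uses varname and varval when sscanf() reports that both conversions matched, so a malformed "name,value" option can no longer leave a buffer uninitialised. A stand-alone sketch of that check; the 128-byte buffers and the format string mirror the style of the record's code but are illustrative.

    #include <stdio.h>

    int main(void)
    {
        const char *good = "DISPLAY,localhost:0";
        const char *bad  = "NOCOMMAHERE";
        char name[128] = "";
        char value[128] = "";

        /* Only use the parsed fields when BOTH conversions succeeded. */
        if (sscanf(good, "%127[^,],%127s", name, value) == 2)
            printf("send %s=%s\n", name, value);

        if (sscanf(bad, "%127[^,],%127s", name, value) != 2)
            printf("skipping malformed option: %s\n", bad);
        return 0;
    }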
BSONObj CommandHelpers::appendMajorityWriteConcern(const BSONObj& cmdObj) { WriteConcernOptions newWC = kMajorityWriteConcern; if (cmdObj.hasField(kWriteConcernField)) { auto wc = cmdObj.getField(kWriteConcernField); // The command has a writeConcern field and it's majority, so we can // return it as-is. if (wc["w"].ok() && wc["w"].str() == "majority") { return cmdObj; } if (wc["wtimeout"].ok()) { // They set a timeout, but aren't using majority WC. We want to use their // timeout along with majority WC. newWC = WriteConcernOptions(WriteConcernOptions::kMajority, WriteConcernOptions::SyncMode::UNSET, wc["wtimeout"].Number()); } } // Append all original fields except the writeConcern field to the new command. BSONObjBuilder cmdObjWithWriteConcern; for (const auto& elem : cmdObj) { const auto name = elem.fieldNameStringData(); if (name != "writeConcern" && !cmdObjWithWriteConcern.hasField(name)) { cmdObjWithWriteConcern.append(elem); } } // Finally, add the new write concern. cmdObjWithWriteConcern.append(kWriteConcernField, newWC.toBSON()); return cmdObjWithWriteConcern.obj(); }
0
[ "CWE-20" ]
mongo
722f06f3217c029ef9c50062c8cc775966fd7ead
265,854,605,724,596,630,000,000,000,000,000,000,000
33
SERVER-38275 ban find explain with UUID
Table_check_intact::check(TABLE *table, const TABLE_FIELD_DEF *table_def) { uint i; my_bool error= FALSE; const TABLE_FIELD_TYPE *field_def= table_def->field; DBUG_ENTER("table_check_intact"); DBUG_PRINT("info",("table: %s expected_count: %d", table->alias.c_ptr(), table_def->count)); /* Whether the table definition has already been validated. */ if (table->s->table_field_def_cache == table_def) goto end; if (table->s->fields != table_def->count) { THD *thd= current_thd; DBUG_PRINT("info", ("Column count has changed, checking the definition")); /* previous MySQL version */ if (MYSQL_VERSION_ID > table->s->mysql_version) { report_error(ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE, ER_THD(thd, ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE), table->alias.c_ptr(), table_def->count, table->s->fields, static_cast<int>(table->s->mysql_version), MYSQL_VERSION_ID); DBUG_RETURN(TRUE); } else if (MYSQL_VERSION_ID == table->s->mysql_version) { report_error(ER_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2, ER_THD(thd, ER_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2), table->s->db.str, table->s->table_name.str, table_def->count, table->s->fields); DBUG_RETURN(TRUE); } /* Something has definitely changed, but we're running an older version of MySQL with new system tables. Let's check column definitions. If a column was added at the end of the table, then we don't care much since such change is backward compatible. */ } else { StringBuffer<1024> sql_type(system_charset_info); sql_type.extra_allocation(256); // Allocate min 256 characters at once for (i=0 ; i < table_def->count; i++, field_def++) { sql_type.length(0); if (i < table->s->fields) { Field *field= table->field[i]; if (strncmp(field->field_name, field_def->name.str, field_def->name.length)) { /* Name changes are not fatal, we use ordinal numbers to access columns. Still this can be a sign of a tampered table, output an error to the error log. */ report_error(0, "Incorrect definition of table %s.%s: " "expected column '%s' at position %d, found '%s'.", table->s->db.str, table->alias.c_ptr(), field_def->name.str, i, field->field_name); } field->sql_type(sql_type); /* Generally, if column types don't match, then something is wrong. However, we only compare column definitions up to the length of the original definition, since we consider the following definitions compatible: 1. DATETIME and DATETIM 2. INT(11) and INT(11 3. SET('one', 'two') and SET('one', 'two', 'more') For SETs or ENUMs, if the same prefix is there it's OK to add more elements - they will get higher ordinal numbers and the new table definition is backward compatible with the original one. 
*/ if (strncmp(sql_type.c_ptr_safe(), field_def->type.str, field_def->type.length - 1)) { report_error(0, "Incorrect definition of table %s.%s: " "expected column '%s' at position %d to have type " "%s, found type %s.", table->s->db.str, table->alias.c_ptr(), field_def->name.str, i, field_def->type.str, sql_type.c_ptr_safe()); error= TRUE; } else if (field_def->cset.str && !field->has_charset()) { report_error(0, "Incorrect definition of table %s.%s: " "expected the type of column '%s' at position %d " "to have character set '%s' but the type has no " "character set.", table->s->db.str, table->alias.c_ptr(), field_def->name.str, i, field_def->cset.str); error= TRUE; } else if (field_def->cset.str && strcmp(field->charset()->csname, field_def->cset.str)) { report_error(0, "Incorrect definition of table %s.%s: " "expected the type of column '%s' at position %d " "to have character set '%s' but found " "character set '%s'.", table->s->db.str, table->alias.c_ptr(), field_def->name.str, i, field_def->cset.str, field->charset()->csname); error= TRUE; } } else { report_error(0, "Incorrect definition of table %s.%s: " "expected column '%s' at position %d to have type %s " " but the column is not found.", table->s->db.str, table->alias.c_ptr(), field_def->name.str, i, field_def->type.str); error= TRUE; } } } if (table_def->primary_key_parts) { if (table->s->primary_key == MAX_KEY) { report_error(0, "Incorrect definition of table %s.%s: " "missing primary key.", table->s->db.str, table->alias.c_ptr()); error= TRUE; } else { KEY *pk= &table->s->key_info[table->s->primary_key]; if (pk->user_defined_key_parts != table_def->primary_key_parts) { report_error(0, "Incorrect definition of table %s.%s: " "Expected primary key to have %u columns, but instead " "found %u columns.", table->s->db.str, table->alias.c_ptr(), table_def->primary_key_parts, pk->user_defined_key_parts); error= TRUE; } else { for (i= 0; i < pk->user_defined_key_parts; ++i) { if (table_def->primary_key_columns[i] + 1 != pk->key_part[i].fieldnr) { report_error(0, "Incorrect definition of table %s.%s: Expected " "primary key part %u to refer to column %u, but " "instead found column %u.", table->s->db.str, table->alias.c_ptr(), i + 1, table_def->primary_key_columns[i] + 1, pk->key_part[i].fieldnr); error= TRUE; } } } } } if (! error) table->s->table_field_def_cache= table_def; end: if (has_keys && !error && !table->key_info) { report_error(0, "Incorrect definition of table %s.%s: " "indexes are missing", table->s->db.str, table->alias.c_ptr()); error= TRUE; } DBUG_RETURN(error); }
0
[ "CWE-416" ]
server
4681b6f2d8c82b4ec5cf115e83698251963d80d5
118,895,665,435,070,480,000,000,000,000,000,000,000
188
MDEV-26281 ASAN use-after-poison when complex conversion is involved in blob the bug was that in_vector array in Item_func_in was allocated in the statement arena, not in the table->expr_arena. revert part of the 5acd391e8b2d. Instead, change the arena correctly in fix_all_session_vcol_exprs(). Remove TABLE_ARENA, that was introduced in 5acd391e8b2d to force item tree changes to be rolled back (because they were allocated in the wrong arena and didn't persist. now they do)
static enum test_return test_binary_get_impl(const char *key, uint8_t cmd) { union { protocol_binary_request_no_extras request; protocol_binary_response_no_extras response; char bytes[1024]; } send, receive; size_t len = raw_command(send.bytes, sizeof(send.bytes), cmd, key, strlen(key), NULL, 0); safe_send(send.bytes, len, false); safe_recv_packet(receive.bytes, sizeof(receive.bytes)); validate_response_header(&receive.response, cmd, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT); len = storage_command(send.bytes, sizeof(send.bytes), PROTOCOL_BINARY_CMD_ADD, key, strlen(key), NULL, 0, 0, 0); safe_send(send.bytes, len, false); safe_recv_packet(receive.bytes, sizeof(receive.bytes)); validate_response_header(&receive.response, PROTOCOL_BINARY_CMD_ADD, PROTOCOL_BINARY_RESPONSE_SUCCESS); /* run a little pipeline test ;-) */ len = 0; int ii; for (ii = 0; ii < 10; ++ii) { union { protocol_binary_request_no_extras request; char bytes[1024]; } temp; size_t l = raw_command(temp.bytes, sizeof(temp.bytes), cmd, key, strlen(key), NULL, 0); memcpy(send.bytes + len, temp.bytes, l); len += l; } safe_send(send.bytes, len, false); for (ii = 0; ii < 10; ++ii) { safe_recv_packet(receive.bytes, sizeof(receive.bytes)); validate_response_header(&receive.response, cmd, PROTOCOL_BINARY_RESPONSE_SUCCESS); } return TEST_PASS; }
0
[ "CWE-20" ]
memcached
75cc83685e103bc8ba380a57468c8f04413033f9
316,899,017,663,320,760,000,000,000,000,000,000,000
46
Issue 102: Piping null to the server will crash it
CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void) { cJSON *item = cJSON_New_Item(&global_hooks); if(item) { item->type=cJSON_Array; } return item; }
0
[ "CWE-754", "CWE-787" ]
cJSON
be749d7efa7c9021da746e685bd6dec79f9dd99b
245,258,329,613,515,130,000,000,000,000,000,000,000
10
Fix crash of cJSON_GetObjectItemCaseSensitive when calling it on arrays
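The cJSON fix makes the case-sensitive lookup tolerate being called on a value that is not an object, such as an array. Callers can apply the same defence by checking the node's type and the lookup result before use; a short sketch, assuming the standard cJSON API (cJSON_Parse, cJSON_IsObject, cJSON_GetObjectItemCaseSensitive, cJSON_IsString, cJSON_Delete):

    #include <stdio.h>
    #include "cJSON.h"

    int main(void)
    {
        cJSON *doc = cJSON_Parse("[1, 2, 3]");   /* an array, not an object */

        if (doc == NULL)
            return 1;

        /* Guard the lookup: only objects have named members, and the result
         * may be NULL even for real objects. */
        if (cJSON_IsObject(doc)) {
            cJSON *name = cJSON_GetObjectItemCaseSensitive(doc, "name");

            if (cJSON_IsString(name) && name->valuestring != NULL)
                printf("name = %s\n", name->valuestring);
        } else {
            puts("top-level value is not an object; skipping member lookup");
        }

        cJSON_Delete(doc);
        return 0;
    }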
u8 b43_ieee80211_antenna_sanitize(struct b43_wldev *dev, u8 antenna_nr) { u8 antenna_mask; if (antenna_nr == 0) { /* Zero means "use default antenna". That's always OK. */ return 0; } /* Get the mask of available antennas. */ if (dev->phy.gmode) antenna_mask = dev->dev->bus_sprom->ant_available_bg; else antenna_mask = dev->dev->bus_sprom->ant_available_a; if (!(antenna_mask & (1 << (antenna_nr - 1)))) { /* This antenna is not available. Fall back to default. */ return 0; } return antenna_nr; }
0
[ "CWE-134" ]
wireless
9538cbaab6e8b8046039b4b2eb6c9d614dc782bd
220,712,353,582,329,100,000,000,000,000,000,000,000
23
b43: stop format string leaking into error msgs The module parameter "fwpostfix" is userspace controllable, unfiltered, and is used to define the firmware filename. b43_do_request_fw() populates ctx->errors[] on error, containing the firmware filename. b43err() parses its arguments as a format string. For systems with b43 hardware, this could lead to a uid-0 to ring-0 escalation. CVE-2013-2852 Signed-off-by: Kees Cook <[email protected]> Cc: [email protected] Signed-off-by: John W. Linville <[email protected]>
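CVE-2013-2852 stems from attacker-influenced text (the firmware filename) being forwarded as a printf format string. The general rule is to pass untrusted data only as an argument to a fixed "%s" format; log_fw_error below is an illustrative stand-in for the driver's error logger, not the b43 code.

    #include <stdio.h>

    /* Stand-in for a driver's error logger; it would be vulnerable if 'fwname'
     * were forwarded as the format string itself. */
    static void log_fw_error(const char *fwname)
    {
        /* Wrong (format string injection): fprintf(stderr, fwname);         */
        /* Right: untrusted text is only ever passed as a %s argument.       */
        fprintf(stderr, "firmware file %s not found\n", fwname);
    }

    int main(void)
    {
        log_fw_error("b43%n%n%n.fw");   /* hostile name is printed harmlessly */
        return 0;
    }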
lt_dlsetsearchpath (const char *search_path) { int errors = 0; FREE (user_search_path); if (!search_path || !LT_STRLEN (search_path)) { return errors; } if (canonicalize_path (search_path, &user_search_path) != 0) ++errors; return errors; }
0
[]
libtool
e91f7b960032074a55fc91273c1917e3082b5338
26,152,265,810,069,126,000,000,000,000,000,000,000
16
Don't load module.la from current directory by default. * libltdl/ltdl.c (try_dlopen): Do not attempt to load an unqualified module.la file from the current directory (by default) since doing so is insecure and is not compliant with the documentation. * tests/testsuite.at: Qualify access to module.la file in current directory so that test passes.
static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct vcpu_vmx *vmx = to_vmx(vcpu); enum emulation_result err = EMULATE_DONE; preempt_enable(); local_irq_enable(); while (!guest_state_valid(vcpu)) { err = emulate_instruction(vcpu, kvm_run, 0, 0, 0); if (err == EMULATE_DO_MMIO) break; if (err != EMULATE_DONE) { kvm_report_emulation_failure(vcpu, "emulation failure"); return; } if (signal_pending(current)) break; if (need_resched()) schedule(); } local_irq_disable(); preempt_disable(); vmx->invalid_state_emulation_result = err; }
0
[ "CWE-20" ]
linux-2.6
16175a796d061833aacfbd9672235f2d2725df65
72,138,344,135,478,160,000,000,000,000,000,000,000
31
KVM: VMX: Don't allow uninhibited access to EFER on i386 vmx_set_msr() does not allow i386 guests to touch EFER, but they can still do so through the default: label in the switch. If they set EFER_LME, they can oops the host. Fix by having EFER access through the normal channel (which will check for EFER_LME) even on i386. Reported-and-tested-by: Benjamin Gilbert <[email protected]> Cc: [email protected] Signed-off-by: Avi Kivity <[email protected]>
static int ntop_verbose_trace(lua_State* vm) { ntop->getTrace()->traceEvent(TRACE_DEBUG, "%s() called", __FUNCTION__); lua_pushboolean(vm, (ntop->getTrace()->get_trace_level() == MAX_TRACE_LEVEL) ? true : false); return(CONST_LUA_OK); }
0
[ "CWE-476" ]
ntopng
01f47e04fd7c8d54399c9e465f823f0017069f8f
7,600,276,731,693,976,000,000,000,000,000,000,000
6
Security fix: prevents empty host from being used
static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) { struct sctp_packet *packet; struct sctp_packet singleton; struct sctp_association *asoc = q->asoc; __u16 sport = asoc->base.bind_addr.port; __u16 dport = asoc->peer.port; __u32 vtag = asoc->peer.i.init_tag; struct sctp_transport *transport = NULL; struct sctp_transport *new_transport; struct sctp_chunk *chunk, *tmp; sctp_xmit_t status; int error = 0; int start_timer = 0; int one_packet = 0; /* These transports have chunks to send. */ struct list_head transport_list; struct list_head *ltransport; INIT_LIST_HEAD(&transport_list); packet = NULL; /* * 6.10 Bundling * ... * When bundling control chunks with DATA chunks, an * endpoint MUST place control chunks first in the outbound * SCTP packet. The transmitter MUST transmit DATA chunks * within a SCTP packet in increasing order of TSN. * ... */ list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { /* RFC 5061, 5.3 * F1) This means that until such time as the ASCONF * containing the add is acknowledged, the sender MUST * NOT use the new IP address as a source for ANY SCTP * packet except on carrying an ASCONF Chunk. */ if (asoc->src_out_of_asoc_ok && chunk->chunk_hdr->type != SCTP_CID_ASCONF) continue; list_del_init(&chunk->list); /* Pick the right transport to use. */ new_transport = chunk->transport; if (!new_transport) { /* * If we have a prior transport pointer, see if * the destination address of the chunk * matches the destination address of the * current transport. If not a match, then * try to look up the transport with a given * destination address. We do this because * after processing ASCONFs, we may have new * transports created. */ if (transport && sctp_cmp_addr_exact(&chunk->dest, &transport->ipaddr)) new_transport = transport; else new_transport = sctp_assoc_lookup_paddr(asoc, &chunk->dest); /* if we still don't have a new transport, then * use the current active path. */ if (!new_transport) new_transport = asoc->peer.active_path; } else if ((new_transport->state == SCTP_INACTIVE) || (new_transport->state == SCTP_UNCONFIRMED) || (new_transport->state == SCTP_PF)) { /* If the chunk is Heartbeat or Heartbeat Ack, * send it to chunk->transport, even if it's * inactive. * * 3.3.6 Heartbeat Acknowledgement: * ... * A HEARTBEAT ACK is always sent to the source IP * address of the IP datagram containing the * HEARTBEAT chunk to which this ack is responding. * ... * * ASCONF_ACKs also must be sent to the source. */ if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT && chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK && chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK) new_transport = asoc->peer.active_path; } /* Are we switching transports? * Take care of transport locks. */ if (new_transport != transport) { transport = new_transport; if (list_empty(&transport->send_ready)) { list_add_tail(&transport->send_ready, &transport_list); } packet = &transport->packet; sctp_packet_config(packet, vtag, asoc->peer.ecn_capable); } switch (chunk->chunk_hdr->type) { /* * 6.10 Bundling * ... * An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN * COMPLETE with any other chunks. [Send them immediately.] 
*/ case SCTP_CID_INIT: case SCTP_CID_INIT_ACK: case SCTP_CID_SHUTDOWN_COMPLETE: sctp_packet_init(&singleton, transport, sport, dport); sctp_packet_config(&singleton, vtag, 0); sctp_packet_append_chunk(&singleton, chunk); error = sctp_packet_transmit(&singleton); if (error < 0) return error; break; case SCTP_CID_ABORT: if (sctp_test_T_bit(chunk)) { packet->vtag = asoc->c.my_vtag; } /* The following chunks are "response" chunks, i.e. * they are generated in response to something we * received. If we are sending these, then we can * send only 1 packet containing these chunks. */ case SCTP_CID_HEARTBEAT_ACK: case SCTP_CID_SHUTDOWN_ACK: case SCTP_CID_COOKIE_ACK: case SCTP_CID_COOKIE_ECHO: case SCTP_CID_ERROR: case SCTP_CID_ECN_CWR: case SCTP_CID_ASCONF_ACK: one_packet = 1; /* Fall through */ case SCTP_CID_SACK: case SCTP_CID_HEARTBEAT: case SCTP_CID_SHUTDOWN: case SCTP_CID_ECN_ECNE: case SCTP_CID_ASCONF: case SCTP_CID_FWD_TSN: status = sctp_packet_transmit_chunk(packet, chunk, one_packet); if (status != SCTP_XMIT_OK) { /* put the chunk back */ list_add(&chunk->list, &q->control_chunk_list); } else if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) { /* PR-SCTP C5) If a FORWARD TSN is sent, the * sender MUST assure that at least one T3-rtx * timer is running. */ sctp_transport_reset_timers(transport); } break; default: /* We built a chunk with an illegal type! */ BUG(); } } if (q->asoc->src_out_of_asoc_ok) goto sctp_flush_out; /* Is it OK to send data chunks? */ switch (asoc->state) { case SCTP_STATE_COOKIE_ECHOED: /* Only allow bundling when this packet has a COOKIE-ECHO * chunk. */ if (!packet || !packet->has_cookie_echo) break; /* fallthru */ case SCTP_STATE_ESTABLISHED: case SCTP_STATE_SHUTDOWN_PENDING: case SCTP_STATE_SHUTDOWN_RECEIVED: /* * RFC 2960 6.1 Transmission of DATA Chunks * * C) When the time comes for the sender to transmit, * before sending new DATA chunks, the sender MUST * first transmit any outstanding DATA chunks which * are marked for retransmission (limited by the * current cwnd). */ if (!list_empty(&q->retransmit)) { if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED) goto sctp_flush_out; if (transport == asoc->peer.retran_path) goto retran; /* Switch transports & prepare the packet. */ transport = asoc->peer.retran_path; if (list_empty(&transport->send_ready)) { list_add_tail(&transport->send_ready, &transport_list); } packet = &transport->packet; sctp_packet_config(packet, vtag, asoc->peer.ecn_capable); retran: error = sctp_outq_flush_rtx(q, packet, rtx_timeout, &start_timer); if (start_timer) sctp_transport_reset_timers(transport); /* This can happen on COOKIE-ECHO resend. Only * one chunk can get bundled with a COOKIE-ECHO. */ if (packet->has_cookie_echo) goto sctp_flush_out; /* Don't send new data if there is still data * waiting to retransmit. */ if (!list_empty(&q->retransmit)) goto sctp_flush_out; } /* Apply Max.Burst limitation to the current transport in * case it will be used for new data. We are going to * rest it before we return, but we want to apply the limit * to the currently queued data. */ if (transport) sctp_transport_burst_limited(transport); /* Finally, transmit new packets. */ while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { /* RFC 2960 6.5 Every DATA chunk MUST carry a valid * stream identifier. */ if (chunk->sinfo.sinfo_stream >= asoc->c.sinit_num_ostreams) { /* Mark as failed send. */ sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM); sctp_chunk_free(chunk); continue; } /* Has this chunk expired? 
*/ if (sctp_chunk_abandoned(chunk)) { sctp_chunk_fail(chunk, 0); sctp_chunk_free(chunk); continue; } /* If there is a specified transport, use it. * Otherwise, we want to use the active path. */ new_transport = chunk->transport; if (!new_transport || ((new_transport->state == SCTP_INACTIVE) || (new_transport->state == SCTP_UNCONFIRMED) || (new_transport->state == SCTP_PF))) new_transport = asoc->peer.active_path; if (new_transport->state == SCTP_UNCONFIRMED) continue; /* Change packets if necessary. */ if (new_transport != transport) { transport = new_transport; /* Schedule to have this transport's * packet flushed. */ if (list_empty(&transport->send_ready)) { list_add_tail(&transport->send_ready, &transport_list); } packet = &transport->packet; sctp_packet_config(packet, vtag, asoc->peer.ecn_capable); /* We've switched transports, so apply the * Burst limit to the new transport. */ sctp_transport_burst_limited(transport); } SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ", q, chunk, chunk && chunk->chunk_hdr ? sctp_cname(SCTP_ST_CHUNK( chunk->chunk_hdr->type)) : "Illegal Chunk"); SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head " "%p skb->users %d.\n", ntohl(chunk->subh.data_hdr->tsn), chunk->skb ?chunk->skb->head : NULL, chunk->skb ? atomic_read(&chunk->skb->users) : -1); /* Add the chunk to the packet. */ status = sctp_packet_transmit_chunk(packet, chunk, 0); switch (status) { case SCTP_XMIT_PMTU_FULL: case SCTP_XMIT_RWND_FULL: case SCTP_XMIT_NAGLE_DELAY: /* We could not append this chunk, so put * the chunk back on the output queue. */ SCTP_DEBUG_PRINTK("sctp_outq_flush: could " "not transmit TSN: 0x%x, status: %d\n", ntohl(chunk->subh.data_hdr->tsn), status); sctp_outq_head_data(q, chunk); goto sctp_flush_out; break; case SCTP_XMIT_OK: /* The sender is in the SHUTDOWN-PENDING state, * The sender MAY set the I-bit in the DATA * chunk header. */ if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM; break; default: BUG(); } /* BUG: We assume that the sctp_packet_transmit() * call below will succeed all the time and add the * chunk to the transmitted list and restart the * timers. * It is possible that the call can fail under OOM * conditions. * * Is this really a problem? Won't this behave * like a lost TSN? */ list_add_tail(&chunk->transmitted_list, &transport->transmitted); sctp_transport_reset_timers(transport); q->empty = 0; /* Only let one DATA chunk get bundled with a * COOKIE-ECHO chunk. */ if (packet->has_cookie_echo) goto sctp_flush_out; } break; default: /* Do nothing. */ break; } sctp_flush_out: /* Before returning, examine all the transports touched in * this call. Right now, we bluntly force clear all the * transports. Things might change after we implement Nagle. * But such an examination is still required. * * --xguo */ while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL ) { struct sctp_transport *t = list_entry(ltransport, struct sctp_transport, send_ready); packet = &t->packet; if (!sctp_packet_empty(packet)) error = sctp_packet_transmit(packet); /* Clear the burst limited state, if any */ sctp_transport_burst_reset(t); } return error; }
1
[]
linux
196d67593439b03088913227093e374235596e33
28,954,651,423,028,803,000,000,000,000,000,000,000
396
sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call The current SCTP stack is lacking a mechanism to have per association statistics. This is an implementation modeled after OpenSolaris' SCTP_GET_ASSOC_STATS. Userspace part will follow on lksctp if/when there is a general ACK on this. V4: - Move ipackets++ before q->immediate.func() for consistency reasons - Move sctp_max_rto() at the end of sctp_transport_update_rto() to avoid returning bogus RTO values - return asoc->rto_min when max_obs_rto value has not changed V3: - Increase ictrlchunks in sctp_assoc_bh_rcv() as well - Move ipackets++ to sctp_inq_push() - return 0 when no rto updates took place since the last call V2: - Implement partial retrieval of stat struct to cope for future expansion - Kill the rtxpackets counter as it cannot be precise anyway - Rename outseqtsns to outofseqtsns to make it clearer that these are out of sequence unexpected TSNs - Move asoc->ipackets++ under a lock to avoid potential miscounts - Fold asoc->opackets++ into the already existing asoc check - Kill unneeded (q->asoc) test when increasing rtxchunks - Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0) - Don't count SHUTDOWNs as SACKs - Move SCTP_GET_ASSOC_STATS to the private space API - Adjust the len check in sctp_getsockopt_assoc_stats() to allow for future struct growth - Move association statistics in their own struct - Update idupchunks when we send a SACK with dup TSNs - return min_rto in max_rto when RTO has not changed. Also return the transport when max_rto last changed. Signed-off: Michele Baldessari <[email protected]> Acked-by: Vlad Yasevich <[email protected]> Signed-off-by: David S. Miller <[email protected]>
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable) { int i; int end; st->scantable= src_scantable; for(i=0; i<64; i++){ int j; j = src_scantable[i]; st->permutated[i] = permutation[j]; } end=-1; for(i=0; i<64; i++){ int j; j = st->permutated[i]; if(j>end) end=j; st->raster_end[i]= end; } }
0
[ "CWE-703", "CWE-189" ]
FFmpeg
454a11a1c9c686c78aa97954306fb63453299760
176,643,515,238,679,630,000,000,000,000,000,000,000
22
avcodec/dsputil: fix signedness in sizeof() comparisons Signed-off-by: Michael Niedermayer <[email protected]>
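The commit fixes comparisons that mix a signed value with the unsigned result of sizeof(): the signed operand is converted to an unsigned type, so a negative length silently becomes a huge value. A minimal demonstration of the pitfall and one conventional fix (reject negative lengths first, then compare in a single signedness domain):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char buf[16];
        int len = -1;                  /* e.g. the result of a failed read() */

        /* Mixed signed/unsigned comparison: 'len' is converted to size_t, so
         * -1 becomes a huge value and this prints 0, not 1 as it appears. */
        printf("len < sizeof(buf) -> %d\n", len < sizeof(buf));

        /* Safer: reject negative lengths explicitly, then compare in a single
         * signedness domain before touching the buffer. */
        if (len >= 0 && (size_t)len < sizeof(buf))
            memset(buf, 0, (size_t)len);
        else
            puts("invalid length rejected");
        return 0;
    }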
static void setup_namespaces(struct lo_data *lo, struct fuse_session *se) { pid_t child; /* * Create a new pid namespace for *child* processes. We'll have to * fork in order to enter the new pid namespace. A new mount namespace * is also needed so that we can remount /proc for the new pid * namespace. * * Our UNIX domain sockets have been created. Now we can move to * an empty network namespace to prevent TCP/IP and other network * activity in case this process is compromised. */ if (unshare(CLONE_NEWPID | CLONE_NEWNS | CLONE_NEWNET) != 0) { fuse_log(FUSE_LOG_ERR, "unshare(CLONE_NEWPID | CLONE_NEWNS): %m\n"); exit(1); } child = fork(); if (child < 0) { fuse_log(FUSE_LOG_ERR, "fork() failed: %m\n"); exit(1); } if (child > 0) { pid_t waited; int wstatus; setup_wait_parent_capabilities(); /* The parent waits for the child */ do { waited = waitpid(child, &wstatus, 0); } while (waited < 0 && errno == EINTR && !se->exited); /* We were terminated by a signal, see fuse_signals.c */ if (se->exited) { exit(0); } if (WIFEXITED(wstatus)) { exit(WEXITSTATUS(wstatus)); } exit(1); } /* Send us SIGTERM when the parent thread terminates, see prctl(2) */ prctl(PR_SET_PDEATHSIG, SIGTERM); /* * If the mounts have shared propagation then we want to opt out so our * mount changes don't affect the parent mount namespace. */ if (mount(NULL, "/", NULL, MS_REC | MS_SLAVE, NULL) < 0) { fuse_log(FUSE_LOG_ERR, "mount(/, MS_REC|MS_SLAVE): %m\n"); exit(1); } /* The child must remount /proc to use the new pid namespace */ if (mount("proc", "/proc", "proc", MS_NODEV | MS_NOEXEC | MS_NOSUID | MS_RELATIME, NULL) < 0) { fuse_log(FUSE_LOG_ERR, "mount(/proc): %m\n"); exit(1); } /* * We only need /proc/self/fd. Prevent ".." from accessing parent * directories of /proc/self/fd by bind-mounting it over /proc. Since / was * previously remounted with MS_REC | MS_SLAVE this mount change only * affects our process. */ if (mount("/proc/self/fd", "/proc", NULL, MS_BIND, NULL) < 0) { fuse_log(FUSE_LOG_ERR, "mount(/proc/self/fd, MS_BIND): %m\n"); exit(1); } /* Get the /proc (actually /proc/self/fd, see above) file descriptor */ lo->proc_self_fd = open("/proc", O_PATH); if (lo->proc_self_fd == -1) { fuse_log(FUSE_LOG_ERR, "open(/proc, O_PATH): %m\n"); exit(1); } }
0
[]
qemu
6084633dff3a05d63176e06d7012c7e15aba15be
190,255,656,956,682,740,000,000,000,000,000,000,000
84
tools/virtiofsd: xattr name mappings: Add option Add an option to define mappings of xattr names so that the client and server filesystems see different views. This can be used to have different SELinux mappings as seen by the guest, to run the virtiofsd with less privileges (e.g. in a case where it can't set trusted/system/security xattrs but you want the guest to be able to), or to isolate multiple users of the same name; e.g. trusted attributes used by stacking overlayfs. A mapping engine is used with 3 simple rules; the rules can be combined to allow most useful mapping scenarios. The ruleset is defined by -o xattrmap='rules...'. This patch doesn't use the rule maps yet. Signed-off-by: Dr. David Alan Gilbert <[email protected]> Message-Id: <[email protected]> Reviewed-by: Stefan Hajnoczi <[email protected]> Signed-off-by: Dr. David Alan Gilbert <[email protected]>
static void br_ip4_multicast_leave_group(struct net_bridge *br, struct net_bridge_port *port, __be32 group) { struct br_ip br_group; if (ipv4_is_local_multicast(group)) return; br_group.u.ip4 = group; br_group.proto = htons(ETH_P_IP); br_multicast_leave_group(br, port, &br_group); }
0
[ "CWE-399" ]
linux
6b0d6a9b4296fa16a28d10d416db7a770fc03287
140,614,751,852,285,180,000,000,000,000,000,000,000
14
bridge: Fix mglist corruption that leads to memory corruption The list mp->mglist is used to indicate whether a multicast group is active on the bridge interface itself as opposed to one of the constituent interfaces in the bridge. Unfortunately the operation that adds the mp->mglist node to the list neglected to check whether it has already been added. This leads to list corruption in the form of nodes pointing to itself. Normally this would be quite obvious as it would cause an infinite loop when walking the list. However, as this list is never actually walked (which means that we don't really need it, I'll get rid of it in a subsequent patch), this instead is hidden until we perform a delete operation on the affected nodes. As the same node may now be pointed to by more than one node, the delete operations can then cause modification of freed memory. This was observed in practice to cause corruption in 512-byte slabs, most commonly leading to crashes in jbd2. Thanks to Josef Bacik for pointing me in the right direction. Reported-by: Ian Page Hands <[email protected]> Signed-off-by: Herbert Xu <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static sctp_disposition_t sctp_sf_violation_paramlen( const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, void *ext, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; struct sctp_paramhdr *param = ext; struct sctp_chunk *abort = NULL; if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) goto discard; /* Make the abort chunk. */ abort = sctp_make_violation_paramlen(asoc, chunk, param); if (!abort) goto nomem; sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED)); sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); SCTP_INC_STATS(SCTP_MIB_ABORTEDS); discard: sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands); return SCTP_DISPOSITION_ABORT; nomem: return SCTP_DISPOSITION_NOMEM; }
0
[ "CWE-119" ]
linux-2.6
9fcb95a105758b81ef0131cd18e2db5149f13e95
126,512,961,201,879,580,000,000,000,000,000,000,000
35
sctp: Avoid memory overflow while FWD-TSN chunk is received with bad stream ID If FWD-TSN chunk is received with bad stream ID, the sctp will not do the validity check, this may cause memory overflow when overwrite the TSN of the stream ID. The FORWARD-TSN chunk is like this: FORWARD-TSN chunk Type = 192 Flags = 0 Length = 172 NewTSN = 99 Stream = 10000 StreamSequence = 0xFFFF This patch fix this problem by discard the chunk if stream ID is not less than MIS. Signed-off-by: Wei Yongjun <[email protected]> Signed-off-by: Vlad Yasevich <[email protected]> Signed-off-by: David S. Miller <[email protected]>
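The overflow happens because a peer-supplied stream identifier indexes a per-stream table without first being checked against the number of inbound streams. The defence is a plain bounds check before the array write; the sketch below uses an illustrative MAX_INBOUND_STREAMS in place of the association's MIS and is not the kernel code.

    #include <inttypes.h>
    #include <stdio.h>

    #define MAX_INBOUND_STREAMS 16          /* stands in for the association's MIS */

    static uint32_t stream_ssn[MAX_INBOUND_STREAMS];

    /* Update per-stream state only if the peer-supplied stream id is in range;
     * otherwise discard the update, as the SCTP fix does with the whole chunk. */
    static int update_stream(uint16_t sid, uint32_t ssn)
    {
        if (sid >= MAX_INBOUND_STREAMS)
            return -1;                      /* out-of-range id: drop it */
        stream_ssn[sid] = ssn;
        return 0;
    }

    int main(void)
    {
        if (update_stream(10000, 0xFFFF) < 0)
            puts("bad stream id discarded");
        update_stream(3, 42);
        printf("stream 3 ssn = %" PRIu32 "\n", stream_ssn[3]);
        return 0;
    }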
static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { unsigned int num_queues = dev->real_num_tx_queues; u32 hash; u16 queue_idx; /* First, check if there is only one queue */ if (num_queues == 1) { queue_idx = 0; } else { hash = skb_get_hash(skb); queue_idx = hash % num_queues; } return queue_idx; }
0
[]
linux
f63c2c2032c2e3caad9add3b82cc6e91c376fd26
111,790,671,989,484,200,000,000,000,000,000,000,000
17
xen-netfront: restore __skb_queue_tail() positioning in xennet_get_responses() The commit referenced below moved the invocation past the "next" label, without any explanation. In fact this allows misbehaving backends undue control over the domain the frontend runs in, as earlier detected errors require the skb to not be freed (it may be retained for later processing via xennet_move_rx_slot(), or it may simply be unsafe to have it freed). This is CVE-2022-33743 / XSA-405. Fixes: 6c5aa6fc4def ("xen networking: add basic XDP support for xen-netfront") Signed-off-by: Jan Beulich <[email protected]> Reviewed-by: Juergen Gross <[email protected]> Signed-off-by: Juergen Gross <[email protected]>
static int pfkey_xfrm_policy2msg(struct sk_buff *skb, const struct xfrm_policy *xp, int dir) { struct sadb_msg *hdr; struct sadb_address *addr; struct sadb_lifetime *lifetime; struct sadb_x_policy *pol; struct sadb_x_sec_ctx *sec_ctx; struct xfrm_sec_ctx *xfrm_ctx; int i; int size; int sockaddr_size = pfkey_sockaddr_size(xp->family); int socklen = pfkey_sockaddr_len(xp->family); size = pfkey_xfrm_policy2msg_size(xp); /* call should fill header later */ hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg)); memset(hdr, 0, size); /* XXX do we need this ? */ /* src address */ addr = (struct sadb_address*) skb_put(skb, sizeof(struct sadb_address)+sockaddr_size); addr->sadb_address_len = (sizeof(struct sadb_address)+sockaddr_size)/ sizeof(uint64_t); addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC; addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto); addr->sadb_address_prefixlen = xp->selector.prefixlen_s; addr->sadb_address_reserved = 0; if (!pfkey_sockaddr_fill(&xp->selector.saddr, xp->selector.sport, (struct sockaddr *) (addr + 1), xp->family)) BUG(); /* dst address */ addr = (struct sadb_address*) skb_put(skb, sizeof(struct sadb_address)+sockaddr_size); addr->sadb_address_len = (sizeof(struct sadb_address)+sockaddr_size)/ sizeof(uint64_t); addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto); addr->sadb_address_prefixlen = xp->selector.prefixlen_d; addr->sadb_address_reserved = 0; pfkey_sockaddr_fill(&xp->selector.daddr, xp->selector.dport, (struct sockaddr *) (addr + 1), xp->family); /* hard time */ lifetime = (struct sadb_lifetime *) skb_put(skb, sizeof(struct sadb_lifetime)); lifetime->sadb_lifetime_len = sizeof(struct sadb_lifetime)/sizeof(uint64_t); lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_HARD; lifetime->sadb_lifetime_allocations = _X2KEY(xp->lft.hard_packet_limit); lifetime->sadb_lifetime_bytes = _X2KEY(xp->lft.hard_byte_limit); lifetime->sadb_lifetime_addtime = xp->lft.hard_add_expires_seconds; lifetime->sadb_lifetime_usetime = xp->lft.hard_use_expires_seconds; /* soft time */ lifetime = (struct sadb_lifetime *) skb_put(skb, sizeof(struct sadb_lifetime)); lifetime->sadb_lifetime_len = sizeof(struct sadb_lifetime)/sizeof(uint64_t); lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_SOFT; lifetime->sadb_lifetime_allocations = _X2KEY(xp->lft.soft_packet_limit); lifetime->sadb_lifetime_bytes = _X2KEY(xp->lft.soft_byte_limit); lifetime->sadb_lifetime_addtime = xp->lft.soft_add_expires_seconds; lifetime->sadb_lifetime_usetime = xp->lft.soft_use_expires_seconds; /* current time */ lifetime = (struct sadb_lifetime *) skb_put(skb, sizeof(struct sadb_lifetime)); lifetime->sadb_lifetime_len = sizeof(struct sadb_lifetime)/sizeof(uint64_t); lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT; lifetime->sadb_lifetime_allocations = xp->curlft.packets; lifetime->sadb_lifetime_bytes = xp->curlft.bytes; lifetime->sadb_lifetime_addtime = xp->curlft.add_time; lifetime->sadb_lifetime_usetime = xp->curlft.use_time; pol = (struct sadb_x_policy *) skb_put(skb, sizeof(struct sadb_x_policy)); pol->sadb_x_policy_len = sizeof(struct sadb_x_policy)/sizeof(uint64_t); pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY; pol->sadb_x_policy_type = IPSEC_POLICY_DISCARD; if (xp->action == XFRM_POLICY_ALLOW) { if (xp->xfrm_nr) pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC; else pol->sadb_x_policy_type = IPSEC_POLICY_NONE; } pol->sadb_x_policy_dir = dir+1; pol->sadb_x_policy_reserved = 0; pol->sadb_x_policy_id = 
xp->index; pol->sadb_x_policy_priority = xp->priority; for (i=0; i<xp->xfrm_nr; i++) { const struct xfrm_tmpl *t = xp->xfrm_vec + i; struct sadb_x_ipsecrequest *rq; int req_size; int mode; req_size = sizeof(struct sadb_x_ipsecrequest); if (t->mode == XFRM_MODE_TUNNEL) { socklen = pfkey_sockaddr_len(t->encap_family); req_size += socklen * 2; } else { size -= 2*socklen; } rq = (void*)skb_put(skb, req_size); pol->sadb_x_policy_len += req_size/8; memset(rq, 0, sizeof(*rq)); rq->sadb_x_ipsecrequest_len = req_size; rq->sadb_x_ipsecrequest_proto = t->id.proto; if ((mode = pfkey_mode_from_xfrm(t->mode)) < 0) return -EINVAL; rq->sadb_x_ipsecrequest_mode = mode; rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_REQUIRE; if (t->reqid) rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_UNIQUE; if (t->optional) rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_USE; rq->sadb_x_ipsecrequest_reqid = t->reqid; if (t->mode == XFRM_MODE_TUNNEL) { u8 *sa = (void *)(rq + 1); pfkey_sockaddr_fill(&t->saddr, 0, (struct sockaddr *)sa, t->encap_family); pfkey_sockaddr_fill(&t->id.daddr, 0, (struct sockaddr *) (sa + socklen), t->encap_family); } } /* security context */ if ((xfrm_ctx = xp->security)) { int ctx_size = pfkey_xfrm_policy2sec_ctx_size(xp); sec_ctx = (struct sadb_x_sec_ctx *) skb_put(skb, ctx_size); sec_ctx->sadb_x_sec_len = ctx_size / sizeof(uint64_t); sec_ctx->sadb_x_sec_exttype = SADB_X_EXT_SEC_CTX; sec_ctx->sadb_x_ctx_doi = xfrm_ctx->ctx_doi; sec_ctx->sadb_x_ctx_alg = xfrm_ctx->ctx_alg; sec_ctx->sadb_x_ctx_len = xfrm_ctx->ctx_len; memcpy(sec_ctx + 1, xfrm_ctx->ctx_str, xfrm_ctx->ctx_len); } hdr->sadb_msg_len = size / sizeof(uint64_t); hdr->sadb_msg_reserved = atomic_read(&xp->refcnt); return 0; }
0
[]
linux
096f41d3a8fcbb8dde7f71379b1ca85fe213eded
289,397,153,842,632,800,000,000,000,000,000,000,000
154
af_key: Fix sadb_x_ipsecrequest parsing The parsing of sadb_x_ipsecrequest is broken in a number of ways. First of all we're not verifying sadb_x_ipsecrequest_len. This is needed when the structure carries addresses at the end. Worse we don't even look at the length when we parse those optional addresses. The migration code had similar parsing code that's better but it also has some deficiencies. The length is overcounted first of all as it includes the header itself. It also fails to check the length before dereferencing the sa_family field. This patch fixes those problems in parse_sockaddr_pair and then uses it in parse_ipsecrequest. Reported-by: Andrey Konovalov <[email protected]> Signed-off-by: Herbert Xu <[email protected]> Signed-off-by: Steffen Klassert <[email protected]>
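The af_key fix validates the embedded length field of sadb_x_ipsecrequest before dereferencing anything behind it. The general shape of that parser check, applied to a hypothetical TLV-style record (struct req_hdr and parse_request are illustrative, not the af_key structures):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical request header followed by 'len - sizeof(header)' bytes of
     * trailing data; 'len' is attacker-controlled. */
    struct req_hdr {
        uint16_t len;      /* total length of header + trailing data, in bytes */
        uint16_t proto;
    };

    static int parse_request(const uint8_t *buf, size_t buflen)
    {
        struct req_hdr hdr;
        size_t payload;

        if (buflen < sizeof(hdr))
            return -1;                          /* cannot even read the header  */
        memcpy(&hdr, buf, sizeof(hdr));

        if (hdr.len < sizeof(hdr) || hdr.len > buflen)
            return -1;                          /* claimed length is impossible */

        payload = hdr.len - sizeof(hdr);        /* now provably in range        */
        printf("proto %u with %zu payload bytes\n", (unsigned)hdr.proto, payload);
        return 0;
    }

    int main(void)
    {
        uint8_t pkt[8] = { 200, 0, 50, 0 };     /* claims len=200, only 8 bytes */

        if (parse_request(pkt, sizeof(pkt)) < 0)
            puts("malformed request rejected");
        return 0;
    }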
static UserName extractUserNameFromBSON(const BSONObj& userObj) { std::string name; std::string db; Status status = bsonExtractStringField(userObj, AuthorizationManager::USER_NAME_FIELD_NAME, &name); uassertStatusOK(status); status = bsonExtractStringField(userObj, AuthorizationManager::USER_DB_FIELD_NAME, &db); uassertStatusOK(status); return UserName(name, db); }
0
[ "CWE-613" ]
mongo
e55d6e2292e5dbe2f97153251d8193d1cc89f5d7
83,891,067,801,508,870,000,000,000,000,000,000,000
10
SERVER-38984 Validate unique User ID on UserCache hit
int Field_str::store(longlong nr, bool unsigned_val) { char buff[64]; uint length; length= (uint) (field_charset->cset->longlong10_to_str)(field_charset, buff, sizeof(buff), (unsigned_val ? 10: -10), nr); return store(buff, length, field_charset); }
0
[ "CWE-416", "CWE-703" ]
server
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
225,220,348,068,259,840,000,000,000,000,000,000,000
12
MDEV-24176 Server crashes after insert in the table with virtual column generated using date_format() and if() vcol_info->expr is allocated on expr_arena at parsing stage. Since expr item is allocated on expr_arena all its containee items must be allocated on expr_arena too. Otherwise fix_session_expr() will encounter prematurely freed item. When table is reopened from cache vcol_info contains stale expression. We refresh expression via TABLE::vcol_fix_exprs() but first we must prepare a proper context (Vcol_expr_context) which meets some requirements: 1. As noted above expr update must be done on expr_arena as there may be new items created. It was a bug in fix_session_expr_for_read() and was just not reproduced because of no second refix. Now refix is done for more cases so it does reproduce. Tests affected: vcol.binlog 2. Also name resolution context must be narrowed to the single table. Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes 3. sql_mode must be clean and not fail expr update. sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc must not affect vcol expression update. If the table was created successfully any further evaluation must not fail. Tests affected: main.func_like Reviewed by: Sergei Golubchik <[email protected]>
_get_user_env(batch_job_launch_msg_t *req) { struct passwd pwd, *pwd_ptr = NULL; char pwd_buf[PW_BUF_SIZE]; char **new_env; int i; static time_t config_update = 0; static bool no_env_cache = false; if (config_update != conf->last_update) { char *sched_params = slurm_get_sched_params(); no_env_cache = (sched_params && strstr(sched_params, "no_env_cache")); xfree(sched_params); config_update = conf->last_update; } for (i=0; i<req->envc; i++) { if (xstrcmp(req->environment[i], "SLURM_GET_USER_ENV=1") == 0) break; } if (i >= req->envc) return 0; /* don't need to load env */ if (slurm_getpwuid_r(req->uid, &pwd, pwd_buf, PW_BUF_SIZE, &pwd_ptr) || (pwd_ptr == NULL)) { error("%s: getpwuid_r(%u):%m", __func__, req->uid); return -1; } verbose("%s: get env for user %s here", __func__, pwd.pw_name); /* Permit up to 120 second delay before using cache file */ new_env = env_array_user_default(pwd.pw_name, 120, 0, no_env_cache); if (! new_env) { error("%s: Unable to get user's local environment%s", __func__, no_env_cache ? "" : ", running only with passed environment"); return -1; } env_array_merge(&new_env, (const char **) req->environment); env_array_free(req->environment); req->environment = new_env; req->envc = envcount(new_env); return 0; }
0
[ "CWE-20" ]
slurm
df545955e4f119974c278bff0c47155257d5afc7
122,162,760,716,756,300,000,000,000,000,000,000,000
48
Validate gid and user_name values provided to slurmd up front. Do not defer until later, and do not potentially miss out on proper validation of the user_name field which can lead to improper authentication handling. CVE-2018-10995.
s16 rssi_compensation_calc(struct ar6_softc *ar, s16 rssi) { if (ar->arBssChannel > 5000) { if (rssi_compensation_param.enable) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11a\n")); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before compensation = %d\n", rssi)); rssi = rssi * rssi_compensation_param.a_param_a + rssi_compensation_param.a_param_b; rssi = (rssi-50) /100; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after compensation = %d\n", rssi)); } } else { if (rssi_compensation_param.enable) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11bg\n")); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before compensation = %d\n", rssi)); rssi = rssi * rssi_compensation_param.bg_param_a + rssi_compensation_param.bg_param_b; rssi = (rssi-50) /100; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after compensation = %d\n", rssi)); } } return rssi; }
0
[ "CWE-703", "CWE-264" ]
linux
550fd08c2cebad61c548def135f67aba284c6162
16,538,668,955,919,197,000,000,000,000,000,000,000
27
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared After the last patch, We are left in a state in which only drivers calling ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real hardware call ether_setup for their net_devices and don't hold any state in their skbs. There are a handful of drivers that violate this assumption of course, and need to be fixed up. This patch identifies those drivers, and marks them as not being able to support the safe transmission of skbs by clearning the IFF_TX_SKB_SHARING flag in priv_flags Signed-off-by: Neil Horman <[email protected]> CC: Karsten Keil <[email protected]> CC: "David S. Miller" <[email protected]> CC: Jay Vosburgh <[email protected]> CC: Andy Gospodarek <[email protected]> CC: Patrick McHardy <[email protected]> CC: Krzysztof Halasa <[email protected]> CC: "John W. Linville" <[email protected]> CC: Greg Kroah-Hartman <[email protected]> CC: Marcel Holtmann <[email protected]> CC: Johannes Berg <[email protected]> Signed-off-by: David S. Miller <[email protected]>
add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec, struct cifs_open_parms *oparms) { struct smb2_create_req *req = iov[0].iov_base; unsigned int num = *num_iovec; iov[num].iov_base = create_durable_v2_buf(oparms); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = sizeof(struct create_durable_v2); if (!req->CreateContextsOffset) req->CreateContextsOffset = cpu_to_le32(sizeof(struct smb2_create_req) + iov[1].iov_len); le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_v2)); *num_iovec = num + 1; return 0; }
0
[ "CWE-416", "CWE-200" ]
linux
6a3eb3360667170988f8a6477f6686242061488a
287,078,085,080,108,840,000,000,000,000,000,000,000
18
cifs: Fix use-after-free in SMB2_write There is a KASAN use-after-free: BUG: KASAN: use-after-free in SMB2_write+0x1342/0x1580 Read of size 8 at addr ffff8880b6a8e450 by task ln/4196 Should not release the 'req' because it will be used in the trace. Fixes: eccb4422cf97 ("smb3: Add ftrace tracepoints for improved SMB3 debugging") Signed-off-by: ZhangXiaoxu <[email protected]> Signed-off-by: Steve French <[email protected]> CC: Stable <[email protected]> 4.18+ Reviewed-by: Pavel Shilovsky <[email protected]>
ParseState(PolicyParser* pp, const Keyword* w) : pp(pp), w(w) {}
0
[ "CWE-617" ]
ceph
b3118cabb8060a8cc6a01c4e8264cb18e7b1745a
156,425,447,627,573,610,000,000,000,000,000,000,000
2
rgw: Remove assertions in IAM Policy A couple of them could be triggered by user input. Signed-off-by: Adam C. Emerson <[email protected]>
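The change turns bad policy input into an error return instead of an assertion failure, so remote users cannot abort the daemon. A tiny sketch of that policy with a hypothetical parse_effect helper, not the actual rgw IAM parser:

    #include <stdio.h>
    #include <string.h>

    /* Parse a value that ultimately comes from a remote user: return an error
     * code instead of assert()ing, so hostile input cannot abort the daemon. */
    static int parse_effect(const char *s, int *allow_out)
    {
        if (s == NULL)
            return -1;
        if (strcmp(s, "Allow") == 0) { *allow_out = 1; return 0; }
        if (strcmp(s, "Deny") == 0)  { *allow_out = 0; return 0; }
        return -1;                   /* unknown keyword: reject, do not abort */
    }

    int main(void)
    {
        int allow = 0;

        if (parse_effect("Maybe", &allow) < 0)
            puts("rejected malformed policy effect");
        return 0;
    }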
parser_parse_object_initializer (parser_context_t *context_p, /**< context */ parser_pattern_flags_t flags) /**< flags */ { parser_pattern_end_marker_t end_pos = parser_pattern_get_target (context_p, flags); /* 12.14.5.2: ObjectAssignmentPattern : { } */ if (lexer_check_next_character (context_p, LIT_CHAR_RIGHT_BRACE)) { parser_emit_cbc_ext (context_p, CBC_EXT_REQUIRE_OBJECT_COERCIBLE); lexer_consume_next_character (context_p); parser_pattern_finalize (context_p, flags, &end_pos); return; } cbc_ext_opcode_t context_opcode = CBC_EXT_OBJ_INIT_CONTEXT_CREATE; if (flags & PARSER_PATTERN_HAS_REST_ELEMENT) { context_opcode = CBC_EXT_OBJ_INIT_REST_CONTEXT_CREATE; } parser_emit_cbc_ext (context_p, context_opcode); while (true) { lexer_expect_object_literal_id (context_p, LEXER_OBJ_IDENT_OBJECT_PATTERN); uint16_t prop_index = context_p->lit_object.index; parser_line_counter_t start_line = context_p->token.line; parser_line_counter_t start_column = context_p->token.column; uint16_t push_prop_opcode = CBC_EXT_INITIALIZER_PUSH_PROP_LITERAL; if (context_p->token.type == LEXER_RIGHT_BRACE) { break; } if (context_p->token.type == LEXER_THREE_DOTS) { lexer_next_token (context_p); flags |= PARSER_PATTERN_REST_ELEMENT; if (parser_pattern_process_assignment (context_p, flags, CBC_EXT_OBJ_INIT_PUSH_REST, PARSER_PATTERN_RHS_NO_LIT, LEXER_RIGHT_BRACE)) { parser_raise_error (context_p, PARSER_ERR_INVALID_LHS_ASSIGNMENT); } if (context_p->token.type != LEXER_RIGHT_BRACE) { parser_raise_error (context_p, PARSER_ERR_RIGHT_BRACE_EXPECTED); } /* Checked at the end because there might be syntax errors before. */ JERRY_ASSERT (flags & PARSER_PATTERN_HAS_REST_ELEMENT); break; } if (context_p->token.type == LEXER_RIGHT_SQUARE) { prop_index = PARSER_PATTERN_RHS_NO_LIT; push_prop_opcode = ((flags & PARSER_PATTERN_HAS_REST_ELEMENT) ? 
CBC_EXT_INITIALIZER_PUSH_NAME : CBC_EXT_INITIALIZER_PUSH_PROP); } else if (flags & PARSER_PATTERN_HAS_REST_ELEMENT) { push_prop_opcode = CBC_EXT_INITIALIZER_PUSH_NAME_LITERAL; } if (context_p->next_scanner_info_p->source_p == context_p->source_p) { JERRY_ASSERT (context_p->next_scanner_info_p->type == SCANNER_TYPE_ERR_REDECLARED); parser_raise_error (context_p, PARSER_ERR_VARIABLE_REDECLARED); } lexer_next_token (context_p); if (context_p->token.type == LEXER_COLON) { lexer_next_token (context_p); parser_pattern_process_assignment (context_p, flags, push_prop_opcode, prop_index, LEXER_RIGHT_BRACE); } else { if (push_prop_opcode == CBC_EXT_INITIALIZER_PUSH_NAME || push_prop_opcode == CBC_EXT_INITIALIZER_PUSH_PROP) { parser_raise_error (context_p, PARSER_ERR_COLON_EXPECTED); } if (context_p->token.type != LEXER_RIGHT_BRACE && context_p->token.type != LEXER_ASSIGN && context_p->token.type != LEXER_COMMA) { parser_raise_error (context_p, PARSER_ERR_OBJECT_ITEM_SEPARATOR_EXPECTED); } parser_reparse_as_common_identifier (context_p, start_line, start_column); if (flags & PARSER_PATTERN_ARGUMENTS) { if (context_p->lit_object.literal_p->status_flags & LEXER_FLAG_FUNCTION_ARGUMENT) { parser_raise_error (context_p, PARSER_ERR_VARIABLE_REDECLARED); } context_p->lit_object.literal_p->status_flags |= LEXER_FLAG_FUNCTION_ARGUMENT; } #if JERRY_MODULE_SYSTEM parser_module_append_export_name (context_p); #endif /* JERRY_MODULE_SYSTEM */ parser_emit_cbc_literal_from_token (context_p, CBC_PUSH_LITERAL); lexer_next_token (context_p); JERRY_ASSERT (context_p->token.type == LEXER_RIGHT_BRACE || context_p->token.type == LEXER_ASSIGN || context_p->token.type == LEXER_COMMA); parser_pattern_form_assignment (context_p, flags, push_prop_opcode, prop_index, start_line); #if JERRY_LINE_INFO parser_line_info_append (context_p, start_line, start_column); #endif /* JERRY_LINE_INFO */ } if (context_p->token.type == LEXER_RIGHT_BRACE) { break; } else if (context_p->token.type != LEXER_COMMA) { parser_raise_error (context_p, PARSER_ERR_OBJECT_ITEM_SEPARATOR_EXPECTED); } } if (flags & PARSER_PATTERN_HAS_REST_ELEMENT) { PARSER_MINUS_EQUAL_U16 (context_p->stack_depth, (PARSER_OBJ_INIT_REST_CONTEXT_STACK_ALLOCATION - PARSER_OBJ_INIT_CONTEXT_STACK_ALLOCATION)); } parser_emit_cbc_ext (context_p, CBC_EXT_OBJ_INIT_CONTEXT_END); parser_pattern_finalize (context_p, flags, &end_pos); } /* parser_parse_object_initializer */
0
[ "CWE-416" ]
jerryscript
3bcd48f72d4af01d1304b754ef19fe1a02c96049
254,709,296,871,445,630,000,000,000,000,000,000,000
149
Improve parse_identifier (#4691) Ascii string length is no longer computed during string allocation. JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz [email protected]
size_t jsuGetFreeStack() {
#ifdef ARM
  void *frame = __builtin_frame_address(0);
  size_t stackPos = (size_t)((char*)frame);
  size_t stackEnd = (size_t)((char*)&LINKER_END_VAR);
  if (stackPos < stackEnd) return 0; // should never happen, but just in case of overflow!
  return stackPos - stackEnd;
#elif defined(LINUX)
  // On linux, we set STACK_BASE from `main`.
  char ptr; // this is on the stack
  extern void *STACK_BASE;
  uint32_t count = (uint32_t)((size_t)STACK_BASE - (size_t)&ptr);
  const uint32_t max_stack = 1000000; // give it 1 megabyte of stack
  if (count>max_stack) return 0;
  return max_stack - count;
#else
  // stack depth seems pretty platform-specific :( Default to a value that disables it
  return 1000000; // no stack depth check on this platform
#endif
}
0
[ "CWE-119", "CWE-787" ]
Espruino
0a7619875bf79877907205f6bee08465b89ff10b
86,633,633,545,772,160,000,000,000,000,000,000,000
20
Fix strncat/cpy bounding issues (fix #1425)
alloc_compspace(isc_httpd_t *httpd, unsigned int size) {
	char *newspace = NULL;
	isc_region_t r;

	isc_buffer_region(&httpd->compbuffer, &r);
	if (size < r.length) {
		return;
	}

	newspace = isc_mem_get(httpd->mgr->mctx, size);
	isc_buffer_reinit(&httpd->compbuffer, newspace, size);

	if (r.base != NULL) {
		isc_mem_put(httpd->mgr->mctx, r.base, r.length);
	}
}
0
[]
bind9
d4c5d1c650ae0e97a083b0ce7a705c20fc001f07
270,709,720,787,245,800,000,000,000,000,000,000,000
16
Fix statistics channel multiple request processing with non-empty bodies When the HTTP request has a body part after the HTTP headers, it is not getting processed and is being prepended to the next request's data, which results in an error when trying to parse it. Improve the httpd.c:process_request() function with the following additions: 1. Require that HTTP POST requests must have Content-Length header. 2. When Content-Length header is set, extract its value, and make sure that it is valid and that the whole request's body is received before processing the request. 3. Discard the request's body by consuming Content-Length worth of data in the buffer. (cherry picked from commit c2bbdc8a648c9630b2c9cea5227ad5c309c2ade5)
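The fix above boils down to three checks on the HTTP request body: require Content-Length on POST, validate its value, and consume exactly that many bytes once the whole body has been buffered. A minimal standalone C sketch of that bookkeeping, with hypothetical names rather than the actual httpd.c code:

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* Illustrative only: parse Content-Length, reject bad values, and report
 * whether the whole body is already buffered so it can be consumed before
 * the next request is parsed. */
typedef enum { BODY_ERROR = -1, BODY_NEED_MORE = 0, BODY_COMPLETE = 1 } body_state_t;

static body_state_t
check_request_body(const char *content_length, size_t buffered, size_t *consume)
{
	char *end = NULL;
	unsigned long long len;

	if (content_length == NULL)		/* e.g. POST without Content-Length */
		return (BODY_ERROR);

	errno = 0;
	len = strtoull(content_length, &end, 10);
	if (errno != 0 || end == content_length || *end != '\0')
		return (BODY_ERROR);		/* not a valid number */

	if (buffered < len)
		return (BODY_NEED_MORE);	/* wait for the rest of the body */

	*consume = (size_t)len;			/* discard exactly the body bytes */
	return (BODY_COMPLETE);
}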
maybe_send_secure(dns_zone_t *zone) { isc_result_t result; /* * We've finished loading, or else failed to load, an inline-signing * 'secure' zone. We now need information about the status of the * 'raw' zone. If we failed to load, then we need it to send a * copy of its database; if we succeeded, we need it to send its * serial number so that we can sync with it. If it has not yet * loaded, we set a flag so that it will send the necessary * information when it has finished loading. */ if (zone->raw->db != NULL) { if (zone->db != NULL) { uint32_t serial; unsigned int soacount; result = zone_get_from_db(zone->raw, zone->raw->db, NULL, &soacount, &serial, NULL, NULL, NULL, NULL, NULL); if (result == ISC_R_SUCCESS && soacount > 0U) zone_send_secureserial(zone->raw, serial); } else zone_send_securedb(zone->raw, zone->raw->db); } else DNS_ZONE_SETFLAG(zone->raw, DNS_ZONEFLG_SENDSECURE); }
0
[ "CWE-327" ]
bind9
f09352d20a9d360e50683cd1d2fc52ccedcd77a0
325,477,012,273,263,600,000,000,000,000,000,000,000
28
Update keyfetch_done compute_tag check If in keyfetch_done the compute_tag fails (because for example the algorithm is not supported), don't crash, but instead ignore the key.
static int reg_event_syscall_enter(struct ftrace_event_file *file, struct ftrace_event_call *call) { struct trace_array *tr = file->tr; int ret = 0; int num; num = ((struct syscall_metadata *)call->data)->syscall_nr; if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls)) return -ENOSYS; mutex_lock(&syscall_trace_lock); if (!tr->sys_refcount_enter) ret = register_trace_sys_enter(ftrace_syscall_enter, tr); if (!ret) { rcu_assign_pointer(tr->enter_syscall_files[num], file); tr->sys_refcount_enter++; } mutex_unlock(&syscall_trace_lock); return ret; }
0
[ "CWE-125", "CWE-476", "CWE-119", "CWE-264" ]
linux
086ba77a6db00ed858ff07451bedee197df868c9
133,683,009,287,343,340,000,000,000,000,000,000,000
20
tracing/syscalls: Ignore numbers outside NR_syscalls' range ARM has some private syscalls (for example, set_tls(2)) which lie outside the range of NR_syscalls. If any of these are called while syscall tracing is being performed, out-of-bounds array access will occur in the ftrace and perf sys_{enter,exit} handlers. # trace-cmd record -e raw_syscalls:* true && trace-cmd report ... true-653 [000] 384.675777: sys_enter: NR 192 (0, 1000, 3, 4000022, ffffffff, 0) true-653 [000] 384.675812: sys_exit: NR 192 = 1995915264 true-653 [000] 384.675971: sys_enter: NR 983045 (76f74480, 76f74000, 76f74b28, 76f74480, 76f76f74, 1) true-653 [000] 384.675988: sys_exit: NR 983045 = 0 ... # trace-cmd record -e syscalls:* true [ 17.289329] Unable to handle kernel paging request at virtual address aaaaaace [ 17.289590] pgd = 9e71c000 [ 17.289696] [aaaaaace] *pgd=00000000 [ 17.289985] Internal error: Oops: 5 [#1] PREEMPT SMP ARM [ 17.290169] Modules linked in: [ 17.290391] CPU: 0 PID: 704 Comm: true Not tainted 3.18.0-rc2+ #21 [ 17.290585] task: 9f4dab00 ti: 9e710000 task.ti: 9e710000 [ 17.290747] PC is at ftrace_syscall_enter+0x48/0x1f8 [ 17.290866] LR is at syscall_trace_enter+0x124/0x184 Fix this by ignoring out-of-NR_syscalls-bounds syscall numbers. Commit cd0980fc8add "tracing: Check invalid syscall nr while tracing syscalls" added the check for less than zero, but it should have also checked for greater than NR_syscalls. Link: http://lkml.kernel.org/p/[email protected] Fixes: cd0980fc8add "tracing: Check invalid syscall nr while tracing syscalls" Cc: [email protected] # 2.6.33+ Signed-off-by: Rabin Vincent <[email protected]> Signed-off-by: Steven Rostedt <[email protected]>
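The guard described above is a plain range check before the syscall number is used as an array index. A minimal sketch of the idea, with an illustrative NR_SYSCALLS value rather than the kernel's real table:

/* Any syscall number outside [0, NR_SYSCALLS) is simply ignored so it can
 * never index past the end of the per-syscall arrays. */
#define NR_SYSCALLS 400			/* illustrative value, not the real one */

static int syscall_refcount[NR_SYSCALLS];

static int trace_syscall_enter(int syscall_nr)
{
	if (syscall_nr < 0 || syscall_nr >= NR_SYSCALLS)
		return -1;		/* private/out-of-range numbers: do nothing */
	syscall_refcount[syscall_nr]++;
	return 0;
}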
static void cirrus_update_bank_ptr(CirrusVGAState * s, unsigned bank_index) { unsigned offset; unsigned limit; if ((s->vga.gr[0x0b] & 0x01) != 0) /* dual bank */ offset = s->vga.gr[0x09 + bank_index]; else /* single bank */ offset = s->vga.gr[0x09]; if ((s->vga.gr[0x0b] & 0x20) != 0) offset <<= 14; else offset <<= 12; if (s->real_vram_size <= offset) limit = 0; else limit = s->real_vram_size - offset; if (((s->vga.gr[0x0b] & 0x01) == 0) && (bank_index != 0)) { if (limit > 0x8000) { offset += 0x8000; limit -= 0x8000; } else { limit = 0; } } if (limit > 0) { s->cirrus_bank_base[bank_index] = offset; s->cirrus_bank_limit[bank_index] = limit; } else { s->cirrus_bank_base[bank_index] = 0; s->cirrus_bank_limit[bank_index] = 0; } }
0
[ "CWE-119" ]
qemu
026aeffcb4752054830ba203020ed6eb05bcaba8
114,277,140,663,719,480,000,000,000,000,000,000,000
37
cirrus: stop passing around dst pointers in the blitter Instead pass around the address (aka offset into vga memory). Calculate the pointer in the rop_* functions, after applying the mask to the address, to make sure the address stays within the valid range. Signed-off-by: Gerd Hoffmann <[email protected]> Message-id: [email protected]
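The blitter change above replaces raw destination pointers with offsets that are wrapped to the video memory size before the pointer is formed. A small self-contained sketch of that pattern, with made-up sizes and names:

#include <stdint.h>

/* The offset is masked first, then turned into a pointer, so the result can
 * never point outside the backing buffer. VRAM_SIZE is a power of two here
 * purely for illustration. */
#define VRAM_SIZE  (4u * 1024u * 1024u)
#define VRAM_MASK  (VRAM_SIZE - 1u)

static uint8_t vram[VRAM_SIZE];

static uint8_t *vram_ptr(uint32_t addr)
{
	return &vram[addr & VRAM_MASK];		/* mask first, then dereference */
}

static void rop_copy_byte(uint32_t dst_addr, uint32_t src_addr)
{
	*vram_ptr(dst_addr) = *vram_ptr(src_addr);
}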
static int iowarrior_open(struct inode *inode, struct file *file) { struct iowarrior *dev = NULL; struct usb_interface *interface; int subminor; int retval = 0; mutex_lock(&iowarrior_mutex); subminor = iminor(inode); interface = usb_find_interface(&iowarrior_driver, subminor); if (!interface) { mutex_unlock(&iowarrior_mutex); printk(KERN_ERR "%s - error, can't find device for minor %d\n", __func__, subminor); return -ENODEV; } mutex_lock(&iowarrior_open_disc_lock); dev = usb_get_intfdata(interface); if (!dev) { mutex_unlock(&iowarrior_open_disc_lock); mutex_unlock(&iowarrior_mutex); return -ENODEV; } mutex_lock(&dev->mutex); mutex_unlock(&iowarrior_open_disc_lock); /* Only one process can open each device, no sharing. */ if (dev->opened) { retval = -EBUSY; goto out; } /* setup interrupt handler for receiving values */ if ((retval = usb_submit_urb(dev->int_in_urb, GFP_KERNEL)) < 0) { dev_err(&interface->dev, "Error %d while submitting URB\n", retval); retval = -EFAULT; goto out; } /* increment our usage count for the driver */ ++dev->opened; /* save our object in the file's private structure */ file->private_data = dev; retval = 0; out: mutex_unlock(&dev->mutex); mutex_unlock(&iowarrior_mutex); return retval; }
0
[ "CWE-703" ]
linux
4ec0ef3a82125efc36173062a50624550a900ae0
53,843,067,312,119,430,000,000,000,000,000,000,000
52
USB: iowarrior: fix oops with malicious USB descriptors The iowarrior driver expects at least one valid endpoint. If given malicious descriptors that specify 0 for the number of endpoints, it will crash in the probe function. Ensure there is at least one endpoint on the interface before using it. The full report of this issue can be found here: http://seclists.org/bugtraq/2016/Mar/87 Reported-by: Ralf Spenneberg <[email protected]> Cc: stable <[email protected]> Signed-off-by: Josh Boyer <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
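The probe fix above amounts to refusing interfaces that report zero endpoints before endpoint[0] is ever touched. A hedged sketch with a stand-in struct, not the USB core's real types:

#include <stddef.h>

struct fake_interface {
	int num_endpoints;
	struct { int address; } endpoint[16];
};

static int probe(struct fake_interface *intf)
{
	if (intf == NULL || intf->num_endpoints < 1)
		return -1;			/* malicious/empty descriptor: bail out */
	return intf->endpoint[0].address;	/* now known to be safe */
}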
template<typename t>
    CImg<T>& operator+=(const t value) {
      if (is_empty()) return *this;
      cimg_openmp_for(*this,*ptr + value,524288);
      return *this;
0
[ "CWE-119", "CWE-787" ]
CImg
ac8003393569aba51048c9d67e1491559877b1d1
162,058,484,971,064,710,000,000,000,000,000,000,000
5
.
static int mpage_da_map_blocks(struct mpage_da_data *mpd) { int err, blks, get_blocks_flags; struct buffer_head new; sector_t next = mpd->b_blocknr; unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; loff_t disksize = EXT4_I(mpd->inode)->i_disksize; handle_t *handle = NULL; /* * We consider only non-mapped and non-allocated blocks */ if ((mpd->b_state & (1 << BH_Mapped)) && !(mpd->b_state & (1 << BH_Delay)) && !(mpd->b_state & (1 << BH_Unwritten))) return 0; /* * If we didn't accumulate anything to write simply return */ if (!mpd->b_size) return 0; handle = ext4_journal_current_handle(); BUG_ON(!handle); /* * Call ext4_get_blocks() to allocate any delayed allocation * blocks, or to convert an uninitialized extent to be * initialized (in the case where we have written into * one or more preallocated blocks). * * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to * indicate that we are on the delayed allocation path. This * affects functions in many different parts of the allocation * call path. This flag exists primarily because we don't * want to change *many* call functions, so ext4_get_blocks() * will set the magic i_delalloc_reserved_flag once the * inode's allocation semaphore is taken. * * If the blocks in questions were delalloc blocks, set * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting * variables are updated after the blocks have been allocated. */ new.b_state = 0; get_blocks_flags = EXT4_GET_BLOCKS_CREATE; if (mpd->b_state & (1 << BH_Delay)) get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks, &new, get_blocks_flags); if (blks < 0) { err = blks; /* * If get block returns with error we simply * return. Later writepage will redirty the page and * writepages will find the dirty page again */ if (err == -EAGAIN) return 0; if (err == -ENOSPC && ext4_count_free_blocks(mpd->inode->i_sb)) { mpd->retval = err; return 0; } /* * get block failure will cause us to loop in * writepages, because a_ops->writepage won't be able * to make progress. The page will be redirtied by * writepage and writepages will again try to write * the same. */ ext4_msg(mpd->inode->i_sb, KERN_CRIT, "delayed block allocation failed for inode %lu at " "logical offset %llu with max blocks %zd with " "error %d\n", mpd->inode->i_ino, (unsigned long long) next, mpd->b_size >> mpd->inode->i_blkbits, err); printk(KERN_CRIT "This should not happen!! " "Data will be lost\n"); if (err == -ENOSPC) { ext4_print_free_blocks(mpd->inode); } /* invalidate all the pages */ ext4_da_block_invalidatepages(mpd, next, mpd->b_size >> mpd->inode->i_blkbits); return err; } BUG_ON(blks == 0); new.b_size = (blks << mpd->inode->i_blkbits); if (buffer_new(&new)) __unmap_underlying_blocks(mpd->inode, &new); /* * If blocks are delayed marked, we need to * put actual blocknr and drop delayed bit */ if ((mpd->b_state & (1 << BH_Delay)) || (mpd->b_state & (1 << BH_Unwritten))) mpage_put_bnr_to_bhs(mpd, next, &new); if (ext4_should_order_data(mpd->inode)) { err = ext4_jbd2_file_inode(handle, mpd->inode); if (err) return err; } /* * Update on-disk size along with block allocation. */ disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits; if (disksize > i_size_read(mpd->inode)) disksize = i_size_read(mpd->inode); if (disksize > EXT4_I(mpd->inode)->i_disksize) { ext4_update_i_disksize(mpd->inode, disksize); return ext4_mark_inode_dirty(handle, mpd->inode); } return 0; }
1
[ "CWE-703" ]
linux
744692dc059845b2a3022119871846e74d4f6e11
87,840,566,716,022,190,000,000,000,000,000,000,000
124
ext4: use ext4_get_block_write in buffer write Allocate uninitialized extent before ext4 buffer write and convert the extent to initialized after io completes. The purpose is to make sure an extent can only be marked initialized after it has been written with new data so we can safely drop the i_mutex lock in ext4 DIO read without exposing stale data. This helps to improve multi-thread DIO read performance on high-speed disks. Skip the nobh and data=journal mount cases to make things simple for now. Signed-off-by: Jiaying Zhang <[email protected]> Signed-off-by: "Theodore Ts'o" <[email protected]>
static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv)
{
	/* If template ops are set, no work to do for us. */
	if (!tmplt_ops)
		return;

	tmplt_ops->tmplt_destroy(tmplt_priv);
	module_put(tmplt_ops->owner);
}
0
[ "CWE-416" ]
linux
04c2a47ffb13c29778e2a14e414ad4cb5a5db4b5
158,537,608,382,966,400,000,000,000,000,000,000,000
10
net: sched: fix use-after-free in tc_new_tfilter() Whenever tc_new_tfilter() jumps back to replay: label, we need to make sure @q and @chain local variables are cleared again, or risk use-after-free as in [1] For consistency, apply the same fix in tc_ctl_chain() BUG: KASAN: use-after-free in mini_qdisc_pair_swap+0x1b9/0x1f0 net/sched/sch_generic.c:1581 Write of size 8 at addr ffff8880985c4b08 by task syz-executor.4/1945 CPU: 0 PID: 1945 Comm: syz-executor.4 Not tainted 5.17.0-rc1-syzkaller-00495-gff58831fa02d #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: <TASK> __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106 print_address_description.constprop.0.cold+0x8d/0x336 mm/kasan/report.c:255 __kasan_report mm/kasan/report.c:442 [inline] kasan_report.cold+0x83/0xdf mm/kasan/report.c:459 mini_qdisc_pair_swap+0x1b9/0x1f0 net/sched/sch_generic.c:1581 tcf_chain_head_change_item net/sched/cls_api.c:372 [inline] tcf_chain0_head_change.isra.0+0xb9/0x120 net/sched/cls_api.c:386 tcf_chain_tp_insert net/sched/cls_api.c:1657 [inline] tcf_chain_tp_insert_unique net/sched/cls_api.c:1707 [inline] tc_new_tfilter+0x1e67/0x2350 net/sched/cls_api.c:2086 rtnetlink_rcv_msg+0x80d/0xb80 net/core/rtnetlink.c:5583 netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2494 netlink_unicast_kernel net/netlink/af_netlink.c:1317 [inline] netlink_unicast+0x539/0x7e0 net/netlink/af_netlink.c:1343 netlink_sendmsg+0x904/0xe00 net/netlink/af_netlink.c:1919 sock_sendmsg_nosec net/socket.c:705 [inline] sock_sendmsg+0xcf/0x120 net/socket.c:725 ____sys_sendmsg+0x331/0x810 net/socket.c:2413 ___sys_sendmsg+0xf3/0x170 net/socket.c:2467 __sys_sendmmsg+0x195/0x470 net/socket.c:2553 __do_sys_sendmmsg net/socket.c:2582 [inline] __se_sys_sendmmsg net/socket.c:2579 [inline] __x64_sys_sendmmsg+0x99/0x100 net/socket.c:2579 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae RIP: 0033:0x7f2647172059 Code: ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007f2645aa5168 EFLAGS: 00000246 ORIG_RAX: 0000000000000133 RAX: ffffffffffffffda RBX: 00007f2647285100 RCX: 00007f2647172059 RDX: 040000000000009f RSI: 00000000200002c0 RDI: 0000000000000006 RBP: 00007f26471cc08d R08: 0000000000000000 R09: 0000000000000000 R10: 9e00000000000000 R11: 0000000000000246 R12: 0000000000000000 R13: 00007fffb3f7f02f R14: 00007f2645aa5300 R15: 0000000000022000 </TASK> Allocated by task 1944: kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38 kasan_set_track mm/kasan/common.c:45 [inline] set_alloc_info mm/kasan/common.c:436 [inline] ____kasan_kmalloc mm/kasan/common.c:515 [inline] ____kasan_kmalloc mm/kasan/common.c:474 [inline] __kasan_kmalloc+0xa9/0xd0 mm/kasan/common.c:524 kmalloc_node include/linux/slab.h:604 [inline] kzalloc_node include/linux/slab.h:726 [inline] qdisc_alloc+0xac/0xa10 net/sched/sch_generic.c:941 qdisc_create.constprop.0+0xce/0x10f0 net/sched/sch_api.c:1211 tc_modify_qdisc+0x4c5/0x1980 net/sched/sch_api.c:1660 rtnetlink_rcv_msg+0x413/0xb80 net/core/rtnetlink.c:5592 netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2494 netlink_unicast_kernel net/netlink/af_netlink.c:1317 [inline] netlink_unicast+0x539/0x7e0 net/netlink/af_netlink.c:1343 netlink_sendmsg+0x904/0xe00 net/netlink/af_netlink.c:1919 sock_sendmsg_nosec 
net/socket.c:705 [inline] sock_sendmsg+0xcf/0x120 net/socket.c:725 ____sys_sendmsg+0x331/0x810 net/socket.c:2413 ___sys_sendmsg+0xf3/0x170 net/socket.c:2467 __sys_sendmmsg+0x195/0x470 net/socket.c:2553 __do_sys_sendmmsg net/socket.c:2582 [inline] __se_sys_sendmmsg net/socket.c:2579 [inline] __x64_sys_sendmmsg+0x99/0x100 net/socket.c:2579 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae Freed by task 3609: kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38 kasan_set_track+0x21/0x30 mm/kasan/common.c:45 kasan_set_free_info+0x20/0x30 mm/kasan/generic.c:370 ____kasan_slab_free mm/kasan/common.c:366 [inline] ____kasan_slab_free+0x130/0x160 mm/kasan/common.c:328 kasan_slab_free include/linux/kasan.h:236 [inline] slab_free_hook mm/slub.c:1728 [inline] slab_free_freelist_hook+0x8b/0x1c0 mm/slub.c:1754 slab_free mm/slub.c:3509 [inline] kfree+0xcb/0x280 mm/slub.c:4562 rcu_do_batch kernel/rcu/tree.c:2527 [inline] rcu_core+0x7b8/0x1540 kernel/rcu/tree.c:2778 __do_softirq+0x29b/0x9c2 kernel/softirq.c:558 Last potentially related work creation: kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38 __kasan_record_aux_stack+0xbe/0xd0 mm/kasan/generic.c:348 __call_rcu kernel/rcu/tree.c:3026 [inline] call_rcu+0xb1/0x740 kernel/rcu/tree.c:3106 qdisc_put_unlocked+0x6f/0x90 net/sched/sch_generic.c:1109 tcf_block_release+0x86/0x90 net/sched/cls_api.c:1238 tc_new_tfilter+0xc0d/0x2350 net/sched/cls_api.c:2148 rtnetlink_rcv_msg+0x80d/0xb80 net/core/rtnetlink.c:5583 netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2494 netlink_unicast_kernel net/netlink/af_netlink.c:1317 [inline] netlink_unicast+0x539/0x7e0 net/netlink/af_netlink.c:1343 netlink_sendmsg+0x904/0xe00 net/netlink/af_netlink.c:1919 sock_sendmsg_nosec net/socket.c:705 [inline] sock_sendmsg+0xcf/0x120 net/socket.c:725 ____sys_sendmsg+0x331/0x810 net/socket.c:2413 ___sys_sendmsg+0xf3/0x170 net/socket.c:2467 __sys_sendmmsg+0x195/0x470 net/socket.c:2553 __do_sys_sendmmsg net/socket.c:2582 [inline] __se_sys_sendmmsg net/socket.c:2579 [inline] __x64_sys_sendmmsg+0x99/0x100 net/socket.c:2579 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae The buggy address belongs to the object at ffff8880985c4800 which belongs to the cache kmalloc-1k of size 1024 The buggy address is located 776 bytes inside of 1024-byte region [ffff8880985c4800, ffff8880985c4c00) The buggy address belongs to the page: page:ffffea0002617000 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x985c0 head:ffffea0002617000 order:3 compound_mapcount:0 compound_pincount:0 flags: 0xfff00000010200(slab|head|node=0|zone=1|lastcpupid=0x7ff) raw: 00fff00000010200 0000000000000000 dead000000000122 ffff888010c41dc0 raw: 0000000000000000 0000000000100010 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected page_owner tracks the page as allocated page last allocated via order 3, migratetype Unmovable, gfp_mask 0x1d20c0(__GFP_IO|__GFP_FS|__GFP_NOWARN|__GFP_NORETRY|__GFP_COMP|__GFP_NOMEMALLOC|__GFP_HARDWALL), pid 1941, ts 1038999441284, free_ts 1033444432829 prep_new_page mm/page_alloc.c:2434 [inline] get_page_from_freelist+0xa72/0x2f50 mm/page_alloc.c:4165 __alloc_pages+0x1b2/0x500 mm/page_alloc.c:5389 alloc_pages+0x1aa/0x310 mm/mempolicy.c:2271 alloc_slab_page mm/slub.c:1799 [inline] allocate_slab mm/slub.c:1944 [inline] new_slab+0x28a/0x3b0 mm/slub.c:2004 ___slab_alloc+0x87c/0xe90 
mm/slub.c:3018 __slab_alloc.constprop.0+0x4d/0xa0 mm/slub.c:3105 slab_alloc_node mm/slub.c:3196 [inline] slab_alloc mm/slub.c:3238 [inline] __kmalloc+0x2fb/0x340 mm/slub.c:4420 kmalloc include/linux/slab.h:586 [inline] kzalloc include/linux/slab.h:715 [inline] __register_sysctl_table+0x112/0x1090 fs/proc/proc_sysctl.c:1335 neigh_sysctl_register+0x2c8/0x5e0 net/core/neighbour.c:3787 devinet_sysctl_register+0xb1/0x230 net/ipv4/devinet.c:2618 inetdev_init+0x286/0x580 net/ipv4/devinet.c:278 inetdev_event+0xa8a/0x15d0 net/ipv4/devinet.c:1532 notifier_call_chain+0xb5/0x200 kernel/notifier.c:84 call_netdevice_notifiers_info+0xb5/0x130 net/core/dev.c:1919 call_netdevice_notifiers_extack net/core/dev.c:1931 [inline] call_netdevice_notifiers net/core/dev.c:1945 [inline] register_netdevice+0x1073/0x1500 net/core/dev.c:9698 veth_newlink+0x59c/0xa90 drivers/net/veth.c:1722 page last free stack trace: reset_page_owner include/linux/page_owner.h:24 [inline] free_pages_prepare mm/page_alloc.c:1352 [inline] free_pcp_prepare+0x374/0x870 mm/page_alloc.c:1404 free_unref_page_prepare mm/page_alloc.c:3325 [inline] free_unref_page+0x19/0x690 mm/page_alloc.c:3404 release_pages+0x748/0x1220 mm/swap.c:956 tlb_batch_pages_flush mm/mmu_gather.c:50 [inline] tlb_flush_mmu_free mm/mmu_gather.c:243 [inline] tlb_flush_mmu+0xe9/0x6b0 mm/mmu_gather.c:250 zap_pte_range mm/memory.c:1441 [inline] zap_pmd_range mm/memory.c:1490 [inline] zap_pud_range mm/memory.c:1519 [inline] zap_p4d_range mm/memory.c:1540 [inline] unmap_page_range+0x1d1d/0x2a30 mm/memory.c:1561 unmap_single_vma+0x198/0x310 mm/memory.c:1606 unmap_vmas+0x16b/0x2f0 mm/memory.c:1638 exit_mmap+0x201/0x670 mm/mmap.c:3178 __mmput+0x122/0x4b0 kernel/fork.c:1114 mmput+0x56/0x60 kernel/fork.c:1135 exit_mm kernel/exit.c:507 [inline] do_exit+0xa3c/0x2a30 kernel/exit.c:793 do_group_exit+0xd2/0x2f0 kernel/exit.c:935 __do_sys_exit_group kernel/exit.c:946 [inline] __se_sys_exit_group kernel/exit.c:944 [inline] __x64_sys_exit_group+0x3a/0x50 kernel/exit.c:944 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae Memory state around the buggy address: ffff8880985c4a00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff8880985c4a80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb >ffff8880985c4b00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff8880985c4b80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff8880985c4c00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc Fixes: 470502de5bdb ("net: sched: unlock rules update API") Signed-off-by: Eric Dumazet <[email protected]> Cc: Vlad Buslov <[email protected]> Cc: Jiri Pirko <[email protected]> Cc: Cong Wang <[email protected]> Reported-by: syzbot <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
static int __init pep_register(void)
{
	return phonet_proto_register(PN_PROTO_PIPE, &pep_pn_proto);
}
0
[ "CWE-200" ]
net
bcd0f93353326954817a4f9fa55ec57fb38acbb0
316,291,219,715,020,100,000,000,000,000,000,000,000
4
phonet: refcount leak in pep_sock_accep sock_hold(sk) is invoked in pep_sock_accept(), but __sock_put(sk) is not invoked in subsequent failure branches(pep_accept_conn() != 0). Signed-off-by: Hangyu Hua <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
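The leak described above is the classic unbalanced get/put on an error path. A toy sketch of the balanced pattern, using a plain counter instead of the kernel's sock_hold()/sock_put():

struct obj { int refcnt; };

static void obj_get(struct obj *o) { o->refcnt++; }
static void obj_put(struct obj *o) { o->refcnt--; }

static int accept_connection(struct obj *listener, int setup_ok)
{
	obj_get(listener);
	if (!setup_ok) {		/* failure branch after the get ... */
		obj_put(listener);	/* ... must drop the reference it took */
		return -1;
	}
	return 0;			/* success keeps the reference */
}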
static int StreamTcpTest03 (void) { Packet *p = SCMalloc(SIZE_OF_PACKET); if (unlikely(p == NULL)) return 0; Flow f; ThreadVars tv; StreamTcpThread stt; TCPHdr tcph; memset(p, 0, SIZE_OF_PACKET); PacketQueue pq; memset(&pq,0,sizeof(PacketQueue)); memset (&f, 0, sizeof(Flow)); memset(&tv, 0, sizeof (ThreadVars)); memset(&stt, 0, sizeof (StreamTcpThread)); memset(&tcph, 0, sizeof (TCPHdr)); FLOW_INITIALIZE(&f); p->flow = &f; StreamTcpUTInit(&stt.ra_ctx); tcph.th_win = htons(5480); tcph.th_seq = htonl(10); tcph.th_ack = htonl(20); tcph.th_flags = TH_SYN|TH_ACK; p->tcph = &tcph; int ret = 0; if (StreamTcpPacket(&tv, p, &stt, &pq) == -1) goto end; p->tcph->th_seq = htonl(20); p->tcph->th_ack = htonl(11); p->tcph->th_flags = TH_ACK; p->flowflags = FLOW_PKT_TOSERVER; if (StreamTcpPacket(&tv, p, &stt, &pq) == -1) goto end; p->tcph->th_seq = htonl(19); p->tcph->th_ack = htonl(11); p->tcph->th_flags = TH_ACK|TH_PUSH; p->flowflags = FLOW_PKT_TOSERVER; if (StreamTcpPacket(&tv, p, &stt, &pq) == -1) goto end; if (stream_config.midstream != TRUE) { ret = 1; goto end; } if (((TcpSession *)(p->flow->protoctx))->state != TCP_ESTABLISHED) goto end; if (((TcpSession *)(p->flow->protoctx))->client.next_seq != 20 && ((TcpSession *)(p->flow->protoctx))->server.next_seq != 11) goto end; StreamTcpSessionClear(p->flow->protoctx); ret = 1; end: SCFree(p); FLOW_DESTROY(&f); StreamTcpUTDeinit(stt.ra_ctx); return ret; }
0
[]
suricata
843d0b7a10bb45627f94764a6c5d468a24143345
71,265,576,554,856,060,000,000,000,000,000,000,000
67
stream: support RST getting lost/ignored In case of a valid RST on a SYN, the state is switched to 'TCP_CLOSED'. However, the target of the RST may not have received it, or may not have accepted it. Also, the RST may have been injected, so the supposed sender may not actually be aware of the RST that was sent in it's name. In this case the previous behavior was to switch the state to CLOSED and accept no further TCP updates or stream reassembly. This patch changes this. It still switches the state to CLOSED, as this is by far the most likely to be correct. However, it will reconsider the state if the receiver continues to talk. To do this on each state change the previous state will be recorded in TcpSession::pstate. If a non-RST packet is received after a RST, this TcpSession::pstate is used to try to continue the conversation. If the (supposed) sender of the RST is also continueing the conversation as normal, it's highly likely it didn't send the RST. In this case a stream event is generated. Ticket: #2501 Reported-By: Kirill Shipulin
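The message above describes remembering the previous TCP state so a session closed by a possibly bogus RST can be revived if both peers keep talking. A reduced sketch of that state bookkeeping, not Suricata's actual TcpSession:

enum tcp_state { S_ESTABLISHED, S_CLOSED };

struct session {
	enum tcp_state state;
	enum tcp_state pstate;		/* state before the last transition */
};

static void set_state(struct session *s, enum tcp_state next)
{
	s->pstate = s->state;		/* keep the old state around */
	s->state = next;
}

static void on_packet_after_rst(struct session *s)
{
	/* Both sides keep talking normally: the RST was likely lost, ignored
	 * or injected, so fall back to the pre-RST state. */
	if (s->state == S_CLOSED)
		s->state = s->pstate;
}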
R_API DsoJsonObj *r_bin_java_get_field_json_definition(RBinJavaObj *bin, RBinJavaField *fm_type) {
	return r_bin_java_get_fm_type_definition_json (bin, fm_type, 0);
}
0
[ "CWE-119", "CWE-788" ]
radare2
6c4428f018d385fc80a33ecddcb37becea685dd5
43,071,057,170,258,710,000,000,000,000,000,000,000
3
Improve boundary checks to fix oobread segfaults ##crash * Reported by Cen Zhang via huntr.dev * Reproducer: bins/fuzzed/javaoob-havoc.class
yang_read_enum(struct ly_ctx *ctx, struct yang_type *typ, struct lys_type_enum *enm, char *value) { int i, j; typ->base = LY_TYPE_ENUM; if (!value[0]) { LOGVAL(ctx, LYE_INARG, LY_VLOG_NONE, NULL, value, "enum name"); LOGVAL(ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Enum name must not be empty."); free(value); goto error; } enm->name = lydict_insert_zc(ctx, value); /* the assigned name MUST NOT have any leading or trailing whitespace characters */ if (isspace(enm->name[0]) || isspace(enm->name[strlen(enm->name) - 1])) { LOGVAL(ctx, LYE_ENUM_WS, LY_VLOG_NONE, NULL, enm->name); goto error; } j = typ->type->info.enums.count - 1; /* check the name uniqueness */ for (i = 0; i < j; i++) { if (ly_strequal(typ->type->info.enums.enm[i].name, enm->name, 1)) { LOGVAL(ctx, LYE_ENUM_DUPNAME, LY_VLOG_NONE, NULL, enm->name); goto error; } } return EXIT_SUCCESS; error: return EXIT_FAILURE; }
0
[ "CWE-415" ]
libyang
d9feacc4a590d35dbc1af21caf9080008b4450ed
13,946,486,884,294,074,000,000,000,000,000,000,000
34
yang parser BUGFIX double free Fixes #742
virDomainDefMaybeAddInput(virDomainDefPtr def, int type, int bus) { size_t i; virDomainInputDefPtr input; for (i = 0; i < def->ninputs; i++) { if (def->inputs[i]->type == type && def->inputs[i]->bus == bus) return 0; } if (VIR_ALLOC(input) < 0) return -1; input->type = type; input->bus = bus; if (VIR_APPEND_ELEMENT(def->inputs, def->ninputs, input) < 0) { VIR_FREE(input); return -1; } return 0; }
0
[ "CWE-212" ]
libvirt
a5b064bf4b17a9884d7d361733737fb614ad8979
11,508,037,350,408,305,000,000,000,000,000,000,000
26
conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410 (v6.1.0-122-g3b076391be) we support http cookies. Since they may contain somewhat sensitive information we should not format them into the XML unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted. Reported-by: Han Han <[email protected]> Signed-off-by: Peter Krempa <[email protected]> Reviewed-by: Erik Skultety <[email protected]>
WasmResult Context::getDownstreamDataBufferBytes(uint32_t start, uint32_t length,
                                                 absl::string_view* data) {
  if (!network_downstream_data_buffer_)
    return WasmResult::NotFound;
  if (network_downstream_data_buffer_->length() < static_cast<uint64_t>(start + length))
    return WasmResult::InvalidMemoryAccess;
  *data = absl::string_view(
      static_cast<char*>(network_downstream_data_buffer_->linearize(start + length)) + start,
      length);
  return WasmResult::Ok;
}
0
[ "CWE-476" ]
envoy
8788a3cf255b647fd14e6b5e2585abaaedb28153
159,055,531,158,535,100,000,000,000,000,000,000,000
11
1.4 - Do not call into the VM unless the VM Context has been created. (#24) * Ensure that the in VM Context is created before onDone is called. Signed-off-by: John Plevyak <[email protected]> * Update as per offline discussion. Signed-off-by: John Plevyak <[email protected]> * Set in_vm_context_created_ in onNetworkNewConnection. Signed-off-by: John Plevyak <[email protected]> * Add guards to other network calls. Signed-off-by: John Plevyak <[email protected]> * Fix common/wasm tests. Signed-off-by: John Plevyak <[email protected]> * Patch tests. Signed-off-by: John Plevyak <[email protected]> * Remove unecessary file from cherry-pick. Signed-off-by: John Plevyak <[email protected]>
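The guard described above is a flag that records whether the in-VM context exists, checked before any callback calls into the VM. A minimal sketch with hypothetical names, not the plugin's real classes:

#include <stdbool.h>

struct wasm_ctx {
	bool in_vm_context_created;
};

static void on_new_connection(struct wasm_ctx *ctx)
{
	ctx->in_vm_context_created = true;	/* context now exists in the VM */
}

static void on_done(struct wasm_ctx *ctx)
{
	if (!ctx->in_vm_context_created)
		return;				/* never call into the VM early */
	/* ... safe to invoke the VM callback here ... */
}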
void bdrv_ref(BlockDriverState *bs)
{
    bs->refcnt++;
}
0
[ "CWE-190" ]
qemu
8f4754ede56e3f9ea3fd7207f4a7c4453e59285b
276,778,069,617,096,750,000,000,000,000,000,000,000
4
block: Limit request size (CVE-2014-0143) Limiting the size of a single request to INT_MAX not only fixes a direct integer overflow in bdrv_check_request() (which would only trigger bad behaviour with ridiculously huge images, as in close to 2^64 bytes), but can also prevent overflows in all block drivers. Signed-off-by: Kevin Wolf <[email protected]> Reviewed-by: Max Reitz <[email protected]> Signed-off-by: Stefan Hajnoczi <[email protected]>
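The change above caps a single block request at INT_MAX bytes and rejects offset/size pairs that would overflow. A standalone sketch of such a bounds check, not the QEMU block layer itself:

#include <stdint.h>
#include <limits.h>

static int check_request(int64_t offset, uint64_t bytes, int64_t total_size)
{
	if (bytes > INT_MAX)
		return -1;				/* oversized single request */
	if (offset < 0 || offset > total_size)
		return -1;
	if ((uint64_t)(total_size - offset) < bytes)
		return -1;				/* would run past the end */
	return 0;
}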
inline uint8_t* WireFormatLite::WriteUInt32NoTagToArray(
    const RepeatedField<uint32_t>& value, uint8_t* target) {
  return WritePrimitiveNoTagToArray(value, WriteUInt32NoTagToArray, target);
}
0
[ "CWE-703" ]
protobuf
d1635e1496f51e0d5653d856211e8821bc47adc4
150,959,669,774,310,880,000,000,000,000,000,000,000
4
Apply patch
HTTPSession::onEgressMessageFinished(HTTPTransaction* txn, bool withRST) {
  // If the semantics of the protocol don't permit more messages
  // to be read or sent on this connection, close the socket in one or
  // more directions.
  CHECK(!transactions_.empty());
  if (infoCallback_) {
    infoCallback_->onRequestEnd(*this, txn->getMaxDeferredSize());
  }
  auto oldStreamCount = getPipelineStreamCount();
  decrementTransactionCount(txn, false, true);
  if (withRST || ((!codec_->isReusable() || readsShutdown()) &&
                  transactions_.size() == 1)) {
    // We should shutdown reads if we are closing with RST or we aren't
    // interested in any further messages (ie if we are a downstream session).
    // Upgraded sessions have independent ingress and egress, and the reads
    // need not be shutdown on egress finish.
    if (withRST) {
      // Let any queued writes complete, but send a RST when done.
      VLOG(4) << *this << " resetting egress after this message";
      resetAfterDrainingWrites_ = true;
      setCloseReason(ConnectionCloseReason::TRANSACTION_ABORT);
      shutdownTransport(true, true);
    } else {
      // the reason is already set (either not reusable or readshutdown).

      // Defer normal shutdowns until the end of the loop. This
      // handles an edge case with direct responses with Connection:
      // close served before ingress EOM. The remainder of the ingress
      // message may be in the parse loop, so give it a chance to
      // finish out and avoid a kErrorEOF

      // we can get here during shutdown, in that case do not schedule a
      // shutdown callback again
      if (!shutdownTransportCb_) {
        // Just for safety, the following bumps the refcount on this session
        // to keep it live until the loopCb runs
        shutdownTransportCb_.reset(new ShutdownTransportCallback(this));
        sock_->getEventBase()->runInLoop(shutdownTransportCb_.get(), true);
      }
    }
  } else {
    maybeResumePausedPipelinedTransaction(oldStreamCount,
                                          txn->getSequenceNumber());
  }
}
0
[ "CWE-20" ]
proxygen
0600ebe59c3e82cd012def77ca9ca1918da74a71
167,973,455,483,119,480,000,000,000,000,000,000,000
46
Check that a secondary auth manager is set before dereferencing. Summary: CVE-2018-6343 Reviewed By: mingtaoy Differential Revision: D12994423 fbshipit-source-id: 9229ec11da8085f1fa153595e8e5353e19d06fb7
bool open_and_set_current(const char *name)
{
  FILE *opened= fopen(name, "rb");

  if (!opened)
    return false;

  cur_file++;
  cur_file->file= opened;
  cur_file->file_name= my_strdup(name, MYF(MY_FAE));
  cur_file->lineno=1;
  return true;
}
0
[]
server
01b39b7b0730102b88d8ea43ec719a75e9316a1e
284,848,898,834,695,440,000,000,000,000,000,000,000
13
mysqltest: don't eat new lines in --exec pass them through as is
static MagickBooleanType IsDCM(const unsigned char *magick,const size_t length)
{
  if (length < 132)
    return(MagickFalse);
  if (LocaleNCompare((char *) (magick+128),"DICM",4) == 0)
    return(MagickTrue);
  return(MagickFalse);
}
0
[ "CWE-20", "CWE-476", "CWE-369" ]
ImageMagick
5511ef530576ed18fd636baa3bb4eda3d667665d
132,970,711,497,427,090,000,000,000,000,000,000,000
8
Add additional checks to DCM reader to prevent data-driven faults (bug report from Hanno Böck
static int if6_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &if6_seq_ops,
			    sizeof(struct if6_iter_state));
}
0
[]
net
4b08a8f1bd8cb4541c93ec170027b4d0782dab52
52,106,196,926,129,970,000,000,000,000,000,000,000
5
ipv6: remove max_addresses check from ipv6_create_tempaddr Because of the max_addresses check attackers were able to disable privacy extensions on an interface by creating enough autoconfigured addresses: <http://seclists.org/oss-sec/2012/q4/292> But the check is not actually needed: max_addresses protects the kernel to install too many ipv6 addresses on an interface and guards addrconf_prefix_rcv to install further addresses as soon as this limit is reached. We only generate temporary addresses in direct response of a new address showing up. As soon as we filled up the maximum number of addresses of an interface, we stop installing more addresses and thus also stop generating more temp addresses. Even if the attacker tries to generate a lot of temporary addresses by announcing a prefix and removing it again (lifetime == 0) we won't install more temp addresses, because the temporary addresses do count to the maximum number of addresses, thus we would stop installing new autoconfigured addresses when the limit is reached. This patch fixes CVE-2013-0343 (but other layer-2 attacks are still possible). Thanks to Ding Tianhong to bring this topic up again. Cc: Ding Tianhong <[email protected]> Cc: George Kargiotakis <[email protected]> Cc: P J P <[email protected]> Cc: YOSHIFUJI Hideaki <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Acked-by: Ding Tianhong <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static CURLcode parse_url_login(struct SessionHandle *data, struct connectdata *conn, char **user, char **passwd, char **options) { CURLcode result = CURLE_OK; char *userp = NULL; char *passwdp = NULL; char *optionsp = NULL; /* At this point, we're hoping all the other special cases have * been taken care of, so conn->host.name is at most * [user[:password][;options]]@]hostname * * We need somewhere to put the embedded details, so do that first. */ char *ptr = strchr(conn->host.name, '@'); char *login = conn->host.name; DEBUGASSERT(!**user); DEBUGASSERT(!**passwd); DEBUGASSERT(!**options); if(!ptr) goto out; /* We will now try to extract the * possible login information in a string like: * ftp://user:[email protected]:8021/README */ conn->host.name = ++ptr; /* So the hostname is sane. Only bother interpreting the * results if we could care. It could still be wasted * work because it might be overtaken by the programmatically * set user/passwd, but doing that first adds more cases here :-( */ if(data->set.use_netrc == CURL_NETRC_REQUIRED) goto out; /* We could use the login information in the URL so extract it */ result = parse_login_details(login, ptr - login - 1, &userp, &passwdp, &optionsp); if(result) goto out; if(userp) { char *newname; /* We have a user in the URL */ conn->bits.userpwd_in_url = TRUE; conn->bits.user_passwd = TRUE; /* enable user+password */ /* Decode the user */ newname = curl_easy_unescape(data, userp, 0, NULL); if(!newname) { result = CURLE_OUT_OF_MEMORY; goto out; } free(*user); *user = newname; } if(passwdp) { /* We have a password in the URL so decode it */ char *newpasswd = curl_easy_unescape(data, passwdp, 0, NULL); if(!newpasswd) { result = CURLE_OUT_OF_MEMORY; goto out; } free(*passwd); *passwd = newpasswd; } if(optionsp) { /* We have an options list in the URL so decode it */ char *newoptions = curl_easy_unescape(data, optionsp, 0, NULL); if(!newoptions) { result = CURLE_OUT_OF_MEMORY; goto out; } free(*options); *options = newoptions; } out: free(userp); free(passwdp); free(optionsp); return result; }
0
[ "CWE-264" ]
curl
31be461c6b659312100c47be6ddd5f0f569290f6
172,558,666,533,662,100,000,000,000,000,000,000,000
97
ConnectionExists: for NTLM re-use, require credentials to match CVE-2015-3143 Bug: http://curl.haxx.se/docs/adv_20150422A.html Reported-by: Paras Sethia
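The rule stated above is that a connection carrying connection-scoped NTLM state may only be reused when the new request presents the same user and password. A small sketch of that comparison with hypothetical structs, not curl's ConnectionExists():

#include <string.h>
#include <stdbool.h>

struct conn {
	const char *user;
	const char *passwd;
	bool ntlm_authenticated;
};

static bool str_eq(const char *a, const char *b)
{
	if (a == NULL || b == NULL)
		return a == b;
	return strcmp(a, b) == 0;
}

static bool connection_reusable(const struct conn *cached,
                                const char *user, const char *passwd)
{
	if (!cached->ntlm_authenticated)
		return true;		/* no connection-bound auth state */
	return str_eq(cached->user, user) && str_eq(cached->passwd, passwd);
}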
MagickExport void GetNextToken(const char *start,const char **end, const size_t extent,char *token) { double value; register const char *p; register ssize_t i; size_t length; assert(start != (const char *) NULL); assert(token != (char *) NULL); i=0; length=strlen(start); p=start; while ((isspace((int) ((unsigned char) *p)) != 0) && (*p != '\0')) p++; switch (*p) { case '\0': break; case '"': case '\'': case '`': case '{': { register char escape; switch (*p) { case '"': escape='"'; break; case '\'': escape='\''; break; case '`': escape='\''; break; case '{': escape='}'; break; default: escape=(*p); break; } for (p++; *p != '\0'; p++) { if ((*p == '\\') && ((*(p+1) == escape) || (*(p+1) == '\\'))) p++; else if (*p == escape) { p++; break; } if (i < (ssize_t) (extent-1)) token[i++]=(*p); if ((p-start) >= length) break; } break; } case '/': { if (i < (ssize_t) (extent-1)) token[i++]=(*p++); if ((*p == '>') || (*p == '/')) if (i < (ssize_t) (extent-1)) token[i++]=(*p++); break; } default: { char *q; value=StringToDouble(p,&q); (void) value; if ((p != q) && (*p != ',')) { for ( ; (p < q) && (*p != ','); p++) { if (i < (ssize_t) (extent-1)) token[i++]=(*p); if ((p-start) >= length) break; } if (*p == '%') if (i < (ssize_t) (extent-1)) token[i++]=(*p++); break; } if ((*p != '\0') && (isalpha((int) ((unsigned char) *p)) == 0) && (*p != *DirectorySeparator) && (*p != '#') && (*p != '<')) { if (i < (ssize_t) (extent-1)) token[i++]=(*p++); break; } for ( ; *p != '\0'; p++) { if (((isspace((int) ((unsigned char) *p)) != 0) || (*p == '=') || (*p == ',') || (*p == ':') || (*p == ';')) && (*(p-1) != '\\')) break; if ((i > 0) && (*p == '<')) break; if (i < (ssize_t) (extent-1)) token[i++]=(*p); if (*p == '>') break; if (*p == '(') for (p++; *p != '\0'; p++) { if (i < (ssize_t) (extent-1)) token[i++]=(*p); if ((*p == ')') && (*(p-1) != '\\')) break; if ((p-start) >= length) break; } if ((p-start) >= length) break; } break; } } token[i]='\0'; if (LocaleNCompare(token,"url(",4) == 0) { ssize_t offset; offset=4; if (token[offset] == '#') offset++; i=(ssize_t) strlen(token); (void) CopyMagickString(token,token+offset,MagickPathExtent); token[i-offset-1]='\0'; } while (isspace((int) ((unsigned char) *p)) != 0) p++; if (end != (const char **) NULL) *end=(const char *) p; }
0
[ "CWE-125" ]
ImageMagick
787f9dc99c8d186ae26ed53ddec54bd0a6f90852
132,726,129,514,756,250,000,000,000,000,000,000,000
141
https://github.com/ImageMagick/ImageMagick/issues/539
cachedb_apply_cfg(struct cachedb_env* cachedb_env, struct config_file* cfg)
{
	const char* backend_str = cfg->cachedb_backend;
	cachedb_env->backend = cachedb_find_backend(backend_str);
	if(!cachedb_env->backend) {
		log_err("cachedb: cannot find backend name '%s'", backend_str);
		return 0;
	}

	/* TODO see if more configuration needs to be applied or not */
	return 1;
}
0
[ "CWE-613", "CWE-703" ]
unbound
f6753a0f1018133df552347a199e0362fc1dac68
9,284,840,782,224,979,000,000,000,000,000,000,000
12
- Fix the novel ghost domain issues CVE-2022-30698 and CVE-2022-30699.
receive_check_set_sender(uschar *newsender)
{
uschar *qnewsender;
if (trusted_caller) return TRUE;
if (!newsender || !untrusted_set_sender) return FALSE;
qnewsender = Ustrchr(newsender, '@')
  ? newsender : string_sprintf("%s@%s", newsender, qualify_domain_sender);
return match_address_list_basic(qnewsender, CUSS &untrusted_set_sender, 0) == OK;
}
0
[ "CWE-416" ]
exim
4e6ae6235c68de243b1c2419027472d7659aa2b4
59,958,590,008,539,160,000,000,000,000,000,000,000
9
Avoid release of store if there have been later allocations. Bug 2199
static void sas_print_parent_topology_bug(struct domain_device *child, struct ex_phy *parent_phy, struct ex_phy *child_phy) { static const char *ex_type[] = { [SAS_EDGE_EXPANDER_DEVICE] = "edge", [SAS_FANOUT_EXPANDER_DEVICE] = "fanout", }; struct domain_device *parent = child->parent; sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx " "phy 0x%x has %c:%c routing link!\n", ex_type[parent->dev_type], SAS_ADDR(parent->sas_addr), parent_phy->phy_id, ex_type[child->dev_type], SAS_ADDR(child->sas_addr), child_phy->phy_id, sas_route_char(parent, parent_phy), sas_route_char(child, child_phy)); }
0
[ "CWE-399", "CWE-772" ]
linux
4a491b1ab11ca0556d2fda1ff1301e862a2d44c4
275,607,837,713,256,700,000,000,000,000,000,000,000
24
scsi: libsas: fix memory leak in sas_smp_get_phy_events() We've got a memory leak with the following producer: while true; do cat /sys/class/sas_phy/phy-1:0:12/invalid_dword_count >/dev/null; done The buffer req is allocated and not freed after we return. Fix it. Fixes: 2908d778ab3e ("[SCSI] aic94xx: new driver") Signed-off-by: Jason Yan <[email protected]> CC: John Garry <[email protected]> CC: chenqilin <[email protected]> CC: chenxiang <[email protected]> Reviewed-by: Christoph Hellwig <[email protected]> Reviewed-by: Hannes Reinecke <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
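The fix above frees the request buffer on every exit path instead of leaking it on return. A generic sketch of the pattern, with illustrative names rather than the libsas SMP code:

#include <stdlib.h>
#include <string.h>

static int get_phy_events(char *out, size_t outlen)
{
	int res = -1;
	unsigned char *req = malloc(64);

	if (req == NULL)
		return -1;
	memset(req, 0, 64);
	/* ... build and send the request, fill 'out' with the result ... */
	if (out != NULL && outlen > 0) {
		out[0] = '\0';
		res = 0;
	}

	free(req);		/* previously the buffer leaked on return */
	return res;
}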
displayBuffer(Buffer *buf, int mode) { Str msg; int ny = 0; if (!buf) return; if (buf->topLine == NULL && readBufferCache(buf) == 0) { /* clear_buffer */ mode = B_FORCE_REDRAW; } if (buf->width == 0) buf->width = INIT_BUFFER_WIDTH; if (buf->height == 0) buf->height = LASTLINE + 1; if ((buf->width != INIT_BUFFER_WIDTH && (is_html_type(buf->type) || FoldLine)) || buf->need_reshape) { buf->need_reshape = TRUE; reshapeBuffer(buf); } if (showLineNum) { if (buf->lastLine && buf->lastLine->real_linenumber > 0) buf->rootX = (int)(log(buf->lastLine->real_linenumber + 0.1) / log(10)) + 2; if (buf->rootX < 5) buf->rootX = 5; if (buf->rootX > COLS) buf->rootX = COLS; } else buf->rootX = 0; buf->COLS = COLS - buf->rootX; if (nTab > 1 #ifdef USE_MOUSE || mouse_action.menu_str #endif ) { if (mode == B_FORCE_REDRAW || mode == B_REDRAW_IMAGE) calcTabPos(); ny = LastTab->y + 2; if (ny > LASTLINE) ny = LASTLINE; } if (buf->rootY != ny || buf->LINES != LASTLINE - ny) { buf->rootY = ny; buf->LINES = LASTLINE - ny; arrangeCursor(buf); mode = B_REDRAW_IMAGE; } if (mode == B_FORCE_REDRAW || mode == B_SCROLL || mode == B_REDRAW_IMAGE || cline != buf->topLine || ccolumn != buf->currentColumn) { #ifdef USE_RAW_SCROLL if ( #ifdef USE_IMAGE !(activeImage && displayImage && draw_image_flag) && #endif mode == B_SCROLL && cline && buf->currentColumn == ccolumn) { int n = buf->topLine->linenumber - cline->linenumber; if (n > 0 && n < buf->LINES) { move(LASTLINE, 0); clrtoeolx(); refresh(); scroll(n); } else if (n < 0 && n > -buf->LINES) { #if 0 /* defined(__CYGWIN__) */ move(LASTLINE + n + 1, 0); clrtoeolx(); refresh(); #endif /* defined(__CYGWIN__) */ rscroll(-n); } redrawNLine(buf, n); } else #endif { #ifdef USE_IMAGE if (activeImage && (mode == B_REDRAW_IMAGE || cline != buf->topLine || ccolumn != buf->currentColumn)) { if (draw_image_flag) clear(); clearImage(); loadImage(buf, IMG_FLAG_STOP); image_touch++; draw_image_flag = FALSE; } #endif redrawBuffer(buf); } cline = buf->topLine; ccolumn = buf->currentColumn; } if (buf->topLine == NULL) buf->topLine = buf->firstLine; #ifdef USE_IMAGE if (buf->need_reshape) { displayBuffer(buf, B_FORCE_REDRAW); return; } #endif drawAnchorCursor(buf); msg = make_lastline_message(buf); if (buf->firstLine == NULL) { /* FIXME: gettextize? */ Strcat_charp(msg, "\tNo Line"); } if (delayed_msg != NULL) { disp_message(delayed_msg, FALSE); delayed_msg = NULL; refresh(); } standout(); message(msg->ptr, buf->cursorX + buf->rootX, buf->cursorY + buf->rootY); standend(); term_title(conv_to_system(buf->buffername)); refresh(); #ifdef USE_IMAGE if (activeImage && displayImage && buf->img && buf->image_loaded) { drawImage(); } #endif #ifdef USE_BUFINFO if (buf != save_current_buf) { saveBufferInfo(); save_current_buf = buf; } #endif }
0
[ "CWE-119" ]
w3m
0c3f5d0e0d9269ad47b8f4b061d7818993913189
226,611,692,398,154,230,000,000,000,000,000,000,000
134
Prevent array index out of bounds for symbol Bug-Debian: https://github.com/tats/w3m/issues/38
xmlParseEntityDecl(xmlParserCtxtPtr ctxt) { const xmlChar *name = NULL; xmlChar *value = NULL; xmlChar *URI = NULL, *literal = NULL; const xmlChar *ndata = NULL; int isParameter = 0; xmlChar *orig = NULL; int skipped; /* GROW; done in the caller */ if (CMP8(CUR_PTR, '<', '!', 'E', 'N', 'T', 'I', 'T', 'Y')) { xmlParserInputPtr input = ctxt->input; SHRINK; SKIP(8); skipped = SKIP_BLANKS; if (skipped == 0) { xmlFatalErrMsg(ctxt, XML_ERR_SPACE_REQUIRED, "Space required after '<!ENTITY'\n"); } if (RAW == '%') { NEXT; skipped = SKIP_BLANKS; if (skipped == 0) { xmlFatalErrMsg(ctxt, XML_ERR_SPACE_REQUIRED, "Space required after '%'\n"); } isParameter = 1; } name = xmlParseName(ctxt); if (name == NULL) { xmlFatalErrMsg(ctxt, XML_ERR_NAME_REQUIRED, "xmlParseEntityDecl: no name\n"); return; } if (xmlStrchr(name, ':') != NULL) { xmlNsErr(ctxt, XML_NS_ERR_COLON, "colons are forbidden from entities names '%s'\n", name, NULL, NULL); } skipped = SKIP_BLANKS; if (skipped == 0) { xmlFatalErrMsg(ctxt, XML_ERR_SPACE_REQUIRED, "Space required after the entity name\n"); } ctxt->instate = XML_PARSER_ENTITY_DECL; /* * handle the various case of definitions... */ if (isParameter) { if ((RAW == '"') || (RAW == '\'')) { value = xmlParseEntityValue(ctxt, &orig); if (value) { if ((ctxt->sax != NULL) && (!ctxt->disableSAX) && (ctxt->sax->entityDecl != NULL)) ctxt->sax->entityDecl(ctxt->userData, name, XML_INTERNAL_PARAMETER_ENTITY, NULL, NULL, value); } } else { URI = xmlParseExternalID(ctxt, &literal, 1); if ((URI == NULL) && (literal == NULL)) { xmlFatalErr(ctxt, XML_ERR_VALUE_REQUIRED, NULL); } if (URI) { xmlURIPtr uri; uri = xmlParseURI((const char *) URI); if (uri == NULL) { xmlErrMsgStr(ctxt, XML_ERR_INVALID_URI, "Invalid URI: %s\n", URI); /* * This really ought to be a well formedness error * but the XML Core WG decided otherwise c.f. issue * E26 of the XML erratas. */ } else { if (uri->fragment != NULL) { /* * Okay this is foolish to block those but not * invalid URIs. */ xmlFatalErr(ctxt, XML_ERR_URI_FRAGMENT, NULL); } else { if ((ctxt->sax != NULL) && (!ctxt->disableSAX) && (ctxt->sax->entityDecl != NULL)) ctxt->sax->entityDecl(ctxt->userData, name, XML_EXTERNAL_PARAMETER_ENTITY, literal, URI, NULL); } xmlFreeURI(uri); } } } } else { if ((RAW == '"') || (RAW == '\'')) { value = xmlParseEntityValue(ctxt, &orig); if ((ctxt->sax != NULL) && (!ctxt->disableSAX) && (ctxt->sax->entityDecl != NULL)) ctxt->sax->entityDecl(ctxt->userData, name, XML_INTERNAL_GENERAL_ENTITY, NULL, NULL, value); /* * For expat compatibility in SAX mode. */ if ((ctxt->myDoc == NULL) || (xmlStrEqual(ctxt->myDoc->version, SAX_COMPAT_MODE))) { if (ctxt->myDoc == NULL) { ctxt->myDoc = xmlNewDoc(SAX_COMPAT_MODE); if (ctxt->myDoc == NULL) { xmlErrMemory(ctxt, "New Doc failed"); return; } ctxt->myDoc->properties = XML_DOC_INTERNAL; } if (ctxt->myDoc->intSubset == NULL) ctxt->myDoc->intSubset = xmlNewDtd(ctxt->myDoc, BAD_CAST "fake", NULL, NULL); xmlSAX2EntityDecl(ctxt, name, XML_INTERNAL_GENERAL_ENTITY, NULL, NULL, value); } } else { URI = xmlParseExternalID(ctxt, &literal, 1); if ((URI == NULL) && (literal == NULL)) { xmlFatalErr(ctxt, XML_ERR_VALUE_REQUIRED, NULL); } if (URI) { xmlURIPtr uri; uri = xmlParseURI((const char *)URI); if (uri == NULL) { xmlErrMsgStr(ctxt, XML_ERR_INVALID_URI, "Invalid URI: %s\n", URI); /* * This really ought to be a well formedness error * but the XML Core WG decided otherwise c.f. issue * E26 of the XML erratas. 
*/ } else { if (uri->fragment != NULL) { /* * Okay this is foolish to block those but not * invalid URIs. */ xmlFatalErr(ctxt, XML_ERR_URI_FRAGMENT, NULL); } xmlFreeURI(uri); } } if ((RAW != '>') && (!IS_BLANK_CH(CUR))) { xmlFatalErrMsg(ctxt, XML_ERR_SPACE_REQUIRED, "Space required before 'NDATA'\n"); } SKIP_BLANKS; if (CMP5(CUR_PTR, 'N', 'D', 'A', 'T', 'A')) { SKIP(5); if (!IS_BLANK_CH(CUR)) { xmlFatalErrMsg(ctxt, XML_ERR_SPACE_REQUIRED, "Space required after 'NDATA'\n"); } SKIP_BLANKS; ndata = xmlParseName(ctxt); if ((ctxt->sax != NULL) && (!ctxt->disableSAX) && (ctxt->sax->unparsedEntityDecl != NULL)) ctxt->sax->unparsedEntityDecl(ctxt->userData, name, literal, URI, ndata); } else { if ((ctxt->sax != NULL) && (!ctxt->disableSAX) && (ctxt->sax->entityDecl != NULL)) ctxt->sax->entityDecl(ctxt->userData, name, XML_EXTERNAL_GENERAL_PARSED_ENTITY, literal, URI, NULL); /* * For expat compatibility in SAX mode. * assuming the entity repalcement was asked for */ if ((ctxt->replaceEntities != 0) && ((ctxt->myDoc == NULL) || (xmlStrEqual(ctxt->myDoc->version, SAX_COMPAT_MODE)))) { if (ctxt->myDoc == NULL) { ctxt->myDoc = xmlNewDoc(SAX_COMPAT_MODE); if (ctxt->myDoc == NULL) { xmlErrMemory(ctxt, "New Doc failed"); return; } ctxt->myDoc->properties = XML_DOC_INTERNAL; } if (ctxt->myDoc->intSubset == NULL) ctxt->myDoc->intSubset = xmlNewDtd(ctxt->myDoc, BAD_CAST "fake", NULL, NULL); xmlSAX2EntityDecl(ctxt, name, XML_EXTERNAL_GENERAL_PARSED_ENTITY, literal, URI, NULL); } } } } if (ctxt->instate == XML_PARSER_EOF) return; SKIP_BLANKS; if (RAW != '>') { xmlFatalErrMsgStr(ctxt, XML_ERR_ENTITY_NOT_FINISHED, "xmlParseEntityDecl: entity %s not terminated\n", name); xmlStopParser(ctxt); } else { if (input != ctxt->input) { xmlFatalErrMsg(ctxt, XML_ERR_ENTITY_BOUNDARY, "Entity declaration doesn't start and stop in the same entity\n"); } NEXT; } if (orig != NULL) { /* * Ugly mechanism to save the raw entity value. */ xmlEntityPtr cur = NULL; if (isParameter) { if ((ctxt->sax != NULL) && (ctxt->sax->getParameterEntity != NULL)) cur = ctxt->sax->getParameterEntity(ctxt->userData, name); } else { if ((ctxt->sax != NULL) && (ctxt->sax->getEntity != NULL)) cur = ctxt->sax->getEntity(ctxt->userData, name); if ((cur == NULL) && (ctxt->userData==ctxt)) { cur = xmlSAX2GetEntity(ctxt, name); } } if (cur != NULL) { if (cur->orig != NULL) xmlFree(orig); else cur->orig = orig; } else xmlFree(orig); } if (value != NULL) xmlFree(value); if (URI != NULL) xmlFree(URI); if (literal != NULL) xmlFree(literal); } }
0
[ "CWE-119" ]
libxml2
a7dfab7411cbf545f359dd3157e5df1eb0e7ce31
254,152,679,003,082,600,000,000,000,000,000,000,000
247
Stop parsing on entities boundaries errors For https://bugzilla.gnome.org/show_bug.cgi?id=744980 There are times, like on unterminated entities that it's preferable to stop parsing, even if that means less error reporting. Entities are feeding the parser on further processing, and if they are ill defined then it's possible to get the parser to bug. Also do the same on Conditional Sections if the input is broken, as the structure of the document can't be guessed.
static int nft_fwd_neigh_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_fwd_neigh *priv = nft_expr_priv(expr); unsigned int addr_len; int err; if (!tb[NFTA_FWD_SREG_DEV] || !tb[NFTA_FWD_SREG_ADDR] || !tb[NFTA_FWD_NFPROTO]) return -EINVAL; priv->nfproto = ntohl(nla_get_be32(tb[NFTA_FWD_NFPROTO])); switch (priv->nfproto) { case NFPROTO_IPV4: addr_len = sizeof(struct in_addr); break; case NFPROTO_IPV6: addr_len = sizeof(struct in6_addr); break; default: return -EOPNOTSUPP; } err = nft_parse_register_load(tb[NFTA_FWD_SREG_DEV], &priv->sreg_dev, sizeof(int)); if (err < 0) return err; return nft_parse_register_load(tb[NFTA_FWD_SREG_ADDR], &priv->sreg_addr, addr_len); }
0
[ "CWE-269" ]
nf
b1a5983f56e371046dcf164f90bfaf704d2b89f6
119,116,992,808,546,170,000,000,000,000,000,000,000
34
netfilter: nf_tables_offload: incorrect flow offload action array size immediate verdict expression needs to allocate one slot in the flow offload action array, however, immediate data expression does not need to do so. fwd and dup expression need to allocate one slot, this is missing. Add a new offload_action interface to report if this expression needs to allocate one slot in the flow offload action array. Fixes: be2861dc36d7 ("netfilter: nft_{fwd,dup}_netdev: add offload support") Reported-and-tested-by: Nick Gregory <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]>
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos) { struct tcp_iter_state *st = seq->private; void *rc; if (*pos && *pos == st->last_pos) { rc = tcp_seek_last_pos(seq); if (rc) goto out; } st->state = TCP_SEQ_STATE_LISTENING; st->num = 0; st->bucket = 0; st->offset = 0; rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; out: st->last_pos = *pos; return rc; }
0
[ "CWE-362" ]
linux-2.6
f6d8bd051c391c1c0458a30b2a7abcd939329259
19,255,753,709,105,548,000,000,000,000,000,000,000
21
inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <[email protected]> Cc: Herbert Xu <[email protected]> Signed-off-by: David S. Miller <[email protected]>
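The kernel patch above uses RCU; as a portable illustration of the companion idea, copying the options instead of caching a shared pointer, the sketch below takes a snapshot under a lock so no caller holds a pointer another thread may free. It assumes pthreads, not kernel primitives:

#include <pthread.h>

struct ip_opts { unsigned char data[40]; unsigned char len; };

static pthread_mutex_t opts_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ip_opts *shared_opts;	/* may be swapped or freed by others */

static int snapshot_opts(struct ip_opts *copy)
{
	int have = 0;

	pthread_mutex_lock(&opts_lock);
	if (shared_opts != NULL) {
		*copy = *shared_opts;	/* private copy, safe to use after unlock */
		have = 1;
	}
	pthread_mutex_unlock(&opts_lock);
	return have;
}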
static int wsgi_hook_intercept(request_rec *r) { WSGIServerConfig *config = NULL; apr_array_header_t *aliases = NULL; WSGIAliasEntry *entries = NULL; WSGIAliasEntry *entry = NULL; ap_regmatch_t matches[AP_MAX_REG_MATCH]; const char *location = NULL; const char *application = NULL; int i = 0; config = ap_get_module_config(r->server->module_config, &wsgi_module); if (!config->alias_list) return DECLINED; if (r->uri[0] != '/' && r->uri[0]) return DECLINED; aliases = config->alias_list; entries = (WSGIAliasEntry *)aliases->elts; for (i = 0; i < aliases->nelts; ++i) { int l = 0; entry = &entries[i]; if (entry->regexp) { if (!ap_regexec(entry->regexp, r->uri, AP_MAX_REG_MATCH, matches, 0)) { if (entry->application) { l = matches[0].rm_eo; location = apr_pstrndup(r->pool, r->uri, l); application = ap_pregsub(r->pool, entry->application, r->uri, AP_MAX_REG_MATCH, matches); } } } else if (entry->location) { l = wsgi_alias_matches(r->uri, entry->location); location = entry->location; application = entry->application; } if (l > 0) { if (!strcmp(location, "/")) { r->filename = apr_pstrcat(r->pool, application, r->uri, NULL); } else { r->filename = apr_pstrcat(r->pool, application, r->uri + l, NULL); } r->handler = "wsgi-script"; apr_table_setn(r->notes, "alias-forced-type", r->handler); if (entry->process_group) { apr_table_setn(r->notes, "mod_wsgi.process_group", entry->process_group); } if (entry->application_group) { apr_table_setn(r->notes, "mod_wsgi.application_group", entry->application_group); } if (entry->callable_object) { apr_table_setn(r->notes, "mod_wsgi.callable_object", entry->callable_object); } if (entry->pass_authorization == 0) apr_table_setn(r->notes, "mod_wsgi.pass_authorization", "0"); else if (entry->pass_authorization == 1) apr_table_setn(r->notes, "mod_wsgi.pass_authorization", "1"); return OK; } } return DECLINED; }
0
[ "CWE-264" ]
mod_wsgi
d9d5fea585b23991f76532a9b07de7fcd3b649f4
125,228,609,060,686,130,000,000,000,000,000,000,000
89
Local privilege escalation when using daemon mode. (CVE-2014-0240)
static void opj_j2k_tcp_data_destroy(opj_tcp_t *p_tcp)
{
    if (p_tcp->m_data) {
        opj_free(p_tcp->m_data);
        p_tcp->m_data = NULL;
        p_tcp->m_data_size = 0;
    }
}
0
[ "CWE-416", "CWE-787" ]
openjpeg
4241ae6fbbf1de9658764a80944dc8108f2b4154
296,829,233,707,920,100,000,000,000,000,000,000,000
8
Fix assertion in debug mode / heap-based buffer overflow in opj_write_bytes_LE for Cinema profiles with numresolutions = 1 (#985)
gdm_session_answer_query (GdmSession *self,
                          const char *service_name,
                          const char *text)
{
    GdmSessionConversation *conversation;

    g_return_if_fail (GDM_IS_SESSION (self));

    conversation = find_conversation_by_name (self, service_name);

    if (conversation != NULL) {
        answer_pending_query (conversation, text);
    }
}
0
[]
gdm
5ac224602f1d603aac5eaa72e1760d3e33a26f0a
134,211,993,191,336,130,000,000,000,000,000,000,000
14
session: disconnect signals from worker proxy when conversation is freed We don't want an outstanding reference on the worker proxy to lead to signal handlers getting dispatched after the conversation is freed. https://bugzilla.gnome.org/show_bug.cgi?id=758032
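A sketch of the lifetime rule in the message above, assuming a simplified Conversation type rather than gdm's real structures: when the conversation is torn down, every handler connected with it as user_data is detached first, so a still-referenced proxy can no longer dispatch into freed memory.

#include <gio/gio.h>

typedef struct {
    GDBusProxy *worker_proxy;   /* may outlive the conversation */
} Conversation;

static void conversation_free(Conversation *conversation)
{
    if (conversation->worker_proxy != NULL) {
        /* Removes all handlers whose user_data is this conversation. */
        g_signal_handlers_disconnect_by_data (conversation->worker_proxy,
                                              conversation);
        g_object_unref (conversation->worker_proxy);
    }
    g_free (conversation);
}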
static inline Quantum ClampPixel(const MagickRealType value)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  return((Quantum) value);
#else
  if (value < 0.0f)
    return(0.0);
  if (value >= (MagickRealType) QuantumRange)
    return((Quantum) QuantumRange);
  return(value);
#endif
}
1
[ "CWE-119", "CWE-787" ]
ImageMagick
450bd716ed3b9186dd10f9e60f630a3d9eeea2a4
166,207,034,266,473,880,000,000,000,000,000,000,000
12
PHP_FUNCTION(chdir)
{
    char *str;
    int ret, str_len;

    if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &str, &str_len) == FAILURE) {
        RETURN_FALSE;
    }

    if ((PG(safe_mode) && !php_checkuid(str, NULL, CHECKUID_CHECK_FILE_AND_DIR)) ||
        php_check_open_basedir(str TSRMLS_CC)) {
        RETURN_FALSE;
    }
    ret = VCWD_CHDIR(str);

    if (ret != 0) {
        php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s (errno %d)", strerror(errno), errno);
        RETURN_FALSE;
    }

    if (BG(CurrentStatFile) && !IS_ABSOLUTE_PATH(BG(CurrentStatFile), strlen(BG(CurrentStatFile)))) {
        efree(BG(CurrentStatFile));
        BG(CurrentStatFile) = NULL;
    }
    if (BG(CurrentLStatFile) && !IS_ABSOLUTE_PATH(BG(CurrentLStatFile), strlen(BG(CurrentLStatFile)))) {
        efree(BG(CurrentLStatFile));
        BG(CurrentLStatFile) = NULL;
    }

    RETURN_TRUE;
}
1
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
159,661,868,390,382,330,000,000,000,000,000,000,000
30
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
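A minimal, runnable illustration of the check the message describes (the helper name is invented): a path handed over as (pointer, length) is rejected if a NUL byte occurs before the reported end, because C-level filesystem calls would otherwise operate on the silently truncated prefix.

#include <stdio.h>
#include <string.h>

/* Returns 1 if the first reported_len bytes contain an embedded NUL. */
static int path_has_embedded_nul(const char *path, size_t reported_len)
{
    return memchr(path, '\0', reported_len) != NULL;
}

int main(void)
{
    const char evil[] = "safe.txt\0../../etc/passwd";
    printf("%d\n", path_has_embedded_nul(evil, sizeof(evil) - 1)); /* 1: rejected */
    printf("%d\n", path_has_embedded_nul("safe.txt", 8));          /* 0: accepted */
    return 0;
}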
void dd_create_basic_files(struct dump_dir *dd, uid_t uid, const char *chroot_dir)
{
    char long_str[sizeof(long) * 3 + 2];

    char *time_str = dd_load_text_ext(dd, FILENAME_TIME,
                DD_FAIL_QUIETLY_ENOENT | DD_LOAD_TEXT_RETURN_NULL_ON_FAILURE);
    if (!time_str)
    {
        time_t t = time(NULL);
        sprintf(long_str, "%lu", (long)t);
        dd_save_text(dd, FILENAME_TIME, long_str);
    }
    free(time_str);

    /* it doesn't make sense to create the uid file if uid == -1 */
    if (uid != (uid_t)-1L)
    {
        snprintf(long_str, sizeof(long_str), "%li", (long)uid);
        dd_save_text(dd, FILENAME_UID, long_str);
    }

    struct utsname buf;
    uname(&buf); /* never fails */
    dd_save_text(dd, FILENAME_KERNEL, buf.release);
    dd_save_text(dd, FILENAME_ARCHITECTURE, buf.machine);
    dd_save_text(dd, FILENAME_HOSTNAME, buf.nodename);

    /* if release exists in dumpdir don't create it, but don't warn
     * if it doesn't
     * i.e: anaconda doesn't have /etc/{fedora,redhat}-release and trying to load it
     * results in errors: rhbz#725857
     */
    char *release = dd_load_text_ext(dd, FILENAME_OS_RELEASE,
                DD_FAIL_QUIETLY_ENOENT | DD_LOAD_TEXT_RETURN_NULL_ON_FAILURE);
    if (!release)
    {
        release = load_text_file("/etc/system-release",
                                 DD_LOAD_TEXT_RETURN_NULL_ON_FAILURE);
        if (!release)
            release = load_text_file("/etc/redhat-release", /*flags:*/ 0);
        dd_save_text(dd, FILENAME_OS_RELEASE, release);
        if (chroot_dir)
        {
            free(release);
            char *chrooted_name = concat_path_file(chroot_dir, "/etc/system-release");
            release = load_text_file(chrooted_name, /*flags:*/ 0);
            free(chrooted_name);
            if (release[0])
                dd_save_text(dd, FILENAME_OS_RELEASE_IN_ROOTDIR, release);
        }
    }
    free(release);
}
0
[ "CWE-264" ]
libreport
3bbf961b1884dd32654dd39b360dd78ef294b10a
182,654,921,728,357,670,000,000,000,000,000,000,000
54
never follow symlinks rhbz#887866 - use lchown instead chown and O_NOFOLLOW where we use open
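A sketch of the hardening pattern the message describes, not libreport's actual helper: create the file with O_NOFOLLOW | O_EXCL so a pre-planted symlink makes the call fail, and change ownership through the descriptor (or lchown) so the operation never chases a link target.

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

static int create_owned_file(const char *path, uid_t uid, gid_t gid)
{
    int fd = open(path, O_WRONLY | O_CREAT | O_EXCL | O_NOFOLLOW, 0600);
    if (fd < 0)
        return -1;

    /* fchown acts on the fd we just created; lchown would act on the name
     * without following symlinks.  Either avoids the chown()-follows-link
     * trap that chown() on an attacker-controlled path opens up. */
    if (fchown(fd, uid, gid) != 0) {
        close(fd);
        unlink(path);
        return -1;
    }
    return fd;
}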
decompression_cleanup(struct archive_read *a)
{
    struct xar *xar;
    int r;

    xar = (struct xar *)(a->format->data);
    r = ARCHIVE_OK;
    if (xar->stream_valid) {
        if (inflateEnd(&(xar->stream)) != Z_OK) {
            archive_set_error(&a->archive,
                ARCHIVE_ERRNO_MISC,
                "Failed to clean up zlib decompressor");
            r = ARCHIVE_FATAL;
        }
    }
#if defined(HAVE_BZLIB_H) && defined(BZ_CONFIG_ERROR)
    if (xar->bzstream_valid) {
        if (BZ2_bzDecompressEnd(&(xar->bzstream)) != BZ_OK) {
            archive_set_error(&a->archive,
                ARCHIVE_ERRNO_MISC,
                "Failed to clean up bzip2 decompressor");
            r = ARCHIVE_FATAL;
        }
    }
#endif
#if defined(HAVE_LZMA_H) && defined(HAVE_LIBLZMA)
    if (xar->lzstream_valid)
        lzma_end(&(xar->lzstream));
#elif defined(HAVE_LZMA_H) && defined(HAVE_LIBLZMA)
    if (xar->lzstream_valid) {
        if (lzmadec_end(&(xar->lzstream)) != LZMADEC_OK) {
            archive_set_error(&a->archive,
                ARCHIVE_ERRNO_MISC,
                "Failed to clean up lzmadec decompressor");
            r = ARCHIVE_FATAL;
        }
    }
#endif

    return (r);
}
0
[ "CWE-125" ]
libarchive
fa7438a0ff4033e4741c807394a9af6207940d71
96,856,908,394,834,760,000,000,000,000,000,000,000
40
Do something sensible for empty strings to make fuzzers happy.
static int sas_get_phy_change_count(struct domain_device *dev,
                                    int phy_id, int *pcc)
{
    int res;
    struct smp_resp *disc_resp;

    disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
    if (!disc_resp)
        return -ENOMEM;

    res = sas_get_phy_discover(dev, phy_id, disc_resp);
    if (!res)
        *pcc = disc_resp->disc.change_count;

    kfree(disc_resp);
    return res;
}
0
[ "CWE-399", "CWE-772" ]
linux
4a491b1ab11ca0556d2fda1ff1301e862a2d44c4
136,404,262,884,464,400,000,000,000,000,000,000,000
17
scsi: libsas: fix memory leak in sas_smp_get_phy_events() We've got a memory leak with the following producer: while true; do cat /sys/class/sas_phy/phy-1:0:12/invalid_dword_count >/dev/null; done The buffer req is allocated and not freed after we return. Fix it. Fixes: 2908d778ab3e ("[SCSI] aic94xx: new driver") Signed-off-by: Jason Yan <[email protected]> CC: John Garry <[email protected]> CC: chenqilin <[email protected]> CC: chenxiang <[email protected]> Reviewed-by: Christoph Hellwig <[email protected]> Reviewed-by: Hannes Reinecke <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
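A generic, runnable sketch of the fix shape described above (names are illustrative, not the libsas code): every exit path must release the request/response buffers, and funneling returns through a single label keeps the frees in one place.

#include <stdlib.h>
#include <string.h>

static int query_phy_events(size_t req_size, size_t resp_size, int *out)
{
    int err = 0;
    unsigned char *req = malloc(req_size);
    unsigned char *resp = malloc(resp_size);

    if (!req || !resp) {
        err = -1;
        goto out;
    }

    memset(resp, 0, resp_size);
    *out = resp[0];            /* stand-in for parsing the SMP response */

out:
    free(req);                 /* free(NULL) is a no-op, so this is safe */
    free(resp);
    return err;
}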
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
    return skb->end;
0
[ "CWE-20" ]
linux
2b16f048729bf35e6c28a40cbfad07239f9dcd90
259,928,556,977,425,270,000,000,000,000,000,000,000
4
net: create skb_gso_validate_mac_len() If you take a GSO skb, and split it into packets, will the MAC length (L2 + L3 + L4 headers + payload) of those packets be small enough to fit within a given length? Move skb_gso_mac_seglen() to skbuff.h with other related functions like skb_gso_network_seglen() so we can use it, and then create skb_gso_validate_mac_len to do the full calculation. Signed-off-by: Daniel Axtens <[email protected]> Signed-off-by: David S. Miller <[email protected]>
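The arithmetic the commit describes, written out as a standalone sketch rather than the kernel helper: each packet split out of a GSO skb occupies the L2+L3+L4 header length plus one segment's payload on the wire, and validation means checking that worst-case length against the limit.

#include <stdbool.h>
#include <stddef.h>

struct gso_info {
    size_t mac_hdr_len;   /* L2 header bytes */
    size_t net_hdr_len;   /* L3 header bytes */
    size_t trans_hdr_len; /* L4 header bytes */
    size_t gso_size;      /* payload bytes per segment */
};

static bool gso_fits_mac_len(const struct gso_info *g, size_t limit)
{
    size_t seglen = g->mac_hdr_len + g->net_hdr_len +
                    g->trans_hdr_len + g->gso_size;
    return seglen <= limit;
}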
fill_body (mu_message_t msg, mu_stream_t instr)
{
  int rc;
  mu_body_t body = NULL;
  mu_stream_t stream = NULL;
  mu_off_t n;

  rc = mu_message_get_body (msg, &body);
  if (rc)
    {
      mu_error (_("cannot get message body: %s"), mu_strerror (rc));
      return 1;
    }

  rc = mu_body_get_streamref (body, &stream);
  if (rc)
    {
      mu_error (_("cannot get body: %s"), mu_strerror (rc));
      return 1;
    }

  rc = mu_stream_copy (stream, instr, 0, &n);
  mu_stream_destroy (&stream);

  if (rc)
    {
      mu_error (_("cannot copy temporary stream: %s"), mu_strerror (rc));
      return 1;
    }

  if (n == 0)
    {
      if (mailvar_is_true (mailvar_name_nullbody))
        {
          char *str;
          if (mailvar_get (&str, mailvar_name_nullbodymsg,
                           mailvar_type_string, 0) == 0)
            mu_error ("%s", _(str));
        }
      else
        return 1;
    }

  return 0;
}
0
[]
mailutils
4befcfd015256c568121653038accbd84820198f
126,786,308,018,188,850,000,000,000,000,000,000,000
44
mail: disable compose escapes in non-interctive mode. * NEWS: Document changes. * doc/texinfo/programs/mail.texi: Document changes. * mail/send.c (mail_compose_send): Recognize escapes only in interactive mode.
HTTPSession::onMessageBegin(HTTPCodec::StreamID streamID, HTTPMessage* msg) {
  VLOG(4) << "processing new msg streamID=" << streamID << " " << *this;
  if (infoCallback_) {
    infoCallback_->onRequestBegin(*this);
  }

  HTTPTransaction* txn = findTransaction(streamID);
  if (txn) {
    if (isDownstream() && txn->isPushed()) {
      // Push streams are unidirectional (half-closed). If the downstream
      // attempts to send ingress, abort with STREAM_CLOSED error.
      HTTPException ex(HTTPException::Direction::INGRESS_AND_EGRESS,
                       "Downstream attempts to send ingress, abort.");
      ex.setCodecStatusCode(ErrorCode::STREAM_CLOSED);
      txn->onError(ex);
    }
    return; // If this transaction is already registered, no need to add it now
  }

  http2::PriorityUpdate messagePriority = getMessagePriority(msg);
  txn = createTransaction(streamID,
                          HTTPCodec::NoStream,
                          HTTPCodec::NoExAttributes,
                          messagePriority);
  if (!txn) {
    return; // This could happen if the socket is bad.
  }

  if (!codec_->supportsParallelRequests() && getPipelineStreamCount() > 1) {
    // The previous transaction hasn't completed yet. Pause reads until
    // it completes; this requires pausing both transactions.

    // HTTP/1.1 pipeline is detected, and which is incompactible with
    // ByteEventTracker. Drain all the ByteEvents
    CHECK(byteEventTracker_);
    byteEventTracker_->drainByteEvents();

    // drainByteEvents() may detach txn(s). Don't pause read if one txn left
    if (getPipelineStreamCount() < 2) {
      DCHECK(readsUnpaused());
      return;
    }

    // There must be at least two transactions (we just checked). The previous
    // txns haven't completed yet. Pause reads until they complete
    DCHECK_GE(transactions_.size(), 2);
    for (auto it = ++transactions_.rbegin(); it != transactions_.rend(); ++it) {
      DCHECK(it->second.isIngressEOMSeen());
      it->second.pauseIngress();
    }
    transactions_.rbegin()->second.pauseIngress();
    DCHECK_EQ(liveTransactions_, 0);
    DCHECK(readsPaused());
  }
}
0
[ "CWE-20" ]
proxygen
0600ebe59c3e82cd012def77ca9ca1918da74a71
154,318,022,484,106,300,000,000,000,000,000,000,000
53
Check that a secondary auth manager is set before dereferencing. Summary: CVE-2018-6343 Reviewed By: mingtaoy Differential Revision: D12994423 fbshipit-source-id: 9229ec11da8085f1fa153595e8e5353e19d06fb7
poolBytesToAllocateFor(int blockSize)
{
  /* Unprotected math would be:
  ** return offsetof(BLOCK, s) + blockSize * sizeof(XML_Char);
  **
  ** Detect overflow, avoiding _signed_ overflow undefined behavior
  ** For a + b * c we check b * c in isolation first, so that addition of a
  ** on top has no chance of making us accept a small non-negative number
  */
  const size_t stretch = sizeof(XML_Char);  /* can be 4 bytes */

  if (blockSize <= 0)
    return 0;

  if (blockSize > (int)(INT_MAX / stretch))
    return 0;

  {
    const int stretchedBlockSize = blockSize * (int)stretch;
    const int bytesToAllocate = (int)(
        offsetof(BLOCK, s) + (unsigned)stretchedBlockSize);
    if (bytesToAllocate < 0)
      return 0;

    return (size_t)bytesToAllocate;
  }
}
0
[ "CWE-611" ]
libexpat
c4bf96bb51dd2a1b0e185374362ee136fe2c9d7f
273,616,023,796,867,120,000,000,000,000,000,000,000
27
xmlparse.c: Fix external entity infinite loop bug (CVE-2017-9233)
TEST_F(SingleAllowMissingInOrListTest, BadJwt) {
  EXPECT_CALL(mock_cb_, onComplete(Status::JwtExpired));
  auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, ExpiredToken}};
  context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);
  verifier_->verify(context_);
  EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader));
}
0
[ "CWE-303", "CWE-703" ]
envoy
ea39e3cba652bcc4b11bb0d5c62b017e584d2e5a
232,355,746,841,532,500,000,000,000,000,000,000,000
7
jwt_authn: fix a bug where JWT with wrong issuer is allowed in allow_missing case (#15194) [jwt] When allow_missing is used inside RequiresAny, the requests with JWT with wrong issuer are accepted. This is a bug, allow_missing should only allow requests without any JWT. This change fixed the above issue by preserving JwtUnknownIssuer in allow_missing case. Signed-off-by: Wayne Zhang <[email protected]>
adjust_poll(int count)
{
    G.polladj_count += count;
    if (G.polladj_count > POLLADJ_LIMIT) {
        G.polladj_count = 0;
        if (G.poll_exp < MAXPOLL) {
            G.poll_exp++;
            VERB4 bb_error_msg("polladj: discipline_jitter:%f ++poll_exp=%d",
                    G.discipline_jitter, G.poll_exp);
        }
    } else if (G.polladj_count < -POLLADJ_LIMIT
            || (count < 0 && G.poll_exp > BIGPOLL)) {
        G.polladj_count = 0;
        if (G.poll_exp > MINPOLL) {
            llist_t *item;

            G.poll_exp--;
            /* Correct p->next_action_time in each peer
             * which waits for sending, so that they send earlier.
             * Old pp->next_action_time are on the order
             * of t + (1 << old_poll_exp) + small_random,
             * we simply need to subtract ~half of that.
             */
            for (item = G.ntp_peers; item != NULL; item = item->link) {
                peer_t *pp = (peer_t *) item->data;
                if (pp->p_fd < 0)
                    pp->next_action_time -= (1 << G.poll_exp);
            }
            VERB4 bb_error_msg("polladj: discipline_jitter:%f --poll_exp=%d",
                    G.discipline_jitter, G.poll_exp);
        }
    } else {
        VERB4 bb_error_msg("polladj: count:%d", G.polladj_count);
    }
}
0
[ "CWE-399" ]
busybox
150dc7a2b483b8338a3e185c478b4b23ee884e71
44,402,456,706,216,170,000,000,000,000,000,000,000
34
ntpd: respond only to client and symmetric active packets The busybox NTP implementation doesn't check the NTP mode of packets received on the server port and responds to any packet with the right size. This includes responses from another NTP server. An attacker can send a packet with a spoofed source address in order to create an infinite loop of responses between two busybox NTP servers. Adding more packets to the loop increases the traffic between the servers until one of them has a fully loaded CPU and/or network. Signed-off-by: Miroslav Lichvar <[email protected]> Signed-off-by: Denys Vlasenko <[email protected]>
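A small sketch of the mode check the commit calls for (the helper name is invented): the first byte of an NTP packet packs leap (2 bits), version (3 bits) and mode (3 bits), and a server should answer only mode 3 (client) and mode 1 (symmetric active), so replies from another server (mode 4) can never ping-pong between two daemons.

#include <stdbool.h>
#include <stdint.h>

#define NTP_MODE_SYM_ACTIVE 1
#define NTP_MODE_CLIENT     3

static bool ntp_should_respond(const uint8_t *pkt, unsigned len)
{
    if (len < 48)                    /* minimum NTP header size */
        return false;

    unsigned mode = pkt[0] & 0x07;   /* low 3 bits of the LI/VN/Mode byte */
    return mode == NTP_MODE_CLIENT || mode == NTP_MODE_SYM_ACTIVE;
}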
e_mail_formatter_format_text_header (EMailFormatter *formatter,
                                     GString *buffer,
                                     const gchar *label,
                                     const gchar *value,
                                     guint32 flags)
{
    GtkTextDirection direction;
    const gchar *fmt, *html;
    const gchar *display;
    gchar *mhtml = NULL;

    g_return_if_fail (E_IS_MAIL_FORMATTER (formatter));
    g_return_if_fail (buffer != NULL);
    g_return_if_fail (label != NULL);

    if (value == NULL)
        return;

    while (*value == ' ')
        value++;

    if (!(flags & E_MAIL_FORMATTER_HEADER_FLAG_HTML)) {
        CamelMimeFilterToHTMLFlags text_format_flags;

        text_format_flags =
            e_mail_formatter_get_text_format_flags (formatter);
        html = mhtml = camel_text_to_html (
            value, text_format_flags, 0);
    } else {
        html = value;
    }

    direction = gtk_widget_get_default_direction ();

    if (flags & E_MAIL_FORMATTER_HEADER_FLAG_NOCOLUMNS) {
        if (flags & E_MAIL_FORMATTER_HEADER_FLAG_BOLD) {
            fmt = "<tr style=\"display: %s\">"
                  "<td><b>%s:</b> %s</td></tr>";
        } else {
            fmt = "<tr style=\"display: %s\">"
                  "<td>%s: %s</td></tr>";
        }
    } else if (flags & E_MAIL_FORMATTER_HEADER_FLAG_NODEC) {
        if (direction == GTK_TEXT_DIR_RTL)
            fmt = "<tr class=\"header\" style=\"display: %s\">"
                  "<th class=\"header rtl\">%s</th>"
                  "<td class=\"header rtl\">%s</td>"
                  "</tr>";
        else
            fmt = "<tr class=\"header\" style=\"display: %s\">"
                  "<th class=\"header ltr\">%s</th>"
                  "<td class=\"header ltr\">%s</td>"
                  "</tr>";
    } else {
        if (direction == GTK_TEXT_DIR_RTL)
            fmt = "<tr class=\"header\" style=\"display: %s\">"
                  "<th class=\"header rtl\">%s:</th>"
                  "<td class=\"header rtl\">%s</td>"
                  "</tr>";
        else
            fmt = "<tr class=\"header\" style=\"display: %s\">"
                  "<th class=\"header ltr\">%s:</th>"
                  "<td class=\"header ltr\">%s</td>"
                  "</tr>";
    }

    if (flags & E_MAIL_FORMATTER_HEADER_FLAG_HIDDEN)
        display = "none";
    else
        display = "table-row";

    g_string_append_printf (buffer, fmt, display, label, html);

    g_free (mhtml);
}
0
[ "CWE-347" ]
evolution
f66cd3e1db301d264563b4222a3574e2e58e2b85
266,227,403,392,358,940,000,000,000,000,000,000,000
75
eds-I#3 - [GPG] Mails that are not encrypted look encrypted Related to https://gitlab.gnome.org/GNOME/evolution-data-server/issues/3