Columns (name: type, observed range):

func: string, lengths 0 – 484k
target: int64, values 0 – 1
cwe: list, lengths 0 – 4
project: string, 799 distinct values
commit_id: string, length exactly 40
hash: float64, 1,215,700,430,453,689,100,000,000 – 340,281,914,521,452,260,000,000,000,000
size: int64, 1 – 24k
message: string, lengths 0 – 13.3k

Each record below gives the raw func source first, followed by its remaining fields, labeled.

bool Item_sp_variable::fix_fields_from_item(THD *thd, Item **, const Item *it) { m_thd= thd; /* NOTE: this must be set before any this_xxx() */ DBUG_ASSERT(it->fixed); max_length= it->max_length; decimals= it->decimals; unsigned_flag= it->unsigned_flag; with_param= 1; if (thd->lex->current_select && thd->lex->current_select->master_unit()->item) thd->lex->current_select->master_unit()->item->with_param= 1; fixed= 1; collation.set(it->collation.collation, it->collation.derivation); return FALSE; }
target: 0
cwe: [ "CWE-416" ]
project: server
commit_id: c02ebf3510850ba78a106be9974c94c3b97d8585
hash: 46,536,210,447,879,390,000,000,000,000,000,000,000
size: 17
message: MDEV-24176 Preparations 1. moved fix_vcol_exprs() call to open_table() mysql_alter_table() doesn't do lock_tables() so it cannot win from fix_vcol_exprs() from there. Tests affected: main.default_session 2. Vanilla cleanups and comments.

ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t block) { struct ext4_extent_header *eh = path->p_hdr; struct ext4_extent_idx *r, *l, *m; ext_debug("binsearch for %u(idx): ", block); l = EXT_FIRST_INDEX(eh) + 1; r = EXT_LAST_INDEX(eh); while (l <= r) { m = l + (r - l) / 2; if (block < le32_to_cpu(m->ei_block)) r = m - 1; else l = m + 1; ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block), r, le32_to_cpu(r->ei_block)); } path->p_idx = l - 1; ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block), ext4_idx_pblock(path->p_idx)); #ifdef CHECK_BINSEARCH { struct ext4_extent_idx *chix, *ix; int k; chix = ix = EXT_FIRST_INDEX(eh); for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) { if (k != 0 && le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) { printk(KERN_DEBUG "k=%d, ix=0x%p, " "first=0x%p\n", k, ix, EXT_FIRST_INDEX(eh)); printk(KERN_DEBUG "%u <= %u\n", le32_to_cpu(ix->ei_block), le32_to_cpu(ix[-1].ei_block)); } BUG_ON(k && le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)); if (block < le32_to_cpu(ix->ei_block)) break; chix = ix; } BUG_ON(chix != path->p_idx); } #endif }
target: 0
cwe: [ "CWE-362" ]
project: linux-2.6
commit_id: dee1f973ca341c266229faa5a1a5bb268bed3531
hash: 1,673,519,444,685,081,600,000,000,000,000,000,000
size: 53
message: ext4: race-condition protection for ext4_convert_unwritten_extents_endio We assumed that at the time we call ext4_convert_unwritten_extents_endio() extent in question is fully inside [map.m_lblk, map->m_len] because it was already split during submission. But this may not be true due to a race between writeback vs fallocate. If extent in question is larger than requested we will split it again. Special precautions should being done if zeroout required because [map.m_lblk, map->m_len] already contains valid data. Signed-off-by: Dmitry Monakhov <[email protected]> Signed-off-by: "Theodore Ts'o" <[email protected]> Cc: [email protected]

BGD_DECLARE(int) gdImageColorAllocate (gdImagePtr im, int r, int g, int b) { return gdImageColorAllocateAlpha (im, r, g, b, gdAlphaOpaque); }
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: libgd
commit_id: 77f619d48259383628c3ec4654b1ad578e9eb40e
hash: 74,316,920,764,324,590,000,000,000,000,000,000,000
size: 4
message: fix #215 gdImageFillToBorder stack-overflow when invalid color is used

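The message points to a missing validity check on the fill color. A minimal sketch of that class of guard, assuming libgd's public gdImage fields (trueColor, colorsTotal) and not reproducing the project's actual patch:

#include <gd.h>

/* Reject a color that is not a valid palette index before handing it
 * to the fill routine (illustrative guard, not the actual fix). */
void fill_to_border_checked(gdImagePtr im, int x, int y,
                            int border, int color)
{
    if (!im->trueColor && (color < 0 || color >= im->colorsTotal))
        return;                 /* invalid palette index: do nothing */
    gdImageFillToBorder(im, x, y, border, color);
}
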
libssh2_channel_flush_ex(LIBSSH2_CHANNEL *channel, int stream) { int rc; if(!channel) return LIBSSH2_ERROR_BAD_USE; BLOCK_ADJUST(rc, channel->session, _libssh2_channel_flush(channel, stream)); return rc; }
target: 0
cwe: [ "CWE-787" ]
project: libssh2
commit_id: dc109a7f518757741590bb993c0c8412928ccec2
hash: 64,689,155,759,547,010,000,000,000,000,000,000,000
size: 11
message: Security fixes (#315) * Bounds checks Fixes for CVEs https://www.libssh2.org/CVE-2019-3863.html https://www.libssh2.org/CVE-2019-3856.html * Packet length bounds check CVE https://www.libssh2.org/CVE-2019-3855.html * Response length check CVE https://www.libssh2.org/CVE-2019-3859.html * Bounds check CVE https://www.libssh2.org/CVE-2019-3857.html * Bounds checking CVE https://www.libssh2.org/CVE-2019-3859.html and additional data validation * Check bounds before reading into buffers * Bounds checking CVE https://www.libssh2.org/CVE-2019-3859.html * declare SIZE_MAX and UINT_MAX if needed

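The listed CVEs share one pattern: a length read from the wire was trusted before copying. A minimal sketch of "check bounds before reading into buffers", with hypothetical names, not libssh2's code:

#include <stdint.h>
#include <string.h>

int copy_packet_field(uint8_t *dst, size_t dst_len,
                      const uint8_t *src, size_t src_avail,
                      size_t declared_len)
{
    /* declared_len comes from the wire and must not be trusted */
    if (declared_len > src_avail || declared_len > dst_len)
        return -1;          /* refuse rather than read/write out of bounds */
    memcpy(dst, src, declared_len);
    return 0;
}
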
void CLASS foveon_load_camf() { unsigned type, wide, high, i, j, row, col, diff; ushort huff[258], vpred[2][2] = {{512,512},{512,512}}, hpred[2]; fseek (ifp, meta_offset, SEEK_SET); type = get4(); get4(); get4(); wide = get4(); high = get4(); if (type == 2) { fread (meta_data, 1, meta_length, ifp); for (i=0; i < meta_length; i++) { high = (high * 1597 + 51749) % 244944; wide = high * (INT64) 301593171 >> 24; meta_data[i] ^= ((((high << 8) - wide) >> 1) + wide) >> 17; } } else if (type == 4) { free (meta_data); meta_data = (char *) malloc (meta_length = wide*high*3/2); merror (meta_data, "foveon_load_camf()"); foveon_huff (huff); get4(); getbits(-1); for (j=row=0; row < high; row++) { for (col=0; col < wide; col++) { diff = ljpeg_diff(huff); if (col < 2) hpred[col] = vpred[row & 1][col] += diff; else hpred[col & 1] += diff; if (col & 1) { meta_data[j++] = hpred[0] >> 4; meta_data[j++] = hpred[0] << 4 | hpred[1] >> 8; meta_data[j++] = hpred[1]; } } } } #ifdef DCRAW_VERBOSE else fprintf (stderr,_("%s has unknown CAMF type %d.\n"), ifname, type); #endif }
target: 0
cwe: [ "CWE-129" ]
project: LibRaw
commit_id: 89d065424f09b788f443734d44857289489ca9e2
hash: 303,099,538,776,117,640,000,000,000,000,000,000,000
size: 41
message: fixed two more problems found by fuzzer

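CWE-129 is improper validation of an array index. A generic sketch of the guard such fuzzer-driven fixes add (illustrative only, not LibRaw's actual change):

#include <stddef.h>

int checked_store(unsigned char *buf, size_t buf_len,
                  size_t idx, unsigned char value)
{
    if (idx >= buf_len)
        return -1;          /* computed index falls outside the allocation */
    buf[idx] = value;
    return 0;
}
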
static SECURITY_STATUS SEC_ENTRY kerberos_QueryContextAttributesW(PCtxtHandle phContext, ULONG ulAttribute, void* pBuffer) { return kerberos_QueryContextAttributesA(phContext, ulAttribute, pBuffer); }
target: 0
cwe: []
project: FreeRDP
commit_id: 479e891545473f01c187daffdfa05fc752b54b72
hash: 235,264,434,153,077,900,000,000,000,000,000,000,000
size: 5
message: check return values for SetCredentialsAttributes, throw warnings for unsupported attributes

void test_checkout_nasty__dotcapitalgit_tree(void) { test_checkout_fails("refs/heads/dotcapitalgit_tree", ".GIT/foobar"); }
target: 0
cwe: [ "CWE-20", "CWE-706" ]
project: libgit2
commit_id: 3f7851eadca36a99627ad78cbe56a40d3776ed01
hash: 131,166,764,884,343,920,000,000,000,000,000,000,000
size: 4
message: Disallow NTFS Alternate Data Stream attacks, even on Linux/macOS A little-known feature of NTFS is that it offers to store metadata in so-called "Alternate Data Streams" (inspired by Apple's "resource forks") that are copied together with the file they are associated with. These Alternate Data Streams can be accessed via `<file name>:<stream name>:<stream type>`. Directories, too, have Alternate Data Streams, and they even have a default stream type `$INDEX_ALLOCATION`. Which means that `abc/` and `abc::$INDEX_ALLOCATION/` are actually equivalent. This is of course another attack vector on the Git directory that we definitely want to prevent. On Windows, we already do this incidentally, by disallowing colons in file/directory names. While it looks as if files'/directories' Alternate Data Streams are not accessible in the Windows Subsystem for Linux, and neither via CIFS/SMB-mounted network shares in Linux, it _is_ possible to access them on SMB-mounted network shares on macOS. Therefore, let's go the extra mile and prevent this particular attack _everywhere_. To keep things simple, let's just disallow *any* Alternate Data Stream of `.git`. This is libgit2's variant of CVE-2019-1352. Signed-off-by: Johannes Schindelin <[email protected]>

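As an illustration of the check the message describes (a hypothetical helper, not libgit2's implementation): compare only the part before the first ':', so `.git`, `.git:<stream>` and `.git::$INDEX_ALLOCATION` all hit the same reserved-name test:

#include <string.h>
#include <ctype.h>

static int is_dotgit_component(const char *name)
{
    size_t n = strcspn(name, ":");      /* stop at an NTFS stream separator */
    if (n != 4)
        return 0;
    return name[0] == '.' &&
           tolower((unsigned char)name[1]) == 'g' &&
           tolower((unsigned char)name[2]) == 'i' &&
           tolower((unsigned char)name[3]) == 't';
}
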
int ff_set_sar(AVCodecContext *avctx, AVRational sar) { int ret = av_image_check_sar(avctx->width, avctx->height, sar); if (ret < 0) { av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %d/%d\n", sar.num, sar.den); avctx->sample_aspect_ratio = (AVRational){ 0, 1 }; return ret; } else { avctx->sample_aspect_ratio = sar; } return 0; }
target: 0
cwe: [ "CWE-787" ]
project: FFmpeg
commit_id: 2080bc33717955a0e4268e738acf8c1eeddbf8cb
hash: 80,678,128,754,104,140,000,000,000,000,000,000,000
size: 14
message: avcodec/utils: correct align value for interplay Fixes out of array access Fixes: 452/fuzz-1-ffmpeg_VIDEO_AV_CODEC_ID_INTERPLAY_VIDEO_fuzzer Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/targets/ffmpeg Signed-off-by: Michael Niedermayer <[email protected]>

void MACH0_(iterate_chained_fixups)(struct MACH0_(obj_t) *bin, ut64 limit_start, ut64 limit_end, ut32 event_mask, RFixupCallback callback, void * context) { int i = 0; for (; i < bin->nsegs; i++) { if (!bin->chained_starts[i]) { continue; } int page_size = bin->chained_starts[i]->page_size; if (page_size < 1) { page_size = 4096; } ut64 start = bin->segs[i].fileoff; ut64 end = start + bin->segs[i].filesize; if (end >= limit_start && start <= limit_end) { ut64 page_idx = (R_MAX (start, limit_start) - start) / page_size; ut64 page_end_idx = (R_MIN (limit_end, end) - start) / page_size; for (; page_idx <= page_end_idx; page_idx++) { if (page_idx >= bin->chained_starts[i]->page_count) { break; } ut16 page_start = bin->chained_starts[i]->page_start[page_idx]; if (page_start == DYLD_CHAINED_PTR_START_NONE) { continue; } ut64 cursor = start + page_idx * page_size + page_start; while (cursor < limit_end && cursor < end) { ut8 tmp[8]; bool previous_rebasing = bin->rebasing_buffer; bin->rebasing_buffer = true; if (r_buf_read_at (bin->b, cursor, tmp, 8) != 8) { bin->rebasing_buffer = previous_rebasing; break; } bin->rebasing_buffer = previous_rebasing; ut64 raw_ptr = r_read_le64 (tmp); ut64 ptr_value = raw_ptr; ut64 delta, stride, addend; ut16 pointer_format = bin->chained_starts[i]->pointer_format; RFixupEvent event = R_FIXUP_EVENT_NONE; ut8 key = 0, addr_div = 0; ut16 diversity = 0; ut32 ordinal = UT32_MAX; if (pointer_format == DYLD_CHAINED_PTR_ARM64E) { stride = 8; bool is_auth = IS_PTR_AUTH (raw_ptr); bool is_bind = IS_PTR_BIND (raw_ptr); if (is_auth && is_bind) { struct dyld_chained_ptr_arm64e_auth_bind *p = (struct dyld_chained_ptr_arm64e_auth_bind *) &raw_ptr; event = R_FIXUP_EVENT_BIND_AUTH; delta = p->next; ordinal = p->ordinal; key = p->key; addr_div = p->addrDiv; diversity = p->diversity; } else if (!is_auth && is_bind) { struct dyld_chained_ptr_arm64e_bind *p = (struct dyld_chained_ptr_arm64e_bind *) &raw_ptr; event = R_FIXUP_EVENT_BIND; delta = p->next; ordinal = p->ordinal; addend = p->addend; } else if (is_auth && !is_bind) { struct dyld_chained_ptr_arm64e_auth_rebase *p = (struct dyld_chained_ptr_arm64e_auth_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE_AUTH; delta = p->next; ptr_value = p->target + bin->baddr; key = p->key; addr_div = p->addrDiv; diversity = p->diversity; } else { struct dyld_chained_ptr_arm64e_rebase *p = (struct dyld_chained_ptr_arm64e_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE; delta = p->next; ptr_value = ((ut64)p->high8 << 56) | p->target; } } else if (pointer_format == DYLD_CHAINED_PTR_ARM64E_USERLAND24) { stride = 8; struct dyld_chained_ptr_arm64e_bind24 *bind = (struct dyld_chained_ptr_arm64e_bind24 *) &raw_ptr; if (bind->bind) { delta = bind->next; if (bind->auth) { struct dyld_chained_ptr_arm64e_auth_bind24 *p = (struct dyld_chained_ptr_arm64e_auth_bind24 *) &raw_ptr; event = R_FIXUP_EVENT_BIND_AUTH; ordinal = p->ordinal; key = p->key; addr_div = p->addrDiv; diversity = p->diversity; } else { event = R_FIXUP_EVENT_BIND; ordinal = bind->ordinal; addend = bind->addend; } } else { if (bind->auth) { struct dyld_chained_ptr_arm64e_auth_rebase *p = (struct dyld_chained_ptr_arm64e_auth_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE_AUTH; delta = p->next; ptr_value = p->target + bin->baddr; key = p->key; addr_div = p->addrDiv; diversity = p->diversity; } else { struct dyld_chained_ptr_arm64e_rebase *p = (struct dyld_chained_ptr_arm64e_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE; delta = p->next; ptr_value = bin->baddr + (((ut64)p->high8 << 56) | 
p->target); } } } else if (pointer_format == DYLD_CHAINED_PTR_64_OFFSET) { stride = 4; struct dyld_chained_ptr_64_bind *bind = (struct dyld_chained_ptr_64_bind *) &raw_ptr; if (bind->bind) { event = R_FIXUP_EVENT_BIND; delta = bind->next; ordinal = bind->ordinal; addend = bind->addend; } else { struct dyld_chained_ptr_64_rebase *p = (struct dyld_chained_ptr_64_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE; delta = p->next; ptr_value = bin->baddr + (((ut64)p->high8 << 56) | p->target); } } else { eprintf ("Unsupported chained pointer format %d\n", pointer_format); return; } if (cursor >= limit_start && cursor <= limit_end - 8 && (event & event_mask) != 0) { bool carry_on; switch (event) { case R_FIXUP_EVENT_BIND: { RFixupBindEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ordinal = ordinal; event_details.addend = addend; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } case R_FIXUP_EVENT_BIND_AUTH: { RFixupBindAuthEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ordinal = ordinal; event_details.key = key; event_details.addr_div = addr_div; event_details.diversity = diversity; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } case R_FIXUP_EVENT_REBASE: { RFixupRebaseEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ptr_value = ptr_value; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } case R_FIXUP_EVENT_REBASE_AUTH: { RFixupRebaseAuthEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ptr_value = ptr_value; event_details.key = key; event_details.addr_div = addr_div; event_details.diversity = diversity; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } default: eprintf ("Unexpected event while iterating chained fixups\n"); carry_on = false; } if (!carry_on) { return; } } cursor += delta * stride; if (!delta) { break; } } } } } }
target: 1
cwe: [ "CWE-125", "CWE-787" ]
project: radare2
commit_id: 0052500c1ed5bf8263b26b9fd7773dbdc6f170c4
hash: 324,910,062,053,814,640,000,000,000,000,000,000,000
size: 209
message: Fix heap OOB read in macho.iterate_chained_fixups ##crash * Reported by peacock-doris via huntr.dev * Reproducer 'tests_65305' mrmacete: * Return early if segs_count is 0 * Initialize segs_count also for reconstructed fixups Co-authored-by: pancake <[email protected]> Co-authored-by: Francesco Tamagni <[email protected]>

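A sketch of the "return early if segs_count is 0" guard from the message; the struct here is a stand-in for the real object, whose fields the actual patch may handle differently:

/* Illustrative only; 'bin' mirrors the fields used in the function above. */
typedef struct { int nsegs; void **chained_starts; } macho_obj_t;

void iterate_fixups_guarded(macho_obj_t *bin)
{
    if (!bin || bin->nsegs <= 0 || !bin->chained_starts)
        return;     /* empty or reconstructed fixups: nothing to index */
    /* ... per-segment loop as in the function above ... */
}
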
insert_file_id (struct stat const *st, enum file_id_type type) { __insert_file_id (st, type); }
target: 0
cwe: [ "CWE-59" ]
project: patch
commit_id: dce4683cbbe107a95f1f0d45fabc304acfb5d71a
hash: 227,199,721,750,449,070,000,000,000,000,000,000,000
size: 4
message: Don't follow symlinks unless --follow-symlinks is given * src/inp.c (plan_a, plan_b), src/util.c (copy_to_fd, copy_file, append_to_file): Unless the --follow-symlinks option is given, open files with the O_NOFOLLOW flag to avoid following symlinks. So far, we were only doing that consistently for input files. * src/util.c (create_backup): When creating empty backup files, (re)create them with O_CREAT | O_EXCL to avoid following symlinks in that case as well.

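The two techniques the message names, sketched with POSIX flags (illustrative helpers, not the patch itself): O_NOFOLLOW makes open() fail with ELOOP on a symlink, and O_CREAT | O_EXCL makes creation fail with EEXIST instead of writing through a planted link:

#include <fcntl.h>

static int open_input_nofollow(const char *path)
{
    return open(path, O_RDONLY | O_NOFOLLOW);   /* ELOOP if path is a symlink */
}

static int create_backup_excl(const char *path)
{
    return open(path, O_WRONLY | O_CREAT | O_EXCL, 0644); /* EEXIST if it already exists */
}
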
static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server, int err, struct nfs4_exception *exception) { if (err != -EINVAL) return false; if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1)) return false; server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1; exception->retry = 1; return true; }
target: 0
cwe: [ "CWE-787" ]
project: linux
commit_id: b4487b93545214a9db8cbf32e86411677b0cca21
hash: 114,627,069,619,264,770,000,000,000,000,000,000,000
size: 11
message: nfs: Fix getxattr kernel panic and memory overflow Move the buffer size check to decode_attr_security_label() before memcpy() Only call memcpy() if the buffer is large enough Fixes: aa9c2669626c ("NFS: Client implementation of Labeled-NFS") Signed-off-by: Jeffrey Mitchell <[email protected]> [Trond: clean up duplicate test of label->len != 0] Signed-off-by: Trond Myklebust <[email protected]>

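The shape of the fix as the message describes it, checking the destination size before memcpy() rather than after; names are illustrative:

#include <string.h>
#include <errno.h>
#include <sys/types.h>

ssize_t copy_label(char *buf, size_t buflen,
                   const char *label, size_t label_len)
{
    if (label_len > buflen)
        return -ERANGE;           /* check first, copy second */
    memcpy(buf, label, label_len);
    return (ssize_t)label_len;
}
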
**/ CImg<T>& load_gzip_external(const char *const filename) { if (!filename) throw CImgIOException(_cimg_instance "load_gzip_external(): Specified filename is (null).", cimg_instance); cimg::fclose(cimg::fopen(filename,"rb")); // Check if file exists CImg<charT> command(1024), filename_tmp(256), body(256); const char *const ext = cimg::split_filename(filename,body), *const ext2 = cimg::split_filename(body,0); std::FILE *file = 0; do { if (!cimg::strcasecmp(ext,"gz")) { if (*ext2) cimg_snprintf(filename_tmp,filename_tmp._width,"%s%c%s.%s", cimg::temporary_path(),cimg_file_separator,cimg::filenamerand(),ext2); else cimg_snprintf(filename_tmp,filename_tmp._width,"%s%c%s", cimg::temporary_path(),cimg_file_separator,cimg::filenamerand()); } else { if (*ext) cimg_snprintf(filename_tmp,filename_tmp._width,"%s%c%s.%s", cimg::temporary_path(),cimg_file_separator,cimg::filenamerand(),ext); else cimg_snprintf(filename_tmp,filename_tmp._width,"%s%c%s", cimg::temporary_path(),cimg_file_separator,cimg::filenamerand()); } if ((file=cimg::std_fopen(filename_tmp,"rb"))!=0) cimg::fclose(file); } while (file); cimg_snprintf(command,command._width,"%s -c \"%s\" > \"%s\"", cimg::gunzip_path(), CImg<charT>::string(filename)._system_strescape().data(), CImg<charT>::string(filename_tmp)._system_strescape().data()); cimg::system(command); if (!(file=cimg::std_fopen(filename_tmp,"rb"))) { cimg::fclose(cimg::fopen(filename,"r")); throw CImgIOException(_cimg_instance "load_gzip_external(): Failed to load file '%s' with external command 'gunzip'.", cimg_instance, filename); } else cimg::fclose(file); load(filename_tmp); std::remove(filename_tmp); return *this;
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: CImg
commit_id: ac8003393569aba51048c9d67e1491559877b1d1
hash: 165,754,323,564,823,940,000,000,000,000,000,000,000
size: 43
message: .

static void __devinit init_hrtimers_cpu(int cpu) { struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); int i; spin_lock_init(&cpu_base->lock); lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key); for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) cpu_base->clock_base[i].cpu_base = cpu_base; hrtimer_init_hres(cpu_base); }
target: 0
cwe: [ "CWE-189" ]
project: linux-2.6
commit_id: 13788ccc41ceea5893f9c747c59bc0b28f2416c2
hash: 44,337,289,243,387,070,000,000,000,000,000,000,000
size: 13
message: [PATCH] hrtimer: prevent overrun DoS in hrtimer_forward() hrtimer_forward() does not check for the possible overflow of timer->expires. This can happen on 64 bit machines with large interval values and results currently in an endless loop in the softirq because the expiry value becomes negative and therefor the timer is expired all the time. Check for this condition and set the expiry value to the max. expiry time in the future. The fix should be applied to stable kernel series as well. Signed-off-by: Thomas Gleixner <[email protected]> Acked-by: Ingo Molnar <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>

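A simplified sketch of the overflow clamp the message describes (the kernel uses ktime_t; plain 64-bit integers and a non-negative interval are assumed here):

#include <stdint.h>

static int64_t forward_expiry(int64_t expires, int64_t interval)
{
    if (expires > INT64_MAX - interval)   /* addition would overflow */
        return INT64_MAX;                 /* clamp instead of going negative */
    return expires + interval;
}
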
*/ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, u32 flags) { const struct net_device_ops *ops = dev->netdev_ops; enum bpf_netdev_command query; struct bpf_prog *prog = NULL; bpf_op_t bpf_op, bpf_chk; bool offload; int err; ASSERT_RTNL(); offload = flags & XDP_FLAGS_HW_MODE; query = offload ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG; bpf_op = bpf_chk = ops->ndo_bpf; if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) { NL_SET_ERR_MSG(extack, "underlying driver does not support XDP in native mode"); return -EOPNOTSUPP; } if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE)) bpf_op = generic_xdp_install; if (bpf_op == bpf_chk) bpf_chk = generic_xdp_install; if (fd >= 0) { if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) { NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time"); return -EEXIST; } if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && __dev_xdp_query(dev, bpf_op, query)) { NL_SET_ERR_MSG(extack, "XDP program already attached"); return -EBUSY; } prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, bpf_op == ops->ndo_bpf); if (IS_ERR(prog)) return PTR_ERR(prog); if (!offload && bpf_prog_is_dev_bound(prog->aux)) { NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported"); bpf_prog_put(prog); return -EINVAL; } } err = dev_xdp_install(dev, bpf_op, extack, flags, prog); if (err < 0 && prog) bpf_prog_put(prog); return err;
target: 0
cwe: [ "CWE-416" ]
project: linux
commit_id: a4270d6795b0580287453ea55974d948393e66ef
hash: 299,472,131,672,891,040,000,000,000,000,000,000,000
size: 54
message: net-gro: fix use-after-free read in napi_gro_frags() If a network driver provides to napi_gro_frags() an skb with a page fragment of exactly 14 bytes, the call to gro_pull_from_frag0() will 'consume' the fragment by calling skb_frag_unref(skb, 0), and the page might be freed and reused. Reading eth->h_proto at the end of napi_frags_skb() might read mangled data, or crash under specific debugging features. BUG: KASAN: use-after-free in napi_frags_skb net/core/dev.c:5833 [inline] BUG: KASAN: use-after-free in napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841 Read of size 2 at addr ffff88809366840c by task syz-executor599/8957 CPU: 1 PID: 8957 Comm: syz-executor599 Not tainted 5.2.0-rc1+ #32 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x172/0x1f0 lib/dump_stack.c:113 print_address_description.cold+0x7c/0x20d mm/kasan/report.c:188 __kasan_report.cold+0x1b/0x40 mm/kasan/report.c:317 kasan_report+0x12/0x20 mm/kasan/common.c:614 __asan_report_load_n_noabort+0xf/0x20 mm/kasan/generic_report.c:142 napi_frags_skb net/core/dev.c:5833 [inline] napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841 tun_get_user+0x2f3c/0x3ff0 drivers/net/tun.c:1991 tun_chr_write_iter+0xbd/0x156 drivers/net/tun.c:2037 call_write_iter include/linux/fs.h:1872 [inline] do_iter_readv_writev+0x5f8/0x8f0 fs/read_write.c:693 do_iter_write fs/read_write.c:970 [inline] do_iter_write+0x184/0x610 fs/read_write.c:951 vfs_writev+0x1b3/0x2f0 fs/read_write.c:1015 do_writev+0x15b/0x330 fs/read_write.c:1058 Fixes: a50e233c50db ("net-gro: restore frag0 optimization") Signed-off-by: Eric Dumazet <[email protected]> Reported-by: syzbot <[email protected]> Signed-off-by: David S. Miller <[email protected]>

void PrintHintUsage() { u32 i=0; gf_sys_format_help(helpout, help_flags, "# Hinting Options\n" "IsoMedia hinting consists in creating special tracks in the file that contain transport protocol specific information and optionally multiplexing information. These tracks are then used by the server to create the actual packets being sent over the network, in other words they provide the server with hints on how to build packets, hence their names `hint tracks`.\n" "MP4Box supports creation of hint tracks for RTSP servers supporting these such as QuickTime Streaming Server, DarwinStreaming Server or 3GPP-compliant RTSP servers.\n" "Note: GPAC streaming tools [rtp output](rtpout) and [rtsp server](rtspout) do not use hint tracks, they use on-the-fly packetization " "from any media sources, not just MP4\n" " \n" "Options:\n" ); while (m4b_hint_args[i].name) { GF_GPACArg *arg = (GF_GPACArg *) &m4b_hint_args[i]; i++; gf_sys_print_arg(helpout, help_flags, arg, "mp4box-hint"); } }
target: 0
cwe: [ "CWE-476" ]
project: gpac
commit_id: 87afe070cd6866df7fe80f11b26ef75161de85e0
hash: 38,450,855,118,843,554,000,000,000,000,000,000,000
size: 17
message: fixed #1734

qtdemux_parse_tree (GstQTDemux * qtdemux) { GNode *mvhd; GNode *trak; GNode *udta; GNode *mvex; GstClockTime duration; GNode *pssh; guint64 creation_time; GstDateTime *datetime = NULL; gint version; /* make sure we have a usable taglist */ if (!qtdemux->tag_list) { qtdemux->tag_list = gst_tag_list_new_empty (); gst_tag_list_set_scope (qtdemux->tag_list, GST_TAG_SCOPE_GLOBAL); } else { qtdemux->tag_list = gst_tag_list_make_writable (qtdemux->tag_list); } mvhd = qtdemux_tree_get_child_by_type (qtdemux->moov_node, FOURCC_mvhd); if (mvhd == NULL) { GST_LOG_OBJECT (qtdemux, "No mvhd node found, looking for redirects."); return qtdemux_parse_redirects (qtdemux); } version = QT_UINT8 ((guint8 *) mvhd->data + 8); if (version == 1) { creation_time = QT_UINT64 ((guint8 *) mvhd->data + 12); qtdemux->timescale = QT_UINT32 ((guint8 *) mvhd->data + 28); qtdemux->duration = QT_UINT64 ((guint8 *) mvhd->data + 32); } else if (version == 0) { creation_time = QT_UINT32 ((guint8 *) mvhd->data + 12); qtdemux->timescale = QT_UINT32 ((guint8 *) mvhd->data + 20); qtdemux->duration = QT_UINT32 ((guint8 *) mvhd->data + 24); } else { GST_WARNING_OBJECT (qtdemux, "Unhandled mvhd version %d", version); return FALSE; } /* Moving qt creation time (secs since 1904) to unix time */ if (creation_time != 0) { /* Try to use epoch first as it should be faster and more commonly found */ if (creation_time >= QTDEMUX_SECONDS_FROM_1904_TO_1970) { GTimeVal now; creation_time -= QTDEMUX_SECONDS_FROM_1904_TO_1970; /* some data cleansing sanity */ g_get_current_time (&now); if (now.tv_sec + 24 * 3600 < creation_time) { GST_DEBUG_OBJECT (qtdemux, "discarding bogus future creation time"); } else { datetime = gst_date_time_new_from_unix_epoch_utc (creation_time); } } else { GDateTime *base_dt = g_date_time_new_utc (1904, 1, 1, 0, 0, 0); GDateTime *dt, *dt_local; dt = g_date_time_add_seconds (base_dt, creation_time); dt_local = g_date_time_to_local (dt); datetime = gst_date_time_new_from_g_date_time (dt_local); g_date_time_unref (base_dt); g_date_time_unref (dt); } } if (datetime) { /* Use KEEP as explicit tags should have a higher priority than mvhd tag */ gst_tag_list_add (qtdemux->tag_list, GST_TAG_MERGE_KEEP, GST_TAG_DATE_TIME, datetime, NULL); gst_date_time_unref (datetime); } GST_INFO_OBJECT (qtdemux, "timescale: %u", qtdemux->timescale); GST_INFO_OBJECT (qtdemux, "duration: %" G_GUINT64_FORMAT, qtdemux->duration); /* check for fragmented file and get some (default) data */ mvex = qtdemux_tree_get_child_by_type (qtdemux->moov_node, FOURCC_mvex); if (mvex) { GNode *mehd; GstByteReader mehd_data; /* let track parsing or anyone know weird stuff might happen ... 
*/ qtdemux->fragmented = TRUE; /* compensate for total duration */ mehd = qtdemux_tree_get_child_by_type_full (mvex, FOURCC_mehd, &mehd_data); if (mehd) qtdemux_parse_mehd (qtdemux, &mehd_data); } /* set duration in the segment info */ gst_qtdemux_get_duration (qtdemux, &duration); if (duration) { qtdemux->segment.duration = duration; /* also do not exceed duration; stop is set that way post seek anyway, * and segment activation falls back to duration, * whereas loop only checks stop, so let's align this here as well */ qtdemux->segment.stop = duration; } /* parse all traks */ trak = qtdemux_tree_get_child_by_type (qtdemux->moov_node, FOURCC_trak); while (trak) { qtdemux_parse_trak (qtdemux, trak); /* iterate all siblings */ trak = qtdemux_tree_get_sibling_by_type (trak, FOURCC_trak); } if (!qtdemux->tag_list) { GST_DEBUG_OBJECT (qtdemux, "new tag list"); qtdemux->tag_list = gst_tag_list_new_empty (); gst_tag_list_set_scope (qtdemux->tag_list, GST_TAG_SCOPE_GLOBAL); } else { qtdemux->tag_list = gst_tag_list_make_writable (qtdemux->tag_list); } /* find tags */ udta = qtdemux_tree_get_child_by_type (qtdemux->moov_node, FOURCC_udta); if (udta) { qtdemux_parse_udta (qtdemux, qtdemux->tag_list, udta); } else { GST_LOG_OBJECT (qtdemux, "No udta node found."); } /* maybe also some tags in meta box */ udta = qtdemux_tree_get_child_by_type (qtdemux->moov_node, FOURCC_meta); if (udta) { GST_DEBUG_OBJECT (qtdemux, "Parsing meta box for tags."); qtdemux_parse_udta (qtdemux, qtdemux->tag_list, udta); } else { GST_LOG_OBJECT (qtdemux, "No meta node found."); } /* parse any protection system info */ pssh = qtdemux_tree_get_child_by_type (qtdemux->moov_node, FOURCC_pssh); while (pssh) { GST_LOG_OBJECT (qtdemux, "Parsing pssh box."); qtdemux_parse_pssh (qtdemux, pssh); pssh = qtdemux_tree_get_sibling_by_type (pssh, FOURCC_pssh); } qtdemux->tag_list = qtdemux_add_container_format (qtdemux, qtdemux->tag_list); return TRUE; }
target: 0
cwe: [ "CWE-125" ]
project: gst-plugins-good
commit_id: d0949baf3dadea6021d54abef6802fed5a06af75
hash: 184,073,519,885,563,100,000,000,000,000,000,000,000
size: 146
message: qtdemux: Fix out of bounds read in tag parsing code We can't simply assume that the length of the tag value as given inside the stream is correct but should also check against the amount of data we have actually available. https://bugzilla.gnome.org/show_bug.cgi?id=775451

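A minimal sketch of the described check, with hypothetical names: clamp a stream-supplied length against the bytes actually available before parsing the tag value:

#include <stddef.h>

/* Returns the usable value length, or 0 if the claimed length overruns
 * the data we actually have. */
size_t clamp_tag_len(size_t claimed_len, size_t bytes_available)
{
    return claimed_len <= bytes_available ? claimed_len : 0;
}
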
asmlinkage int arm_syscall(int no, struct pt_regs *regs) { struct thread_info *thread = current_thread_info(); siginfo_t info; if ((no >> 16) != (__ARM_NR_BASE>> 16)) return bad_syscall(no, regs); switch (no & 0xffff) { case 0: /* branch through 0 */ info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = SEGV_MAPERR; info.si_addr = NULL; arm_notify_die("branch through zero", regs, &info, 0, 0); return 0; case NR(breakpoint): /* SWI BREAK_POINT */ regs->ARM_pc -= thumb_mode(regs) ? 2 : 4; ptrace_break(current, regs); return regs->ARM_r0; /* * Flush a region from virtual address 'r0' to virtual address 'r1' * _exclusive_. There is no alignment requirement on either address; * user space does not need to know the hardware cache layout. * * r2 contains flags. It should ALWAYS be passed as ZERO until it * is defined to be something else. For now we ignore it, but may * the fires of hell burn in your belly if you break this rule. ;) * * (at a later date, we may want to allow this call to not flush * various aspects of the cache. Passing '0' will guarantee that * everything necessary gets flushed to maintain consistency in * the specified region). */ case NR(cacheflush): return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2); case NR(usr26): if (!(elf_hwcap & HWCAP_26BIT)) break; regs->ARM_cpsr &= ~MODE32_BIT; return regs->ARM_r0; case NR(usr32): if (!(elf_hwcap & HWCAP_26BIT)) break; regs->ARM_cpsr |= MODE32_BIT; return regs->ARM_r0; case NR(set_tls): thread->tp_value = regs->ARM_r0; if (tls_emu) return 0; if (has_tls_reg) { asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0)); } else { /* * User space must never try to access this directly. * Expect your app to break eventually if you do so. * The user helper at 0xffff0fe0 must be used instead. * (see entry-armv.S for details) */ *((unsigned int *)0xffff0ff0) = regs->ARM_r0; } return 0; #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG /* * Atomically store r1 in *r2 if *r2 is equal to r0 for user space. * Return zero in r0 if *MEM was changed or non-zero if no exchange * happened. Also set the user C flag accordingly. * If access permissions have to be fixed up then non-zero is * returned and the operation has to be re-attempted. * * *NOTE*: This is a ghost syscall private to the kernel. Only the * __kuser_cmpxchg code in entry-armv.S should be aware of its * existence. Don't ever use this from user code. */ case NR(cmpxchg): for (;;) { extern void do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs); unsigned long val; unsigned long addr = regs->ARM_r2; struct mm_struct *mm = current->mm; pgd_t *pgd; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; regs->ARM_cpsr &= ~PSR_C_BIT; down_read(&mm->mmap_sem); pgd = pgd_offset(mm, addr); if (!pgd_present(*pgd)) goto bad_access; pmd = pmd_offset(pgd, addr); if (!pmd_present(*pmd)) goto bad_access; pte = pte_offset_map_lock(mm, pmd, addr, &ptl); if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) { pte_unmap_unlock(pte, ptl); goto bad_access; } val = *(unsigned long *)addr; val -= regs->ARM_r0; if (val == 0) { *(unsigned long *)addr = regs->ARM_r1; regs->ARM_cpsr |= PSR_C_BIT; } pte_unmap_unlock(pte, ptl); up_read(&mm->mmap_sem); return val; bad_access: up_read(&mm->mmap_sem); /* simulate a write access fault */ do_DataAbort(addr, 15 + (1 << 11), regs); } #endif default: /* Calls 9f00xx..9f07ff are defined to return -ENOSYS if not implemented, rather than raising SIGILL. This way the calling program can gracefully determine whether a feature is supported. 
*/ if ((no & 0xffff) <= 0x7ff) return -ENOSYS; break; } #ifdef CONFIG_DEBUG_USER /* * experience shows that these seem to indicate that * something catastrophic has happened */ if (user_debug & UDBG_SYSCALL) { printk("[%d] %s: arm syscall %d\n", task_pid_nr(current), current->comm, no); dump_instr("", regs); if (user_mode(regs)) { __show_regs(regs); c_backtrace(regs->ARM_fp, processor_mode(regs)); } } #endif info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_ILLTRP; info.si_addr = (void __user *)instruction_pointer(regs) - (thumb_mode(regs) ? 2 : 4); arm_notify_die("Oops - bad syscall(2)", regs, &info, no, 0); return 0; }
target: 1
cwe: [ "CWE-284", "CWE-264" ]
project: linux
commit_id: a4780adeefd042482f624f5e0d577bf9cdcbb760
hash: 112,016,323,066,832,320,000,000,000,000,000,000,000
size: 155
message: ARM: 7735/2: Preserve the user r/w register TPIDRURW on context switch and fork Since commit 6a1c53124aa1 the user writeable TLS register was zeroed to prevent it from being used as a covert channel between two tasks. There are more and more applications coming to Windows RT, Wine could support them, but mostly they expect to have the thread environment block (TEB) in TPIDRURW. This patch preserves that register per thread instead of clearing it. Unlike the TPIDRURO, which is already switched, the TPIDRURW can be updated from userspace so needs careful treatment in the case that we modify TPIDRURW and call fork(). To avoid this we must always read TPIDRURW in copy_thread. Signed-off-by: André Hentschel <[email protected]> Signed-off-by: Will Deacon <[email protected]> Signed-off-by: Jonathan Austin <[email protected]> Signed-off-by: Russell King <[email protected]>

njs_module_path(njs_vm_t *vm, const njs_str_t *dir, njs_module_info_t *info) { char *p; size_t length; njs_bool_t trail; char src[NJS_MAX_PATH + 1]; trail = 0; length = info->name.length; if (dir != NULL) { length += dir->length; if (length == 0) { return NJS_DECLINED; } trail = (dir->start[dir->length - 1] != '/'); if (trail) { length++; } } if (njs_slow_path(length > NJS_MAX_PATH)) { return NJS_ERROR; } p = &src[0]; if (dir != NULL) { p = (char *) njs_cpymem(p, dir->start, dir->length); if (trail) { *p++ = '/'; } } p = (char *) njs_cpymem(p, info->name.start, info->name.length); *p = '\0'; p = realpath(&src[0], &info->path[0]); if (p == NULL) { return NJS_DECLINED; } info->fd = open(&info->path[0], O_RDONLY); if (info->fd < 0) { return NJS_DECLINED; } info->file.start = (u_char *) &info->path[0]; info->file.length = njs_strlen(info->file.start); return NJS_OK; }
target: 0
cwe: [ "CWE-787" ]
project: njs
commit_id: ab1702c7af9959366a5ddc4a75b4357d4e9ebdc1
hash: 139,471,826,297,193,750,000,000,000,000,000,000,000
size: 57
message: Fixed typo while calculating module path length. The issue was introduced in 77c398f26d7e (not released yet).

bool Virtual_tmp_table::init(uint field_count) { uint *blob_field; uchar *bitmaps; DBUG_ENTER("Virtual_tmp_table::init"); if (!multi_alloc_root(in_use->mem_root, &s, sizeof(*s), &field, (field_count + 1) * sizeof(Field*), &blob_field, (field_count + 1) * sizeof(uint), &bitmaps, bitmap_buffer_size(field_count) * 6, NullS)) DBUG_RETURN(true); s->reset(); s->blob_field= blob_field; setup_tmp_table_column_bitmaps(this, bitmaps, field_count); m_alloced_field_count= field_count; DBUG_RETURN(false); };
target: 0
cwe: []
project: server
commit_id: ff77a09bda884fe6bf3917eb29b9d3a2f53f919b
hash: 23,999,206,636,004,340,000,000,000,000,000,000,000
size: 18
message: MDEV-22464 Server crash on UPDATE with nested subquery Uninitialized ref_pointer_array[] because setup_fields() got empty fields list. mysql_multi_update() for some reason does that by substituting the fields list with empty total_list for the mysql_select() call (looks like wrong merge since total_list is not used anywhere else and is always empty). The fix would be to return back the original fields list. But this fails update_use_source.test case: --error ER_BAD_FIELD_ERROR update v1 set t1c1=2 order by 1; Actually not failing the above seems to be ok. The other fix would be to keep resolve_in_select_list false (and that keeps outer context from being resolved in Item_ref::fix_fields()). This fix is more consistent with how SELECT behaves: --error ER_SUBQUERY_NO_1_ROW select a from t1 where a= (select 2 from t1 having (a = 3)); So this patch implements this fix.

static struct lo_data *lo_data(fuse_req_t req) { return (struct lo_data *)fuse_req_userdata(req); }
target: 0
cwe: []
project: qemu
commit_id: 6084633dff3a05d63176e06d7012c7e15aba15be
hash: 65,797,719,386,089,210,000,000,000,000,000,000,000
size: 4
message: tools/virtiofsd: xattr name mappings: Add option Add an option to define mappings of xattr names so that the client and server filesystems see different views. This can be used to have different SELinux mappings as seen by the guest, to run the virtiofsd with less privileges (e.g. in a case where it can't set trusted/system/security xattrs but you want the guest to be able to), or to isolate multiple users of the same name; e.g. trusted attributes used by stacking overlayfs. A mapping engine is used with 3 simple rules; the rules can be combined to allow most useful mapping scenarios. The ruleset is defined by -o xattrmap='rules...'. This patch doesn't use the rule maps yet. Signed-off-by: Dr. David Alan Gilbert <[email protected]> Message-Id: <[email protected]> Reviewed-by: Stefan Hajnoczi <[email protected]> Signed-off-by: Dr. David Alan Gilbert <[email protected]>

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { #ifdef CONFIG_FAIR_GROUP_SCHED p->se.cfs_rq = task_group(p)->cfs_rq[cpu]; p->se.parent = task_group(p)->se[cpu]; #endif #ifdef CONFIG_RT_GROUP_SCHED p->rt.rt_rq = task_group(p)->rt_rq[cpu]; p->rt.parent = task_group(p)->rt_se[cpu]; #endif }
target: 0
cwe: []
project: linux-2.6
commit_id: 8f1bc385cfbab474db6c27b5af1e439614f3025c
hash: 291,070,559,489,472,200,000,000,000,000,000,000,000
size: 12
message: sched: fair: weight calculations In order to level the hierarchy, we need to calculate load based on the root view. That is, each task's load is in the same unit. A / \ B 1 / \ 2 3 To compute 1's load we do: weight(1) -------------- rq_weight(A) To compute 2's load we do: weight(2) weight(B) ------------ * ----------- rq_weight(B) rw_weight(A) This yields load fractions in comparable units. The consequence is that it changes virtual time. We used to have: time_{i} vtime_{i} = ------------ weight_{i} vtime = \Sum vtime_{i} = time / rq_weight. But with the new way of load calculation we get that vtime equals time. Signed-off-by: Peter Zijlstra <[email protected]> Signed-off-by: Ingo Molnar <[email protected]>

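Typeset, the load fractions and the old virtual-time relation flattened into the message above read (normalizing the apparent typo rw_weight to rq_weight):

\[
\mathrm{load}(1) = \frac{\mathrm{weight}(1)}{\mathrm{rq\_weight}(A)},
\qquad
\mathrm{load}(2) = \frac{\mathrm{weight}(2)}{\mathrm{rq\_weight}(B)} \cdot \frac{\mathrm{weight}(B)}{\mathrm{rq\_weight}(A)}
\]
\[
\mathrm{vtime}_i = \frac{\mathrm{time}_i}{\mathrm{weight}_i},
\qquad
\mathrm{vtime} = \sum_i \mathrm{vtime}_i = \frac{\mathrm{time}}{\mathrm{rq\_weight}}
\]
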
read_gif(Gif_Reader *grr, int read_flags, const char* landmark, Gif_ReadErrorHandler handler) { Gif_Stream *gfs; Gif_Image *gfi; Gif_Context gfc; int unknown_block_type = 0; if (gifgetc(grr) != 'G' || gifgetc(grr) != 'I' || gifgetc(grr) != 'F') return 0; (void)gifgetc(grr); (void)gifgetc(grr); (void)gifgetc(grr); gfs = Gif_NewStream(); gfi = Gif_NewImage(); gfc.stream = gfs; gfc.prefix = Gif_NewArray(Gif_Code, GIF_MAX_CODE); gfc.suffix = Gif_NewArray(uint8_t, GIF_MAX_CODE); gfc.length = Gif_NewArray(uint16_t, GIF_MAX_CODE); gfc.handler = handler; gfc.gfi = gfi; gfc.errors[0] = gfc.errors[1] = 0; if (!gfs || !gfi || !gfc.prefix || !gfc.suffix || !gfc.length) goto done; gfs->landmark = landmark; GIF_DEBUG(("\nGIF ")); if (!read_logical_screen_descriptor(gfs, grr)) goto done; GIF_DEBUG(("logscrdesc ")); while (!gifeof(grr)) { uint8_t block = gifgetbyte(grr); switch (block) { case ',': /* image block */ GIF_DEBUG(("imageread %d ", gfs->nimages)); gfi->identifier = last_name; last_name = 0; if (!Gif_AddImage(gfs, gfi)) goto done; else if (!read_image(grr, &gfc, gfi, read_flags)) { Gif_RemoveImage(gfs, gfs->nimages - 1); goto done; } gfc.gfi = gfi = Gif_NewImage(); if (!gfi) goto done; break; case ';': /* terminator */ GIF_DEBUG(("term\n")); goto done; case '!': /* extension */ block = gifgetbyte(grr); GIF_DEBUG(("ext(0x%02X) ", block)); switch (block) { case 0xF9: read_graphic_control_extension(&gfc, gfi, grr); break; case 0xCE: last_name = suck_data(last_name, 0, grr); break; case 0xFE: if (!read_comment_extension(gfi, grr)) goto done; break; case 0xFF: read_application_extension(&gfc, grr); break; default: read_unknown_extension(&gfc, grr, block, 0, 0); break; } break; default: if (!unknown_block_type) { char buf[256]; sprintf(buf, "unknown block type %d at file offset %u", block, grr->pos - 1); gif_read_error(&gfc, 1, buf); unknown_block_type = 1; } break; } } done: /* Move comments and extensions after last image into stream. */ if (gfs && gfi) { Gif_Extension* gfex; gfs->end_comment = gfi->comment; gfi->comment = 0; gfs->end_extension_list = gfi->extension_list; gfi->extension_list = 0; for (gfex = gfs->end_extension_list; gfex; gfex = gfex->next) gfex->image = NULL; } Gif_DeleteImage(gfi); Gif_DeleteArray(last_name); Gif_DeleteArray(gfc.prefix); Gif_DeleteArray(gfc.suffix); Gif_DeleteArray(gfc.length); gfc.gfi = 0; if (gfs) gfs->errors = gfc.errors[1]; if (gfs && gfc.errors[1] == 0 && !(read_flags & GIF_READ_TRAILING_GARBAGE_OK) && !grr->eofer(grr)) gif_read_error(&gfc, 0, "trailing garbage after GIF ignored"); /* finally, export last message */ gif_read_error(&gfc, -1, 0); return gfs; }
target: 1
cwe: [ "CWE-416" ]
project: gifsicle
commit_id: 81fd7823f6d9c85ab598bc850e40382068361185
hash: 117,685,919,978,235,230,000,000,000,000,000,000,000
size: 135
message: Fix use-after-free problems reported in #114.

static void cc_init(void) { int i; if (is_cc_init) return; for (i = 0; i < CS_MAX; i++) cc_tab[i].valid = false; set_cc(CS_HSTEM, true, 2, true); set_cc(CS_VSTEM, true, 2, true); set_cc(CS_VMOVETO, true, 1, true); set_cc(CS_RLINETO, true, 2, true); set_cc(CS_HLINETO, true, 1, true); set_cc(CS_VLINETO, true, 1, true); set_cc(CS_RRCURVETO, true, 6, true); set_cc(CS_CLOSEPATH, false, 0, true); set_cc(CS_CALLSUBR, false, 1, false); set_cc(CS_RETURN, false, 0, false); set_cc(CS_HSBW, true, 2, true); set_cc(CS_ENDCHAR, false, 0, true); set_cc(CS_RMOVETO, true, 2, true); set_cc(CS_HMOVETO, true, 1, true); set_cc(CS_VHCURVETO, true, 4, true); set_cc(CS_HVCURVETO, true, 4, true); set_cc(CS_DOTSECTION, false, 0, true); set_cc(CS_VSTEM3, true, 6, true); set_cc(CS_HSTEM3, true, 6, true); set_cc(CS_SEAC, true, 5, true); set_cc(CS_SBW, true, 4, true); set_cc(CS_DIV, false, 2, false); set_cc(CS_CALLOTHERSUBR, false, 0, false); set_cc(CS_POP, false, 0, false); set_cc(CS_SETCURRENTPOINT, true, 2, true); is_cc_init = true; }
target: 0
cwe: [ "CWE-119" ]
project: texlive-source
commit_id: 6ed0077520e2b0da1fd060c7f88db7b2e6068e4c
hash: 201,510,998,817,478,140,000,000,000,000,000,000,000
size: 34
message: writet1 protection against buffer overflow git-svn-id: svn://tug.org/texlive/trunk/Build/source@48697 c570f23f-e606-0410-a88d-b1316a301751

ZSTD_encodeSequences_body( void* dst, size_t dstCapacity, FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, seqDef const* sequences, size_t nbSeq, int longOffsets) { BIT_CStream_t blockStream; FSE_CState_t stateMatchLength; FSE_CState_t stateOffsetBits; FSE_CState_t stateLitLength; CHECK_E(BIT_initCStream(&blockStream, dst, dstCapacity), dstSize_tooSmall); /* not enough space remaining */ DEBUGLOG(6, "available space for bitstream : %i (dstCapacity=%u)", (int)(blockStream.endPtr - blockStream.startPtr), (unsigned)dstCapacity); /* first symbols */ FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]); FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]); FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]); BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]); if (MEM_32bits()) BIT_flushBits(&blockStream); BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]); if (MEM_32bits()) BIT_flushBits(&blockStream); if (longOffsets) { U32 const ofBits = ofCodeTable[nbSeq-1]; int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); if (extraBits) { BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits); BIT_flushBits(&blockStream); } BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits, ofBits - extraBits); } else { BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]); } BIT_flushBits(&blockStream); { size_t n; for (n=nbSeq-2 ; n<nbSeq ; n--) { /* intentional underflow */ BYTE const llCode = llCodeTable[n]; BYTE const ofCode = ofCodeTable[n]; BYTE const mlCode = mlCodeTable[n]; U32 const llBits = LL_bits[llCode]; U32 const ofBits = ofCode; U32 const mlBits = ML_bits[mlCode]; DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u", sequences[n].litLength, sequences[n].matchLength + MINMATCH, sequences[n].offset); /* 32b*/ /* 64b*/ /* (7)*/ /* (7)*/ FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */ FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */ if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/ FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */ if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog))) BIT_flushBits(&blockStream); /* (7)*/ BIT_addBits(&blockStream, sequences[n].litLength, llBits); if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream); BIT_addBits(&blockStream, sequences[n].matchLength, mlBits); if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream); if (longOffsets) { int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); if (extraBits) { BIT_addBits(&blockStream, sequences[n].offset, extraBits); BIT_flushBits(&blockStream); /* (7)*/ } BIT_addBits(&blockStream, sequences[n].offset >> extraBits, ofBits - extraBits); /* 31 */ } else { BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */ } BIT_flushBits(&blockStream); /* (7)*/ DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr)); } } DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog); FSE_flushCState(&blockStream, &stateMatchLength); DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog); FSE_flushCState(&blockStream, 
&stateOffsetBits); DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog); FSE_flushCState(&blockStream, &stateLitLength); { size_t const streamSize = BIT_closeCStream(&blockStream); if (streamSize==0) return ERROR(dstSize_tooSmall); /* not enough space */ return streamSize; } }
target: 0
cwe: [ "CWE-362" ]
project: zstd
commit_id: 3e5cdf1b6a85843e991d7d10f6a2567c15580da0
hash: 210,247,028,153,812,460,000,000,000,000,000,000,000
size: 90
message: fixed T36302429

TestDnsServer(Event::Dispatcher& dispatcher) : dispatcher_(dispatcher), record_ttl_(0), stream_info_(dispatcher.timeSource()) {}
target: 0
cwe: [ "CWE-400" ]
project: envoy
commit_id: 542f84c66e9f6479bc31c6f53157c60472b25240
hash: 25,791,392,838,462,715,000,000,000,000,000,000,000
size: 2
message: overload: Runtime configurable global connection limits (#147) Signed-off-by: Tony Allen <[email protected]>

PHP_FUNCTION(fmod) { double num1, num2; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "dd", &num1, &num2) == FAILURE) { return; } RETURN_DOUBLE(fmod(num1, num2)); }
target: 0
cwe: []
project: php-src
commit_id: 0d822f6df946764f3f0348b82efae2e1eaa83aa0
hash: 307,468,962,551,245,250,000,000,000,000,000,000,000
size: 9
message: Bug #71201 round() segfault on 64-bit builds

tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { /* * flush_tlb_range() implementations that look at VM_HUGETLB (tile, * mips-4k) flush only large pages. * * flush_tlb_range() implementations that flush I-TLB also flush D-TLB * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing * range. * * We rely on tlb_end_vma() to issue a flush, such that when we reset * these values the batch is empty. */ tlb->vma_huge = is_vm_hugetlb_page(vma); tlb->vma_exec = !!(vma->vm_flags & VM_EXEC); tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)); }
target: 0
cwe: [ "CWE-362" ]
project: linux
commit_id: b67fbebd4cf980aecbcc750e1462128bffe8ae15
hash: 246,018,628,176,971,470,000,000,000,000,000,000,000
size: 17
message: mmu_gather: Force tlb-flush VM_PFNMAP vmas Jann reported a race between munmap() and unmap_mapping_range(), where unmap_mapping_range() will no-op once unmap_vmas() has unlinked the VMA; however munmap() will not yet have invalidated the TLBs. Therefore unmap_mapping_range() will complete while there are still (stale) TLB entries for the specified range. Mitigate this by force flushing TLBs for VM_PFNMAP ranges. Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Acked-by: Will Deacon <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>

static void setBitOfReversedStream(size_t* bitpointer, unsigned char* bitstream, unsigned char bit) { /*the current bit in bitstream may be 0 or 1 for this to work*/ if(bit == 0) { size_t pos = (*bitpointer) >> 3; bitstream[pos] &= (unsigned char)(~(1 << (7 - ((*bitpointer) & 0x7)))); } else { size_t pos = (*bitpointer) >> 3; bitstream[pos] |= (1 << (7 - ((*bitpointer) & 0x7))); } (*bitpointer)++; }
target: 0
cwe: [ "CWE-401" ]
project: FreeRDP
commit_id: 9fee4ae076b1ec97b97efb79ece08d1dab4df29a
hash: 195,000,316,183,198,400,000,000,000,000,000,000,000
size: 15
message: Fixed #5645: realloc return handling

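The classic pattern behind "realloc return handling", sketched: assign to a temporary so the original pointer is neither leaked nor used after a failed reallocation (illustrative, not the project's patch):

#include <stdlib.h>

int grow_buffer(unsigned char **buf, size_t new_size)
{
    unsigned char *tmp = realloc(*buf, new_size);
    if (!tmp)
        return -1;   /* *buf is still valid; caller can free or keep it */
    *buf = tmp;
    return 0;
}
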
GF_Err strk_AddBox(GF_Box *s, GF_Box *a) { GF_SubTrackBox *ptr = (GF_SubTrackBox *)s; if (!a) return GF_OK; switch (a->type) { case GF_ISOM_BOX_TYPE_STRI: if (ptr->info) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->info = (GF_SubTrackInformationBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_STRD: if (ptr->strd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->strd = a; return GF_OK; default: return gf_isom_box_add_default(s, a); } return GF_OK;
target: 0
cwe: [ "CWE-400", "CWE-401" ]
project: gpac
commit_id: d2371b4b204f0a3c0af51ad4e9b491144dd1225c
hash: 74,211,822,137,781,890,000,000,000,000,000,000,000
size: 18
message: prevent dref memleak on invalid input (#1183)

static Image *ReadMIFFImage(const ImageInfo *image_info, ExceptionInfo *exception) { #define BZipMaxExtent(x) ((x)+((x)/100)+600) #define LZMAMaxExtent(x) ((x)+((x)/3)+128) #define ThrowMIFFException(exception,message) \ { \ if (quantum_info != (QuantumInfo *) NULL) \ quantum_info=DestroyQuantumInfo(quantum_info); \ if (compress_pixels != (unsigned char *) NULL) \ compress_pixels=(unsigned char *) RelinquishMagickMemory(compress_pixels); \ ThrowReaderException((exception),(message)); \ } #define ZipMaxExtent(x) ((x)+(((x)+7) >> 3)+(((x)+63) >> 6)+11) #if defined(MAGICKCORE_BZLIB_DELEGATE) bz_stream bzip_info; #endif char id[MagickPathExtent], keyword[MagickPathExtent], *options; double version; GeometryInfo geometry_info; Image *image; int c; LinkedListInfo *profiles; #if defined(MAGICKCORE_LZMA_DELEGATE) lzma_stream initialize_lzma = LZMA_STREAM_INIT, lzma_info; lzma_allocator allocator; #endif MagickBooleanType status; PixelInfo pixel; MagickStatusType flags; QuantumFormatType quantum_format; QuantumInfo *quantum_info; QuantumType quantum_type; register ssize_t i; size_t compress_extent, length, packet_size; ssize_t count; unsigned char *compress_pixels, *pixels; size_t colors; ssize_t y; #if defined(MAGICKCORE_ZLIB_DELEGATE) z_stream zip_info; #endif /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Decode image header; header terminates one character beyond a ':'. */ c=ReadBlobByte(image); if (c == EOF) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); *id='\0'; compress_pixels=(unsigned char *) NULL; quantum_info=(QuantumInfo *) NULL; (void) memset(keyword,0,sizeof(keyword)); version=0.0; (void) version; do { /* Decode image header; header terminates one character beyond a ':'. */ SetGeometryInfo(&geometry_info); length=MagickPathExtent; options=AcquireString((char *) NULL); quantum_format=UndefinedQuantumFormat; profiles=(LinkedListInfo *) NULL; colors=0; image->depth=8UL; image->compression=NoCompression; while ((isgraph(c) != MagickFalse) && (c != (int) ':')) { register char *p; if (c == (int) '{') { char *comment; /* Read comment-- any text between { }. */ length=MagickPathExtent; comment=AcquireString((char *) NULL); for (p=comment; comment != (char *) NULL; p++) { c=ReadBlobByte(image); if (c == (int) '\\') c=ReadBlobByte(image); else if ((c == EOF) || (c == (int) '}')) break; if ((size_t) (p-comment+1) >= length) { *p='\0'; length<<=1; comment=(char *) ResizeQuantumMemory(comment,length+ MagickPathExtent,sizeof(*comment)); if (comment == (char *) NULL) break; p=comment+strlen(comment); } *p=(char) c; } if (comment == (char *) NULL) { options=DestroyString(options); ThrowMIFFException(ResourceLimitError,"MemoryAllocationFailed"); } *p='\0'; (void) SetImageProperty(image,"comment",comment,exception); comment=DestroyString(comment); c=ReadBlobByte(image); } else if (isalnum(c) != MagickFalse) { /* Get the keyword. 
*/ length=MagickPathExtent-1; p=keyword; do { if (c == (int) '=') break; if ((size_t) (p-keyword) < (MagickPathExtent-1)) *p++=(char) c; c=ReadBlobByte(image); } while (c != EOF); *p='\0'; p=options; while ((isspace((int) ((unsigned char) c)) != 0) && (c != EOF)) c=ReadBlobByte(image); if (c == (int) '=') { /* Get the keyword value. */ c=ReadBlobByte(image); while ((c != (int) '}') && (c != EOF)) { if ((size_t) (p-options+1) >= length) { *p='\0'; length<<=1; options=(char *) ResizeQuantumMemory(options,length+ MagickPathExtent,sizeof(*options)); if (options == (char *) NULL) break; p=options+strlen(options); } *p++=(char) c; c=ReadBlobByte(image); if (c == '\\') { c=ReadBlobByte(image); if (c == (int) '}') { *p++=(char) c; c=ReadBlobByte(image); } } if (*options != '{') if (isspace((int) ((unsigned char) c)) != 0) break; } if (options == (char *) NULL) ThrowMIFFException(ResourceLimitError, "MemoryAllocationFailed"); } *p='\0'; if (*options == '{') (void) CopyMagickString(options,options+1,strlen(options)); /* Assign a value to the specified keyword. */ switch (*keyword) { case 'a': case 'A': { if (LocaleCompare(keyword,"alpha-trait") == 0) { ssize_t alpha_trait; alpha_trait=ParseCommandOption(MagickPixelTraitOptions, MagickFalse,options); if (alpha_trait < 0) break; image->alpha_trait=(PixelTrait) alpha_trait; break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'b': case 'B': { if (LocaleCompare(keyword,"background-color") == 0) { (void) QueryColorCompliance(options,AllCompliance, &image->background_color,exception); break; } if (LocaleCompare(keyword,"blue-primary") == 0) { flags=ParseGeometry(options,&geometry_info); image->chromaticity.blue_primary.x=geometry_info.rho; image->chromaticity.blue_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.blue_primary.y= image->chromaticity.blue_primary.x; break; } if (LocaleCompare(keyword,"border-color") == 0) { (void) QueryColorCompliance(options,AllCompliance, &image->border_color,exception); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'c': case 'C': { if (LocaleCompare(keyword,"class") == 0) { ssize_t storage_class; storage_class=ParseCommandOption(MagickClassOptions, MagickFalse,options); if (storage_class < 0) break; image->storage_class=(ClassType) storage_class; break; } if (LocaleCompare(keyword,"colors") == 0) { colors=StringToUnsignedLong(options); break; } if (LocaleCompare(keyword,"colorspace") == 0) { ssize_t colorspace; colorspace=ParseCommandOption(MagickColorspaceOptions, MagickFalse,options); if (colorspace < 0) break; image->colorspace=(ColorspaceType) colorspace; break; } if (LocaleCompare(keyword,"compression") == 0) { ssize_t compression; compression=ParseCommandOption(MagickCompressOptions, MagickFalse,options); if (compression < 0) break; image->compression=(CompressionType) compression; break; } if (LocaleCompare(keyword,"columns") == 0) { image->columns=StringToUnsignedLong(options); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'd': case 'D': { if (LocaleCompare(keyword,"delay") == 0) { image->delay=StringToUnsignedLong(options); break; } if (LocaleCompare(keyword,"depth") == 0) { image->depth=StringToUnsignedLong(options); break; } if (LocaleCompare(keyword,"dispose") == 0) { ssize_t dispose; dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse, options); if (dispose < 0) break; image->dispose=(DisposeType) dispose; break; } (void) SetImageProperty(image,keyword,options,exception); 
break; } case 'e': case 'E': { if (LocaleCompare(keyword,"endian") == 0) { ssize_t endian; endian=ParseCommandOption(MagickEndianOptions,MagickFalse, options); if (endian < 0) break; image->endian=(EndianType) endian; break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'g': case 'G': { if (LocaleCompare(keyword,"gamma") == 0) { image->gamma=StringToDouble(options,(char **) NULL); break; } if (LocaleCompare(keyword,"gravity") == 0) { ssize_t gravity; gravity=ParseCommandOption(MagickGravityOptions,MagickFalse, options); if (gravity < 0) break; image->gravity=(GravityType) gravity; break; } if (LocaleCompare(keyword,"green-primary") == 0) { flags=ParseGeometry(options,&geometry_info); image->chromaticity.green_primary.x=geometry_info.rho; image->chromaticity.green_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.green_primary.y= image->chromaticity.green_primary.x; break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'i': case 'I': { if (LocaleCompare(keyword,"id") == 0) { (void) CopyMagickString(id,options,MagickPathExtent); break; } if (LocaleCompare(keyword,"iterations") == 0) { image->iterations=StringToUnsignedLong(options); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'm': case 'M': { if (LocaleCompare(keyword,"matte") == 0) { ssize_t matte; matte=ParseCommandOption(MagickBooleanOptions,MagickFalse, options); if (matte < 0) break; image->alpha_trait=matte == 0 ? UndefinedPixelTrait : BlendPixelTrait; break; } if (LocaleCompare(keyword,"mattecolor") == 0) { (void) QueryColorCompliance(options,AllCompliance, &image->matte_color,exception); break; } if (LocaleCompare(keyword,"montage") == 0) { (void) CloneString(&image->montage,options); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'o': case 'O': { if (LocaleCompare(keyword,"orientation") == 0) { ssize_t orientation; orientation=ParseCommandOption(MagickOrientationOptions, MagickFalse,options); if (orientation < 0) break; image->orientation=(OrientationType) orientation; break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'p': case 'P': { if (LocaleCompare(keyword,"page") == 0) { char *geometry; geometry=GetPageGeometry(options); (void) ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); break; } if (LocaleCompare(keyword,"pixel-intensity") == 0) { ssize_t intensity; intensity=ParseCommandOption(MagickPixelIntensityOptions, MagickFalse,options); if (intensity < 0) break; image->intensity=(PixelIntensityMethod) intensity; break; } if (LocaleCompare(keyword,"profile") == 0) { if (profiles == (LinkedListInfo *) NULL) profiles=NewLinkedList(0); (void) AppendValueToLinkedList(profiles, AcquireString(options)); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'q': case 'Q': { if (LocaleCompare(keyword,"quality") == 0) { image->quality=StringToUnsignedLong(options); break; } if ((LocaleCompare(keyword,"quantum-format") == 0) || (LocaleCompare(keyword,"quantum:format") == 0)) { ssize_t format; format=ParseCommandOption(MagickQuantumFormatOptions, MagickFalse,options); if (format < 0) break; quantum_format=(QuantumFormatType) format; break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'r': case 'R': { if (LocaleCompare(keyword,"red-primary") == 0) { flags=ParseGeometry(options,&geometry_info); image->chromaticity.red_primary.x=geometry_info.rho; 
image->chromaticity.red_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.red_primary.y= image->chromaticity.red_primary.x; break; } if (LocaleCompare(keyword,"rendering-intent") == 0) { ssize_t rendering_intent; rendering_intent=ParseCommandOption(MagickIntentOptions, MagickFalse,options); if (rendering_intent < 0) break; image->rendering_intent=(RenderingIntent) rendering_intent; break; } if (LocaleCompare(keyword,"resolution") == 0) { flags=ParseGeometry(options,&geometry_info); image->resolution.x=geometry_info.rho; image->resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->resolution.y=image->resolution.x; break; } if (LocaleCompare(keyword,"rows") == 0) { image->rows=StringToUnsignedLong(options); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 's': case 'S': { if (LocaleCompare(keyword,"scene") == 0) { image->scene=StringToUnsignedLong(options); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 't': case 'T': { if (LocaleCompare(keyword,"ticks-per-second") == 0) { image->ticks_per_second=(ssize_t) StringToLong(options); break; } if (LocaleCompare(keyword,"tile-offset") == 0) { char *geometry; geometry=GetPageGeometry(options); (void) ParseAbsoluteGeometry(geometry,&image->tile_offset); geometry=DestroyString(geometry); break; } if (LocaleCompare(keyword,"type") == 0) { ssize_t type; type=ParseCommandOption(MagickTypeOptions,MagickFalse, options); if (type < 0) break; image->type=(ImageType) type; break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'u': case 'U': { if (LocaleCompare(keyword,"units") == 0) { ssize_t units; units=ParseCommandOption(MagickResolutionOptions, MagickFalse,options); if (units < 0) break; image->units=(ResolutionType) units; break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'v': case 'V': { if (LocaleCompare(keyword,"version") == 0) { version=StringToDouble(options,(char **) NULL); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'w': case 'W': { if (LocaleCompare(keyword,"white-point") == 0) { flags=ParseGeometry(options,&geometry_info); image->chromaticity.white_point.x=geometry_info.rho; image->chromaticity.white_point.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.white_point.y= image->chromaticity.white_point.x; break; } (void) SetImageProperty(image,keyword,options,exception); break; } default: { (void) SetImageProperty(image,keyword,options,exception); break; } } } else c=ReadBlobByte(image); while (isspace((int) ((unsigned char) c)) != 0) c=ReadBlobByte(image); } options=DestroyString(options); (void) ReadBlobByte(image); /* Verify that required image information is defined. */ if ((LocaleCompare(id,"ImageMagick") != 0) || (image->depth > 64) || (image->storage_class == UndefinedClass) || (image->compression == UndefinedCompression) || (image->colorspace == UndefinedColorspace) || (image->columns == 0) || (image->rows == 0)) { if (profiles != (LinkedListInfo *) NULL) profiles=DestroyLinkedList(profiles,RelinquishMagickMemory); if (image->previous == (Image *) NULL) ThrowMIFFException(CorruptImageError,"ImproperImageHeader"); DeleteImageFromList(&image); (void) ThrowMagickException(exception,GetMagickModule(), CorruptImageError,"ImproperImageHeader","`%s'",image->filename); break; } if (image->montage != (char *) NULL) { register char *p; /* Image directory. 
*/ length=MagickPathExtent; image->directory=AcquireString((char *) NULL); p=image->directory; do { *p='\0'; if ((strlen(image->directory)+MagickPathExtent) >= length) { /* Allocate more memory for the image directory. */ length<<=1; image->directory=(char *) ResizeQuantumMemory(image->directory, length+MagickPathExtent,sizeof(*image->directory)); if (image->directory == (char *) NULL) ThrowMIFFException(CorruptImageError,"UnableToReadImageData"); p=image->directory+strlen(image->directory); } c=ReadBlobByte(image); if (c == EOF) break; *p++=(char) c; } while (c != (int) '\0'); } if (profiles != (LinkedListInfo *) NULL) { const char *name; StringInfo *profile; /* Read image profiles. */ ResetLinkedListIterator(profiles); name=(const char *) GetNextValueInLinkedList(profiles); while (name != (const char *) NULL) { length=ReadBlobMSBLong(image); if ((MagickSizeType) length > GetBlobSize(image)) break; profile=AcquireStringInfo(length); if (profile == (StringInfo *) NULL) break; count=ReadBlob(image,length,GetStringInfoDatum(profile)); if (count != (ssize_t) length) { profile=DestroyStringInfo(profile); break; } status=SetImageProfile(image,name,profile,exception); profile=DestroyStringInfo(profile); if (status == MagickFalse) break; name=(const char *) GetNextValueInLinkedList(profiles); } profiles=DestroyLinkedList(profiles,RelinquishMagickMemory); } image->depth=GetImageQuantumDepth(image,MagickFalse); if (image->storage_class == PseudoClass) { unsigned char *colormap; /* Create image colormap. */ packet_size=(size_t) (3UL*image->depth/8UL); if ((MagickSizeType) colors > GetBlobSize(image)) ThrowMIFFException(CorruptImageError,"InsufficientImageDataInFile"); if (((MagickSizeType) packet_size*colors) > GetBlobSize(image)) ThrowMIFFException(CorruptImageError,"InsufficientImageDataInFile"); status=AcquireImageColormap(image,colors != 0 ? colors : 256,exception); if (status == MagickFalse) ThrowMIFFException(ResourceLimitError,"MemoryAllocationFailed"); if (colors != 0) { const unsigned char *p; /* Read image colormap from file. 
*/ colormap=(unsigned char *) AcquireQuantumMemory(image->colors, packet_size*sizeof(*colormap)); if (colormap == (unsigned char *) NULL) ThrowMIFFException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,packet_size*image->colors,colormap); p=colormap; switch (image->depth) { default: colormap=(unsigned char *) RelinquishMagickMemory(colormap); ThrowMIFFException(CorruptImageError,"ImageDepthNotSupported"); case 8: { unsigned char char_pixel; for (i=0; i < (ssize_t) image->colors; i++) { p=PushCharPixel(p,&char_pixel); image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(char_pixel); p=PushCharPixel(p,&char_pixel); image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(char_pixel); p=PushCharPixel(p,&char_pixel); image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(char_pixel); } break; } case 16: { unsigned short short_pixel; for (i=0; i < (ssize_t) image->colors; i++) { p=PushShortPixel(MSBEndian,p,&short_pixel); image->colormap[i].red=(MagickRealType) ScaleShortToQuantum(short_pixel); p=PushShortPixel(MSBEndian,p,&short_pixel); image->colormap[i].green=(MagickRealType) ScaleShortToQuantum(short_pixel); p=PushShortPixel(MSBEndian,p,&short_pixel); image->colormap[i].blue=(MagickRealType) ScaleShortToQuantum(short_pixel); } break; } case 32: { unsigned int long_pixel; for (i=0; i < (ssize_t) image->colors; i++) { p=PushLongPixel(MSBEndian,p,&long_pixel); image->colormap[i].red=(MagickRealType) ScaleLongToQuantum(long_pixel); p=PushLongPixel(MSBEndian,p,&long_pixel); image->colormap[i].green=(MagickRealType) ScaleLongToQuantum(long_pixel); p=PushLongPixel(MSBEndian,p,&long_pixel); image->colormap[i].blue=(MagickRealType) ScaleLongToQuantum(long_pixel); } break; } } colormap=(unsigned char *) RelinquishMagickMemory(colormap); } } if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); /* Allocate image pixels. */ quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowMIFFException(ResourceLimitError,"MemoryAllocationFailed"); if (quantum_format != UndefinedQuantumFormat) { status=SetQuantumFormat(image,quantum_info,quantum_format); if (status == MagickFalse) ThrowMIFFException(ResourceLimitError,"MemoryAllocationFailed"); } packet_size=(size_t) (quantum_info->depth/8); if (image->storage_class == DirectClass) packet_size=(size_t) (3*quantum_info->depth/8); if (IsGrayColorspace(image->colorspace) != MagickFalse) packet_size=quantum_info->depth/8; if (image->alpha_trait != UndefinedPixelTrait) packet_size+=quantum_info->depth/8; if (image->colorspace == CMYKColorspace) packet_size+=quantum_info->depth/8; if (image->compression == RLECompression) packet_size++; compress_extent=MagickMax(MagickMax(BZipMaxExtent(packet_size* image->columns),LZMAMaxExtent(packet_size*image->columns)), ZipMaxExtent(packet_size*image->columns)); compress_pixels=(unsigned char *) AcquireQuantumMemory(compress_extent, sizeof(*compress_pixels)); if (compress_pixels == (unsigned char *) NULL) ThrowMIFFException(ResourceLimitError,"MemoryAllocationFailed"); /* Read image pixels. 
*/ quantum_type=RGBQuantum; if (image->alpha_trait != UndefinedPixelTrait) quantum_type=RGBAQuantum; if (image->colorspace == CMYKColorspace) { quantum_type=CMYKQuantum; if (image->alpha_trait != UndefinedPixelTrait) quantum_type=CMYKAQuantum; } if (IsGrayColorspace(image->colorspace) != MagickFalse) { quantum_type=GrayQuantum; if (image->alpha_trait != UndefinedPixelTrait) quantum_type=GrayAlphaQuantum; } if (image->storage_class == PseudoClass) { quantum_type=IndexQuantum; if (image->alpha_trait != UndefinedPixelTrait) quantum_type=IndexAlphaQuantum; } status=MagickTrue; GetPixelInfo(image,&pixel); #if defined(MAGICKCORE_BZLIB_DELEGATE) (void) memset(&bzip_info,0,sizeof(bzip_info)); #endif #if defined(MAGICKCORE_LZMA_DELEGATE) (void) memset(&allocator,0,sizeof(allocator)); #endif #if defined(MAGICKCORE_ZLIB_DELEGATE) (void) memset(&zip_info,0,sizeof(zip_info)); #endif switch (image->compression) { #if defined(MAGICKCORE_BZLIB_DELEGATE) case BZipCompression: { int code; bzip_info.bzalloc=AcquireBZIPMemory; bzip_info.bzfree=RelinquishBZIPMemory; bzip_info.opaque=(void *) image; code=BZ2_bzDecompressInit(&bzip_info,(int) image_info->verbose, MagickFalse); if (code != BZ_OK) status=MagickFalse; break; } #endif #if defined(MAGICKCORE_LZMA_DELEGATE) case LZMACompression: { int code; allocator.alloc=AcquireLZMAMemory; allocator.free=RelinquishLZMAMemory; allocator.opaque=(void *) image; lzma_info=initialize_lzma; lzma_info.allocator=(&allocator); code=lzma_auto_decoder(&lzma_info,(uint64_t) -1,0); if (code != LZMA_OK) status=MagickFalse; break; } #endif #if defined(MAGICKCORE_ZLIB_DELEGATE) case LZWCompression: case ZipCompression: { int code; zip_info.zalloc=AcquireZIPMemory; zip_info.zfree=RelinquishZIPMemory; zip_info.opaque=(voidpf) image; code=inflateInit(&zip_info); if (code != Z_OK) status=MagickFalse; break; } #endif case RLECompression: break; default: break; } pixels=(unsigned char *) GetQuantumPixels(quantum_info); length=0; for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) break; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; switch (image->compression) { #if defined(MAGICKCORE_BZLIB_DELEGATE) case BZipCompression: { bzip_info.next_out=(char *) pixels; bzip_info.avail_out=(unsigned int) (packet_size*image->columns); do { int code; if (bzip_info.avail_in == 0) { bzip_info.next_in=(char *) compress_pixels; length=(size_t) BZipMaxExtent(packet_size*image->columns); if (version != 0.0) length=(size_t) ReadBlobMSBLong(image); if (length <= compress_extent) bzip_info.avail_in=(unsigned int) ReadBlob(image,length, (unsigned char *) bzip_info.next_in); if ((length > compress_extent) || ((size_t) bzip_info.avail_in != length)) { (void) BZ2_bzDecompressEnd(&bzip_info); ThrowMIFFException(CorruptImageError, "UnableToReadImageData"); } } code=BZ2_bzDecompress(&bzip_info); if ((code != BZ_OK) && (code != BZ_STREAM_END)) { status=MagickFalse; break; } if (code == BZ_STREAM_END) break; } while (bzip_info.avail_out != 0); (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); break; } #endif #if defined(MAGICKCORE_LZMA_DELEGATE) case LZMACompression: { lzma_info.next_out=pixels; lzma_info.avail_out=packet_size*image->columns; do { int code; if (lzma_info.avail_in == 0) { lzma_info.next_in=compress_pixels; length=(size_t) ReadBlobMSBLong(image); if (length <= compress_extent) lzma_info.avail_in=(unsigned int) ReadBlob(image,length, 
(unsigned char *) lzma_info.next_in); if ((length > compress_extent) || (lzma_info.avail_in != length)) { lzma_end(&lzma_info); ThrowMIFFException(CorruptImageError, "UnableToReadImageData"); } } code=lzma_code(&lzma_info,LZMA_RUN); if ((code != LZMA_OK) && (code != LZMA_STREAM_END)) { status=MagickFalse; break; } if (code == LZMA_STREAM_END) break; } while (lzma_info.avail_out != 0); (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); break; } #endif #if defined(MAGICKCORE_ZLIB_DELEGATE) case LZWCompression: case ZipCompression: { zip_info.next_out=pixels; zip_info.avail_out=(uInt) (packet_size*image->columns); do { int code; if (zip_info.avail_in == 0) { zip_info.next_in=compress_pixels; length=(size_t) ZipMaxExtent(packet_size*image->columns); if (version != 0.0) length=(size_t) ReadBlobMSBLong(image); if (length <= compress_extent) zip_info.avail_in=(unsigned int) ReadBlob(image,length, zip_info.next_in); if ((length > compress_extent) || ((size_t) zip_info.avail_in != length)) { (void) inflateEnd(&zip_info); ThrowMIFFException(CorruptImageError, "UnableToReadImageData"); } } code=inflate(&zip_info,Z_SYNC_FLUSH); if ((code != Z_OK) && (code != Z_STREAM_END)) { status=MagickFalse; break; } if (code == Z_STREAM_END) break; } while (zip_info.avail_out != 0); (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); break; } #endif case RLECompression: { for (x=0; x < (ssize_t) image->columns; x++) { if (length == 0) { count=ReadBlob(image,packet_size,pixels); if (count != (ssize_t) packet_size) ThrowMIFFException(CorruptImageError,"UnableToReadImageData"); PushRunlengthPacket(image,pixels,&length,&pixel,exception); } length--; if (image->storage_class == PseudoClass) SetPixelIndex(image,ClampToQuantum(pixel.index),q); else { SetPixelRed(image,ClampToQuantum(pixel.red),q); SetPixelGreen(image,ClampToQuantum(pixel.green),q); SetPixelBlue(image,ClampToQuantum(pixel.blue),q); if (image->colorspace == CMYKColorspace) SetPixelBlack(image,ClampToQuantum(pixel.black),q); } if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } break; } default: { count=ReadBlob(image,packet_size*image->columns,pixels); if (count != (ssize_t) (packet_size*image->columns)) ThrowMIFFException(CorruptImageError,"UnableToReadImageData"); (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); break; } } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } SetQuantumImageType(image,quantum_type); switch (image->compression) { #if defined(MAGICKCORE_BZLIB_DELEGATE) case BZipCompression: { int code; if (version == 0.0) { MagickOffsetType offset; offset=SeekBlob(image,-((MagickOffsetType) bzip_info.avail_in), SEEK_CUR); if (offset < 0) { (void) BZ2_bzDecompressEnd(&bzip_info); ThrowMIFFException(CorruptImageError,"ImproperImageHeader"); } } code=BZ2_bzDecompressEnd(&bzip_info); if (code != BZ_OK) status=MagickFalse; break; } #endif #if defined(MAGICKCORE_LZMA_DELEGATE) case LZMACompression: { int code; code=lzma_code(&lzma_info,LZMA_FINISH); if ((code != LZMA_STREAM_END) && (code != LZMA_OK)) status=MagickFalse; lzma_end(&lzma_info); break; } #endif #if defined(MAGICKCORE_ZLIB_DELEGATE) case LZWCompression: case ZipCompression: { int code; if (version == 0.0) { MagickOffsetType offset; offset=SeekBlob(image,-((MagickOffsetType) zip_info.avail_in), SEEK_CUR); if (offset < 0) { (void) inflateEnd(&zip_info); 
ThrowMIFFException(CorruptImageError,"ImproperImageHeader"); } } code=inflateEnd(&zip_info); if (code != Z_OK) status=MagickFalse; break; } #endif default: break; } quantum_info=DestroyQuantumInfo(quantum_info); compress_pixels=(unsigned char *) RelinquishMagickMemory(compress_pixels); if (((y != (ssize_t) image->rows)) || (status == MagickFalse)) { image=DestroyImageList(image); return((Image *) NULL); } if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; do { c=ReadBlobByte(image); } while ((isgraph(c) == MagickFalse) && (c != EOF)); if (c != EOF) { /* Allocate next image structure. */ AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { status=MagickFalse; break; } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } while (c != EOF); (void) CloseBlob(image); if (status == MagickFalse) return(DestroyImageList(image)); return(GetFirstImageInList(image)); }
1
[ "CWE-772" ]
ImageMagick
4b352c0be410ad900469a079e389178f878aded8
234,753,711,832,752,280,000,000,000,000,000,000,000
1,299
https://github.com/ImageMagick/ImageMagick/issues/1191
void rgw::auth::s3::LDAPEngine::init(CephContext* const cct) { if (! cct->_conf->rgw_s3_auth_use_ldap || cct->_conf->rgw_ldap_uri.empty()) { return; } if (! ldh) { std::lock_guard<std::mutex> lck(mtx); if (! ldh) { const string& ldap_uri = cct->_conf->rgw_ldap_uri; const string& ldap_binddn = cct->_conf->rgw_ldap_binddn; const string& ldap_searchdn = cct->_conf->rgw_ldap_searchdn; const string& ldap_searchfilter = cct->_conf->rgw_ldap_searchfilter; const string& ldap_dnattr = cct->_conf->rgw_ldap_dnattr; std::string ldap_bindpw = parse_rgw_ldap_bindpw(cct); ldh = new rgw::LDAPHelper(ldap_uri, ldap_binddn, ldap_bindpw, ldap_searchdn, ldap_searchfilter, ldap_dnattr); ldh->init(); ldh->bind(); } } }
0
[ "CWE-79" ]
ceph
8f90658c731499722d5f4393c8ad70b971d05f77
51,733,031,682,983,600,000,000,000,000,000,000,000
25
rgw: reject unauthenticated response-header actions Signed-off-by: Matt Benjamin <[email protected]> Reviewed-by: Casey Bodley <[email protected]> (cherry picked from commit d8dd5e513c0c62bbd7d3044d7e2eddcd897bd400)
batch_dict_exact(PicklerObject *self, PyObject *obj) { PyObject *key = NULL, *value = NULL; int i; Py_ssize_t dict_size, ppos = 0; const char mark_op = MARK; const char setitem_op = SETITEM; const char setitems_op = SETITEMS; assert(obj != NULL && PyDict_CheckExact(obj)); assert(self->proto > 0); dict_size = PyDict_GET_SIZE(obj); /* Special-case len(d) == 1 to save space. */ if (dict_size == 1) { PyDict_Next(obj, &ppos, &key, &value); if (save(self, key, 0) < 0) return -1; if (save(self, value, 0) < 0) return -1; if (_Pickler_Write(self, &setitem_op, 1) < 0) return -1; return 0; } /* Write in batches of BATCHSIZE. */ do { i = 0; if (_Pickler_Write(self, &mark_op, 1) < 0) return -1; while (PyDict_Next(obj, &ppos, &key, &value)) { if (save(self, key, 0) < 0) return -1; if (save(self, value, 0) < 0) return -1; if (++i == BATCHSIZE) break; } if (_Pickler_Write(self, &setitems_op, 1) < 0) return -1; if (PyDict_GET_SIZE(obj) != dict_size) { PyErr_Format( PyExc_RuntimeError, "dictionary changed size during iteration"); return -1; } } while (i == BATCHSIZE); return 0; }
0
[ "CWE-190", "CWE-369" ]
cpython
a4ae828ee416a66d8c7bf5ee71d653c2cc6a26dd
266,912,432,572,878,780,000,000,000,000,000,000,000
52
closes bpo-34656: Avoid relying on signed overflow in _pickle memos. (GH-9261)
search_impl(i_ctx_t *i_ctx_p, bool forward) { os_ptr op = osp; os_ptr op1 = op - 1; uint size = r_size(op); uint count; byte *pat; byte *ptr; byte ch; int incr = forward ? 1 : -1; check_read_type(*op1, t_string); check_read_type(*op, t_string); if (size > r_size(op1)) { /* can't match */ make_false(op); return 0; } count = r_size(op1) - size; ptr = op1->value.bytes; if (size == 0) goto found; if (!forward) ptr += count; pat = op->value.bytes; ch = pat[0]; do { if (*ptr == ch && (size == 1 || !memcmp(ptr, pat, size))) goto found; ptr += incr; } while (count--); /* No match */ make_false(op); return 0; found: op->tas.type_attrs = op1->tas.type_attrs; op->value.bytes = ptr; r_set_size(op, size); push(2); op[-1] = *op1; r_set_size(op - 1, ptr - op[-1].value.bytes); op1->value.bytes = ptr + size; r_set_size(op1, count + (!forward ? (size - 1) : 0)); make_true(op); return 0; }
1
[ "CWE-119", "CWE-787" ]
ghostpdl
5d499272b95a6b890a1397e11d20937de000d31b
249,132,726,675,055,930,000,000,000,000,000,000,000
46
Bug 702582, CVE 2020-15900 Memory Corruption in Ghostscript 9.52 Fix the 'rsearch' calculation for the 'post' size to give the correct size. Previous calculation would result in a size that was too large, and could underflow to max uint32_t. Also fix 'rsearch' to return the correct 'pre' string with empty string match. A future change may 'undefine' this undocumented, non-standard operator during initialization as we do with the many other non-standard internal PostScript operators and procedures.
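Illustrative note: the underflow described above is easy to reproduce in isolation. A minimal standalone C sketch (not Ghostscript's code; all names here are assumptions) of a reverse substring search that computes the "post" length without letting an unsigned subtraction wrap on an empty-pattern match:

#include <stddef.h>
#include <string.h>

/* Returns the offset of the last match, or (size_t)-1 if none.
 * *post_len receives the number of bytes after the match, computed
 * so it cannot wrap to a huge unsigned value when pat_len == 0. */
static size_t rsearch_sketch(const unsigned char *str, size_t str_len,
                             const unsigned char *pat, size_t pat_len,
                             size_t *post_len)
{
    size_t i;

    if (pat_len > str_len)
        return (size_t)-1;
    if (pat_len == 0) {            /* empty pattern matches at the end */
        *post_len = 0;
        return str_len;
    }
    for (i = str_len - pat_len + 1; i-- > 0; ) {
        if (memcmp(str + i, pat, pat_len) == 0) {
            *post_len = str_len - i - pat_len;  /* i <= str_len - pat_len */
            return i;
        }
    }
    return (size_t)-1;
}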
static void tcp_chr_update_read_handler(CharDriverState *chr, GMainContext *context, int tag) { TCPCharDriver *s = chr->opaque; if (!s->connected) { return; } remove_fd_in_watch(chr); if (s->ioc) { chr->fd_in_tag = io_add_watch_poll(s->ioc, tcp_chr_read_poll, tcp_chr_read, chr, context); } }
1
[ "CWE-416" ]
qemu
a4afa548fc6dd9842ed86639b4d37d4d1c4ad480
30,165,151,661,296,556,000,000,000,000,000,000,000
18
char: move front end handlers in CharBackend Since the handlers are associated with a CharBackend, rather than the CharDriverState, it is more appropriate to store them in CharBackend. This avoids the handler copy dance in qemu_chr_fe_set_handlers() then mux_chr_update_read_handler(), by storing the CharBackend pointer directly. Also, a mux CharDriver should go through mux->backends[focused], since chr->be will stay NULL. Before that, it was possible to call chr->handler by mistake with surprising results, for example through qemu_chr_be_can_write(), which would result in calling the last set handler front end, not the one with focus. Signed-off-by: Marc-André Lureau <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
static umode_t efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n) { if (attr == &efi_attr_fw_vendor.attr) { if (efi_enabled(EFI_PARAVIRT) || efi.fw_vendor == EFI_INVALID_TABLE_ADDR) return 0; } else if (attr == &efi_attr_runtime.attr) { if (efi.runtime == EFI_INVALID_TABLE_ADDR) return 0; } else if (attr == &efi_attr_config_table.attr) { if (efi.config_table == EFI_INVALID_TABLE_ADDR) return 0; } return attr->mode; }
0
[]
linux
1957a85b0032a81e6482ca4aab883643b8dae06e
297,200,526,554,415,500,000,000,000,000,000,000,000
17
efi: Restrict efivar_ssdt_load when the kernel is locked down efivar_ssdt_load allows the kernel to import arbitrary ACPI code from an EFI variable, which gives arbitrary code execution in ring 0. Prevent that when the kernel is locked down. Signed-off-by: Matthew Garrett <[email protected]> Acked-by: Ard Biesheuvel <[email protected]> Reviewed-by: Kees Cook <[email protected]> Cc: Ard Biesheuvel <[email protected]> Cc: [email protected] Signed-off-by: James Morris <[email protected]>
void rwpng_free_image8(png8_image *image) { free(image->indexed_data); image->indexed_data = NULL; free(image->row_pointers); image->row_pointers = NULL; rwpng_free_chunks(image->chunks); image->chunks = NULL; }
0
[ "CWE-190", "CWE-787" ]
pngquant
b7c217680cda02dddced245d237ebe8c383be285
115,684,127,560,978,150,000,000,000,000,000,000,000
11
Fix integer overflow in rwpng.h (CVE-2016-5735) Reported by Choi Jaeseung Found with Sparrow (http://ropas.snu.ac.kr/sparrow)
static void specialfunc(int key, int x, int y) { if (cmdopts.verbose) { fprintf(stderr, "specialfunc(%d, %d, %d)\n", key, x, y); } switch (key) { case GLUT_KEY_UP: { float panamount; panamount = (glutGetModifiers() & GLUT_ACTIVE_SHIFT) ? BIGPANAMOUNT : SMALLPANAMOUNT; pan(0.0, panamount * (gs.toprighty - gs.botlefty)); glutPostRedisplay(); } break; case GLUT_KEY_DOWN: { float panamount; panamount = (glutGetModifiers() & GLUT_ACTIVE_SHIFT) ? BIGPANAMOUNT : SMALLPANAMOUNT; pan(0.0, -panamount * (gs.toprighty - gs.botlefty)); glutPostRedisplay(); } break; case GLUT_KEY_LEFT: { float panamount; panamount = (glutGetModifiers() & GLUT_ACTIVE_SHIFT) ? BIGPANAMOUNT : SMALLPANAMOUNT; pan(-panamount * (gs.toprightx - gs.botleftx), 0.0); glutPostRedisplay(); } break; case GLUT_KEY_RIGHT: { float panamount; panamount = (glutGetModifiers() & GLUT_ACTIVE_SHIFT) ? BIGPANAMOUNT : SMALLPANAMOUNT; pan(panamount * (gs.toprightx - gs.botleftx), 0.0); glutPostRedisplay(); } break; default: break; } }
0
[ "CWE-119" ]
jasper
65536647d380571d1a9a6c91fa03775fb5bbd256
184,678,367,388,274,930,000,000,000,000,000,000,000
47
A new experimental memory allocator has been introduced. The allocator is experimental in the sense that its API is not considered stable and the allocator may change or disappear entirely in future versions of the code. This new allocator tracks how much memory is being used by jas_malloc and friends. A maximum upper bound on the memory usage can be set via the experimental API provided and a default value can be set at build time as well. Such functionality may be useful in run-time environments where the user wants to be able to limit the amount of memory used by JasPer. This allocator is not used by default. Note: This feature needs C11 functionality. Note: The memory allocator is not thread safe in its current form. A new --memory-limit CLI option has been added to the jasper, imginfo, imgcmp, and jiv programs. The option is only available when the code is built with the new memory allocator. The support for my old debug memory allocator from the 1990s has been purged from the code. The debug memory allocator is probably not a very useful thing with the advent of GCC/Clang code sanitizers. The safe size_t integer functions no longer set their result upon failure. A safe subtract operation was also added.
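Illustrative sketch (assumed names, not JasPer's actual allocator) of the tracking idea the message describes: each block carries a size header so a running total can be charged on allocation and credited on free, with an upper bound enforced before calling malloc. Like the real allocator per the note above, this sketch is not thread safe.

#include <stdlib.h>
#include <string.h>

static size_t mem_used = 0;
static size_t mem_limit = (size_t)-1;   /* default: effectively unlimited */

void *tracked_malloc(size_t size)
{
    size_t total = size + sizeof(size_t);
    unsigned char *p;

    if (total < size)                   /* header accounting overflowed */
        return NULL;
    if (mem_used + total < mem_used || mem_used + total > mem_limit)
        return NULL;                    /* would exceed the configured cap */
    p = malloc(total);
    if (p == NULL)
        return NULL;
    memcpy(p, &total, sizeof(size_t));  /* remember the charged size */
    mem_used += total;
    return p + sizeof(size_t);
}

void tracked_free(void *ptr)
{
    unsigned char *p;
    size_t total;

    if (ptr == NULL)
        return;
    p = (unsigned char *)ptr - sizeof(size_t);
    memcpy(&total, p, sizeof(size_t));
    mem_used -= total;                  /* credit the tracked total */
    free(p);
}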
void FoFiTrueType::cvtEncoding(char **encoding, FoFiOutputFunc outputFunc, void *outputStream) { const char *name; GooString *buf; int i; (*outputFunc)(outputStream, "/Encoding 256 array\n", 20); if (encoding) { for (i = 0; i < 256; ++i) { if (!(name = encoding[i])) { name = ".notdef"; } buf = GooString::format("dup {0:d} /", i); (*outputFunc)(outputStream, buf->getCString(), buf->getLength()); delete buf; (*outputFunc)(outputStream, name, strlen(name)); (*outputFunc)(outputStream, " put\n", 5); } } else { for (i = 0; i < 256; ++i) { buf = GooString::format("dup {0:d} /c{1:02x} put\n", i, i); (*outputFunc)(outputStream, buf->getCString(), buf->getLength()); delete buf; } } (*outputFunc)(outputStream, "readonly def\n", 13); }
0
[ "CWE-125" ]
poppler
cdb7ad95f7c8fbf63ade040d8a07ec96467042fc
255,048,551,542,533,540,000,000,000,000,000,000,000
28
Fix malformed file crash in bug #85243
uint32_t readStructEnd() { T_VIRTUAL_CALL(); return readStructEnd_virt(); }
0
[ "CWE-20" ]
thrift
cfaadcc4adcfde2a8232c62ec89870b73ef40df1
274,574,606,889,410,150,000,000,000,000,000,000,000
4
THRIFT-3231 CPP: Limit recursion depth to 64 Client: cpp Patch: Ben Craig <[email protected]>
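Illustrative sketch in C (the real Thrift code is C++; these names are assumptions) of the depth limit the message refers to: every nested structure bumps a per-reader counter and fails once the fixed limit is hit, so hostile deeply nested input cannot exhaust the stack.

#define MAX_RECURSION_DEPTH 64

struct reader {
    int depth;                      /* current nesting level */
};

static int read_struct_begin(struct reader *r)
{
    if (++r->depth > MAX_RECURSION_DEPTH)
        return -1;                  /* reject pathologically nested input */
    return 0;
}

static void read_struct_end(struct reader *r)
{
    r->depth--;                     /* unwind on the matching end */
}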
dissect_u3v_event_cmd(proto_tree *u3v_telegram_tree, tvbuff_t *tvb, packet_info *pinfo, gint startoffset, gint length) { gint32 eventid; gint offset = startoffset; proto_item *item = NULL; /* Get event ID */ eventid = tvb_get_letohs(tvb, offset + 2); /* fill in Info column in Wireshark GUI */ col_append_fstr(pinfo->cinfo, COL_INFO, "[ID: 0x%04X]", eventid); item = proto_tree_add_item(u3v_telegram_tree, hf_u3v_scd_event_cmd, tvb, offset, length, ENC_NA); u3v_telegram_tree = proto_item_add_subtree(item, ett_u3v_payload_cmd); offset += 2; /* Use range to determine type of event */ if ((eventid >= 0x0000) && (eventid <= 0x8000)) { /* Standard ID */ proto_tree_add_item(u3v_telegram_tree, hf_u3v_eventcmd_id, tvb, offset, 2, ENC_LITTLE_ENDIAN); } else if ((eventid >= 0x8001) && (eventid <= 0x8FFF)) { /* Error */ proto_tree_add_item(u3v_telegram_tree, hf_u3v_eventcmd_error_id, tvb, offset, 2, ENC_LITTLE_ENDIAN); } else if ((eventid >= 0x9000) && (eventid <= 0xFFFF)) { /* Device specific */ proto_tree_add_item(u3v_telegram_tree, hf_u3v_eventcmd_device_specific_id, tvb, offset, 2, ENC_LITTLE_ENDIAN); } offset += 2; /* Timestamp (64 bit) associated with event */ proto_tree_add_item(u3v_telegram_tree, hf_u3v_eventcmd_timestamp, tvb, offset, 8, ENC_LITTLE_ENDIAN); offset += 8; /* Data */ if (length > offset ) { proto_tree_add_item(u3v_telegram_tree, hf_u3v_eventcmd_data, tvb, offset, length - 12, ENC_NA); } }
0
[ "CWE-476" ]
wireshark
2cb5985bf47bdc8bea78d28483ed224abdd33dc6
245,198,235,155,760,940,000,000,000,000,000,000,000
40
Make class "type" for USB conversations. USB dissectors can't assume that only their class type has been passed around in the conversation. Add an explicit check that the expected class type matches the dissector, and stop/prevent dissection if there isn't a match. Bug: 12356 Change-Id: Ib23973a4ebd0fbb51952ffc118daf95e3389a209 Reviewed-on: https://code.wireshark.org/review/15212 Petri-Dish: Michael Mann <[email protected]> Reviewed-by: Martin Kaiser <[email protected]> Petri-Dish: Martin Kaiser <[email protected]> Tested-by: Petri Dish Buildbot <[email protected]> Reviewed-by: Michael Mann <[email protected]>
int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev, struct in6_addr *saddr, struct in6_addr *daddr, int proto, int len) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6hdr *hdr; int totlen; skb->protocol = htons(ETH_P_IPV6); skb->dev = dev; totlen = len + sizeof(struct ipv6hdr); hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr)); skb->nh.ipv6h = hdr; *(u32*)hdr = htonl(0x60000000); hdr->payload_len = htons(len); hdr->nexthdr = proto; hdr->hop_limit = np->hop_limit; ipv6_addr_copy(&hdr->saddr, saddr); ipv6_addr_copy(&hdr->daddr, daddr); return 0; }
0
[]
linux
e89e9cf539a28df7d0eb1d0a545368e9920b34ac
267,148,785,844,743,530,000,000,000,000,000,000,000
27
[IPv4/IPv6]: UFO Scatter-gather approach Attached is a kernel patch for the UDP Fragmentation Offload (UFO) feature. 1. This patch incorporates the review comments by Jeff Garzik. 2. Renamed USO as UFO (UDP Fragmentation Offload) 3. udp sendfile support with UFO This patch uses the scatter-gather feature of skb to generate large UDP datagrams. Below is a "how-to" on the changes required in a network device driver to use the UFO interface. UDP Fragmentation Offload (UFO) Interface: ------------------------------------------- UFO is a feature wherein the Linux kernel network stack will offload the IP fragmentation of large UDP datagrams to hardware. This reduces the stack overhead of fragmenting large UDP datagrams into MTU-sized packets. 1) Drivers indicate their UFO capability using dev->features |= NETIF_F_UFO | NETIF_F_HW_CSUM | NETIF_F_SG NETIF_F_HW_CSUM is required for UFO over IPv6. 2) A UFO packet will be submitted for transmission using the driver xmit routine. A UFO packet will have a non-zero value for "skb_shinfo(skb)->ufo_size". skb_shinfo(skb)->ufo_size will indicate the length of the data part in each IP fragment going out of the adapter after IP fragmentation by hardware. skb->data will contain the MAC/IP/UDP header and skb_shinfo(skb)->frags[] contains the data payload. skb->ip_summed will be set to CHECKSUM_HW, indicating that the hardware has to do the checksum calculation. The hardware should compute the UDP checksum of the complete datagram and also the IP header checksum of each fragmented IP packet. For IPv6, UFO provides the fragment identification id in skb_shinfo(skb)->ip6_frag_id. The adapter should use this ID for generating IPv6 fragments. Signed-off-by: Ananda Raju <[email protected]> Signed-off-by: Rusty Russell <[email protected]> (forwarded) Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
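The how-to above reduces to two driver-side touch points. A hedged sketch (hypothetical driver and helper names; only the feature flags and the ufo_size field come from the message itself):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int mydrv_setup_hw_ufo(struct sk_buff *skb, struct net_device *dev);  /* hypothetical */
static int mydrv_xmit_normal(struct sk_buff *skb, struct net_device *dev);   /* hypothetical */

static int mydrv_probe_sketch(struct net_device *dev)
{
    /* Step 1: advertise the capability; NETIF_F_HW_CSUM is required
     * for UFO over IPv6, per the message above. */
    dev->features |= NETIF_F_UFO | NETIF_F_HW_CSUM | NETIF_F_SG;
    return 0;
}

static int mydrv_xmit_sketch(struct sk_buff *skb, struct net_device *dev)
{
    /* Step 2: a non-zero ufo_size marks a large UDP datagram that the
     * hardware must fragment into MTU-sized IP fragments. */
    if (skb_shinfo(skb)->ufo_size)
        return mydrv_setup_hw_ufo(skb, dev);
    return mydrv_xmit_normal(skb, dev);
}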
static void try_to_free_low(struct hstate *h, unsigned long count, nodemask_t *nodes_allowed) { int i; if (hstate_is_gigantic(h)) return; for_each_node_mask(i, *nodes_allowed) { struct page *page, *next; struct list_head *freel = &h->hugepage_freelists[i]; list_for_each_entry_safe(page, next, freel, lru) { if (count >= h->nr_huge_pages) return; if (PageHighMem(page)) continue; list_del(&page->lru); update_and_free_page(h, page); h->free_huge_pages--; h->free_huge_pages_node[page_to_nid(page)]--; } } }
0
[ "CWE-703" ]
linux
5af10dfd0afc559bb4b0f7e3e8227a1578333995
311,464,808,039,807,800,000,000,000,000,000,000,000
23
userfaultfd: hugetlbfs: remove superfluous page unlock in VM_SHARED case huge_add_to_page_cache->add_to_page_cache implicitly unlocks the page before returning in case of errors. The error returned was -EEXIST by running UFFDIO_COPY on a non-hole offset of a VM_SHARED hugetlbfs mapping. It was a userland bug that triggered it and the kernel must cope with it returning -EEXIST from ioctl(UFFDIO_COPY) as expected. page dumped because: VM_BUG_ON_PAGE(!PageLocked(page)) kernel BUG at mm/filemap.c:964! invalid opcode: 0000 [#1] SMP CPU: 1 PID: 22582 Comm: qemu-system-x86 Not tainted 4.11.11-300.fc26.x86_64 #1 RIP: unlock_page+0x4a/0x50 Call Trace: hugetlb_mcopy_atomic_pte+0xc0/0x320 mcopy_atomic+0x96f/0xbe0 userfaultfd_ioctl+0x218/0xe90 do_vfs_ioctl+0xa5/0x600 SyS_ioctl+0x79/0x90 entry_SYSCALL_64_fastpath+0x1a/0xa9 Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Andrea Arcangeli <[email protected]> Tested-by: Maxime Coquelin <[email protected]> Reviewed-by: Mike Kravetz <[email protected]> Cc: "Dr. David Alan Gilbert" <[email protected]> Cc: Mike Rapoport <[email protected]> Cc: Alexey Perevalov <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static inline void Hq2X(const Image *source,const Quantum *pixels, Quantum *result,const size_t channels) { static const unsigned int Hq2XTable[] = { 4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 15, 12, 5, 3, 17, 13, 4, 4, 6, 18, 4, 4, 6, 18, 5, 3, 12, 12, 5, 3, 1, 12, 4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 17, 13, 5, 3, 16, 14, 4, 4, 6, 18, 4, 4, 6, 18, 5, 3, 16, 12, 5, 3, 1, 14, 4, 4, 6, 2, 4, 4, 6, 2, 5, 19, 12, 12, 5, 19, 16, 12, 4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 16, 12, 5, 3, 16, 12, 4, 4, 6, 2, 4, 4, 6, 2, 5, 19, 1, 12, 5, 19, 1, 14, 4, 4, 6, 2, 4, 4, 6, 18, 5, 3, 16, 12, 5, 19, 1, 14, 4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 15, 12, 5, 3, 17, 13, 4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 16, 12, 5, 3, 16, 12, 4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 17, 13, 5, 3, 16, 14, 4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 16, 13, 5, 3, 1, 14, 4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 16, 12, 5, 3, 16, 13, 4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 16, 12, 5, 3, 1, 12, 4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 16, 12, 5, 3, 1, 14, 4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 1, 12, 5, 3, 1, 14 }; const int pattern1[] = { !PixelsEqual(pixels,4,pixels,8,channels), !PixelsEqual(pixels,4,pixels,7,channels), !PixelsEqual(pixels,4,pixels,6,channels), !PixelsEqual(pixels,4,pixels,5,channels), !PixelsEqual(pixels,4,pixels,3,channels), !PixelsEqual(pixels,4,pixels,2,channels), !PixelsEqual(pixels,4,pixels,1,channels), !PixelsEqual(pixels,4,pixels,0,channels) }; #define Rotated(p) p[2], p[4], p[7], p[1], p[6], p[0], p[3], p[5] const int pattern2[] = { Rotated(pattern1) }; const int pattern3[] = { Rotated(pattern2) }; const int pattern4[] = { Rotated(pattern3) }; #undef Rotated Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern1)],pixels,result,0, channels,4,0,1,3,5,7); Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern2)],pixels,result,1, channels,4,2,5,1,7,3); Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern3)],pixels,result,3, channels,4,8,7,5,3,1); Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern4)],pixels,result,2, channels,4,6,3,7,1,5); }
0
[ "CWE-369" ]
ImageMagick
43539e67a47d2f8de832d33a5b26dc2a7a12294f
164,472,401,022,379,960,000,000,000,000,000,000,000
52
https://github.com/ImageMagick/ImageMagick/issues/1718
QPDFObjectHandle::getArrayAsVector() { assertArray(); return dynamic_cast<QPDF_Array*>(obj.getPointer())->getAsVector(); }
0
[ "CWE-835" ]
qpdf
afe0242b263a9e1a8d51dd81e42ab6de2e5127eb
59,477,140,172,045,020,000,000,000,000,000,000,000
5
Handle object ID 0 (fixes #99) This is CVE-2017-9208. The QPDF library uses object ID 0 internally as a sentinel to represent a direct object, but prior to this fix, was not blocking handling of 0 0 obj or 0 0 R as a special case. Creating an object in the file with 0 0 obj could cause various infinite loops. The PDF spec doesn't allow for object 0. Having qpdf handle object 0 might be a better fix, but changing all the places in the code that assumes objid == 0 means direct would be risky.
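Illustrative sketch (hypothetical names, not qpdf's C++ code) of the special case the message calls for: since object ID 0 is reserved as the internal direct-object sentinel, "0 0 obj" and "0 0 R" must be rejected at parse time instead of resolved.

static int validate_indirect_ref(int objid, int generation)
{
    (void)generation;              /* any generation is invalid with id 0 */
    if (objid == 0)
        return -1;                 /* the PDF spec does not allow object 0 */
    return 0;
}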
static bool get_new_nicname(char **dest, char *br, int pid, char **cnic) { char template[IFNAMSIZ]; snprintf(template, sizeof(template), "vethXXXXXX"); *dest = lxc_mkifname(template); if (!create_nic(*dest, br, pid, cnic)) { return false; } return true; }
0
[ "CWE-284", "CWE-862" ]
lxc
16af238036a5464ae8f2420ed3af214f0de875f9
338,721,134,348,801,400,000,000,000,000,000,000,000
11
CVE-2017-5985: Ensure target netns is caller-owned Before this commit, lxc-user-nic could potentially have been tricked into operating on a network namespace over which the caller did not hold privilege. This commit ensures that the caller is privileged over the network namespace by temporarily dropping privilege. Launchpad: https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/1654676 Reported-by: Jann Horn <[email protected]> Signed-off-by: Christian Brauner <[email protected]>
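Hedged sketch (simplified; not lxc's code, and a real setuid helper's credential handling is more involved) of the idea in the message: act with the caller's uid while opening and joining the target network namespace, so the kernel itself rejects namespaces the caller does not own.

#define _GNU_SOURCE
#include <unistd.h>
#include <fcntl.h>
#include <sched.h>

static int join_netns_as_caller(const char *nspath, uid_t caller_uid)
{
    int fd, ret;

    if (seteuid(caller_uid) < 0)       /* temporarily drop privilege */
        return -1;
    fd = open(nspath, O_RDONLY | O_CLOEXEC);  /* fails without access */
    ret = (fd >= 0) ? setns(fd, CLONE_NEWNET) : -1;
    if (fd >= 0)
        close(fd);
    if (seteuid(0) < 0)                /* restore privilege */
        return -1;
    return ret;
}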
static inline bool dma_pte_present(struct dma_pte *pte) { return (pte->val & 3) != 0; }
0
[]
linux
fb58fdcd295b914ece1d829b24df00a17a9624bc
217,368,335,644,941,700,000,000,000,000,000,000,000
4
iommu/vt-d: Do not enable ATS for untrusted devices Currently Linux automatically enables ATS (Address Translation Service) for any device that supports it (and IOMMU is turned on). ATS is used to accelerate DMA access as the device can cache translations locally so there is no need to do full translation on IOMMU side. However, as pointed out in [1] ATS can be used to bypass IOMMU based security completely by simply sending PCIe read/write transaction with AT (Address Translation) field set to "translated". To mitigate this modify the Intel IOMMU code so that it does not enable ATS for any device that is marked as being untrusted. In case this turns out to cause performance issues we may selectively allow ATS based on user decision but currently use big hammer and disable it completely to be on the safe side. [1] https://www.repository.cam.ac.uk/handle/1810/274352 Signed-off-by: Mika Westerberg <[email protected]> Reviewed-by: Ashok Raj <[email protected]> Reviewed-by: Joerg Roedel <[email protected]> Acked-by: Joerg Roedel <[email protected]>
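Illustrative sketch (the *_sketch helpers are assumptions) of the policy in the message: never enable ATS on a device marked untrusted, since such a device can submit pre-translated PCIe transactions that bypass the IOMMU.

#include <linux/pci.h>

static bool pci_ats_supported_sketch(struct pci_dev *pdev);   /* assumed probe */
static void pci_enable_ats_sketch(struct pci_dev *pdev);      /* assumed enable */

static void enable_ats_if_safe(struct pci_dev *pdev)
{
    if (pdev->untrusted)               /* e.g. behind an externally exposed port */
        return;                        /* big hammer: skip ATS entirely */
    if (pci_ats_supported_sketch(pdev))
        pci_enable_ats_sketch(pdev);
}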
void Filter::onSoftPerTryTimeout(UpstreamRequest& upstream_request) { // Track this as a timeout for outlier detection purposes even though we didn't // cancel the request yet and might get a 2xx later. updateOutlierDetection(Upstream::Outlier::Result::LocalOriginTimeout, upstream_request, absl::optional<uint64_t>(enumToInt(timeout_response_code_))); upstream_request.outlierDetectionTimeoutRecorded(true); if (!downstream_response_started_ && retry_state_) { RetryStatus retry_status = retry_state_->shouldHedgeRetryPerTryTimeout([this]() -> void { doRetry(); }); if (retry_status == RetryStatus::Yes) { runRetryOptionsPredicates(upstream_request); pending_retries_++; // Don't increment upstream_host->stats().rq_error_ here, we'll do that // later if 1) we hit global timeout or 2) we get bad response headers // back. upstream_request.retried(true); // TODO: cluster stat for hedge attempted. } else if (retry_status == RetryStatus::NoOverflow) { callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow); } else if (retry_status == RetryStatus::NoRetryLimitExceeded) { callbacks_->streamInfo().setResponseFlag( StreamInfo::ResponseFlag::UpstreamRetryLimitExceeded); } } }
0
[ "CWE-703" ]
envoy
f0bb2219112d8cdb4c4e8b346834f962925362ca
291,694,960,260,840,300,000,000,000,000,000,000,000
29
[1.20] CVE-2022-21655 Crash with direct_response Signed-off-by: Otto van der Schaaf <[email protected]>
TEST_F(GrpcHealthCheckerImplTest, DontReuseConnectionStreamReset) { setupNoReuseConnectionHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectHealthcheckStart(0); EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); health_checker_->start(); expectHealthcheckStop(0); // Resets are considered network failures and make host unhealthy also after 2nd event. EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)); test_sessions_[0]->request_encoder_.stream_.resetStream(Http::StreamResetReason::RemoteReset); expectHostHealthy(true); // A new client is created because we close the connection // when a stream reset occurs and connection reuse is disabled. expectClientCreate(0); expectHealthcheckStart(0); test_sessions_[0]->interval_timer_->invokeCallback(); expectHealthcheckStop(0); // Test host state haven't changed. EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); expectHostHealthy(true); }
0
[ "CWE-476" ]
envoy
9b1c3962172a972bc0359398af6daa3790bb59db
279,305,948,460,889,900,000,000,000,000,000,000,000
28
healthcheck: fix grpc inline removal crashes (#749) Signed-off-by: Matt Klein <[email protected]> Signed-off-by: Pradeep Rao <[email protected]>
static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata) { }
0
[]
qemu
acfc4846508a02cc4c83aa27799fd74ac280bdb2
152,993,205,004,414,530,000,000,000,000,000,000,000
3
virtio-gpu: use VIRTIO_GPU_MAX_SCANOUTS The value is defined in virtio_gpu.h already (changing from 4 to 16). Signed-off-by: Marc-André Lureau <[email protected]> Message-id: [email protected] Signed-off-by: Gerd Hoffmann <[email protected]>
gdm_session_worker_get_environment (GdmSessionWorker *worker) { GPtrArray *environment; environment = g_ptr_array_new (); g_hash_table_foreach (worker->priv->environment, (GHFunc) gdm_session_worker_fill_environment_array, environment); g_ptr_array_add (environment, NULL); return (char **) g_ptr_array_free (environment, FALSE); }
0
[]
gdm
c25ef9245be4e0be2126ef3d075df4401949b570
165,987,708,680,411,770,000,000,000,000,000,000,000
12
Store the face and dmrc files in a cache. Refer to bug #565151.
static int cardos_select_file(sc_card_t *card, const sc_path_t *in_path, sc_file_t **file) { int r; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); r = iso_ops->select_file(card, in_path, file); if (r >= 0 && file) parse_sec_attr((*file), (*file)->sec_attr, (*file)->sec_attr_len); LOG_FUNC_RETURN(card->ctx, r); }
0
[]
OpenSC
1252aca9f10771ef5ba8405e73cf2da50827958f
220,167,332,415,412,340,000,000,000,000,000,000,000
12
cardos: Correctly calculate the remaining bytes to avoid a buffer overrun. Thanks oss-fuzz https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=29912
_Unpickler_ResizeMemoList(UnpicklerObject *self, size_t new_size) { size_t i; assert(new_size > self->memo_size); PyObject **memo_new = self->memo; PyMem_RESIZE(memo_new, PyObject *, new_size); if (memo_new == NULL) { PyErr_NoMemory(); return -1; } self->memo = memo_new; for (i = self->memo_size; i < new_size; i++) self->memo[i] = NULL; self->memo_size = new_size; return 0; }
0
[ "CWE-190", "CWE-369" ]
cpython
a4ae828ee416a66d8c7bf5ee71d653c2cc6a26dd
311,742,344,233,109,000,000,000,000,000,000,000,000
18
closes bpo-34656: Avoid relying on signed overflow in _pickle memos. (GH-9261)
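Illustrative standalone sketch (plain C, not the CPython code) of the unsigned, overflow-checked growth the message calls for: sizes live in size_t and the multiplication is validated before realloc, rather than relying on signed overflow being caught.

#include <stdlib.h>

static int resize_table(void ***table, size_t *capacity, size_t new_size)
{
    void **p;
    size_t i;

    if (new_size > (size_t)-1 / sizeof(void *))
        return -1;                          /* count * size would wrap */
    p = realloc(*table, new_size * sizeof(void *));
    if (p == NULL)
        return -1;
    for (i = *capacity; i < new_size; i++)  /* NULL-fill the new slots */
        p[i] = NULL;
    *table = p;
    *capacity = new_size;
    return 0;
}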
static int add_recent_loose(const unsigned char *sha1, const char *path, void *data) { struct stat st; struct object *obj = lookup_object(sha1); if (obj && obj->flags & SEEN) return 0; if (stat(path, &st) < 0) { /* * It's OK if an object went away during our iteration; this * could be due to a simultaneous repack. But anything else * we should abort, since we might then fail to mark objects * which should not be pruned. */ if (errno == ENOENT) return 0; return error("unable to stat %s: %s", sha1_to_hex(sha1), strerror(errno)); } add_recent_object(sha1, st.st_mtime, data); return 0; }
0
[ "CWE-119", "CWE-787" ]
git
de1e67d0703894cb6ea782e36abb63976ab07e60
27,312,645,451,475,810,000,000,000,000,000,000,000
25
list-objects: pass full pathname to callbacks When we find a blob at "a/b/c", we currently pass this to our show_object_fn callbacks as two components: "a/b/" and "c". Callbacks which want the full value then call path_name(), which concatenates the two. But this is an inefficient interface; the path is a strbuf, and we could simply append "c" to it temporarily, then roll back the length, without creating a new copy. So we could improve this by teaching the callsites of path_name() this trick (and there are only 3). But we can also notice that no callback actually cares about the broken-down representation, and simply pass each callback the full path "a/b/c" as a string. The callback code becomes even simpler, then, as we do not have to worry about freeing an allocated buffer, nor rolling back our modification to the strbuf. This is theoretically less efficient, as some callbacks would not bother to format the final path component. But in practice this is not measurable. Since we use the same strbuf over and over, our work to grow it is amortized, and we really only pay to memcpy a few bytes. Signed-off-by: Jeff King <[email protected]> Signed-off-by: Junio C Hamano <[email protected]>
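Illustrative sketch (a minimal fixed buffer standing in for git's strbuf; names are assumptions) of the append/roll-back trick described above: append the final component, hand the callback the full "a/b/c" string, then restore the saved length so the same buffer is reused with only a few bytes copied per entry.

#include <stdio.h>

struct buf { char data[4096]; size_t len; };

static void show_full_path(struct buf *base, const char *component,
                           void (*cb)(const char *path))
{
    size_t saved = base->len;              /* length of the "a/b/" prefix */
    size_t room = sizeof(base->data) - base->len;
    int n = snprintf(base->data + base->len, room, "%s", component);

    if (n > 0 && (size_t)n < room)
        base->len += (size_t)n;            /* full component appended */
    cb(base->data);                        /* callback sees "a/b/c" */
    base->data[saved] = '\0';              /* roll back: no copy, no free */
    base->len = saved;
}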
static void acpi_unregister_gsi_ioapic(u32 gsi) { #ifdef CONFIG_X86_IO_APIC int irq; mutex_lock(&acpi_ioapic_lock); irq = mp_map_gsi_to_irq(gsi, 0, NULL); if (irq > 0) mp_unmap_irq(irq); mutex_unlock(&acpi_ioapic_lock); #endif }
0
[ "CWE-120" ]
linux
dad5ab0db8deac535d03e3fe3d8f2892173fa6a4
127,991,995,553,828,590,000,000,000,000,000,000,000
12
x86/acpi: Prevent out of bound access caused by broken ACPI tables The bus_irq argument of mp_override_legacy_irq() is used as the index into the isa_irq_to_gsi[] array. The bus_irq argument originates from ACPI_MADT_TYPE_IO_APIC and ACPI_MADT_TYPE_INTERRUPT items in the ACPI tables, but is nowhere sanity checked. That allows broken or malicious ACPI tables to overwrite memory, which might cause malfunction, panic or arbitrary code execution. Add a sanity check and emit a warning when that triggers. [ tglx: Added warning and rewrote changelog ] Signed-off-by: Seunghun Han <[email protected]> Signed-off-by: Thomas Gleixner <[email protected]> Cc: [email protected] Cc: "Rafael J. Wysocki" <[email protected]> Cc: [email protected] Signed-off-by: Ingo Molnar <[email protected]>
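Illustrative sketch (array and bound names are assumptions) of the sanity check the message adds: the firmware-supplied bus_irq is validated before it indexes isa_irq_to_gsi[], so a broken or malicious ACPI table cannot write outside the array.

#define NR_IRQS_LEGACY 16

static unsigned int isa_irq_to_gsi_sketch[NR_IRQS_LEGACY];

static int override_legacy_irq_sketch(unsigned int bus_irq, unsigned int gsi)
{
    if (bus_irq >= NR_IRQS_LEGACY)
        return -1;                 /* warn and bail out, don't corrupt memory */
    isa_irq_to_gsi_sketch[bus_irq] = gsi;
    return 0;
}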
int sched_group_set_shares(struct task_group *tg, unsigned long shares) { int i; unsigned long flags; /* * We can't change the weight of the root cgroup. */ if (!tg->se[0]) return -EINVAL; if (shares < MIN_SHARES) shares = MIN_SHARES; else if (shares > MAX_SHARES) shares = MAX_SHARES; mutex_lock(&shares_mutex); if (tg->shares == shares) goto done; spin_lock_irqsave(&task_group_lock, flags); for_each_possible_cpu(i) unregister_fair_sched_group(tg, i); list_del_rcu(&tg->siblings); spin_unlock_irqrestore(&task_group_lock, flags); /* wait for any ongoing reference to this group to finish */ synchronize_sched(); /* * Now we are free to modify the group's share on each cpu * w/o tripping rebalance_share or load_balance_fair. */ tg->shares = shares; for_each_possible_cpu(i) { /* * force a rebalance */ cfs_rq_set_shares(tg->cfs_rq[i], 0); set_se_shares(tg->se[i], shares); } /* * Enable load balance activity on this group, by inserting it back on * each cpu's rq->leaf_cfs_rq_list. */ spin_lock_irqsave(&task_group_lock, flags); for_each_possible_cpu(i) register_fair_sched_group(tg, i); list_add_rcu(&tg->siblings, &tg->parent->children); spin_unlock_irqrestore(&task_group_lock, flags); done: mutex_unlock(&shares_mutex); return 0; }
0
[ "CWE-703", "CWE-835" ]
linux
f26f9aff6aaf67e9a430d16c266f91b13a5bff64
240,827,135,663,783,420,000,000,000,000,000,000,000
55
Sched: fix skip_clock_update optimization idle_balance() drops/retakes rq->lock, leaving the previous task vulnerable to set_tsk_need_resched(). Clear it after we return from balancing instead, and in setup_thread_stack() as well, so no successfully descheduled or never scheduled task has it set. Need resched confused the skip_clock_update logic, which assumes that the next call to update_rq_clock() will come nearly immediately after being set. Make the optimization robust against the case of waking a sleeper before it successfully deschedules by checking that the current task has not been dequeued before setting the flag, since it is that useless clock update we're trying to save, and clear unconditionally in schedule() proper instead of conditionally in put_prev_task(). Signed-off-by: Mike Galbraith <[email protected]> Reported-by: Bjoern B. Brandenburg <[email protected]> Tested-by: Yong Zhang <[email protected]> Signed-off-by: Peter Zijlstra <[email protected]> Cc: [email protected] LKML-Reference: <[email protected]> Signed-off-by: Ingo Molnar <[email protected]>
static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, bool remount) { char *this_char, *value, *rest; uid_t uid; gid_t gid; while (options != NULL) { this_char = options; for (;;) { /* * NUL-terminate this option: unfortunately, * mount options form a comma-separated list, * but mpol's nodelist may also contain commas. */ options = strchr(options, ','); if (options == NULL) break; options++; if (!isdigit(*options)) { options[-1] = '\0'; break; } } if (!*this_char) continue; if ((value = strchr(this_char,'=')) != NULL) { *value++ = 0; } else { printk(KERN_ERR "tmpfs: No value for mount option '%s'\n", this_char); return 1; } if (!strcmp(this_char,"size")) { unsigned long long size; size = memparse(value,&rest); if (*rest == '%') { size <<= PAGE_SHIFT; size *= totalram_pages; do_div(size, 100); rest++; } if (*rest) goto bad_val; sbinfo->max_blocks = DIV_ROUND_UP(size, PAGE_CACHE_SIZE); } else if (!strcmp(this_char,"nr_blocks")) { sbinfo->max_blocks = memparse(value, &rest); if (*rest) goto bad_val; } else if (!strcmp(this_char,"nr_inodes")) { sbinfo->max_inodes = memparse(value, &rest); if (*rest) goto bad_val; } else if (!strcmp(this_char,"mode")) { if (remount) continue; sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; if (*rest) goto bad_val; } else if (!strcmp(this_char,"uid")) { if (remount) continue; uid = simple_strtoul(value, &rest, 0); if (*rest) goto bad_val; sbinfo->uid = make_kuid(current_user_ns(), uid); if (!uid_valid(sbinfo->uid)) goto bad_val; } else if (!strcmp(this_char,"gid")) { if (remount) continue; gid = simple_strtoul(value, &rest, 0); if (*rest) goto bad_val; sbinfo->gid = make_kgid(current_user_ns(), gid); if (!gid_valid(sbinfo->gid)) goto bad_val; } else if (!strcmp(this_char,"mpol")) { if (mpol_parse_str(value, &sbinfo->mpol)) goto bad_val; } else { printk(KERN_ERR "tmpfs: Bad mount option %s\n", this_char); return 1; } } return 0; bad_val: printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n", value, this_char); return 1; }
0
[ "CWE-399" ]
linux
5f00110f7273f9ff04ac69a5f85bb535a4fd0987
319,672,257,299,223,370,000,000,000,000,000,000,000
97
tmpfs: fix use-after-free of mempolicy object The tmpfs remount logic preserves filesystem mempolicy if the mpol=M option is not specified in the remount request. A new policy can be specified if mpol=M is given. Before this patch remounting an mpol bound tmpfs without specifying mpol= mount option in the remount request would set the filesystem's mempolicy object to a freed mempolicy object. To reproduce the problem boot a DEBUG_PAGEALLOC kernel and run: # mkdir /tmp/x # mount -t tmpfs -o size=100M,mpol=interleave nodev /tmp/x # grep /tmp/x /proc/mounts nodev /tmp/x tmpfs rw,relatime,size=102400k,mpol=interleave:0-3 0 0 # mount -o remount,size=200M nodev /tmp/x # grep /tmp/x /proc/mounts nodev /tmp/x tmpfs rw,relatime,size=204800k,mpol=??? 0 0 # note ? garbage in mpol=... output above # dd if=/dev/zero of=/tmp/x/f count=1 # panic here Panic: BUG: unable to handle kernel NULL pointer dereference at (null) IP: [< (null)>] (null) [...] Oops: 0010 [#1] SMP DEBUG_PAGEALLOC Call Trace: mpol_shared_policy_init+0xa5/0x160 shmem_get_inode+0x209/0x270 shmem_mknod+0x3e/0xf0 shmem_create+0x18/0x20 vfs_create+0xb5/0x130 do_last+0x9a1/0xea0 path_openat+0xb3/0x4d0 do_filp_open+0x42/0xa0 do_sys_open+0xfe/0x1e0 compat_sys_open+0x1b/0x20 cstar_dispatch+0x7/0x1f Non-debug kernels will not crash immediately because referencing the dangling mpol will not cause a fault. Instead the filesystem will reference a freed mempolicy object, which will cause unpredictable behavior. The problem boils down to a dropped mpol reference below if shmem_parse_options() does not allocate a new mpol: config = *sbinfo shmem_parse_options(data, &config, true) mpol_put(sbinfo->mpol) sbinfo->mpol = config.mpol /* BUG: saves unreferenced mpol */ This patch avoids the crash by not releasing the mempolicy if shmem_parse_options() doesn't create a new mpol. How far back does this issue go? I see it in both 2.6.36 and 3.3. I did not look back further. Signed-off-by: Greg Thelen <[email protected]> Acked-by: Hugh Dickins <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
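Illustrative sketch (toy structs, not the mm/shmem.c code) of the ordering fix the message implies: the old mempolicy reference is dropped only when the remount actually produced a new one, so a remount without mpol= keeps the existing policy alive instead of pointing at freed memory.

struct mempolicy { int refcnt; };

static void mpol_put_sketch(struct mempolicy *p)
{
    if (p)
        p->refcnt--;               /* stands in for the real refcount drop */
}

struct sb_info_sketch { struct mempolicy *mpol; };

static void apply_remount(struct sb_info_sketch *sbinfo,
                          struct mempolicy *new_mpol)
{
    if (new_mpol) {                /* mpol= was given: swap references */
        mpol_put_sketch(sbinfo->mpol);
        sbinfo->mpol = new_mpol;
    }
    /* else: keep the existing policy and its reference untouched */
}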
void sqlite3VdbeResolveLabel(Vdbe *v, int x){ Parse *p = v->pParse; int j = ADDR(x); assert( v->magic==VDBE_MAGIC_INIT ); assert( j<-p->nLabel ); assert( j>=0 ); #ifdef SQLITE_DEBUG if( p->db->flags & SQLITE_VdbeAddopTrace ){ printf("RESOLVE LABEL %d to %d\n", x, v->nOp); } #endif if( p->nLabelAlloc + p->nLabel < 0 ){ resizeResolveLabel(p,v,j); }else{ assert( p->aLabel[j]==(-1) ); /* Labels may only be resolved once */ p->aLabel[j] = v->nOp; } }
0
[ "CWE-755" ]
sqlite
8654186b0236d556aa85528c2573ee0b6ab71be3
243,609,484,412,957,360,000,000,000,000,000,000,000
18
When an error occurs while rewriting the parser tree for window functions in the sqlite3WindowRewrite() routine, make sure that pParse->nErr is set, and make sure that this shuts down any subsequent code generation that might depend on the transformations that were implemented. This fixes a problem discovered by the Yongheng and Rui fuzzer. FossilOrigin-Name: e2bddcd4c55ba3cbe0130332679ff4b048630d0ced9a8899982edb5a3569ba7f
static void SerializeGltfTexture(Texture &texture, json &o) { if (texture.sampler > -1) { SerializeNumberProperty("sampler", texture.sampler, o); } if (texture.source > -1) { SerializeNumberProperty("source", texture.source, o); } if (texture.name.size()) { SerializeStringProperty("name", texture.name, o); } if (texture.extras.Type() != NULL_TYPE) { SerializeValue("extras", texture.extras, o); } SerializeExtensionMap(texture.extensions, o); }
0
[ "CWE-20" ]
tinygltf
52ff00a38447f06a17eab1caa2cf0730a119c751
26,655,294,741,693,000,000,000,000,000,000,000,000
15
Do not expand the file path since it is not necessary for a glTF asset path (URI), and for security reasons (`wordexp`).
UrnState::start(HttpRequest * r, StoreEntry * e) { debugs(52, 3, "urnStart: '" << e->url() << "'" ); entry = e; request = r; entry->lock("UrnState::start"); setUriResFromRequest(r); if (urlres_r == NULL) return; auto urlEntry = storeGetPublic(urlres, Http::METHOD_GET); if (!urlEntry || (urlEntry->hittingRequiresCollapsing() && !startCollapsingOn(*urlEntry, false))) { urlres_e = storeCreateEntry(urlres, urlres, RequestFlags(), Http::METHOD_GET); sc = storeClientListAdd(urlres_e, this); FwdState::Start(Comm::ConnectionPointer(), urlres_e, urlres_r.getRaw(), ale); if (urlEntry) { urlEntry->abandon(__FUNCTION__); urlEntry = nullptr; } } else { urlres_e = urlEntry; urlres_e->lock("UrnState::start"); sc = storeClientListAdd(urlres_e, this); } reqofs = 0; StoreIOBuffer tempBuffer; tempBuffer.offset = reqofs; tempBuffer.length = URN_REQBUF_SZ; tempBuffer.data = reqbuf; storeClientCopy(sc, urlres_e, tempBuffer, urnHandleReply, this); }
0
[ "CWE-401" ]
squid
47a085ff06598b64817875769022b8707a0af7db
178,930,101,574,740,580,000,000,000,000,000,000,000
38
Bug 5104: Memory leak in RFC 2169 response parsing (#778) A temporary parsing buffer was not being released when parsing completed.
void mutt_encode_path (char *dest, size_t dlen, const char *src) { char *p = safe_strdup (src); int rc = mutt_convert_string (&p, Charset, "utf-8", 0); strfcpy (dest, rc == 0 ? p : src, dlen); FREE (&p); }
0
[ "CWE-668" ]
mutt
6d0624411a979e2e1d76af4dd97d03f47679ea4a
222,864,818,313,716,670,000,000,000,000,000,000,000
7
use a 64-bit random value in temporary filenames. closes #3158
static Agraph_t *pop_subg(void)
{
    Agraph_t *g;
    if (GSP == 0) {
        fprintf(stderr, "graphml2gv: Gstack underflow in graph parser\n");
        exit(1);
    }
    g = Gstack[--GSP];
    if (GSP > 0)
        G = Gstack[GSP - 1];
    return g;
}
0
[ "CWE-476" ]
graphviz
839085f8026afd6f6920a0c31ad2a9d880d97932
30,693,300,559,249,098,000,000,000,000,000,000,000
12
attempted fix for null pointer dereference on malformed input
static int aio_setup_ring(struct kioctx *ctx)
{
    struct aio_ring *ring;
    struct aio_ring_info *info = &ctx->ring_info;
    unsigned nr_events = ctx->max_reqs;
    unsigned long size;
    int nr_pages;

    /* Compensate for the ring buffer's head/tail overlap entry */
    nr_events += 2; /* 1 is required, 2 for good luck */

    size = sizeof(struct aio_ring);
    size += sizeof(struct io_event) * nr_events;
    nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

    if (nr_pages < 0)
        return -EINVAL;

    nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);

    info->nr = 0;
    info->ring_pages = info->internal_pages;
    if (nr_pages > AIO_RING_PAGES) {
        info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
        if (!info->ring_pages)
            return -ENOMEM;
    }

    info->mmap_size = nr_pages * PAGE_SIZE;
    dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
    down_write(&ctx->mm->mmap_sem);
    info->mmap_base = do_mmap(NULL, 0, info->mmap_size,
                              PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, 0);
    if (IS_ERR((void *)info->mmap_base)) {
        up_write(&ctx->mm->mmap_sem);
        info->mmap_size = 0;
        aio_free_ring(ctx);
        return -EAGAIN;
    }

    dprintk("mmap address: 0x%08lx\n", info->mmap_base);
    info->nr_pages = get_user_pages(current, ctx->mm, info->mmap_base, nr_pages,
                                    1, 0, info->ring_pages, NULL);
    up_write(&ctx->mm->mmap_sem);

    if (unlikely(info->nr_pages != nr_pages)) {
        aio_free_ring(ctx);
        return -EAGAIN;
    }

    ctx->user_id = info->mmap_base;

    info->nr = nr_events;       /* trusted copy */

    ring = kmap_atomic(info->ring_pages[0], KM_USER0);
    ring->nr = nr_events;       /* user copy */
    ring->id = ctx->user_id;
    ring->head = ring->tail = 0;
    ring->magic = AIO_RING_MAGIC;
    ring->compat_features = AIO_RING_COMPAT_FEATURES;
    ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
    ring->header_length = sizeof(struct aio_ring);
    kunmap_atomic(ring, KM_USER0);

    return 0;
}
0
[ "CWE-190" ]
linux-2.6
75e1c70fc31490ef8a373ea2a4bea2524099b478
179,223,561,615,718,860,000,000,000,000,000,000,000
68
aio: check for multiplication overflow in do_io_submit

Tavis Ormandy pointed out that do_io_submit does not do proper bounds
checking on the passed-in iocb array:

       if (unlikely(nr < 0))
               return -EINVAL;

       if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(iocbpp)))))
               return -EFAULT;
                                                    ^^^^^^^^^^^^^^^^^^

The attached patch checks for overflow, and if it is detected, the
number of iocbs submitted is scaled down to a number that will fit in
the long.  This is an ok thing to do, as sys_io_submit is documented as
returning the number of iocbs submitted, so callers should handle a
return value of less than the 'nr' argument passed in.

Reported-by: Tavis Ormandy <[email protected]>
Signed-off-by: Jeff Moyer <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
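Illustrative note (not part of the original commit): a standalone C sketch of the overflow guard this message describes — clamp the count so the byte total cannot wrap. This shows the pattern only; it is not the kernel patch.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct iocb;  /* opaque; stands in for the kernel's struct iocb */

/* Clamp the request count so nr * sizeof(struct iocb *) cannot
 * overflow; callers must handle a short count, as the message notes. */
static long submit_clamped(long nr)
{
    if (nr < 0)
        return -1;  /* the kernel would return -EINVAL */
    if ((size_t)nr > SIZE_MAX / sizeof(struct iocb *))
        nr = (long)(SIZE_MAX / sizeof(struct iocb *));
    /* ... validate access to nr * sizeof(struct iocb *) bytes, submit ... */
    return nr;
}

int main(void)
{
    printf("clamped: %ld\n", submit_clamped(1L << 62));
    return 0;
}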
StreamDecoder& ConnectionManagerImpl::newStream(StreamEncoder& response_encoder,
                                                bool is_internally_created) {
  if (connection_idle_timer_) {
    connection_idle_timer_->disableTimer();
  }
  ENVOY_CONN_LOG(debug, "new stream", read_callbacks_->connection());
  ActiveStreamPtr new_stream(new ActiveStream(*this));
  new_stream->state_.is_internally_created_ = is_internally_created;
  new_stream->response_encoder_ = &response_encoder;
  new_stream->response_encoder_->getStream().addCallbacks(*new_stream);
  new_stream->buffer_limit_ = new_stream->response_encoder_->getStream().bufferLimit();
  // If the network connection is backed up, the stream should be made aware of it on creation.
  // Both HTTP/1.x and HTTP/2 codecs handle this in StreamCallbackHelper::addCallbacks_.
  ASSERT(read_callbacks_->connection().aboveHighWatermark() == false ||
         new_stream->high_watermark_count_ > 0);
  new_stream->moveIntoList(std::move(new_stream), streams_);
  return **streams_.begin();
}
0
[ "CWE-400", "CWE-703" ]
envoy
afc39bea36fd436e54262f150c009e8d72db5014
208,825,135,531,996,560,000,000,000,000,000,000,000
19
Track byteSize of HeaderMap internally.

Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.

Signed-off-by: Asra Ali <[email protected]>
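Illustrative note (not part of the original commit): a minimal C sketch of the caching scheme the message outlines — keep an optional byte-size sum, invalidate it on any mutable access, recompute on demand. The struct and names are invented for illustration, not Envoy's API.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct entry { const char *key; const char *val; };

struct header_map {
    struct entry entries[16];
    size_t count;
    bool size_valid;     /* plays the role of the optional */
    size_t cached_size;
};

/* Any non-const access must clear the cache, as the message describes. */
static struct entry *get_mutable(struct header_map *m, size_t i)
{
    m->size_valid = false;
    return &m->entries[i];
}

/* Recompute by summing key and value lengths (refreshByteSize analogue). */
static size_t byte_size(struct header_map *m)
{
    if (!m->size_valid) {
        size_t sum = 0;
        for (size_t i = 0; i < m->count; i++)
            sum += strlen(m->entries[i].key) + strlen(m->entries[i].val);
        m->cached_size = sum;
        m->size_valid = true;
    }
    return m->cached_size;
}

The design trade-off is the usual one for cached aggregates: reads stay O(1) while any mutation pays a full recount on the next size query.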
int ram_block_coordinated_discard_require(bool state)
{
    int ret = 0;

    ram_block_discard_disable_mutex_lock();
    if (!state) {
        ram_block_coordinated_discard_required_cnt--;
    } else if (ram_block_discard_disabled_cnt) {
        ret = -EBUSY;
    } else {
        ram_block_coordinated_discard_required_cnt++;
    }
    ram_block_discard_disable_mutex_unlock();
    return ret;
}
0
[ "CWE-908" ]
qemu
418ade7849ce7641c0f7333718caf5091a02fd4c
330,520,760,594,630,700,000,000,000,000,000,000,000
15
softmmu: Always initialize xlat in address_space_translate_for_iotlb

The bug is an uninitialized memory read, along the translate_fail
path, which results in garbage being read from iotlb_to_section,
which can lead to a crash in io_readx/io_writex.

The bug may be fixed by writing any value with zero
in ~TARGET_PAGE_MASK, so that the call to iotlb_to_section using
the xlat'ed address returns io_mem_unassigned, as desired by the
translate_fail path.

It is most useful to record the original physical page address,
which will eventually be logged by memory_region_access_valid
when the access is rejected by unassigned_mem_accepts.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1065
Signed-off-by: Richard Henderson <[email protected]>
Reviewed-by: Peter Maydell <[email protected]>
Message-Id: <[email protected]>
int snd_seq_kernel_client_ctl(int clientid, unsigned int cmd, void *arg)
{
    const struct ioctl_handler *handler;
    struct snd_seq_client *client;

    client = clientptr(clientid);
    if (client == NULL)
        return -ENXIO;

    for (handler = ioctl_handlers; handler->cmd > 0; ++handler) {
        if (handler->cmd == cmd)
            return handler->func(client, arg);
    }

    pr_debug("ALSA: seq unknown ioctl() 0x%x (type='%c', number=0x%02x)\n",
             cmd, _IOC_TYPE(cmd), _IOC_NR(cmd));
    return -ENOTTY;
}
0
[ "CWE-416", "CWE-362" ]
linux
71105998845fb012937332fe2e806d443c09e026
288,179,016,469,097,540,000,000,000,000,000,000,000
18
ALSA: seq: Fix use-after-free at creating a port

There is a potential race window opened at creating and deleting a
port via ioctl, as spotted by fuzzing.  snd_seq_create_port() creates
a port object and returns its pointer, but it doesn't take the
refcount, thus it can be deleted immediately by another thread.
Meanwhile, snd_seq_ioctl_create_port() still calls the function
snd_seq_system_client_ev_port_start() with the created port object
that is being deleted, and this triggers use-after-free like:

 BUG: KASAN: use-after-free in snd_seq_ioctl_create_port+0x504/0x630 [snd_seq] at addr ffff8801f2241cb1
 =============================================================================
 BUG kmalloc-512 (Tainted: G    B): kasan: bad access detected
 -----------------------------------------------------------------------------
 INFO: Allocated in snd_seq_create_port+0x94/0x9b0 [snd_seq] age=1 cpu=3 pid=4511
   ___slab_alloc+0x425/0x460
   __slab_alloc+0x20/0x40
   kmem_cache_alloc_trace+0x150/0x190
   snd_seq_create_port+0x94/0x9b0 [snd_seq]
   snd_seq_ioctl_create_port+0xd1/0x630 [snd_seq]
   snd_seq_do_ioctl+0x11c/0x190 [snd_seq]
   snd_seq_ioctl+0x40/0x80 [snd_seq]
   do_vfs_ioctl+0x54b/0xda0
   SyS_ioctl+0x79/0x90
   entry_SYSCALL_64_fastpath+0x16/0x75
 INFO: Freed in port_delete+0x136/0x1a0 [snd_seq] age=1 cpu=2 pid=4717
   __slab_free+0x204/0x310
   kfree+0x15f/0x180
   port_delete+0x136/0x1a0 [snd_seq]
   snd_seq_delete_port+0x235/0x350 [snd_seq]
   snd_seq_ioctl_delete_port+0xc8/0x180 [snd_seq]
   snd_seq_do_ioctl+0x11c/0x190 [snd_seq]
   snd_seq_ioctl+0x40/0x80 [snd_seq]
   do_vfs_ioctl+0x54b/0xda0
   SyS_ioctl+0x79/0x90
   entry_SYSCALL_64_fastpath+0x16/0x75
 Call Trace:
  [<ffffffff81b03781>] dump_stack+0x63/0x82
  [<ffffffff81531b3b>] print_trailer+0xfb/0x160
  [<ffffffff81536db4>] object_err+0x34/0x40
  [<ffffffff815392d3>] kasan_report.part.2+0x223/0x520
  [<ffffffffa07aadf4>] ? snd_seq_ioctl_create_port+0x504/0x630 [snd_seq]
  [<ffffffff815395fe>] __asan_report_load1_noabort+0x2e/0x30
  [<ffffffffa07aadf4>] snd_seq_ioctl_create_port+0x504/0x630 [snd_seq]
  [<ffffffffa07aa8f0>] ? snd_seq_ioctl_delete_port+0x180/0x180 [snd_seq]
  [<ffffffff8136be50>] ? taskstats_exit+0xbc0/0xbc0
  [<ffffffffa07abc5c>] snd_seq_do_ioctl+0x11c/0x190 [snd_seq]
  [<ffffffffa07abd10>] snd_seq_ioctl+0x40/0x80 [snd_seq]
  [<ffffffff8136d433>] ? acct_account_cputime+0x63/0x80
  [<ffffffff815b515b>] do_vfs_ioctl+0x54b/0xda0
  .....

We may fix this in a few different ways, and in this patch, it's fixed
simply by taking the refcount properly at snd_seq_create_port() and
letting the caller unref the object after use.  Also, there is another
potential use-after-free by sprintf() call in snd_seq_create_port(),
and this is moved inside the lock.

This fix covers CVE-2017-15265.

Reported-and-tested-by: Michael23 Yu <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
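Illustrative note (not part of the original commit): a single-threaded C sketch of the create-with-reference pattern the fix applies; the kernel uses proper locking and atomic refcounts, which are omitted here, and all names are hypothetical.

#include <stdlib.h>

struct port {
    int refcount;
    /* ... payload ... */
};

/* Creation returns the object with one reference already taken, so a
 * concurrent delete cannot free it while the creator still uses it. */
static struct port *port_create(void)
{
    struct port *p = calloc(1, sizeof(*p));
    if (!p)
        return NULL;
    p->refcount = 1;          /* reference owned by the caller */
    return p;
}

static void port_unref(struct port *p)
{
    if (p && --p->refcount == 0)
        free(p);
}

/* Caller pattern: use the object, then drop the creation reference. */
static void create_port_ioctl(void)
{
    struct port *p = port_create();
    if (!p)
        return;
    /* ... notify listeners, fill in fields ... */
    port_unref(p);            /* safe: object stayed alive throughout */
}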
void MD5_Final(unsigned char* hash, MD5_CTX* md5)
{
    reinterpret_cast<TaoCrypt::MD5*>(md5->buffer)->Final(hash);
}
0
[ "CWE-254" ]
mysql-server
e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69
60,619,726,522,717,080,000,000,000,000,000,000,000
4
Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED.
bluetooth_client_get_device (BluetoothClient *client, const char *path)
{
    BluetoothClientPrivate *priv = BLUETOOTH_CLIENT_GET_PRIVATE(client);
    GtkTreeIter iter;
    GDBusProxy *proxy;

    if (get_iter_from_path (priv->store, &iter, path) == FALSE) {
        return NULL;
    }

    gtk_tree_model_get (GTK_TREE_MODEL(priv->store), &iter,
                        BLUETOOTH_COLUMN_PROXY, &proxy, -1);
    return proxy;
}
0
[]
gnome-bluetooth
6b5086d42ea64d46277f3c93b43984f331d12f89
83,802,336,452,483,410,000,000,000,000,000,000,000
16
lib: Fix Discoverable being reset when turned off

Work-around race in bluetoothd which would reset the discoverable
flag if a timeout change was requested before discoverable finished
being set to off:

See https://bugzilla.redhat.com/show_bug.cgi?id=1602985
int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
{
    struct ipv6_pinfo *np = inet6_sk(sk);
    struct inet6_skb_parm *opt = IP6CB(skb);

    if (np->rxopt.all) {
        if ((opt->hop && (np->rxopt.bits.hopopts ||
                          np->rxopt.bits.ohopopts)) ||
            ((IPV6_FLOWINFO_MASK & *(__be32 *)skb_network_header(skb)) &&
             np->rxopt.bits.rxflow) ||
            (opt->srcrt && (np->rxopt.bits.srcrt ||
                            np->rxopt.bits.osrcrt)) ||
            ((opt->dst1 || opt->dst0) && (np->rxopt.bits.dstopts ||
                                          np->rxopt.bits.odstopts)))
            return 1;
    }
    return 0;
}
0
[]
linux-2.6
2e761e0532a784816e7e822dbaaece8c5d4be14d
52,738,426,935,171,680,000,000,000,000,000,000,000
19
ipv6 netns: init net is used to set bindv6only for new sock

The bindv6only is tuned via sysctl. It is already on a struct net and
per-net sysctls allow for its modification (ipv6_sysctl_net_init).
Despite this the value configured in the init net is used for the rest
of them.

Signed-off-by: Pavel Emelyanov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
cnt_transmit(struct worker *wrk, struct req *req)
{
    struct boc *boc;
    uint16_t status;
    int sendbody, head;
    intmax_t clval;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
    CHECK_OBJ_NOTNULL(req->transport, TRANSPORT_MAGIC);
    CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);
    AZ(req->stale_oc);
    AZ(req->res_mode);

    /* Grab a ref to the bo if there is one (=streaming) */
    boc = HSH_RefBoc(req->objcore);
    if (boc && boc->state < BOS_STREAM)
        ObjWaitState(req->objcore, BOS_STREAM);
    clval = http_GetContentLength(req->resp);
    /* RFC 7230, 3.3.3 */
    status = http_GetStatus(req->resp);
    head = !strcmp(req->http0->hd[HTTP_HDR_METHOD].b, "HEAD");

    if (boc != NULL)
        req->resp_len = clval;
    else
        req->resp_len = ObjGetLen(req->wrk, req->objcore);

    if (head || status < 200 || status == 204 || status == 304) {
        // rfc7230,l,1748,1752
        sendbody = 0;
    } else {
        sendbody = 1;
    }

    if (req->filter_list == NULL)
        req->filter_list = resp_Get_Filter_List(req);
    if (req->filter_list == NULL ||
        VCL_StackVDP(req, req->vcl, req->filter_list)) {
        VSLb(req->vsl, SLT_Error, "Failure to push processors");
        req->doclose = SC_OVERLOAD;
    } else {
        if (cache_param->http_range_support && status == 200)
            http_ForceHeader(req->resp, H_Accept_Ranges, "bytes");

        if (status < 200 || status == 204) {
            // rfc7230,l,1691,1695
            http_Unset(req->resp, H_Content_Length);
        } else if (status == 304) {
            // rfc7230,l,1675,1677
            http_Unset(req->resp, H_Content_Length);
        } else if (clval >= 0 && clval == req->resp_len) {
            /* Reuse C-L header */
        } else if (head && req->objcore->flags & OC_F_HFM) {
            /*
             * Don't touch C-L header (debatable)
             *
             * The only way to do it correctly would be to GET
             * to the backend, and discard the body once the
             * filters have had a chance to chew on it, but that
             * would negate the "pass for huge objects" use case.
             */
        } else {
            http_Unset(req->resp, H_Content_Length);
            if (req->resp_len >= 0)
                http_PrintfHeader(req->resp,
                    "Content-Length: %jd", req->resp_len);
        }
        if (req->resp_len == 0)
            sendbody = 0;
    }

    req->transport->deliver(req, boc, sendbody);

    VSLb_ts_req(req, "Resp", W_TIM_real(wrk));

    HSH_Cancel(wrk, req->objcore, boc);

    if (boc != NULL)
        HSH_DerefBoc(wrk, req->objcore);

    (void)HSH_DerefObjCore(wrk, &req->objcore, HSH_RUSH_POLICY);
    http_Teardown(req->resp);

    req->filter_list = NULL;
    req->res_mode = 0;
    return (REQ_FSM_DONE);
}
0
[ "CWE-212" ]
varnish-cache
bd7b3d6d47ccbb5e1747126f8e2a297f38e56b8c
233,120,562,688,409,100,000,000,000,000,000,000,000
87
Clear err_code and err_reason at start of request handling

req->err_code and req->err_reason are set when going to synthetic
handling. From there the resp.reason HTTP field is set from
req->err_reason if set, or the generic code based on req->err_code is
used if it was NULL. This patch clears these members so that a value
from the handling of a previous request doesn't linger.

Fixes: VSV00004
int MethodHandles::find_MemberNames(KlassHandle k,
                                    Symbol* name, Symbol* sig,
                                    int mflags, KlassHandle caller,
                                    int skip, objArrayHandle results) {
  // %%% take caller into account!

  Thread* thread = Thread::current();

  if (k.is_null() || !k->oop_is_instance())  return -1;

  int rfill = 0, rlimit = results->length(), rskip = skip;
  // overflow measurement:
  int overflow = 0, overflow_limit = MAX2(1000, rlimit);

  int match_flags = mflags;
  bool search_superc = ((match_flags & SEARCH_SUPERCLASSES) != 0);
  bool search_intfc  = ((match_flags & SEARCH_INTERFACES) != 0);
  bool local_only = !(search_superc | search_intfc);
  bool classes_only = false;

  if (name != NULL) {
    if (name->utf8_length() == 0)  return 0; // a match is not possible
  }
  if (sig != NULL) {
    if (sig->utf8_length() == 0)  return 0; // a match is not possible
    if (sig->byte_at(0) == '(')
      match_flags &= ~(IS_FIELD | IS_TYPE);
    else
      match_flags &= ~(IS_CONSTRUCTOR | IS_METHOD);
  }

  if ((match_flags & IS_TYPE) != 0) {
    // NYI, and Core Reflection works quite well for this query
  }

  if ((match_flags & IS_FIELD) != 0) {
    for (FieldStream st(k(), local_only, !search_intfc); !st.eos(); st.next()) {
      if (name != NULL && st.name() != name)
        continue;
      if (sig != NULL && st.signature() != sig)
        continue;
      // passed the filters
      if (rskip > 0) {
        --rskip;
      } else if (rfill < rlimit) {
        Handle result(thread, results->obj_at(rfill++));
        if (!java_lang_invoke_MemberName::is_instance(result()))
          return -99;  // caller bug!
        oop saved = MethodHandles::init_field_MemberName(result, st.field_descriptor());
        if (saved != result())
          results->obj_at_put(rfill-1, saved);  // show saved instance to user
      } else if (++overflow >= overflow_limit) {
        match_flags = 0; break; // got tired of looking at overflow
      }
    }
  }

  if ((match_flags & (IS_METHOD | IS_CONSTRUCTOR)) != 0) {
    // watch out for these guys:
    Symbol* init_name   = vmSymbols::object_initializer_name();
    Symbol* clinit_name = vmSymbols::class_initializer_name();
    if (name == clinit_name)  clinit_name = NULL; // hack for exposing <clinit>
    bool negate_name_test = false;
    // fix name so that it captures the intention of IS_CONSTRUCTOR
    if (!(match_flags & IS_METHOD)) {
      // constructors only
      if (name == NULL) {
        name = init_name;
      } else if (name != init_name) {
        return 0;               // no constructors of this method name
      }
    } else if (!(match_flags & IS_CONSTRUCTOR)) {
      // methods only
      if (name == NULL) {
        name = init_name;
        negate_name_test = true; // if we see the name, we *omit* the entry
      } else if (name == init_name) {
        return 0;               // no methods of this constructor name
      }
    } else {
      // caller will accept either sort; no need to adjust name
    }
    for (MethodStream st(k(), local_only, !search_intfc); !st.eos(); st.next()) {
      Method* m = st.method();
      Symbol* m_name = m->name();
      if (m_name == clinit_name)
        continue;
      if (name != NULL && ((m_name != name) ^ negate_name_test))
        continue;
      if (sig != NULL && m->signature() != sig)
        continue;
      // passed the filters
      if (rskip > 0) {
        --rskip;
      } else if (rfill < rlimit) {
        Handle result(thread, results->obj_at(rfill++));
        if (!java_lang_invoke_MemberName::is_instance(result()))
          return -99;  // caller bug!
        CallInfo info(m);
        // Since this is going through the methods to create MemberNames, don't search
        // for matching methods already in the table
        oop saved = MethodHandles::init_method_MemberName(result, info, /*intern*/false);
        if (saved != result())
          results->obj_at_put(rfill-1, saved);  // show saved instance to user
      } else if (++overflow >= overflow_limit) {
        match_flags = 0; break; // got tired of looking at overflow
      }
    }
  }

  // return number of elements we at leasted wanted to initialize
  return rfill + overflow;
}
0
[]
jdk8u
f14e35d20e1a4d0f507f05838844152f2242c6d3
161,945,484,591,221,170,000,000,000,000,000,000,000
113
8281866: Enhance MethodHandle invocations

Reviewed-by: andrew
Backport-of: d974d9da365f787f67971d88c79371c8b0769f75
void sctp_assoc_del_peer(struct sctp_association *asoc,
                         const union sctp_addr *addr)
{
    struct list_head *pos;
    struct list_head *temp;
    struct sctp_transport *transport;

    list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
        transport = list_entry(pos, struct sctp_transport, transports);
        if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
            /* Do book keeping for removing the peer and free it. */
            sctp_assoc_rm_peer(asoc, transport);
            break;
        }
    }
}
0
[ "CWE-287" ]
linux-2.6
add52379dde2e5300e2d574b172e62c6cf43b3d3
272,704,959,219,008,340,000,000,000,000,000,000,000
16
sctp: Fix oops when INIT-ACK indicates that peer doesn't support AUTH

If INIT-ACK is received with SupportedExtensions parameter which
indicates that the peer does not support AUTH, the packet will be
silently ignore, and sctp_process_init() do cleanup all of the
transports in the association.  When T1-Init timer is expires, OOPS
happen while we try to choose a different init transport.

The solution is to only clean up the non-active transports, i.e the
ones that the peer added.  However, that introduces a problem with
sctp_connectx(), because we don't mark the proper state for the
transports provided by the user.  So, we'll simply mark user-provided
transports as ACTIVE.  That will allow INIT retransmissions to work
properly in the sctp_connectx() context and prevent the crash.

Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
    int err = sock_queue_rcv_skb(sk, skb);

    if (err < 0)
        kfree_skb(skb);
    return err ? NET_RX_DROP : NET_RX_SUCCESS;
}
0
[ "CWE-20" ]
net
bceaa90240b6019ed73b49965eac7d167610be69
74,519,092,560,859,760,000,000,000,000,000,000,000
8
inet: prevent leakage of uninitialized memory to user in recv syscalls

Only update *addr_len when we actually fill in sockaddr, otherwise we
can return uninitialized memory from the stack to the caller in the
recvfrom, recvmmsg and recvmsg syscalls. Drop the (addr_len == NULL)
checks because we only get called with a valid addr_len pointer either
from sock_common_recvmsg or inet_recvmsg.

If a blocking read waits on a socket which is concurrently shut down we
now return zero and set msg_msgnamelen to 0.

Reported-by: mpb <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
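Illustrative note (not part of the original commit): a small C sketch of the discipline this message describes — write the length out-parameter only after the address buffer has actually been filled. Names are hypothetical; this is not the kernel's recv path.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Fill *addr_len only when the sockaddr was actually written; otherwise
 * leave the caller's buffer and length untouched so no stack garbage
 * can leak back to user space. */
static int recv_fill_addr(struct sockaddr *uaddr, int *addr_len,
                          const struct sockaddr_in *src, int have_peer)
{
    if (!have_peer) {
        /* Do NOT write *addr_len here. */
        return 0;
    }
    memcpy(uaddr, src, sizeof(*src));
    *addr_len = sizeof(*src);   /* set only after the copy succeeded */
    return 0;
}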
static inline void free_chunk(struct audit_chunk *chunk)
{
    call_rcu(&chunk->head, __free_chunk);
}
1
[ "CWE-362" ]
linux-2.6
8f7b0ba1c853919b85b54774775f567f30006107
232,660,666,733,245,280,000,000,000,000,000,000,000
4
Fix inotify watch removal/umount races

Inotify watch removals suck violently.

To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex.  That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount.  We
can *NOT* just grab a reference to a watch - inotify_unmount_inodes()
will happily sail past it and we'll end with reference to inode
potentially outliving its superblock.

Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done.  Cleanup is just deactivate_super().

However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly
wait until the superblock is shut down and the watch in question is
pining for fjords.  That's fine, but there is a problem - we might have
hit the window between ->s_active getting to 0 / ->s_count - below
S_BIAS (i.e. the moment when superblock is past the point of no return
and is heading for shutdown) and the moment when deactivate_super()
acquires ->s_umount.

We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable.  OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().  So we could grab a reference to watch and do
the rest as above, just with drop_super() instead of
deactivate_super(), right?  Wrong.  We had to drop ih->mutex before we
could grab ->s_umount.  So the watch could've been gone already.

That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer.  If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address.  That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that.  Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.

So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check.  If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.

And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.

Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
void Compute(OpKernelContext* ctx) override {
  const Tensor& val = ctx->input(0);
  auto session_state = ctx->session_state();
  OP_REQUIRES(ctx, session_state != nullptr,
              errors::FailedPrecondition(
                  "GetSessionHandle called on null session state"));
  int64_t id = session_state->GetNewId();
  TensorStore::TensorAndKey tk{val, id, requested_device()};
  OP_REQUIRES_OK(ctx, ctx->tensor_store()->AddTensor(name(), tk));

  Tensor* handle = nullptr;
  OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &handle));
  if (ctx->expected_output_dtype(0) == DT_RESOURCE) {
    ResourceHandle resource_handle = MakeResourceHandle<Tensor>(
        ctx, SessionState::kTensorHandleResourceTypeName,
        tk.GetHandle(name()));
    resource_handle.set_maybe_type_name(
        SessionState::kTensorHandleResourceTypeName);
    handle->scalar<ResourceHandle>()() = resource_handle;
  } else {
    // Legacy behavior in V1.
    handle->flat<tstring>().setConstant(tk.GetHandle(name()));
  }
}
0
[ "CWE-20", "CWE-703" ]
tensorflow
cff267650c6a1b266e4b4500f69fbc49cdd773c5
323,510,880,336,430,400,000,000,000,000,000,000,000
24
Fix tf.raw_ops.DeleteSessionTensor vulnerability with invalid `handle`.

Check that `handle` input is actually a scalar before treating it as such.

PiperOrigin-RevId: 445228994
prime_trust_anchor(struct module_qstate* qstate, struct val_qstate* vq,
	int id, struct trust_anchor* toprime)
{
	struct module_qstate* newq = NULL;
	int ret = generate_request(qstate, id, toprime->name,
		toprime->namelen, LDNS_RR_TYPE_DNSKEY, toprime->dclass,
		BIT_CD, &newq, 0);

	if(newq && qstate->env->cfg->trust_anchor_signaling &&
		!generate_keytag_query(qstate, id, toprime)) {
		verbose(VERB_ALGO, "keytag signaling query failed");
		return 0;
	}
	if(!ret) {
		verbose(VERB_ALGO, "Could not prime trust anchor");
		return 0;
	}
	/* ignore newq; validator does not need state created for that
	 * query, and its a 'normal' for iterator as well */
	vq->wait_prime_ta = 1; /* to elicit PRIME_RESP_STATE processing
		from the validator inform_super() routine */
	/* store trust anchor name for later lookup when prime returns */
	vq->trust_anchor_name = regional_alloc_init(qstate->region,
		toprime->name, toprime->namelen);
	vq->trust_anchor_len = toprime->namelen;
	vq->trust_anchor_labs = toprime->namelabs;
	if(!vq->trust_anchor_name) {
		log_err("Could not prime trust anchor: out of memory");
		return 0;
	}
	return 1;
}
0
[ "CWE-613", "CWE-703" ]
unbound
f6753a0f1018133df552347a199e0362fc1dac68
131,785,875,584,389,000,000,000,000,000,000,000,000
32
- Fix the novel ghost domain issues CVE-2022-30698 and CVE-2022-30699.
void InstanceUtil::loadBootstrapConfig(envoy::config::bootstrap::v3::Bootstrap& bootstrap,
                                       const Options& options,
                                       ProtobufMessage::ValidationVisitor& validation_visitor,
                                       Api::Api& api) {
  const std::string& config_path = options.configPath();
  const std::string& config_yaml = options.configYaml();
  const envoy::config::bootstrap::v3::Bootstrap& config_proto = options.configProto();

  // Exactly one of config_path and config_yaml should be specified.
  if (config_path.empty() && config_yaml.empty() && config_proto.ByteSize() == 0) {
    throw EnvoyException("At least one of --config-path or --config-yaml or Options::configProto() "
                         "should be non-empty");
  }

  if (!config_path.empty()) {
    loadBootsrap(
        options.bootstrapVersion(), bootstrap,
        [&config_path, &validation_visitor, &api](Protobuf::Message& message, bool do_boosting) {
          MessageUtil::loadFromFile(config_path, message, validation_visitor, api, do_boosting);
        });
  }
  if (!config_yaml.empty()) {
    envoy::config::bootstrap::v3::Bootstrap bootstrap_override;
    loadBootsrap(options.bootstrapVersion(), bootstrap_override,
                 [&config_yaml, &validation_visitor](Protobuf::Message& message, bool do_boosting) {
                   MessageUtil::loadFromYaml(config_yaml, message, validation_visitor, do_boosting);
                 });
    // TODO(snowp): The fact that we do a merge here doesn't seem to be covered under test.
    bootstrap.MergeFrom(bootstrap_override);
  }
  if (config_proto.ByteSize() != 0) {
    bootstrap.MergeFrom(config_proto);
  }
  MessageUtil::validate(bootstrap, validation_visitor);
}
0
[ "CWE-400" ]
envoy
542f84c66e9f6479bc31c6f53157c60472b25240
56,297,176,491,095,890,000,000,000,000,000,000,000
35
overload: Runtime configurable global connection limits (#147)

Signed-off-by: Tony Allen <[email protected]>
Item_func_or_sum(THD *thd, Item_func_or_sum *item):
  Item_result_field(thd, item), Item_args(thd, item),
  Used_tables_and_const_cache(item)
{ }
0
[ "CWE-617" ]
server
2e7891080667c59ac80f788eef4d59d447595772
151,721,658,218,341,220,000,000,000,000,000,000,000
3
MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view

This bug could manifest itself after pushing a where condition over a
mergeable derived table / view / CTE DT into a grouping view / derived
table / CTE V whose item list contained set functions with constant
arguments such as MIN(2), SUM(1) etc. In such cases the field references
used in the condition pushed into the view V that correspond set
functions are wrapped into Item_direct_view_ref wrappers. Due to a wrong
implementation of the virtual method const_item() for the class
Item_direct_view_ref the wrapped set functions with constant arguments
could be erroneously taken for constant items. This could lead to a
wrong result set returned by the main select query in 10.2. In 10.4
where a possibility of pushing condition from HAVING into WHERE had been
added this could cause a crash.

Approved by Sergey Petrunya <[email protected]>
ZEND_API int zend_set_local_var_str(const char *name, size_t len, zval *value, int force) /* {{{ */
{
	zend_execute_data *execute_data = EG(current_execute_data);

	while (execute_data && (!execute_data->func || !ZEND_USER_CODE(execute_data->func->common.type))) {
		execute_data = execute_data->prev_execute_data;
	}

	if (execute_data) {
		if (!execute_data->symbol_table) {
			zend_ulong h = zend_hash_func(name, len);
			zend_op_array *op_array = &execute_data->func->op_array;
			if (EXPECTED(op_array->last_var)) {
				zend_string **str = op_array->vars;
				zend_string **end = str + op_array->last_var;
				do {
					if (ZSTR_H(*str) == h &&
					    ZSTR_LEN(*str) == len &&
					    memcmp(ZSTR_VAL(*str), name, len) == 0) {
						zval *var = EX_VAR_NUM(str - op_array->vars);
						zval_ptr_dtor(var);
						ZVAL_COPY_VALUE(var, value);
						return SUCCESS;
					}
					str++;
				} while (str != end);
			}
			if (force) {
				zend_array *symbol_table = zend_rebuild_symbol_table();
				if (symbol_table) {
					return zend_hash_str_update(symbol_table, name, len, value) ? SUCCESS : FAILURE;;
				}
			}
		} else {
			return (zend_hash_str_update_ind(execute_data->symbol_table, name, len, value) != NULL) ? SUCCESS : FAILURE;
		}
	}
	return FAILURE;
}
0
[ "CWE-134" ]
php-src
b101a6bbd4f2181c360bd38e7683df4a03cba83e
133,562,586,571,695,900,000,000,000,000,000,000,000
40
Use format string
static void lp_attach (struct parport *port)
{
	unsigned int i;

	switch (parport_nr[0]) {
	case LP_PARPORT_UNSPEC:
	case LP_PARPORT_AUTO:
		if (parport_nr[0] == LP_PARPORT_AUTO &&
		    port->probe_info[0].class != PARPORT_CLASS_PRINTER)
			return;
		if (lp_count == LP_NO) {
			printk(KERN_INFO "lp: ignoring parallel port (max. %d)\n",LP_NO);
			return;
		}
		if (!lp_register(lp_count, port))
			lp_count++;
		break;

	default:
		for (i = 0; i < LP_NO; i++) {
			if (port->number == parport_nr[i]) {
				if (!lp_register(i, port))
					lp_count++;
				break;
			}
		}
		break;
	}
}
0
[ "CWE-787" ]
linux
3e21f4af170bebf47c187c1ff8bf155583c9f3b1
116,872,138,336,984,320,000,000,000,000,000,000,000
29
char: lp: fix possible integer overflow in lp_setup()

The lp_setup() code doesn't apply any bounds checking when passing
"lp=none", and only in this case, resulting in an overflow of the
parport_nr[] array. All versions in Git history are affected.

Reported-By: Roee Hay <[email protected]>
Cc: Ben Hutchings <[email protected]>
Cc: [email protected]
Signed-off-by: Willy Tarreau <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
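Illustrative note (not part of the original commit): a standalone C sketch of the bounds check this fix adds around writes into a fixed-size slot array. The helper name and messages are hypothetical.

#include <stdio.h>

#define LP_NO 8

static int parport_nr[LP_NO];

/* Reject out-of-range slots before writing, so a long "lp=" option
 * list can never index past the end of parport_nr[]. */
static int lp_store_port(unsigned int slot, int value)
{
    if (slot >= LP_NO) {
        fprintf(stderr, "lp: too many ports, ignoring %d\n", value);
        return -1;
    }
    parport_nr[slot] = value;
    return 0;
}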
TEST_F(QueryPlannerTest, OrOfAnd4) {
    addIndex(BSON("a" << 1 << "b" << 1));
    runQuery(
        fromjson("{$or: [{a:{$gt:1,$lt:5}, b:{$gt:0,$lt:3}, c:6}, "
                 "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}"));

    assertNumSolutions(2U);
    assertSolutionExists("{cscan: {dir: 1}}");
    assertSolutionExists(
        "{or: {nodes: ["
        "{fetch: {filter: {c:6}, node: {ixscan: {pattern: {a:1,b:1}, "
        "bounds: {a: [[1,5,false,false]], b: [[0,3,false,false]]}}}}}, "
        "{fetch: {filter: {$and:[{c:{$lt:10}},{c:{$gt:0}}]}, node: "
        "{ixscan: {pattern: {a:1,b:1}, "
        " bounds: {a:[[3,3,true,true]], b:[[1,2,false,false]]}}}}}]}}");
}
0
[]
mongo
ee97c0699fd55b498310996ee002328e533681a3
314,269,255,720,389,830,000,000,000,000,000,000,000
16
SERVER-36993 Fix crash due to incorrect $or pushdown for indexed $expr.
int setup_tests(void)
{
#ifdef OPENSSL_NO_SM2
    TEST_note("SM2 is disabled.");
#else
    ADD_TEST(sm2_crypt_test);
    ADD_TEST(sm2_sig_test);
#endif
    return 1;
}
0
[ "CWE-120" ]
openssl
59f5e75f3bced8fc0e130d72a3f582cf7b480b46
57,117,527,097,957,010,000,000,000,000,000,000,000
10
Correctly calculate the length of SM2 plaintext given the ciphertext

Previously the length of the SM2 plaintext could be incorrectly
calculated. The plaintext length was calculated by taking the ciphertext
length and taking off an "overhead" value.

The overhead value was assumed to have a "fixed" element of 10 bytes.
This is incorrect since in some circumstances it can be more than 10
bytes. Additionally the overhead included the length of two integers C1x
and C1y, which were assumed to be the same length as the field size (32
bytes for the SM2 curve). However in some cases these integers can have
an additional padding byte when the msb is set, to disambiguate them
from negative integers. Additionally the integers can also be less than
32 bytes in length in some cases.

If the calculated overhead is incorrect and larger than the actual value
this can result in the calculated plaintext length being too small.
Applications are likely to allocate buffer sizes based on this and
therefore a buffer overrun can occur.

CVE-2021-3711

Issue reported by John Ouyang.

Reviewed-by: Paul Dale <[email protected]>
Reviewed-by: Nicola Tuveri <[email protected]>
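Illustrative note (not part of the original commit): a C sketch of the sizing principle the message implies — when deriving a plaintext bound from a variable-overhead ciphertext encoding, only subtract components with a guaranteed fixed size, so the estimate errs large. This is an assumption-laden simplification, not OpenSSL's actual calculation.

#include <stddef.h>

/* Conservative upper bound for SM2 plaintext size. The C1 integers may
 * be shorter than the field size or carry an extra sign-padding byte,
 * so they are not subtracted at a nominal length; only the hash C3
 * (md_size) has a fixed size. Over-estimating buffer needs is safe;
 * under-estimating is what caused the overrun. */
static size_t sm2_plaintext_bound(size_t ciphertext_len, size_t md_size)
{
    if (ciphertext_len < md_size)
        return 0;
    return ciphertext_len - md_size;  /* deliberately generous */
}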
big5_mbc_to_code(const UChar* p, const UChar* end)
{
  return onigenc_mbn_mbc_to_code(ONIG_ENCODING_BIG5, p, end);
}
0
[ "CWE-125" ]
oniguruma
65a9b1aa03c9bc2dc01b074295b9603232cb3b78
257,643,886,653,998,460,000,000,000,000,000,000,000
4
onig-5.9.2
static int ipx_getname(struct socket *sock, struct sockaddr *uaddr,
                       int *uaddr_len, int peer)
{
	struct ipx_address *addr;
	struct sockaddr_ipx sipx;
	struct sock *sk = sock->sk;
	struct ipx_sock *ipxs = ipx_sk(sk);
	int rc;

	*uaddr_len = sizeof(struct sockaddr_ipx);

	lock_sock(sk);
	if (peer) {
		rc = -ENOTCONN;
		if (sk->sk_state != TCP_ESTABLISHED)
			goto out;

		addr = &ipxs->dest_addr;
		sipx.sipx_network = addr->net;
		sipx.sipx_port = addr->sock;
		memcpy(sipx.sipx_node, addr->node, IPX_NODE_LEN);
	} else {
		if (ipxs->intrfc) {
			sipx.sipx_network = ipxs->intrfc->if_netnum;
#ifdef CONFIG_IPX_INTERN
			memcpy(sipx.sipx_node, ipxs->node, IPX_NODE_LEN);
#else
			memcpy(sipx.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN);
#endif /* CONFIG_IPX_INTERN */
		} else {
			sipx.sipx_network = 0;
			memset(sipx.sipx_node, '\0', IPX_NODE_LEN);
		}

		sipx.sipx_port = ipxs->port;
	}

	sipx.sipx_family = AF_IPX;
	sipx.sipx_type = ipxs->type;
	sipx.sipx_zero = 0;
	memcpy(uaddr, &sipx, sizeof(sipx));

	rc = 0;
out:
	release_sock(sk);
	return rc;
}
0
[ "CWE-416" ]
linux
ee0d8d8482345ff97a75a7d747efc309f13b0d80
27,791,678,458,256,914,000,000,000,000,000,000,000
49
ipx: call ipxitf_put() in ioctl error path

We should call ipxitf_put() if the copy_to_user() fails.

Reported-by: 李强 <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
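Illustrative note (not part of the original commit): a standalone C sketch of the error-path discipline this one-line fix restores — every exit taken after a reference is acquired must release it, including the user-copy failure. Types and the injected copy function are hypothetical.

#include <string.h>
#include <errno.h>

struct intrfc { int refcount; };

static void ipxitf_put(struct intrfc *i) { i->refcount--; }

/* The copy routine is injected so the sketch stays self-contained;
 * it returns nonzero on failure, like copy_to_user(). */
static int ioctl_get_config(struct intrfc *i, void *user_buf,
                            const void *cfg, size_t len,
                            int (*copy_out)(void *, const void *, size_t))
{
    int rc = 0;

    i->refcount++;                     /* reference taken */
    if (copy_out(user_buf, cfg, len))
        rc = -EFAULT;                  /* fall through to the put */
    ipxitf_put(i);                     /* released on every path */
    return rc;
}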
CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index)
{
    if (index < 0)
    {
        return NULL;
    }

    return get_array_item(array, (size_t)index);
}
0
[ "CWE-754", "CWE-787" ]
cJSON
be749d7efa7c9021da746e685bd6dec79f9dd99b
4,046,838,043,036,896,400,000,000,000,000,000,000
9
Fix crash of cJSON_GetObjectItemCaseSensitive when calling it on arrays
void trim_whitespace(CHARSET_INFO *cs, LEX_STRING *str, uint *prefix_length)
{
  /*
    TODO:
    This code assumes that there are no multi-bytes characters
    that can be considered white-space.
  */
  *prefix_length= 0;
  while ((str->length > 0) && (my_isspace(cs, str->str[0])))
  {
    (*prefix_length)++;
    str->length --;
    str->str ++;
  }

  /*
    FIXME:
    Also, parsing backward is not safe with multi bytes characters
  */
  while ((str->length > 0) && (my_isspace(cs, str->str[str->length-1])))
  {
    str->length --;
  }
}
0
[ "CWE-476" ]
server
3a52569499e2f0c4d1f25db1e81617a9d9755400
335,576,851,634,453,860,000,000,000,000,000,000,000
25
MDEV-25636: Bug report: abortion in sql/sql_parse.cc:6294

The assertion failure was caused by this query

  select /*id=1*/ from t1
  where
    col= ( select /*id=2*/ from ... where corr_cond1
           union
           select /*id=4*/ from ... where corr_cond2)

Here,
- select with id=2 was correlated due to corr_cond1.
- select with id=4 was initially correlated due to corr_cond2, but then
  the optimizer optimized away the correlation, making the select with
  id=4 uncorrelated.

However, since select with id=2 remained correlated, the execution had
to re-compute the whole UNION. When it tried to execute select with
id=4, it hit an assertion (join buffer already free'd).

This is because select with id=4 has freed its execution structures
after it has been executed once. The select is uncorrelated, so it did
not expect it would need to be executed for the second time.

Fixed this by adding this logic in
st_select_lex::optimize_unflattened_subqueries():
If a member of a UNION is correlated, mark all its members as
correlated, so that they are prepared to be executed multiple times.
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	access |= PFERR_WRITE_MASK;
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
}
0
[ "CWE-200" ]
kvm
831d9d02f9522e739825a51a11e3bc5aa531a905
73,833,411,671,435,550,000,000,000,000,000,000,000
6
KVM: x86: fix information leak to userland

Structures kvm_vcpu_events, kvm_debugregs, kvm_pit_state2 and
kvm_clock_data are copied to userland with some padding and reserved
fields uninitialized. It leads to leaking of contents of kernel stack
memory. We have to initialize them to zero.

In patch v1 Jan Kiszka suggested to fill reserved fields with zeros
instead of memset'ting the whole struct. It makes sense as these
fields are explicitly marked as padding. No more fields need zeroing.

KVM-Stable-Tag.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
voting_schedule_free(voting_schedule_t *voting_schedule_to_free)
{
  if (!voting_schedule_to_free)
    return;
  tor_free(voting_schedule_to_free);
}
0
[]
tor
a0ef3cf0880e3cd343977b3fcbd0a2e7572f0cb4
116,112,019,823,432,270,000,000,000,000,000,000,000
6
Prevent int underflow in dirvote.c compare_vote_rs_.

This should be "impossible" without making a SHA1 collision, but let's
not keep the assumption that SHA1 collisions are super-hard.

This prevents another case related to 21278. There should be no
behavioral change unless -ftrapv is on.
DEFUN (show_ip_bgp_vpnv4_all_neighbor_advertised_routes,
       show_ip_bgp_vpnv4_all_neighbor_advertised_routes_cmd,
       "show ip bgp vpnv4 all neighbors A.B.C.D advertised-routes",
       SHOW_STR
       IP_STR
       BGP_STR
       "Display VPNv4 NLRI specific information\n"
       "Display information about all VPNv4 NLRIs\n"
       "Detailed information on TCP and BGP neighbor connections\n"
       "Neighbor to display information about\n"
       "Display the routes advertised to a BGP neighbor\n")
{
  int ret;
  struct peer *peer;
  union sockunion su;

  ret = str2sockunion (argv[0], &su);
  if (ret < 0)
    {
      vty_out (vty, "%% Malformed address: %s%s", argv[0], VTY_NEWLINE);
      return CMD_WARNING;
    }
  peer = peer_lookup (NULL, &su);
  if (! peer || ! peer->afc[AFI_IP][SAFI_MPLS_VPN])
    {
      vty_out (vty, "%% No such neighbor or address family%s", VTY_NEWLINE);
      return CMD_WARNING;
    }

  return show_adj_route_vpn (vty, peer, NULL);
}
0
[ "CWE-119" ]
quagga
a3bc7e9400b214a0f078fdb19596ba54214a1442
263,404,756,089,319,540,000,000,000,000,000,000,000
31
bgpd: Fix VU#270232, VPNv4 NLRI parser memcpys to stack on unchecked length

Address CERT vulnerability report VU#270232, memcpy to stack data
structure based on length field from packet data whose length field
upper-bound was not properly checked.

This likely allows BGP peers that are enabled to send Labeled-VPN SAFI
routes to Quagga bgpd to remotely exploit Quagga bgpd.

Mitigation: Do not enable Labeled-VPN SAFI with untrusted neighbours.
Impact: Labeled-VPN SAFI is not enabled by default.

* bgp_mplsvpn.c: (bgp_nlri_parse_vpnv4) The prefixlen is checked for
  lower-bound, but not for upper-bound against received data length.
  The packet data is then memcpy'd to the stack based on the prefixlen.

  Extend the prefixlen check to ensure it is within the bound of the
  NLRI packet data AND the on-stack prefix structure AND the maximum
  size for the address family.

Reported-by: Kostya Kortchinsky <[email protected]>

This commit a joint effort between:
Lou Berger <[email protected]>
Donald Sharp <[email protected]>
Paul Jakma <[email protected]> / <[email protected]>
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
	u64 fact = scale_load_down(weight);
	int shift = WMULT_SHIFT;

	__update_inv_weight(lw);

	if (unlikely(fact >> 32)) {
		while (fact >> 32) {
			fact >>= 1;
			shift--;
		}
	}

	/* hint to use a 32x32->64 mul */
	fact = (u64)(u32)fact * lw->inv_weight;

	while (fact >> 32) {
		fact >>= 1;
		shift--;
	}

	return mul_u64_u32_shr(delta_exec, fact, shift);
}
0
[ "CWE-400", "CWE-703", "CWE-835" ]
linux
c40f7d74c741a907cfaeb73a7697081881c497d0
16,709,096,062,591,054,000,000,000,000,000,000,000
24
sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c

Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the
scheduler under high loads, starting at around the v4.18 time frame,
and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list
manipulation.

Do a (manual) revert of:

  a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")

It turns out that the list_del_leaf_cfs_rq() introduced by this commit
is a surprising property that was not considered in followup commits
such as:

  9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list")

As Vincent Guittot explains:

 "I think that there is a bigger problem with commit a9e7f6544b9c and
  cfs_rq throttling:

  Let take the example of the following topology TG2 --> TG1 --> root:

   1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1
      cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch
      in one path because it has never been used and can't be throttled
      so tmp_alone_branch will point to leaf_cfs_rq_list at the end.

   2) Then TG1 is throttled

   3) and we add TG3 as a new child of TG1.

   4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before
      TG1 cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list.

  With commit a9e7f6544b9c, we can del a cfs_rq from
  rq->leaf_cfs_rq_list.  So if the load of TG1 cfs_rq becomes NULL
  before step 2) above, TG1 cfs_rq is removed from the list.  Then at
  step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list
  but tmp_alone_branch still points to TG3 cfs_rq because its throttled
  parent can't be enqueued when the lock is released.  tmp_alone_branch
  doesn't point to rq->leaf_cfs_rq_list whereas it should.

  So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch
  points on another TG cfs_rq, the next TG cfs_rq that will be added,
  will be linked outside rq->leaf_cfs_rq_list - which is bad.

  In addition, we can break the ordering of the cfs_rq in
  rq->leaf_cfs_rq_list but this ordering is used to update and
  propagate the update from leaf down to root."

Instead of trying to work through all these cases and trying to
reproduce the very high loads that produced the lockup to begin with,
simplify the code temporarily by reverting a9e7f6544b9c - which change
was clearly not thought through completely.

This (hopefully) gives us a kernel that doesn't lock up so people can
continue to enjoy their holidays without worrying about regressions. ;-)

[ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ]

Analyzed-by: Xie XiuQi <[email protected]>
Analyzed-by: Vincent Guittot <[email protected]>
Reported-by: Zhipeng Xie <[email protected]>
Reported-by: Sargun Dhillon <[email protected]>
Reported-by: Xie XiuQi <[email protected]>
Tested-by: Zhipeng Xie <[email protected]>
Tested-by: Sargun Dhillon <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Acked-by: Vincent Guittot <[email protected]>
Cc: <[email protected]> # v4.13+
Cc: Bin Li <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
void Compute(OpKernelContext* context) override {
  const Tensor& prefix = context->input(0);
  const Tensor& tensor_names = context->input(1);
  const Tensor& shape_and_slices = context->input(2);
  OP_REQUIRES(context, tensor_names.NumElements() == dtypes_.size(),
              errors::InvalidArgument("Got ", tensor_names.NumElements(),
                                      " tensor names, but ", dtypes_.size(),
                                      " expected dtypes."));
  ValidateInputs(false /* not save op */, context, prefix, tensor_names,
                 shape_and_slices);
  if (!context->status().ok()) return;

  const string& prefix_string = prefix.scalar<tstring>()();

  // Intention: we plan to use the RestoreV2 op as a backward-compatible
  // reader as we upgrade to the V2 format.  This allows transparent upgrade.
  // We here attempt to read a V1 checkpoint, if "prefix_string" does not
  // refer to a V2 checkpoint.
  Env* env = Env::Default();
  std::vector<string> paths;
  if (!env->GetMatchingPaths(MetaFilename(prefix_string), &paths).ok() ||
      paths.empty()) {
    // Cannot find V2's metadata file, so "prefix_string" does not point to a
    // V2 checkpoint.  Invokes the V1 read path instead.
    for (size_t i = 0; i < tensor_names.NumElements(); ++i) {
      RestoreTensor(context, &checkpoint::OpenTableTensorSliceReader,
                    /* preferred_shard */ -1, /* restore_slice */ true,
                    /* restore_index */ i);
      if (!context->status().ok()) {
        return;
      }
    }
    return;
  }
  // If found, invokes the V2 reader.
  OP_REQUIRES_OK(context, RestoreTensorsV2(context, prefix, tensor_names,
                                           shape_and_slices, dtypes_));
}
0
[ "CWE-476", "CWE-369" ]
tensorflow
9728c60e136912a12d99ca56e106b7cce7af5986
206,720,671,079,984,230,000,000,000,000,000,000,000
38
Ensure validation sticks in `save_restore_v2_ops.cc`

PiperOrigin-RevId: 387924206
Change-Id: I6156842eb3230076b5812c0815f3e66bd5241454
TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteReplaceHeaderManipulation) {
  initializeFilter(HeaderMode::Replace, true);
  performRequest(
      Http::TestRequestHeaderMapImpl{
          {":method", "GET"},
          {":path", "/vhost-and-route"},
          {":scheme", "http"},
          {":authority", "vhost-headers.com"},
          {"x-routeconfig-request", "downstream"},
          {"x-vhost-request", "downstream"},
          {"x-route-request", "downstream"},
          {"x-unmodified", "request"},
      },
      Http::TestRequestHeaderMapImpl{
          {":authority", "vhost-headers.com"},
          {"x-unmodified", "request"},
          {"x-route-request", "route"},
          {"x-vhost-request", "vhost"},
          {"x-routeconfig-request", "routeconfig"},
          {":path", "/vhost-and-route"},
          {":method", "GET"},
      },
      Http::TestResponseHeaderMapImpl{
          {"server", "envoy"},
          {"content-length", "0"},
          {":status", "200"},
          {"x-routeconfig-response", "upstream"},
          {"x-vhost-response", "upstream"},
          {"x-route-response", "upstream"},
          {"x-unmodified", "response"},
      },
      Http::TestResponseHeaderMapImpl{
          {"server", "envoy"},
          {"x-unmodified", "response"},
          {"x-route-response", "route"},
          {"x-vhost-response", "vhost"},
          {"x-routeconfig-response", "routeconfig"},
          {":status", "200"},
      });
}
0
[ "CWE-22" ]
envoy
5333b928d8bcffa26ab19bf018369a835f697585
247,116,861,271,550,700,000,000,000,000,000,000,000
40
Implement handling of escaped slash characters in URL path

Fixes: CVE-2021-29492

Signed-off-by: Yan Avlasov <[email protected]>
static int btrfs_dentry_delete(const struct dentry *dentry)
{
	struct btrfs_root *root;
	struct inode *inode = dentry->d_inode;

	if (!inode && !IS_ROOT(dentry))
		inode = dentry->d_parent->d_inode;

	if (inode) {
		root = BTRFS_I(inode)->root;
		if (btrfs_root_refs(&root->root_item) == 0)
			return 1;

		if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
			return 1;
	}
	return 0;
}
0
[ "CWE-310" ]
linux-2.6
9c52057c698fb96f8f07e7a4bcf4801a092bda89
214,171,219,526,738,750,000,000,000,000,000,000,000
18
Btrfs: fix hash overflow handling

The handling for directory crc hash overflows was fairly obscure,
split_leaf returns EOVERFLOW when we try to extend the item and that is
supposed to bubble up to userland.  For a while it did so, but along
the way we added better handling of errors and forced the FS readonly
if we hit IO errors during the directory insertion.

Along the way, we started testing only for EEXIST and the EOVERFLOW
case was dropped.  The end result is that we may force the FS readonly
if we catch a directory hash bucket overflow.

This fixes a few problem spots.  First I add tests for EOVERFLOW in the
places where we can safely just return the error up the chain.

btrfs_rename is harder though, because it tries to insert the new
directory item only after it has already unlinked anything the rename
was going to overwrite.  Rather than adding very complex logic, I added
a helper to test for the hash overflow case early while it is still
safe to bail out.

Snapshot and subvolume creation had a similar problem, so they are
using the new helper now too.

Signed-off-by: Chris Mason <[email protected]>
Reported-by: Pascal Junod <[email protected]>
int wm_SemUnlock(wm_Sem *s){
    pthread_mutex_lock(&s->mutex);
    s->lockCount--;
    pthread_cond_signal(&s->cond);
    pthread_mutex_unlock(&s->mutex);
    return 0;
}
0
[ "CWE-787" ]
wolfMQTT
84d4b53122e0fa0280c7872350b89d5777dabbb2
41,705,015,182,450,905,000,000,000,000,000,000,000
7
Fix wolfmqtt-fuzzer: Null-dereference WRITE in MqttProps_Free
sunday_quick_search(regex_t* reg, const UChar* target, const UChar* target_end,
                    const UChar* text, const UChar* text_end,
                    const UChar* text_range)
{
  const UChar *s, *t, *p, *end;
  const UChar *tail;
  int map_offset;

  end = text_range + (target_end - target);
  if (end > text_end)
    end = text_end;

  map_offset = reg->map_offset;
  tail = target_end - 1;
  s = text + (tail - target);

  while (s < end) {
    p = s;
    t = tail;
    while (*p == *t) {
      if (t == target) return (UChar* )p;
      p--; t--;
    }
    if (s + map_offset >= text_end) break;
    s += reg->map[*(s + map_offset)];
  }

  return (UChar* )NULL;
}
0
[ "CWE-125" ]
oniguruma
d3e402928b6eb3327f8f7d59a9edfa622fec557b
81,728,381,155,790,930,000,000,000,000,000,000,000
29
fix heap-buffer-overflow
add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
                                 struct cifs_open_parms *oparms)
{
	struct smb2_create_req *req = iov[0].iov_base;
	unsigned int num = *num_iovec;

	/* indicate that we don't need to relock the file */
	oparms->reconnect = false;

	iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid);
	if (iov[num].iov_base == NULL)
		return -ENOMEM;
	iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
	if (!req->CreateContextsOffset)
		req->CreateContextsOffset =
			cpu_to_le32(sizeof(struct smb2_create_req) - 4 +
				    iov[1].iov_len);
	le32_add_cpu(&req->CreateContextsLength,
		     sizeof(struct create_durable_handle_reconnect_v2));
	inc_rfc1001_len(&req->hdr,
			sizeof(struct create_durable_handle_reconnect_v2));
	*num_iovec = num + 1;
	return 0;
}
0
[ "CWE-476" ]
linux
cabfb3680f78981d26c078a26e5c748531257ebb
190,908,990,991,703,900,000,000,000,000,000,000,000
24
CIFS: Enable encryption during session setup phase

In order to allow encryption on SMB connection we need to exchange
a session key and generate encryption and decryption keys.

Signed-off-by: Pavel Shilovsky <[email protected]>
rdp_out_newpointer_caps(STREAM s)
{
	out_uint16_le(s, RDP_CAPSET_POINTER);
	out_uint16_le(s, RDP_CAPLEN_NEWPOINTER);

	out_uint16_le(s, 1);	/* Colour pointer */
	out_uint16_le(s, 20);	/* Cache size */
	out_uint16_le(s, 20);	/* Cache size for new pointers */
}
0
[ "CWE-787" ]
rdesktop
766ebcf6f23ccfe8323ac10242ae6e127d4505d2
337,503,536,862,201,950,000,000,000,000,000,000,000
9
Malicious RDP server security fixes

This commit includes fixes for a set of 21 vulnerabilities in rdesktop
when a malicious RDP server is used.

All vulnerabilities were identified and reported by Eyal Itkin.

 * Add rdp_protocol_error function that is used in several fixes
 * Refactor of process_bitmap_updates
 * Fix possible integer overflow in s_check_rem() on 32bit arch
 * Fix memory corruption in process_bitmap_data - CVE-2018-8794
 * Fix remote code execution in process_bitmap_data - CVE-2018-8795
 * Fix remote code execution in process_plane - CVE-2018-8797
 * Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175
 * Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175
 * Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176
 * Fix Denial of Service in sec_recv - CVE-2018-20176
 * Fix minor information leak in rdpdr_process - CVE-2018-8791
 * Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792
 * Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793
 * Fix Denial of Service in process_bitmap_data - CVE-2018-8796
 * Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798
 * Fix Denial of Service in process_secondary_order - CVE-2018-8799
 * Fix remote code execution in ui_clip_handle_data - CVE-2018-8800
 * Fix major information leak in ui_clip_handle_data - CVE-2018-20174
 * Fix memory corruption in rdp_in_unistr - CVE-2018-20177
 * Fix Denial of Service in process_demand_active - CVE-2018-20178
 * Fix remote code execution in lspci_process - CVE-2018-20179
 * Fix remote code execution in rdpsnddbg_process - CVE-2018-20180
 * Fix remote code execution in seamless_process - CVE-2018-20181
 * Fix remote code execution in seamless_process_line - CVE-2018-20182
static void
ambsdtar_backup(
    application_argument_t *argument)
{
    int        dumpin;
    char      *cmd = NULL;
    char      *qdisk;
    char      *timestamps;
    int        mesgf = 3;
    int        indexf = 4;
    int        outf;
    int        data_out;
    int        index_out;
    int        index_err;
    char      *errmsg = NULL;
    amwait_t   wait_status;
    GPtrArray *argv_ptr;
    pid_t      tarpid;
    pid_t      indexpid = 0;
    time_t     tt;
    char      *file_exclude;
    char      *file_include;
    char       new_timestamps[64];
    char      *option;

    mesgstream = fdopen(mesgf, "w");
    if (!mesgstream) {
	error(_("error mesgstream(%d): %s\n"), mesgf, strerror(errno));
    }

    if (!bsdtar_path) {
	error(_("BSDTAR-PATH not defined"));
    }
    if (!check_exec_for_suid(bsdtar_path, FALSE)) {
	error("'%s' binary is not secure", bsdtar_path);
    }

    if ((option = validate_command_options(argument))) {
	fprintf(stdout, "? Invalid '%s' COMMAND-OPTIONS\n", option);
	error("Invalid '%s' COMMAND-OPTIONS", option);
    }

    if (!state_dir) {
	error(_("STATE-DIR not defined"));
    }

    if (!argument->level) {
	fprintf(mesgstream, "? No level argument\n");
	error(_("No level argument"));
    }
    if (!argument->dle.disk) {
	fprintf(mesgstream, "? No disk argument\n");
	error(_("No disk argument"));
    }
    if (!argument->dle.device) {
	fprintf(mesgstream, "? No device argument\n");
	error(_("No device argument"));
    }

    qdisk = quote_string(argument->dle.disk);

    tt = time(NULL);
    ctime_r(&tt, new_timestamps);
    new_timestamps[strlen(new_timestamps)-1] = '\0';
    timestamps = ambsdtar_get_timestamps(argument,
					 GPOINTER_TO_INT(argument->level->data),
					 mesgstream, CMD_BACKUP);
    cmd = g_strdup(bsdtar_path);
    argv_ptr = ambsdtar_build_argv(argument, timestamps, &file_exclude,
				   &file_include, CMD_BACKUP);

    if (argument->dle.create_index) {
	tarpid = pipespawnv(cmd, STDIN_PIPE|STDOUT_PIPE|STDERR_PIPE, 1,
			    &dumpin, &data_out, &outf,
			    (char **)argv_ptr->pdata);
	g_ptr_array_free_full(argv_ptr);
	argv_ptr = g_ptr_array_new();
	g_ptr_array_add(argv_ptr, g_strdup(bsdtar_path));
	g_ptr_array_add(argv_ptr, g_strdup("tf"));
	g_ptr_array_add(argv_ptr, g_strdup("-"));
	g_ptr_array_add(argv_ptr, NULL);
	indexpid = pipespawnv(cmd, STDIN_PIPE|STDOUT_PIPE|STDERR_PIPE, 1,
			      &index_in, &index_out, &index_err,
			      (char **)argv_ptr->pdata);
	g_ptr_array_free_full(argv_ptr);

	aclose(dumpin);

	indexstream = fdopen(indexf, "w");
	if (!indexstream) {
	    error(_("error indexstream(%d): %s\n"), indexf, strerror(errno));
	}
	read_fd(data_out , "data out" , &read_data_out);
	read_fd(outf     , "data err" , &read_text);
	read_fd(index_out, "index out", &read_text);
	read_fd(index_err, "index_err", &read_text);
    } else {
	tarpid = pipespawnv(cmd, STDIN_PIPE|STDERR_PIPE, 1,
			    &dumpin, &dataf, &outf,
			    (char **)argv_ptr->pdata);
	g_ptr_array_free_full(argv_ptr);
	aclose(dumpin);
	aclose(dataf);
	read_fd(outf, "data err", &read_text);
    }
    event_loop(0);

    waitpid(tarpid, &wait_status, 0);
    if (WIFSIGNALED(wait_status)) {
	errmsg = g_strdup_printf(_("%s terminated with signal %d: see %s"),
				 cmd, WTERMSIG(wait_status), dbfn());
	exit_status = 1;
    } else if (WIFEXITED(wait_status)) {
	if (exit_value[WEXITSTATUS(wait_status)] == 1) {
	    errmsg = g_strdup_printf(_("%s exited with status %d: see %s"),
				     cmd, WEXITSTATUS(wait_status), dbfn());
	    exit_status = 1;
	} else {
	    /* Normal exit */
	}
    } else {
	errmsg = g_strdup_printf(_("%s got bad exit: see %s"),
				 cmd, dbfn());
	exit_status = 1;
    }
    if (argument->dle.create_index) {
	waitpid(indexpid, &wait_status, 0);
	if (WIFSIGNALED(wait_status)) {
	    errmsg = g_strdup_printf(_("'%s index' terminated with signal %d: see %s"),
				     cmd, WTERMSIG(wait_status), dbfn());
	    exit_status = 1;
	} else if (WIFEXITED(wait_status)) {
	    if (exit_value[WEXITSTATUS(wait_status)] == 1) {
		errmsg = g_strdup_printf(_("'%s index' exited with status %d: see %s"),
					 cmd, WEXITSTATUS(wait_status), dbfn());
		exit_status = 1;
	    } else {
		/* Normal exit */
	    }
	} else {
	    errmsg = g_strdup_printf(_("'%s index' got bad exit: see %s"),
				     cmd, dbfn());
	    exit_status = 1;
	}
    }
    g_debug(_("after %s %s wait"), cmd, qdisk);
    g_debug(_("ambsdtar: %s: pid %ld"), cmd, (long)tarpid);
    if (errmsg) {
	g_debug("%s", errmsg);
	g_fprintf(mesgstream, "sendbackup: error [%s]\n", errmsg);
	exit_status = 1;
    } else if (argument->dle.record) {
	ambsdtar_set_timestamps(argument,
				GPOINTER_TO_INT(argument->level->data),
				new_timestamps,
				mesgstream);
    }
    g_debug("sendbackup: size %lld", (long long)dump_size);
    fprintf(mesgstream, "sendbackup: size %lld\n", (long long)dump_size);
    g_debug("sendbackup: end");
    fprintf(mesgstream, "sendbackup: end\n");

    if (argument->dle.create_index)
	fclose(indexstream);

    fclose(mesgstream);

    if (argument->verbose == 0) {
	if (file_exclude)
	    unlink(file_exclude);
	if (file_include)
	    unlink(file_include);
    }

    amfree(file_exclude);
    amfree(file_include);
    amfree(timestamps);
    amfree(qdisk);
    amfree(cmd);
    amfree(errmsg);
}
1
[ "CWE-264" ]
amanda
4bf5b9b356848da98560ffbb3a07a9cb5c4ea6d7
293,446,969,001,343,140,000,000,000,000,000,000,000
182
* Add a /etc/amanda-security.conf file

git-svn-id: https://svn.code.sf.net/p/amanda/code/amanda/branches/3_3@6486 a8d146d6-cc15-0410-8900-af154a0219e0
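The CWE-264 label on this record points at the check_exec_for_suid(bsdtar_path, FALSE) call in the function above: the application refuses to exec a configured binary it cannot vouch for. A minimal sketch of that kind of permission check, with illustrative names and policy — the real amanda check also consults the /etc/amanda-security.conf file this commit adds:

/* Hedged sketch: refuse to exec a configured binary unless it is a
 * root-owned regular file that group/other cannot modify. */
#include <stdbool.h>
#include <sys/stat.h>

static bool binary_is_secure(const char *path)
{
    struct stat st;

    if (stat(path, &st) != 0)
        return false;                       /* must exist and be statable */
    if (!S_ISREG(st.st_mode))
        return false;                       /* regular file only */
    if (st.st_uid != 0)
        return false;                       /* owned by root */
    if (st.st_mode & (S_IWGRP | S_IWOTH))
        return false;                       /* not group/world writable */
    return true;
}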
static void pwc_frame_dumped(struct pwc_device *pdev)
{
	pdev->vframes_dumped++;
	if (pdev->vframe_count < FRAME_LOWMARK)
		return;

	if (pdev->vframes_dumped < 20)
		PWC_DEBUG_FLOW("Dumping frame %d\n", pdev->vframe_count);
	else if (pdev->vframes_dumped == 20)
		PWC_DEBUG_FLOW("Dumping frame %d (last message)\n",
			       pdev->vframe_count);
}
0
[ "CWE-399" ]
linux-2.6
85237f202d46d55c1bffe0c5b1aa3ddc0f1dce4d
70,839,021,204,016,750,000,000,000,000,000,000,000
12
USB: fix DoS in pwc USB video driver

The pwc driver has a disconnect method that waits for user space to
close the device. This opens up an opportunity for a DoS attack,
blocking the USB subsystem and making khubd's task busy-wait in
kernel space. This patch shifts freeing resources to close() if an
opened device is disconnected.

Signed-off-by: Oliver Neukum <[email protected]>
CC: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
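The fix described above is a general driver-lifetime pattern: disconnect() must never block waiting on user space. A hedged sketch of the refcounted variant in plain C — names are illustrative, and a real kernel driver would use kref and proper locking rather than bare integers:

/* Hedged sketch: disconnect() marks the device gone and drops its
 * reference; whichever of disconnect()/release() drops the last
 * reference frees the resources, so nobody has to wait. */
#include <stdlib.h>

struct vdev {
    int refcount;     /* held by the driver core and by each open fd */
    int unplugged;    /* set once the hardware is gone */
};

static void vdev_put(struct vdev *d)
{
    if (--d->refcount == 0)
        free(d);                    /* last user frees */
}

static void driver_disconnect(struct vdev *d)
{
    d->unplugged = 1;               /* future I/O fails fast */
    vdev_put(d);                    /* no waiting for close() here */
}

static void fd_release(struct vdev *d)
{
    vdev_put(d);                    /* an open fd releases its reference */
}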
cmsHANDLE CMSEXPORT cmsDictDup(cmsHANDLE hDict)
{
    _cmsDICT* old_dict = (_cmsDICT*) hDict;
    cmsHANDLE hNew;
    cmsDICTentry *entry;

    _cmsAssert(old_dict != NULL);

    hNew = cmsDictAlloc(old_dict->ContextID);
    if (hNew == NULL) return NULL;

    // Walk the list copying all nodes
    entry = old_dict->head;
    while (entry != NULL) {

        if (!cmsDictAddEntry(hNew, entry->Name, entry->Value,
                             entry->DisplayName, entry->DisplayValue)) {
            cmsDictFree(hNew);
            return NULL;
        }

        entry = entry->Next;
    }

    return hNew;
}
0
[ "CWE-703" ]
Little-CMS
91c2db7f2559be504211b283bc3a2c631d6f06d9
147,388,573,364,882,360,000,000,000,000,000,000,000
26
Non-happy-path fixes
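The terse message above matches what cmsDictDup() does on its error path: when copying an entry fails, it frees the partial duplicate before returning NULL so nothing leaks. A self-contained sketch of the same idiom, with illustrative types rather than the Little-CMS ones:

/* Hedged sketch: duplicate a linked list, rolling back the partial
 * copy on any mid-copy failure instead of leaking it. */
#include <stdlib.h>
#include <string.h>

struct node { char *value; struct node *next; };

static void list_free(struct node *head)
{
    while (head) {
        struct node *next = head->next;
        free(head->value);
        free(head);
        head = next;
    }
}

static struct node *list_dup(const struct node *src)
{
    struct node *head = NULL, **tail = &head;

    for (; src != NULL; src = src->next) {
        struct node *n = calloc(1, sizeof(*n));
        /* || short-circuits, so strdup() is skipped when calloc fails */
        if (n == NULL || (n->value = strdup(src->value)) == NULL) {
            free(n);                /* n->value is NULL here, nothing lost */
            list_free(head);        /* roll back the partial copy */
            return NULL;
        }
        *tail = n;
        tail = &n->next;
    }
    return head;
}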
static int maria_rollback(handlerton *hton __attribute__ ((unused)),
                          THD *thd, bool all)
{
  TRN *trn= THD_TRN;
  DBUG_ENTER("maria_rollback");

  DBUG_ASSERT(trnman_has_locked_tables(trn) == 0);
  trnman_reset_locked_tables(trn, 0);

  /* statement or transaction ? */
  if ((thd->variables.option_bits & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) &&
      !all)
  {
    trnman_rollback_statement(trn);
    DBUG_RETURN(0); // end of statement
  }

  reset_thd_trn(thd, (MARIA_HA*) trn->used_instances);
  DBUG_RETURN(trnman_rollback_trn(trn) ?
              HA_ERR_OUT_OF_MEM : 0); // end of transaction
}
0
[ "CWE-400" ]
server
9e39d0ae44595dbd1570805d97c9c874778a6be8
85,433,052,726,546,800,000,000,000,000,000,000,000
19
MDEV-25787 Bug report: crash on SELECT DISTINCT thousands_blob_fields

Fix a debug assert to account for temp tables that were not opened.
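The fix described here changes a debug assertion rather than runtime behavior: the old assert fired in a legitimate state (temp tables that were never opened). A hedged sketch of that kind of assertion relaxation — every name below is an illustrative stand-in, not the MariaDB one:

/* Hedged sketch: widen an over-strict debug assertion so it allows a
 * legitimate state instead of crashing debug builds. */
#include <assert.h>
#include <stdbool.h>

struct txn_state {
    int  locked_tables;   /* tables currently locked by this txn */
    bool tables_opened;   /* false if temp tables were never opened */
};

static void rollback_checks(const struct txn_state *t)
{
    /* Old form: assert(t->locked_tables == 0); -- too strict, it fired
     * when tables were never opened in the first place. */
    assert(t->locked_tables == 0 || !t->tables_opened);
}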