func
stringlengths
0
484k
target
int64
0
1
cwe
listlengths
0
4
project
stringclasses
799 values
commit_id
stringlengths
40
40
hash
float64
1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
size
int64
1
24k
message
stringlengths
0
13.3k
TEST(ExpressionArrayToObjectTest, KVFormatWithDuplicates) { assertExpectedResults("$arrayToObject", {{{Value(BSON_ARRAY(BSON("k" << "hi" << "v" << 2) << BSON("k" << "hi" << "v" << 3)))}, {Value(BSON("hi" << 3))}}}); }
0
[ "CWE-835" ]
mongo
0a076417d1d7fba3632b73349a1fd29a83e68816
12,598,649,452,284,020,000,000,000,000,000,000,000
12
SERVER-38070 fix infinite loop in agg expression
WReverseVideo(p, on) struct win *p; int on; { struct canvas *cv; for (cv = p->w_layer.l_cvlist; cv; cv = cv->c_lnext) { display = cv->c_display; if (cv != D_forecv) continue; ReverseVideo(on); if (!on && p->w_revvid && !D_CVR) { if (D_VB) AddCStr(D_VB); else p->w_bell = BELL_VISUAL; } } }
0
[]
screen
c5db181b6e017cfccb8d7842ce140e59294d9f62
265,226,327,952,252,000,000,000,000,000,000,000,000
20
ansi: add support for xterm OSC 11 It allows for getting and setting the background color. Notably, Vim uses OSC 11 to learn whether it's running on a light or dark colored terminal and choose a color scheme accordingly. Tested with gnome-terminal and xterm. When called with "?" argument the current background color is returned: $ echo -ne "\e]11;?\e\\" $ 11;rgb:2323/2727/2929 Signed-off-by: Lubomir Rintel <[email protected]> (cherry picked from commit 7059bff20a28778f9d3acf81cad07b1388d02309) Signed-off-by: Amadeusz Sławiński <[email protected]
static bool rename_path_prefix_equal(const struct smb_filename *smb_fname_src, const struct smb_filename *smb_fname_dst) { const char *psrc = smb_fname_src->base_name; const char *pdst = smb_fname_dst->base_name; size_t slen; if (psrc[0] == '.' && psrc[1] == '/') { psrc += 2; } if (pdst[0] == '.' && pdst[1] == '/') { pdst += 2; } if ((slen = strlen(psrc)) > strlen(pdst)) { return False; } return ((memcmp(psrc, pdst, slen) == 0) && pdst[slen] == '/'); }
0
[ "CWE-200" ]
samba
3ddc9344c2fa7461336899fbddb0bb80995e9170
177,816,631,675,608,950,000,000,000,000,000,000,000
18
CVE-2022-32742: s3: smbd: Harden the smbreq_bufrem() macro. Fixes the raw.write.bad-write test. NB. We need the two (==0) changes in source3/smbd/smb2_reply.c as the gcc optimizer now knows that the return from smbreq_bufrem() can never be less than zero. BUG: https://bugzilla.samba.org/show_bug.cgi?id=15085 Remove knownfail. Signed-off-by: Jeremy Allison <[email protected]> Reviewed-by: David Disseldorp <[email protected]> Autobuild-User(master): Jule Anger <[email protected]> Autobuild-Date(master): Wed Jul 27 11:46:46 UTC 2022 on sn-devel-184
static void fuse_lib_setxattr(fuse_req_t req, fuse_ino_t ino, const char *name, const char *value, size_t size, int flags) { struct fuse *f = req_fuse_prepare(req); char *path; int err; err = -ENOENT; pthread_rwlock_rdlock(&f->tree_lock); path = get_path(f, ino); if (path != NULL) { struct fuse_intr_data d; fuse_prepare_interrupt(f, req, &d); err = fuse_fs_setxattr(f->fs, path, name, value, size, flags); fuse_finish_interrupt(f, req, &d); free(path); } pthread_rwlock_unlock(&f->tree_lock); reply_err(req, err); }
0
[]
ntfs-3g
fb28eef6f1c26170566187c1ab7dc913a13ea43c
59,380,260,220,781,210,000,000,000,000,000,000,000
20
Hardened the checking of directory offset requested by a readdir When asked for the next directory entries, make sure the chunk offset is within valid values, otherwise return no more entries in chunk.
uint32_t cli_bcapi_trace_op(struct cli_bc_ctx *ctx, const uint8_t *op, uint32_t col) { if (LIKELY(ctx->trace_level < trace_col)) return 0; if (ctx->trace_level&0xc0) { ctx->col = col; /* func/scope changed and they needed param/location event */ ctx->trace(ctx, (ctx->trace_level&0x80) ? trace_func : trace_scope); ctx->trace_level &= ~0xc0; } if (LIKELY(ctx->trace_level < trace_col)) return 0; if (ctx->col != col) { ctx->col = col; ctx->trace(ctx, trace_col); } else { ctx->trace(ctx, trace_line); } if (LIKELY(ctx->trace_level < trace_op)) return 0; if (ctx->trace_op && op) ctx->trace_op(ctx, (const char*)op); return 0; }
0
[ "CWE-189" ]
clamav-devel
3d664817f6ef833a17414a4ecea42004c35cc42f
181,191,246,905,119,720,000,000,000,000,000,000,000
24
fix recursion level crash (bb #3706). Thanks to Stephane Chazelas for the analysis.
static int hugetlb_vmtruncate(struct inode *inode, loff_t offset) { pgoff_t pgoff; struct address_space *mapping = inode->i_mapping; struct hstate *h = hstate_inode(inode); BUG_ON(offset & ~huge_page_mask(h)); pgoff = offset >> PAGE_SHIFT; i_size_write(inode, offset); mutex_lock(&mapping->i_mmap_mutex); if (!prio_tree_empty(&mapping->i_mmap)) hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff); mutex_unlock(&mapping->i_mmap_mutex); truncate_hugepages(inode, offset); return 0; }
0
[ "CWE-399" ]
linux
90481622d75715bfcb68501280a917dbfe516029
296,548,478,170,199,920,000,000,000,000,000,000,000
17
hugepages: fix use after free bug in "quota" handling hugetlbfs_{get,put}_quota() are badly named. They don't interact with the general quota handling code, and they don't much resemble its behaviour. Rather than being about maintaining limits on on-disk block usage by particular users, they are instead about maintaining limits on in-memory page usage (including anonymous MAP_PRIVATE copied-on-write pages) associated with a particular hugetlbfs filesystem instance. Worse, they work by having callbacks to the hugetlbfs filesystem code from the low-level page handling code, in particular from free_huge_page(). This is a layering violation of itself, but more importantly, if the kernel does a get_user_pages() on hugepages (which can happen from KVM amongst others), then the free_huge_page() can be delayed until after the associated inode has already been freed. If an unmount occurs at the wrong time, even the hugetlbfs superblock where the "quota" limits are stored may have been freed. Andrew Barry proposed a patch to fix this by having hugepages, instead of storing a pointer to their address_space and reaching the superblock from there, had the hugepages store pointers directly to the superblock, bumping the reference count as appropriate to avoid it being freed. Andrew Morton rejected that version, however, on the grounds that it made the existing layering violation worse. This is a reworked version of Andrew's patch, which removes the extra, and some of the existing, layering violation. It works by introducing the concept of a hugepage "subpool" at the lower hugepage mm layer - that is a finite logical pool of hugepages to allocate from. hugetlbfs now creates a subpool for each filesystem instance with a page limit set, and a pointer to the subpool gets added to each allocated hugepage, instead of the address_space pointer used now. The subpool has its own lifetime and is only freed once all pages in it _and_ all other references to it (i.e. superblocks) are gone. 
subpools are optional - a NULL subpool pointer is taken by the code to mean that no subpool limits are in effect. Previous discussion of this bug found in: "Fix refcounting in hugetlbfs quota handling.". See: https://lkml.org/lkml/2011/8/11/28 or http://marc.info/?l=linux-mm&m=126928970510627&w=1 v2: Fixed a bug spotted by Hillf Danton, and removed the extra parameter to alloc_huge_page() - since it already takes the vma, it is not necessary. Signed-off-by: Andrew Barry <[email protected]> Signed-off-by: David Gibson <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Minchan Kim <[email protected]> Cc: Hillf Danton <[email protected]> Cc: Paul Mackerras <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
rsvg_linear_gradient_set_atts (RsvgNode * self, RsvgHandle * ctx, RsvgPropertyBag * atts) { RsvgLinearGradient *grad = (RsvgLinearGradient *) self; const char *value; if (rsvg_property_bag_size (atts)) { if ((value = rsvg_property_bag_lookup (atts, "id"))) rsvg_defs_register_name (ctx->priv->defs, value, self); if ((value = rsvg_property_bag_lookup (atts, "x1"))) { grad->x1 = _rsvg_css_parse_length (value); grad->hasx1 = TRUE; } if ((value = rsvg_property_bag_lookup (atts, "y1"))) { grad->y1 = _rsvg_css_parse_length (value); grad->hasy1 = TRUE; } if ((value = rsvg_property_bag_lookup (atts, "x2"))) { grad->x2 = _rsvg_css_parse_length (value); grad->hasx2 = TRUE; } if ((value = rsvg_property_bag_lookup (atts, "y2"))) { grad->y2 = _rsvg_css_parse_length (value); grad->hasy2 = TRUE; } if ((value = rsvg_property_bag_lookup (atts, "spreadMethod"))) { if (!strcmp (value, "pad")) { grad->spread = RSVG_GRADIENT_PAD; } else if (!strcmp (value, "reflect")) { grad->spread = RSVG_GRADIENT_REFLECT; } else if (!strcmp (value, "repeat")) { grad->spread = RSVG_GRADIENT_REPEAT; } grad->hasspread = TRUE; } if ((value = rsvg_property_bag_lookup (atts, "xlink:href"))) { if (self != rsvg_defs_lookup (ctx->priv->defs, value)) rsvg_defs_add_resolver (ctx->priv->defs, &grad->fallback, value); } if ((value = rsvg_property_bag_lookup (atts, "gradientTransform"))) { rsvg_parse_transform (grad->affine, value); grad->hastransform = TRUE; } if ((value = rsvg_property_bag_lookup (atts, "color"))) grad->current_color = rsvg_css_parse_color (value, 0); if ((value = rsvg_property_bag_lookup (atts, "gradientUnits"))) { if (!strcmp (value, "userSpaceOnUse")) grad->obj_bbox = FALSE; else if (!strcmp (value, "objectBoundingBox")) grad->obj_bbox = TRUE; grad->hasbbox = TRUE; } rsvg_parse_style_attrs (ctx, self->state, "linearGradient", NULL, NULL, atts); } }
0
[]
librsvg
34c95743ca692ea0e44778e41a7c0a129363de84
220,899,803,106,098,820,000,000,000,000,000,000,000
54
Store node type separately in RsvgNode The node name (formerly RsvgNode:type) cannot be used to infer the sub-type of RsvgNode that we're dealing with, since for unknown elements we put type = node-name. This lead to a (potentially exploitable) crash e.g. when the element name started with "fe" which tricked the old code into considering it as a RsvgFilterPrimitive. CVE-2011-3146 https://bugzilla.gnome.org/show_bug.cgi?id=658014
static void srpt_queue_tm_rsp(struct se_cmd *cmd) { srpt_queue_response(cmd); }
0
[ "CWE-200", "CWE-476" ]
linux
51093254bf879bc9ce96590400a87897c7498463
2,945,010,982,471,973,000,000,000,000,000,000,000
4
IB/srpt: Simplify srpt_handle_tsk_mgmt() Let the target core check task existence instead of the SRP target driver. Additionally, let the target core check the validity of the task management request instead of the ib_srpt driver. This patch fixes the following kernel crash: BUG: unable to handle kernel NULL pointer dereference at 0000000000000001 IP: [<ffffffffa0565f37>] srpt_handle_new_iu+0x6d7/0x790 [ib_srpt] Oops: 0002 [#1] SMP Call Trace: [<ffffffffa05660ce>] srpt_process_completion+0xde/0x570 [ib_srpt] [<ffffffffa056669f>] srpt_compl_thread+0x13f/0x160 [ib_srpt] [<ffffffff8109726f>] kthread+0xcf/0xe0 [<ffffffff81613cfc>] ret_from_fork+0x7c/0xb0 Signed-off-by: Bart Van Assche <[email protected]> Fixes: 3e4f574857ee ("ib_srpt: Convert TMR path to target_submit_tmr") Tested-by: Alex Estrin <[email protected]> Reviewed-by: Christoph Hellwig <[email protected]> Cc: Nicholas Bellinger <[email protected]> Cc: Sagi Grimberg <[email protected]> Cc: stable <[email protected]> Signed-off-by: Doug Ledford <[email protected]>
DEFUN(chkWORD, MARK_WORD, "Turn current word into hyperlink") { char *p; int spos, epos; p = getCurWord(Currentbuf, &spos, &epos); if (p == NULL) return; reAnchorWord(Currentbuf, Currentbuf->currentLine, spos, epos); displayBuffer(Currentbuf, B_FORCE_REDRAW); }
0
[ "CWE-59", "CWE-241" ]
w3m
18dcbadf2771cdb0c18509b14e4e73505b242753
117,725,816,082,257,760,000,000,000,000,000,000,000
10
Make temporary directory safely when ~/.w3m is unwritable
static int ipgre_tunnel_encap_add_mpls_ops(void) { return ip_tunnel_encap_add_ops(&mpls_iptun_ops, TUNNEL_ENCAP_MPLS); }
0
[]
net
6c8991f41546c3c472503dff1ea9daaddf9331c2
164,816,358,459,864,170,000,000,000,000,000,000,000
4
net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup ipv6_stub uses the ip6_dst_lookup function to allow other modules to perform IPv6 lookups. However, this function skips the XFRM layer entirely. All users of ipv6_stub->ip6_dst_lookup use ip_route_output_flow (via the ip_route_output_key and ip_route_output helpers) for their IPv4 lookups, which calls xfrm_lookup_route(). This patch fixes this inconsistent behavior by switching the stub to ip6_dst_lookup_flow, which also calls xfrm_lookup_route(). This requires some changes in all the callers, as these two functions take different arguments and have different return types. Fixes: 5f81bd2e5d80 ("ipv6: export a stub for IPv6 symbols used by vxlan") Reported-by: Xiumei Mu <[email protected]> Signed-off-by: Sabrina Dubroca <[email protected]> Signed-off-by: David S. Miller <[email protected]>
GC_API void GC_CALL GC_set_all_interior_pointers(int value) { DCL_LOCK_STATE; GC_all_interior_pointers = value ? 1 : 0; if (GC_is_initialized) { /* It is not recommended to change GC_all_interior_pointers value */ /* after GC is initialized but it seems GC could work correctly */ /* even after switching the mode. */ LOCK(); GC_initialize_offsets(); /* NOTE: this resets manual offsets as well */ if (!GC_all_interior_pointers) GC_bl_init_no_interiors(); UNLOCK(); } }
0
[ "CWE-119" ]
bdwgc
7292c02fac2066d39dd1bcc37d1a7054fd1e32ee
166,699,026,037,673,670,000,000,000,000,000,000,000
16
Fix malloc routines to prevent size value wrap-around See issue #135 on Github. * allchblk.c (GC_allochblk, GC_allochblk_nth): Use OBJ_SZ_TO_BLOCKS_CHECKED instead of OBJ_SZ_TO_BLOCKS. * malloc.c (GC_alloc_large): Likewise. * alloc.c (GC_expand_hp_inner): Type of "bytes" local variable changed from word to size_t; cast ROUNDUP_PAGESIZE argument to size_t; prevent overflow when computing GC_heapsize+bytes > GC_max_heapsize. * dbg_mlc.c (GC_debug_malloc, GC_debug_malloc_ignore_off_page, GC_debug_malloc_atomic_ignore_off_page, GC_debug_generic_malloc, GC_debug_generic_malloc_inner, GC_debug_generic_malloc_inner_ignore_off_page, GC_debug_malloc_stubborn, GC_debug_malloc_atomic, GC_debug_malloc_uncollectable, GC_debug_malloc_atomic_uncollectable): Use SIZET_SAT_ADD (instead of "+" operator) to add extra bytes to lb value. * fnlz_mlc.c (GC_finalized_malloc): Likewise. * gcj_mlc.c (GC_debug_gcj_malloc): Likewise. * include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES, ADD_SLOP, ROUNDUP_PAGESIZE): Likewise. * include/private/gcconfig.h (GET_MEM): Likewise. * mallocx.c (GC_malloc_many, GC_memalign): Likewise. * os_dep.c (GC_wince_get_mem, GC_win32_get_mem): Likewise. * typd_mlc.c (GC_malloc_explicitly_typed, GC_malloc_explicitly_typed_ignore_off_page, GC_calloc_explicitly_typed): Likewise. * headers.c (GC_scratch_alloc): Change type of bytes_to_get from word to size_t (because ROUNDUP_PAGESIZE_IF_MMAP result type changed). * include/private/gc_priv.h: Include limits.h (unless SIZE_MAX already defined). * include/private/gc_priv.h (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Move from malloc.c file. * include/private/gc_priv.h (SIZET_SAT_ADD): New macro (defined before include gcconfig.h). * include/private/gc_priv.h (EXTRA_BYTES, GC_page_size): Change type to size_t. * os_dep.c (GC_page_size): Likewise. * include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES, ADD_SLOP, ROUNDUP_PAGESIZE): Add comment about the argument. 
* include/private/gcconfig.h (GET_MEM): Likewise. * include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES, ADD_SLOP, OBJ_SZ_TO_BLOCKS, ROUNDUP_PAGESIZE, ROUNDUP_PAGESIZE_IF_MMAP): Rename argument to "lb". * include/private/gc_priv.h (OBJ_SZ_TO_BLOCKS_CHECKED): New macro. * include/private/gcconfig.h (GC_win32_get_mem, GC_wince_get_mem, GC_unix_get_mem): Change argument type from word to int. * os_dep.c (GC_unix_mmap_get_mem, GC_unix_get_mem, GC_unix_sbrk_get_mem, GC_wince_get_mem, GC_win32_get_mem): Likewise. * malloc.c (GC_alloc_large_and_clear): Call OBJ_SZ_TO_BLOCKS only if no value wrap around is guaranteed. * malloc.c (GC_generic_malloc): Do not check for lb_rounded < lb case (because ROUNDED_UP_GRANULES and GRANULES_TO_BYTES guarantees no value wrap around). * mallocx.c (GC_generic_malloc_ignore_off_page): Likewise. * misc.c (GC_init_size_map): Change "i" local variable type from int to size_t. * os_dep.c (GC_write_fault_handler, catch_exception_raise): Likewise. * misc.c (GC_envfile_init): Cast len to size_t when passed to ROUNDUP_PAGESIZE_IF_MMAP. * os_dep.c (GC_setpagesize): Cast GC_sysinfo.dwPageSize and GETPAGESIZE() to size_t (when setting GC_page_size). * os_dep.c (GC_unix_mmap_get_mem, GC_unmap_start, GC_remove_protection): Expand ROUNDUP_PAGESIZE macro but without value wrap-around checking (the argument is of word type). * os_dep.c (GC_unix_mmap_get_mem): Replace -GC_page_size with ~GC_page_size+1 (because GC_page_size is unsigned); remove redundant cast to size_t. * os_dep.c (GC_unix_sbrk_get_mem): Add explicit cast of GC_page_size to SBRK_ARG_T. * os_dep.c (GC_wince_get_mem): Change type of res_bytes local variable to size_t. * typd_mlc.c: Do not include limits.h. * typd_mlc.c (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Remove (as defined in gc_priv.h now).
void HttpIntegrationTest::testRouterHeaderOnlyRequestAndResponse( ConnectionCreationFunction* create_connection, int upstream_index, const std::string& path, const std::string& authority) { auto response = makeHeaderOnlyRequest(create_connection, upstream_index, path, authority); checkSimpleRequestSuccess(0U, 0U, response.get()); }
0
[ "CWE-400", "CWE-703" ]
envoy
afc39bea36fd436e54262f150c009e8d72db5014
231,062,319,421,519,350,000,000,000,000,000,000,000
6
Track byteSize of HeaderMap internally. Introduces a cached byte size updated internally in HeaderMap. The value is stored as an optional, and is cleared whenever a non-const pointer or reference to a HeaderEntry is accessed. The cached value can be set with refreshByteSize() which performs an iteration over the HeaderMap to sum the size of each key and value in the HeaderMap. Signed-off-by: Asra Ali <[email protected]>
void aes_setkey_enc( aes_context *ctx, const unsigned char *key, int keysize ) { int i; unsigned long *RK; #if !defined(XYSSL_AES_ROM_TABLES) if( aes_init_done == 0 ) { aes_gen_tables(); aes_init_done = 1; } #endif switch( keysize ) { case 128: ctx->nr = 10; break; case 192: ctx->nr = 12; break; case 256: ctx->nr = 14; break; default : return; } #if defined(PADLOCK_ALIGN16) ctx->rk = RK = PADLOCK_ALIGN16( ctx->buf ); #else ctx->rk = RK = ctx->buf; #endif for( i = 0; i < (keysize >> 5); i++ ) { GET_ULONG_LE( RK[i], key, i << 2 ); } switch( ctx->nr ) { case 10: for( i = 0; i < 10; i++, RK += 4 ) { RK[4] = RK[0] ^ RCON[i] ^ ( FSb[ ( RK[3] >> 8 ) & 0xFF ] ) ^ ( FSb[ ( RK[3] >> 16 ) & 0xFF ] << 8 ) ^ ( FSb[ ( RK[3] >> 24 ) & 0xFF ] << 16 ) ^ ( ((unsigned int)FSb[ ( RK[3] ) & 0xFF ]) << 24 ); RK[5] = RK[1] ^ RK[4]; RK[6] = RK[2] ^ RK[5]; RK[7] = RK[3] ^ RK[6]; } break; case 12: for( i = 0; i < 8; i++, RK += 6 ) { RK[6] = RK[0] ^ RCON[i] ^ ( FSb[ ( RK[5] >> 8 ) & 0xFF ] ) ^ ( FSb[ ( RK[5] >> 16 ) & 0xFF ] << 8 ) ^ ( FSb[ ( RK[5] >> 24 ) & 0xFF ] << 16 ) ^ ( ((unsigned int)FSb[ ( RK[5] ) & 0xFF ]) << 24 ); RK[7] = RK[1] ^ RK[6]; RK[8] = RK[2] ^ RK[7]; RK[9] = RK[3] ^ RK[8]; RK[10] = RK[4] ^ RK[9]; RK[11] = RK[5] ^ RK[10]; } break; case 14: for( i = 0; i < 7; i++, RK += 8 ) { RK[8] = RK[0] ^ RCON[i] ^ ( FSb[ ( RK[7] >> 8 ) & 0xFF ] ) ^ ( FSb[ ( RK[7] >> 16 ) & 0xFF ] << 8 ) ^ ( FSb[ ( RK[7] >> 24 ) & 0xFF ] << 16 ) ^ ( ((unsigned int)FSb[ ( RK[7] ) & 0xFF ]) << 24 ); RK[9] = RK[1] ^ RK[8]; RK[10] = RK[2] ^ RK[9]; RK[11] = RK[3] ^ RK[10]; RK[12] = RK[4] ^ ( FSb[ ( RK[11] ) & 0xFF ] ) ^ ( FSb[ ( RK[11] >> 8 ) & 0xFF ] << 8 ) ^ ( FSb[ ( RK[11] >> 16 ) & 0xFF ] << 16 ) ^ ( ((unsigned int)FSb[ ( RK[11] >> 24 ) & 0xFF ]) << 24 ); RK[13] = RK[5] ^ RK[12]; RK[14] = RK[6] ^ RK[13]; RK[15] = RK[7] ^ RK[14]; } break; default: break; } }
0
[]
ghostpdl
8e9ce5016db968b40e4ec255a3005f2786cce45f
288,502,722,046,080,840,000,000,000,000,000,000,000
99
Bug 699665 "memory corruption in aesdecode" The specimen file calls aesdecode without specifying the key to be used, though it does manage to do enough work with the PDF interpreter routines to get access to aesdecode (which isn't normally available). This causes us to read uninitialised memory, which can (and often does) lead to a segmentation fault. In this commit we set the key to NULL explicitly during intialisation and then check it before we read it. If its NULL we just return. It seems bizarre that we don't return error codes, we should probably look into that at some point, but this prevents the code trying to read uninitialised memory.
static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr, unsigned int size) { SysBusESPState *sysbus = opaque; ESPState *s = ESP(&sysbus->esp); uint64_t val = 0; trace_esp_pdma_read(size); switch (size) { case 1: val = esp_pdma_read(s); break; case 2: val = esp_pdma_read(s); val = (val << 8) | esp_pdma_read(s); break; } if (fifo8_num_used(&s->fifo) < 2) { s->pdma_cb(s); } return val; }
0
[ "CWE-476" ]
qemu
0db895361b8a82e1114372ff9f4857abea605701
97,766,147,808,067,200,000,000,000,000,000,000,000
23
esp: always check current_req is not NULL before use in DMA callbacks After issuing a SCSI command the SCSI layer can call the SCSIBusInfo .cancel callback which resets both current_req and current_dev to NULL. If any data is left in the transfer buffer (async_len != 0) then the next TI (Transfer Information) command will attempt to reference the NULL pointer causing a segfault. Buglink: https://bugs.launchpad.net/qemu/+bug/1910723 Buglink: https://bugs.launchpad.net/qemu/+bug/1909247 Signed-off-by: Mark Cave-Ayland <[email protected]> Tested-by: Alexander Bulekov <[email protected]> Message-Id: <[email protected]>
static int add_object_entry_from_bitmap(const unsigned char *sha1, enum object_type type, int flags, uint32_t name_hash, struct packed_git *pack, off_t offset) { uint32_t index_pos; if (have_duplicate_entry(sha1, 0, &index_pos)) return 0; create_object_entry(sha1, type, name_hash, 0, 0, index_pos, pack, offset); display_progress(progress_state, nr_result); return 1; }
0
[ "CWE-119", "CWE-787" ]
git
de1e67d0703894cb6ea782e36abb63976ab07e60
31,195,744,343,091,934,000,000,000,000,000,000,000
15
list-objects: pass full pathname to callbacks When we find a blob at "a/b/c", we currently pass this to our show_object_fn callbacks as two components: "a/b/" and "c". Callbacks which want the full value then call path_name(), which concatenates the two. But this is an inefficient interface; the path is a strbuf, and we could simply append "c" to it temporarily, then roll back the length, without creating a new copy. So we could improve this by teaching the callsites of path_name() this trick (and there are only 3). But we can also notice that no callback actually cares about the broken-down representation, and simply pass each callback the full path "a/b/c" as a string. The callback code becomes even simpler, then, as we do not have to worry about freeing an allocated buffer, nor rolling back our modification to the strbuf. This is theoretically less efficient, as some callbacks would not bother to format the final path component. But in practice this is not measurable. Since we use the same strbuf over and over, our work to grow it is amortized, and we really only pay to memcpy a few bytes. Signed-off-by: Jeff King <[email protected]> Signed-off-by: Junio C Hamano <[email protected]>
virDomainVcpuThreadSchedParse(xmlNodePtr node, virDomainDefPtr def) { return virDomainThreadSchedParseHelper(node, "vcpus", virDomainDefGetVcpuSched, def); }
0
[ "CWE-212" ]
libvirt
a5b064bf4b17a9884d7d361733737fb614ad8979
146,462,036,385,438,040,000,000,000,000,000,000,000
7
conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410 (v6.1.0-122-g3b076391be) we support http cookies. Since they may contain somewhat sensitive information we should not format them into the XML unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted. Reported-by: Han Han <[email protected]> Signed-off-by: Peter Krempa <[email protected]> Reviewed-by: Erik Skultety <[email protected]>
void WAVEFile::initCompressionParams() { Track *track = getTrack(); if (track->f.compressionType == AF_COMPRESSION_IMA) initIMACompressionParams(); else if (track->f.compressionType == AF_COMPRESSION_MS_ADPCM) initMSADPCMCompressionParams(); }
0
[ "CWE-119", "CWE-284" ]
audiofile
a2e9eab8ea87c4ffc494d839ebb4ea145eb9f2e6
111,985,314,265,112,020,000,000,000,000,000,000,000
8
Actually fail when error occurs in parseFormat When there's an unsupported number of bits per sample or an invalid number of samples per block, don't only print an error message using the error handler, but actually stop parsing the file. This fixes #35 (also reported at https://bugzilla.opensuse.org/show_bug.cgi?id=1026983 and https://blogs.gentoo.org/ago/2017/02/20/audiofile-heap-based-buffer-overflow-in-imadecodeblockwave-ima-cpp/ )
MOBI_RET mobi_reconstruct_infl(char *outstring, const MOBIIndx *infl, const MOBIIndexEntry *orth_entry) { const char *label = orth_entry->label; uint32_t *infl_groups = NULL; size_t infl_count = mobi_get_indxentry_tagarray(&infl_groups, orth_entry, INDX_TAGARR_ORTH_INFL); if (infl_count == 0 || !infl_groups) { return MOBI_SUCCESS; } const char *start_tag = "<idx:infl>"; const char *end_tag = "</idx:infl>"; const char *iform_tag = "<idx:iform%s value=\"%s\"/>"; char name_attr[INDX_INFLBUF_SIZEMAX + 1]; char infl_tag[INDX_INFLBUF_SIZEMAX + 1]; strcpy(outstring, start_tag); size_t initlen = strlen(start_tag) + strlen(end_tag); size_t outlen = initlen; size_t label_length = strlen(label); if (label_length > INDX_INFLBUF_SIZEMAX) { debug_print("Entry label too long (%s)\n", label); return MOBI_DATA_CORRUPT; } if (infl->cncx_record == NULL) { debug_print("%s\n", "Missing cncx record"); return MOBI_DATA_CORRUPT; } for (size_t i = 0; i < infl_count; i++) { size_t offset = infl_groups[i]; if (offset >= infl->entries_count) { debug_print("%s\n", "Invalid entry offset"); return MOBI_DATA_CORRUPT; } uint32_t *groups; size_t group_cnt = mobi_get_indxentry_tagarray(&groups, &infl->entries[offset], INDX_TAGARR_INFL_GROUPS); uint32_t *parts; size_t part_cnt = mobi_get_indxentry_tagarray(&parts, &infl->entries[offset], INDX_TAGARR_INFL_PARTS_V2); if (group_cnt != part_cnt) { return MOBI_DATA_CORRUPT; } for (size_t j = 0; j < part_cnt; j++) { name_attr[0] = '\0'; char *group_name = mobi_get_cncx_string(infl->cncx_record, groups[j]); if (group_name == NULL) { debug_print("%s\n", "Memory allocation failed"); return MOBI_MALLOC_FAILED; } if (strlen(group_name)) { snprintf(name_attr, INDX_INFLBUF_SIZEMAX, " name=\"%s\"", group_name); } free(group_name); unsigned char decoded[INDX_INFLBUF_SIZEMAX + 1]; memset(decoded, 0, INDX_INFLBUF_SIZEMAX + 1); if (parts[j] >= infl->entries_count) { debug_print("%s\n", "Invalid entry offset"); return MOBI_DATA_CORRUPT; } unsigned char *rule = 
(unsigned char *) infl->entries[parts[j]].label; memcpy(decoded, label, label_length); int decoded_length = (int) label_length; MOBI_RET ret = mobi_decode_infl(decoded, &decoded_length, rule); if (ret != MOBI_SUCCESS) { return ret; } if (decoded_length == 0) { continue; } int n = snprintf(infl_tag, INDX_INFLBUF_SIZEMAX, iform_tag, name_attr, decoded); if (n > INDX_INFLBUF_SIZEMAX) { debug_print("Skipping truncated tag: %s\n", infl_tag); continue; } outlen += strlen(infl_tag); if (outlen > INDX_INFLTAG_SIZEMAX) { debug_print("Inflections text in %s too long (%zu)\n", label, outlen); return MOBI_ERROR; } strcat(outstring, infl_tag); } } if (outlen == initlen) { outstring[0] = '\0'; } else { strcat(outstring, end_tag); } return MOBI_SUCCESS; }
0
[ "CWE-703", "CWE-125" ]
libmobi
fb1ab50e448ddbed746fd27ae07469bc506d838b
300,271,360,489,014,300,000,000,000,000,000,000,000
86
Fix array boundary check when parsing inflections which could result in buffer over-read with corrupt input
R_API void r_egg_free(REgg *egg) { if (egg) { r_buf_free (egg->src); r_buf_free (egg->buf); r_buf_free (egg->bin); r_list_free (egg->list); r_asm_free (egg->rasm); r_syscall_free (egg->syscall); sdb_free (egg->db); r_list_free (egg->plugins); r_list_free (egg->patches); r_egg_lang_free (egg); free (egg); } }
0
[ "CWE-125" ]
radare2
e710401ebb4a892a87b0c709d709af8b5dcbbb01
183,363,271,337,069,630,000,000,000,000,000,000,000
15
patch #14211 heap buffer overflow in large ragg2 inputs. this should be refactored to use an RBuffer to enable dynamic resizing, but for now just patching it to bail out if we are about to overwrite the allocated statically sized buffer
sctp_disposition_t sctp_sf_eat_fwd_tsn_fast( const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; struct sctp_fwdtsn_hdr *fwdtsn_hdr; __u16 len; __u32 tsn; if (!sctp_vtag_verify(chunk, asoc)) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, SCTP_NULL()); return sctp_sf_pdiscard(ep, asoc, type, arg, commands); } /* Make sure that the FORWARD_TSN chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk))) return sctp_sf_violation_chunklen(ep, asoc, type, arg, commands); fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data; chunk->subh.fwdtsn_hdr = fwdtsn_hdr; len = ntohs(chunk->chunk_hdr->length); len -= sizeof(struct sctp_chunkhdr); skb_pull(chunk->skb, len); tsn = ntohl(fwdtsn_hdr->new_cum_tsn); SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __func__, tsn); /* The TSN is too high--silently discard the chunk and count on it * getting retransmitted later. */ if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0) goto gen_shutdown; sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn)); if (len > sizeof(struct sctp_fwdtsn_hdr)) sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN, SCTP_CHUNK(chunk)); /* Go a head and force a SACK, since we are shutting down. */ gen_shutdown: /* Implementor's Guide. * * While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately * respond to each received packet containing one or more DATA chunk(s) * with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer */ sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL()); sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE()); sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); return SCTP_DISPOSITION_CONSUME; }
1
[ "CWE-119" ]
linux-2.6
9fcb95a105758b81ef0131cd18e2db5149f13e95
44,099,590,711,358,880,000,000,000,000,000,000,000
58
sctp: Avoid memory overflow while FWD-TSN chunk is received with bad stream ID If FWD-TSN chunk is received with bad stream ID, the sctp will not do the validity check, this may cause memory overflow when overwrite the TSN of the stream ID. The FORWARD-TSN chunk is like this: FORWARD-TSN chunk Type = 192 Flags = 0 Length = 172 NewTSN = 99 Stream = 10000 StreamSequence = 0xFFFF This patch fix this problem by discard the chunk if stream ID is not less than MIS. Signed-off-by: Wei Yongjun <[email protected]> Signed-off-by: Vlad Yasevich <[email protected]> Signed-off-by: David S. Miller <[email protected]>
UsbHubAckHubStatus ( IN USB_DEVICE *HubDev ) { EFI_USB_PORT_STATUS HubState; EFI_STATUS Status; Status = UsbHubCtrlGetHubStatus (HubDev, (UINT32 *) &HubState); if (EFI_ERROR (Status)) { return Status; } if (USB_BIT_IS_SET (HubState.PortChangeStatus, USB_HUB_STAT_C_LOCAL_POWER)) { UsbHubCtrlClearHubFeature (HubDev, USB_HUB_C_HUB_LOCAL_POWER); } if (USB_BIT_IS_SET (HubState.PortChangeStatus, USB_HUB_STAT_C_OVER_CURRENT)) { UsbHubCtrlClearHubFeature (HubDev, USB_HUB_C_HUB_OVER_CURRENT); } return EFI_SUCCESS; }
0
[ "CWE-787" ]
edk2
acebdf14c985c5c9f50b37ece0b15ada87767359
75,206,444,899,490,620,000,000,000,000,000,000,000
23
MdeModulePkg UsbBusDxe: Fix wrong buffer length used to read hub desc REF: https://bugzilla.tianocore.org/show_bug.cgi?id=973 HUB descriptor has variable length. But the code uses stack (HubDesc in UsbHubInit) with fixed length sizeof(EFI_USB_HUB_DESCRIPTOR) to hold HUB descriptor data. It uses hard code length value (32 that is greater than sizeof(EFI_USB_HUB_DESCRIPTOR)) for SuperSpeed path, then there will be stack overflow when IOMMU is enabled because the Unmap operation will copy the data from device buffer to host buffer. And it uses HubDesc->Length for none SuperSpeed path, then there will be stack overflow when HubDesc->Length is greater than sizeof(EFI_USB_HUB_DESCRIPTOR). The patch updates the code to use a big enough buffer to hold the descriptor data. The definition EFI_USB_SUPER_SPEED_HUB_DESCRIPTOR is wrong (HubDelay field should be UINT16 type) and no code is using it, the patch removes it. Cc: Jiewen Yao <[email protected]> Cc: Ruiyu Ni <[email protected]> Cc: Bret Barkelew <[email protected]> Contributed-under: TianoCore Contribution Agreement 1.1 Signed-off-by: Star Zeng <[email protected]> Reviewed-by: Bret Barkelew <[email protected]>
void auth_request_log_error(struct auth_request *auth_request, const char *subsystem, const char *format, ...) { struct event *event = get_request_event(auth_request, subsystem); va_list va; va_start(va, format); T_BEGIN { string_t *str = t_str_new(128); str_vprintfa(str, format, va); e_error(event, "%s", str_c(str)); } T_END; va_end(va); }
0
[ "CWE-284" ]
core
7bad6a24160e34bce8f10e73dbbf9e5fbbcd1904
98,636,302,629,229,880,000,000,000,000,000,000,000
15
auth: Fix handling passdbs with identical driver/args but different mechanisms/username_filter The passdb was wrongly deduplicated in this situation, causing wrong mechanisms or username_filter setting to be used. This would be a rather unlikely configuration though. Fixed by moving mechanisms and username_filter from struct passdb_module to struct auth_passdb, which is where they should have been in the first place.
static long ebcdic_ctrl(BIO *b, int cmd, long num, void *ptr) { long ret; if (b->next_bio == NULL) return (0); switch (cmd) { case BIO_CTRL_DUP: ret = 0L; break; default: ret = BIO_ctrl(b->next_bio, cmd, num, ptr); break; } return (ret); }
0
[ "CWE-399" ]
openssl
380f18ed5f140e0ae1b68f3ab8f4f7c395658d9e
283,117,869,575,567,840,000,000,000,000,000,000,000
16
CVE-2016-0798: avoid memory leak in SRP The SRP user database lookup method SRP_VBASE_get_by_user had confusing memory management semantics; the returned pointer was sometimes newly allocated, and sometimes owned by the callee. The calling code has no way of distinguishing these two cases. Specifically, SRP servers that configure a secret seed to hide valid login information are vulnerable to a memory leak: an attacker connecting with an invalid username can cause a memory leak of around 300 bytes per connection. Servers that do not configure SRP, or configure SRP but do not configure a seed are not vulnerable. In Apache, the seed directive is known as SSLSRPUnknownUserSeed. To mitigate the memory leak, the seed handling in SRP_VBASE_get_by_user is now disabled even if the user has configured a seed. Applications are advised to migrate to SRP_VBASE_get1_by_user. However, note that OpenSSL makes no strong guarantees about the indistinguishability of valid and invalid logins. In particular, computations are currently not carried out in constant time. Reviewed-by: Rich Salz <[email protected]>
static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs) { struct sigframe __user *frame; int err = 0; int signal; frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; signal = current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig; err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); /* Give up earlier as i386, in case */ if (err) goto give_sigsegv; if (_NSIG_WORDS > 1) { err |= __copy_to_user(frame->extramask, &set->sig[1], sizeof(frame->extramask)); } /* Give up earlier as i386, in case */ if (err) goto give_sigsegv; /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa.sa_flags & SA_RESTORER) { DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1; /* * On SH5 all edited pointers are subject to NEFF */ DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; } else { /* * Different approach on SH5. * . Endianness independent asm code gets placed in entry.S . * This is limited to four ASM instructions corresponding * to two long longs in size. * . err checking is done on the else branch only * . flush_icache_range() is called upon __put_user() only * . all edited pointers are subject to NEFF * . being code, linker turns ShMedia bit on, always * dereference index -1. */ DEREF_REG_PR = (unsigned long) frame->retcode | 0x01; DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; if (__copy_to_user(frame->retcode, (void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0) goto give_sigsegv; /* Cohere the trampoline with the I-cache. */ flush_cache_sigtramp(DEREF_REG_PR-1); } /* * Set up registers for signal handler. * All edited pointers are subject to NEFF. 
*/ regs->regs[REG_SP] = (unsigned long) frame; regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ? (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP]; regs->regs[REG_ARG1] = signal; /* Arg for signal handler */ /* FIXME: The glibc profiling support for SH-5 needs to be passed a sigcontext so it can retrieve the PC. At some point during 2003 the glibc support was changed to receive the sigcontext through the 2nd argument, but there are still versions of libc.so in use that use the 3rd argument. Until libc.so is stabilised, pass the sigcontext through both 2nd and 3rd arguments. */ regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc; regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc; regs->pc = (unsigned long) ka->sa.sa_handler; regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc; set_fs(USER_DS); /* Broken %016Lx */ pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n", signal, current->comm, current->pid, frame, regs->pc >> 32, regs->pc & 0xffffffff, DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff); return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; }
0
[]
linux-2.6
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
153,381,219,701,835,070,000,000,000,000,000,000,000
104
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6] Add a keyctl to install a process's session keyring onto its parent. This replaces the parent's session keyring. Because the COW credential code does not permit one process to change another process's credentials directly, the change is deferred until userspace next starts executing again. Normally this will be after a wait*() syscall. To support this, three new security hooks have been provided: cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in the blank security creds and key_session_to_parent() - which asks the LSM if the process may replace its parent's session keyring. The replacement may only happen if the process has the same ownership details as its parent, and the process has LINK permission on the session keyring, and the session keyring is owned by the process, and the LSM permits it. Note that this requires alteration to each architecture's notify_resume path. This has been done for all arches barring blackfin, m68k* and xtensa, all of which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the replacement to be performed at the point the parent process resumes userspace execution. This allows the userspace AFS pioctl emulation to fully emulate newpag() and the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to alter the parent process's PAG membership. However, since kAFS doesn't use PAGs per se, but rather dumps the keys into the session keyring, the session keyring of the parent must be replaced if, for example, VIOCSETTOK is passed the newpag flag. 
This can be tested with the following program: #include <stdio.h> #include <stdlib.h> #include <keyutils.h> #define KEYCTL_SESSION_TO_PARENT 18 #define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0) int main(int argc, char **argv) { key_serial_t keyring, key; long ret; keyring = keyctl_join_session_keyring(argv[1]); OSERROR(keyring, "keyctl_join_session_keyring"); key = add_key("user", "a", "b", 1, keyring); OSERROR(key, "add_key"); ret = keyctl(KEYCTL_SESSION_TO_PARENT); OSERROR(ret, "KEYCTL_SESSION_TO_PARENT"); return 0; } Compiled and linked with -lkeyutils, you should see something like: [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 355907932 --alswrv 4043 -1 \_ keyring: _uid.4043 [dhowells@andromeda ~]$ /tmp/newpag [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 1055658746 --alswrv 4043 4043 \_ user: a [dhowells@andromeda ~]$ /tmp/newpag hello [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: hello 340417692 --alswrv 4043 4043 \_ user: a Where the test program creates a new session keyring, sticks a user key named 'a' into it and then installs it on its parent. Signed-off-by: David Howells <[email protected]> Signed-off-by: James Morris <[email protected]>
int lstat_cache_aware_rmdir(const char *path) { /* Any change in this function must be made also in `mingw_rmdir()` */ int ret = rmdir(path); if (!ret) invalidate_lstat_cache(); return ret; }
0
[ "CWE-59", "CWE-61" ]
git
684dd4c2b414bcf648505e74498a608f28de4592
268,154,461,426,149,150,000,000,000,000,000,000,000
10
checkout: fix bug that makes checkout follow symlinks in leading path Before checking out a file, we have to confirm that all of its leading components are real existing directories. And to reduce the number of lstat() calls in this process, we cache the last leading path known to contain only directories. However, when a path collision occurs (e.g. when checking out case-sensitive files in case-insensitive file systems), a cached path might have its file type changed on disk, leaving the cache on an invalid state. Normally, this doesn't bring any bad consequences as we usually check out files in index order, and therefore, by the time the cached path becomes outdated, we no longer need it anyway (because all files in that directory would have already been written). But, there are some users of the checkout machinery that do not always follow the index order. In particular: checkout-index writes the paths in the same order that they appear on the CLI (or stdin); and the delayed checkout feature -- used when a long-running filter process replies with "status=delayed" -- postpones the checkout of some entries, thus modifying the checkout order. When we have to check out an out-of-order entry and the lstat() cache is invalid (due to a previous path collision), checkout_entry() may end up using the invalid data and thrusting that the leading components are real directories when, in reality, they are not. In the best case scenario, where the directory was replaced by a regular file, the user will get an error: "fatal: unable to create file 'foo/bar': Not a directory". But if the directory was replaced by a symlink, checkout could actually end up following the symlink and writing the file at a wrong place, even outside the repository. Since delayed checkout is affected by this bug, it could be used by an attacker to write arbitrary files during the clone of a maliciously crafted repository. 
Some candidate solutions considered were to disable the lstat() cache during unordered checkouts or sort the entries before passing them to the checkout machinery. But both ideas include some performance penalty and they don't future-proof the code against new unordered use cases. Instead, we now manually reset the lstat cache whenever we successfully remove a directory. Note: We are not even checking whether the directory was the same as the lstat cache points to because we might face a scenario where the paths refer to the same location but differ due to case folding, precomposed UTF-8 issues, or the presence of `..` components in the path. Two regression tests, with case-collisions and utf8-collisions, are also added for both checkout-index and delayed checkout. Note: to make the previously mentioned clone attack unfeasible, it would be sufficient to reset the lstat cache only after the remove_subtree() call inside checkout_entry(). This is the place where we would remove a directory whose path collides with the path of another entry that we are currently trying to check out (possibly a symlink). However, in the interest of a thorough fix that does not leave Git open to similar-but-not-identical attack vectors, we decided to intercept all `rmdir()` calls in one fell swoop. This addresses CVE-2021-21300. Co-authored-by: Johannes Schindelin <[email protected]> Signed-off-by: Matheus Tavares <[email protected]>
bool Item_hex_constant::eq(const Item *arg, bool binary_cmp) const { if (arg->basic_const_item() && arg->type() == type() && arg->cast_to_int_type() == cast_to_int_type()) { if (binary_cmp) return !stringcmp(&str_value, &arg->str_value); return !sortcmp(&str_value, &arg->str_value, collation.collation); } return FALSE; }
0
[]
server
b000e169562697aa072600695d4f0c0412f94f4f
23,308,330,674,468,137,000,000,000,000,000,000,000
11
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL)) based on: commit f7316aa0c9a Author: Ajo Robert <[email protected]> Date: Thu Aug 24 17:03:21 2017 +0530 Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL)) Backport of Bug#19143243 fix. NAME_CONST item can return NULL_ITEM type in case of incorrect arguments. NULL_ITEM has special processing in Item_func_in function. In Item_func_in::fix_length_and_dec an array of possible comparators is created. Since NAME_CONST function has NULL_ITEM type, corresponding array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE. ITEM_CACHE can not return proper type(NULL_ITEM) in Item_func_in::val_int(), so the NULL_ITEM is attempted compared with an empty comparator. The fix is to disable the caching of Item_name_const item.
svcauth_gss_destroy(SVCAUTH *auth) { struct svc_rpc_gss_data *gd; OM_uint32 min_stat; log_debug("in svcauth_gss_destroy()"); gd = SVCAUTH_PRIVATE(auth); gss_delete_sec_context(&min_stat, &gd->ctx, GSS_C_NO_BUFFER); gss_release_buffer(&min_stat, &gd->cname); gss_release_buffer(&min_stat, &gd->checksum); if (gd->client_name) gss_release_name(&min_stat, &gd->client_name); mem_free(gd, sizeof(*gd)); mem_free(auth, sizeof(*auth)); return (TRUE); }
0
[ "CWE-200" ]
krb5
5bb8a6b9c9eb8dd22bc9526751610aaa255ead9c
272,674,287,954,367,300,000,000,000,000,000,000,000
21
Fix gssrpc data leakage [CVE-2014-9423] [MITKRB5-SA-2015-001] In svcauth_gss_accept_sec_context(), do not copy bytes from the union context into the handle field we send to the client. We do not use this handle field, so just supply a fixed string of "xxxx". In gss_union_ctx_id_struct, remove the unused "interposer" field which was causing part of the union context to remain uninitialized. ticket: 8058 (new) target_version: 1.13.1 tags: pullup
static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta, u32 curr, const bool probe_pass) { const s32 off_min = S16_MIN, off_max = S16_MAX; s32 off = insn->off; if (curr < pos && curr + off + 1 > pos) off += delta; else if (curr > pos + delta && curr + off + 1 <= pos + delta) off -= delta; if (off < off_min || off > off_max) return -ERANGE; if (!probe_pass) insn->off = off; return 0; }
0
[ "CWE-120" ]
linux
050fad7c4534c13c8eb1d9c2ba66012e014773cb
141,157,251,856,020,550,000,000,000,000,000,000,000
16
bpf: fix truncated jump targets on heavy expansions Recently during testing, I ran into the following panic: [ 207.892422] Internal error: Accessing user space memory outside uaccess.h routines: 96000004 [#1] SMP [ 207.901637] Modules linked in: binfmt_misc [...] [ 207.966530] CPU: 45 PID: 2256 Comm: test_verifier Tainted: G W 4.17.0-rc3+ #7 [ 207.974956] Hardware name: FOXCONN R2-1221R-A4/C2U4N_MB, BIOS G31FB18A 03/31/2017 [ 207.982428] pstate: 60400005 (nZCv daif +PAN -UAO) [ 207.987214] pc : bpf_skb_load_helper_8_no_cache+0x34/0xc0 [ 207.992603] lr : 0xffff000000bdb754 [ 207.996080] sp : ffff000013703ca0 [ 207.999384] x29: ffff000013703ca0 x28: 0000000000000001 [ 208.004688] x27: 0000000000000001 x26: 0000000000000000 [ 208.009992] x25: ffff000013703ce0 x24: ffff800fb4afcb00 [ 208.015295] x23: ffff00007d2f5038 x22: ffff00007d2f5000 [ 208.020599] x21: fffffffffeff2a6f x20: 000000000000000a [ 208.025903] x19: ffff000009578000 x18: 0000000000000a03 [ 208.031206] x17: 0000000000000000 x16: 0000000000000000 [ 208.036510] x15: 0000ffff9de83000 x14: 0000000000000000 [ 208.041813] x13: 0000000000000000 x12: 0000000000000000 [ 208.047116] x11: 0000000000000001 x10: ffff0000089e7f18 [ 208.052419] x9 : fffffffffeff2a6f x8 : 0000000000000000 [ 208.057723] x7 : 000000000000000a x6 : 00280c6160000000 [ 208.063026] x5 : 0000000000000018 x4 : 0000000000007db6 [ 208.068329] x3 : 000000000008647a x2 : 19868179b1484500 [ 208.073632] x1 : 0000000000000000 x0 : ffff000009578c08 [ 208.078938] Process test_verifier (pid: 2256, stack limit = 0x0000000049ca7974) [ 208.086235] Call trace: [ 208.088672] bpf_skb_load_helper_8_no_cache+0x34/0xc0 [ 208.093713] 0xffff000000bdb754 [ 208.096845] bpf_test_run+0x78/0xf8 [ 208.100324] bpf_prog_test_run_skb+0x148/0x230 [ 208.104758] sys_bpf+0x314/0x1198 [ 208.108064] el0_svc_naked+0x30/0x34 [ 208.111632] Code: 91302260 f9400001 f9001fa1 d2800001 (29500680) [ 208.117717] ---[ end trace 263cb8a59b5bf29f ]--- The program itself which caused this had a 
long jump over the whole instruction sequence where all of the inner instructions required heavy expansions into multiple BPF instructions. Additionally, I also had BPF hardening enabled which requires once more rewrites of all constant values in order to blind them. Each time we rewrite insns, bpf_adj_branches() would need to potentially adjust branch targets which cross the patchlet boundary to accommodate for the additional delta. Eventually that lead to the case where the target offset could not fit into insn->off's upper 0x7fff limit anymore where then offset wraps around becoming negative (in s16 universe), or vice versa depending on the jump direction. Therefore it becomes necessary to detect and reject any such occasions in a generic way for native eBPF and cBPF to eBPF migrations. For the latter we can simply check bounds in the bpf_convert_filter()'s BPF_EMIT_JMP helper macro and bail out once we surpass limits. The bpf_patch_insn_single() for native eBPF (and cBPF to eBPF in case of subsequent hardening) is a bit more complex in that we need to detect such truncations before hitting the bpf_prog_realloc(). Thus the latter is split into an extra pass to probe problematic offsets on the original program in order to fail early. With that in place and carefully tested I no longer hit the panic and the rewrites are rejected properly. The above example panic I've seen on bpf-next, though the issue itself is generic in that a guard against this issue in bpf seems more appropriate in this case. Signed-off-by: Daniel Borkmann <[email protected]> Acked-by: Martin KaFai Lau <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]>
static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes) { int net_id = tipc_netid(n->net); struct tipc_net *tn_peer; struct net *tmp; u32 hash_chk; if (n->peer_net) return; for_each_net_rcu(tmp) { tn_peer = tipc_net(tmp); if (!tn_peer) continue; /* Integrity checking whether node exists in namespace or not */ if (tn_peer->net_id != net_id) continue; if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN)) continue; hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random); if (hash_mixes ^ hash_chk) continue; n->peer_net = tmp; n->peer_hash_mix = hash_mixes; break; } }
0
[]
linux
0217ed2848e8538bcf9172d97ed2eeb4a26041bb
271,838,336,926,301,800,000,000,000,000,000,000,000
27
tipc: better validate user input in tipc_nl_retrieve_key() Before calling tipc_aead_key_size(ptr), we need to ensure we have enough data to dereference ptr->keylen. We probably also want to make sure tipc_aead_key_size() wont overflow with malicious ptr->keylen values. Syzbot reported: BUG: KMSAN: uninit-value in __tipc_nl_node_set_key net/tipc/node.c:2971 [inline] BUG: KMSAN: uninit-value in tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023 CPU: 0 PID: 21060 Comm: syz-executor.5 Not tainted 5.11.0-rc7-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:79 [inline] dump_stack+0x21c/0x280 lib/dump_stack.c:120 kmsan_report+0xfb/0x1e0 mm/kmsan/kmsan_report.c:118 __msan_warning+0x5f/0xa0 mm/kmsan/kmsan_instr.c:197 __tipc_nl_node_set_key net/tipc/node.c:2971 [inline] tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023 genl_family_rcv_msg_doit net/netlink/genetlink.c:739 [inline] genl_family_rcv_msg net/netlink/genetlink.c:783 [inline] genl_rcv_msg+0x1319/0x1610 net/netlink/genetlink.c:800 netlink_rcv_skb+0x6fa/0x810 net/netlink/af_netlink.c:2494 genl_rcv+0x63/0x80 net/netlink/genetlink.c:811 netlink_unicast_kernel net/netlink/af_netlink.c:1304 [inline] netlink_unicast+0x11d6/0x14a0 net/netlink/af_netlink.c:1330 netlink_sendmsg+0x1740/0x1840 net/netlink/af_netlink.c:1919 sock_sendmsg_nosec net/socket.c:652 [inline] sock_sendmsg net/socket.c:672 [inline] ____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345 ___sys_sendmsg net/socket.c:2399 [inline] __sys_sendmsg+0x714/0x830 net/socket.c:2432 __compat_sys_sendmsg net/compat.c:347 [inline] __do_compat_sys_sendmsg net/compat.c:354 [inline] __se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351 __ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351 do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline] __do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141 do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166 
do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209 entry_SYSENTER_compat_after_hwframe+0x4d/0x5c RIP: 0023:0xf7f60549 Code: 03 74 c0 01 10 05 03 74 b8 01 10 06 03 74 b4 01 10 07 03 74 b0 01 10 08 03 74 d8 01 00 00 00 00 00 51 52 55 89 e5 0f 34 cd 80 <5d> 5a 59 c3 90 90 90 90 8d b4 26 00 00 00 00 8d b4 26 00 00 00 00 RSP: 002b:00000000f555a5fc EFLAGS: 00000296 ORIG_RAX: 0000000000000172 RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 0000000020000200 RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000 RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000 R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 Uninit was created at: kmsan_save_stack_with_flags mm/kmsan/kmsan.c:121 [inline] kmsan_internal_poison_shadow+0x5c/0xf0 mm/kmsan/kmsan.c:104 kmsan_slab_alloc+0x8d/0xe0 mm/kmsan/kmsan_hooks.c:76 slab_alloc_node mm/slub.c:2907 [inline] __kmalloc_node_track_caller+0xa37/0x1430 mm/slub.c:4527 __kmalloc_reserve net/core/skbuff.c:142 [inline] __alloc_skb+0x2f8/0xb30 net/core/skbuff.c:210 alloc_skb include/linux/skbuff.h:1099 [inline] netlink_alloc_large_skb net/netlink/af_netlink.c:1176 [inline] netlink_sendmsg+0xdbc/0x1840 net/netlink/af_netlink.c:1894 sock_sendmsg_nosec net/socket.c:652 [inline] sock_sendmsg net/socket.c:672 [inline] ____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345 ___sys_sendmsg net/socket.c:2399 [inline] __sys_sendmsg+0x714/0x830 net/socket.c:2432 __compat_sys_sendmsg net/compat.c:347 [inline] __do_compat_sys_sendmsg net/compat.c:354 [inline] __se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351 __ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351 do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline] __do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141 do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166 do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209 entry_SYSENTER_compat_after_hwframe+0x4d/0x5c Fixes: e1f32190cf7d ("tipc: add 
support for AEAD key setting via netlink") Signed-off-by: Eric Dumazet <[email protected]> Cc: Tuong Lien <[email protected]> Cc: Jon Maloy <[email protected]> Cc: Ying Xue <[email protected]> Reported-by: syzbot <[email protected]> Signed-off-by: David S. Miller <[email protected]>
int router_differences_are_cosmetic(const routerinfo_t *r1, const routerinfo_t *r2) { time_t r1pub, r2pub; long time_difference; tor_assert(r1 && r2); /* r1 should be the one that was published first. */ if (r1->cache_info.published_on > r2->cache_info.published_on) { const routerinfo_t *ri_tmp = r2; r2 = r1; r1 = ri_tmp; } /* If any key fields differ, they're different. */ if (r1->addr != r2->addr || strcasecmp(r1->nickname, r2->nickname) || r1->or_port != r2->or_port || !tor_addr_eq(&r1->ipv6_addr, &r2->ipv6_addr) || r1->ipv6_orport != r2->ipv6_orport || r1->dir_port != r2->dir_port || r1->purpose != r2->purpose || !crypto_pk_eq_keys(r1->onion_pkey, r2->onion_pkey) || !crypto_pk_eq_keys(r1->identity_pkey, r2->identity_pkey) || strcasecmp(r1->platform, r2->platform) || (r1->contact_info && !r2->contact_info) || /* contact_info is optional */ (!r1->contact_info && r2->contact_info) || (r1->contact_info && r2->contact_info && strcasecmp(r1->contact_info, r2->contact_info)) || r1->is_hibernating != r2->is_hibernating || cmp_addr_policies(r1->exit_policy, r2->exit_policy) || (r1->supports_tunnelled_dir_requests != r2->supports_tunnelled_dir_requests)) return 0; if ((r1->declared_family == NULL) != (r2->declared_family == NULL)) return 0; if (r1->declared_family && r2->declared_family) { int i, n; if (smartlist_len(r1->declared_family)!=smartlist_len(r2->declared_family)) return 0; n = smartlist_len(r1->declared_family); for (i=0; i < n; ++i) { if (strcasecmp(smartlist_get(r1->declared_family, i), smartlist_get(r2->declared_family, i))) return 0; } } /* Did bandwidth change a lot? */ if ((r1->bandwidthcapacity < r2->bandwidthcapacity/2) || (r2->bandwidthcapacity < r1->bandwidthcapacity/2)) return 0; /* Did the bandwidthrate or bandwidthburst change? */ if ((r1->bandwidthrate != r2->bandwidthrate) || (r1->bandwidthburst != r2->bandwidthburst)) return 0; /* Did more than 12 hours pass? 
*/ if (r1->cache_info.published_on + ROUTER_MAX_COSMETIC_TIME_DIFFERENCE < r2->cache_info.published_on) return 0; /* Did uptime fail to increase by approximately the amount we would think, * give or take some slop? */ r1pub = r1->cache_info.published_on; r2pub = r2->cache_info.published_on; time_difference = labs(r2->uptime - (r1->uptime + (r2pub - r1pub))); if (time_difference > ROUTER_ALLOW_UPTIME_DRIFT && time_difference > r1->uptime * .05 && time_difference > r2->uptime * .05) return 0; /* Otherwise, the difference is cosmetic. */ return 1;
1
[]
tor
1afc2ed956a35b40dfd1d207652af5b50c295da7
117,352,906,012,763,910,000,000,000,000,000,000,000
75
Fix policies.c instance of the "if (r=(a-b)) return r" pattern I think this one probably can't underflow, since the input ranges are small. But let's not tempt fate. This patch also replaces the "cmp" functions here with just "eq" functions, since nothing actually checked for anything besides 0 and nonzero. Related to 21278.
GF_Err cprt_dump(GF_Box *a, FILE * trace) { GF_CopyrightBox *p; p = (GF_CopyrightBox *)a; gf_isom_box_dump_start(a, "CopyrightBox", trace); fprintf(trace, "LanguageCode=\"%s\" CopyrightNotice=\"%s\">\n", p->packedLanguageCode, p->notice); gf_isom_box_dump_done("CopyrightBox", a, trace); return GF_OK; }
0
[ "CWE-125" ]
gpac
bceb03fd2be95097a7b409ea59914f332fb6bc86
269,072,427,334,602,500,000,000,000,000,000,000,000
10
fixed 2 possible heap overflows (inc. #1088)
void ConnectionManagerImpl::doDeferredStreamDestroy(ActiveStream& stream) { if (stream.max_stream_duration_timer_) { stream.max_stream_duration_timer_->disableTimer(); stream.max_stream_duration_timer_ = nullptr; } if (stream.stream_idle_timer_ != nullptr) { stream.stream_idle_timer_->disableTimer(); stream.stream_idle_timer_ = nullptr; } stream.filter_manager_.disarmRequestTimeout(); if (stream.request_header_timer_ != nullptr) { stream.request_header_timer_->disableTimer(); stream.request_header_timer_ = nullptr; } stream.completeRequest(); stream.filter_manager_.onStreamComplete(); stream.filter_manager_.log(); stream.filter_manager_.destroyFilters(); read_callbacks_->connection().dispatcher().deferredDelete(stream.removeFromList(streams_)); // The response_encoder should never be dangling (unless we're destroying a // stream we are recreating) as the codec level stream will either outlive the // ActiveStream, or be alive in deferred deletion queue at this point. if (stream.response_encoder_) { stream.response_encoder_->getStream().removeCallbacks(stream); } if (connection_idle_timer_ && streams_.empty()) { connection_idle_timer_->enableTimer(config_.idleTimeout().value()); } }
0
[ "CWE-416" ]
envoy
148de954ed3585d8b4298b424aa24916d0de6136
186,923,623,088,204,260,000,000,000,000,000,000,000
34
CVE-2021-43825 Response filter manager crash Signed-off-by: Yan Avlasov <[email protected]>
u32 tipc_node_get_addr(struct tipc_node *node) { return (node) ? node->addr : 0; }
0
[]
linux
0217ed2848e8538bcf9172d97ed2eeb4a26041bb
215,973,246,284,203,400,000,000,000,000,000,000,000
4
tipc: better validate user input in tipc_nl_retrieve_key() Before calling tipc_aead_key_size(ptr), we need to ensure we have enough data to dereference ptr->keylen. We probably also want to make sure tipc_aead_key_size() wont overflow with malicious ptr->keylen values. Syzbot reported: BUG: KMSAN: uninit-value in __tipc_nl_node_set_key net/tipc/node.c:2971 [inline] BUG: KMSAN: uninit-value in tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023 CPU: 0 PID: 21060 Comm: syz-executor.5 Not tainted 5.11.0-rc7-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:79 [inline] dump_stack+0x21c/0x280 lib/dump_stack.c:120 kmsan_report+0xfb/0x1e0 mm/kmsan/kmsan_report.c:118 __msan_warning+0x5f/0xa0 mm/kmsan/kmsan_instr.c:197 __tipc_nl_node_set_key net/tipc/node.c:2971 [inline] tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023 genl_family_rcv_msg_doit net/netlink/genetlink.c:739 [inline] genl_family_rcv_msg net/netlink/genetlink.c:783 [inline] genl_rcv_msg+0x1319/0x1610 net/netlink/genetlink.c:800 netlink_rcv_skb+0x6fa/0x810 net/netlink/af_netlink.c:2494 genl_rcv+0x63/0x80 net/netlink/genetlink.c:811 netlink_unicast_kernel net/netlink/af_netlink.c:1304 [inline] netlink_unicast+0x11d6/0x14a0 net/netlink/af_netlink.c:1330 netlink_sendmsg+0x1740/0x1840 net/netlink/af_netlink.c:1919 sock_sendmsg_nosec net/socket.c:652 [inline] sock_sendmsg net/socket.c:672 [inline] ____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345 ___sys_sendmsg net/socket.c:2399 [inline] __sys_sendmsg+0x714/0x830 net/socket.c:2432 __compat_sys_sendmsg net/compat.c:347 [inline] __do_compat_sys_sendmsg net/compat.c:354 [inline] __se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351 __ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351 do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline] __do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141 do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166 
do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209 entry_SYSENTER_compat_after_hwframe+0x4d/0x5c RIP: 0023:0xf7f60549 Code: 03 74 c0 01 10 05 03 74 b8 01 10 06 03 74 b4 01 10 07 03 74 b0 01 10 08 03 74 d8 01 00 00 00 00 00 51 52 55 89 e5 0f 34 cd 80 <5d> 5a 59 c3 90 90 90 90 8d b4 26 00 00 00 00 8d b4 26 00 00 00 00 RSP: 002b:00000000f555a5fc EFLAGS: 00000296 ORIG_RAX: 0000000000000172 RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 0000000020000200 RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000 RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000 R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 Uninit was created at: kmsan_save_stack_with_flags mm/kmsan/kmsan.c:121 [inline] kmsan_internal_poison_shadow+0x5c/0xf0 mm/kmsan/kmsan.c:104 kmsan_slab_alloc+0x8d/0xe0 mm/kmsan/kmsan_hooks.c:76 slab_alloc_node mm/slub.c:2907 [inline] __kmalloc_node_track_caller+0xa37/0x1430 mm/slub.c:4527 __kmalloc_reserve net/core/skbuff.c:142 [inline] __alloc_skb+0x2f8/0xb30 net/core/skbuff.c:210 alloc_skb include/linux/skbuff.h:1099 [inline] netlink_alloc_large_skb net/netlink/af_netlink.c:1176 [inline] netlink_sendmsg+0xdbc/0x1840 net/netlink/af_netlink.c:1894 sock_sendmsg_nosec net/socket.c:652 [inline] sock_sendmsg net/socket.c:672 [inline] ____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345 ___sys_sendmsg net/socket.c:2399 [inline] __sys_sendmsg+0x714/0x830 net/socket.c:2432 __compat_sys_sendmsg net/compat.c:347 [inline] __do_compat_sys_sendmsg net/compat.c:354 [inline] __se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351 __ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351 do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline] __do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141 do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166 do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209 entry_SYSENTER_compat_after_hwframe+0x4d/0x5c Fixes: e1f32190cf7d ("tipc: add 
support for AEAD key setting via netlink") Signed-off-by: Eric Dumazet <[email protected]> Cc: Tuong Lien <[email protected]> Cc: Jon Maloy <[email protected]> Cc: Ying Xue <[email protected]> Reported-by: syzbot <[email protected]> Signed-off-by: David S. Miller <[email protected]>
set_interp_require(bool trusted) { if (trusted) { PL_ppaddr[OP_REQUIRE] = pp_require_safe; PL_ppaddr[OP_DOFILE] = pp_require_safe; } else { PL_ppaddr[OP_REQUIRE] = pp_require_orig; PL_ppaddr[OP_DOFILE] = pp_require_orig; } }
0
[ "CWE-264" ]
postgres
537cbd35c893e67a63c59bc636c3e888bd228bc7
265,607,184,043,628,230,000,000,000,000,000,000,000
13
Prevent privilege escalation in explicit calls to PL validators. The primary role of PL validators is to be called implicitly during CREATE FUNCTION, but they are also normal functions that a user can call explicitly. Add a permissions check to each validator to ensure that a user cannot use explicit validator calls to achieve things he could not otherwise achieve. Back-patch to 8.4 (all supported versions). Non-core procedural language extensions ought to make the same two-line change to their own validators. Andres Freund, reviewed by Tom Lane and Noah Misch. Security: CVE-2014-0061
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *regs = qdev->mem_map_registers;
	u32 mask = MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF;
	u32 value;

	/*
	 * The high half of the register is the write-enable mask for the
	 * corresponding bits in the low half; always arm TF/RF there, and
	 * set the low-half bits only when pause is being enabled.
	 */
	value = mask << 16;
	if (enable)
		value |= mask;

	/* Each MAC has its own config register; pick by port index. */
	ql_write_page0_reg(qdev,
			   qdev->mac_index ? &regs->mac1ConfigReg
					   : &regs->mac0ConfigReg,
			   value);
}
0
[ "CWE-401" ]
linux
1acb8f2a7a9f10543868ddd737e37424d5c36cf4
234,955,456,868,767,640,000,000,000,000,000,000,000
18
net: qlogic: Fix memory leak in ql_alloc_large_buffers In ql_alloc_large_buffers, a new skb is allocated via netdev_alloc_skb. This skb should be released if pci_dma_mapping_error fails. Fixes: 0f8ab89e825f ("qla3xxx: Check return code from pci_map_single() in ql_release_to_lrg_buf_free_list(), ql_populate_free_queue(), ql_alloc_large_buffers(), and ql3xxx_send()") Signed-off-by: Navid Emamdoost <[email protected]> Signed-off-by: David S. Miller <[email protected]>
next_packet (unsigned char const **bufptr, size_t *buflen, unsigned char const **r_data, size_t *r_datalen, int *r_pkttype, size_t *r_ntotal) { const unsigned char *buf = *bufptr; size_t len = *buflen; int c, ctb, pkttype; unsigned long pktlen; if (!len) return gpg_error (GPG_ERR_NO_DATA); ctb = *buf++; len--; if ( !(ctb & 0x80) ) return gpg_error (GPG_ERR_INV_PACKET); /* Invalid CTB. */ pktlen = 0; if ((ctb & 0x40)) /* New style (OpenPGP) CTB. */ { pkttype = (ctb & 0x3f); if (!len) return gpg_error (GPG_ERR_INV_PACKET); /* No 1st length byte. */ c = *buf++; len--; if (pkttype == PKT_COMPRESSED) return gpg_error (GPG_ERR_UNEXPECTED); /* ... packet in a keyblock. */ if ( c < 192 ) pktlen = c; else if ( c < 224 ) { pktlen = (c - 192) * 256; if (!len) return gpg_error (GPG_ERR_INV_PACKET); /* No 2nd length byte. */ c = *buf++; len--; pktlen += c + 192; } else if (c == 255) { if (len <4 ) return gpg_error (GPG_ERR_INV_PACKET); /* No length bytes. */ pktlen = (*buf++) << 24; pktlen |= (*buf++) << 16; pktlen |= (*buf++) << 8; pktlen |= (*buf++); len -= 4; } else /* Partial length encoding is not allowed for key packets. */ return gpg_error (GPG_ERR_UNEXPECTED); } else /* Old style CTB. */ { int lenbytes; pktlen = 0; pkttype = (ctb>>2)&0xf; lenbytes = ((ctb&3)==3)? 0 : (1<<(ctb & 3)); if (!lenbytes) /* Not allowed in key packets. */ return gpg_error (GPG_ERR_UNEXPECTED); if (len < lenbytes) return gpg_error (GPG_ERR_INV_PACKET); /* Not enough length bytes. */ for (; lenbytes; lenbytes--) { pktlen <<= 8; pktlen |= *buf++; len--; } } /* Do some basic sanity check. */ switch (pkttype) { case PKT_SIGNATURE: case PKT_SECRET_KEY: case PKT_PUBLIC_KEY: case PKT_SECRET_SUBKEY: case PKT_MARKER: case PKT_RING_TRUST: case PKT_USER_ID: case PKT_PUBLIC_SUBKEY: case PKT_OLD_COMMENT: case PKT_ATTRIBUTE: case PKT_COMMENT: case PKT_GPG_CONTROL: break; /* Okay these are allowed packets. 
*/ default: return gpg_error (GPG_ERR_UNEXPECTED); } if (pktlen == (unsigned long)(-1)) return gpg_error (GPG_ERR_INV_PACKET); if (pktlen > len) return gpg_error (GPG_ERR_INV_PACKET); /* Packet length header too long. */ *r_data = buf; *r_datalen = pktlen; *r_pkttype = pkttype; *r_ntotal = (buf - *bufptr) + pktlen; *bufptr = buf + pktlen; *buflen = len - pktlen; if (!*buflen) *bufptr = NULL; return 0; }
1
[ "CWE-20" ]
gnupg
2183683bd633818dd031b090b5530951de76f392
257,739,300,163,296,070,000,000,000,000,000,000,000
104
Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>
static int decode_pathconf(struct xdr_stream *xdr, struct nfs_pathconf *pathconf) { __be32 *savep; uint32_t attrlen, bitmap[2] = {0}; int status; if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) goto xdr_error; if ((status = decode_attr_bitmap(xdr, bitmap)) != 0) goto xdr_error; if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0) goto xdr_error; if ((status = decode_attr_maxlink(xdr, bitmap, &pathconf->max_link)) != 0) goto xdr_error; if ((status = decode_attr_maxname(xdr, bitmap, &pathconf->max_namelen)) != 0) goto xdr_error; status = verify_attr_len(xdr, savep, attrlen); xdr_error: dprintk("%s: xdr returned %d!\n", __func__, -status); return status; }
0
[ "CWE-703" ]
linux
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
146,280,553,292,779,540,000,000,000,000,000,000,000
24
NFSv4: Convert the open and close ops to use fmode Signed-off-by: Trond Myklebust <[email protected]>
/*
 * Machine-generated Zend VM opcode handler (specialized for an UNUSED
 * first operand and CONST second operand): initialize an empty array in
 * the opline's result temporary.
 *
 * NOTE: the IS_UNUSED == IS_UNUSED comparison and the "#if 0 || ..."
 * block are artifacts of the VM specializer; in this specialization the
 * condition is a compile-time constant, so the else-branch is dead code
 * excluded by the preprocessor.  Do not "clean up" by hand — the file is
 * regenerated from the VM definition.
 */
static int ZEND_FASTCALL ZEND_INIT_ARRAY_SPEC_UNUSED_CONST_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
	zend_op *opline = EX(opline);

	/* Create a fresh, empty hashtable in the result temp var. */
	array_init(&EX_T(opline->result.u.var).tmp_var);
	if (IS_UNUSED == IS_UNUSED) {
		ZEND_VM_NEXT_OPCODE();
#if 0 || IS_UNUSED != IS_UNUSED
	} else {
		/* Unreachable in this specialization (kept by the generator). */
		return ZEND_ADD_ARRAY_ELEMENT_SPEC_UNUSED_CONST_HANDLER(ZEND_OPCODE_HANDLER_ARGS_PASSTHRU);
#endif
	}
}
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
237,512,413,984,813,900,000,000,000,000,000,000,000
13
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
symbol_free (void *ptr)
{
  symbol *s = ptr;

  /* Aliases share their content with the symbol they alias, so only a
     non-alias owns (and must release) its content.  */
  if (s->is_alias == 0)
    sym_content_free (s->content);
  free (s);
}
0
[]
bison
b7aab2dbad43aaf14eebe78d54aafa245a000988
227,659,011,811,151,600,000,000,000,000,000,000,000
8
fix: crash when redefining the EOF token Reported by Agency for Defense Development. https://lists.gnu.org/r/bug-bison/2020-08/msg00008.html On an empty such as %token FOO BAR FOO 0 %% input: %empty we crash because when we find FOO 0, we decrement ntokens (since FOO was discovered to be EOF, which is already known to be a token, so we increment ntokens for it, and need to cancel this). This "works well" when EOF is properly defined in one go, but here it is first defined and later only assign token code 0. In the meanwhile BAR was given the token number that we just decremented. To fix this, assign symbol numbers after parsing, not during parsing, so that we also saw all the explicit token codes. To maintain the current numbers (I'd like to keep no difference in the output, not just equivalence), we need to make sure the symbols are numbered in the same order: that of appearance in the source file. So we need the locations to be correct, which was almost the case, except for nterms that appeared several times as LHS (i.e., several times as "foo: ..."). Fixing the use of location_of_lhs sufficed (it appears it was intended for this use, but its implementation was unfinished: it was always set to "false" only). * src/symtab.c (symbol_location_as_lhs_set): Update location_of_lhs. (symbol_code_set): Remove broken hack that decremented ntokens. (symbol_class_set, dummy_symbol_get): Don't set number, ntokens and nnterms. (symbol_check_defined): Do it. (symbols): Don't count nsyms here. Actually, don't count nsyms at all: let it be done in... * src/reader.c (check_and_convert_grammar): here. Define nsyms from ntokens and nnterms after parsing. * tests/input.at (EOF redeclared): New. * examples/c/bistromathic/bistromathic.test: Adjust the traces: in "%nterm <double> exp %% input: ...", exp used to be numbered before input.
/*
 * Set the image's storage class (DirectClass or PseudoClass) and
 * resynchronize the pixel cache to match.  Returns the status of the
 * cache synchronization.
 */
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent, GetMagickModule(), "%s",
      image->filename);
  image->storage_class = storage_class;
  status = SyncImagePixelCache(image, &image->exception);
  return status;
}
0
[ "CWE-665" ]
ImageMagick6
27b1c74979ac473a430e266ff6c4b645664bc805
192,835,350,202,540,350,000,000,000,000,000,000,000
10
https://github.com/ImageMagick/ImageMagick/issues/1522
static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    /* Without a vnet header from the backend, synthesize an empty one. */
    if (!n->has_vnet_hdr) {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
        return;
    }

    /* FIXME this cast is evil */
    void *wbuf = (void *)buf;
    work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                size - n->host_hdr_len);

    if (n->needs_vnet_hdr_swap) {
        virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
    }
    iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
}
0
[ "CWE-703" ]
qemu
abe300d9d894f7138e1af7c8e9c88c04bfe98b37
207,405,363,215,056,760,000,000,000,000,000,000,000
21
virtio-net: fix map leaking on error during receive Commit bedd7e93d0196 ("virtio-net: fix use after unmap/free for sg") tries to fix the use after free of the sg by caching the virtqueue elements in an array and unmap them at once after receiving the packets, But it forgot to unmap the cached elements on error which will lead to leaking of mapping and other unexpected results. Fixing this by detaching the cached elements on error. This addresses CVE-2022-26353. Reported-by: Victor Tom <[email protected]> Cc: [email protected] Fixes: CVE-2022-26353 Fixes: bedd7e93d0196 ("virtio-net: fix use after unmap/free for sg") Reviewed-by: Michael S. Tsirkin <[email protected]> Signed-off-by: Jason Wang <[email protected]>
file_id_hasher (void const *entry, size_t table_size) { file_id const *e = entry; size_t i = e->ino + e->dev; return i % table_size; }
0
[ "CWE-22" ]
patch
685a78b6052f4df6eac6d625a545cfb54a6ac0e1
149,403,079,241,256,150,000,000,000,000,000,000,000
6
Do not let a malicious patch create files above current directory This addresses CVE-2010-4651, reported by Jakub Wilk. https://bugzilla.redhat.com/show_bug.cgi?id=CVE-2010-4651 * src/util.c (strip_leading_slashes): Reject absolute file names and file names containing a component of "..". * tests/bad-filenames: New file. Test for this. * tests/Makefile.am (TESTS): Add it. Improvements by Andreas Gruenbacher.
/*
 * Normalize a user-supplied server address into a canonical URI string.
 *
 * Accepts http/https plus the dav/davs aliases (used by DNS-SD and
 * gvfs); a bare host with no scheme is treated as https.  dav/davs are
 * rewritten to http/https.  If SERVER is non-NULL, *server receives a
 * newly allocated "host[:port]/path" string where the port is omitted
 * when it equals the scheme's default.
 *
 * Returns a newly allocated URI string, or NULL for an unsupported
 * scheme or unparsable address.  Callers own the returned strings.
 */
normalize_uri (const gchar *address, gchar **server)
{
  SoupURI *uri;
  gchar *scheme;
  gchar *ret;
  gchar *uri_string;
  guint std_port;

  uri = NULL;
  uri_string = NULL;
  scheme = g_uri_parse_scheme (address);
  std_port = 0;
  ret = NULL;

  if (g_strcmp0 (scheme, "http") == 0
      || g_strcmp0 (scheme, "dav") == 0) /* dav(s) is used by DNS-SD and gvfs */
    {
      uri_string = g_strdup (address);
      std_port = 80;
    }
  else if (g_strcmp0 (scheme, "https") == 0 || g_strcmp0 (scheme, "davs") == 0)
    {
      uri_string = g_strdup (address);
      std_port = 443;
    }
  else if (scheme == NULL)
    {
      /* No scheme at all: default to https. */
      uri_string = g_strconcat ("https://", address, NULL);
      std_port = 443;
    }
  else
    goto out; /* Unsupported scheme: fail with NULL. */

  uri = soup_uri_new (uri_string);
  if (uri == NULL)
    goto out;

  /* Map the DAV aliases onto plain HTTP(S). */
  if (g_strcmp0 (scheme, "dav") == 0)
    soup_uri_set_scheme (uri, SOUP_URI_SCHEME_HTTP);
  else if (g_strcmp0 (scheme, "davs") == 0)
    soup_uri_set_scheme (uri, SOUP_URI_SCHEME_HTTPS);

  if (server != NULL)
    {
      const gchar *path;
      gchar *port_string;
      guint port;

      path = soup_uri_get_path (uri);
      if (g_strcmp0 (path, "/") == 0)
        path = ""; /* Drop a bare root path from the server string. */

      port = soup_uri_get_port (uri);
      port_string = g_strdup_printf (":%u", port);
      /* Only spell out the port when it differs from the default. */
      *server = g_strconcat (soup_uri_get_host (uri),
                             (port == std_port) ? "" : port_string,
                             path,
                             NULL);
      g_free (port_string);
    }

  ret = soup_uri_to_string (uri, FALSE);

 out:
  /* Single exit point: release everything we may have allocated. */
  g_clear_pointer (&uri, (GDestroyNotify *) soup_uri_free);
  g_free (scheme);
  g_free (uri_string);
  return ret;
}
0
[ "CWE-310" ]
gnome-online-accounts
edde7c63326242a60a075341d3fea0be0bc4d80e
228,432,327,625,280,400,000,000,000,000,000,000,000
69
Guard against invalid SSL certificates None of the branded providers (eg., Google, Facebook and Windows Live) should ever have an invalid certificate. So set "ssl-strict" on the SoupSession object being used by GoaWebView. Providers like ownCloud and Exchange might have to deal with certificates that are not up to the mark. eg., self-signed certificates. For those, show a warning when the account is being created, and only proceed if the user decides to ignore it. In any case, save the status of the certificate that was used to create the account. So an account created with a valid certificate will never work with an invalid one, and one created with an invalid certificate will not throw any further warnings. Fixes: CVE-2013-0240
/*
 * fsync/fdatasync for OCFS2: flush dirty data in [start, end], then make
 * sure the relevant journal transaction has committed, issuing a block
 * device flush ourselves when the journal commit will not send one.
 */
static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	int err = 0;
	struct inode *inode = file->f_mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	journal_t *journal = osb->journal->j_journal;
	int ret;
	tid_t commit_tid;
	bool needs_barrier = false;

	trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
			      OCFS2_I(inode)->ip_blkno,
			      file->f_path.dentry->d_name.len,
			      file->f_path.dentry->d_name.name,
			      (unsigned long long)datasync);

	/* Nothing to persist on a read-only filesystem. */
	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	/* Write back and wait for the page-cache data in the range first. */
	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	/* fdatasync only needs the data transaction; fsync the full one. */
	commit_tid = datasync ? oi->i_datasync_tid : oi->i_sync_tid;
	if (journal->j_flags & JBD2_BARRIER &&
	    !jbd2_trans_will_send_data_barrier(journal, commit_tid))
		needs_barrier = true;
	err = jbd2_complete_transaction(journal, commit_tid);
	if (needs_barrier) {
		/* The commit did not flush the device; do it ourselves,
		 * but preserve any earlier error. */
		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
		if (!err)
			err = ret;
	}

	if (err)
		mlog_errno(err);

	/* Collapse any negative error into -EIO for the VFS. */
	return (err < 0) ? -EIO : 0;
}
0
[ "CWE-401" ]
linux
28f5a8a7c033cbf3e32277f4cc9c6afd74f05300
52,769,835,675,701,690,000,000,000,000,000,000,000
41
ocfs2: should wait dio before inode lock in ocfs2_setattr() we should wait dio requests to finish before inode lock in ocfs2_setattr(), otherwise the following deadlock will happen: process 1 process 2 process 3 truncate file 'A' end_io of writing file 'A' receiving the bast messages ocfs2_setattr ocfs2_inode_lock_tracker ocfs2_inode_lock_full inode_dio_wait __inode_dio_wait -->waiting for all dio requests finish dlm_proxy_ast_handler dlm_do_local_bast ocfs2_blocking_ast ocfs2_generic_handle_bast set OCFS2_LOCK_BLOCKED flag dio_end_io dio_bio_end_aio dio_complete ocfs2_dio_end_io ocfs2_dio_end_io_write ocfs2_inode_lock __ocfs2_cluster_lock ocfs2_wait_for_mask -->waiting for OCFS2_LOCK_BLOCKED flag to be cleared, that is waiting for 'process 1' unlocking the inode lock inode_dio_end -->here dec the i_dio_count, but will never be called, so a deadlock happened. Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Alex Chen <[email protected]> Reviewed-by: Jun Piao <[email protected]> Reviewed-by: Joseph Qi <[email protected]> Acked-by: Changwei Ge <[email protected]> Cc: Mark Fasheh <[email protected]> Cc: Joel Becker <[email protected]> Cc: Junxiao Bi <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
/*
 * Verify a certificate (and the chain it heads) against the trusted CAs
 * in trust_ca, using the given security profile, optionally checking
 * that the certificate matches the expected hostname cn.
 *
 * *flags accumulates MBEDTLS_X509_BADCERT_* / BADCRL_* bits describing
 * every problem found.  Returns 0 if the chain verified cleanly,
 * MBEDTLS_ERR_X509_CERT_VERIFY_FAILED if any flag was set, or another
 * negative error for fatal conditions.
 *
 * NOTE(review): on a fatal error from the recursive child/top
 * verification this returns immediately, so *flags may not describe the
 * parts of the chain that were never examined — callers must not treat
 * *flags as complete unless the return value is 0 or
 * MBEDTLS_ERR_X509_CERT_VERIFY_FAILED.  Confirm against upstream's
 * later hardening of fatal-error behaviour.
 */
int mbedtls_x509_crt_verify_with_profile( mbedtls_x509_crt *crt,
                     mbedtls_x509_crt *trust_ca,
                     mbedtls_x509_crl *ca_crl,
                     const mbedtls_x509_crt_profile *profile,
                     const char *cn, uint32_t *flags,
                     int (*f_vrfy)(void *, mbedtls_x509_crt *, int, uint32_t *),
                     void *p_vrfy )
{
    size_t cn_len;
    int ret;
    int pathlen = 0, selfsigned = 0;
    mbedtls_x509_crt *parent;
    mbedtls_x509_name *name;
    mbedtls_x509_sequence *cur = NULL;
    mbedtls_pk_type_t pk_type;

    if( profile == NULL )
        return( MBEDTLS_ERR_X509_BAD_INPUT_DATA );

    *flags = 0;

    /* Hostname check: prefer subjectAltName entries when the extension
     * is present, otherwise fall back to the subject CN.  Wildcard
     * entries of the form "*.example" are honoured in both cases. */
    if( cn != NULL )
    {
        name = &crt->subject;
        cn_len = strlen( cn );

        if( crt->ext_types & MBEDTLS_X509_EXT_SUBJECT_ALT_NAME )
        {
            cur = &crt->subject_alt_names;

            while( cur != NULL )
            {
                if( cur->buf.len == cn_len &&
                    x509_memcasecmp( cn, cur->buf.p, cn_len ) == 0 )
                    break;

                if( cur->buf.len > 2 &&
                    memcmp( cur->buf.p, "*.", 2 ) == 0 &&
                    x509_check_wildcard( cn, &cur->buf ) == 0 )
                {
                    break;
                }

                cur = cur->next;
            }

            if( cur == NULL )
                *flags |= MBEDTLS_X509_BADCERT_CN_MISMATCH;
        }
        else
        {
            while( name != NULL )
            {
                if( MBEDTLS_OID_CMP( MBEDTLS_OID_AT_CN, &name->oid ) == 0 )
                {
                    if( name->val.len == cn_len &&
                        x509_memcasecmp( name->val.p, cn, cn_len ) == 0 )
                        break;

                    if( name->val.len > 2 &&
                        memcmp( name->val.p, "*.", 2 ) == 0 &&
                        x509_check_wildcard( cn, &name->val ) == 0 )
                        break;
                }

                name = name->next;
            }

            if( name == NULL )
                *flags |= MBEDTLS_X509_BADCERT_CN_MISMATCH;
        }
    }

    /* Check the type and size of the key */
    pk_type = mbedtls_pk_get_type( &crt->pk );

    if( x509_profile_check_pk_alg( profile, pk_type ) != 0 )
        *flags |= MBEDTLS_X509_BADCERT_BAD_PK;

    if( x509_profile_check_key( profile, pk_type, &crt->pk ) != 0 )
        *flags |= MBEDTLS_X509_BADCERT_BAD_KEY;

    /* Look for a parent in trusted CAs */
    for( parent = trust_ca; parent != NULL; parent = parent->next )
    {
        if( x509_crt_check_parent( crt, parent, 0, pathlen == 0 ) == 0 )
            break;
    }

    if( parent != NULL )
    {
        /* Directly issued by a trusted CA: verify as top of chain. */
        ret = x509_crt_verify_top( crt, parent, ca_crl, profile,
                                   pathlen, selfsigned, flags, f_vrfy, p_vrfy );
        if( ret != 0 )
            return( ret );
    }
    else
    {
        /* Look for a parent upwards the chain */
        for( parent = crt->next; parent != NULL; parent = parent->next )
            if( x509_crt_check_parent( crt, parent, 0, pathlen == 0 ) == 0 )
                break;

        /* Are we part of the chain or at the top? */
        if( parent != NULL )
        {
            ret = x509_crt_verify_child( crt, parent, trust_ca, ca_crl, profile,
                                         pathlen, selfsigned, flags,
                                         f_vrfy, p_vrfy );
            if( ret != 0 )
                return( ret );
        }
        else
        {
            ret = x509_crt_verify_top( crt, trust_ca, ca_crl, profile,
                                       pathlen, selfsigned, flags,
                                       f_vrfy, p_vrfy );
            if( ret != 0 )
                return( ret );
        }
    }

    if( *flags != 0 )
        return( MBEDTLS_ERR_X509_CERT_VERIFY_FAILED );

    return( 0 );
}
1
[ "CWE-287", "CWE-284" ]
mbedtls
d15795acd5074e0b44e71f7ede8bdfe1b48591fc
260,571,214,915,258,930,000,000,000,000,000,000,000
125
Improve behaviour on fatal errors If we didn't walk the whole chain, then there may be any kind of errors in the part of the chain we didn't check, so setting all flags looks like the safe thing to do.
/*
 * Multiply ap * bp modulo m->m and store a canonical (fully reduced,
 * 0 <= rp < m->m) result in rp.  tp is scratch space large enough for
 * the double-length product (2 * m->size limbs).
 */
ecc_mod_mul_canonical (const struct ecc_modulo *m, mp_limb_t *rp,
		       const mp_limb_t *ap, const mp_limb_t *bp,
		       mp_limb_t *tp)
{
  mp_limb_t cy;
  /* tp <- ap * bp: full 2*m->size limb product. */
  mpn_mul_n (tp, ap, bp, m->size);
  /* Reduce the product; the (possibly not fully reduced) result lands
     at tp + m->size. */
  m->reduce (m, tp + m->size, tp);
  /* Trial-subtract the modulus; cy is the borrow (1 if the value was
     already below m->m). */
  cy = mpn_sub_n (rp, tp + m->size, m->m, m->size);
  /* If the subtraction borrowed, keep the pre-subtraction value.
     NOTE(review): cnd_copy presumably selects without branching to stay
     constant-time — confirm against nettle's cnd_copy contract. */
  cnd_copy (cy, rp, tp + m->size, m->size);
}
0
[ "CWE-787" ]
nettle
a63893791280d441c713293491da97c79c0950fe
139,026,181,108,634,070,000,000,000,000,000,000,000
10
New functions ecc_mod_mul_canonical and ecc_mod_sqr_canonical. * ecc-mod-arith.c (ecc_mod_mul_canonical, ecc_mod_sqr_canonical): New functions. * ecc-internal.h: Declare and document new functions. * curve448-eh-to-x.c (curve448_eh_to_x): Use ecc_mod_sqr_canonical. * curve25519-eh-to-x.c (curve25519_eh_to_x): Use ecc_mod_mul_canonical. * ecc-eh-to-a.c (ecc_eh_to_a): Likewise. * ecc-j-to-a.c (ecc_j_to_a): Likewise. * ecc-mul-m.c (ecc_mul_m): Likewise. (cherry picked from commit 2bf497ba4d6acc6f352bca015837fad33008565c)
/*
 * Create an InnoDB memcached engine instance.  Fills in the engine
 * vtable, captures the server API callbacks, and instantiates the
 * wrapped default engine.  Returns ENGINE_SUCCESS and stores the new
 * handle in *handle, or an error code (ENOTSUP for an unsupported
 * interface version, ENOMEM on allocation failure, or whatever the
 * default-engine creation returns — in which case everything allocated
 * here is freed).
 */
create_instance(
/*============*/
	uint64_t		interface,	/*!< in: protocol version,
						currently always 1 */
	GET_SERVER_API		get_server_api,	/*!< in: Callback the engines
						may call to get the public
						server interface */
	ENGINE_HANDLE**		handle )	/*!< out: Engine handle */
{
	ENGINE_ERROR_CODE	err_ret;
	struct innodb_engine*	innodb_eng;

	SERVER_HANDLE_V1	*api = get_server_api();

	if (interface != 1 || api == NULL) {
		return(ENGINE_ENOTSUP);
	}

	innodb_eng = malloc(sizeof(struct innodb_engine));

	if (innodb_eng == NULL) {
		return(ENGINE_ENOMEM);
	}

	memset(innodb_eng, 0, sizeof(*innodb_eng));

	innodb_eng->engine.interface.interface = 1;
	innodb_eng->engine.get_info = innodb_get_info;
	innodb_eng->engine.initialize = innodb_initialize;
	innodb_eng->engine.destroy = innodb_destroy;
	innodb_eng->engine.allocate = innodb_allocate;
	innodb_eng->engine.remove = innodb_remove;
	innodb_eng->engine.release = innodb_release;
	innodb_eng->engine.clean_engine= innodb_clean_engine;
	innodb_eng->engine.get = innodb_get;
	innodb_eng->engine.get_stats = innodb_get_stats;
	innodb_eng->engine.reset_stats = innodb_reset_stats;
	innodb_eng->engine.store = innodb_store;
	innodb_eng->engine.arithmetic = innodb_arithmetic;
	innodb_eng->engine.flush = innodb_flush;
	innodb_eng->engine.unknown_command = innodb_unknown_command;
	innodb_eng->engine.item_set_cas = item_set_cas;
	innodb_eng->engine.get_item_info = innodb_get_item_info;
	innodb_eng->engine.get_stats_struct = NULL;
	innodb_eng->engine.errinfo = NULL;
	innodb_eng->engine.bind = innodb_bind;

	innodb_eng->server = *api;
	innodb_eng->get_server_api = get_server_api;

	/* configuration, with default values*/
	innodb_eng->info.info.description = "InnoDB Memcache " VERSION;
	innodb_eng->info.info.num_features = 3;
	innodb_eng->info.info.features[0].feature = ENGINE_FEATURE_CAS;
	innodb_eng->info.info.features[1].feature =
		ENGINE_FEATURE_PERSISTENT_STORAGE;
	/* BUGFIX: this used to assign features[0] a second time, clobbering
	ENGINE_FEATURE_CAS and leaving features[2] unset even though
	num_features is 3. */
	innodb_eng->info.info.features[2].feature = ENGINE_FEATURE_LRU;

	/* Now call create_instace() for the default engine */
	err_ret = create_my_default_instance(interface, get_server_api,
				       &(innodb_eng->default_engine));

	if (err_ret != ENGINE_SUCCESS) {
		free(innodb_eng);
		return(err_ret);
	}

	innodb_eng->clean_stale_conn = false;
	innodb_eng->initialized = true;

	*handle = (ENGINE_HANDLE*) &innodb_eng->engine;

	return(ENGINE_SUCCESS);
}
0
[]
mysql-server
659514dc83299a7d8c7defeb543be4339fbe1ee1
15,892,018,649,331,350,000,000,000,000,000,000,000
73
Bug #25147515 SET DAEMON_MEMCACHED_R_BATCH_SIZE GREATER THAN 1 MAKE MYSQLD CRASHED PROBLEM ------- User starts a "get" the the connection with SET DAEMON_MEMCACHED_R_BATCH_SIZE= 5 and with binlog on. Since "get" is a read transaction this connection is not allocated any conn_data->thd (which is used for bin log commit).The connection is kept open. Innodb background thread tries to commit transactions which have exceed CONN_IDLE_TIME_TO_BK_COMMIT and whose read batch size > 0, when it finds this connection it tries to attach conn_data->thd to bin log thread.Since conn_data->thd is NULL it crashes. FIX --- Check conn_data->thd value before attaching it to binlog thread.
static int smack_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc) { struct socket_smack *ssp; struct socket *sock; struct super_block *sbp; struct inode *ip = (struct inode *)inode; char *isp; int ilen; int rc = 0; if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) { isp = smk_of_inode(inode); ilen = strlen(isp) + 1; *buffer = isp; return ilen; } /* * The rest of the Smack xattrs are only on sockets. */ sbp = ip->i_sb; if (sbp->s_magic != SOCKFS_MAGIC) return -EOPNOTSUPP; sock = SOCKET_I(ip); if (sock == NULL || sock->sk == NULL) return -EOPNOTSUPP; ssp = sock->sk->sk_security; if (strcmp(name, XATTR_SMACK_IPIN) == 0) isp = ssp->smk_in; else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) isp = ssp->smk_out; else return -EOPNOTSUPP; ilen = strlen(isp) + 1; if (rc == 0) { *buffer = isp; rc = ilen; } return rc; }
0
[]
linux-2.6
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
220,710,989,559,537,540,000,000,000,000,000,000,000
47
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6] Add a keyctl to install a process's session keyring onto its parent. This replaces the parent's session keyring. Because the COW credential code does not permit one process to change another process's credentials directly, the change is deferred until userspace next starts executing again. Normally this will be after a wait*() syscall. To support this, three new security hooks have been provided: cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in the blank security creds and key_session_to_parent() - which asks the LSM if the process may replace its parent's session keyring. The replacement may only happen if the process has the same ownership details as its parent, and the process has LINK permission on the session keyring, and the session keyring is owned by the process, and the LSM permits it. Note that this requires alteration to each architecture's notify_resume path. This has been done for all arches barring blackfin, m68k* and xtensa, all of which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the replacement to be performed at the point the parent process resumes userspace execution. This allows the userspace AFS pioctl emulation to fully emulate newpag() and the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to alter the parent process's PAG membership. However, since kAFS doesn't use PAGs per se, but rather dumps the keys into the session keyring, the session keyring of the parent must be replaced if, for example, VIOCSETTOK is passed the newpag flag. 
This can be tested with the following program: #include <stdio.h> #include <stdlib.h> #include <keyutils.h> #define KEYCTL_SESSION_TO_PARENT 18 #define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0) int main(int argc, char **argv) { key_serial_t keyring, key; long ret; keyring = keyctl_join_session_keyring(argv[1]); OSERROR(keyring, "keyctl_join_session_keyring"); key = add_key("user", "a", "b", 1, keyring); OSERROR(key, "add_key"); ret = keyctl(KEYCTL_SESSION_TO_PARENT); OSERROR(ret, "KEYCTL_SESSION_TO_PARENT"); return 0; } Compiled and linked with -lkeyutils, you should see something like: [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 355907932 --alswrv 4043 -1 \_ keyring: _uid.4043 [dhowells@andromeda ~]$ /tmp/newpag [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 1055658746 --alswrv 4043 4043 \_ user: a [dhowells@andromeda ~]$ /tmp/newpag hello [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: hello 340417692 --alswrv 4043 4043 \_ user: a Where the test program creates a new session keyring, sticks a user key named 'a' into it and then installs it on its parent. Signed-off-by: David Howells <[email protected]> Signed-off-by: James Morris <[email protected]>
/*
 * Allocate a new palette-based (8-bit) image of sx by sy pixels.
 * Returns NULL on invalid dimensions, arithmetic overflow of the
 * allocation sizes, or allocation failure; partially allocated rows are
 * released before returning.
 */
BGD_DECLARE(gdImagePtr) gdImageCreate (int sx, int sy)
{
	int i;
	gdImagePtr im;

	/* ROBUSTNESS: reject non-positive dimensions up front; the old
	   code only guarded against multiplicative overflow and would
	   pass a zero/negative count to the allocators. */
	if (sx <= 0 || sy <= 0) {
		return NULL;
	}

	if (overflow2(sx, sy)) {
		return NULL;
	}
	if (overflow2(sizeof (unsigned char *), sy)) {
		return NULL;
	}
	/* NOTE(review): sizeof(unsigned char) is 1 by definition, so this
	   check can never fire; kept for symmetry with the other size
	   checks. */
	if (overflow2(sizeof (unsigned char), sx)) {
		return NULL;
	}

	im = (gdImage *) gdCalloc(1, sizeof(gdImage));
	if (!im) {
		return NULL;
	}

	/* Row-major ever since gd 1.3 */
	im->pixels = (unsigned char **) gdMalloc (sizeof (unsigned char *) * sy);
	if (!im->pixels) {
		gdFree(im);
		return NULL;
	}

	im->polyInts = 0;
	im->polyAllocated = 0;
	im->brush = 0;
	im->tile = 0;
	im->style = 0;
	for (i = 0; (i < sy); i++) {
		/* Row-major ever since gd 1.3 */
		im->pixels[i] = (unsigned char *) gdCalloc (sx, sizeof (unsigned char));
		if (!im->pixels[i]) {
			/* Unwind the rows allocated so far. */
			for (--i ; i >= 0; i--) {
				gdFree(im->pixels[i]);
			}
			gdFree(im->pixels);
			gdFree(im);
			return NULL;
		}
	}

	im->sx = sx;
	im->sy = sy;
	im->colorsTotal = 0;
	im->transparent = (-1);
	im->interlace = 0;
	im->thick = 1;
	im->AA = 0;
	for (i = 0; (i < gdMaxColors); i++) {
		im->open[i] = 1;
	};
	im->trueColor = 0;
	im->tpixels = 0;
	im->cx1 = 0;
	im->cy1 = 0;
	im->cx2 = im->sx - 1;
	im->cy2 = im->sy - 1;
	im->res_x = GD_RESOLUTION;
	im->res_y = GD_RESOLUTION;
	im->interpolation = NULL;
	im->interpolation_id = GD_BILINEAR_FIXED;
	return im;
}
1
[ "CWE-20" ]
libgd
1846f48e5fcdde996e7c27a4bbac5d0aef183e4b
35,894,093,252,501,957,000,000,000,000,000,000,000
67
Fix #340: System frozen gdImageCreate() doesn't check for oversized images and as such is prone to DoS vulnerabilities. We fix that by applying the same overflow check that is already in place for gdImageCreateTrueColor(). CVE-2016-9317
static void setup_struct_phy_for_init(struct b43_wldev *dev,
				      struct b43_phy *phy)
{
	/* PHY TX errors counter: reset to the full badness budget. */
	atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);

	/* Schedule the next TX power check for "now". */
	phy->next_txpwr_check_time = jiffies;

	/* Hardware power control follows the module parameter. */
	phy->hardware_power_control = (modparam_hwpctl != 0);

#if B43_DEBUG
	/* Debug-only lock bookkeeping starts out unlocked. */
	phy->radio_locked = false;
	phy->phy_locked = false;
#endif
}
0
[ "CWE-134" ]
wireless
9538cbaab6e8b8046039b4b2eb6c9d614dc782bd
217,603,853,052,052,850,000,000,000,000,000,000,000
13
b43: stop format string leaking into error msgs The module parameter "fwpostfix" is userspace controllable, unfiltered, and is used to define the firmware filename. b43_do_request_fw() populates ctx->errors[] on error, containing the firmware filename. b43err() parses its arguments as a format string. For systems with b43 hardware, this could lead to a uid-0 to ring-0 escalation. CVE-2013-2852 Signed-off-by: Kees Cook <[email protected]> Cc: [email protected] Signed-off-by: John W. Linville <[email protected]>
static void ov6xx0_configure(struct sd *sd) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; int rc; gspca_dbg(gspca_dev, D_PROBE, "starting OV6xx0 configuration\n"); /* Detect sensor (sub)type */ rc = i2c_r(sd, OV7610_REG_COM_I); if (rc < 0) { gspca_err(gspca_dev, "Error detecting sensor type\n"); return; } /* Ugh. The first two bits are the version bits, but * the entire register value must be used. I guess OVT * underestimated how many variants they would make. */ switch (rc) { case 0x00: sd->sensor = SEN_OV6630; pr_warn("WARNING: Sensor is an OV66308. Your camera may have been misdetected in previous driver versions.\n"); break; case 0x01: sd->sensor = SEN_OV6620; gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV6620\n"); break; case 0x02: sd->sensor = SEN_OV6630; gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV66308AE\n"); break; case 0x03: sd->sensor = SEN_OV66308AF; gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV66308AF\n"); break; case 0x90: sd->sensor = SEN_OV6630; pr_warn("WARNING: Sensor is an OV66307. Your camera may have been misdetected in previous driver versions.\n"); break; default: gspca_err(gspca_dev, "FATAL: Unknown sensor version: 0x%02x\n", rc); return; } /* Set sensor-specific vars */ sd->sif = 1; }
0
[ "CWE-476" ]
linux
998912346c0da53a6dbb71fab3a138586b596b30
178,564,232,116,790,760,000,000,000,000,000,000,000
47
media: ov519: add missing endpoint sanity checks Make sure to check that we have at least one endpoint before accessing the endpoint array to avoid dereferencing a NULL-pointer on stream start. Note that these sanity checks are not redundant as the driver is mixing looking up altsettings by index and by number, which need not coincide. Fixes: 1876bb923c98 ("V4L/DVB (12079): gspca_ov519: add support for the ov511 bridge") Fixes: b282d87332f5 ("V4L/DVB (12080): gspca_ov519: Fix ov518+ with OV7620AE (Trust spacecam 320)") Cc: stable <[email protected]> # 2.6.31 Cc: Hans de Goede <[email protected]> Signed-off-by: Johan Hovold <[email protected]> Signed-off-by: Hans Verkuil <[email protected]> Signed-off-by: Mauro Carvalho Chehab <[email protected]>
/*
 * Return 1 if NAME refers to a directory, 0 if it exists but is not a
 * directory, and -1 if it cannot be stat'ed (or if the platform lacks
 * S_ISDIR entirely).
 */
int app_isdir(const char *name)
{
#if defined(S_ISDIR)
	struct stat info;

	if (stat(name, &info) != 0)
		return -1;
	return S_ISDIR(info.st_mode);
#else
	return -1;
#endif
}
0
[]
openssl
a70da5b3ecc3160368529677006801c58cb369db
255,493,206,871,876,460,000,000,000,000,000,000,000
11
New functions to check a hostname email or IP address against a certificate. Add options to s_client, s_server and x509 utilities to print results of checks.
/*
 * Scan the first num_quark entries of quark_list for QUARK, stopping
 * early at the first SEPARATOR.  Returns True only if QUARK appears
 * before any separator; False otherwise.
 */
_XimCheckNestQuarkList(quark_list, num_quark, quark, separator)
    XrmQuark	*quark_list;
    int		 num_quark;
    XrmQuark	 quark;
    XrmQuark	 separator;
{
    int idx = 0;

    while (idx < num_quark) {
	if (quark_list[idx] == separator)
	    return False;	/* separator ends the searchable prefix */
	if (quark_list[idx] == quark)
	    return True;
	idx++;
    }
    return False;
}
0
[ "CWE-190" ]
libx11
1a566c9e00e5f35c1f9e7f3d741a02e5170852b2
4,093,960,330,223,953,000,000,000,000,000,000,000
18
Zero out buffers in functions It looks like uninitialized stack or heap memory can leak out via padding bytes. Signed-off-by: Matthieu Herrb <[email protected]> Reviewed-by: Matthieu Herrb <[email protected]>
/*
 * Handle VHOST_USER_POSTCOPY_ADVISE: open a userfaultfd for postcopy
 * live migration, negotiate the UFFD API with the kernel, and hand the
 * fd back to the master in the reply message.
 *
 * Returns RTE_VHOST_MSG_RESULT_REPLY with the ufd in msg->fds[0] on
 * success, or RTE_VHOST_MSG_RESULT_ERR on failure (and always ERR when
 * postcopy support is compiled out).
 */
vhost_user_set_postcopy_advise(struct virtio_net **pdev,
			struct VhostUserMsg *msg,
			int main_fd __rte_unused)
{
	struct virtio_net *dev = *pdev;
#ifdef RTE_LIBRTE_VHOST_POSTCOPY
	struct uffdio_api api_struct;

	/* This request must not carry any fds of its own. */
	if (validate_msg_fds(msg, 0) != 0)
		return RTE_VHOST_MSG_RESULT_ERR;

	dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	if (dev->postcopy_ufd == -1) {
		RTE_LOG(ERR, VHOST_CONFIG, "Userfaultfd not available: %s\n",
			strerror(errno));
		return RTE_VHOST_MSG_RESULT_ERR;
	}
	/* Negotiate the userfaultfd API version; no extra features. */
	api_struct.api = UFFD_API;
	api_struct.features = 0;
	if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
		RTE_LOG(ERR, VHOST_CONFIG, "UFFDIO_API ioctl failure: %s\n",
			strerror(errno));
		/* Don't leak the fd on negotiation failure. */
		close(dev->postcopy_ufd);
		dev->postcopy_ufd = -1;
		return RTE_VHOST_MSG_RESULT_ERR;
	}
	/* Ship the ufd back to the master in the reply. */
	msg->fds[0] = dev->postcopy_ufd;
	msg->fd_num = 1;

	return RTE_VHOST_MSG_RESULT_REPLY;
#else
	dev->postcopy_ufd = -1;
	msg->fd_num = 0;

	return RTE_VHOST_MSG_RESULT_ERR;
#endif
}
0
[]
dpdk
bf472259dde6d9c4dd3ebad2c2b477a168c6e021
263,117,227,643,002,200,000,000,000,000,000,000,000
38
vhost: fix possible denial of service by leaking FDs A malicious Vhost-user master could send in loop hand-crafted vhost-user messages containing more file descriptors the vhost-user slave expects. Doing so causes the application using the vhost-user library to run out of FDs. This issue has been assigned CVE-2019-14818 Fixes: 8f972312b8f4 ("vhost: support vhost-user") Signed-off-by: Maxime Coquelin <[email protected]>
static void hardware_disable(void *garbage) { if (vmm_exclusive) { vmclear_local_vcpus(); kvm_cpu_vmxoff(); } write_cr4(read_cr4() & ~X86_CR4_VMXE); }
0
[ "CWE-400" ]
linux-2.6
9581d442b9058d3699b4be568b6e5eae38a41493
17,596,370,579,229,583,000,000,000,000,000,000,000
8
KVM: Fix fs/gs reload oops with invalid ldt kvm reloads the host's fs and gs blindly, however the underlying segment descriptors may be invalid due to the user modifying the ldt after loading them. Fix by using the safe accessors (loadsegment() and load_gs_index()) instead of home grown unsafe versions. This is CVE-2010-3698. KVM-Stable-Tag. Signed-off-by: Avi Kivity <[email protected]> Signed-off-by: Marcelo Tosatti <[email protected]>
sctp_disposition_t sctp_sf_ignore_primitive( const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { SCTP_DEBUG_PRINTK("Primitive type %d is ignored.\n", type.primitive); return SCTP_DISPOSITION_DISCARD; }
0
[ "CWE-20" ]
linux-2.6
ba0166708ef4da7eeb61dd92bbba4d5a749d6561
26,206,293,896,036,323,000,000,000,000,000,000,000
10
sctp: Fix kernel panic while process protocol violation parameter Since call to function sctp_sf_abort_violation() need paramter 'arg' with 'struct sctp_chunk' type, it will read the chunk type and chunk length from the chunk_hdr member of chunk. But call to sctp_sf_violation_paramlen() always with 'struct sctp_paramhdr' type's parameter, it will be passed to sctp_sf_abort_violation(). This may cause kernel panic. sctp_sf_violation_paramlen() |-- sctp_sf_abort_violation() |-- sctp_make_abort_violation() This patch fixed this problem. This patch also fix two place which called sctp_sf_violation_paramlen() with wrong paramter type. Signed-off-by: Wei Yongjun <[email protected]> Signed-off-by: Vlad Yasevich <[email protected]> Signed-off-by: David S. Miller <[email protected]>
int parse_uid_range(const char *s, uid_t *ret_lower, uid_t *ret_upper) { uint32_t u, l; int r; assert(s); assert(ret_lower); assert(ret_upper); r = parse_range(s, &l, &u); if (r < 0) return r; if (l > u) return -EINVAL; if (!uid_is_valid(l) || !uid_is_valid(u)) return -ENXIO; *ret_lower = l; *ret_upper = u; return 0; }
0
[ "CWE-269" ]
systemd
156a5fd297b61bce31630d7a52c15614bf784843
302,426,538,269,348,860,000,000,000,000,000,000,000
22
basic/user-util: always use base 10 for user/group numbers We would parse numbers with base prefixes as user identifiers. For example, "0x2b3bfa0" would be interpreted as UID==45334432 and "01750" would be interpreted as UID==1000. This parsing was used also in cases where either a user/group name or number may be specified. This means that names like 0x2b3bfa0 would be ambiguous: they are a valid user name according to our documented relaxed rules, but they would also be parsed as numeric uids. This behaviour is definitely not expected by users, since tools generally only accept decimal numbers (e.g. id, getent passwd), while other tools only accept user names and thus will interpret such strings as user names without even attempting to convert them to numbers (su, ssh). So let's follow suit and only accept numbers in decimal notation. Effectively this means that we will reject such strings as a username/uid/groupname/gid where strict mode is used, and try to look up a user/group with such a name in relaxed mode. Since the function changed is fairly low-level and fairly widely used, this affects multiple tools: loginctl show-user/enable-linger/disable-linger foo', the third argument in sysusers.d, fourth and fifth arguments in tmpfiles.d, etc. Fixes #15985.
void (*SSL_get_info_callback(const SSL *ssl))(const SSL * /*ssl*/,int /*type*/,int /*val*/) { return ssl->info_callback; }
0
[]
openssl
ee2ffc279417f15fef3b1073c7dc81a908991516
78,125,291,047,490,190,000,000,000,000,000,000,000
4
Add Next Protocol Negotiation.
void adapter_shutdown(void) { GList *list; DBG(""); powering_down = true; for (list = g_list_first(adapter_list); list; list = g_list_next(list)) { struct btd_adapter *adapter = list->data; if (!(adapter->current_settings & MGMT_SETTING_POWERED)) continue; clear_discoverable(adapter); remove_temporary_devices(adapter); set_mode(adapter, MGMT_OP_SET_POWERED, 0x00); adapter_remaining++; } if (!adapter_remaining) btd_exit(); }
0
[ "CWE-862", "CWE-863" ]
bluez
b497b5942a8beb8f89ca1c359c54ad67ec843055
213,189,793,174,662,980,000,000,000,000,000,000,000
25
adapter: Fix storing discoverable setting discoverable setting shall only be store when changed via Discoverable property and not when discovery client set it as that be considered temporary just for the lifetime of the discovery.
static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp) { struct vdpa_device *vdpa = v->vdpa; const struct vdpa_config_ops *ops = vdpa->config; u32 device_id; device_id = ops->get_device_id(vdpa); if (copy_to_user(argp, &device_id, sizeof(device_id))) return -EFAULT; return 0; }
0
[ "CWE-416" ]
linux
f6bbf0010ba004f5e90c7aefdebc0ee4bd3283b9
44,250,520,279,261,790,000,000,000,000,000,000,000
13
vhost-vdpa: fix use-after-free of v->config_ctx When the 'v->config_ctx' eventfd_ctx reference is released we didn't set it to NULL. So if the same character device (e.g. /dev/vhost-vdpa-0) is re-opened, the 'v->config_ctx' is invalid and calling again vhost_vdpa_config_put() causes use-after-free issues like the following refcount_t underflow: refcount_t: underflow; use-after-free. WARNING: CPU: 2 PID: 872 at lib/refcount.c:28 refcount_warn_saturate+0xae/0xf0 RIP: 0010:refcount_warn_saturate+0xae/0xf0 Call Trace: eventfd_ctx_put+0x5b/0x70 vhost_vdpa_release+0xcd/0x150 [vhost_vdpa] __fput+0x8e/0x240 ____fput+0xe/0x10 task_work_run+0x66/0xa0 exit_to_user_mode_prepare+0x118/0x120 syscall_exit_to_user_mode+0x21/0x50 ? __x64_sys_close+0x12/0x40 do_syscall_64+0x45/0x50 entry_SYSCALL_64_after_hwframe+0x44/0xae Fixes: 776f395004d8 ("vhost_vdpa: Support config interrupt in vdpa") Cc: [email protected] Cc: [email protected] Signed-off-by: Stefano Garzarella <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Michael S. Tsirkin <[email protected]> Reviewed-by: Zhu Lingshan <[email protected]> Acked-by: Jason Wang <[email protected]>
libssh2_userauth_publickey_fromfile_ex(LIBSSH2_SESSION *session, const char *user, unsigned int user_len, const char *publickey, const char *privatekey, const char *passphrase) { int rc; if(NULL == passphrase) /* if given a NULL pointer, make it point to a zero-length string to save us from having to check this all over */ passphrase = ""; BLOCK_ADJUST(rc, session, userauth_publickey_fromfile(session, user, user_len, publickey, privatekey, passphrase)); return rc; }
0
[ "CWE-787" ]
libssh2
dc109a7f518757741590bb993c0c8412928ccec2
307,406,705,976,414,020,000,000,000,000,000,000,000
20
Security fixes (#315) * Bounds checks Fixes for CVEs https://www.libssh2.org/CVE-2019-3863.html https://www.libssh2.org/CVE-2019-3856.html * Packet length bounds check CVE https://www.libssh2.org/CVE-2019-3855.html * Response length check CVE https://www.libssh2.org/CVE-2019-3859.html * Bounds check CVE https://www.libssh2.org/CVE-2019-3857.html * Bounds checking CVE https://www.libssh2.org/CVE-2019-3859.html and additional data validation * Check bounds before reading into buffers * Bounds checking CVE https://www.libssh2.org/CVE-2019-3859.html * declare SIZE_MAX and UINT_MAX if needed
static void cleanup_single_sta(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; __cleanup_single_sta(sta); sta_info_free(local, sta); }
0
[ "CWE-287" ]
linux
3e493173b7841259a08c5c8e5cbe90adb349da7e
86,935,998,469,131,900,000,000,000,000,000,000,000
8
mac80211: Do not send Layer 2 Update frame before authorization The Layer 2 Update frame is used to update bridges when a station roams to another AP even if that STA does not transmit any frames after the reassociation. This behavior was described in IEEE Std 802.11F-2003 as something that would happen based on MLME-ASSOCIATE.indication, i.e., before completing 4-way handshake. However, this IEEE trial-use recommended practice document was published before RSN (IEEE Std 802.11i-2004) and as such, did not consider RSN use cases. Furthermore, IEEE Std 802.11F-2003 was withdrawn in 2006 and as such, has not been maintained amd should not be used anymore. Sending out the Layer 2 Update frame immediately after association is fine for open networks (and also when using SAE, FT protocol, or FILS authentication when the station is actually authenticated by the time association completes). However, it is not appropriate for cases where RSN is used with PSK or EAP authentication since the station is actually fully authenticated only once the 4-way handshake completes after authentication and attackers might be able to use the unauthenticated triggering of Layer 2 Update frame transmission to disrupt bridge behavior. Fix this by postponing transmission of the Layer 2 Update frame from station entry addition to the point when the station entry is marked authorized. Similarly, send out the VLAN binding update only if the STA entry has already been authorized. Signed-off-by: Jouni Malinen <[email protected]> Reviewed-by: Johannes Berg <[email protected]> Signed-off-by: David S. Miller <[email protected]>
*/ void xmlXPathContainsFunction(xmlXPathParserContextPtr ctxt, int nargs) { xmlXPathObjectPtr hay, needle; CHECK_ARITY(2); CAST_TO_STRING; CHECK_TYPE(XPATH_STRING); needle = valuePop(ctxt); CAST_TO_STRING; hay = valuePop(ctxt); if ((hay == NULL) || (hay->type != XPATH_STRING)) { xmlXPathReleaseObject(ctxt->context, hay); xmlXPathReleaseObject(ctxt->context, needle); XP_ERROR(XPATH_INVALID_TYPE); } if (xmlStrstr(hay->stringval, needle->stringval)) valuePush(ctxt, xmlXPathCacheNewBoolean(ctxt->context, 1)); else valuePush(ctxt, xmlXPathCacheNewBoolean(ctxt->context, 0)); xmlXPathReleaseObject(ctxt->context, hay);
0
[ "CWE-119" ]
libxml2
91d19754d46acd4a639a8b9e31f50f31c78f8c9c
79,393,241,444,478,530,000,000,000,000,000,000,000
22
Fix the semantic of XPath axis for namespace/attribute context nodes The processing of namespace and attributes nodes was not compliant to the XPath-1.0 specification
GF_Err ftab_box_dump(GF_Box *a, FILE * trace) { u32 i; GF_FontTableBox *p = (GF_FontTableBox *)a; gf_isom_box_dump_start(a, "FontTableBox", trace); gf_fprintf(trace, ">\n"); for (i=0; i<p->entry_count; i++) { gf_fprintf(trace, "<FontRecord ID=\"%d\" name=\"%s\"/>\n", p->fonts[i].fontID, p->fonts[i].fontName ? p->fonts[i].fontName : "NULL"); } if (!p->size) { gf_fprintf(trace, "<FontRecord ID=\"\" name=\"\"/>\n"); } gf_isom_box_dump_done("FontTableBox", a, trace); return GF_OK; }
0
[ "CWE-787" ]
gpac
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
71,912,800,788,964,980,000,000,000,000,000,000,000
15
fixed #2138
cmd_http_expect(CMD_ARGS) { struct http *hp; const char *lhs; char *cmp; const char *rhs; (void)cmd; (void)vl; CAST_OBJ_NOTNULL(hp, priv, HTTP_MAGIC); assert(!strcmp(av[0], "expect")); av++; AN(av[0]); AN(av[1]); AN(av[2]); AZ(av[3]); lhs = cmd_var_resolve(hp, av[0]); cmp = av[1]; rhs = cmd_var_resolve(hp, av[2]); if (!strcmp(cmp, "==")) { if (strcmp(lhs, rhs)) vtc_log(hp->vl, 0, "EXPECT %s (%s) %s %s (%s) failed", av[0], lhs, av[1], av[2], rhs); else vtc_log(hp->vl, 4, "EXPECT %s (%s) %s %s (%s) match", av[0], lhs, av[1], av[2], rhs); } else if (!strcmp(cmp, "!=")) { if (!strcmp(lhs, rhs)) vtc_log(hp->vl, 0, "EXPECT %s (%s) %s %s (%s) failed", av[0], lhs, av[1], av[2], rhs); else vtc_log(hp->vl, 4, "EXPECT %s (%s) %s %s (%s) match", av[0], lhs, av[1], av[2], rhs); } else { vtc_log(hp->vl, 0, "EXPECT %s (%s) %s %s (%s) test not implemented", av[0], lhs, av[1], av[2], rhs); } }
0
[ "CWE-269" ]
Varnish-Cache
85e8468bec9416bd7e16b0d80cb820ecd2b330c3
252,562,304,075,169,400,000,000,000,000,000,000,000
40
Do not consider a CR by itself as a valid line terminator Varnish (prior to version 4.0) was not following the standard with regard to line separator. Spotted and analyzed by: Régis Leroy [regilero] [email protected]
lyd_new_output_leaf(struct lyd_node *parent, const struct lys_module *module, const char *name, const char *val_str) { const struct lys_node *snode = NULL, *siblings; if ((!parent && !module) || !name) { LOGARG; return NULL; } siblings = lyd_new_find_schema(parent, module, 1); if (!siblings) { LOGARG; return NULL; } if (lys_getnext_data(module, lys_parent(siblings), name, strlen(name), LYS_LEAFLIST | LYS_LEAF, &snode) || !snode) { LOGERR(siblings->module->ctx, LY_EINVAL, "Failed to find \"%s\" as a sibling to \"%s:%s\".", name, lys_node_module(siblings)->name, siblings->name); return NULL; } return _lyd_new_leaf(parent, snode, val_str, 0, 0); }
1
[ "CWE-119" ]
libyang
32fb4993bc8bb49e93e84016af3c10ea53964be5
177,434,104,804,178,500,000,000,000,000,000,000,000
23
schema tree BUGFIX do not check features while still resolving schema Fixes #723
arrangeCursor(Buffer *buf) { int col, col2, pos; int delta = 1; if (buf == NULL || buf->currentLine == NULL) return; /* Arrange line */ if (buf->currentLine->linenumber - buf->topLine->linenumber >= buf->LINES || buf->currentLine->linenumber < buf->topLine->linenumber) { /* * buf->topLine = buf->currentLine; */ buf->topLine = lineSkip(buf, buf->currentLine, 0, FALSE); } /* Arrange column */ while (buf->pos < 0 && buf->currentLine->prev && buf->currentLine->bpos) { pos = buf->pos + buf->currentLine->prev->len; cursorUp0(buf, 1); buf->pos = pos; } while (buf->pos >= buf->currentLine->len && buf->currentLine->next && buf->currentLine->next->bpos) { pos = buf->pos - buf->currentLine->len; cursorDown0(buf, 1); buf->pos = pos; } if (buf->currentLine->len == 0 || buf->pos < 0) buf->pos = 0; else if (buf->pos >= buf->currentLine->len) buf->pos = buf->currentLine->len - 1; #ifdef USE_M17N while (buf->pos > 0 && buf->currentLine->propBuf[buf->pos] & PC_WCHAR2) buf->pos--; #endif col = COLPOS(buf->currentLine, buf->pos); #ifdef USE_M17N while (buf->pos + delta < buf->currentLine->len && buf->currentLine->propBuf[buf->pos + delta] & PC_WCHAR2) delta++; #endif col2 = COLPOS(buf->currentLine, buf->pos + delta); if (col < buf->currentColumn || col2 > buf->COLS + buf->currentColumn) { buf->currentColumn = 0; if (col2 > buf->COLS) columnSkip(buf, col); } /* Arrange cursor */ buf->cursorY = buf->currentLine->linenumber - buf->topLine->linenumber; buf->visualpos = buf->currentLine->bwidth + COLPOS(buf->currentLine, buf->pos) - buf->currentColumn; buf->cursorX = buf->visualpos - buf->currentLine->bwidth; #ifdef DISPLAY_DEBUG fprintf(stderr, "arrangeCursor: column=%d, cursorX=%d, visualpos=%d, pos=%d, len=%d\n", buf->currentColumn, buf->cursorX, buf->visualpos, buf->pos, buf->currentLine->len); #endif }
0
[ "CWE-119" ]
w3m
0c3f5d0e0d9269ad47b8f4b061d7818993913189
179,563,537,981,172,700,000,000,000,000,000,000,000
58
Prevent array index out of bounds for symbol Bug-Debian: https://github.com/tats/w3m/issues/38
static int airo_set_power(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct airo_info *local = dev->ml_priv; readConfigRid(local, 1); if (vwrq->disabled) { if (sniffing_mode(local)) return -EINVAL; local->config.powerSaveMode = POWERSAVE_CAM; local->config.rmode &= ~RXMODE_MASK; local->config.rmode |= RXMODE_BC_MC_ADDR; set_bit (FLAG_COMMIT, &local->flags); return -EINPROGRESS; /* Call commit handler */ } if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { local->config.fastListenDelay = cpu_to_le16((vwrq->value + 500) / 1024); local->config.powerSaveMode = POWERSAVE_PSPCAM; set_bit (FLAG_COMMIT, &local->flags); } else if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) { local->config.fastListenInterval = local->config.listenInterval = cpu_to_le16((vwrq->value + 500) / 1024); local->config.powerSaveMode = POWERSAVE_PSPCAM; set_bit (FLAG_COMMIT, &local->flags); } switch (vwrq->flags & IW_POWER_MODE) { case IW_POWER_UNICAST_R: if (sniffing_mode(local)) return -EINVAL; local->config.rmode &= ~RXMODE_MASK; local->config.rmode |= RXMODE_ADDR; set_bit (FLAG_COMMIT, &local->flags); break; case IW_POWER_ALL_R: if (sniffing_mode(local)) return -EINVAL; local->config.rmode &= ~RXMODE_MASK; local->config.rmode |= RXMODE_BC_MC_ADDR; set_bit (FLAG_COMMIT, &local->flags); case IW_POWER_ON: /* This is broken, fixme ;-) */ break; default: return -EINVAL; } // Note : we may want to factor local->need_commit here // Note2 : may also want to factor RXMODE_RFMON test return -EINPROGRESS; /* Call commit handler */ }
0
[ "CWE-703", "CWE-264" ]
linux
550fd08c2cebad61c548def135f67aba284c6162
314,454,502,740,786,200,000,000,000,000,000,000,000
52
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared After the last patch, We are left in a state in which only drivers calling ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real hardware call ether_setup for their net_devices and don't hold any state in their skbs. There are a handful of drivers that violate this assumption of course, and need to be fixed up. This patch identifies those drivers, and marks them as not being able to support the safe transmission of skbs by clearning the IFF_TX_SKB_SHARING flag in priv_flags Signed-off-by: Neil Horman <[email protected]> CC: Karsten Keil <[email protected]> CC: "David S. Miller" <[email protected]> CC: Jay Vosburgh <[email protected]> CC: Andy Gospodarek <[email protected]> CC: Patrick McHardy <[email protected]> CC: Krzysztof Halasa <[email protected]> CC: "John W. Linville" <[email protected]> CC: Greg Kroah-Hartman <[email protected]> CC: Marcel Holtmann <[email protected]> CC: Johannes Berg <[email protected]> Signed-off-by: David S. Miller <[email protected]>
encode_ofpact(const struct ofpact *a, enum ofp_version ofp_version, struct ofpbuf *out) { switch (a->type) { #define OFPACT(ENUM, STRUCT, MEMBER, NAME) \ case OFPACT_##ENUM: \ encode_##ENUM(ofpact_get_##ENUM(a), ofp_version, out); \ return; OFPACTS #undef OFPACT default: OVS_NOT_REACHED(); } }
0
[ "CWE-125" ]
ovs
9237a63c47bd314b807cda0bd2216264e82edbe8
347,542,977,678,789,500,000,000,000,000,000,000
14
ofp-actions: Avoid buffer overread in BUNDLE action decoding. Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052 Signed-off-by: Ben Pfaff <[email protected]> Acked-by: Justin Pettit <[email protected]>
int handler::ha_external_lock(THD *thd, int lock_type) { int error; DBUG_ENTER("handler::ha_external_lock"); /* Whether this is lock or unlock, this should be true, and is to verify that if get_auto_increment() was called (thus may have reserved intervals or taken a table lock), ha_release_auto_increment() was too. */ DBUG_ASSERT(next_insert_id == 0); /* Consecutive calls for lock without unlocking in between is not allowed */ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || ((lock_type != F_UNLCK && m_lock_type == F_UNLCK) || lock_type == F_UNLCK)); /* SQL HANDLER call locks/unlock while scanning (RND/INDEX). */ DBUG_ASSERT(inited == NONE || table->open_by_handler); if (MYSQL_HANDLER_RDLOCK_START_ENABLED() || MYSQL_HANDLER_WRLOCK_START_ENABLED() || MYSQL_HANDLER_UNLOCK_START_ENABLED()) { if (lock_type == F_RDLCK) { MYSQL_HANDLER_RDLOCK_START(table_share->db.str, table_share->table_name.str); } else if (lock_type == F_WRLCK) { MYSQL_HANDLER_WRLOCK_START(table_share->db.str, table_share->table_name.str); } else if (lock_type == F_UNLCK) { MYSQL_HANDLER_UNLOCK_START(table_share->db.str, table_share->table_name.str); } } /* We cache the table flags if the locking succeeded. Otherwise, we keep them as they were when they were fetched in ha_open(). 
*/ MYSQL_TABLE_LOCK_WAIT(m_psi, PSI_TABLE_EXTERNAL_LOCK, lock_type, { error= external_lock(thd, lock_type); }) DBUG_EXECUTE_IF("external_lock_failure", error= HA_ERR_GENERIC;); if (likely(error == 0 || lock_type == F_UNLCK)) { m_lock_type= lock_type; cached_table_flags= table_flags(); if (table_share->tmp_table == NO_TMP_TABLE) mysql_audit_external_lock(thd, table_share, lock_type); } if (MYSQL_HANDLER_RDLOCK_DONE_ENABLED() || MYSQL_HANDLER_WRLOCK_DONE_ENABLED() || MYSQL_HANDLER_UNLOCK_DONE_ENABLED()) { if (lock_type == F_RDLCK) { MYSQL_HANDLER_RDLOCK_DONE(error); } else if (lock_type == F_WRLCK) { MYSQL_HANDLER_WRLOCK_DONE(error); } else if (lock_type == F_UNLCK) { MYSQL_HANDLER_UNLOCK_DONE(error); } } DBUG_RETURN(error); }
0
[ "CWE-416" ]
server
af810407f78b7f792a9bb8c47c8c532eb3b3a758
33,587,311,582,392,150,000,000,000,000,000,000,000
74
MDEV-28098 incorrect key in "dup value" error after long unique reset errkey after using it, so that it wouldn't affect the next error message in the next statement
static Object find_node(CVarRef children, xmlNodePtr node) { for (ArrayIter iter(children.toArray()); iter; ++iter) { if (iter.second().isObject()) { c_SimpleXMLElement *elem = iter.second().toObject().getTyped<c_SimpleXMLElement>(); Object ret = elem->m_node == node ? elem : find_node(elem->m_children, node); if (!ret.isNull()) return ret; } else if(iter.second().isArray()) { Object ret = find_node(iter.second(), node); if (!ret.isNull()) return ret; } } return nullptr; }
0
[ "CWE-94" ]
hhvm
95f96e7287effe2fcdfb9a5338d1a7e4f55b083b
188,927,172,130,252,200,000,000,000,000,000,000,000
15
Fix libxml_disable_entity_loader() This wasn't calling requestInit and setting the libxml handler no null. So the first time an error came along it would reset the handler from no-op to reading again. This is a much better fix, we set our custom handler in requestInit and when libxml_disable_entity_loader we store that state as a member bool ensuring requestInit is always called to set our own handler. If the handler isn't inserted then the behavious is as before. The only time this could go pear shaped is say we wanted to make the default be off. In that case we'd need a global requestInit that is always called since there are libxml references everywhere. Reviewed By: @jdelong Differential Revision: D1116686
static int ZEND_FASTCALL ZEND_JMPZNZ_SPEC_TMP_HANDLER(ZEND_OPCODE_HANDLER_ARGS) { zend_op *opline = EX(opline); zend_free_op free_op1; zval *val = _get_zval_ptr_tmp(&opline->op1, EX(Ts), &free_op1 TSRMLS_CC); int retval; if (IS_TMP_VAR == IS_TMP_VAR && Z_TYPE_P(val) == IS_BOOL) { retval = Z_LVAL_P(val); } else { retval = i_zend_is_true(val); zval_dtor(free_op1.var); if (UNEXPECTED(EG(exception) != NULL)) { ZEND_VM_CONTINUE(); } } if (EXPECTED(retval != 0)) { #if DEBUG_ZEND>=2 printf("Conditional jmp on true to %d\n", opline->extended_value); #endif ZEND_VM_SET_OPCODE(&EX(op_array)->opcodes[opline->extended_value]); ZEND_VM_CONTINUE(); /* CHECK_ME */ } else { #if DEBUG_ZEND>=2 printf("Conditional jmp on false to %d\n", opline->op2.u.opline_num); #endif ZEND_VM_SET_OPCODE(&EX(op_array)->opcodes[opline->op2.u.opline_num]); ZEND_VM_CONTINUE(); /* CHECK_ME */ } }
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
120,134,862,525,934,420,000,000,000,000,000,000,000
30
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
static void qemu_chr_close_stdio(struct CharDriverState *chr) { term_exit(); fd_chr_close(chr); }
0
[ "CWE-416" ]
qemu
a4afa548fc6dd9842ed86639b4d37d4d1c4ad480
69,872,968,998,237,800,000,000,000,000,000,000,000
5
char: move front end handlers in CharBackend Since the hanlders are associated with a CharBackend, rather than the CharDriverState, it is more appropriate to store in CharBackend. This avoids the handler copy dance in qemu_chr_fe_set_handlers() then mux_chr_update_read_handler(), by storing the CharBackend pointer directly. Also a mux CharDriver should go through mux->backends[focused], since chr->be will stay NULL. Before that, it was possible to call chr->handler by mistake with surprising results, for ex through qemu_chr_be_can_write(), which would result in calling the last set handler front end, not the one with focus. Signed-off-by: Marc-André Lureau <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
static void print_serial(sc_card_t *in_card) { int r; sc_serial_number_t serial; r = sc_lock(card); if (r == SC_SUCCESS) r = sc_card_ctl(in_card, SC_CARDCTL_GET_SERIALNR, &serial); sc_unlock(card); if (r) fprintf(stderr, "sc_card_ctl(*, SC_CARDCTL_GET_SERIALNR, *) failed\n"); else util_hex_dump_asc(stdout, serial.value, serial.len, -1); }
0
[ "CWE-125" ]
OpenSC
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
79,639,957,265,641,910,000,000,000,000,000,000,000
14
fixed out of bounds reads Thanks to Eric Sesterhenn from X41 D-SEC GmbH for reporting and suggesting security fixes.
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, unsigned long max_load_move, struct sched_domain *sd, enum cpu_idle_type idle, int *all_pinned, int *this_best_prio) { long rem_load_move = max_load_move; int busiest_cpu = cpu_of(busiest); struct task_group *tg; rcu_read_lock(); list_for_each_entry(tg, &task_groups, list) { long imbalance; unsigned long this_weight, busiest_weight; long rem_load, max_load, moved_load; /* * empty group */ if (!aggregate(tg, sd)->task_weight) continue; rem_load = rem_load_move * aggregate(tg, sd)->rq_weight; rem_load /= aggregate(tg, sd)->load + 1; this_weight = tg->cfs_rq[this_cpu]->task_weight; busiest_weight = tg->cfs_rq[busiest_cpu]->task_weight; imbalance = (busiest_weight - this_weight) / 2; if (imbalance < 0) imbalance = busiest_weight; max_load = max(rem_load, imbalance); moved_load = __load_balance_fair(this_rq, this_cpu, busiest, max_load, sd, idle, all_pinned, this_best_prio, tg->cfs_rq[busiest_cpu]); if (!moved_load) continue; move_group_shares(tg, sd, busiest_cpu, this_cpu); moved_load *= aggregate(tg, sd)->load; moved_load /= aggregate(tg, sd)->rq_weight + 1; rem_load_move -= moved_load; if (rem_load_move < 0) break; } rcu_read_unlock(); return max_load_move - rem_load_move; }
0
[]
linux-2.6
ac884dec6d4a7df252150af875cffddf8f1d9c15
280,430,966,476,784,970,000,000,000,000,000,000,000
53
sched: fair-group scheduling vs latency Currently FAIR_GROUP sched grows the scheduler latency outside of sysctl_sched_latency, invert this so it stays within. Signed-off-by: Peter Zijlstra <[email protected]> Signed-off-by: Ingo Molnar <[email protected]>
ptvcursor_set_tree(ptvcursor_t *ptvc, proto_tree *tree) { ptvc->tree = tree; }
0
[ "CWE-401" ]
wireshark
a9fc769d7bb4b491efb61c699d57c9f35269d871
302,965,354,312,157,640,000,000,000,000,000,000,000
4
epan: Fix a memory leak. Make sure _proto_tree_add_bits_ret_val allocates a bits array using the packet scope, otherwise we leak memory. Fixes #17032.
void dev_printk(const char *level, const struct device *dev, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; __dev_printk(level, dev, &vaf); va_end(args); }
0
[ "CWE-787" ]
linux
aa838896d87af561a33ecefea1caa4c15a68bc47
191,675,236,759,438,230,000,000,000,000,000,000,000
15
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions Convert the various sprintf fmaily calls in sysfs device show functions to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety. Done with: $ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 . And cocci script: $ cat sysfs_emit_dev.cocci @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - sprintf(buf, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - strcpy(buf, chr); + sysfs_emit(buf, chr); ...> } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - sprintf(buf, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... 
- len += scnprintf(buf + len, PAGE_SIZE - len, + len += sysfs_emit_at(buf, len, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { ... - strcpy(buf, chr); - return strlen(buf); + return sysfs_emit(buf, chr); } Signed-off-by: Joe Perches <[email protected]> Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com Signed-off-by: Greg Kroah-Hartman <[email protected]>
gst_mpegts_section_new (guint16 pid, guint8 * data, gsize data_size) { GstMpegtsSection *res = NULL; guint8 tmp; guint8 table_id; guint16 section_length; /* Check for length */ section_length = GST_READ_UINT16_BE (data + 1) & 0x0FFF; if (G_UNLIKELY (data_size < section_length + 3)) goto short_packet; /* Table id is in first byte */ table_id = *data; res = _gst_mpegts_section_init (pid, table_id); res->data = data; /* table_id (already parsed) : 8 bit */ data++; /* section_syntax_indicator : 1 bit * other_fields (reserved) : 3 bit*/ res->short_section = (*data & 0x80) == 0x00; /* section_length (already parsed) : 12 bit */ res->section_length = section_length + 3; if (!res->short_section) { /* CRC is after section_length (-4 for the size of the CRC) */ res->crc = GST_READ_UINT32_BE (res->data + res->section_length - 4); /* Skip to after section_length */ data += 2; /* subtable extension : 16 bit */ res->subtable_extension = GST_READ_UINT16_BE (data); data += 2; /* reserved : 2 bit * version_number : 5 bit * current_next_indicator : 1 bit */ tmp = *data++; res->version_number = tmp >> 1 & 0x1f; res->current_next_indicator = tmp & 0x01; /* section_number : 8 bit */ res->section_number = *data++; /* last_section_number : 8 bit */ res->last_section_number = *data; } return res; short_packet: { GST_WARNING ("PID 0x%04x section extends past provided data (got:%" G_GSIZE_FORMAT ", need:%d)", pid, data_size, section_length + 3); g_free (data); return NULL; } }
1
[ "CWE-125" ]
gst-plugins-bad
d58f668ece8795bddb3316832e1848c7b7cf38ac
214,999,627,840,689,900,000,000,000,000,000,000,000
56
mpegtssection: Add more section size checks The smallest section ever needs to be at least 3 bytes (i.e. just the short header). Non-short headers need to be at least 11 bytes long (3 for the minimum header, 5 for the non-short header, and 4 for the CRC). https://bugzilla.gnome.org/show_bug.cgi?id=775048
static void pb_release_single_field(pb_field_iter_t *field) { pb_type_t type; type = field->type; if (PB_HTYPE(type) == PB_HTYPE_ONEOF) { if (*(pb_size_t*)field->pSize != field->tag) return; /* This is not the current field in the union */ } /* Release anything contained inside an extension or submsg. * This has to be done even if the submsg itself is statically * allocated. */ if (PB_LTYPE(type) == PB_LTYPE_EXTENSION) { /* Release fields from all extensions in the linked list */ pb_extension_t *ext = *(pb_extension_t**)field->pData; while (ext != NULL) { pb_field_iter_t ext_iter; if (pb_field_iter_begin_extension(&ext_iter, ext)) { pb_release_single_field(&ext_iter); } ext = ext->next; } } else if (PB_LTYPE_IS_SUBMSG(type) && PB_ATYPE(type) != PB_ATYPE_CALLBACK) { /* Release fields in submessage or submsg array */ pb_size_t count = 1; if (PB_ATYPE(type) == PB_ATYPE_POINTER) { field->pData = *(void**)field->pField; } else { field->pData = field->pField; } if (PB_HTYPE(type) == PB_HTYPE_REPEATED) { count = *(pb_size_t*)field->pSize; if (PB_ATYPE(type) == PB_ATYPE_STATIC && count > field->array_size) { /* Protect against corrupted _count fields */ count = field->array_size; } } if (field->pData) { for (; count > 0; count--) { pb_release(field->submsg_desc, field->pData); field->pData = (char*)field->pData + field->data_size; } } } if (PB_ATYPE(type) == PB_ATYPE_POINTER) { if (PB_HTYPE(type) == PB_HTYPE_REPEATED && (PB_LTYPE(type) == PB_LTYPE_STRING || PB_LTYPE(type) == PB_LTYPE_BYTES)) { /* Release entries in repeated string or bytes array */ void **pItem = *(void***)field->pField; pb_size_t count = *(pb_size_t*)field->pSize; for (; count > 0; count--) { pb_free(*pItem); *pItem++ = NULL; } } if (PB_HTYPE(type) == PB_HTYPE_REPEATED) { /* We are going to release the array, so set the size to 0 */ *(pb_size_t*)field->pSize = 0; } /* Release main pointer */ pb_free(*(void**)field->pField); *(void**)field->pField = NULL; } }
0
[ "CWE-763" ]
nanopb
e2f0ccf939d9f82931d085acb6df8e9a182a4261
31,674,340,861,981,850,000,000,000,000,000,000,000
90
Fix invalid free() with oneof (#647) Nanopb would call free() or realloc() on an invalid (attacker controlled) pointer value when all the following conditions are true: - PB_ENABLE_MALLOC is defined at the compile time - Message definition contains an oneof field, and the oneof contains at least one pointer type field and at least one non-pointer type field. - Data being decoded first contains a non-pointer value for the oneof field, and later contains an overwriting pointer value. Depending on message layout, the bug may not be exploitable in all cases, but it is known to be exploitable at least with string and bytes fields. Actual security impact will also depend on the heap implementation used.
delete_conn(struct conn *conn) { ovs_assert(conn->conn_type == CT_CONN_TYPE_DEFAULT); ovs_mutex_destroy(&conn->lock); free(conn->nat_conn); delete_conn_cmn(conn); }
0
[ "CWE-400" ]
ovs
79349cbab0b2a755140eedb91833ad2760520a83
50,820,272,189,398,070,000,000,000,000,000,000,000
7
flow: Support extra padding length. Although not required, padding can be optionally added until the packet length is MTU bytes. A packet with extra padding currently fails sanity checks. Vulnerability: CVE-2020-35498 Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.") Reported-by: Joakim Hindersson <[email protected]> Acked-by: Ilya Maximets <[email protected]> Signed-off-by: Flavio Leitner <[email protected]> Signed-off-by: Ilya Maximets <[email protected]>
static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, struct nfs_fsinfo *info) { int error; struct nfs_fattr *fattr = info->fattr; struct nfs4_label *label = fattr->label; error = nfs4_server_capabilities(server, mntfh); if (error < 0) { dprintk("nfs4_get_root: getcaps error = %d\n", -error); return error; } error = nfs4_proc_getattr(server, mntfh, fattr, label, NULL); if (error < 0) { dprintk("nfs4_get_root: getattr error = %d\n", -error); goto out; } if (fattr->valid & NFS_ATTR_FATTR_FSID && !nfs_fsid_equal(&server->fsid, &fattr->fsid)) memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); out: return error; }
0
[ "CWE-787" ]
linux
b4487b93545214a9db8cbf32e86411677b0cca21
132,395,343,793,845,860,000,000,000,000,000,000,000
26
nfs: Fix getxattr kernel panic and memory overflow Move the buffer size check to decode_attr_security_label() before memcpy() Only call memcpy() if the buffer is large enough Fixes: aa9c2669626c ("NFS: Client implementation of Labeled-NFS") Signed-off-by: Jeffrey Mitchell <[email protected]> [Trond: clean up duplicate test of label->len != 0] Signed-off-by: Trond Myklebust <[email protected]>
static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, nodemask_t *nodes_allowed) { unsigned long min_count, ret; struct page *page; LIST_HEAD(page_list); NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL); /* * Bit mask controlling how hard we retry per-node allocations. * If we can not allocate the bit mask, do not attempt to allocate * the requested huge pages. */ if (node_alloc_noretry) nodes_clear(*node_alloc_noretry); else return -ENOMEM; /* * resize_lock mutex prevents concurrent adjustments to number of * pages in hstate via the proc/sysfs interfaces. */ mutex_lock(&h->resize_lock); flush_free_hpage_work(h); spin_lock_irq(&hugetlb_lock); /* * Check for a node specific request. * Changing node specific huge page count may require a corresponding * change to the global count. In any case, the passed node mask * (nodes_allowed) will restrict alloc/free to the specified node. */ if (nid != NUMA_NO_NODE) { unsigned long old_count = count; count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; /* * User may have specified a large count value which caused the * above calculation to overflow. In this case, they wanted * to allocate as many huge pages as possible. Set count to * largest possible value to align with their intention. */ if (count < old_count) count = ULONG_MAX; } /* * Gigantic pages runtime allocation depend on the capability for large * page range allocation. * If the system does not provide this feature, return an error when * the user tries to allocate gigantic pages but let the user free the * boottime allocated gigantic pages. */ if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { if (count > persistent_huge_pages(h)) { spin_unlock_irq(&hugetlb_lock); mutex_unlock(&h->resize_lock); NODEMASK_FREE(node_alloc_noretry); return -EINVAL; } /* Fall through to decrease pool */ } /* * Increase the pool size * First take pages out of surplus state. 
Then make up the * remaining difference by allocating fresh huge pages. * * We might race with alloc_surplus_huge_page() here and be unable * to convert a surplus huge page to a normal huge page. That is * not critical, though, it just means the overall size of the * pool might be one hugepage larger than it needs to be, but * within all the constraints specified by the sysctls. */ while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { if (!adjust_pool_surplus(h, nodes_allowed, -1)) break; } while (count > persistent_huge_pages(h)) { /* * If this allocation races such that we no longer need the * page, free_huge_page will handle it by freeing the page * and reducing the surplus. */ spin_unlock_irq(&hugetlb_lock); /* yield cpu to avoid soft lockup */ cond_resched(); ret = alloc_pool_huge_page(h, nodes_allowed, node_alloc_noretry); spin_lock_irq(&hugetlb_lock); if (!ret) goto out; /* Bail for signals. Probably ctrl-c from user */ if (signal_pending(current)) goto out; } /* * Decrease the pool size * First return free pages to the buddy allocator (being careful * to keep enough around to satisfy reservations). Then place * pages into surplus state as needed so the pool will shrink * to the desired size as pages become free. * * By placing pages into the surplus state independent of the * overcommit value, we are allowing the surplus pool size to * exceed overcommit. There are few sane options here. Since * alloc_surplus_huge_page() is checking the global counter, * though, we'll note that we're not allowed to exceed surplus * and won't grow the pool anywhere else. Not until one of the * sysctls are changed, or the surplus pages go out of use. 
*/ min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; min_count = max(count, min_count); try_to_free_low(h, min_count, nodes_allowed); /* * Collect pages to be removed on list without dropping lock */ while (min_count < persistent_huge_pages(h)) { page = remove_pool_huge_page(h, nodes_allowed, 0); if (!page) break; list_add(&page->lru, &page_list); } /* free the pages after dropping lock */ spin_unlock_irq(&hugetlb_lock); update_and_free_pages_bulk(h, &page_list); flush_free_hpage_work(h); spin_lock_irq(&hugetlb_lock); while (count < persistent_huge_pages(h)) { if (!adjust_pool_surplus(h, nodes_allowed, 1)) break; } out: h->max_huge_pages = persistent_huge_pages(h); spin_unlock_irq(&hugetlb_lock); mutex_unlock(&h->resize_lock); NODEMASK_FREE(node_alloc_noretry); return 0; }
0
[]
linux
a4a118f2eead1d6c49e00765de89878288d4b890
62,739,707,413,603,170,000,000,000,000,000,000,000
149
hugetlbfs: flush TLBs correctly after huge_pmd_unshare When __unmap_hugepage_range() calls to huge_pmd_unshare() succeed, a TLB flush is missing. This TLB flush must be performed before releasing the i_mmap_rwsem, in order to prevent an unshared PMDs page from being released and reused before the TLB flush took place. Arguably, a comprehensive solution would use mmu_gather interface to batch the TLB flushes and the PMDs page release, however it is not an easy solution: (1) try_to_unmap_one() and try_to_migrate_one() also call huge_pmd_unshare() and they cannot use the mmu_gather interface; and (2) deferring the release of the page reference for the PMDs page until after i_mmap_rwsem is dropeed can confuse huge_pmd_unshare() into thinking PMDs are shared when they are not. Fix __unmap_hugepage_range() by adding the missing TLB flush, and forcing a flush when unshare is successful. Fixes: 24669e58477e ("hugetlb: use mmu_gather instead of a temporary linked list for accumulating pages)" # 3.6 Signed-off-by: Nadav Amit <[email protected]> Reviewed-by: Mike Kravetz <[email protected]> Cc: Aneesh Kumar K.V <[email protected]> Cc: KAMEZAWA Hiroyuki <[email protected]> Cc: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static int _hostsock_getsockopt( oe_fd_t* sock_, int level, int optname, void* optval, oe_socklen_t* optlen) { int ret = -1; sock_t* sock = _cast_sock(sock_); oe_socklen_t optlen_in = 0; oe_socklen_t optlen_out = 0; oe_errno = 0; if (!sock || !optval || !optlen) OE_RAISE_ERRNO(OE_EINVAL); optlen_in = *optlen; if (oe_syscall_getsockopt_ocall( &ret, sock->host_fd, level, optname, optval, optlen_in, &optlen_out) != OE_OK) { OE_RAISE_ERRNO(OE_EINVAL); } /* * The POSIX specification for getsockopt states that if the size of optval * is greater than the input optlen, then the value stored in the object * pointed to by the optval argument shall be silently truncated. We do this * in the enclave to ensure that the untrusted host has not returned an * arbitrarily large optlen value. * Refer to * https://pubs.opengroup.org/onlinepubs/9699919799/functions/getsockopt.html * for more detail. */ if (optlen_out > optlen_in) optlen_out = optlen_in; *optlen = optlen_out; done: return ret; }
0
[ "CWE-200", "CWE-552" ]
openenclave
bcac8e7acb514429fee9e0b5d0c7a0308fd4d76b
155,363,369,873,911,480,000,000,000,000,000,000,000
50
Merge pull request from GHSA-525h-wxcc-f66m Signed-off-by: Ming-Wei Shih <[email protected]>
arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, struct QBUFFER __iomem *prbuffer) { uint8_t *pQbuffer; uint8_t __iomem *iop_data; uint32_t iop_len; if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer); iop_data = (uint8_t __iomem *)prbuffer->data; iop_len = readl(&prbuffer->data_len); while (iop_len > 0) { pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex]; *pQbuffer = readb(iop_data); acb->rqbuf_putIndex++; acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER; iop_data++; iop_len--; } arcmsr_iop_message_read(acb); return 1; }
0
[ "CWE-119", "CWE-787" ]
linux
7bc2b55a5c030685b399bb65b6baa9ccc3d1f167
185,034,664,231,145,930,000,000,000,000,000,000,000
22
scsi: arcmsr: Buffer overflow in arcmsr_iop_message_xfer() We need to put an upper bound on "user_len" so the memcpy() doesn't overflow. Cc: <[email protected]> Reported-by: Marco Grassi <[email protected]> Signed-off-by: Dan Carpenter <[email protected]> Reviewed-by: Tomas Henzl <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
psutil_net_io_counters(PyObject *self, PyObject *args) { char *buf = NULL, *lim, *next; struct if_msghdr *ifm; int mib[6]; size_t len; PyObject *py_retdict = PyDict_New(); PyObject *py_ifc_info = NULL; if (py_retdict == NULL) return NULL; mib[0] = CTL_NET; // networking subsystem mib[1] = PF_ROUTE; // type of information mib[2] = 0; // protocol (IPPROTO_xxx) mib[3] = 0; // address family mib[4] = NET_RT_IFLIST; // operation mib[5] = 0; if (sysctl(mib, 6, NULL, &len, NULL, 0) < 0) { PyErr_SetFromErrno(PyExc_OSError); goto error; } buf = malloc(len); if (buf == NULL) { PyErr_NoMemory(); goto error; } if (sysctl(mib, 6, buf, &len, NULL, 0) < 0) { PyErr_SetFromErrno(PyExc_OSError); goto error; } lim = buf + len; for (next = buf; next < lim; ) { py_ifc_info = NULL; ifm = (struct if_msghdr *)next; next += ifm->ifm_msglen; if (ifm->ifm_type == RTM_IFINFO) { struct if_msghdr *if2m = (struct if_msghdr *)ifm; struct sockaddr_dl *sdl = (struct sockaddr_dl *)(if2m + 1); char ifc_name[32]; strncpy(ifc_name, sdl->sdl_data, sdl->sdl_nlen); ifc_name[sdl->sdl_nlen] = 0; // XXX: ignore usbus interfaces: // http://lists.freebsd.org/pipermail/freebsd-current/ // 2011-October/028752.html // 'ifconfig -a' doesn't show them, nor do we. if (strncmp(ifc_name, "usbus", 5) == 0) continue; py_ifc_info = Py_BuildValue("(kkkkkkki)", if2m->ifm_data.ifi_obytes, if2m->ifm_data.ifi_ibytes, if2m->ifm_data.ifi_opackets, if2m->ifm_data.ifi_ipackets, if2m->ifm_data.ifi_ierrors, if2m->ifm_data.ifi_oerrors, if2m->ifm_data.ifi_iqdrops, #ifdef _IFI_OQDROPS if2m->ifm_data.ifi_oqdrops #else 0 #endif ); if (!py_ifc_info) goto error; if (PyDict_SetItemString(py_retdict, ifc_name, py_ifc_info)) goto error; Py_CLEAR(py_ifc_info); } else { continue; } } free(buf); return py_retdict; error: Py_XDECREF(py_ifc_info); Py_DECREF(py_retdict); if (buf != NULL) free(buf); return NULL; }
0
[ "CWE-415" ]
psutil
7d512c8e4442a896d56505be3e78f1156f443465
150,870,156,566,438,700,000,000,000,000,000,000,000
89
Use Py_CLEAR instead of Py_DECREF to also set the variable to NULL (#1616) These files contain loops that convert system data into python objects and during the process they create objects and dereference their refcounts after they have been added to the resulting list. However, in case of errors during the creation of those python objects, the refcount to previously allocated objects is dropped again with Py_XDECREF, which should be a no-op in case the paramater is NULL. Even so, in most of these loops the variables pointing to the objects are never set to NULL, even after Py_DECREF is called at the end of the loop iteration. This means, after the first iteration, if an error occurs those python objects will get their refcount dropped two times, resulting in a possible double-free.
CImg<T>& normalize() { const ulongT whd = (ulongT)_width*_height*_depth; cimg_pragma_openmp(parallel for cimg_openmp_collapse(2) cimg_openmp_if(_width>=(cimg_openmp_sizefactor)*512 && _height*_depth>=16)) cimg_forYZ(*this,y,z) { T *ptrd = data(0,y,z,0); cimg_forX(*this,x) { const T *ptrs = ptrd; float n = 0; cimg_forC(*this,c) { n+=cimg::sqr((float)*ptrs); ptrs+=whd; } n = (float)std::sqrt(n); T *_ptrd = ptrd++; if (n>0) cimg_forC(*this,c) { *_ptrd = (T)(*_ptrd/n); _ptrd+=whd; } else cimg_forC(*this,c) { *_ptrd = (T)0; _ptrd+=whd; } } } return *this; }
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
302,673,499,995,430,630,000,000,000,000,000,000,000
18
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
TEST_F(ExprMatchTest, LteWithLHSFieldPathMatchesCorrectly) { createMatcher(fromjson("{$expr: {$lte: ['$x', 3]}}")); ASSERT_TRUE(matches(BSON("x" << 3))); ASSERT_FALSE(matches(BSON("x" << 10))); }
0
[]
mongo
ee97c0699fd55b498310996ee002328e533681a3
26,015,544,976,406,900,000,000,000,000,000,000,000
6
SERVER-36993 Fix crash due to incorrect $or pushdown for indexed $expr.
static int cx23888_ir_rx_s_parameters(struct v4l2_subdev *sd, struct v4l2_subdev_ir_parameters *p) { struct cx23888_ir_state *state = to_state(sd); struct cx23885_dev *dev = state->dev; struct v4l2_subdev_ir_parameters *o = &state->rx_params; u16 rxclk_divider; if (p->shutdown) return cx23888_ir_rx_shutdown(sd); if (p->mode != V4L2_SUBDEV_IR_MODE_PULSE_WIDTH) return -ENOSYS; mutex_lock(&state->rx_params_lock); o->shutdown = p->shutdown; o->mode = p->mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH; o->bytes_per_data_element = p->bytes_per_data_element = sizeof(union cx23888_ir_fifo_rec); /* Before we tweak the hardware, we have to disable the receiver */ irqenable_rx(dev, 0); control_rx_enable(dev, false); control_rx_demodulation_enable(dev, p->modulation); o->modulation = p->modulation; if (p->modulation) { p->carrier_freq = rxclk_rx_s_carrier(dev, p->carrier_freq, &rxclk_divider); o->carrier_freq = p->carrier_freq; o->duty_cycle = p->duty_cycle = 50; control_rx_s_carrier_window(dev, p->carrier_freq, &p->carrier_range_lower, &p->carrier_range_upper); o->carrier_range_lower = p->carrier_range_lower; o->carrier_range_upper = p->carrier_range_upper; p->max_pulse_width = (u32) pulse_width_count_to_ns(FIFO_RXTX, rxclk_divider); } else { p->max_pulse_width = rxclk_rx_s_max_pulse_width(dev, p->max_pulse_width, &rxclk_divider); } o->max_pulse_width = p->max_pulse_width; atomic_set(&state->rxclk_divider, rxclk_divider); p->noise_filter_min_width = filter_rx_s_min_width(dev, p->noise_filter_min_width); o->noise_filter_min_width = p->noise_filter_min_width; p->resolution = clock_divider_to_resolution(rxclk_divider); o->resolution = p->resolution; /* FIXME - make this dependent on resolution for better performance */ control_rx_irq_watermark(dev, RX_FIFO_HALF_FULL); control_rx_s_edge_detection(dev, CNTRL_EDG_BOTH); o->invert_level = p->invert_level; atomic_set(&state->rx_invert, p->invert_level); o->interrupt_enable = p->interrupt_enable; o->enable = p->enable; if (p->enable) { 
unsigned long flags; spin_lock_irqsave(&state->rx_kfifo_lock, flags); kfifo_reset(&state->rx_kfifo); /* reset tx_fifo too if there is one... */ spin_unlock_irqrestore(&state->rx_kfifo_lock, flags); if (p->interrupt_enable) irqenable_rx(dev, IRQEN_RSE | IRQEN_RTE | IRQEN_ROE); control_rx_enable(dev, p->enable); } mutex_unlock(&state->rx_params_lock); return 0; }
0
[ "CWE-400", "CWE-401" ]
linux
a7b2df76b42bdd026e3106cf2ba97db41345a177
62,606,693,024,685,090,000,000,000,000,000,000,000
86
media: rc: prevent memory leak in cx23888_ir_probe In cx23888_ir_probe if kfifo_alloc fails the allocated memory for state should be released. Signed-off-by: Navid Emamdoost <[email protected]> Signed-off-by: Sean Young <[email protected]> Signed-off-by: Mauro Carvalho Chehab <[email protected]>
static void cfg_indent(FILE *fp, int indent) { while (indent--) fprintf(fp, " "); }
0
[]
libconfuse
d73777c2c3566fb2647727bb56d9a2295b81669b
55,825,219,630,870,880,000,000,000,000,000,000,000
5
Fix #163: unterminated username used with getpwnam() Signed-off-by: Joachim Wiberg <[email protected]>
static int ZEND_FASTCALL ZEND_DIV_SPEC_TMP_CV_HANDLER(ZEND_OPCODE_HANDLER_ARGS) { zend_op *opline = EX(opline); zend_free_op free_op1; div_function(&EX_T(opline->result.u.var).tmp_var, _get_zval_ptr_tmp(&opline->op1, EX(Ts), &free_op1 TSRMLS_CC), _get_zval_ptr_cv(&opline->op2, EX(Ts), BP_VAR_R TSRMLS_CC) TSRMLS_CC); zval_dtor(free_op1.var); ZEND_VM_NEXT_OPCODE(); }
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
78,197,640,666,168,570,000,000,000,000,000,000,000
12
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
GC_API char * GC_CALL GC_strndup(const char *str, size_t size) { char *copy; size_t len = strlen(str); /* str is expected to be non-NULL */ if (len > size) len = size; copy = GC_malloc_atomic(len + 1); if (copy == NULL) { # ifndef MSWINCE errno = ENOMEM; # endif return NULL; } BCOPY(str, copy, len); copy[len] = '\0'; return copy; }
0
[ "CWE-189" ]
bdwgc
be9df82919960214ee4b9d3313523bff44fd99e1
222,532,994,461,852,940,000,000,000,000,000,000,000
17
Fix allocation size overflows due to rounding. * malloc.c (GC_generic_malloc): Check if the allocation size is rounded to a smaller value. * mallocx.c (GC_generic_malloc_ignore_off_page): Likewise.
void addQueryableBackupPrivileges(PrivilegeVector* privileges) { Privilege::addPrivilegeToPrivilegeVector( privileges, Privilege(ResourcePattern::forAnyResource(), ActionType::collStats)); Privilege::addPrivilegeToPrivilegeVector( privileges, Privilege(ResourcePattern::forAnyNormalResource(), ActionType::find)); Privilege::addPrivilegeToPrivilegeVector( privileges, Privilege(ResourcePattern::forAnyResource(), ActionType::listCollections)); Privilege::addPrivilegeToPrivilegeVector( privileges, Privilege(ResourcePattern::forAnyResource(), ActionType::listIndexes)); ActionSet clusterActions; clusterActions << ActionType::getParameter // To check authSchemaVersion << ActionType::listDatabases << ActionType::useUUID; Privilege::addPrivilegeToPrivilegeVector( privileges, Privilege(ResourcePattern::forClusterResource(), clusterActions)); Privilege::addPrivilegeToPrivilegeVector( privileges, Privilege(ResourcePattern::forDatabaseName("config"), ActionType::find)); Privilege::addPrivilegeToPrivilegeVector( privileges, Privilege(ResourcePattern::forDatabaseName("local"), ActionType::find)); Privilege::addPrivilegeToPrivilegeVector( privileges, Privilege(ResourcePattern::forCollectionName("system.js"), ActionType::find)); Privilege::addPrivilegeToPrivilegeVector( privileges, Privilege(ResourcePattern::forCollectionName("system.users"), ActionType::find)); Privilege::addPrivilegeToPrivilegeVector( privileges, Privilege(ResourcePattern::forCollectionName("system.profile"), ActionType::find)); Privilege::addPrivilegeToPrivilegeVector( privileges, Privilege( ResourcePattern::forExactNamespace(AuthorizationManager::usersAltCollectionNamespace), ActionType::find)); Privilege::addPrivilegeToPrivilegeVector( privileges, Privilege(ResourcePattern::forExactNamespace( AuthorizationManager::usersBackupCollectionNamespace), ActionType::find)); Privilege::addPrivilegeToPrivilegeVector( privileges, Privilege( 
ResourcePattern::forExactNamespace(AuthorizationManager::rolesCollectionNamespace), ActionType::find)); Privilege::addPrivilegeToPrivilegeVector( privileges, Privilege( ResourcePattern::forExactNamespace(AuthorizationManager::versionCollectionNamespace), ActionType::find)); Privilege::addPrivilegeToPrivilegeVector( privileges, Privilege(ResourcePattern::forExactNamespace(NamespaceString("config", "settings")), ActionType::find)); }
0
[ "CWE-20" ]
mongo
865eccaf35aca29d1b71764d50227cdf853752d0
9,036,507,213,519,624,000,000,000,000,000,000,000
62
SERVER-36263 Bypassing operation validation in applyOps should require special privilege
const Tags* Segment::GetTags() const { return m_pTags; }
0
[ "CWE-20" ]
libvpx
34d54b04e98dd0bac32e9aab0fbda0bf501bc742
241,306,355,042,705,750,000,000,000,000,000,000,000
1
update libwebm to libwebm-1.0.0.27-358-gdbf1d10 changelog: https://chromium.googlesource.com/webm/libwebm/+log/libwebm-1.0.0.27-351-g9f23fbc..libwebm-1.0.0.27-358-gdbf1d10 Change-Id: I28a6b3ae02a53fb1f2029eee11e9449afb94c8e3
static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, struct ext4_inode_info *ei) { blkcnt_t i_blocks ; struct inode *inode = &(ei->vfs_inode); struct super_block *sb = inode->i_sb; if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { /* we are using combined 48 bit field */ i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | le32_to_cpu(raw_inode->i_blocks_lo); if (ei->i_flags & EXT4_HUGE_FILE_FL) { /* i_blocks represent file system block size */ return i_blocks << (inode->i_blkbits - 9); } else { return i_blocks; } } else { return le32_to_cpu(raw_inode->i_blocks_lo); } }
0
[ "CWE-703" ]
linux
744692dc059845b2a3022119871846e74d4f6e11
153,796,081,034,545,930,000,000,000,000,000,000,000
22
ext4: use ext4_get_block_write in buffer write Allocate uninitialized extent before ext4 buffer write and convert the extent to initialized after io completes. The purpose is to make sure an extent can only be marked initialized after it has been written with new data so we can safely drop the i_mutex lock in ext4 DIO read without exposing stale data. This helps to improve multi-thread DIO read performance on high-speed disks. Skip the nobh and data=journal mount cases to make things simple for now. Signed-off-by: Jiaying Zhang <[email protected]> Signed-off-by: "Theodore Ts'o" <[email protected]>
struct tcp_sock_t *tcp_open(uint16_t port) { struct tcp_sock_t *this = calloc(1, sizeof *this); if (this == NULL) { ERR("IPv4: callocing this failed"); goto error; } // Open [S]ocket [D]escriptor this->sd = -1; this->sd = socket(AF_INET, SOCK_STREAM, 0); if (this->sd < 0) { ERR("IPv4 socket open failed"); goto error; } // Configure socket params struct sockaddr_in addr; memset(&addr, 0, sizeof addr); addr.sin_family = AF_INET; addr.sin_port = htons(port); addr.sin_addr.s_addr = htonl(0x7F000001); // Bind to localhost if (bind(this->sd, (struct sockaddr *)&addr, sizeof addr) < 0) { if (g_options.only_desired_port == 1) ERR("IPv4 bind on port failed. " "Requested port may be taken or require root permissions."); goto error; } // Let kernel over-accept max number of connections if (listen(this->sd, HTTP_MAX_PENDING_CONNS) < 0) { ERR("IPv4 listen failed on socket"); goto error; } return this; error: if (this != NULL) { if (this->sd != -1) { close(this->sd); } free(this); } return NULL; }
0
[ "CWE-284", "CWE-264" ]
ippusbxd
46844402bca7a38fc224483ba6f0a93c4613203f
151,464,632,518,567,830,000,000,000,000,000,000,000
50
SECURITY FIX: Actually restrict the access to the printer to localhost Before, any machine in any network connected by any of the interfaces (as listed by "ifconfig") could access to an IPP-over-USB printer on the assigned port, allowing users on remote machines to print and to access the web configuration interface of a IPP-over-USB printer in contrary to conventional USB printers which are only accessible locally.
static inline struct sctp_chunk *sctp_make_op_error_fixed( const struct sctp_association *asoc, const struct sctp_chunk *chunk) { size_t size = asoc ? asoc->pathmtu : 0; if (!size) size = SCTP_DEFAULT_MAXSEGMENT; return sctp_make_op_error_space(asoc, chunk, size); }
0
[ "CWE-20" ]
linux
07f2c7ab6f8d0a7e7c5764c4e6cc9c52951b9d9c
168,537,732,422,891,570,000,000,000,000,000,000,000
11
sctp: verify size of a new chunk in _sctp_make_chunk() When SCTP makes INIT or INIT_ACK packet the total chunk length can exceed SCTP_MAX_CHUNK_LEN which leads to kernel panic when transmitting these packets, e.g. the crash on sending INIT_ACK: [ 597.804948] skbuff: skb_over_panic: text:00000000ffae06e4 len:120168 put:120156 head:000000007aa47635 data:00000000d991c2de tail:0x1d640 end:0xfec0 dev:<NULL> ... [ 597.976970] ------------[ cut here ]------------ [ 598.033408] kernel BUG at net/core/skbuff.c:104! [ 600.314841] Call Trace: [ 600.345829] <IRQ> [ 600.371639] ? sctp_packet_transmit+0x2095/0x26d0 [sctp] [ 600.436934] skb_put+0x16c/0x200 [ 600.477295] sctp_packet_transmit+0x2095/0x26d0 [sctp] [ 600.540630] ? sctp_packet_config+0x890/0x890 [sctp] [ 600.601781] ? __sctp_packet_append_chunk+0x3b4/0xd00 [sctp] [ 600.671356] ? sctp_cmp_addr_exact+0x3f/0x90 [sctp] [ 600.731482] sctp_outq_flush+0x663/0x30d0 [sctp] [ 600.788565] ? sctp_make_init+0xbf0/0xbf0 [sctp] [ 600.845555] ? sctp_check_transmitted+0x18f0/0x18f0 [sctp] [ 600.912945] ? sctp_outq_tail+0x631/0x9d0 [sctp] [ 600.969936] sctp_cmd_interpreter.isra.22+0x3be1/0x5cb0 [sctp] [ 601.041593] ? sctp_sf_do_5_1B_init+0x85f/0xc30 [sctp] [ 601.104837] ? sctp_generate_t1_cookie_event+0x20/0x20 [sctp] [ 601.175436] ? sctp_eat_data+0x1710/0x1710 [sctp] [ 601.233575] sctp_do_sm+0x182/0x560 [sctp] [ 601.284328] ? sctp_has_association+0x70/0x70 [sctp] [ 601.345586] ? sctp_rcv+0xef4/0x32f0 [sctp] [ 601.397478] ? sctp6_rcv+0xa/0x20 [sctp] ... Here the chunk size for INIT_ACK packet becomes too big, mostly because of the state cookie (INIT packet has large size with many address parameters), plus additional server parameters. Later this chunk causes the panic in skb_put_data(): skb_packet_transmit() sctp_packet_pack() skb_put_data(nskb, chunk->skb->data, chunk->skb->len); 'nskb' (head skb) was previously allocated with packet->size from u16 'chunk->chunk_hdr->length'. 
As suggested by Marcelo we should check the chunk's length in _sctp_make_chunk() before trying to allocate skb for it and discard a chunk if its size bigger than SCTP_MAX_CHUNK_LEN. Signed-off-by: Alexey Kodanev <[email protected]> Acked-by: Marcelo Ricardo Leitner <[email protected]> Acked-by: Neil Horman <[email protected]> Signed-off-by: David S. Miller <[email protected]>
Status GetSliceAxis(const NodeDef* node, const NodeDef* pack, const PartialTensorShape& pack_output_shape, int pack_axis, int64* slice_start_value, bool* found, bool* must_expand_dims) { *found = false; if (IsSlice(*node)) { *must_expand_dims = true; return GetSimpleSliceAxis(node, pack, pack_output_shape, pack_axis, slice_start_value, found); } else { return GetStridedSliceAxis(node, pack, pack_output_shape, pack_axis, slice_start_value, found, must_expand_dims); } }
0
[ "CWE-476" ]
tensorflow
e6340f0665d53716ef3197ada88936c2a5f7a2d3
276,787,926,111,093,600,000,000,000,000,000,000,000
14
Handle a special grappler case resulting in crash. It might happen that a malformed input could be used to trick Grappler into trying to optimize a node with no inputs. This, in turn, would produce a null pointer dereference and a segfault. PiperOrigin-RevId: 369242852 Change-Id: I2e5cbe7aec243d34a6d60220ac8ac9b16f136f6b
int cil_gen_constraint_expr(struct cil_tree_node *current, enum cil_flavor flavor, struct cil_list **expr) { int rc = SEPOL_ERR; if (current->cl_head == NULL) { goto exit; } rc = __cil_fill_constraint_expr(current->cl_head, flavor, expr); if (rc != SEPOL_OK) { goto exit; } return SEPOL_OK; exit: cil_log(CIL_ERR, "Bad expression tree for constraint\n"); return rc; }
0
[ "CWE-125" ]
selinux
340f0eb7f3673e8aacaf0a96cbfcd4d12a405521
338,431,504,309,879,880,000,000,000,000,000,000,000
20
libsepol/cil: Check for statements not allowed in optional blocks While there are some checks for invalid statements in an optional block when resolving the AST, there are no checks when building the AST. OSS-Fuzz found the following policy which caused a null dereference in cil_tree_get_next_path(). (blockinherit b3) (sid SID) (sidorder(SID)) (optional o (ibpkeycon :(1 0)s) (block b3 (filecon""block()) (filecon""block()))) The problem is that the blockinherit copies block b3 before the optional block is disabled. When the optional is disabled, block b3 is deleted along with everything else in the optional. Later, when filecon statements with the same path are found an error message is produced and in trying to find out where the block was copied from, the reference to the deleted block is used. The error handling code assumes (rightly) that if something was copied from a block then that block should still exist. It is clear that in-statements, blocks, and macros cannot be in an optional, because that allows nodes to be copied from the optional block to somewhere outside even though the optional could be disabled later. When optionals are disabled the AST is reset and the resolution is restarted at the point of resolving macro calls, so anything resolved before macro calls will never be re-resolved. This includes tunableifs, in-statements, blockinherits, blockabstracts, and macro definitions. Tunable declarations also cannot be in an optional block because they are needed to resolve tunableifs. It should be fine to allow blockinherit statements in an optional, because that is copying nodes from outside the optional to the optional and if the optional is later disabled, everything will be deleted anyway. Check and quit with an error if a tunable declaration, in-statement, block, blockabstract, or macro definition is found within an optional when either building or resolving the AST. Signed-off-by: James Carter <[email protected]>
void XMLRPC_SetValueInt(XMLRPC_VALUE value, int val) { if(value) { value->type = xmlrpc_int; value->i = val; } }
0
[ "CWE-119" ]
php-src
88412772d295ebf7dd34409534507dc9bcac726e
53,201,235,181,706,040,000,000,000,000,000,000,000
6
Fix bug #68027 - fix date parsing in XMLRPC lib
DECLAREcpFunc(cpSeparateTiles2SeparateStrips) { return cpImage(in, out, readSeparateTilesIntoBuffer, writeBufferToSeparateStrips, imagelength, imagewidth, spp); }
0
[ "CWE-190" ]
libtiff
43c0b81a818640429317c80fea1e66771e85024b
240,161,965,749,895,580,000,000,000,000,000,000,000
7
* tools/tiffcp.c: fix read of undefined variable in case of missing required tags. Found on test case of MSVR 35100. * tools/tiffcrop.c: fix read of undefined buffer in readContigStripsIntoBuffer() due to uint16 overflow. Probably not a security issue but I can be wrong. Reported as MSVR 35100 by Axel Souchet from the MSRC Vulnerabilities & Mitigations team.