Dataset schema (column, type, observed range):

func        string   lengths 0 - 484k
target      int64    values 0 - 1
cwe         list     lengths 0 - 4
project     string   799 classes
commit_id   string   lengths 40 - 40
hash        float64  1,215,700,430,453,689,100,000,000 - 340,281,914,521,452,260,000,000,000,000
size        int64    values 1 - 24k
message     string   lengths 0 - 13.3k
char * copy_fifo_list (sizep) int *sizep; { if (sizep) *sizep = 0; return (char *)NULL; }
target: 0
cwe: [ "CWE-20" ]
project: bash
commit_id: 4f747edc625815f449048579f6e65869914dd715
hash: 317,783,823,912,346,900,000,000,000,000,000,000,000
size: 7
message:
Bash-4.4 patch 7
static int check_ptr_to_btf_access(struct bpf_verifier_env *env, struct bpf_reg_state *regs, int regno, int off, int size, enum bpf_access_type atype, int value_regno) { struct bpf_reg_state *reg = regs + regno; const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id); const char *tname = btf_name_by_offset(btf_vmlinux, t->name_off); u32 btf_id; int ret; if (off < 0) { verbose(env, "R%d is ptr_%s invalid negative access: off=%d\n", regno, tname, off); return -EACCES; } if (!tnum_is_const(reg->var_off) || reg->var_off.value) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n", regno, tname, off, tn_buf); return -EACCES; } if (env->ops->btf_struct_access) { ret = env->ops->btf_struct_access(&env->log, t, off, size, atype, &btf_id); } else { if (atype != BPF_READ) { verbose(env, "only read is supported\n"); return -EACCES; } ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id); } if (ret < 0) return ret; if (atype == BPF_READ && value_regno >= 0) mark_btf_ld_reg(env, regs, value_regno, ret, btf_id); return 0; }
target: 0
cwe: [ "CWE-119", "CWE-681", "CWE-787" ]
project: linux
commit_id: 5b9fbeb75b6a98955f628e205ac26689bcb1383e
hash: 276,655,852,367,498,960,000,000,000,000,000,000,000
size: 49
message:
bpf: Fix scalar32_min_max_or bounds tracking Simon reported an issue with the current scalar32_min_max_or() implementation. That is, compared to the other 32 bit subreg tracking functions, the code in scalar32_min_max_or() stands out that it's using the 64 bit registers instead of 32 bit ones. This leads to bounds tracking issues, for example: [...] 8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm 8: (79) r1 = *(u64 *)(r0 +0) R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm 9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm 9: (b7) r0 = 1 10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm 10: (18) r2 = 0x600000002 12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 12: (ad) if r1 < r2 goto pc+1 R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 13: (95) exit 14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 14: (25) if r1 > 0x0 goto pc+1 R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 15: (95) exit 16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 16: (47) r1 |= 0 17: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x1; 0x700000000),s32_max_value=1,u32_max_value=1) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm [...] The bound tests on the map value force the upper unsigned bound to be 25769803777 in 64 bit (0b11000000000000000000000000000000001) and then lower one to be 1. By using OR they are truncated and thus result in the range [1,1] for the 32 bit reg tracker. This is incorrect given the only thing we know is that the value must be positive and thus 2147483647 (0b1111111111111111111111111111111) at max for the subregs. Fix it by using the {u,s}32_{min,max}_value vars instead. This also makes sense, for example, for the case where we update dst_reg->s32_{min,max}_value in the else branch we need to use the newly computed dst_reg->u32_{min,max}_value as we know that these are positive. Previously, in the else branch the 64 bit values of umin_value=1 and umax_value=32212254719 were used and latter got truncated to be 1 as upper bound there. After the fix the subreg range is now correct: [...] 
8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm 8: (79) r1 = *(u64 *)(r0 +0) R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm 9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm 9: (b7) r0 = 1 10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm 10: (18) r2 = 0x600000002 12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 12: (ad) if r1 < r2 goto pc+1 R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 13: (95) exit 14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 14: (25) if r1 > 0x0 goto pc+1 R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 15: (95) exit 16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 16: (47) r1 |= 0 17: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm [...] Fixes: 3f50f132d840 ("bpf: Verifier, do explicit ALU32 bounds tracking") Reported-by: Simon Scannell <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]> Reviewed-by: John Fastabend <[email protected]> Acked-by: Alexei Starovoitov <[email protected]>
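The heart of the fix can be shown in isolation: the 32-bit subreg bounds of an OR must be derived from the 32-bit values themselves, never from truncated 64-bit bounds. A minimal sketch under that framing (invented names, not the verifier's code):

    #include <stdint.h>
    #include <stdio.h>

    struct bounds32 { uint32_t umin, umax; };

    /* For x | y over unsigned ranges: the result is at least
     * max(umin_x, umin_y) and at most the all-ones mask covering
     * umax_x | umax_y. */
    static struct bounds32 or_bounds32(struct bounds32 a, struct bounds32 b)
    {
        uint64_t hi = (uint64_t)a.umax | b.umax;
        struct bounds32 r;

        hi |= hi >> 1; hi |= hi >> 2; hi |= hi >> 4;
        hi |= hi >> 8; hi |= hi >> 16;          /* smear into an all-ones mask */
        r.umin = a.umin > b.umin ? a.umin : b.umin;
        r.umax = (uint32_t)hi;
        return r;
    }

    int main(void)
    {
        /* Truncating the 64-bit range [1, 25769803777] produced the bogus
         * subreg range [1, 1]; computed from the real 32-bit values the
         * result is [1, 2147483647], matching the fixed log above. */
        struct bounds32 reg = { 1, 0x7fffffff }, zero = { 0, 0 };
        struct bounds32 r = or_bounds32(reg, zero);

        printf("[%u, %u]\n", r.umin, r.umax);
        return 0;
    }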
ptaInsertPt(PTA *pta, l_int32 index, l_int32 x, l_int32 y) { l_int32 i, n; PROCNAME("ptaInsertPt"); if (!pta) return ERROR_INT("pta not defined", procName, 1); n = ptaGetCount(pta); if (index < 0 || index > n) return ERROR_INT("index not in {0...n}", procName, 1); if (n > pta->nalloc) ptaExtendArrays(pta); pta->n++; for (i = n; i > index; i--) { pta->x[i] = pta->x[i - 1]; pta->y[i] = pta->y[i - 1]; } pta->x[index] = x; pta->y[index] = y; return 0; }
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: leptonica
commit_id: ee301cb2029db8a6289c5295daa42bba7715e99a
hash: 334,633,466,613,805,970,000,000,000,000,000,000,000
size: 26
message:
Security fixes: expect final changes for release 1.75.3. * Fixed a debian security issue with fscanf() reading a string with possible buffer overflow. * There were also a few similar situations with sscanf().
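The fscanf()/sscanf() issues the message refers to come down to an unbounded %s; a hedged illustration of the bug class and its one-character fix (the input file name is hypothetical):

    #include <stdio.h>

    int main(void)
    {
        char buf[64];
        FILE *fp = fopen("header.txt", "r");   /* hypothetical input file */

        if (!fp)
            return 1;
        /* Unsafe: fscanf(fp, "%s", buf) writes past buf on a long token.
         * A width specifier bounds the read, leaving room for the NUL. */
        if (fscanf(fp, "%63s", buf) == 1)
            printf("token: %s\n", buf);
        fclose(fp);
        return 0;
    }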
static void tg3_process_error(struct tg3 *tp) { u32 val; bool real_error = false; if (tg3_flag(tp, ERROR_PROCESSED)) return; /* Check Flow Attention register */ val = tr32(HOSTCC_FLOW_ATTN); if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n"); real_error = true; } if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); real_error = true; } if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { netdev_err(tp->dev, "DMA Status error. Resetting chip.\n"); real_error = true; } if (!real_error) return; tg3_dump_state(tp); tg3_flag_set(tp, ERROR_PROCESSED); tg3_reset_task_schedule(tp); }
target: 0
cwe: [ "CWE-476", "CWE-119" ]
project: linux
commit_id: 715230a44310a8cf66fbfb5a46f9a62a9b2de424
hash: 197,916,589,157,046,500,000,000,000,000,000,000,000
size: 33
message:
tg3: fix length overflow in VPD firmware parsing Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version when present") introduced VPD parsing that contained a potential length overflow. Limit the hardware's reported firmware string length (max 255 bytes) to stay inside the driver's firmware string length (32 bytes). On overflow, truncate the formatted firmware string instead of potentially overwriting portions of the tg3 struct. http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf Signed-off-by: Kees Cook <[email protected]> Reported-by: Oded Horovitz <[email protected]> Reported-by: Brad Spengler <[email protected]> Cc: [email protected] Cc: Matt Carlson <[email protected]> Signed-off-by: David S. Miller <[email protected]>
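A sketch of the truncation pattern the message describes: clamp a device-reported length before formatting into a fixed 32-byte buffer (names and the sample input are illustrative, not the driver's):

    #include <stdio.h>
    #include <string.h>

    #define FW_VER_LEN 32   /* driver-side buffer size, per the message */

    /* snprintf never writes more than FW_VER_LEN bytes: the formatted
     * string is truncated instead of spilling into adjacent memory. */
    static void format_fw_version(char dst[FW_VER_LEN], const char *vpd, int vpdlen)
    {
        snprintf(dst, FW_VER_LEN, "%.*s", vpdlen, vpd);
    }

    int main(void)
    {
        char fw_ver[FW_VER_LEN];
        const char hw[] = "5719-v1.24 plus a much longer vendor-controlled blob";

        format_fw_version(fw_ver, hw, (int)strlen(hw));
        printf("%s\n", fw_ver);   /* at most 31 characters plus NUL */
        return 0;
    }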
static void h2_destroy(struct connection *conn) { struct h2c *h2c = conn->ctx; if (eb_is_empty(&h2c->streams_by_id)) h2_release(h2c->conn); }
target: 0
cwe: [ "CWE-125" ]
project: haproxy
commit_id: a01f45e3ced23c799f6e78b5efdbd32198a75354
hash: 190,015,984,733,671,440,000,000,000,000,000,000,000
size: 7
message:
BUG/CRITICAL: mux-h2: re-check the frame length when PRIORITY is used Tim Düsterhus reported a possible crash in the H2 HEADERS frame decoder when the PRIORITY flag is present. A check is missing to ensure the 5 extra bytes needed with this flag are actually part of the frame. As per RFC7540#4.2, let's return a connection error with code FRAME_SIZE_ERROR. Many thanks to Tim for responsibly reporting this issue with a working config and reproducer. This issue was assigned CVE-2018-20615. This fix must be backported to 1.9 and 1.8.
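The missing check is simple to state in isolation: with the PRIORITY flag set, a HEADERS frame carries 5 extra bytes that must fit inside the declared frame length. A rough sketch of such a check (not HAProxy's code):

    #include <stdint.h>
    #include <stdio.h>

    #define H2_HEADERS_FLAG_PRIORITY 0x20
    #define H2_ERR_FRAME_SIZE_ERROR  0x6   /* RFC 7540 error code */

    /* Returns 0 if the fixed parts fit, or a negative connection error. */
    static int h2_check_headers_len(uint8_t flags, uint32_t frame_len)
    {
        uint32_t needed = 0;

        if (flags & H2_HEADERS_FLAG_PRIORITY)
            needed += 5;   /* 4-byte stream dependency + 1-byte weight */
        if (frame_len < needed)
            return -H2_ERR_FRAME_SIZE_ERROR;   /* per RFC7540#4.2 */
        return 0;
    }

    int main(void)
    {
        /* a 3-byte HEADERS frame with PRIORITY set must be rejected */
        printf("%d\n", h2_check_headers_len(H2_HEADERS_FLAG_PRIORITY, 3));
        return 0;
    }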
static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, int broadcast_flags, struct sock *one_sk, struct net *net) { struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); struct sock *sk; struct sk_buff *skb2 = NULL; int err = -ESRCH; /* XXX Do we need something like netlink_overrun? I think * XXX PF_KEY socket apps will not mind current behavior. */ if (!skb) return -ENOMEM; rcu_read_lock(); sk_for_each_rcu(sk, &net_pfkey->table) { struct pfkey_sock *pfk = pfkey_sk(sk); int err2; /* Yes, it means that if you are meant to receive this * pfkey message you receive it twice as promiscuous * socket. */ if (pfk->promisc) pfkey_broadcast_one(skb, &skb2, allocation, sk); /* the exact target will be processed later */ if (sk == one_sk) continue; if (broadcast_flags != BROADCAST_ALL) { if (broadcast_flags & BROADCAST_PROMISC_ONLY) continue; if ((broadcast_flags & BROADCAST_REGISTERED) && !pfk->registered) continue; if (broadcast_flags & BROADCAST_ONE) continue; } err2 = pfkey_broadcast_one(skb, &skb2, allocation, sk); /* Error is cleare after succecful sending to at least one * registered KM */ if ((broadcast_flags & BROADCAST_REGISTERED) && err) err = err2; } rcu_read_unlock(); if (one_sk != NULL) err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); kfree_skb(skb2); kfree_skb(skb); return err; }
target: 0
cwe: [ "CWE-20", "CWE-269" ]
project: linux
commit_id: f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
hash: 289,439,744,223,542,400,000,000,000,000,000,000,000
size: 56
message:
net: rework recvmsg handler msg_name and msg_namelen logic This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user. This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory. Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL. Also document these changes in include/linux/net.h as suggested by David Miller. Changes since RFC: Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec. With this change in place I could remove " if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL ". This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL. Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style. Cc: David Miller <[email protected]> Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
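The handler contract the message establishes, sketched with stand-in types rather than the kernel's struct msghdr: the caller zeroes msg_namelen up front, and a handler sets it only when it actually writes an address.

    #include <stdio.h>
    #include <string.h>

    struct msghdr_s   { void *msg_name; int msg_namelen; };
    struct sockaddr_s { unsigned short family, port; unsigned int addr; };

    /* A handler sets msg_namelen only when it actually writes an address,
     * so stale or uninitialized bytes are never copied to userspace. */
    static void example_recvmsg(struct msghdr_s *msg,
                                unsigned int peer_addr, unsigned short peer_port)
    {
        if (msg->msg_name) {
            struct sockaddr_s *sin = msg->msg_name;

            memset(sin, 0, sizeof(*sin));   /* no padding leaks either */
            sin->family = 2;                /* AF_INET, hard-coded here */
            sin->addr = peer_addr;
            sin->port = peer_port;
            msg->msg_namelen = (int)sizeof(*sin);
        }
        /* for a plain read(), msg_name is NULL and msg_namelen stays 0 */
    }

    int main(void)
    {
        struct sockaddr_s addr;
        struct msghdr_s msg = { &addr, 0 };   /* caller starts namelen at 0 */

        example_recvmsg(&msg, 0x7f000001u, 8080);
        printf("msg_namelen = %d\n", msg.msg_namelen);
        return 0;
    }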
static int kdb_help(int argc, const char **argv) { kdbtab_t *kt; kdb_printf("%-15.15s %-20.20s %s\n", "Command", "Usage", "Description"); kdb_printf("-----------------------------" "-----------------------------\n"); list_for_each_entry(kt, &kdb_cmds_head, list_node) { char *space = ""; if (KDB_FLAG(CMD_INTERRUPT)) return 0; if (!kdb_check_flags(kt->flags, kdb_cmd_enabled, true)) continue; if (strlen(kt->usage) > 20) space = "\n "; kdb_printf("%-15.15s %-20s%s%s\n", kt->name, kt->usage, space, kt->help); } return 0; }
target: 0
cwe: [ "CWE-787" ]
project: linux
commit_id: eadb2f47a3ced5c64b23b90fd2a3463f63726066
hash: 53,716,144,507,530,735,000,000,000,000,000,000,000
size: 20
message:
lockdown: also lock down previous kgdb use KGDB and KDB allow read and write access to kernel memory, and thus should be restricted during lockdown. An attacker with access to a serial port (for example, via a hypervisor console, which some cloud vendors provide over the network) could trigger the debugger so it is important that the debugger respect the lockdown mode when/if it is triggered. Fix this by integrating lockdown into kdb's existing permissions mechanism. Unfortunately kgdb does not have any permissions mechanism (although it certainly could be added later) so, for now, kgdb is simply and brutally disabled by immediately exiting the gdb stub without taking any action. For lockdowns established early in the boot (e.g. the normal case) then this should be fine but on systems where kgdb has set breakpoints before the lockdown is enacted then "bad things" will happen. CVE: CVE-2022-21499 Co-developed-by: Stephen Brennan <[email protected]> Signed-off-by: Stephen Brennan <[email protected]> Reviewed-by: Douglas Anderson <[email protected]> Signed-off-by: Daniel Thompson <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
bgp_write_notify (struct peer *peer) { int ret, val; u_char type; struct stream *s; /* There should be at least one packet. */ s = stream_fifo_head (peer->obuf); if (!s) return 0; assert (stream_get_endp (s) >= BGP_HEADER_SIZE); /* Put socket in blocking mode. */ val = fcntl (peer->fd, F_GETFL, 0); fcntl (peer->fd, F_SETFL, val & ~O_NONBLOCK); ret = writen (peer->fd, STREAM_DATA (s), stream_get_endp (s)); if (ret <= 0) { BGP_EVENT_ADD (peer, TCP_fatal_error); return 0; } /* Retrieve BGP packet type. */ stream_set_getp (s, BGP_MARKER_SIZE + 2); type = stream_getc (s); assert (type == BGP_MSG_NOTIFY); /* Type should be notify. */ peer->notify_out++; /* Double start timer. */ peer->v_start *= 2; /* Overflow check. */ if (peer->v_start >= (60 * 2)) peer->v_start = (60 * 2); BGP_EVENT_ADD (peer, BGP_Stop); return 0; }
target: 0
cwe: [ "CWE-119" ]
project: quagga
commit_id: 5861739f8c38bc36ea9955e5cb2be2bf2f482d70
hash: 70,698,109,488,154,910,000,000,000,000,000,000,000
size: 43
message:
bgpd: Open option parse errors don't NOTIFY, resulting in abort & DoS * bgp_packet.c: (bgp_open_receive) Errors from bgp_open_option_parse are detected, and the code will stop processing the OPEN and return. However it does so without calling bgp_notify_send to send a NOTIFY - which means the peer FSM doesn't get stopped, and bgp_read will be called again later. Because it returns, it doesn't go through the code near the end of the function that removes the current message from the peer input stream. Thus the next call to bgp_read will try to parse a half-parsed stream as if it were a new BGP message, leading to an assert later in the code when it tries to read stuff that isn't there. Add the required call to bgp_notify_send before returning. * bgp_open.c: (bgp_capability_as4) Be a bit stricter, check the length field corresponds to the only value it can be, which is the amount we're going to read off the stream. And make sure the capability flag gets set, so callers can know this capability was read, regardless. (peek_for_as4_capability) Let bgp_capability_as4 do the length check.
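The control-flow fix the first bullet describes, in a generic shape (stub names, not bgpd's): on a parse error, emit the NOTIFY before returning, so the peer FSM stops and the half-parsed input is never re-read.

    #include <stdio.h>

    enum { OPEN_OK = 0, OPEN_ERR = -1 };

    static int  parse_open_options(const char *pkt) { return pkt[0] ? OPEN_OK : OPEN_ERR; }
    static void notify_send(const char *why)        { printf("NOTIFY: %s\n", why); }

    static int open_receive(const char *pkt)
    {
        if (parse_open_options(pkt) != OPEN_OK) {
            /* the fix: tell the peer and stop the FSM before returning,
             * otherwise the unconsumed bytes are re-parsed as a new message */
            notify_send("OPEN option parse error");
            return OPEN_ERR;
        }
        /* ... normal processing, then consume the message from the stream */
        return OPEN_OK;
    }

    int main(void)
    {
        char bad[] = { 0, 0 };   /* malformed OPEN, for illustration */

        return open_receive(bad) == OPEN_ERR ? 0 : 1;
    }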
AsyncUDPSocket& getSocket() { return *socket_; }
target: 0
cwe: [ "CWE-617", "CWE-703" ]
project: mvfst
commit_id: a67083ff4b8dcbb7ee2839da6338032030d712b0
hash: 139,624,842,802,262,380,000,000,000,000,000,000,000
size: 3
message:
Close connection if we derive an extra 1-rtt write cipher Summary: Fixes CVE-2021-24029 Reviewed By: mjoras, lnicco Differential Revision: D26613890 fbshipit-source-id: 19bb2be2c731808144e1a074ece313fba11f1945
void ConnectionManagerImpl::ActiveStream::onStreamMaxDurationReached() { ENVOY_STREAM_LOG(debug, "Stream max duration time reached", *this); connection_manager_.stats_.named_.downstream_rq_max_duration_reached_.inc(); connection_manager_.doEndStream(*this); }
target: 0
cwe: [ "CWE-400" ]
project: envoy
commit_id: 0e49a495826ea9e29134c1bd54fdeb31a034f40c
hash: 292,963,207,476,131,570,000,000,000,000,000,000,000
size: 5
message:
http/2: add stats and stream flush timeout (#139) This commit adds a new stream flush timeout to guard against a remote server that does not open window once an entire stream has been buffered for flushing. Additional stats have also been added to better understand the codecs view of active streams as well as amount of data buffered. Signed-off-by: Matt Klein <[email protected]>
static sector_t ext4_bmap(struct address_space *mapping, sector_t block) { struct inode *inode = mapping->host; journal_t *journal; int err; /* * We can get here for an inline file via the FIBMAP ioctl */ if (ext4_has_inline_data(inode)) return 0; if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && test_opt(inode->i_sb, DELALLOC)) { /* * With delalloc we want to sync the file * so that we can make sure we allocate * blocks for file */ filemap_write_and_wait(mapping); } if (EXT4_JOURNAL(inode) && ext4_test_inode_state(inode, EXT4_STATE_JDATA)) { /* * This is a REALLY heavyweight approach, but the use of * bmap on dirty files is expected to be extremely rare: * only if we run lilo or swapon on a freshly made file * do we expect this to happen. * * (bmap requires CAP_SYS_RAWIO so this does not * represent an unprivileged user DOS attack --- we'd be * in trouble if mortal users could trigger this path at * will.) * * NB. EXT4_STATE_JDATA is not set on files other than * regular files. If somebody wants to bmap a directory * or symlink and gets confused because the buffer * hasn't yet been flushed to disk, they deserve * everything they get. */ ext4_clear_inode_state(inode, EXT4_STATE_JDATA); journal = EXT4_JOURNAL(inode); jbd2_journal_lock_updates(journal); err = jbd2_journal_flush(journal); jbd2_journal_unlock_updates(journal); if (err) return 0; } return iomap_bmap(mapping, block, &ext4_iomap_ops); }
target: 0
cwe: [ "CWE-703" ]
project: linux
commit_id: ce9f24cccdc019229b70a5c15e2b09ad9c0ab5d1
hash: 316,743,682,948,340,240,000,000,000,000,000,000,000
size: 54
message:
ext4: check journal inode extents more carefully Currently, system zones just track ranges of block, that are "important" fs metadata (bitmaps, group descriptors, journal blocks, etc.). This however complicates how extent tree (or indirect blocks) can be checked for inodes that actually track such metadata - currently the journal inode but arguably we should be treating quota files or resize inode similarly. We cannot run __ext4_ext_check() on such metadata inodes when loading their extents as that would immediately trigger the validity checks and so we just hack around that and special-case the journal inode. This however leads to a situation that a journal inode which has extent tree of depth at least one can have invalid extent tree that gets unnoticed until ext4_cache_extents() crashes. To overcome this limitation, track inode number each system zone belongs to (0 is used for zones not belonging to any inode). We can then verify inode number matches the expected one when verifying extent tree and thus avoid the false errors. With this there's no need to to special-case journal inode during extent tree checking anymore so remove it. Fixes: 0a944e8a6c66 ("ext4: don't perform block validity checks on the journal inode") Reported-by: Wolfgang Frisch <[email protected]> Reviewed-by: Lukas Czerner <[email protected]> Signed-off-by: Jan Kara <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Theodore Ts'o <[email protected]>
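The data-structure change the message describes, in miniature (invented types, not ext4's): each system zone records the inode it belongs to, so a range check can accept an overlap only when the owner matches.

    #include <stdio.h>

    struct system_zone { unsigned long start, len, ino; /* ino 0 = unowned */ };

    /* Accept the range only if it avoids all zones, or only overlaps a
     * zone owned by the inode under check (e.g. the journal inode
     * referencing its own blocks). */
    static int range_ok(const struct system_zone *z, int nz,
                        unsigned long start, unsigned long len,
                        unsigned long ino)
    {
        int i;

        for (i = 0; i < nz; i++)
            if (start < z[i].start + z[i].len && z[i].start < start + len)
                if (z[i].ino == 0 || z[i].ino != ino)
                    return 0;   /* overlaps metadata it does not own */
        return 1;
    }

    int main(void)
    {
        struct system_zone zones[] = { { 100, 50, 8 } };  /* journal, ino 8 */

        printf("%d %d\n",
               range_ok(zones, 1, 120, 4, 8),    /* journal's own blocks: ok */
               range_ok(zones, 1, 120, 4, 33));  /* another inode: rejected */
        return 0;
    }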
TEST(ParseOperand, ShouldRecognizeNumberLiteral) { auto resultExpression = parseOperand(BSON("" << 5)); auto constantExpression = dynamic_cast<ExpressionConstant*>(resultExpression.get()); ASSERT_TRUE(constantExpression); ASSERT_VALUE_EQ(constantExpression->serialize(false), Value(Document{{"$const", 5}})); }
target: 0
cwe: [ "CWE-835" ]
project: mongo
commit_id: 0a076417d1d7fba3632b73349a1fd29a83e68816
hash: 14,155,221,077,736,097,000,000,000,000,000,000,000
size: 6
message:
SERVER-38070 fix infinite loop in agg expression
lex(struct scanner *s, union lvalue *val) { skip_more_whitespace_and_comments: /* Skip spaces. */ while (is_space(peek(s))) if (next(s) == '\n') return TOK_END_OF_LINE; /* Skip comments. */ if (chr(s, '#')) { skip_to_eol(s); goto skip_more_whitespace_and_comments; } /* See if we're done. */ if (eof(s)) return TOK_END_OF_FILE; /* New token. */ s->token_line = s->line; s->token_column = s->column; s->buf_pos = 0; /* LHS Keysym. */ if (chr(s, '<')) { while (peek(s) != '>' && !eol(s)) buf_append(s, next(s)); if (!chr(s, '>')) { scanner_err(s, "unterminated keysym literal"); return TOK_ERROR; } if (!buf_append(s, '\0')) { scanner_err(s, "keysym literal is too long"); return TOK_ERROR; } val->string.str = s->buf; val->string.len = s->buf_pos; return TOK_LHS_KEYSYM; } /* Colon. */ if (chr(s, ':')) return TOK_COLON; if (chr(s, '!')) return TOK_BANG; if (chr(s, '~')) return TOK_TILDE; /* String literal. */ if (chr(s, '\"')) { while (!eof(s) && !eol(s) && peek(s) != '\"') { if (chr(s, '\\')) { uint8_t o; if (chr(s, '\\')) { buf_append(s, '\\'); } else if (chr(s, '"')) { buf_append(s, '"'); } else if (chr(s, 'x') || chr(s, 'X')) { if (hex(s, &o)) buf_append(s, (char) o); else scanner_warn(s, "illegal hexadecimal escape sequence in string literal"); } else if (oct(s, &o)) { buf_append(s, (char) o); } else { scanner_warn(s, "unknown escape sequence (%c) in string literal", peek(s)); /* Ignore. */ } } else { buf_append(s, next(s)); } } if (!chr(s, '\"')) { scanner_err(s, "unterminated string literal"); return TOK_ERROR; } if (!buf_append(s, '\0')) { scanner_err(s, "string literal is too long"); return TOK_ERROR; } if (!is_valid_utf8(s->buf, s->buf_pos - 1)) { scanner_err(s, "string literal is not a valid UTF-8 string"); return TOK_ERROR; } val->string.str = s->buf; val->string.len = s->buf_pos; return TOK_STRING; } /* Identifier or include. */ if (is_alpha(peek(s)) || peek(s) == '_') { s->buf_pos = 0; while (is_alnum(peek(s)) || peek(s) == '_') buf_append(s, next(s)); if (!buf_append(s, '\0')) { scanner_err(s, "identifier is too long"); return TOK_ERROR; } if (streq(s->buf, "include")) return TOK_INCLUDE; val->string.str = s->buf; val->string.len = s->buf_pos; return TOK_IDENT; } /* Discard rest of line. */ skip_to_eol(s); scanner_err(s, "unrecognized token"); return TOK_ERROR; }
target: 1
cwe: [ "CWE-835" ]
project: libxkbcommon
commit_id: 842e4351c2c97de6051cab6ce36b4a81e709a0e1
hash: 201,779,090,834,516,580,000,000,000,000,000,000,000
size: 116
message:
compose: fix infinite loop in parser on some inputs The parser would enter an infinite loop if an unterminated keysym literal occurs at EOF. Found with the afl fuzzer. Signed-off-by: Ran Benita <[email protected]>
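The generic shape of the fix: every scan loop must terminate at end-of-input as well as at its delimiter. A self-contained sketch (not libxkbcommon's scanner):

    #include <stdio.h>
    #include <string.h>

    struct scanner { const char *s; size_t pos, len; };

    static int  at_eof(const struct scanner *sc) { return sc->pos >= sc->len; }
    static char peek(const struct scanner *sc)   { return at_eof(sc) ? '\0' : sc->s[sc->pos]; }

    /* Returns 1 if a '<...>' literal was consumed, 0 if unterminated. */
    static int scan_keysym_literal(struct scanner *sc)
    {
        while (!at_eof(sc) && peek(sc) != '>')   /* the EOF test breaks the hang */
            sc->pos++;
        if (at_eof(sc))
            return 0;                            /* unterminated literal at EOF */
        sc->pos++;                               /* consume '>' */
        return 1;
    }

    int main(void)
    {
        const char *input = "<Multi_key";        /* no closing '>' */
        struct scanner sc = { input, 1, strlen(input) };

        printf("%s\n", scan_keysym_literal(&sc) ? "ok" : "unterminated");
        return 0;
    }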
int cdrom_mode_select(struct cdrom_device_info *cdi, struct packet_command *cgc) { const struct cdrom_device_ops *cdo = cdi->ops; memset(cgc->cmd, 0, sizeof(cgc->cmd)); memset(cgc->buffer, 0, 2); cgc->cmd[0] = GPCMD_MODE_SELECT_10; cgc->cmd[1] = 0x10; /* PF */ cgc->cmd[7] = cgc->buflen >> 8; cgc->cmd[8] = cgc->buflen & 0xff; cgc->data_direction = CGC_DATA_WRITE; return cdo->generic_packet(cdi, cgc); }
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: linux
commit_id: 9de4ee40547fd315d4a0ed1dd15a2fa3559ad707
hash: 300,596,598,460,445,900,000,000,000,000,000,000,000
size: 14
message:
cdrom: information leak in cdrom_ioctl_media_changed() This cast is wrong. "cdi->capacity" is an int and "arg" is an unsigned long. The way the check is written now, if one of the high 32 bits is set then we could read outside the info->slots[] array. This bug is pretty old and it predates git. Reviewed-by: Christoph Hellwig <[email protected]> Cc: [email protected] Signed-off-by: Dan Carpenter <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
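The cast bug the message describes, reduced to a standalone comparison (this sketch assumes a 64-bit unsigned long):

    #include <stdio.h>

    static int slot_in_range_buggy(int capacity, unsigned long arg)
    {
        /* If any high bit of arg is set, (int)arg can still look small,
         * so the bounds check passes and a later array read goes OOB. */
        return (int)arg < capacity;
    }

    static int slot_in_range_fixed(int capacity, unsigned long arg)
    {
        return capacity >= 0 && arg < (unsigned long)capacity;
    }

    int main(void)
    {
        unsigned long evil = 0x100000000UL + 2;   /* bit 32 set, low part 2 */

        printf("buggy: %d, fixed: %d\n",
               slot_in_range_buggy(16, evil), slot_in_range_fixed(16, evil));
        return 0;
    }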
TEST_F(QueryPlannerTest, MaxMinSortInequalityFirstSortSecond) { addIndex(BSON("a" << 1 << "b" << 1)); // Run an empty query, sort {b: 1}, max/min arguments. runQueryFull(BSONObj(), fromjson("{b: 1}"), BSONObj(), 0, 0, BSONObj(), fromjson("{a: 1, b: 1}"), fromjson("{a: 2, b: 2}"), false); assertNumSolutions(1); assertSolutionExists( "{sort: {pattern: {b: 1}, limit: 0, node: {sortKeyGen: {node: " "{fetch: {node: " "{ixscan: {filter: null, pattern: {a: 1, b: 1}}}}}}}}}"); }
target: 0
cwe: []
project: mongo
commit_id: b0ef26c639112b50648a02d969298650fbd402a4
hash: 340,009,197,622,907,300,000,000,000,000,000,000,000
size: 20
message:
SERVER-51083 Reject invalid UTF-8 from $regex match expressions
int regexp_c_locale(char **u, size_t *len) { /* Without uselocale, we need to expand character ranges */ int r; char *s = *u; size_t s_len, u_len; if (len == NULL) { len = &u_len; s_len = strlen(s); } else { s_len = *len; } r = fa_expand_char_ranges(s, s_len, u, len); if (r != 0) { *u = s; *len = s_len; } if (r < 0) return -1; /* Syntax errors will be caught when the result is compiled */ if (r > 0) return 0; free(s); return 1; }
target: 0
cwe: []
project: augeas
commit_id: 1a66739c3fc14b3257af5d4a32d0a2a714a7b39d
hash: 131,531,422,041,905,340,000,000,000,000,000,000,000
size: 24
message:
* src/transform.c (xread_file): catch failed fopen, e.g. EACCES
memory_read_privatekey(LIBSSH2_SESSION * session, const LIBSSH2_HOSTKEY_METHOD ** hostkey_method, void **hostkey_abstract, const unsigned char *method, int method_len, const char *privkeyfiledata, size_t privkeyfiledata_len, const char *passphrase) { const LIBSSH2_HOSTKEY_METHOD **hostkey_methods_avail = libssh2_hostkey_methods(); *hostkey_method = NULL; *hostkey_abstract = NULL; while(*hostkey_methods_avail && (*hostkey_methods_avail)->name) { if((*hostkey_methods_avail)->initPEMFromMemory && strncmp((*hostkey_methods_avail)->name, (const char *) method, method_len) == 0) { *hostkey_method = *hostkey_methods_avail; break; } hostkey_methods_avail++; } if(!*hostkey_method) { return _libssh2_error(session, LIBSSH2_ERROR_METHOD_NONE, "No handler for specified private key"); } if((*hostkey_method)-> initPEMFromMemory(session, privkeyfiledata, privkeyfiledata_len, (unsigned char *) passphrase, hostkey_abstract)) { return _libssh2_error(session, LIBSSH2_ERROR_FILE, "Unable to initialize private key from file"); } return 0; }
target: 0
cwe: [ "CWE-787" ]
project: libssh2
commit_id: dc109a7f518757741590bb993c0c8412928ccec2
hash: 55,897,827,482,040,340,000,000,000,000,000,000,000
size: 36
message:
Security fixes (#315) * Bounds checks Fixes for CVEs https://www.libssh2.org/CVE-2019-3863.html https://www.libssh2.org/CVE-2019-3856.html * Packet length bounds check CVE https://www.libssh2.org/CVE-2019-3855.html * Response length check CVE https://www.libssh2.org/CVE-2019-3859.html * Bounds check CVE https://www.libssh2.org/CVE-2019-3857.html * Bounds checking CVE https://www.libssh2.org/CVE-2019-3859.html and additional data validation * Check bounds before reading into buffers * Bounds checking CVE https://www.libssh2.org/CVE-2019-3859.html * declare SIZE_MAX and UINT_MAX if needed
static int hci_req_add_le_interleaved_scan(struct hci_request *req, unsigned long opt) { struct hci_dev *hdev = req->hdev; int ret = 0; hci_dev_lock(hdev); if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) hci_req_add_le_scan_disable(req, false); hci_req_add_le_passive_scan(req); switch (hdev->interleave_scan_state) { case INTERLEAVE_SCAN_ALLOWLIST: bt_dev_dbg(hdev, "next state: allowlist"); hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; break; case INTERLEAVE_SCAN_NO_FILTER: bt_dev_dbg(hdev, "next state: no filter"); hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST; break; case INTERLEAVE_SCAN_NONE: BT_ERR("unexpected error"); ret = -1; } hci_dev_unlock(hdev); return ret; }
target: 0
cwe: [ "CWE-362" ]
project: linux
commit_id: e2cb6b891ad2b8caa9131e3be70f45243df82a80
hash: 118,057,993,764,912,050,000,000,000,000,000,000,000
size: 30
message:
bluetooth: eliminate the potential race condition when removing the HCI controller There is a possible race condition vulnerability between issuing a HCI command and removing the cont. Specifically, functions hci_req_sync() and hci_dev_do_close() can race each other like below:

thread-A in hci_req_sync()        | thread-B in hci_dev_do_close()
                                  | hci_req_sync_lock(hdev);
test_bit(HCI_UP, &hdev->flags);   |
...                               | test_and_clear_bit(HCI_UP, &hdev->flags)
hci_req_sync_lock(hdev);          |
                                  |

In this commit we alter the sequence in function hci_req_sync(). Hence, the thread-A cannot issue th. Signed-off-by: Lin Ma <[email protected]> Cc: Marcel Holtmann <[email protected]> Fixes: 7c6a329e4447 ("[Bluetooth] Fix regression from using default link policy") Signed-off-by: Greg Kroah-Hartman <[email protected]>
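A sketch of the reordering, with pthread stand-ins for the kernel primitives (illustrative only, not the Bluetooth code): take the sync lock first, then test the UP flag under it, so the close path cannot clear the flag in between.

    #include <pthread.h>
    #include <stdbool.h>

    struct dev { pthread_mutex_t req_lock; bool up; };

    static int req_sync_fixed(struct dev *d)
    {
        int err = 0;

        pthread_mutex_lock(&d->req_lock);   /* lock first... */
        if (!d->up)                         /* ...then test, under the lock */
            err = -1;                       /* -ENETDOWN, in spirit */
        else {
            /* issue the command; the close path is excluded until the
             * lock is dropped, so it cannot tear the device down here */
        }
        pthread_mutex_unlock(&d->req_lock);
        return err;
    }

    int main(void)
    {
        struct dev d = { PTHREAD_MUTEX_INITIALIZER, true };

        return req_sync_fixed(&d);
    }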
vc4_cl_lookup_bos(struct drm_device *dev, struct drm_file *file_priv, struct vc4_exec_info *exec) { struct drm_vc4_submit_cl *args = exec->args; uint32_t *handles; int ret = 0; int i; exec->bo_count = args->bo_handle_count; if (!exec->bo_count) { /* See comment on bo_index for why we have to check * this. */ DRM_ERROR("Rendering requires BOs to validate\n"); return -EINVAL; } exec->bo = drm_calloc_large(exec->bo_count, sizeof(struct drm_gem_cma_object *)); if (!exec->bo) { DRM_ERROR("Failed to allocate validated BO pointers\n"); return -ENOMEM; } handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t)); if (!handles) { ret = -ENOMEM; DRM_ERROR("Failed to allocate incoming GEM handles\n"); goto fail; } if (copy_from_user(handles, (void __user *)(uintptr_t)args->bo_handles, exec->bo_count * sizeof(uint32_t))) { ret = -EFAULT; DRM_ERROR("Failed to copy in GEM handles\n"); goto fail; } spin_lock(&file_priv->table_lock); for (i = 0; i < exec->bo_count; i++) { struct drm_gem_object *bo = idr_find(&file_priv->object_idr, handles[i]); if (!bo) { DRM_ERROR("Failed to look up GEM BO %d: %d\n", i, handles[i]); ret = -EINVAL; spin_unlock(&file_priv->table_lock); goto fail; } drm_gem_object_reference(bo); exec->bo[i] = (struct drm_gem_cma_object *)bo; } spin_unlock(&file_priv->table_lock); fail: drm_free_large(handles); return ret; }
target: 0
cwe: [ "CWE-190", "CWE-703" ]
project: linux
commit_id: 0f2ff82e11c86c05d051cae32b58226392d33bbf
hash: 28,257,515,780,899,937,000,000,000,000,000,000,000
size: 61
message:
drm/vc4: Fix an integer overflow in temporary allocation layout. We copy the unvalidated ioctl arguments from the user into kernel temporary memory to run the validation from, to avoid a race where the user updates the unvalidated contents in between validating them and copying them into the validated BO. However, in setting up the layout of the kernel side, we failed to check one of the additions (the roundup() for shader_rec_offset) against integer overflow, allowing a nearly MAX_UINT value of bin_cl_size to cause us to under-allocate the temporary space that we then copy_from_user into. Reported-by: Murray McAllister <[email protected]> Signed-off-by: Eric Anholt <[email protected]> Fixes: d5b1a78a772f ("drm/vc4: Add support for drawing 3D frames.")
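The missing check, sketched: perform the layout arithmetic in 64 bits and reject sums that no longer fit in 32 (function and parameter names are invented):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Computes bin_cl + padding + shader_rec sizes in 64 bits, rejecting
     * totals that would truncate when used as a 32-bit allocation size. */
    static int layout_size(uint32_t bin_cl_size, uint32_t shader_rec_size,
                           size_t *total_out)
    {
        uint64_t off = bin_cl_size;

        off = (off + 15) & ~15ull;   /* roundup(x, 16) can overflow 32 bits */
        off += shader_rec_size;
        if (off > UINT32_MAX)
            return -1;               /* would under-allocate after truncation */
        *total_out = (size_t)off;
        return 0;
    }

    int main(void)
    {
        size_t total;

        /* a near-UINT32_MAX bin_cl_size must be rejected, not wrapped */
        printf("%d\n", layout_size(0xfffffff0u, 256, &total));
        return 0;
    }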
void vrend_set_single_ssbo(struct vrend_context *ctx, uint32_t shader_type, uint32_t index, uint32_t offset, uint32_t length, uint32_t handle) { struct vrend_ssbo *ssbo = &ctx->sub->ssbo[shader_type][index]; struct vrend_resource *res; if (!has_feature(feat_ssbo)) return; if (handle) { res = vrend_renderer_ctx_res_lookup(ctx, handle); if (!res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, handle); return; } ssbo->res = res; ssbo->buffer_offset = offset; ssbo->buffer_size = length; ctx->sub->ssbo_used_mask[shader_type] |= (1u << index); } else { ssbo->res = 0; ssbo->buffer_offset = 0; ssbo->buffer_size = 0; ctx->sub->ssbo_used_mask[shader_type] &= ~(1u << index); } }
target: 0
cwe: [ "CWE-787" ]
project: virglrenderer
commit_id: 95e581fd181b213c2ed7cdc63f2abc03eaaa77ec
hash: 5,229,854,211,678,195,000,000,000,000,000,000,000
size: 29
message:
vrend: Add test to resource OOB write and fix it v2: Also check that no depth != 1 has been send when none is due Closes: #250 Signed-off-by: Gert Wollny <[email protected]> Reviewed-by: Chia-I Wu <[email protected]>
parseMaxPDU(DUL_MAXLENGTH * max, unsigned char *buf, unsigned long *itemLength, unsigned long availData) { // We want to read 8 bytes of data, is there enough data? if (availData < 8) return makeLengthError("Max PDU", availData, 8); max->type = *buf++; max->rsv1 = *buf++; EXTRACT_SHORT_BIG(buf, max->length); buf += 2; EXTRACT_LONG_BIG(buf, max->maxLength); *itemLength = 2 + 2 + max->length; if (max->length != 4) DCMNET_WARN("Invalid length (" << max->length << ") for maximum length item, must be 4"); // Is there less data than the length field claims there is? if (availData - 4 < max->length) return makeLengthError("Max PDU", availData, 0, max->length); DCMNET_TRACE("Maximum PDU Length: " << (unsigned long)max->maxLength); return EC_Normal; }
target: 0
cwe: [ "CWE-415", "CWE-703", "CWE-401" ]
project: dcmtk
commit_id: a9697dfeb672b0b9412c00c7d36d801e27ec85cb
hash: 163,177,917,272,488,370,000,000,000,000,000,000,000
size: 25
message:
Fixed poss. NULL pointer dereference/double free. Thanks to Jinsheng Ba <[email protected]> for the report and some patches.
void mddev_unlock(struct mddev *mddev) { if (mddev->to_remove) { /* These cannot be removed under reconfig_mutex as * an access to the files will try to take reconfig_mutex * while holding the file unremovable, which leads to * a deadlock. * So hold set sysfs_active while the remove in happeing, * and anything else which might set ->to_remove or my * otherwise change the sysfs namespace will fail with * -EBUSY if sysfs_active is still set. * We set sysfs_active under reconfig_mutex and elsewhere * test it under the same mutex to ensure its correct value * is seen. */ struct attribute_group *to_remove = mddev->to_remove; mddev->to_remove = NULL; mddev->sysfs_active = 1; mutex_unlock(&mddev->reconfig_mutex); if (mddev->kobj.sd) { if (to_remove != &md_redundancy_group) sysfs_remove_group(&mddev->kobj, to_remove); if (mddev->pers == NULL || mddev->pers->sync_request == NULL) { sysfs_remove_group(&mddev->kobj, &md_redundancy_group); if (mddev->sysfs_action) sysfs_put(mddev->sysfs_action); mddev->sysfs_action = NULL; } } mddev->sysfs_active = 0; } else mutex_unlock(&mddev->reconfig_mutex); /* As we've dropped the mutex we need a spinlock to * make sure the thread doesn't disappear */ spin_lock(&pers_lock); md_wakeup_thread(mddev->thread); spin_unlock(&pers_lock); }
target: 0
cwe: [ "CWE-200" ]
project: linux
commit_id: b6878d9e03043695dbf3fa1caa6dfc09db225b16
hash: 110,179,796,029,992,220,000,000,000,000,000,000,000
size: 42
message:
md: use kzalloc() when bitmap is disabled In drivers/md/md.c get_bitmap_file() uses kmalloc() for creating a mdu_bitmap_file_t called "file". 5769 file = kmalloc(sizeof(*file), GFP_NOIO); 5770 if (!file) 5771 return -ENOMEM; This structure is copied to user space at the end of the function. 5786 if (err == 0 && 5787 copy_to_user(arg, file, sizeof(*file))) 5788 err = -EFAULT But if bitmap is disabled only the first byte of "file" is initialized with zero, so it's possible to read some bytes (up to 4095) of kernel space memory from user space. This is an information leak. 5775 /* bitmap disabled, zero the first byte and copy out */ 5776 if (!mddev->bitmap_info.file) 5777 file->pathname[0] = '\0'; Signed-off-by: Benjamin Randazzo <[email protected]> Signed-off-by: NeilBrown <[email protected]>
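The pattern behind the fix, in a userspace sketch with calloc standing in for kzalloc (the struct and path are illustrative):

    #include <stdlib.h>
    #include <string.h>

    struct bitmap_file { char pathname[4096]; };

    /* kmalloc plus "pathname[0] = '\0'" leaves 4095 uninitialized bytes
     * that copy_to_user would expose; a zeroed allocation leaks nothing. */
    static struct bitmap_file *alloc_bitmap_file(int bitmap_enabled)
    {
        struct bitmap_file *file = calloc(1, sizeof(*file));

        if (!file)
            return NULL;
        if (bitmap_enabled)
            strcpy(file->pathname, "/var/lib/md/bitmap");   /* illustrative */
        return file;
    }

    int main(void)
    {
        struct bitmap_file *f = alloc_bitmap_file(0);

        /* every byte of f->pathname is zero: nothing to leak */
        free(f);
        return 0;
    }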
rb_str_aref(str, indx) VALUE str; VALUE indx; { long idx; switch (TYPE(indx)) { case T_FIXNUM: idx = FIX2LONG(indx); num_index: if (idx < 0) { idx = RSTRING(str)->len + idx; } if (idx < 0 || RSTRING(str)->len <= idx) { return Qnil; } return INT2FIX(RSTRING(str)->ptr[idx] & 0xff); case T_REGEXP: return rb_str_subpat(str, indx, 0); case T_STRING: if (rb_str_index(str, indx, 0) != -1) return rb_str_dup(indx); return Qnil; default: /* check if indx is Range */ { long beg, len; VALUE tmp; switch (rb_range_beg_len(indx, &beg, &len, RSTRING(str)->len, 0)) { case Qfalse: break; case Qnil: return Qnil; default: tmp = rb_str_substr(str, beg, len); OBJ_INFECT(tmp, indx); return tmp; } } idx = NUM2LONG(indx); goto num_index; } return Qnil; /* not reached */ }
target: 0
cwe: [ "CWE-20" ]
project: ruby
commit_id: e926ef5233cc9f1035d3d51068abe9df8b5429da
hash: 197,023,519,829,661,170,000,000,000,000,000,000,000
size: 49
message:
* random.c (rb_genrand_int32, rb_genrand_real), intern.h: Export. * string.c (rb_str_tmp_new), intern.h: New function. git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_1_8@16014 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
explicit CollectiveBcastSendV2OpKernel(OpKernelConstruction* c) : CollectiveOpV2Kernel(c) { const bool is_source = true; name_ = strings::StrCat(name(), ": Broadcast(", is_source, ")"); }
target: 0
cwe: [ "CWE-416" ]
project: tensorflow
commit_id: ca38dab9d3ee66c5de06f11af9a4b1200da5ef75
hash: 121,060,510,396,229,640,000,000,000,000,000,000,000
size: 5
message:
Fix undefined behavior in CollectiveReduceV2 and others We should not call done after it's moved. PiperOrigin-RevId: 400838185 Change-Id: Ifc979740054b8f8c6f4d50acc89472fe60c4fdb1
parameter_brace_substring (varname, value, ind, substr, quoted, flags) char *varname, *value; int ind; char *substr; int quoted, flags; { intmax_t e1, e2; int vtype, r, starsub; char *temp, *val, *tt, *oname; SHELL_VAR *v; if (value == 0 && ((varname[0] != '@' && varname[0] != '*') || varname[1])) return ((char *)NULL); oname = this_command_name; this_command_name = varname; vtype = get_var_and_type (varname, value, ind, quoted, flags, &v, &val); if (vtype == -1) { this_command_name = oname; return ((char *)NULL); } starsub = vtype & VT_STARSUB; vtype &= ~VT_STARSUB; r = verify_substring_values (v, val, substr, vtype, &e1, &e2); this_command_name = oname; if (r <= 0) { if (vtype == VT_VARIABLE) FREE (val); return ((r == 0) ? &expand_param_error : (char *)NULL); } switch (vtype) { case VT_VARIABLE: case VT_ARRAYMEMBER: #if defined (HANDLE_MULTIBYTE) if (MB_CUR_MAX > 1) tt = mb_substring (val, e1, e2); else #endif tt = substring (val, e1, e2); if (vtype == VT_VARIABLE) FREE (val); if (quoted & (Q_DOUBLE_QUOTES|Q_HERE_DOCUMENT)) temp = quote_string (tt); else temp = tt ? quote_escapes (tt) : (char *)NULL; FREE (tt); break; case VT_POSPARMS: tt = pos_params (varname, e1, e2, quoted); if ((quoted & (Q_DOUBLE_QUOTES|Q_HERE_DOCUMENT)) == 0) { temp = tt ? quote_escapes (tt) : (char *)NULL; FREE (tt); } else temp = tt; break; #if defined (ARRAY_VARS) case VT_ARRAYVAR: if (assoc_p (v)) /* we convert to list and take first e2 elements starting at e1th element -- officially undefined for now */ temp = assoc_subrange (assoc_cell (v), e1, e2, starsub, quoted); else /* We want E2 to be the number of elements desired (arrays can be sparse, so verify_substring_values just returns the numbers specified and we rely on array_subrange to understand how to deal with them). */ temp = array_subrange (array_cell (v), e1, e2, starsub, quoted); /* array_subrange now calls array_quote_escapes as appropriate, so the caller no longer needs to. */ break; #endif default: temp = (char *)NULL; } return temp; }
target: 0
cwe: []
project: bash
commit_id: 955543877583837c85470f7fb8a97b7aa8d45e6c
hash: 202,903,533,609,240,660,000,000,000,000,000,000,000
size: 86
message:
bash-4.4-rc2 release
int crypto_has_ahash(const char *alg_name, u32 type, u32 mask) { return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask); }
target: 0
cwe: [ "CWE-835" ]
project: linux
commit_id: ef0579b64e93188710d48667cb5e014926af9f1b
hash: 192,711,503,507,643,060,000,000,000,000,000,000,000
size: 4
message:
crypto: ahash - Fix EINPROGRESS notification callback The ahash API modifies the request's callback function in order to clean up after itself in some corner cases (unaligned final and missing finup). When the request is complete ahash will restore the original callback and everything is fine. However, when the request gets an EBUSY on a full queue, an EINPROGRESS callback is made while the request is still ongoing. In this case the ahash API will incorrectly call its own callback. This patch fixes the problem by creating a temporary request object on the stack which is used to relay EINPROGRESS back to the original completion function. This patch also adds code to preserve the original flags value. Fixes: ab6bf4e5e5e4 ("crypto: hash - Fix the pointer voodoo in...") Cc: <[email protected]> Reported-by: Sabrina Dubroca <[email protected]> Tested-by: Sabrina Dubroca <[email protected]> Signed-off-by: Herbert Xu <[email protected]>
explicit AddOpsRewriteStage(const GraphOptimizerContext& ctx, const ArithmeticOptimizerContext& ctx_ext) : ArithmeticNodesGroupOptimizerStage("AddOpsRewrite", ctx, ctx_ext) {}
target: 0
cwe: [ "CWE-476" ]
project: tensorflow
commit_id: e6340f0665d53716ef3197ada88936c2a5f7a2d3
hash: 21,479,876,630,254,176,000,000,000,000,000,000,000
size: 3
message:
Handle a special grappler case resulting in crash. It might happen that a malformed input could be used to trick Grappler into trying to optimize a node with no inputs. This, in turn, would produce a null pointer dereference and a segfault. PiperOrigin-RevId: 369242852 Change-Id: I2e5cbe7aec243d34a6d60220ac8ac9b16f136f6b
RecordDisableContext(RecordContextPtr pContext) { RecordClientsAndProtocolPtr pRCAP; int i; if (!pContext->pRecordingClient) return; if (!pContext->pRecordingClient->clientGone) { RecordAProtocolElement(pContext, NULL, XRecordEndOfData, NULL, 0, 0, 0); RecordFlushReplyBuffer(pContext, NULL, 0, NULL, 0); } /* Re-enable request processing on this connection. */ AttendClient(pContext->pRecordingClient); for (pRCAP = pContext->pListOfRCAP; pRCAP; pRCAP = pRCAP->pNextRCAP) { RecordUninstallHooks(pRCAP, 0); } pContext->pRecordingClient = NULL; /* move the newly disabled context to the rear part of ppAllContexts, * where all the disabled contexts are */ i = RecordFindContextOnAllContexts(pContext); assert(i != -1); assert(i < numEnabledContexts); if (i != (numEnabledContexts - 1)) { ppAllContexts[i] = ppAllContexts[numEnabledContexts - 1]; ppAllContexts[numEnabledContexts - 1] = pContext; } --numEnabledContexts; assert(numEnabledContexts >= 0); } /* RecordDisableContext */
target: 0
cwe: [ "CWE-191" ]
project: xserver
commit_id: 2902b78535ecc6821cc027351818b28a5c7fdbdc
hash: 200,653,983,551,854,250,000,000,000,000,000,000,000
size: 33
message:
Fix XRecordRegisterClients() Integer underflow CVE-2020-14362 ZDI-CAN-11574 This vulnerability was discovered by: Jan-Niklas Sohn working with Trend Micro Zero Day Initiative Signed-off-by: Matthieu Herrb <[email protected]>
xsltDefaultProcessOneNode(xsltTransformContextPtr ctxt, xmlNodePtr node, xsltStackElemPtr params) { xmlNodePtr copy; xmlNodePtr delete = NULL, cur; int nbchild = 0, oldSize; int childno = 0, oldPos; xsltTemplatePtr template; CHECK_STOPPED; /* * Handling of leaves */ switch (node->type) { case XML_DOCUMENT_NODE: case XML_HTML_DOCUMENT_NODE: case XML_ELEMENT_NODE: break; case XML_CDATA_SECTION_NODE: #ifdef WITH_XSLT_DEBUG_PROCESS XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext, "xsltDefaultProcessOneNode: copy CDATA %s\n", node->content)); #endif copy = xsltCopyText(ctxt, ctxt->insert, node, 0); if (copy == NULL) { xsltTransformError(ctxt, NULL, node, "xsltDefaultProcessOneNode: cdata copy failed\n"); } return; case XML_TEXT_NODE: #ifdef WITH_XSLT_DEBUG_PROCESS if (node->content == NULL) { XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext, "xsltDefaultProcessOneNode: copy empty text\n")); return; } else { XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext, "xsltDefaultProcessOneNode: copy text %s\n", node->content)); } #endif copy = xsltCopyText(ctxt, ctxt->insert, node, 0); if (copy == NULL) { xsltTransformError(ctxt, NULL, node, "xsltDefaultProcessOneNode: text copy failed\n"); } return; case XML_ATTRIBUTE_NODE: cur = node->children; while ((cur != NULL) && (cur->type != XML_TEXT_NODE)) cur = cur->next; if (cur == NULL) { xsltTransformError(ctxt, NULL, node, "xsltDefaultProcessOneNode: no text for attribute\n"); } else { #ifdef WITH_XSLT_DEBUG_PROCESS if (cur->content == NULL) { XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext, "xsltDefaultProcessOneNode: copy empty text\n")); } else { XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext, "xsltDefaultProcessOneNode: copy text %s\n", cur->content)); } #endif copy = xsltCopyText(ctxt, ctxt->insert, cur, 0); if (copy == NULL) { xsltTransformError(ctxt, NULL, node, "xsltDefaultProcessOneNode: text copy failed\n"); } } return; default: return; } /* * Handling of Elements: first pass, cleanup and counting */ cur = node->children; while (cur != NULL) { switch (cur->type) { case XML_TEXT_NODE: case XML_CDATA_SECTION_NODE: case XML_DOCUMENT_NODE: case XML_HTML_DOCUMENT_NODE: case XML_ELEMENT_NODE: case XML_PI_NODE: case XML_COMMENT_NODE: nbchild++; break; case XML_DTD_NODE: /* Unlink the DTD, it's still reachable using doc->intSubset */ if (cur->next != NULL) cur->next->prev = cur->prev; if (cur->prev != NULL) cur->prev->next = cur->next; break; default: #ifdef WITH_XSLT_DEBUG_PROCESS XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext, "xsltDefaultProcessOneNode: skipping node type %d\n", cur->type)); #endif delete = cur; } cur = cur->next; if (delete != NULL) { #ifdef WITH_XSLT_DEBUG_PROCESS XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext, "xsltDefaultProcessOneNode: removing ignorable blank node\n")); #endif xmlUnlinkNode(delete); xmlFreeNode(delete); delete = NULL; } } if (delete != NULL) { #ifdef WITH_XSLT_DEBUG_PROCESS XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext, "xsltDefaultProcessOneNode: removing ignorable blank node\n")); #endif xmlUnlinkNode(delete); xmlFreeNode(delete); delete = NULL; } /* * Handling of Elements: second pass, actual processing * * Note that params are passed to the next template. This matches * XSLT 2.0 behavior but doesn't conform to XSLT 1.0. 
*/ oldSize = ctxt->xpathCtxt->contextSize; oldPos = ctxt->xpathCtxt->proximityPosition; cur = node->children; while (cur != NULL) { childno++; switch (cur->type) { case XML_DOCUMENT_NODE: case XML_HTML_DOCUMENT_NODE: case XML_ELEMENT_NODE: ctxt->xpathCtxt->contextSize = nbchild; ctxt->xpathCtxt->proximityPosition = childno; xsltProcessOneNode(ctxt, cur, params); break; case XML_CDATA_SECTION_NODE: template = xsltGetTemplate(ctxt, cur, NULL); if (template) { #ifdef WITH_XSLT_DEBUG_PROCESS XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext, "xsltDefaultProcessOneNode: applying template for CDATA %s\n", cur->content)); #endif /* * Instantiate the xsl:template. */ xsltApplyXSLTTemplate(ctxt, cur, template->content, template, params); } else /* if (ctxt->mode == NULL) */ { #ifdef WITH_XSLT_DEBUG_PROCESS XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext, "xsltDefaultProcessOneNode: copy CDATA %s\n", cur->content)); #endif copy = xsltCopyText(ctxt, ctxt->insert, cur, 0); if (copy == NULL) { xsltTransformError(ctxt, NULL, cur, "xsltDefaultProcessOneNode: cdata copy failed\n"); } } break; case XML_TEXT_NODE: template = xsltGetTemplate(ctxt, cur, NULL); if (template) { #ifdef WITH_XSLT_DEBUG_PROCESS XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext, "xsltDefaultProcessOneNode: applying template for text %s\n", cur->content)); #endif ctxt->xpathCtxt->contextSize = nbchild; ctxt->xpathCtxt->proximityPosition = childno; /* * Instantiate the xsl:template. */ xsltApplyXSLTTemplate(ctxt, cur, template->content, template, params); } else /* if (ctxt->mode == NULL) */ { #ifdef WITH_XSLT_DEBUG_PROCESS if (cur->content == NULL) { XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext, "xsltDefaultProcessOneNode: copy empty text\n")); } else { XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext, "xsltDefaultProcessOneNode: copy text %s\n", cur->content)); } #endif copy = xsltCopyText(ctxt, ctxt->insert, cur, 0); if (copy == NULL) { xsltTransformError(ctxt, NULL, cur, "xsltDefaultProcessOneNode: text copy failed\n"); } } break; case XML_PI_NODE: case XML_COMMENT_NODE: template = xsltGetTemplate(ctxt, cur, NULL); if (template) { #ifdef WITH_XSLT_DEBUG_PROCESS if (cur->type == XML_PI_NODE) { XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext, "xsltDefaultProcessOneNode: template found for PI %s\n", cur->name)); } else if (cur->type == XML_COMMENT_NODE) { XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext, "xsltDefaultProcessOneNode: template found for comment\n")); } #endif ctxt->xpathCtxt->contextSize = nbchild; ctxt->xpathCtxt->proximityPosition = childno; /* * Instantiate the xsl:template. */ xsltApplyXSLTTemplate(ctxt, cur, template->content, template, params); } break; default: break; } cur = cur->next; } ctxt->xpathCtxt->contextSize = oldSize; ctxt->xpathCtxt->proximityPosition = oldPos; }
target: 0
cwe: []
project: libxslt
commit_id: e03553605b45c88f0b4b2980adfbbb8f6fca2fd6
hash: 52,886,267,465,473,320,000,000,000,000,000,000,000
size: 235
message:
Fix security framework bypass xsltCheckRead and xsltCheckWrite return -1 in case of error but callers don't check for this condition and allow access. With a specially crafted URL, xsltCheckRead could be tricked into returning an error because of a supposedly invalid URL that would still be loaded successfully later on. Fixes #12. Thanks to Felix Wilhelm for the report.
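The bypass reduces to a tri-state return value being tested as a boolean. A minimal sketch with a stubbed checker (not libxslt's actual API):

    #include <stdio.h>
    #include <string.h>

    /* Stub tri-state checker: -1 error (malformed URL), 0 deny, 1 allow. */
    static int check_read(const char *url)
    {
        if (strstr(url, "%00"))
            return -1;
        return strncmp(url, "file:", 5) == 0 ? 0 : 1;
    }

    static int may_load_buggy(const char *url)
    {
        return check_read(url) != 0;   /* -1 (error) slips through as allow */
    }

    static int may_load_fixed(const char *url)
    {
        return check_read(url) > 0;    /* only an explicit 1 grants access */
    }

    int main(void)
    {
        const char *url = "http://example.com/%00.xml";   /* made-up URL */

        printf("buggy: %d, fixed: %d\n", may_load_buggy(url), may_load_fixed(url));
        return 0;
    }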
static void handle_RMD(ctrl_t *ctrl, char *arg) { handle_DELE(ctrl, arg); }
target: 0
cwe: [ "CWE-120", "CWE-787" ]
project: uftpd
commit_id: 0fb2c031ce0ace07cc19cd2cb2143c4b5a63c9dd
hash: 286,084,565,394,783,000,000,000,000,000,000,000,000
size: 4
message:
FTP: Fix buffer overflow in PORT parser, reported by Aaron Esau Signed-off-by: Joachim Nilsson <[email protected]>
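A hedged sketch of a bounds-safe PORT argument parser, the general fix for this bug class (not uftpd's actual parser):

    #include <stdio.h>

    /* Parses "h1,h2,h3,h4,p1,p2", rejecting out-of-range octets instead
     * of writing attacker-chosen values into fixed-size fields. */
    static int parse_port_arg(const char *arg, unsigned char ip[4],
                              unsigned short *port)
    {
        int v[6], i;

        if (sscanf(arg, "%d,%d,%d,%d,%d,%d",
                   &v[0], &v[1], &v[2], &v[3], &v[4], &v[5]) != 6)
            return -1;
        for (i = 0; i < 6; i++)
            if (v[i] < 0 || v[i] > 255)
                return -1;   /* the range check this bug class is missing */
        for (i = 0; i < 4; i++)
            ip[i] = (unsigned char)v[i];
        *port = (unsigned short)(v[4] << 8 | v[5]);
        return 0;
    }

    int main(void)
    {
        unsigned char ip[4];
        unsigned short port;

        /* 999 is not a valid octet and must be rejected */
        printf("%d\n", parse_port_arg("10,0,0,1,999,1", ip, &port));
        return 0;
    }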
static void b43_short_slot_timing_disable(struct b43_wldev *dev) { b43_set_slot_time(dev, 20); }
target: 0
cwe: [ "CWE-134" ]
project: wireless
commit_id: 9538cbaab6e8b8046039b4b2eb6c9d614dc782bd
hash: 120,991,387,906,729,580,000,000,000,000,000,000,000
size: 4
message:
b43: stop format string leaking into error msgs The module parameter "fwpostfix" is userspace controllable, unfiltered, and is used to define the firmware filename. b43_do_request_fw() populates ctx->errors[] on error, containing the firmware filename. b43err() parses its arguments as a format string. For systems with b43 hardware, this could lead to a uid-0 to ring-0 escalation. CVE-2013-2852 Signed-off-by: Kees Cook <[email protected]> Cc: [email protected] Signed-off-by: John W. Linville <[email protected]>
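The standard fix for this bug class, sketched with a stand-in logger (log_err is invented, not the b43 API): never pass externally controlled text as the format argument.

    #include <stdarg.h>
    #include <stdio.h>

    /* Stand-in printf-style logger. */
    static void log_err(const char *fmt, ...)
    {
        va_list ap;

        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
    }

    static void report_fw_failure(const char *fwname)
    {
        /* log_err(fwname) would interpret %-sequences inside a user-chosen
         * firmware name; the constant "%s" format prints them literally. */
        log_err("Firmware file \"%s\" not found\n", fwname);
    }

    int main(void)
    {
        report_fw_failure("b43%n%s%x.fw");   /* hostile name, now harmless */
        return 0;
    }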
static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held, struct netlink_ext_ack *extack) { if (refcount_dec_and_test(&tp->refcnt)) tcf_proto_destroy(tp, rtnl_held, true, extack); }
target: 0
cwe: [ "CWE-416" ]
project: linux
commit_id: 04c2a47ffb13c29778e2a14e414ad4cb5a5db4b5
hash: 90,178,259,932,599,340,000,000,000,000,000,000,000
size: 6
message:
net: sched: fix use-after-free in tc_new_tfilter() Whenever tc_new_tfilter() jumps back to replay: label, we need to make sure @q and @chain local variables are cleared again, or risk use-after-free as in [1] For consistency, apply the same fix in tc_ctl_chain() BUG: KASAN: use-after-free in mini_qdisc_pair_swap+0x1b9/0x1f0 net/sched/sch_generic.c:1581 Write of size 8 at addr ffff8880985c4b08 by task syz-executor.4/1945 CPU: 0 PID: 1945 Comm: syz-executor.4 Not tainted 5.17.0-rc1-syzkaller-00495-gff58831fa02d #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: <TASK> __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106 print_address_description.constprop.0.cold+0x8d/0x336 mm/kasan/report.c:255 __kasan_report mm/kasan/report.c:442 [inline] kasan_report.cold+0x83/0xdf mm/kasan/report.c:459 mini_qdisc_pair_swap+0x1b9/0x1f0 net/sched/sch_generic.c:1581 tcf_chain_head_change_item net/sched/cls_api.c:372 [inline] tcf_chain0_head_change.isra.0+0xb9/0x120 net/sched/cls_api.c:386 tcf_chain_tp_insert net/sched/cls_api.c:1657 [inline] tcf_chain_tp_insert_unique net/sched/cls_api.c:1707 [inline] tc_new_tfilter+0x1e67/0x2350 net/sched/cls_api.c:2086 rtnetlink_rcv_msg+0x80d/0xb80 net/core/rtnetlink.c:5583 netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2494 netlink_unicast_kernel net/netlink/af_netlink.c:1317 [inline] netlink_unicast+0x539/0x7e0 net/netlink/af_netlink.c:1343 netlink_sendmsg+0x904/0xe00 net/netlink/af_netlink.c:1919 sock_sendmsg_nosec net/socket.c:705 [inline] sock_sendmsg+0xcf/0x120 net/socket.c:725 ____sys_sendmsg+0x331/0x810 net/socket.c:2413 ___sys_sendmsg+0xf3/0x170 net/socket.c:2467 __sys_sendmmsg+0x195/0x470 net/socket.c:2553 __do_sys_sendmmsg net/socket.c:2582 [inline] __se_sys_sendmmsg net/socket.c:2579 [inline] __x64_sys_sendmmsg+0x99/0x100 net/socket.c:2579 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae RIP: 0033:0x7f2647172059 Code: ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007f2645aa5168 EFLAGS: 00000246 ORIG_RAX: 0000000000000133 RAX: ffffffffffffffda RBX: 00007f2647285100 RCX: 00007f2647172059 RDX: 040000000000009f RSI: 00000000200002c0 RDI: 0000000000000006 RBP: 00007f26471cc08d R08: 0000000000000000 R09: 0000000000000000 R10: 9e00000000000000 R11: 0000000000000246 R12: 0000000000000000 R13: 00007fffb3f7f02f R14: 00007f2645aa5300 R15: 0000000000022000 </TASK> Allocated by task 1944: kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38 kasan_set_track mm/kasan/common.c:45 [inline] set_alloc_info mm/kasan/common.c:436 [inline] ____kasan_kmalloc mm/kasan/common.c:515 [inline] ____kasan_kmalloc mm/kasan/common.c:474 [inline] __kasan_kmalloc+0xa9/0xd0 mm/kasan/common.c:524 kmalloc_node include/linux/slab.h:604 [inline] kzalloc_node include/linux/slab.h:726 [inline] qdisc_alloc+0xac/0xa10 net/sched/sch_generic.c:941 qdisc_create.constprop.0+0xce/0x10f0 net/sched/sch_api.c:1211 tc_modify_qdisc+0x4c5/0x1980 net/sched/sch_api.c:1660 rtnetlink_rcv_msg+0x413/0xb80 net/core/rtnetlink.c:5592 netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2494 netlink_unicast_kernel net/netlink/af_netlink.c:1317 [inline] netlink_unicast+0x539/0x7e0 net/netlink/af_netlink.c:1343 netlink_sendmsg+0x904/0xe00 net/netlink/af_netlink.c:1919 sock_sendmsg_nosec 
net/socket.c:705 [inline] sock_sendmsg+0xcf/0x120 net/socket.c:725 ____sys_sendmsg+0x331/0x810 net/socket.c:2413 ___sys_sendmsg+0xf3/0x170 net/socket.c:2467 __sys_sendmmsg+0x195/0x470 net/socket.c:2553 __do_sys_sendmmsg net/socket.c:2582 [inline] __se_sys_sendmmsg net/socket.c:2579 [inline] __x64_sys_sendmmsg+0x99/0x100 net/socket.c:2579 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae Freed by task 3609: kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38 kasan_set_track+0x21/0x30 mm/kasan/common.c:45 kasan_set_free_info+0x20/0x30 mm/kasan/generic.c:370 ____kasan_slab_free mm/kasan/common.c:366 [inline] ____kasan_slab_free+0x130/0x160 mm/kasan/common.c:328 kasan_slab_free include/linux/kasan.h:236 [inline] slab_free_hook mm/slub.c:1728 [inline] slab_free_freelist_hook+0x8b/0x1c0 mm/slub.c:1754 slab_free mm/slub.c:3509 [inline] kfree+0xcb/0x280 mm/slub.c:4562 rcu_do_batch kernel/rcu/tree.c:2527 [inline] rcu_core+0x7b8/0x1540 kernel/rcu/tree.c:2778 __do_softirq+0x29b/0x9c2 kernel/softirq.c:558 Last potentially related work creation: kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38 __kasan_record_aux_stack+0xbe/0xd0 mm/kasan/generic.c:348 __call_rcu kernel/rcu/tree.c:3026 [inline] call_rcu+0xb1/0x740 kernel/rcu/tree.c:3106 qdisc_put_unlocked+0x6f/0x90 net/sched/sch_generic.c:1109 tcf_block_release+0x86/0x90 net/sched/cls_api.c:1238 tc_new_tfilter+0xc0d/0x2350 net/sched/cls_api.c:2148 rtnetlink_rcv_msg+0x80d/0xb80 net/core/rtnetlink.c:5583 netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2494 netlink_unicast_kernel net/netlink/af_netlink.c:1317 [inline] netlink_unicast+0x539/0x7e0 net/netlink/af_netlink.c:1343 netlink_sendmsg+0x904/0xe00 net/netlink/af_netlink.c:1919 sock_sendmsg_nosec net/socket.c:705 [inline] sock_sendmsg+0xcf/0x120 net/socket.c:725 ____sys_sendmsg+0x331/0x810 net/socket.c:2413 ___sys_sendmsg+0xf3/0x170 net/socket.c:2467 __sys_sendmmsg+0x195/0x470 net/socket.c:2553 __do_sys_sendmmsg net/socket.c:2582 [inline] __se_sys_sendmmsg net/socket.c:2579 [inline] __x64_sys_sendmmsg+0x99/0x100 net/socket.c:2579 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae The buggy address belongs to the object at ffff8880985c4800 which belongs to the cache kmalloc-1k of size 1024 The buggy address is located 776 bytes inside of 1024-byte region [ffff8880985c4800, ffff8880985c4c00) The buggy address belongs to the page: page:ffffea0002617000 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x985c0 head:ffffea0002617000 order:3 compound_mapcount:0 compound_pincount:0 flags: 0xfff00000010200(slab|head|node=0|zone=1|lastcpupid=0x7ff) raw: 00fff00000010200 0000000000000000 dead000000000122 ffff888010c41dc0 raw: 0000000000000000 0000000000100010 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected page_owner tracks the page as allocated page last allocated via order 3, migratetype Unmovable, gfp_mask 0x1d20c0(__GFP_IO|__GFP_FS|__GFP_NOWARN|__GFP_NORETRY|__GFP_COMP|__GFP_NOMEMALLOC|__GFP_HARDWALL), pid 1941, ts 1038999441284, free_ts 1033444432829 prep_new_page mm/page_alloc.c:2434 [inline] get_page_from_freelist+0xa72/0x2f50 mm/page_alloc.c:4165 __alloc_pages+0x1b2/0x500 mm/page_alloc.c:5389 alloc_pages+0x1aa/0x310 mm/mempolicy.c:2271 alloc_slab_page mm/slub.c:1799 [inline] allocate_slab mm/slub.c:1944 [inline] new_slab+0x28a/0x3b0 mm/slub.c:2004 ___slab_alloc+0x87c/0xe90 
mm/slub.c:3018 __slab_alloc.constprop.0+0x4d/0xa0 mm/slub.c:3105 slab_alloc_node mm/slub.c:3196 [inline] slab_alloc mm/slub.c:3238 [inline] __kmalloc+0x2fb/0x340 mm/slub.c:4420 kmalloc include/linux/slab.h:586 [inline] kzalloc include/linux/slab.h:715 [inline] __register_sysctl_table+0x112/0x1090 fs/proc/proc_sysctl.c:1335 neigh_sysctl_register+0x2c8/0x5e0 net/core/neighbour.c:3787 devinet_sysctl_register+0xb1/0x230 net/ipv4/devinet.c:2618 inetdev_init+0x286/0x580 net/ipv4/devinet.c:278 inetdev_event+0xa8a/0x15d0 net/ipv4/devinet.c:1532 notifier_call_chain+0xb5/0x200 kernel/notifier.c:84 call_netdevice_notifiers_info+0xb5/0x130 net/core/dev.c:1919 call_netdevice_notifiers_extack net/core/dev.c:1931 [inline] call_netdevice_notifiers net/core/dev.c:1945 [inline] register_netdevice+0x1073/0x1500 net/core/dev.c:9698 veth_newlink+0x59c/0xa90 drivers/net/veth.c:1722 page last free stack trace: reset_page_owner include/linux/page_owner.h:24 [inline] free_pages_prepare mm/page_alloc.c:1352 [inline] free_pcp_prepare+0x374/0x870 mm/page_alloc.c:1404 free_unref_page_prepare mm/page_alloc.c:3325 [inline] free_unref_page+0x19/0x690 mm/page_alloc.c:3404 release_pages+0x748/0x1220 mm/swap.c:956 tlb_batch_pages_flush mm/mmu_gather.c:50 [inline] tlb_flush_mmu_free mm/mmu_gather.c:243 [inline] tlb_flush_mmu+0xe9/0x6b0 mm/mmu_gather.c:250 zap_pte_range mm/memory.c:1441 [inline] zap_pmd_range mm/memory.c:1490 [inline] zap_pud_range mm/memory.c:1519 [inline] zap_p4d_range mm/memory.c:1540 [inline] unmap_page_range+0x1d1d/0x2a30 mm/memory.c:1561 unmap_single_vma+0x198/0x310 mm/memory.c:1606 unmap_vmas+0x16b/0x2f0 mm/memory.c:1638 exit_mmap+0x201/0x670 mm/mmap.c:3178 __mmput+0x122/0x4b0 kernel/fork.c:1114 mmput+0x56/0x60 kernel/fork.c:1135 exit_mm kernel/exit.c:507 [inline] do_exit+0xa3c/0x2a30 kernel/exit.c:793 do_group_exit+0xd2/0x2f0 kernel/exit.c:935 __do_sys_exit_group kernel/exit.c:946 [inline] __se_sys_exit_group kernel/exit.c:944 [inline] __x64_sys_exit_group+0x3a/0x50 kernel/exit.c:944 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae Memory state around the buggy address: ffff8880985c4a00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff8880985c4a80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb >ffff8880985c4b00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff8880985c4b80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff8880985c4c00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc Fixes: 470502de5bdb ("net: sched: unlock rules update API") Signed-off-by: Eric Dumazet <[email protected]> Cc: Vlad Buslov <[email protected]> Cc: Jiri Pirko <[email protected]> Cc: Cong Wang <[email protected]> Reported-by: syzbot <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
size_t SSL_get_peer_finished(const SSL *s, void *buf, size_t count) { size_t ret = 0; if (s->s3 != NULL) { ret = s->s3->tmp.peer_finish_md_len; if (count > ret) count = ret; memcpy(buf, s->s3->tmp.peer_finish_md, count); } return ret; }
0
[]
openssl
ee2ffc279417f15fef3b1073c7dc81a908991516
98,897,796,779,007,380,000,000,000,000,000,000,000
13
Add Next Protocol Negotiation.
int ext4_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { handle_t *handle = ext4_journal_current_handle(); int ret = 0, started = 0; unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; int dio_credits; if (create && !handle) { /* Direct IO write... */ if (max_blocks > DIO_MAX_BLOCKS) max_blocks = DIO_MAX_BLOCKS; dio_credits = ext4_chunk_trans_blocks(inode, max_blocks); handle = ext4_journal_start(inode, dio_credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out; } started = 1; } ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result, create ? EXT4_GET_BLOCKS_CREATE : 0); if (ret > 0) { bh_result->b_size = (ret << inode->i_blkbits); ret = 0; } if (started) ext4_journal_stop(handle); out: return ret; }
0
[ "CWE-703" ]
linux
744692dc059845b2a3022119871846e74d4f6e11
330,215,994,617,483,450,000,000,000,000,000,000,000
32
ext4: use ext4_get_block_write in buffer write Allocate uninitialized extent before ext4 buffer write and convert the extent to initialized after io completes. The purpose is to make sure an extent can only be marked initialized after it has been written with new data so we can safely drop the i_mutex lock in ext4 DIO read without exposing stale data. This helps to improve multi-thread DIO read performance on high-speed disks. Skip the nobh and data=journal mount cases to make things simple for now. Signed-off-by: Jiaying Zhang <[email protected]> Signed-off-by: "Theodore Ts'o" <[email protected]>
evbuffer_expand(struct evbuffer *buf, size_t datlen) { struct evbuffer_chain *chain; EVBUFFER_LOCK(buf); chain = evbuffer_expand_singlechain(buf, datlen); EVBUFFER_UNLOCK(buf); return chain ? 0 : -1; }
0
[ "CWE-189" ]
libevent
20d6d4458bee5d88bda1511c225c25b2d3198d6c
271,237,291,003,032,850,000,000,000,000,000,000,000
9
Fix CVE-2014-6272 in Libevent 2.0 For this fix, we need to make sure that passing too-large inputs to the evbuffer functions can't make us do bad things with the heap. Also, lower the maximum chunk size to the lower of the off_t and size_t maximums. This is necessary since otherwise we could get into an infinite loop if we make a chunk that 'misalign' cannot index into.
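The cap the fix describes can be sketched in portable C. Everything here is illustrative: the names are hypothetical, and INT64_MAX stands in for the off_t maximum, which has no portable spelling.

#include <stddef.h>
#include <stdint.h>

/* Largest chunk we will ever allocate: the smaller of what size_t and a
 * signed 64-bit offset can represent (modeling the off_t/size_t minimum). */
static const size_t chunk_size_max =
    (SIZE_MAX < (uint64_t)INT64_MAX) ? SIZE_MAX : (size_t)INT64_MAX;

/* Reject an addition that could not possibly fit, before touching the heap. */
static int buffer_would_overflow(size_t used, size_t datlen)
{
    return used > chunk_size_max || datlen > chunk_size_max - used;
}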
static inline int dev_requeue_skb(struct sk_buff *skb, struct net_device *dev, struct Qdisc *q) { if (unlikely(skb->next)) dev->gso_skb = skb; else q->ops->requeue(skb, q); netif_schedule(dev); return 0; }
0
[ "CWE-399" ]
linux-2.6
2ba2506ca7ca62c56edaa334b0fe61eb5eab6ab0
223,019,746,771,586,040,000,000,000,000,000,000,000
11
[NET]: Add preemption point in qdisc_run The qdisc_run loop is currently unbounded and runs entirely in a softirq. This is bad as it may create an unbounded softirq run. This patch fixes this by calling need_resched and breaking out if necessary. It also adds a break out if the jiffies value changes since that would indicate we've been transmitting for too long which starves other softirqs. Signed-off-by: Herbert Xu <[email protected]> Signed-off-by: David S. Miller <[email protected]>
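The bounded-loop shape the patch describes can be modeled in userspace. This is only a sketch: jiffies_stub and need_resched_stub are hypothetical stand-ins for the kernel's jiffies and need_resched(), and the packet counter fakes the transmit work.

#include <stdbool.h>
#include <stdio.h>

static unsigned long jiffies_stub;                    /* models jiffies        */
static bool need_resched_stub(void) { return false; } /* models need_resched() */

/* Drain the queue, but break out once time moves on or a reschedule is
 * wanted, instead of running the softirq unbounded. */
static int bounded_drain(int *work)
{
    unsigned long start = jiffies_stub;

    while (*work > 0) {
        (*work)--;                     /* transmit one packet          */
        if ((*work % 100000) == 0)
            jiffies_stub++;            /* pretend a timer tick fired   */
        if (need_resched_stub() || jiffies_stub != start)
            return 1;                  /* stop; queue gets rescheduled */
    }
    return 0;                          /* queue fully drained          */
}

int main(void)
{
    int work = 1000000, rounds = 0;
    while (bounded_drain(&work))
        rounds++;                      /* models the re-schedule       */
    printf("drained in %d bounded rounds\n", rounds + 1);
    return 0;
}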
int ssl2_shutdown(SSL *s) { s->shutdown = (SSL_SENT_SHUTDOWN | SSL_RECEIVED_SHUTDOWN); return (1); }
0
[ "CWE-20" ]
openssl
86f8fb0e344d62454f8daf3e15236b2b59210756
177,812,147,112,798,140,000,000,000,000,000,000,000
5
Fix reachable assert in SSLv2 servers. This assert is reachable for servers that support SSLv2 and export ciphers. Therefore, such servers can be DoSed by sending a specially crafted SSLv2 CLIENT-MASTER-KEY. Also fix s2_srvr.c to error out early if the key lengths are malformed. These lengths are sent unencrypted, so this does not introduce an oracle. CVE-2015-0293 This issue was discovered by Sean Burford (Google) and Emilia Käsper of the OpenSSL development team. Reviewed-by: Richard Levitte <[email protected]> Reviewed-by: Tim Hudson <[email protected]>
add_tree_first_ipv4(const u_char *data, const int len, const int datalink) { tcpr_tree_t *newnode, *findnode; const u_char *packet = data; uint32_t _U_ vlan_offset; uint32_t pkt_len = len; uint16_t ether_type; uint32_t l2offset; ipv4_hdr_t ip_hdr; uint32_t l2len; int res; assert(packet); res = get_l2len_protocol(packet, pkt_len, datalink, &ether_type, &l2len, &l2offset, &vlan_offset); if (res == -1 || len < (TCPR_ETH_H + TCPR_IPV4_H)) { errx(-1, "Capture length %d too small for IPv4 parsing", len); return; } packet += l2offset; l2len -= l2offset; pkt_len -= l2offset; /* * first add/find the source IP/client */ newnode = new_tree(); /* prevent issues with byte alignment, must memcpy */ memcpy(&ip_hdr, (packet + TCPR_ETH_H), TCPR_IPV4_H); /* copy over the source ip, and values to guarantee this a client */ newnode->family = AF_INET; newnode->u.ip = ip_hdr.ip_src.s_addr; newnode->type = DIR_CLIENT; newnode->client_cnt = 1000; findnode = RB_FIND(tcpr_data_tree_s, &treeroot, newnode); /* if we didn't find it, add it to the tree, else free it */ if (findnode == NULL) { RB_INSERT(tcpr_data_tree_s, &treeroot, newnode); } else { safe_free(newnode); } /* * now add/find the destination IP/server */ newnode = new_tree(); memcpy(&ip_hdr, (packet + TCPR_ETH_H), TCPR_IPV4_H); newnode->family = AF_INET; newnode->u.ip = ip_hdr.ip_dst.s_addr; newnode->type = DIR_SERVER; newnode->server_cnt = 1000; findnode = RB_FIND(tcpr_data_tree_s, &treeroot, newnode); if (findnode == NULL) { RB_INSERT(tcpr_data_tree_s, &treeroot, newnode); } else { safe_free(newnode); } }
1
[ "CWE-476" ]
tcpreplay
46cf964a7db636da76abeebf10482acf6f682a87
100,694,689,434,345,540,000,000,000,000,000,000,000
71
Bug #677 - fixes for tcpprep tree
static int ZEND_FASTCALL ZEND_INIT_ARRAY_SPEC_VAR_CONST_HANDLER(ZEND_OPCODE_HANDLER_ARGS) { zend_op *opline = EX(opline); array_init(&EX_T(opline->result.u.var).tmp_var); if (IS_VAR == IS_UNUSED) { ZEND_VM_NEXT_OPCODE(); #if 0 || IS_VAR != IS_UNUSED } else { return ZEND_ADD_ARRAY_ELEMENT_SPEC_VAR_CONST_HANDLER(ZEND_OPCODE_HANDLER_ARGS_PASSTHRU); #endif } }
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
312,607,084,606,594,800,000,000,000,000,000,000,000
13
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
srtp_protect_aead (srtp_ctx_t *ctx, srtp_stream_ctx_t *stream, void *rtp_hdr, unsigned int *pkt_octet_len) { srtp_hdr_t *hdr = (srtp_hdr_t*)rtp_hdr; uint32_t *enc_start; /* pointer to start of encrypted portion */ unsigned int enc_octet_len = 0; /* number of octets in encrypted portion */ xtd_seq_num_t est; /* estimated xtd_seq_num_t of *hdr */ int delta; /* delta of local pkt idx and that in hdr */ err_status_t status; int tag_len; v128_t iv; unsigned int aad_len; debug_print(mod_srtp, "function srtp_protect_aead", NULL); /* * update the key usage limit, and check it to make sure that we * didn't just hit either the soft limit or the hard limit, and call * the event handler if we hit either. */ switch (key_limit_update(stream->limit)) { case key_event_normal: break; case key_event_hard_limit: srtp_handle_event(ctx, stream, event_key_hard_limit); return err_status_key_expired; case key_event_soft_limit: default: srtp_handle_event(ctx, stream, event_key_soft_limit); break; } /* get tag length from stream */ tag_len = auth_get_tag_length(stream->rtp_auth); /* * find starting point for encryption and length of data to be * encrypted - the encrypted portion starts after the rtp header * extension, if present; otherwise, it starts after the last csrc, * if any are present */ enc_start = (uint32_t*)hdr + uint32s_in_rtp_header + hdr->cc; if (hdr->x == 1) { srtp_hdr_xtnd_t *xtn_hdr = (srtp_hdr_xtnd_t*)enc_start; enc_start += (ntohs(xtn_hdr->length) + 1); } if (!((uint8_t*)enc_start < (uint8_t*)hdr + (*pkt_octet_len - tag_len))) return err_status_parse_err; enc_octet_len = (unsigned int)(*pkt_octet_len - ((uint8_t*)enc_start - (uint8_t*)hdr)); /* * estimate the packet index using the start of the replay window * and the sequence number from the header */ delta = rdbx_estimate_index(&stream->rtp_rdbx, &est, ntohs(hdr->seq)); status = rdbx_check(&stream->rtp_rdbx, delta); if (status) { if (status != err_status_replay_fail || !stream->allow_repeat_tx) { return status; /* we've been asked to reuse an index */ } } else { rdbx_add_index(&stream->rtp_rdbx, delta); } #ifdef NO_64BIT_MATH debug_print2(mod_srtp, "estimated packet index: %08x%08x", high32(est), low32(est)); #else debug_print(mod_srtp, "estimated packet index: %016llx", est); #endif /* * AEAD uses a new IV formation method */ srtp_calc_aead_iv(stream, &iv, &est, hdr); status = cipher_set_iv(stream->rtp_cipher, &iv, direction_encrypt); if (status) { return err_status_cipher_fail; } /* shift est, put into network byte order */ #ifdef NO_64BIT_MATH est = be64_to_cpu(make64((high32(est) << 16) | (low32(est) >> 16), low32(est) << 16)); #else est = be64_to_cpu(est << 16); #endif /* * Set the AAD over the RTP header */ aad_len = (uint8_t *)enc_start - (uint8_t *)hdr; status = cipher_set_aad(stream->rtp_cipher, (uint8_t*)hdr, aad_len); if (status) { return ( err_status_cipher_fail); } /* Encrypt the payload */ status = cipher_encrypt(stream->rtp_cipher, (uint8_t*)enc_start, &enc_octet_len); if (status) { return err_status_cipher_fail; } /* * If we're doing GCM, we need to get the tag * and append that to the output */ status = cipher_get_tag(stream->rtp_cipher, (uint8_t*)enc_start+enc_octet_len, &tag_len); if (status) { return ( err_status_cipher_fail); } enc_octet_len += tag_len; /* increase the packet length by the length of the auth tag */ *pkt_octet_len += tag_len; return err_status_ok; }
1
[ "CWE-119" ]
libsrtp
be95365fbb4788b688cab7af61c65b7989055fb4
88,340,226,123,085,920,000,000,000,000,000,000,000
121
Apply a7216c19582827cdbd5cacce5618db0b6be8d851 to 1.5.x throttle
TEST_F(OAuth2Test, OAuthErrorQueryString) { Http::TestRequestHeaderMapImpl request_headers{ {Http::Headers::get().Path.get(), "/_oauth?error=someerrorcode"}, {Http::Headers::get().Host.get(), "traffic.example.com"}, {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, }; Http::TestResponseHeaderMapImpl response_headers{ {Http::Headers::get().Status.get(), "401"}, {Http::Headers::get().ContentLength.get(), "18"}, // unauthorizedBodyMessage() {Http::Headers::get().ContentType.get(), "text/plain"}, }; EXPECT_CALL(*validator_, setParams(_, _)); EXPECT_CALL(*validator_, isValid()).WillOnce(Return(false)); EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false)); EXPECT_CALL(decoder_callbacks_, encodeData(_, true)); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers, false)); EXPECT_EQ(scope_.counterFromString("test.oauth_failure").value(), 1); EXPECT_EQ(scope_.counterFromString("test.oauth_success").value(), 0); }
0
[ "CWE-416" ]
envoy
7ffda4e809dec74449ebc330cebb9d2f4ab61360
97,175,582,113,237,600,000,000,000,000,000,000,000
25
oauth2: do not blindly accept requests with a token in the Authorization header (#781) The logic was broken because it assumed an additional call would be performed to the auth server, which isn't the case. Per the filter documentation, a request is only considered subsequently authenticated if there's a valid cookie that was set after the access token was received from the auth server: https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/oauth2_filter More info about how to validate an access token (which we don't do, per above): https://www.oauth.com/oauth2-servers/token-introspection-endpoint/ https://datatracker.ietf.org/doc/html/rfc7662 Also fix the fact that we shouldn't be calling continueDecoding() after decoder_callbacks_->encodeHeaders(). Signed-off-by: Raul Gutierrez Segales <[email protected]> Signed-off-by: Matt Klein <[email protected]> Signed-off-by: Pradeep Rao <[email protected]>
repinfo_copy_rrsets(struct reply_info* dest, struct reply_info* from, struct regional* region) { size_t i, s; struct packed_rrset_data* fd, *dd; struct ub_packed_rrset_key* fk, *dk; for(i=0; i<dest->rrset_count; i++) { fk = from->rrsets[i]; dk = dest->rrsets[i]; fd = (struct packed_rrset_data*)fk->entry.data; dk->entry.hash = fk->entry.hash; dk->rk = fk->rk; if(region) { dk->id = fk->id; dk->rk.dname = (uint8_t*)regional_alloc_init(region, fk->rk.dname, fk->rk.dname_len); } else dk->rk.dname = (uint8_t*)memdup(fk->rk.dname, fk->rk.dname_len); if(!dk->rk.dname) return 0; s = packed_rrset_sizeof(fd); if(region) dd = (struct packed_rrset_data*)regional_alloc_init( region, fd, s); else dd = (struct packed_rrset_data*)memdup(fd, s); if(!dd) return 0; packed_rrset_ptr_fixup(dd); dk->entry.data = (void*)dd; } return 1; }
0
[ "CWE-787" ]
unbound
6c3a0b54ed8ace93d5b5ca7b8078dc87e75cd640
132,811,705,259,801,660,000,000,000,000,000,000,000
33
- Fix Out-of-Bounds Write of Compressed Names in rdata_copy(), reported by X41 D-Sec.
void Downstream::set_request_downstream_host(const StringRef &host) { request_downstream_host_ = host; }
0
[]
nghttp2
319d5ab1c6d916b6b8a0d85b2ae3f01b3ad04f2c
166,039,217,095,061,890,000,000,000,000,000,000,000
3
nghttpx: Fix request stall Fix request stall if backend connection is reused and buffer is full.
S_invlist_previous_index(SV* const invlist) { /* Returns cached index of previous search */ PERL_ARGS_ASSERT_INVLIST_PREVIOUS_INDEX; return *get_invlist_previous_index_addr(invlist); }
0
[ "CWE-125" ]
perl5
43b2f4ef399e2fd7240b4eeb0658686ad95f8e62
71,281,672,501,605,660,000,000,000,000,000,000,000
8
regcomp.c: Convert some strchr to memchr This allows things to work properly in the face of embedded NULs. See the branch merge message for more information.
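The difference is easy to demonstrate: strchr stops at the first NUL byte, so anything after an embedded NUL is invisible to it, while memchr honors an explicit length. A minimal illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Six meaningful bytes with an embedded NUL at index 2. */
    const char buf[] = { 'a', 'b', '\0', 'c', 'd', ']', '\0' };
    size_t len = 6;

    const char *s = strchr(buf, ']');      /* gives up at buf[2]  */
    const char *m = memchr(buf, ']', len); /* scans all six bytes */

    printf("strchr found ']': %s\n", s ? "yes" : "no");  /* no  */
    printf("memchr found ']': %s\n", m ? "yes" : "no");  /* yes */
    return 0;
}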
u32 __tcp_select_window(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); /* MSS for the peer's data. Previous versions used mss_clamp * here. I don't know if the value based on our guesses * of peer's MSS is better for the performance. It's more correct * but may be worse for the performance because of rcv_mss * fluctuations. --SAW 1998/11/1 */ int mss = icsk->icsk_ack.rcv_mss; int free_space = tcp_space(sk); int allowed_space = tcp_full_space(sk); int full_space = min_t(int, tp->window_clamp, allowed_space); int window; if (unlikely(mss > full_space)) { mss = full_space; if (mss <= 0) return 0; } if (free_space < (full_space >> 1)) { icsk->icsk_ack.quick = 0; if (tcp_under_memory_pressure(sk)) tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); /* free_space might become our new window, make sure we don't * increase it due to wscale. */ free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); /* if free space is less than mss estimate, or is below 1/16th * of the maximum allowed, try to move to zero-window, else * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and * new incoming data is dropped due to memory limits. * With large window, mss test triggers way too late in order * to announce zero window in time before rmem limit kicks in. */ if (free_space < (allowed_space >> 4) || free_space < mss) return 0; } if (free_space > tp->rcv_ssthresh) free_space = tp->rcv_ssthresh; /* Don't do rounding if we are using window scaling, since the * scaled window will not line up with the MSS boundary anyway. */ if (tp->rx_opt.rcv_wscale) { window = free_space; /* Advertise enough space so that it won't get scaled away. * Import case: prevent zero window announcement if * 1<<rcv_wscale > mss. */ window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale)); } else { window = tp->rcv_wnd; /* Get the largest window that is a nice multiple of mss. * Window clamp already applied above. * If our current window offering is within 1 mss of the * free space we just keep it. This prevents the divide * and multiply from happening most of the time. * We also don't do any window rounding when the free space * is too small. */ if (window <= free_space - mss || window > free_space) window = rounddown(free_space, mss); else if (mss == full_space && free_space > window + (full_space >> 1)) window = free_space; } return window; }
0
[ "CWE-190" ]
net
3b4929f65b0d8249f19a50245cd88ed1a2f78cff
27,062,373,953,850,233,000,000,000,000,000,000,000
77
tcp: limit payload size of sacked skbs Jonathan Looney reported that TCP can trigger the following crash in tcp_shifted_skb(): BUG_ON(tcp_skb_pcount(skb) < pcount); This can happen if the remote peer has advertised the smallest MSS that linux TCP accepts: 48. An skb can hold 17 fragments, and each fragment can hold 32KB on x86, or 64KB on PowerPC. This means that the 16-bit width of TCP_SKB_CB(skb)->tcp_gso_segs can overflow. Note that tcp_sendmsg() builds skbs with less than 64KB of payload, so this problem needs SACK to be enabled. SACK blocks allow TCP to coalesce multiple skbs in the retransmit queue, thus filling the 17 fragments to maximal capacity. CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing") Signed-off-by: Eric Dumazet <[email protected]> Reported-by: Jonathan Looney <[email protected]> Acked-by: Neal Cardwell <[email protected]> Reviewed-by: Tyler Hicks <[email protected]> Cc: Yuchung Cheng <[email protected]> Cc: Bruce Curtis <[email protected]> Cc: Jonathan Lemon <[email protected]> Signed-off-by: David S. Miller <[email protected]>
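The arithmetic is worth spelling out. One detail below is not in the commit message and comes from the public advisory for CVE-2019-11477: an MSS of 48 leaves roughly 8 bytes of payload per segment once headers are accounted for.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t payload_per_seg = 48 - 40;            /* advisory figure (assumed) */
    uint32_t skb_capacity    = 17u * 32u * 1024u;  /* 17 frags x 32KB on x86    */
    uint32_t segs            = skb_capacity / payload_per_seg;

    printf("segments = %u, u16 max = %u -> %s\n",
           segs, (unsigned)UINT16_MAX,
           segs > UINT16_MAX ? "tcp_gso_segs wraps" : "fits");
    return 0;
}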
static DnsStream *dns_stream_free(DnsStream *s) { DnsPacket *p; Iterator i; assert(s); dns_stream_stop(s); if (s->server && s->server->stream == s) s->server->stream = NULL; if (s->manager) { LIST_REMOVE(streams, s->manager->dns_streams, s); s->manager->n_dns_streams--; } #if ENABLE_DNS_OVER_TLS if (s->encrypted) dnstls_stream_free(s); #endif ORDERED_SET_FOREACH(p, s->write_queue, i) dns_packet_unref(ordered_set_remove(s->write_queue, p)); dns_packet_unref(s->write_packet); dns_packet_unref(s->read_packet); dns_server_unref(s->server); ordered_set_free(s->write_queue); return mfree(s); }
0
[ "CWE-416", "CWE-703" ]
systemd
d973d94dec349fb676fdd844f6fe2ada3538f27c
235,641,531,743,469,300,000,000,000,000,000,000,000
32
resolved: pin stream while calling callbacks for it These callbacks might unref the stream, but we still have to access it, so let's ref it explicitly. Maybe fixes: #10725
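The fix is the generic ref-pinning idiom: take a reference before invoking a callback that may drop the caller's reference, and release it afterwards. A self-contained sketch with a hypothetical refcounted type standing in for DnsStream:

#include <stdlib.h>
#include <stdio.h>

typedef struct Stream {
    int n_ref;
    void (*on_complete)(struct Stream *s);  /* may unref the stream */
} Stream;

static Stream *stream_ref(Stream *s) { s->n_ref++; return s; }
static void stream_unref(Stream *s)  { if (--s->n_ref == 0) free(s); }

static void stream_complete(Stream *s)
{
    stream_ref(s);      /* pin: the callback may drop the last other ref */
    s->on_complete(s);
    /* s is still valid here only because of the pin above */
    stream_unref(s);    /* may actually free it now */
}

static void owner_callback(Stream *s) { stream_unref(s); } /* drops owner ref */

int main(void)
{
    Stream *s = calloc(1, sizeof *s);
    if (!s)
        return 1;
    s->n_ref = 1;
    s->on_complete = owner_callback;
    stream_complete(s);  /* without the pin, this would be a use-after-free */
    puts("done");
    return 0;
}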
static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr) { struct ethtool_rxnfc info; const struct ethtool_ops *ops = dev->ethtool_ops; int ret; void *rule_buf = NULL; if (!ops->get_rxnfc) return -EOPNOTSUPP; if (copy_from_user(&info, useraddr, sizeof(info))) return -EFAULT; if (info.cmd == ETHTOOL_GRXCLSRLALL) { if (info.rule_cnt > 0) { rule_buf = kmalloc(info.rule_cnt * sizeof(u32), GFP_USER); if (!rule_buf) return -ENOMEM; } } ret = ops->get_rxnfc(dev, &info, rule_buf); if (ret < 0) goto err_out; ret = -EFAULT; if (copy_to_user(useraddr, &info, sizeof(info))) goto err_out; if (rule_buf) { useraddr += offsetof(struct ethtool_rxnfc, rule_locs); if (copy_to_user(useraddr, rule_buf, info.rule_cnt * sizeof(u32))) goto err_out; } ret = 0; err_out: kfree(rule_buf); return ret; }
1
[ "CWE-190" ]
linux-2.6
db048b69037e7fa6a7d9e95a1271a50dc08ae233
256,352,693,318,403,330,000,000,000,000,000,000,000
44
ethtool: Fix potential kernel buffer overflow in ETHTOOL_GRXCLSRLALL On a 32-bit machine, info.rule_cnt >= 0x40000000 leads to integer overflow and the buffer may be smaller than needed. Since ETHTOOL_GRXCLSRLALL is unprivileged, this can presumably be used for at least denial of service. Signed-off-by: Ben Hutchings <[email protected]> Cc: [email protected] Signed-off-by: David S. Miller <[email protected]>
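The overflow is in info.rule_cnt * sizeof(u32): with a 32-bit size_t, rule_cnt = 0x40000000 multiplies to 0, so the allocation succeeds but is far too small. A hedged userspace sketch of the guard this class of bug needs:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_rule_buf(uint32_t rule_cnt)
{
    /* Check the multiplication before it can wrap. */
    if (rule_cnt > SIZE_MAX / sizeof(uint32_t))
        return NULL;
    return malloc((size_t)rule_cnt * sizeof(uint32_t));
}

int main(void)
{
    /* The wrap itself, in explicit 32-bit arithmetic: */
    uint32_t wrapped = (uint32_t)(0x40000000u * 4u);
    printf("0x40000000 * 4 as u32 = %u\n", wrapped);   /* 0 */

    void *p = alloc_rule_buf(0x40000000u);
    printf("guarded alloc: %s\n", p ? "ok (64-bit)" : "rejected or failed");
    free(p);
    return 0;
}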
dt_lite_to_s(VALUE self) { return strftimev("%Y-%m-%dT%H:%M:%S%:z", self, set_tmx); }
0
[]
date
3959accef8da5c128f8a8e2fd54e932a4fb253b0
70,650,362,064,189,440,000,000,000,000,000,000,000
4
Add length limit option for methods that parse date strings `Date.parse` now raises an ArgumentError when a given date string is longer than 128 characters. You can configure the limit by passing a `limit` keyword argument, like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`, the limit is disabled. Not only `Date.parse` but also the following methods are changed. * Date._parse * Date.parse * DateTime.parse * Date._iso8601 * Date.iso8601 * DateTime.iso8601 * Date._rfc3339 * Date.rfc3339 * DateTime.rfc3339 * Date._xmlschema * Date.xmlschema * DateTime.xmlschema * Date._rfc2822 * Date.rfc2822 * DateTime.rfc2822 * Date._rfc822 * Date.rfc822 * DateTime.rfc822 * Date._jisx0301 * Date.jisx0301 * DateTime.jisx0301
static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) { audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE); fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); }
0
[ "CWE-362", "CWE-399" ]
linux
49d31c2f389acfe83417083e1208422b4091cd9e
69,434,350,152,555,590,000,000,000,000,000,000,000
6
dentry name snapshots take_dentry_name_snapshot() takes a safe snapshot of dentry name; if the name is a short one, it gets copied into caller-supplied structure, otherwise an extra reference to external name is grabbed (those are never modified). In either case the pointer to stable string is stored into the same structure. dentry must be held by the caller of take_dentry_name_snapshot(), but may be freely dropped afterwards - the snapshot will stay until destroyed by release_dentry_name_snapshot(). Intended use: struct name_snapshot s; take_dentry_name_snapshot(&s, dentry); ... access s.name ... release_dentry_name_snapshot(&s); Replaces fsnotify_oldname_...(), gets used in fsnotify to obtain the name to pass down with event. Signed-off-by: Al Viro <[email protected]>
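Fleshing the intended-use fragment above into a compilable userspace sketch; the struct layout and helper bodies here are hypothetical, only the API shape follows the message:

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

struct name_snapshot { char *name; };

static void take_name_snapshot(struct name_snapshot *s, const char *cur)
{
    s->name = strdup(cur);   /* the kernel copies short names or grabs a ref */
}

static void release_name_snapshot(struct name_snapshot *s)
{
    free(s->name);
}

int main(void)
{
    char dentry_name[32] = "old-name";
    struct name_snapshot s;

    take_name_snapshot(&s, dentry_name);
    strcpy(dentry_name, "renamed");   /* a concurrent rename elsewhere */
    printf("event sees a stable name: %s\n", s.name);  /* "old-name" */
    release_name_snapshot(&s);
    return 0;
}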
xmlBufResize(xmlBufPtr buf, size_t size) { unsigned int newSize; xmlChar* rebuf = NULL; size_t start_buf; if ((buf == NULL) || (buf->error)) return(0); CHECK_COMPAT(buf) if (buf->alloc == XML_BUFFER_ALLOC_IMMUTABLE) return(0); if (buf->alloc == XML_BUFFER_ALLOC_BOUNDED) { /* * Used to provide parsing limits */ if (size >= XML_MAX_TEXT_LENGTH) { xmlBufMemoryError(buf, "buffer error: text too long\n"); return(0); } } /* Don't resize if we don't have to */ if (size < buf->size) return 1; /* figure out new size */ switch (buf->alloc){ case XML_BUFFER_ALLOC_IO: case XML_BUFFER_ALLOC_DOUBLEIT: /*take care of empty case*/ newSize = (buf->size ? buf->size*2 : size + 10); while (size > newSize) { if (newSize > UINT_MAX / 2) { xmlBufMemoryError(buf, "growing buffer"); return 0; } newSize *= 2; } break; case XML_BUFFER_ALLOC_EXACT: newSize = size+10; break; case XML_BUFFER_ALLOC_HYBRID: if (buf->use < BASE_BUFFER_SIZE) newSize = size; else { newSize = buf->size * 2; while (size > newSize) { if (newSize > UINT_MAX / 2) { xmlBufMemoryError(buf, "growing buffer"); return 0; } newSize *= 2; } } break; default: newSize = size+10; break; } if ((buf->alloc == XML_BUFFER_ALLOC_IO) && (buf->contentIO != NULL)) { start_buf = buf->content - buf->contentIO; if (start_buf > newSize) { /* move data back to start */ memmove(buf->contentIO, buf->content, buf->use); buf->content = buf->contentIO; buf->content[buf->use] = 0; buf->size += start_buf; } else { rebuf = (xmlChar *) xmlRealloc(buf->contentIO, start_buf + newSize); if (rebuf == NULL) { xmlBufMemoryError(buf, "growing buffer"); return 0; } buf->contentIO = rebuf; buf->content = rebuf + start_buf; } } else { if (buf->content == NULL) { rebuf = (xmlChar *) xmlMallocAtomic(newSize); } else if (buf->size - buf->use < 100) { rebuf = (xmlChar *) xmlRealloc(buf->content, newSize); } else { /* * if we are reallocating a buffer far from being full, it's * better to make a new allocation and copy only the used range * and free the old one. */ rebuf = (xmlChar *) xmlMallocAtomic(newSize); if (rebuf != NULL) { memcpy(rebuf, buf->content, buf->use); xmlFree(buf->content); rebuf[buf->use] = 0; } } if (rebuf == NULL) { xmlBufMemoryError(buf, "growing buffer"); return 0; } buf->content = rebuf; } buf->size = newSize; UPDATE_COMPAT(buf) return 1; }
1
[ "CWE-190" ]
libxml2
6c283d83eccd940bcde15634ac8c7f100e3caefd
320,104,047,698,532,970,000,000,000,000,000,000,000
109
[CVE-2022-29824] Fix integer overflows in xmlBuf and xmlBuffer In several places, the code handling string buffers didn't check for integer overflow or used wrong types for buffer sizes. This could result in out-of-bounds writes or other memory errors when working on large, multi-gigabyte buffers. Thanks to Felix Wilhelm for the report.
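The idiom behind the fix — test against the ceiling before doubling — applies to any growable buffer. A size_t-based sketch (the original code used unsigned int, which is exactly why UINT_MAX/2 appears in it):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Return a doubled capacity >= need, or 0 if growth would overflow. */
static size_t grow_capacity(size_t cur, size_t need)
{
    size_t cap = cur ? cur : 16;
    while (cap < need) {
        if (cap > SIZE_MAX / 2)   /* doubling would wrap: refuse */
            return 0;
        cap *= 2;
    }
    return cap;
}

int main(void)
{
    printf("%zu\n", grow_capacity(16, 1000));      /* 1024       */
    printf("%zu\n", grow_capacity(16, SIZE_MAX));  /* 0: refused */
    return 0;
}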
circle_diameter(PG_FUNCTION_ARGS) { CIRCLE *circle = PG_GETARG_CIRCLE_P(0); PG_RETURN_FLOAT8(2 * circle->radius); }
0
[ "CWE-703", "CWE-189" ]
postgres
31400a673325147e1205326008e32135a78b4d8a
105,949,840,910,826,460,000,000,000,000,000,000,000
6
Predict integer overflow to avoid buffer overruns. Several functions, mostly type input functions, calculated an allocation size such that the calculation wrapped to a small positive value when arguments implied a sufficiently-large requirement. Writes past the end of the inadvertent small allocation followed shortly thereafter. Coverity identified the path_in() vulnerability; code inspection led to the rest. In passing, add check_stack_depth() to prevent stack overflow in related functions. Back-patch to 8.4 (all supported versions). The non-comment hstore changes touch code that did not exist in 8.4, so that part stops at 9.0. Noah Misch and Heikki Linnakangas, reviewed by Tom Lane. Security: CVE-2014-0064
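"Predicting" the overflow means validating the size computation before allocating, typically by dividing the maximum by the element size. A sketch of the idiom (names hypothetical):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Allocate a header plus n fixed-size elements, refusing any request
 * whose size computation would wrap to a small value. */
static void *alloc_array(size_t header, size_t n, size_t elem)
{
    if (elem != 0 && n > (SIZE_MAX - header) / elem)
        return NULL;   /* the calculation would have wrapped */
    return malloc(header + n * elem);
}

int main(void)
{
    void *p = alloc_array(16, SIZE_MAX / 8, 16);   /* would wrap */
    printf("%s\n", p ? "allocated" : "rejected oversized request");
    free(p);
    return 0;
}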
static struct array_cache *alloc_arraycache(int node, int entries, int batchcount, gfp_t gfp) { size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache); struct array_cache *ac = NULL; ac = kmalloc_node(memsize, gfp, node); init_arraycache(ac, entries, batchcount); return ac; }
0
[ "CWE-703" ]
linux
c4e490cf148e85ead0d1b1c2caaba833f1d5b29f
14,528,903,392,654,502,000,000,000,000,000,000,000
10
mm/slab.c: fix SLAB freelist randomization duplicate entries This patch fixes a bug in the freelist randomization code. When a high random number is used, the freelist will contain duplicate entries. It will result in different allocations sharing the same chunk. It will result in odd behaviours and crashes. It should be uncommon but it depends on the machines. We saw it happening more often on some machines (every few hours of running tests). Fixes: c7ce4f60ac19 ("mm: SLAB freelist randomization") Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: John Sperbeck <[email protected]> Signed-off-by: Thomas Garnier <[email protected]> Cc: Christoph Lameter <[email protected]> Cc: Pekka Enberg <[email protected]> Cc: David Rientjes <[email protected]> Cc: Joonsoo Kim <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
TfLiteRegistration* Register_SPACE_TO_BATCH_ND() { // return Register_SPACE_TO_BATCH_ND_REF(); return Register_SPACE_TO_BATCH_ND_GENERIC_OPT(); }
0
[ "CWE-369" ]
tensorflow
6d36ba65577006affb272335b7c1abd829010708
146,264,596,586,993,180,000,000,000,000,000,000,000
4
Prevent division by 0 PiperOrigin-RevId: 370984990 Change-Id: Ib324955bbeb1cbd97c82fd5d61a00a2697c9a2de
static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer) { struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id, stimer->index); hrtimer_cancel(&stimer->timer); clear_bit(stimer->index, to_hv_vcpu(vcpu)->stimer_pending_bitmap); stimer->msg_pending = false; stimer->exp_time = 0; }
0
[ "CWE-476" ]
linux
919f4ebc598701670e80e31573a58f1f2d2bf918
288,630,239,026,670,400,000,000,000,000,000,000,000
13
KVM: x86: hyper-v: Fix Hyper-V context null-ptr-deref Reported by syzkaller: KASAN: null-ptr-deref in range [0x0000000000000140-0x0000000000000147] CPU: 1 PID: 8370 Comm: syz-executor859 Not tainted 5.11.0-syzkaller #0 RIP: 0010:synic_get arch/x86/kvm/hyperv.c:165 [inline] RIP: 0010:kvm_hv_set_sint_gsi arch/x86/kvm/hyperv.c:475 [inline] RIP: 0010:kvm_hv_irq_routing_update+0x230/0x460 arch/x86/kvm/hyperv.c:498 Call Trace: kvm_set_irq_routing+0x69b/0x940 arch/x86/kvm/../../../virt/kvm/irqchip.c:223 kvm_vm_ioctl+0x12d0/0x2800 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3959 vfs_ioctl fs/ioctl.c:48 [inline] __do_sys_ioctl fs/ioctl.c:753 [inline] __se_sys_ioctl fs/ioctl.c:739 [inline] __x64_sys_ioctl+0x193/0x200 fs/ioctl.c:739 do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xae Hyper-V context is lazily allocated until Hyper-V specific MSRs are accessed or SynIC is enabled. However, the syzkaller testcase sets irq routing table directly w/o enabling SynIC. This results in null-ptr-deref when accessing SynIC Hyper-V context. This patch fixes it. syzkaller source: https://syzkaller.appspot.com/x/repro.c?x=163342ccd00000 Reported-by: [email protected] Fixes: 8f014550dfb1 ("KVM: x86: hyper-v: Make Hyper-V emulation enablement conditional") Signed-off-by: Wanpeng Li <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
size_t bgp_packet_mpattr_start(struct stream *s, struct peer *peer, afi_t afi, safi_t safi, struct bpacket_attr_vec_arr *vecarr, struct attr *attr) { size_t sizep; iana_afi_t pkt_afi; iana_safi_t pkt_safi; afi_t nh_afi; /* Set extended bit always to encode the attribute length as 2 bytes */ stream_putc(s, BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_EXTLEN); stream_putc(s, BGP_ATTR_MP_REACH_NLRI); sizep = stream_get_endp(s); stream_putw(s, 0); /* Marker: Attribute length. */ /* Convert AFI, SAFI to values for packet. */ bgp_map_afi_safi_int2iana(afi, safi, &pkt_afi, &pkt_safi); stream_putw(s, pkt_afi); /* AFI */ stream_putc(s, pkt_safi); /* SAFI */ /* Nexthop AFI */ if (afi == AFI_IP && (safi == SAFI_UNICAST || safi == SAFI_LABELED_UNICAST)) nh_afi = peer_cap_enhe(peer, afi, safi) ? AFI_IP6 : AFI_IP; else nh_afi = BGP_NEXTHOP_AFI_FROM_NHLEN(attr->mp_nexthop_len); /* Nexthop */ bpacket_attr_vec_arr_set_vec(vecarr, BGP_ATTR_VEC_NH, s, attr); switch (nh_afi) { case AFI_IP: switch (safi) { case SAFI_UNICAST: case SAFI_MULTICAST: case SAFI_LABELED_UNICAST: stream_putc(s, 4); stream_put_ipv4(s, attr->nexthop.s_addr); break; case SAFI_MPLS_VPN: stream_putc(s, 12); stream_putl(s, 0); /* RD = 0, per RFC */ stream_putl(s, 0); stream_put(s, &attr->mp_nexthop_global_in, 4); break; case SAFI_ENCAP: case SAFI_EVPN: stream_putc(s, 4); stream_put(s, &attr->mp_nexthop_global_in, 4); break; case SAFI_FLOWSPEC: stream_putc(s, 0); /* no nexthop for flowspec */ default: break; } break; case AFI_IP6: switch (safi) { case SAFI_UNICAST: case SAFI_MULTICAST: case SAFI_LABELED_UNICAST: case SAFI_EVPN: { if (attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) { stream_putc(s, BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL); stream_put(s, &attr->mp_nexthop_global, IPV6_MAX_BYTELEN); stream_put(s, &attr->mp_nexthop_local, IPV6_MAX_BYTELEN); } else { stream_putc(s, IPV6_MAX_BYTELEN); stream_put(s, &attr->mp_nexthop_global, IPV6_MAX_BYTELEN); } } break; case SAFI_MPLS_VPN: { if (attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL) { stream_putc(s, 24); stream_putl(s, 0); /* RD = 0, per RFC */ stream_putl(s, 0); stream_put(s, &attr->mp_nexthop_global, IPV6_MAX_BYTELEN); } else if (attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) { stream_putc(s, 48); stream_putl(s, 0); /* RD = 0, per RFC */ stream_putl(s, 0); stream_put(s, &attr->mp_nexthop_global, IPV6_MAX_BYTELEN); stream_putl(s, 0); /* RD = 0, per RFC */ stream_putl(s, 0); stream_put(s, &attr->mp_nexthop_local, IPV6_MAX_BYTELEN); } } break; case SAFI_ENCAP: stream_putc(s, IPV6_MAX_BYTELEN); stream_put(s, &attr->mp_nexthop_global, IPV6_MAX_BYTELEN); break; case SAFI_FLOWSPEC: stream_putc(s, 0); /* no nexthop for flowspec */ default: break; } break; default: if (safi != SAFI_FLOWSPEC) flog_err( EC_BGP_ATTR_NH_SEND_LEN, "Bad nexthop when sending to %s, AFI %u SAFI %u nhlen %d", peer->host, afi, safi, attr->mp_nexthop_len); break; } /* SNPA */ stream_putc(s, 0); return sizep; }
0
[ "CWE-20", "CWE-436" ]
frr
943d595a018e69b550db08cccba1d0778a86705a
79,299,797,337,651,340,000,000,000,000,000,000,000
122
bgpd: don't use BGP_ATTR_VNC(255) unless ENABLE_BGP_VNC_ATTR is defined Signed-off-by: Lou Berger <[email protected]>
static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev, unsigned index, unsigned start, unsigned count, uint32_t flags, void *data) { int i; bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false; if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) { vfio_msi_disable(vdev, msix); return 0; } if (!(irq_is(vdev, index) || is_irq_none(vdev))) return -EINVAL; if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { int32_t *fds = data; int ret; if (vdev->irq_type == index) return vfio_msi_set_block(vdev, start, count, fds, msix); ret = vfio_msi_enable(vdev, start + count, msix); if (ret) return ret; ret = vfio_msi_set_block(vdev, start, count, fds, msix); if (ret) vfio_msi_disable(vdev, msix); return ret; } if (!irq_is(vdev, index) || start + count > vdev->num_ctx) return -EINVAL; for (i = start; i < start + count; i++) { if (!vdev->ctx[i].trigger) continue; if (flags & VFIO_IRQ_SET_DATA_NONE) { eventfd_signal(vdev->ctx[i].trigger, 1); } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { uint8_t *bools = data; if (bools[i - start]) eventfd_signal(vdev->ctx[i].trigger, 1); } } return 0; }
0
[ "CWE-399", "CWE-190" ]
linux
05692d7005a364add85c6e25a6c4447ce08f913a
252,793,958,502,744,530,000,000,000,000,000,000,000
50
vfio/pci: Fix integer overflows, bitmask check The VFIO_DEVICE_SET_IRQS ioctl did not sufficiently sanitize user-supplied integers, potentially allowing memory corruption. This patch adds appropriate integer overflow checks, checks the range bounds for VFIO_IRQ_SET_DATA_NONE, and also verifies that only single element in the VFIO_IRQ_SET_DATA_TYPE_MASK bitmask is set. VFIO_IRQ_SET_ACTION_TYPE_MASK is already correctly checked later in vfio_pci_set_irqs_ioctl(). Furthermore, a kzalloc is changed to a kcalloc because the use of a kzalloc with an integer multiplication allowed an integer overflow condition to be reached without this patch. kcalloc checks for overflow and should prevent a similar occurrence. Signed-off-by: Vlad Tsyrklevich <[email protected]> Signed-off-by: Alex Williamson <[email protected]>
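Two of the added checks are standard idioms: a range test for start/count written so it cannot wrap, and calloc-style allocation in place of a hand-multiplied malloc (the kzalloc-to-kcalloc change). A userspace sketch:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>

/* Wrap-proof range check: never compute start + count directly. */
static bool range_ok(uint32_t start, uint32_t count, uint32_t num_ctx)
{
    return start < num_ctx && count <= num_ctx - start;
}

int main(void)
{
    /* Naively, 0xFFFFFFFF + 2 wraps to 1 <= 8 and the check "passes". */
    printf("%d\n", range_ok(0xFFFFFFFFu, 2, 8));  /* 0: rejected */
    printf("%d\n", range_ok(2, 3, 8));            /* 1: in range */

    /* calloc validates count * size itself, unlike malloc(count * size). */
    void *ctx = calloc(1u << 30, 16);             /* fails cleanly if huge */
    free(ctx);
    return 0;
}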
key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags, key_perm_t perm) { struct keyring_search_context ctx = { .match_data.cmp = lookup_user_key_possessed, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .flags = KEYRING_SEARCH_NO_STATE_CHECK, }; struct request_key_auth *rka; struct key *key; key_ref_t key_ref, skey_ref; int ret; try_again: ctx.cred = get_current_cred(); key_ref = ERR_PTR(-ENOKEY); switch (id) { case KEY_SPEC_THREAD_KEYRING: if (!ctx.cred->thread_keyring) { if (!(lflags & KEY_LOOKUP_CREATE)) goto error; ret = install_thread_keyring(); if (ret < 0) { key_ref = ERR_PTR(ret); goto error; } goto reget_creds; } key = ctx.cred->thread_keyring; __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_PROCESS_KEYRING: if (!ctx.cred->process_keyring) { if (!(lflags & KEY_LOOKUP_CREATE)) goto error; ret = install_process_keyring(); if (ret < 0) { key_ref = ERR_PTR(ret); goto error; } goto reget_creds; } key = ctx.cred->process_keyring; __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_SESSION_KEYRING: if (!ctx.cred->session_keyring) { /* always install a session keyring upon access if one * doesn't exist yet */ ret = install_user_keyrings(); if (ret < 0) goto error; if (lflags & KEY_LOOKUP_CREATE) ret = join_session_keyring(NULL); else ret = install_session_keyring( ctx.cred->user->session_keyring); if (ret < 0) goto error; goto reget_creds; } else if (ctx.cred->session_keyring == ctx.cred->user->session_keyring && lflags & KEY_LOOKUP_CREATE) { ret = join_session_keyring(NULL); if (ret < 0) goto error; goto reget_creds; } rcu_read_lock(); key = rcu_dereference(ctx.cred->session_keyring); __key_get(key); rcu_read_unlock(); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_USER_KEYRING: if (!ctx.cred->user->uid_keyring) { ret = install_user_keyrings(); if (ret < 0) goto error; } key = ctx.cred->user->uid_keyring; __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_USER_SESSION_KEYRING: if (!ctx.cred->user->session_keyring) { ret = install_user_keyrings(); if (ret < 0) goto error; } key = ctx.cred->user->session_keyring; __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_GROUP_KEYRING: /* group keyrings are not yet supported */ key_ref = ERR_PTR(-EINVAL); goto error; case KEY_SPEC_REQKEY_AUTH_KEY: key = ctx.cred->request_key_auth; if (!key) goto error; __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_REQUESTOR_KEYRING: if (!ctx.cred->request_key_auth) goto error; down_read(&ctx.cred->request_key_auth->sem); if (test_bit(KEY_FLAG_REVOKED, &ctx.cred->request_key_auth->flags)) { key_ref = ERR_PTR(-EKEYREVOKED); key = NULL; } else { rka = ctx.cred->request_key_auth->payload.data[0]; key = rka->dest_keyring; __key_get(key); } up_read(&ctx.cred->request_key_auth->sem); if (!key) goto error; key_ref = make_key_ref(key, 1); break; default: key_ref = ERR_PTR(-EINVAL); if (id < 1) goto error; key = key_lookup(id); if (IS_ERR(key)) { key_ref = ERR_CAST(key); goto error; } key_ref = make_key_ref(key, 0); /* check to see if we possess the key */ ctx.index_key.type = key->type; ctx.index_key.description = key->description; ctx.index_key.desc_len = strlen(key->description); ctx.match_data.raw_data = key; kdebug("check possessed"); skey_ref = search_process_keyrings(&ctx); kdebug("possessed=%p", skey_ref); if (!IS_ERR(skey_ref)) { key_put(key); key_ref = skey_ref; } break; } /* unlink does not use the nominated key in any way, so can skip all * the permission checks as it is only concerned with 
the keyring */ if (lflags & KEY_LOOKUP_FOR_UNLINK) { ret = 0; goto error; } if (!(lflags & KEY_LOOKUP_PARTIAL)) { ret = wait_for_key_construction(key, true); switch (ret) { case -ERESTARTSYS: goto invalid_key; default: if (perm) goto invalid_key; case 0: break; } } else if (perm) { ret = key_validate(key); if (ret < 0) goto invalid_key; } ret = -EIO; if (!(lflags & KEY_LOOKUP_PARTIAL) && key_read_state(key) == KEY_IS_UNINSTANTIATED) goto invalid_key; /* check the permissions */ ret = key_task_permission(key_ref, ctx.cred, perm); if (ret < 0) goto invalid_key; key->last_used_at = current_kernel_time().tv_sec; error: put_cred(ctx.cred); return key_ref; invalid_key: key_ref_put(key_ref); key_ref = ERR_PTR(ret); goto error; /* if we attempted to install a keyring, then it may have caused new * creds to be installed */ reget_creds: put_cred(ctx.cred); goto try_again; }
0
[ "CWE-20" ]
linux
363b02dab09b3226f3bd1420dad9c72b79a42a76
270,789,946,000,235,960,000,000,000,000,000,000,000
225
KEYS: Fix race between updating and finding a negative key Consolidate KEY_FLAG_INSTANTIATED, KEY_FLAG_NEGATIVE and the rejection error into one field such that: (1) The instantiation state can be modified/read atomically. (2) The error can be accessed atomically with the state. (3) The error isn't stored unioned with the payload pointers. This deals with the problem that the state is spread over three different objects (two bits and a separate variable) and reading or updating them atomically isn't practical, given that not only can uninstantiated keys change into instantiated or rejected keys, but rejected keys can also turn into instantiated keys - and someone accessing the key might not be using any locking. The main side effect of this problem is that what was held in the payload may change, depending on the state. For instance, you might observe the key to be in the rejected state. You then read the cached error, but if the key semaphore wasn't locked, the key might've become instantiated between the two reads - and you might now have something in hand that isn't actually an error code. The state is now KEY_IS_UNINSTANTIATED, KEY_IS_POSITIVE or a negative error code if the key is negatively instantiated. The key_is_instantiated() function is replaced with key_is_positive() to avoid confusion as negative keys are also 'instantiated'. Additionally, barriering is included: (1) Order payload-set before state-set during instantiation. (2) Order state-read before payload-read when using the key. Further separate barriering is necessary if RCU is being used to access the payload content after reading the payload pointers. Fixes: 146aa8b1453b ("KEYS: Merge the type-specific data with the payload data") Cc: [email protected] # v4.4+ Reported-by: Eric Biggers <[email protected]> Signed-off-by: David Howells <[email protected]> Reviewed-by: Eric Biggers <[email protected]>
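Points (1) and (2) of the barriering are the release/acquire publication pattern, which C11 atomics express directly. A userspace sketch (type and names hypothetical, payload simplified to one pointer):

#include <stdatomic.h>
#include <stdio.h>

enum { KEY_UNINSTANTIATED = 0, KEY_POSITIVE = 1 };

struct key_model {
    void *payload;
    _Atomic int state;
};

/* Writer: set the payload, then publish the state with release ordering. */
static void key_instantiate(struct key_model *k, void *payload)
{
    k->payload = payload;
    atomic_store_explicit(&k->state, KEY_POSITIVE, memory_order_release);
}

/* Reader: load the state with acquire ordering before reading the payload. */
static void *key_payload_if_positive(struct key_model *k)
{
    if (atomic_load_explicit(&k->state, memory_order_acquire) == KEY_POSITIVE)
        return k->payload;   /* guaranteed to observe the writer's store */
    return NULL;
}

int main(void)
{
    struct key_model k = { .payload = NULL, .state = KEY_UNINSTANTIATED };
    static int data = 42;
    key_instantiate(&k, &data);
    printf("payload: %p\n", key_payload_if_positive(&k));
    return 0;
}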
static void Rp_exec(js_State *J) { js_RegExp_prototype_exec(J, js_toregexp(J, 0), js_tostring(J, 1)); }
0
[ "CWE-400", "CWE-674", "CWE-787" ]
mujs
00d4606c3baf813b7b1c176823b2729bf51002a2
69,215,517,374,774,240,000,000,000,000,000,000,000
4
Bug 700937: Limit recursion in regexp matcher. Also handle negative return code as an error in the JS bindings.
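Bounding recursion means threading a depth counter through the recursive calls and returning an error past a fixed cap — and, per the second sentence of the message, treating that negative return as an error at the call site. A toy matcher showing the shape (the cap and grammar are made up):

#include <stdio.h>

#define MAX_MATCH_DEPTH 1000   /* hypothetical cap */

/* 1 = match, 0 = no match, -1 = recursion limit hit (an error). */
static int match(const char *re, const char *s, int depth)
{
    if (depth > MAX_MATCH_DEPTH)
        return -1;
    if (*re == '\0')
        return 1;
    if (*s != '\0' && (*re == '.' || *re == *s))
        return match(re + 1, s + 1, depth + 1);
    return 0;
}

int main(void)
{
    int r = match("a.c", "abc", 0);
    if (r < 0)
        fprintf(stderr, "regexp too complex\n");  /* error, not "no match" */
    else
        printf("match: %s\n", r ? "yes" : "no");
    return 0;
}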
trnext(t) struct tr *t; { for (;;) { if (!t->gen) { if (t->p == t->pend) return -1; if (t->p < t->pend - 1 && *t->p == '\\') { t->p++; } t->now = *(USTR)t->p++; if (t->p < t->pend - 1 && *t->p == '-') { t->p++; if (t->p < t->pend) { if (t->now > *(USTR)t->p) { t->p++; continue; } t->gen = 1; t->max = *(USTR)t->p++; } } return t->now; } else if (++t->now < t->max) { return t->now; } else { t->gen = 0; return t->max; } } }
0
[ "CWE-20" ]
ruby
e926ef5233cc9f1035d3d51068abe9df8b5429da
298,914,096,854,153,140,000,000,000,000,000,000,000
32
* random.c (rb_genrand_int32, rb_genrand_real), intern.h: Export. * string.c (rb_str_tmp_new), intern.h: New function. git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_1_8@16014 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
static JSValue js_sys_prompt_echo_off(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv) { Bool echo_off; if (!argc) return GF_JS_EXCEPTION(ctx); echo_off = JS_ToBool(ctx, argv[0]); if (argc<2) gf_prompt_set_echo_off(echo_off); return JS_UNDEFINED; }
0
[ "CWE-787" ]
gpac
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
215,592,758,371,428,440,000,000,000,000,000,000,000
9
fixed #2138
static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat) { struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); int index; u32 *logical_apic_id_table; int dlid = GET_APIC_LOGICAL_ID(ldr); if (!dlid) return NULL; if (flat) { /* flat */ index = ffs(dlid) - 1; if (index > 7) return NULL; } else { /* cluster */ int cluster = (dlid & 0xf0) >> 4; int apic = ffs(dlid & 0x0f) - 1; if ((apic < 0) || (apic > 7) || (cluster >= 0xf)) return NULL; index = (cluster << 2) + apic; } logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page); return &logical_apic_id_table[index]; }
0
[ "CWE-401" ]
linux
d80b64ff297e40c2b6f7d7abc1b3eba70d22a068
211,718,659,853,731,450,000,000,000,000,000,000,000
28
KVM: SVM: Fix potential memory leak in svm_cpu_init() When kmalloc of memory for sd->sev_vmcbs fails, we forget to free the page held by sd->save_area. Also get rid of the var r, as '-ENOMEM' is actually the only possible outcome here. Reviewed-by: Liran Alon <[email protected]> Reviewed-by: Vitaly Kuznetsov <[email protected]> Signed-off-by: Miaohe Lin <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
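The bug class is a half-initialized object on an error path; the cure is to unwind everything acquired so far before returning. A hedged sketch with hypothetical field names mirroring the ones in the message:

#include <stdlib.h>

struct cpu_data {
    void  *save_area;
    void **sev_vmcbs;
};

/* Returns 0 on success, -1 (standing in for -ENOMEM) on failure. */
static int cpu_init(struct cpu_data *sd, size_t max_sev_asid)
{
    sd->save_area = malloc(4096);
    if (!sd->save_area)
        return -1;

    sd->sev_vmcbs = calloc(max_sev_asid + 1, sizeof(void *));
    if (!sd->sev_vmcbs) {
        free(sd->save_area);   /* the free the original error path forgot */
        sd->save_area = NULL;
        return -1;
    }
    return 0;
}

int main(void)
{
    struct cpu_data sd;
    if (cpu_init(&sd, 16) != 0)
        return 1;
    free(sd.sev_vmcbs);
    free(sd.save_area);
    return 0;
}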
SPL_METHOD(FilesystemIterator, key) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (SPL_FILE_DIR_KEY(intern, SPL_FILE_DIR_KEY_AS_FILENAME)) { RETURN_STRING(intern->u.dir.entry.d_name, 1); } else { spl_filesystem_object_get_file_name(intern TSRMLS_CC); RETURN_STRINGL(intern->file_name, intern->file_name_len, 1); } }
1
[ "CWE-190" ]
php-src
7245bff300d3fa8bacbef7897ff080a6f1c23eba
246,450,915,863,881,470,000,000,000,000,000,000,000
15
Fix bug #72262 - do not overflow int
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst, struct flowi *fl, struct request_sock *req, struct tcp_fastopen_cookie *foc, enum tcp_synack_type synack_type) { const struct inet_request_sock *ireq = inet_rsk(req); struct flowi4 fl4; int err = -1; struct sk_buff *skb; /* First, grab a route. */ if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) return -1; skb = tcp_make_synack(sk, dst, req, foc, synack_type); if (skb) { __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr); err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, ireq->ir_rmt_addr, ireq->opt); err = net_xmit_eval(err); } return err; }
0
[ "CWE-284" ]
linux
ac6e780070e30e4c35bd395acfe9191e6268bdd3
281,017,652,615,394,050,000,000,000,000,000,000,000
28
tcp: take care of truncations done by sk_filter() With syzkaller help, Marco Grassi found a bug in TCP stack, crashing in tcp_collapse() Root cause is that sk_filter() can truncate the incoming skb, but TCP stack was not really expecting this to happen. It probably was expecting a simple DROP or ACCEPT behavior. We first need to make sure no part of TCP header could be removed. Then we need to adjust TCP_SKB_CB(skb)->end_seq Many thanks to syzkaller team and Marco for giving us a reproducer. Signed-off-by: Eric Dumazet <[email protected]> Reported-by: Marco Grassi <[email protected]> Reported-by: Vladis Dronov <[email protected]> Signed-off-by: David S. Miller <[email protected]>
int kvm_dev_ioctl_check_extension(long ext) { int r; switch (ext) { case KVM_CAP_IRQCHIP: case KVM_CAP_HLT: case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: case KVM_CAP_SET_TSS_ADDR: case KVM_CAP_EXT_CPUID: case KVM_CAP_CLOCKSOURCE: case KVM_CAP_PIT: case KVM_CAP_NOP_IO_DELAY: case KVM_CAP_MP_STATE: case KVM_CAP_SYNC_MMU: case KVM_CAP_USER_NMI: case KVM_CAP_REINJECT_CONTROL: case KVM_CAP_IRQ_INJECT_STATUS: case KVM_CAP_ASSIGN_DEV_IRQ: case KVM_CAP_IRQFD: case KVM_CAP_IOEVENTFD: case KVM_CAP_PIT2: case KVM_CAP_PIT_STATE2: case KVM_CAP_SET_IDENTITY_MAP_ADDR: case KVM_CAP_XEN_HVM: case KVM_CAP_ADJUST_CLOCK: case KVM_CAP_VCPU_EVENTS: case KVM_CAP_HYPERV: case KVM_CAP_HYPERV_VAPIC: case KVM_CAP_HYPERV_SPIN: case KVM_CAP_PCI_SEGMENT: case KVM_CAP_DEBUGREGS: case KVM_CAP_X86_ROBUST_SINGLESTEP: case KVM_CAP_XSAVE: case KVM_CAP_ASYNC_PF: case KVM_CAP_GET_TSC_KHZ: r = 1; break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; case KVM_CAP_VAPIC: r = !kvm_x86_ops->cpu_has_accelerated_tpr(); break; case KVM_CAP_NR_VCPUS: r = KVM_SOFT_MAX_VCPUS; break; case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; break; case KVM_CAP_NR_MEMSLOTS: r = KVM_MEMORY_SLOTS; break; case KVM_CAP_PV_MMU: /* obsolete */ r = 0; break; case KVM_CAP_IOMMU: r = iommu_present(&pci_bus_type); break; case KVM_CAP_MCE: r = KVM_MAX_MCE_BANKS; break; case KVM_CAP_XCRS: r = cpu_has_xsave; break; case KVM_CAP_TSC_CONTROL: r = kvm_has_tsc_control; break; case KVM_CAP_TSC_DEADLINE_TIMER: r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER); break; default: r = 0; break; } return r; }
0
[]
kvm
0769c5de24621141c953fbe1f943582d37cb4244
137,811,468,008,432,440,000,000,000,000,000,000,000
78
KVM: x86: extend "struct x86_emulate_ops" with "get_cpuid" In order to be able to perform checks on CPU-specific properties within the emulator, the function "get_cpuid" is introduced. With "get_cpuid" it is possible to virtually call the guest's "cpuid" opcode without changing the VM's context. [mtosatti: cleanup/beautify code] Signed-off-by: Stephan Baerwolf <[email protected]> Signed-off-by: Marcelo Tosatti <[email protected]>
find_minimal_sface_of_shell(T& D, SFace_visited_hash& Vi) : Base(D), Done(Vi), SORT(D) {}
0
[ "CWE-125" ]
cgal
5a1ab45058112f8647c14c02f58905ecc597ec76
235,129,216,872,096,460,000,000,000,000,000,000,000
2
Fix Nef_3
_outSubqueryScan(StringInfo str, const SubqueryScan *node) { WRITE_NODE_TYPE("SUBQUERYSCAN"); _outScanInfo(str, (const Scan *) node); WRITE_NODE_FIELD(subplan); }
0
[ "CWE-362" ]
postgres
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
150,223,053,742,601,510,000,000,000,000,000,000,000
8
Avoid repeated name lookups during table and index DDL. If the name lookups come to different conclusions due to concurrent activity, we might perform some parts of the DDL on a different table than other parts. At least in the case of CREATE INDEX, this can be used to cause the permissions checks to be performed against a different table than the index creation, allowing for a privilege escalation attack. This changes the calling convention for DefineIndex, CreateTrigger, transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible (in 9.2 and newer), and AlterTable (in 9.1 and older). In addition, CheckRelationOwnership is removed in 9.2 and newer and the calling convention is changed in older branches. A field has also been added to the Constraint node (FkConstraint in 8.4). Third-party code calling these functions or using the Constraint node will require updating. Report by Andres Freund. Patch by Robert Haas and Andres Freund, reviewed by Tom Lane. Security: CVE-2014-0062
ext4_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { struct inode *inode = mapping->host; /* If the file has inline data, no need to do readpages. */ if (ext4_has_inline_data(inode)) return 0; return ext4_mpage_readpages(mapping, pages, NULL, nr_pages, true); }
0
[ "CWE-416", "CWE-401" ]
linux
4ea99936a1630f51fc3a2d61a58ec4a1c4b7d55a
326,638,903,904,422,730,000,000,000,000,000,000,000
11
ext4: add more paranoia checking in ext4_expand_extra_isize handling It's possible to specify a non-zero s_want_extra_isize via debugging option, and this can cause bad things(tm) to happen when using a file system with an inode size of 128 bytes. Add better checking when the file system is mounted, as well as when we are actually trying to do the inode expansion. Link: https://lore.kernel.org/r/[email protected] Reported-by: [email protected] Reported-by: [email protected] Reported-by: [email protected] Signed-off-by: Theodore Ts'o <[email protected]> Cc: [email protected]
static uint16_t nvme_map_addr_pmr(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr, size_t len) { if (!len) { return NVME_SUCCESS; } if (!nvme_addr_is_pmr(n, addr) || !nvme_addr_is_pmr(n, addr + len - 1)) { return NVME_DATA_TRAS_ERROR; } qemu_iovec_add(iov, nvme_addr_to_pmr(n, addr), len); return NVME_SUCCESS; }
0
[]
qemu
736b01642d85be832385063f278fe7cd4ffb5221
256,735,362,771,553,900,000,000,000,000,000,000,000
15
hw/nvme: fix CVE-2021-3929 This fixes CVE-2021-3929 "locally" by denying DMA to the iomem of the device itself. This still allows DMA to MMIO regions of other devices (e.g. doing P2P DMA to the controller memory buffer of another NVMe device). Fixes: CVE-2021-3929 Reported-by: Qiuhao Li <[email protected]> Reviewed-by: Keith Busch <[email protected]> Reviewed-by: Philippe Mathieu-Daudé <[email protected]> Signed-off-by: Klaus Jensen <[email protected]>
join_read_last_key(JOIN_TAB *tab) { int error; TABLE *table= tab->table; if (!table->file->inited && (error= table->file->ha_index_init(tab->ref.key, tab->sorted))) { (void) report_error(table, error); return 1; } if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref)) return -1; if ((error= table->file->prepare_index_key_scan_map(tab->ref.key_buff, make_prev_keypart_map(tab->ref.key_parts)))) { report_error(table,error); return -1; } if ((error= table->file->ha_index_read_map(table->record[0], tab->ref.key_buff, make_prev_keypart_map(tab->ref.key_parts), HA_READ_PREFIX_LAST))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) return report_error(table, error); return -1; /* purecov: inspected */ } return 0; }
0
[ "CWE-89" ]
server
5ba77222e9fe7af8ff403816b5338b18b342053c
181,728,711,006,414,130,000,000,000,000,000,000,000
30
MDEV-21028 Server crashes in Query_arena::set_query_arena upon SELECT from view If the view has algorithm=temptable, it is not updatable, so DEFAULT() for its fields is meaningless, and thus it's NULL or 0/'' for NOT NULL columns.
static int b_unpack (lua_State *L) { Header h; const char *fmt = luaL_checkstring(L, 1); size_t ld; const char *data = luaL_checklstring(L, 2, &ld); size_t pos = luaL_optinteger(L, 3, 1) - 1; defaultoptions(&h); lua_settop(L, 2); while (*fmt) { int opt = *fmt++; size_t size = optsize(L, opt, &fmt); pos += gettoalign(pos, &h, opt, size); luaL_argcheck(L, pos+size <= ld, 2, "data string too short"); luaL_checkstack(L, 1, "too many results"); switch (opt) { case 'b': case 'B': case 'h': case 'H': case 'l': case 'L': case 'T': case 'i': case 'I': { /* integer types */ int issigned = islower(opt); lua_Number res = getinteger(data+pos, h.endian, issigned, size); lua_pushnumber(L, res); break; } case 'x': { break; } case 'f': { float f; memcpy(&f, data+pos, size); correctbytes((char *)&f, sizeof(f), h.endian); lua_pushnumber(L, f); break; } case 'd': { double d; memcpy(&d, data+pos, size); correctbytes((char *)&d, sizeof(d), h.endian); lua_pushnumber(L, d); break; } case 'c': { if (size == 0) { if (!lua_isnumber(L, -1)) luaL_error(L, "format `c0' needs a previous size"); size = lua_tonumber(L, -1); lua_pop(L, 1); luaL_argcheck(L, pos+size <= ld, 2, "data string too short"); } lua_pushlstring(L, data+pos, size); break; } case 's': { const char *e = (const char *)memchr(data+pos, '\0', ld - pos); if (e == NULL) luaL_error(L, "unfinished string in data"); size = (e - (data+pos)) + 1; lua_pushlstring(L, data+pos, size - 1); break; } default: controloptions(L, opt, &fmt, &h); } pos += size; } lua_pushinteger(L, pos + 1); return lua_gettop(L) - 2; }
1
[ "CWE-190", "CWE-787" ]
redis
1eb08bcd4634ae42ec45e8284923ac048beaa4c3
22,297,221,737,219,190,000,000,000,000,000,000,000
65
Security: update Lua struct package for security. During an audit, Apple found that the "struct" Lua package we ship with Redis (http://www.inf.puc-rio.br/~roberto/struct/) contains a security problem. A bounds-checking statement fails because of integer overflow. The bug has existed since we initially integrated this package with Lua, when scripting was introduced, so every version of Redis exposing EVAL/EVALSHA is affected. Instead of just fixing the bug, the library was updated to the latest version shipped by the author.
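The failing bounds check in b_unpack above is luaL_argcheck(L, pos+size <= ld, ...): with attacker-chosen sizes, pos + size can wrap around in size_t and the check passes even though the read runs past the data string. A sketch of the overflow-safe form, assuming nothing beyond the C standard library (the updated upstream library performs the equivalent rearrangement):

#include <stdbool.h>
#include <stddef.h>

/* Overflow-safe version of "pos + size <= ld": both comparisons operate on
 * values already known to be in range, so no addition can wrap. */
static bool unpack_fits(size_t pos, size_t size, size_t ld)
{
    return pos <= ld && size <= ld - pos;
}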
Value ExpressionRandom::evaluate(const Document& root, Variables* variables) const { return Value(getRandomValue()); }
0
[]
mongo
1772b9a0393b55e6a280a35e8f0a1f75c014f301
200,154,399,937,294,700,000,000,000,000,000,000,000
3
SERVER-49404 Enforce additional checks in $arrayToObject
static u32 mp4box_cleanup(u32 ret_code) { if (mpd_base_urls) { gf_free(mpd_base_urls); mpd_base_urls = NULL; } if (sdp_lines) { gf_free(sdp_lines); sdp_lines = NULL; } if (metas) { u32 i; for (i=0; i<nb_meta_act; i++) { if (metas[i].enc_type) gf_free(metas[i].enc_type); if (metas[i].mime_type) gf_free(metas[i].mime_type); if (metas[i].szName) gf_free(metas[i].szName); if (metas[i].szPath) gf_free(metas[i].szPath); if (metas[i].keep_props) gf_free(metas[i].keep_props); if (metas[i].image_props) { GF_ImageItemProperties *iprops = metas[i].image_props; if (iprops->overlay_offsets) gf_free(iprops->overlay_offsets); if (iprops->aux_urn) gf_free((char *) iprops->aux_urn); if (iprops->aux_data) gf_free((char *) iprops->aux_data); gf_free(iprops); } } gf_free(metas); metas = NULL; } if (tracks) { u32 i; for (i = 0; i<nb_track_act; i++) { if (tracks[i].out_name) gf_free(tracks[i].out_name); if (tracks[i].src_name) gf_free(tracks[i].src_name); if (tracks[i].string) gf_free(tracks[i].string); if (tracks[i].kind_scheme) gf_free(tracks[i].kind_scheme); if (tracks[i].kind_value) gf_free(tracks[i].kind_value); } gf_free(tracks); tracks = NULL; } if (tsel_acts) { gf_free(tsel_acts); tsel_acts = NULL; } if (brand_add) { gf_free(brand_add); brand_add = NULL; } if (brand_rem) { gf_free(brand_rem); brand_rem = NULL; } if (dash_inputs) { u32 i, j; for (i = 0; i<nb_dash_inputs; i++) { GF_DashSegmenterInput *di = &dash_inputs[i]; if (di->nb_baseURL) { for (j = 0; j<di->nb_baseURL; j++) { gf_free(di->baseURL[j]); } gf_free(di->baseURL); } if (di->rep_descs) { for (j = 0; j<di->nb_rep_descs; j++) { gf_free(di->rep_descs[j]); } gf_free(di->rep_descs); } if (di->as_descs) { for (j = 0; j<di->nb_as_descs; j++) { gf_free(di->as_descs[j]); } gf_free(di->as_descs); } if (di->as_c_descs) { for (j = 0; j<di->nb_as_c_descs; j++) { gf_free(di->as_c_descs[j]); } gf_free(di->as_c_descs); } if (di->p_descs) { for (j = 0; j<di->nb_p_descs; j++) { gf_free(di->p_descs[j]); } gf_free(di->p_descs); } if (di->representationID) gf_free(di->representationID); if (di->periodID) gf_free(di->periodID); if (di->xlink) gf_free(di->xlink); if (di->seg_template) gf_free(di->seg_template); if (di->hls_pl) gf_free(di->hls_pl); if (di->source_opts) gf_free(di->source_opts); if (di->filter_chain) gf_free(di->filter_chain); if (di->roles) { for (j = 0; j<di->nb_roles; j++) { gf_free(di->roles[j]); } gf_free(di->roles); } } gf_free(dash_inputs); dash_inputs = NULL; } if (logfile) gf_fclose(logfile); gf_sys_close(); #ifdef GPAC_MEMORY_TRACKING if (mem_track && (gf_memory_size() || gf_file_handles_count() )) { gf_log_set_tool_level(GF_LOG_MEMORY, GF_LOG_INFO); gf_memory_print(); } #endif return ret_code;
0
[ "CWE-787" ]
gpac
4e56ad72ac1afb4e049a10f2d99e7512d7141f9d
264,239,850,165,742,040,000,000,000,000,000,000,000
121
fixed #2216
static int test_unaligned_bulk( struct usbtest_dev *tdev, int pipe, unsigned length, int iterations, unsigned transfer_flags, const char *label) { int retval; struct urb *urb = usbtest_alloc_urb(testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1, 0, simple_callback); if (!urb) return -ENOMEM; retval = simple_io(tdev, urb, iterations, 0, 0, label); simple_free_urb(urb); return retval; }
0
[ "CWE-476" ]
linux
7c80f9e4a588f1925b07134bb2e3689335f6c6d8
197,877,331,208,853,300,000,000,000,000,000,000,000
19
usb: usbtest: fix NULL pointer dereference If the usbtest driver encounters a device with an IN bulk endpoint but no OUT bulk endpoint, it will try to dereference a NULL pointer (out->desc.bEndpointAddress). The problem can be solved by adding a missing test. Signed-off-by: Alan Stern <[email protected]> Reported-by: Andrey Konovalov <[email protected]> Tested-by: Andrey Konovalov <[email protected]> Signed-off-by: Felipe Balbi <[email protected]>
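The missing test the message refers to is a NULL check on the OUT bulk endpoint before its descriptor is dereferenced. A standalone sketch of the guard, with a hypothetical endpoint struct standing in for the kernel's usb_host_endpoint:

#include <stddef.h>

struct endpoint { unsigned char bEndpointAddress; }; /* stand-in type */

/* Bail out when the device exposed no OUT bulk endpoint at all, the case
 * that previously dereferenced a NULL pointer in usbtest. */
static int pick_bulk_out(const struct endpoint *out, unsigned char *addr)
{
    if (out == NULL)
        return -1; /* the kernel would return an errno such as -EINVAL */
    *addr = out->bEndpointAddress;
    return 0;
}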
bool LEX::stmt_create_udf_function(const DDL_options_st &options, enum_sp_aggregate_type agg_type, const Lex_ident_sys_st &name, Item_result return_type, const LEX_CSTRING &soname) { if (stmt_create_function_start(options)) return true; if (unlikely(is_native_function(thd, &name))) { my_error(ER_NATIVE_FCT_NAME_COLLISION, MYF(0), name.str); return true; } sql_command= SQLCOM_CREATE_FUNCTION; udf.name= name; udf.returns= return_type; udf.dl= soname.str; udf.type= agg_type == GROUP_AGGREGATE ? UDFTYPE_AGGREGATE : UDFTYPE_FUNCTION; stmt_create_routine_finalize(); return false; }
0
[ "CWE-703" ]
server
39feab3cd31b5414aa9b428eaba915c251ac34a2
58,847,578,718,747,020,000,000,000,000,000,000,000
23
MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT If an INSERT/REPLACE SELECT statement contained an ON expression in the top level select and this expression used a subquery with a column reference that could not be resolved, then an attempt to resolve this reference as an outer reference caused a crash of the server. This happened because the outer context field in the Name_resolution_context structure was not set to NULL for such references. Rather, it pointed to the first element in the select_stack. Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select() method when parsing a SELECT construct. Approved by Oleksandr Byelkin <[email protected]>
HTC_Status(enum htc_status_e e) { switch (e) { #define HTC_STATUS(e, n, s, l) \ case HTC_S_ ## e: return (s); #include "tbl/htc.h" default: WRONG("HTC_Status"); } NEEDLESS(return (NULL)); }
0
[ "CWE-617" ]
varnish-cache
2d8fc1a784a1e26d78c30174923a2b14ee2ebf62
283,154,987,654,548,450,000,000,000,000,000,000,000
11
Take sizeof pool_task into account when reserving WS in SES_Wait The assert on WS_ReserveSize() in ses_handle() can not trip because sizeof (struct pool_task) is less than sizeof (struct waited). But to safeguard against future problems if that were to change, this patch makes sure that the session workspace can hold the larger of the two before entering the waiter, erroring out if not.
static void CompressZip(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. // { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } #if TINYEXR_USE_MINIZ // // Compress the data using miniz // miniz::mz_ulong outSize = miniz::mz_compressBound(src_size); int ret = miniz::mz_compress( dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)), src_size); assert(ret == miniz::MZ_OK); (void)ret; compressedSize = outSize; #else uLong outSize = compressBound(static_cast<uLong>(src_size)); int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)), src_size); assert(ret == Z_OK); compressedSize = outSize; #endif // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } }
0
[ "CWE-20", "CWE-190" ]
tinyexr
a685e3332f61cd4e59324bf3f669d36973d64270
334,068,664,508,901,450,000,000,000,000,000,000,000
80
Make line_no with a too-large value (2**20) invalid. Fixes #124
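The fix rejects records whose line number lies outside the image's declared data window instead of using it to index into buffers. A minimal sketch of that validity check, assuming hypothetical data_min_y/data_max_y bounds taken from the parsed header:

#include <stdbool.h>

/* Reject a scanline/tile record whose line_no falls outside the declared
 * [data_min_y, data_max_y] window; a crafted value near 2**20 previously
 * drove an out-of-bounds write. */
static bool line_no_valid(int line_no, int data_min_y, int data_max_y)
{
    return line_no >= data_min_y && line_no <= data_max_y;
}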
static int acm_start_wb(struct acm *acm, struct acm_wb *wb) { int rc; acm->transmitting++; wb->urb->transfer_buffer = wb->buf; wb->urb->transfer_dma = wb->dmah; wb->urb->transfer_buffer_length = wb->len; wb->urb->dev = acm->dev; rc = usb_submit_urb(wb->urb, GFP_ATOMIC); if (rc < 0) { dev_err(&acm->data->dev, "%s - usb_submit_urb(write bulk) failed: %d\n", __func__, rc); acm_write_done(acm, wb); } return rc; }
0
[ "CWE-703" ]
linux
8835ba4a39cf53f705417b3b3a94eb067673f2c9
40,762,902,776,299,246,000,000,000,000,000,000,000
20
USB: cdc-acm: more sanity checking An attack has become available in which a device pretends to be a quirky device, circumventing normal sanity checks, and crashes the kernel by presenting an insufficient number of interfaces. This patch adds a check to the code path for quirky devices. Signed-off-by: Oliver Neukum <[email protected]> CC: [email protected] Signed-off-by: Greg Kroah-Hartman <[email protected]>
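The added check amounts to verifying the interface count before the quirky-device path indexes into the interface array. A sketch of the guard; the required count of two (control plus data) is an assumption about this driver's layout:

#include <stdbool.h>

/* Quirky-device probe guard: indexing interface 0 and interface 1 is only
 * safe when the configuration really carries at least two interfaces. */
static bool quirk_interfaces_ok(unsigned int num_interfaces)
{
    return num_interfaces >= 2;
}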
nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) { if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) return _nfs4_opendata_reclaim_to_nfs4_state(data); return _nfs4_opendata_to_nfs4_state(data); }
0
[ "CWE-119", "CWE-401" ]
linux
7d3e91a89b7adbc2831334def9e494dd9892f9af
103,136,065,957,576,920,000,000,000,000,000,000,000
6
NFSv4: Check for buffer length in __nfs4_get_acl_uncached Commit 1f1ea6c "NFSv4: Fix buffer overflow checking in __nfs4_get_acl_uncached" accidentally dropped the check for a too-small result buffer length. If someone uses getxattr on "system.nfs4_acl" on an NFSv4 mount supporting ACLs, the ACL has not been cached and the buffer supplied is too short, we still copy the complete ACL, resulting in kernel and user space memory corruption. Signed-off-by: Sven Wegener <[email protected]> Cc: [email protected] Signed-off-by: Trond Myklebust <[email protected]>
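The dropped check compared the caller's buffer length against the ACL size before copying. A standalone sketch of the restored pattern, using -ERANGE as getxattr semantics expect (the helper name is hypothetical):

#include <errno.h>
#include <string.h>
#include <sys/types.h>

/* Copy an ACL into a user-supplied buffer only when it fits; otherwise
 * report failure instead of writing acllen bytes into a shorter buffer. */
static ssize_t copy_acl(char *dst, size_t dstlen, const char *acl, size_t acllen)
{
    if (dstlen < acllen)
        return -ERANGE;
    memcpy(dst, acl, acllen);
    return (ssize_t)acllen;
}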
psf_close_handle (HANDLE handle) { if (handle == NULL) return 0 ; if (CloseHandle (handle) == 0) return -1 ; return 0 ; } /* psf_close_handle */
0
[ "CWE-369", "CWE-189" ]
libsndfile
725c7dbb95bfaf8b4bb7b04820e3a00cceea9ce6
319,551,029,346,683,130,000,000,000,000,000,000,000
9
src/file_io.c : Prevent potential divide-by-zero. Closes: https://github.com/erikd/libsndfile/issues/92
XML_GetInputContext(XML_Parser parser, int *offset, int *size) { #ifdef XML_CONTEXT_BYTES if (parser == NULL) return NULL; if (eventPtr && buffer) { if (offset != NULL) *offset = (int)(eventPtr - buffer); if (size != NULL) *size = (int)(bufferEnd - buffer); return buffer; } #else (void)parser; (void)offset; (void)size; #endif /* defined XML_CONTEXT_BYTES */ return (char *) 0; }
0
[ "CWE-611" ]
libexpat
c4bf96bb51dd2a1b0e185374362ee136fe2c9d7f
33,389,823,142,760,390,000,000,000,000,000,000,000
19
xmlparse.c: Fix external entity infinite loop bug (CVE-2017-9233)
void mnt_pin(struct vfsmount *mnt) { spin_lock(&vfsmount_lock); mnt->mnt_pinned++; spin_unlock(&vfsmount_lock); }
0
[ "CWE-269" ]
linux-2.6
ee6f958291e2a768fd727e7a67badfff0b67711a
271,180,373,454,972,630,000,000,000,000,000,000,000
6
check privileges before setting mount propagation There's a missing check for CAP_SYS_ADMIN in do_change_type(). Signed-off-by: Miklos Szeredi <[email protected]> Cc: Al Viro <[email protected]> Cc: Christoph Hellwig <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
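The missing check is a capability gate at the top of do_change_type(). A standalone sketch of the shape of the fix, with has_cap_sys_admin() as a hypothetical stand-in for the kernel's capable(CAP_SYS_ADMIN):

#include <errno.h>
#include <stdbool.h>

extern bool has_cap_sys_admin(void); /* assumed provided by the environment */

/* do_change_type()-style entry point: refuse to alter mount propagation
 * for unprivileged callers before doing any work. */
static int change_mount_type(int new_type)
{
    if (!has_cap_sys_admin())
        return -EPERM;
    (void)new_type; /* ... perform the propagation change here ... */
    return 0;
}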
R_API RBinJavaAttrInfo *r_bin_java_read_next_attr_from_buffer(ut8 *buffer, st64 sz, st64 buf_offset) { RBinJavaAttrInfo *attr = NULL; ut64 offset = 0; ut16 name_idx; st64 nsz; if (!buffer || ((int) sz) < 4 || buf_offset < 0) { eprintf ("r_bin_Java_read_next_attr_from_buffer: invalid buffer size %d\n", (int) sz); return NULL; } name_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; nsz = R_BIN_JAVA_UINT (buffer, offset); offset += 4; char *name = r_bin_java_get_utf8_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, name_idx); if (!name) { name = strdup ("unknown"); } IFDBG eprintf("r_bin_java_read_next_attr: name_idx = %d is %s\n", name_idx, name); RBinJavaAttrMetas *type_info = r_bin_java_get_attr_type_by_name (name); if (type_info) { IFDBG eprintf("Typeinfo: %s, was %s\n", type_info->name, name); // printf ("SZ %d %d %d\n", nsz, sz, buf_offset); if (nsz > sz) { free (name); return NULL; } if ((attr = type_info->allocs->new_obj (buffer, nsz, buf_offset))) { attr->metas->ord = (R_BIN_JAVA_GLOBAL_BIN->attr_idx++); } } else { eprintf ("r_bin_java_read_next_attr_from_buffer: Cannot find type_info for %s\n", name); } free (name); return attr; }
0
[ "CWE-125" ]
radare2
e9ce0d64faf19fa4e9c260250fbdf25e3c11e152
247,442,855,713,269,700,000,000,000,000,000,000,000
37
Fix #10498 - Fix crash in fuzzed java files (#10511)
lacks_directory_count (NautilusFile *file) { return !file->details->directory_count_is_up_to_date && nautilus_file_should_show_directory_item_count (file); }
0
[]
nautilus
7632a3e13874a2c5e8988428ca913620a25df983
283,634,785,936,165,900,000,000,000,000,000,000,000
5
Check for trusted desktop file launchers. 2009-02-24 Alexander Larsson <[email protected]> * libnautilus-private/nautilus-directory-async.c: Check for trusted desktop file launchers. * libnautilus-private/nautilus-file-private.h: * libnautilus-private/nautilus-file.c: * libnautilus-private/nautilus-file.h: Add nautilus_file_is_trusted_link. Allow unsetting of custom display name. * libnautilus-private/nautilus-mime-actions.c: Display dialog when trying to launch a non-trusted desktop file. svn path=/trunk/; revision=15003
validate_cname_response(struct module_env* env, struct val_env* ve, struct query_info* qchase, struct reply_info* chase_reply, struct key_entry_key* kkey) { uint8_t* wc = NULL; size_t wl; int wc_NSEC_ok = 0; int nsec3s_seen = 0; size_t i; struct ub_packed_rrset_key* s; /* validate the ANSWER section - this will be the CNAME (+DNAME) */ for(i=0; i<chase_reply->an_numrrsets; i++) { s = chase_reply->rrsets[i]; /* Check to see if the rrset is the result of a wildcard * expansion. If so, an additional check will need to be * made in the authority section. */ if(!val_rrset_wildcard(s, &wc, &wl)) { log_nametypeclass(VERB_QUERY, "Cname response has " "inconsistent wildcard sigs:", s->rk.dname, ntohs(s->rk.type), ntohs(s->rk.rrset_class)); chase_reply->security = sec_status_bogus; update_reason_bogus(chase_reply, LDNS_EDE_DNSSEC_BOGUS); return; } /* Refuse wildcarded DNAMEs rfc 4597. * Do not follow a wildcarded DNAME because * its synthesized CNAME expansion is underdefined */ if(qchase->qtype != LDNS_RR_TYPE_DNAME && ntohs(s->rk.type) == LDNS_RR_TYPE_DNAME && wc) { log_nametypeclass(VERB_QUERY, "cannot validate a " "wildcarded DNAME:", s->rk.dname, ntohs(s->rk.type), ntohs(s->rk.rrset_class)); chase_reply->security = sec_status_bogus; update_reason_bogus(chase_reply, LDNS_EDE_DNSSEC_BOGUS); return; } /* If we have found a CNAME, stop looking for one. * The iterator has placed the CNAME chain in correct * order. */ if (ntohs(s->rk.type) == LDNS_RR_TYPE_CNAME) { break; } } /* AUTHORITY section */ for(i=chase_reply->an_numrrsets; i<chase_reply->an_numrrsets+ chase_reply->ns_numrrsets; i++) { s = chase_reply->rrsets[i]; /* If this is a positive wildcard response, and we have a * (just verified) NSEC record, try to use it to 1) prove * that qname doesn't exist and 2) that the correct wildcard * was used. */ if(wc != NULL && ntohs(s->rk.type) == LDNS_RR_TYPE_NSEC) { if(val_nsec_proves_positive_wildcard(s, qchase, wc)) { wc_NSEC_ok = 1; } /* if not, continue looking for proof */ } /* Otherwise, if this is a positive wildcard response and * we have NSEC3 records */ if(wc != NULL && ntohs(s->rk.type) == LDNS_RR_TYPE_NSEC3) { nsec3s_seen = 1; } } /* If this was a positive wildcard response that we haven't already * proven, and we have NSEC3 records, try to prove it using the NSEC3 * records. */ if(wc != NULL && !wc_NSEC_ok && nsec3s_seen) { enum sec_status sec = nsec3_prove_wildcard(env, ve, chase_reply->rrsets+chase_reply->an_numrrsets, chase_reply->ns_numrrsets, qchase, kkey, wc); if(sec == sec_status_insecure) { verbose(VERB_ALGO, "wildcard CNAME response is " "insecure"); chase_reply->security = sec_status_insecure; return; } else if(sec == sec_status_secure) wc_NSEC_ok = 1; } /* If after all this, we still haven't proven the positive wildcard * response, fail. */ if(wc != NULL && !wc_NSEC_ok) { verbose(VERB_QUERY, "CNAME response was wildcard " "expansion and did not prove original data " "did not exist"); chase_reply->security = sec_status_bogus; update_reason_bogus(chase_reply, LDNS_EDE_DNSSEC_BOGUS); return; } verbose(VERB_ALGO, "Successfully validated CNAME response"); chase_reply->security = sec_status_secure; }
0
[ "CWE-613", "CWE-703" ]
unbound
f6753a0f1018133df552347a199e0362fc1dac68
225,322,211,921,176,480,000,000,000,000,000,000,000
101
- Fix the novel ghost domain issues CVE-2022-30698 and CVE-2022-30699.
xfs_zero_last_block( struct xfs_inode *ip, xfs_fsize_t offset, xfs_fsize_t isize) { struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t last_fsb = XFS_B_TO_FSBT(mp, isize); int zero_offset = XFS_B_FSB_OFFSET(mp, isize); int zero_len; int nimaps = 1; int error = 0; struct xfs_bmbt_irec imap; xfs_ilock(ip, XFS_ILOCK_EXCL); error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0); xfs_iunlock(ip, XFS_ILOCK_EXCL); if (error) return error; ASSERT(nimaps > 0); /* * If the block underlying isize is just a hole, then there * is nothing to zero. */ if (imap.br_startblock == HOLESTARTBLOCK) return 0; zero_len = mp->m_sb.sb_blocksize - zero_offset; if (isize + zero_len > offset) zero_len = offset - isize; return xfs_iozero(ip, isize, zero_len); }
0
[ "CWE-284", "CWE-264" ]
linux
8d0207652cbe27d1f962050737848e5ad4671958
19,844,329,483,926,859,000,000,000,000,000,000,000
33
->splice_write() via ->write_iter() iter_file_splice_write() - a ->splice_write() instance that gathers the pipe buffers, builds a bio_vec-based iov_iter covering those and feeds it to ->write_iter(). A bunch of simple cases converted to that... [AV: fixed the braino spotted by Cyrill] Signed-off-by: Al Viro <[email protected]>
cmsBool Type_LUT16_Write(struct _cms_typehandler_struct* self, cmsIOHANDLER* io, void* Ptr, cmsUInt32Number nItems) { cmsUInt32Number nTabSize; cmsPipeline* NewLUT = (cmsPipeline*) Ptr; cmsStage* mpe; _cmsStageToneCurvesData* PreMPE = NULL, *PostMPE = NULL; _cmsStageMatrixData* MatMPE = NULL; _cmsStageCLutData* clut = NULL; int i, InputChannels, OutputChannels, clutPoints; // Disassemble the LUT into components. mpe = NewLUT -> Elements; if (mpe != NULL && mpe ->Type == cmsSigMatrixElemType) { MatMPE = (_cmsStageMatrixData*) mpe ->Data; mpe = mpe -> Next; } if (mpe != NULL && mpe ->Type == cmsSigCurveSetElemType) { PreMPE = (_cmsStageToneCurvesData*) mpe ->Data; mpe = mpe -> Next; } if (mpe != NULL && mpe ->Type == cmsSigCLutElemType) { clut = (_cmsStageCLutData*) mpe -> Data; mpe = mpe ->Next; } if (mpe != NULL && mpe ->Type == cmsSigCurveSetElemType) { PostMPE = (_cmsStageToneCurvesData*) mpe ->Data; mpe = mpe -> Next; } // That should be all if (mpe != NULL) { cmsSignalError(mpe->ContextID, cmsERROR_UNKNOWN_EXTENSION, "LUT is not suitable to be saved as LUT16"); return FALSE; } InputChannels = cmsPipelineInputChannels(NewLUT); OutputChannels = cmsPipelineOutputChannels(NewLUT); if (clut == NULL) clutPoints = 0; else clutPoints = clut->Params->nSamples[0]; if (!_cmsWriteUInt8Number(io, (cmsUInt8Number) InputChannels)) return FALSE; if (!_cmsWriteUInt8Number(io, (cmsUInt8Number) OutputChannels)) return FALSE; if (!_cmsWriteUInt8Number(io, (cmsUInt8Number) clutPoints)) return FALSE; if (!_cmsWriteUInt8Number(io, 0)) return FALSE; // Padding if (MatMPE != NULL) { if (!_cmsWrite15Fixed16Number(io, MatMPE -> Double[0])) return FALSE; if (!_cmsWrite15Fixed16Number(io, MatMPE -> Double[1])) return FALSE; if (!_cmsWrite15Fixed16Number(io, MatMPE -> Double[2])) return FALSE; if (!_cmsWrite15Fixed16Number(io, MatMPE -> Double[3])) return FALSE; if (!_cmsWrite15Fixed16Number(io, MatMPE -> Double[4])) return FALSE; if (!_cmsWrite15Fixed16Number(io, MatMPE -> Double[5])) return FALSE; if (!_cmsWrite15Fixed16Number(io, MatMPE -> Double[6])) return FALSE; if (!_cmsWrite15Fixed16Number(io, MatMPE -> Double[7])) return FALSE; if (!_cmsWrite15Fixed16Number(io, MatMPE -> Double[8])) return FALSE; } else { if (!_cmsWrite15Fixed16Number(io, 1)) return FALSE; if (!_cmsWrite15Fixed16Number(io, 0)) return FALSE; if (!_cmsWrite15Fixed16Number(io, 0)) return FALSE; if (!_cmsWrite15Fixed16Number(io, 0)) return FALSE; if (!_cmsWrite15Fixed16Number(io, 1)) return FALSE; if (!_cmsWrite15Fixed16Number(io, 0)) return FALSE; if (!_cmsWrite15Fixed16Number(io, 0)) return FALSE; if (!_cmsWrite15Fixed16Number(io, 0)) return FALSE; if (!_cmsWrite15Fixed16Number(io, 1)) return FALSE; } if (PreMPE != NULL) { if (!_cmsWriteUInt16Number(io, (cmsUInt16Number) PreMPE ->TheCurves[0]->nEntries)) return FALSE; } else { if (!_cmsWriteUInt16Number(io, 2)) return FALSE; } if (PostMPE != NULL) { if (!_cmsWriteUInt16Number(io, (cmsUInt16Number) PostMPE ->TheCurves[0]->nEntries)) return FALSE; } else { if (!_cmsWriteUInt16Number(io, 2)) return FALSE; } // The prelinearization table if (PreMPE != NULL) { if (!Write16bitTables(self ->ContextID, io, PreMPE)) return FALSE; } else { for (i=0; i < InputChannels; i++) { if (!_cmsWriteUInt16Number(io, 0)) return FALSE; if (!_cmsWriteUInt16Number(io, 0xffff)) return FALSE; } } nTabSize = uipow(OutputChannels, clutPoints, InputChannels); if (nTabSize == (cmsUInt32Number) -1) return FALSE; if (nTabSize > 0) { // The 3D CLUT. if (clut != NULL) { if (!_cmsWriteUInt16Array(io, nTabSize, clut->Tab.T)) return FALSE; } } // The postlinearization table if (PostMPE != NULL) { if (!Write16bitTables(self ->ContextID, io, PostMPE)) return FALSE; } else { for (i=0; i < OutputChannels; i++) { if (!_cmsWriteUInt16Number(io, 0)) return FALSE; if (!_cmsWriteUInt16Number(io, 0xffff)) return FALSE; } } return TRUE; cmsUNUSED_PARAMETER(nItems); }
0
[ "CWE-125" ]
Little-CMS
5ca71a7bc18b6897ab21d815d15e218e204581e2
172,523,376,558,700,200,000,000,000,000,000,000,000
131
Added an extra check to MLU bounds Thanks to Ibrahim el-sayed for spotting the bug
typeToCodecType(Http::CodecClient::Type type) { switch (type) { case Http::CodecClient::Type::HTTP1: return envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager:: HTTP1; case Http::CodecClient::Type::HTTP2: return envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager:: HTTP2; default: RELEASE_ASSERT(0, ""); } }
0
[ "CWE-400", "CWE-703" ]
envoy
afc39bea36fd436e54262f150c009e8d72db5014
198,616,319,294,548,500,000,000,000,000,000,000,000
12
Track byteSize of HeaderMap internally. Introduces a cached byte size updated internally in HeaderMap. The value is stored as an optional, and is cleared whenever a non-const pointer or reference to a HeaderEntry is accessed. The cached value can be set with refreshByteSize() which performs an iteration over the HeaderMap to sum the size of each key and value in the HeaderMap. Signed-off-by: Asra Ali <[email protected]>
static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) { const char *format; char buf[LINE_MAX]; sd_id128_t mid; assert(u); if (t != JOB_START && t != JOB_STOP && t != JOB_RELOAD) return; if (log_on_console()) return; /* We log status messages for all units and all operations. */ format = unit_get_status_message_format_try_harder(u, t); if (!format) return; snprintf(buf, sizeof(buf), format, unit_description(u)); char_array_0(buf); mid = t == JOB_START ? SD_MESSAGE_UNIT_STARTING : t == JOB_STOP ? SD_MESSAGE_UNIT_STOPPING : SD_MESSAGE_UNIT_RELOADING; log_struct_unit(LOG_INFO, u->id, MESSAGE_ID(mid), "MESSAGE=%s", buf, NULL); }
0
[]
systemd
5ba6985b6c8ef85a8bcfeb1b65239c863436e75b
304,404,789,677,293,720,000,000,000,000,000,000,000
32
core: allow PIDs to be watched by two units at the same time In some cases it is interesting to map a PID to two units at the same time. For example, when a user logs in via a getty, which is re-executed as /sbin/login, that binary will be explicitly referenced as the main PID of the getty service, as well as implicitly referenced as part of the session scope.
static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) { int ret; struct device *dev = hr_dev->dev; ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table, HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz, hr_dev->caps.num_mtt_segs, 1); if (ret) { dev_err(dev, "Failed to init MTT context memory, aborting.\n"); return ret; } if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) { ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_cqe_table, HEM_TYPE_CQE, hr_dev->caps.mtt_entry_sz, hr_dev->caps.num_cqe_segs, 1); if (ret) { dev_err(dev, "Failed to init MTT CQE context memory, aborting.\n"); goto err_unmap_cqe; } } ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table, HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz, hr_dev->caps.num_mtpts, 1); if (ret) { dev_err(dev, "Failed to init MTPT context memory, aborting.\n"); goto err_unmap_mtt; } ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table, HEM_TYPE_QPC, hr_dev->caps.qpc_entry_sz, hr_dev->caps.num_qps, 1); if (ret) { dev_err(dev, "Failed to init QP context memory, aborting.\n"); goto err_unmap_dmpt; } ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table, HEM_TYPE_IRRL, hr_dev->caps.irrl_entry_sz * hr_dev->caps.max_qp_init_rdma, hr_dev->caps.num_qps, 1); if (ret) { dev_err(dev, "Failed to init irrl_table memory, aborting.\n"); goto err_unmap_qp; } if (hr_dev->caps.trrl_entry_sz) { ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.trrl_table, HEM_TYPE_TRRL, hr_dev->caps.trrl_entry_sz * hr_dev->caps.max_qp_dest_rdma, hr_dev->caps.num_qps, 1); if (ret) { dev_err(dev, "Failed to init trrl_table memory, aborting.\n"); goto err_unmap_irrl; } } ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table, HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz, hr_dev->caps.num_cqs, 1); if (ret) { dev_err(dev, "Failed to init CQ context memory, aborting.\n"); goto err_unmap_trrl; } return 0; err_unmap_trrl: if (hr_dev->caps.trrl_entry_sz) hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.trrl_table); err_unmap_irrl: hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table); err_unmap_qp: hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table); err_unmap_dmpt: hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table); err_unmap_mtt: if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_cqe_table); err_unmap_cqe: hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table); return ret; }
0
[ "CWE-665" ]
kernel
72be029e947510dd6cbbbaf51879622af26e4200
84,987,613,601,237,950,000,000,000,000,000,000,000
98
RDMA/hns: Fix init resp when alloc ucontext (bsc#1104427 FATE#326416). suse-commit: 8e5436bc2806cbe952f043cc995804c188ce047a
void shash_free_instance(struct crypto_instance *inst) { crypto_drop_spawn(crypto_instance_ctx(inst)); kfree(shash_instance(inst)); }
0
[ "CWE-787" ]
linux
af3ff8045bbf3e32f1a448542e73abb4c8ceb6f1
183,054,519,917,431,480,000,000,000,000,000,000,000
5
crypto: hmac - require that the underlying hash algorithm is unkeyed Because the HMAC template didn't check that its underlying hash algorithm is unkeyed, trying to use "hmac(hmac(sha3-512-generic))" through AF_ALG or through KEYCTL_DH_COMPUTE resulted in the inner HMAC being used without having been keyed, resulting in sha3_update() being called without sha3_init(), causing a stack buffer overflow. This is a very old bug, but it seems to have only started causing real problems when SHA-3 support was added (requires CONFIG_CRYPTO_SHA3) because the innermost hash's state is ->import()ed from a zeroed buffer, and it just so happens that other hash algorithms are fine with that, but SHA-3 is not. However, there could be arch or hardware-dependent hash algorithms also affected; I couldn't test everything. Fix the bug by introducing a function crypto_shash_alg_has_setkey() which tests whether a shash algorithm is keyed. Then update the HMAC template to require that its underlying hash algorithm is unkeyed. Here is a reproducer: #include <linux/if_alg.h> #include <sys/socket.h> int main() { int algfd; struct sockaddr_alg addr = { .salg_type = "hash", .salg_name = "hmac(hmac(sha3-512-generic))", }; char key[4096] = { 0 }; algfd = socket(AF_ALG, SOCK_SEQPACKET, 0); bind(algfd, (const struct sockaddr *)&addr, sizeof(addr)); setsockopt(algfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key)); } Here was the KASAN report from syzbot: BUG: KASAN: stack-out-of-bounds in memcpy include/linux/string.h:341 [inline] BUG: KASAN: stack-out-of-bounds in sha3_update+0xdf/0x2e0 crypto/sha3_generic.c:161 Write of size 4096 at addr ffff8801cca07c40 by task syzkaller076574/3044 CPU: 1 PID: 3044 Comm: syzkaller076574 Not tainted 4.14.0-mm1+ #25 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:17 [inline] dump_stack+0x194/0x257 lib/dump_stack.c:53 print_address_description+0x73/0x250 mm/kasan/report.c:252 kasan_report_error mm/kasan/report.c:351 [inline] kasan_report+0x25b/0x340 mm/kasan/report.c:409 check_memory_region_inline mm/kasan/kasan.c:260 [inline] check_memory_region+0x137/0x190 mm/kasan/kasan.c:267 memcpy+0x37/0x50 mm/kasan/kasan.c:303 memcpy include/linux/string.h:341 [inline] sha3_update+0xdf/0x2e0 crypto/sha3_generic.c:161 crypto_shash_update+0xcb/0x220 crypto/shash.c:109 shash_finup_unaligned+0x2a/0x60 crypto/shash.c:151 crypto_shash_finup+0xc4/0x120 crypto/shash.c:165 hmac_finup+0x182/0x330 crypto/hmac.c:152 crypto_shash_finup+0xc4/0x120 crypto/shash.c:165 shash_digest_unaligned+0x9e/0xd0 crypto/shash.c:172 crypto_shash_digest+0xc4/0x120 crypto/shash.c:186 hmac_setkey+0x36a/0x690 crypto/hmac.c:66 crypto_shash_setkey+0xad/0x190 crypto/shash.c:64 shash_async_setkey+0x47/0x60 crypto/shash.c:207 crypto_ahash_setkey+0xaf/0x180 crypto/ahash.c:200 hash_setkey+0x40/0x90 crypto/algif_hash.c:446 alg_setkey crypto/af_alg.c:221 [inline] alg_setsockopt+0x2a1/0x350 crypto/af_alg.c:254 SYSC_setsockopt net/socket.c:1851 [inline] SyS_setsockopt+0x189/0x360 net/socket.c:1830 entry_SYSCALL_64_fastpath+0x1f/0x96 Reported-by: syzbot <[email protected]> Cc: <[email protected]> Signed-off-by: Eric Biggers <[email protected]> Signed-off-by: Herbert Xu <[email protected]>
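The fix introduces a predicate for whether a shash algorithm is keyed and makes the HMAC template refuse keyed inner hashes. A simplified standalone sketch; note the kernel's crypto_shash_alg_has_setkey() actually compares the hook against a shared no-op default rather than NULL:

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct shash_alg_sketch { /* stand-in for the kernel's struct shash_alg */
    int (*setkey)(void *tfm, const unsigned char *key, unsigned int keylen);
};

/* An algorithm counts as keyed iff it provides a setkey hook. */
static bool shash_has_setkey(const struct shash_alg_sketch *alg)
{
    return alg->setkey != NULL;
}

/* HMAC template constructor guard: the underlying hash must be unkeyed,
 * otherwise reject instantiation. */
static int hmac_check_inner(const struct shash_alg_sketch *inner)
{
    return shash_has_setkey(inner) ? -EINVAL : 0;
}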
kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn) { return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL); }
0
[ "CWE-416" ]
linux
0774a964ef561b7170d8d1b1bfe6f88002b6d219
37,026,052,342,131,464,000,000,000,000,000,000,000
4
KVM: Fix out of range accesses to memslots Reset the LRU slot if it becomes invalid when deleting a memslot to fix an out-of-bounds/use-after-free access when searching through memslots. Explicitly check for there being no used slots in search_memslots(), and in the caller of s390's approximation variant. Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots") Reported-by: Qian Cai <[email protected]> Cc: Peter Xu <[email protected]> Signed-off-by: Sean Christopherson <[email protected]> Message-Id: <[email protected]> Acked-by: Christian Borntraeger <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
SPL_METHOD(FilesystemIterator, getFlags) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG(intern->flags & (SPL_FILE_DIR_KEY_MODE_MASK | SPL_FILE_DIR_CURRENT_MODE_MASK | SPL_FILE_DIR_OTHERS_MASK)); } /* }}} */
0
[ "CWE-190" ]
php-src
7245bff300d3fa8bacbef7897ff080a6f1c23eba
27,053,984,519,713,964,000,000,000,000,000,000,000
10
Fix bug #72262 - do not overflow int
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) { if ((unsigned int)size <= sysctl_optmem_max && atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { void *mem; /* First do the add, to avoid the race if kmalloc * might sleep. */ atomic_add(size, &sk->sk_omem_alloc); mem = kmalloc(size, priority); if (mem) return mem; atomic_sub(size, &sk->sk_omem_alloc); } return NULL; }
0
[ "CWE-119", "CWE-787" ]
linux
b98b0bc8c431e3ceb4b26b0dfc8db509518fb290
94,342,296,770,926,850,000,000,000,000,000,000,000
16
net: avoid signed overflows for SO_{SND|RCV}BUFFORCE CAP_NET_ADMIN users should not be allowed to set negative sk_sndbuf or sk_rcvbuf values, as it can lead to various memory corruptions, crashes, OOM... Note that before commit 82981930125a ("net: cleanups in sock_setsockopt()"), the bug was even more serious, since SO_SNDBUF and SO_RCVBUF were vulnerable. This needs to be backported to all known linux kernels. Again, many thanks to syzkaller team for discovering this gem. Signed-off-by: Eric Dumazet <[email protected]> Reported-by: Andrey Konovalov <[email protected]> Signed-off-by: David S. Miller <[email protected]>
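The overflow this message describes sits in the doubling arithmetic that turns the requested value into sk_sndbuf: a negative val, or one above INT_MAX/2, corrupts the result. A standalone sketch of the sanitized computation; the bounds are illustrative, as the actual patch clamps val to 0 and relies on the existing min/max helpers:

#include <limits.h>

/* Sanitize a CAP_NET_ADMIN-supplied buffer size before the doubling math:
 * negative values and values whose doubling would overflow are clamped. */
static int sanitize_sndbuf(int val, int min_sndbuf)
{
    if (val < 0)
        val = 0;
    if (val > INT_MAX / 2)
        val = INT_MAX / 2; /* keep val * 2 within int range */
    int doubled = val * 2;
    return doubled < min_sndbuf ? min_sndbuf : doubled;
}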
struct razer_report razer_chroma_misc_set_orochi2011_poll_dpi(unsigned short poll_rate, unsigned char dpi_x, unsigned char dpi_y) { struct razer_report report = {0}; memcpy(&report, &orochi2011_dpi, sizeof(orochi2011_dpi)); switch(poll_rate) { case 1000: poll_rate = 0x01; break; case 500: poll_rate = 0x02; break; case 125: poll_rate = 0x08; break; default: // 500Hz poll_rate = 0x02; break; } report.arguments[1] = poll_rate; report.arguments[3] = clamp_u8(dpi_x, 0x15, 0x9C); report.arguments[4] = clamp_u8(dpi_y, 0x15, 0x9C); return report; }
0
[ "CWE-787" ]
openrazer
7e8a04feb378a679f1bcdcae079a5100cc45663b
291,510,171,618,935,200,000,000,000,000,000,000,000
27
Fix oob memcpy in matrix_custom_frame methods Adjust row_length if it exceeds the arguments array
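The fix clamps the caller-supplied row_length to the space actually available in the report's arguments array before the memcpy. A minimal sketch of that clamp, with the buffer size passed in explicitly (the driver uses the fixed report argument size):

#include <stddef.h>
#include <string.h>

/* Copy one RGB row into a fixed-size argument buffer, clamping the
 * caller-supplied length so the memcpy can never run past the buffer. */
static void copy_row(unsigned char *args, size_t args_size,
                     const unsigned char *rgb, size_t row_length)
{
    if (row_length > args_size)
        row_length = args_size;
    memcpy(args, rgb, row_length);
}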
static inline int crypto_rng_generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen) { return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen); }
0
[ "CWE-476", "CWE-703" ]
linux
94f1bb15bed84ad6c893916b7e7b9db6f1d7eec6
139,521,051,229,510,440,000,000,000,000,000,000,000
6
crypto: rng - Remove old low-level rng interface Now that all rng implementations have switched over to the new interface, we can remove the old low-level interface. Signed-off-by: Herbert Xu <[email protected]>
worker_err_ratelimit(struct worker* worker, int err) { if(worker->err_limit_time == *worker->env.now) { /* see if limit is exceeded for this second */ if(worker->err_limit_count++ > ERROR_RATELIMIT) return -1; } else { /* new second, new limits */ worker->err_limit_time = *worker->env.now; worker->err_limit_count = 1; } return err; }
0
[ "CWE-613", "CWE-703" ]
unbound
f6753a0f1018133df552347a199e0362fc1dac68
5,719,998,935,179,149,000,000,000,000,000,000,000
13
- Fix the novel ghost domain issues CVE-2022-30698 and CVE-2022-30699.
int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb) { int ret; int size; if (ud->side == USBIP_STUB) { /* the direction of urb must be OUT. */ if (usb_pipein(urb->pipe)) return 0; size = urb->transfer_buffer_length; } else { /* the direction of urb must be IN. */ if (usb_pipeout(urb->pipe)) return 0; size = urb->actual_length; } /* no need to recv xbuff */ if (!(size > 0)) return 0; ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size); if (ret != size) { dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret); if (ud->side == USBIP_STUB) { usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); } else { usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); return -EPIPE; } } return ret; }
1
[ "CWE-200", "CWE-119" ]
linux
b348d7dddb6c4fbfc810b7a0626e8ec9e29f7cbb
305,454,016,037,161,200,000,000,000,000,000,000,000
36
USB: usbip: fix potential out-of-bounds write Fix potential out-of-bounds write to urb->transfer_buffer usbip handles network communication directly in the kernel. When receiving a packet from its peer, usbip code parses headers according to protocol. As part of this parsing urb->actual_length is filled. Since the input for urb->actual_length comes from the network, it should be treated as untrusted. Any entity controlling the network may put any value in the input and the preallocated urb->transfer_buffer may not be large enough to hold the data. Thus, the malicious entity is able to write arbitrary data to kernel memory. Signed-off-by: Ignat Korchagin <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
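usbip_recv_xbuff() above takes its receive size from urb->actual_length, a value parsed straight off the network; the fix bounds it by the length of the buffer that was actually allocated. A standalone sketch of the bound, with the error value chosen for illustration:

#include <errno.h>
#include <stddef.h>

/* A peer-supplied length must never exceed the capacity preallocated for
 * the URB's transfer buffer; otherwise the peer controls an OOB write. */
static int recv_size_ok(size_t claimed_len, size_t buffer_capacity)
{
    return claimed_len <= buffer_capacity ? 0 : -EPIPE;
}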
static int atrtr_delete(struct atalk_addr * addr) { struct atalk_route **r = &atalk_routes; int retval = 0; struct atalk_route *tmp; write_lock_bh(&atalk_routes_lock); while ((tmp = *r) != NULL) { if (tmp->target.s_net == addr->s_net && (!(tmp->flags&RTF_GATEWAY) || tmp->target.s_node == addr->s_node)) { *r = tmp->next; dev_put(tmp->dev); kfree(tmp); goto out; } r = &tmp->next; } retval = -ENOENT; out: write_unlock_bh(&atalk_routes_lock); return retval; }
0
[]
history
7ab442d7e0a76402c12553ee256f756097cae2d2
62,613,400,245,905,870,000,000,000,000,000,000,000
23
[DDP]: Convert to new protocol interface. Convert ddp to the new protocol interface, which means it has to handle fragmented skb's. The only big change is in the checksum routine, which has to do more work (like skb_checksum). A minor speedup comes from folding the carry to avoid a branch. Tested against a 2.4 system and by running both implementations over a range of packets.
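The carry folding the message mentions is the standard end-around-carry step of a ones'-complement checksum: add the high half of the partial sum back into the low half instead of branching on overflow. A self-contained sketch:

#include <stdint.h>

/* Fold a 32-bit partial sum down to 16 bits. After the first fold the sum
 * fits in 17 bits, so a second fold always clears the carry; no branch. */
static uint16_t csum_fold(uint32_t sum)
{
    sum = (sum & 0xffff) + (sum >> 16);
    sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}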
void Compute(OpKernelContext* context) override { ResourceHandle handle; OP_REQUIRES_OK(context, HandleFromInput(context, kResourceHandleName, &handle)); core::RefCountPtr<QuantileStreamResource> stream_resource; // Create a reference to the underlying resource using the handle. OP_REQUIRES_OK(context, LookupResource(context, handle, &stream_resource)); // Remove the reference at the end of this scope. mutex_lock l(*stream_resource->mutex()); OpInputList summaries_list; OP_REQUIRES_OK(context, context->input_list(kSummariesName, &summaries_list)); int32_t num_streams = stream_resource->num_streams(); CHECK_EQ(static_cast<int>(num_streams), summaries_list.size()); auto do_quantile_add_summary = [&](const int64_t begin, const int64_t end) { // Iterating all features. for (int64_t feature_idx = begin; feature_idx < end; ++feature_idx) { QuantileStream* stream = stream_resource->stream(feature_idx); if (stream->IsFinalized()) { VLOG(1) << "QuantileStream has already been finalized for feature" << feature_idx << "."; continue; } const Tensor& summaries = summaries_list[feature_idx]; const auto summary_values = summaries.matrix<float>(); const auto& tensor_shape = summaries.shape(); const int64_t entries_size = tensor_shape.dim_size(0); CHECK_EQ(tensor_shape.dim_size(1), 4); std::vector<QuantileSummaryEntry> summary_entries; summary_entries.reserve(entries_size); for (int64_t i = 0; i < entries_size; i++) { float value = summary_values(i, 0); float weight = summary_values(i, 1); float min_rank = summary_values(i, 2); float max_rank = summary_values(i, 3); QuantileSummaryEntry entry(value, weight, min_rank, max_rank); summary_entries.push_back(entry); } stream_resource->stream(feature_idx)->PushSummary(summary_entries); } }; // TODO(tanzheny): comment on the magic number. const int64_t kCostPerUnit = 500 * num_streams; const DeviceBase::CpuWorkerThreads& worker_threads = *context->device()->tensorflow_cpu_worker_threads(); Shard(worker_threads.num_threads, worker_threads.workers, num_streams, kCostPerUnit, do_quantile_add_summary); }
0
[ "CWE-703", "CWE-681" ]
tensorflow
8a84f7a2b5a2b27ecf88d25bad9ac777cd2f7992
146,768,006,737,553,280,000,000,000,000,000,000,000
51
Ensure num_streams >= 0 in tf.raw_ops.BoostedTreesCreateQuantileStreamResource PiperOrigin-RevId: 387452765 Change-Id: I9990c760e177fabca6a3b9b4612ceeaeeba51495
void bn_init(BIGNUM *a) { static BIGNUM nilbn; *a = nilbn; bn_check_top(a); }
0
[ "CWE-310" ]
openssl
aab7c770353b1dc4ba045938c8fb446dd1c4531e
165,014,393,623,226,940,000,000,000,000,000,000,000
7
Elliptic curve scalar multiplication with timing attack defenses Co-authored-by: Nicola Tuveri <[email protected]> Co-authored-by: Cesar Pereida Garcia <[email protected]> Co-authored-by: Sohaib ul Hassan <[email protected]> Reviewed-by: Andy Polyakov <[email protected]> Reviewed-by: Matt Caswell <[email protected]> (Merged from https://github.com/openssl/openssl/pull/6009) (cherry picked from commit 40e48e54582e46c1a01e184ecf5bd31f4f7f8294)