column      type              range / values
func        stringlengths     0 .. 484k
target      int64             0 .. 1
cwe         sequencelengths   0 .. 4
project     stringclasses     799 values
commit_id   stringlengths     40 .. 40
hash        float64           1,215,700,430,453,689,100,000,000B .. 340,281,914,521,452,260,000,000,000,000B
size        int64             1 .. 24k
message     stringlengths     0 .. 13.3k
bgp_mp_unreach_parse (struct peer *peer, const bgp_size_t length, const u_char flag, u_char *startp, struct bgp_nlri *mp_withdraw) { struct stream *s; afi_t afi; safi_t safi; u_int16_t withdraw_len; int ret; bgp_size_t total; total = length + (CHECK_FLAG (flag, BGP_ATTR_FLAG_EXTLEN) ? 4 : 3); /* Flag checks. */ if (bgp_attr_flag_invalid (peer, BGP_ATTR_MP_UNREACH_NLRI, flag)) return bgp_attr_malformed (peer, BGP_ATTR_MP_UNREACH_NLRI, flag, BGP_NOTIFY_UPDATE_ATTR_FLAG_ERR, startp, total); s = peer->ibuf; #define BGP_MP_UNREACH_MIN_SIZE 3 if ((length > STREAM_READABLE(s)) || (length < BGP_MP_UNREACH_MIN_SIZE)) return BGP_ATTR_PARSE_ERROR; afi = stream_getw (s); safi = stream_getc (s); withdraw_len = length - BGP_MP_UNREACH_MIN_SIZE; if (safi != SAFI_MPLS_LABELED_VPN) { ret = bgp_nlri_sanity_check (peer, afi, stream_pnt (s), withdraw_len); if (ret < 0) return BGP_ATTR_PARSE_ERROR; } mp_withdraw->afi = afi; mp_withdraw->safi = safi; mp_withdraw->nlri = stream_pnt (s); mp_withdraw->length = withdraw_len; stream_forward_getp (s, withdraw_len); return BGP_ATTR_PARSE_PROCEED; }
1
[]
quagga
835315bfb49bff2b2fb354f2075c6d6693c2a151
209,700,651,408,804,170,000,000,000,000,000,000,000
45
bgpd: Move up flag-check calls, parcel up attr-parser args, and other cleanups * bgp_attr.h: (struct bgp_attr_parser_args) Attribute parsing context, containing common arguments. * bgp_attr.c: (general) Move the bgp_attr_flag_invalid flag-check calls up, out of each individual attr parser function, to be done once in attr_parse. Similarly move the calculation of the 'total' attribute length field up to attr_parse. Bundle together common arguments to attr-parsing functions and helpers into (struct bgp_attr_parser_args), so it can be passed by reference down the stack & also de-clutter the argument lists & make it easier to add/modify the context for attr-parsing - add local const aliases to avoid modifying body of code too much. This also should help avoid cut & paste errors, where calls to helpers with hard-coded attribute types are pasted to other functions but the code isn't changed. (bgp_attr_flags_diagnose) as above. (bgp_attr_flag_invalid) as above. (bgp_attr_{origin,aspath,as4_path,nexthop,med,local_pref,atomic}) as above. (bgp_attr_{aggregator,as4_aggregator,community,originator_id}) as above (bgp_attr_{cluster_list,ext_communities},bgp_mp_{un,}reach_parse) as above (bgp_attr_unknown) as above. (bgp_attr_malformed) as above. Also, startp and length have to be special-cased, because whether or not to send attribute data depends on the particular error - a separate length argument, distinct from args->length, indicates whether or not the attribute data should be sent in the NOTIFY. (bgp_attr_aspath_check) Call to bgp_attr_malformed is wrong here, there is no attribute parsing context - e.g. the 'flag' argument is unlikely to be right, remove it. Explicitly handle the error instead. (bgp_attr_munge_as4_attrs) Flag argument is pointless. As the comment notes, the check here is pointless as AS_PATH presence already checked elsewhere. (bgp_attr_parse) Do bgp_attr_flag_invalid call here. Use (struct bgp_attr_parser_args) for args to attr parser functions. Remove out-of-context 'flag' argument to as4 checking functions.
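As a rough illustration of the argument-bundling this message describes, here is a minimal C sketch of a parsing-context struct; the field names and the helper are assumptions for illustration, not Quagga's actual struct bgp_attr_parser_args.

#include <stdint.h>

struct peer;   /* opaque stand-in for the BGP session structure */

/* Hypothetical parsing context, loosely modelled on the commit message;
 * the real struct bgp_attr_parser_args in bgp_attr.h differs. */
struct attr_parser_args {
    struct peer *peer;    /* session the UPDATE arrived on */
    uint8_t      type;    /* attribute type code */
    uint8_t      flags;   /* attribute flags from the header */
    uint16_t     length;  /* attribute value length */
    uint8_t     *startp;  /* start of the raw attribute, for NOTIFY data */
};

/* A per-attribute parser now takes one pointer instead of five arguments,
 * which is the de-cluttering the commit message is after. */
int attr_parse_origin(struct attr_parser_args *args)
{
    return (args->length == 1 && args->startp != NULL) ? 0 : -1;
}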
static avifBool avifParseTrackBox(avifDecoderData * data, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); avifTrack * track = avifDecoderDataCreateTrack(data); while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); if (!memcmp(header.type, "tkhd", 4)) { CHECK(avifParseTrackHeaderBox(track, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "meta", 4)) { CHECK(avifParseMetaBox(track->meta, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "mdia", 4)) { CHECK(avifParseMediaBox(track, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "tref", 4)) { CHECK(avifTrackReferenceBox(track, avifROStreamCurrent(&s), header.size)); } CHECK(avifROStreamSkip(&s, header.size)); } return AVIF_TRUE; }
0
[ "CWE-703", "CWE-787" ]
libavif
0a8e7244d494ae98e9756355dfbfb6697ded2ff9
280,396,132,535,573,430,000,000,000,000,000,000,000
24
Set max image size to 16384 * 16384 Fix https://crbug.com/oss-fuzz/24728 and https://crbug.com/oss-fuzz/24734.
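A minimal sketch, assuming the 16384 limit from the commit title, of the kind of dimension guard such a fix adds; the constant name and helper are illustrative, not libavif's actual code.

#include <stdbool.h>
#include <stdint.h>

#define MAX_IMAGE_DIMENSION 16384   /* assumed limit, taken from the commit title */

/* Reject dimensions that could overflow width*height*bytes-per-pixel
 * computations further down the decode path. */
bool image_size_is_sane(uint32_t width, uint32_t height)
{
    if (width == 0 || height == 0)
        return false;
    if (width > MAX_IMAGE_DIMENSION || height > MAX_IMAGE_DIMENSION)
        return false;
    return true;
}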
void CLASS linear_table (unsigned len) { int i; if (len > 0x1000) len = 0x1000; read_shorts (curve, len); for (i=len; i < 0x1000; i++) curve[i] = curve[i-1]; maximum = curve[0xfff]; }
0
[ "CWE-703" ]
LibRaw
11909cc59e712e09b508dda729b99aeaac2b29ad
212,540,948,201,763,540,000,000,000,000,000,000,000
9
cumulated data checks patch
static void mux_set_focus(MuxDriver *d, int focus) { assert(focus >= 0); assert(focus < d->mux_cnt); if (d->focus != -1) { mux_chr_send_event(d, d->focus, CHR_EVENT_MUX_OUT); } d->focus = focus; mux_chr_send_event(d, d->focus, CHR_EVENT_MUX_IN); }
0
[ "CWE-416" ]
qemu
a4afa548fc6dd9842ed86639b4d37d4d1c4ad480
301,896,773,145,372,950,000,000,000,000,000,000,000
12
char: move front end handlers in CharBackend Since the handlers are associated with a CharBackend, rather than the CharDriverState, it is more appropriate to store them in CharBackend. This avoids the handler copy dance in qemu_chr_fe_set_handlers() then mux_chr_update_read_handler(), by storing the CharBackend pointer directly. Also, a mux CharDriver should go through mux->backends[focused], since chr->be will stay NULL. Before that, it was possible to call chr->handler by mistake with surprising results, for example through qemu_chr_be_can_write(), which would result in calling the front end whose handlers were set last, not the one with focus. Signed-off-by: Marc-André Lureau <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
static bool sock_ops_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { const int size_default = sizeof(__u32); if (off < 0 || off >= sizeof(struct bpf_sock_ops)) return false; /* The verifier guarantees that size > 0. */ if (off % size != 0) return false; if (type == BPF_WRITE) { switch (off) { case offsetof(struct bpf_sock_ops, reply): case offsetof(struct bpf_sock_ops, sk_txhash): if (size != size_default) return false; break; default: return false; } } else { switch (off) { case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received, bytes_acked): if (size != sizeof(__u64)) return false; break; default: if (size != size_default) return false; break; } } return true; }
0
[ "CWE-120" ]
linux
050fad7c4534c13c8eb1d9c2ba66012e014773cb
340,034,986,547,238,550,000,000,000,000,000,000,000
40
bpf: fix truncated jump targets on heavy expansions Recently during testing, I ran into the following panic: [ 207.892422] Internal error: Accessing user space memory outside uaccess.h routines: 96000004 [#1] SMP [ 207.901637] Modules linked in: binfmt_misc [...] [ 207.966530] CPU: 45 PID: 2256 Comm: test_verifier Tainted: G W 4.17.0-rc3+ #7 [ 207.974956] Hardware name: FOXCONN R2-1221R-A4/C2U4N_MB, BIOS G31FB18A 03/31/2017 [ 207.982428] pstate: 60400005 (nZCv daif +PAN -UAO) [ 207.987214] pc : bpf_skb_load_helper_8_no_cache+0x34/0xc0 [ 207.992603] lr : 0xffff000000bdb754 [ 207.996080] sp : ffff000013703ca0 [ 207.999384] x29: ffff000013703ca0 x28: 0000000000000001 [ 208.004688] x27: 0000000000000001 x26: 0000000000000000 [ 208.009992] x25: ffff000013703ce0 x24: ffff800fb4afcb00 [ 208.015295] x23: ffff00007d2f5038 x22: ffff00007d2f5000 [ 208.020599] x21: fffffffffeff2a6f x20: 000000000000000a [ 208.025903] x19: ffff000009578000 x18: 0000000000000a03 [ 208.031206] x17: 0000000000000000 x16: 0000000000000000 [ 208.036510] x15: 0000ffff9de83000 x14: 0000000000000000 [ 208.041813] x13: 0000000000000000 x12: 0000000000000000 [ 208.047116] x11: 0000000000000001 x10: ffff0000089e7f18 [ 208.052419] x9 : fffffffffeff2a6f x8 : 0000000000000000 [ 208.057723] x7 : 000000000000000a x6 : 00280c6160000000 [ 208.063026] x5 : 0000000000000018 x4 : 0000000000007db6 [ 208.068329] x3 : 000000000008647a x2 : 19868179b1484500 [ 208.073632] x1 : 0000000000000000 x0 : ffff000009578c08 [ 208.078938] Process test_verifier (pid: 2256, stack limit = 0x0000000049ca7974) [ 208.086235] Call trace: [ 208.088672] bpf_skb_load_helper_8_no_cache+0x34/0xc0 [ 208.093713] 0xffff000000bdb754 [ 208.096845] bpf_test_run+0x78/0xf8 [ 208.100324] bpf_prog_test_run_skb+0x148/0x230 [ 208.104758] sys_bpf+0x314/0x1198 [ 208.108064] el0_svc_naked+0x30/0x34 [ 208.111632] Code: 91302260 f9400001 f9001fa1 d2800001 (29500680) [ 208.117717] ---[ end trace 263cb8a59b5bf29f ]--- The program itself which caused this had a long jump over the whole instruction sequence where all of the inner instructions required heavy expansions into multiple BPF instructions. Additionally, I also had BPF hardening enabled which requires once more rewrites of all constant values in order to blind them. Each time we rewrite insns, bpf_adj_branches() would need to potentially adjust branch targets which cross the patchlet boundary to accommodate for the additional delta. Eventually that lead to the case where the target offset could not fit into insn->off's upper 0x7fff limit anymore where then offset wraps around becoming negative (in s16 universe), or vice versa depending on the jump direction. Therefore it becomes necessary to detect and reject any such occasions in a generic way for native eBPF and cBPF to eBPF migrations. For the latter we can simply check bounds in the bpf_convert_filter()'s BPF_EMIT_JMP helper macro and bail out once we surpass limits. The bpf_patch_insn_single() for native eBPF (and cBPF to eBPF in case of subsequent hardening) is a bit more complex in that we need to detect such truncations before hitting the bpf_prog_realloc(). Thus the latter is split into an extra pass to probe problematic offsets on the original program in order to fail early. With that in place and carefully tested I no longer hit the panic and the rewrites are rejected properly. The above example panic I've seen on bpf-next, though the issue itself is generic in that a guard against this issue in bpf seems more appropriate in this case. 
Signed-off-by: Daniel Borkmann <[email protected]> Acked-by: Martin KaFai Lau <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]>
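The core of the fix described above is rejecting branch adjustments whose result no longer fits a signed 16-bit offset; a generic C sketch of that bounds check (not the kernel's bpf_adj_branches/bpf_patch_insn_single code):

#include <errno.h>
#include <stdint.h>

/* Jump offsets in an eBPF instruction are signed 16-bit values. Growing the
 * program by `delta` instructions must be rejected if an adjusted offset no
 * longer fits, otherwise it silently wraps and the jump lands elsewhere. */
int adjust_jump_off(int16_t *off, int32_t delta)
{
    int64_t new_off = (int64_t)*off + delta;

    if (new_off < INT16_MIN || new_off > INT16_MAX)
        return -ERANGE;            /* refuse the rewrite instead of truncating */

    *off = (int16_t)new_off;
    return 0;
}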
bytes_count(PyBytesObject *self, PyObject *args) { PyObject *sub_obj; const char *str = PyBytes_AS_STRING(self), *sub; Py_ssize_t sub_len; char byte; Py_ssize_t start = 0, end = PY_SSIZE_T_MAX; Py_buffer vsub; PyObject *count_obj; if (!stringlib_parse_args_finds_byte("count", args, &sub_obj, &byte, &start, &end)) return NULL; if (sub_obj) { if (PyObject_GetBuffer(sub_obj, &vsub, PyBUF_SIMPLE) != 0) return NULL; sub = vsub.buf; sub_len = vsub.len; } else { sub = &byte; sub_len = 1; } ADJUST_INDICES(start, end, PyBytes_GET_SIZE(self)); count_obj = PyLong_FromSsize_t( stringlib_count(str + start, end - start, sub, sub_len, PY_SSIZE_T_MAX) ); if (sub_obj) PyBuffer_Release(&vsub); return count_obj; }
0
[ "CWE-190" ]
cpython
6c004b40f9d51872d848981ef1a18bb08c2dfc42
132,212,162,131,734,020,000,000,000,000,000,000,000
38
bpo-30657: Fix CVE-2017-1000158 (#4758) Fixes possible integer overflow in PyBytes_DecodeEscape. Co-Authored-By: Jay Bosamiya <[email protected]>
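The overflow here comes from sizing a buffer as a multiple of an attacker-controlled length; a hedged sketch of the usual guard, in plain C rather than CPython's actual PyBytes_DecodeEscape code:

#include <stdint.h>
#include <stdlib.h>

/* Worst case, every input byte expands to a 4-character escape, so the output
 * buffer is sized at 4*len; check that multiplication before doing it rather
 * than after it has already wrapped around. */
void *alloc_escaped_copy(size_t len)
{
    const size_t factor = 4;

    if (len > SIZE_MAX / factor)
        return NULL;               /* would overflow: refuse instead of wrapping */

    return malloc(len * factor);
}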
static char *palColorFor(const char *k) { if (!r_cons_singleton ()) { return NULL; } RColor rcolor = r_cons_pal_get (k); return r_cons_rgb_tostring (rcolor.r, rcolor.g, rcolor.b); }
0
[ "CWE-416" ]
radare2
10517e3ff0e609697eb8cde60ec8dc999ee5ea24
253,037,256,282,708,030,000,000,000,000,000,000,000
7
aaef on arm/thumb switches causes uaf ##crash * Reported by peacock-doris via huntr.dev * Reproducer: poc_uaf_r_reg_get
static int vmsplice_type(struct fd f, int *type) { if (!f.file) return -EBADF; if (f.file->f_mode & FMODE_WRITE) { *type = WRITE; } else if (f.file->f_mode & FMODE_READ) { *type = READ; } else { fdput(f); return -EBADF; } return 0; }
0
[ "CWE-416" ]
linux
15fab63e1e57be9fdb5eec1bbc5916e9825e9acb
89,161,944,326,280,260,000,000,000,000,000,000,000
14
fs: prevent page refcount overflow in pipe_buf_get Change pipe_buf_get() to return a bool indicating whether it succeeded in raising the refcount of the page (if the thing in the pipe is a page). This removes another mechanism for overflowing the page refcount. All callers converted to handle a failure. Reported-by: Jann Horn <[email protected]> Signed-off-by: Matthew Wilcox <[email protected]> Cc: [email protected] Signed-off-by: Linus Torvalds <[email protected]>
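A generic sketch of the interface change the message describes: the "get" operation returns a bool so callers can bail out instead of letting a reference count wrap. Types and the saturation threshold are assumptions, not the kernel's pipe_buffer API.

#include <limits.h>
#include <stdbool.h>

struct refcounted_page {
    unsigned int refcount;
};

/* Refuse to take another reference when the counter is close to wrapping.
 * Callers must check the result and fall back to copying the data instead
 * of assuming the get always succeeds. */
bool page_try_get(struct refcounted_page *p)
{
    if (p->refcount >= UINT_MAX - 1)
        return false;              /* saturate rather than overflow to zero */
    p->refcount++;
    return true;
}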
static s32 vvc_parse_slice(GF_BitStream *bs, VVCState *vvc, VVCSliceInfo *si) { // u32 CurrSubpicIdx = 0; si->picture_header_in_slice_header_flag = gf_bs_read_int_log(bs, 1, "picture_header_in_slice_header_flag"); if (si->picture_header_in_slice_header_flag) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CODING, ("[VVC] Picture header in slice header incomplete support, cannot guess slice type\n")); si->slice_type = GF_VVC_SLICE_TYPE_UNKNOWN; return vvc_parse_picture_header(bs, vvc, si); } if (!si->sps) return -1; si->slice_type = GF_VVC_SLICE_TYPE_I; if (gf_bs_read_int_log(bs, 1, "sps_subpic_info_present_flag")) { gf_bs_read_int_log(bs, si->sps->subpicid_len, "subpic_id"); //todo update CurrSubpicIdx } if (si->pps->rect_slice_flag ) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[VVC] tiling parsing not supported - patch welcome\n")); return 0; } gf_bs_read_int_log(bs, si->sps->sh_num_extra_bits, "num_extra_bits"); /* if( !pps_rect_slice_flag && NumTilesInPic − sh_slice_address > 1 ) sh_num_tiles_in_slice_minus1 */ if (si->inter_slice_allowed_flag ) si->slice_type = gf_bs_read_int_log(bs, 2, "slice_type"); return 0; }
0
[ "CWE-190", "CWE-787" ]
gpac
51cdb67ff7c5f1242ac58c5aa603ceaf1793b788
168,611,161,527,705,280,000,000,000,000,000,000,000
33
add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722
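The terse message points at bounds-checking parameter-set IDs before they index fixed-size tables; a hedged sketch of that pattern (table size and names are assumptions, not GPAC's structures):

#include <stddef.h>
#include <stdint.h>

#define MAX_SPS_COUNT 16           /* assumed table size for the sketch */

typedef struct { int valid; } sps_info;

/* The ID comes straight from the bitstream; validate it before it is used
 * as an array index, otherwise a malformed stream writes out of bounds. */
sps_info *get_sps_slot(sps_info table[MAX_SPS_COUNT], uint32_t sps_id)
{
    if (sps_id >= MAX_SPS_COUNT)
        return NULL;               /* reject the stream instead of indexing */
    return &table[sps_id];
}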
SendFocusButton(XtermWidget xw, XFocusChangeEvent *event) { if (okSendFocusPos(xw)) { ANSI reply; memset(&reply, 0, sizeof(reply)); reply.a_type = ANSI_CSI; #if OPT_SCO_FUNC_KEYS if (xw->keyboard.type == keyboardIsSCO) { reply.a_pintro = '>'; } #endif reply.a_final = CharOf((event->type == FocusIn) ? 'I' : 'O'); unparseseq(xw, &reply); } return; }
0
[ "CWE-399" ]
xterm-snapshots
82ba55b8f994ab30ff561a347b82ea340ba7075c
192,579,965,192,501,370,000,000,000,000,000,000,000
18
snapshot of project "xterm", label xterm-365d
ServerDescriptionPtr SdamServerSelector::_randomSelect( const std::vector<ServerDescriptionPtr>& servers) const { return servers[_random.nextInt64(servers.size())]; }
0
[ "CWE-755" ]
mongo
75f7184eafa78006a698cda4c4adfb57f1290047
310,012,642,948,058,100,000,000,000,000,000,000,000
4
SERVER-50170 fix max staleness read preference parameter for server selection
static inline u32 unimac_mdio_readl(struct unimac_mdio_priv *priv, u32 offset) { /* MIPS chips strapped for BE will automagically configure the * peripheral registers for CPU-native byte order. */ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) return __raw_readl(priv->base + offset); else return readl_relaxed(priv->base + offset); }
0
[ "CWE-476" ]
linux
297a6961ffb8ff4dc66c9fbf53b924bd1dda05d5
293,491,783,515,820,960,000,000,000,000,000,000,000
10
net: phy: mdio-bcm-unimac: fix potential NULL dereference in unimac_mdio_probe() platform_get_resource() may fail and return NULL, so we had better check its return value to avoid a NULL pointer dereference a bit later in the code. This is detected by a Coccinelle semantic patch. @@ expression pdev, res, n, t, e, e1, e2; @@ res = platform_get_resource(pdev, t, n); + if (!res) + return -EINVAL; ... when != res == NULL e = devm_ioremap(e1, res->start, e2); Signed-off-by: Wei Yongjun <[email protected]> Signed-off-by: David S. Miller <[email protected]>
rfbProcessClientMessage(rfbClientPtr cl) { switch (cl->state) { case RFB_PROTOCOL_VERSION: rfbProcessClientProtocolVersion(cl); return; case RFB_SECURITY_TYPE: rfbProcessClientSecurityType(cl); return; case RFB_AUTHENTICATION: rfbAuthProcessClientMessage(cl); return; case RFB_INITIALISATION: rfbProcessClientInitMessage(cl); return; default: rfbProcessClientNormalMessage(cl); return; } }
0
[]
libvncserver
804335f9d296440bb708ca844f5d89b58b50b0c6
95,665,391,304,970,250,000,000,000,000,000,000,000
20
Thread safety for zrle, zlib, tight. Proposed tight security type fix for debian bug 517422.
static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) { return 0; }
0
[ "CWE-400" ]
linux-2.6
9581d442b9058d3699b4be568b6e5eae38a41493
76,614,409,678,210,980,000,000,000,000,000,000,000
4
KVM: Fix fs/gs reload oops with invalid ldt kvm reloads the host's fs and gs blindly, however the underlying segment descriptors may be invalid due to the user modifying the ldt after loading them. Fix by using the safe accessors (loadsegment() and load_gs_index()) instead of home grown unsafe versions. This is CVE-2010-3698. KVM-Stable-Tag. Signed-off-by: Avi Kivity <[email protected]> Signed-off-by: Marcelo Tosatti <[email protected]>
static void mem_add(MemoryListener *listener, MemoryRegionSection *section) { AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener); AddressSpaceDispatch *d = as->next_dispatch; MemoryRegionSection now = *section, remain = *section; Int128 page_size = int128_make64(TARGET_PAGE_SIZE); if (now.offset_within_address_space & ~TARGET_PAGE_MASK) { uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space) - now.offset_within_address_space; now.size = int128_min(int128_make64(left), now.size); register_subpage(d, &now); } else { now.size = int128_zero(); } while (int128_ne(remain.size, now.size)) { remain.size = int128_sub(remain.size, now.size); remain.offset_within_address_space += int128_get64(now.size); remain.offset_within_region += int128_get64(now.size); now = remain; if (int128_lt(remain.size, page_size)) { register_subpage(d, &now); } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) { now.size = page_size; register_subpage(d, &now); } else { now.size = int128_and(now.size, int128_neg(page_size)); register_multipage(d, &now); } } }
0
[]
qemu
c3c1bb99d1c11978d9ce94d1bdcf0705378c1459
71,646,586,074,349,045,000,000,000,000,000,000,000
32
exec: Respect as_translate_internal length clamp address_space_translate_internal will clamp the *plen length argument based on the size of the memory region being queried. The IOMMU walker logic in address_space_translate was ignoring this by discarding the post-call value of *plen. Fix by always using *plen as the length argument throughout the function, removing the len local variable. This fixes a bootloader bug when a single ELF section spans multiple QEMU memory regions. Signed-off-by: Peter Crosthwaite <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
TIFFWriteDirectoryTagSampleformatPerSample(TIFF* tif, uint32* ndir, TIFFDirEntry* dir, uint16 tag, double value) { switch (tif->tif_dir.td_sampleformat) { case SAMPLEFORMAT_IEEEFP: if (tif->tif_dir.td_bitspersample<=32) return(TIFFWriteDirectoryTagFloatPerSample(tif,ndir,dir,tag,(float)value)); else return(TIFFWriteDirectoryTagDoublePerSample(tif,ndir,dir,tag,value)); case SAMPLEFORMAT_INT: if (tif->tif_dir.td_bitspersample<=8) return(TIFFWriteDirectoryTagSbytePerSample(tif,ndir,dir,tag,(int8)value)); else if (tif->tif_dir.td_bitspersample<=16) return(TIFFWriteDirectoryTagSshortPerSample(tif,ndir,dir,tag,(int16)value)); else return(TIFFWriteDirectoryTagSlongPerSample(tif,ndir,dir,tag,(int32)value)); case SAMPLEFORMAT_UINT: if (tif->tif_dir.td_bitspersample<=8) return(TIFFWriteDirectoryTagBytePerSample(tif,ndir,dir,tag,(uint8)value)); else if (tif->tif_dir.td_bitspersample<=16) return(TIFFWriteDirectoryTagShortPerSample(tif,ndir,dir,tag,(uint16)value)); else return(TIFFWriteDirectoryTagLongPerSample(tif,ndir,dir,tag,(uint32)value)); default: return(1); } }
0
[ "CWE-617" ]
libtiff
de144fd228e4be8aa484c3caf3d814b6fa88c6d9
252,999,527,567,495,570,000,000,000,000,000,000,000
27
TIFFWriteDirectorySec: avoid assertion. Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2795. CVE-2018-10963
static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow) { const void *p = flow; BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32)); return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET); }
1
[ "CWE-330" ]
linux
55667441c84fa5e0911a0aac44fb059c15ba6da2
220,424,286,428,793,660,000,000,000,000,000,000,000
7
net/flow_dissector: switch to siphash UDP IPv6 packets auto flowlabels are using a 32bit secret (static u32 hashrnd in net/core/flow_dissector.c) and apply jhash() over fields known by the receivers. Attackers can easily infer the 32bit secret and use this information to identify a device and/or user, since this 32bit secret is only set at boot time. Really, using jhash() to generate cookies sent on the wire is a serious security concern. Trying to change the rol32(hash, 16) in ip6_make_flowlabel() would be a dead end. Trying to periodically change the secret (like in sch_sfq.c) could change paths taken in the network for long lived flows. Let's switch to siphash, as we did in commit df453700e8d8 ("inet: switch IP ID generator to siphash") Using a cryptographically strong pseudo random function will solve this privacy issue and more generally remove other weak points in the stack. Packet schedulers using skb_get_hash_perturb() benefit from this change. Fixes: b56774163f99 ("ipv6: Enable auto flow labels by default") Fixes: 42240901f7c4 ("ipv6: Implement different admin modes for automatic flow labels") Fixes: 67800f9b1f4e ("ipv6: Call skb_get_hash_flowi6 to get skb->hash in ip6_make_flowlabel") Fixes: cb1ce2ef387b ("ipv6: Implement automatic flow label generation on transmit") Signed-off-by: Eric Dumazet <[email protected]> Reported-by: Jonathan Berger <[email protected]> Reported-by: Amit Klein <[email protected]> Reported-by: Benny Pinkas <[email protected]> Cc: Tom Herbert <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int local_mkdir(FsContext *fs_ctx, V9fsPath *dir_path, const char *name, FsCred *credp) { int err = -1; int dirfd; dirfd = local_opendir_nofollow(fs_ctx, dir_path->data); if (dirfd == -1) { return -1; } if (fs_ctx->export_flags & V9FS_SM_MAPPED || fs_ctx->export_flags & V9FS_SM_MAPPED_FILE) { err = mkdirat(dirfd, name, SM_LOCAL_DIR_MODE_BITS); if (err == -1) { goto out; } credp->fc_mode = credp->fc_mode | S_IFDIR; if (fs_ctx->export_flags & V9FS_SM_MAPPED) { err = local_set_xattrat(dirfd, name, credp); } else { err = local_set_mapped_file_attrat(dirfd, name, credp); } if (err == -1) { goto err_end; } } else if (fs_ctx->export_flags & V9FS_SM_PASSTHROUGH || fs_ctx->export_flags & V9FS_SM_NONE) { err = mkdirat(dirfd, name, credp->fc_mode); if (err == -1) { goto out; } err = local_set_cred_passthrough(fs_ctx, dirfd, name, credp); if (err == -1) { goto err_end; } } goto out; err_end: unlinkat_preserve_errno(dirfd, name, AT_REMOVEDIR); out: close_preserve_errno(dirfd); return err; }
1
[ "CWE-732" ]
qemu
7a95434e0ca8a037fd8aa1a2e2461f92585eb77b
62,364,173,537,416,530,000,000,000,000,000,000,000
46
9pfs: local: forbid client access to metadata (CVE-2017-7493) When using the mapped-file security mode, we shouldn't let the client mess with the metadata. The current code already tries to hide the metadata dir from the client by skipping it in local_readdir(). But the client can still access or modify it through several other operations. This can be used to escalate privileges in the guest. Affected backend operations are: - local_mknod() - local_mkdir() - local_open2() - local_symlink() - local_link() - local_unlinkat() - local_renameat() - local_rename() - local_name_to_path() Other operations are safe because they are only passed a fid path, which is computed internally in local_name_to_path(). This patch converts all the functions listed above to fail and return EINVAL when being passed the name of the metadata dir. This may look like a poor choice for errno, but there's no such thing as an illegal path name on Linux and I could not think of anything better. This fixes CVE-2017-7493. Reported-by: Leo Gaspard <[email protected]> Signed-off-by: Greg Kurz <[email protected]> Reviewed-by: Eric Blake <[email protected]>
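The pattern applied across the listed operations is a name check before acting on a client-supplied path component; a simplified sketch, assuming the metadata directory is named ".virtfs_metadata" in the mapped-file model (this is not the actual QEMU helper):

#include <errno.h>
#include <string.h>

/* Assumed name of the host-side metadata directory used by the mapped-file
 * security model. */
#define META_DIR_NAME ".virtfs_metadata"

/* Called at the top of each affected operation (mkdir, open2, unlinkat, ...)
 * before the client-supplied name is acted on. */
int reject_metadata_name(const char *name)
{
    if (strcmp(name, META_DIR_NAME) == 0) {
        errno = EINVAL;            /* the errno chosen by the commit message */
        return -1;
    }
    return 0;
}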
HandleARDAuth(rfbClient *client) { uint8_t gen[2], len[2]; size_t keylen; uint8_t *mod = NULL, *resp = NULL, *priv = NULL, *pub = NULL, *key = NULL, *shared = NULL; uint8_t userpass[128], ciphertext[128]; int ciphertext_len; int passwordLen, usernameLen; rfbCredential *cred = NULL; rfbBool result = FALSE; /* Step 1: Read the authentication material from the socket. A two-byte generator value, a two-byte key length value. */ if (!ReadFromRFBServer(client, (char *)gen, 2)) { rfbClientErr("HandleARDAuth: reading generator value failed\n"); goto out; } if (!ReadFromRFBServer(client, (char *)len, 2)) { rfbClientErr("HandleARDAuth: reading key length failed\n"); goto out; } keylen = 256*len[0]+len[1]; /* convert from char[] to int */ mod = (uint8_t*)malloc(keylen*5); /* the block actually contains mod, resp, pub, priv and key */ if (!mod) goto out; resp = mod+keylen; pub = resp+keylen; priv = pub+keylen; key = priv+keylen; /* Step 1: Read the authentication material from the socket. The prime modulus (keylen bytes) and the peer's generated public key (keylen bytes). */ if (!ReadFromRFBServer(client, (char *)mod, keylen)) { rfbClientErr("HandleARDAuth: reading prime modulus failed\n"); goto out; } if (!ReadFromRFBServer(client, (char *)resp, keylen)) { rfbClientErr("HandleARDAuth: reading peer's generated public key failed\n"); goto out; } /* Step 2: Generate own Diffie-Hellman public-private key pair. */ if(!dh_generate_keypair(priv, pub, gen, 2, mod, keylen)) { rfbClientErr("HandleARDAuth: generating keypair failed\n"); goto out; } /* Step 3: Perform Diffie-Hellman key agreement, using the generator (gen), prime (mod), and the peer's public key. The output will be a shared secret known to both us and the peer. */ if(!dh_compute_shared_key(key, priv, resp, mod, keylen)) { rfbClientErr("HandleARDAuth: creating shared key failed\n"); goto out; } /* Step 4: Perform an MD5 hash of the shared secret. This 128-bit (16-byte) value will be used as the AES key. */ shared = malloc(MD5_HASH_SIZE); if(!hash_md5(shared, key, keylen)) { rfbClientErr("HandleARDAuth: hashing shared key failed\n"); goto out; } /* Step 5: Pack the username and password into a 128-byte plaintext "userpass" structure: { username[64], password[64] }. Null-terminate each. Fill the unused bytes with random characters so that the encryption output is less predictable. */ if(!client->GetCredential) { rfbClientErr("HandleARDAuth: GetCredential callback is not set\n"); goto out; } cred = client->GetCredential(client, rfbCredentialTypeUser); if(!cred) { rfbClientErr("HandleARDAuth: reading credential failed\n"); goto out; } passwordLen = strlen(cred->userCredential.password)+1; usernameLen = strlen(cred->userCredential.username)+1; if (passwordLen > sizeof(userpass)/2) passwordLen = sizeof(userpass)/2; if (usernameLen > sizeof(userpass)/2) usernameLen = sizeof(userpass)/2; random_bytes(userpass, sizeof(userpass)); memcpy(userpass, cred->userCredential.username, usernameLen); memcpy(userpass+sizeof(userpass)/2, cred->userCredential.password, passwordLen); /* Step 6: Encrypt the plaintext credentials with the 128-bit MD5 hash from step 4, using the AES 128-bit symmetric cipher in electronic codebook (ECB) mode. Use no further padding for this block cipher. */ if(!encrypt_aes128ecb(ciphertext, &ciphertext_len, shared, userpass, sizeof(userpass))) { rfbClientErr("HandleARDAuth: encrypting credentials failed\n"); goto out; } /* Step 7: Write the ciphertext from step 6 to the stream. Write the generated DH public key to the stream. 
*/ if (!WriteToRFBServer(client, (char *)ciphertext, sizeof(ciphertext))) goto out; if (!WriteToRFBServer(client, (char *)pub, keylen)) goto out; /* Handle the SecurityResult message */ if (!rfbHandleAuthResult(client)) goto out; result = TRUE; out: if (cred) FreeUserCredential(cred); free(mod); free(shared); return result; }
0
[ "CWE-703", "CWE-770" ]
libvncserver
8937203441ee241c4ace85da687b7d6633a12365
257,018,338,908,600,700,000,000,000,000,000,000,000
118
libvncclient/rfbproto: limit max textchat size Addresses GitHub Security Lab (GHSL) Vulnerability Report `GHSL-2020-063`. Re #275
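The fix caps a network-supplied length before it is used to size an allocation; a hedged sketch (the cap value and helper are assumptions, not the actual rfbproto code):

#include <stdint.h>
#include <stdlib.h>

#define MAX_TEXTCHAT_SIZE (10 * 1024 * 1024)   /* assumed upper bound */

/* The length arrives from the network; never hand it straight to malloc(). */
char *alloc_textchat_buffer(uint32_t announced_len)
{
    if (announced_len == 0 || announced_len > MAX_TEXTCHAT_SIZE)
        return NULL;                           /* drop oversized or bogus requests */
    return malloc((size_t)announced_len + 1);  /* +1 for a terminating NUL */
}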
static void worker_detach_from_pool(struct worker *worker, struct worker_pool *pool) { struct completion *detach_completion = NULL; mutex_lock(&pool->attach_mutex); list_del(&worker->node); if (list_empty(&pool->workers)) detach_completion = pool->detach_completion; mutex_unlock(&pool->attach_mutex); /* clear leftover flags without pool->lock after it is detached */ worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND); if (detach_completion) complete(detach_completion); }
0
[ "CWE-200" ]
tip
dfb4357da6ddbdf57d583ba64361c9d792b0e0b1
96,527,517,109,255,900,000,000,000,000,000,000,000
17
time: Remove CONFIG_TIMER_STATS Currently CONFIG_TIMER_STATS exposes process information across namespaces: kernel/time/timer_list.c print_timer(): SEQ_printf(m, ", %s/%d", tmp, timer->start_pid); /proc/timer_list: #11: <0000000000000000>, hrtimer_wakeup, S:01, do_nanosleep, cron/2570 Given that the tracer can give the same information, this patch entirely removes CONFIG_TIMER_STATS. Suggested-by: Thomas Gleixner <[email protected]> Signed-off-by: Kees Cook <[email protected]> Acked-by: John Stultz <[email protected]> Cc: Nicolas Pitre <[email protected]> Cc: [email protected] Cc: Lai Jiangshan <[email protected]> Cc: Shuah Khan <[email protected]> Cc: Xing Gao <[email protected]> Cc: Jonathan Corbet <[email protected]> Cc: Jessica Frazelle <[email protected]> Cc: [email protected] Cc: Nicolas Iooss <[email protected]> Cc: "Paul E. McKenney" <[email protected]> Cc: Petr Mladek <[email protected]> Cc: Richard Cochran <[email protected]> Cc: Tejun Heo <[email protected]> Cc: Michal Marek <[email protected]> Cc: Josh Poimboeuf <[email protected]> Cc: Dmitry Vyukov <[email protected]> Cc: Oleg Nesterov <[email protected]> Cc: "Eric W. Biederman" <[email protected]> Cc: Olof Johansson <[email protected]> Cc: Andrew Morton <[email protected]> Cc: [email protected] Cc: Arjan van de Ven <[email protected]> Link: http://lkml.kernel.org/r/20170208192659.GA32582@beast Signed-off-by: Thomas Gleixner <[email protected]>
sctp_disposition_t sctp_sf_error_closed(const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR, SCTP_ERROR(-EINVAL)); return SCTP_DISPOSITION_CONSUME; }
0
[ "CWE-20" ]
linux-2.6
ba0166708ef4da7eeb61dd92bbba4d5a749d6561
318,430,313,302,405,140,000,000,000,000,000,000,000
9
sctp: Fix kernel panic while processing a protocol violation parameter The call to sctp_sf_abort_violation() needs an 'arg' parameter of type 'struct sctp_chunk', from whose chunk_hdr member it reads the chunk type and chunk length. But sctp_sf_violation_paramlen() is always called with a 'struct sctp_paramhdr' typed parameter, which is then passed on to sctp_sf_abort_violation(). This may cause a kernel panic. sctp_sf_violation_paramlen() |-- sctp_sf_abort_violation() |-- sctp_make_abort_violation() This patch fixes the problem. It also fixes two places which called sctp_sf_violation_paramlen() with the wrong parameter type. Signed-off-by: Wei Yongjun <[email protected]> Signed-off-by: Vlad Yasevich <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int ext_analyzer_insn_hook(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) { if (env->dev_ops && env->dev_ops->insn_hook) return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx); return 0; }
0
[ "CWE-20" ]
linux
c131187db2d3fa2f8bf32fdf4e9a4ef805168467
330,892,672,756,320,900,000,000,000,000,000,000,000
8
bpf: fix branch pruning logic when the verifier detects that register contains a runtime constant and it's compared with another constant it will prune exploration of the branch that is guaranteed not to be taken at runtime. This is all correct, but malicious program may be constructed in such a way that it always has a constant comparison and the other branch is never taken under any conditions. In this case such path through the program will not be explored by the verifier. It won't be taken at run-time either, but since all instructions are JITed the malicious program may cause JITs to complain about using reserved fields, etc. To fix the issue we have to track the instructions explored by the verifier and sanitize instructions that are dead at run time with NOPs. We cannot reject such dead code, since llvm generates it for valid C code, since it doesn't do as much data flow analysis as the verifier does. Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)") Signed-off-by: Alexei Starovoitov <[email protected]> Acked-by: Daniel Borkmann <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]>
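The fix tracks which instructions the verifier actually visited and rewrites the unvisited ones as no-ops so the JIT never sees them; a generic sketch of such a sanitizing pass (plain arrays and a stand-in nop encoding, not the kernel's bpf_insn handling):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct insn {
    uint8_t opcode;
    int32_t imm;
};

#define OPCODE_NOP 0x05            /* stand-in; the kernel uses a "ja 0" jump */

/* `seen[i]` is set during verification when instruction i is reached on some
 * explored path. Anything left unset is dead at run time, but the JIT still
 * sees it, so overwrite it with harmless no-ops instead of rejecting it. */
void sanitize_dead_code(struct insn *prog, const bool *seen, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        if (!seen[i]) {
            prog[i].opcode = OPCODE_NOP;
            prog[i].imm    = 0;
        }
    }
}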
Item *remove_item_direct_ref() { return (*ref)->remove_item_direct_ref(); }
0
[ "CWE-617" ]
server
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
8,229,054,574,825,652,000,000,000,000,000,000,000
2
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order... When doing condition pushdown from HAVING into WHERE, Item_equal::create_pushable_equalities() calls item->set_extraction_flag(IMMUTABLE_FL) for constant items. Then, Item::cleanup_excluding_immutables_processor() checks for this flag to see if it should call item->cleanup() or leave the item as-is. The failure happens when a constant item has a non-constant one inside it, like: (tbl.col=0 AND impossible_cond) item->walk(cleanup_excluding_immutables_processor) works in a bottom-up way so it 1. will call Item_func_eq(tbl.col=0)->cleanup() 2. will not call Item_cond_and->cleanup (as the AND is constant) This creates an item tree where a fixed Item has an un-fixed Item inside it which eventually causes an assertion failure. Fixed by introducing this rule: instead of just calling item->set_extraction_flag(IMMUTABLE_FL); we call Item::walk() to set the flag for all sub-items of the item.
static int ZEND_FASTCALL ZEND_INCLUDE_OR_EVAL_SPEC_TMP_HANDLER(ZEND_OPCODE_HANDLER_ARGS) { zend_op *opline = EX(opline); zend_op_array *new_op_array=NULL; int return_value_used; zend_free_op free_op1; zval *inc_filename = _get_zval_ptr_tmp(&opline->op1, EX(Ts), &free_op1 TSRMLS_CC); zval tmp_inc_filename; zend_bool failure_retval=0; if (inc_filename->type!=IS_STRING) { tmp_inc_filename = *inc_filename; zval_copy_ctor(&tmp_inc_filename); convert_to_string(&tmp_inc_filename); inc_filename = &tmp_inc_filename; } return_value_used = RETURN_VALUE_USED(opline); switch (Z_LVAL(opline->op2.u.constant)) { case ZEND_INCLUDE_ONCE: case ZEND_REQUIRE_ONCE: { zend_file_handle file_handle; char *resolved_path; resolved_path = zend_resolve_path(Z_STRVAL_P(inc_filename), Z_STRLEN_P(inc_filename) TSRMLS_CC); if (resolved_path) { failure_retval = zend_hash_exists(&EG(included_files), resolved_path, strlen(resolved_path)+1); } else { resolved_path = Z_STRVAL_P(inc_filename); } if (failure_retval) { /* do nothing, file already included */ } else if (SUCCESS == zend_stream_open(resolved_path, &file_handle TSRMLS_CC)) { if (!file_handle.opened_path) { file_handle.opened_path = estrdup(resolved_path); } if (zend_hash_add_empty_element(&EG(included_files), file_handle.opened_path, strlen(file_handle.opened_path)+1)==SUCCESS) { new_op_array = zend_compile_file(&file_handle, (Z_LVAL(opline->op2.u.constant)==ZEND_INCLUDE_ONCE?ZEND_INCLUDE:ZEND_REQUIRE) TSRMLS_CC); zend_destroy_file_handle(&file_handle TSRMLS_CC); } else { zend_file_handle_dtor(&file_handle TSRMLS_CC); failure_retval=1; } } else { if (Z_LVAL(opline->op2.u.constant)==ZEND_INCLUDE_ONCE) { zend_message_dispatcher(ZMSG_FAILED_INCLUDE_FOPEN, Z_STRVAL_P(inc_filename) TSRMLS_CC); } else { zend_message_dispatcher(ZMSG_FAILED_REQUIRE_FOPEN, Z_STRVAL_P(inc_filename) TSRMLS_CC); } } if (resolved_path != Z_STRVAL_P(inc_filename)) { efree(resolved_path); } } break; case ZEND_INCLUDE: case ZEND_REQUIRE: new_op_array = compile_filename(Z_LVAL(opline->op2.u.constant), inc_filename TSRMLS_CC); break; case ZEND_EVAL: { char *eval_desc = zend_make_compiled_string_description("eval()'d code" TSRMLS_CC); new_op_array = zend_compile_string(inc_filename, eval_desc TSRMLS_CC); efree(eval_desc); } break; EMPTY_SWITCH_DEFAULT_CASE() } if (inc_filename==&tmp_inc_filename) { zval_dtor(&tmp_inc_filename); } zval_dtor(free_op1.var); EX_T(opline->result.u.var).var.ptr_ptr = &EX_T(opline->result.u.var).var.ptr; if (new_op_array && !EG(exception)) { EX(original_return_value) = EG(return_value_ptr_ptr); EG(return_value_ptr_ptr) = return_value_used ? 
EX_T(opline->result.u.var).var.ptr_ptr : NULL; EG(active_op_array) = new_op_array; EX_T(opline->result.u.var).var.ptr = NULL; EX(current_object) = EX(object); EX(function_state).function = (zend_function *) new_op_array; EX(object) = NULL; if (!EG(active_symbol_table)) { zend_rebuild_symbol_table(TSRMLS_C); } if (zend_execute == execute) { EX(call_opline) = opline; ZEND_VM_ENTER(); } else { zend_execute(new_op_array TSRMLS_CC); } EX(function_state).function = (zend_function *) EX(op_array); EX(object) = EX(current_object); if (return_value_used) { if (!EX_T(opline->result.u.var).var.ptr) { /* there was no return statement */ ALLOC_ZVAL(EX_T(opline->result.u.var).var.ptr); INIT_PZVAL(EX_T(opline->result.u.var).var.ptr); Z_LVAL_P(EX_T(opline->result.u.var).var.ptr) = 1; Z_TYPE_P(EX_T(opline->result.u.var).var.ptr) = IS_BOOL; } } EG(opline_ptr) = &EX(opline); EG(active_op_array) = EX(op_array); EG(return_value_ptr_ptr) = EX(original_return_value); destroy_op_array(new_op_array TSRMLS_CC); efree(new_op_array); if (EG(exception)) { zend_throw_exception_internal(NULL TSRMLS_CC); } } else { if (return_value_used) { ALLOC_ZVAL(EX_T(opline->result.u.var).var.ptr); INIT_ZVAL(*EX_T(opline->result.u.var).var.ptr); Z_LVAL_P(EX_T(opline->result.u.var).var.ptr) = failure_retval; Z_TYPE_P(EX_T(opline->result.u.var).var.ptr) = IS_BOOL; } } ZEND_VM_NEXT_OPCODE(); }
1
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
146,436,470,033,437,200,000,000,000,000,000,000,000
129
- fix #39863, do not accept paths with a NUL byte in them. See http://news.php.net/php.internals/50191; trunk will have the patch later (adding a macro and/or changing (some) APIs). Patch by Rasmus
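The check behind this fix is rejecting any path whose tracked length covers an embedded NUL byte; a plain-C sketch of that test (an illustration, not the macro PHP later added):

#include <stddef.h>
#include <string.h>

/* A zval-style string carries an explicit length; scanning those bytes for
 * '\0' catches paths that would be silently truncated by C string APIs, so
 * "safe.txt\0.php"-style tricks can be rejected up front. */
int path_contains_nul(const char *path, size_t path_len)
{
    return memchr(path, '\0', path_len) != NULL;
}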
static int inet_csk_bind_conflict(const struct sock *sk, const struct inet_bind_bucket *tb, bool relax, bool reuseport_ok) { struct sock *sk2; bool reuse = sk->sk_reuse; bool reuseport = !!sk->sk_reuseport && reuseport_ok; kuid_t uid = sock_i_uid((struct sock *)sk); /* * Unlike other sk lookup places we do not check * for sk_net here, since _all_ the socks listed * in tb->owners list belong to the same net - the * one this bucket belongs to. */ sk_for_each_bound(sk2, &tb->owners) { if (sk != sk2 && (!sk->sk_bound_dev_if || !sk2->sk_bound_dev_if || sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { if ((!reuse || !sk2->sk_reuse || sk2->sk_state == TCP_LISTEN) && (!reuseport || !sk2->sk_reuseport || rcu_access_pointer(sk->sk_reuseport_cb) || (sk2->sk_state != TCP_TIME_WAIT && !uid_eq(uid, sock_i_uid(sk2))))) { if (inet_rcv_saddr_equal(sk, sk2, true)) break; } if (!relax && reuse && sk2->sk_reuse && sk2->sk_state != TCP_LISTEN) { if (inet_rcv_saddr_equal(sk, sk2, true)) break; } } } return sk2 != NULL; }
0
[ "CWE-200", "CWE-415" ]
linux
657831ffc38e30092a2d5f03d385d710eb88b09a
267,977,646,172,078,240,000,000,000,000,000,000,000
39
dccp/tcp: do not inherit mc_list from parent syzkaller found a way to trigger double frees from ip_mc_drop_socket(). It turns out that we leave a copy of the parent's mc_list at accept() time, which is very bad. Very similar to commit 8b485ce69876 ("tcp: do not inherit fastopen_req from parent"). Initial report from Pray3r, completed by Andrey. Thanks a lot to them! Signed-off-by: Eric Dumazet <[email protected]> Reported-by: Pray3r <[email protected]> Reported-by: Andrey Konovalov <[email protected]> Tested-by: Andrey Konovalov <[email protected]> Signed-off-by: David S. Miller <[email protected]>
deleteDirsFromLibpath(const char *const libpath, const char *const deleteStart, const size_t deleteLen) { char *newPath = NULL; size_t preLen = deleteStart - libpath; const char *postStart = deleteStart + deleteLen; size_t postLen = strlen(postStart); size_t delim = 0; /* Remove trailing : from the prefix */ while ((preLen > 0) && (':' == libpath[preLen - 1])) { preLen -= 1; } if (postLen > 0) { /* Remove leading : from the postfix */ while (':' == postStart[0]) { postStart += 1; postLen -= 1; } } if ((preLen > 0) && (postLen > 0)) { /* Add delimiter : */ delim = 1; } newPath = malloc(preLen + delim + postLen + 1); if (NULL == newPath) { fprintf(stderr, "deleteDirsFromLibpath: malloc(%d) failed, aborting\n", preLen + delim + postLen + 1); abort(); } memcpy(newPath, libpath, preLen); if (delim > 0) { newPath[preLen] = ':'; } memcpy(newPath + preLen + delim, postStart, postLen); /* Set the NUL terminator at the end */ newPath[preLen + delim + postLen] = '\0'; return newPath; }
0
[ "CWE-119" ]
openj9
0971f22d88f42cf7332364ad7430e9bd8681c970
240,697,384,791,662,570,000,000,000,000,000,000,000
44
Clean up jio_snprintf and jio_vfprintf Fixes https://bugs.eclipse.org/bugs/show_bug.cgi?id=543659 Signed-off-by: Peter Bain <[email protected]>
void EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node, TfLiteConvParams* params, OpData* data, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* im2col, TfLiteTensor* output) { float output_activation_min, output_activation_max; CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); const int input_size = NumElements(input) / SizeOfDimension(input, 0); const int batch_size = SizeOfDimension(input, 0); int8_t* quantized_input_ptr_batch = GetTensorData<int8_t>( GetTemporary(context, node, data->input_quantized_index)); float* scaling_factors_ptr = GetTensorData<float>( GetTemporary(context, node, data->scaling_factors_index)); int32_t* input_offset_ptr = GetTensorData<int32_t>( GetTemporary(context, node, data->input_offset_index)); for (int b = 0; b < batch_size; ++b) { const int offset = b * input_size; tensor_utils::AsymmetricQuantizeFloats( GetTensorData<float>(input) + offset, input_size, quantized_input_ptr_batch + offset, &scaling_factors_ptr[b], &input_offset_ptr[b]); } int8_t* im2col_ptr = nullptr; int8_t* filter_ptr = nullptr; if (im2col != nullptr) { im2col_ptr = im2col->data.int8; } filter_ptr = filter->data.int8; const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params); ConvParams op_params; op_params.padding_type = PaddingType::kSame; op_params.padding_values.width = data->padding.width; op_params.padding_values.height = data->padding.height; op_params.stride_width = params->stride_width; op_params.stride_height = params->stride_height; op_params.dilation_width_factor = 1; op_params.dilation_height_factor = 1; op_params.float_activation_min = output_activation_min; op_params.float_activation_max = output_activation_max; switch (kernel_type) { case kReference: reference_ops::HybridConvPerChannel( op_params, scaling_factors_ptr, GetTensorShape(input), quantized_input_ptr_batch, GetTensorShape(filter), filter_ptr, GetTensorShape(bias), GetTensorData<float>(bias), GetTensorShape(output), GetTensorData<float>(output), GetTensorShape(im2col), im2col_ptr, affine_quantization->scale->data, input_offset_ptr); break; case kGenericOptimized: case kMultithreadOptimized: case kCblasOptimized: { TfLiteTensor* row_sums = GetTemporary(context, node, data->row_sums_index); TfLiteTensor* scratch = GetTemporary(context, node, data->accum_scratch_index); optimized_ops::HybridConvPerChannel( op_params, scaling_factors_ptr, GetTensorShape(input), quantized_input_ptr_batch, GetTensorShape(filter), filter_ptr, GetTensorShape(bias), GetTensorData<float>(bias), GetTensorShape(output), GetTensorData<float>(output), GetTensorShape(im2col), im2col_ptr, affine_quantization->scale->data, input_offset_ptr, GetTensorShape(scratch), GetTensorData<int32>(scratch), GetTensorData<int32_t>(row_sums), &data->compute_hybrid_row_sums, CpuBackendContext::GetFromContext(context)); data->compute_hybrid_row_sums = false; break; } } }
1
[ "CWE-125", "CWE-787" ]
tensorflow
1970c2158b1ffa416d159d03c3370b9a462aee35
9,622,147,909,787,428,000,000,000,000,000,000,000
76
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
Pl_AES_PDF::setIV(unsigned char const* iv, size_t bytes) { if (bytes != this->buf_size) { throw std::logic_error( "Pl_AES_PDF: specified initialization vector" " size in bytes must be " + QUtil::int_to_string(bytes)); } this->use_specified_iv = true; memcpy(this->specified_iv, iv, bytes); }
1
[ "CWE-787" ]
qpdf
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
716,356,194,452,751,600,000,000,000,000,000,000
11
Fix sign and conversion warnings (major) This makes all integer type conversions that have potential data loss explicit with calls that do range checks and raise an exception. After this commit, qpdf builds with no warnings when -Wsign-conversion -Wconversion is used with gcc or clang or when -W3 -Wd4800 is used with MSVC. This significantly reduces the likelihood of potential crashes from bogus integer values. There are some parts of the code that take int when they should take size_t or an offset. Such places would make qpdf not support files with more than 2^31 of something that usually wouldn't be so large. In the event that such a file shows up and is valid, at least qpdf would raise an error in the right spot so the issue could be legitimately addressed rather than failing in some weird way because of a silent overflow condition.
bool Item_ref::is_null() { DBUG_ASSERT(fixed); bool tmp=(*ref)->is_null_result(); null_value=(*ref)->null_value; return tmp; }
0
[]
server
b000e169562697aa072600695d4f0c0412f94f4f
45,466,834,863,637,920,000,000,000,000,000,000,000
7
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL)) based on: commit f7316aa0c9a Author: Ajo Robert <[email protected]> Date: Thu Aug 24 17:03:21 2017 +0530 Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL)) Backport of Bug#19143243 fix. NAME_CONST item can return NULL_ITEM type in case of incorrect arguments. NULL_ITEM has special processing in Item_func_in function. In Item_func_in::fix_length_and_dec an array of possible comparators is created. Since NAME_CONST function has NULL_ITEM type, corresponding array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE. ITEM_CACHE can not return proper type(NULL_ITEM) in Item_func_in::val_int(), so the NULL_ITEM is attempted compared with an empty comparator. The fix is to disable the caching of Item_name_const item.
void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) { struct sco_conn *conn = hcon->sco_data; if (!conn) goto drop; BT_DBG("conn %p len %u", conn, skb->len); if (skb->len) { sco_recv_frame(conn, skb); return; } drop: kfree_skb(skb); }
0
[ "CWE-416" ]
linux
0771cbb3b97d3c1d68eecd7f00055f599954c34e
170,711,217,795,960,400,000,000,000,000,000,000,000
17
Bluetooth: SCO: Replace use of memcpy_from_msg with bt_skb_sendmsg This makes use of bt_skb_sendmsg instead of allocating a different buffer to be used with memcpy_from_msg which cause one extra copy. Signed-off-by: Luiz Augusto von Dentz <[email protected]> Signed-off-by: Marcel Holtmann <[email protected]>
} static HashTable *date_object_get_properties_timezone(zval *object TSRMLS_DC) { HashTable *props; zval *zv; php_timezone_obj *tzobj; tzobj = (php_timezone_obj *) zend_object_store_get_object(object TSRMLS_CC); props = zend_std_get_properties(object TSRMLS_CC); if (!tzobj->initialized) { return props; } MAKE_STD_ZVAL(zv); ZVAL_LONG(zv, tzobj->type); zend_hash_update(props, "timezone_type", 14, &zv, sizeof(zv), NULL); MAKE_STD_ZVAL(zv); switch (tzobj->type) { case TIMELIB_ZONETYPE_ID: ZVAL_STRING(zv, tzobj->tzi.tz->name, 1); break; case TIMELIB_ZONETYPE_OFFSET: { char *tmpstr = emalloc(sizeof("UTC+05:00")); snprintf(tmpstr, sizeof("+05:00"), "%c%02d:%02d", tzobj->tzi.utc_offset > 0 ? '-' : '+', abs(tzobj->tzi.utc_offset / 60), abs((tzobj->tzi.utc_offset % 60))); ZVAL_STRING(zv, tmpstr, 0); } break; case TIMELIB_ZONETYPE_ABBR: ZVAL_STRING(zv, tzobj->tzi.z.abbr, 1); break; } zend_hash_update(props, "timezone", 9, &zv, sizeof(zv), NULL);
0
[]
php-src
bb057498f7457e8b2eba98332a3bad434de4cf12
315,642,224,592,746,680,000,000,000,000,000,000,000
43
Fix #70277: new DateTimeZone($foo) is ignoring text after null byte The DateTimeZone constructors are not binary safe. They're parsing the timezone as string, but discard the length when calling timezone_initialize(). This patch adds a tz_len parameter and a respective check to timezone_initialize().
bool is_reference(t_field* tfield) { return tfield->get_reference(); }
0
[ "CWE-20" ]
thrift
cfaadcc4adcfde2a8232c62ec89870b73ef40df1
262,259,684,022,556,740,000,000,000,000,000,000,000
1
THRIFT-3231 CPP: Limit recursion depth to 64 Client: cpp Patch: Ben Craig <[email protected]>
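A minimal sketch of the recursion-depth guard the message describes; the limit of 64 comes from the message, everything else (names, errno choice) is an assumption:

#include <errno.h>

#define MAX_RECURSION_DEPTH 64     /* limit taken from the commit message */

struct decode_ctx {
    int depth;
};

/* Call on entry to every nested read and pair it with a depth decrement on
 * exit; deeply nested input then fails cleanly instead of exhausting the stack. */
int push_nesting(struct decode_ctx *ctx)
{
    if (ctx->depth >= MAX_RECURSION_DEPTH) {
        errno = E2BIG;             /* arbitrary errno choice for the sketch */
        return -1;
    }
    ctx->depth++;
    return 0;
}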
static OPJ_BOOL opj_j2k_setup_header_writing (opj_j2k_t *p_j2k, opj_event_mgr_t * p_manager) { /* preconditions */ assert(p_j2k != 00); assert(p_manager != 00); if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_init_info, p_manager)) { return OPJ_FALSE; } if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_soc, p_manager)) { return OPJ_FALSE; } if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_siz, p_manager)) { return OPJ_FALSE; } if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_cod, p_manager)) { return OPJ_FALSE; } if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_qcd, p_manager)) { return OPJ_FALSE; } if (OPJ_IS_CINEMA(p_j2k->m_cp.rsiz)) { /* No need for COC or QCC, QCD and COD are used if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_all_coc, p_manager)) { return OPJ_FALSE; } if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_all_qcc, p_manager)) { return OPJ_FALSE; } */ if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_tlm, p_manager)) { return OPJ_FALSE; } if (p_j2k->m_cp.rsiz == OPJ_PROFILE_CINEMA_4K) { if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_poc, p_manager)) { return OPJ_FALSE; } } } if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_regions, p_manager)) { return OPJ_FALSE; } if (p_j2k->m_cp.comment != 00) { if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_com, p_manager)) { return OPJ_FALSE; } } /* DEVELOPER CORNER, insert your custom procedures */ if (p_j2k->m_cp.rsiz & OPJ_EXTENSION_MCT) { if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_write_mct_data_group, p_manager)) { return OPJ_FALSE; } } /* End of Developer Corner */ if (p_j2k->cstr_index) { if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_get_end_header, p_manager)) { return OPJ_FALSE; } } if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_create_tcd, p_manager)) { return OPJ_FALSE; } if (! opj_procedure_list_add_procedure(p_j2k->m_procedure_list,(opj_procedure)opj_j2k_update_rates, p_manager)) { return OPJ_FALSE; } return OPJ_TRUE; }
0
[ "CWE-416" ]
openjpeg
940100c28ae28931722290794889cf84a92c5f6f
237,385,116,010,037,300,000,000,000,000,000,000,000
75
Fix potential use-after-free in opj_j2k_write_mco function Fixes #563
ephy_embed_auto_download_url (EphyEmbed *embed, const char *url) { WebKitNetworkRequest *request; WebKitDownload *download; request = webkit_network_request_new (url); download = webkit_download_new (request); g_object_unref (request); if (perform_auto_download (download)) webkit_download_start (download); }
0
[]
epiphany
3e0f7dea754381c5ad11a06ccc62eb153382b498
22,877,126,480,829,737,000,000,000,000,000,000,000
12
Report broken certs through the padlock icon This uses a new feature in libsoup that reports through a SoupMessageFlag whether the message is talking to a server that has a trusted certificate. Bug #600663
psf_rand_int32 (void) { static uint64_t value = 0 ; int k, count ; if (value == 0) { #if HAVE_GETTIMEOFDAY struct timeval tv ; gettimeofday (&tv, NULL) ; value = tv.tv_sec + tv.tv_usec ; #else value = time (NULL) ; #endif } ; count = 4 + (value & 7) ; for (k = 0 ; k < count ; k++) value = (11117 * value + 211231) & 0x7fffffff ; return (int32_t) value ; } /* psf_rand_int32 */
0
[ "CWE-119", "CWE-787" ]
libsndfile
708e996c87c5fae77b104ccfeb8f6db784c32074
23,733,012,366,294,020,000,000,000,000,000,000,000
21
src/ : Move to a variable length header buffer Previously, the `psf->header` buffer was a fixed length specified by `SF_HEADER_LEN` which was set to `12292`. This was problematic for two reasons; this value was un-necessarily large for the majority of files and too small for some others. Now the size of the header buffer starts at 256 bytes and grows as necessary up to a maximum of 100k.
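A hedged sketch of the growth policy the message describes, starting small and doubling up to a hard cap of about 100k; names and exact limits are assumptions, not libsndfile's psf->header code:

#include <stdlib.h>

#define HEADER_INITIAL_SIZE 256
#define HEADER_MAX_SIZE     (100 * 1024)   /* cap from the commit message */

struct header_buf {
    unsigned char *data;
    size_t         len;            /* bytes currently used */
    size_t         cap;            /* bytes allocated */
};

/* Double the capacity until `needed` fits, never exceeding the hard cap. */
int header_reserve(struct header_buf *h, size_t needed)
{
    size_t new_cap = h->cap ? h->cap : HEADER_INITIAL_SIZE;

    if (needed > HEADER_MAX_SIZE)
        return -1;                 /* refuse absurd headers outright */
    while (new_cap < needed)
        new_cap *= 2;
    if (new_cap > HEADER_MAX_SIZE)
        new_cap = HEADER_MAX_SIZE;
    if (new_cap != h->cap) {
        unsigned char *p = realloc(h->data, new_cap);
        if (p == NULL)
            return -1;
        h->data = p;
        h->cap  = new_cap;
    }
    return 0;
}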
static void node_device_release(struct device *dev) { struct node *node = to_node(dev); #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS) /* * We schedule the work only when a memory section is * onlined/offlined on this node. When we come here, * all the memory on this node has been offlined, * so we won't enqueue new work to this work. * * The work is using node->node_work, so we should * flush work before freeing the memory. */ flush_work(&node->node_work); #endif kfree(node); }
0
[ "CWE-787" ]
linux
aa838896d87af561a33ecefea1caa4c15a68bc47
4,322,445,893,716,062,000,000,000,000,000,000,000
18
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions Convert the various sprintf fmaily calls in sysfs device show functions to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety. Done with: $ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 . And cocci script: $ cat sysfs_emit_dev.cocci @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - sprintf(buf, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - strcpy(buf, chr); + sysfs_emit(buf, chr); ...> } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - sprintf(buf, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... - len += scnprintf(buf + len, PAGE_SIZE - len, + len += sysfs_emit_at(buf, len, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { ... - strcpy(buf, chr); - return strlen(buf); + return sysfs_emit(buf, chr); } Signed-off-by: Joe Perches <[email protected]> Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com Signed-off-by: Greg Kroah-Hartman <[email protected]>
TEST_F(ServerSelectorTestFixture, ShouldSelectPreferredIfAvailable) { TopologyStateMachine stateMachine(sdamConfiguration); auto topologyDescription = std::make_shared<TopologyDescription>(sdamConfiguration); const auto now = Date_t::now(); const auto d0 = now - Milliseconds(1000); const auto s0 = ServerDescriptionBuilder() .withAddress(HostAndPort("s0")) .withType(ServerType::kRSPrimary) .withRtt(sdamConfiguration.getLocalThreshold()) .withSetName("set") .withHost(HostAndPort("s0")) .withHost(HostAndPort("s1")) .withMinWireVersion(WireVersion::SUPPORTS_OP_MSG) .withMaxWireVersion(WireVersion::LATEST_WIRE_VERSION) .withLastWriteDate(d0) .withTag("tag", "primary") .instance(); stateMachine.onServerDescription(*topologyDescription, s0); const auto s1 = ServerDescriptionBuilder() .withAddress(HostAndPort("s1")) .withType(ServerType::kRSSecondary) .withRtt(sdamConfiguration.getLocalThreshold()) .withSetName("set") .withHost(HostAndPort("s0")) .withHost(HostAndPort("s1")) .withMinWireVersion(WireVersion::SUPPORTS_OP_MSG) .withMaxWireVersion(WireVersion::LATEST_WIRE_VERSION) .withLastWriteDate(d0) .withTag("tag", "secondary") .instance(); stateMachine.onServerDescription(*topologyDescription, s1); const auto primaryPreferredTagSecondary = ReadPreferenceSetting(ReadPreference::PrimaryPreferred, TagSets::secondarySet); auto result1 = selector.selectServer(topologyDescription, primaryPreferredTagSecondary); ASSERT(result1 != boost::none); ASSERT_EQ(HostAndPort("s0"), (*result1)->getAddress()); const auto secondaryPreferredWithTag = ReadPreferenceSetting(ReadPreference::SecondaryPreferred, TagSets::secondarySet); auto result2 = selector.selectServer(topologyDescription, secondaryPreferredWithTag); ASSERT(result2 != boost::none); ASSERT_EQ(HostAndPort("s1"), (*result2)->getAddress()); const auto secondaryPreferredNoTag = ReadPreferenceSetting(ReadPreference::SecondaryPreferred); auto result3 = selector.selectServer(topologyDescription, secondaryPreferredNoTag); ASSERT(result3 != boost::none); ASSERT_EQ(HostAndPort("s1"), (*result2)->getAddress()); }
1
[ "CWE-755" ]
mongo
75f7184eafa78006a698cda4c4adfb57f1290047
181,817,538,129,080,900,000,000,000,000,000,000,000
53
SERVER-50170 fix max staleness read preference parameter for server selection
soup_filter_input_stream_create_source (GPollableInputStream *stream, GCancellable *cancellable) { SoupFilterInputStream *fstream = SOUP_FILTER_INPUT_STREAM (stream); GSource *base_source, *pollable_source; if (fstream->priv->buf && !fstream->priv->need_more) base_source = g_timeout_source_new (0); else base_source = g_pollable_input_stream_create_source (G_POLLABLE_INPUT_STREAM (G_FILTER_INPUT_STREAM (fstream)->base_stream), cancellable); g_source_set_dummy_callback (base_source); pollable_source = g_pollable_source_new (G_OBJECT (stream)); g_source_add_child_source (pollable_source, base_source); g_source_unref (base_source); return pollable_source; }
0
[ "CWE-787" ]
libsoup
03c91c76daf70ee227f38304c5e45a155f45073d
281,463,275,582,601,600,000,000,000,000,000,000,000
18
Fix chunked decoding buffer overrun (CVE-2017-2885) https://bugzilla.gnome.org/show_bug.cgi?id=785774
int nfc_llcp_send_cc(struct nfc_llcp_sock *sock) { struct nfc_llcp_local *local; struct sk_buff *skb; u8 *miux_tlv = NULL, miux_tlv_length; u8 *rw_tlv = NULL, rw_tlv_length, rw; int err; u16 size = 0; __be16 miux; pr_debug("Sending CC\n"); local = sock->local; if (local == NULL) return -ENODEV; /* If the socket parameters are not set, use the local ones */ miux = be16_to_cpu(sock->miux) > LLCP_MAX_MIUX ? local->miux : sock->miux; rw = sock->rw > LLCP_MAX_RW ? local->rw : sock->rw; miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0, &miux_tlv_length); if (!miux_tlv) { err = -ENOMEM; goto error_tlv; } size += miux_tlv_length; rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length); if (!rw_tlv) { err = -ENOMEM; goto error_tlv; } size += rw_tlv_length; skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size); if (skb == NULL) { err = -ENOMEM; goto error_tlv; } llcp_add_tlv(skb, miux_tlv, miux_tlv_length); llcp_add_tlv(skb, rw_tlv, rw_tlv_length); skb_queue_tail(&local->tx_queue, skb); err = 0; error_tlv: if (err) pr_err("error %d\n", err); kfree(miux_tlv); kfree(rw_tlv); return err; }
0
[ "CWE-476" ]
linux
58bdd544e2933a21a51eecf17c3f5f94038261b5
64,830,070,237,343,610,000,000,000,000,000,000,000
58
net: nfc: Fix NULL dereference on nfc_llcp_build_tlv fails KASAN report this: BUG: KASAN: null-ptr-deref in nfc_llcp_build_gb+0x37f/0x540 [nfc] Read of size 3 at addr 0000000000000000 by task syz-executor.0/5401 CPU: 0 PID: 5401 Comm: syz-executor.0 Not tainted 5.0.0-rc7+ #45 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0xfa/0x1ce lib/dump_stack.c:113 kasan_report+0x171/0x18d mm/kasan/report.c:321 memcpy+0x1f/0x50 mm/kasan/common.c:130 nfc_llcp_build_gb+0x37f/0x540 [nfc] nfc_llcp_register_device+0x6eb/0xb50 [nfc] nfc_register_device+0x50/0x1d0 [nfc] nfcsim_device_new+0x394/0x67d [nfcsim] ? 0xffffffffc1080000 nfcsim_init+0x6b/0x1000 [nfcsim] do_one_initcall+0xfa/0x5ca init/main.c:887 do_init_module+0x204/0x5f6 kernel/module.c:3460 load_module+0x66b2/0x8570 kernel/module.c:3808 __do_sys_finit_module+0x238/0x2a0 kernel/module.c:3902 do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290 entry_SYSCALL_64_after_hwframe+0x49/0xbe RIP: 0033:0x462e99 Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007f9cb79dcc58 EFLAGS: 00000246 ORIG_RAX: 0000000000000139 RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99 RDX: 0000000000000000 RSI: 0000000020000280 RDI: 0000000000000003 RBP: 00007f9cb79dcc70 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 00007f9cb79dd6bc R13: 00000000004bcefb R14: 00000000006f7030 R15: 0000000000000004 nfc_llcp_build_tlv will return NULL on fails, caller should check it, otherwise will trigger a NULL dereference. Reported-by: Hulk Robot <[email protected]> Fixes: eda21f16a5ed ("NFC: Set MIU and RW values from CONNECT and CC LLCP frames") Fixes: d646960f7986 ("NFC: Initial LLCP support") Signed-off-by: YueHaibing <[email protected]> Signed-off-by: David S. Miller <[email protected]>
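The commit message above boils down to "check the builder's return value before using the TLV". A minimal stand-alone sketch of that caller pattern follows; the names (build_tlv, send_cc_like) are invented for illustration and are not the driver's actual API.

#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* Hypothetical stand-in for nfc_llcp_build_tlv(): returns a freshly
 * allocated TLV buffer or NULL on allocation failure. */
static unsigned char *build_tlv(unsigned char type, const unsigned char *value,
                                size_t value_len, size_t *out_len)
{
    unsigned char *tlv = malloc(2 + value_len);
    if (!tlv)
        return NULL;
    tlv[0] = type;
    tlv[1] = (unsigned char)value_len;
    memcpy(tlv + 2, value, value_len);
    *out_len = 2 + value_len;
    return tlv;
}

/* The caller must bail out on NULL instead of dereferencing it. */
static int send_cc_like(const unsigned char *miux, size_t miux_len)
{
    size_t tlv_len = 0;
    unsigned char *tlv = build_tlv(0x02, miux, miux_len, &tlv_len);
    if (!tlv)
        return -ENOMEM;
    /* ... append tlv to the outgoing PDU here ... */
    free(tlv);
    return 0;
}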
static void convert_16u32s_C1R(const OPJ_BYTE* pSrc, OPJ_INT32* pDst, OPJ_SIZE_T length) { OPJ_SIZE_T i; for (i = 0; i < length; i++) { OPJ_INT32 val0 = *pSrc++; OPJ_INT32 val1 = *pSrc++; pDst[i] = val0 << 8 | val1; } }
0
[ "CWE-787" ]
openjpeg
b2072402b7e14d22bba6fb8cde2a1e9996e9a919
223,412,975,376,256,700,000,000,000,000,000,000,000
10
pngtoimage(): fix wrong computation of x1,y1 if -d option is used, that would result in a heap buffer overflow (fixes #1284)
static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css, struct cftype *cft) { return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags); }
0
[ "CWE-416" ]
linux
3b0462726e7ef281c35a7a4ae33e93ee2bc9975b
310,931,196,486,062,800,000,000,000,000,000,000,000
5
cgroup: verify that source is a string The following sequence can be used to trigger a UAF: int fscontext_fd = fsopen("cgroup"); int fd_null = open("/dev/null, O_RDONLY); int fsconfig(fscontext_fd, FSCONFIG_SET_FD, "source", fd_null); close_range(3, ~0U, 0); The cgroup v1 specific fs parser expects a string for the "source" parameter. However, it is perfectly legitimate to e.g. specify a file descriptor for the "source" parameter. The fs parser doesn't know what a filesystem allows there. So it's a bug to assume that "source" is always of type fs_value_is_string when it can reasonably also be fs_value_is_file. This assumption in the cgroup code causes a UAF because struct fs_parameter uses a union for the actual value. Access to that union is guarded by the param->type member. Since the cgroup paramter parser didn't check param->type but unconditionally moved param->string into fc->source a close on the fscontext_fd would trigger a UAF during put_fs_context() which frees fc->source thereby freeing the file stashed in param->file causing a UAF during a close of the fd_null. Fix this by verifying that param->type is actually a string and report an error if not. In follow up patches I'll add a new generic helper that can be used here and by other filesystems instead of this error-prone copy-pasta fix. But fixing it in here first makes backporting a it to stable a lot easier. Fixes: 8d2451f4994f ("cgroup1: switch to option-by-option parsing") Reported-by: [email protected] Cc: Christoph Hellwig <[email protected]> Cc: Alexander Viro <[email protected]> Cc: Dmitry Vyukov <[email protected]> Cc: <[email protected]> Cc: syzkaller-bugs <[email protected]> Signed-off-by: Christian Brauner <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
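The fix described above is a type check on the parsed mount parameter before touching the union member. The sketch below uses simplified stand-ins for the kernel's fs_parameter machinery; the struct layout and names are illustrative only, not the real definitions.

#include <stddef.h>
#include <errno.h>

/* Simplified, hypothetical stand-ins for the kernel structures. */
enum fs_value_type { fs_value_is_string, fs_value_is_file };

struct fs_parameter {
    enum fs_value_type type;
    union {
        char *string;
        void *file;
    };
};

static char *fc_source;

static int parse_source_like(struct fs_parameter *param)
{
    /* Refuse anything that is not actually a string before reading
     * the union as one. */
    if (param->type != fs_value_is_string)
        return -EINVAL;
    fc_source = param->string;
    param->string = NULL;   /* ownership moves to the filesystem context */
    return 0;
}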
tparm_proto(const char *string, TPARM_ARG a1, TPARM_ARG a2, TPARM_ARG a3, TPARM_ARG a4, TPARM_ARG a5, TPARM_ARG a6, TPARM_ARG a7, TPARM_ARG a8, TPARM_ARG a9) { return tparm_varargs(string, a1, a2, a3, a4, a5, a6, a7, a8, a9); }
1
[]
ncurses
790a85dbd4a81d5f5d8dd02a44d84f01512ef443
331,579,015,936,266,820,000,000,000,000,000,000,000
13
ncurses 6.2 - patch 20200531 + correct configure version-check/warnng for g++ to allow for 10.x + re-enable "bel" in konsole-base (report by Nia Huang) + add linux-s entry (patch by Alexandre Montaron). + drop long-obsolete convert_configure.pl + add test/test_parm.c, for checking tparm changes. + improve parameter-checking for tparm, adding function _nc_tiparm() to handle the most-used case, which accepts only numeric parameters (report/testcase by "puppet-meteor"). + use a more conservative estimate of the buffer-size in lib_tparm.c's save_text() and save_number(), in case the sprintf() function passes-through unexpected characters from a format specifier (report/testcase by "puppet-meteor"). + add a check for end-of-string in cvtchar to handle a malformed string in infotocap (report/testcase by "puppet-meteor").
visit_unsupported_type (const bson_iter_t *iter, const char *key, uint32_t type_code, void *data) { unsupported_type_test_data_t *context; context = (unsupported_type_test_data_t *) data; context->visited = true; context->key = key; context->type_code = type_code; }
0
[ "CWE-125" ]
libbson
42900956dc461dfe7fb91d93361d10737c1602b3
35,711,111,198,659,355,000,000,000,000,000,000,000
12
CDRIVER-2269 Check for zero string length in codewscope
ftrace_snapshot_print(struct seq_file *m, unsigned long ip, struct ftrace_probe_ops *ops, void *data) { struct ftrace_func_mapper *mapper = data; long *count = NULL; seq_printf(m, "%ps:", (void *)ip); seq_puts(m, "snapshot"); if (mapper) count = (long *)ftrace_func_mapper_find_ip(mapper, ip); if (count) seq_printf(m, ":count=%ld\n", *count); else seq_puts(m, ":unlimited\n"); return 0; }
0
[ "CWE-415" ]
linux
4397f04575c44e1440ec2e49b6302785c95fd2f8
129,070,716,800,088,120,000,000,000,000,000,000,000
20
tracing: Fix possible double free on failure of allocating trace buffer Jing Xia and Chunyan Zhang reported that on failing to allocate part of the tracing buffer, memory is freed, but the pointers that point to them are not initialized back to NULL, and later paths may try to free the freed memory again. Jing and Chunyan fixed one of the locations that does this, but missed a spot. Link: http://lkml.kernel.org/r/[email protected] Cc: [email protected] Fixes: 737223fbca3b1 ("tracing: Consolidate buffer allocation code") Reported-by: Jing Xia <[email protected]> Reported-by: Chunyan Zhang <[email protected]> Signed-off-by: Steven Rostedt (VMware) <[email protected]>
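The pattern behind this fix is "free, then clear the pointer, so a later error path cannot free it again". A minimal sketch, with a stub structure invented for illustration:

#include <stdlib.h>

struct trace_buffer_stub {
    void *buffer;
    void *temp_buffer;
};

/* On a partial allocation failure, release what was allocated and reset
 * the pointers; otherwise a later cleanup path frees the same memory twice. */
static void free_buffers_like(struct trace_buffer_stub *tb)
{
    free(tb->buffer);
    tb->buffer = NULL;        /* the missing step behind the double free */
    free(tb->temp_buffer);
    tb->temp_buffer = NULL;
}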
void ebt_unregister_table(struct net *net, struct ebt_table *table) { int i; if (!table) { BUGPRINT("Request to unregister NULL table!!!\n"); return; } mutex_lock(&ebt_mutex); list_del(&table->list); mutex_unlock(&ebt_mutex); EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size, ebt_cleanup_entry, net, NULL); if (table->private->nentries) module_put(table->me); vfree(table->private->entries); if (table->private->chainstack) { for_each_possible_cpu(i) vfree(table->private->chainstack[i]); vfree(table->private->chainstack); } vfree(table->private); kfree(table); }
0
[ "CWE-20" ]
linux
d846f71195d57b0bbb143382647c2c6638b04c5a
325,721,086,000,210,500,000,000,000,000,000,000,000
24
bridge: netfilter: fix information leak Struct tmp is copied from userspace. It is not checked whether the "name" field is NULL terminated. This may lead to buffer overflow and passing contents of kernel stack as a module name to try_then_request_module() and, consequently, to modprobe commandline. It would be seen by all userspace processes. Signed-off-by: Vasiliy Kulikov <[email protected]> Signed-off-by: Patrick McHardy <[email protected]>
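The issue above is a fixed-size name copied from user space without guaranteed NUL termination. A minimal sketch of the defensive pattern, assuming a hypothetical request structure and using memcpy as a stand-in for copy_from_user():

#include <string.h>

#define TABLE_NAME_LEN 32

struct table_request_stub {
    char name[TABLE_NAME_LEN];
    /* ... other fields ... */
};

/* 'src' stands in for a buffer copied verbatim from user space. */
static void sanitize_request(struct table_request_stub *req, const void *src)
{
    memcpy(req, src, sizeof(*req));          /* stand-in for copy_from_user() */
    req->name[TABLE_NAME_LEN - 1] = '\0';    /* never trust it to be terminated */
}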
PHP_FUNCTION( msgfmt_format ) { zval *args; MSG_FORMAT_METHOD_INIT_VARS; /* Parse parameters. */ if( zend_parse_method_parameters( ZEND_NUM_ARGS() TSRMLS_CC, getThis(), "Oa", &object, MessageFormatter_ce_ptr, &args ) == FAILURE ) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "msgfmt_format: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } /* Fetch the object. */ MSG_FORMAT_METHOD_FETCH_OBJECT; msgfmt_do_format(mfo, args, return_value TSRMLS_CC); }
0
[ "CWE-119", "CWE-787" ]
php-src
6d55ba265637d6adf0ba7e9c9ef11187d1ec2f5b
30,965,895,845,814,685,000,000,000,000,000,000,000
21
Fix bug #73007: add locale length check
vmci_transport_queue_pair_alloc(struct vmci_qp **qpair, struct vmci_handle *handle, u64 produce_size, u64 consume_size, u32 peer, u32 flags, bool trusted) { int err = 0; if (trusted) { /* Try to allocate our queue pair as trusted. This will only * work if vsock is running in the host. */ err = vmci_qpair_alloc(qpair, handle, produce_size, consume_size, peer, flags, VMCI_PRIVILEGE_FLAG_TRUSTED); if (err != VMCI_ERROR_NO_ACCESS) goto out; } err = vmci_qpair_alloc(qpair, handle, produce_size, consume_size, peer, flags, VMCI_NO_PRIVILEGE_FLAGS); out: if (err < 0) { pr_err("Could not attach to queue pair with %d\n", err); err = vmci_transport_error_to_vsock_error(err); } return err; }
0
[ "CWE-20", "CWE-269" ]
linux
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
36,532,127,094,116,110,000,000,000,000,000,000,000
33
net: rework recvmsg handler msg_name and msg_namelen logic This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user. This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory. Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL. Also document these changes in include/linux/net.h as suggested by David Miller. Changes since RFC: Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec. With this change in place I could remove " if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL ". This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL. Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style. Cc: David Miller <[email protected]> Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
decompilePUSH (SWF_ACTION *act) { int i; OUT_BEGIN(SWF_ACTIONPUSH); SanityCheck(SWF_PUSH, act->SWF_ACTIONRECORD.ActionCode == SWFACTION_PUSH, "not a PUSH") for(i=0;i<sact->NumParam;i++) push(&(sact->Params[i])); }
0
[ "CWE-119", "CWE-125" ]
libming
da9d86eab55cbf608d5c916b8b690f5b76bca462
186,787,981,383,802,120,000,000,000,000,000,000,000
12
decompileAction: Prevent heap buffer overflow and underflow with using OpCode
static Image *ReadVIPSImage(const ImageInfo *image_info, ExceptionInfo *exception) { char buffer[MagickPathExtent], *metadata; Image *image; MagickBooleanType status; ssize_t n; unsigned int channels, marker; VIPSBandFormat format; VIPSCoding coding; VIPSType type; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } marker=ReadBlobLSBLong(image); if (marker == VIPS_MAGIC_LSB) image->endian=LSBEndian; else if (marker == VIPS_MAGIC_MSB) image->endian=MSBEndian; else ThrowReaderException(CorruptImageError,"ImproperImageHeader"); image->columns=(size_t) ReadBlobLong(image); image->rows=(size_t) ReadBlobLong(image); status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); channels=ReadBlobLong(image); (void) ReadBlobLong(image); /* Legacy */ format=(VIPSBandFormat) ReadBlobLong(image); switch(format) { case VIPSBandFormatUCHAR: case VIPSBandFormatCHAR: image->depth=8; break; case VIPSBandFormatUSHORT: case VIPSBandFormatSHORT: image->depth=16; break; case VIPSBandFormatUINT: case VIPSBandFormatINT: case VIPSBandFormatFLOAT: image->depth=32; break; case VIPSBandFormatDOUBLE: image->depth=64; break; default: case VIPSBandFormatCOMPLEX: case VIPSBandFormatDPCOMPLEX: case VIPSBandFormatNOTSET: ThrowReaderException(CoderError,"Unsupported band format"); } coding=(VIPSCoding) ReadBlobLong(image); type=(VIPSType) ReadBlobLong(image); switch(type) { case VIPSTypeCMYK: SetImageColorspace(image,CMYKColorspace,exception); if (channels == 5) image->alpha_trait=BlendPixelTrait; break; case VIPSTypeB_W: case VIPSTypeGREY16: SetImageColorspace(image,GRAYColorspace,exception); if (channels == 2) image->alpha_trait=BlendPixelTrait; break; case VIPSTypeRGB: case VIPSTypeRGB16: SetImageColorspace(image,RGBColorspace,exception); if (channels == 4) image->alpha_trait=BlendPixelTrait; break; case VIPSTypesRGB: SetImageColorspace(image,sRGBColorspace,exception); if (channels == 4) image->alpha_trait=BlendPixelTrait; break; default: case VIPSTypeFOURIER: case VIPSTypeHISTOGRAM: case VIPSTypeLAB: case VIPSTypeLABS: case VIPSTypeLABQ: case VIPSTypeLCH: case VIPSTypeMULTIBAND: case VIPSTypeUCS: case VIPSTypeXYZ: case VIPSTypeYXY: ThrowReaderException(CoderError,"Unsupported colorspace"); } image->units=PixelsPerCentimeterResolution; image->resolution.x=ReadBlobFloat(image)*10; image->resolution.y=ReadBlobFloat(image)*10; /* Legacy, offsets, future */ (void) ReadBlobLongLong(image); (void) ReadBlobLongLong(image); (void) ReadBlobLongLong(image); if (image_info->ping != MagickFalse) return(image); if (IsSupportedCombination(format,type) == MagickFalse) ThrowReaderException(CoderError, "Unsupported combination of band format and colorspace"); if (channels == 0 || channels > 5) ThrowReaderException(CoderError,"Unsupported number of channels"); if (coding == VIPSCodingNONE) status=ReadVIPSPixelsNONE(image,format,type,channels,exception); else ThrowReaderException(CoderError,"Unsupported coding"); metadata=(char *) NULL; while ((n=ReadBlob(image,MagickPathExtent-1,(unsigned char *) buffer)) != 0) { 
buffer[n]='\0'; if (metadata == (char *) NULL) metadata=ConstantString(buffer); else (void) ConcatenateString(&metadata,buffer); } if (metadata != (char *) NULL) SetImageProperty(image,"vips:metadata",metadata,exception); (void) CloseBlob(image); if (status == MagickFalse) return((Image *) NULL); return(image); }
1
[ "CWE-772" ]
ImageMagick
dd367e0c3c3f37fbf1c20fa107b67a668b22c6e2
258,398,624,408,430,550,000,000,000,000,000,000,000
158
https://github.com/ImageMagick/ImageMagick/issues/770
static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock) { struct bpf_prog_aux *aux = prog->aux; if (atomic64_dec_and_test(&aux->refcnt)) { /* bpf_prog_free_id() must be called first */ bpf_prog_free_id(prog, do_idr_lock); if (in_irq() || irqs_disabled()) { INIT_WORK(&aux->work, bpf_prog_put_deferred); schedule_work(&aux->work); } else { bpf_prog_put_deferred(&aux->work); } } }
0
[ "CWE-367" ]
bpf
353050be4c19e102178ccc05988101887c25ae53
96,771,513,948,735,330,000,000,000,000,000,000,000
16
bpf: Fix toctou on read-only map's constant scalar tracking Commit a23740ec43ba ("bpf: Track contents of read-only maps as scalars") is checking whether maps are read-only both from BPF program side and user space side, and then, given their content is constant, reading out their data via map->ops->map_direct_value_addr() which is then subsequently used as known scalar value for the register, that is, it is marked as __mark_reg_known() with the read value at verification time. Before a23740ec43ba, the register content was marked as an unknown scalar so the verifier could not make any assumptions about the map content. The current implementation however is prone to a TOCTOU race, meaning, the value read as known scalar for the register is not guaranteed to be exactly the same at a later point when the program is executed, and as such, the prior made assumptions of the verifier with regards to the program will be invalid which can cause issues such as OOB access, etc. While the BPF_F_RDONLY_PROG map flag is always fixed and required to be specified at map creation time, the map->frozen property is initially set to false for the map given the map value needs to be populated, e.g. for global data sections. Once complete, the loader "freezes" the map from user space such that no subsequent updates/deletes are possible anymore. For the rest of the lifetime of the map, this freeze one-time trigger cannot be undone anymore after a successful BPF_MAP_FREEZE cmd return. Meaning, any new BPF_* cmd calls which would update/delete map entries will be rejected with -EPERM since map_get_sys_perms() removes the FMODE_CAN_WRITE permission. This also means that pending update/delete map entries must still complete before this guarantee is given. This corner case is not an issue for loaders since they create and prepare such program private map in successive steps. However, a malicious user is able to trigger this TOCTOU race in two different ways: i) via userfaultfd, and ii) via batched updates. For i) userfaultfd is used to expand the competition interval, so that map_update_elem() can modify the contents of the map after map_freeze() and bpf_prog_load() were executed. This works, because userfaultfd halts the parallel thread which triggered a map_update_elem() at the time where we copy key/value from the user buffer and this already passed the FMODE_CAN_WRITE capability test given at that time the map was not "frozen". Then, the main thread performs the map_freeze() and bpf_prog_load(), and once that had completed successfully, the other thread is woken up to complete the pending map_update_elem() which then changes the map content. For ii) the idea of the batched update is similar, meaning, when there are a large number of updates to be processed, it can increase the competition interval between the two. It is therefore possible in practice to modify the contents of the map after executing map_freeze() and bpf_prog_load(). One way to fix both i) and ii) at the same time is to expand the use of the map's map->writecnt. The latter was introduced in fc9702273e2e ("bpf: Add mmap() support for BPF_MAP_TYPE_ARRAY") and further refined in 1f6cb19be2e2 ("bpf: Prevent re-mmap()'ing BPF map as writable for initially r/o mapping") with the rationale to make a writable mmap()'ing of a map mutually exclusive with read-only freezing. The counter indicates writable mmap() mappings and then prevents/fails the freeze operation. Its semantics can be expanded beyond just mmap() by generally indicating ongoing write phases. 
This would essentially span any parallel regular and batched flavor of update/delete operation and then also have map_freeze() fail with -EBUSY. For the check_mem_access() in the verifier we expand upon the bpf_map_is_rdonly() check ensuring that all last pending writes have completed via bpf_map_write_active() test. Once the map->frozen is set and bpf_map_write_active() indicates a map->writecnt of 0 only then we are really guaranteed to use the map's data as known constants. For map->frozen being set and pending writes in process of still being completed we fall back to marking that register as unknown scalar so we don't end up making assumptions about it. With this, both TOCTOU reproducers from i) and ii) are fixed. Note that the map->writecnt has been converted into a atomic64 in the fix in order to avoid a double freeze_mutex mutex_{un,}lock() pair when updating map->writecnt in the various map update/delete BPF_* cmd flavors. Spanning the freeze_mutex over entire map update/delete operations in syscall side would not be possible due to then causing everything to be serialized. Similarly, something like synchronize_rcu() after setting map->frozen to wait for update/deletes to complete is not possible either since it would also have to span the user copy which can sleep. On the libbpf side, this won't break d66562fba1ce ("libbpf: Add BPF object skeleton support") as the anonymous mmap()-ed "map initialization image" is remapped as a BPF map-backed mmap()-ed memory where for .rodata it's non-writable. Fixes: a23740ec43ba ("bpf: Track contents of read-only maps as scalars") Reported-by: [email protected] Signed-off-by: Daniel Borkmann <[email protected]> Acked-by: Andrii Nakryiko <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]>
smpl_t aubio_tempo_get_threshold(aubio_tempo_t * o) { return o->threshold; }
0
[]
aubio
b1559f4c9ce2b304d8d27ffdc7128b6795ca82e5
194,824,173,044,332,020,000,000,000,000,000,000,000
3
[tempo] fix buffer overflow in method parser
WasmResult dequeue(uint32_t token, std::string* data) { absl::ReaderMutexLock l(&mutex); auto it = queues.find(token); if (it == queues.end()) { return WasmResult::NotFound; } if (it->second.queue.empty()) { return WasmResult::Empty; } *data = it->second.queue.front(); it->second.queue.pop_front(); return WasmResult::Ok; }
0
[ "CWE-476" ]
envoy
8788a3cf255b647fd14e6b5e2585abaaedb28153
100,812,374,793,858,930,000,000,000,000,000,000,000
13
1.4 - Do not call into the VM unless the VM Context has been created. (#24) * Ensure that the in VM Context is created before onDone is called. Signed-off-by: John Plevyak <[email protected]> * Update as per offline discussion. Signed-off-by: John Plevyak <[email protected]> * Set in_vm_context_created_ in onNetworkNewConnection. Signed-off-by: John Plevyak <[email protected]> * Add guards to other network calls. Signed-off-by: John Plevyak <[email protected]> * Fix common/wasm tests. Signed-off-by: John Plevyak <[email protected]> * Patch tests. Signed-off-by: John Plevyak <[email protected]> * Remove unecessary file from cherry-pick. Signed-off-by: John Plevyak <[email protected]>
void __do_SAK(struct tty_struct *tty) { #ifdef TTY_SOFT_SAK tty_hangup(tty); #else struct task_struct *g, *p; struct pid *session; int i; if (!tty) return; session = tty->session; tty_ldisc_flush(tty); tty_driver_flush_buffer(tty); read_lock(&tasklist_lock); /* Kill the entire session */ do_each_pid_task(session, PIDTYPE_SID, p) { tty_notice(tty, "SAK: killed process %d (%s): by session\n", task_pid_nr(p), p->comm); group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID); } while_each_pid_task(session, PIDTYPE_SID, p); /* Now kill any processes that happen to have the tty open */ do_each_thread(g, p) { if (p->signal->tty == tty) { tty_notice(tty, "SAK: killed process %d (%s): by controlling tty\n", task_pid_nr(p), p->comm); group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID); continue; } task_lock(p); i = iterate_fd(p->files, 0, this_tty, tty); if (i != 0) { tty_notice(tty, "SAK: killed process %d (%s): by fd#%d\n", task_pid_nr(p), p->comm, i - 1); group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID); } task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); #endif }
1
[ "CWE-416" ]
linux
c8bcd9c5be24fb9e6132e97da5a35e55a83e36b9
327,113,028,686,969,300,000,000,000,000,000,000,000
45
tty: Fix ->session locking Currently, locking of ->session is very inconsistent; most places protect it using the legacy tty mutex, but disassociate_ctty(), __do_SAK(), tiocspgrp() and tiocgsid() don't. Two of the writers hold the ctrl_lock (because they already need it for ->pgrp), but __proc_set_tty() doesn't do that yet. On a PREEMPT=y system, an unprivileged user can theoretically abuse this broken locking to read 4 bytes of freed memory via TIOCGSID if tiocgsid() is preempted long enough at the right point. (Other things might also go wrong, especially if root-only ioctls are involved; I'm not sure about that.) Change the locking on ->session such that: - tty_lock() is held by all writers: By making disassociate_ctty() hold it. This should be fine because the same lock can already be taken through the call to tty_vhangup_session(). The tricky part is that we need to shorten the area covered by siglock to be able to take tty_lock() without ugly retry logic; as far as I can tell, this should be fine, since nothing in the signal_struct is touched in the `if (tty)` branch. - ctrl_lock is held by all writers: By changing __proc_set_tty() to hold the lock a little longer. - All readers that aren't holding tty_lock() hold ctrl_lock: By adding locking to tiocgsid() and __do_SAK(), and expanding the area covered by ctrl_lock in tiocspgrp(). Cc: [email protected] Signed-off-by: Jann Horn <[email protected]> Reviewed-by: Jiri Slaby <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
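The core of the fix is that readers of ->session take the same lock the writers hold. A minimal user-space sketch of that pattern, using a pthread mutex as a stand-in for the tty ctrl_lock; the structure is hypothetical:

#include <pthread.h>

struct tty_stub {
    pthread_mutex_t ctrl_lock;     /* stand-in for the tty ctrl_lock */
    int session;                   /* updated by writers under ctrl_lock */
};

/* A TIOCGSID-style reader takes the lock instead of reading unlocked. */
static int tiocgsid_like(struct tty_stub *tty)
{
    int sid;
    pthread_mutex_lock(&tty->ctrl_lock);
    sid = tty->session;
    pthread_mutex_unlock(&tty->ctrl_lock);
    return sid;
}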
void Compute(OpKernelContext* context) override { // Only create one, if one does not exist already. Report status for all // other exceptions. If one already exists, it unrefs the new one. // An epsilon value of zero could cause performance issues and is therefore, // disallowed. const Tensor* epsilon_t; OP_REQUIRES_OK(context, context->input(kEpsilonName, &epsilon_t)); float epsilon = epsilon_t->scalar<float>()(); OP_REQUIRES( context, epsilon > 0, errors::InvalidArgument("An epsilon value of zero is not allowed.")); const Tensor* num_streams_t; OP_REQUIRES_OK(context, context->input(kNumStreamsName, &num_streams_t)); int64_t num_streams = num_streams_t->scalar<int64>()(); auto result = new QuantileStreamResource(epsilon, max_elements_, num_streams); auto status = CreateResource(context, HandleFromInput(context, 0), result); if (!status.ok() && status.code() != tensorflow::error::ALREADY_EXISTS) { OP_REQUIRES(context, false, status); } }
1
[ "CWE-703", "CWE-681" ]
tensorflow
8a84f7a2b5a2b27ecf88d25bad9ac777cd2f7992
89,054,104,973,263,520,000,000,000,000,000,000,000
23
Ensure num_streams >= 0 in tf.raw_ops.BoostedTreesCreateQuantileStreamResource PiperOrigin-RevId: 387452765 Change-Id: I9990c760e177fabca6a3b9b4612ceeaeeba51495
import_keys_internal (ctrl_t ctrl, iobuf_t inp, char **fnames, int nnames, void *stats_handle, unsigned char **fpr, size_t *fpr_len, unsigned int options ) { int i, rc = 0; struct stats_s *stats = stats_handle; if (!stats) stats = import_new_stats_handle (); if (inp) { rc = import (ctrl, inp, "[stream]", stats, fpr, fpr_len, options); } else { if( !fnames && !nnames ) nnames = 1; /* Ohh what a ugly hack to jump into the loop */ for(i=0; i < nnames; i++ ) { const char *fname = fnames? fnames[i] : NULL; IOBUF inp2 = iobuf_open(fname); if( !fname ) fname = "[stdin]"; if (inp2 && is_secured_file (iobuf_get_fd (inp2))) { iobuf_close (inp2); inp2 = NULL; gpg_err_set_errno (EPERM); } if( !inp2 ) log_error(_("can't open '%s': %s\n"), fname, strerror(errno) ); else { rc = import (ctrl, inp2, fname, stats, fpr, fpr_len, options); iobuf_close(inp2); /* Must invalidate that ugly cache to actually close it. */ iobuf_ioctl (NULL, IOBUF_IOCTL_INVALIDATE_CACHE, 0, (char*)fname); if( rc ) log_error("import from '%s' failed: %s\n", fname, g10_errstr(rc) ); } if( !fname ) break; } } if (!stats_handle) { import_print_stats (stats); import_release_stats_handle (stats); } /* If no fast import and the trustdb is dirty (i.e. we added a key or userID that had something other than a selfsig, a signature that was other than a selfsig, or any revocation), then update/check the trustdb if the user specified by setting interactive or by not setting no-auto-check-trustdb */ if(!(options&IMPORT_FAST)) trustdb_check_or_update(); return rc; }
0
[ "CWE-20" ]
gnupg
f0b33b6fb8e0586e9584a7a409dcc31263776a67
265,796,258,729,298,700,000,000,000,000,000,000,000
61
gpg: Import only packets which are allowed in a keyblock. * g10/import.c (valid_keyblock_packet): New. (read_block): Store only valid packets. -- A corrupted key, which for example included a mangled public key encrypted packet, used to corrupt the keyring. This change skips all packets which are not allowed in a keyblock. GnuPG-bug-id: 1455 (cherry-picked from commit f795a0d59e197455f8723c300eebf59e09853efa)
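The fix described is a whitelist of packet types allowed in a keyblock. A sketch in that spirit, with an invented enum rather than GnuPG's real packet-type constants:

/* Illustrative whitelist in the spirit of valid_keyblock_packet(): only
 * packet types that belong in a keyblock are accepted; everything else is
 * skipped instead of being written into the keyring. */
enum pkt_type_stub {
    PKT_PUBLIC_KEY, PKT_PUBLIC_SUBKEY, PKT_SECRET_KEY, PKT_SECRET_SUBKEY,
    PKT_SIGNATURE, PKT_USER_ID, PKT_ATTRIBUTE, PKT_RING_TRUST,
    PKT_PUBKEY_ENC   /* e.g. an encrypted-session-key packet: not allowed */
};

static int valid_keyblock_packet_like(enum pkt_type_stub t)
{
    switch (t) {
    case PKT_PUBLIC_KEY:
    case PKT_PUBLIC_SUBKEY:
    case PKT_SECRET_KEY:
    case PKT_SECRET_SUBKEY:
    case PKT_SIGNATURE:
    case PKT_USER_ID:
    case PKT_ATTRIBUTE:
    case PKT_RING_TRUST:
        return 1;
    default:
        return 0;
    }
}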
void __init printk_all_partitions(void) { struct class_dev_iter iter; struct device *dev; class_dev_iter_init(&iter, &block_class, NULL, &disk_type); while ((dev = class_dev_iter_next(&iter))) { struct gendisk *disk = dev_to_disk(dev); struct disk_part_iter piter; struct hd_struct *part; char name_buf[BDEVNAME_SIZE]; char devt_buf[BDEVT_SIZE]; /* * Don't show empty devices or things that have been * suppressed */ if (get_capacity(disk) == 0 || (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)) continue; /* * Note, unlike /proc/partitions, I am showing the * numbers in hex - the same format as the root= * option takes. */ disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); while ((part = disk_part_iter_next(&piter))) { bool is_part0 = part == &disk->part0; printk("%s%s %10llu %s %s", is_part0 ? "" : " ", bdevt_str(part_devt(part), devt_buf), (unsigned long long)part_nr_sects_read(part) >> 1 , disk_name(disk, part->partno, name_buf), part->info ? part->info->uuid : ""); if (is_part0) { if (dev->parent && dev->parent->driver) printk(" driver: %s\n", dev->parent->driver->name); else printk(" (driver?)\n"); } else printk("\n"); } disk_part_iter_exit(&piter); } class_dev_iter_exit(&iter); }
0
[ "CWE-416" ]
linux-stable
77da160530dd1dc94f6ae15a981f24e5f0021e84
215,350,096,733,660,540,000,000,000,000,000,000,000
48
block: fix use-after-free in seq file I got a KASAN report of use-after-free: ================================================================== BUG: KASAN: use-after-free in klist_iter_exit+0x61/0x70 at addr ffff8800b6581508 Read of size 8 by task trinity-c1/315 ============================================================================= BUG kmalloc-32 (Not tainted): kasan: bad access detected ----------------------------------------------------------------------------- Disabling lock debugging due to kernel taint INFO: Allocated in disk_seqf_start+0x66/0x110 age=144 cpu=1 pid=315 ___slab_alloc+0x4f1/0x520 __slab_alloc.isra.58+0x56/0x80 kmem_cache_alloc_trace+0x260/0x2a0 disk_seqf_start+0x66/0x110 traverse+0x176/0x860 seq_read+0x7e3/0x11a0 proc_reg_read+0xbc/0x180 do_loop_readv_writev+0x134/0x210 do_readv_writev+0x565/0x660 vfs_readv+0x67/0xa0 do_preadv+0x126/0x170 SyS_preadv+0xc/0x10 do_syscall_64+0x1a1/0x460 return_from_SYSCALL_64+0x0/0x6a INFO: Freed in disk_seqf_stop+0x42/0x50 age=160 cpu=1 pid=315 __slab_free+0x17a/0x2c0 kfree+0x20a/0x220 disk_seqf_stop+0x42/0x50 traverse+0x3b5/0x860 seq_read+0x7e3/0x11a0 proc_reg_read+0xbc/0x180 do_loop_readv_writev+0x134/0x210 do_readv_writev+0x565/0x660 vfs_readv+0x67/0xa0 do_preadv+0x126/0x170 SyS_preadv+0xc/0x10 do_syscall_64+0x1a1/0x460 return_from_SYSCALL_64+0x0/0x6a CPU: 1 PID: 315 Comm: trinity-c1 Tainted: G B 4.7.0+ #62 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014 ffffea0002d96000 ffff880119b9f918 ffffffff81d6ce81 ffff88011a804480 ffff8800b6581500 ffff880119b9f948 ffffffff8146c7bd ffff88011a804480 ffffea0002d96000 ffff8800b6581500 fffffffffffffff4 ffff880119b9f970 Call Trace: [<ffffffff81d6ce81>] dump_stack+0x65/0x84 [<ffffffff8146c7bd>] print_trailer+0x10d/0x1a0 [<ffffffff814704ff>] object_err+0x2f/0x40 [<ffffffff814754d1>] kasan_report_error+0x221/0x520 [<ffffffff8147590e>] __asan_report_load8_noabort+0x3e/0x40 [<ffffffff83888161>] klist_iter_exit+0x61/0x70 [<ffffffff82404389>] class_dev_iter_exit+0x9/0x10 [<ffffffff81d2e8ea>] disk_seqf_stop+0x3a/0x50 [<ffffffff8151f812>] seq_read+0x4b2/0x11a0 [<ffffffff815f8fdc>] proc_reg_read+0xbc/0x180 [<ffffffff814b24e4>] do_loop_readv_writev+0x134/0x210 [<ffffffff814b4c45>] do_readv_writev+0x565/0x660 [<ffffffff814b8a17>] vfs_readv+0x67/0xa0 [<ffffffff814b8de6>] do_preadv+0x126/0x170 [<ffffffff814b92ec>] SyS_preadv+0xc/0x10 This problem can occur in the following situation: open() - pread() - .seq_start() - iter = kmalloc() // succeeds - seqf->private = iter - .seq_stop() - kfree(seqf->private) - pread() - .seq_start() - iter = kmalloc() // fails - .seq_stop() - class_dev_iter_exit(seqf->private) // boom! old pointer As the comment in disk_seqf_stop() says, stop is called even if start failed, so we need to reinitialise the private pointer to NULL when seq iteration stops. An alternative would be to set the private pointer to NULL when the kmalloc() in disk_seqf_start() fails. Cc: [email protected] Signed-off-by: Vegard Nossum <[email protected]> Acked-by: Tejun Heo <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
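As the message notes, stop() runs even when start() failed, so it must not leave a stale private pointer behind. A minimal sketch of the fix, with a stub seq_file structure:

#include <stdlib.h>

struct seq_file_stub {
    void *private_data;
};

/* Free the iterator and clear the pointer so the next iteration cannot
 * hand the freed pointer back to the exit path. */
static void disk_seqf_stop_like(struct seq_file_stub *seqf)
{
    if (seqf->private_data) {
        free(seqf->private_data);   /* stand-in for class_dev_iter_exit + kfree */
        seqf->private_data = NULL;  /* the one-line fix described above */
    }
}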
static void udf_open_lvid(struct super_block *sb) { struct udf_sb_info *sbi = UDF_SB(sb); struct buffer_head *bh = sbi->s_lvid_bh; struct logicalVolIntegrityDesc *lvid; struct logicalVolIntegrityDescImpUse *lvidiu; if (!bh) return; mutex_lock(&sbi->s_alloc_mutex); lvid = (struct logicalVolIntegrityDesc *)bh->b_data; lvidiu = udf_sb_lvidiu(sbi); lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME); lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN); lvid->descTag.descCRC = cpu_to_le16( crc_itu_t(0, (char *)lvid + sizeof(struct tag), le16_to_cpu(lvid->descTag.descCRCLength))); lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag); mark_buffer_dirty(bh); sbi->s_lvid_dirty = 0; mutex_unlock(&sbi->s_alloc_mutex); }
0
[ "CWE-119", "CWE-787" ]
linux
1df2ae31c724e57be9d7ac00d78db8a5dabdd050
132,624,433,700,540,650,000,000,000,000,000,000,000
29
udf: Fortify loading of sparing table Add sanity checks when loading sparing table from disk to avoid accessing unallocated memory or writing to it. Signed-off-by: Jan Kara <[email protected]>
int HttpDownstreamConnection::end_upload_data() { if (!downstream_->get_request_header_sent()) { downstream_->set_blocked_request_data_eof(true); if (request_header_written_) { signal_write(); } return 0; } signal_write(); if (!downstream_->get_chunked_request()) { return 0; } end_upload_data_chunk(); return 0; }
0
[]
nghttp2
319d5ab1c6d916b6b8a0d85b2ae3f01b3ad04f2c
100,786,804,946,357,390,000,000,000,000,000,000,000
19
nghttpx: Fix request stall Fix request stall if backend connection is reused and buffer is full.
int generic_block_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len, get_block_t *get_block) { int ret; inode_lock(inode); ret = __generic_block_fiemap(inode, fieinfo, start, len, get_block); inode_unlock(inode); return ret; }
0
[ "CWE-362", "CWE-119", "CWE-787" ]
linux
10eec60ce79187686e052092e5383c99b4420a20
99,840,030,815,060,730,000,000,000,000,000,000,000
10
vfs: ioctl: prevent double-fetch in dedupe ioctl This prevents a double-fetch from user space that can lead to to an undersized allocation and heap overflow. Fixes: 54dbc1517237 ("vfs: hoist the btrfs deduplication ioctl to the vfs") Signed-off-by: Scott Bauer <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
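The double-fetch pattern is fixed by copying the untrusted header exactly once and using only that copy for every later size calculation. A hedged stand-alone sketch, with a hypothetical argument structure and memcpy standing in for copy_from_user():

#include <stdlib.h>
#include <string.h>
#include <errno.h>

struct dedupe_args_stub {
    unsigned short count;
    /* ... variable-length data follows in the real ioctl ... */
};

static int handle_ioctl_like(const void *user_ptr, size_t user_len)
{
    struct dedupe_args_stub args;

    if (user_len < sizeof(args))
        return -EINVAL;
    memcpy(&args, user_ptr, sizeof(args));   /* fetch once */

    /* Every later allocation and bound uses args.count, never a second
     * read of user memory that the caller could have changed meanwhile. */
    void *buf = calloc(args.count ? args.count : 1, 64);
    if (!buf)
        return -ENOMEM;
    free(buf);
    return 0;
}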
compare_address (ESoapMessage *message, EContact *new, EContact *old, EContactField field, const gchar *key) { EContactAddress *new_address, *old_address; gboolean set = FALSE; new_address = e_contact_get (new, field); old_address = e_contact_get (old, field); if (!new_address && !old_address) return; if (!old_address && new_address) set = TRUE; if (!new_address && old_address) { set = TRUE; new_address = g_new0 (EContactAddress, 1); } if (set || g_strcmp0 (new_address->street, old_address->street) != 0) convert_indexed_contact_property_to_updatexml_physical_address (message, "PhysicalAddress", "Street", new_address->street, "contacts", "PhysicalAddresses", key); if (set || g_strcmp0 (new_address->locality, old_address->locality) != 0) convert_indexed_contact_property_to_updatexml_physical_address (message, "PhysicalAddress", "City", new_address->locality, "contacts", "PhysicalAddresses", key); if (set || g_strcmp0 (new_address->region, old_address->region) != 0) convert_indexed_contact_property_to_updatexml_physical_address (message, "PhysicalAddress", "State", new_address->region, "contacts", "PhysicalAddresses", key); if (set || g_strcmp0 (new_address->country, old_address->country) != 0) convert_indexed_contact_property_to_updatexml_physical_address (message, "PhysicalAddress", "CountryOrRegion", new_address->country, "contacts", "PhysicalAddresses", key); if (set || g_strcmp0 (new_address->code, old_address->code) != 0) convert_indexed_contact_property_to_updatexml_physical_address (message, "PhysicalAddress", "PostalCode", new_address->code, "contacts", "PhysicalAddresses", key); e_contact_address_free (old_address); e_contact_address_free (new_address); }
0
[ "CWE-295" ]
evolution-ews
915226eca9454b8b3e5adb6f2fff9698451778de
141,850,934,853,311,140,000,000,000,000,000,000,000
38
I#27 - SSL Certificates are not validated This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too. Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
void setEorTracking(bool track) { CHECK(!socket_); // should only be called during setup trackEor_ = track; }
0
[ "CWE-125" ]
folly
c321eb588909646c15aefde035fd3133ba32cdee
198,932,947,504,099,700,000,000,000,000,000,000,000
4
Handle close_notify as standard writeErr in AsyncSSLSocket. Summary: Fixes CVE-2019-11934 Reviewed By: mingtaoy Differential Revision: D18020613 fbshipit-source-id: db82bb250e53f0d225f1280bd67bc74abd417836
onig_is_code_in_cc_len(int elen, OnigCodePoint code, /* CClassNode* */ void* cc_arg) { int found; CClassNode* cc = (CClassNode* )cc_arg; if (elen > 1 || (code >= SINGLE_BYTE_SIZE)) { if (IS_NULL(cc->mbuf)) { found = 0; } else { found = onig_is_in_code_range(cc->mbuf->p, code) != 0; } } else { found = BITSET_AT(cc->bs, code) != 0; } if (IS_NCCLASS_NOT(cc)) return !found; else return found; }
0
[ "CWE-476", "CWE-125" ]
oniguruma
c509265c5f6ae7264f7b8a8aae1cfa5fc59d108c
168,492,614,739,082,940,000,000,000,000,000,000,000
22
Fix CVE-2019-13225: problem in converting if-then-else pattern to bytecode.
static int filter_frame(AVFilterLink *inlink, AVFrame *frame) { AVFilterContext *ctx = inlink->dst; FieldOrderContext *s = ctx->priv; AVFilterLink *outlink = ctx->outputs[0]; int h, plane, line_step, line_size, line; uint8_t *data; if (!frame->interlaced_frame || frame->top_field_first == s->dst_tff) return ff_filter_frame(outlink, frame); av_dlog(ctx, "picture will move %s one line\n", s->dst_tff ? "up" : "down"); h = frame->height; for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) { line_step = frame->linesize[plane]; line_size = s->line_size[plane]; data = frame->data[plane]; if (s->dst_tff) { /** Move every line up one line, working from * the top to the bottom of the frame. * The original top line is lost. * The new last line is created as a copy of the * penultimate line from that field. */ for (line = 0; line < h; line++) { if (1 + line < frame->height) { memcpy(data, data + line_step, line_size); } else { memcpy(data, data - line_step - line_step, line_size); } data += line_step; } } else { /** Move every line down one line, working from * the bottom to the top of the frame. * The original bottom line is lost. * The new first line is created as a copy of the * second line from that field. */ data += (h - 1) * line_step; for (line = h - 1; line >= 0 ; line--) { if (line > 0) { memcpy(data, data - line_step, line_size); } else { memcpy(data, data + line_step + line_step, line_size); } data -= line_step; } } } frame->top_field_first = s->dst_tff; return ff_filter_frame(outlink, frame); }
0
[ "CWE-119", "CWE-787" ]
FFmpeg
e43a0a232dbf6d3c161823c2e07c52e76227a1bc
42,767,649,794,982,470,000,000,000,000,000,000,000
55
avfilter: fix plane validity checks Fixes out of array accesses Signed-off-by: Michael Niedermayer <[email protected]>
PHP_FUNCTION(imageconvolution) { zval *SIM, *hash_matrix; zval **var = NULL, **var2 = NULL; gdImagePtr im_src = NULL; double div, offset; int nelem, i, j, res; float matrix[3][3] = {{0,0,0}, {0,0,0}, {0,0,0}}; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "radd", &SIM, &hash_matrix, &div, &offset) == FAILURE) { RETURN_FALSE; } ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd); nelem = zend_hash_num_elements(Z_ARRVAL_P(hash_matrix)); if (nelem != 3) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "You must have 3x3 array"); RETURN_FALSE; } for (i=0; i<3; i++) { if (zend_hash_index_find(Z_ARRVAL_P(hash_matrix), (i), (void **) &var) == SUCCESS && Z_TYPE_PP(var) == IS_ARRAY) { if (Z_TYPE_PP(var) != IS_ARRAY || zend_hash_num_elements(Z_ARRVAL_PP(var)) != 3 ) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "You must have 3x3 array"); RETURN_FALSE; } for (j=0; j<3; j++) { if (zend_hash_index_find(Z_ARRVAL_PP(var), (j), (void **) &var2) == SUCCESS) { if (Z_TYPE_PP(var2) != IS_DOUBLE) { zval dval; dval = **var; zval_copy_ctor(&dval); convert_to_double(&dval); matrix[i][j] = (float)Z_DVAL(dval); } else { matrix[i][j] = (float)Z_DVAL_PP(var2); } } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "You must have a 3x3 matrix"); RETURN_FALSE; } } } } res = gdImageConvolution(im_src, matrix, (float)div, (float)offset); if (res) { RETURN_TRUE; } else { RETURN_FALSE; } }
0
[ "CWE-703", "CWE-189" ]
php-src
2938329ce19cb8c4197dec146c3ec887c6f61d01
336,116,024,076,884,480,000,000,000,000,000,000,000
54
Fixed bug #66356 (Heap Overflow Vulnerability in imagecrop()) And also fixed the bug: arguments are altered after some calls
vhost_crypto_process_one_req(struct vhost_crypto *vcrypto, struct vhost_virtqueue *vq, struct rte_crypto_op *op, struct vring_desc *head, uint16_t desc_idx) { struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src); struct rte_cryptodev_sym_session *session; struct virtio_crypto_op_data_req *req, tmp_req; struct virtio_crypto_inhdr *inhdr; struct vring_desc *desc = NULL; uint64_t session_id; uint64_t dlen; uint32_t nb_descs = vq->size; int err = 0; vc_req->desc_idx = desc_idx; vc_req->dev = vcrypto->dev; vc_req->vq = vq; if (likely(head->flags & VRING_DESC_F_INDIRECT)) { dlen = head->len; nb_descs = dlen / sizeof(struct vring_desc); /* drop invalid descriptors */ if (unlikely(nb_descs > vq->size)) return -1; desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr, &dlen, VHOST_ACCESS_RO); if (unlikely(!desc || dlen != head->len)) return -1; desc_idx = 0; head = desc; } else { desc = head; } vc_req->head = head; vc_req->zero_copy = vcrypto->option; req = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO); if (unlikely(req == NULL)) { switch (vcrypto->option) { case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE: err = VIRTIO_CRYPTO_BADMSG; VC_LOG_ERR("Invalid descriptor"); goto error_exit; case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE: req = &tmp_req; if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req), &nb_descs, vq->size) < 0)) { err = VIRTIO_CRYPTO_BADMSG; VC_LOG_ERR("Invalid descriptor"); goto error_exit; } break; default: err = VIRTIO_CRYPTO_ERR; VC_LOG_ERR("Invalid option"); goto error_exit; } } else { if (unlikely(move_desc(vc_req->head, &desc, sizeof(*req), &nb_descs, vq->size) < 0)) { VC_LOG_ERR("Incorrect descriptor"); goto error_exit; } } switch (req->header.opcode) { case VIRTIO_CRYPTO_CIPHER_ENCRYPT: case VIRTIO_CRYPTO_CIPHER_DECRYPT: session_id = req->header.session_id; /* one branch to avoid unnecessary table lookup */ if (vcrypto->cache_session_id != session_id) { err = rte_hash_lookup_data(vcrypto->session_map, &session_id, (void **)&session); if (unlikely(err < 0)) { err = VIRTIO_CRYPTO_ERR; VC_LOG_ERR("Failed to find session %"PRIu64, session_id); goto error_exit; } vcrypto->cache_session = session; vcrypto->cache_session_id = session_id; } session = vcrypto->cache_session; err = rte_crypto_op_attach_sym_session(op, session); if (unlikely(err < 0)) { err = VIRTIO_CRYPTO_ERR; VC_LOG_ERR("Failed to attach session to op"); goto error_exit; } switch (req->u.sym_req.op_type) { case VIRTIO_CRYPTO_SYM_OP_NONE: err = VIRTIO_CRYPTO_NOTSUPP; break; case VIRTIO_CRYPTO_SYM_OP_CIPHER: err = prepare_sym_cipher_op(vcrypto, op, vc_req, &req->u.sym_req.u.cipher, desc, &nb_descs, vq->size); break; case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING: err = prepare_sym_chain_op(vcrypto, op, vc_req, &req->u.sym_req.u.chain, desc, &nb_descs, vq->size); break; } if (unlikely(err != 0)) { VC_LOG_ERR("Failed to process sym request"); goto error_exit; } break; default: VC_LOG_ERR("Unsupported symmetric crypto request type %u", req->header.opcode); goto error_exit; } return 0; error_exit: inhdr = reach_inhdr(vc_req, desc, &nb_descs, vq->size); if (likely(inhdr != NULL)) inhdr->status = (uint8_t)err; return -1; }
0
[ "CWE-125" ]
dpdk
acd4c92fa693bbea695f2bb42bb93fb8567c3ca5
210,365,662,794,704,360,000,000,000,000,000,000,000
131
vhost/crypto: validate keys lengths transform_cipher_param() and transform_chain_param() handle the payload data for the VHOST_USER_CRYPTO_CREATE_SESS message. These payloads have to be validated, since it could come from untrusted sources. Two buffers and their lengths are defined in this payload, one the the auth key and one for the cipher key. But above functions do not validate the key length inputs, which could lead to read out of bounds, as buffers have static sizes of 64 bytes for the cipher key and 512 bytes for the auth key. This patch adds necessary checks on the key length field before being used. CVE-2020-10724 Fixes: e80a98708166 ("vhost/crypto: add session message handler") Cc: [email protected] Reported-by: Ilja Van Sprundel <[email protected]> Signed-off-by: Maxime Coquelin <[email protected]> Reviewed-by: Xiaolong Ye <[email protected]> Reviewed-by: Ilja Van Sprundel <[email protected]>
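The fix validates the guest-supplied key length against the fixed destination buffer before copying. A minimal sketch of that check, with invented names and an illustrative 64-byte cipher-key buffer:

#include <string.h>
#include <errno.h>

#define CIPHER_KEY_MAX 64

struct session_param_stub {
    unsigned int keylen;          /* attacker-controlled length field */
    const unsigned char *key;
};

static unsigned char cipher_key[CIPHER_KEY_MAX];

static int transform_cipher_param_like(const struct session_param_stub *p)
{
    if (p->keylen > sizeof(cipher_key))
        return -EINVAL;            /* reject before reading out of bounds */
    memcpy(cipher_key, p->key, p->keylen);
    return 0;
}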
static int vt_event_wait_ioctl(struct vt_event __user *event) { struct vt_event_wait vw; if (copy_from_user(&vw.event, event, sizeof(struct vt_event))) return -EFAULT; /* Highest supported event for now */ if (vw.event.event & ~VT_MAX_EVENT) return -EINVAL; vt_event_wait(&vw); /* If it occurred report it */ if (vw.done) { if (copy_to_user(event, &vw.event, sizeof(struct vt_event))) return -EFAULT; return 0; } return -EINTR; }
0
[ "CWE-662" ]
linux
90bfdeef83f1d6c696039b6a917190dcbbad3220
282,974,409,120,140,460,000,000,000,000,000,000,000
19
tty: make FONTX ioctl use the tty pointer they were actually passed Some of the font tty ioctl's always used the current foreground VC for their operations. Don't do that then. This fixes a data race on fg_console. Side note: both Michael Ellerman and Jiri Slaby point out that all these ioctls are deprecated, and should probably have been removed long ago, and everything seems to be using the KDFONTOP ioctl instead. In fact, Michael points out that it looks like busybox's loadfont program seems to have switched over to using KDFONTOP exactly _because_ of this bug (ahem.. 12 years ago ;-). Reported-by: Minh Yuan <[email protected]> Acked-by: Michael Ellerman <[email protected]> Acked-by: Jiri Slaby <[email protected]> Cc: Greg KH <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static int intr_interception(struct kvm_vcpu *vcpu) { ++vcpu->stat.irq_exits; return 1; }
0
[ "CWE-862" ]
kvm
0f923e07124df069ba68d8bb12324398f4b6b709
52,952,122,068,107,170,000,000,000,000,000,000,000
5
KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653) * Invert the mask of bits that we pick from L2 in nested_vmcb02_prepare_control * Invert and explicitly use VIRQ related bits bitmask in svm_clear_vintr This fixes a security issue that allowed a malicious L1 to run L2 with AVIC enabled, which allowed the L2 to exploit the uninitialized and enabled AVIC to read/write the host physical memory at some offsets. Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler") Signed-off-by: Maxim Levitsky <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, unsigned int *size, const char *name, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; struct ip6t_entry *de; unsigned int origsize; int ret, h; struct xt_entry_match *ematch; ret = 0; origsize = *size; de = (struct ip6t_entry *)*dstptr; memcpy(de, e, sizeof(struct ip6t_entry)); memcpy(&de->counters, &e->counters, sizeof(e->counters)); *dstptr += sizeof(struct ip6t_entry); *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); xt_ematch_foreach(ematch, e) { ret = xt_compat_match_from_user(ematch, dstptr, size); if (ret != 0) return ret; } de->target_offset = e->target_offset - (origsize - *size); t = compat_ip6t_get_target(e); xt_compat_target_from_user(t, dstptr, size); de->next_offset = e->next_offset - (origsize - *size); for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)de - base < newinfo->hook_entry[h]) newinfo->hook_entry[h] -= origsize - *size; if ((unsigned char *)de - base < newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } return ret; }
0
[ "CWE-284", "CWE-264" ]
linux
ce683e5f9d045e5d67d1312a42b359cb2ab2a13c
265,839,834,845,732,880,000,000,000,000,000,000,000
37
netfilter: x_tables: check for bogus target offset We're currently asserting that targetoff + targetsize <= nextoff. Extend it to also check that targetoff is >= sizeof(xt_entry). Since this is generic code, add an argument pointing to the start of the match/target, we can then derive the base structure size from the delta. We also need the e->elems pointer in a followup change to validate matches. Signed-off-by: Florian Westphal <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]>
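The check being added has two halves: the target must start after the fixed entry header and must end before the next entry. A simplified sketch with stub types and an illustrative minimum target size (not the real xtables structures):

#include <stddef.h>
#include <errno.h>

struct xt_entry_stub {
    unsigned short target_offset;  /* where the target record starts */
    unsigned short next_offset;    /* where the next entry starts */
    unsigned char elems[];
};

#define TARGET_SIZE_MIN 8          /* illustrative minimum target size */

static int check_entry_like(const struct xt_entry_stub *e)
{
    if (e->target_offset < sizeof(*e))
        return -EINVAL;            /* bogus: target overlaps the header */
    if ((size_t)e->target_offset + TARGET_SIZE_MIN > e->next_offset)
        return -EINVAL;            /* bogus: target runs past the entry */
    return 0;
}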
static Bool CheckResponse(rfbClientPtr cl, int numPasswords, char *passwdFullControl, char *passwdViewOnly, CARD8 *response) { Bool ok = FALSE; CARD8 encryptedChallenge1[CHALLENGESIZE]; CARD8 encryptedChallenge2[CHALLENGESIZE]; memcpy(encryptedChallenge1, cl->authChallenge, CHALLENGESIZE); vncEncryptBytes(encryptedChallenge1, passwdFullControl); memcpy(encryptedChallenge2, cl->authChallenge, CHALLENGESIZE); vncEncryptBytes(encryptedChallenge2, (numPasswords == 2) ? passwdViewOnly : passwdFullControl); /* Delete the passwords from memory */ memset(passwdFullControl, 0, MAXPWLEN + 1); memset(passwdViewOnly, 0, MAXPWLEN + 1); if (memcmp(encryptedChallenge1, response, CHALLENGESIZE) == 0) { rfbLog("Full-control authentication enabled for %s\n", cl->host); ok = TRUE; cl->viewOnly = FALSE; } else if (memcmp(encryptedChallenge2, response, CHALLENGESIZE) == 0) { rfbLog("View-only authentication enabled for %s\n", cl->host); ok = TRUE; cl->viewOnly = TRUE; } return ok; }
0
[ "CWE-787" ]
turbovnc
cea98166008301e614e0d36776bf9435a536136e
155,541,256,659,179,180,000,000,000,000,000,000,000
31
Server: Fix two issues identified by ASan 1. If the TLSPlain and X509Plain security types were both disabled, then rfbOptPamAuth() would overflow the name field in the secTypes structure when testing the "none" security type, since the name of that security type has less than five characters. This issue was innocuous, since the overflow was fully contained within the secTypes structure, but the ASan error caused Xvnc to abort, which made it difficult to detect other errors. 2. If an ill-behaved RFB client sent the TurboVNC Server a fence message with more than 64 bytes, then the TurboVNC Server would try to read that message and subsequently overflow the stack before it detected that the payload was too large. This could never have occurred with any of the VNC viewers that currently support the RFB flow control extensions (TigerVNC and TurboVNC, namely.) This issue was also innocuous, since the stack overflow affected two variables (newScreens and errMsg) that were never accessed before the function returned.
static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct aead_alg *aead = crypto_aead_alg(tfm); unsigned long alignmask = crypto_aead_alignmask(tfm); if ((unsigned long)key & alignmask) return setkey_unaligned(tfm, key, keylen); return aead->setkey(tfm, key, keylen); }
0
[ "CWE-310" ]
linux
9a5467bf7b6e9e02ec9c3da4e23747c05faeaac6
17,740,778,394,592,098,000,000,000,000,000,000,000
10
crypto: user - fix info leaks in report API Three errors resulting in kernel memory disclosure: 1/ The structures used for the netlink based crypto algorithm report API are located on the stack. As snprintf() does not fill the remainder of the buffer with null bytes, those stack bytes will be disclosed to users of the API. Switch to strncpy() to fix this. 2/ crypto_report_one() does not initialize all field of struct crypto_user_alg. Fix this to fix the heap info leak. 3/ For the module name we should copy only as many bytes as module_name() returns -- not as much as the destination buffer could hold. But the current code does not and therefore copies random data from behind the end of the module name, as the module name is always shorter than CRYPTO_MAX_ALG_NAME. Also switch to use strncpy() to copy the algorithm's name and driver_name. They are strings, after all. Signed-off-by: Mathias Krause <[email protected]> Cc: Steffen Klassert <[email protected]> Signed-off-by: Herbert Xu <[email protected]>
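A small sketch of the remediation pattern the message describes, with a stand-in struct rather than the real struct crypto_user_alg: zero the whole report object and copy names with a bounded string copy so no stale stack bytes reach userspace.

#include <string.h>

struct report {                      /* illustrative, not crypto_user_alg */
    char name[64];
    char driver_name[64];
};

static void fill_report(struct report *r, const char *name, const char *driver)
{
    memset(r, 0, sizeof(*r));        /* no uninitialized padding or tail bytes */
    strncpy(r->name, name, sizeof(r->name) - 1);
    strncpy(r->driver_name, driver, sizeof(r->driver_name) - 1);
}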
static void sg_timeout(unsigned long _req) { struct usb_sg_request *req = (struct usb_sg_request *) _req; usb_sg_cancel(req); }
0
[ "CWE-476" ]
linux
7c80f9e4a588f1925b07134bb2e3689335f6c6d8
273,433,566,063,858,140,000,000,000,000,000,000,000
6
usb: usbtest: fix NULL pointer dereference If the usbtest driver encounters a device with an IN bulk endpoint but no OUT bulk endpoint, it will try to dereference a NULL pointer (out->desc.bEndpointAddress). The problem can be solved by adding a missing test. Signed-off-by: Alan Stern <[email protected]> Reported-by: Andrey Konovalov <[email protected]> Tested-by: Andrey Konovalov <[email protected]> Signed-off-by: Felipe Balbi <[email protected]>
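A minimal illustration of the missing test mentioned above (names are made up, this is not the usbtest driver API): refuse to configure a bulk pair when one of the two endpoints was never found.

#include <stddef.h>

struct endpoint { unsigned char bEndpointAddress; };

static int setup_bulk_pair(const struct endpoint *in, const struct endpoint *out)
{
    if (in == NULL || out == NULL)
        return -1;                   /* the added check: no half pairs */
    /* only now is out->bEndpointAddress safe to dereference */
    return (int)out->bEndpointAddress;
}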
static void handle_input(VirtIODevice *vdev, VirtQueue *vq) { /* * Users of virtio-serial would like to know when guest becomes * writable again -- i.e. if a vq had stuff queued up and the * guest wasn't reading at all, the host would not be able to * write to the vq anymore. Once the guest reads off something, * we can start queueing things up again. However, this call is * made for each buffer addition by the guest -- even though free * buffers existed prior to the current buffer addition. This is * done so as not to maintain previous state, which will need * additional live-migration-related changes. */ VirtIOSerial *vser; VirtIOSerialPort *port; VirtIOSerialPortClass *vsc; vser = VIRTIO_SERIAL(vdev); port = find_port_by_vq(vser, vq); if (!port) { return; } vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port); /* * If guest_connected is false, this call is being made by the * early-boot queueing up of descriptors, which is just noise for * the host apps -- don't disturb them in that case. */ if (port->guest_connected && port->host_connected && vsc->guest_writable) { vsc->guest_writable(port); } }
0
[ "CWE-120", "CWE-787" ]
qemu
7882080388be5088e72c425b02223c02e6cb4295
145,668,571,002,633,380,000,000,000,000,000,000,000
34
virtio-serial: fix ANY_LAYOUT Don't assume a specific layout for control messages. Required by virtio 1. Signed-off-by: Michael S. Tsirkin <[email protected]> Reviewed-by: Amit Shah <[email protected]> Reviewed-by: Jason Wang <[email protected]>
void lj_debug_pushloc(lua_State *L, GCproto *pt, BCPos pc) { GCstr *name = proto_chunkname(pt); const char *s = strdata(name); MSize i, len = name->len; BCLine line = lj_debug_line(pt, pc); if (*s == '@') { s++; len--; for (i = len; i > 0; i--) if (s[i] == '/' || s[i] == '\\') { s += i+1; break; } lj_str_pushf(L, "%s:%d", s, line); } else if (len > 40) { lj_str_pushf(L, "%p:%d", pt, line); } else if (*s == '=') { lj_str_pushf(L, "%s:%d", s+1, line); } else { lj_str_pushf(L, "\"%s\":%d", s, line); } }
0
[ "CWE-125" ]
LuaJIT
e296f56b825c688c3530a981dc6b495d972f3d01
61,279,169,538,785,310,000,000,000,000,000,000,000
22
Call error function on rethrow after trace exit.
static void rap_break (void *u) { RIORap *rior = (RIORap*) u; if (u) { r_socket_close (rior->fd); rior->fd = NULL; } }
0
[ "CWE-415", "CWE-703" ]
radare2
cb8b683758edddae2d2f62e8e63a738c39f92683
10,825,611,692,024,548,000,000,000,000,000,000,000
7
Fix #16303 - c->table_query double free (#16318)
struct file_list *recv_file_list(int f, int dir_ndx) { const char *good_dirname = NULL; struct file_list *flist; int dstart, flags; int64 start_read; if (!first_flist) { if (show_filelist_progress) start_filelist_progress("receiving file list"); else if (inc_recurse && INFO_GTE(FLIST, 1) && !am_server) rprintf(FCLIENT, "receiving incremental file list\n"); rprintf(FLOG, "receiving file list\n"); if (usermap) parse_name_map(usermap, True); if (groupmap) parse_name_map(groupmap, False); } start_read = stats.total_read; #ifdef SUPPORT_HARD_LINKS if (preserve_hard_links && !first_flist) init_hard_links(); #endif flist = flist_new(0, "recv_file_list"); flist_expand(flist, FLIST_START_LARGE); if (inc_recurse) { if (flist->ndx_start == 1) { dir_flist = flist_new(FLIST_TEMP, "recv_file_list"); flist_expand(dir_flist, FLIST_START_LARGE); } dstart = dir_flist->used; } else { dir_flist = flist; dstart = 0; } while (1) { struct file_struct *file; if (xfer_flags_as_varint) { if ((flags = read_varint(f)) == 0) { int err = read_varint(f); if (!ignore_errors) io_error |= err; break; } } else { if ((flags = read_byte(f)) == 0) break; if (protocol_version >= 28 && (flags & XMIT_EXTENDED_FLAGS)) flags |= read_byte(f) << 8; if (flags == (XMIT_EXTENDED_FLAGS|XMIT_IO_ERROR_ENDLIST)) { int err; if (!use_safe_inc_flist) { rprintf(FERROR, "Invalid flist flag: %x\n", flags); exit_cleanup(RERR_PROTOCOL); } err = read_varint(f); if (!ignore_errors) io_error |= err; break; } } flist_expand(flist, 1); file = recv_file_entry(f, flist, flags); if (inc_recurse) { static const char empty_dir[] = "\0"; const char *cur_dir = file->dirname ? file->dirname : empty_dir; if (relative_paths && *cur_dir == '/') cur_dir++; if (cur_dir != good_dirname) { const char *d = dir_ndx >= 0 ? f_name(dir_flist->files[dir_ndx], NULL) : empty_dir; if (strcmp(cur_dir, d) != 0) { rprintf(FERROR, "ABORTING due to invalid path from sender: %s/%s\n", cur_dir, file->basename); exit_cleanup(RERR_PROTOCOL); } good_dirname = cur_dir; } } if (S_ISREG(file->mode)) { /* Already counted */ } else if (S_ISDIR(file->mode)) { if (inc_recurse) { flist_expand(dir_flist, 1); dir_flist->files[dir_flist->used++] = file; } stats.num_dirs++; } else if (S_ISLNK(file->mode)) stats.num_symlinks++; else if (IS_DEVICE(file->mode)) stats.num_symlinks++; else stats.num_specials++; flist->files[flist->used++] = file; maybe_emit_filelist_progress(flist->used); if (DEBUG_GTE(FLIST, 2)) { char *name = f_name(file, NULL); rprintf(FINFO, "recv_file_name(%s)\n", NS(name)); } } file_total += flist->used; if (DEBUG_GTE(FLIST, 2)) rprintf(FINFO, "received %d names\n", flist->used); if (show_filelist_progress) finish_filelist_progress(flist); if (need_unsorted_flist) { /* Create an extra array of index pointers that we can sort for * the generator's use (for wading through the files in sorted * order and for calling flist_find()). We keep the "files" * list unsorted for our exchange of index numbers with the * other side (since their names may not sort the same). 
*/ flist->sorted = new_array(struct file_struct *, flist->used); memcpy(flist->sorted, flist->files, flist->used * PTR_SIZE); if (inc_recurse && dir_flist->used > dstart) { static int dir_flist_malloced = 0; if (dir_flist_malloced < dir_flist->malloced) { dir_flist->sorted = realloc_array(dir_flist->sorted, struct file_struct *, dir_flist->malloced); dir_flist_malloced = dir_flist->malloced; } memcpy(dir_flist->sorted + dstart, dir_flist->files + dstart, (dir_flist->used - dstart) * PTR_SIZE); fsort(dir_flist->sorted + dstart, dir_flist->used - dstart); } } else { flist->sorted = flist->files; if (inc_recurse && dir_flist->used > dstart) { dir_flist->sorted = dir_flist->files; fsort(dir_flist->sorted + dstart, dir_flist->used - dstart); } } if (inc_recurse) flist_done_allocating(flist); else if (f >= 0) { recv_id_list(f, flist); flist_eof = 1; if (DEBUG_GTE(FLIST, 3)) rprintf(FINFO, "[%s] flist_eof=1\n", who_am_i()); } /* The --relative option sends paths with a leading slash, so we need * to specify the strip_root option here. We rejected leading slashes * for a non-relative transfer in recv_file_entry(). */ flist_sort_and_clean(flist, relative_paths); if (protocol_version < 30) { /* Recv the io_error flag */ int err = read_int(f); if (!ignore_errors) io_error |= err; } else if (inc_recurse && flist->ndx_start == 1) { if (!file_total || strcmp(flist->sorted[flist->low]->basename, ".") != 0) flist->parent_ndx = -1; } if (DEBUG_GTE(FLIST, 3)) output_flist(flist); if (DEBUG_GTE(FLIST, 2)) rprintf(FINFO, "recv_file_list done\n"); stats.flist_size += stats.total_read - start_read; stats.num_files += flist->used; return flist; }
0
[]
rsync
b7231c7d02cfb65d291af74ff66e7d8c507ee871
35,175,655,350,321,912,000,000,000,000,000,000,000
185
Some extra file-list safety checks.
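The kind of check referred to is visible in recv_file_list() above; a condensed, hedged restatement with illustrative names follows.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Abort when the directory name the sender supplied for an entry does not
 * match the directory we are currently expecting. */
static void check_sender_dir(const char *expected, const char *received,
                             const char *basename)
{
    if (strcmp(expected, received) != 0) {
        fprintf(stderr, "ABORTING due to invalid path from sender: %s/%s\n",
                received, basename);
        exit(1);
    }
}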
pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) { struct task_struct *task; unsigned long val = 0UL, lval, ovfl_mask, sval; pfarg_reg_t *req = (pfarg_reg_t *)arg; unsigned int cnum, reg_flags = 0; int i, can_access_pmu = 0, state; int is_loaded, is_system, is_counting, expert_mode; int ret = -EINVAL; pfm_reg_check_t rd_func; /* * access is possible when loaded only for * self-monitoring tasks or in UP mode */ state = ctx->ctx_state; is_loaded = state == PFM_CTX_LOADED ? 1 : 0; is_system = ctx->ctx_fl_system; ovfl_mask = pmu_conf->ovfl_val; task = ctx->ctx_task; if (state == PFM_CTX_ZOMBIE) return -EINVAL; if (likely(is_loaded)) { /* * In system wide and when the context is loaded, access can only happen * when the caller is running on the CPU being monitored by the session. * It does not have to be the owner (ctx_task) of the context per se. */ if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) { DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); return -EBUSY; } /* * this can be true when not self-monitoring only in UP */ can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; if (can_access_pmu) ia64_srlz_d(); } expert_mode = pfm_sysctl.expert_mode; DPRINT(("ld=%d apmu=%d ctx_state=%d\n", is_loaded, can_access_pmu, state)); /* * on both UP and SMP, we can only read the PMD from the hardware register when * the task is the owner of the local PMU. */ for (i = 0; i < count; i++, req++) { cnum = req->reg_num; reg_flags = req->reg_flags; if (unlikely(!PMD_IS_IMPL(cnum))) goto error; /* * we can only read the register that we use. That includes * the one we explicitely initialize AND the one we want included * in the sampling buffer (smpl_regs). * * Having this restriction allows optimization in the ctxsw routine * without compromising security (leaks) */ if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error; sval = ctx->ctx_pmds[cnum].val; lval = ctx->ctx_pmds[cnum].lval; is_counting = PMD_IS_COUNTING(cnum); /* * If the task is not the current one, then we check if the * PMU state is still in the local live register due to lazy ctxsw. * If true, then we read directly from the registers. */ if (can_access_pmu){ val = ia64_get_pmd(cnum); } else { /* * context has been saved * if context is zombie, then task does not exist anymore. * In this case, we use the full value saved in the context (pfm_flush_regs()). */ val = is_loaded ? ctx->th_pmds[cnum] : 0UL; } rd_func = pmu_conf->pmd_desc[cnum].read_check; if (is_counting) { /* * XXX: need to check for overflow when loaded */ val &= ovfl_mask; val += sval; } /* * execute read checker, if any */ if (unlikely(expert_mode == 0 && rd_func)) { unsigned long v = val; ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs); if (ret) goto error; val = v; ret = -EINVAL; } PFM_REG_RETFLAG_SET(reg_flags, 0); DPRINT(("pmd[%u]=0x%lx\n", cnum, val)); /* * update register return value, abort all if problem during copy. * we only modify the reg_flags field. no check mode is fine because * access has been verified upfront in sys_perfmonctl(). */ req->reg_value = val; req->reg_flags = reg_flags; req->reg_last_reset_val = lval; } return 0; error: PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL); return ret; }
0
[]
linux-2.6
41d5e5d73ecef4ef56b7b4cde962929a712689b4
35,232,713,559,232,000,000,000,000,000,000,000,000
129
[IA64] perfmon use-after-free fix Perfmon associates vmalloc()ed memory with a file descriptor, and installs a vma mapping that memory. Unfortunately, the vm_file field is not filled in, so processes with mappings to that memory do not prevent the file from being closed and the memory freed. This results in use-after-free bugs and multiple freeing of pages, etc. I saw this bug on an Altix on SLES9. Haven't reproduced upstream but it looks like the same issue is there. Signed-off-by: Nick Piggin <[email protected]> Cc: Stephane Eranian <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Tony Luck <[email protected]>
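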
static int read_tfra(MOVContext *mov, AVIOContext *f) { MOVFragmentIndex* index = NULL; int version, fieldlength, i, j; int64_t pos = avio_tell(f); uint32_t size = avio_rb32(f); void *tmp; if (avio_rb32(f) != MKBETAG('t', 'f', 'r', 'a')) { return 1; } av_log(mov->fc, AV_LOG_VERBOSE, "found tfra\n"); index = av_mallocz(sizeof(MOVFragmentIndex)); if (!index) { return AVERROR(ENOMEM); } tmp = av_realloc_array(mov->fragment_index_data, mov->fragment_index_count + 1, sizeof(MOVFragmentIndex*)); if (!tmp) { av_freep(&index); return AVERROR(ENOMEM); } mov->fragment_index_data = tmp; mov->fragment_index_data[mov->fragment_index_count++] = index; version = avio_r8(f); avio_rb24(f); index->track_id = avio_rb32(f); fieldlength = avio_rb32(f); index->item_count = avio_rb32(f); index->items = av_mallocz_array( index->item_count, sizeof(MOVFragmentIndexItem)); if (!index->items) { index->item_count = 0; return AVERROR(ENOMEM); } for (i = 0; i < index->item_count; i++) { int64_t time, offset; if (version == 1) { time = avio_rb64(f); offset = avio_rb64(f); } else { time = avio_rb32(f); offset = avio_rb32(f); } index->items[i].time = time; index->items[i].moof_offset = offset; for (j = 0; j < ((fieldlength >> 4) & 3) + 1; j++) avio_r8(f); for (j = 0; j < ((fieldlength >> 2) & 3) + 1; j++) avio_r8(f); for (j = 0; j < ((fieldlength >> 0) & 3) + 1; j++) avio_r8(f); } avio_seek(f, pos + size, SEEK_SET); return 0; }
1
[ "CWE-399", "CWE-834" ]
FFmpeg
9cb4eb772839c5e1de2855d126bf74ff16d13382
61,738,579,619,029,880,000,000,000,000,000,000,000
60
avformat/mov: Fix DoS in read_tfra() Fixes: Missing EOF check in loop No testcase Found-by: Xiaohei and Wangchu from Alibaba Security Team Signed-off-by: Michael Niedermayer <[email protected]>
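A hedged sketch of the missing check (plain stdio rather than FFmpeg's AVIOContext API): stop the item loop as soon as the input runs out, so an attacker-supplied item_count cannot keep the parser spinning.

#include <stdio.h>
#include <stdint.h>

static int read_index_items(FILE *f, uint32_t item_count)
{
    for (uint32_t i = 0; i < item_count; i++) {
        int c = fgetc(f);
        if (c == EOF)
            return -1;               /* the missing EOF check: bail out */
        /* ... parse the rest of one index item ... */
    }
    return 0;
}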
Magick_Orientation_to_Exif_Orientation(const OrientationType orientation) { switch (orientation) { /* Convert to Exif orientations as defined in "Exchangeable image file * format for digital still cameras: Exif Version 2.31", * http://www.cipa.jp/std/documents/e/DC-008-Translation-2016-E.pdf */ case TopLeftOrientation: return 1; case TopRightOrientation: return 2; case BottomRightOrientation: return 3; case BottomLeftOrientation: return 4; case LeftTopOrientation: return 5; case RightTopOrientation: return 6; case RightBottomOrientation: return 7; case LeftBottomOrientation: return 8; case UndefinedOrientation: default: return 0; } }
0
[ "CWE-772" ]
ImageMagick
4d6accd355119d54429a86a1859b8329f0130f30
278,545,886,259,607,600,000,000,000,000,000,000,000
30
https://github.com/ImageMagick/ImageMagick/issues/902
struct minidump_memory_info *r_bin_mdmp_get_mem_info(struct r_bin_mdmp_obj *obj, ut64 vaddr) { struct minidump_memory_info *mem_info; RListIter *it; if (!obj) { return NULL; } r_list_foreach (obj->streams.memory_infos, it, mem_info) { if (mem_info->allocation_base && vaddr == mem_info->base_address) { return mem_info; } } return NULL; }
0
[ "CWE-400", "CWE-703" ]
radare2
27fe8031782d3a06c3998eaa94354867864f9f1b
316,726,412,787,329,160,000,000,000,000,000,000,000
16
Fix DoS in the minidump parser ##crash * Reported by lazymio via huntr.dev * Reproducer: mdmp-dos
static int i740fb_setup_ddc_bus(struct fb_info *info) { struct i740fb_par *par = info->par; strlcpy(par->ddc_adapter.name, info->fix.id, sizeof(par->ddc_adapter.name)); par->ddc_adapter.owner = THIS_MODULE; par->ddc_adapter.class = I2C_CLASS_DDC; par->ddc_adapter.algo_data = &par->ddc_algo; par->ddc_adapter.dev.parent = info->device; par->ddc_algo.setsda = i740fb_ddc_setsda; par->ddc_algo.setscl = i740fb_ddc_setscl; par->ddc_algo.getsda = i740fb_ddc_getsda; par->ddc_algo.getscl = i740fb_ddc_getscl; par->ddc_algo.udelay = 10; par->ddc_algo.timeout = 20; par->ddc_algo.data = par; i2c_set_adapdata(&par->ddc_adapter, par); return i2c_bit_add_bus(&par->ddc_adapter); }
0
[ "CWE-369" ]
linux-fbdev
15cf0b82271b1823fb02ab8c377badba614d95d5
214,953,531,010,997,440,000,000,000,000,000,000,000
22
video: fbdev: i740fb: Error out if 'pixclock' equals zero The userspace program could pass any values to the driver through ioctl() interface. If the driver doesn't check the value of 'pixclock', it may cause divide error. Fix this by checking whether 'pixclock' is zero in the function i740fb_check_var(). The following log reveals it: divide error: 0000 [#1] PREEMPT SMP KASAN PTI RIP: 0010:i740fb_decode_var drivers/video/fbdev/i740fb.c:444 [inline] RIP: 0010:i740fb_set_par+0x272f/0x3bb0 drivers/video/fbdev/i740fb.c:739 Call Trace: fb_set_var+0x604/0xeb0 drivers/video/fbdev/core/fbmem.c:1036 do_fb_ioctl+0x234/0x670 drivers/video/fbdev/core/fbmem.c:1112 fb_ioctl+0xdd/0x130 drivers/video/fbdev/core/fbmem.c:1191 vfs_ioctl fs/ioctl.c:51 [inline] __do_sys_ioctl fs/ioctl.c:874 [inline] Signed-off-by: Zheyu Ma <[email protected]> Signed-off-by: Helge Deller <[email protected]>
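A small illustration of the guard being added (field and conversion are illustrative, not the exact i740fb formula): reject a zero pixclock coming from userspace before it is ever used as a divisor.

#include <stdint.h>

static int check_var_pixclock(uint32_t pixclock)
{
    return pixclock == 0 ? -1 : 0;   /* zero would trigger a divide error */
}

static uint32_t derived_rate(uint32_t pixclock)
{
    /* illustrative division; call only after check_var_pixclock() accepted it */
    return 1000000u / pixclock;
}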
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, u64 time_seq, struct tree_mod_elem *first_tm) { u32 n; struct rb_node *next; struct tree_mod_elem *tm = first_tm; unsigned long o_dst; unsigned long o_src; unsigned long p_size = sizeof(struct btrfs_key_ptr); n = btrfs_header_nritems(eb); read_lock(&fs_info->tree_mod_log_lock); while (tm && tm->seq >= time_seq) { /* * all the operations are recorded with the operator used for * the modification. as we're going backwards, we do the * opposite of each operation here. */ switch (tm->op) { case MOD_LOG_KEY_REMOVE_WHILE_FREEING: BUG_ON(tm->slot < n); fallthrough; case MOD_LOG_KEY_REMOVE_WHILE_MOVING: case MOD_LOG_KEY_REMOVE: btrfs_set_node_key(eb, &tm->key, tm->slot); btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr); btrfs_set_node_ptr_generation(eb, tm->slot, tm->generation); n++; break; case MOD_LOG_KEY_REPLACE: BUG_ON(tm->slot >= n); btrfs_set_node_key(eb, &tm->key, tm->slot); btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr); btrfs_set_node_ptr_generation(eb, tm->slot, tm->generation); break; case MOD_LOG_KEY_ADD: /* if a move operation is needed it's in the log */ n--; break; case MOD_LOG_MOVE_KEYS: o_dst = btrfs_node_key_ptr_offset(tm->slot); o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot); memmove_extent_buffer(eb, o_dst, o_src, tm->move.nr_items * p_size); break; case MOD_LOG_ROOT_REPLACE: /* * this operation is special. for roots, this must be * handled explicitly before rewinding. * for non-roots, this operation may exist if the node * was a root: root A -> child B; then A gets empty and * B is promoted to the new root. in the mod log, we'll * have a root-replace operation for B, a tree block * that is no root. we simply ignore that operation. */ break; } next = rb_next(&tm->node); if (!next) break; tm = rb_entry(next, struct tree_mod_elem, node); if (tm->logical != first_tm->logical) break; } read_unlock(&fs_info->tree_mod_log_lock); btrfs_set_header_nritems(eb, n); }
0
[ "CWE-362" ]
linux
dbcc7d57bffc0c8cac9dac11bec548597d59a6a5
65,969,175,482,550,120,000,000,000,000,000,000,000
69
btrfs: fix race when cloning extent buffer during rewind of an old root While resolving backreferences, as part of a logical ino ioctl call or fiemap, we can end up hitting a BUG_ON() when replaying tree mod log operations of a root, triggering a stack trace like the following: ------------[ cut here ]------------ kernel BUG at fs/btrfs/ctree.c:1210! invalid opcode: 0000 [#1] SMP KASAN PTI CPU: 1 PID: 19054 Comm: crawl_335 Tainted: G W 5.11.0-2d11c0084b02-misc-next+ #89 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014 RIP: 0010:__tree_mod_log_rewind+0x3b1/0x3c0 Code: 05 48 8d 74 10 (...) RSP: 0018:ffffc90001eb70b8 EFLAGS: 00010297 RAX: 0000000000000000 RBX: ffff88812344e400 RCX: ffffffffb28933b6 RDX: 0000000000000007 RSI: dffffc0000000000 RDI: ffff88812344e42c RBP: ffffc90001eb7108 R08: 1ffff11020b60a20 R09: ffffed1020b60a20 R10: ffff888105b050f9 R11: ffffed1020b60a1f R12: 00000000000000ee R13: ffff8880195520c0 R14: ffff8881bc958500 R15: ffff88812344e42c FS: 00007fd1955e8700(0000) GS:ffff8881f5600000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007efdb7928718 CR3: 000000010103a006 CR4: 0000000000170ee0 Call Trace: btrfs_search_old_slot+0x265/0x10d0 ? lock_acquired+0xbb/0x600 ? btrfs_search_slot+0x1090/0x1090 ? free_extent_buffer.part.61+0xd7/0x140 ? free_extent_buffer+0x13/0x20 resolve_indirect_refs+0x3e9/0xfc0 ? lock_downgrade+0x3d0/0x3d0 ? __kasan_check_read+0x11/0x20 ? add_prelim_ref.part.11+0x150/0x150 ? lock_downgrade+0x3d0/0x3d0 ? __kasan_check_read+0x11/0x20 ? lock_acquired+0xbb/0x600 ? __kasan_check_write+0x14/0x20 ? do_raw_spin_unlock+0xa8/0x140 ? rb_insert_color+0x30/0x360 ? prelim_ref_insert+0x12d/0x430 find_parent_nodes+0x5c3/0x1830 ? resolve_indirect_refs+0xfc0/0xfc0 ? lock_release+0xc8/0x620 ? fs_reclaim_acquire+0x67/0xf0 ? lock_acquire+0xc7/0x510 ? lock_downgrade+0x3d0/0x3d0 ? lockdep_hardirqs_on_prepare+0x160/0x210 ? lock_release+0xc8/0x620 ? fs_reclaim_acquire+0x67/0xf0 ? lock_acquire+0xc7/0x510 ? poison_range+0x38/0x40 ? unpoison_range+0x14/0x40 ? trace_hardirqs_on+0x55/0x120 btrfs_find_all_roots_safe+0x142/0x1e0 ? find_parent_nodes+0x1830/0x1830 ? btrfs_inode_flags_to_xflags+0x50/0x50 iterate_extent_inodes+0x20e/0x580 ? tree_backref_for_extent+0x230/0x230 ? lock_downgrade+0x3d0/0x3d0 ? read_extent_buffer+0xdd/0x110 ? lock_downgrade+0x3d0/0x3d0 ? __kasan_check_read+0x11/0x20 ? lock_acquired+0xbb/0x600 ? __kasan_check_write+0x14/0x20 ? _raw_spin_unlock+0x22/0x30 ? __kasan_check_write+0x14/0x20 iterate_inodes_from_logical+0x129/0x170 ? iterate_inodes_from_logical+0x129/0x170 ? btrfs_inode_flags_to_xflags+0x50/0x50 ? iterate_extent_inodes+0x580/0x580 ? __vmalloc_node+0x92/0xb0 ? init_data_container+0x34/0xb0 ? init_data_container+0x34/0xb0 ? kvmalloc_node+0x60/0x80 btrfs_ioctl_logical_to_ino+0x158/0x230 btrfs_ioctl+0x205e/0x4040 ? __might_sleep+0x71/0xe0 ? btrfs_ioctl_get_supported_features+0x30/0x30 ? getrusage+0x4b6/0x9c0 ? __kasan_check_read+0x11/0x20 ? lock_release+0xc8/0x620 ? __might_fault+0x64/0xd0 ? lock_acquire+0xc7/0x510 ? lock_downgrade+0x3d0/0x3d0 ? lockdep_hardirqs_on_prepare+0x210/0x210 ? lockdep_hardirqs_on_prepare+0x210/0x210 ? __kasan_check_read+0x11/0x20 ? do_vfs_ioctl+0xfc/0x9d0 ? ioctl_file_clone+0xe0/0xe0 ? lock_downgrade+0x3d0/0x3d0 ? lockdep_hardirqs_on_prepare+0x210/0x210 ? __kasan_check_read+0x11/0x20 ? lock_release+0xc8/0x620 ? __task_pid_nr_ns+0xd3/0x250 ? lock_acquire+0xc7/0x510 ? __fget_files+0x160/0x230 ? 
__fget_light+0xf2/0x110 __x64_sys_ioctl+0xc3/0x100 do_syscall_64+0x37/0x80 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x7fd1976e2427 Code: 00 00 90 48 8b 05 (...) RSP: 002b:00007fd1955e5cf8 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 RAX: ffffffffffffffda RBX: 00007fd1955e5f40 RCX: 00007fd1976e2427 RDX: 00007fd1955e5f48 RSI: 00000000c038943b RDI: 0000000000000004 RBP: 0000000001000000 R08: 0000000000000000 R09: 00007fd1955e6120 R10: 0000557835366b00 R11: 0000000000000246 R12: 0000000000000004 R13: 00007fd1955e5f48 R14: 00007fd1955e5f40 R15: 00007fd1955e5ef8 Modules linked in: ---[ end trace ec8931a1c36e57be ]--- (gdb) l *(__tree_mod_log_rewind+0x3b1) 0xffffffff81893521 is in __tree_mod_log_rewind (fs/btrfs/ctree.c:1210). 1205 * the modification. as we're going backwards, we do the 1206 * opposite of each operation here. 1207 */ 1208 switch (tm->op) { 1209 case MOD_LOG_KEY_REMOVE_WHILE_FREEING: 1210 BUG_ON(tm->slot < n); 1211 fallthrough; 1212 case MOD_LOG_KEY_REMOVE_WHILE_MOVING: 1213 case MOD_LOG_KEY_REMOVE: 1214 btrfs_set_node_key(eb, &tm->key, tm->slot); Here's what happens to hit that BUG_ON(): 1) We have one tree mod log user (through fiemap or the logical ino ioctl), with a sequence number of 1, so we have fs_info->tree_mod_seq == 1; 2) Another task is at ctree.c:balance_level() and we have eb X currently as the root of the tree, and we promote its single child, eb Y, as the new root. Then, at ctree.c:balance_level(), we call: tree_mod_log_insert_root(eb X, eb Y, 1); 3) At tree_mod_log_insert_root() we create tree mod log elements for each slot of eb X, of operation type MOD_LOG_KEY_REMOVE_WHILE_FREEING each with a ->logical pointing to ebX->start. These are placed in an array named tm_list. Lets assume there are N elements (N pointers in eb X); 4) Then, still at tree_mod_log_insert_root(), we create a tree mod log element of operation type MOD_LOG_ROOT_REPLACE, ->logical set to ebY->start, ->old_root.logical set to ebX->start, ->old_root.level set to the level of eb X and ->generation set to the generation of eb X; 5) Then tree_mod_log_insert_root() calls tree_mod_log_free_eb() with tm_list as argument. After that, tree_mod_log_free_eb() calls __tree_mod_log_insert() for each member of tm_list in reverse order, from highest slot in eb X, slot N - 1, to slot 0 of eb X; 6) __tree_mod_log_insert() sets the sequence number of each given tree mod log operation - it increments fs_info->tree_mod_seq and sets fs_info->tree_mod_seq as the sequence number of the given tree mod log operation. This means that for the tm_list created at tree_mod_log_insert_root(), the element corresponding to slot 0 of eb X has the highest sequence number (1 + N), and the element corresponding to the last slot has the lowest sequence number (2); 7) Then, after inserting tm_list's elements into the tree mod log rbtree, the MOD_LOG_ROOT_REPLACE element is inserted, which gets the highest sequence number, which is N + 2; 8) Back to ctree.c:balance_level(), we free eb X by calling btrfs_free_tree_block() on it. 
Because eb X was created in the current transaction, has no other references and writeback did not happen for it, we add it back to the free space cache/tree; 9) Later some other task T allocates the metadata extent from eb X, since it is marked as free space in the space cache/tree, and uses it as a node for some other btree; 10) The tree mod log user task calls btrfs_search_old_slot(), which calls get_old_root(), and finally that calls __tree_mod_log_oldest_root() with time_seq == 1 and eb_root == eb Y; 11) First iteration of the while loop finds the tree mod log element with sequence number N + 2, for the logical address of eb Y and of type MOD_LOG_ROOT_REPLACE; 12) Because the operation type is MOD_LOG_ROOT_REPLACE, we don't break out of the loop, and set root_logical to point to tm->old_root.logical which corresponds to the logical address of eb X; 13) On the next iteration of the while loop, the call to tree_mod_log_search_oldest() returns the smallest tree mod log element for the logical address of eb X, which has a sequence number of 2, an operation type of MOD_LOG_KEY_REMOVE_WHILE_FREEING and corresponds to the old slot N - 1 of eb X (eb X had N items in it before being freed); 14) We then break out of the while loop and return the tree mod log operation of type MOD_LOG_ROOT_REPLACE (eb Y), and not the one for slot N - 1 of eb X, to get_old_root(); 15) At get_old_root(), we process the MOD_LOG_ROOT_REPLACE operation and set "logical" to the logical address of eb X, which was the old root. We then call tree_mod_log_search() passing it the logical address of eb X and time_seq == 1; 16) Then before calling tree_mod_log_search(), task T adds a key to eb X, which results in adding a tree mod log operation of type MOD_LOG_KEY_ADD to the tree mod log - this is done at ctree.c:insert_ptr() - but after adding the tree mod log operation and before updating the number of items in eb X from 0 to 1... 17) The task at get_old_root() calls tree_mod_log_search() and gets the tree mod log operation of type MOD_LOG_KEY_ADD just added by task T. Then it enters the following if branch: if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) { (...) } (...) Calls read_tree_block() for eb X, which gets a reference on eb X but does not lock it - task T has it locked. Then it clones eb X while it has nritems set to 0 in its header, before task T sets nritems to 1 in eb X's header. From hereupon we use the clone of eb X which no other task has access to; 18) Then we call __tree_mod_log_rewind(), passing it the MOD_LOG_KEY_ADD mod log operation we just got from tree_mod_log_search() in the previous step and the cloned version of eb X; 19) At __tree_mod_log_rewind(), we set the local variable "n" to the number of items set in eb X's clone, which is 0. Then we enter the while loop, and in its first iteration we process the MOD_LOG_KEY_ADD operation, which just decrements "n" from 0 to (u32)-1, since "n" is declared with a type of u32. At the end of this iteration we call rb_next() to find the next tree mod log operation for eb X, that gives us the mod log operation of type MOD_LOG_KEY_REMOVE_WHILE_FREEING, for slot 0, with a sequence number of N + 1 (steps 3 to 6); 20) Then we go back to the top of the while loop and trigger the following BUG_ON(): (...) switch (tm->op) { case MOD_LOG_KEY_REMOVE_WHILE_FREEING: BUG_ON(tm->slot < n); fallthrough; (...) Because "n" has a value of (u32)-1 (4294967295) and tm->slot is 0. 
Fix this by taking a read lock on the extent buffer before cloning it at ctree.c:get_old_root(). This should be done regardless of the extent buffer having been freed and reused, as a concurrent task might be modifying it (while holding a write lock on it). Reported-by: Zygo Blaxell <[email protected]> Link: https://lore.kernel.org/linux-btrfs/[email protected]/ Fixes: 834328a8493079 ("Btrfs: tree mod log's old roots could still be part of the tree") CC: [email protected] # 4.4+ Signed-off-by: Filipe Manana <[email protected]> Signed-off-by: David Sterba <[email protected]>
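A very loose userspace analogy of the fix (pthreads, not btrfs locking primitives): hold a read lock on the shared buffer for the whole duration of the clone so a concurrent writer cannot change it halfway through the copy.

#include <pthread.h>
#include <string.h>

struct shared_eb {
    pthread_rwlock_t lock;
    char data[4096];
};

static void clone_eb(struct shared_eb *src, char *dst, size_t len)
{
    pthread_rwlock_rdlock(&src->lock);   /* block concurrent modification */
    memcpy(dst, src->data, len);
    pthread_rwlock_unlock(&src->lock);
}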
void sched_exec(void) { struct task_struct *p = current; unsigned long flags; struct rq *rq; int dest_cpu; rq = task_rq_lock(p, &flags); dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0); if (dest_cpu == smp_processor_id()) goto unlock; /* * select_task_rq() can race against ->cpus_allowed */ if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) && likely(cpu_active(dest_cpu)) && migrate_task(p, dest_cpu)) { struct migration_arg arg = { p, dest_cpu }; task_rq_unlock(rq, &flags); stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); return; } unlock: task_rq_unlock(rq, &flags); }
0
[ "CWE-703", "CWE-835" ]
linux
f26f9aff6aaf67e9a430d16c266f91b13a5bff64
183,543,889,111,731,000,000,000,000,000,000,000,000
26
Sched: fix skip_clock_update optimization idle_balance() drops/retakes rq->lock, leaving the previous task vulnerable to set_tsk_need_resched(). Clear it after we return from balancing instead, and in setup_thread_stack() as well, so no successfully descheduled or never scheduled task has it set. Need resched confused the skip_clock_update logic, which assumes that the next call to update_rq_clock() will come nearly immediately after being set. Make the optimization robust against the case where a sleeper is woken before it successfully deschedules by checking that the current task has not been dequeued before setting the flag, since it is that useless clock update we're trying to save, and clear unconditionally in schedule() proper instead of conditionally in put_prev_task(). Signed-off-by: Mike Galbraith <[email protected]> Reported-by: Bjoern B. Brandenburg <[email protected]> Tested-by: Yong Zhang <[email protected]> Signed-off-by: Peter Zijlstra <[email protected]> Cc: [email protected] LKML-Reference: <[email protected]> Signed-off-by: Ingo Molnar <[email protected]>
open_and_process_routine(THD *thd, Query_tables_list *prelocking_ctx, Sroutine_hash_entry *rt, Prelocking_strategy *prelocking_strategy, bool has_prelocking_list, Open_table_context *ot_ctx, bool *need_prelocking, bool *routine_modifies_data) { MDL_key::enum_mdl_namespace mdl_type= rt->mdl_request.key.mdl_namespace(); DBUG_ENTER("open_and_process_routine"); *routine_modifies_data= false; switch (mdl_type) { case MDL_key::FUNCTION: case MDL_key::PROCEDURE: { sp_head *sp; /* Try to get MDL lock on the routine. Note that we do not take locks on top-level CALLs as this can lead to a deadlock. Not locking top-level CALLs does not break the binlog as only the statements in the called procedure show up there, not the CALL itself. */ if (rt != (Sroutine_hash_entry*)prelocking_ctx->sroutines_list.first || mdl_type != MDL_key::PROCEDURE) { /* Since we acquire only shared lock on routines we don't need to care about global intention exclusive locks. */ DBUG_ASSERT(rt->mdl_request.type == MDL_SHARED); /* Waiting for a conflicting metadata lock to go away may lead to a deadlock, detected by MDL subsystem. If possible, we try to resolve such deadlocks by releasing all metadata locks and restarting the pre-locking process. To prevent the error from polluting the diagnostics area in case of successful resolution, install a special error handler for ER_LOCK_DEADLOCK error. */ MDL_deadlock_handler mdl_deadlock_handler(ot_ctx); thd->push_internal_handler(&mdl_deadlock_handler); bool result= thd->mdl_context.acquire_lock(&rt->mdl_request, ot_ctx->get_timeout()); thd->pop_internal_handler(); if (result) DBUG_RETURN(TRUE); DEBUG_SYNC(thd, "after_shared_lock_pname"); /* Ensures the routine is up-to-date and cached, if exists. */ if (sp_cache_routine(thd, rt, has_prelocking_list, &sp)) DBUG_RETURN(TRUE); /* Remember the version of the routine in the parse tree. */ if (check_and_update_routine_version(thd, rt, sp)) DBUG_RETURN(TRUE); /* 'sp' is NULL when there is no such routine. */ if (sp) { *routine_modifies_data= sp->modifies_data(); if (!has_prelocking_list) prelocking_strategy->handle_routine(thd, prelocking_ctx, rt, sp, need_prelocking); } } else { /* If it's a top level call, just make sure we have a recent version of the routine, if it exists. Validating routine version is unnecessary, since CALL does not affect the prepared statement prelocked list. */ if (sp_cache_routine(thd, rt, FALSE, &sp)) DBUG_RETURN(TRUE); } } break; case MDL_key::TRIGGER: /** We add trigger entries to lex->sroutines_list, but we don't load them here. The trigger entry is only used when building a transitive closure of objects used in a statement, to avoid adding to this closure objects that are used in the trigger more than once. E.g. if a trigger trg refers to table t2, and the trigger table t1 is used multiple times in the statement (say, because it's used in function f1() twice), we will only add t2 once to the list of tables to prelock. We don't take metadata locks on triggers either: they are protected by a respective lock on the table, on which the trigger is defined. The only two cases which give "trouble" are SHOW CREATE TRIGGER and DROP TRIGGER statements. For these, statement syntax doesn't specify the table on which this trigger is defined, so we have to make a "dirty" read in the data dictionary to find out the table name. Once we discover the table name, we take a metadata lock on it, and this protects all trigger operations. 
Of course the table, in theory, may disappear between the dirty read and metadata lock acquisition, but in that case we just return a run-time error. Grammar of other trigger DDL statements (CREATE, DROP) requires the table to be specified explicitly, so we use the table metadata lock to protect trigger metadata in these statements. Similarly, in DML we always use triggers together with their tables, and thus don't need to take separate metadata locks on them. */ break; default: /* Impossible type value. */ DBUG_ASSERT(0); } DBUG_RETURN(FALSE); }
0
[]
server
0168d1eda30dad4b517659422e347175eb89e923
70,668,962,285,793,720,000,000,000,000,000,000,000
124
MDEV-25766 Unused CTE lead to a crash in find_field_in_tables/find_order_in_list Do not assume that subquery Item always present.
virDomainDefFindDevice(virDomainDefPtr def, const char *devAlias, virDomainDeviceDefPtr dev, bool reportError) { virDomainDefFindDeviceCallbackData data = { devAlias, dev }; dev->type = VIR_DOMAIN_DEVICE_NONE; virDomainDeviceInfoIterateInternal(def, virDomainDefFindDeviceCallback, DOMAIN_DEVICE_ITERATE_ALL_CONSOLES, &data); if (dev->type == VIR_DOMAIN_DEVICE_NONE) { if (reportError) { virReportError(VIR_ERR_INTERNAL_ERROR, _("no device found with alias %s"), devAlias); } else { VIR_DEBUG("no device found with alias %s", devAlias); } return -1; } return 0; }
0
[ "CWE-212" ]
libvirt
a5b064bf4b17a9884d7d361733737fb614ad8979
185,457,753,132,188,300,000,000,000,000,000,000,000
24
conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410 (v6.1.0-122-g3b076391be) we support http cookies. Since they may contain somewhat sensitive information we should not format them into the XML unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted. Reported-by: Han Han <[email protected]> Signed-off-by: Peter Krempa <[email protected]> Reviewed-by: Erik Skultety <[email protected]>
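A hedged sketch of the flag-gated formatting described above (function, flag, and element names are illustrative, not libvirt's formatter API): the sensitive value is written only when the caller explicitly asks for the secure variant of the dump.

#include <stdio.h>
#include <stddef.h>

#define FORMAT_SECURE (1u << 0)

static void format_cookies(FILE *out, const char *cookies, unsigned int flags)
{
    if (cookies == NULL || (flags & FORMAT_SECURE) == 0)
        return;                      /* omit sensitive data by default */
    fprintf(out, "<cookies>%s</cookies>\n", cookies);
}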
UTF16Ref getUTF16Ref(llvh::SmallVectorImpl<char16_t> &allocator) const { assert(allocator.empty() && "Shouldn't use a non-empty allocator"); return getUTF16Ref(allocator, false); }
0
[ "CWE-416", "CWE-703" ]
hermes
d86e185e485b6330216dee8e854455c694e3a36e
248,883,205,034,086,100,000,000,000,000,000,000,000
4
Fix a bug in transient object property assignment and getUTF16Ref Summary: The returned `UTF16Ref` from `StringView::getUTF16Ref` can be invalidated by appending more contents to the same allocator. This case was encountered in `transientObjectPutErrorMessage`, resulting in using free'd memory. Reviewed By: tmikov Differential Revision: D23034855 fbshipit-source-id: 4c25a5369934bf3bdfc5582385503f4b87de3792
static irqreturn_t ql3xxx_isr(int irq, void *dev_id) { struct net_device *ndev = dev_id; struct ql3_adapter *qdev = netdev_priv(ndev); struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; int handled = 1; u32 var; value = ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { spin_lock(&qdev->adapter_lock); netif_stop_queue(qdev->ndev); netif_carrier_off(qdev->ndev); ql_disable_interrupts(qdev); qdev->port_link_state = LS_DOWN; set_bit(QL_RESET_ACTIVE, &qdev->flags) ; if (value & ISP_CONTROL_FE) { /* * Chip Fatal Error. */ var = ql_read_page0_reg_l(qdev, &port_regs->PortFatalErrStatus); netdev_warn(ndev, "Resetting chip. PortFatalErrStatus register = 0x%x\n", var); set_bit(QL_RESET_START, &qdev->flags) ; } else { /* * Soft Reset Requested. */ set_bit(QL_RESET_PER_SCSI, &qdev->flags) ; netdev_err(ndev, "Another function issued a reset to the chip. ISR value = %x\n", value); } queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); spin_unlock(&qdev->adapter_lock); } else if (value & ISP_IMR_DISABLE_CMPL_INT) { ql_disable_interrupts(qdev); if (likely(napi_schedule_prep(&qdev->napi))) __napi_schedule(&qdev->napi); } else return IRQ_NONE; return IRQ_RETVAL(handled);
0
[ "CWE-401" ]
linux
1acb8f2a7a9f10543868ddd737e37424d5c36cf4
285,857,492,899,041,650,000,000,000,000,000,000,000
53
net: qlogic: Fix memory leak in ql_alloc_large_buffers In ql_alloc_large_buffers, a new skb is allocated via netdev_alloc_skb. This skb should be released if pci_dma_mapping_error fails. Fixes: 0f8ab89e825f ("qla3xxx: Check return code from pci_map_single() in ql_release_to_lrg_buf_free_list(), ql_populate_free_queue(), ql_alloc_large_buffers(), and ql3xxx_send()") Signed-off-by: Navid Emamdoost <[email protected]> Signed-off-by: David S. Miller <[email protected]>
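A minimal sketch of the leak fix (stand-in names, not the qla3xxx driver): when the mapping step after the allocation fails, release the buffer that was just allocated instead of dropping the only reference to it.

#include <stdlib.h>

struct lrg_buf { void *skb; };

static int map_buffer(void *p) { (void)p; return -1; }  /* stand-in for the mapping step */

static int alloc_and_map(struct lrg_buf *b, size_t size)
{
    b->skb = malloc(size);
    if (b->skb == NULL)
        return -1;
    if (map_buffer(b->skb) != 0) {
        free(b->skb);                /* the fix: no leak on the error path */
        b->skb = NULL;
        return -1;
    }
    return 0;
}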
inline char* GetCurrentDirectoryA(int dwBufSize, char *lpBuffer) { char* ptr = dirTableA[nDefault]; while (--dwBufSize) { if ((*lpBuffer++ = *ptr++) == '\0') break; } *lpBuffer = '\0'; return /* unused */ NULL; };
0
[]
perl5
52236464559c6e410a4587d3c6da9639e75f3ec1
271,348,388,544,496,680,000,000,000,000,000,000,000
11
avoid invalid memory access in MapPath[AW] This issue was assigned CVE-2015-8608. [perl #126755]
Header headerFree(Header h) { (void) headerUnlink(h); if (h == NULL || h->nrefs > 0) return NULL; if (h->index) { indexEntry entry = h->index; int i; for (i = 0; i < h->indexUsed; i++, entry++) { if ((h->flags & HEADERFLAG_ALLOCATED) && ENTRY_IS_REGION(entry)) { if (entry->length > 0) { int32_t * ei = entry->data; if ((ei - 2) == h->blob) h->blob = _free(h->blob); entry->data = NULL; } } else if (!ENTRY_IN_REGION(entry)) { entry->data = _free(entry->data); } entry->data = NULL; } h->index = _free(h->index); } h = _free(h); return NULL; }
0
[ "CWE-125" ]
rpm
8f4b3c3cab8922a2022b9e47c71f1ecf906077ef
143,753,715,449,910,320,000,000,000,000,000,000,000
28
hdrblobInit() needs bounds checks too Users can pass untrusted data to hdrblobInit() and it must be robust against this.
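A hedged sketch of the kind of bounds checking meant here (layout constants are illustrative, not RPM's real header format): verify that the counts read from an untrusted blob actually fit inside the bytes that were handed to us before deriving any offsets from them.

#include <stdint.h>
#include <stddef.h>

static int blob_bounds_ok(size_t blob_len, uint32_t index_count, uint32_t data_len)
{
    const size_t entry_size = 16;    /* illustrative per-index entry size */
    size_t need = 8;                 /* illustrative fixed header size */

    if (blob_len < need)
        return 0;
    if (index_count > (blob_len - need) / entry_size)
        return 0;                    /* index table would overflow the blob */
    need += (size_t)index_count * entry_size;
    if (data_len > blob_len - need)
        return 0;                    /* data region would overflow the blob */
    return 1;
}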
parse_sattr3(netdissect_options *ndo, const uint32_t *dp, struct nfsv3_sattr *sa3) { ND_TCHECK(dp[0]); sa3->sa_modeset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_modeset) { ND_TCHECK(dp[0]); sa3->sa_mode = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_uidset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_uidset) { ND_TCHECK(dp[0]); sa3->sa_uid = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_gidset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_gidset) { ND_TCHECK(dp[0]); sa3->sa_gid = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_sizeset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_sizeset) { ND_TCHECK(dp[0]); sa3->sa_size = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_atimetype = EXTRACT_32BITS(dp); dp++; if (sa3->sa_atimetype == NFSV3SATTRTIME_TOCLIENT) { ND_TCHECK(dp[1]); sa3->sa_atime.nfsv3_sec = EXTRACT_32BITS(dp); dp++; sa3->sa_atime.nfsv3_nsec = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_mtimetype = EXTRACT_32BITS(dp); dp++; if (sa3->sa_mtimetype == NFSV3SATTRTIME_TOCLIENT) { ND_TCHECK(dp[1]); sa3->sa_mtime.nfsv3_sec = EXTRACT_32BITS(dp); dp++; sa3->sa_mtime.nfsv3_nsec = EXTRACT_32BITS(dp); dp++; } return dp; trunc: return NULL; }
0
[ "CWE-125", "CWE-787" ]
tcpdump
7a923447fd49a069a0fd3b6c3547438ab5ee2123
276,086,903,060,296,920,000,000,000,000,000,000,000
65
CVE-2017-13001/NFS: Don't copy more data than is in the file handle. Also, put the buffer on the stack; no reason to make it static. (65 bytes isn't a lot.) This fixes a buffer over-read discovered by Kamil Frankowicz. Add a test using the capture file supplied by the reporter(s).
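A small sketch of the described change (sizes and names are illustrative): keep the destination on the stack and never copy more bytes than either the buffer can hold or the packet actually provides for the file handle.

#include <string.h>
#include <stdint.h>

#define FH_MAX 64

static size_t copy_file_handle(uint8_t dst[FH_MAX], const uint8_t *pkt,
                               uint32_t claimed_len)
{
    size_t n = claimed_len < FH_MAX ? claimed_len : FH_MAX;
    memcpy(dst, pkt, n);             /* bounded by both the claim and FH_MAX */
    return n;
}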
EC_GROUP *ec_asn1_pkparameters2group(const ECPKPARAMETERS *params) { EC_GROUP *ret = NULL; int tmp = 0; if (params == NULL) { ECerr(EC_F_EC_ASN1_PKPARAMETERS2GROUP, EC_R_MISSING_PARAMETERS); return NULL; } if (params->type == 0) { /* the curve is given by an OID */ tmp = OBJ_obj2nid(params->value.named_curve); if ((ret = EC_GROUP_new_by_curve_name(tmp)) == NULL) { ECerr(EC_F_EC_ASN1_PKPARAMETERS2GROUP, EC_R_EC_GROUP_NEW_BY_NAME_FAILURE); return NULL; } EC_GROUP_set_asn1_flag(ret, OPENSSL_EC_NAMED_CURVE); } else if (params->type == 1) { /* the parameters are given by a * ECPARAMETERS structure */ ret = ec_asn1_parameters2group(params->value.parameters); if (!ret) { ECerr(EC_F_EC_ASN1_PKPARAMETERS2GROUP, ERR_R_EC_LIB); return NULL; } EC_GROUP_set_asn1_flag(ret, 0x0); } else if (params->type == 2) { /* implicitlyCA */ return NULL; } else { ECerr(EC_F_EC_ASN1_PKPARAMETERS2GROUP, EC_R_ASN1_ERROR); return NULL; } return ret; }
0
[]
openssl
1b4a8df38fc9ab3c089ca5765075ee53ec5bd66a
17,050,013,269,981,631,000,000,000,000,000,000,000
35
Fix a failure to NULL a pointer freed on error. Inspired by BoringSSL commit 517073cd4b by Eric Roman <[email protected]> CVE-2015-0209 Reviewed-by: Emilia Käsper <[email protected]>
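A minimal sketch of the pattern this commit restores (stand-in types, not the OpenSSL EC code): when a later initialisation step fails, free the partially built object and also clear the caller-visible pointer so it cannot be used or freed again.

#include <stdlib.h>
#include <stddef.h>

struct group { int placeholder; };

static int init_group(struct group *g) { (void)g; return -1; }  /* stand-in failing step */

static int build_group(struct group **out)
{
    struct group *g = malloc(sizeof(*g));
    if (g == NULL)
        return -1;
    *out = g;
    if (init_group(g) != 0) {
        free(g);
        *out = NULL;                 /* the fix: no dangling pointer on error */
        return -1;
    }
    return 0;
}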
static inline void skb_put_u8(struct sk_buff *skb, u8 val) { *(u8 *)skb_put(skb, 1) = val; }
0
[ "CWE-20" ]
linux
2b16f048729bf35e6c28a40cbfad07239f9dcd90
330,014,271,569,251,200,000,000,000,000,000,000,000
4
net: create skb_gso_validate_mac_len() If you take a GSO skb, and split it into packets, will the MAC length (L2 + L3 + L4 headers + payload) of those packets be small enough to fit within a given length? Move skb_gso_mac_seglen() to skbuff.h with other related functions like skb_gso_network_seglen() so we can use it, and then create skb_gso_validate_mac_len to do the full calculation. Signed-off-by: Daniel Axtens <[email protected]> Signed-off-by: David S. Miller <[email protected]>
void rfbRunEventLoop(rfbScreenInfoPtr screen, long usec, rfbBool runInBackground) { if(runInBackground) { #ifdef LIBVNCSERVER_HAVE_LIBPTHREAD pthread_t listener_thread; screen->backgroundLoop = TRUE; pthread_create(&listener_thread, NULL, listenerRun, screen); return; #else rfbErr("Can't run in background, because I don't have PThreads!\n"); return; #endif } if(usec<0) usec=screen->deferUpdateTime*1000; while(rfbIsActive(screen)) rfbProcessEvents(screen,usec); }
0
[]
libvncserver
804335f9d296440bb708ca844f5d89b58b50b0c6
200,756,014,567,165,600,000,000,000,000,000,000,000
22
Thread safety for zrle, zlib, tight. Proposed tight security type fix for debian bug 517422.
MONGO_EXPORT int bson_append_finish_object( bson *b ) { char *start; int i; if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR; bson_append_byte( b , 0 ); start = b->data + b->stack[ --b->stackPos ]; i = b->cur - start; bson_little_endian32( start, &i ); return BSON_OK; }
1
[ "CWE-190" ]
mongo-c-driver-legacy
1a1f5e26a4309480d88598913f9eebf9e9cba8ca
249,870,922,522,328,730,000,000,000,000,000,000,000
12
don't mix up int and size_t (first pass to fix that)
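A short illustration of the int/size_t separation the message refers to (names are illustrative): keep lengths in size_t internally and narrow to the wire format's signed 32-bit field only after an explicit range check.

#include <stdint.h>
#include <stddef.h>

static int narrow_length(size_t len, int32_t *out)
{
    if (len > INT32_MAX)
        return -1;                   /* would not fit the 32-bit wire field */
    *out = (int32_t)len;
    return 0;
}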
static bool replmd_update_is_newer(const struct GUID *current_invocation_id, const struct GUID *update_invocation_id, uint32_t current_version, uint32_t update_version, NTTIME current_change_time, NTTIME update_change_time) { if (update_version != current_version) { return update_version > current_version; } if (update_change_time != current_change_time) { return update_change_time > current_change_time; } return GUID_compare(update_invocation_id, current_invocation_id) > 0; }
0
[ "CWE-200" ]
samba
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
43,495,347,008,274,120,000,000,000,000,000,000,000
15
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message This aims to minimise usage of the error-prone pattern of searching for a just-added message element in order to make modifications to it (and potentially finding the wrong element). BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009 Signed-off-by: Joseph Sutton <[email protected]>
isdn_net_reset(struct net_device *dev) { #ifdef CONFIG_ISDN_X25 struct concap_device_ops *dops = ((isdn_net_local *)netdev_priv(dev))->dops; struct concap_proto *cprot = ((isdn_net_local *)netdev_priv(dev))->netdev->cprot; #endif #ifdef CONFIG_ISDN_X25 if (cprot && cprot->pops && dops) cprot->pops->restart(cprot, dev, dops); #endif }
0
[ "CWE-119" ]
linux
9f5af546e6acc30f075828cb58c7f09665033967
81,136,938,210,780,960,000,000,000,000,000,000,000
13
isdn/i4l: fix buffer overflow This fixes a potential buffer overflow in isdn_net.c caused by an unbounded strcpy. [ ISDN seems to be effectively unmaintained, and the I4L driver in particular is long deprecated, but in case somebody uses this.. - Linus ] Signed-off-by: Jiten Thakkar <[email protected]> Signed-off-by: Annie Cherkaev <[email protected]> Cc: Karsten Keil <[email protected]> Cc: Kees Cook <[email protected]> Cc: [email protected] Signed-off-by: Linus Torvalds <[email protected]>
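A hedged sketch of the bounded-copy remedy for an unbounded strcpy into a fixed-size field (names are made up, not the i4l interface structures): reject oversized input instead of writing past the end of the destination.

#include <stdio.h>
#include <string.h>

struct iface { char name[16]; };

static int set_iface_name(struct iface *ifc, const char *name)
{
    if (strlen(name) >= sizeof(ifc->name))
        return -1;                   /* reject instead of overflowing */
    snprintf(ifc->name, sizeof(ifc->name), "%s", name);
    return 0;
}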
_CheckSetOverlay(char **wire_inout, XkbGeometryPtr geom, XkbSectionPtr section, ClientPtr client) { register int r; char *wire; XkbOverlayPtr ol; xkbOverlayWireDesc *olWire; xkbOverlayRowWireDesc *rWire; wire = *wire_inout; olWire = (xkbOverlayWireDesc *) wire; if (client->swapped) { swapl(&olWire->name); } CHK_ATOM_ONLY(olWire->name); ol = XkbAddGeomOverlay(section, olWire->name, olWire->nRows); rWire = (xkbOverlayRowWireDesc *) &olWire[1]; for (r = 0; r < olWire->nRows; r++) { register int k; xkbOverlayKeyWireDesc *kWire; XkbOverlayRowPtr row; if (rWire->rowUnder > section->num_rows) { client->errorValue = _XkbErrCode4(0x20, r, section->num_rows, rWire->rowUnder); return BadMatch; } row = XkbAddGeomOverlayRow(ol, rWire->rowUnder, rWire->nKeys); kWire = (xkbOverlayKeyWireDesc *) &rWire[1]; for (k = 0; k < rWire->nKeys; k++, kWire++) { if (XkbAddGeomOverlayKey(ol, row, (char *) kWire->over, (char *) kWire->under) == NULL) { client->errorValue = _XkbErrCode3(0x21, r, k); return BadMatch; } } rWire = (xkbOverlayRowWireDesc *) kWire; } olWire = (xkbOverlayWireDesc *) rWire; wire = (char *) olWire; *wire_inout = wire; return Success; }
0
[ "CWE-119" ]
xserver
f7cd1276bbd4fe3a9700096dec33b52b8440788d
7,474,808,949,418,304,000,000,000,000,000,000,000
44
Correct bounds checking in XkbSetNames() CVE-2020-14345 / ZDI 11428 This vulnerability was discovered by: Jan-Niklas Sohn working with Trend Micro Zero Day Initiative Signed-off-by: Matthieu Herrb <[email protected]>
g_getchar(void) { return getchar(); }
0
[]
xrdp
d8f9e8310dac362bb9578763d1024178f94f4ecc
165,085,848,082,816,040,000,000,000,000,000,000,000
4
move temp files from /tmp to /tmp/.xrdp
bool directory_has_default_acl(connection_struct *conn, const char *fname) { SMB_ACL_T def_acl = SMB_VFS_SYS_ACL_GET_FILE( conn, fname, SMB_ACL_TYPE_DEFAULT); bool has_acl = False; SMB_ACL_ENTRY_T entry; if (def_acl != NULL && (SMB_VFS_SYS_ACL_GET_ENTRY(conn, def_acl, SMB_ACL_FIRST_ENTRY, &entry) == 1)) { has_acl = True; } if (def_acl) { SMB_VFS_SYS_ACL_FREE_ACL(conn, def_acl); } return has_acl; }
0
[ "CWE-264" ]
samba
d6c28913f3109d1327a3d1369b6eafd3874b2dca
212,233,100,674,576,030,000,000,000,000,000,000,000
15
Bug 6488: acl_group_override() call in posix acls references an uninitialized variable. (cherry picked from commit f92195e3a1baaddda47a5d496f9488c8445b41ad)
inline unsigned int _rand(cimg_ulong *const p_rng) { *p_rng = *p_rng*1103515245 + 12345U; return (unsigned int)*p_rng; }
0
[ "CWE-119", "CWE-787" ]
CImg
ac8003393569aba51048c9d67e1491559877b1d1
287,415,775,544,160,340,000,000,000,000,000,000,000
4
.
static void test_message_parser_long_mime_boundary(void) { /* Close the boundaries in wrong reverse order. But because all boundaries are actually truncated to the same size (..890) it works the same as if all of them were duplicate boundaries. */ static const char input_msg[] = "Content-Type: multipart/mixed; boundary=\"1234567890123456789012345678901234567890123456789012345678901234567890123456789012\"\n" "\n" "--1234567890123456789012345678901234567890123456789012345678901234567890123456789012\n" "Content-Type: multipart/mixed; boundary=\"123456789012345678901234567890123456789012345678901234567890123456789012345678901\"\n" "\n" "--123456789012345678901234567890123456789012345678901234567890123456789012345678901\n" "Content-Type: multipart/mixed; boundary=\"12345678901234567890123456789012345678901234567890123456789012345678901234567890\"\n" "\n" "--12345678901234567890123456789012345678901234567890123456789012345678901234567890\n" "Content-Type: text/plain\n" "\n" "1\n" "--1234567890123456789012345678901234567890123456789012345678901234567890123456789012\n" "Content-Type: text/plain\n" "\n" "22\n" "--123456789012345678901234567890123456789012345678901234567890123456789012345678901\n" "Content-Type: text/plain\n" "\n" "333\n" "--12345678901234567890123456789012345678901234567890123456789012345678901234567890\n" "Content-Type: text/plain\n" "\n" "4444\n"; struct istream *input; struct message_part *parts, *part; struct message_size body_size, header_size; pool_t pool; test_begin("message parser long mime boundary"); pool = pool_alloconly_create("message parser", 10240); input = test_istream_create(input_msg); test_assert(message_parse_stream(pool, input, &set_empty, FALSE, &parts) < 0); i_stream_seek(input, 0); test_message_parser_get_sizes(input, &body_size, &header_size, FALSE); part = parts; test_assert(part->children_count == 6); test_assert(part->flags == (MESSAGE_PART_FLAG_MULTIPART | MESSAGE_PART_FLAG_IS_MIME)); test_assert(part->header_size.lines == 2); test_assert(part->header_size.physical_size == 126); test_assert(part->header_size.virtual_size == 126+2); test_assert(part->body_size.lines == 22); test_assert(part->body_size.physical_size == 871); test_assert(part->body_size.virtual_size == 871+22); test_message_parser_assert_sizes(part, &body_size, &header_size); part = parts->children; test_assert(part->children_count == 5); test_assert(part->flags == (MESSAGE_PART_FLAG_MULTIPART | MESSAGE_PART_FLAG_IS_MIME)); test_assert(part->header_size.lines == 2); test_assert(part->header_size.physical_size == 125); test_assert(part->header_size.virtual_size == 125+2); test_assert(part->body_size.lines == 19); test_assert(part->body_size.physical_size == 661); test_assert(part->body_size.virtual_size == 661+19); part = parts->children->children; test_assert(part->children_count == 4); test_assert(part->flags == (MESSAGE_PART_FLAG_MULTIPART | MESSAGE_PART_FLAG_IS_MIME)); test_assert(part->header_size.lines == 2); test_assert(part->header_size.physical_size == 124); test_assert(part->header_size.virtual_size == 124+2); test_assert(part->body_size.lines == 16); test_assert(part->body_size.physical_size == 453); test_assert(part->body_size.virtual_size == 453+16); part = parts->children->children->children; for (unsigned int i = 1; i <= 3; i++, part = part->next) { test_assert(part->children_count == 0); test_assert(part->flags == (MESSAGE_PART_FLAG_TEXT | MESSAGE_PART_FLAG_IS_MIME)); test_assert(part->header_size.lines == 2); test_assert(part->header_size.physical_size == 26); 
test_assert(part->header_size.virtual_size == 26+2); test_assert(part->body_size.lines == 0); test_assert(part->body_size.physical_size == i); test_assert(part->body_size.virtual_size == i); } test_parsed_parts(input, parts); i_stream_unref(&input); pool_unref(&pool); test_end(); }
0
[ "CWE-20" ]
core
fb97a1cddbda4019e327fa736972a1c7433fedaa
8,256,913,654,215,706,000,000,000,000,000,000,000
92
lib-mail: message-parser - Fix assert-crash when enforcing MIME part limit The limit could have been exceeded with message/rfc822 parts.